ath10k: defer irq registration until hif start()
drivers/net/wireless/ath/ath10k/pci.c
/*
 * Copyright (c) 2005-2011 Atheros Communications Inc.
 * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <linux/pci.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/bitops.h>

#include "core.h"
#include "debug.h"

#include "targaddrs.h"
#include "bmi.h"

#include "hif.h"
#include "htc.h"

#include "ce.h"
#include "pci.h"

static unsigned int ath10k_target_ps;
module_param(ath10k_target_ps, uint, 0644);
MODULE_PARM_DESC(ath10k_target_ps, "Enable ath10k Target (SoC) PS option");
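/*
 * Usage sketch (assuming this file builds into the ath10k_pci module):
 * load with "modprobe ath10k_pci ath10k_target_ps=1", or flip it at
 * runtime via /sys/module/ath10k_pci/parameters/ath10k_target_ps.
 */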

#define QCA988X_2_0_DEVICE_ID   (0x003c)

static const struct pci_device_id ath10k_pci_id_table[] = {
        { PCI_VDEVICE(ATHEROS, QCA988X_2_0_DEVICE_ID) }, /* PCI-E QCA988X V2 */
        {0}
};

static int ath10k_pci_diag_read_access(struct ath10k *ar, u32 address,
                                       u32 *data);

static void ath10k_pci_process_ce(struct ath10k *ar);
static int ath10k_pci_post_rx(struct ath10k *ar);
static int ath10k_pci_post_rx_pipe(struct ath10k_pci_pipe *pipe_info,
                                   int num);
static void ath10k_pci_rx_pipe_cleanup(struct ath10k_pci_pipe *pipe_info);
static void ath10k_pci_stop_ce(struct ath10k *ar);
static int ath10k_pci_device_reset(struct ath10k *ar);
static int ath10k_pci_wait_for_target_init(struct ath10k *ar);
static int ath10k_pci_init_irq(struct ath10k *ar);
static int ath10k_pci_deinit_irq(struct ath10k *ar);
static int ath10k_pci_request_irq(struct ath10k *ar);
static void ath10k_pci_free_irq(struct ath10k *ar);
static int ath10k_pci_bmi_wait(struct ath10k_ce_pipe *tx_pipe,
                               struct ath10k_ce_pipe *rx_pipe,
                               struct bmi_xfer *xfer);
static void ath10k_pci_cleanup_ce(struct ath10k *ar);

static const struct ce_attr host_ce_config_wlan[] = {
        /* CE0: host->target HTC control and raw streams */
        {
                .flags = CE_ATTR_FLAGS,
                .src_nentries = 16,
                .src_sz_max = 256,
                .dest_nentries = 0,
        },

        /* CE1: target->host HTT + HTC control */
        {
                .flags = CE_ATTR_FLAGS,
                .src_nentries = 0,
                .src_sz_max = 512,
                .dest_nentries = 512,
        },

        /* CE2: target->host WMI */
        {
                .flags = CE_ATTR_FLAGS,
                .src_nentries = 0,
                .src_sz_max = 2048,
                .dest_nentries = 32,
        },

        /* CE3: host->target WMI */
        {
                .flags = CE_ATTR_FLAGS,
                .src_nentries = 32,
                .src_sz_max = 2048,
                .dest_nentries = 0,
        },

        /* CE4: host->target HTT */
        {
                .flags = CE_ATTR_FLAGS | CE_ATTR_DIS_INTR,
                .src_nentries = CE_HTT_H2T_MSG_SRC_NENTRIES,
                .src_sz_max = 256,
                .dest_nentries = 0,
        },

        /* CE5: unused */
        {
                .flags = CE_ATTR_FLAGS,
                .src_nentries = 0,
                .src_sz_max = 0,
                .dest_nentries = 0,
        },

        /* CE6: target autonomous hif_memcpy */
        {
                .flags = CE_ATTR_FLAGS,
                .src_nentries = 0,
                .src_sz_max = 0,
                .dest_nentries = 0,
        },

        /* CE7: ce_diag, the Diagnostic Window */
        {
                .flags = CE_ATTR_FLAGS,
                .src_nentries = 2,
                .src_sz_max = DIAG_TRANSFER_LIMIT,
                .dest_nentries = 2,
        },
};
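/*
 * The host table above and the target firmware table below describe the
 * same pipes and are meant to stay in sync; CE7 (the diagnostic window)
 * is host-only and therefore has no entry in the target configuration.
 */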

/* Target firmware's Copy Engine configuration. */
static const struct ce_pipe_config target_ce_config_wlan[] = {
        /* CE0: host->target HTC control and raw streams */
        {
                .pipenum = 0,
                .pipedir = PIPEDIR_OUT,
                .nentries = 32,
                .nbytes_max = 256,
                .flags = CE_ATTR_FLAGS,
                .reserved = 0,
        },

        /* CE1: target->host HTT + HTC control */
        {
                .pipenum = 1,
                .pipedir = PIPEDIR_IN,
                .nentries = 32,
                .nbytes_max = 512,
                .flags = CE_ATTR_FLAGS,
                .reserved = 0,
        },

        /* CE2: target->host WMI */
        {
                .pipenum = 2,
                .pipedir = PIPEDIR_IN,
                .nentries = 32,
                .nbytes_max = 2048,
                .flags = CE_ATTR_FLAGS,
                .reserved = 0,
        },

        /* CE3: host->target WMI */
        {
                .pipenum = 3,
                .pipedir = PIPEDIR_OUT,
                .nentries = 32,
                .nbytes_max = 2048,
                .flags = CE_ATTR_FLAGS,
                .reserved = 0,
        },

        /* CE4: host->target HTT */
        {
                .pipenum = 4,
                .pipedir = PIPEDIR_OUT,
                .nentries = 256,
                .nbytes_max = 256,
                .flags = CE_ATTR_FLAGS,
                .reserved = 0,
        },

        /* NB: 50% of src nentries, since tx has 2 frags */

        /* CE5: unused */
        {
                .pipenum = 5,
                .pipedir = PIPEDIR_OUT,
                .nentries = 32,
                .nbytes_max = 2048,
                .flags = CE_ATTR_FLAGS,
                .reserved = 0,
        },

        /* CE6: Reserved for target autonomous hif_memcpy */
        {
                .pipenum = 6,
                .pipedir = PIPEDIR_INOUT,
                .nentries = 32,
                .nbytes_max = 4096,
                .flags = CE_ATTR_FLAGS,
                .reserved = 0,
        },

        /* CE7 used only by Host */
};

static bool ath10k_pci_irq_pending(struct ath10k *ar)
{
        u32 cause;

        /* Check if the shared legacy irq is for us */
        cause = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
                                  PCIE_INTR_CAUSE_ADDRESS);
        if (cause & (PCIE_INTR_FIRMWARE_MASK | PCIE_INTR_CE_MASK_ALL))
                return true;

        return false;
}

/*
 * Diagnostic read/write access is provided for startup/config/debug usage.
 * Caller must guarantee proper alignment, when applicable, and single user
 * at any moment.
 */
static int ath10k_pci_diag_read_mem(struct ath10k *ar, u32 address, void *data,
                                    int nbytes)
{
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        int ret = 0;
        u32 buf;
        unsigned int completed_nbytes, orig_nbytes, remaining_bytes;
        unsigned int id;
        unsigned int flags;
        struct ath10k_ce_pipe *ce_diag;
        /* Host buffer address in CE space */
        u32 ce_data;
        dma_addr_t ce_data_base = 0;
        void *data_buf = NULL;
        int i;

        /*
         * This code cannot handle reads to non-memory space. Redirect to the
         * register read fn but preserve the multi word read capability of
         * this fn
         */
        if (address < DRAM_BASE_ADDRESS) {
                if (!IS_ALIGNED(address, 4) ||
                    !IS_ALIGNED((unsigned long)data, 4))
                        return -EIO;

                while ((nbytes >= 4) && ((ret = ath10k_pci_diag_read_access(
                                           ar, address, (u32 *)data)) == 0)) {
                        nbytes -= sizeof(u32);
                        address += sizeof(u32);
                        data += sizeof(u32);
                }
                return ret;
        }

        ce_diag = ar_pci->ce_diag;

        /*
         * Allocate a temporary bounce buffer to hold caller's data
         * to be DMA'ed from Target. This guarantees
         *   1) 4-byte alignment
         *   2) Buffer in DMA-able space
         */
        orig_nbytes = nbytes;
        data_buf = (unsigned char *)pci_alloc_consistent(ar_pci->pdev,
                                                         orig_nbytes,
                                                         &ce_data_base);

        if (!data_buf) {
                ret = -ENOMEM;
                goto done;
        }
        memset(data_buf, 0, orig_nbytes);

        remaining_bytes = orig_nbytes;
        ce_data = ce_data_base;
        while (remaining_bytes) {
                nbytes = min_t(unsigned int, remaining_bytes,
                               DIAG_TRANSFER_LIMIT);

                ret = ath10k_ce_recv_buf_enqueue(ce_diag, NULL, ce_data);
                if (ret != 0)
                        goto done;

                /* Request CE to send from Target(!) address to Host buffer */
                /*
                 * The address supplied by the caller is in the
                 * Target CPU virtual address space.
                 *
                 * In order to use this address with the diagnostic CE,
                 * convert it from Target CPU virtual address space
                 * to CE address space
                 */
                ath10k_pci_wake(ar);
                address = TARG_CPU_SPACE_TO_CE_SPACE(ar, ar_pci->mem,
                                                     address);
                ath10k_pci_sleep(ar);

                ret = ath10k_ce_send(ce_diag, NULL, (u32)address, nbytes, 0,
                                     0);
                if (ret)
                        goto done;

                i = 0;
                while (ath10k_ce_completed_send_next(ce_diag, NULL, &buf,
                                                     &completed_nbytes,
                                                     &id) != 0) {
                        mdelay(1);
                        if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) {
                                ret = -EBUSY;
                                goto done;
                        }
                }

                if (nbytes != completed_nbytes) {
                        ret = -EIO;
                        goto done;
                }

                if (buf != (u32)address) {
                        ret = -EIO;
                        goto done;
                }

                i = 0;
                while (ath10k_ce_completed_recv_next(ce_diag, NULL, &buf,
                                                     &completed_nbytes,
                                                     &id, &flags) != 0) {
                        mdelay(1);

                        if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) {
                                ret = -EBUSY;
                                goto done;
                        }
                }

                if (nbytes != completed_nbytes) {
                        ret = -EIO;
                        goto done;
                }

                if (buf != ce_data) {
                        ret = -EIO;
                        goto done;
                }

                remaining_bytes -= nbytes;
                address += nbytes;
                ce_data += nbytes;
        }

done:
        if (ret == 0) {
                /* Copy data from allocated DMA buf to caller's buf */
                WARN_ON_ONCE(orig_nbytes & 3);
                for (i = 0; i < orig_nbytes / sizeof(__le32); i++) {
                        ((u32 *)data)[i] =
                                __le32_to_cpu(((__le32 *)data_buf)[i]);
                }
        } else {
                ath10k_dbg(ATH10K_DBG_PCI, "%s failure (0x%x)\n",
                           __func__, address);
        }

        if (data_buf)
                pci_free_consistent(ar_pci->pdev, orig_nbytes,
                                    data_buf, ce_data_base);

        return ret;
}

/* Read 4-byte aligned data from Target memory or register */
static int ath10k_pci_diag_read_access(struct ath10k *ar, u32 address,
                                       u32 *data)
{
        /* Assume range doesn't cross this boundary */
        if (address >= DRAM_BASE_ADDRESS)
                return ath10k_pci_diag_read_mem(ar, address, data, sizeof(u32));

        ath10k_pci_wake(ar);
        *data = ath10k_pci_read32(ar, address);
        ath10k_pci_sleep(ar);
        return 0;
}
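/*
 * Usage sketch, mirroring ath10k_pci_hif_dump_area() below: fetch a
 * single host interest word from target memory:
 *
 *        u32 addr = host_interest_item_address(HI_ITEM(hi_failure_state));
 *        u32 val;
 *        int ret = ath10k_pci_diag_read_access(ar, addr, &val);
 */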

static int ath10k_pci_diag_write_mem(struct ath10k *ar, u32 address,
                                     const void *data, int nbytes)
{
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        int ret = 0;
        u32 buf;
        unsigned int completed_nbytes, orig_nbytes, remaining_bytes;
        unsigned int id;
        unsigned int flags;
        struct ath10k_ce_pipe *ce_diag;
        void *data_buf = NULL;
        u32 ce_data;    /* Host buffer address in CE space */
        dma_addr_t ce_data_base = 0;
        int i;

        ce_diag = ar_pci->ce_diag;

        /*
         * Allocate a temporary bounce buffer to hold caller's data
         * to be DMA'ed to Target. This guarantees
         *   1) 4-byte alignment
         *   2) Buffer in DMA-able space
         */
        orig_nbytes = nbytes;
        data_buf = (unsigned char *)pci_alloc_consistent(ar_pci->pdev,
                                                         orig_nbytes,
                                                         &ce_data_base);
        if (!data_buf) {
                ret = -ENOMEM;
                goto done;
        }

        /* Copy caller's data to allocated DMA buf */
        WARN_ON_ONCE(orig_nbytes & 3);
        for (i = 0; i < orig_nbytes / sizeof(__le32); i++)
                ((__le32 *)data_buf)[i] = __cpu_to_le32(((u32 *)data)[i]);

        /*
         * The address supplied by the caller is in the
         * Target CPU virtual address space.
         *
         * In order to use this address with the diagnostic CE,
         * convert it from
         *    Target CPU virtual address space
         * to
         *    CE address space
         */
        ath10k_pci_wake(ar);
        address = TARG_CPU_SPACE_TO_CE_SPACE(ar, ar_pci->mem, address);
        ath10k_pci_sleep(ar);

        remaining_bytes = orig_nbytes;
        ce_data = ce_data_base;
        while (remaining_bytes) {
                /* FIXME: check cast */
                nbytes = min_t(int, remaining_bytes, DIAG_TRANSFER_LIMIT);

                /* Set up to receive directly into Target(!) address */
                ret = ath10k_ce_recv_buf_enqueue(ce_diag, NULL, address);
                if (ret != 0)
                        goto done;

                /*
                 * Request CE to send caller-supplied data that
                 * was copied to bounce buffer to Target(!) address.
                 */
                ret = ath10k_ce_send(ce_diag, NULL, (u32)ce_data,
                                     nbytes, 0, 0);
                if (ret != 0)
                        goto done;

                i = 0;
                while (ath10k_ce_completed_send_next(ce_diag, NULL, &buf,
                                                     &completed_nbytes,
                                                     &id) != 0) {
                        mdelay(1);

                        if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) {
                                ret = -EBUSY;
                                goto done;
                        }
                }

                if (nbytes != completed_nbytes) {
                        ret = -EIO;
                        goto done;
                }

                if (buf != ce_data) {
                        ret = -EIO;
                        goto done;
                }

                i = 0;
                while (ath10k_ce_completed_recv_next(ce_diag, NULL, &buf,
                                                     &completed_nbytes,
                                                     &id, &flags) != 0) {
                        mdelay(1);

                        if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) {
                                ret = -EBUSY;
                                goto done;
                        }
                }

                if (nbytes != completed_nbytes) {
                        ret = -EIO;
                        goto done;
                }

                if (buf != address) {
                        ret = -EIO;
                        goto done;
                }

                remaining_bytes -= nbytes;
                address += nbytes;
                ce_data += nbytes;
        }

done:
        if (data_buf) {
                pci_free_consistent(ar_pci->pdev, orig_nbytes, data_buf,
                                    ce_data_base);
        }

        if (ret != 0)
                ath10k_dbg(ATH10K_DBG_PCI, "%s failure (0x%x)\n", __func__,
                           address);

        return ret;
}

/* Write 4-byte data to Target memory or register */
static int ath10k_pci_diag_write_access(struct ath10k *ar, u32 address,
                                        u32 data)
{
        /* Assume range doesn't cross this boundary */
        if (address >= DRAM_BASE_ADDRESS)
                return ath10k_pci_diag_write_mem(ar, address, &data,
                                                 sizeof(u32));

        ath10k_pci_wake(ar);
        ath10k_pci_write32(ar, address, data);
        ath10k_pci_sleep(ar);
        return 0;
}

static bool ath10k_pci_target_is_awake(struct ath10k *ar)
{
        void __iomem *mem = ath10k_pci_priv(ar)->mem;
        u32 val;

        val = ioread32(mem + PCIE_LOCAL_BASE_ADDRESS +
                       RTC_STATE_ADDRESS);
        return (RTC_STATE_V_GET(val) == RTC_STATE_V_ON);
}

int ath10k_do_pci_wake(struct ath10k *ar)
{
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        void __iomem *pci_addr = ar_pci->mem;
        int tot_delay = 0;
        int curr_delay = 5;

        if (atomic_read(&ar_pci->keep_awake_count) == 0) {
                /* Force AWAKE */
                iowrite32(PCIE_SOC_WAKE_V_MASK,
                          pci_addr + PCIE_LOCAL_BASE_ADDRESS +
                          PCIE_SOC_WAKE_ADDRESS);
        }
        atomic_inc(&ar_pci->keep_awake_count);

        if (ar_pci->verified_awake)
                return 0;

        for (;;) {
                if (ath10k_pci_target_is_awake(ar)) {
                        ar_pci->verified_awake = true;
                        return 0;
                }

                if (tot_delay > PCIE_WAKE_TIMEOUT) {
                        ath10k_warn("target took longer than %d us to wake up (awake count %d)\n",
                                    PCIE_WAKE_TIMEOUT,
                                    atomic_read(&ar_pci->keep_awake_count));
                        return -ETIMEDOUT;
                }

                udelay(curr_delay);
                tot_delay += curr_delay;

                if (curr_delay < 50)
                        curr_delay += 5;
        }
}

void ath10k_do_pci_sleep(struct ath10k *ar)
{
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        void __iomem *pci_addr = ar_pci->mem;

        if (atomic_dec_and_test(&ar_pci->keep_awake_count)) {
                /* Allow sleep */
                ar_pci->verified_awake = false;
                iowrite32(PCIE_SOC_WAKE_RESET,
                          pci_addr + PCIE_LOCAL_BASE_ADDRESS +
                          PCIE_SOC_WAKE_ADDRESS);
        }
}
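/*
 * The wake/sleep pair is refcounted through keep_awake_count, so calls
 * may nest; every successful wake must be balanced by a sleep, as in the
 * diag helpers above, e.g.:
 *
 *        ath10k_pci_wake(ar);
 *        val = ath10k_pci_read32(ar, addr);
 *        ath10k_pci_sleep(ar);
 */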

/*
 * FIXME: Handle OOM properly.
 */
static inline
struct ath10k_pci_compl *get_free_compl(struct ath10k_pci_pipe *pipe_info)
{
        struct ath10k_pci_compl *compl = NULL;

        spin_lock_bh(&pipe_info->pipe_lock);
        if (list_empty(&pipe_info->compl_free)) {
                ath10k_warn("Completion buffers are full\n");
                goto exit;
        }
        compl = list_first_entry(&pipe_info->compl_free,
                                 struct ath10k_pci_compl, list);
        list_del(&compl->list);
exit:
        spin_unlock_bh(&pipe_info->pipe_lock);
        return compl;
}

/* Called by lower (CE) layer when a send to Target completes. */
static void ath10k_pci_ce_send_done(struct ath10k_ce_pipe *ce_state)
{
        struct ath10k *ar = ce_state->ar;
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        struct ath10k_pci_pipe *pipe_info = &ar_pci->pipe_info[ce_state->id];
        struct ath10k_pci_compl *compl;
        void *transfer_context;
        u32 ce_data;
        unsigned int nbytes;
        unsigned int transfer_id;

        while (ath10k_ce_completed_send_next(ce_state, &transfer_context,
                                             &ce_data, &nbytes,
                                             &transfer_id) == 0) {
                compl = get_free_compl(pipe_info);
                if (!compl)
                        break;

                compl->state = ATH10K_PCI_COMPL_SEND;
                compl->ce_state = ce_state;
                compl->pipe_info = pipe_info;
                compl->skb = transfer_context;
                compl->nbytes = nbytes;
                compl->transfer_id = transfer_id;
                compl->flags = 0;

                /*
                 * Add the completion to the processing queue.
                 */
                spin_lock_bh(&ar_pci->compl_lock);
                list_add_tail(&compl->list, &ar_pci->compl_process);
                spin_unlock_bh(&ar_pci->compl_lock);
        }

        ath10k_pci_process_ce(ar);
}

/* Called by lower (CE) layer when data is received from the Target. */
static void ath10k_pci_ce_recv_data(struct ath10k_ce_pipe *ce_state)
{
        struct ath10k *ar = ce_state->ar;
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        struct ath10k_pci_pipe *pipe_info = &ar_pci->pipe_info[ce_state->id];
        struct ath10k_pci_compl *compl;
        struct sk_buff *skb;
        void *transfer_context;
        u32 ce_data;
        unsigned int nbytes;
        unsigned int transfer_id;
        unsigned int flags;

        while (ath10k_ce_completed_recv_next(ce_state, &transfer_context,
                                             &ce_data, &nbytes, &transfer_id,
                                             &flags) == 0) {
                compl = get_free_compl(pipe_info);
                if (!compl)
                        break;

                compl->state = ATH10K_PCI_COMPL_RECV;
                compl->ce_state = ce_state;
                compl->pipe_info = pipe_info;
                compl->skb = transfer_context;
                compl->nbytes = nbytes;
                compl->transfer_id = transfer_id;
                compl->flags = flags;

                skb = transfer_context;
                dma_unmap_single(ar->dev, ATH10K_SKB_CB(skb)->paddr,
                                 skb->len + skb_tailroom(skb),
                                 DMA_FROM_DEVICE);
                /*
                 * Add the completion to the processing queue.
                 */
                spin_lock_bh(&ar_pci->compl_lock);
                list_add_tail(&compl->list, &ar_pci->compl_process);
                spin_unlock_bh(&ar_pci->compl_lock);
        }

        ath10k_pci_process_ce(ar);
}

/* Send the first nbytes bytes of the buffer */
static int ath10k_pci_hif_send_head(struct ath10k *ar, u8 pipe_id,
                                    unsigned int transfer_id,
                                    unsigned int bytes, struct sk_buff *nbuf)
{
        struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(nbuf);
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        struct ath10k_pci_pipe *pipe_info = &(ar_pci->pipe_info[pipe_id]);
        struct ath10k_ce_pipe *ce_hdl = pipe_info->ce_hdl;
        unsigned int len;
        u32 flags = 0;
        int ret;

        len = min(bytes, nbuf->len);
        bytes -= len;

        if (len & 3)
                ath10k_warn("skb not aligned to 4-byte boundary (%d)\n", len);

        ath10k_dbg(ATH10K_DBG_PCI,
                   "pci send data vaddr %p paddr 0x%llx len %d as %d bytes\n",
                   nbuf->data, (unsigned long long)skb_cb->paddr,
                   nbuf->len, len);
        ath10k_dbg_dump(ATH10K_DBG_PCI_DUMP, NULL,
                        "ath10k tx: data: ",
                        nbuf->data, nbuf->len);

        ret = ath10k_ce_send(ce_hdl, nbuf, skb_cb->paddr, len, transfer_id,
                             flags);
        if (ret)
                ath10k_warn("failed to send sk_buff to CE: %p\n", nbuf);

        return ret;
}

static u16 ath10k_pci_hif_get_free_queue_number(struct ath10k *ar, u8 pipe)
{
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

        return ath10k_ce_num_free_src_entries(ar_pci->pipe_info[pipe].ce_hdl);
}

static void ath10k_pci_hif_dump_area(struct ath10k *ar)
{
        u32 reg_dump_area = 0;
        u32 reg_dump_values[REG_DUMP_COUNT_QCA988X] = {};
        u32 host_addr;
        int ret;
        u32 i;

        ath10k_err("firmware crashed!\n");
        ath10k_err("hardware name %s version 0x%x\n",
                   ar->hw_params.name, ar->target_version);
        ath10k_err("firmware version: %u.%u.%u.%u\n", ar->fw_version_major,
                   ar->fw_version_minor, ar->fw_version_release,
                   ar->fw_version_build);

        host_addr = host_interest_item_address(HI_ITEM(hi_failure_state));
        ret = ath10k_pci_diag_read_mem(ar, host_addr,
                                       &reg_dump_area, sizeof(u32));
        if (ret) {
                ath10k_err("failed to read FW dump area address: %d\n", ret);
                return;
        }

        ath10k_err("target register dump location: 0x%08X\n", reg_dump_area);

        ret = ath10k_pci_diag_read_mem(ar, reg_dump_area,
                                       &reg_dump_values[0],
                                       REG_DUMP_COUNT_QCA988X * sizeof(u32));
        if (ret != 0) {
                ath10k_err("failed to read FW dump area: %d\n", ret);
                return;
        }

        BUILD_BUG_ON(REG_DUMP_COUNT_QCA988X % 4);

        ath10k_err("target register dump:\n");
        for (i = 0; i < REG_DUMP_COUNT_QCA988X; i += 4)
                ath10k_err("[%02d]: 0x%08X 0x%08X 0x%08X 0x%08X\n",
                           i,
                           reg_dump_values[i],
                           reg_dump_values[i + 1],
                           reg_dump_values[i + 2],
                           reg_dump_values[i + 3]);

        queue_work(ar->workqueue, &ar->restart_work);
}

static void ath10k_pci_hif_send_complete_check(struct ath10k *ar, u8 pipe,
                                               int force)
{
        if (!force) {
                int resources;
                /*
                 * Decide whether to actually poll for completions, or just
                 * wait for a later chance.
                 * If there seem to be plenty of resources left, then just wait
                 * since checking involves reading a CE register, which is a
                 * relatively expensive operation.
                 */
                resources = ath10k_pci_hif_get_free_queue_number(ar, pipe);

                /*
                 * If at least 50% of the total resources are still available,
                 * don't bother checking again yet.
                 */
                if (resources > (host_ce_config_wlan[pipe].src_nentries >> 1))
                        return;
        }
        ath10k_ce_per_engine_service(ar, pipe);
}
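/*
 * Worked example: CE3 (host->target WMI) has 32 src entries, so a
 * non-forced check returns early as long as more than 16 are still free.
 */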

static void ath10k_pci_hif_set_callbacks(struct ath10k *ar,
                                         struct ath10k_hif_cb *callbacks)
{
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

        ath10k_dbg(ATH10K_DBG_PCI, "%s\n", __func__);

        memcpy(&ar_pci->msg_callbacks_current, callbacks,
               sizeof(ar_pci->msg_callbacks_current));
}

static int ath10k_pci_alloc_compl(struct ath10k *ar)
{
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        const struct ce_attr *attr;
        struct ath10k_pci_pipe *pipe_info;
        struct ath10k_pci_compl *compl;
        int i, pipe_num, completions;

        spin_lock_init(&ar_pci->compl_lock);
        INIT_LIST_HEAD(&ar_pci->compl_process);

        for (pipe_num = 0; pipe_num < CE_COUNT; pipe_num++) {
                pipe_info = &ar_pci->pipe_info[pipe_num];

                spin_lock_init(&pipe_info->pipe_lock);
                INIT_LIST_HEAD(&pipe_info->compl_free);

                /* Handle Diagnostic CE specially */
                if (pipe_info->ce_hdl == ar_pci->ce_diag)
                        continue;

                attr = &host_ce_config_wlan[pipe_num];
                completions = 0;

                if (attr->src_nentries)
                        completions += attr->src_nentries;

                if (attr->dest_nentries)
                        completions += attr->dest_nentries;

                for (i = 0; i < completions; i++) {
                        compl = kmalloc(sizeof(*compl), GFP_KERNEL);
                        if (!compl) {
                                ath10k_warn("No memory for completion state\n");
                                ath10k_pci_cleanup_ce(ar);
                                return -ENOMEM;
                        }

                        compl->state = ATH10K_PCI_COMPL_FREE;
                        list_add_tail(&compl->list, &pipe_info->compl_free);
                }
        }

        return 0;
}

static int ath10k_pci_setup_ce_irq(struct ath10k *ar)
{
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        const struct ce_attr *attr;
        struct ath10k_pci_pipe *pipe_info;
        int pipe_num, disable_interrupts;

        for (pipe_num = 0; pipe_num < CE_COUNT; pipe_num++) {
                pipe_info = &ar_pci->pipe_info[pipe_num];

                /* Handle Diagnostic CE specially */
                if (pipe_info->ce_hdl == ar_pci->ce_diag)
                        continue;

                attr = &host_ce_config_wlan[pipe_num];

                if (attr->src_nentries) {
                        disable_interrupts = attr->flags & CE_ATTR_DIS_INTR;
                        ath10k_ce_send_cb_register(pipe_info->ce_hdl,
                                                   ath10k_pci_ce_send_done,
                                                   disable_interrupts);
                }

                if (attr->dest_nentries)
                        ath10k_ce_recv_cb_register(pipe_info->ce_hdl,
                                                   ath10k_pci_ce_recv_data);
        }

        return 0;
}

static void ath10k_pci_kill_tasklet(struct ath10k *ar)
{
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        int i;

        tasklet_kill(&ar_pci->intr_tq);
        tasklet_kill(&ar_pci->msi_fw_err);

        for (i = 0; i < CE_COUNT; i++)
                tasklet_kill(&ar_pci->pipe_info[i].intr);
}

static void ath10k_pci_stop_ce(struct ath10k *ar)
{
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        struct ath10k_pci_compl *compl;
        struct sk_buff *skb;

        /* Mark pending completions as aborted, so that upper layers free up
         * their associated resources */
        spin_lock_bh(&ar_pci->compl_lock);
        list_for_each_entry(compl, &ar_pci->compl_process, list) {
                skb = compl->skb;
                ATH10K_SKB_CB(skb)->is_aborted = true;
        }
        spin_unlock_bh(&ar_pci->compl_lock);
}

static void ath10k_pci_cleanup_ce(struct ath10k *ar)
{
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        struct ath10k_pci_compl *compl, *tmp;
        struct ath10k_pci_pipe *pipe_info;
        struct sk_buff *netbuf;
        int pipe_num;

        /* Free pending completions. */
        spin_lock_bh(&ar_pci->compl_lock);
        if (!list_empty(&ar_pci->compl_process))
                ath10k_warn("pending completions still present! possible memory leaks.\n");

        list_for_each_entry_safe(compl, tmp, &ar_pci->compl_process, list) {
                list_del(&compl->list);
                netbuf = compl->skb;
                dev_kfree_skb_any(netbuf);
                kfree(compl);
        }
        spin_unlock_bh(&ar_pci->compl_lock);

        /* Free unused completions for each pipe. */
        for (pipe_num = 0; pipe_num < CE_COUNT; pipe_num++) {
                pipe_info = &ar_pci->pipe_info[pipe_num];

                spin_lock_bh(&pipe_info->pipe_lock);
                list_for_each_entry_safe(compl, tmp,
                                         &pipe_info->compl_free, list) {
                        list_del(&compl->list);
                        kfree(compl);
                }
                spin_unlock_bh(&pipe_info->pipe_lock);
        }
}

static void ath10k_pci_process_ce(struct ath10k *ar)
{
        struct ath10k_pci *ar_pci = ar->hif.priv;
        struct ath10k_hif_cb *cb = &ar_pci->msg_callbacks_current;
        struct ath10k_pci_compl *compl;
        struct sk_buff *skb;
        unsigned int nbytes;
        int ret, send_done = 0;

        /* Upper layers aren't ready to handle tx/rx completions in parallel so
         * we must serialize all completion processing. */

        spin_lock_bh(&ar_pci->compl_lock);
        if (ar_pci->compl_processing) {
                spin_unlock_bh(&ar_pci->compl_lock);
                return;
        }
        ar_pci->compl_processing = true;
        spin_unlock_bh(&ar_pci->compl_lock);

        for (;;) {
                spin_lock_bh(&ar_pci->compl_lock);
                if (list_empty(&ar_pci->compl_process)) {
                        spin_unlock_bh(&ar_pci->compl_lock);
                        break;
                }
                compl = list_first_entry(&ar_pci->compl_process,
                                         struct ath10k_pci_compl, list);
                list_del(&compl->list);
                spin_unlock_bh(&ar_pci->compl_lock);

                switch (compl->state) {
                case ATH10K_PCI_COMPL_SEND:
                        cb->tx_completion(ar,
                                          compl->skb,
                                          compl->transfer_id);
                        send_done = 1;
                        break;
                case ATH10K_PCI_COMPL_RECV:
                        ret = ath10k_pci_post_rx_pipe(compl->pipe_info, 1);
                        if (ret) {
                                ath10k_warn("failed to post RX buffer for pipe %d: %d\n",
                                            compl->pipe_info->pipe_num, ret);
                                break;
                        }

                        skb = compl->skb;
                        nbytes = compl->nbytes;

                        ath10k_dbg(ATH10K_DBG_PCI,
                                   "ath10k_pci_ce_recv_data netbuf=%p nbytes=%d\n",
                                   skb, nbytes);
                        ath10k_dbg_dump(ATH10K_DBG_PCI_DUMP, NULL,
                                        "ath10k rx: ", skb->data, nbytes);

                        if (skb->len + skb_tailroom(skb) >= nbytes) {
                                skb_trim(skb, 0);
                                skb_put(skb, nbytes);
                                cb->rx_completion(ar, skb,
                                                  compl->pipe_info->pipe_num);
                        } else {
                                ath10k_warn("rxed more than expected (nbytes %d, max %d)\n",
                                            nbytes,
                                            skb->len + skb_tailroom(skb));
                        }
                        break;
                case ATH10K_PCI_COMPL_FREE:
                        ath10k_warn("free completion cannot be processed\n");
                        break;
                default:
                        ath10k_warn("invalid completion state (%d)\n",
                                    compl->state);
                        break;
                }

                compl->state = ATH10K_PCI_COMPL_FREE;

                /*
                 * Add completion back to the pipe's free list.
                 */
                spin_lock_bh(&compl->pipe_info->pipe_lock);
                list_add_tail(&compl->list, &compl->pipe_info->compl_free);
                spin_unlock_bh(&compl->pipe_info->pipe_lock);
        }

        spin_lock_bh(&ar_pci->compl_lock);
        ar_pci->compl_processing = false;
        spin_unlock_bh(&ar_pci->compl_lock);
}
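/*
 * Note: compl_processing acts as a reentrancy guard. Both
 * ath10k_pci_ce_send_done() and ath10k_pci_ce_recv_data() funnel into
 * this function, so without the flag two CE tasklets could end up
 * processing completions concurrently.
 */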

/* TODO - temporary mapping while we have too few CE's */
static int ath10k_pci_hif_map_service_to_pipe(struct ath10k *ar,
                                              u16 service_id, u8 *ul_pipe,
                                              u8 *dl_pipe, int *ul_is_polled,
                                              int *dl_is_polled)
{
        int ret = 0;

        /* polling for received messages not supported */
        *dl_is_polled = 0;

        switch (service_id) {
        case ATH10K_HTC_SVC_ID_HTT_DATA_MSG:
                /*
                 * Host->target HTT gets its own pipe, so it can be polled
                 * while other pipes are interrupt driven.
                 */
                *ul_pipe = 4;
                /*
                 * Use the same target->host pipe for HTC ctrl, HTC raw
                 * streams, and HTT.
                 */
                *dl_pipe = 1;
                break;

        case ATH10K_HTC_SVC_ID_RSVD_CTRL:
        case ATH10K_HTC_SVC_ID_TEST_RAW_STREAMS:
                /*
                 * Note: HTC_RAW_STREAMS_SVC is currently unused, and
                 * HTC_CTRL_RSVD_SVC could share the same pipe as the
                 * WMI services.  So, if another CE is needed, change
                 * this to *ul_pipe = 3, which frees up CE 0.
                 */
                /* *ul_pipe = 3; */
                *ul_pipe = 0;
                *dl_pipe = 1;
                break;

        case ATH10K_HTC_SVC_ID_WMI_DATA_BK:
        case ATH10K_HTC_SVC_ID_WMI_DATA_BE:
        case ATH10K_HTC_SVC_ID_WMI_DATA_VI:
        case ATH10K_HTC_SVC_ID_WMI_DATA_VO:
        case ATH10K_HTC_SVC_ID_WMI_CONTROL:
                *ul_pipe = 3;
                *dl_pipe = 2;
                break;

        /* pipe 5 unused   */
        /* pipe 6 reserved */
        /* pipe 7 reserved */

        default:
                ret = -1;
                break;
        }
        *ul_is_polled =
                (host_ce_config_wlan[*ul_pipe].flags & CE_ATTR_DIS_INTR) != 0;

        return ret;
}
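/*
 * Example (sketch): WMI control traffic resolves to CE3 for uplink and
 * CE2 for downlink:
 *
 *        u8 ul_pipe, dl_pipe;
 *        int ul_polled, dl_polled;
 *
 *        ath10k_pci_hif_map_service_to_pipe(ar,
 *                                           ATH10K_HTC_SVC_ID_WMI_CONTROL,
 *                                           &ul_pipe, &dl_pipe,
 *                                           &ul_polled, &dl_polled);
 */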

static void ath10k_pci_hif_get_default_pipe(struct ath10k *ar,
                                            u8 *ul_pipe, u8 *dl_pipe)
{
        int ul_is_polled, dl_is_polled;

        (void)ath10k_pci_hif_map_service_to_pipe(ar,
                                                 ATH10K_HTC_SVC_ID_RSVD_CTRL,
                                                 ul_pipe,
                                                 dl_pipe,
                                                 &ul_is_polled,
                                                 &dl_is_polled);
}

static int ath10k_pci_post_rx_pipe(struct ath10k_pci_pipe *pipe_info,
                                   int num)
{
        struct ath10k *ar = pipe_info->hif_ce_state;
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        struct ath10k_ce_pipe *ce_state = pipe_info->ce_hdl;
        struct sk_buff *skb;
        dma_addr_t ce_data;
        int i, ret = 0;

        if (pipe_info->buf_sz == 0)
                return 0;

        for (i = 0; i < num; i++) {
                skb = dev_alloc_skb(pipe_info->buf_sz);
                if (!skb) {
                        ath10k_warn("failed to allocate skbuff for pipe %d\n",
                                    pipe_info->pipe_num);
                        ret = -ENOMEM;
                        goto err;
                }

                WARN_ONCE((unsigned long)skb->data & 3, "unaligned skb");

                ce_data = dma_map_single(ar->dev, skb->data,
                                         skb->len + skb_tailroom(skb),
                                         DMA_FROM_DEVICE);

                if (unlikely(dma_mapping_error(ar->dev, ce_data))) {
                        ath10k_warn("failed to DMA map sk_buff\n");
                        dev_kfree_skb_any(skb);
                        ret = -EIO;
                        goto err;
                }

                ATH10K_SKB_CB(skb)->paddr = ce_data;

                pci_dma_sync_single_for_device(ar_pci->pdev, ce_data,
                                               pipe_info->buf_sz,
                                               PCI_DMA_FROMDEVICE);

                ret = ath10k_ce_recv_buf_enqueue(ce_state, (void *)skb,
                                                 ce_data);
                if (ret) {
                        ath10k_warn("failed to enqueue to pipe %d: %d\n",
                                    pipe_info->pipe_num, ret);
                        goto err;
                }
        }

        return ret;

err:
        ath10k_pci_rx_pipe_cleanup(pipe_info);
        return ret;
}

static int ath10k_pci_post_rx(struct ath10k *ar)
{
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        struct ath10k_pci_pipe *pipe_info;
        const struct ce_attr *attr;
        int pipe_num, ret = 0;

        for (pipe_num = 0; pipe_num < CE_COUNT; pipe_num++) {
                pipe_info = &ar_pci->pipe_info[pipe_num];
                attr = &host_ce_config_wlan[pipe_num];

                if (attr->dest_nentries == 0)
                        continue;

                ret = ath10k_pci_post_rx_pipe(pipe_info,
                                              attr->dest_nentries - 1);
                if (ret) {
                        ath10k_warn("failed to post RX buffer for pipe %d: %d\n",
                                    pipe_num, ret);

                        for (; pipe_num >= 0; pipe_num--) {
                                pipe_info = &ar_pci->pipe_info[pipe_num];
                                ath10k_pci_rx_pipe_cleanup(pipe_info);
                        }
                        return ret;
                }
        }

        return 0;
}
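/*
 * Note: only dest_nentries - 1 buffers are posted per pipe. Keeping one
 * ring slot empty is presumably what lets a full RX ring be told apart
 * from an empty one (assumption based on the usual ring-index scheme).
 */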

static int ath10k_pci_hif_start(struct ath10k *ar)
{
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        int ret;

        ret = ath10k_pci_alloc_compl(ar);
        if (ret) {
                ath10k_warn("failed to allocate CE completions: %d\n", ret);
                return ret;
        }

        ret = ath10k_pci_request_irq(ar);
        if (ret) {
                ath10k_warn("failed to request IRQs: %d\n", ret);
                goto err_free_compl;
        }

        ret = ath10k_pci_setup_ce_irq(ar);
        if (ret) {
                ath10k_warn("failed to setup CE interrupts: %d\n", ret);
                goto err_stop;
        }

        /* Post buffers once to start things off. */
        ret = ath10k_pci_post_rx(ar);
        if (ret) {
                ath10k_warn("failed to post RX buffers for all pipes: %d\n",
                            ret);
                goto err_stop;
        }

        ar_pci->started = 1;
        return 0;

err_stop:
        ath10k_ce_disable_interrupts(ar);
        ath10k_pci_free_irq(ar);
        ath10k_pci_kill_tasklet(ar);
        ath10k_pci_stop_ce(ar);
        ath10k_pci_process_ce(ar);
err_free_compl:
        ath10k_pci_cleanup_ce(ar);
        return ret;
}
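/*
 * Per this patch, IRQ registration (ath10k_pci_request_irq()) happens in
 * hif_start() above rather than at probe time, and is undone by
 * ath10k_pci_free_irq() both in the error path here and in hif_stop()
 * below.
 */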

static void ath10k_pci_rx_pipe_cleanup(struct ath10k_pci_pipe *pipe_info)
{
        struct ath10k *ar;
        struct ath10k_pci *ar_pci;
        struct ath10k_ce_pipe *ce_hdl;
        u32 buf_sz;
        struct sk_buff *netbuf;
        u32 ce_data;

        buf_sz = pipe_info->buf_sz;

        /* Unused Copy Engine */
        if (buf_sz == 0)
                return;

        ar = pipe_info->hif_ce_state;
        ar_pci = ath10k_pci_priv(ar);

        if (!ar_pci->started)
                return;

        ce_hdl = pipe_info->ce_hdl;

        while (ath10k_ce_revoke_recv_next(ce_hdl, (void **)&netbuf,
                                          &ce_data) == 0) {
                dma_unmap_single(ar->dev, ATH10K_SKB_CB(netbuf)->paddr,
                                 netbuf->len + skb_tailroom(netbuf),
                                 DMA_FROM_DEVICE);
                dev_kfree_skb_any(netbuf);
        }
}

static void ath10k_pci_tx_pipe_cleanup(struct ath10k_pci_pipe *pipe_info)
{
        struct ath10k *ar;
        struct ath10k_pci *ar_pci;
        struct ath10k_ce_pipe *ce_hdl;
        struct sk_buff *netbuf;
        u32 ce_data;
        unsigned int nbytes;
        unsigned int id;
        u32 buf_sz;

        buf_sz = pipe_info->buf_sz;

        /* Unused Copy Engine */
        if (buf_sz == 0)
                return;

        ar = pipe_info->hif_ce_state;
        ar_pci = ath10k_pci_priv(ar);

        if (!ar_pci->started)
                return;

        ce_hdl = pipe_info->ce_hdl;

        while (ath10k_ce_cancel_send_next(ce_hdl, (void **)&netbuf,
                                          &ce_data, &nbytes, &id) == 0) {
                /*
                 * Indicate the completion to higher layer to free
                 * the buffer
                 */

                if (!netbuf) {
                        ath10k_warn("invalid sk_buff on CE %d - NULL pointer. firmware crashed?\n",
                                    ce_hdl->id);
                        continue;
                }

                ATH10K_SKB_CB(netbuf)->is_aborted = true;
                ar_pci->msg_callbacks_current.tx_completion(ar,
                                                            netbuf,
                                                            id);
        }
}

/*
 * Cleanup residual buffers for device shutdown:
 *    buffers that were enqueued for receive
 *    buffers that were to be sent
 * Note: Buffers that had completed but which were
 * not yet processed are on a completion queue. They
 * are handled when the completion thread shuts down.
 */
static void ath10k_pci_buffer_cleanup(struct ath10k *ar)
{
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        int pipe_num;

        for (pipe_num = 0; pipe_num < CE_COUNT; pipe_num++) {
                struct ath10k_pci_pipe *pipe_info;

                pipe_info = &ar_pci->pipe_info[pipe_num];
                ath10k_pci_rx_pipe_cleanup(pipe_info);
                ath10k_pci_tx_pipe_cleanup(pipe_info);
        }
}

static void ath10k_pci_ce_deinit(struct ath10k *ar)
{
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        struct ath10k_pci_pipe *pipe_info;
        int pipe_num;

        for (pipe_num = 0; pipe_num < CE_COUNT; pipe_num++) {
                pipe_info = &ar_pci->pipe_info[pipe_num];
                if (pipe_info->ce_hdl) {
                        ath10k_ce_deinit(pipe_info->ce_hdl);
                        pipe_info->ce_hdl = NULL;
                        pipe_info->buf_sz = 0;
                }
        }
}

static void ath10k_pci_hif_stop(struct ath10k *ar)
{
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        int ret;

        ath10k_dbg(ATH10K_DBG_PCI, "%s\n", __func__);

        ret = ath10k_ce_disable_interrupts(ar);
        if (ret)
                ath10k_warn("failed to disable CE interrupts: %d\n", ret);

        ath10k_pci_free_irq(ar);
        ath10k_pci_kill_tasklet(ar);
        ath10k_pci_stop_ce(ar);

        /* At this point, asynchronous threads are stopped, the target should
         * not DMA nor interrupt. We process the leftovers and then free
         * everything else up. */

        ath10k_pci_process_ce(ar);
        ath10k_pci_cleanup_ce(ar);
        ath10k_pci_buffer_cleanup(ar);

        /* Make sure the device won't access any structures on the host by
         * resetting it. The device was fed with PCI CE ringbuffer
         * configuration during init. If ringbuffers are freed and the device
         * were to access them this could lead to memory corruption on the
         * host. */
        ath10k_pci_device_reset(ar);

        ar_pci->started = 0;
}

static int ath10k_pci_hif_exchange_bmi_msg(struct ath10k *ar,
                                           void *req, u32 req_len,
                                           void *resp, u32 *resp_len)
{
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        struct ath10k_pci_pipe *pci_tx = &ar_pci->pipe_info[BMI_CE_NUM_TO_TARG];
        struct ath10k_pci_pipe *pci_rx = &ar_pci->pipe_info[BMI_CE_NUM_TO_HOST];
        struct ath10k_ce_pipe *ce_tx = pci_tx->ce_hdl;
        struct ath10k_ce_pipe *ce_rx = pci_rx->ce_hdl;
        dma_addr_t req_paddr = 0;
        dma_addr_t resp_paddr = 0;
        struct bmi_xfer xfer = {};
        void *treq, *tresp = NULL;
        int ret = 0;

        might_sleep();

        if (resp && !resp_len)
                return -EINVAL;

        if (resp && resp_len && *resp_len == 0)
                return -EINVAL;

        treq = kmemdup(req, req_len, GFP_KERNEL);
        if (!treq)
                return -ENOMEM;

        req_paddr = dma_map_single(ar->dev, treq, req_len, DMA_TO_DEVICE);
        ret = dma_mapping_error(ar->dev, req_paddr);
        if (ret)
                goto err_dma;

        if (resp && resp_len) {
                tresp = kzalloc(*resp_len, GFP_KERNEL);
                if (!tresp) {
                        ret = -ENOMEM;
                        goto err_req;
                }

                resp_paddr = dma_map_single(ar->dev, tresp, *resp_len,
                                            DMA_FROM_DEVICE);
                ret = dma_mapping_error(ar->dev, resp_paddr);
                if (ret)
                        goto err_req;

                xfer.wait_for_resp = true;
                xfer.resp_len = 0;

                ath10k_ce_recv_buf_enqueue(ce_rx, &xfer, resp_paddr);
        }

        init_completion(&xfer.done);

        ret = ath10k_ce_send(ce_tx, &xfer, req_paddr, req_len, -1, 0);
        if (ret)
                goto err_resp;

        ret = ath10k_pci_bmi_wait(ce_tx, ce_rx, &xfer);
        if (ret) {
                u32 unused_buffer;
                unsigned int unused_nbytes;
                unsigned int unused_id;

                ath10k_ce_cancel_send_next(ce_tx, NULL, &unused_buffer,
                                           &unused_nbytes, &unused_id);
1480         } else {
1481                 /* zero means the transfer completed before the timeout */
1482                 ret = 0;
1483         }
1484
1485 err_resp:
1486         if (resp) {
1487                 u32 unused_buffer;
1488
1489                 ath10k_ce_revoke_recv_next(ce_rx, NULL, &unused_buffer);
1490                 dma_unmap_single(ar->dev, resp_paddr,
1491                                  *resp_len, DMA_FROM_DEVICE);
1492         }
1493 err_req:
1494         dma_unmap_single(ar->dev, req_paddr, req_len, DMA_TO_DEVICE);
1495
1496         if (ret == 0 && resp_len) {
1497                 *resp_len = min(*resp_len, xfer.resp_len);
1498                 memcpy(resp, tresp, *resp_len);
1499         }
1500 err_dma:
1501         kfree(treq);
1502         kfree(tresp);
1503
1504         return ret;
1505 }
1506
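/* Send-completion handler for the BMI tx pipe. A request that expects a
 * response is not complete until the response arrives, so complete() is
 * deliberately skipped here when wait_for_resp is set.
 */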
1507 static void ath10k_pci_bmi_send_done(struct ath10k_ce_pipe *ce_state)
1508 {
1509         struct bmi_xfer *xfer;
1510         u32 ce_data;
1511         unsigned int nbytes;
1512         unsigned int transfer_id;
1513
1514         if (ath10k_ce_completed_send_next(ce_state, (void **)&xfer, &ce_data,
1515                                           &nbytes, &transfer_id))
1516                 return;
1517
1518         if (xfer->wait_for_resp)
1519                 return;
1520
1521         complete(&xfer->done);
1522 }
1523
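/* Receive handler for the BMI rx pipe. Records the response length and
 * wakes the polling waiter; data arriving with no request pending is
 * dropped with a warning.
 */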
1524 static void ath10k_pci_bmi_recv_data(struct ath10k_ce_pipe *ce_state)
1525 {
1526         struct bmi_xfer *xfer;
1527         u32 ce_data;
1528         unsigned int nbytes;
1529         unsigned int transfer_id;
1530         unsigned int flags;
1531
1532         if (ath10k_ce_completed_recv_next(ce_state, (void **)&xfer, &ce_data,
1533                                           &nbytes, &transfer_id, &flags))
1534                 return;
1535
1536         if (!xfer->wait_for_resp) {
1537                 ath10k_warn("unexpected: BMI data received; ignoring\n");
1538                 return;
1539         }
1540
1541         xfer->resp_len = nbytes;
1542         complete(&xfer->done);
1543 }
1544
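/* Poll both BMI pipes until the transfer completes or
 * BMI_COMMUNICATION_TIMEOUT_HZ elapses. The CE completion handlers are
 * invoked directly because interrupts may not be registered at this stage.
 */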
1545 static int ath10k_pci_bmi_wait(struct ath10k_ce_pipe *tx_pipe,
1546                                struct ath10k_ce_pipe *rx_pipe,
1547                                struct bmi_xfer *xfer)
1548 {
1549         unsigned long timeout = jiffies + BMI_COMMUNICATION_TIMEOUT_HZ;
1550
1551         while (time_before_eq(jiffies, timeout)) {
1552                 ath10k_pci_bmi_send_done(tx_pipe);
1553                 ath10k_pci_bmi_recv_data(rx_pipe);
1554
1555                 if (completion_done(&xfer->done))
1556                         return 0;
1557
1558                 schedule();
1559         }
1560
1561         return -ETIMEDOUT;
1562 }
1563
1564 /*
1565  * Map from service/endpoint to Copy Engine.
1566  * This table is derived from the CE_PCI TABLE, above.
1567  * It is passed to the Target at startup for use by firmware.
1568  */
1569 static const struct service_to_pipe target_service_to_ce_map_wlan[] = {
1570         {
1571                  ATH10K_HTC_SVC_ID_WMI_DATA_VO,
1572                  PIPEDIR_OUT,           /* out = UL = host -> target */
1573                  3,
1574         },
1575         {
1576                  ATH10K_HTC_SVC_ID_WMI_DATA_VO,
1577                  PIPEDIR_IN,            /* in = DL = target -> host */
1578                  2,
1579         },
1580         {
1581                  ATH10K_HTC_SVC_ID_WMI_DATA_BK,
1582                  PIPEDIR_OUT,           /* out = UL = host -> target */
1583                  3,
1584         },
1585         {
1586                  ATH10K_HTC_SVC_ID_WMI_DATA_BK,
1587                  PIPEDIR_IN,            /* in = DL = target -> host */
1588                  2,
1589         },
1590         {
1591                  ATH10K_HTC_SVC_ID_WMI_DATA_BE,
1592                  PIPEDIR_OUT,           /* out = UL = host -> target */
1593                  3,
1594         },
1595         {
1596                  ATH10K_HTC_SVC_ID_WMI_DATA_BE,
1597                  PIPEDIR_IN,            /* in = DL = target -> host */
1598                  2,
1599         },
1600         {
1601                  ATH10K_HTC_SVC_ID_WMI_DATA_VI,
1602                  PIPEDIR_OUT,           /* out = UL = host -> target */
1603                  3,
1604         },
1605         {
1606                  ATH10K_HTC_SVC_ID_WMI_DATA_VI,
1607                  PIPEDIR_IN,            /* in = DL = target -> host */
1608                  2,
1609         },
1610         {
1611                  ATH10K_HTC_SVC_ID_WMI_CONTROL,
1612                  PIPEDIR_OUT,           /* out = UL = host -> target */
1613                  3,
1614         },
1615         {
1616                  ATH10K_HTC_SVC_ID_WMI_CONTROL,
1617                  PIPEDIR_IN,            /* in = DL = target -> host */
1618                  2,
1619         },
1620         {
1621                  ATH10K_HTC_SVC_ID_RSVD_CTRL,
1622                  PIPEDIR_OUT,           /* out = UL = host -> target */
1623                  0,             /* could be moved to 3 (share with WMI) */
1624         },
1625         {
1626                  ATH10K_HTC_SVC_ID_RSVD_CTRL,
1627                  PIPEDIR_IN,            /* in = DL = target -> host */
1628                  1,
1629         },
1630         {
1631                  ATH10K_HTC_SVC_ID_TEST_RAW_STREAMS,    /* not currently used */
1632                  PIPEDIR_OUT,           /* out = UL = host -> target */
1633                  0,
1634         },
1635         {
1636                  ATH10K_HTC_SVC_ID_TEST_RAW_STREAMS,    /* not currently used */
1637                  PIPEDIR_IN,            /* in = DL = target -> host */
1638                  1,
1639         },
1640         {
1641                  ATH10K_HTC_SVC_ID_HTT_DATA_MSG,
1642                  PIPEDIR_OUT,           /* out = UL = host -> target */
1643                  4,
1644         },
1645         {
1646                  ATH10K_HTC_SVC_ID_HTT_DATA_MSG,
1647                  PIPEDIR_IN,            /* in = DL = target -> host */
1648                  1,
1649         },
1650
1651         /* (Additions here) */
1652
1653         {                               /* Must be last */
1654                  0,
1655                  0,
1656                  0,
1657         },
1658 };
1659
1660 /*
1661  * Send an interrupt to the device to wake up the Target CPU
1662  * so it has an opportunity to notice any changed state.
1663  */
1664 static int ath10k_pci_wake_target_cpu(struct ath10k *ar)
1665 {
1666         int ret;
1667         u32 core_ctrl;
1668
1669         ret = ath10k_pci_diag_read_access(ar, SOC_CORE_BASE_ADDRESS |
1670                                               CORE_CTRL_ADDRESS,
1671                                           &core_ctrl);
1672         if (ret) {
1673                 ath10k_warn("failed to read core_ctrl: %d\n", ret);
1674                 return ret;
1675         }
1676
1677         /* A_INUM_FIRMWARE interrupt to Target CPU */
1678         core_ctrl |= CORE_CTRL_CPU_INTR_MASK;
1679
1680         ret = ath10k_pci_diag_write_access(ar, SOC_CORE_BASE_ADDRESS |
1681                                                CORE_CTRL_ADDRESS,
1682                                            core_ctrl);
1683         if (ret) {
1684                 ath10k_warn("failed to set target CPU interrupt mask: %d\n",
1685                             ret);
1686                 return ret;
1687         }
1688
1689         return 0;
1690 }
1691
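/* Download the host's CE setup to the target through the diagnostic
 * window: the pipe config table, the service-to-pipe map and the PCIe
 * config flags (with L1 disabled), then the early-alloc settings, and
 * finally HI_OPTION_EARLY_CFG_DONE to tell the firmware it may proceed
 * with initialization.
 */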
1692 static int ath10k_pci_init_config(struct ath10k *ar)
1693 {
1694         u32 interconnect_targ_addr;
1695         u32 pcie_state_targ_addr = 0;
1696         u32 pipe_cfg_targ_addr = 0;
1697         u32 svc_to_pipe_map = 0;
1698         u32 pcie_config_flags = 0;
1699         u32 ealloc_value;
1700         u32 ealloc_targ_addr;
1701         u32 flag2_value;
1702         u32 flag2_targ_addr;
1703         int ret = 0;
1704
1705         /* Download to Target the CE Config and the service-to-CE map */
1706         interconnect_targ_addr =
1707                 host_interest_item_address(HI_ITEM(hi_interconnect_state));
1708
1709         /* Supply Target-side CE configuration */
1710         ret = ath10k_pci_diag_read_access(ar, interconnect_targ_addr,
1711                                           &pcie_state_targ_addr);
1712         if (ret != 0) {
1713                 ath10k_err("Failed to get pcie state addr: %d\n", ret);
1714                 return ret;
1715         }
1716
1717         if (pcie_state_targ_addr == 0) {
1718                 ret = -EIO;
1719                 ath10k_err("Invalid pcie state addr\n");
1720                 return ret;
1721         }
1722
1723         ret = ath10k_pci_diag_read_access(ar, pcie_state_targ_addr +
1724                                           offsetof(struct pcie_state,
1725                                                    pipe_cfg_addr),
1726                                           &pipe_cfg_targ_addr);
1727         if (ret != 0) {
1728                 ath10k_err("Failed to get pipe cfg addr: %d\n", ret);
1729                 return ret;
1730         }
1731
1732         if (pipe_cfg_targ_addr == 0) {
1733                 ret = -EIO;
1734                 ath10k_err("Invalid pipe cfg addr\n");
1735                 return ret;
1736         }
1737
1738         ret = ath10k_pci_diag_write_mem(ar, pipe_cfg_targ_addr,
1739                                  target_ce_config_wlan,
1740                                  sizeof(target_ce_config_wlan));
1741
1742         if (ret != 0) {
1743                 ath10k_err("Failed to write pipe cfg: %d\n", ret);
1744                 return ret;
1745         }
1746
1747         ret = ath10k_pci_diag_read_access(ar, pcie_state_targ_addr +
1748                                           offsetof(struct pcie_state,
1749                                                    svc_to_pipe_map),
1750                                           &svc_to_pipe_map);
1751         if (ret != 0) {
1752                 ath10k_err("Failed to get svc/pipe map: %d\n", ret);
1753                 return ret;
1754         }
1755
1756         if (svc_to_pipe_map == 0) {
1757                 ret = -EIO;
1758                 ath10k_err("Invalid svc_to_pipe map\n");
1759                 return ret;
1760         }
1761
1762         ret = ath10k_pci_diag_write_mem(ar, svc_to_pipe_map,
1763                                  target_service_to_ce_map_wlan,
1764                                  sizeof(target_service_to_ce_map_wlan));
1765         if (ret != 0) {
1766                 ath10k_err("Failed to write svc/pipe map: %d\n", ret);
1767                 return ret;
1768         }
1769
1770         ret = ath10k_pci_diag_read_access(ar, pcie_state_targ_addr +
1771                                           offsetof(struct pcie_state,
1772                                                    config_flags),
1773                                           &pcie_config_flags);
1774         if (ret != 0) {
1775                 ath10k_err("Failed to get pcie config_flags: %d\n", ret);
1776                 return ret;
1777         }
1778
1779         pcie_config_flags &= ~PCIE_CONFIG_FLAG_ENABLE_L1;
1780
1781         ret = ath10k_pci_diag_write_mem(ar, pcie_state_targ_addr +
1782                                  offsetof(struct pcie_state, config_flags),
1783                                  &pcie_config_flags,
1784                                  sizeof(pcie_config_flags));
1785         if (ret != 0) {
1786                 ath10k_err("Failed to write pcie config_flags: %d\n", ret);
1787                 return ret;
1788         }
1789
1790         /* configure early allocation */
1791         ealloc_targ_addr = host_interest_item_address(HI_ITEM(hi_early_alloc));
1792
1793         ret = ath10k_pci_diag_read_access(ar, ealloc_targ_addr, &ealloc_value);
1794         if (ret != 0) {
1795                 ath10k_err("Failed to get early alloc val: %d\n", ret);
1796                 return ret;
1797         }
1798
1799         /* first bank is switched to IRAM */
1800         ealloc_value |= ((HI_EARLY_ALLOC_MAGIC << HI_EARLY_ALLOC_MAGIC_SHIFT) &
1801                          HI_EARLY_ALLOC_MAGIC_MASK);
1802         ealloc_value |= ((1 << HI_EARLY_ALLOC_IRAM_BANKS_SHIFT) &
1803                          HI_EARLY_ALLOC_IRAM_BANKS_MASK);
1804
1805         ret = ath10k_pci_diag_write_access(ar, ealloc_targ_addr, ealloc_value);
1806         if (ret != 0) {
1807                 ath10k_err("Failed to set early alloc val: %d\n", ret);
1808                 return ret;
1809         }
1810
1811         /* Tell Target to proceed with initialization */
1812         flag2_targ_addr = host_interest_item_address(HI_ITEM(hi_option_flag2));
1813
1814         ret = ath10k_pci_diag_read_access(ar, flag2_targ_addr, &flag2_value);
1815         if (ret != 0) {
1816                 ath10k_err("Failed to get option val: %d\n", ret);
1817                 return ret;
1818         }
1819
1820         flag2_value |= HI_OPTION_EARLY_CFG_DONE;
1821
1822         ret = ath10k_pci_diag_write_access(ar, flag2_targ_addr, flag2_value);
1823         if (ret != 0) {
1824                 ath10k_err("Failed to set option val: %d\n", ret);
1825                 return ret;
1826         }
1827
1828         return 0;
1829 }
1832
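/* Initialize a copy engine for each pipe in host_ce_config_wlan. The last
 * CE is reserved for the diagnostic window and therefore gets no buf_sz.
 */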
1833 static int ath10k_pci_ce_init(struct ath10k *ar)
1834 {
1835         struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1836         struct ath10k_pci_pipe *pipe_info;
1837         const struct ce_attr *attr;
1838         int pipe_num;
1839
1840         for (pipe_num = 0; pipe_num < CE_COUNT; pipe_num++) {
1841                 pipe_info = &ar_pci->pipe_info[pipe_num];
1842                 pipe_info->pipe_num = pipe_num;
1843                 pipe_info->hif_ce_state = ar;
1844                 attr = &host_ce_config_wlan[pipe_num];
1845
1846                 pipe_info->ce_hdl = ath10k_ce_init(ar, pipe_num, attr);
1847                 if (pipe_info->ce_hdl == NULL) {
1848                         ath10k_err("failed to initialize CE for pipe: %d\n",
1849                                    pipe_num);
1850
1851                         /* It is safe to call this here; it checks whether
1852                          * ce_hdl is valid for each pipe. */
1853                         ath10k_pci_ce_deinit(ar);
1854                         return -1;
1855                 }
1856
1857                 if (pipe_num == CE_COUNT - 1) {
1858                         /*
1859                          * Reserve the last CE for
1860                          * Diagnostic Window support.
1861                          */
1862                         ar_pci->ce_diag = pipe_info->ce_hdl;
1863                         continue;
1864                 }
1865
1866                 pipe_info->buf_sz = (size_t) (attr->src_sz_max);
1867         }
1868
1869         return 0;
1870 }
1871
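/* Check for and acknowledge a pending target-side firmware event. Once the
 * HIF has been started this dumps the firmware crash area; an event seen
 * earlier than that is unexpected and only logged.
 */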
1872 static void ath10k_pci_fw_interrupt_handler(struct ath10k *ar)
1873 {
1874         struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1875         u32 fw_indicator_address, fw_indicator;
1876
1877         ath10k_pci_wake(ar);
1878
1879         fw_indicator_address = ar_pci->fw_indicator_address;
1880         fw_indicator = ath10k_pci_read32(ar, fw_indicator_address);
1881
1882         if (fw_indicator & FW_IND_EVENT_PENDING) {
1883                 /* ACK: clear Target-side pending event */
1884                 ath10k_pci_write32(ar, fw_indicator_address,
1885                                    fw_indicator & ~FW_IND_EVENT_PENDING);
1886
1887                 if (ar_pci->started) {
1888                         ath10k_pci_hif_dump_area(ar);
1889                 } else {
1890                         /*
1891                          * Probable Target failure before we're prepared
1892                          * to handle it.  Generally unexpected.
1893                          */
1894                         ath10k_warn("early firmware event indicated\n");
1895                 }
1896         }
1897
1898         ath10k_pci_sleep(ar);
1899 }
1900
1901 static int ath10k_pci_hif_power_up(struct ath10k *ar)
1902 {
1903         struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1904         const char *irq_mode;
1905         int ret;
1906
1907         /*
1908          * Bring the target up cleanly.
1909          *
1910          * The target may be in an undefined state with an AUX-powered Target
1911          * and a Host in WoW mode. If the Host crashes, loses power, or is
1912          * restarted (without unloading the driver) then the Target is left
1913          * (aux) powered and running. On a subsequent driver load, the Target
1914          * is in an unexpected state. We try to catch that here in order to
1915          * reset the Target and retry the probe.
1916          */
1917         ret = ath10k_pci_device_reset(ar);
1918         if (ret) {
1919                 ath10k_err("failed to reset target: %d\n", ret);
1920                 goto err;
1921         }
1922
1923         if (!test_bit(ATH10K_PCI_FEATURE_SOC_POWER_SAVE, ar_pci->features))
1924                 /* Force AWAKE forever */
1925                 ath10k_do_pci_wake(ar);
1926
1927         ret = ath10k_pci_ce_init(ar);
1928         if (ret) {
1929                 ath10k_err("failed to initialize CE: %d\n", ret);
1930                 goto err_ps;
1931         }
1932
1933         ret = ath10k_ce_disable_interrupts(ar);
1934         if (ret) {
1935                 ath10k_err("failed to disable CE interrupts: %d\n", ret);
1936                 goto err_ce;
1937         }
1938
1939         ret = ath10k_pci_init_irq(ar);
1940         if (ret) {
1941                 ath10k_err("failed to init irqs: %d\n", ret);
1942                 goto err_ce;
1943         }
1944
1945         ret = ath10k_pci_wait_for_target_init(ar);
1946         if (ret) {
1947                 ath10k_err("failed to wait for target to init: %d\n", ret);
1948                 goto err_deinit_irq;
1949         }
1950
1951         ret = ath10k_pci_init_config(ar);
1952         if (ret) {
1953                 ath10k_err("failed to setup init config: %d\n", ret);
1954                 goto err_deinit_irq;
1955         }
1956
1957         ret = ath10k_pci_wake_target_cpu(ar);
1958         if (ret) {
1959                 ath10k_err("could not wake up target CPU: %d\n", ret);
1960                 goto err_deinit_irq;
1961         }
1962
1963         if (ar_pci->num_msi_intrs > 1)
1964                 irq_mode = "MSI-X";
1965         else if (ar_pci->num_msi_intrs == 1)
1966                 irq_mode = "MSI";
1967         else
1968                 irq_mode = "legacy";
1969
1970         if (!test_bit(ATH10K_FLAG_FIRST_BOOT_DONE, &ar->dev_flags))
1971                 ath10k_info("pci irq %s\n", irq_mode);
1972
1973         return 0;
1974
1975 err_deinit_irq:
1976         ath10k_pci_deinit_irq(ar);
1977 err_ce:
1978         ath10k_pci_ce_deinit(ar);
1979         ath10k_pci_device_reset(ar);
1980 err_ps:
1981         if (!test_bit(ATH10K_PCI_FEATURE_SOC_POWER_SAVE, ar_pci->features))
1982                 ath10k_do_pci_sleep(ar);
1983 err:
1984         return ret;
1985 }
1986
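/* Tear down in roughly the reverse order of ath10k_pci_hif_power_up():
 * release the irq configuration, reset the target so it stops touching
 * host memory, then free the copy engines and drop the forced-awake
 * reference.
 */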
1987 static void ath10k_pci_hif_power_down(struct ath10k *ar)
1988 {
1989         struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1990
1991         ath10k_pci_deinit_irq(ar);
1992         ath10k_pci_device_reset(ar);
1993
1994         ath10k_pci_ce_deinit(ar);
1995         if (!test_bit(ATH10K_PCI_FEATURE_SOC_POWER_SAVE, ar_pci->features))
1996                 ath10k_do_pci_sleep(ar);
1997 }
1998
1999 #ifdef CONFIG_PM
2000
2001 #define ATH10K_PCI_PM_CONTROL 0x44
2002
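/* Suspend manually drives the power state field of the PCI power
 * management control/status register (at offset 0x44 on this device) to
 * D3hot (0x3); resume below restores D0 and re-disables the RETRY_TIMEOUT
 * register.
 */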
2003 static int ath10k_pci_hif_suspend(struct ath10k *ar)
2004 {
2005         struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2006         struct pci_dev *pdev = ar_pci->pdev;
2007         u32 val;
2008
2009         pci_read_config_dword(pdev, ATH10K_PCI_PM_CONTROL, &val);
2010
2011         if ((val & 0x000000ff) != 0x3) {
2012                 pci_save_state(pdev);
2013                 pci_disable_device(pdev);
2014                 pci_write_config_dword(pdev, ATH10K_PCI_PM_CONTROL,
2015                                        (val & 0xffffff00) | 0x03);
2016         }
2017
2018         return 0;
2019 }
2020
2021 static int ath10k_pci_hif_resume(struct ath10k *ar)
2022 {
2023         struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2024         struct pci_dev *pdev = ar_pci->pdev;
2025         u32 val;
2026
2027         pci_read_config_dword(pdev, ATH10K_PCI_PM_CONTROL, &val);
2028
2029         if ((val & 0x000000ff) != 0) {
2030                 pci_restore_state(pdev);
2031                 pci_write_config_dword(pdev, ATH10K_PCI_PM_CONTROL,
2032                                        val & 0xffffff00);
2033                 /*
2034                  * Suspend/Resume resets the PCI configuration space,
2035                  * so we have to re-disable the RETRY_TIMEOUT register (0x41)
2036                  * to keep PCI Tx retries from interfering with C3 CPU state
2037                  */
2038                 pci_read_config_dword(pdev, 0x40, &val);
2039
2040                 if ((val & 0x0000ff00) != 0)
2041                         pci_write_config_dword(pdev, 0x40, val & 0xffff00ff);
2042         }
2043
2044         return 0;
2045 }
2046 #endif
2047
2048 static const struct ath10k_hif_ops ath10k_pci_hif_ops = {
2049         .send_head              = ath10k_pci_hif_send_head,
2050         .exchange_bmi_msg       = ath10k_pci_hif_exchange_bmi_msg,
2051         .start                  = ath10k_pci_hif_start,
2052         .stop                   = ath10k_pci_hif_stop,
2053         .map_service_to_pipe    = ath10k_pci_hif_map_service_to_pipe,
2054         .get_default_pipe       = ath10k_pci_hif_get_default_pipe,
2055         .send_complete_check    = ath10k_pci_hif_send_complete_check,
2056         .set_callbacks          = ath10k_pci_hif_set_callbacks,
2057         .get_free_queue_number  = ath10k_pci_hif_get_free_queue_number,
2058         .power_up               = ath10k_pci_hif_power_up,
2059         .power_down             = ath10k_pci_hif_power_down,
2060 #ifdef CONFIG_PM
2061         .suspend                = ath10k_pci_hif_suspend,
2062         .resume                 = ath10k_pci_hif_resume,
2063 #endif
2064 };
2065
2066 static void ath10k_pci_ce_tasklet(unsigned long ptr)
2067 {
2068         struct ath10k_pci_pipe *pipe = (struct ath10k_pci_pipe *)ptr;
2069         struct ath10k_pci *ar_pci = pipe->ar_pci;
2070
2071         ath10k_ce_per_engine_service(ar_pci->ar, pipe->pipe_num);
2072 }
2073
2074 static void ath10k_msi_err_tasklet(unsigned long data)
2075 {
2076         struct ath10k *ar = (struct ath10k *)data;
2077
2078         ath10k_pci_fw_interrupt_handler(ar);
2079 }
2080
2081 /*
2082  * Handler for a per-engine interrupt on a PARTICULAR CE.
2083  * This is used in cases where each CE has a private MSI interrupt.
2084  */
2085 static irqreturn_t ath10k_pci_per_engine_handler(int irq, void *arg)
2086 {
2087         struct ath10k *ar = arg;
2088         struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2089         int ce_id = irq - ar_pci->pdev->irq - MSI_ASSIGN_CE_INITIAL;
2090
2091         if (ce_id < 0 || ce_id >= ARRAY_SIZE(ar_pci->pipe_info)) {
2092                 ath10k_warn("unexpected/invalid irq %d ce_id %d\n", irq, ce_id);
2093                 return IRQ_HANDLED;
2094         }
2095
2096         /*
2097          * use a one-to-one mapping for CEs 0..5.
2098          * CEs 6 & 7 do not use interrupts at all.
2099          * CE's 6 & 7 do not use interrupts at all.
2100          *
2101          * This mapping must be kept in sync with the mapping
2102          * used by firmware.
2103          */
2104         tasklet_schedule(&ar_pci->pipe_info[ce_id].intr);
2105         return IRQ_HANDLED;
2106 }
2107
2108 static irqreturn_t ath10k_pci_msi_fw_handler(int irq, void *arg)
2109 {
2110         struct ath10k *ar = arg;
2111         struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2112
2113         tasklet_schedule(&ar_pci->msi_fw_err);
2114         return IRQ_HANDLED;
2115 }
2116
2117 /*
2118  * Top-level interrupt handler for all PCI interrupts from a Target.
2119  * When a block of MSI interrupts is allocated, this top-level handler
2120  * is not used; instead, we directly call the correct sub-handler.
2121  */
2122 static irqreturn_t ath10k_pci_interrupt_handler(int irq, void *arg)
2123 {
2124         struct ath10k *ar = arg;
2125         struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2126
2127         if (ar_pci->num_msi_intrs == 0) {
2128                 if (!ath10k_pci_irq_pending(ar))
2129                         return IRQ_NONE;
2130
2131                 /*
2132                  * IMPORTANT: the INTR_CLR register has to be set after
2133                  * INTR_ENABLE is set to 0; otherwise the interrupt cannot
2134                  * really be cleared.
2135                  */
2136                 iowrite32(0, ar_pci->mem +
2137                           (SOC_CORE_BASE_ADDRESS |
2138                            PCIE_INTR_ENABLE_ADDRESS));
2139                 iowrite32(PCIE_INTR_FIRMWARE_MASK |
2140                           PCIE_INTR_CE_MASK_ALL,
2141                           ar_pci->mem + (SOC_CORE_BASE_ADDRESS |
2142                                          PCIE_INTR_CLR_ADDRESS));
2143                 /*
2144                  * IMPORTANT: this extra read transaction is required to
2145                  * flush the posted write buffer.
2146                  */
2147                 (void) ioread32(ar_pci->mem +
2148                                 (SOC_CORE_BASE_ADDRESS |
2149                                  PCIE_INTR_ENABLE_ADDRESS));
2150         }
2151
2152         tasklet_schedule(&ar_pci->intr_tq);
2153
2154         return IRQ_HANDLED;
2155 }
2156
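/* Bottom half shared by all interrupt modes: service firmware events and
 * any pending CE completions, then re-enable the legacy line interrupts
 * that were masked in the hard irq handler (MSI needs no re-enabling).
 */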
2157 static void ath10k_pci_tasklet(unsigned long data)
2158 {
2159         struct ath10k *ar = (struct ath10k *)data;
2160         struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2161
2162         ath10k_pci_fw_interrupt_handler(ar); /* FIXME: Handle FW error */
2163         ath10k_ce_per_engine_service_any(ar);
2164
2165         if (ar_pci->num_msi_intrs == 0) {
2166                 /* Enable Legacy PCI line interrupts */
2167                 iowrite32(PCIE_INTR_FIRMWARE_MASK |
2168                           PCIE_INTR_CE_MASK_ALL,
2169                           ar_pci->mem + (SOC_CORE_BASE_ADDRESS |
2170                                          PCIE_INTR_ENABLE_ADDRESS));
2171                 /*
2172                  * IMPORTANT: this extra read transaction is required to
2173                  * flush the posted write buffer
2174                  */
2175                 (void) ioread32(ar_pci->mem +
2176                                 (SOC_CORE_BASE_ADDRESS |
2177                                  PCIE_INTR_ENABLE_ADDRESS));
2178         }
2179 }
2180
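/* MSI-X mode: one vector for firmware indications plus one vector per
 * interrupt-capable copy engine. Every irq requested so far is freed
 * again if a later request fails.
 */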
2181 static int ath10k_pci_request_irq_msix(struct ath10k *ar)
2182 {
2183         struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2184         int ret, i;
2185
2186         ret = request_irq(ar_pci->pdev->irq + MSI_ASSIGN_FW,
2187                           ath10k_pci_msi_fw_handler,
2188                           IRQF_SHARED, "ath10k_pci", ar);
2189         if (ret) {
2190                 ath10k_warn("failed to request MSI-X fw irq %d: %d\n",
2191                             ar_pci->pdev->irq + MSI_ASSIGN_FW, ret);
2192                 return ret;
2193         }
2194
2195         for (i = MSI_ASSIGN_CE_INITIAL; i <= MSI_ASSIGN_CE_MAX; i++) {
2196                 ret = request_irq(ar_pci->pdev->irq + i,
2197                                   ath10k_pci_per_engine_handler,
2198                                   IRQF_SHARED, "ath10k_pci", ar);
2199                 if (ret) {
2200                         ath10k_warn("failed to request MSI-X ce irq %d: %d\n",
2201                                     ar_pci->pdev->irq + i, ret);
2202
2203                         for (i--; i >= MSI_ASSIGN_CE_INITIAL; i--)
2204                                 free_irq(ar_pci->pdev->irq + i, ar);
2205
2206                         free_irq(ar_pci->pdev->irq + MSI_ASSIGN_FW, ar);
2207                         return ret;
2208                 }
2209         }
2210
2211         return 0;
2212 }
2213
2214 static int ath10k_pci_request_irq_msi(struct ath10k *ar)
2215 {
2216         struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2217         int ret;
2218
2219         ret = request_irq(ar_pci->pdev->irq,
2220                           ath10k_pci_interrupt_handler,
2221                           IRQF_SHARED, "ath10k_pci", ar);
2222         if (ret) {
2223                 ath10k_warn("failed to request MSI irq %d: %d\n",
2224                             ar_pci->pdev->irq, ret);
2225                 return ret;
2226         }
2227
2228         return 0;
2229 }
2230
2231 static int ath10k_pci_request_irq_legacy(struct ath10k *ar)
2232 {
2233         struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2234         int ret;
2235
2236         ret = request_irq(ar_pci->pdev->irq,
2237                           ath10k_pci_interrupt_handler,
2238                           IRQF_SHARED, "ath10k_pci", ar);
2239         if (ret) {
2240                 ath10k_warn("failed to request legacy irq %d: %d\n",
2241                             ar_pci->pdev->irq, ret);
2242                 return ret;
2243         }
2244
2245         return 0;
2246 }
2247
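/* Install irq handlers matching the mode chosen by ath10k_pci_init_irq().
 * With irq registration deferred, this is called from hif start() rather
 * than at power up.
 */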
2248 static int ath10k_pci_request_irq(struct ath10k *ar)
2249 {
2250         struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2251
2252         switch (ar_pci->num_msi_intrs) {
2253         case 0:
2254                 return ath10k_pci_request_irq_legacy(ar);
2255         case 1:
2256                 return ath10k_pci_request_irq_msi(ar);
2257         case MSI_NUM_REQUEST:
2258                 return ath10k_pci_request_irq_msix(ar);
2259         }
2260
2261         ath10k_warn("unknown irq configuration upon request\n");
2262         return -EINVAL;
2263 }
2264
2265 static void ath10k_pci_free_irq(struct ath10k *ar)
2266 {
2267         struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2268         int i;
2269
2270         /* There's at least one interrupt regardless of whether it's legacy
2271          * INTR, MSI or MSI-X */
2272         for (i = 0; i < max(1, ar_pci->num_msi_intrs); i++)
2273                 free_irq(ar_pci->pdev->irq + i, ar);
2274 }
2275
2276 static void ath10k_pci_init_irq_tasklets(struct ath10k *ar)
2277 {
2278         struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2279         int i;
2280
2281         tasklet_init(&ar_pci->intr_tq, ath10k_pci_tasklet, (unsigned long)ar);
2282         tasklet_init(&ar_pci->msi_fw_err, ath10k_msi_err_tasklet,
2283                      (unsigned long)ar);
2284
2285         for (i = 0; i < CE_COUNT; i++) {
2286                 ar_pci->pipe_info[i].ar_pci = ar_pci;
2287                 tasklet_init(&ar_pci->pipe_info[i].intr, ath10k_pci_ce_tasklet,
2288                              (unsigned long)&ar_pci->pipe_info[i]);
2289         }
2290 }
2291
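/* Choose the interrupt mode without installing any handlers: try MSI-X
 * (MSI_NUM_REQUEST vectors) when the hardware supports it, fall back to a
 * single MSI, and finally to the shared legacy line. num_msi_intrs records
 * the outcome for ath10k_pci_request_irq()/ath10k_pci_deinit_irq().
 */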
2292 static int ath10k_pci_init_irq(struct ath10k *ar)
2293 {
2294         struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2295         int ret;
2296
2297         ath10k_pci_init_irq_tasklets(ar);
2298
2299         if (!test_bit(ATH10K_PCI_FEATURE_MSI_X, ar_pci->features))
2300                 goto msi;
2301
2302         /* Try MSI-X */
2303         ar_pci->num_msi_intrs = MSI_NUM_REQUEST;
2304         ret = pci_enable_msi_block(ar_pci->pdev, ar_pci->num_msi_intrs);
2305         if (ret == 0)
2306                 return 0;
2307         if (ret > 0)
2308                 pci_disable_msi(ar_pci->pdev);
2309
2310 msi:
2311         /* Try MSI */
2312         ar_pci->num_msi_intrs = 1;
2313         ret = pci_enable_msi(ar_pci->pdev);
2314         if (ret == 0)
2315                 return 0;
2316
2317         /* Try legacy irq
2318          *
2319          * A potential race occurs here: the CORE_BASE write
2320          * depends on the target correctly decoding the AXI address, but
2321          * the host won't know when the target writes the BAR to CORE_CTRL.
2322          * The write might get lost if the target has NOT written the BAR.
2323          * For now, work around the race by repeating the write in the
2324          * synchronization check below. */
2325         ar_pci->num_msi_intrs = 0;
2326
2327         ret = ath10k_pci_wake(ar);
2328         if (ret) {
2329                 ath10k_warn("failed to wake target: %d\n", ret);
2330                 return ret;
2331         }
2332
2333         ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + PCIE_INTR_ENABLE_ADDRESS,
2334                            PCIE_INTR_FIRMWARE_MASK | PCIE_INTR_CE_MASK_ALL);
2335         ath10k_pci_sleep(ar);
2336
2337         return 0;
2338 }
2339
2340 static int ath10k_pci_deinit_irq_legacy(struct ath10k *ar)
2341 {
2342         int ret;
2343
2344         ret = ath10k_pci_wake(ar);
2345         if (ret) {
2346                 ath10k_warn("failed to wake target: %d\n", ret);
2347                 return ret;
2348         }
2349
2350         ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + PCIE_INTR_ENABLE_ADDRESS,
2351                            0);
2352         ath10k_pci_sleep(ar);
2353
2354         return 0;
2355 }
2356
2357 static int ath10k_pci_deinit_irq(struct ath10k *ar)
2358 {
2359         struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2360
2361         switch (ar_pci->num_msi_intrs) {
2362         case 0:
2363                 return ath10k_pci_deinit_irq_legacy(ar);
2364         case 1:
2365                 /* fall-through */
2366         case MSI_NUM_REQUEST:
2367                 pci_disable_msi(ar_pci->pdev);
2368                 return 0;
2369         }
2370
2371         ath10k_warn("unknown irq configuration upon deinit\n");
2372         return -EINVAL;
2373 }
2374
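/* Poll the firmware indicator register for FW_IND_INITIALIZED for up to
 * roughly 3 seconds. In legacy interrupt mode the interrupt enable mask is
 * rewritten on every iteration to work around the CORE_BASE write race
 * described in ath10k_pci_init_irq().
 */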
2375 static int ath10k_pci_wait_for_target_init(struct ath10k *ar)
2376 {
2377         struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2378         int wait_limit = 300; /* 3 sec */
2379         int ret;
2380
2381         ret = ath10k_pci_wake(ar);
2382         if (ret) {
2383                 ath10k_err("failed to wake up target: %d\n", ret);
2384                 return ret;
2385         }
2386
2387         while (wait_limit-- &&
2388                !(ioread32(ar_pci->mem + FW_INDICATOR_ADDRESS) &
2389                  FW_IND_INITIALIZED)) {
2390                 if (ar_pci->num_msi_intrs == 0)
2391                         /* Fix potential race by repeating CORE_BASE writes */
2392                         iowrite32(PCIE_INTR_FIRMWARE_MASK |
2393                                   PCIE_INTR_CE_MASK_ALL,
2394                                   ar_pci->mem + (SOC_CORE_BASE_ADDRESS |
2395                                                  PCIE_INTR_ENABLE_ADDRESS));
2396                 mdelay(10);
2397         }
2398
2399         if (wait_limit < 0) {
2400                 ath10k_err("target stalled\n");
2401                 ret = -EIO;
2402         }
2403
2404         ath10k_pci_sleep(ar);
2405         return ret;
2408 }
2409
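/* Cold-reset the target: assert the SoC global reset bit, poll RTC_STATE
 * until the cold reset is reflected, then deassert it and wait for the
 * target to come back out of reset.
 */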
2410 static int ath10k_pci_device_reset(struct ath10k *ar)
2411 {
2412         int i, ret;
2413         u32 val;
2414
2415         ret = ath10k_do_pci_wake(ar);
2416         if (ret) {
2417                 ath10k_err("failed to wake up target: %d\n",
2418                            ret);
2419                 return ret;
2420         }
2421
2422         /* Put Target, including PCIe, into RESET. */
2423         val = ath10k_pci_reg_read32(ar, SOC_GLOBAL_RESET_ADDRESS);
2424         val |= 1;
2425         ath10k_pci_reg_write32(ar, SOC_GLOBAL_RESET_ADDRESS, val);
2426
2427         for (i = 0; i < ATH_PCI_RESET_WAIT_MAX; i++) {
2428                 if (ath10k_pci_reg_read32(ar, RTC_STATE_ADDRESS) &
2429                                           RTC_STATE_COLD_RESET_MASK)
2430                         break;
2431                 msleep(1);
2432         }
2433
2434         /* Pull Target, including PCIe, out of RESET. */
2435         val &= ~1;
2436         ath10k_pci_reg_write32(ar, SOC_GLOBAL_RESET_ADDRESS, val);
2437
2438         for (i = 0; i < ATH_PCI_RESET_WAIT_MAX; i++) {
2439                 if (!(ath10k_pci_reg_read32(ar, RTC_STATE_ADDRESS) &
2440                                             RTC_STATE_COLD_RESET_MASK))
2441                         break;
2442                 msleep(1);
2443         }
2444
2445         ath10k_do_pci_sleep(ar);
2446         return 0;
2447 }
2448
2449 static void ath10k_pci_dump_features(struct ath10k_pci *ar_pci)
2450 {
2451         int i;
2452
2453         for (i = 0; i < ATH10K_PCI_FEATURE_COUNT; i++) {
2454                 if (!test_bit(i, ar_pci->features))
2455                         continue;
2456
2457                 switch (i) {
2458                 case ATH10K_PCI_FEATURE_MSI_X:
2459                         ath10k_dbg(ATH10K_DBG_BOOT, "device supports MSI-X\n");
2460                         break;
2461                 case ATH10K_PCI_FEATURE_SOC_POWER_SAVE:
2462                         ath10k_dbg(ATH10K_DBG_BOOT, "QCA98XX SoC power save enabled\n");
2463                         break;
2464                 }
2465         }
2466 }
2467
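/* PCI probe: assign the BAR, set 32-bit DMA masks, map the SoC registers
 * and register with the ath10k core. No interrupts are set up here; irq
 * configuration happens at hif power up and registration at hif start.
 */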
2468 static int ath10k_pci_probe(struct pci_dev *pdev,
2469                             const struct pci_device_id *pci_dev)
2470 {
2471         void __iomem *mem;
2472         int ret = 0;
2473         struct ath10k *ar;
2474         struct ath10k_pci *ar_pci;
2475         u32 lcr_val, chip_id;
2476
2477         ath10k_dbg(ATH10K_DBG_PCI, "%s\n", __func__);
2478
2479         ar_pci = kzalloc(sizeof(*ar_pci), GFP_KERNEL);
2480         if (ar_pci == NULL)
2481                 return -ENOMEM;
2482
2483         ar_pci->pdev = pdev;
2484         ar_pci->dev = &pdev->dev;
2485
2486         switch (pci_dev->device) {
2487         case QCA988X_2_0_DEVICE_ID:
2488                 set_bit(ATH10K_PCI_FEATURE_MSI_X, ar_pci->features);
2489                 break;
2490         default:
2491                 ret = -ENODEV;
2492                 ath10k_err("Unknown device ID: %d\n", pci_dev->device);
2493                 goto err_ar_pci;
2494         }
2495
2496         if (ath10k_target_ps)
2497                 set_bit(ATH10K_PCI_FEATURE_SOC_POWER_SAVE, ar_pci->features);
2498
2499         ath10k_pci_dump_features(ar_pci);
2500
2501         ar = ath10k_core_create(ar_pci, ar_pci->dev, &ath10k_pci_hif_ops);
2502         if (!ar) {
2503                 ath10k_err("failed to create driver core\n");
2504                 ret = -EINVAL;
2505                 goto err_ar_pci;
2506         }
2507
2508         ar_pci->ar = ar;
2509         ar_pci->fw_indicator_address = FW_INDICATOR_ADDRESS;
2510         atomic_set(&ar_pci->keep_awake_count, 0);
2511
2512         pci_set_drvdata(pdev, ar);
2513
2514         /*
2515          * Without any knowledge of the Host, the Target may have been reset or
2516          * power cycled and its Config Space may no longer reflect the PCI
2517          * address space that was assigned earlier by the PCI infrastructure.
2518          * Refresh it now.
2519          */
2520         ret = pci_assign_resource(pdev, BAR_NUM);
2521         if (ret) {
2522                 ath10k_err("failed to assign PCI space: %d\n", ret);
2523                 goto err_ar;
2524         }
2525
2526         ret = pci_enable_device(pdev);
2527         if (ret) {
2528                 ath10k_err("failed to enable PCI device: %d\n", ret);
2529                 goto err_ar;
2530         }
2531
2532         /* Request MMIO resources */
2533         ret = pci_request_region(pdev, BAR_NUM, "ath");
2534         if (ret) {
2535                 ath10k_err("failed to request MMIO region: %d\n", ret);
2536                 goto err_device;
2537         }
2538
2539         /*
2540          * Target structures have a limit of 32 bit DMA pointers.
2541          * DMA pointers can be wider than 32 bits by default on some systems.
2542          */
2543         ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
2544         if (ret) {
2545                 ath10k_err("failed to set DMA mask to 32-bit: %d\n", ret);
2546                 goto err_region;
2547         }
2548
2549         ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
2550         if (ret) {
2551                 ath10k_err("failed to set consistent DMA mask to 32-bit: %d\n", ret);
2552                 goto err_region;
2553         }
2554
2555         /* Set bus master bit in PCI_COMMAND to enable DMA */
2556         pci_set_master(pdev);
2557
2558         /*
2559          * Temporary FIX: disable ASPM
2560          * Will be removed after the OTP is programmed
2561          */
2562         pci_read_config_dword(pdev, 0x80, &lcr_val);
2563         pci_write_config_dword(pdev, 0x80, (lcr_val & 0xffffff00));
2564
2565         /* Arrange for access to Target SoC registers. */
2566         mem = pci_iomap(pdev, BAR_NUM, 0);
2567         if (!mem) {
2568                 ath10k_err("failed to perform IOMAP for BAR%d\n", BAR_NUM);
2569                 ret = -EIO;
2570                 goto err_master;
2571         }
2572
2573         ar_pci->mem = mem;
2574
2575         spin_lock_init(&ar_pci->ce_lock);
2576
2577         ret = ath10k_do_pci_wake(ar);
2578         if (ret) {
2579                 ath10k_err("failed to wake target to read chip id: %d\n", ret);
2580                 goto err_iomap;
2581         }
2582
2583         chip_id = ath10k_pci_soc_read32(ar, SOC_CHIP_ID_ADDRESS);
2584
2585         ath10k_do_pci_sleep(ar);
2586
2587         ath10k_dbg(ATH10K_DBG_BOOT, "boot pci_mem 0x%p\n", ar_pci->mem);
2588
2589         ret = ath10k_core_register(ar, chip_id);
2590         if (ret) {
2591                 ath10k_err("failed to register driver core: %d\n", ret);
2592                 goto err_iomap;
2593         }
2594
2595         return 0;
2596
2597 err_iomap:
2598         pci_iounmap(pdev, mem);
2599 err_master:
2600         pci_clear_master(pdev);
2601 err_region:
2602         pci_release_region(pdev, BAR_NUM);
2603 err_device:
2604         pci_disable_device(pdev);
2605 err_ar:
2606         ath10k_core_destroy(ar);
2607 err_ar_pci:
2609         kfree(ar_pci);
2610
2611         return ret;
2612 }
2613
2614 static void ath10k_pci_remove(struct pci_dev *pdev)
2615 {
2616         struct ath10k *ar = pci_get_drvdata(pdev);
2617         struct ath10k_pci *ar_pci;
2618
2619         ath10k_dbg(ATH10K_DBG_PCI, "%s\n", __func__);
2620
2621         if (!ar)
2622                 return;
2623
2624         ar_pci = ath10k_pci_priv(ar);
2625
2626         if (!ar_pci)
2627                 return;
2628
2629         tasklet_kill(&ar_pci->msi_fw_err);
2630
2631         ath10k_core_unregister(ar);
2632
2633         pci_iounmap(pdev, ar_pci->mem);
2634         pci_release_region(pdev, BAR_NUM);
2635         pci_clear_master(pdev);
2636         pci_disable_device(pdev);
2637
2638         ath10k_core_destroy(ar);
2639         kfree(ar_pci);
2640 }
2641
2642 MODULE_DEVICE_TABLE(pci, ath10k_pci_id_table);
2643
2644 static struct pci_driver ath10k_pci_driver = {
2645         .name = "ath10k_pci",
2646         .id_table = ath10k_pci_id_table,
2647         .probe = ath10k_pci_probe,
2648         .remove = ath10k_pci_remove,
2649 };
2650
2651 static int __init ath10k_pci_init(void)
2652 {
2653         int ret;
2654
2655         ret = pci_register_driver(&ath10k_pci_driver);
2656         if (ret)
2657                 ath10k_err("failed to register PCI driver: %d\n", ret);
2658
2659         return ret;
2660 }
2661 module_init(ath10k_pci_init);
2662
2663 static void __exit ath10k_pci_exit(void)
2664 {
2665         pci_unregister_driver(&ath10k_pci_driver);
2666 }
2667
2668 module_exit(ath10k_pci_exit);
2669
2670 MODULE_AUTHOR("Qualcomm Atheros");
2671 MODULE_DESCRIPTION("Driver support for Atheros QCA988X PCIe devices");
2672 MODULE_LICENSE("Dual BSD/GPL");
2673 MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" QCA988X_HW_2_0_FW_FILE);
2674 MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" QCA988X_HW_2_0_OTP_FILE);
2675 MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" QCA988X_HW_2_0_BOARD_DATA_FILE);