ath10k: convert ath10k_pci_wake() to return
drivers/net/wireless/ath/ath10k/pci.c (cascardo/linux.git)
/*
 * Copyright (c) 2005-2011 Atheros Communications Inc.
 * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <linux/pci.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>

#include "core.h"
#include "debug.h"

#include "targaddrs.h"
#include "bmi.h"

#include "hif.h"
#include "htc.h"

#include "ce.h"
#include "pci.h"

static unsigned int ath10k_target_ps;
module_param(ath10k_target_ps, uint, 0644);
MODULE_PARM_DESC(ath10k_target_ps, "Enable ath10k Target (SoC) PS option");
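/*
 * Note: with target PS left disabled (the default) the driver is expected
 * to keep the target SoC awake the whole time; the refcounted
 * ath10k_do_pci_wake()/ath10k_do_pci_sleep() helpers below provide this.
 */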

#define QCA988X_2_0_DEVICE_ID   (0x003c)

static DEFINE_PCI_DEVICE_TABLE(ath10k_pci_id_table) = {
        { PCI_VDEVICE(ATHEROS, QCA988X_2_0_DEVICE_ID) }, /* PCI-E QCA988X V2 */
        {0}
};

static int ath10k_pci_diag_read_access(struct ath10k *ar, u32 address,
                                       u32 *data);

static void ath10k_pci_process_ce(struct ath10k *ar);
static int ath10k_pci_post_rx(struct ath10k *ar);
static int ath10k_pci_post_rx_pipe(struct ath10k_pci_pipe *pipe_info,
                                   int num);
static void ath10k_pci_rx_pipe_cleanup(struct ath10k_pci_pipe *pipe_info);
static void ath10k_pci_stop_ce(struct ath10k *ar);
static void ath10k_pci_device_reset(struct ath10k *ar);
static int ath10k_pci_reset_target(struct ath10k *ar);
static int ath10k_pci_start_intr(struct ath10k *ar);
static void ath10k_pci_stop_intr(struct ath10k *ar);

static const struct ce_attr host_ce_config_wlan[] = {
        /* CE0: host->target HTC control and raw streams */
        {
                .flags = CE_ATTR_FLAGS,
                .src_nentries = 16,
                .src_sz_max = 256,
                .dest_nentries = 0,
        },

        /* CE1: target->host HTT + HTC control */
        {
                .flags = CE_ATTR_FLAGS,
                .src_nentries = 0,
                .src_sz_max = 512,
                .dest_nentries = 512,
        },

        /* CE2: target->host WMI */
        {
                .flags = CE_ATTR_FLAGS,
                .src_nentries = 0,
                .src_sz_max = 2048,
                .dest_nentries = 32,
        },

        /* CE3: host->target WMI */
        {
                .flags = CE_ATTR_FLAGS,
                .src_nentries = 32,
                .src_sz_max = 2048,
                .dest_nentries = 0,
        },

        /* CE4: host->target HTT */
        {
                .flags = CE_ATTR_FLAGS | CE_ATTR_DIS_INTR,
                .src_nentries = CE_HTT_H2T_MSG_SRC_NENTRIES,
                .src_sz_max = 256,
                .dest_nentries = 0,
        },

        /* CE5: unused */
        {
                .flags = CE_ATTR_FLAGS,
                .src_nentries = 0,
                .src_sz_max = 0,
                .dest_nentries = 0,
        },

        /* CE6: target autonomous hif_memcpy */
        {
                .flags = CE_ATTR_FLAGS,
                .src_nentries = 0,
                .src_sz_max = 0,
                .dest_nentries = 0,
        },

        /* CE7: ce_diag, the Diagnostic Window */
        {
                .flags = CE_ATTR_FLAGS,
                .src_nentries = 2,
                .src_sz_max = DIAG_TRANSFER_LIMIT,
                .dest_nentries = 2,
        },
};

/* Target firmware's Copy Engine configuration. */
static const struct ce_pipe_config target_ce_config_wlan[] = {
        /* CE0: host->target HTC control and raw streams */
        {
                .pipenum = 0,
                .pipedir = PIPEDIR_OUT,
                .nentries = 32,
                .nbytes_max = 256,
                .flags = CE_ATTR_FLAGS,
                .reserved = 0,
        },

        /* CE1: target->host HTT + HTC control */
        {
                .pipenum = 1,
                .pipedir = PIPEDIR_IN,
                .nentries = 32,
                .nbytes_max = 512,
                .flags = CE_ATTR_FLAGS,
                .reserved = 0,
        },

        /* CE2: target->host WMI */
        {
                .pipenum = 2,
                .pipedir = PIPEDIR_IN,
                .nentries = 32,
                .nbytes_max = 2048,
                .flags = CE_ATTR_FLAGS,
                .reserved = 0,
        },

        /* CE3: host->target WMI */
        {
                .pipenum = 3,
                .pipedir = PIPEDIR_OUT,
                .nentries = 32,
                .nbytes_max = 2048,
                .flags = CE_ATTR_FLAGS,
                .reserved = 0,
        },

        /* CE4: host->target HTT */
        {
                .pipenum = 4,
                .pipedir = PIPEDIR_OUT,
                .nentries = 256,
                .nbytes_max = 256,
                .flags = CE_ATTR_FLAGS,
                .reserved = 0,
        },

        /* NB: 50% of src nentries, since tx has 2 frags */

        /* CE5: unused */
        {
                .pipenum = 5,
                .pipedir = PIPEDIR_OUT,
                .nentries = 32,
                .nbytes_max = 2048,
                .flags = CE_ATTR_FLAGS,
                .reserved = 0,
        },

        /* CE6: Reserved for target autonomous hif_memcpy */
        {
                .pipenum = 6,
                .pipedir = PIPEDIR_INOUT,
                .nentries = 32,
                .nbytes_max = 4096,
                .flags = CE_ATTR_FLAGS,
                .reserved = 0,
        },

        /* CE7 used only by Host */
};

/*
 * Diagnostic read/write access is provided for startup/config/debug usage.
 * Caller must guarantee proper alignment, when applicable, and single user
 * at any moment.
 */
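/*
 * Accesses below DRAM_BASE_ADDRESS are serviced with plain register
 * reads/writes. Anything else is bounced through the diagnostic Copy
 * Engine: a DMA-coherent bounce buffer is posted on one side, the CE
 * copies to/from the (converted) target address, and completion is
 * polled for in DIAG_TRANSFER_LIMIT sized chunks.
 */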
static int ath10k_pci_diag_read_mem(struct ath10k *ar, u32 address, void *data,
                                    int nbytes)
{
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        int ret = 0;
        u32 buf;
        unsigned int completed_nbytes, orig_nbytes, remaining_bytes;
        unsigned int id;
        unsigned int flags;
        struct ath10k_ce_pipe *ce_diag;
        /* Host buffer address in CE space */
        u32 ce_data;
        dma_addr_t ce_data_base = 0;
        void *data_buf = NULL;
        int i;

        /*
         * This code cannot handle reads to non-memory space. Redirect to the
         * register read function, but preserve this function's multi-word
         * read capability.
         */
        if (address < DRAM_BASE_ADDRESS) {
                if (!IS_ALIGNED(address, 4) ||
                    !IS_ALIGNED((unsigned long)data, 4))
                        return -EIO;

                while ((nbytes >= 4) && ((ret = ath10k_pci_diag_read_access(
                                          ar, address, (u32 *)data)) == 0)) {
                        nbytes -= sizeof(u32);
                        address += sizeof(u32);
                        data += sizeof(u32);
                }
                return ret;
        }

        ce_diag = ar_pci->ce_diag;

        /*
         * Allocate a temporary bounce buffer to hold caller's data
         * to be DMA'ed from Target. This guarantees
         *   1) 4-byte alignment
         *   2) Buffer in DMA-able space
         */
        orig_nbytes = nbytes;
        data_buf = (unsigned char *)pci_alloc_consistent(ar_pci->pdev,
                                                         orig_nbytes,
                                                         &ce_data_base);

        if (!data_buf) {
                ret = -ENOMEM;
                goto done;
        }
        memset(data_buf, 0, orig_nbytes);

        remaining_bytes = orig_nbytes;
        ce_data = ce_data_base;
        while (remaining_bytes) {
                nbytes = min_t(unsigned int, remaining_bytes,
                               DIAG_TRANSFER_LIMIT);

                ret = ath10k_ce_recv_buf_enqueue(ce_diag, NULL, ce_data);
                if (ret != 0)
                        goto done;

                /*
                 * Request CE to send from Target(!) address to Host buffer.
                 * The address supplied by the caller is in the Target CPU
                 * virtual address space; convert it to the CE address space
                 * before handing it to the diagnostic CE.
                 */
                ath10k_pci_wake(ar);
                address = TARG_CPU_SPACE_TO_CE_SPACE(ar, ar_pci->mem,
                                                     address);
                ath10k_pci_sleep(ar);

                ret = ath10k_ce_send(ce_diag, NULL, (u32)address, nbytes, 0,
                                     0);
                if (ret)
                        goto done;

                i = 0;
                while (ath10k_ce_completed_send_next(ce_diag, NULL, &buf,
                                                     &completed_nbytes,
                                                     &id) != 0) {
                        mdelay(1);
                        if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) {
                                ret = -EBUSY;
                                goto done;
                        }
                }

                if (nbytes != completed_nbytes) {
                        ret = -EIO;
                        goto done;
                }

                if (buf != (u32) address) {
                        ret = -EIO;
                        goto done;
                }

                i = 0;
                while (ath10k_ce_completed_recv_next(ce_diag, NULL, &buf,
                                                     &completed_nbytes,
                                                     &id, &flags) != 0) {
                        mdelay(1);

                        if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) {
                                ret = -EBUSY;
                                goto done;
                        }
                }

                if (nbytes != completed_nbytes) {
                        ret = -EIO;
                        goto done;
                }

                if (buf != ce_data) {
                        ret = -EIO;
                        goto done;
                }

                remaining_bytes -= nbytes;
                address += nbytes;
                ce_data += nbytes;
        }

done:
        if (ret == 0) {
                /* Copy data from allocated DMA buf to caller's buf */
                WARN_ON_ONCE(orig_nbytes & 3);
                for (i = 0; i < orig_nbytes / sizeof(__le32); i++) {
                        ((u32 *)data)[i] =
                                __le32_to_cpu(((__le32 *)data_buf)[i]);
                }
        } else {
                ath10k_dbg(ATH10K_DBG_PCI, "%s failure (0x%x)\n",
                           __func__, address);
        }

        if (data_buf)
                pci_free_consistent(ar_pci->pdev, orig_nbytes,
                                    data_buf, ce_data_base);

        return ret;
}

/* Read 4-byte aligned data from Target memory or register */
static int ath10k_pci_diag_read_access(struct ath10k *ar, u32 address,
                                       u32 *data)
{
        /* Assume range doesn't cross this boundary */
        if (address >= DRAM_BASE_ADDRESS)
                return ath10k_pci_diag_read_mem(ar, address, data, sizeof(u32));

        ath10k_pci_wake(ar);
        *data = ath10k_pci_read32(ar, address);
        ath10k_pci_sleep(ar);
        return 0;
}

static int ath10k_pci_diag_write_mem(struct ath10k *ar, u32 address,
                                     const void *data, int nbytes)
{
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        int ret = 0;
        u32 buf;
        unsigned int completed_nbytes, orig_nbytes, remaining_bytes;
        unsigned int id;
        unsigned int flags;
        struct ath10k_ce_pipe *ce_diag;
        void *data_buf = NULL;
        u32 ce_data;    /* Host buffer address in CE space */
        dma_addr_t ce_data_base = 0;
        int i;

        ce_diag = ar_pci->ce_diag;

        /*
         * Allocate a temporary bounce buffer to hold caller's data
         * to be DMA'ed to Target. This guarantees
         *   1) 4-byte alignment
         *   2) Buffer in DMA-able space
         */
        orig_nbytes = nbytes;
        data_buf = (unsigned char *)pci_alloc_consistent(ar_pci->pdev,
                                                         orig_nbytes,
                                                         &ce_data_base);
        if (!data_buf) {
                ret = -ENOMEM;
                goto done;
        }

        /* Copy caller's data to allocated DMA buf */
        WARN_ON_ONCE(orig_nbytes & 3);
        for (i = 0; i < orig_nbytes / sizeof(__le32); i++)
                ((__le32 *)data_buf)[i] = __cpu_to_le32(((u32 *)data)[i]);

        /*
         * The address supplied by the caller is in the Target CPU virtual
         * address space; convert it to the CE address space before use
         * with the diagnostic CE.
         */
        ath10k_pci_wake(ar);
        address = TARG_CPU_SPACE_TO_CE_SPACE(ar, ar_pci->mem, address);
        ath10k_pci_sleep(ar);

        remaining_bytes = orig_nbytes;
        ce_data = ce_data_base;
        while (remaining_bytes) {
                /* FIXME: check cast */
                nbytes = min_t(int, remaining_bytes, DIAG_TRANSFER_LIMIT);

                /* Set up to receive directly into Target(!) address */
                ret = ath10k_ce_recv_buf_enqueue(ce_diag, NULL, address);
                if (ret != 0)
                        goto done;

                /*
                 * Request CE to send caller-supplied data that
                 * was copied to bounce buffer to Target(!) address.
                 */
                ret = ath10k_ce_send(ce_diag, NULL, (u32) ce_data,
                                     nbytes, 0, 0);
                if (ret != 0)
                        goto done;

                i = 0;
                while (ath10k_ce_completed_send_next(ce_diag, NULL, &buf,
                                                     &completed_nbytes,
                                                     &id) != 0) {
                        mdelay(1);

                        if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) {
                                ret = -EBUSY;
                                goto done;
                        }
                }

                if (nbytes != completed_nbytes) {
                        ret = -EIO;
                        goto done;
                }

                if (buf != ce_data) {
                        ret = -EIO;
                        goto done;
                }

                i = 0;
                while (ath10k_ce_completed_recv_next(ce_diag, NULL, &buf,
                                                     &completed_nbytes,
                                                     &id, &flags) != 0) {
                        mdelay(1);

                        if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) {
                                ret = -EBUSY;
                                goto done;
                        }
                }

                if (nbytes != completed_nbytes) {
                        ret = -EIO;
                        goto done;
                }

                if (buf != address) {
                        ret = -EIO;
                        goto done;
                }

                remaining_bytes -= nbytes;
                address += nbytes;
                ce_data += nbytes;
        }

done:
        if (data_buf) {
                pci_free_consistent(ar_pci->pdev, orig_nbytes, data_buf,
                                    ce_data_base);
        }

        if (ret != 0)
                ath10k_dbg(ATH10K_DBG_PCI, "%s failure (0x%x)\n", __func__,
                           address);

        return ret;
}

/* Write 4-byte aligned data to Target memory or register */
static int ath10k_pci_diag_write_access(struct ath10k *ar, u32 address,
                                        u32 data)
{
        /* Assume range doesn't cross this boundary */
        if (address >= DRAM_BASE_ADDRESS)
                return ath10k_pci_diag_write_mem(ar, address, &data,
                                                 sizeof(u32));

        ath10k_pci_wake(ar);
        ath10k_pci_write32(ar, address, data);
        ath10k_pci_sleep(ar);
        return 0;
}

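/*
 * The RTC state register reports whether the target SoC is actually
 * awake; a wake request is only considered complete once this reads
 * back RTC_STATE_V_ON.
 */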
static bool ath10k_pci_target_is_awake(struct ath10k *ar)
{
        void __iomem *mem = ath10k_pci_priv(ar)->mem;
        u32 val;

        val = ioread32(mem + PCIE_LOCAL_BASE_ADDRESS +
                       RTC_STATE_ADDRESS);
        return (RTC_STATE_V_GET(val) == RTC_STATE_V_ON);
}

static void ath10k_pci_wait(struct ath10k *ar)
{
        int n = 100;

        while (n-- && !ath10k_pci_target_is_awake(ar))
                msleep(10);

        if (n < 0)
                ath10k_warn("Unable to wake up target\n");
}

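/*
 * Wake/sleep is refcounted via keep_awake_count: the first waker forces
 * AWAKE through the PCIE_SOC_WAKE register, and the last sleeper resets
 * it in ath10k_do_pci_sleep(). The wake path polls with a growing delay
 * until the target reports awake, or PCIE_WAKE_TIMEOUT expires.
 */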
int ath10k_do_pci_wake(struct ath10k *ar)
{
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        void __iomem *pci_addr = ar_pci->mem;
        int tot_delay = 0;
        int curr_delay = 5;

        if (atomic_read(&ar_pci->keep_awake_count) == 0) {
                /* Force AWAKE */
                iowrite32(PCIE_SOC_WAKE_V_MASK,
                          pci_addr + PCIE_LOCAL_BASE_ADDRESS +
                          PCIE_SOC_WAKE_ADDRESS);
        }
        atomic_inc(&ar_pci->keep_awake_count);

        if (ar_pci->verified_awake)
                return 0;

        for (;;) {
                if (ath10k_pci_target_is_awake(ar)) {
                        ar_pci->verified_awake = true;
                        return 0;
                }

                if (tot_delay > PCIE_WAKE_TIMEOUT) {
                        ath10k_warn("target took longer than %d us to wake up (awake count %d)\n",
                                    PCIE_WAKE_TIMEOUT,
                                    atomic_read(&ar_pci->keep_awake_count));
                        return -ETIMEDOUT;
                }

                udelay(curr_delay);
                tot_delay += curr_delay;

                if (curr_delay < 50)
                        curr_delay += 5;
        }
}

void ath10k_do_pci_sleep(struct ath10k *ar)
{
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        void __iomem *pci_addr = ar_pci->mem;

        if (atomic_dec_and_test(&ar_pci->keep_awake_count)) {
                /* Allow sleep */
                ar_pci->verified_awake = false;
                iowrite32(PCIE_SOC_WAKE_RESET,
                          pci_addr + PCIE_LOCAL_BASE_ADDRESS +
                          PCIE_SOC_WAKE_ADDRESS);
        }
}

/*
 * FIXME: Handle OOM properly.
 */
static inline
struct ath10k_pci_compl *get_free_compl(struct ath10k_pci_pipe *pipe_info)
{
        struct ath10k_pci_compl *compl = NULL;

        spin_lock_bh(&pipe_info->pipe_lock);
        if (list_empty(&pipe_info->compl_free)) {
                ath10k_warn("Completion buffers are full\n");
                goto exit;
        }
        compl = list_first_entry(&pipe_info->compl_free,
                                 struct ath10k_pci_compl, list);
        list_del(&compl->list);
exit:
        spin_unlock_bh(&pipe_info->pipe_lock);
        return compl;
}

/* Called by lower (CE) layer when a send to Target completes. */
static void ath10k_pci_ce_send_done(struct ath10k_ce_pipe *ce_state,
                                    void *transfer_context,
                                    u32 ce_data,
                                    unsigned int nbytes,
                                    unsigned int transfer_id)
{
        struct ath10k *ar = ce_state->ar;
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        struct ath10k_pci_pipe *pipe_info = &ar_pci->pipe_info[ce_state->id];
        struct ath10k_pci_compl *compl;
        bool process = false;

        do {
                /*
                 * For the send completion of an item in sendlist, just
                 * increment num_sends_allowed. The upper layer callback will
                 * be triggered when the last fragment has been sent.
                 */
                if (transfer_context == CE_SENDLIST_ITEM_CTXT) {
                        spin_lock_bh(&pipe_info->pipe_lock);
                        pipe_info->num_sends_allowed++;
                        spin_unlock_bh(&pipe_info->pipe_lock);
                        continue;
                }

                compl = get_free_compl(pipe_info);
                if (!compl)
                        break;

                compl->state = ATH10K_PCI_COMPL_SEND;
                compl->ce_state = ce_state;
                compl->pipe_info = pipe_info;
                compl->skb = transfer_context;
                compl->nbytes = nbytes;
                compl->transfer_id = transfer_id;
                compl->flags = 0;

                /*
                 * Add the completion to the processing queue.
                 */
                spin_lock_bh(&ar_pci->compl_lock);
                list_add_tail(&compl->list, &ar_pci->compl_process);
                spin_unlock_bh(&ar_pci->compl_lock);

                process = true;
        } while (ath10k_ce_completed_send_next(ce_state,
                                               &transfer_context,
                                               &ce_data, &nbytes,
                                               &transfer_id) == 0);

        /*
         * If only some of the items within a sendlist have completed,
         * don't invoke completion processing until the entire sendlist
         * has been sent.
         */
        if (!process)
                return;

        ath10k_pci_process_ce(ar);
}

/* Called by lower (CE) layer when data is received from the Target. */
static void ath10k_pci_ce_recv_data(struct ath10k_ce_pipe *ce_state,
                                    void *transfer_context, u32 ce_data,
                                    unsigned int nbytes,
                                    unsigned int transfer_id,
                                    unsigned int flags)
{
        struct ath10k *ar = ce_state->ar;
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        struct ath10k_pci_pipe *pipe_info = &ar_pci->pipe_info[ce_state->id];
        struct ath10k_pci_compl *compl;
        struct sk_buff *skb;

        do {
                compl = get_free_compl(pipe_info);
                if (!compl)
                        break;

                compl->state = ATH10K_PCI_COMPL_RECV;
                compl->ce_state = ce_state;
                compl->pipe_info = pipe_info;
                compl->skb = transfer_context;
                compl->nbytes = nbytes;
                compl->transfer_id = transfer_id;
                compl->flags = flags;

                skb = transfer_context;
                dma_unmap_single(ar->dev, ATH10K_SKB_CB(skb)->paddr,
                                 skb->len + skb_tailroom(skb),
                                 DMA_FROM_DEVICE);
                /*
                 * Add the completion to the processing queue.
                 */
                spin_lock_bh(&ar_pci->compl_lock);
                list_add_tail(&compl->list, &ar_pci->compl_process);
                spin_unlock_bh(&ar_pci->compl_lock);

        } while (ath10k_ce_completed_recv_next(ce_state,
                                               &transfer_context,
                                               &ce_data, &nbytes,
                                               &transfer_id,
                                               &flags) == 0);

        ath10k_pci_process_ce(ar);
}

/* Send the first nbytes bytes of the buffer */
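/*
 * Send-side flow control is done in software: num_sends_allowed mirrors
 * the free slots of the pipe's CE source ring (one slot is held back,
 * see ath10k_pci_start_ce()) and is replenished from the send
 * completion path.
 */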
static int ath10k_pci_hif_send_head(struct ath10k *ar, u8 pipe_id,
                                    unsigned int transfer_id,
                                    unsigned int bytes, struct sk_buff *nbuf)
{
        struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(nbuf);
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        struct ath10k_pci_pipe *pipe_info = &(ar_pci->pipe_info[pipe_id]);
        struct ath10k_ce_pipe *ce_hdl = pipe_info->ce_hdl;
        struct ce_sendlist sendlist;
        unsigned int len;
        u32 flags = 0;
        int ret;

        memset(&sendlist, 0, sizeof(struct ce_sendlist));

        len = min(bytes, nbuf->len);
        bytes -= len;

        if (len & 3)
                ath10k_warn("skb not aligned to 4-byte boundary (%d)\n", len);

        ath10k_dbg(ATH10K_DBG_PCI,
                   "pci send data vaddr %p paddr 0x%llx len %d as %d bytes\n",
                   nbuf->data, (unsigned long long) skb_cb->paddr,
                   nbuf->len, len);
        ath10k_dbg_dump(ATH10K_DBG_PCI_DUMP, NULL,
                        "ath10k tx: data: ",
                        nbuf->data, nbuf->len);

        ath10k_ce_sendlist_buf_add(&sendlist, skb_cb->paddr, len, flags);

        /* Make sure we have resources to handle this request */
        spin_lock_bh(&pipe_info->pipe_lock);
        if (!pipe_info->num_sends_allowed) {
                ath10k_warn("pipe %d is full\n", pipe_id);
                spin_unlock_bh(&pipe_info->pipe_lock);
                return -ENOSR;
        }
        pipe_info->num_sends_allowed--;
        spin_unlock_bh(&pipe_info->pipe_lock);

        ret = ath10k_ce_sendlist_send(ce_hdl, nbuf, &sendlist, transfer_id);
        if (ret)
                ath10k_warn("CE send failed: %p\n", nbuf);

        return ret;
}

static u16 ath10k_pci_hif_get_free_queue_number(struct ath10k *ar, u8 pipe)
{
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        struct ath10k_pci_pipe *pipe_info = &(ar_pci->pipe_info[pipe]);
        int ret;

        spin_lock_bh(&pipe_info->pipe_lock);
        ret = pipe_info->num_sends_allowed;
        spin_unlock_bh(&pipe_info->pipe_lock);

        return ret;
}

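/*
 * On a firmware crash the target leaves a register dump behind;
 * hi_failure_state holds its address. The dump is fetched through the
 * diagnostic window and printed before the device is restarted.
 */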
static void ath10k_pci_hif_dump_area(struct ath10k *ar)
{
        u32 reg_dump_area = 0;
        u32 reg_dump_values[REG_DUMP_COUNT_QCA988X] = {};
        u32 host_addr;
        int ret;
        u32 i;

        ath10k_err("firmware crashed!\n");
        ath10k_err("hardware name %s version 0x%x\n",
                   ar->hw_params.name, ar->target_version);
        ath10k_err("firmware version: %u.%u.%u.%u\n", ar->fw_version_major,
                   ar->fw_version_minor, ar->fw_version_release,
                   ar->fw_version_build);

        host_addr = host_interest_item_address(HI_ITEM(hi_failure_state));
        if (ath10k_pci_diag_read_mem(ar, host_addr,
                                     &reg_dump_area, sizeof(u32)) != 0) {
                ath10k_warn("could not read hi_failure_state\n");
                return;
        }

        ath10k_err("target register dump location: 0x%08X\n", reg_dump_area);

        ret = ath10k_pci_diag_read_mem(ar, reg_dump_area,
                                       &reg_dump_values[0],
                                       REG_DUMP_COUNT_QCA988X * sizeof(u32));
        if (ret != 0) {
                ath10k_err("could not dump firmware dump area\n");
                return;
        }

        BUILD_BUG_ON(REG_DUMP_COUNT_QCA988X % 4);

        ath10k_err("target register dump\n");
        for (i = 0; i < REG_DUMP_COUNT_QCA988X; i += 4)
                ath10k_err("[%02d]: 0x%08X 0x%08X 0x%08X 0x%08X\n",
                           i,
                           reg_dump_values[i],
                           reg_dump_values[i + 1],
                           reg_dump_values[i + 2],
                           reg_dump_values[i + 3]);

        ieee80211_queue_work(ar->hw, &ar->restart_work);
}

static void ath10k_pci_hif_send_complete_check(struct ath10k *ar, u8 pipe,
                                               int force)
{
        if (!force) {
                int resources;
                /*
                 * Decide whether to actually poll for completions, or just
                 * wait for a later chance.
                 * If there seem to be plenty of resources left, then just wait
                 * since checking involves reading a CE register, which is a
                 * relatively expensive operation.
                 */
                resources = ath10k_pci_hif_get_free_queue_number(ar, pipe);

                /*
                 * If at least 50% of the total resources are still available,
                 * don't bother checking again yet.
                 */
                if (resources > (host_ce_config_wlan[pipe].src_nentries >> 1))
                        return;
        }
        ath10k_ce_per_engine_service(ar, pipe);
}

static void ath10k_pci_hif_set_callbacks(struct ath10k *ar,
                                         struct ath10k_hif_cb *callbacks)
{
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

        ath10k_dbg(ATH10K_DBG_PCI, "%s\n", __func__);

        memcpy(&ar_pci->msg_callbacks_current, callbacks,
               sizeof(ar_pci->msg_callbacks_current));
}

static int ath10k_pci_start_ce(struct ath10k *ar)
{
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        struct ath10k_ce_pipe *ce_diag = ar_pci->ce_diag;
        const struct ce_attr *attr;
        struct ath10k_pci_pipe *pipe_info;
        struct ath10k_pci_compl *compl;
        int i, pipe_num, completions, disable_interrupts;

        spin_lock_init(&ar_pci->compl_lock);
        INIT_LIST_HEAD(&ar_pci->compl_process);

        for (pipe_num = 0; pipe_num < ar_pci->ce_count; pipe_num++) {
                pipe_info = &ar_pci->pipe_info[pipe_num];

                spin_lock_init(&pipe_info->pipe_lock);
                INIT_LIST_HEAD(&pipe_info->compl_free);

                /* Handle Diagnostic CE specially */
                if (pipe_info->ce_hdl == ce_diag)
                        continue;

                attr = &host_ce_config_wlan[pipe_num];
                completions = 0;

                if (attr->src_nentries) {
                        disable_interrupts = attr->flags & CE_ATTR_DIS_INTR;
                        ath10k_ce_send_cb_register(pipe_info->ce_hdl,
                                                   ath10k_pci_ce_send_done,
                                                   disable_interrupts);
                        completions += attr->src_nentries;
                        pipe_info->num_sends_allowed = attr->src_nentries - 1;
                }

                if (attr->dest_nentries) {
                        ath10k_ce_recv_cb_register(pipe_info->ce_hdl,
                                                   ath10k_pci_ce_recv_data);
                        completions += attr->dest_nentries;
                }

                if (completions == 0)
                        continue;

                for (i = 0; i < completions; i++) {
                        compl = kmalloc(sizeof(*compl), GFP_KERNEL);
                        if (!compl) {
                                ath10k_warn("No memory for completion state\n");
                                ath10k_pci_stop_ce(ar);
                                return -ENOMEM;
                        }

                        compl->state = ATH10K_PCI_COMPL_FREE;
                        list_add_tail(&compl->list, &pipe_info->compl_free);
                }
        }

        return 0;
}

static void ath10k_pci_stop_ce(struct ath10k *ar)
{
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        struct ath10k_pci_compl *compl;
        struct sk_buff *skb;
        int i;

        ath10k_ce_disable_interrupts(ar);

        /* Cancel the pending tasklet */
        tasklet_kill(&ar_pci->intr_tq);

        for (i = 0; i < CE_COUNT; i++)
                tasklet_kill(&ar_pci->pipe_info[i].intr);

        /* Mark pending completions as aborted, so that upper layers free up
         * their associated resources */
        spin_lock_bh(&ar_pci->compl_lock);
        list_for_each_entry(compl, &ar_pci->compl_process, list) {
                skb = compl->skb;
                ATH10K_SKB_CB(skb)->is_aborted = true;
        }
        spin_unlock_bh(&ar_pci->compl_lock);
}

static void ath10k_pci_cleanup_ce(struct ath10k *ar)
{
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        struct ath10k_pci_compl *compl, *tmp;
        struct ath10k_pci_pipe *pipe_info;
        struct sk_buff *netbuf;
        int pipe_num;

        /* Free pending completions. */
        spin_lock_bh(&ar_pci->compl_lock);
        if (!list_empty(&ar_pci->compl_process))
                ath10k_warn("pending completions still present! possible memory leaks.\n");

        list_for_each_entry_safe(compl, tmp, &ar_pci->compl_process, list) {
                list_del(&compl->list);
                netbuf = compl->skb;
                dev_kfree_skb_any(netbuf);
                kfree(compl);
        }
        spin_unlock_bh(&ar_pci->compl_lock);

        /* Free unused completions for each pipe. */
        for (pipe_num = 0; pipe_num < ar_pci->ce_count; pipe_num++) {
                pipe_info = &ar_pci->pipe_info[pipe_num];

                spin_lock_bh(&pipe_info->pipe_lock);
                list_for_each_entry_safe(compl, tmp,
                                         &pipe_info->compl_free, list) {
                        list_del(&compl->list);
                        kfree(compl);
                }
                spin_unlock_bh(&pipe_info->pipe_lock);
        }
}

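/*
 * Completion processing is deliberately serialized through the
 * compl_processing flag: upper layers cannot handle tx/rx completions
 * in parallel, so only one context at a time drains compl_process.
 */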
static void ath10k_pci_process_ce(struct ath10k *ar)
{
        struct ath10k_pci *ar_pci = ar->hif.priv;
        struct ath10k_hif_cb *cb = &ar_pci->msg_callbacks_current;
        struct ath10k_pci_compl *compl;
        struct sk_buff *skb;
        unsigned int nbytes;
        int ret, send_done = 0;

        /* Upper layers aren't ready to handle tx/rx completions in parallel so
         * we must serialize all completion processing. */

        spin_lock_bh(&ar_pci->compl_lock);
        if (ar_pci->compl_processing) {
                spin_unlock_bh(&ar_pci->compl_lock);
                return;
        }
        ar_pci->compl_processing = true;
        spin_unlock_bh(&ar_pci->compl_lock);

        for (;;) {
                spin_lock_bh(&ar_pci->compl_lock);
                if (list_empty(&ar_pci->compl_process)) {
                        spin_unlock_bh(&ar_pci->compl_lock);
                        break;
                }
                compl = list_first_entry(&ar_pci->compl_process,
                                         struct ath10k_pci_compl, list);
                list_del(&compl->list);
                spin_unlock_bh(&ar_pci->compl_lock);

                /* Only a send completion may replenish num_sends_allowed */
                send_done = 0;

                switch (compl->state) {
                case ATH10K_PCI_COMPL_SEND:
                        cb->tx_completion(ar,
                                          compl->skb,
                                          compl->transfer_id);
                        send_done = 1;
                        break;
                case ATH10K_PCI_COMPL_RECV:
                        ret = ath10k_pci_post_rx_pipe(compl->pipe_info, 1);
                        if (ret) {
                                ath10k_warn("Unable to post recv buffer for pipe: %d\n",
                                            compl->pipe_info->pipe_num);
                                break;
                        }

                        skb = compl->skb;
                        nbytes = compl->nbytes;

                        ath10k_dbg(ATH10K_DBG_PCI,
                                   "ath10k_pci_ce_recv_data netbuf=%p nbytes=%d\n",
                                   skb, nbytes);
                        ath10k_dbg_dump(ATH10K_DBG_PCI_DUMP, NULL,
                                        "ath10k rx: ", skb->data, nbytes);

                        if (skb->len + skb_tailroom(skb) >= nbytes) {
                                skb_trim(skb, 0);
                                skb_put(skb, nbytes);
                                cb->rx_completion(ar, skb,
                                                  compl->pipe_info->pipe_num);
                        } else {
                                ath10k_warn("rxed more than expected (nbytes %d, max %d)\n",
                                            nbytes,
                                            skb->len + skb_tailroom(skb));
                        }
                        break;
                case ATH10K_PCI_COMPL_FREE:
                        ath10k_warn("free completion cannot be processed\n");
                        break;
                default:
                        ath10k_warn("invalid completion state (%d)\n",
                                    compl->state);
                        break;
                }

                compl->state = ATH10K_PCI_COMPL_FREE;

                /*
                 * Add completion back to the pipe's free list.
                 */
                spin_lock_bh(&compl->pipe_info->pipe_lock);
                list_add_tail(&compl->list, &compl->pipe_info->compl_free);
                compl->pipe_info->num_sends_allowed += send_done;
                spin_unlock_bh(&compl->pipe_info->pipe_lock);
        }

        spin_lock_bh(&ar_pci->compl_lock);
        ar_pci->compl_processing = false;
        spin_unlock_bh(&ar_pci->compl_lock);
}

/* TODO - temporary mapping while we have too few CE's */
static int ath10k_pci_hif_map_service_to_pipe(struct ath10k *ar,
                                              u16 service_id, u8 *ul_pipe,
                                              u8 *dl_pipe, int *ul_is_polled,
                                              int *dl_is_polled)
{
        int ret = 0;

        /* polling for received messages not supported */
        *dl_is_polled = 0;

        switch (service_id) {
        case ATH10K_HTC_SVC_ID_HTT_DATA_MSG:
                /*
                 * Host->target HTT gets its own pipe, so it can be polled
                 * while other pipes are interrupt driven.
                 */
                *ul_pipe = 4;
                /*
                 * Use the same target->host pipe for HTC ctrl, HTC raw
                 * streams, and HTT.
                 */
                *dl_pipe = 1;
                break;

        case ATH10K_HTC_SVC_ID_RSVD_CTRL:
        case ATH10K_HTC_SVC_ID_TEST_RAW_STREAMS:
                /*
                 * Note: HTC_RAW_STREAMS_SVC is currently unused, and
                 * HTC_CTRL_RSVD_SVC could share the same pipe as the
                 * WMI services.  So, if another CE is needed, change
                 * this to *ul_pipe = 3, which frees up CE 0.
                 */
                /* *ul_pipe = 3; */
                *ul_pipe = 0;
                *dl_pipe = 1;
                break;

        case ATH10K_HTC_SVC_ID_WMI_DATA_BK:
        case ATH10K_HTC_SVC_ID_WMI_DATA_BE:
        case ATH10K_HTC_SVC_ID_WMI_DATA_VI:
        case ATH10K_HTC_SVC_ID_WMI_DATA_VO:
        case ATH10K_HTC_SVC_ID_WMI_CONTROL:
                *ul_pipe = 3;
                *dl_pipe = 2;
                break;

                /* pipe 5 unused   */
                /* pipe 6 reserved */
                /* pipe 7 reserved */

        default:
                ret = -1;
                break;
        }
        *ul_is_polled =
                (host_ce_config_wlan[*ul_pipe].flags & CE_ATTR_DIS_INTR) != 0;

        return ret;
}

static void ath10k_pci_hif_get_default_pipe(struct ath10k *ar,
                                            u8 *ul_pipe, u8 *dl_pipe)
{
        int ul_is_polled, dl_is_polled;

        (void)ath10k_pci_hif_map_service_to_pipe(ar,
                                                 ATH10K_HTC_SVC_ID_RSVD_CTRL,
                                                 ul_pipe,
                                                 dl_pipe,
                                                 &ul_is_polled,
                                                 &dl_is_polled);
}

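/*
 * Keep an RX pipe replenished: allocate, DMA-map and enqueue up to num
 * receive skbs of buf_sz bytes each into the pipe's CE. On failure the
 * pipe is cleaned up and everything posted so far is released.
 */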
static int ath10k_pci_post_rx_pipe(struct ath10k_pci_pipe *pipe_info,
                                   int num)
{
        struct ath10k *ar = pipe_info->hif_ce_state;
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        struct ath10k_ce_pipe *ce_state = pipe_info->ce_hdl;
        struct sk_buff *skb;
        dma_addr_t ce_data;
        int i, ret = 0;

        if (pipe_info->buf_sz == 0)
                return 0;

        for (i = 0; i < num; i++) {
                skb = dev_alloc_skb(pipe_info->buf_sz);
                if (!skb) {
                        ath10k_warn("could not allocate skbuff for pipe %d\n",
                                    pipe_info->pipe_num);
                        ret = -ENOMEM;
                        goto err;
                }

                WARN_ONCE((unsigned long)skb->data & 3, "unaligned skb");

                ce_data = dma_map_single(ar->dev, skb->data,
                                         skb->len + skb_tailroom(skb),
                                         DMA_FROM_DEVICE);

                if (unlikely(dma_mapping_error(ar->dev, ce_data))) {
                        ath10k_warn("could not dma map skbuff\n");
                        dev_kfree_skb_any(skb);
                        ret = -EIO;
                        goto err;
                }

                ATH10K_SKB_CB(skb)->paddr = ce_data;

                pci_dma_sync_single_for_device(ar_pci->pdev, ce_data,
                                               pipe_info->buf_sz,
                                               PCI_DMA_FROMDEVICE);

                ret = ath10k_ce_recv_buf_enqueue(ce_state, (void *)skb,
                                                 ce_data);
                if (ret) {
                        ath10k_warn("could not enqueue to pipe %d (%d)\n",
                                    pipe_info->pipe_num, ret);
                        goto err;
                }
        }

        return ret;

err:
        ath10k_pci_rx_pipe_cleanup(pipe_info);
        return ret;
}

static int ath10k_pci_post_rx(struct ath10k *ar)
{
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        struct ath10k_pci_pipe *pipe_info;
        const struct ce_attr *attr;
        int pipe_num, ret = 0;

        for (pipe_num = 0; pipe_num < ar_pci->ce_count; pipe_num++) {
                pipe_info = &ar_pci->pipe_info[pipe_num];
                attr = &host_ce_config_wlan[pipe_num];

                if (attr->dest_nentries == 0)
                        continue;

                ret = ath10k_pci_post_rx_pipe(pipe_info,
                                              attr->dest_nentries - 1);
                if (ret) {
                        ath10k_warn("Unable to replenish recv buffers for pipe %d\n",
                                    pipe_num);

                        for (; pipe_num >= 0; pipe_num--) {
                                pipe_info = &ar_pci->pipe_info[pipe_num];
                                ath10k_pci_rx_pipe_cleanup(pipe_info);
                        }
                        return ret;
                }
        }

        return 0;
}

static int ath10k_pci_hif_start(struct ath10k *ar)
{
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        int ret;

        ret = ath10k_pci_start_ce(ar);
        if (ret) {
                ath10k_warn("could not start CE (%d)\n", ret);
                return ret;
        }

        /* Post buffers once to start things off. */
        ret = ath10k_pci_post_rx(ar);
        if (ret) {
                ath10k_warn("could not post rx pipes (%d)\n", ret);
                return ret;
        }

        ar_pci->started = 1;
        return 0;
}

static void ath10k_pci_rx_pipe_cleanup(struct ath10k_pci_pipe *pipe_info)
{
        struct ath10k *ar;
        struct ath10k_pci *ar_pci;
        struct ath10k_ce_pipe *ce_hdl;
        u32 buf_sz;
        struct sk_buff *netbuf;
        u32 ce_data;

        buf_sz = pipe_info->buf_sz;

        /* Unused Copy Engine */
        if (buf_sz == 0)
                return;

        ar = pipe_info->hif_ce_state;
        ar_pci = ath10k_pci_priv(ar);

        if (!ar_pci->started)
                return;

        ce_hdl = pipe_info->ce_hdl;

        while (ath10k_ce_revoke_recv_next(ce_hdl, (void **)&netbuf,
                                          &ce_data) == 0) {
                dma_unmap_single(ar->dev, ATH10K_SKB_CB(netbuf)->paddr,
                                 netbuf->len + skb_tailroom(netbuf),
                                 DMA_FROM_DEVICE);
                dev_kfree_skb_any(netbuf);
        }
}

static void ath10k_pci_tx_pipe_cleanup(struct ath10k_pci_pipe *pipe_info)
{
        struct ath10k *ar;
        struct ath10k_pci *ar_pci;
        struct ath10k_ce_pipe *ce_hdl;
        struct sk_buff *netbuf;
        u32 ce_data;
        unsigned int nbytes;
        unsigned int id;
        u32 buf_sz;

        buf_sz = pipe_info->buf_sz;

        /* Unused Copy Engine */
        if (buf_sz == 0)
                return;

        ar = pipe_info->hif_ce_state;
        ar_pci = ath10k_pci_priv(ar);

        if (!ar_pci->started)
                return;

        ce_hdl = pipe_info->ce_hdl;

        while (ath10k_ce_cancel_send_next(ce_hdl, (void **)&netbuf,
                                          &ce_data, &nbytes, &id) == 0) {
                if (netbuf != CE_SENDLIST_ITEM_CTXT) {
                        /*
                         * Indicate the completion to higher layers to free
                         * the buffer.
                         */
                        ATH10K_SKB_CB(netbuf)->is_aborted = true;
                        ar_pci->msg_callbacks_current.tx_completion(ar,
                                                                    netbuf,
                                                                    id);
                }
        }
}

/*
 * Cleanup residual buffers for device shutdown:
 *    buffers that were enqueued for receive
 *    buffers that were to be sent
 * Note: Buffers that had completed but which were
 * not yet processed are on a completion queue. They
 * are handled when the completion thread shuts down.
 */
static void ath10k_pci_buffer_cleanup(struct ath10k *ar)
{
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        int pipe_num;

        for (pipe_num = 0; pipe_num < ar_pci->ce_count; pipe_num++) {
                struct ath10k_pci_pipe *pipe_info;

                pipe_info = &ar_pci->pipe_info[pipe_num];
                ath10k_pci_rx_pipe_cleanup(pipe_info);
                ath10k_pci_tx_pipe_cleanup(pipe_info);
        }
}

static void ath10k_pci_ce_deinit(struct ath10k *ar)
{
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        struct ath10k_pci_pipe *pipe_info;
        int pipe_num;

        for (pipe_num = 0; pipe_num < ar_pci->ce_count; pipe_num++) {
                pipe_info = &ar_pci->pipe_info[pipe_num];
                if (pipe_info->ce_hdl) {
                        ath10k_ce_deinit(pipe_info->ce_hdl);
                        pipe_info->ce_hdl = NULL;
                        pipe_info->buf_sz = 0;
                }
        }
}

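/*
 * With MSI the vectors are consecutive lines starting at pdev->irq;
 * with legacy interrupts num_msi_intrs is 0, so max(1, ...) still
 * covers the single shared line.
 */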
static void ath10k_pci_disable_irqs(struct ath10k *ar)
{
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        int i;

        for (i = 0; i < max(1, ar_pci->num_msi_intrs); i++)
                disable_irq(ar_pci->pdev->irq + i);
}

static void ath10k_pci_hif_stop(struct ath10k *ar)
{
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

        ath10k_dbg(ATH10K_DBG_PCI, "%s\n", __func__);

        /* IRQs are never explicitly re-enabled; they are implicitly
         * re-enabled by ath10k_pci_start_intr(). */
        ath10k_pci_disable_irqs(ar);

        ath10k_pci_stop_ce(ar);

        /* At this point, asynchronous threads are stopped, the target should
         * not DMA nor interrupt. We process the leftovers and then free
         * everything else up. */
        ath10k_pci_process_ce(ar);
        ath10k_pci_cleanup_ce(ar);
        ath10k_pci_buffer_cleanup(ar);

        ar_pci->started = 0;
}

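/*
 * BMI (Bootloader Messaging Interface) exchange: the request is copied
 * into a DMA-safe buffer and sent through the TX CE; when a response is
 * expected, a receive buffer is posted on the RX CE first. The exchange
 * then blocks on xfer.done for up to BMI_COMMUNICATION_TIMEOUT_HZ.
 */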
1401 static int ath10k_pci_hif_exchange_bmi_msg(struct ath10k *ar,
1402                                            void *req, u32 req_len,
1403                                            void *resp, u32 *resp_len)
1404 {
1405         struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1406         struct ath10k_pci_pipe *pci_tx = &ar_pci->pipe_info[BMI_CE_NUM_TO_TARG];
1407         struct ath10k_pci_pipe *pci_rx = &ar_pci->pipe_info[BMI_CE_NUM_TO_HOST];
1408         struct ath10k_ce_pipe *ce_tx = pci_tx->ce_hdl;
1409         struct ath10k_ce_pipe *ce_rx = pci_rx->ce_hdl;
1410         dma_addr_t req_paddr = 0;
1411         dma_addr_t resp_paddr = 0;
1412         struct bmi_xfer xfer = {};
1413         void *treq, *tresp = NULL;
1414         int ret = 0;
1415
1416         if (resp && !resp_len)
1417                 return -EINVAL;
1418
1419         if (resp && resp_len && *resp_len == 0)
1420                 return -EINVAL;
1421
1422         treq = kmemdup(req, req_len, GFP_KERNEL);
1423         if (!treq)
1424                 return -ENOMEM;
1425
1426         req_paddr = dma_map_single(ar->dev, treq, req_len, DMA_TO_DEVICE);
1427         ret = dma_mapping_error(ar->dev, req_paddr);
1428         if (ret)
1429                 goto err_dma;
1430
1431         if (resp && resp_len) {
1432                 tresp = kzalloc(*resp_len, GFP_KERNEL);
1433                 if (!tresp) {
1434                         ret = -ENOMEM;
1435                         goto err_req;
1436                 }
1437
1438                 resp_paddr = dma_map_single(ar->dev, tresp, *resp_len,
1439                                             DMA_FROM_DEVICE);
1440                 ret = dma_mapping_error(ar->dev, resp_paddr);
1441                 if (ret)
1442                         goto err_req;
1443
1444                 xfer.wait_for_resp = true;
1445                 xfer.resp_len = 0;
1446
1447                 ath10k_ce_recv_buf_enqueue(ce_rx, &xfer, resp_paddr);
1448         }
1449
1450         init_completion(&xfer.done);
1451
1452         ret = ath10k_ce_send(ce_tx, &xfer, req_paddr, req_len, -1, 0);
1453         if (ret)
1454                 goto err_resp;
1455
1456         ret = wait_for_completion_timeout(&xfer.done,
1457                                           BMI_COMMUNICATION_TIMEOUT_HZ);
1458         if (ret <= 0) {
1459                 u32 unused_buffer;
1460                 unsigned int unused_nbytes;
1461                 unsigned int unused_id;
1462
1463                 ret = -ETIMEDOUT;
1464                 ath10k_ce_cancel_send_next(ce_tx, NULL, &unused_buffer,
1465                                            &unused_nbytes, &unused_id);
1466         } else {
1467                 /* non-zero means we did not time out */
1468                 ret = 0;
1469         }
1470
1471 err_resp:
1472         if (resp) {
1473                 u32 unused_buffer;
1474
1475                 ath10k_ce_revoke_recv_next(ce_rx, NULL, &unused_buffer);
1476                 dma_unmap_single(ar->dev, resp_paddr,
1477                                  *resp_len, DMA_FROM_DEVICE);
1478         }
1479 err_req:
1480         dma_unmap_single(ar->dev, req_paddr, req_len, DMA_TO_DEVICE);
1481
1482         if (ret == 0 && resp_len) {
1483                 *resp_len = min(*resp_len, xfer.resp_len);
1484                 memcpy(resp, tresp, *resp_len);
1485         }
1486 err_dma:
1487         kfree(treq);
1488         kfree(tresp);
1489
1490         return ret;
1491 }
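
/* Illustrative caller (hypothetical; the command layout below is a
 * placeholder, not the real bmi.h definition) showing one BMI
 * request/response round trip through the function above:
 */
static int ath10k_pci_bmi_example(struct ath10k *ar)
{
        struct {
                __le32 id;
        } cmd = { .id = __cpu_to_le32(1) /* placeholder command id */ };
        u8 resp[256];
        u32 resp_len = sizeof(resp);

        /* blocks until the response arrives or
         * BMI_COMMUNICATION_TIMEOUT_HZ expires; on success resp_len is
         * clamped to the number of bytes actually received */
        return ath10k_pci_hif_exchange_bmi_msg(ar, &cmd, sizeof(cmd),
                                               resp, &resp_len);
}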
1492
1493 static void ath10k_pci_bmi_send_done(struct ath10k_ce_pipe *ce_state,
1494                                      void *transfer_context,
1495                                      u32 data,
1496                                      unsigned int nbytes,
1497                                      unsigned int transfer_id)
1498 {
1499         struct bmi_xfer *xfer = transfer_context;
1500
1501         if (xfer->wait_for_resp)
1502                 return;
1503
1504         complete(&xfer->done);
1505 }
1506
1507 static void ath10k_pci_bmi_recv_data(struct ath10k_ce_pipe *ce_state,
1508                                      void *transfer_context,
1509                                      u32 data,
1510                                      unsigned int nbytes,
1511                                      unsigned int transfer_id,
1512                                      unsigned int flags)
1513 {
1514         struct bmi_xfer *xfer = transfer_context;
1515
1516         if (!xfer->wait_for_resp) {
1517                 ath10k_warn("unexpected: BMI data received; ignoring\n");
1518                 return;
1519         }
1520
1521         xfer->resp_len = nbytes;
1522         complete(&xfer->done);
1523 }
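
/* Both callbacks above complete the same xfer->done that
 * ath10k_pci_hif_exchange_bmi_msg() blocks on: the send-done path
 * completes it only for request-without-response transfers, while the
 * receive path first records the response length and then completes
 * it. */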
1524
1525 /*
1526  * Map from service/endpoint to Copy Engine.
1527  * This table is derived from the CE_PCI TABLE, above.
1528  * It is passed to the Target at startup for use by firmware.
1529  */
1530 static const struct service_to_pipe target_service_to_ce_map_wlan[] = {
1531         {
1532                  ATH10K_HTC_SVC_ID_WMI_DATA_VO,
1533                  PIPEDIR_OUT,           /* out = UL = host -> target */
1534                  3,
1535         },
1536         {
1537                  ATH10K_HTC_SVC_ID_WMI_DATA_VO,
1538                  PIPEDIR_IN,            /* in = DL = target -> host */
1539                  2,
1540         },
1541         {
1542                  ATH10K_HTC_SVC_ID_WMI_DATA_BK,
1543                  PIPEDIR_OUT,           /* out = UL = host -> target */
1544                  3,
1545         },
1546         {
1547                  ATH10K_HTC_SVC_ID_WMI_DATA_BK,
1548                  PIPEDIR_IN,            /* in = DL = target -> host */
1549                  2,
1550         },
1551         {
1552                  ATH10K_HTC_SVC_ID_WMI_DATA_BE,
1553                  PIPEDIR_OUT,           /* out = UL = host -> target */
1554                  3,
1555         },
1556         {
1557                  ATH10K_HTC_SVC_ID_WMI_DATA_BE,
1558                  PIPEDIR_IN,            /* in = DL = target -> host */
1559                  2,
1560         },
1561         {
1562                  ATH10K_HTC_SVC_ID_WMI_DATA_VI,
1563                  PIPEDIR_OUT,           /* out = UL = host -> target */
1564                  3,
1565         },
1566         {
1567                  ATH10K_HTC_SVC_ID_WMI_DATA_VI,
1568                  PIPEDIR_IN,            /* in = DL = target -> host */
1569                  2,
1570         },
1571         {
1572                  ATH10K_HTC_SVC_ID_WMI_CONTROL,
1573                  PIPEDIR_OUT,           /* out = UL = host -> target */
1574                  3,
1575         },
1576         {
1577                  ATH10K_HTC_SVC_ID_WMI_CONTROL,
1578                  PIPEDIR_IN,            /* in = DL = target -> host */
1579                  2,
1580         },
1581         {
1582                  ATH10K_HTC_SVC_ID_RSVD_CTRL,
1583                  PIPEDIR_OUT,           /* out = UL = host -> target */
1584                  0,             /* could be moved to 3 (share with WMI) */
1585         },
1586         {
1587                  ATH10K_HTC_SVC_ID_RSVD_CTRL,
1588                  PIPEDIR_IN,            /* in = DL = target -> host */
1589                  1,
1590         },
1591         {
1592                  ATH10K_HTC_SVC_ID_TEST_RAW_STREAMS,    /* not currently used */
1593                  PIPEDIR_OUT,           /* out = UL = host -> target */
1594                  0,
1595         },
1596         {
1597                  ATH10K_HTC_SVC_ID_TEST_RAW_STREAMS,    /* not currently used */
1598                  PIPEDIR_IN,            /* in = DL = target -> host */
1599                  1,
1600         },
1601         {
1602                  ATH10K_HTC_SVC_ID_HTT_DATA_MSG,
1603                  PIPEDIR_OUT,           /* out = UL = host -> target */
1604                  4,
1605         },
1606         {
1607                  ATH10K_HTC_SVC_ID_HTT_DATA_MSG,
1608                  PIPEDIR_IN,            /* in = DL = target -> host */
1609                  1,
1610         },
1611
1612         /* (Additions here) */
1613
1614         {                               /* Must be last */
1615                  0,
1616                  0,
1617                  0,
1618         },
1619 };
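
/* A minimal sketch of consulting the table above from the host side
 * (hypothetical helper -- field names are assumed from the initializer
 * order, and the driver's real lookup is
 * ath10k_pci_hif_map_service_to_pipe()):
 */
static int ath10k_pci_svc_to_pipe_example(u32 service_id, u32 pipedir,
                                          u32 *pipenum)
{
        const struct service_to_pipe *entry;

        for (entry = target_service_to_ce_map_wlan;
             entry->service_id; entry++) {
                if (entry->service_id == service_id &&
                    entry->pipedir == pipedir) {
                        *pipenum = entry->pipenum;
                        return 0;
                }
        }

        return -ENOENT; /* hit the terminating all-zero entry */
}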
1620
1621 /*
1622  * Send an interrupt to the device to wake up the Target CPU
1623  * so it has an opportunity to notice any changed state.
1624  */
1625 static int ath10k_pci_wake_target_cpu(struct ath10k *ar)
1626 {
1627         int ret;
1628         u32 core_ctrl;
1629
1630         ret = ath10k_pci_diag_read_access(ar, SOC_CORE_BASE_ADDRESS |
1631                                               CORE_CTRL_ADDRESS,
1632                                           &core_ctrl);
1633         if (ret) {
1634                 ath10k_warn("Unable to read core ctrl\n");
1635                 return ret;
1636         }
1637
1638         /* A_INUM_FIRMWARE interrupt to Target CPU */
1639         core_ctrl |= CORE_CTRL_CPU_INTR_MASK;
1640
1641         ret = ath10k_pci_diag_write_access(ar, SOC_CORE_BASE_ADDRESS |
1642                                                CORE_CTRL_ADDRESS,
1643                                            core_ctrl);
1644         if (ret)
1645                 ath10k_warn("Unable to set interrupt mask\n");
1646
1647         return ret;
1648 }
1649
1650 static int ath10k_pci_init_config(struct ath10k *ar)
1651 {
1652         u32 interconnect_targ_addr;
1653         u32 pcie_state_targ_addr = 0;
1654         u32 pipe_cfg_targ_addr = 0;
1655         u32 svc_to_pipe_map = 0;
1656         u32 pcie_config_flags = 0;
1657         u32 ealloc_value;
1658         u32 ealloc_targ_addr;
1659         u32 flag2_value;
1660         u32 flag2_targ_addr;
1661         int ret = 0;
1662
1663         /* Download to Target the CE Config and the service-to-CE map */
1664         interconnect_targ_addr =
1665                 host_interest_item_address(HI_ITEM(hi_interconnect_state));
1666
1667         /* Supply Target-side CE configuration */
1668         ret = ath10k_pci_diag_read_access(ar, interconnect_targ_addr,
1669                                           &pcie_state_targ_addr);
1670         if (ret != 0) {
1671                 ath10k_err("Failed to get pcie state addr: %d\n", ret);
1672                 return ret;
1673         }
1674
1675         if (pcie_state_targ_addr == 0) {
1676                 ret = -EIO;
1677                 ath10k_err("Invalid pcie state addr\n");
1678                 return ret;
1679         }
1680
1681         ret = ath10k_pci_diag_read_access(ar, pcie_state_targ_addr +
1682                                           offsetof(struct pcie_state,
1683                                                    pipe_cfg_addr),
1684                                           &pipe_cfg_targ_addr);
1685         if (ret != 0) {
1686                 ath10k_err("Failed to get pipe cfg addr: %d\n", ret);
1687                 return ret;
1688         }
1689
1690         if (pipe_cfg_targ_addr == 0) {
1691                 ret = -EIO;
1692                 ath10k_err("Invalid pipe cfg addr\n");
1693                 return ret;
1694         }
1695
1696         ret = ath10k_pci_diag_write_mem(ar, pipe_cfg_targ_addr,
1697                                  target_ce_config_wlan,
1698                                  sizeof(target_ce_config_wlan));
1699
1700         if (ret != 0) {
1701                 ath10k_err("Failed to write pipe cfg: %d\n", ret);
1702                 return ret;
1703         }
1704
1705         ret = ath10k_pci_diag_read_access(ar, pcie_state_targ_addr +
1706                                           offsetof(struct pcie_state,
1707                                                    svc_to_pipe_map),
1708                                           &svc_to_pipe_map);
1709         if (ret != 0) {
1710                 ath10k_err("Failed to get svc/pipe map: %d\n", ret);
1711                 return ret;
1712         }
1713
1714         if (svc_to_pipe_map == 0) {
1715                 ret = -EIO;
1716                 ath10k_err("Invalid svc_to_pipe map\n");
1717                 return ret;
1718         }
1719
1720         ret = ath10k_pci_diag_write_mem(ar, svc_to_pipe_map,
1721                                  target_service_to_ce_map_wlan,
1722                                  sizeof(target_service_to_ce_map_wlan));
1723         if (ret != 0) {
1724                 ath10k_err("Failed to write svc/pipe map: %d\n", ret);
1725                 return ret;
1726         }
1727
1728         ret = ath10k_pci_diag_read_access(ar, pcie_state_targ_addr +
1729                                           offsetof(struct pcie_state,
1730                                                    config_flags),
1731                                           &pcie_config_flags);
1732         if (ret != 0) {
1733                 ath10k_err("Failed to get pcie config_flags: %d\n", ret);
1734                 return ret;
1735         }
1736
1737         pcie_config_flags &= ~PCIE_CONFIG_FLAG_ENABLE_L1;
1738
1739         ret = ath10k_pci_diag_write_mem(ar, pcie_state_targ_addr +
1740                                  offsetof(struct pcie_state, config_flags),
1741                                  &pcie_config_flags,
1742                                  sizeof(pcie_config_flags));
1743         if (ret != 0) {
1744                 ath10k_err("Failed to write pcie config_flags: %d\n", ret);
1745                 return ret;
1746         }
1747
1748         /* configure early allocation */
1749         ealloc_targ_addr = host_interest_item_address(HI_ITEM(hi_early_alloc));
1750
1751         ret = ath10k_pci_diag_read_access(ar, ealloc_targ_addr, &ealloc_value);
1752         if (ret != 0) {
1753                 ath10k_err("Failed to get early alloc val: %d\n", ret);
1754                 return ret;
1755         }
1756
1757         /* first bank is switched to IRAM */
1758         ealloc_value |= ((HI_EARLY_ALLOC_MAGIC << HI_EARLY_ALLOC_MAGIC_SHIFT) &
1759                          HI_EARLY_ALLOC_MAGIC_MASK);
1760         ealloc_value |= ((1 << HI_EARLY_ALLOC_IRAM_BANKS_SHIFT) &
1761                          HI_EARLY_ALLOC_IRAM_BANKS_MASK);
1762
1763         ret = ath10k_pci_diag_write_access(ar, ealloc_targ_addr, ealloc_value);
1764         if (ret != 0) {
1765                 ath10k_err("Failed to set early alloc val: %d\n", ret);
1766                 return ret;
1767         }
1768
1769         /* Tell Target to proceed with initialization */
1770         flag2_targ_addr = host_interest_item_address(HI_ITEM(hi_option_flag2));
1771
1772         ret = ath10k_pci_diag_read_access(ar, flag2_targ_addr, &flag2_value);
1773         if (ret != 0) {
1774                 ath10k_err("Failed to get option val: %d\n", ret);
1775                 return ret;
1776         }
1777
1778         flag2_value |= HI_OPTION_EARLY_CFG_DONE;
1779
1780         ret = ath10k_pci_diag_write_access(ar, flag2_targ_addr, flag2_value);
1781         if (ret != 0) {
1782                 ath10k_err("Failed to set option val: %d\n", ret);
1783                 return ret;
1784         }
1785
1786         return 0;
1787 }
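
/* ath10k_pci_init_config() repeats a read-modify-write pattern on Target
 * host_interest words. A hypothetical helper collapsing that pattern
 * (sketch only, not part of the driver):
 */
static int ath10k_pci_hi_set_bits_example(struct ath10k *ar, u32 addr,
                                          u32 bits)
{
        u32 val;
        int ret;

        ret = ath10k_pci_diag_read_access(ar, addr, &val);
        if (ret)
                return ret;

        return ath10k_pci_diag_write_access(ar, addr, val | bits);
}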
1788
1791 static int ath10k_pci_ce_init(struct ath10k *ar)
1792 {
1793         struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1794         struct ath10k_pci_pipe *pipe_info;
1795         const struct ce_attr *attr;
1796         int pipe_num;
1797
1798         for (pipe_num = 0; pipe_num < ar_pci->ce_count; pipe_num++) {
1799                 pipe_info = &ar_pci->pipe_info[pipe_num];
1800                 pipe_info->pipe_num = pipe_num;
1801                 pipe_info->hif_ce_state = ar;
1802                 attr = &host_ce_config_wlan[pipe_num];
1803
1804                 pipe_info->ce_hdl = ath10k_ce_init(ar, pipe_num, attr);
1805                 if (pipe_info->ce_hdl == NULL) {
1806                         ath10k_err("Unable to initialize CE for pipe: %d\n",
1807                                    pipe_num);
1808
1809                         /* It is safe to call this here; it checks whether
1810                          * ce_hdl is valid for each pipe. */
1811                         ath10k_pci_ce_deinit(ar);
1812                         return -1;
1813                 }
1814
1815                 if (pipe_num == ar_pci->ce_count - 1) {
1816                         /*
1817                          * Reserve the last CE for
1818                          * diagnostic window support.
1819                          */
1820                         ar_pci->ce_diag =
1821                                 ar_pci->pipe_info[ar_pci->ce_count - 1].ce_hdl;
1822                         continue;
1823                 }
1824
1825                 pipe_info->buf_sz = (size_t) (attr->src_sz_max);
1826         }
1827
1828         /*
1829          * Initially, establish CE completion handlers for use with BMI.
1830          * These are overwritten with generic handlers after we exit BMI phase.
1831          */
1832         pipe_info = &ar_pci->pipe_info[BMI_CE_NUM_TO_TARG];
1833         ath10k_ce_send_cb_register(pipe_info->ce_hdl,
1834                                    ath10k_pci_bmi_send_done, 0);
1835
1836         pipe_info = &ar_pci->pipe_info[BMI_CE_NUM_TO_HOST];
1837         ath10k_ce_recv_cb_register(pipe_info->ce_hdl,
1838                                    ath10k_pci_bmi_recv_data);
1839
1840         return 0;
1841 }
1842
1843 static void ath10k_pci_fw_interrupt_handler(struct ath10k *ar)
1844 {
1845         struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1846         u32 fw_indicator_address, fw_indicator;
1847
1848         ath10k_pci_wake(ar);
1849
1850         fw_indicator_address = ar_pci->fw_indicator_address;
1851         fw_indicator = ath10k_pci_read32(ar, fw_indicator_address);
1852
1853         if (fw_indicator & FW_IND_EVENT_PENDING) {
1854                 /* ACK: clear Target-side pending event */
1855                 ath10k_pci_write32(ar, fw_indicator_address,
1856                                    fw_indicator & ~FW_IND_EVENT_PENDING);
1857
1858                 if (ar_pci->started) {
1859                         ath10k_pci_hif_dump_area(ar);
1860                 } else {
1861                         /*
1862                          * Probable Target failure before we're prepared
1863                          * to handle it.  Generally unexpected.
1864                          */
1865                         ath10k_warn("early firmware event indicated\n");
1866                 }
1867         }
1868
1869         ath10k_pci_sleep(ar);
1870 }
1871
1872 static int ath10k_pci_hif_power_up(struct ath10k *ar)
1873 {
1874         struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1875         int ret;
1876
1877         ret = ath10k_pci_start_intr(ar);
1878         if (ret) {
1879                 ath10k_err("could not start interrupt handling (%d)\n", ret);
1880                 goto err;
1881         }
1882
1883         /*
1884          * Bring the target up cleanly.
1885          *
1886          * The target may be in an undefined state with an AUX-powered Target
1887          * and a Host in WoW mode. If the Host crashes, loses power, or is
1888          * restarted (without unloading the driver) then the Target is left
1889          * (aux) powered and running. On a subsequent driver load, the Target
1890          * is in an unexpected state. We try to catch that here in order to
1891          * reset the Target and retry the probe.
1892          */
1893         ath10k_pci_device_reset(ar);
1894
1895         ret = ath10k_pci_reset_target(ar);
1896         if (ret)
1897                 goto err_irq;
1898
1899         if (!test_bit(ATH10K_PCI_FEATURE_SOC_POWER_SAVE, ar_pci->features))
1900                 /* Force AWAKE forever */
1901                 ath10k_do_pci_wake(ar);
1902
1903         ret = ath10k_pci_ce_init(ar);
1904         if (ret)
1905                 goto err_ps;
1906
1907         ret = ath10k_pci_init_config(ar);
1908         if (ret)
1909                 goto err_ce;
1910
1911         ret = ath10k_pci_wake_target_cpu(ar);
1912         if (ret) {
1913                 ath10k_err("could not wake up target CPU (%d)\n", ret);
1914                 goto err_ce;
1915         }
1916
1917         return 0;
1918
1919 err_ce:
1920         ath10k_pci_ce_deinit(ar);
1921 err_ps:
1922         if (!test_bit(ATH10K_PCI_FEATURE_SOC_POWER_SAVE, ar_pci->features))
1923                 ath10k_do_pci_sleep(ar);
1924 err_irq:
1925         ath10k_pci_stop_intr(ar);
1926 err:
1927         return ret;
1928 }
1929
1930 static void ath10k_pci_hif_power_down(struct ath10k *ar)
1931 {
1932         struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1933
1934         ath10k_pci_stop_intr(ar);
1935
1936         ath10k_pci_ce_deinit(ar);
1937         if (!test_bit(ATH10K_PCI_FEATURE_SOC_POWER_SAVE, ar_pci->features))
1938                 ath10k_do_pci_sleep(ar);
1939 }
1940
1941 #ifdef CONFIG_PM
1942
1943 #define ATH10K_PCI_PM_CONTROL 0x44
1944
1945 static int ath10k_pci_hif_suspend(struct ath10k *ar)
1946 {
1947         struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1948         struct pci_dev *pdev = ar_pci->pdev;
1949         u32 val;
1950
1951         pci_read_config_dword(pdev, ATH10K_PCI_PM_CONTROL, &val);
1952
1953         if ((val & 0x000000ff) != 0x3) {
1954                 pci_save_state(pdev);
1955                 pci_disable_device(pdev);
1956                 pci_write_config_dword(pdev, ATH10K_PCI_PM_CONTROL,
1957                                        (val & 0xffffff00) | 0x03);
1958         }
1959
1960         return 0;
1961 }
1962
1963 static int ath10k_pci_hif_resume(struct ath10k *ar)
1964 {
1965         struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1966         struct pci_dev *pdev = ar_pci->pdev;
1967         u32 val;
1968
1969         pci_read_config_dword(pdev, ATH10K_PCI_PM_CONTROL, &val);
1970
1971         if ((val & 0x000000ff) != 0) {
1972                 pci_restore_state(pdev);
1973                 pci_write_config_dword(pdev, ATH10K_PCI_PM_CONTROL,
1974                                        val & 0xffffff00);
1975                 /*
1976                  * Suspend/Resume resets the PCI configuration space,
1977                  * so we have to re-disable the RETRY_TIMEOUT register (0x41)
1978                  * to keep PCI Tx retries from interfering with C3 CPU state
1979                  */
1980                 pci_read_config_dword(pdev, 0x40, &val);
1981
1982                 if ((val & 0x0000ff00) != 0)
1983                         pci_write_config_dword(pdev, 0x40, val & 0xffff00ff);
1984         }
1985
1986         return 0;
1987 }
1988 #endif
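
/* The suspend/resume pair above open-codes D3hot/D0 transitions through
 * the PCI power management control/status register (PMCSR), which this
 * device exposes at config offset 0x44. A sketch of the equivalent using
 * the generic PCI PM API (hypothetical helper, not used by the driver):
 */
static int ath10k_pci_pm_example(struct pci_dev *pdev, bool suspend)
{
        int ret;

        if (suspend) {
                /* snapshot config space, then drop to D3hot */
                pci_save_state(pdev);
                return pci_set_power_state(pdev, PCI_D3hot);
        }

        /* wake to full power first, then restore config space */
        ret = pci_set_power_state(pdev, PCI_D0);
        pci_restore_state(pdev);
        return ret;
}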
1989
1990 static const struct ath10k_hif_ops ath10k_pci_hif_ops = {
1991         .send_head              = ath10k_pci_hif_send_head,
1992         .exchange_bmi_msg       = ath10k_pci_hif_exchange_bmi_msg,
1993         .start                  = ath10k_pci_hif_start,
1994         .stop                   = ath10k_pci_hif_stop,
1995         .map_service_to_pipe    = ath10k_pci_hif_map_service_to_pipe,
1996         .get_default_pipe       = ath10k_pci_hif_get_default_pipe,
1997         .send_complete_check    = ath10k_pci_hif_send_complete_check,
1998         .set_callbacks          = ath10k_pci_hif_set_callbacks,
1999         .get_free_queue_number  = ath10k_pci_hif_get_free_queue_number,
2000         .power_up               = ath10k_pci_hif_power_up,
2001         .power_down             = ath10k_pci_hif_power_down,
2002 #ifdef CONFIG_PM
2003         .suspend                = ath10k_pci_hif_suspend,
2004         .resume                 = ath10k_pci_hif_resume,
2005 #endif
2006 };
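
/* None of these handlers is called directly by the core; dispatch goes
 * through thin inline wrappers in hif.h, along these lines (sketch of
 * the indirection, not a quote of hif.h -- redefining the wrapper here
 * would collide with the header):
 *
 *	static inline int ath10k_hif_power_up(struct ath10k *ar)
 *	{
 *		return ar->hif.ops->power_up(ar);
 *	}
 */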
2007
2008 static void ath10k_pci_ce_tasklet(unsigned long ptr)
2009 {
2010         struct ath10k_pci_pipe *pipe = (struct ath10k_pci_pipe *)ptr;
2011         struct ath10k_pci *ar_pci = pipe->ar_pci;
2012
2013         ath10k_ce_per_engine_service(ar_pci->ar, pipe->pipe_num);
2014 }
2015
2016 static void ath10k_msi_err_tasklet(unsigned long data)
2017 {
2018         struct ath10k *ar = (struct ath10k *)data;
2019
2020         ath10k_pci_fw_interrupt_handler(ar);
2021 }
2022
2023 /*
2024  * Handler for a per-engine interrupt on a PARTICULAR CE.
2025  * This is used in cases where each CE has a private MSI interrupt.
2026  */
2027 static irqreturn_t ath10k_pci_per_engine_handler(int irq, void *arg)
2028 {
2029         struct ath10k *ar = arg;
2030         struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2031         int ce_id = irq - ar_pci->pdev->irq - MSI_ASSIGN_CE_INITIAL;
2032
2033         if (ce_id < 0 || ce_id >= ARRAY_SIZE(ar_pci->pipe_info)) {
2034                 ath10k_warn("unexpected/invalid irq %d ce_id %d\n", irq, ce_id);
2035                 return IRQ_HANDLED;
2036         }
2037
2038         /*
2039          * NOTE: We are able to derive ce_id from irq because we
2040          * use a one-to-one mapping for CEs 0..5.
2041          * CEs 6 & 7 do not use interrupts at all.
2042          *
2043          * This mapping must be kept in sync with the mapping
2044          * used by firmware.
2045          */
2046         tasklet_schedule(&ar_pci->pipe_info[ce_id].intr);
2047         return IRQ_HANDLED;
2048 }
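
/* Worked example of the ce_id derivation above, assuming
 * MSI_ASSIGN_CE_INITIAL is 1 (an assumption for illustration, not quoted
 * from the header): with pdev->irq == 40, per-CE interrupts occupy irqs
 * 41..46, so irq 43 yields ce_id = 43 - 40 - 1 = 2. */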
2049
2050 static irqreturn_t ath10k_pci_msi_fw_handler(int irq, void *arg)
2051 {
2052         struct ath10k *ar = arg;
2053         struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2054
2055         tasklet_schedule(&ar_pci->msi_fw_err);
2056         return IRQ_HANDLED;
2057 }
2058
2059 /*
2060  * Top-level interrupt handler for all PCI interrupts from a Target.
2061  * When a block of MSI interrupts is allocated, this top-level handler
2062  * is not used; instead, we directly call the correct sub-handler.
2063  */
2064 static irqreturn_t ath10k_pci_interrupt_handler(int irq, void *arg)
2065 {
2066         struct ath10k *ar = arg;
2067         struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2068
2069         if (ar_pci->num_msi_intrs == 0) {
2070                 /*
2071                  * IMPORTANT: INTR_CLR register has to be set after
2072                  * INTR_ENABLE is set to 0, otherwise the interrupt
2073                  * cannot really be cleared.
2074                  */
2075                 iowrite32(0, ar_pci->mem +
2076                           (SOC_CORE_BASE_ADDRESS |
2077                            PCIE_INTR_ENABLE_ADDRESS));
2078                 iowrite32(PCIE_INTR_FIRMWARE_MASK |
2079                           PCIE_INTR_CE_MASK_ALL,
2080                           ar_pci->mem + (SOC_CORE_BASE_ADDRESS |
2081                                          PCIE_INTR_CLR_ADDRESS));
2082                 /*
2083                  * IMPORTANT: this extra read transaction is required to
2084                  * flush the posted write buffer.
2085                  */
2086                 (void) ioread32(ar_pci->mem +
2087                                 (SOC_CORE_BASE_ADDRESS |
2088                                  PCIE_INTR_ENABLE_ADDRESS));
2089         }
2090
2091         tasklet_schedule(&ar_pci->intr_tq);
2092
2093         return IRQ_HANDLED;
2094 }
2095
2096 static void ath10k_pci_tasklet(unsigned long data)
2097 {
2098         struct ath10k *ar = (struct ath10k *)data;
2099         struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2100
2101         ath10k_pci_fw_interrupt_handler(ar); /* FIXME: Handle FW error */
2102         ath10k_ce_per_engine_service_any(ar);
2103
2104         if (ar_pci->num_msi_intrs == 0) {
2105                 /* Enable Legacy PCI line interrupts */
2106                 iowrite32(PCIE_INTR_FIRMWARE_MASK |
2107                           PCIE_INTR_CE_MASK_ALL,
2108                           ar_pci->mem + (SOC_CORE_BASE_ADDRESS |
2109                                          PCIE_INTR_ENABLE_ADDRESS));
2110                 /*
2111                  * IMPORTANT: this extra read transaction is required to
2112                  * flush the posted write buffer
2113                  */
2114                 (void) ioread32(ar_pci->mem +
2115                                 (SOC_CORE_BASE_ADDRESS |
2116                                  PCIE_INTR_ENABLE_ADDRESS));
2117         }
2118 }
2119
2120 static int ath10k_pci_start_intr_msix(struct ath10k *ar, int num)
2121 {
2122         struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2123         int ret;
2124         int i;
2125
2126         ret = pci_enable_msi_block(ar_pci->pdev, num);
2127         if (ret)
2128                 return ret;
2129
2130         ret = request_irq(ar_pci->pdev->irq + MSI_ASSIGN_FW,
2131                           ath10k_pci_msi_fw_handler,
2132                           IRQF_SHARED, "ath10k_pci", ar);
2133         if (ret) {
2134                 ath10k_warn("request_irq(%d) failed %d\n",
2135                             ar_pci->pdev->irq + MSI_ASSIGN_FW, ret);
2136
2137                 pci_disable_msi(ar_pci->pdev);
2138                 return ret;
2139         }
2140
2141         for (i = MSI_ASSIGN_CE_INITIAL; i <= MSI_ASSIGN_CE_MAX; i++) {
2142                 ret = request_irq(ar_pci->pdev->irq + i,
2143                                   ath10k_pci_per_engine_handler,
2144                                   IRQF_SHARED, "ath10k_pci", ar);
2145                 if (ret) {
2146                         ath10k_warn("request_irq(%d) failed %d\n",
2147                                     ar_pci->pdev->irq + i, ret);
2148
2149                         for (i--; i >= MSI_ASSIGN_CE_INITIAL; i--)
2150                                 free_irq(ar_pci->pdev->irq + i, ar);
2151
2152                         free_irq(ar_pci->pdev->irq + MSI_ASSIGN_FW, ar);
2153                         pci_disable_msi(ar_pci->pdev);
2154                         return ret;
2155                 }
2156         }
2157
2158         ath10k_info("MSI-X interrupt handling (%d intrs)\n", num);
2159         return 0;
2160 }
2161
2162 static int ath10k_pci_start_intr_msi(struct ath10k *ar)
2163 {
2164         struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2165         int ret;
2166
2167         ret = pci_enable_msi(ar_pci->pdev);
2168         if (ret < 0)
2169                 return ret;
2170
2171         ret = request_irq(ar_pci->pdev->irq,
2172                           ath10k_pci_interrupt_handler,
2173                           IRQF_SHARED, "ath10k_pci", ar);
2174         if (ret < 0) {
2175                 pci_disable_msi(ar_pci->pdev);
2176                 return ret;
2177         }
2178
2179         ath10k_info("MSI interrupt handling\n");
2180         return 0;
2181 }
2182
2183 static int ath10k_pci_start_intr_legacy(struct ath10k *ar)
2184 {
2185         struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2186         int ret;
2187
2188         ret = request_irq(ar_pci->pdev->irq,
2189                           ath10k_pci_interrupt_handler,
2190                           IRQF_SHARED, "ath10k_pci", ar);
2191         if (ret < 0)
2192                 return ret;
2193
2194         /*
2195          * Make sure to wake the Target before enabling Legacy
2196          * Interrupt.
2197          */
2198         iowrite32(PCIE_SOC_WAKE_V_MASK,
2199                   ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS +
2200                   PCIE_SOC_WAKE_ADDRESS);
2201
2202         ath10k_pci_wait(ar);
2203
2204         /*
2205          * A potential race occurs here: the CORE_BASE write depends
2206          * on the target correctly decoding the AXI address, but the
2207          * host won't know when the target has written its BAR to
2208          * CORE_CTRL. This write might get lost if the target has NOT
2209          * written the BAR. For now, fix the race by repeating the
2210          * write in the synchronization check below.
2211          */
2212         iowrite32(PCIE_INTR_FIRMWARE_MASK |
2213                   PCIE_INTR_CE_MASK_ALL,
2214                   ar_pci->mem + (SOC_CORE_BASE_ADDRESS |
2215                                  PCIE_INTR_ENABLE_ADDRESS));
2216         iowrite32(PCIE_SOC_WAKE_RESET,
2217                   ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS +
2218                   PCIE_SOC_WAKE_ADDRESS);
2219
2220         ath10k_info("legacy interrupt handling\n");
2221         return 0;
2222 }
2223
2224 static int ath10k_pci_start_intr(struct ath10k *ar)
2225 {
2226         struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2227         int num = MSI_NUM_REQUEST;
2228         int ret;
2229         int i;
2230
2231         tasklet_init(&ar_pci->intr_tq, ath10k_pci_tasklet, (unsigned long) ar);
2232         tasklet_init(&ar_pci->msi_fw_err, ath10k_msi_err_tasklet,
2233                      (unsigned long) ar);
2234
2235         for (i = 0; i < CE_COUNT; i++) {
2236                 ar_pci->pipe_info[i].ar_pci = ar_pci;
2237                 tasklet_init(&ar_pci->pipe_info[i].intr,
2238                              ath10k_pci_ce_tasklet,
2239                              (unsigned long)&ar_pci->pipe_info[i]);
2240         }
2241
2242         if (!test_bit(ATH10K_PCI_FEATURE_MSI_X, ar_pci->features))
2243                 num = 1;
2244
2245         if (num > 1) {
2246                 ret = ath10k_pci_start_intr_msix(ar, num);
2247                 if (ret == 0)
2248                         goto exit;
2249
2250                 ath10k_warn("MSI-X didn't succeed (%d), trying MSI\n", ret);
2251                 num = 1;
2252         }
2253
2254         if (num == 1) {
2255                 ret = ath10k_pci_start_intr_msi(ar);
2256                 if (ret == 0)
2257                         goto exit;
2258
2259                 ath10k_warn("MSI didn't succeed (%d), trying legacy INTR\n",
2260                             ret);
2261                 num = 0;
2262         }
2263
2264         ret = ath10k_pci_start_intr_legacy(ar);
2265
2266 exit:
2267         ar_pci->num_msi_intrs = num;
2268         ar_pci->ce_count = CE_COUNT;
2269         return ret;
2270 }
2271
2272 static void ath10k_pci_stop_intr(struct ath10k *ar)
2273 {
2274         struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2275         int i;
2276
2277         /* There's at least one interrupt regardless of whether it's
2278          * legacy INTR, MSI or MSI-X */
2279         for (i = 0; i < max(1, ar_pci->num_msi_intrs); i++)
2280                 free_irq(ar_pci->pdev->irq + i, ar);
2281
2282         if (ar_pci->num_msi_intrs > 0)
2283                 pci_disable_msi(ar_pci->pdev);
2284 }
2285
2286 static int ath10k_pci_reset_target(struct ath10k *ar)
2287 {
2288         struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2289         int wait_limit = 300; /* 3 sec */
2290
2291         /* Wait for Target to finish initialization before we proceed. */
2292         iowrite32(PCIE_SOC_WAKE_V_MASK,
2293                   ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS +
2294                   PCIE_SOC_WAKE_ADDRESS);
2295
2296         ath10k_pci_wait(ar);
2297
2298         while (wait_limit-- &&
2299                !(ioread32(ar_pci->mem + FW_INDICATOR_ADDRESS) &
2300                  FW_IND_INITIALIZED)) {
2301                 if (ar_pci->num_msi_intrs == 0)
2302                         /* Fix potential race by repeating CORE_BASE writes */
2303                         iowrite32(PCIE_INTR_FIRMWARE_MASK |
2304                                   PCIE_INTR_CE_MASK_ALL,
2305                                   ar_pci->mem + (SOC_CORE_BASE_ADDRESS |
2306                                                  PCIE_INTR_ENABLE_ADDRESS));
2307                 mdelay(10);
2308         }
2309
2310         if (wait_limit < 0) {
2311                 ath10k_err("Target stalled\n");
2312                 iowrite32(PCIE_SOC_WAKE_RESET,
2313                           ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS +
2314                           PCIE_SOC_WAKE_ADDRESS);
2315                 return -EIO;
2316         }
2317
2318         iowrite32(PCIE_SOC_WAKE_RESET,
2319                   ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS +
2320                   PCIE_SOC_WAKE_ADDRESS);
2321
2322         return 0;
2323 }
2324
2325 static void ath10k_pci_device_reset(struct ath10k *ar)
2326 {
2327         int i;
2328         u32 val;
2329
2330         if (!SOC_GLOBAL_RESET_ADDRESS)
2331                 return;
2332
2333         ath10k_pci_reg_write32(ar, PCIE_SOC_WAKE_ADDRESS,
2334                                PCIE_SOC_WAKE_V_MASK);
2335         for (i = 0; i < ATH_PCI_RESET_WAIT_MAX; i++) {
2336                 if (ath10k_pci_target_is_awake(ar))
2337                         break;
2338                 msleep(1);
2339         }
2340
2341         /* Put Target, including PCIe, into RESET. */
2342         val = ath10k_pci_reg_read32(ar, SOC_GLOBAL_RESET_ADDRESS);
2343         val |= 1;
2344         ath10k_pci_reg_write32(ar, SOC_GLOBAL_RESET_ADDRESS, val);
2345
2346         for (i = 0; i < ATH_PCI_RESET_WAIT_MAX; i++) {
2347                 if (ath10k_pci_reg_read32(ar, RTC_STATE_ADDRESS) &
2348                                           RTC_STATE_COLD_RESET_MASK)
2349                         break;
2350                 msleep(1);
2351         }
2352
2353         /* Pull Target, including PCIe, out of RESET. */
2354         val &= ~1;
2355         ath10k_pci_reg_write32(ar, SOC_GLOBAL_RESET_ADDRESS, val);
2356
2357         for (i = 0; i < ATH_PCI_RESET_WAIT_MAX; i++) {
2358                 if (!(ath10k_pci_reg_read32(ar, RTC_STATE_ADDRESS) &
2359                                             RTC_STATE_COLD_RESET_MASK))
2360                         break;
2361                 msleep(1);
2362         }
2363
2364         ath10k_pci_reg_write32(ar, PCIE_SOC_WAKE_ADDRESS, PCIE_SOC_WAKE_RESET);
2365 }
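
/* The three poll loops in ath10k_pci_device_reset() share one shape; a
 * hypothetical helper factoring them out might look like this (sketch
 * only, e.g. with ath10k_pci_target_is_awake() as the first condition):
 */
static bool ath10k_pci_poll_example(struct ath10k *ar,
                                    bool (*cond)(struct ath10k *ar))
{
        int i;

        for (i = 0; i < ATH_PCI_RESET_WAIT_MAX; i++) {
                if (cond(ar))
                        return true;
                msleep(1);
        }

        return false;
}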
2366
2367 static void ath10k_pci_dump_features(struct ath10k_pci *ar_pci)
2368 {
2369         int i;
2370
2371         for (i = 0; i < ATH10K_PCI_FEATURE_COUNT; i++) {
2372                 if (!test_bit(i, ar_pci->features))
2373                         continue;
2374
2375                 switch (i) {
2376                 case ATH10K_PCI_FEATURE_MSI_X:
2377                         ath10k_dbg(ATH10K_DBG_PCI, "device supports MSI-X\n");
2378                         break;
2379                 case ATH10K_PCI_FEATURE_SOC_POWER_SAVE:
2380                         ath10k_dbg(ATH10K_DBG_PCI, "QCA98XX SoC power save enabled\n");
2381                         break;
2382                 }
2383         }
2384 }
2385
2386 static int ath10k_pci_probe(struct pci_dev *pdev,
2387                             const struct pci_device_id *pci_dev)
2388 {
2389         void __iomem *mem;
2390         int ret = 0;
2391         struct ath10k *ar;
2392         struct ath10k_pci *ar_pci;
2393         u32 lcr_val;
2394
2395         ath10k_dbg(ATH10K_DBG_PCI, "%s\n", __func__);
2396
2397         ar_pci = kzalloc(sizeof(*ar_pci), GFP_KERNEL);
2398         if (ar_pci == NULL)
2399                 return -ENOMEM;
2400
2401         ar_pci->pdev = pdev;
2402         ar_pci->dev = &pdev->dev;
2403
2404         switch (pci_dev->device) {
2405         case QCA988X_2_0_DEVICE_ID:
2406                 set_bit(ATH10K_PCI_FEATURE_MSI_X, ar_pci->features);
2407                 break;
2408         default:
2409                 ret = -ENODEV;
2410                 ath10k_err("Unknown device ID: %d\n", pci_dev->device);
2411                 goto err_ar_pci;
2412         }
2413
2414         if (ath10k_target_ps)
2415                 set_bit(ATH10K_PCI_FEATURE_SOC_POWER_SAVE, ar_pci->features);
2416
2417         ath10k_pci_dump_features(ar_pci);
2418
2419         ar = ath10k_core_create(ar_pci, ar_pci->dev, &ath10k_pci_hif_ops);
2420         if (!ar) {
2421                 ath10k_err("ath10k_core_create failed!\n");
2422                 ret = -EINVAL;
2423                 goto err_ar_pci;
2424         }
2425
2426         ar_pci->ar = ar;
2427         ar_pci->fw_indicator_address = FW_INDICATOR_ADDRESS;
2428         atomic_set(&ar_pci->keep_awake_count, 0);
2429
2430         pci_set_drvdata(pdev, ar);
2431
2432         /*
2433          * The Target may have been reset or power cycled without the
2434          * Host's knowledge, and its Config Space may no longer reflect
2435          * the PCI address space that was assigned earlier by the PCI
2436          * infrastructure. Refresh it now.
2437          */
2438         ret = pci_assign_resource(pdev, BAR_NUM);
2439         if (ret) {
2440                 ath10k_err("cannot assign PCI space: %d\n", ret);
2441                 goto err_ar;
2442         }
2443
2444         ret = pci_enable_device(pdev);
2445         if (ret) {
2446                 ath10k_err("cannot enable PCI device: %d\n", ret);
2447                 goto err_ar;
2448         }
2449
2450         /* Request MMIO resources */
2451         ret = pci_request_region(pdev, BAR_NUM, "ath");
2452         if (ret) {
2453                 ath10k_err("PCI MMIO reservation error: %d\n", ret);
2454                 goto err_device;
2455         }
2456
2457         /*
2458          * Target structures have a limit of 32-bit DMA pointers.
2459          * DMA pointers can be wider than 32 bits by default on some systems.
2460          */
2461         ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
2462         if (ret) {
2463                 ath10k_err("32-bit DMA not available: %d\n", ret);
2464                 goto err_region;
2465         }
2466
2467         ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
2468         if (ret) {
2469                 ath10k_err("cannot enable 32-bit consistent DMA\n");
2470                 goto err_region;
2471         }
2472
2473         /* Set bus master bit in PCI_COMMAND to enable DMA */
2474         pci_set_master(pdev);
2475
2476         /*
2477          * Temporary FIX: disable ASPM
2478          * Will be removed after the OTP is programmed
2479          */
2480         pci_read_config_dword(pdev, 0x80, &lcr_val);
2481         pci_write_config_dword(pdev, 0x80, (lcr_val & 0xffffff00));
2482
2483         /* Arrange for access to Target SoC registers. */
2484         mem = pci_iomap(pdev, BAR_NUM, 0);
2485         if (!mem) {
2486                 ath10k_err("PCI iomap error\n");
2487                 ret = -EIO;
2488                 goto err_master;
2489         }
2490
2491         ar_pci->mem = mem;
2492
2493         spin_lock_init(&ar_pci->ce_lock);
2494
2495         ret = ath10k_core_register(ar);
2496         if (ret) {
2497                 ath10k_err("could not register driver core (%d)\n", ret);
2498                 goto err_iomap;
2499         }
2500
2501         return 0;
2502
2503 err_iomap:
2504         pci_iounmap(pdev, mem);
2505 err_master:
2506         pci_clear_master(pdev);
2507 err_region:
2508         pci_release_region(pdev, BAR_NUM);
2509 err_device:
2510         pci_disable_device(pdev);
2511 err_ar:
2512         pci_set_drvdata(pdev, NULL);
2513         ath10k_core_destroy(ar);
2514 err_ar_pci:
2515         /* call HIF PCI free here */
2516         kfree(ar_pci);
2517
2518         return ret;
2519 }
2520
2521 static void ath10k_pci_remove(struct pci_dev *pdev)
2522 {
2523         struct ath10k *ar = pci_get_drvdata(pdev);
2524         struct ath10k_pci *ar_pci;
2525
2526         ath10k_dbg(ATH10K_DBG_PCI, "%s\n", __func__);
2527
2528         if (!ar)
2529                 return;
2530
2531         ar_pci = ath10k_pci_priv(ar);
2532
2533         if (!ar_pci)
2534                 return;
2535
2536         tasklet_kill(&ar_pci->msi_fw_err);
2537
2538         ath10k_core_unregister(ar);
2539
2540         pci_set_drvdata(pdev, NULL);
2541         pci_iounmap(pdev, ar_pci->mem);
2542         pci_release_region(pdev, BAR_NUM);
2543         pci_clear_master(pdev);
2544         pci_disable_device(pdev);
2545
2546         ath10k_core_destroy(ar);
2547         kfree(ar_pci);
2548 }
2549
2550 MODULE_DEVICE_TABLE(pci, ath10k_pci_id_table);
2551
2552 static struct pci_driver ath10k_pci_driver = {
2553         .name = "ath10k_pci",
2554         .id_table = ath10k_pci_id_table,
2555         .probe = ath10k_pci_probe,
2556         .remove = ath10k_pci_remove,
2557 };
2558
2559 static int __init ath10k_pci_init(void)
2560 {
2561         int ret;
2562
2563         ret = pci_register_driver(&ath10k_pci_driver);
2564         if (ret)
2565                 ath10k_err("pci_register_driver failed [%d]\n", ret);
2566
2567         return ret;
2568 }
2569 module_init(ath10k_pci_init);
2570
2571 static void __exit ath10k_pci_exit(void)
2572 {
2573         pci_unregister_driver(&ath10k_pci_driver);
2574 }
2575
2576 module_exit(ath10k_pci_exit);
2577
2578 MODULE_AUTHOR("Qualcomm Atheros");
2579 MODULE_DESCRIPTION("Driver support for Atheros QCA988X PCIe devices");
2580 MODULE_LICENSE("Dual BSD/GPL");
2581 MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" QCA988X_HW_2_0_FW_FILE);
2582 MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" QCA988X_HW_2_0_OTP_FILE);
2583 MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" QCA988X_HW_2_0_BOARD_DATA_FILE);