ath10k: pci: make host_ce_config_wlan[] more readable
drivers/net/wireless/ath/ath10k/pci.c
/*
 * Copyright (c) 2005-2011 Atheros Communications Inc.
 * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <linux/pci.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>

#include "core.h"
#include "debug.h"

#include "targaddrs.h"
#include "bmi.h"

#include "hif.h"
#include "htc.h"

#include "ce.h"
#include "pci.h"

static unsigned int ath10k_target_ps;
module_param(ath10k_target_ps, uint, 0644);
MODULE_PARM_DESC(ath10k_target_ps, "Enable ath10k Target (SoC) PS option");

#define QCA988X_2_0_DEVICE_ID   (0x003c)

static DEFINE_PCI_DEVICE_TABLE(ath10k_pci_id_table) = {
        { PCI_VDEVICE(ATHEROS, QCA988X_2_0_DEVICE_ID) }, /* PCI-E QCA988X V2 */
        {0}
};

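/*
 * Note: matching against this ID table is done by the PCI core; the
 * pci_driver registration that actually consumes it lives further
 * down in the file and is not part of this excerpt.
 */
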
static int ath10k_pci_diag_read_access(struct ath10k *ar, u32 address,
                                       u32 *data);

static void ath10k_pci_process_ce(struct ath10k *ar);
static int ath10k_pci_post_rx(struct ath10k *ar);
static int ath10k_pci_post_rx_pipe(struct ath10k_pci_pipe *pipe_info,
                                   int num);
static void ath10k_pci_rx_pipe_cleanup(struct ath10k_pci_pipe *pipe_info);
static void ath10k_pci_stop_ce(struct ath10k *ar);
static void ath10k_pci_device_reset(struct ath10k *ar);
static int ath10k_pci_reset_target(struct ath10k *ar);
static int ath10k_pci_start_intr(struct ath10k *ar);
static void ath10k_pci_stop_intr(struct ath10k *ar);

static const struct ce_attr host_ce_config_wlan[] = {
        /* CE0: host->target HTC control and raw streams */
        {
                .flags = CE_ATTR_FLAGS,
                .src_nentries = 16,
                .src_sz_max = 256,
                .dest_nentries = 0,
        },

        /* CE1: target->host HTT + HTC control */
        {
                .flags = CE_ATTR_FLAGS,
                .src_nentries = 0,
                .src_sz_max = 512,
                .dest_nentries = 512,
        },

        /* CE2: target->host WMI */
        {
                .flags = CE_ATTR_FLAGS,
                .src_nentries = 0,
                .src_sz_max = 2048,
                .dest_nentries = 32,
        },

        /* CE3: host->target WMI */
        {
                .flags = CE_ATTR_FLAGS,
                .src_nentries = 32,
                .src_sz_max = 2048,
                .dest_nentries = 0,
        },

        /* CE4: host->target HTT */
        {
                .flags = CE_ATTR_FLAGS | CE_ATTR_DIS_INTR,
                .src_nentries = CE_HTT_H2T_MSG_SRC_NENTRIES,
                .src_sz_max = 256,
                .dest_nentries = 0,
        },

        /* CE5: unused */
        {
                .flags = CE_ATTR_FLAGS,
                .src_nentries = 0,
                .src_sz_max = 0,
                .dest_nentries = 0,
        },

        /* CE6: target autonomous hif_memcpy */
        {
                .flags = CE_ATTR_FLAGS,
                .src_nentries = 0,
                .src_sz_max = 0,
                .dest_nentries = 0,
        },

        /* CE7: ce_diag, the Diagnostic Window */
        {
                .flags = CE_ATTR_FLAGS,
                .src_nentries = 2,
                .src_sz_max = DIAG_TRANSFER_LIMIT,
                .dest_nentries = 2,
        },
};
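
/*
 * Reading the table above: src_nentries sizes the host->target send
 * ring and dest_nentries the target->host receive ring, so purely
 * outbound pipes (CE0, CE3, CE4) have dest_nentries == 0 and purely
 * inbound pipes (CE1, CE2) have src_nentries == 0. src_sz_max bounds
 * a single transfer on the pipe. Each entry must stay consistent with
 * the corresponding entry in target_ce_config_wlan[] below, which is
 * the view handed to the firmware.
 */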

/* Target firmware's Copy Engine configuration. */
static const struct ce_pipe_config target_ce_config_wlan[] = {
        /* host->target HTC control and raw streams */
        { /* CE0 */ 0, PIPEDIR_OUT, 32, 256, CE_ATTR_FLAGS, 0,},
        /* target->host HTT + HTC control */
        { /* CE1 */ 1, PIPEDIR_IN, 32, 512, CE_ATTR_FLAGS, 0,},
        /* target->host WMI */
        { /* CE2 */ 2, PIPEDIR_IN, 32, 2048, CE_ATTR_FLAGS, 0,},
        /* host->target WMI */
        { /* CE3 */ 3, PIPEDIR_OUT, 32, 2048, CE_ATTR_FLAGS, 0,},
        /* host->target HTT */
        { /* CE4 */ 4, PIPEDIR_OUT, 256, 256, CE_ATTR_FLAGS, 0,},
        /* NB: 50% of src nentries, since tx has 2 frags */
        /* unused */
        { /* CE5 */ 5, PIPEDIR_OUT, 32, 2048, CE_ATTR_FLAGS, 0,},
        /* Reserved for target autonomous hif_memcpy */
        { /* CE6 */ 6, PIPEDIR_INOUT, 32, 4096, CE_ATTR_FLAGS, 0,},
        /* CE7 used only by Host */
};
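
/*
 * The positional initializers above follow the field order of
 * struct ce_pipe_config (see ce.h; this note assumes that layout):
 * pipe number, direction, number of entries, max transfer size,
 * flags and a reserved word. Directions are given from the host's
 * point of view (PIPEDIR_OUT == host->target), matching the host
 * table.
 */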

/*
 * Diagnostic read/write access is provided for startup/config/debug usage.
 * Caller must guarantee proper alignment, when applicable, and single user
 * at any moment.
 */
static int ath10k_pci_diag_read_mem(struct ath10k *ar, u32 address, void *data,
                                    int nbytes)
{
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        int ret = 0;
        u32 buf;
        unsigned int completed_nbytes, orig_nbytes, remaining_bytes;
        unsigned int id;
        unsigned int flags;
        struct ath10k_ce_pipe *ce_diag;
        /* Host buffer address in CE space */
        u32 ce_data;
        dma_addr_t ce_data_base = 0;
        void *data_buf = NULL;
        int i;

        /*
         * This code cannot handle reads to non-memory space. Redirect such
         * reads to the register read path, but preserve this function's
         * multi-word read capability.
         */
        if (address < DRAM_BASE_ADDRESS) {
                if (!IS_ALIGNED(address, 4) ||
                    !IS_ALIGNED((unsigned long)data, 4))
                        return -EIO;

                while ((nbytes >= 4) && ((ret = ath10k_pci_diag_read_access(
                                           ar, address, (u32 *)data)) == 0)) {
                        nbytes -= sizeof(u32);
                        address += sizeof(u32);
                        data += sizeof(u32);
                }
                return ret;
        }

        ce_diag = ar_pci->ce_diag;

        /*
         * Allocate a temporary bounce buffer to hold caller's data
         * to be DMA'ed from Target. This guarantees
         *   1) 4-byte alignment
         *   2) Buffer in DMA-able space
         */
        orig_nbytes = nbytes;
        data_buf = (unsigned char *)pci_alloc_consistent(ar_pci->pdev,
                                                         orig_nbytes,
                                                         &ce_data_base);
        if (!data_buf) {
                ret = -ENOMEM;
                goto done;
        }
        memset(data_buf, 0, orig_nbytes);

        remaining_bytes = orig_nbytes;
        ce_data = ce_data_base;
        while (remaining_bytes) {
                nbytes = min_t(unsigned int, remaining_bytes,
                               DIAG_TRANSFER_LIMIT);

                ret = ath10k_ce_recv_buf_enqueue(ce_diag, NULL, ce_data);
                if (ret != 0)
                        goto done;

                /*
                 * Request CE to send from Target(!) address to Host buffer.
                 *
                 * The address supplied by the caller is in the Target CPU
                 * virtual address space. In order to use it with the
                 * diagnostic CE, convert it from Target CPU virtual address
                 * space to CE address space.
                 */
                ath10k_pci_wake(ar);
                address = TARG_CPU_SPACE_TO_CE_SPACE(ar, ar_pci->mem,
                                                     address);
                ath10k_pci_sleep(ar);

                ret = ath10k_ce_send(ce_diag, NULL, (u32)address, nbytes, 0,
                                     0);
                if (ret)
                        goto done;

                i = 0;
                while (ath10k_ce_completed_send_next(ce_diag, NULL, &buf,
                                                     &completed_nbytes,
                                                     &id) != 0) {
                        mdelay(1);
                        if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) {
                                ret = -EBUSY;
                                goto done;
                        }
                }

                if (nbytes != completed_nbytes) {
                        ret = -EIO;
                        goto done;
                }

                if (buf != (u32)address) {
                        ret = -EIO;
                        goto done;
                }

                i = 0;
                while (ath10k_ce_completed_recv_next(ce_diag, NULL, &buf,
                                                     &completed_nbytes,
                                                     &id, &flags) != 0) {
                        mdelay(1);

                        if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) {
                                ret = -EBUSY;
                                goto done;
                        }
                }

                if (nbytes != completed_nbytes) {
                        ret = -EIO;
                        goto done;
                }

                if (buf != ce_data) {
                        ret = -EIO;
                        goto done;
                }

                remaining_bytes -= nbytes;
                address += nbytes;
                ce_data += nbytes;
        }

done:
        if (ret == 0) {
                /* Copy data from allocated DMA buf to caller's buf */
                WARN_ON_ONCE(orig_nbytes & 3);
                for (i = 0; i < orig_nbytes / sizeof(__le32); i++) {
                        ((u32 *)data)[i] =
                                __le32_to_cpu(((__le32 *)data_buf)[i]);
                }
        } else {
                ath10k_dbg(ATH10K_DBG_PCI, "%s failure (0x%x)\n",
                           __func__, address);
        }

        if (data_buf)
                pci_free_consistent(ar_pci->pdev, orig_nbytes,
                                    data_buf, ce_data_base);

        return ret;
}

/* Read 4-byte aligned data from Target memory or register */
static int ath10k_pci_diag_read_access(struct ath10k *ar, u32 address,
                                       u32 *data)
{
        /* Assume range doesn't cross this boundary */
        if (address >= DRAM_BASE_ADDRESS)
                return ath10k_pci_diag_read_mem(ar, address, data, sizeof(u32));

        ath10k_pci_wake(ar);
        *data = ath10k_pci_read32(ar, address);
        ath10k_pci_sleep(ar);
        return 0;
}
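
/*
 * Usage sketch (not from this file): reading one word of target
 * memory or a register takes the same call,
 *
 *      u32 val;
 *      int ret = ath10k_pci_diag_read_access(ar, address, &val);
 *
 * Addresses below DRAM_BASE_ADDRESS go through a direct register
 * read; everything else goes through the CE-based bounce buffer path
 * in ath10k_pci_diag_read_mem() above.
 */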

static int ath10k_pci_diag_write_mem(struct ath10k *ar, u32 address,
                                     const void *data, int nbytes)
{
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        int ret = 0;
        u32 buf;
        unsigned int completed_nbytes, orig_nbytes, remaining_bytes;
        unsigned int id;
        unsigned int flags;
        struct ath10k_ce_pipe *ce_diag;
        void *data_buf = NULL;
        u32 ce_data;    /* Host buffer address in CE space */
        dma_addr_t ce_data_base = 0;
        int i;

        ce_diag = ar_pci->ce_diag;

        /*
         * Allocate a temporary bounce buffer to hold caller's data
         * to be DMA'ed to Target. This guarantees
         *   1) 4-byte alignment
         *   2) Buffer in DMA-able space
         */
        orig_nbytes = nbytes;
        data_buf = (unsigned char *)pci_alloc_consistent(ar_pci->pdev,
                                                         orig_nbytes,
                                                         &ce_data_base);
        if (!data_buf) {
                ret = -ENOMEM;
                goto done;
        }

        /* Copy caller's data to allocated DMA buf */
        WARN_ON_ONCE(orig_nbytes & 3);
        for (i = 0; i < orig_nbytes / sizeof(__le32); i++)
                ((__le32 *)data_buf)[i] = __cpu_to_le32(((u32 *)data)[i]);

        /*
         * The address supplied by the caller is in the Target CPU
         * virtual address space. In order to use it with the diagnostic
         * CE, convert it from Target CPU virtual address space to CE
         * address space.
         */
        ath10k_pci_wake(ar);
        address = TARG_CPU_SPACE_TO_CE_SPACE(ar, ar_pci->mem, address);
        ath10k_pci_sleep(ar);

        remaining_bytes = orig_nbytes;
        ce_data = ce_data_base;
        while (remaining_bytes) {
                /* FIXME: check cast */
                nbytes = min_t(int, remaining_bytes, DIAG_TRANSFER_LIMIT);

                /* Set up to receive directly into Target(!) address */
                ret = ath10k_ce_recv_buf_enqueue(ce_diag, NULL, address);
                if (ret != 0)
                        goto done;

                /*
                 * Request CE to send caller-supplied data that
                 * was copied to bounce buffer to Target(!) address.
                 */
                ret = ath10k_ce_send(ce_diag, NULL, (u32)ce_data,
                                     nbytes, 0, 0);
                if (ret != 0)
                        goto done;

                i = 0;
                while (ath10k_ce_completed_send_next(ce_diag, NULL, &buf,
                                                     &completed_nbytes,
                                                     &id) != 0) {
                        mdelay(1);

                        if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) {
                                ret = -EBUSY;
                                goto done;
                        }
                }

                if (nbytes != completed_nbytes) {
                        ret = -EIO;
                        goto done;
                }

                if (buf != ce_data) {
                        ret = -EIO;
                        goto done;
                }

                i = 0;
                while (ath10k_ce_completed_recv_next(ce_diag, NULL, &buf,
                                                     &completed_nbytes,
                                                     &id, &flags) != 0) {
                        mdelay(1);

                        if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) {
                                ret = -EBUSY;
                                goto done;
                        }
                }

                if (nbytes != completed_nbytes) {
                        ret = -EIO;
                        goto done;
                }

                if (buf != address) {
                        ret = -EIO;
                        goto done;
                }

                remaining_bytes -= nbytes;
                address += nbytes;
                ce_data += nbytes;
        }

done:
        if (data_buf) {
                pci_free_consistent(ar_pci->pdev, orig_nbytes, data_buf,
                                    ce_data_base);
        }

        if (ret != 0)
                ath10k_dbg(ATH10K_DBG_PCI, "%s failure (0x%x)\n", __func__,
                           address);

        return ret;
}

/* Write 4-byte aligned data to Target memory or register */
static int ath10k_pci_diag_write_access(struct ath10k *ar, u32 address,
                                        u32 data)
{
        /* Assume range doesn't cross this boundary */
        if (address >= DRAM_BASE_ADDRESS)
                return ath10k_pci_diag_write_mem(ar, address, &data,
                                                 sizeof(u32));

        ath10k_pci_wake(ar);
        ath10k_pci_write32(ar, address, data);
        ath10k_pci_sleep(ar);
        return 0;
}

static bool ath10k_pci_target_is_awake(struct ath10k *ar)
{
        void __iomem *mem = ath10k_pci_priv(ar)->mem;
        u32 val;

        val = ioread32(mem + PCIE_LOCAL_BASE_ADDRESS +
                       RTC_STATE_ADDRESS);
        return (RTC_STATE_V_GET(val) == RTC_STATE_V_ON);
}

static void ath10k_pci_wait(struct ath10k *ar)
{
        int n = 100;

        while (n-- && !ath10k_pci_target_is_awake(ar))
                msleep(10);

        if (n < 0)
                ath10k_warn("unable to wake up target\n");
}
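
/*
 * ath10k_pci_wait() polls for at most 100 * 10 ms, i.e. roughly one
 * second, for the target RTC to reach RTC_STATE_V_ON. Note that it
 * only warns on timeout; it has no way of reporting the failure to
 * its caller.
 */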

void ath10k_do_pci_wake(struct ath10k *ar)
{
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        void __iomem *pci_addr = ar_pci->mem;
        int tot_delay = 0;
        int curr_delay = 5;

        if (atomic_read(&ar_pci->keep_awake_count) == 0) {
                /* Force AWAKE */
                iowrite32(PCIE_SOC_WAKE_V_MASK,
                          pci_addr + PCIE_LOCAL_BASE_ADDRESS +
                          PCIE_SOC_WAKE_ADDRESS);
        }
        atomic_inc(&ar_pci->keep_awake_count);

        if (ar_pci->verified_awake)
                return;

        for (;;) {
                if (ath10k_pci_target_is_awake(ar)) {
                        ar_pci->verified_awake = true;
                        break;
                }

                if (tot_delay > PCIE_WAKE_TIMEOUT) {
                        ath10k_warn("target takes too long to wake up (awake count %d)\n",
                                    atomic_read(&ar_pci->keep_awake_count));
                        break;
                }

                udelay(curr_delay);
                tot_delay += curr_delay;

                if (curr_delay < 50)
                        curr_delay += 5;
        }
}

void ath10k_do_pci_sleep(struct ath10k *ar)
{
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        void __iomem *pci_addr = ar_pci->mem;

        if (atomic_dec_and_test(&ar_pci->keep_awake_count)) {
                /* Allow sleep */
                ar_pci->verified_awake = false;
                iowrite32(PCIE_SOC_WAKE_RESET,
                          pci_addr + PCIE_LOCAL_BASE_ADDRESS +
                          PCIE_SOC_WAKE_ADDRESS);
        }
}
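
/*
 * Wake/sleep sketch: the pair above is refcounted through
 * keep_awake_count, so nested users are safe:
 *
 *      ath10k_do_pci_wake(ar);     count 0 -> 1, forces AWAKE
 *      ath10k_do_pci_wake(ar);     count 1 -> 2, no register write
 *      ath10k_do_pci_sleep(ar);    count 2 -> 1, chip stays awake
 *      ath10k_do_pci_sleep(ar);    count 1 -> 0, allows sleep
 *
 * Only the final sleep writes PCIE_SOC_WAKE_RESET and clears
 * verified_awake.
 */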

/*
 * FIXME: Handle OOM properly.
 */
static inline
struct ath10k_pci_compl *get_free_compl(struct ath10k_pci_pipe *pipe_info)
{
        struct ath10k_pci_compl *compl = NULL;

        spin_lock_bh(&pipe_info->pipe_lock);
        if (list_empty(&pipe_info->compl_free)) {
                ath10k_warn("Completion buffers are full\n");
                goto exit;
        }
        compl = list_first_entry(&pipe_info->compl_free,
                                 struct ath10k_pci_compl, list);
        list_del(&compl->list);
exit:
        spin_unlock_bh(&pipe_info->pipe_lock);
        return compl;
}

/* Called by lower (CE) layer when a send to Target completes. */
static void ath10k_pci_ce_send_done(struct ath10k_ce_pipe *ce_state,
                                    void *transfer_context,
                                    u32 ce_data,
                                    unsigned int nbytes,
                                    unsigned int transfer_id)
{
        struct ath10k *ar = ce_state->ar;
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        struct ath10k_pci_pipe *pipe_info = &ar_pci->pipe_info[ce_state->id];
        struct ath10k_pci_compl *compl;
        bool process = false;

        do {
                /*
                 * For the send completion of an item in sendlist, just
                 * increment num_sends_allowed. The upper layer callback will
                 * be triggered when last fragment is done with send.
                 */
                if (transfer_context == CE_SENDLIST_ITEM_CTXT) {
                        spin_lock_bh(&pipe_info->pipe_lock);
                        pipe_info->num_sends_allowed++;
                        spin_unlock_bh(&pipe_info->pipe_lock);
                        continue;
                }

                compl = get_free_compl(pipe_info);
                if (!compl)
                        break;

                compl->state = ATH10K_PCI_COMPL_SEND;
                compl->ce_state = ce_state;
                compl->pipe_info = pipe_info;
                compl->transfer_context = transfer_context;
                compl->nbytes = nbytes;
                compl->transfer_id = transfer_id;
                compl->flags = 0;

                /*
                 * Add the completion to the processing queue.
                 */
                spin_lock_bh(&ar_pci->compl_lock);
                list_add_tail(&compl->list, &ar_pci->compl_process);
                spin_unlock_bh(&ar_pci->compl_lock);

                process = true;
        } while (ath10k_ce_completed_send_next(ce_state,
                                               &transfer_context,
                                               &ce_data, &nbytes,
                                               &transfer_id) == 0);

        /*
         * If only some of the items within a sendlist have completed,
         * don't invoke completion processing until the entire sendlist
         * has been sent.
         */
        if (!process)
                return;

        ath10k_pci_process_ce(ar);
}

/* Called by lower (CE) layer when data is received from the Target. */
static void ath10k_pci_ce_recv_data(struct ath10k_ce_pipe *ce_state,
                                    void *transfer_context, u32 ce_data,
                                    unsigned int nbytes,
                                    unsigned int transfer_id,
                                    unsigned int flags)
{
        struct ath10k *ar = ce_state->ar;
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        struct ath10k_pci_pipe *pipe_info = &ar_pci->pipe_info[ce_state->id];
        struct ath10k_pci_compl *compl;
        struct sk_buff *skb;

        do {
                compl = get_free_compl(pipe_info);
                if (!compl)
                        break;

                compl->state = ATH10K_PCI_COMPL_RECV;
                compl->ce_state = ce_state;
                compl->pipe_info = pipe_info;
                compl->transfer_context = transfer_context;
                compl->nbytes = nbytes;
                compl->transfer_id = transfer_id;
                compl->flags = flags;

                skb = transfer_context;
                dma_unmap_single(ar->dev, ATH10K_SKB_CB(skb)->paddr,
                                 skb->len + skb_tailroom(skb),
                                 DMA_FROM_DEVICE);
                /*
                 * Add the completion to the processing queue.
                 */
                spin_lock_bh(&ar_pci->compl_lock);
                list_add_tail(&compl->list, &ar_pci->compl_process);
                spin_unlock_bh(&ar_pci->compl_lock);

        } while (ath10k_ce_completed_recv_next(ce_state,
                                               &transfer_context,
                                               &ce_data, &nbytes,
                                               &transfer_id,
                                               &flags) == 0);

        ath10k_pci_process_ce(ar);
}

/* Send the first nbytes bytes of the buffer */
static int ath10k_pci_hif_send_head(struct ath10k *ar, u8 pipe_id,
                                    unsigned int transfer_id,
                                    unsigned int bytes, struct sk_buff *nbuf)
{
        struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(nbuf);
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        struct ath10k_pci_pipe *pipe_info = &(ar_pci->pipe_info[pipe_id]);
        struct ath10k_ce_pipe *ce_hdl = pipe_info->ce_hdl;
        struct ce_sendlist sendlist;
        unsigned int len;
        u32 flags = 0;
        int ret;

        memset(&sendlist, 0, sizeof(struct ce_sendlist));

        len = min(bytes, nbuf->len);
        bytes -= len;

        if (len & 3)
                ath10k_warn("skb not aligned to 4-byte boundary (%d)\n", len);

        ath10k_dbg(ATH10K_DBG_PCI,
                   "pci send data vaddr %p paddr 0x%llx len %d as %d bytes\n",
                   nbuf->data, (unsigned long long) skb_cb->paddr,
                   nbuf->len, len);
        ath10k_dbg_dump(ATH10K_DBG_PCI_DUMP, NULL,
                        "ath10k tx: data: ",
                        nbuf->data, nbuf->len);

        ath10k_ce_sendlist_buf_add(&sendlist, skb_cb->paddr, len, flags);

        /* Make sure we have resources to handle this request */
        spin_lock_bh(&pipe_info->pipe_lock);
        if (!pipe_info->num_sends_allowed) {
                ath10k_warn("Pipe: %d is full\n", pipe_id);
                spin_unlock_bh(&pipe_info->pipe_lock);
                return -ENOSR;
        }
        pipe_info->num_sends_allowed--;
        spin_unlock_bh(&pipe_info->pipe_lock);

        ret = ath10k_ce_sendlist_send(ce_hdl, nbuf, &sendlist, transfer_id);
        if (ret)
                ath10k_warn("CE send failed: %p\n", nbuf);

        return ret;
}
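
/*
 * Flow control sketch: num_sends_allowed is seeded with
 * src_nentries - 1 in ath10k_pci_start_ce(), decremented above for
 * every accepted send and incremented again on the send-completion
 * path, so -ENOSR simply means the pipe's send ring is full and the
 * caller should retry once completions have drained.
 */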

static u16 ath10k_pci_hif_get_free_queue_number(struct ath10k *ar, u8 pipe)
{
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        struct ath10k_pci_pipe *pipe_info = &(ar_pci->pipe_info[pipe]);
        int ret;

        spin_lock_bh(&pipe_info->pipe_lock);
        ret = pipe_info->num_sends_allowed;
        spin_unlock_bh(&pipe_info->pipe_lock);

        return ret;
}

static void ath10k_pci_hif_dump_area(struct ath10k *ar)
{
        u32 reg_dump_area = 0;
        u32 reg_dump_values[REG_DUMP_COUNT_QCA988X] = {};
        u32 host_addr;
        int ret;
        u32 i;

        ath10k_err("firmware crashed!\n");
        ath10k_err("hardware name %s version 0x%x\n",
                   ar->hw_params.name, ar->target_version);
        ath10k_err("firmware version: %u.%u.%u.%u\n", ar->fw_version_major,
                   ar->fw_version_minor, ar->fw_version_release,
                   ar->fw_version_build);

        host_addr = host_interest_item_address(HI_ITEM(hi_failure_state));
        if (ath10k_pci_diag_read_mem(ar, host_addr,
                                     &reg_dump_area, sizeof(u32)) != 0) {
                ath10k_warn("could not read hi_failure_state\n");
                return;
        }

        ath10k_err("target register dump location: 0x%08X\n", reg_dump_area);

        ret = ath10k_pci_diag_read_mem(ar, reg_dump_area,
                                       &reg_dump_values[0],
                                       REG_DUMP_COUNT_QCA988X * sizeof(u32));
        if (ret != 0) {
                ath10k_err("could not dump FW dump area\n");
                return;
        }

        BUILD_BUG_ON(REG_DUMP_COUNT_QCA988X % 4);

        ath10k_err("target register dump:\n");
        for (i = 0; i < REG_DUMP_COUNT_QCA988X; i += 4)
                ath10k_err("[%02d]: 0x%08X 0x%08X 0x%08X 0x%08X\n",
                           i,
                           reg_dump_values[i],
                           reg_dump_values[i + 1],
                           reg_dump_values[i + 2],
                           reg_dump_values[i + 3]);

        ieee80211_queue_work(ar->hw, &ar->restart_work);
}

static void ath10k_pci_hif_send_complete_check(struct ath10k *ar, u8 pipe,
                                               int force)
{
        if (!force) {
                int resources;

                /*
                 * Decide whether to actually poll for completions, or just
                 * wait for a later chance. If there seem to be plenty of
                 * resources left, then just wait, since checking involves
                 * reading a CE register, which is a relatively expensive
                 * operation.
                 */
                resources = ath10k_pci_hif_get_free_queue_number(ar, pipe);

                /*
                 * If at least 50% of the total resources are still available,
                 * don't bother checking again yet.
                 */
                if (resources > (host_ce_config_wlan[pipe].src_nentries >> 1))
                        return;
        }
        ath10k_ce_per_engine_service(ar, pipe);
}
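
/*
 * Concrete example: for the host->target WMI pipe (CE3,
 * src_nentries == 32) a non-forced check only touches the CE
 * registers once 16 or fewer send slots remain free; with more than
 * half of the ring available the poll is skipped as not worth the
 * register read.
 */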

static void ath10k_pci_hif_set_callbacks(struct ath10k *ar,
                                         struct ath10k_hif_cb *callbacks)
{
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

        ath10k_dbg(ATH10K_DBG_PCI, "%s\n", __func__);

        memcpy(&ar_pci->msg_callbacks_current, callbacks,
               sizeof(ar_pci->msg_callbacks_current));
}

static int ath10k_pci_start_ce(struct ath10k *ar)
{
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        struct ath10k_ce_pipe *ce_diag = ar_pci->ce_diag;
        const struct ce_attr *attr;
        struct ath10k_pci_pipe *pipe_info;
        struct ath10k_pci_compl *compl;
        int i, pipe_num, completions, disable_interrupts;

        spin_lock_init(&ar_pci->compl_lock);
        INIT_LIST_HEAD(&ar_pci->compl_process);

        for (pipe_num = 0; pipe_num < ar_pci->ce_count; pipe_num++) {
                pipe_info = &ar_pci->pipe_info[pipe_num];

                spin_lock_init(&pipe_info->pipe_lock);
                INIT_LIST_HEAD(&pipe_info->compl_free);

                /* Handle Diagnostic CE specially */
                if (pipe_info->ce_hdl == ce_diag)
                        continue;

                attr = &host_ce_config_wlan[pipe_num];
                completions = 0;

                if (attr->src_nentries) {
                        disable_interrupts = attr->flags & CE_ATTR_DIS_INTR;
                        ath10k_ce_send_cb_register(pipe_info->ce_hdl,
                                                   ath10k_pci_ce_send_done,
                                                   disable_interrupts);
                        completions += attr->src_nentries;
                        pipe_info->num_sends_allowed = attr->src_nentries - 1;
                }

                if (attr->dest_nentries) {
                        ath10k_ce_recv_cb_register(pipe_info->ce_hdl,
                                                   ath10k_pci_ce_recv_data);
                        completions += attr->dest_nentries;
                }

                if (completions == 0)
                        continue;

                for (i = 0; i < completions; i++) {
                        compl = kmalloc(sizeof(*compl), GFP_KERNEL);
                        if (!compl) {
                                ath10k_warn("No memory for completion state\n");
                                ath10k_pci_stop_ce(ar);
                                return -ENOMEM;
                        }

                        compl->state = ATH10K_PCI_COMPL_FREE;
                        list_add_tail(&compl->list, &pipe_info->compl_free);
                }
        }

        return 0;
}
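
/*
 * Pool sizing above: each pipe pre-allocates one completion struct
 * per ring entry it can complete (src_nentries for sends,
 * dest_nentries for receives), e.g. CE1 gets 512 of them and CE4
 * gets CE_HTT_H2T_MSG_SRC_NENTRIES. The diagnostic CE is skipped:
 * it is polled synchronously and never touches this pool.
 */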

static void ath10k_pci_stop_ce(struct ath10k *ar)
{
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        struct ath10k_pci_compl *compl;
        struct sk_buff *skb;
        int i;

        ath10k_ce_disable_interrupts(ar);

        /* Cancel the pending tasklet */
        tasklet_kill(&ar_pci->intr_tq);

        for (i = 0; i < CE_COUNT; i++)
                tasklet_kill(&ar_pci->pipe_info[i].intr);

        /* Mark pending completions as aborted, so that upper layers free up
         * their associated resources */
        spin_lock_bh(&ar_pci->compl_lock);
        list_for_each_entry(compl, &ar_pci->compl_process, list) {
                skb = (struct sk_buff *)compl->transfer_context;
                ATH10K_SKB_CB(skb)->is_aborted = true;
        }
        spin_unlock_bh(&ar_pci->compl_lock);
}

static void ath10k_pci_cleanup_ce(struct ath10k *ar)
{
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        struct ath10k_pci_compl *compl, *tmp;
        struct ath10k_pci_pipe *pipe_info;
        struct sk_buff *netbuf;
        int pipe_num;

        /* Free pending completions. */
        spin_lock_bh(&ar_pci->compl_lock);
        if (!list_empty(&ar_pci->compl_process))
                ath10k_warn("pending completions still present! possible memory leaks.\n");

        list_for_each_entry_safe(compl, tmp, &ar_pci->compl_process, list) {
                list_del(&compl->list);
                netbuf = (struct sk_buff *)compl->transfer_context;
                dev_kfree_skb_any(netbuf);
                kfree(compl);
        }
        spin_unlock_bh(&ar_pci->compl_lock);

        /* Free unused completions for each pipe. */
        for (pipe_num = 0; pipe_num < ar_pci->ce_count; pipe_num++) {
                pipe_info = &ar_pci->pipe_info[pipe_num];

                spin_lock_bh(&pipe_info->pipe_lock);
                list_for_each_entry_safe(compl, tmp,
                                         &pipe_info->compl_free, list) {
                        list_del(&compl->list);
                        kfree(compl);
                }
                spin_unlock_bh(&pipe_info->pipe_lock);
        }
}

static void ath10k_pci_process_ce(struct ath10k *ar)
{
        struct ath10k_pci *ar_pci = ar->hif.priv;
        struct ath10k_hif_cb *cb = &ar_pci->msg_callbacks_current;
        struct ath10k_pci_compl *compl;
        struct sk_buff *skb;
        unsigned int nbytes;
        int ret, send_done = 0;

        /* Upper layers aren't ready to handle tx/rx completions in parallel so
         * we must serialize all completion processing. */

        spin_lock_bh(&ar_pci->compl_lock);
        if (ar_pci->compl_processing) {
                spin_unlock_bh(&ar_pci->compl_lock);
                return;
        }
        ar_pci->compl_processing = true;
        spin_unlock_bh(&ar_pci->compl_lock);

        for (;;) {
                spin_lock_bh(&ar_pci->compl_lock);
                if (list_empty(&ar_pci->compl_process)) {
                        spin_unlock_bh(&ar_pci->compl_lock);
                        break;
                }
                compl = list_first_entry(&ar_pci->compl_process,
                                         struct ath10k_pci_compl, list);
                list_del(&compl->list);
                spin_unlock_bh(&ar_pci->compl_lock);

                switch (compl->state) {
                case ATH10K_PCI_COMPL_SEND:
                        cb->tx_completion(ar,
                                          compl->transfer_context,
                                          compl->transfer_id);
                        send_done = 1;
                        break;
                case ATH10K_PCI_COMPL_RECV:
                        ret = ath10k_pci_post_rx_pipe(compl->pipe_info, 1);
                        if (ret) {
                                ath10k_warn("Unable to post recv buffer for pipe: %d\n",
                                            compl->pipe_info->pipe_num);
                                break;
                        }

                        skb = (struct sk_buff *)compl->transfer_context;
                        nbytes = compl->nbytes;

                        ath10k_dbg(ATH10K_DBG_PCI,
                                   "ath10k_pci_ce_recv_data netbuf=%p nbytes=%d\n",
                                   skb, nbytes);
                        ath10k_dbg_dump(ATH10K_DBG_PCI_DUMP, NULL,
                                        "ath10k rx: ", skb->data, nbytes);

                        if (skb->len + skb_tailroom(skb) >= nbytes) {
                                skb_trim(skb, 0);
                                skb_put(skb, nbytes);
                                cb->rx_completion(ar, skb,
                                                  compl->pipe_info->pipe_num);
                        } else {
                                ath10k_warn("rxed more than expected (nbytes %d, max %d)",
                                            nbytes,
                                            skb->len + skb_tailroom(skb));
                        }
                        break;
                case ATH10K_PCI_COMPL_FREE:
                        ath10k_warn("free completion cannot be processed\n");
                        break;
                default:
                        ath10k_warn("invalid completion state (%d)\n",
                                    compl->state);
                        break;
                }

                compl->state = ATH10K_PCI_COMPL_FREE;

                /*
                 * Add completion back to the pipe's free list.
                 */
                spin_lock_bh(&compl->pipe_info->pipe_lock);
                list_add_tail(&compl->list, &compl->pipe_info->compl_free);
                compl->pipe_info->num_sends_allowed += send_done;
                spin_unlock_bh(&compl->pipe_info->pipe_lock);
        }

        spin_lock_bh(&ar_pci->compl_lock);
        ar_pci->compl_processing = false;
        spin_unlock_bh(&ar_pci->compl_lock);
}

/* TODO - temporary mapping while we have too few CEs */
static int ath10k_pci_hif_map_service_to_pipe(struct ath10k *ar,
                                              u16 service_id, u8 *ul_pipe,
                                              u8 *dl_pipe, int *ul_is_polled,
                                              int *dl_is_polled)
{
        int ret = 0;

        /* polling for received messages not supported */
        *dl_is_polled = 0;

        switch (service_id) {
        case ATH10K_HTC_SVC_ID_HTT_DATA_MSG:
                /*
                 * Host->target HTT gets its own pipe, so it can be polled
                 * while other pipes are interrupt driven.
                 */
                *ul_pipe = 4;
                /*
                 * Use the same target->host pipe for HTC ctrl, HTC raw
                 * streams, and HTT.
                 */
                *dl_pipe = 1;
                break;

        case ATH10K_HTC_SVC_ID_RSVD_CTRL:
        case ATH10K_HTC_SVC_ID_TEST_RAW_STREAMS:
                /*
                 * Note: HTC_RAW_STREAMS_SVC is currently unused, and
                 * HTC_CTRL_RSVD_SVC could share the same pipe as the
                 * WMI services.  So, if another CE is needed, change
                 * this to *ul_pipe = 3, which frees up CE 0.
                 */
                /* *ul_pipe = 3; */
                *ul_pipe = 0;
                *dl_pipe = 1;
                break;

        case ATH10K_HTC_SVC_ID_WMI_DATA_BK:
        case ATH10K_HTC_SVC_ID_WMI_DATA_BE:
        case ATH10K_HTC_SVC_ID_WMI_DATA_VI:
        case ATH10K_HTC_SVC_ID_WMI_DATA_VO:
        case ATH10K_HTC_SVC_ID_WMI_CONTROL:
                *ul_pipe = 3;
                *dl_pipe = 2;
                break;

                /* pipe 5 unused   */
                /* pipe 6 reserved */
                /* pipe 7 reserved */

        default:
                ret = -1;
                break;
        }
        *ul_is_polled =
                (host_ce_config_wlan[*ul_pipe].flags & CE_ATTR_DIS_INTR) != 0;

        return ret;
}
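
/*
 * Summary of the mapping above (UL = host->target, DL = target->host):
 *
 *      HTT data                        UL 4 / DL 1 (UL polled)
 *      HTC ctrl, raw streams           UL 0 / DL 1
 *      WMI control and BK/BE/VI/VO     UL 3 / DL 2
 *
 * CE4 is the only pipe with CE_ATTR_DIS_INTR set in
 * host_ce_config_wlan[], so it is the only uplink reported as polled.
 */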

static void ath10k_pci_hif_get_default_pipe(struct ath10k *ar,
                                            u8 *ul_pipe, u8 *dl_pipe)
{
        int ul_is_polled, dl_is_polled;

        (void)ath10k_pci_hif_map_service_to_pipe(ar,
                                                 ATH10K_HTC_SVC_ID_RSVD_CTRL,
                                                 ul_pipe,
                                                 dl_pipe,
                                                 &ul_is_polled,
                                                 &dl_is_polled);
}

static int ath10k_pci_post_rx_pipe(struct ath10k_pci_pipe *pipe_info,
                                   int num)
{
        struct ath10k *ar = pipe_info->hif_ce_state;
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        struct ath10k_ce_pipe *ce_state = pipe_info->ce_hdl;
        struct sk_buff *skb;
        dma_addr_t ce_data;
        int i, ret = 0;

        if (pipe_info->buf_sz == 0)
                return 0;

        for (i = 0; i < num; i++) {
                skb = dev_alloc_skb(pipe_info->buf_sz);
                if (!skb) {
                        ath10k_warn("could not allocate skbuff for pipe %d\n",
                                    pipe_info->pipe_num);
                        ret = -ENOMEM;
                        goto err;
                }

                WARN_ONCE((unsigned long)skb->data & 3, "unaligned skb");

                ce_data = dma_map_single(ar->dev, skb->data,
                                         skb->len + skb_tailroom(skb),
                                         DMA_FROM_DEVICE);

                if (unlikely(dma_mapping_error(ar->dev, ce_data))) {
                        ath10k_warn("could not dma map skbuff\n");
                        dev_kfree_skb_any(skb);
                        ret = -EIO;
                        goto err;
                }

                ATH10K_SKB_CB(skb)->paddr = ce_data;

                pci_dma_sync_single_for_device(ar_pci->pdev, ce_data,
                                               pipe_info->buf_sz,
                                               PCI_DMA_FROMDEVICE);

                ret = ath10k_ce_recv_buf_enqueue(ce_state, (void *)skb,
                                                 ce_data);
                if (ret) {
                        ath10k_warn("could not enqueue to pipe %d (%d)\n",
                                    pipe_info->pipe_num, ret);
                        goto err;
                }
        }

        return ret;

err:
        ath10k_pci_rx_pipe_cleanup(pipe_info);
        return ret;
}

static int ath10k_pci_post_rx(struct ath10k *ar)
{
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        struct ath10k_pci_pipe *pipe_info;
        const struct ce_attr *attr;
        int pipe_num, ret = 0;

        for (pipe_num = 0; pipe_num < ar_pci->ce_count; pipe_num++) {
                pipe_info = &ar_pci->pipe_info[pipe_num];
                attr = &host_ce_config_wlan[pipe_num];

                if (attr->dest_nentries == 0)
                        continue;

                ret = ath10k_pci_post_rx_pipe(pipe_info,
                                              attr->dest_nentries - 1);
                if (ret) {
                        ath10k_warn("Unable to replenish recv buffers for pipe: %d\n",
                                    pipe_num);

                        for (; pipe_num >= 0; pipe_num--) {
                                pipe_info = &ar_pci->pipe_info[pipe_num];
                                ath10k_pci_rx_pipe_cleanup(pipe_info);
                        }
                        return ret;
                }
        }

        return 0;
}

static int ath10k_pci_hif_start(struct ath10k *ar)
{
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        int ret;

        ret = ath10k_pci_start_ce(ar);
        if (ret) {
                ath10k_warn("could not start CE (%d)\n", ret);
                return ret;
        }

        /* Post buffers once to start things off. */
        ret = ath10k_pci_post_rx(ar);
        if (ret) {
                ath10k_warn("could not post rx pipes (%d)\n", ret);
                return ret;
        }

        ar_pci->started = 1;
        return 0;
}

static void ath10k_pci_rx_pipe_cleanup(struct ath10k_pci_pipe *pipe_info)
{
        struct ath10k *ar;
        struct ath10k_pci *ar_pci;
        struct ath10k_ce_pipe *ce_hdl;
        u32 buf_sz;
        struct sk_buff *netbuf;
        u32 ce_data;

        buf_sz = pipe_info->buf_sz;

        /* Unused Copy Engine */
        if (buf_sz == 0)
                return;

        ar = pipe_info->hif_ce_state;
        ar_pci = ath10k_pci_priv(ar);

        if (!ar_pci->started)
                return;

        ce_hdl = pipe_info->ce_hdl;

        while (ath10k_ce_revoke_recv_next(ce_hdl, (void **)&netbuf,
                                          &ce_data) == 0) {
                dma_unmap_single(ar->dev, ATH10K_SKB_CB(netbuf)->paddr,
                                 netbuf->len + skb_tailroom(netbuf),
                                 DMA_FROM_DEVICE);
                dev_kfree_skb_any(netbuf);
        }
}

static void ath10k_pci_tx_pipe_cleanup(struct ath10k_pci_pipe *pipe_info)
{
        struct ath10k *ar;
        struct ath10k_pci *ar_pci;
        struct ath10k_ce_pipe *ce_hdl;
        struct sk_buff *netbuf;
        u32 ce_data;
        unsigned int nbytes;
        unsigned int id;
        u32 buf_sz;

        buf_sz = pipe_info->buf_sz;

        /* Unused Copy Engine */
        if (buf_sz == 0)
                return;

        ar = pipe_info->hif_ce_state;
        ar_pci = ath10k_pci_priv(ar);

        if (!ar_pci->started)
                return;

        ce_hdl = pipe_info->ce_hdl;

        while (ath10k_ce_cancel_send_next(ce_hdl, (void **)&netbuf,
                                          &ce_data, &nbytes, &id) == 0) {
                if (netbuf != CE_SENDLIST_ITEM_CTXT) {
                        /*
                         * Indicate the completion to higher layers to free
                         * the buffer.
                         */
                        ATH10K_SKB_CB(netbuf)->is_aborted = true;
                        ar_pci->msg_callbacks_current.tx_completion(ar,
                                                                    netbuf,
                                                                    id);
                }
        }
}

/*
 * Cleanup residual buffers for device shutdown:
 *    buffers that were enqueued for receive
 *    buffers that were to be sent
 * Note: Buffers that had completed but which were
 * not yet processed are on a completion queue. They
 * are handled when the completion thread shuts down.
 */
static void ath10k_pci_buffer_cleanup(struct ath10k *ar)
{
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        int pipe_num;

        for (pipe_num = 0; pipe_num < ar_pci->ce_count; pipe_num++) {
                struct ath10k_pci_pipe *pipe_info;

                pipe_info = &ar_pci->pipe_info[pipe_num];
                ath10k_pci_rx_pipe_cleanup(pipe_info);
                ath10k_pci_tx_pipe_cleanup(pipe_info);
        }
}

static void ath10k_pci_ce_deinit(struct ath10k *ar)
{
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        struct ath10k_pci_pipe *pipe_info;
        int pipe_num;

        for (pipe_num = 0; pipe_num < ar_pci->ce_count; pipe_num++) {
                pipe_info = &ar_pci->pipe_info[pipe_num];
                if (pipe_info->ce_hdl) {
                        ath10k_ce_deinit(pipe_info->ce_hdl);
                        pipe_info->ce_hdl = NULL;
                        pipe_info->buf_sz = 0;
                }
        }
}

static void ath10k_pci_disable_irqs(struct ath10k *ar)
{
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        int i;

        for (i = 0; i < max(1, ar_pci->num_msi_intrs); i++)
                disable_irq(ar_pci->pdev->irq + i);
}

static void ath10k_pci_hif_stop(struct ath10k *ar)
{
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

        ath10k_dbg(ATH10K_DBG_PCI, "%s\n", __func__);

        /* Irqs are never explicitly re-enabled. They are implicitly re-enabled
         * by ath10k_pci_start_intr(). */
        ath10k_pci_disable_irqs(ar);

        ath10k_pci_stop_ce(ar);

        /* At this point, asynchronous threads are stopped, the target should
         * not DMA nor interrupt. We process the leftovers and then free
         * everything else up. */

        ath10k_pci_process_ce(ar);
        ath10k_pci_cleanup_ce(ar);
        ath10k_pci_buffer_cleanup(ar);

        ar_pci->started = 0;
}

static int ath10k_pci_hif_exchange_bmi_msg(struct ath10k *ar,
                                           void *req, u32 req_len,
                                           void *resp, u32 *resp_len)
{
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        struct ath10k_pci_pipe *pci_tx = &ar_pci->pipe_info[BMI_CE_NUM_TO_TARG];
        struct ath10k_pci_pipe *pci_rx = &ar_pci->pipe_info[BMI_CE_NUM_TO_HOST];
        struct ath10k_ce_pipe *ce_tx = pci_tx->ce_hdl;
        struct ath10k_ce_pipe *ce_rx = pci_rx->ce_hdl;
        dma_addr_t req_paddr = 0;
        dma_addr_t resp_paddr = 0;
        struct bmi_xfer xfer = {};
        void *treq, *tresp = NULL;
        int ret = 0;

        if (resp && !resp_len)
                return -EINVAL;

        if (resp && resp_len && *resp_len == 0)
                return -EINVAL;

        treq = kmemdup(req, req_len, GFP_KERNEL);
        if (!treq)
                return -ENOMEM;

        req_paddr = dma_map_single(ar->dev, treq, req_len, DMA_TO_DEVICE);
        ret = dma_mapping_error(ar->dev, req_paddr);
        if (ret)
                goto err_dma;

        if (resp && resp_len) {
                tresp = kzalloc(*resp_len, GFP_KERNEL);
                if (!tresp) {
                        ret = -ENOMEM;
                        goto err_req;
                }

                resp_paddr = dma_map_single(ar->dev, tresp, *resp_len,
                                            DMA_FROM_DEVICE);
                ret = dma_mapping_error(ar->dev, resp_paddr);
                if (ret)
                        goto err_req;

                xfer.wait_for_resp = true;
                xfer.resp_len = 0;

                ath10k_ce_recv_buf_enqueue(ce_rx, &xfer, resp_paddr);
        }

        init_completion(&xfer.done);

        ret = ath10k_ce_send(ce_tx, &xfer, req_paddr, req_len, -1, 0);
        if (ret)
                goto err_resp;

        ret = wait_for_completion_timeout(&xfer.done,
                                          BMI_COMMUNICATION_TIMEOUT_HZ);
        if (ret <= 0) {
                u32 unused_buffer;
                unsigned int unused_nbytes;
                unsigned int unused_id;

                ret = -ETIMEDOUT;
                ath10k_ce_cancel_send_next(ce_tx, NULL, &unused_buffer,
                                           &unused_nbytes, &unused_id);
        } else {
                /* non-zero means we did not time out */
                ret = 0;
        }

err_resp:
        if (resp) {
                u32 unused_buffer;

                ath10k_ce_revoke_recv_next(ce_rx, NULL, &unused_buffer);
                dma_unmap_single(ar->dev, resp_paddr,
                                 *resp_len, DMA_FROM_DEVICE);
        }
err_req:
        dma_unmap_single(ar->dev, req_paddr, req_len, DMA_TO_DEVICE);

        if (ret == 0 && resp_len) {
                *resp_len = min(*resp_len, xfer.resp_len);
                memcpy(resp, tresp, xfer.resp_len);
        }
err_dma:
        kfree(treq);
        kfree(tresp);

        return ret;
}
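
/*
 * BMI exchange sketch: the response buffer (if any) is posted on the
 * target->host CE first, the request is then sent on the host->target
 * CE, and xfer.done is completed from ath10k_pci_bmi_send_done() or
 * ath10k_pci_bmi_recv_data() below. Posting the RX buffer before the
 * send matters: the target may answer before
 * wait_for_completion_timeout() is even entered.
 */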

static void ath10k_pci_bmi_send_done(struct ath10k_ce_pipe *ce_state,
                                     void *transfer_context,
                                     u32 data,
                                     unsigned int nbytes,
                                     unsigned int transfer_id)
{
        struct bmi_xfer *xfer = transfer_context;

        if (xfer->wait_for_resp)
                return;

        complete(&xfer->done);
}
1448
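/*
 * CE receive callback used during the BMI exchange: record the
 * response length and complete the pending transfer.
 */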
1449 static void ath10k_pci_bmi_recv_data(struct ath10k_ce_pipe *ce_state,
1450                                      void *transfer_context,
1451                                      u32 data,
1452                                      unsigned int nbytes,
1453                                      unsigned int transfer_id,
1454                                      unsigned int flags)
1455 {
1456         struct bmi_xfer *xfer = transfer_context;
1457
1458         if (!xfer->wait_for_resp) {
1459                 ath10k_warn("unexpected: BMI data received; ignoring\n");
1460                 return;
1461         }
1462
1463         xfer->resp_len = nbytes;
1464         complete(&xfer->done);
1465 }
1466
1467 /*
1468  * Map from service/endpoint to Copy Engine.
1469  * This table is derived from the CE_PCI TABLE, above.
1470  * It is passed to the Target at startup for use by firmware.
1471  */
1472 static const struct service_to_pipe target_service_to_ce_map_wlan[] = {
1473         {
1474                  ATH10K_HTC_SVC_ID_WMI_DATA_VO,
1475                  PIPEDIR_OUT,           /* out = UL = host -> target */
1476                  3,
1477         },
1478         {
1479                  ATH10K_HTC_SVC_ID_WMI_DATA_VO,
1480                  PIPEDIR_IN,            /* in = DL = target -> host */
1481                  2,
1482         },
1483         {
1484                  ATH10K_HTC_SVC_ID_WMI_DATA_BK,
1485                  PIPEDIR_OUT,           /* out = UL = host -> target */
1486                  3,
1487         },
1488         {
1489                  ATH10K_HTC_SVC_ID_WMI_DATA_BK,
1490                  PIPEDIR_IN,            /* in = DL = target -> host */
1491                  2,
1492         },
1493         {
1494                  ATH10K_HTC_SVC_ID_WMI_DATA_BE,
1495                  PIPEDIR_OUT,           /* out = UL = host -> target */
1496                  3,
1497         },
1498         {
1499                  ATH10K_HTC_SVC_ID_WMI_DATA_BE,
1500                  PIPEDIR_IN,            /* in = DL = target -> host */
1501                  2,
1502         },
1503         {
1504                  ATH10K_HTC_SVC_ID_WMI_DATA_VI,
1505                  PIPEDIR_OUT,           /* out = UL = host -> target */
1506                  3,
1507         },
1508         {
1509                  ATH10K_HTC_SVC_ID_WMI_DATA_VI,
1510                  PIPEDIR_IN,            /* in = DL = target -> host */
1511                  2,
1512         },
1513         {
1514                  ATH10K_HTC_SVC_ID_WMI_CONTROL,
1515                  PIPEDIR_OUT,           /* out = UL = host -> target */
1516                  3,
1517         },
1518         {
1519                  ATH10K_HTC_SVC_ID_WMI_CONTROL,
1520                  PIPEDIR_IN,            /* in = DL = target -> host */
1521                  2,
1522         },
1523         {
1524                  ATH10K_HTC_SVC_ID_RSVD_CTRL,
1525                  PIPEDIR_OUT,           /* out = UL = host -> target */
1526                  0,             /* could be moved to 3 (share with WMI) */
1527         },
1528         {
1529                  ATH10K_HTC_SVC_ID_RSVD_CTRL,
1530                  PIPEDIR_IN,            /* in = DL = target -> host */
1531                  1,
1532         },
1533         {
1534                  ATH10K_HTC_SVC_ID_TEST_RAW_STREAMS,    /* not currently used */
1535                  PIPEDIR_OUT,           /* out = UL = host -> target */
1536                  0,
1537         },
1538         {
1539                  ATH10K_HTC_SVC_ID_TEST_RAW_STREAMS,    /* not currently used */
1540                  PIPEDIR_IN,            /* in = DL = target -> host */
1541                  1,
1542         },
1543         {
1544                  ATH10K_HTC_SVC_ID_HTT_DATA_MSG,
1545                  PIPEDIR_OUT,           /* out = UL = host -> target */
1546                  4,
1547         },
1548         {
1549                  ATH10K_HTC_SVC_ID_HTT_DATA_MSG,
1550                  PIPEDIR_IN,            /* in = DL = target -> host */
1551                  1,
1552         },
1553
1554         /* (Additions here) */
1555
1556         {                               /* Must be last */
1557                  0,
1558                  0,
1559                  0,
1560         },
1561 };
1562
1563 /*
1564  * Send an interrupt to the device to wake up the Target CPU
1565  * so it has an opportunity to notice any changed state.
1566  */
1567 static int ath10k_pci_wake_target_cpu(struct ath10k *ar)
1568 {
1569         int ret;
1570         u32 core_ctrl;
1571
1572         ret = ath10k_pci_diag_read_access(ar, SOC_CORE_BASE_ADDRESS |
1573                                               CORE_CTRL_ADDRESS,
1574                                           &core_ctrl);
1575         if (ret) {
1576                 ath10k_warn("Unable to read core ctrl\n");
1577                 return ret;
1578         }
1579
1580         /* A_INUM_FIRMWARE interrupt to Target CPU */
1581         core_ctrl |= CORE_CTRL_CPU_INTR_MASK;
1582
1583         ret = ath10k_pci_diag_write_access(ar, SOC_CORE_BASE_ADDRESS |
1584                                                CORE_CTRL_ADDRESS,
1585                                            core_ctrl);
1586         if (ret)
1587                 ath10k_warn("Unable to set interrupt mask\n");
1588
1589         return ret;
1590 }
1591
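/*
 * Push the target-side CE pipe configuration and the service-to-pipe
 * map to the target through the diagnostic window, clear the PCIe L1
 * enable flag, set up early IRAM bank allocation and finally tell the
 * target that early configuration is complete.
 */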
1592 static int ath10k_pci_init_config(struct ath10k *ar)
1593 {
1594         u32 interconnect_targ_addr;
1595         u32 pcie_state_targ_addr = 0;
1596         u32 pipe_cfg_targ_addr = 0;
1597         u32 svc_to_pipe_map = 0;
1598         u32 pcie_config_flags = 0;
1599         u32 ealloc_value;
1600         u32 ealloc_targ_addr;
1601         u32 flag2_value;
1602         u32 flag2_targ_addr;
1603         int ret = 0;
1604
1605         /* Download to Target the CE Config and the service-to-CE map */
1606         interconnect_targ_addr =
1607                 host_interest_item_address(HI_ITEM(hi_interconnect_state));
1608
1609         /* Supply Target-side CE configuration */
1610         ret = ath10k_pci_diag_read_access(ar, interconnect_targ_addr,
1611                                           &pcie_state_targ_addr);
1612         if (ret != 0) {
1613                 ath10k_err("Failed to get pcie state addr: %d\n", ret);
1614                 return ret;
1615         }
1616
1617         if (pcie_state_targ_addr == 0) {
1618                 ret = -EIO;
1619                 ath10k_err("Invalid pcie state addr\n");
1620                 return ret;
1621         }
1622
1623         ret = ath10k_pci_diag_read_access(ar, pcie_state_targ_addr +
1624                                           offsetof(struct pcie_state,
1625                                                    pipe_cfg_addr),
1626                                           &pipe_cfg_targ_addr);
1627         if (ret != 0) {
1628                 ath10k_err("Failed to get pipe cfg addr: %d\n", ret);
1629                 return ret;
1630         }
1631
1632         if (pipe_cfg_targ_addr == 0) {
1633                 ret = -EIO;
1634                 ath10k_err("Invalid pipe cfg addr\n");
1635                 return ret;
1636         }
1637
1638         ret = ath10k_pci_diag_write_mem(ar, pipe_cfg_targ_addr,
1639                                  target_ce_config_wlan,
1640                                  sizeof(target_ce_config_wlan));
1641
1642         if (ret != 0) {
1643                 ath10k_err("Failed to write pipe cfg: %d\n", ret);
1644                 return ret;
1645         }
1646
1647         ret = ath10k_pci_diag_read_access(ar, pcie_state_targ_addr +
1648                                           offsetof(struct pcie_state,
1649                                                    svc_to_pipe_map),
1650                                           &svc_to_pipe_map);
1651         if (ret != 0) {
1652                 ath10k_err("Failed to get svc/pipe map: %d\n", ret);
1653                 return ret;
1654         }
1655
1656         if (svc_to_pipe_map == 0) {
1657                 ret = -EIO;
1658                 ath10k_err("Invalid svc_to_pipe map\n");
1659                 return ret;
1660         }
1661
1662         ret = ath10k_pci_diag_write_mem(ar, svc_to_pipe_map,
1663                                  target_service_to_ce_map_wlan,
1664                                  sizeof(target_service_to_ce_map_wlan));
1665         if (ret != 0) {
1666                 ath10k_err("Failed to write svc/pipe map: %d\n", ret);
1667                 return ret;
1668         }
1669
1670         ret = ath10k_pci_diag_read_access(ar, pcie_state_targ_addr +
1671                                           offsetof(struct pcie_state,
1672                                                    config_flags),
1673                                           &pcie_config_flags);
1674         if (ret != 0) {
1675                 ath10k_err("Failed to get pcie config_flags: %d\n", ret);
1676                 return ret;
1677         }
1678
1679         pcie_config_flags &= ~PCIE_CONFIG_FLAG_ENABLE_L1;
1680
1681         ret = ath10k_pci_diag_write_mem(ar, pcie_state_targ_addr +
1682                                  offsetof(struct pcie_state, config_flags),
1683                                  &pcie_config_flags,
1684                                  sizeof(pcie_config_flags));
1685         if (ret != 0) {
1686                 ath10k_err("Failed to write pcie config_flags: %d\n", ret);
1687                 return ret;
1688         }
1689
1690         /* configure early allocation */
1691         ealloc_targ_addr = host_interest_item_address(HI_ITEM(hi_early_alloc));
1692
1693         ret = ath10k_pci_diag_read_access(ar, ealloc_targ_addr, &ealloc_value);
1694         if (ret != 0) {
1695                 ath10k_err("Failed to get early alloc val: %d\n", ret);
1696                 return ret;
1697         }
1698
1699         /* first bank is switched to IRAM */
1700         ealloc_value |= ((HI_EARLY_ALLOC_MAGIC << HI_EARLY_ALLOC_MAGIC_SHIFT) &
1701                          HI_EARLY_ALLOC_MAGIC_MASK);
1702         ealloc_value |= ((1 << HI_EARLY_ALLOC_IRAM_BANKS_SHIFT) &
1703                          HI_EARLY_ALLOC_IRAM_BANKS_MASK);
1704
1705         ret = ath10k_pci_diag_write_access(ar, ealloc_targ_addr, ealloc_value);
1706         if (ret != 0) {
1707                 ath10k_err("Failed to set early alloc val: %d\n", ret);
1708                 return ret;
1709         }
1710
1711         /* Tell Target to proceed with initialization */
1712         flag2_targ_addr = host_interest_item_address(HI_ITEM(hi_option_flag2));
1713
1714         ret = ath10k_pci_diag_read_access(ar, flag2_targ_addr, &flag2_value);
1715         if (ret != 0) {
1716                 ath10k_err("Failed to get option val: %d\n", ret);
1717                 return ret;
1718         }
1719
1720         flag2_value |= HI_OPTION_EARLY_CFG_DONE;
1721
1722         ret = ath10k_pci_diag_write_access(ar, flag2_targ_addr, flag2_value);
1723         if (ret != 0) {
1724                 ath10k_err("Failed to set option val: %d\n", ret);
1725                 return ret;
1726         }
1727
1728         return 0;
1729 }
1730
1731
1732
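/*
 * Initialize one Copy Engine per entry in host_ce_config_wlan and
 * register the BMI send/recv callbacks on the BMI pipes. The last CE
 * is reserved for the diagnostic window rather than regular traffic.
 */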
1733 static int ath10k_pci_ce_init(struct ath10k *ar)
1734 {
1735         struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1736         struct ath10k_pci_pipe *pipe_info;
1737         const struct ce_attr *attr;
1738         int pipe_num;
1739
1740         for (pipe_num = 0; pipe_num < ar_pci->ce_count; pipe_num++) {
1741                 pipe_info = &ar_pci->pipe_info[pipe_num];
1742                 pipe_info->pipe_num = pipe_num;
1743                 pipe_info->hif_ce_state = ar;
1744                 attr = &host_ce_config_wlan[pipe_num];
1745
1746                 pipe_info->ce_hdl = ath10k_ce_init(ar, pipe_num, attr);
1747                 if (pipe_info->ce_hdl == NULL) {
1748                         ath10k_err("Unable to initialize CE for pipe: %d\n",
1749                                    pipe_num);
1750
1751                         /* It is safe to call ath10k_pci_ce_deinit() here;
1752                          * it checks whether ce_hdl is valid for each pipe. */
1753                         ath10k_pci_ce_deinit(ar);
1754                         return -1;
1755                 }
1756
1757                 if (pipe_num == ar_pci->ce_count - 1) {
1758                         /*
1759                          * Reserve the last CE for diagnostic
1760                          * window support.
1761                          */
1762                         ar_pci->ce_diag = pipe_info->ce_hdl;
1763                         continue;
1764                 }
1766
1767                 pipe_info->buf_sz = (size_t) (attr->src_sz_max);
1768         }
1769
1770         /*
1771          * Initially, establish CE completion handlers for use with BMI.
1772          * These are overwritten with generic handlers after we exit BMI phase.
1773          */
1774         pipe_info = &ar_pci->pipe_info[BMI_CE_NUM_TO_TARG];
1775         ath10k_ce_send_cb_register(pipe_info->ce_hdl,
1776                                    ath10k_pci_bmi_send_done, 0);
1777
1778         pipe_info = &ar_pci->pipe_info[BMI_CE_NUM_TO_HOST];
1779         ath10k_ce_recv_cb_register(pipe_info->ce_hdl,
1780                                    ath10k_pci_bmi_recv_data);
1781
1782         return 0;
1783 }
1784
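/*
 * Check for and acknowledge a pending firmware event. If the HIF has
 * already been started, dump the firmware debug area; an event this
 * early otherwise indicates a probable target failure.
 */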
1785 static void ath10k_pci_fw_interrupt_handler(struct ath10k *ar)
1786 {
1787         struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1788         u32 fw_indicator_address, fw_indicator;
1789
1790         ath10k_pci_wake(ar);
1791
1792         fw_indicator_address = ar_pci->fw_indicator_address;
1793         fw_indicator = ath10k_pci_read32(ar, fw_indicator_address);
1794
1795         if (fw_indicator & FW_IND_EVENT_PENDING) {
1796                 /* ACK: clear Target-side pending event */
1797                 ath10k_pci_write32(ar, fw_indicator_address,
1798                                    fw_indicator & ~FW_IND_EVENT_PENDING);
1799
1800                 if (ar_pci->started) {
1801                         ath10k_pci_hif_dump_area(ar);
1802                 } else {
1803                         /*
1804                          * Probable Target failure before we're prepared
1805                          * to handle it.  Generally unexpected.
1806                          */
1807                         ath10k_warn("early firmware event indicated\n");
1808                 }
1809         }
1810
1811         ath10k_pci_sleep(ar);
1812 }
1813
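/*
 * Power-up sequence: start interrupt handling, reset the target into
 * a known state, wait for firmware initialization, then bring up the
 * Copy Engines and download the CE/service configuration.
 */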
1814 static int ath10k_pci_hif_power_up(struct ath10k *ar)
1815 {
1816         struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1817         int ret;
1818
1819         ret = ath10k_pci_start_intr(ar);
1820         if (ret) {
1821                 ath10k_err("could not start interrupt handling (%d)\n", ret);
1822                 goto err;
1823         }
1824
1825         /*
1826          * Bring the target up cleanly.
1827          *
1828          * The target may be in an undefined state with an AUX-powered Target
1829          * and a Host in WoW mode. If the Host crashes, loses power, or is
1830          * restarted (without unloading the driver) then the Target is left
1831          * (aux) powered and running. On a subsequent driver load, the Target
1832          * is in an unexpected state. We try to catch that here in order to
1833          * reset the Target and retry the probe.
1834          */
1835         ath10k_pci_device_reset(ar);
1836
1837         ret = ath10k_pci_reset_target(ar);
1838         if (ret)
1839                 goto err_irq;
1840
1841         if (!test_bit(ATH10K_PCI_FEATURE_SOC_POWER_SAVE, ar_pci->features))
1842                 /* Force AWAKE forever */
1843                 ath10k_do_pci_wake(ar);
1844
1845         ret = ath10k_pci_ce_init(ar);
1846         if (ret)
1847                 goto err_ps;
1848
1849         ret = ath10k_pci_init_config(ar);
1850         if (ret)
1851                 goto err_ce;
1852
1853         ret = ath10k_pci_wake_target_cpu(ar);
1854         if (ret) {
1855                 ath10k_err("could not wake up target CPU (%d)\n", ret);
1856                 goto err_ce;
1857         }
1858
1859         return 0;
1860
1861 err_ce:
1862         ath10k_pci_ce_deinit(ar);
1863 err_ps:
1864         if (!test_bit(ATH10K_PCI_FEATURE_SOC_POWER_SAVE, ar_pci->features))
1865                 ath10k_do_pci_sleep(ar);
1866 err_irq:
1867         ath10k_pci_stop_intr(ar);
1868 err:
1869         return ret;
1870 }
1871
1872 static void ath10k_pci_hif_power_down(struct ath10k *ar)
1873 {
1874         struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1875
1876         ath10k_pci_stop_intr(ar);
1877
1878         ath10k_pci_ce_deinit(ar);
1879         if (!test_bit(ATH10K_PCI_FEATURE_SOC_POWER_SAVE, ar_pci->features))
1880                 ath10k_do_pci_sleep(ar);
1881 }
1882
1883 #ifdef CONFIG_PM
1884
1885 #define ATH10K_PCI_PM_CONTROL 0x44
1886
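/*
 * Put the device into D3hot by writing 0x3 to the power state field
 * of the PM control/status register (offset 0x44 on this chip),
 * unless it is already in D3hot.
 */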
1887 static int ath10k_pci_hif_suspend(struct ath10k *ar)
1888 {
1889         struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1890         struct pci_dev *pdev = ar_pci->pdev;
1891         u32 val;
1892
1893         pci_read_config_dword(pdev, ATH10K_PCI_PM_CONTROL, &val);
1894
1895         if ((val & 0x000000ff) != 0x3) {
1896                 pci_save_state(pdev);
1897                 pci_disable_device(pdev);
1898                 pci_write_config_dword(pdev, ATH10K_PCI_PM_CONTROL,
1899                                        (val & 0xffffff00) | 0x03);
1900         }
1901
1902         return 0;
1903 }
1904
1905 static int ath10k_pci_hif_resume(struct ath10k *ar)
1906 {
1907         struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1908         struct pci_dev *pdev = ar_pci->pdev;
1909         u32 val;
1910
1911         pci_read_config_dword(pdev, ATH10K_PCI_PM_CONTROL, &val);
1912
1913         if ((val & 0x000000ff) != 0) {
1914                 pci_restore_state(pdev);
1915                 pci_write_config_dword(pdev, ATH10K_PCI_PM_CONTROL,
1916                                        val & 0xffffff00);
1917                 /*
1918                  * Suspend/Resume resets the PCI configuration space,
1919                  * so we have to re-disable the RETRY_TIMEOUT register (0x41)
1920                  * to keep PCI Tx retries from interfering with C3 CPU state
1921                  */
1922                 pci_read_config_dword(pdev, 0x40, &val);
1923
1924                 if ((val & 0x0000ff00) != 0)
1925                         pci_write_config_dword(pdev, 0x40, val & 0xffff00ff);
1926         }
1927
1928         return 0;
1929 }
1930 #endif
1931
1932 static const struct ath10k_hif_ops ath10k_pci_hif_ops = {
1933         .send_head              = ath10k_pci_hif_send_head,
1934         .exchange_bmi_msg       = ath10k_pci_hif_exchange_bmi_msg,
1935         .start                  = ath10k_pci_hif_start,
1936         .stop                   = ath10k_pci_hif_stop,
1937         .map_service_to_pipe    = ath10k_pci_hif_map_service_to_pipe,
1938         .get_default_pipe       = ath10k_pci_hif_get_default_pipe,
1939         .send_complete_check    = ath10k_pci_hif_send_complete_check,
1940         .set_callbacks          = ath10k_pci_hif_set_callbacks,
1941         .get_free_queue_number  = ath10k_pci_hif_get_free_queue_number,
1942         .power_up               = ath10k_pci_hif_power_up,
1943         .power_down             = ath10k_pci_hif_power_down,
1944 #ifdef CONFIG_PM
1945         .suspend                = ath10k_pci_hif_suspend,
1946         .resume                 = ath10k_pci_hif_resume,
1947 #endif
1948 };
1949
1950 static void ath10k_pci_ce_tasklet(unsigned long ptr)
1951 {
1952         struct ath10k_pci_pipe *pipe = (struct ath10k_pci_pipe *)ptr;
1953         struct ath10k_pci *ar_pci = pipe->ar_pci;
1954
1955         ath10k_ce_per_engine_service(ar_pci->ar, pipe->pipe_num);
1956 }
1957
1958 static void ath10k_msi_err_tasklet(unsigned long data)
1959 {
1960         struct ath10k *ar = (struct ath10k *)data;
1961
1962         ath10k_pci_fw_interrupt_handler(ar);
1963 }
1964
1965 /*
1966  * Handler for a per-engine interrupt on a PARTICULAR CE.
1967  * This is used in cases where each CE has a private MSI interrupt.
1968  */
1969 static irqreturn_t ath10k_pci_per_engine_handler(int irq, void *arg)
1970 {
1971         struct ath10k *ar = arg;
1972         struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1973         int ce_id = irq - ar_pci->pdev->irq - MSI_ASSIGN_CE_INITIAL;
1974
1975         if (ce_id < 0 || ce_id >= ARRAY_SIZE(ar_pci->pipe_info)) {
1976                 ath10k_warn("unexpected/invalid irq %d ce_id %d\n", irq, ce_id);
1977                 return IRQ_HANDLED;
1978         }
1979
1980         /*
1981          * NOTE: We are able to derive ce_id from irq because we
1982          * use a one-to-one mapping for CE's 0..5.
1983          * CE's 6 & 7 do not use interrupts at all.
1984          *
1985          * This mapping must be kept in sync with the mapping
1986          * used by firmware.
1987          */
1988         tasklet_schedule(&ar_pci->pipe_info[ce_id].intr);
1989         return IRQ_HANDLED;
1990 }
1991
1992 static irqreturn_t ath10k_pci_msi_fw_handler(int irq, void *arg)
1993 {
1994         struct ath10k *ar = arg;
1995         struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1996
1997         tasklet_schedule(&ar_pci->msi_fw_err);
1998         return IRQ_HANDLED;
1999 }
2000
2001 /*
2002  * Top-level interrupt handler for all PCI interrupts from a Target.
2003  * When a block of MSI interrupts is allocated, this top-level handler
2004  * is not used; instead, we directly call the correct sub-handler.
2005  */
2006 static irqreturn_t ath10k_pci_interrupt_handler(int irq, void *arg)
2007 {
2008         struct ath10k *ar = arg;
2009         struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2010
2011         if (ar_pci->num_msi_intrs == 0) {
2012                 /*
2013                  * IMPORTANT: the INTR_CLR register has to be written after
2014                  * INTR_ENABLE is set to 0; otherwise the interrupt cannot
2015                  * be properly cleared.
2016                  */
2017                 iowrite32(0, ar_pci->mem +
2018                           (SOC_CORE_BASE_ADDRESS |
2019                            PCIE_INTR_ENABLE_ADDRESS));
2020                 iowrite32(PCIE_INTR_FIRMWARE_MASK |
2021                           PCIE_INTR_CE_MASK_ALL,
2022                           ar_pci->mem + (SOC_CORE_BASE_ADDRESS |
2023                                          PCIE_INTR_CLR_ADDRESS));
2024                 /*
2025                  * IMPORTANT: this extra read transaction is required to
2026                  * flush the posted write buffer.
2027                  */
2028                 (void) ioread32(ar_pci->mem +
2029                                 (SOC_CORE_BASE_ADDRESS |
2030                                  PCIE_INTR_ENABLE_ADDRESS));
2031         }
2032
2033         tasklet_schedule(&ar_pci->intr_tq);
2034
2035         return IRQ_HANDLED;
2036 }
2037
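/*
 * Bottom half shared by MSI and legacy interrupts: handle firmware
 * events, service all Copy Engines and, for legacy interrupts,
 * re-enable the PCI line interrupts that the hard IRQ handler masked.
 */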
2038 static void ath10k_pci_tasklet(unsigned long data)
2039 {
2040         struct ath10k *ar = (struct ath10k *)data;
2041         struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2042
2043         ath10k_pci_fw_interrupt_handler(ar); /* FIXME: Handle FW error */
2044         ath10k_ce_per_engine_service_any(ar);
2045
2046         if (ar_pci->num_msi_intrs == 0) {
2047                 /* Enable Legacy PCI line interrupts */
2048                 iowrite32(PCIE_INTR_FIRMWARE_MASK |
2049                           PCIE_INTR_CE_MASK_ALL,
2050                           ar_pci->mem + (SOC_CORE_BASE_ADDRESS |
2051                                          PCIE_INTR_ENABLE_ADDRESS));
2052                 /*
2053                  * IMPORTANT: this extra read transaction is required to
2054                  * flush the posted write buffer
2055                  */
2056                 (void) ioread32(ar_pci->mem +
2057                                 (SOC_CORE_BASE_ADDRESS |
2058                                  PCIE_INTR_ENABLE_ADDRESS));
2059         }
2060 }
2061
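/*
 * Request a block of MSI vectors (one for firmware indications plus
 * one per interrupt-driven Copy Engine) and install the per-vector
 * handlers.
 */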
2062 static int ath10k_pci_start_intr_msix(struct ath10k *ar, int num)
2063 {
2064         struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2065         int ret;
2066         int i;
2067
2068         ret = pci_enable_msi_block(ar_pci->pdev, num);
2069         if (ret)
2070                 return ret;
2071
2072         ret = request_irq(ar_pci->pdev->irq + MSI_ASSIGN_FW,
2073                           ath10k_pci_msi_fw_handler,
2074                           IRQF_SHARED, "ath10k_pci", ar);
2075         if (ret) {
2076                 ath10k_warn("request_irq(%d) failed %d\n",
2077                             ar_pci->pdev->irq + MSI_ASSIGN_FW, ret);
2078
2079                 pci_disable_msi(ar_pci->pdev);
2080                 return ret;
2081         }
2082
2083         for (i = MSI_ASSIGN_CE_INITIAL; i <= MSI_ASSIGN_CE_MAX; i++) {
2084                 ret = request_irq(ar_pci->pdev->irq + i,
2085                                   ath10k_pci_per_engine_handler,
2086                                   IRQF_SHARED, "ath10k_pci", ar);
2087                 if (ret) {
2088                         ath10k_warn("request_irq(%d) failed %d\n",
2089                                     ar_pci->pdev->irq + i, ret);
2090
2091                         for (i--; i >= MSI_ASSIGN_CE_INITIAL; i--)
2092                                 free_irq(ar_pci->pdev->irq + i, ar);
2093
2094                         free_irq(ar_pci->pdev->irq + MSI_ASSIGN_FW, ar);
2095                         pci_disable_msi(ar_pci->pdev);
2096                         return ret;
2097                 }
2098         }
2099
2100         ath10k_info("MSI-X interrupt handling (%d intrs)\n", num);
2101         return 0;
2102 }
2103
2104 static int ath10k_pci_start_intr_msi(struct ath10k *ar)
2105 {
2106         struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2107         int ret;
2108
2109         ret = pci_enable_msi(ar_pci->pdev);
2110         if (ret < 0)
2111                 return ret;
2112
2113         ret = request_irq(ar_pci->pdev->irq,
2114                           ath10k_pci_interrupt_handler,
2115                           IRQF_SHARED, "ath10k_pci", ar);
2116         if (ret < 0) {
2117                 pci_disable_msi(ar_pci->pdev);
2118                 return ret;
2119         }
2120
2121         ath10k_info("MSI interrupt handling\n");
2122         return 0;
2123 }
2124
2125 static int ath10k_pci_start_intr_legacy(struct ath10k *ar)
2126 {
2127         struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2128         int ret;
2129
2130         ret = request_irq(ar_pci->pdev->irq,
2131                           ath10k_pci_interrupt_handler,
2132                           IRQF_SHARED, "ath10k_pci", ar);
2133         if (ret < 0)
2134                 return ret;
2135
2136         /*
2137          * Make sure to wake the Target before enabling Legacy
2138          * Interrupt.
2139          */
2140         iowrite32(PCIE_SOC_WAKE_V_MASK,
2141                   ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS +
2142                   PCIE_SOC_WAKE_ADDRESS);
2143
2144         ath10k_pci_wait(ar);
2145
2146         /*
2147          * A potential race occurs here: the CORE_BASE write
2148          * depends on the target correctly decoding the AXI address,
2149          * but the host cannot know when the target has written its
2150          * BAR to CORE_CTRL, so this write might get lost. For now,
2151          * work around the race by repeating the write during the
2152          * synchronization check in ath10k_pci_reset_target().
2153          */
2154         iowrite32(PCIE_INTR_FIRMWARE_MASK |
2155                   PCIE_INTR_CE_MASK_ALL,
2156                   ar_pci->mem + (SOC_CORE_BASE_ADDRESS |
2157                                  PCIE_INTR_ENABLE_ADDRESS));
2158         iowrite32(PCIE_SOC_WAKE_RESET,
2159                   ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS +
2160                   PCIE_SOC_WAKE_ADDRESS);
2161
2162         ath10k_info("legacy interrupt handling\n");
2163         return 0;
2164 }
2165
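/*
 * Interrupt setup policy: try multi-vector MSI first (when
 * ATH10K_PCI_FEATURE_MSI_X is set), fall back to single MSI and
 * finally to legacy interrupts.
 */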
2166 static int ath10k_pci_start_intr(struct ath10k *ar)
2167 {
2168         struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2169         int num = MSI_NUM_REQUEST;
2170         int ret;
2171         int i;
2172
2173         tasklet_init(&ar_pci->intr_tq, ath10k_pci_tasklet, (unsigned long) ar);
2174         tasklet_init(&ar_pci->msi_fw_err, ath10k_msi_err_tasklet,
2175                      (unsigned long) ar);
2176
2177         for (i = 0; i < CE_COUNT; i++) {
2178                 ar_pci->pipe_info[i].ar_pci = ar_pci;
2179                 tasklet_init(&ar_pci->pipe_info[i].intr,
2180                              ath10k_pci_ce_tasklet,
2181                              (unsigned long)&ar_pci->pipe_info[i]);
2182         }
2183
2184         if (!test_bit(ATH10K_PCI_FEATURE_MSI_X, ar_pci->features))
2185                 num = 1;
2186
2187         if (num > 1) {
2188                 ret = ath10k_pci_start_intr_msix(ar, num);
2189                 if (ret == 0)
2190                         goto exit;
2191
2192                 ath10k_warn("MSI-X didn't succeed (%d), trying MSI\n", ret);
2193                 num = 1;
2194         }
2195
2196         if (num == 1) {
2197                 ret = ath10k_pci_start_intr_msi(ar);
2198                 if (ret == 0)
2199                         goto exit;
2200
2201                 ath10k_warn("MSI didn't succeed (%d), trying legacy INTR\n",
2202                             ret);
2203                 num = 0;
2204         }
2205
2206         ret = ath10k_pci_start_intr_legacy(ar);
2207
2208 exit:
2209         ar_pci->num_msi_intrs = num;
2210         ar_pci->ce_count = CE_COUNT;
2211         return ret;
2212 }
2213
2214 static void ath10k_pci_stop_intr(struct ath10k *ar)
2215 {
2216         struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2217         int i;
2218
2219         /* There's at least one interrupt regardless of whether it's
2220          * legacy INTR, MSI or MSI-X */
2221         for (i = 0; i < max(1, ar_pci->num_msi_intrs); i++)
2222                 free_irq(ar_pci->pdev->irq + i, ar);
2223
2224         if (ar_pci->num_msi_intrs > 0)
2225                 pci_disable_msi(ar_pci->pdev);
2226 }
2227
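/*
 * Wait up to 3 seconds for the target firmware to report
 * FW_IND_INITIALIZED, re-posting the interrupt enable write on each
 * iteration when running with legacy interrupts (see the race note
 * in ath10k_pci_start_intr_legacy()).
 */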
2228 static int ath10k_pci_reset_target(struct ath10k *ar)
2229 {
2230         struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2231         int wait_limit = 300; /* 3 sec */
2232
2233         /* Wait for Target to finish initialization before we proceed. */
2234         iowrite32(PCIE_SOC_WAKE_V_MASK,
2235                   ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS +
2236                   PCIE_SOC_WAKE_ADDRESS);
2237
2238         ath10k_pci_wait(ar);
2239
2240         while (wait_limit-- &&
2241                !(ioread32(ar_pci->mem + FW_INDICATOR_ADDRESS) &
2242                  FW_IND_INITIALIZED)) {
2243                 if (ar_pci->num_msi_intrs == 0)
2244                         /* Fix potential race by repeating CORE_BASE writes */
2245                         iowrite32(PCIE_INTR_FIRMWARE_MASK |
2246                                   PCIE_INTR_CE_MASK_ALL,
2247                                   ar_pci->mem + (SOC_CORE_BASE_ADDRESS |
2248                                                  PCIE_INTR_ENABLE_ADDRESS));
2249                 mdelay(10);
2250         }
2251
2252         if (wait_limit < 0) {
2253                 ath10k_err("Target stalled\n");
2254                 iowrite32(PCIE_SOC_WAKE_RESET,
2255                           ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS +
2256                           PCIE_SOC_WAKE_ADDRESS);
2257                 return -EIO;
2258         }
2259
2260         iowrite32(PCIE_SOC_WAKE_RESET,
2261                   ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS +
2262                   PCIE_SOC_WAKE_ADDRESS);
2263
2264         return 0;
2265 }
2266
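/*
 * Cold-reset the target (including PCIe) by toggling the global
 * reset bit and polling RTC_STATE until the reset is seen to assert
 * and then deassert.
 */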
2267 static void ath10k_pci_device_reset(struct ath10k *ar)
2268 {
2269         struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2270         void __iomem *mem = ar_pci->mem;
2271         int i;
2272         u32 val;
2273
2274         if (!SOC_GLOBAL_RESET_ADDRESS)
2275                 return;
2276
2277         if (!mem)
2278                 return;
2279
2280         ath10k_pci_reg_write32(mem, PCIE_SOC_WAKE_ADDRESS,
2281                                PCIE_SOC_WAKE_V_MASK);
2282         for (i = 0; i < ATH_PCI_RESET_WAIT_MAX; i++) {
2283                 if (ath10k_pci_target_is_awake(ar))
2284                         break;
2285                 msleep(1);
2286         }
2287
2288         /* Put Target, including PCIe, into RESET. */
2289         val = ath10k_pci_reg_read32(mem, SOC_GLOBAL_RESET_ADDRESS);
2290         val |= 1;
2291         ath10k_pci_reg_write32(mem, SOC_GLOBAL_RESET_ADDRESS, val);
2292
2293         for (i = 0; i < ATH_PCI_RESET_WAIT_MAX; i++) {
2294                 if (ath10k_pci_reg_read32(mem, RTC_STATE_ADDRESS) &
2295                                           RTC_STATE_COLD_RESET_MASK)
2296                         break;
2297                 msleep(1);
2298         }
2299
2300         /* Pull Target, including PCIe, out of RESET. */
2301         val &= ~1;
2302         ath10k_pci_reg_write32(mem, SOC_GLOBAL_RESET_ADDRESS, val);
2303
2304         for (i = 0; i < ATH_PCI_RESET_WAIT_MAX; i++) {
2305                 if (!(ath10k_pci_reg_read32(mem, RTC_STATE_ADDRESS) &
2306                                             RTC_STATE_COLD_RESET_MASK))
2307                         break;
2308                 msleep(1);
2309         }
2310
2311         ath10k_pci_reg_write32(mem, PCIE_SOC_WAKE_ADDRESS, PCIE_SOC_WAKE_RESET);
2312 }
2313
2314 static void ath10k_pci_dump_features(struct ath10k_pci *ar_pci)
2315 {
2316         int i;
2317
2318         for (i = 0; i < ATH10K_PCI_FEATURE_COUNT; i++) {
2319                 if (!test_bit(i, ar_pci->features))
2320                         continue;
2321
2322                 switch (i) {
2323                 case ATH10K_PCI_FEATURE_MSI_X:
2324                         ath10k_dbg(ATH10K_DBG_PCI, "device supports MSI-X\n");
2325                         break;
2326                 case ATH10K_PCI_FEATURE_SOC_POWER_SAVE:
2327                         ath10k_dbg(ATH10K_DBG_PCI, "QCA98XX SoC power save enabled\n");
2328                         break;
2329                 }
2330         }
2331 }
2332
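/*
 * PCI probe: allocate the driver state, enable the device, map its
 * registers, restrict DMA to 32 bits, disable ASPM (temporarily, see
 * below) and register with the ath10k core.
 */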
2333 static int ath10k_pci_probe(struct pci_dev *pdev,
2334                             const struct pci_device_id *pci_dev)
2335 {
2336         void __iomem *mem;
2337         int ret = 0;
2338         struct ath10k *ar;
2339         struct ath10k_pci *ar_pci;
2340         u32 lcr_val;
2341
2342         ath10k_dbg(ATH10K_DBG_PCI, "%s\n", __func__);
2343
2344         ar_pci = kzalloc(sizeof(*ar_pci), GFP_KERNEL);
2345         if (ar_pci == NULL)
2346                 return -ENOMEM;
2347
2348         ar_pci->pdev = pdev;
2349         ar_pci->dev = &pdev->dev;
2350
2351         switch (pci_dev->device) {
2352         case QCA988X_2_0_DEVICE_ID:
2353                 set_bit(ATH10K_PCI_FEATURE_MSI_X, ar_pci->features);
2354                 break;
2355         default:
2356                 ret = -ENODEV;
2357                 ath10k_err("Unknown device ID: %d\n", pci_dev->device);
2358                 goto err_ar_pci;
2359         }
2360
2361         if (ath10k_target_ps)
2362                 set_bit(ATH10K_PCI_FEATURE_SOC_POWER_SAVE, ar_pci->features);
2363
2364         ath10k_pci_dump_features(ar_pci);
2365
2366         ar = ath10k_core_create(ar_pci, ar_pci->dev, &ath10k_pci_hif_ops);
2367         if (!ar) {
2368                 ath10k_err("ath10k_core_create failed!\n");
2369                 ret = -EINVAL;
2370                 goto err_ar_pci;
2371         }
2372
2373         ar_pci->ar = ar;
2374         ar_pci->fw_indicator_address = FW_INDICATOR_ADDRESS;
2375         atomic_set(&ar_pci->keep_awake_count, 0);
2376
2377         pci_set_drvdata(pdev, ar);
2378
2379         /*
2380          * Without any knowledge of the Host, the Target may have been reset or
2381          * power cycled and its Config Space may no longer reflect the PCI
2382          * address space that was assigned earlier by the PCI infrastructure.
2383          * Refresh it now.
2384          */
2385         ret = pci_assign_resource(pdev, BAR_NUM);
2386         if (ret) {
2387                 ath10k_err("cannot assign PCI space: %d\n", ret);
2388                 goto err_ar;
2389         }
2390
2391         ret = pci_enable_device(pdev);
2392         if (ret) {
2393                 ath10k_err("cannot enable PCI device: %d\n", ret);
2394                 goto err_ar;
2395         }
2396
2397         /* Request MMIO resources */
2398         ret = pci_request_region(pdev, BAR_NUM, "ath");
2399         if (ret) {
2400                 ath10k_err("PCI MMIO reservation error: %d\n", ret);
2401                 goto err_device;
2402         }
2403
2404         /*
2405          * Target structures are limited to 32-bit DMA pointers, while
2406          * DMA addresses may be wider than 32 bits on some systems.
2407          */
2408         ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
2409         if (ret) {
2410                 ath10k_err("32-bit DMA not available: %d\n", ret);
2411                 goto err_region;
2412         }
2413
2414         ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
2415         if (ret) {
2416                 ath10k_err("cannot enable 32-bit consistent DMA\n");
2417                 goto err_region;
2418         }
2419
2420         /* Set bus master bit in PCI_COMMAND to enable DMA */
2421         pci_set_master(pdev);
2422
2423         /*
2424          * Temporary fix: disable ASPM. This will be removed once
2425          * the OTP is programmed.
2426          */
2427         pci_read_config_dword(pdev, 0x80, &lcr_val);
2428         pci_write_config_dword(pdev, 0x80, (lcr_val & 0xffffff00));
2429
2430         /* Arrange for access to Target SoC registers. */
2431         mem = pci_iomap(pdev, BAR_NUM, 0);
2432         if (!mem) {
2433                 ath10k_err("PCI iomap error\n");
2434                 ret = -EIO;
2435                 goto err_master;
2436         }
2437
2438         ar_pci->mem = mem;
2439
2440         spin_lock_init(&ar_pci->ce_lock);
2441
2442         ret = ath10k_core_register(ar);
2443         if (ret) {
2444                 ath10k_err("could not register driver core (%d)\n", ret);
2445                 goto err_iomap;
2446         }
2447
2448         return 0;
2449
2450 err_iomap:
2451         pci_iounmap(pdev, mem);
2452 err_master:
2453         pci_clear_master(pdev);
2454 err_region:
2455         pci_release_region(pdev, BAR_NUM);
2456 err_device:
2457         pci_disable_device(pdev);
2458 err_ar:
2459         pci_set_drvdata(pdev, NULL);
2460         ath10k_core_destroy(ar);
2461 err_ar_pci:
2462         /* call HIF PCI free here */
2463         kfree(ar_pci);
2464
2465         return ret;
2466 }
2467
2468 static void ath10k_pci_remove(struct pci_dev *pdev)
2469 {
2470         struct ath10k *ar = pci_get_drvdata(pdev);
2471         struct ath10k_pci *ar_pci;
2472
2473         ath10k_dbg(ATH10K_DBG_PCI, "%s\n", __func__);
2474
2475         if (!ar)
2476                 return;
2477
2478         ar_pci = ath10k_pci_priv(ar);
2479
2480         if (!ar_pci)
2481                 return;
2482
2483         tasklet_kill(&ar_pci->msi_fw_err);
2484
2485         ath10k_core_unregister(ar);
2486
2487         pci_set_drvdata(pdev, NULL);
2488         pci_iounmap(pdev, ar_pci->mem);
2489         pci_release_region(pdev, BAR_NUM);
2490         pci_clear_master(pdev);
2491         pci_disable_device(pdev);
2492
2493         ath10k_core_destroy(ar);
2494         kfree(ar_pci);
2495 }
2496
2497 MODULE_DEVICE_TABLE(pci, ath10k_pci_id_table);
2498
2499 static struct pci_driver ath10k_pci_driver = {
2500         .name = "ath10k_pci",
2501         .id_table = ath10k_pci_id_table,
2502         .probe = ath10k_pci_probe,
2503         .remove = ath10k_pci_remove,
2504 };
2505
2506 static int __init ath10k_pci_init(void)
2507 {
2508         int ret;
2509
2510         ret = pci_register_driver(&ath10k_pci_driver);
2511         if (ret)
2512                 ath10k_err("pci_register_driver failed [%d]\n", ret);
2513
2514         return ret;
2515 }
2516 module_init(ath10k_pci_init);
2517
2518 static void __exit ath10k_pci_exit(void)
2519 {
2520         pci_unregister_driver(&ath10k_pci_driver);
2521 }
2522
2523 module_exit(ath10k_pci_exit);
2524
2525 MODULE_AUTHOR("Qualcomm Atheros");
2526 MODULE_DESCRIPTION("Driver support for Atheros QCA988X PCIe devices");
2527 MODULE_LICENSE("Dual BSD/GPL");
2528 MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" QCA988X_HW_2_0_FW_FILE);
2529 MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" QCA988X_HW_2_0_OTP_FILE);
2530 MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" QCA988X_HW_2_0_BOARD_DATA_FILE);