ath10k: mac80211 driver for Qualcomm Atheros 802.11ac QCA98xx devices
[cascardo/linux.git] drivers/net/wireless/ath/ath10k/pci.c
1 /*
2  * Copyright (c) 2005-2011 Atheros Communications Inc.
3  * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
4  *
5  * Permission to use, copy, modify, and/or distribute this software for any
6  * purpose with or without fee is hereby granted, provided that the above
7  * copyright notice and this permission notice appear in all copies.
8  *
9  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
10  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
11  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
12  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
13  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
14  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
15  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
16  */
17
18 #include <linux/pci.h>
19 #include <linux/module.h>
20 #include <linux/interrupt.h>
21 #include <linux/spinlock.h>
22
23 #include "core.h"
24 #include "debug.h"
25
26 #include "targaddrs.h"
27 #include "bmi.h"
28
29 #include "hif.h"
30 #include "htc.h"
31
32 #include "ce.h"
33 #include "pci.h"
34
35 unsigned int ath10k_target_ps;
36 module_param(ath10k_target_ps, uint, 0644);
37 MODULE_PARM_DESC(ath10k_target_ps, "Enable ath10k Target (SoC) PS option");
38
39 #define QCA988X_1_0_DEVICE_ID   (0xabcd)
40 #define QCA988X_2_0_DEVICE_ID   (0x003c)
41
42 static DEFINE_PCI_DEVICE_TABLE(ath10k_pci_id_table) = {
43         { PCI_VDEVICE(ATHEROS, QCA988X_1_0_DEVICE_ID) }, /* PCI-E QCA988X V1 */
44         { PCI_VDEVICE(ATHEROS, QCA988X_2_0_DEVICE_ID) }, /* PCI-E QCA988X V2 */
45         {0}
46 };
47
48 static int ath10k_pci_diag_read_access(struct ath10k *ar, u32 address,
49                                        u32 *data);
50
51 static void ath10k_pci_process_ce(struct ath10k *ar);
52 static int ath10k_pci_post_rx(struct ath10k *ar);
53 static int ath10k_pci_post_rx_pipe(struct hif_ce_pipe_info *pipe_info,
54                                              int num);
55 static void ath10k_pci_rx_pipe_cleanup(struct hif_ce_pipe_info *pipe_info);
56 static void ath10k_pci_stop_ce(struct ath10k *ar);
57
58 static const struct ce_attr host_ce_config_wlan[] = {
59         /* host->target HTC control and raw streams */
60         { /* CE0 */ CE_ATTR_FLAGS, 0, 16, 256, 0, NULL,},
61         /* could be moved to share CE3 */
62         /* target->host HTT + HTC control */
63         { /* CE1 */ CE_ATTR_FLAGS, 0, 0, 512, 512, NULL,},
64         /* target->host WMI */
65         { /* CE2 */ CE_ATTR_FLAGS, 0, 0, 2048, 32, NULL,},
66         /* host->target WMI */
67         { /* CE3 */ CE_ATTR_FLAGS, 0, 32, 2048, 0, NULL,},
68         /* host->target HTT */
69         { /* CE4 */ CE_ATTR_FLAGS | CE_ATTR_DIS_INTR, 0,
70                     CE_HTT_H2T_MSG_SRC_NENTRIES, 256, 0, NULL,},
71         /* unused */
72         { /* CE5 */ CE_ATTR_FLAGS, 0, 0, 0, 0, NULL,},
73         /* Target autonomous hif_memcpy */
74         { /* CE6 */ CE_ATTR_FLAGS, 0, 0, 0, 0, NULL,},
75         /* ce_diag, the Diagnostic Window */
76         { /* CE7 */ CE_ATTR_FLAGS, 0, 2, DIAG_TRANSFER_LIMIT, 2, NULL,},
77 };
78
79 /* Target firmware's Copy Engine configuration. */
80 static const struct ce_pipe_config target_ce_config_wlan[] = {
81         /* host->target HTC control and raw streams */
82         { /* CE0 */ 0, PIPEDIR_OUT, 32, 256, CE_ATTR_FLAGS, 0,},
83         /* target->host HTT + HTC control */
84         { /* CE1 */ 1, PIPEDIR_IN, 32, 512, CE_ATTR_FLAGS, 0,},
85         /* target->host WMI */
86         { /* CE2 */ 2, PIPEDIR_IN, 32, 2048, CE_ATTR_FLAGS, 0,},
87         /* host->target WMI */
88         { /* CE3 */ 3, PIPEDIR_OUT, 32, 2048, CE_ATTR_FLAGS, 0,},
89         /* host->target HTT */
90         { /* CE4 */ 4, PIPEDIR_OUT, 256, 256, CE_ATTR_FLAGS, 0,},
91         /* NB: 50% of src nentries, since tx has 2 frags */
92         /* unused */
93         { /* CE5 */ 5, PIPEDIR_OUT, 32, 2048, CE_ATTR_FLAGS, 0,},
94         /* Reserved for target autonomous hif_memcpy */
95         { /* CE6 */ 6, PIPEDIR_INOUT, 32, 4096, CE_ATTR_FLAGS, 0,},
96         /* CE7 used only by Host */
97 };
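/*
 * Note: this target-side table (together with the service-to-CE map
 * further below) is downloaded to the target during
 * ath10k_pci_init_config() so that host and firmware agree on the
 * pipe layout.
 */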
98
99 /*
100  * Diagnostic read/write access is provided for startup/config/debug usage.
101  * Caller must guarantee proper alignment, when applicable, and single user
102  * at any moment.
103  */
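/*
 * Reads below DRAM_BASE_ADDRESS are treated as register accesses and
 * redirected to ath10k_pci_diag_read_access(), one 32-bit word at a
 * time. Everything else is bounced through a DMA-coherent buffer and
 * copied by the diagnostic CE in chunks of at most
 * DIAG_TRANSFER_LIMIT bytes.
 */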
104 static int ath10k_pci_diag_read_mem(struct ath10k *ar, u32 address, void *data,
105                                     int nbytes)
106 {
107         struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
108         int ret = 0;
109         u32 buf;
110         unsigned int completed_nbytes, orig_nbytes, remaining_bytes;
111         unsigned int id;
112         unsigned int flags;
113         struct ce_state *ce_diag;
114         /* Host buffer address in CE space */
115         u32 ce_data;
116         dma_addr_t ce_data_base = 0;
117         void *data_buf = NULL;
118         int i;
119
120         /*
121          * This code cannot handle reads to non-memory space. Redirect to
122          * the register read function, but preserve this function's
123          * multi-word read capability.
124          */
125         if (address < DRAM_BASE_ADDRESS) {
126                 if (!IS_ALIGNED(address, 4) ||
127                     !IS_ALIGNED((unsigned long)data, 4))
128                         return -EIO;
129
130                 while ((nbytes >= 4) &&  ((ret = ath10k_pci_diag_read_access(
131                                            ar, address, (u32 *)data)) == 0)) {
132                         nbytes -= sizeof(u32);
133                         address += sizeof(u32);
134                         data += sizeof(u32);
135                 }
136                 return ret;
137         }
138
139         ce_diag = ar_pci->ce_diag;
140
141         /*
142          * Allocate a temporary bounce buffer to hold caller's data
143          * to be DMA'ed from Target. This guarantees
144          *   1) 4-byte alignment
145          *   2) Buffer in DMA-able space
146          */
147         orig_nbytes = nbytes;
148         data_buf = (unsigned char *)pci_alloc_consistent(ar_pci->pdev,
149                                                          orig_nbytes,
150                                                          &ce_data_base);
151
152         if (!data_buf) {
153                 ret = -ENOMEM;
154                 goto done;
155         }
156         memset(data_buf, 0, orig_nbytes);
157
158         remaining_bytes = orig_nbytes;
159         ce_data = ce_data_base;
160         while (remaining_bytes) {
161                 nbytes = min_t(unsigned int, remaining_bytes,
162                                DIAG_TRANSFER_LIMIT);
163
164                 ret = ath10k_ce_recv_buf_enqueue(ce_diag, NULL, ce_data);
165                 if (ret != 0)
166                         goto done;
167
168                 /* Request CE to send from Target(!) address to Host buffer */
169                 /*
170                  * The address supplied by the caller is in the
171                  * Target CPU virtual address space.
172                  *
173                  * In order to use this address with the diagnostic CE,
174                  * convert it from Target CPU virtual address space
175                  * to CE address space
176                  */
177                 ath10k_pci_wake(ar);
178                 address = TARG_CPU_SPACE_TO_CE_SPACE(ar, ar_pci->mem,
179                                                      address);
180                 ath10k_pci_sleep(ar);
181
182                 ret = ath10k_ce_send(ce_diag, NULL, (u32)address, nbytes, 0,
183                                  0);
184                 if (ret)
185                         goto done;
186
187                 i = 0;
188                 while (ath10k_ce_completed_send_next(ce_diag, NULL, &buf,
189                                                      &completed_nbytes,
190                                                      &id) != 0) {
191                         mdelay(1);
192                         if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) {
193                                 ret = -EBUSY;
194                                 goto done;
195                         }
196                 }
197
198                 if (nbytes != completed_nbytes) {
199                         ret = -EIO;
200                         goto done;
201                 }
202
203                 if (buf != (u32) address) {
204                         ret = -EIO;
205                         goto done;
206                 }
207
208                 i = 0;
209                 while (ath10k_ce_completed_recv_next(ce_diag, NULL, &buf,
210                                                      &completed_nbytes,
211                                                      &id, &flags) != 0) {
212                         mdelay(1);
213
214                         if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) {
215                                 ret = -EBUSY;
216                                 goto done;
217                         }
218                 }
219
220                 if (nbytes != completed_nbytes) {
221                         ret = -EIO;
222                         goto done;
223                 }
224
225                 if (buf != ce_data) {
226                         ret = -EIO;
227                         goto done;
228                 }
229
230                 remaining_bytes -= nbytes;
231                 address += nbytes;
232                 ce_data += nbytes;
233         }
234
235 done:
236         if (ret == 0) {
237                 /* Copy data from allocated DMA buf to caller's buf */
238                 WARN_ON_ONCE(orig_nbytes & 3);
239                 for (i = 0; i < orig_nbytes / sizeof(__le32); i++) {
240                         ((u32 *)data)[i] =
241                                 __le32_to_cpu(((__le32 *)data_buf)[i]);
242                 }
243         } else
244                 ath10k_dbg(ATH10K_DBG_PCI, "%s failure (0x%x)\n",
245                            __func__, address);
246
247         if (data_buf)
248                 pci_free_consistent(ar_pci->pdev, orig_nbytes,
249                                     data_buf, ce_data_base);
250
251         return ret;
252 }
253
254 /* Read 4-byte aligned data from Target memory or register */
255 static int ath10k_pci_diag_read_access(struct ath10k *ar, u32 address,
256                                        u32 *data)
257 {
258         /* Assume range doesn't cross this boundary */
259         if (address >= DRAM_BASE_ADDRESS)
260                 return ath10k_pci_diag_read_mem(ar, address, data, sizeof(u32));
261
262         ath10k_pci_wake(ar);
263         *data = ath10k_pci_read32(ar, address);
264         ath10k_pci_sleep(ar);
265         return 0;
266 }
267
268 static int ath10k_pci_diag_write_mem(struct ath10k *ar, u32 address,
269                                      const void *data, int nbytes)
270 {
271         struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
272         int ret = 0;
273         u32 buf;
274         unsigned int completed_nbytes, orig_nbytes, remaining_bytes;
275         unsigned int id;
276         unsigned int flags;
277         struct ce_state *ce_diag;
278         void *data_buf = NULL;
279         u32 ce_data;    /* Host buffer address in CE space */
280         dma_addr_t ce_data_base = 0;
281         int i;
282
283         ce_diag = ar_pci->ce_diag;
284
285         /*
286          * Allocate a temporary bounce buffer to hold caller's data
287          * to be DMA'ed to Target. This guarantees
288          *   1) 4-byte alignment
289          *   2) Buffer in DMA-able space
290          */
291         orig_nbytes = nbytes;
292         data_buf = (unsigned char *)pci_alloc_consistent(ar_pci->pdev,
293                                                          orig_nbytes,
294                                                          &ce_data_base);
295         if (!data_buf) {
296                 ret = -ENOMEM;
297                 goto done;
298         }
299
300         /* Copy caller's data to allocated DMA buf */
301         WARN_ON_ONCE(orig_nbytes & 3);
302         for (i = 0; i < orig_nbytes / sizeof(__le32); i++)
303                 ((__le32 *)data_buf)[i] = __cpu_to_le32(((u32 *)data)[i]);
304
305         /*
306          * The address supplied by the caller is in the
307          * Target CPU virtual address space.
308          *
309          * In order to use this address with the diagnostic CE,
310          * convert it from
311          *    Target CPU virtual address space
312          * to
313          *    CE address space
314          */
315         ath10k_pci_wake(ar);
316         address = TARG_CPU_SPACE_TO_CE_SPACE(ar, ar_pci->mem, address);
317         ath10k_pci_sleep(ar);
318
319         remaining_bytes = orig_nbytes;
320         ce_data = ce_data_base;
321         while (remaining_bytes) {
322                 /* FIXME: check cast */
323                 nbytes = min_t(int, remaining_bytes, DIAG_TRANSFER_LIMIT);
324
325                 /* Set up to receive directly into Target(!) address */
326                 ret = ath10k_ce_recv_buf_enqueue(ce_diag, NULL, address);
327                 if (ret != 0)
328                         goto done;
329
330                 /*
331                  * Request CE to send caller-supplied data that
332                  * was copied to bounce buffer to Target(!) address.
333                  */
334                 ret = ath10k_ce_send(ce_diag, NULL, (u32) ce_data,
335                                      nbytes, 0, 0);
336                 if (ret != 0)
337                         goto done;
338
339                 i = 0;
340                 while (ath10k_ce_completed_send_next(ce_diag, NULL, &buf,
341                                                      &completed_nbytes,
342                                                      &id) != 0) {
343                         mdelay(1);
344
345                         if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) {
346                                 ret = -EBUSY;
347                                 goto done;
348                         }
349                 }
350
351                 if (nbytes != completed_nbytes) {
352                         ret = -EIO;
353                         goto done;
354                 }
355
356                 if (buf != ce_data) {
357                         ret = -EIO;
358                         goto done;
359                 }
360
361                 i = 0;
362                 while (ath10k_ce_completed_recv_next(ce_diag, NULL, &buf,
363                                                      &completed_nbytes,
364                                                      &id, &flags) != 0) {
365                         mdelay(1);
366
367                         if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) {
368                                 ret = -EBUSY;
369                                 goto done;
370                         }
371                 }
372
373                 if (nbytes != completed_nbytes) {
374                         ret = -EIO;
375                         goto done;
376                 }
377
378                 if (buf != address) {
379                         ret = -EIO;
380                         goto done;
381                 }
382
383                 remaining_bytes -= nbytes;
384                 address += nbytes;
385                 ce_data += nbytes;
386         }
387
388 done:
389         if (data_buf) {
390                 pci_free_consistent(ar_pci->pdev, orig_nbytes, data_buf,
391                                     ce_data_base);
392         }
393
394         if (ret != 0)
395                 ath10k_dbg(ATH10K_DBG_PCI, "%s failure (0x%x)\n", __func__,
396                            address);
397
398         return ret;
399 }
400
401 /* Write 4B data to Target memory or register */
402 static int ath10k_pci_diag_write_access(struct ath10k *ar, u32 address,
403                                         u32 data)
404 {
405         /* Assume range doesn't cross this boundary */
406         if (address >= DRAM_BASE_ADDRESS)
407                 return ath10k_pci_diag_write_mem(ar, address, &data,
408                                                  sizeof(u32));
409
410         ath10k_pci_wake(ar);
411         ath10k_pci_write32(ar, address, data);
412         ath10k_pci_sleep(ar);
413         return 0;
414 }
415
416 static bool ath10k_pci_target_is_awake(struct ath10k *ar)
417 {
418         void __iomem *mem = ath10k_pci_priv(ar)->mem;
419         u32 val;
420         val = ioread32(mem + PCIE_LOCAL_BASE_ADDRESS +
421                        RTC_STATE_ADDRESS);
422         return (RTC_STATE_V_GET(val) == RTC_STATE_V_ON);
423 }
424
425 static void ath10k_pci_wait(struct ath10k *ar)
426 {
427         int n = 100;
428
429         while (n-- && !ath10k_pci_target_is_awake(ar))
430                 msleep(10);
431
432         if (n < 0)
433                 ath10k_warn("Unable to wakeup target\n");
434 }
435
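/*
 * ath10k_do_pci_wake()/ath10k_do_pci_sleep() reference-count wake
 * requests in keep_awake_count: the first waker forces SOC_WAKE and
 * then polls RTC_STATE (for up to PCIE_WAKE_TIMEOUT) until the target
 * reports it is awake; the last caller to sleep clears the wake
 * request again.
 */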
436 void ath10k_do_pci_wake(struct ath10k *ar)
437 {
438         struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
439         void __iomem *pci_addr = ar_pci->mem;
440         int tot_delay = 0;
441         int curr_delay = 5;
442
443         if (atomic_read(&ar_pci->keep_awake_count) == 0) {
444                 /* Force AWAKE */
445                 iowrite32(PCIE_SOC_WAKE_V_MASK,
446                           pci_addr + PCIE_LOCAL_BASE_ADDRESS +
447                           PCIE_SOC_WAKE_ADDRESS);
448         }
449         atomic_inc(&ar_pci->keep_awake_count);
450
451         if (ar_pci->verified_awake)
452                 return;
453
454         for (;;) {
455                 if (ath10k_pci_target_is_awake(ar)) {
456                         ar_pci->verified_awake = true;
457                         break;
458                 }
459
460                 if (tot_delay > PCIE_WAKE_TIMEOUT) {
461                         ath10k_warn("target takes too long to wake up (awake count %d)\n",
462                                     atomic_read(&ar_pci->keep_awake_count));
463                         break;
464                 }
465
466                 udelay(curr_delay);
467                 tot_delay += curr_delay;
468
469                 if (curr_delay < 50)
470                         curr_delay += 5;
471         }
472 }
473
474 void ath10k_do_pci_sleep(struct ath10k *ar)
475 {
476         struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
477         void __iomem *pci_addr = ar_pci->mem;
478
479         if (atomic_dec_and_test(&ar_pci->keep_awake_count)) {
480                 /* Allow sleep */
481                 ar_pci->verified_awake = false;
482                 iowrite32(PCIE_SOC_WAKE_RESET,
483                           pci_addr + PCIE_LOCAL_BASE_ADDRESS +
484                           PCIE_SOC_WAKE_ADDRESS);
485         }
486 }
487
488 /*
489  * FIXME: Handle OOM properly.
490  */
491 static inline
492 struct ath10k_pci_compl *get_free_compl(struct hif_ce_pipe_info *pipe_info)
493 {
494         struct ath10k_pci_compl *compl = NULL;
495
496         spin_lock_bh(&pipe_info->pipe_lock);
497         if (list_empty(&pipe_info->compl_free)) {
498                 ath10k_warn("Completion buffers are full\n");
499                 goto exit;
500         }
501         compl = list_first_entry(&pipe_info->compl_free,
502                                  struct ath10k_pci_compl, list);
503         list_del(&compl->list);
504 exit:
505         spin_unlock_bh(&pipe_info->pipe_lock);
506         return compl;
507 }
508
509 /* Called by lower (CE) layer when a send to Target completes. */
510 static void ath10k_pci_ce_send_done(struct ce_state *ce_state,
511                                     void *transfer_context,
512                                     u32 ce_data,
513                                     unsigned int nbytes,
514                                     unsigned int transfer_id)
515 {
516         struct ath10k *ar = ce_state->ar;
517         struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
518         struct hif_ce_pipe_info *pipe_info =  &ar_pci->pipe_info[ce_state->id];
519         struct ath10k_pci_compl *compl;
520         bool process = false;
521
522         do {
523                 /*
524                  * For the send completion of an item in the sendlist, just
525                  * increment num_sends_allowed. The upper layer callback is
526                  * triggered only once the last fragment has been sent.
527                  */
528                 if (transfer_context == CE_SENDLIST_ITEM_CTXT) {
529                         spin_lock_bh(&pipe_info->pipe_lock);
530                         pipe_info->num_sends_allowed++;
531                         spin_unlock_bh(&pipe_info->pipe_lock);
532                         continue;
533                 }
534
535                 compl = get_free_compl(pipe_info);
536                 if (!compl)
537                         break;
538
539                 compl->send_or_recv = HIF_CE_COMPLETE_SEND;
540                 compl->ce_state = ce_state;
541                 compl->pipe_info = pipe_info;
542                 compl->transfer_context = transfer_context;
543                 compl->nbytes = nbytes;
544                 compl->transfer_id = transfer_id;
545                 compl->flags = 0;
546
547                 /*
548                  * Add the completion to the processing queue.
549                  */
550                 spin_lock_bh(&ar_pci->compl_lock);
551                 list_add_tail(&compl->list, &ar_pci->compl_process);
552                 spin_unlock_bh(&ar_pci->compl_lock);
553
554                 process = true;
555         } while (ath10k_ce_completed_send_next(ce_state,
556                                                            &transfer_context,
557                                                            &ce_data, &nbytes,
558                                                            &transfer_id) == 0);
559
560         /*
561          * If only some of the items within a sendlist have completed,
562          * don't invoke completion processing until the entire sendlist
563          * has been sent.
564          */
565         if (!process)
566                 return;
567
568         ath10k_pci_process_ce(ar);
569 }
570
571 /* Called by lower (CE) layer when data is received from the Target. */
572 static void ath10k_pci_ce_recv_data(struct ce_state *ce_state,
573                                     void *transfer_context, u32 ce_data,
574                                     unsigned int nbytes,
575                                     unsigned int transfer_id,
576                                     unsigned int flags)
577 {
578         struct ath10k *ar = ce_state->ar;
579         struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
580         struct hif_ce_pipe_info *pipe_info =  &ar_pci->pipe_info[ce_state->id];
581         struct ath10k_pci_compl *compl;
582         struct sk_buff *skb;
583
584         do {
585                 compl = get_free_compl(pipe_info);
586                 if (!compl)
587                         break;
588
589                 compl->send_or_recv = HIF_CE_COMPLETE_RECV;
590                 compl->ce_state = ce_state;
591                 compl->pipe_info = pipe_info;
592                 compl->transfer_context = transfer_context;
593                 compl->nbytes = nbytes;
594                 compl->transfer_id = transfer_id;
595                 compl->flags = flags;
596
597                 skb = transfer_context;
598                 dma_unmap_single(ar->dev, ATH10K_SKB_CB(skb)->paddr,
599                                  skb->len + skb_tailroom(skb),
600                                  DMA_FROM_DEVICE);
601                 /*
602                  * Add the completion to the processing queue.
603                  */
604                 spin_lock_bh(&ar_pci->compl_lock);
605                 list_add_tail(&compl->list, &ar_pci->compl_process);
606                 spin_unlock_bh(&ar_pci->compl_lock);
607
608         } while (ath10k_ce_completed_recv_next(ce_state,
609                                                            &transfer_context,
610                                                            &ce_data, &nbytes,
611                                                            &transfer_id,
612                                                            &flags) == 0);
613
614         ath10k_pci_process_ce(ar);
615 }
616
617 /* Send the first 'bytes' bytes of the buffer */
618 static int ath10k_pci_hif_send_head(struct ath10k *ar, u8 pipe_id,
619                                     unsigned int transfer_id,
620                                     unsigned int bytes, struct sk_buff *nbuf)
621 {
622         struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(nbuf);
623         struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
624         struct hif_ce_pipe_info *pipe_info = &(ar_pci->pipe_info[pipe_id]);
625         struct ce_state *ce_hdl = pipe_info->ce_hdl;
626         struct ce_sendlist sendlist;
627         unsigned int len;
628         u32 flags = 0;
629         int ret;
630
631         memset(&sendlist, 0, sizeof(struct ce_sendlist));
632
633         len = min(bytes, nbuf->len);
634         bytes -= len;
635
636         if (len & 3)
637                 ath10k_warn("skb not aligned to 4-byte boundary (%d)\n", len);
638
639         ath10k_dbg(ATH10K_DBG_PCI,
640                    "pci send data vaddr %p paddr 0x%llx len %d as %d bytes\n",
641                    nbuf->data, (unsigned long long) skb_cb->paddr,
642                    nbuf->len, len);
643         ath10k_dbg_dump(ATH10K_DBG_PCI_DUMP, NULL,
644                         "ath10k tx: data: ",
645                         nbuf->data, nbuf->len);
646
647         ath10k_ce_sendlist_buf_add(&sendlist, skb_cb->paddr, len, flags);
648
649         /* Make sure we have resources to handle this request */
650         spin_lock_bh(&pipe_info->pipe_lock);
651         if (!pipe_info->num_sends_allowed) {
652                 ath10k_warn("Pipe: %d is full\n", pipe_id);
653                 spin_unlock_bh(&pipe_info->pipe_lock);
654                 return -ENOSR;
655         }
656         pipe_info->num_sends_allowed--;
657         spin_unlock_bh(&pipe_info->pipe_lock);
658
659         ret = ath10k_ce_sendlist_send(ce_hdl, nbuf, &sendlist, transfer_id);
660         if (ret)
661                 ath10k_warn("CE send failed: %p\n", nbuf);
662
663         return ret;
664 }
665
666 static u16 ath10k_pci_hif_get_free_queue_number(struct ath10k *ar, u8 pipe)
667 {
668         struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
669         struct hif_ce_pipe_info *pipe_info = &(ar_pci->pipe_info[pipe]);
670         int ret;
671
672         spin_lock_bh(&pipe_info->pipe_lock);
673         ret = pipe_info->num_sends_allowed;
674         spin_unlock_bh(&pipe_info->pipe_lock);
675
676         return ret;
677 }
678
679 static void ath10k_pci_hif_dump_area(struct ath10k *ar)
680 {
681         u32 reg_dump_area = 0;
682         u32 reg_dump_values[REG_DUMP_COUNT_QCA988X] = {};
683         u32 host_addr;
684         int ret;
685         u32 i;
686
687         ath10k_err("firmware crashed!\n");
688         ath10k_err("hardware name %s version 0x%x\n",
689                    ar->hw_params.name, ar->target_version);
690         ath10k_err("firmware version: %u.%u.%u.%u\n", ar->fw_version_major,
691                    ar->fw_version_minor, ar->fw_version_release,
692                    ar->fw_version_build);
693
694         host_addr = host_interest_item_address(HI_ITEM(hi_failure_state));
695         if (ath10k_pci_diag_read_mem(ar, host_addr,
696                                      &reg_dump_area, sizeof(u32)) != 0) {
697                 ath10k_warn("could not read hi_failure_state\n");
698                 return;
699         }
700
701         ath10k_err("target register Dump Location: 0x%08X\n", reg_dump_area);
702
703         ret = ath10k_pci_diag_read_mem(ar, reg_dump_area,
704                                        &reg_dump_values[0],
705                                        REG_DUMP_COUNT_QCA988X * sizeof(u32));
706         if (ret != 0) {
707                 ath10k_err("could not dump FW Dump Area\n");
708                 return;
709         }
710
711         BUILD_BUG_ON(REG_DUMP_COUNT_QCA988X % 4);
712
713         ath10k_err("target Register Dump\n");
714         for (i = 0; i < REG_DUMP_COUNT_QCA988X; i += 4)
715                 ath10k_err("[%02d]: 0x%08X 0x%08X 0x%08X 0x%08X\n",
716                            i,
717                            reg_dump_values[i],
718                            reg_dump_values[i + 1],
719                            reg_dump_values[i + 2],
720                            reg_dump_values[i + 3]);
721 }
722
723 static void ath10k_pci_hif_send_complete_check(struct ath10k *ar, u8 pipe,
724                                                int force)
725 {
726         if (!force) {
727                 int resources;
728                 /*
729                  * Decide whether to actually poll for completions, or just
730                  * wait for a later chance.
731                  * If there seem to be plenty of resources left, then just wait
732                  * since checking involves reading a CE register, which is a
733                  * relatively expensive operation.
734                  */
735                 resources = ath10k_pci_hif_get_free_queue_number(ar, pipe);
736
737                 /*
738                  * If at least 50% of the total resources are still available,
739                  * don't bother checking again yet.
740                  */
741                 if (resources > (host_ce_config_wlan[pipe].src_nentries >> 1))
742                         return;
743         }
744         ath10k_ce_per_engine_service(ar, pipe);
745 }
746
747 static void ath10k_pci_hif_post_init(struct ath10k *ar,
748                                      struct ath10k_hif_cb *callbacks)
749 {
750         struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
751
752         ath10k_dbg(ATH10K_DBG_PCI, "%s\n", __func__);
753
754         memcpy(&ar_pci->msg_callbacks_current, callbacks,
755                sizeof(ar_pci->msg_callbacks_current));
756 }
757
758 static int ath10k_pci_start_ce(struct ath10k *ar)
759 {
760         struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
761         struct ce_state *ce_diag = ar_pci->ce_diag;
762         const struct ce_attr *attr;
763         struct hif_ce_pipe_info *pipe_info;
764         struct ath10k_pci_compl *compl;
765         int i, pipe_num, completions, disable_interrupts;
766
767         spin_lock_init(&ar_pci->compl_lock);
768         INIT_LIST_HEAD(&ar_pci->compl_process);
769
770         for (pipe_num = 0; pipe_num < ar_pci->ce_count; pipe_num++) {
771                 pipe_info = &ar_pci->pipe_info[pipe_num];
772
773                 spin_lock_init(&pipe_info->pipe_lock);
774                 INIT_LIST_HEAD(&pipe_info->compl_free);
775
776                 /* Handle Diagnostic CE specially */
777                 if (pipe_info->ce_hdl == ce_diag)
778                         continue;
779
780                 attr = &host_ce_config_wlan[pipe_num];
781                 completions = 0;
782
783                 if (attr->src_nentries) {
784                         disable_interrupts = attr->flags & CE_ATTR_DIS_INTR;
785                         ath10k_ce_send_cb_register(pipe_info->ce_hdl,
786                                                    ath10k_pci_ce_send_done,
787                                                    disable_interrupts);
788                         completions += attr->src_nentries;
789                         pipe_info->num_sends_allowed = attr->src_nentries - 1;
790                 }
791
792                 if (attr->dest_nentries) {
793                         ath10k_ce_recv_cb_register(pipe_info->ce_hdl,
794                                                    ath10k_pci_ce_recv_data);
795                         completions += attr->dest_nentries;
796                 }
797
798                 if (completions == 0)
799                         continue;
800
801                 for (i = 0; i < completions; i++) {
802                         compl = kmalloc(sizeof(struct ath10k_pci_compl),
803                                         GFP_KERNEL);
804                         if (!compl) {
805                                 ath10k_warn("No memory for completion state\n");
806                                 ath10k_pci_stop_ce(ar);
807                                 return -ENOMEM;
808                         }
809
810                         compl->send_or_recv = HIF_CE_COMPLETE_FREE;
811                         list_add_tail(&compl->list, &pipe_info->compl_free);
812                 }
813         }
814
815         return 0;
816 }
817
818 static void ath10k_pci_stop_ce(struct ath10k *ar)
819 {
820         struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
821         struct ath10k_pci_compl *compl;
822         struct sk_buff *skb;
823         int i;
824
825         ath10k_ce_disable_interrupts(ar);
826
827         /* Cancel the pending tasklet */
828         tasklet_kill(&ar_pci->intr_tq);
829
830         for (i = 0; i < CE_COUNT; i++)
831                 tasklet_kill(&ar_pci->pipe_info[i].intr);
832
833         /* Mark pending completions as aborted, so that upper layers free up
834          * their associated resources */
835         spin_lock_bh(&ar_pci->compl_lock);
836         list_for_each_entry(compl, &ar_pci->compl_process, list) {
837                 skb = (struct sk_buff *)compl->transfer_context;
838                 ATH10K_SKB_CB(skb)->is_aborted = true;
839         }
840         spin_unlock_bh(&ar_pci->compl_lock);
841 }
842
843 static void ath10k_pci_cleanup_ce(struct ath10k *ar)
844 {
845         struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
846         struct ath10k_pci_compl *compl, *tmp;
847         struct hif_ce_pipe_info *pipe_info;
848         struct sk_buff *netbuf;
849         int pipe_num;
850
851         /* Free pending completions. */
852         spin_lock_bh(&ar_pci->compl_lock);
853         if (!list_empty(&ar_pci->compl_process))
854                 ath10k_warn("pending completions still present! possible memory leaks.\n");
855
856         list_for_each_entry_safe(compl, tmp, &ar_pci->compl_process, list) {
857                 list_del(&compl->list);
858                 netbuf = (struct sk_buff *)compl->transfer_context;
859                 dev_kfree_skb_any(netbuf);
860                 kfree(compl);
861         }
862         spin_unlock_bh(&ar_pci->compl_lock);
863
864         /* Free unused completions for each pipe. */
865         for (pipe_num = 0; pipe_num < ar_pci->ce_count; pipe_num++) {
866                 pipe_info = &ar_pci->pipe_info[pipe_num];
867
868                 spin_lock_bh(&pipe_info->pipe_lock);
869                 list_for_each_entry_safe(compl, tmp,
870                                          &pipe_info->compl_free, list) {
871                         list_del(&compl->list);
872                         kfree(compl);
873                 }
874                 spin_unlock_bh(&pipe_info->pipe_lock);
875         }
876 }
877
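/*
 * Drain the shared compl_process list: send completions are reported
 * via the tx_completion callback, receive completions first repost a
 * buffer with ath10k_pci_post_rx_pipe() and are then handed to
 * rx_completion. Processed entries are returned to the pipe's free
 * list.
 */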
878 static void ath10k_pci_process_ce(struct ath10k *ar)
879 {
880         struct ath10k_pci *ar_pci = ar->hif.priv;
881         struct ath10k_hif_cb *cb = &ar_pci->msg_callbacks_current;
882         struct ath10k_pci_compl *compl;
883         struct sk_buff *skb;
884         unsigned int nbytes;
885         int ret, send_done = 0;
886
887         /* Upper layers aren't ready to handle tx/rx completions in parallel so
888          * we must serialize all completion processing. */
889
890         spin_lock_bh(&ar_pci->compl_lock);
891         if (ar_pci->compl_processing) {
892                 spin_unlock_bh(&ar_pci->compl_lock);
893                 return;
894         }
895         ar_pci->compl_processing = true;
896         spin_unlock_bh(&ar_pci->compl_lock);
897
898         for (;;) {
899                 spin_lock_bh(&ar_pci->compl_lock);
900                 if (list_empty(&ar_pci->compl_process)) {
901                         spin_unlock_bh(&ar_pci->compl_lock);
902                         break;
903                 }
904                 compl = list_first_entry(&ar_pci->compl_process,
905                                          struct ath10k_pci_compl, list);
906                 list_del(&compl->list);
907                 spin_unlock_bh(&ar_pci->compl_lock);
908
909                 if (compl->send_or_recv == HIF_CE_COMPLETE_SEND) {
910                         cb->tx_completion(ar,
911                                           compl->transfer_context,
912                                           compl->transfer_id);
913                         send_done = 1;
914                 } else {
915                         ret = ath10k_pci_post_rx_pipe(compl->pipe_info, 1);
916                         if (ret) {
917                                 ath10k_warn("Unable to post recv buffer for pipe: %d\n",
918                                             compl->pipe_info->pipe_num);
919                                 break;
920                         }
921
922                         skb = (struct sk_buff *)compl->transfer_context;
923                         nbytes = compl->nbytes;
924
925                         ath10k_dbg(ATH10K_DBG_PCI,
926                                    "ath10k_pci_ce_recv_data netbuf=%p  nbytes=%d\n",
927                                    skb, nbytes);
928                         ath10k_dbg_dump(ATH10K_DBG_PCI_DUMP, NULL,
929                                         "ath10k rx: ", skb->data, nbytes);
930
931                         if (skb->len + skb_tailroom(skb) >= nbytes) {
932                                 skb_trim(skb, 0);
933                                 skb_put(skb, nbytes);
934                                 cb->rx_completion(ar, skb,
935                                                   compl->pipe_info->pipe_num);
936                         } else {
937                                 ath10k_warn("rxed more than expected (nbytes %d, max %d)\n",
938                                             nbytes,
939                                             skb->len + skb_tailroom(skb));
940                         }
941                 }
942
943                 compl->send_or_recv = HIF_CE_COMPLETE_FREE;
944
945                 /*
946                  * Add completion back to the pipe's free list.
947                  */
948                 spin_lock_bh(&compl->pipe_info->pipe_lock);
949                 list_add_tail(&compl->list, &compl->pipe_info->compl_free);
950                 compl->pipe_info->num_sends_allowed += send_done;
951                 spin_unlock_bh(&compl->pipe_info->pipe_lock);
952         }
953
954         spin_lock_bh(&ar_pci->compl_lock);
955         ar_pci->compl_processing = false;
956         spin_unlock_bh(&ar_pci->compl_lock);
957 }
958
959 /* TODO - temporary mapping while we have too few CE's */
960 static int ath10k_pci_hif_map_service_to_pipe(struct ath10k *ar,
961                                               u16 service_id, u8 *ul_pipe,
962                                               u8 *dl_pipe, int *ul_is_polled,
963                                               int *dl_is_polled)
964 {
965         int ret = 0;
966
967         /* polling for received messages not supported */
968         *dl_is_polled = 0;
969
970         switch (service_id) {
971         case ATH10K_HTC_SVC_ID_HTT_DATA_MSG:
972                 /*
973                  * Host->target HTT gets its own pipe, so it can be polled
974                  * while other pipes are interrupt driven.
975                  */
976                 *ul_pipe = 4;
977                 /*
978                  * Use the same target->host pipe for HTC ctrl, HTC raw
979                  * streams, and HTT.
980                  */
981                 *dl_pipe = 1;
982                 break;
983
984         case ATH10K_HTC_SVC_ID_RSVD_CTRL:
985         case ATH10K_HTC_SVC_ID_TEST_RAW_STREAMS:
986                 /*
987                  * Note: HTC_RAW_STREAMS_SVC is currently unused, and
988                  * HTC_CTRL_RSVD_SVC could share the same pipe as the
989                  * WMI services.  So, if another CE is needed, change
990                  * this to *ul_pipe = 3, which frees up CE 0.
991                  */
992                 /* *ul_pipe = 3; */
993                 *ul_pipe = 0;
994                 *dl_pipe = 1;
995                 break;
996
997         case ATH10K_HTC_SVC_ID_WMI_DATA_BK:
998         case ATH10K_HTC_SVC_ID_WMI_DATA_BE:
999         case ATH10K_HTC_SVC_ID_WMI_DATA_VI:
1000         case ATH10K_HTC_SVC_ID_WMI_DATA_VO:
1001
1002         case ATH10K_HTC_SVC_ID_WMI_CONTROL:
1003                 *ul_pipe = 3;
1004                 *dl_pipe = 2;
1005                 break;
1006
1007                 /* pipe 5 unused   */
1008                 /* pipe 6 reserved */
1009                 /* pipe 7 reserved */
1010
1011         default:
1012                 ret = -1;
1013                 break;
1014         }
1015         *ul_is_polled =
1016                 (host_ce_config_wlan[*ul_pipe].flags & CE_ATTR_DIS_INTR) != 0;
1017
1018         return ret;
1019 }
1020
1021 static void ath10k_pci_hif_get_default_pipe(struct ath10k *ar,
1022                                                 u8 *ul_pipe, u8 *dl_pipe)
1023 {
1024         int ul_is_polled, dl_is_polled;
1025
1026         (void)ath10k_pci_hif_map_service_to_pipe(ar,
1027                                                  ATH10K_HTC_SVC_ID_RSVD_CTRL,
1028                                                  ul_pipe,
1029                                                  dl_pipe,
1030                                                  &ul_is_polled,
1031                                                  &dl_is_polled);
1032 }
1033
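/*
 * Allocate and DMA-map 'num' receive skbs for the pipe and hand them
 * to the CE destination ring. On any failure the buffers posted so
 * far are reclaimed via ath10k_pci_rx_pipe_cleanup().
 */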
1034 static int ath10k_pci_post_rx_pipe(struct hif_ce_pipe_info *pipe_info,
1035                                    int num)
1036 {
1037         struct ath10k *ar = pipe_info->hif_ce_state;
1038         struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1039         struct ce_state *ce_state = pipe_info->ce_hdl;
1040         struct sk_buff *skb;
1041         dma_addr_t ce_data;
1042         int i, ret = 0;
1043
1044         if (pipe_info->buf_sz == 0)
1045                 return 0;
1046
1047         for (i = 0; i < num; i++) {
1048                 skb = dev_alloc_skb(pipe_info->buf_sz);
1049                 if (!skb) {
1050                         ath10k_warn("could not allocate skbuff for pipe %d\n",
1051                                     pipe_info->pipe_num);
1052                         ret = -ENOMEM;
1053                         goto err;
1054                 }
1055
1056                 WARN_ONCE((unsigned long)skb->data & 3, "unaligned skb");
1057
1058                 ce_data = dma_map_single(ar->dev, skb->data,
1059                                          skb->len + skb_tailroom(skb),
1060                                          DMA_FROM_DEVICE);
1061
1062                 if (unlikely(dma_mapping_error(ar->dev, ce_data))) {
1063                         ath10k_warn("could not dma map skbuff\n");
1064                         dev_kfree_skb_any(skb);
1065                         ret = -EIO;
1066                         goto err;
1067                 }
1068
1069                 ATH10K_SKB_CB(skb)->paddr = ce_data;
1070
1071                 pci_dma_sync_single_for_device(ar_pci->pdev, ce_data,
1072                                                pipe_info->buf_sz,
1073                                                PCI_DMA_FROMDEVICE);
1074
1075                 ret = ath10k_ce_recv_buf_enqueue(ce_state, (void *)skb,
1076                                                  ce_data);
1077                 if (ret) {
1078                         ath10k_warn("could not enqueue to pipe %d (%d)\n",
1079                                     pipe_info->pipe_num, ret);
1080                         goto err;
1081                 }
1082         }
1083
1084         return ret;
1085
1086 err:
1087         ath10k_pci_rx_pipe_cleanup(pipe_info);
1088         return ret;
1089 }
1090
1091 static int ath10k_pci_post_rx(struct ath10k *ar)
1092 {
1093         struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1094         struct hif_ce_pipe_info *pipe_info;
1095         const struct ce_attr *attr;
1096         int pipe_num, ret = 0;
1097
1098         for (pipe_num = 0; pipe_num < ar_pci->ce_count; pipe_num++) {
1099                 pipe_info = &ar_pci->pipe_info[pipe_num];
1100                 attr = &host_ce_config_wlan[pipe_num];
1101
1102                 if (attr->dest_nentries == 0)
1103                         continue;
1104
1105                 ret = ath10k_pci_post_rx_pipe(pipe_info,
1106                                               attr->dest_nentries - 1);
1107                 if (ret) {
1108                         ath10k_warn("Unable to replenish recv buffers for pipe: %d\n",
1109                                     pipe_num);
1110
1111                         for (; pipe_num >= 0; pipe_num--) {
1112                                 pipe_info = &ar_pci->pipe_info[pipe_num];
1113                                 ath10k_pci_rx_pipe_cleanup(pipe_info);
1114                         }
1115                         return ret;
1116                 }
1117         }
1118
1119         return 0;
1120 }
1121
1122 static int ath10k_pci_hif_start(struct ath10k *ar)
1123 {
1124         struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1125         int ret;
1126
1127         ret = ath10k_pci_start_ce(ar);
1128         if (ret) {
1129                 ath10k_warn("could not start CE (%d)\n", ret);
1130                 return ret;
1131         }
1132
1133         /* Post buffers once to start things off. */
1134         ret = ath10k_pci_post_rx(ar);
1135         if (ret) {
1136                 ath10k_warn("could not post rx pipes (%d)\n", ret);
1137                 return ret;
1138         }
1139
1140         ar_pci->started = 1;
1141         return 0;
1142 }
1143
1144 static void ath10k_pci_rx_pipe_cleanup(struct hif_ce_pipe_info *pipe_info)
1145 {
1146         struct ath10k *ar;
1147         struct ath10k_pci *ar_pci;
1148         struct ce_state *ce_hdl;
1149         u32 buf_sz;
1150         struct sk_buff *netbuf;
1151         u32 ce_data;
1152
1153         buf_sz = pipe_info->buf_sz;
1154
1155         /* Unused Copy Engine */
1156         if (buf_sz == 0)
1157                 return;
1158
1159         ar = pipe_info->hif_ce_state;
1160         ar_pci = ath10k_pci_priv(ar);
1161
1162         if (!ar_pci->started)
1163                 return;
1164
1165         ce_hdl = pipe_info->ce_hdl;
1166
1167         while (ath10k_ce_revoke_recv_next(ce_hdl, (void **)&netbuf,
1168                                           &ce_data) == 0) {
1169                 dma_unmap_single(ar->dev, ATH10K_SKB_CB(netbuf)->paddr,
1170                                  netbuf->len + skb_tailroom(netbuf),
1171                                  DMA_FROM_DEVICE);
1172                 dev_kfree_skb_any(netbuf);
1173         }
1174 }
1175
1176 static void ath10k_pci_tx_pipe_cleanup(struct hif_ce_pipe_info *pipe_info)
1177 {
1178         struct ath10k *ar;
1179         struct ath10k_pci *ar_pci;
1180         struct ce_state *ce_hdl;
1181         struct sk_buff *netbuf;
1182         u32 ce_data;
1183         unsigned int nbytes;
1184         unsigned int id;
1185         u32 buf_sz;
1186
1187         buf_sz = pipe_info->buf_sz;
1188
1189         /* Unused Copy Engine */
1190         if (buf_sz == 0)
1191                 return;
1192
1193         ar = pipe_info->hif_ce_state;
1194         ar_pci = ath10k_pci_priv(ar);
1195
1196         if (!ar_pci->started)
1197                 return;
1198
1199         ce_hdl = pipe_info->ce_hdl;
1200
1201         while (ath10k_ce_cancel_send_next(ce_hdl, (void **)&netbuf,
1202                                           &ce_data, &nbytes, &id) == 0) {
1203                 if (netbuf != CE_SENDLIST_ITEM_CTXT) {
1204                         /*
1205                          * Indicate the completion to higher layer to free
1206                          * the buffer
1207                          */
1208                         ATH10K_SKB_CB(netbuf)->is_aborted = true;
1209                         ar_pci->msg_callbacks_current.tx_completion(ar,
1210                                                                     netbuf,
1211                                                                     id);
                     }
1212         }
1213 }
1214
1215 /*
1216  * Cleanup residual buffers for device shutdown:
1217  *    buffers that were enqueued for receive
1218  *    buffers that were to be sent
1219  * Note: Buffers that had completed but which were
1220  * not yet processed are on a completion queue. They
1221  * are handled when the completion thread shuts down.
1222  */
1223 static void ath10k_pci_buffer_cleanup(struct ath10k *ar)
1224 {
1225         struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1226         int pipe_num;
1227
1228         for (pipe_num = 0; pipe_num < ar_pci->ce_count; pipe_num++) {
1229                 struct hif_ce_pipe_info *pipe_info;
1230
1231                 pipe_info = &ar_pci->pipe_info[pipe_num];
1232                 ath10k_pci_rx_pipe_cleanup(pipe_info);
1233                 ath10k_pci_tx_pipe_cleanup(pipe_info);
1234         }
1235 }
1236
1237 static void ath10k_pci_ce_deinit(struct ath10k *ar)
1238 {
1239         struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1240         struct hif_ce_pipe_info *pipe_info;
1241         int pipe_num;
1242
1243         for (pipe_num = 0; pipe_num < ar_pci->ce_count; pipe_num++) {
1244                 pipe_info = &ar_pci->pipe_info[pipe_num];
1245                 if (pipe_info->ce_hdl) {
1246                         ath10k_ce_deinit(pipe_info->ce_hdl);
1247                         pipe_info->ce_hdl = NULL;
1248                         pipe_info->buf_sz = 0;
1249                 }
1250         }
1251 }
1252
1253 static void ath10k_pci_hif_stop(struct ath10k *ar)
1254 {
1255         ath10k_dbg(ATH10K_DBG_PCI, "%s\n", __func__);
1256
1257         ath10k_pci_stop_ce(ar);
1258
1259         /* At this point, asynchronous threads are stopped, the target should
1260          * not DMA nor interrupt. We process the leftovers and then free
1261          * everything else up. */
1262
1263         ath10k_pci_process_ce(ar);
1264         ath10k_pci_cleanup_ce(ar);
1265         ath10k_pci_buffer_cleanup(ar);
1266         ath10k_pci_ce_deinit(ar);
1267 }
1268
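/*
 * BMI exchange: the request is copied into a DMA-mapped bounce buffer
 * and sent on the BMI transmit CE (BMI_CE_NUM_TO_TARG); if a response
 * is expected, a buffer is posted on the BMI receive CE
 * (BMI_CE_NUM_TO_HOST) first and the call then waits for xfer.done
 * with a BMI_COMMUNICATION_TIMEOUT_HZ timeout.
 */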
1269 static int ath10k_pci_hif_exchange_bmi_msg(struct ath10k *ar,
1270                                            void *req, u32 req_len,
1271                                            void *resp, u32 *resp_len)
1272 {
1273         struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1274         struct ce_state *ce_tx = ar_pci->pipe_info[BMI_CE_NUM_TO_TARG].ce_hdl;
1275         struct ce_state *ce_rx = ar_pci->pipe_info[BMI_CE_NUM_TO_HOST].ce_hdl;
1276         dma_addr_t req_paddr = 0;
1277         dma_addr_t resp_paddr = 0;
1278         struct bmi_xfer xfer = {};
1279         void *treq, *tresp = NULL;
1280         int ret = 0;
1281
1282         if (resp && !resp_len)
1283                 return -EINVAL;
1284
1285         if (resp && resp_len && *resp_len == 0)
1286                 return -EINVAL;
1287
1288         treq = kmemdup(req, req_len, GFP_KERNEL);
1289         if (!treq)
1290                 return -ENOMEM;
1291
1292         req_paddr = dma_map_single(ar->dev, treq, req_len, DMA_TO_DEVICE);
1293         ret = dma_mapping_error(ar->dev, req_paddr);
1294         if (ret)
1295                 goto err_dma;
1296
1297         if (resp && resp_len) {
1298                 tresp = kzalloc(*resp_len, GFP_KERNEL);
1299                 if (!tresp) {
1300                         ret = -ENOMEM;
1301                         goto err_req;
1302                 }
1303
1304                 resp_paddr = dma_map_single(ar->dev, tresp, *resp_len,
1305                                             DMA_FROM_DEVICE);
1306                 ret = dma_mapping_error(ar->dev, resp_paddr);
1307                 if (ret)
1308                         goto err_req;
1309
1310                 xfer.wait_for_resp = true;
1311                 xfer.resp_len = 0;
1312
1313                 ath10k_ce_recv_buf_enqueue(ce_rx, &xfer, resp_paddr);
1314         }
1315
1316         init_completion(&xfer.done);
1317
1318         ret = ath10k_ce_send(ce_tx, &xfer, req_paddr, req_len, -1, 0);
1319         if (ret)
1320                 goto err_resp;
1321
1322         ret = wait_for_completion_timeout(&xfer.done,
1323                                           BMI_COMMUNICATION_TIMEOUT_HZ);
1324         if (ret <= 0) {
1325                 u32 unused_buffer;
1326                 unsigned int unused_nbytes;
1327                 unsigned int unused_id;
1328
1329                 ret = -ETIMEDOUT;
1330                 ath10k_ce_cancel_send_next(ce_tx, NULL, &unused_buffer,
1331                                            &unused_nbytes, &unused_id);
1332         } else {
1333                 /* non-zero means we did not time out */
1334                 ret = 0;
1335         }
1336
1337 err_resp:
1338         if (resp) {
1339                 u32 unused_buffer;
1340
1341                 ath10k_ce_revoke_recv_next(ce_rx, NULL, &unused_buffer);
1342                 dma_unmap_single(ar->dev, resp_paddr,
1343                                  *resp_len, DMA_FROM_DEVICE);
1344         }
1345 err_req:
1346         dma_unmap_single(ar->dev, req_paddr, req_len, DMA_TO_DEVICE);
1347
1348         if (ret == 0 && resp_len) {
1349                 *resp_len = min(*resp_len, xfer.resp_len);
1350                 memcpy(resp, tresp, xfer.resp_len);
1351         }
1352 err_dma:
1353         kfree(treq);
1354         kfree(tresp);
1355
1356         return ret;
1357 }
1358
1359 static void ath10k_pci_bmi_send_done(struct ce_state *ce_state,
1360                                      void *transfer_context,
1361                                      u32 data,
1362                                      unsigned int nbytes,
1363                                      unsigned int transfer_id)
1364 {
1365         struct bmi_xfer *xfer = transfer_context;
1366
1367         if (xfer->wait_for_resp)
1368                 return;
1369
1370         complete(&xfer->done);
1371 }
1372
1373 static void ath10k_pci_bmi_recv_data(struct ce_state *ce_state,
1374                                      void *transfer_context,
1375                                      u32 data,
1376                                      unsigned int nbytes,
1377                                      unsigned int transfer_id,
1378                                      unsigned int flags)
1379 {
1380         struct bmi_xfer *xfer = transfer_context;
1381
1382         if (!xfer->wait_for_resp) {
1383                 ath10k_warn("unexpected: BMI data received; ignoring\n");
1384                 return;
1385         }
1386
1387         xfer->resp_len = nbytes;
1388         complete(&xfer->done);
1389 }
1390
1391 /*
1392  * Map from service/endpoint to Copy Engine.
1393  * This table is derived from the CE_PCI TABLE, above.
1394  * It is passed to the Target at startup for use by firmware.
1395  */
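/* Each entry is { HTC service id, PIPEDIR_* direction, CE pipe number }. */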
1396 static const struct service_to_pipe target_service_to_ce_map_wlan[] = {
1397         {
1398                  ATH10K_HTC_SVC_ID_WMI_DATA_VO,
1399                  PIPEDIR_OUT,           /* out = UL = host -> target */
1400                  3,
1401         },
1402         {
1403                  ATH10K_HTC_SVC_ID_WMI_DATA_VO,
1404                  PIPEDIR_IN,            /* in = DL = target -> host */
1405                  2,
1406         },
1407         {
1408                  ATH10K_HTC_SVC_ID_WMI_DATA_BK,
1409                  PIPEDIR_OUT,           /* out = UL = host -> target */
1410                  3,
1411         },
1412         {
1413                  ATH10K_HTC_SVC_ID_WMI_DATA_BK,
1414                  PIPEDIR_IN,            /* in = DL = target -> host */
1415                  2,
1416         },
1417         {
1418                  ATH10K_HTC_SVC_ID_WMI_DATA_BE,
1419                  PIPEDIR_OUT,           /* out = UL = host -> target */
1420                  3,
1421         },
1422         {
1423                  ATH10K_HTC_SVC_ID_WMI_DATA_BE,
1424                  PIPEDIR_IN,            /* in = DL = target -> host */
1425                  2,
1426         },
1427         {
1428                  ATH10K_HTC_SVC_ID_WMI_DATA_VI,
1429                  PIPEDIR_OUT,           /* out = UL = host -> target */
1430                  3,
1431         },
1432         {
1433                  ATH10K_HTC_SVC_ID_WMI_DATA_VI,
1434                  PIPEDIR_IN,            /* in = DL = target -> host */
1435                  2,
1436         },
1437         {
1438                  ATH10K_HTC_SVC_ID_WMI_CONTROL,
1439                  PIPEDIR_OUT,           /* out = UL = host -> target */
1440                  3,
1441         },
1442         {
1443                  ATH10K_HTC_SVC_ID_WMI_CONTROL,
1444                  PIPEDIR_IN,            /* in = DL = target -> host */
1445                  2,
1446         },
1447         {
1448                  ATH10K_HTC_SVC_ID_RSVD_CTRL,
1449                  PIPEDIR_OUT,           /* out = UL = host -> target */
1450                  0,             /* could be moved to 3 (share with WMI) */
1451         },
1452         {
1453                  ATH10K_HTC_SVC_ID_RSVD_CTRL,
1454                  PIPEDIR_IN,            /* in = DL = target -> host */
1455                  1,
1456         },
1457         {
1458                  ATH10K_HTC_SVC_ID_TEST_RAW_STREAMS,    /* not currently used */
1459                  PIPEDIR_OUT,           /* out = UL = host -> target */
1460                  0,
1461         },
1462         {
1463                  ATH10K_HTC_SVC_ID_TEST_RAW_STREAMS,    /* not currently used */
1464                  PIPEDIR_IN,            /* in = DL = target -> host */
1465                  1,
1466         },
1467         {
1468                  ATH10K_HTC_SVC_ID_HTT_DATA_MSG,
1469                  PIPEDIR_OUT,           /* out = UL = host -> target */
1470                  4,
1471         },
1472         {
1473                  ATH10K_HTC_SVC_ID_HTT_DATA_MSG,
1474                  PIPEDIR_IN,            /* in = DL = target -> host */
1475                  1,
1476         },
1477
1478         /* (Additions here) */
1479
1480         {                               /* Must be last */
1481                  0,
1482                  0,
1483                  0,
1484         },
1485 };
1486
1487 /*
1488  * Send an interrupt to the device to wake up the Target CPU
1489  * so it has an opportunity to notice any changed state.
1490  */
1491 static int ath10k_pci_wake_target_cpu(struct ath10k *ar)
1492 {
1493         int ret;
1494         u32 core_ctrl;
1495
1496         ret = ath10k_pci_diag_read_access(ar, SOC_CORE_BASE_ADDRESS |
1497                                               CORE_CTRL_ADDRESS,
1498                                           &core_ctrl);
1499         if (ret) {
1500                 ath10k_warn("Unable to read core ctrl\n");
1501                 return ret;
1502         }
1503
1504         /* A_INUM_FIRMWARE interrupt to Target CPU */
1505         core_ctrl |= CORE_CTRL_CPU_INTR_MASK;
1506
1507         ret = ath10k_pci_diag_write_access(ar, SOC_CORE_BASE_ADDRESS |
1508                                                CORE_CTRL_ADDRESS,
1509                                            core_ctrl);
1510         if (ret)
1511                 ath10k_warn("Unable to set interrupt mask\n");
1512
1513         return ret;
1514 }
1515
1516 static int ath10k_pci_init_config(struct ath10k *ar)
1517 {
1518         u32 interconnect_targ_addr;
1519         u32 pcie_state_targ_addr = 0;
1520         u32 pipe_cfg_targ_addr = 0;
1521         u32 svc_to_pipe_map = 0;
1522         u32 pcie_config_flags = 0;
1523         u32 ealloc_value;
1524         u32 ealloc_targ_addr;
1525         u32 flag2_value;
1526         u32 flag2_targ_addr;
1527         int ret = 0;
1528
1529         /* Download to Target the CE Config and the service-to-CE map */
1530         interconnect_targ_addr =
1531                 host_interest_item_address(HI_ITEM(hi_interconnect_state));
1532
1533         /* Supply Target-side CE configuration */
1534         ret = ath10k_pci_diag_read_access(ar, interconnect_targ_addr,
1535                                           &pcie_state_targ_addr);
1536         if (ret != 0) {
1537                 ath10k_err("Failed to get pcie state addr: %d\n", ret);
1538                 return ret;
1539         }
1540
1541         if (pcie_state_targ_addr == 0) {
1542                 ret = -EIO;
1543                 ath10k_err("Invalid pcie state addr\n");
1544                 return ret;
1545         }
1546
1547         ret = ath10k_pci_diag_read_access(ar, pcie_state_targ_addr +
1548                                           offsetof(struct pcie_state,
1549                                                    pipe_cfg_addr),
1550                                           &pipe_cfg_targ_addr);
1551         if (ret != 0) {
1552                 ath10k_err("Failed to get pipe cfg addr: %d\n", ret);
1553                 return ret;
1554         }
1555
1556         if (pipe_cfg_targ_addr == 0) {
1557                 ret = -EIO;
1558                 ath10k_err("Invalid pipe cfg addr\n");
1559                 return ret;
1560         }
1561
1562         ret = ath10k_pci_diag_write_mem(ar, pipe_cfg_targ_addr,
1563                                  target_ce_config_wlan,
1564                                  sizeof(target_ce_config_wlan));
1565
1566         if (ret != 0) {
1567                 ath10k_err("Failed to write pipe cfg: %d\n", ret);
1568                 return ret;
1569         }
1570
1571         ret = ath10k_pci_diag_read_access(ar, pcie_state_targ_addr +
1572                                           offsetof(struct pcie_state,
1573                                                    svc_to_pipe_map),
1574                                           &svc_to_pipe_map);
1575         if (ret != 0) {
1576                 ath10k_err("Failed to get svc/pipe map: %d\n", ret);
1577                 return ret;
1578         }
1579
1580         if (svc_to_pipe_map == 0) {
1581                 ret = -EIO;
1582                 ath10k_err("Invalid svc_to_pipe map\n");
1583                 return ret;
1584         }
1585
1586         ret = ath10k_pci_diag_write_mem(ar, svc_to_pipe_map,
1587                                  target_service_to_ce_map_wlan,
1588                                  sizeof(target_service_to_ce_map_wlan));
1589         if (ret != 0) {
1590                 ath10k_err("Failed to write svc/pipe map: %d\n", ret);
1591                 return ret;
1592         }
1593
1594         ret = ath10k_pci_diag_read_access(ar, pcie_state_targ_addr +
1595                                           offsetof(struct pcie_state,
1596                                                    config_flags),
1597                                           &pcie_config_flags);
1598         if (ret != 0) {
1599                 ath10k_err("Failed to get pcie config_flags: %d\n", ret);
1600                 return ret;
1601         }
1602
1603         pcie_config_flags &= ~PCIE_CONFIG_FLAG_ENABLE_L1;
1604
1605         ret = ath10k_pci_diag_write_mem(ar, pcie_state_targ_addr +
1606                                  offsetof(struct pcie_state, config_flags),
1607                                  &pcie_config_flags,
1608                                  sizeof(pcie_config_flags));
1609         if (ret != 0) {
1610                 ath10k_err("Failed to write pcie config_flags: %d\n", ret);
1611                 return ret;
1612         }
1613
1614         /* configure early allocation */
1615         ealloc_targ_addr = host_interest_item_address(HI_ITEM(hi_early_alloc));
1616
1617         ret = ath10k_pci_diag_read_access(ar, ealloc_targ_addr, &ealloc_value);
1618         if (ret != 0) {
1619                 ath10k_err("Faile to get early alloc val: %d\n", ret);
1620                 return ret;
1621         }
1622
1623         /* first bank is switched to IRAM */
1624         ealloc_value |= ((HI_EARLY_ALLOC_MAGIC << HI_EARLY_ALLOC_MAGIC_SHIFT) &
1625                          HI_EARLY_ALLOC_MAGIC_MASK);
1626         ealloc_value |= ((1 << HI_EARLY_ALLOC_IRAM_BANKS_SHIFT) &
1627                          HI_EARLY_ALLOC_IRAM_BANKS_MASK);
1628
1629         ret = ath10k_pci_diag_write_access(ar, ealloc_targ_addr, ealloc_value);
1630         if (ret != 0) {
1631                 ath10k_err("Failed to set early alloc val: %d\n", ret);
1632                 return ret;
1633         }
1634
1635         /* Tell Target to proceed with initialization */
1636         flag2_targ_addr = host_interest_item_address(HI_ITEM(hi_option_flag2));
1637
1638         ret = ath10k_pci_diag_read_access(ar, flag2_targ_addr, &flag2_value);
1639         if (ret != 0) {
1640                 ath10k_err("Failed to get option val: %d\n", ret);
1641                 return ret;
1642         }
1643
1644         flag2_value |= HI_OPTION_EARLY_CFG_DONE;
1645
1646         ret = ath10k_pci_diag_write_access(ar, flag2_targ_addr, flag2_value);
1647         if (ret != 0) {
1648                 ath10k_err("Failed to set option val: %d\n", ret);
1649                 return ret;
1650         }
1651
1652         return 0;
1653 }
1654
1655
1656
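/*
 * Allocate and initialize each host Copy Engine according to
 * host_ce_config_wlan, reserving the last CE for the diagnostic window.
 */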
1657 static int ath10k_pci_ce_init(struct ath10k *ar)
1658 {
1659         struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1660         struct hif_ce_pipe_info *pipe_info;
1661         const struct ce_attr *attr;
1662         int pipe_num;
1663
1664         for (pipe_num = 0; pipe_num < ar_pci->ce_count; pipe_num++) {
1665                 pipe_info = &ar_pci->pipe_info[pipe_num];
1666                 pipe_info->pipe_num = pipe_num;
1667                 pipe_info->hif_ce_state = ar;
1668                 attr = &host_ce_config_wlan[pipe_num];
1669
1670                 pipe_info->ce_hdl = ath10k_ce_init(ar, pipe_num, attr);
1671                 if (pipe_info->ce_hdl == NULL) {
1672                         ath10k_err("Unable to initialize CE for pipe: %d\n",
1673                                    pipe_num);
1674
1675                         /* Safe to call here: it checks each pipe's
1676                          * ce_hdl for validity before tearing it down */
1677                         ath10k_pci_ce_deinit(ar);
1678                         return -1;
1679                 }
1680
1681                 if (pipe_num == ar_pci->ce_count - 1) {
1682                         /*
1683                          * Reserve the ultimate CE for
1684                          * diagnostic Window support
1685                          */
1686                         ar_pci->ce_diag =
1687                                 ar_pci->pipe_info[ar_pci->ce_count - 1].ce_hdl;
1688                         continue;
1689                 }
1690
1691                 pipe_info->buf_sz = (size_t) (attr->src_sz_max);
1692         }
1693
1694         /*
1695          * Initially, establish CE completion handlers for use with BMI.
1696          * These are overwritten with generic handlers after we exit BMI phase.
1697          */
1698         pipe_info = &ar_pci->pipe_info[BMI_CE_NUM_TO_TARG];
1699         ath10k_ce_send_cb_register(pipe_info->ce_hdl,
1700                                    ath10k_pci_bmi_send_done, 0);
1701
1702         pipe_info = &ar_pci->pipe_info[BMI_CE_NUM_TO_HOST];
1703         ath10k_ce_recv_cb_register(pipe_info->ce_hdl,
1704                                    ath10k_pci_bmi_recv_data);
1705
1706         return 0;
1707 }
1708
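/*
 * Check the firmware indicator register for a pending firmware event and
 * acknowledge it. A crash dump is only collected once the HIF has been
 * started; an event before that point is merely logged.
 */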
1709 static void ath10k_pci_fw_interrupt_handler(struct ath10k *ar)
1710 {
1711         struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1712         u32 fw_indicator_address, fw_indicator;
1713
1714         ath10k_pci_wake(ar);
1715
1716         fw_indicator_address = ar_pci->fw_indicator_address;
1717         fw_indicator = ath10k_pci_read32(ar, fw_indicator_address);
1718
1719         if (fw_indicator & FW_IND_EVENT_PENDING) {
1720                 /* ACK: clear Target-side pending event */
1721                 ath10k_pci_write32(ar, fw_indicator_address,
1722                                    fw_indicator & ~FW_IND_EVENT_PENDING);
1723
1724                 if (ar_pci->started) {
1725                         ath10k_pci_hif_dump_area(ar);
1726                 } else {
1727                         /*
1728                          * Probable Target failure before we're prepared
1729                          * to handle it.  Generally unexpected.
1730                          */
1731                         ath10k_warn("early firmware event indicated\n");
1732                 }
1733         }
1734
1735         ath10k_pci_sleep(ar);
1736 }
1737
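/* HIF (host interface) callbacks exported to the ath10k core. */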
1738 static const struct ath10k_hif_ops ath10k_pci_hif_ops = {
1739         .send_head              = ath10k_pci_hif_send_head,
1740         .exchange_bmi_msg       = ath10k_pci_hif_exchange_bmi_msg,
1741         .start                  = ath10k_pci_hif_start,
1742         .stop                   = ath10k_pci_hif_stop,
1743         .map_service_to_pipe    = ath10k_pci_hif_map_service_to_pipe,
1744         .get_default_pipe       = ath10k_pci_hif_get_default_pipe,
1745         .send_complete_check    = ath10k_pci_hif_send_complete_check,
1746         .init                   = ath10k_pci_hif_post_init,
1747         .get_free_queue_number  = ath10k_pci_hif_get_free_queue_number,
1748 };
1749
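/* Per-CE tasklet: service the Copy Engine whose interrupt scheduled it. */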
1750 static void ath10k_pci_ce_tasklet(unsigned long ptr)
1751 {
1752         struct hif_ce_pipe_info *pipe = (struct hif_ce_pipe_info *)ptr;
1753         struct ath10k_pci *ar_pci = pipe->ar_pci;
1754
1755         ath10k_ce_per_engine_service(ar_pci->ar, pipe->pipe_num);
1756 }
1757
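/* Tasklet scheduled from the dedicated firmware-error MSI vector. */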
1758 static void ath10k_msi_err_tasklet(unsigned long data)
1759 {
1760         struct ath10k *ar = (struct ath10k *)data;
1761
1762         ath10k_pci_fw_interrupt_handler(ar);
1763 }
1764
1765 /*
1766  * Handler for a per-engine interrupt on a PARTICULAR CE.
1767  * This is used in cases where each CE has a private MSI interrupt.
1768  */
1769 static irqreturn_t ath10k_pci_per_engine_handler(int irq, void *arg)
1770 {
1771         struct ath10k *ar = arg;
1772         struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1773         int ce_id = irq - ar_pci->pdev->irq - MSI_ASSIGN_CE_INITIAL;
1774
1775         if (ce_id < 0 || ce_id >= ARRAY_SIZE(ar_pci->pipe_info)) {
1776                 ath10k_warn("unexpected/invalid irq %d ce_id %d\n", irq, ce_id);
1777                 return IRQ_HANDLED;
1778         }
1779
1780         /*
1781          * NOTE: We are able to derive ce_id from irq because we
1782          * use a one-to-one mapping for CEs 0..5.
1783          * CEs 6 & 7 do not use interrupts at all.
1784          *
1785          * This mapping must be kept in sync with the mapping
1786          * used by firmware.
1787          */
1788         tasklet_schedule(&ar_pci->pipe_info[ce_id].intr);
1789         return IRQ_HANDLED;
1790 }
1791
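/* Handler for the MSI vector dedicated to firmware indications. */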
1792 static irqreturn_t ath10k_pci_msi_fw_handler(int irq, void *arg)
1793 {
1794         struct ath10k *ar = arg;
1795         struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1796
1797         tasklet_schedule(&ar_pci->msi_fw_err);
1798         return IRQ_HANDLED;
1799 }
1800
1801 /*
1802  * Top-level interrupt handler for all PCI interrupts from a Target.
1803  * When a block of MSI interrupts is allocated, this top-level handler
1804  * is not used; instead, we directly call the correct sub-handler.
1805  */
1806 static irqreturn_t ath10k_pci_interrupt_handler(int irq, void *arg)
1807 {
1808         struct ath10k *ar = arg;
1809         struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1810
1811         if (ar_pci->num_msi_intrs == 0) {
1812                 /*
1813                  * IMPORTANT: the INTR_CLR register has to be written after
1814                  * INTR_ENABLE is set to 0, otherwise the interrupt cannot
1815                  * really be cleared.
1816                  */
1817                 iowrite32(0, ar_pci->mem +
1818                           (SOC_CORE_BASE_ADDRESS |
1819                            PCIE_INTR_ENABLE_ADDRESS));
1820                 iowrite32(PCIE_INTR_FIRMWARE_MASK |
1821                           PCIE_INTR_CE_MASK_ALL,
1822                           ar_pci->mem + (SOC_CORE_BASE_ADDRESS |
1823                                          PCIE_INTR_CLR_ADDRESS));
1824                 /*
1825                  * IMPORTANT: this extra read transaction is required to
1826                  * flush the posted write buffer.
1827                  */
1828                 (void) ioread32(ar_pci->mem +
1829                                 (SOC_CORE_BASE_ADDRESS |
1830                                  PCIE_INTR_ENABLE_ADDRESS));
1831         }
1832
1833         tasklet_schedule(&ar_pci->intr_tq);
1834
1835         return IRQ_HANDLED;
1836 }
1837
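/*
 * Bottom half shared by legacy and single-MSI interrupts: check for
 * firmware events, service all Copy Engines, then re-enable legacy
 * interrupts (they were masked in the top-level handler).
 */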
1838 static void ath10k_pci_tasklet(unsigned long data)
1839 {
1840         struct ath10k *ar = (struct ath10k *)data;
1841         struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1842
1843         ath10k_pci_fw_interrupt_handler(ar); /* FIXME: Handle FW error */
1844         ath10k_ce_per_engine_service_any(ar);
1845
1846         if (ar_pci->num_msi_intrs == 0) {
1847                 /* Enable Legacy PCI line interrupts */
1848                 iowrite32(PCIE_INTR_FIRMWARE_MASK |
1849                           PCIE_INTR_CE_MASK_ALL,
1850                           ar_pci->mem + (SOC_CORE_BASE_ADDRESS |
1851                                          PCIE_INTR_ENABLE_ADDRESS));
1852                 /*
1853                  * IMPORTANT: this extra read transaction is required to
1854                  * flush the posted write buffer
1855                  */
1856                 (void) ioread32(ar_pci->mem +
1857                                 (SOC_CORE_BASE_ADDRESS |
1858                                  PCIE_INTR_ENABLE_ADDRESS));
1859         }
1860 }
1861
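/*
 * Request a block of MSI vectors: one for firmware indications plus one
 * per interrupt-driven Copy Engine.
 */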
1862 static int ath10k_pci_start_intr_msix(struct ath10k *ar, int num)
1863 {
1864         struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1865         int ret;
1866         int i;
1867
1868         ret = pci_enable_msi_block(ar_pci->pdev, num);
1869         if (ret)
1870                 return ret;
1871
1872         ret = request_irq(ar_pci->pdev->irq + MSI_ASSIGN_FW,
1873                           ath10k_pci_msi_fw_handler,
1874                           IRQF_SHARED, "ath10k_pci", ar);
1875         if (ret)
1876                 return ret;
1877
1878         for (i = MSI_ASSIGN_CE_INITIAL; i <= MSI_ASSIGN_CE_MAX; i++) {
1879                 ret = request_irq(ar_pci->pdev->irq + i,
1880                                   ath10k_pci_per_engine_handler,
1881                                   IRQF_SHARED, "ath10k_pci", ar);
1882                 if (ret) {
1883                         ath10k_warn("request_irq(%d) failed %d\n",
1884                                     ar_pci->pdev->irq + i, ret);
1885
1886                         for (i--; i >= MSI_ASSIGN_CE_INITIAL; i--)
1887                                 free_irq(ar_pci->pdev->irq + i, ar);
1888
1889                         pci_disable_msi(ar_pci->pdev);
1890                         return ret;
1891                 }
1892         }
1893
1894         ath10k_info("MSI-X interrupt handling (%d intrs)\n", num);
1895         return 0;
1896 }
1897
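/* Single shared MSI vector routed through the common top-level handler. */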
1898 static int ath10k_pci_start_intr_msi(struct ath10k *ar)
1899 {
1900         struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1901         int ret;
1902
1903         ret = pci_enable_msi(ar_pci->pdev);
1904         if (ret < 0)
1905                 return ret;
1906
1907         ret = request_irq(ar_pci->pdev->irq,
1908                           ath10k_pci_interrupt_handler,
1909                           IRQF_SHARED, "ath10k_pci", ar);
1910         if (ret < 0) {
1911                 pci_disable_msi(ar_pci->pdev);
1912                 return ret;
1913         }
1914
1915         ath10k_info("MSI interrupt handling\n");
1916         return 0;
1917 }
1918
1919 static int ath10k_pci_start_intr_legacy(struct ath10k *ar)
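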
1920 {
1921         struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1922         int ret;
1923
1924         ret = request_irq(ar_pci->pdev->irq,
1925                           ath10k_pci_interrupt_handler,
1926                           IRQF_SHARED, "ath10k_pci", ar);
1927         if (ret < 0)
1928                 return ret;
1929
1930         /*
1931          * Make sure to wake the Target before enabling Legacy
1932          * Interrupt.
1933          */
1934         iowrite32(PCIE_SOC_WAKE_V_MASK,
1935                   ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS +
1936                   PCIE_SOC_WAKE_ADDRESS);
1937
1938         ath10k_pci_wait(ar);
1939
1940         /*
1941          * A potential race occurs here: the CORE_BASE write depends on
1942          * the target correctly decoding the AXI address, but the host
1943          * won't know when the target has written its BAR to CORE_CTRL.
1944          * The write might get lost if the target has NOT yet written the
1945          * BAR. For now, work around the race by repeating the write in
1946          * the synchronization check below.
1947          */
1948         iowrite32(PCIE_INTR_FIRMWARE_MASK |
1949                   PCIE_INTR_CE_MASK_ALL,
1950                   ar_pci->mem + (SOC_CORE_BASE_ADDRESS |
1951                                  PCIE_INTR_ENABLE_ADDRESS));
1952         iowrite32(PCIE_SOC_WAKE_RESET,
1953                   ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS +
1954                   PCIE_SOC_WAKE_ADDRESS);
1955
1956         ath10k_info("legacy interrupt handling\n");
1957         return 0;
1958 }
1959
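/*
 * Set up interrupt delivery, preferring multi-MSI and falling back first
 * to single MSI and then to legacy line interrupts.
 */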
1960 static int ath10k_pci_start_intr(struct ath10k *ar)
1961 {
1962         struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1963         int num = MSI_NUM_REQUEST;
1964         int ret;
1965         int i;
1966
1967         tasklet_init(&ar_pci->intr_tq, ath10k_pci_tasklet, (unsigned long) ar);
1968         tasklet_init(&ar_pci->msi_fw_err, ath10k_msi_err_tasklet,
1969                      (unsigned long) ar);
1970
1971         for (i = 0; i < CE_COUNT; i++) {
1972                 ar_pci->pipe_info[i].ar_pci = ar_pci;
1973                 tasklet_init(&ar_pci->pipe_info[i].intr,
1974                              ath10k_pci_ce_tasklet,
1975                              (unsigned long)&ar_pci->pipe_info[i]);
1976         }
1977
1978         if (!test_bit(ATH10K_PCI_FEATURE_MSI_X, ar_pci->features))
1979                 num = 1;
1980
1981         if (num > 1) {
1982                 ret = ath10k_pci_start_intr_msix(ar, num);
1983                 if (ret == 0)
1984                         goto exit;
1985
1986                 ath10k_warn("MSI-X didn't succeed (%d), trying MSI\n", ret);
1987                 num = 1;
1988         }
1989
1990         if (num == 1) {
1991                 ret = ath10k_pci_start_intr_msi(ar);
1992                 if (ret == 0)
1993                         goto exit;
1994
1995                 ath10k_warn("MSI didn't succeed (%d), trying legacy INTR\n",
1996                             ret);
1997                 num = 0;
1998         }
1999
2000         ret = ath10k_pci_start_intr_legacy(ar);
2001
2002 exit:
2003         ar_pci->num_msi_intrs = num;
2004         ar_pci->ce_count = CE_COUNT;
2005         return ret;
2006 }
2007
2008 static void ath10k_pci_stop_intr(struct ath10k *ar)
2009 {
2010         struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2011         int i;
2012
2013         /* There's at least one interrupt regardless of whether it's legacy
2014          * INTR, MSI, or MSI-X */
2015         for (i = 0; i < max(1, ar_pci->num_msi_intrs); i++)
2016                 free_irq(ar_pci->pdev->irq + i, ar);
2017
2018         if (ar_pci->num_msi_intrs > 0)
2019                 pci_disable_msi(ar_pci->pdev);
2020 }
2021
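/*
 * Despite the name, this does not reset the target; it polls the firmware
 * indicator register until the target reports FW_IND_INITIALIZED.
 */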
2022 static int ath10k_pci_reset_target(struct ath10k *ar)
2023 {
2024         struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2025         int wait_limit = 300; /* 3 sec */
2026
2027         /* Wait for Target to finish initialization before we proceed. */
2028         iowrite32(PCIE_SOC_WAKE_V_MASK,
2029                   ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS +
2030                   PCIE_SOC_WAKE_ADDRESS);
2031
2032         ath10k_pci_wait(ar);
2033
2034         while (wait_limit-- &&
2035                !(ioread32(ar_pci->mem + FW_INDICATOR_ADDRESS) &
2036                  FW_IND_INITIALIZED)) {
2037                 if (ar_pci->num_msi_intrs == 0)
2038                         /* Fix potential race by repeating CORE_BASE writes */
2039                         iowrite32(PCIE_INTR_FIRMWARE_MASK |
2040                                   PCIE_INTR_CE_MASK_ALL,
2041                                   ar_pci->mem + (SOC_CORE_BASE_ADDRESS |
2042                                                  PCIE_INTR_ENABLE_ADDRESS));
2043                 mdelay(10);
2044         }
2045
2046         if (wait_limit < 0) {
2047                 ath10k_err("Target stalled\n");
2048                 iowrite32(PCIE_SOC_WAKE_RESET,
2049                           ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS +
2050                           PCIE_SOC_WAKE_ADDRESS);
2051                 return -EIO;
2052         }
2053
2054         iowrite32(PCIE_SOC_WAKE_RESET,
2055                   ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS +
2056                   PCIE_SOC_WAKE_ADDRESS);
2057
2058         return 0;
2059 }
2060
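/*
 * Cold-reset the target: wake it over PCIe, assert the SoC global reset
 * bit, wait for the RTC state machine to report cold reset, then release
 * the reset again.
 */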
2061 static void ath10k_pci_device_reset(struct ath10k_pci *ar_pci)
2062 {
2063         struct ath10k *ar = ar_pci->ar;
2064         void __iomem *mem = ar_pci->mem;
2065         int i;
2066         u32 val;
2067
2068         if (!SOC_GLOBAL_RESET_ADDRESS)
2069                 return;
2070
2071         if (!mem)
2072                 return;
2073
2074         ath10k_pci_reg_write32(mem, PCIE_SOC_WAKE_ADDRESS,
2075                                PCIE_SOC_WAKE_V_MASK);
2076         for (i = 0; i < ATH_PCI_RESET_WAIT_MAX; i++) {
2077                 if (ath10k_pci_target_is_awake(ar))
2078                         break;
2079                 msleep(1);
2080         }
2081
2082         /* Put Target, including PCIe, into RESET. */
2083         val = ath10k_pci_reg_read32(mem, SOC_GLOBAL_RESET_ADDRESS);
2084         val |= 1;
2085         ath10k_pci_reg_write32(mem, SOC_GLOBAL_RESET_ADDRESS, val);
2086
2087         for (i = 0; i < ATH_PCI_RESET_WAIT_MAX; i++) {
2088                 if (ath10k_pci_reg_read32(mem, RTC_STATE_ADDRESS) &
2089                                           RTC_STATE_COLD_RESET_MASK)
2090                         break;
2091                 msleep(1);
2092         }
2093
2094         /* Pull Target, including PCIe, out of RESET. */
2095         val &= ~1;
2096         ath10k_pci_reg_write32(mem, SOC_GLOBAL_RESET_ADDRESS, val);
2097
2098         for (i = 0; i < ATH_PCI_RESET_WAIT_MAX; i++) {
2099                 if (!(ath10k_pci_reg_read32(mem, RTC_STATE_ADDRESS) &
2100                                             RTC_STATE_COLD_RESET_MASK))
2101                         break;
2102                 msleep(1);
2103         }
2104
2105         ath10k_pci_reg_write32(mem, PCIE_SOC_WAKE_ADDRESS, PCIE_SOC_WAKE_RESET);
2106 }
2107
2108 static void ath10k_pci_dump_features(struct ath10k_pci *ar_pci)
2109 {
2110         int i;
2111
2112         for (i = 0; i < ATH10K_PCI_FEATURE_COUNT; i++) {
2113                 if (!test_bit(i, ar_pci->features))
2114                         continue;
2115
2116                 switch (i) {
2117                 case ATH10K_PCI_FEATURE_MSI_X:
2118                         ath10k_dbg(ATH10K_DBG_PCI, "device supports MSI-X\n");
2119                         break;
2120                 case ATH10K_PCI_FEATURE_HW_1_0_WARKAROUND:
2121                         ath10k_dbg(ATH10K_DBG_PCI, "QCA988X_1.0 workaround enabled\n");
2122                         break;
2123                 }
2124         }
2125 }
2126
2127 static int ath10k_pci_probe(struct pci_dev *pdev,
2128                             const struct pci_device_id *pci_dev)
2129 {
2130         void __iomem *mem;
2131         int ret = 0;
2132         struct ath10k *ar;
2133         struct ath10k_pci *ar_pci;
2134         u32 lcr_val;
2135
2136         ath10k_dbg(ATH10K_DBG_PCI, "%s\n", __func__);
2137
2138         ar_pci = kzalloc(sizeof(*ar_pci), GFP_KERNEL);
2139         if (ar_pci == NULL)
2140                 return -ENOMEM;
2141
2142         ar_pci->pdev = pdev;
2143         ar_pci->dev = &pdev->dev;
2144
2145         switch (pci_dev->device) {
2146         case QCA988X_1_0_DEVICE_ID:
2147                 set_bit(ATH10K_PCI_FEATURE_HW_1_0_WARKAROUND, ar_pci->features);
2148                 break;
2149         case QCA988X_2_0_DEVICE_ID:
2150                 set_bit(ATH10K_PCI_FEATURE_MSI_X, ar_pci->features);
2151                 break;
2152         default:
2153                 ret = -ENODEV;
2154                 ath10k_err("Unknown device ID: %d\n", pci_dev->device);
2155                 goto err_ar_pci;
2156         }
2157
2158         ath10k_pci_dump_features(ar_pci);
2159
2160         ar = ath10k_core_create(ar_pci, ar_pci->dev, ATH10K_BUS_PCI,
2161                                 &ath10k_pci_hif_ops);
2162         if (!ar) {
2163                 ath10k_err("ath10k_core_create failed!\n");
2164                 ret = -EINVAL;
2165                 goto err_ar_pci;
2166         }
2167
2168         /* Enable QCA988X_1.0 HW workarounds */
2169         if (test_bit(ATH10K_PCI_FEATURE_HW_1_0_WARKAROUND, ar_pci->features))
2170                 spin_lock_init(&ar_pci->hw_v1_workaround_lock);
2171
2172         ar_pci->ar = ar;
2173         ar_pci->fw_indicator_address = FW_INDICATOR_ADDRESS;
2174         atomic_set(&ar_pci->keep_awake_count, 0);
2175
2176         pci_set_drvdata(pdev, ar);
2177
2178         /*
2179          * The Target may have been reset or power cycled without the Host's
2180          * knowledge, and its Config Space may no longer reflect the PCI
2181          * address space that was assigned earlier by the PCI infrastructure.
2182          * Refresh it now.
2183          */
2184         ret = pci_assign_resource(pdev, BAR_NUM);
2185         if (ret) {
2186                 ath10k_err("cannot assign PCI space: %d\n", ret);
2187                 goto err_ar;
2188         }
2189
2190         ret = pci_enable_device(pdev);
2191         if (ret) {
2192                 ath10k_err("cannot enable PCI device: %d\n", ret);
2193                 goto err_ar;
2194         }
2195
2196         /* Request MMIO resources */
2197         ret = pci_request_region(pdev, BAR_NUM, "ath");
2198         if (ret) {
2199                 ath10k_err("PCI MMIO reservation error: %d\n", ret);
2200                 goto err_device;
2201         }
2202
2203         /*
2204          * Target structures have a limit of 32 bit DMA pointers.
2205          * DMA pointers can be wider than 32 bits by default on some systems.
2206          */
2207         ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
2208         if (ret) {
2209                 ath10k_err("32-bit DMA not available: %d\n", ret);
2210                 goto err_region;
2211         }
2212
2213         ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
2214         if (ret) {
2215                 ath10k_err("cannot enable 32-bit consistent DMA\n");
2216                 goto err_region;
2217         }
2218
2219         /* Set bus master bit in PCI_COMMAND to enable DMA */
2220         pci_set_master(pdev);
2221
2222         /*
2223          * Temporary FIX: disable ASPM
2224          * Will be removed after the OTP is programmed
2225          */
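        /* The low byte at config offset 0x80 carries the ASPM enable bits
         * on this device; masking it off disables ASPM. */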
2226         pci_read_config_dword(pdev, 0x80, &lcr_val);
2227         pci_write_config_dword(pdev, 0x80, (lcr_val & 0xffffff00));
2228
2229         /* Arrange for access to Target SoC registers. */
2230         mem = pci_iomap(pdev, BAR_NUM, 0);
2231         if (!mem) {
2232                 ath10k_err("PCI iomap error\n");
2233                 ret = -EIO;
2234                 goto err_master;
2235         }
2236
2237         ar_pci->mem = mem;
2238
2239         spin_lock_init(&ar_pci->ce_lock);
2240
2241         ar_pci->cacheline_sz = dma_get_cache_alignment();
2242
2243         ret = ath10k_pci_start_intr(ar);
2244         if (ret) {
2245                 ath10k_err("could not start interrupt handling (%d)\n", ret);
2246                 goto err_iomap;
2247         }
2248
2249         /*
2250          * Bring the target up cleanly.
2251          *
2252          * The target may be in an undefined state with an AUX-powered Target
2253          * and a Host in WoW mode. If the Host crashes, loses power, or is
2254          * restarted (without unloading the driver) then the Target is left
2255          * (aux) powered and running. On a subsequent driver load, the Target
2256          * is in an unexpected state. We try to catch that here in order to
2257          * reset the Target and retry the probe.
2258          */
2259         ath10k_pci_device_reset(ar_pci);
2260
2261         ret = ath10k_pci_reset_target(ar);
2262         if (ret)
2263                 goto err_intr;
2264
2265         if (ath10k_target_ps) {
2266                 ath10k_dbg(ATH10K_DBG_PCI, "on-chip power save enabled\n");
2267         } else {
2268                 /* Force AWAKE forever */
2269                 ath10k_dbg(ATH10K_DBG_PCI, "on-chip power save disabled\n");
2270                 ath10k_do_pci_wake(ar);
2271         }
2272
2273         ret = ath10k_pci_ce_init(ar);
2274         if (ret)
2275                 goto err_intr;
2276
2277         ret = ath10k_pci_init_config(ar);
2278         if (ret)
2279                 goto err_ce;
2280
2281         ret = ath10k_pci_wake_target_cpu(ar);
2282         if (ret) {
2283                 ath10k_err("could not wake up target CPU (%d)\n", ret);
2284                 goto err_ce;
2285         }
2286
2287         ret = ath10k_core_register(ar);
2288         if (ret) {
2289                 ath10k_err("could not register driver core (%d)\n", ret);
2290                 goto err_ce;
2291         }
2292
2293         return 0;
2294
2295 err_ce:
2296         ath10k_pci_ce_deinit(ar);
2297 err_intr:
2298         ath10k_pci_stop_intr(ar);
2299 err_iomap:
2300         pci_iounmap(pdev, mem);
2301 err_master:
2302         pci_clear_master(pdev);
2303 err_region:
2304         pci_release_region(pdev, BAR_NUM);
2305 err_device:
2306         pci_disable_device(pdev);
2307 err_ar:
2308         pci_set_drvdata(pdev, NULL);
2309         ath10k_core_destroy(ar);
2310 err_ar_pci:
2311         /* call HIF PCI free here */
2312         kfree(ar_pci);
2313
2314         return ret;
2315 }
2316
2317 static void ath10k_pci_remove(struct pci_dev *pdev)
2318 {
2319         struct ath10k *ar = pci_get_drvdata(pdev);
2320         struct ath10k_pci *ar_pci;
2321
2322         ath10k_dbg(ATH10K_DBG_PCI, "%s\n", __func__);
2323
2324         if (!ar)
2325                 return;
2326
2327         ar_pci = ath10k_pci_priv(ar);
2328
2329         if (!ar_pci)
2330                 return;
2331
2332         tasklet_kill(&ar_pci->msi_fw_err);
2333
2334         ath10k_core_unregister(ar);
2335         ath10k_pci_stop_intr(ar);
2336
2337         pci_set_drvdata(pdev, NULL);
2338         pci_iounmap(pdev, ar_pci->mem);
2339         pci_release_region(pdev, BAR_NUM);
2340         pci_clear_master(pdev);
2341         pci_disable_device(pdev);
2342
2343         ath10k_core_destroy(ar);
2344         kfree(ar_pci);
2345 }
2346
2347 #if defined(CONFIG_PM_SLEEP)
2348
2349 #define ATH10K_PCI_PM_CONTROL 0x44
2350
2351 static int ath10k_pci_suspend(struct device *device)
2352 {
2353         struct pci_dev *pdev = to_pci_dev(device);
2354         struct ath10k *ar = pci_get_drvdata(pdev);
2355         struct ath10k_pci *ar_pci;
2356         u32 val;
2357         int ret, retval;
2358
2359         ath10k_dbg(ATH10K_DBG_PCI, "%s\n", __func__);
2360
2361         if (!ar)
2362                 return -ENODEV;
2363
2364         ar_pci = ath10k_pci_priv(ar);
2365         if (!ar_pci)
2366                 return -ENODEV;
2367
2368         if (ath10k_core_target_suspend(ar))
2369                 return -EBUSY;
2370
2371         ret = wait_event_interruptible_timeout(ar->event_queue,
2372                                                 ar->is_target_paused == true,
2373                                                 1 * HZ);
2374         if (ret < 0) {
2375                 ath10k_warn("suspend interrupted (%d)\n", ret);
2376                 retval = ret;
2377                 goto resume;
2378         } else if (ret == 0) {
2379                 ath10k_warn("suspend timed out - target pause event never came\n");
2380                 retval = -EIO;
2381                 goto resume;
2382         }
2383
2384         /*
2385          * Reset is_target_paused so the next suspend waits for a fresh
2386          * pause event. If it stayed true, the host would skip the wait
2387          * and suspend while the target is still running, which triggers
2388          * a target assert.
2389          */
2390         ar->is_target_paused = false;
2391
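        /* If the power state field (bits [1:0] of the PM control/status
         * register) is not already D3hot (0x3), save state and move the
         * device there. */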
2392         pci_read_config_dword(pdev, ATH10K_PCI_PM_CONTROL, &val);
2393
2394         if ((val & 0x000000ff) != 0x3) {
2395                 pci_save_state(pdev);
2396                 pci_disable_device(pdev);
2397                 pci_write_config_dword(pdev, ATH10K_PCI_PM_CONTROL,
2398                                        (val & 0xffffff00) | 0x03);
2399         }
2400
2401         return 0;
2402 resume:
2403         ret = ath10k_core_target_resume(ar);
2404         if (ret)
2405                 ath10k_warn("could not resume (%d)\n", ret);
2406
2407         return retval;
2408 }
2409
2410 static int ath10k_pci_resume(struct device *device)
2411 {
2412         struct pci_dev *pdev = to_pci_dev(device);
2413         struct ath10k *ar = pci_get_drvdata(pdev);
2414         struct ath10k_pci *ar_pci;
2415         int ret;
2416         u32 val;
2417
2418         ath10k_dbg(ATH10K_DBG_PCI, "%s\n", __func__);
2419
2420         if (!ar)
2421                 return -ENODEV;
2422         ar_pci = ath10k_pci_priv(ar);
2423
2424         if (!ar_pci)
2425                 return -ENODEV;
2426
2427         ret = pci_enable_device(pdev);
2428         if (ret) {
2429                 ath10k_warn("cannot enable PCI device: %d\n", ret);
2430                 return ret;
2431         }
2432
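        /* A non-zero power state field means the device is not in D0;
         * restore config space and return it to full power. */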
2433         pci_read_config_dword(pdev, ATH10K_PCI_PM_CONTROL, &val);
2434
2435         if ((val & 0x000000ff) != 0) {
2436                 pci_restore_state(pdev);
2437                 pci_write_config_dword(pdev, ATH10K_PCI_PM_CONTROL,
2438                                        val & 0xffffff00);
2439                 /*
2440                  * Suspend/Resume resets the PCI configuration space,
2441                  * so we have to re-disable the RETRY_TIMEOUT register (0x41)
2442                  * to keep PCI Tx retries from interfering with C3 CPU state
2443                  */
2444                 pci_read_config_dword(pdev, 0x40, &val);
2445
2446                 if ((val & 0x0000ff00) != 0)
2447                         pci_write_config_dword(pdev, 0x40, val & 0xffff00ff);
2448         }
2449
2450         ret = ath10k_core_target_resume(ar);
2451         if (ret)
2452                 ath10k_warn("target resume failed: %d\n", ret);
2453
2454         return ret;
2455 }
2456
2457 static SIMPLE_DEV_PM_OPS(ath10k_dev_pm_ops,
2458                          ath10k_pci_suspend,
2459                          ath10k_pci_resume);
2460
2461 #define ATH10K_PCI_PM_OPS (&ath10k_dev_pm_ops)
2462
2463 #else
2464
2465 #define ATH10K_PCI_PM_OPS NULL
2466
2467 #endif /* CONFIG_PM_SLEEP */
2468
2469 MODULE_DEVICE_TABLE(pci, ath10k_pci_id_table);
2470
2471 static struct pci_driver ath10k_pci_driver = {
2472         .name = "ath10k_pci",
2473         .id_table = ath10k_pci_id_table,
2474         .probe = ath10k_pci_probe,
2475         .remove = ath10k_pci_remove,
2476         .driver.pm = ATH10K_PCI_PM_OPS,
2477 };
2478
2479 static int __init ath10k_pci_init(void)
2480 {
2481         int ret;
2482
2483         ret = pci_register_driver(&ath10k_pci_driver);
2484         if (ret)
2485                 ath10k_err("pci_register_driver failed [%d]\n", ret);
2486
2487         return ret;
2488 }
2489 module_init(ath10k_pci_init);
2490
2491 static void __exit ath10k_pci_exit(void)
2492 {
2493         pci_unregister_driver(&ath10k_pci_driver);
2494 }
2495
2496 module_exit(ath10k_pci_exit);
2497
2498 MODULE_AUTHOR("Qualcomm Atheros");
2499 MODULE_DESCRIPTION("Driver support for Atheros QCA988X PCIe devices");
2500 MODULE_LICENSE("Dual BSD/GPL");
2501 MODULE_FIRMWARE(QCA988X_HW_1_0_FW_DIR "/" QCA988X_HW_1_0_FW_FILE);
2502 MODULE_FIRMWARE(QCA988X_HW_1_0_FW_DIR "/" QCA988X_HW_1_0_OTP_FILE);
2503 MODULE_FIRMWARE(QCA988X_HW_1_0_FW_DIR "/" QCA988X_HW_1_0_BOARD_DATA_FILE);
2504 MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" QCA988X_HW_2_0_FW_FILE);
2505 MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" QCA988X_HW_2_0_OTP_FILE);
2506 MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" QCA988X_HW_2_0_BOARD_DATA_FILE);