/*
 * Copyright (c) 2005-2011 Atheros Communications Inc.
 * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <linux/pci.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>

#include "core.h"
#include "debug.h"

#include "targaddrs.h"
#include "bmi.h"

#include "hif.h"
#include "htc.h"

#include "ce.h"
#include "pci.h"

static unsigned int ath10k_target_ps;
module_param(ath10k_target_ps, uint, 0644);
MODULE_PARM_DESC(ath10k_target_ps, "Enable ath10k Target (SoC) PS option");

#define QCA988X_2_0_DEVICE_ID   (0x003c)

static const struct pci_device_id ath10k_pci_id_table[] = {
        { PCI_VDEVICE(ATHEROS, QCA988X_2_0_DEVICE_ID) }, /* PCI-E QCA988X V2 */
        {0}
};

static int ath10k_pci_diag_read_access(struct ath10k *ar, u32 address,
                                       u32 *data);

static void ath10k_pci_process_ce(struct ath10k *ar);
static int ath10k_pci_post_rx(struct ath10k *ar);
static int ath10k_pci_post_rx_pipe(struct ath10k_pci_pipe *pipe_info,
                                   int num);
static void ath10k_pci_rx_pipe_cleanup(struct ath10k_pci_pipe *pipe_info);
static void ath10k_pci_stop_ce(struct ath10k *ar);
static void ath10k_pci_device_reset(struct ath10k *ar);
static int ath10k_pci_reset_target(struct ath10k *ar);
static int ath10k_pci_start_intr(struct ath10k *ar);
static void ath10k_pci_stop_intr(struct ath10k *ar);

static const struct ce_attr host_ce_config_wlan[] = {
        /* CE0: host->target HTC control and raw streams */
        {
                .flags = CE_ATTR_FLAGS,
                .src_nentries = 16,
                .src_sz_max = 256,
                .dest_nentries = 0,
        },

        /* CE1: target->host HTT + HTC control */
        {
                .flags = CE_ATTR_FLAGS,
                .src_nentries = 0,
                .src_sz_max = 512,
                .dest_nentries = 512,
        },

        /* CE2: target->host WMI */
        {
                .flags = CE_ATTR_FLAGS,
                .src_nentries = 0,
                .src_sz_max = 2048,
                .dest_nentries = 32,
        },

        /* CE3: host->target WMI */
        {
                .flags = CE_ATTR_FLAGS,
                .src_nentries = 32,
                .src_sz_max = 2048,
                .dest_nentries = 0,
        },

        /* CE4: host->target HTT */
        {
                .flags = CE_ATTR_FLAGS | CE_ATTR_DIS_INTR,
                .src_nentries = CE_HTT_H2T_MSG_SRC_NENTRIES,
                .src_sz_max = 256,
                .dest_nentries = 0,
        },

        /* CE5: unused */
        {
                .flags = CE_ATTR_FLAGS,
                .src_nentries = 0,
                .src_sz_max = 0,
                .dest_nentries = 0,
        },

        /* CE6: target autonomous hif_memcpy */
        {
                .flags = CE_ATTR_FLAGS,
                .src_nentries = 0,
                .src_sz_max = 0,
                .dest_nentries = 0,
        },

        /* CE7: ce_diag, the Diagnostic Window */
        {
                .flags = CE_ATTR_FLAGS,
                .src_nentries = 2,
                .src_sz_max = DIAG_TRANSFER_LIMIT,
                .dest_nentries = 2,
        },
};

/* Target firmware's Copy Engine configuration. */
static const struct ce_pipe_config target_ce_config_wlan[] = {
        /* CE0: host->target HTC control and raw streams */
        {
                .pipenum = 0,
                .pipedir = PIPEDIR_OUT,
                .nentries = 32,
                .nbytes_max = 256,
                .flags = CE_ATTR_FLAGS,
                .reserved = 0,
        },

        /* CE1: target->host HTT + HTC control */
        {
                .pipenum = 1,
                .pipedir = PIPEDIR_IN,
                .nentries = 32,
                .nbytes_max = 512,
                .flags = CE_ATTR_FLAGS,
                .reserved = 0,
        },

        /* CE2: target->host WMI */
        {
                .pipenum = 2,
                .pipedir = PIPEDIR_IN,
                .nentries = 32,
                .nbytes_max = 2048,
                .flags = CE_ATTR_FLAGS,
                .reserved = 0,
        },

        /* CE3: host->target WMI */
        {
                .pipenum = 3,
                .pipedir = PIPEDIR_OUT,
                .nentries = 32,
                .nbytes_max = 2048,
                .flags = CE_ATTR_FLAGS,
                .reserved = 0,
        },

        /* CE4: host->target HTT */
        {
                .pipenum = 4,
                .pipedir = PIPEDIR_OUT,
                /* NB: 50% of src nentries, since tx has 2 frags */
                .nentries = 256,
                .nbytes_max = 256,
                .flags = CE_ATTR_FLAGS,
                .reserved = 0,
        },

        /* CE5: unused */
        {
                .pipenum = 5,
                .pipedir = PIPEDIR_OUT,
                .nentries = 32,
                .nbytes_max = 2048,
                .flags = CE_ATTR_FLAGS,
                .reserved = 0,
        },

        /* CE6: Reserved for target autonomous hif_memcpy */
        {
                .pipenum = 6,
                .pipedir = PIPEDIR_INOUT,
                .nentries = 32,
                .nbytes_max = 4096,
                .flags = CE_ATTR_FLAGS,
                .reserved = 0,
        },

        /* CE7 used only by Host */
};

/*
 * Diagnostic read/write access is provided for startup/config/debug usage.
 * Caller must guarantee proper alignment, when applicable, and single user
 * at any moment.
 */
static int ath10k_pci_diag_read_mem(struct ath10k *ar, u32 address, void *data,
                                    int nbytes)
{
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        int ret = 0;
        u32 buf;
        unsigned int completed_nbytes, orig_nbytes, remaining_bytes;
        unsigned int id;
        unsigned int flags;
        struct ath10k_ce_pipe *ce_diag;
        /* Host buffer address in CE space */
        u32 ce_data;
        dma_addr_t ce_data_base = 0;
        void *data_buf = NULL;
        int i;

        /*
         * This code cannot handle reads to non-memory space. Redirect to the
         * register read fn but preserve the multi word read capability of
         * this fn.
         */
        if (address < DRAM_BASE_ADDRESS) {
                if (!IS_ALIGNED(address, 4) ||
                    !IS_ALIGNED((unsigned long)data, 4))
                        return -EIO;

                while ((nbytes >= 4) && ((ret = ath10k_pci_diag_read_access(
                                          ar, address, (u32 *)data)) == 0)) {
                        nbytes -= sizeof(u32);
                        address += sizeof(u32);
                        data += sizeof(u32);
                }
                return ret;
        }

        ce_diag = ar_pci->ce_diag;

        /*
         * Allocate a temporary bounce buffer to hold caller's data
         * to be DMA'ed from Target. This guarantees
         *   1) 4-byte alignment
         *   2) Buffer in DMA-able space
         */
        orig_nbytes = nbytes;
        data_buf = (unsigned char *)pci_alloc_consistent(ar_pci->pdev,
                                                         orig_nbytes,
                                                         &ce_data_base);

        if (!data_buf) {
                ret = -ENOMEM;
                goto done;
        }
        memset(data_buf, 0, orig_nbytes);

        remaining_bytes = orig_nbytes;
        ce_data = ce_data_base;
        while (remaining_bytes) {
                nbytes = min_t(unsigned int, remaining_bytes,
                               DIAG_TRANSFER_LIMIT);

                ret = ath10k_ce_recv_buf_enqueue(ce_diag, NULL, ce_data);
                if (ret != 0)
                        goto done;

                /* Request CE to send from Target(!) address to Host buffer */
                /*
                 * The address supplied by the caller is in the
                 * Target CPU virtual address space.
                 *
                 * In order to use this address with the diagnostic CE,
                 * convert it from Target CPU virtual address space
                 * to CE address space.
                 */
                ath10k_pci_wake(ar);
                address = TARG_CPU_SPACE_TO_CE_SPACE(ar, ar_pci->mem,
                                                     address);
                ath10k_pci_sleep(ar);

                ret = ath10k_ce_send(ce_diag, NULL, (u32)address, nbytes,
                                     0, 0);
                if (ret)
                        goto done;

                i = 0;
                while (ath10k_ce_completed_send_next(ce_diag, NULL, &buf,
                                                     &completed_nbytes,
                                                     &id) != 0) {
                        mdelay(1);
                        if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) {
                                ret = -EBUSY;
                                goto done;
                        }
                }

                if (nbytes != completed_nbytes) {
                        ret = -EIO;
                        goto done;
                }

                if (buf != (u32)address) {
                        ret = -EIO;
                        goto done;
                }

                i = 0;
                while (ath10k_ce_completed_recv_next(ce_diag, NULL, &buf,
                                                     &completed_nbytes,
                                                     &id, &flags) != 0) {
                        mdelay(1);

                        if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) {
                                ret = -EBUSY;
                                goto done;
                        }
                }

                if (nbytes != completed_nbytes) {
                        ret = -EIO;
                        goto done;
                }

                if (buf != ce_data) {
                        ret = -EIO;
                        goto done;
                }

                remaining_bytes -= nbytes;
                address += nbytes;
                ce_data += nbytes;
        }

done:
        if (ret == 0) {
                /* Copy data from allocated DMA buf to caller's buf */
                WARN_ON_ONCE(orig_nbytes & 3);
                for (i = 0; i < orig_nbytes / sizeof(__le32); i++) {
                        ((u32 *)data)[i] =
                                __le32_to_cpu(((__le32 *)data_buf)[i]);
                }
        } else {
                ath10k_dbg(ATH10K_DBG_PCI, "%s failure (0x%x)\n",
                           __func__, address);
        }

        if (data_buf)
                pci_free_consistent(ar_pci->pdev, orig_nbytes,
                                    data_buf, ce_data_base);

        return ret;
}

/* Read 4-byte aligned data from Target memory or register */
static int ath10k_pci_diag_read_access(struct ath10k *ar, u32 address,
                                       u32 *data)
{
        /* Assume range doesn't cross this boundary */
        if (address >= DRAM_BASE_ADDRESS)
                return ath10k_pci_diag_read_mem(ar, address, data, sizeof(u32));

        ath10k_pci_wake(ar);
        *data = ath10k_pci_read32(ar, address);
        ath10k_pci_sleep(ar);
        return 0;
}

static int ath10k_pci_diag_write_mem(struct ath10k *ar, u32 address,
                                     const void *data, int nbytes)
{
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        int ret = 0;
        u32 buf;
        unsigned int completed_nbytes, orig_nbytes, remaining_bytes;
        unsigned int id;
        unsigned int flags;
        struct ath10k_ce_pipe *ce_diag;
        void *data_buf = NULL;
        u32 ce_data;    /* Host buffer address in CE space */
        dma_addr_t ce_data_base = 0;
        int i;

        ce_diag = ar_pci->ce_diag;

        /*
         * Allocate a temporary bounce buffer to hold caller's data
         * to be DMA'ed to Target. This guarantees
         *   1) 4-byte alignment
         *   2) Buffer in DMA-able space
         */
        orig_nbytes = nbytes;
        data_buf = (unsigned char *)pci_alloc_consistent(ar_pci->pdev,
                                                         orig_nbytes,
                                                         &ce_data_base);
        if (!data_buf) {
                ret = -ENOMEM;
                goto done;
        }

        /* Copy caller's data to allocated DMA buf */
        WARN_ON_ONCE(orig_nbytes & 3);
        for (i = 0; i < orig_nbytes / sizeof(__le32); i++)
                ((__le32 *)data_buf)[i] = __cpu_to_le32(((u32 *)data)[i]);

        /*
         * The address supplied by the caller is in the
         * Target CPU virtual address space.
         *
         * In order to use this address with the diagnostic CE,
         * convert it from
         *    Target CPU virtual address space
         * to
         *    CE address space
         */
        ath10k_pci_wake(ar);
        address = TARG_CPU_SPACE_TO_CE_SPACE(ar, ar_pci->mem, address);
        ath10k_pci_sleep(ar);

        remaining_bytes = orig_nbytes;
        ce_data = ce_data_base;
        while (remaining_bytes) {
                /* FIXME: check cast */
                nbytes = min_t(int, remaining_bytes, DIAG_TRANSFER_LIMIT);

                /* Set up to receive directly into Target(!) address */
                ret = ath10k_ce_recv_buf_enqueue(ce_diag, NULL, address);
                if (ret != 0)
                        goto done;

                /*
                 * Request CE to send caller-supplied data that
                 * was copied to bounce buffer to Target(!) address.
                 */
                ret = ath10k_ce_send(ce_diag, NULL, (u32)ce_data,
                                     nbytes, 0, 0);
                if (ret != 0)
                        goto done;

                i = 0;
                while (ath10k_ce_completed_send_next(ce_diag, NULL, &buf,
                                                     &completed_nbytes,
                                                     &id) != 0) {
                        mdelay(1);

                        if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) {
                                ret = -EBUSY;
                                goto done;
                        }
                }

                if (nbytes != completed_nbytes) {
                        ret = -EIO;
                        goto done;
                }

                if (buf != ce_data) {
                        ret = -EIO;
                        goto done;
                }

                i = 0;
                while (ath10k_ce_completed_recv_next(ce_diag, NULL, &buf,
                                                     &completed_nbytes,
                                                     &id, &flags) != 0) {
                        mdelay(1);

                        if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) {
                                ret = -EBUSY;
                                goto done;
                        }
                }

                if (nbytes != completed_nbytes) {
                        ret = -EIO;
                        goto done;
                }

                if (buf != address) {
                        ret = -EIO;
                        goto done;
                }

                remaining_bytes -= nbytes;
                address += nbytes;
                ce_data += nbytes;
        }

done:
        if (data_buf) {
                pci_free_consistent(ar_pci->pdev, orig_nbytes, data_buf,
                                    ce_data_base);
        }

        if (ret != 0)
                ath10k_dbg(ATH10K_DBG_PCI, "%s failure (0x%x)\n", __func__,
                           address);

        return ret;
}

/* Write 4-byte aligned data to Target memory or register */
static int ath10k_pci_diag_write_access(struct ath10k *ar, u32 address,
                                        u32 data)
{
        /* Assume range doesn't cross this boundary */
        if (address >= DRAM_BASE_ADDRESS)
                return ath10k_pci_diag_write_mem(ar, address, &data,
                                                 sizeof(u32));

        ath10k_pci_wake(ar);
        ath10k_pci_write32(ar, address, data);
        ath10k_pci_sleep(ar);
        return 0;
}

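/*
 * Check whether the SoC's RTC reports the ON state, i.e. the target is
 * fully awake and its registers can be accessed safely.
 */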
static bool ath10k_pci_target_is_awake(struct ath10k *ar)
{
        void __iomem *mem = ath10k_pci_priv(ar)->mem;
        u32 val;

        val = ioread32(mem + PCIE_LOCAL_BASE_ADDRESS +
                       RTC_STATE_ADDRESS);
        return (RTC_STATE_V_GET(val) == RTC_STATE_V_ON);
}

static void ath10k_pci_wait(struct ath10k *ar)
{
        int n = 100;

        while (n-- && !ath10k_pci_target_is_awake(ar))
                msleep(10);

        if (n < 0)
                ath10k_warn("Unable to wake up target\n");
}

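/*
 * Force the target to stay awake. Wakeups are reference counted via
 * keep_awake_count: the first caller asserts PCIE_SOC_WAKE, then the
 * function polls with increasing delay (5 us up to 50 us per iteration)
 * until the target reports awake or PCIE_WAKE_TIMEOUT is exceeded.
 */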
int ath10k_do_pci_wake(struct ath10k *ar)
{
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        void __iomem *pci_addr = ar_pci->mem;
        int tot_delay = 0;
        int curr_delay = 5;

        if (atomic_read(&ar_pci->keep_awake_count) == 0) {
                /* Force AWAKE */
                iowrite32(PCIE_SOC_WAKE_V_MASK,
                          pci_addr + PCIE_LOCAL_BASE_ADDRESS +
                          PCIE_SOC_WAKE_ADDRESS);
        }
        atomic_inc(&ar_pci->keep_awake_count);

        if (ar_pci->verified_awake)
                return 0;

        for (;;) {
                if (ath10k_pci_target_is_awake(ar)) {
                        ar_pci->verified_awake = true;
                        return 0;
                }

                if (tot_delay > PCIE_WAKE_TIMEOUT) {
                        ath10k_warn("target took longer than %d us to wake up (awake count %d)\n",
                                    PCIE_WAKE_TIMEOUT,
                                    atomic_read(&ar_pci->keep_awake_count));
                        return -ETIMEDOUT;
                }

                udelay(curr_delay);
                tot_delay += curr_delay;

                if (curr_delay < 50)
                        curr_delay += 5;
        }
}

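/*
 * Drop one wakeup reference. When the last reference goes away, clear
 * verified_awake and allow the target to sleep again by resetting the
 * PCIE_SOC_WAKE register.
 */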
void ath10k_do_pci_sleep(struct ath10k *ar)
{
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        void __iomem *pci_addr = ar_pci->mem;

        if (atomic_dec_and_test(&ar_pci->keep_awake_count)) {
                /* Allow sleep */
                ar_pci->verified_awake = false;
                iowrite32(PCIE_SOC_WAKE_RESET,
                          pci_addr + PCIE_LOCAL_BASE_ADDRESS +
                          PCIE_SOC_WAKE_ADDRESS);
        }
}

/*
 * FIXME: Handle OOM properly.
 */
static inline
struct ath10k_pci_compl *get_free_compl(struct ath10k_pci_pipe *pipe_info)
{
        struct ath10k_pci_compl *compl = NULL;

        spin_lock_bh(&pipe_info->pipe_lock);
        if (list_empty(&pipe_info->compl_free)) {
                ath10k_warn("Completion buffers are full\n");
                goto exit;
        }
        compl = list_first_entry(&pipe_info->compl_free,
                                 struct ath10k_pci_compl, list);
        list_del(&compl->list);
exit:
        spin_unlock_bh(&pipe_info->pipe_lock);
        return compl;
}

/* Called by lower (CE) layer when a send to Target completes. */
static void ath10k_pci_ce_send_done(struct ath10k_ce_pipe *ce_state,
                                    void *transfer_context,
                                    u32 ce_data,
                                    unsigned int nbytes,
                                    unsigned int transfer_id)
{
        struct ath10k *ar = ce_state->ar;
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        struct ath10k_pci_pipe *pipe_info = &ar_pci->pipe_info[ce_state->id];
        struct ath10k_pci_compl *compl;
        bool process = false;

        do {
                /*
                 * For the send completion of an item in sendlist, just
                 * increment num_sends_allowed. The upper layer callback will
                 * be triggered when the last fragment is done with send.
                 */
                if (transfer_context == CE_SENDLIST_ITEM_CTXT) {
                        spin_lock_bh(&pipe_info->pipe_lock);
                        pipe_info->num_sends_allowed++;
                        spin_unlock_bh(&pipe_info->pipe_lock);
                        continue;
                }

                compl = get_free_compl(pipe_info);
                if (!compl)
                        break;

                compl->state = ATH10K_PCI_COMPL_SEND;
                compl->ce_state = ce_state;
                compl->pipe_info = pipe_info;
                compl->skb = transfer_context;
                compl->nbytes = nbytes;
                compl->transfer_id = transfer_id;
                compl->flags = 0;

                /*
                 * Add the completion to the processing queue.
                 */
                spin_lock_bh(&ar_pci->compl_lock);
                list_add_tail(&compl->list, &ar_pci->compl_process);
                spin_unlock_bh(&ar_pci->compl_lock);

                process = true;
        } while (ath10k_ce_completed_send_next(ce_state,
                                               &transfer_context,
                                               &ce_data, &nbytes,
                                               &transfer_id) == 0);

        /*
         * If only some of the items within a sendlist have completed,
         * don't invoke completion processing until the entire sendlist
         * has been sent.
         */
        if (!process)
                return;

        ath10k_pci_process_ce(ar);
}

/* Called by lower (CE) layer when data is received from the Target. */
static void ath10k_pci_ce_recv_data(struct ath10k_ce_pipe *ce_state,
                                    void *transfer_context, u32 ce_data,
                                    unsigned int nbytes,
                                    unsigned int transfer_id,
                                    unsigned int flags)
{
        struct ath10k *ar = ce_state->ar;
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        struct ath10k_pci_pipe *pipe_info = &ar_pci->pipe_info[ce_state->id];
        struct ath10k_pci_compl *compl;
        struct sk_buff *skb;

        do {
                compl = get_free_compl(pipe_info);
                if (!compl)
                        break;

                compl->state = ATH10K_PCI_COMPL_RECV;
                compl->ce_state = ce_state;
                compl->pipe_info = pipe_info;
                compl->skb = transfer_context;
                compl->nbytes = nbytes;
                compl->transfer_id = transfer_id;
                compl->flags = flags;

                skb = transfer_context;
                dma_unmap_single(ar->dev, ATH10K_SKB_CB(skb)->paddr,
                                 skb->len + skb_tailroom(skb),
                                 DMA_FROM_DEVICE);
                /*
                 * Add the completion to the processing queue.
                 */
                spin_lock_bh(&ar_pci->compl_lock);
                list_add_tail(&compl->list, &ar_pci->compl_process);
                spin_unlock_bh(&ar_pci->compl_lock);

        } while (ath10k_ce_completed_recv_next(ce_state,
                                               &transfer_context,
                                               &ce_data, &nbytes,
                                               &transfer_id,
                                               &flags) == 0);

        ath10k_pci_process_ce(ar);
}

/* Send the first nbytes bytes of the buffer */
static int ath10k_pci_hif_send_head(struct ath10k *ar, u8 pipe_id,
                                    unsigned int transfer_id,
                                    unsigned int bytes, struct sk_buff *nbuf)
{
        struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(nbuf);
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        struct ath10k_pci_pipe *pipe_info = &(ar_pci->pipe_info[pipe_id]);
        struct ath10k_ce_pipe *ce_hdl = pipe_info->ce_hdl;
        struct ce_sendlist sendlist;
        unsigned int len;
        u32 flags = 0;
        int ret;

        memset(&sendlist, 0, sizeof(struct ce_sendlist));

        len = min(bytes, nbuf->len);
        bytes -= len;

        if (len & 3)
                ath10k_warn("skb not aligned to 4-byte boundary (%d)\n", len);

        ath10k_dbg(ATH10K_DBG_PCI,
                   "pci send data vaddr %p paddr 0x%llx len %d as %d bytes\n",
                   nbuf->data, (unsigned long long)skb_cb->paddr,
                   nbuf->len, len);
        ath10k_dbg_dump(ATH10K_DBG_PCI_DUMP, NULL,
                        "ath10k tx: data: ",
                        nbuf->data, nbuf->len);

        ath10k_ce_sendlist_buf_add(&sendlist, skb_cb->paddr, len, flags);

        /* Make sure we have resources to handle this request */
        spin_lock_bh(&pipe_info->pipe_lock);
        if (!pipe_info->num_sends_allowed) {
                ath10k_warn("Pipe: %d is full\n", pipe_id);
                spin_unlock_bh(&pipe_info->pipe_lock);
                return -ENOSR;
        }
        pipe_info->num_sends_allowed--;
        spin_unlock_bh(&pipe_info->pipe_lock);

        ret = ath10k_ce_sendlist_send(ce_hdl, nbuf, &sendlist, transfer_id);
        if (ret)
                ath10k_warn("CE send failed: %p\n", nbuf);

        return ret;
}

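/* Number of send slots currently available on the given pipe. */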
static u16 ath10k_pci_hif_get_free_queue_number(struct ath10k *ar, u8 pipe)
{
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        struct ath10k_pci_pipe *pipe_info = &(ar_pci->pipe_info[pipe]);
        int ret;

        spin_lock_bh(&pipe_info->pipe_lock);
        ret = pipe_info->num_sends_allowed;
        spin_unlock_bh(&pipe_info->pipe_lock);

        return ret;
}

static void ath10k_pci_hif_dump_area(struct ath10k *ar)
{
        u32 reg_dump_area = 0;
        u32 reg_dump_values[REG_DUMP_COUNT_QCA988X] = {};
        u32 host_addr;
        int ret;
        u32 i;

        ath10k_err("firmware crashed!\n");
        ath10k_err("hardware name %s version 0x%x\n",
                   ar->hw_params.name, ar->target_version);
        ath10k_err("firmware version: %u.%u.%u.%u\n", ar->fw_version_major,
                   ar->fw_version_minor, ar->fw_version_release,
                   ar->fw_version_build);

        host_addr = host_interest_item_address(HI_ITEM(hi_failure_state));
        if (ath10k_pci_diag_read_mem(ar, host_addr,
                                     &reg_dump_area, sizeof(u32)) != 0) {
                ath10k_warn("could not read hi_failure_state\n");
                return;
        }

        ath10k_err("target register dump location: 0x%08X\n", reg_dump_area);

        ret = ath10k_pci_diag_read_mem(ar, reg_dump_area,
                                       &reg_dump_values[0],
                                       REG_DUMP_COUNT_QCA988X * sizeof(u32));
        if (ret != 0) {
                ath10k_err("could not dump FW Dump Area\n");
                return;
        }

        BUILD_BUG_ON(REG_DUMP_COUNT_QCA988X % 4);

        ath10k_err("target register dump\n");
        for (i = 0; i < REG_DUMP_COUNT_QCA988X; i += 4)
                ath10k_err("[%02d]: 0x%08X 0x%08X 0x%08X 0x%08X\n",
                           i,
                           reg_dump_values[i],
                           reg_dump_values[i + 1],
                           reg_dump_values[i + 2],
                           reg_dump_values[i + 3]);

        ieee80211_queue_work(ar->hw, &ar->restart_work);
}

static void ath10k_pci_hif_send_complete_check(struct ath10k *ar, u8 pipe,
                                               int force)
{
        if (!force) {
                int resources;
                /*
                 * Decide whether to actually poll for completions, or just
                 * wait for a later chance.
                 * If there seem to be plenty of resources left, then just wait
                 * since checking involves reading a CE register, which is a
                 * relatively expensive operation.
                 */
                resources = ath10k_pci_hif_get_free_queue_number(ar, pipe);

                /*
                 * If at least 50% of the total resources are still available,
                 * don't bother checking again yet.
                 */
                if (resources > (host_ce_config_wlan[pipe].src_nentries >> 1))
                        return;
        }
        ath10k_ce_per_engine_service(ar, pipe);
}

static void ath10k_pci_hif_set_callbacks(struct ath10k *ar,
                                         struct ath10k_hif_cb *callbacks)
{
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

        ath10k_dbg(ATH10K_DBG_PCI, "%s\n", __func__);

        memcpy(&ar_pci->msg_callbacks_current, callbacks,
               sizeof(ar_pci->msg_callbacks_current));
}

static int ath10k_pci_start_ce(struct ath10k *ar)
{
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        struct ath10k_ce_pipe *ce_diag = ar_pci->ce_diag;
        const struct ce_attr *attr;
        struct ath10k_pci_pipe *pipe_info;
        struct ath10k_pci_compl *compl;
        int i, pipe_num, completions, disable_interrupts;

        spin_lock_init(&ar_pci->compl_lock);
        INIT_LIST_HEAD(&ar_pci->compl_process);

        for (pipe_num = 0; pipe_num < ar_pci->ce_count; pipe_num++) {
                pipe_info = &ar_pci->pipe_info[pipe_num];

                spin_lock_init(&pipe_info->pipe_lock);
                INIT_LIST_HEAD(&pipe_info->compl_free);

                /* Handle Diagnostic CE specially */
                if (pipe_info->ce_hdl == ce_diag)
                        continue;

                attr = &host_ce_config_wlan[pipe_num];
                completions = 0;

                if (attr->src_nentries) {
                        disable_interrupts = attr->flags & CE_ATTR_DIS_INTR;
                        ath10k_ce_send_cb_register(pipe_info->ce_hdl,
                                                   ath10k_pci_ce_send_done,
                                                   disable_interrupts);
                        completions += attr->src_nentries;
                        pipe_info->num_sends_allowed = attr->src_nentries - 1;
                }

                if (attr->dest_nentries) {
                        ath10k_ce_recv_cb_register(pipe_info->ce_hdl,
                                                   ath10k_pci_ce_recv_data);
                        completions += attr->dest_nentries;
                }

                if (completions == 0)
                        continue;

                for (i = 0; i < completions; i++) {
                        compl = kmalloc(sizeof(*compl), GFP_KERNEL);
                        if (!compl) {
                                ath10k_warn("No memory for completion state\n");
                                ath10k_pci_stop_ce(ar);
                                return -ENOMEM;
                        }

                        compl->state = ATH10K_PCI_COMPL_FREE;
                        list_add_tail(&compl->list, &pipe_info->compl_free);
                }
        }

        return 0;
}

static void ath10k_pci_stop_ce(struct ath10k *ar)
{
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        struct ath10k_pci_compl *compl;
        struct sk_buff *skb;
        int i;

        ath10k_ce_disable_interrupts(ar);

        /* Cancel the pending tasklet */
        tasklet_kill(&ar_pci->intr_tq);

        for (i = 0; i < CE_COUNT; i++)
                tasklet_kill(&ar_pci->pipe_info[i].intr);

        /* Mark pending completions as aborted, so that upper layers free up
         * their associated resources */
        spin_lock_bh(&ar_pci->compl_lock);
        list_for_each_entry(compl, &ar_pci->compl_process, list) {
                skb = compl->skb;
                ATH10K_SKB_CB(skb)->is_aborted = true;
        }
        spin_unlock_bh(&ar_pci->compl_lock);
}

static void ath10k_pci_cleanup_ce(struct ath10k *ar)
{
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        struct ath10k_pci_compl *compl, *tmp;
        struct ath10k_pci_pipe *pipe_info;
        struct sk_buff *netbuf;
        int pipe_num;

        /* Free pending completions. */
        spin_lock_bh(&ar_pci->compl_lock);
        if (!list_empty(&ar_pci->compl_process))
                ath10k_warn("pending completions still present! possible memory leaks.\n");

        list_for_each_entry_safe(compl, tmp, &ar_pci->compl_process, list) {
                list_del(&compl->list);
                netbuf = compl->skb;
                dev_kfree_skb_any(netbuf);
                kfree(compl);
        }
        spin_unlock_bh(&ar_pci->compl_lock);

        /* Free unused completions for each pipe. */
        for (pipe_num = 0; pipe_num < ar_pci->ce_count; pipe_num++) {
                pipe_info = &ar_pci->pipe_info[pipe_num];

                spin_lock_bh(&pipe_info->pipe_lock);
                list_for_each_entry_safe(compl, tmp,
                                         &pipe_info->compl_free, list) {
                        list_del(&compl->list);
                        kfree(compl);
                }
                spin_unlock_bh(&pipe_info->pipe_lock);
        }
}

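/*
 * Drain the completion queue and dispatch tx/rx completions to the upper
 * layer. compl_processing guards against re-entrancy, since the upper
 * layers cannot handle tx/rx completions in parallel.
 */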
static void ath10k_pci_process_ce(struct ath10k *ar)
{
        struct ath10k_pci *ar_pci = ar->hif.priv;
        struct ath10k_hif_cb *cb = &ar_pci->msg_callbacks_current;
        struct ath10k_pci_compl *compl;
        struct sk_buff *skb;
        unsigned int nbytes;
        int ret, send_done = 0;

        /* Upper layers aren't ready to handle tx/rx completions in parallel so
         * we must serialize all completion processing. */

        spin_lock_bh(&ar_pci->compl_lock);
        if (ar_pci->compl_processing) {
                spin_unlock_bh(&ar_pci->compl_lock);
                return;
        }
        ar_pci->compl_processing = true;
        spin_unlock_bh(&ar_pci->compl_lock);

        for (;;) {
                spin_lock_bh(&ar_pci->compl_lock);
                if (list_empty(&ar_pci->compl_process)) {
                        spin_unlock_bh(&ar_pci->compl_lock);
                        break;
                }
                compl = list_first_entry(&ar_pci->compl_process,
                                         struct ath10k_pci_compl, list);
                list_del(&compl->list);
                spin_unlock_bh(&ar_pci->compl_lock);

                switch (compl->state) {
                case ATH10K_PCI_COMPL_SEND:
                        cb->tx_completion(ar,
                                          compl->skb,
                                          compl->transfer_id);
                        send_done = 1;
                        break;
                case ATH10K_PCI_COMPL_RECV:
                        ret = ath10k_pci_post_rx_pipe(compl->pipe_info, 1);
                        if (ret) {
                                ath10k_warn("Unable to post recv buffer for pipe: %d\n",
                                            compl->pipe_info->pipe_num);
                                break;
                        }

                        skb = compl->skb;
                        nbytes = compl->nbytes;

                        ath10k_dbg(ATH10K_DBG_PCI,
                                   "ath10k_pci_ce_recv_data netbuf=%p nbytes=%d\n",
                                   skb, nbytes);
                        ath10k_dbg_dump(ATH10K_DBG_PCI_DUMP, NULL,
                                        "ath10k rx: ", skb->data, nbytes);

                        if (skb->len + skb_tailroom(skb) >= nbytes) {
                                skb_trim(skb, 0);
                                skb_put(skb, nbytes);
                                cb->rx_completion(ar, skb,
                                                  compl->pipe_info->pipe_num);
                        } else {
                                ath10k_warn("rxed more than expected (nbytes %d, max %d)",
                                            nbytes,
                                            skb->len + skb_tailroom(skb));
                        }
                        break;
                case ATH10K_PCI_COMPL_FREE:
                        ath10k_warn("free completion cannot be processed\n");
                        break;
                default:
                        ath10k_warn("invalid completion state (%d)\n",
                                    compl->state);
                        break;
                }

                compl->state = ATH10K_PCI_COMPL_FREE;

                /*
                 * Add completion back to the pipe's free list.
                 */
                spin_lock_bh(&compl->pipe_info->pipe_lock);
                list_add_tail(&compl->list, &compl->pipe_info->compl_free);
                compl->pipe_info->num_sends_allowed += send_done;
                spin_unlock_bh(&compl->pipe_info->pipe_lock);
        }

        spin_lock_bh(&ar_pci->compl_lock);
        ar_pci->compl_processing = false;
        spin_unlock_bh(&ar_pci->compl_lock);
}

/* TODO - temporary mapping while we have too few CE's */
static int ath10k_pci_hif_map_service_to_pipe(struct ath10k *ar,
                                              u16 service_id, u8 *ul_pipe,
                                              u8 *dl_pipe, int *ul_is_polled,
                                              int *dl_is_polled)
{
        int ret = 0;

        /* polling for received messages not supported */
        *dl_is_polled = 0;

        switch (service_id) {
        case ATH10K_HTC_SVC_ID_HTT_DATA_MSG:
                /*
                 * Host->target HTT gets its own pipe, so it can be polled
                 * while other pipes are interrupt driven.
                 */
                *ul_pipe = 4;
                /*
                 * Use the same target->host pipe for HTC ctrl, HTC raw
                 * streams, and HTT.
                 */
                *dl_pipe = 1;
                break;

        case ATH10K_HTC_SVC_ID_RSVD_CTRL:
        case ATH10K_HTC_SVC_ID_TEST_RAW_STREAMS:
                /*
                 * Note: HTC_RAW_STREAMS_SVC is currently unused, and
                 * HTC_CTRL_RSVD_SVC could share the same pipe as the
                 * WMI services.  So, if another CE is needed, change
                 * this to *ul_pipe = 3, which frees up CE 0.
                 */
                /* *ul_pipe = 3; */
                *ul_pipe = 0;
                *dl_pipe = 1;
                break;

        case ATH10K_HTC_SVC_ID_WMI_DATA_BK:
        case ATH10K_HTC_SVC_ID_WMI_DATA_BE:
        case ATH10K_HTC_SVC_ID_WMI_DATA_VI:
        case ATH10K_HTC_SVC_ID_WMI_DATA_VO:
        case ATH10K_HTC_SVC_ID_WMI_CONTROL:
                *ul_pipe = 3;
                *dl_pipe = 2;
                break;

                /* pipe 5 unused   */
                /* pipe 6 reserved */
                /* pipe 7 reserved */

        default:
                ret = -1;
                break;
        }
        *ul_is_polled =
                (host_ce_config_wlan[*ul_pipe].flags & CE_ATTR_DIS_INTR) != 0;

        return ret;
}

static void ath10k_pci_hif_get_default_pipe(struct ath10k *ar,
                                            u8 *ul_pipe, u8 *dl_pipe)
{
        int ul_is_polled, dl_is_polled;

        (void)ath10k_pci_hif_map_service_to_pipe(ar,
                                                 ATH10K_HTC_SVC_ID_RSVD_CTRL,
                                                 ul_pipe,
                                                 dl_pipe,
                                                 &ul_is_polled,
                                                 &dl_is_polled);
}

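/*
 * Post up to @num empty receive buffers to the given pipe's Copy Engine.
 * Buffers are allocated at the pipe's buf_sz and DMA-mapped FROM_DEVICE;
 * on any failure the pipe is cleaned up.
 */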
static int ath10k_pci_post_rx_pipe(struct ath10k_pci_pipe *pipe_info,
                                   int num)
{
        struct ath10k *ar = pipe_info->hif_ce_state;
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        struct ath10k_ce_pipe *ce_state = pipe_info->ce_hdl;
        struct sk_buff *skb;
        dma_addr_t ce_data;
        int i, ret = 0;

        if (pipe_info->buf_sz == 0)
                return 0;

        for (i = 0; i < num; i++) {
                skb = dev_alloc_skb(pipe_info->buf_sz);
                if (!skb) {
                        ath10k_warn("could not allocate skbuff for pipe %d\n",
                                    num);
                        ret = -ENOMEM;
                        goto err;
                }

                WARN_ONCE((unsigned long)skb->data & 3, "unaligned skb");

                ce_data = dma_map_single(ar->dev, skb->data,
                                         skb->len + skb_tailroom(skb),
                                         DMA_FROM_DEVICE);

                if (unlikely(dma_mapping_error(ar->dev, ce_data))) {
                        ath10k_warn("could not dma map skbuff\n");
                        dev_kfree_skb_any(skb);
                        ret = -EIO;
                        goto err;
                }

                ATH10K_SKB_CB(skb)->paddr = ce_data;

                pci_dma_sync_single_for_device(ar_pci->pdev, ce_data,
                                               pipe_info->buf_sz,
                                               PCI_DMA_FROMDEVICE);

                ret = ath10k_ce_recv_buf_enqueue(ce_state, (void *)skb,
                                                 ce_data);
                if (ret) {
                        ath10k_warn("could not enqueue to pipe %d (%d)\n",
                                    num, ret);
                        goto err;
                }
        }

        return ret;

err:
        ath10k_pci_rx_pipe_cleanup(pipe_info);
        return ret;
}

static int ath10k_pci_post_rx(struct ath10k *ar)
{
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        struct ath10k_pci_pipe *pipe_info;
        const struct ce_attr *attr;
        int pipe_num, ret = 0;

        for (pipe_num = 0; pipe_num < ar_pci->ce_count; pipe_num++) {
                pipe_info = &ar_pci->pipe_info[pipe_num];
                attr = &host_ce_config_wlan[pipe_num];

                if (attr->dest_nentries == 0)
                        continue;

                ret = ath10k_pci_post_rx_pipe(pipe_info,
                                              attr->dest_nentries - 1);
                if (ret) {
                        ath10k_warn("Unable to replenish recv buffers for pipe: %d\n",
                                    pipe_num);

                        for (; pipe_num >= 0; pipe_num--) {
                                pipe_info = &ar_pci->pipe_info[pipe_num];
                                ath10k_pci_rx_pipe_cleanup(pipe_info);
                        }
                        return ret;
                }
        }

        return 0;
}

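/* Start the HIF layer: register CE completions and prime the rx rings. */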
static int ath10k_pci_hif_start(struct ath10k *ar)
{
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        int ret;

        ret = ath10k_pci_start_ce(ar);
        if (ret) {
                ath10k_warn("could not start CE (%d)\n", ret);
                return ret;
        }

        /* Post buffers once to start things off. */
        ret = ath10k_pci_post_rx(ar);
        if (ret) {
                ath10k_warn("could not post rx pipes (%d)\n", ret);
                return ret;
        }

        ar_pci->started = 1;
        return 0;
}

static void ath10k_pci_rx_pipe_cleanup(struct ath10k_pci_pipe *pipe_info)
{
        struct ath10k *ar;
        struct ath10k_pci *ar_pci;
        struct ath10k_ce_pipe *ce_hdl;
        u32 buf_sz;
        struct sk_buff *netbuf;
        u32 ce_data;

        buf_sz = pipe_info->buf_sz;

        /* Unused Copy Engine */
        if (buf_sz == 0)
                return;

        ar = pipe_info->hif_ce_state;
        ar_pci = ath10k_pci_priv(ar);

        if (!ar_pci->started)
                return;

        ce_hdl = pipe_info->ce_hdl;

        while (ath10k_ce_revoke_recv_next(ce_hdl, (void **)&netbuf,
                                          &ce_data) == 0) {
                dma_unmap_single(ar->dev, ATH10K_SKB_CB(netbuf)->paddr,
                                 netbuf->len + skb_tailroom(netbuf),
                                 DMA_FROM_DEVICE);
                dev_kfree_skb_any(netbuf);
        }
}

static void ath10k_pci_tx_pipe_cleanup(struct ath10k_pci_pipe *pipe_info)
{
        struct ath10k *ar;
        struct ath10k_pci *ar_pci;
        struct ath10k_ce_pipe *ce_hdl;
        struct sk_buff *netbuf;
        u32 ce_data;
        unsigned int nbytes;
        unsigned int id;
        u32 buf_sz;

        buf_sz = pipe_info->buf_sz;

        /* Unused Copy Engine */
        if (buf_sz == 0)
                return;

        ar = pipe_info->hif_ce_state;
        ar_pci = ath10k_pci_priv(ar);

        if (!ar_pci->started)
                return;

        ce_hdl = pipe_info->ce_hdl;

        while (ath10k_ce_cancel_send_next(ce_hdl, (void **)&netbuf,
                                          &ce_data, &nbytes, &id) == 0) {
                if (netbuf != CE_SENDLIST_ITEM_CTXT) {
                        /*
                         * Indicate the completion to higher layers to free
                         * the buffer.
                         */
                        ATH10K_SKB_CB(netbuf)->is_aborted = true;
                        ar_pci->msg_callbacks_current.tx_completion(ar,
                                                                    netbuf,
                                                                    id);
                }
        }
}

/*
 * Cleanup residual buffers for device shutdown:
 *    buffers that were enqueued for receive
 *    buffers that were to be sent
 * Note: Buffers that had completed but which were
 * not yet processed are on a completion queue. They
 * are handled when the completion thread shuts down.
 */
static void ath10k_pci_buffer_cleanup(struct ath10k *ar)
{
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        int pipe_num;

        for (pipe_num = 0; pipe_num < ar_pci->ce_count; pipe_num++) {
                struct ath10k_pci_pipe *pipe_info;

                pipe_info = &ar_pci->pipe_info[pipe_num];
                ath10k_pci_rx_pipe_cleanup(pipe_info);
                ath10k_pci_tx_pipe_cleanup(pipe_info);
        }
}

static void ath10k_pci_ce_deinit(struct ath10k *ar)
{
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        struct ath10k_pci_pipe *pipe_info;
        int pipe_num;

        for (pipe_num = 0; pipe_num < ar_pci->ce_count; pipe_num++) {
                pipe_info = &ar_pci->pipe_info[pipe_num];
                if (pipe_info->ce_hdl) {
                        ath10k_ce_deinit(pipe_info->ce_hdl);
                        pipe_info->ce_hdl = NULL;
                        pipe_info->buf_sz = 0;
                }
        }
}

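/* Disable every IRQ line in use: one for legacy/shared, one per MSI. */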
static void ath10k_pci_disable_irqs(struct ath10k *ar)
{
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        int i;

        for (i = 0; i < max(1, ar_pci->num_msi_intrs); i++)
                disable_irq(ar_pci->pdev->irq + i);
}

static void ath10k_pci_hif_stop(struct ath10k *ar)
{
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

        ath10k_dbg(ATH10K_DBG_PCI, "%s\n", __func__);

        /* Irqs are never explicitly re-enabled. They are implicitly re-enabled
         * by ath10k_pci_start_intr(). */
        ath10k_pci_disable_irqs(ar);

        ath10k_pci_stop_ce(ar);

        /* At this point, asynchronous threads are stopped, the target should
         * not DMA nor interrupt. We process the leftovers and then free
         * everything else up. */

        ath10k_pci_process_ce(ar);
        ath10k_pci_cleanup_ce(ar);
        ath10k_pci_buffer_cleanup(ar);

        ar_pci->started = 0;
}

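/*
 * Exchange a BMI (Bootloader Messaging Interface) request/response pair
 * with the target over the BMI Copy Engine pipes. The request is bounced
 * through DMA-safe copies; the call blocks for up to
 * BMI_COMMUNICATION_TIMEOUT_HZ waiting for the target to complete.
 */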
static int ath10k_pci_hif_exchange_bmi_msg(struct ath10k *ar,
                                           void *req, u32 req_len,
                                           void *resp, u32 *resp_len)
{
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        struct ath10k_pci_pipe *pci_tx = &ar_pci->pipe_info[BMI_CE_NUM_TO_TARG];
        struct ath10k_pci_pipe *pci_rx = &ar_pci->pipe_info[BMI_CE_NUM_TO_HOST];
        struct ath10k_ce_pipe *ce_tx = pci_tx->ce_hdl;
        struct ath10k_ce_pipe *ce_rx = pci_rx->ce_hdl;
        dma_addr_t req_paddr = 0;
        dma_addr_t resp_paddr = 0;
        struct bmi_xfer xfer = {};
        void *treq, *tresp = NULL;
        int ret = 0;

        if (resp && !resp_len)
                return -EINVAL;

        if (resp && resp_len && *resp_len == 0)
                return -EINVAL;

        treq = kmemdup(req, req_len, GFP_KERNEL);
        if (!treq)
                return -ENOMEM;

        req_paddr = dma_map_single(ar->dev, treq, req_len, DMA_TO_DEVICE);
        ret = dma_mapping_error(ar->dev, req_paddr);
        if (ret)
                goto err_dma;

        /* Initialize the completion before any buffer is posted so the CE
         * callbacks can never observe an uninitialized completion. */
        init_completion(&xfer.done);

        if (resp && resp_len) {
                tresp = kzalloc(*resp_len, GFP_KERNEL);
                if (!tresp) {
                        ret = -ENOMEM;
                        goto err_req;
                }

                resp_paddr = dma_map_single(ar->dev, tresp, *resp_len,
                                            DMA_FROM_DEVICE);
                ret = dma_mapping_error(ar->dev, resp_paddr);
                if (ret)
                        goto err_req;

                xfer.wait_for_resp = true;
                xfer.resp_len = 0;

                ath10k_ce_recv_buf_enqueue(ce_rx, &xfer, resp_paddr);
        }

        ret = ath10k_ce_send(ce_tx, &xfer, req_paddr, req_len, -1, 0);
        if (ret)
                goto err_resp;

        ret = wait_for_completion_timeout(&xfer.done,
                                          BMI_COMMUNICATION_TIMEOUT_HZ);
        if (ret <= 0) {
                u32 unused_buffer;
                unsigned int unused_nbytes;
                unsigned int unused_id;

                ret = -ETIMEDOUT;
                ath10k_ce_cancel_send_next(ce_tx, NULL, &unused_buffer,
                                           &unused_nbytes, &unused_id);
        } else {
                /* non-zero means we did not time out */
                ret = 0;
        }

err_resp:
        if (resp) {
                u32 unused_buffer;

                ath10k_ce_revoke_recv_next(ce_rx, NULL, &unused_buffer);
                dma_unmap_single(ar->dev, resp_paddr,
                                 *resp_len, DMA_FROM_DEVICE);
        }
err_req:
        dma_unmap_single(ar->dev, req_paddr, req_len, DMA_TO_DEVICE);

        if (ret == 0 && resp_len) {
                /* never copy more than the caller's buffer can hold */
                *resp_len = min(*resp_len, xfer.resp_len);
                memcpy(resp, tresp, *resp_len);
        }
err_dma:
        kfree(treq);
        kfree(tresp);

        return ret;
}

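/*
 * CE send-completion callback for BMI. When a response is expected the
 * completion is signalled from the receive path instead, so this
 * callback only completes fire-and-forget requests.
 */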
static void ath10k_pci_bmi_send_done(struct ath10k_ce_pipe *ce_state,
                                     void *transfer_context,
                                     u32 data,
                                     unsigned int nbytes,
                                     unsigned int transfer_id)
{
        struct bmi_xfer *xfer = transfer_context;

        if (xfer->wait_for_resp)
                return;

        complete(&xfer->done);
}

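/*
 * CE receive callback for BMI: record the response length and wake the
 * waiter in ath10k_pci_hif_exchange_bmi_msg().
 */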
static void ath10k_pci_bmi_recv_data(struct ath10k_ce_pipe *ce_state,
                                     void *transfer_context,
                                     u32 data,
                                     unsigned int nbytes,
                                     unsigned int transfer_id,
                                     unsigned int flags)
{
        struct bmi_xfer *xfer = transfer_context;

        if (!xfer->wait_for_resp) {
                ath10k_warn("unexpected: BMI data received; ignoring\n");
                return;
        }

        xfer->resp_len = nbytes;
        complete(&xfer->done);
}

/*
 * Map from service/endpoint to Copy Engine.
 * This table is derived from the CE_PCI TABLE, above.
 * It is passed to the Target at startup for use by firmware.
 */
static const struct service_to_pipe target_service_to_ce_map_wlan[] = {
        {
                 ATH10K_HTC_SVC_ID_WMI_DATA_VO,
                 PIPEDIR_OUT,           /* out = UL = host -> target */
                 3,
        },
        {
                 ATH10K_HTC_SVC_ID_WMI_DATA_VO,
                 PIPEDIR_IN,            /* in = DL = target -> host */
                 2,
        },
        {
                 ATH10K_HTC_SVC_ID_WMI_DATA_BK,
                 PIPEDIR_OUT,           /* out = UL = host -> target */
                 3,
        },
        {
                 ATH10K_HTC_SVC_ID_WMI_DATA_BK,
                 PIPEDIR_IN,            /* in = DL = target -> host */
                 2,
        },
        {
                 ATH10K_HTC_SVC_ID_WMI_DATA_BE,
                 PIPEDIR_OUT,           /* out = UL = host -> target */
                 3,
        },
        {
                 ATH10K_HTC_SVC_ID_WMI_DATA_BE,
                 PIPEDIR_IN,            /* in = DL = target -> host */
                 2,
        },
        {
                 ATH10K_HTC_SVC_ID_WMI_DATA_VI,
                 PIPEDIR_OUT,           /* out = UL = host -> target */
                 3,
        },
        {
                 ATH10K_HTC_SVC_ID_WMI_DATA_VI,
                 PIPEDIR_IN,            /* in = DL = target -> host */
                 2,
        },
        {
                 ATH10K_HTC_SVC_ID_WMI_CONTROL,
                 PIPEDIR_OUT,           /* out = UL = host -> target */
                 3,
        },
        {
                 ATH10K_HTC_SVC_ID_WMI_CONTROL,
                 PIPEDIR_IN,            /* in = DL = target -> host */
                 2,
        },
        {
                 ATH10K_HTC_SVC_ID_RSVD_CTRL,
                 PIPEDIR_OUT,           /* out = UL = host -> target */
                 0,             /* could be moved to 3 (share with WMI) */
        },
        {
                 ATH10K_HTC_SVC_ID_RSVD_CTRL,
                 PIPEDIR_IN,            /* in = DL = target -> host */
                 1,
        },
        {
                 ATH10K_HTC_SVC_ID_TEST_RAW_STREAMS,    /* not currently used */
                 PIPEDIR_OUT,           /* out = UL = host -> target */
                 0,
        },
        {
                 ATH10K_HTC_SVC_ID_TEST_RAW_STREAMS,    /* not currently used */
                 PIPEDIR_IN,            /* in = DL = target -> host */
                 1,
        },
        {
                 ATH10K_HTC_SVC_ID_HTT_DATA_MSG,
                 PIPEDIR_OUT,           /* out = UL = host -> target */
                 4,
        },
        {
                 ATH10K_HTC_SVC_ID_HTT_DATA_MSG,
                 PIPEDIR_IN,            /* in = DL = target -> host */
                 1,
        },

        /* (Additions here) */

        {                               /* Must be last */
                 0,
                 0,
                 0,
        },
};

/*
 * Send an interrupt to the device to wake up the Target CPU
 * so it has an opportunity to notice any changed state.
 */
static int ath10k_pci_wake_target_cpu(struct ath10k *ar)
{
        int ret;
        u32 core_ctrl;

        ret = ath10k_pci_diag_read_access(ar, SOC_CORE_BASE_ADDRESS |
                                              CORE_CTRL_ADDRESS,
                                          &core_ctrl);
        if (ret) {
                ath10k_warn("Unable to read core ctrl\n");
                return ret;
        }

        /* A_INUM_FIRMWARE interrupt to Target CPU */
        core_ctrl |= CORE_CTRL_CPU_INTR_MASK;

        ret = ath10k_pci_diag_write_access(ar, SOC_CORE_BASE_ADDRESS |
                                               CORE_CTRL_ADDRESS,
                                           core_ctrl);
        if (ret)
                ath10k_warn("Unable to set interrupt mask\n");

        return ret;
}

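/*
 * Push the host's CE pipe configuration and service-to-pipe map into
 * target memory through the diagnostic window, disable PCIe L1 on the
 * target side, set up early IRAM allocation, and finally flag
 * HI_OPTION_EARLY_CFG_DONE so the target firmware proceeds with its
 * initialization.
 */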
static int ath10k_pci_init_config(struct ath10k *ar)
{
        u32 interconnect_targ_addr;
        u32 pcie_state_targ_addr = 0;
        u32 pipe_cfg_targ_addr = 0;
        u32 svc_to_pipe_map = 0;
        u32 pcie_config_flags = 0;
        u32 ealloc_value;
        u32 ealloc_targ_addr;
        u32 flag2_value;
        u32 flag2_targ_addr;
        int ret = 0;

        /* Download to Target the CE Config and the service-to-CE map */
        interconnect_targ_addr =
                host_interest_item_address(HI_ITEM(hi_interconnect_state));

        /* Supply Target-side CE configuration */
        ret = ath10k_pci_diag_read_access(ar, interconnect_targ_addr,
                                          &pcie_state_targ_addr);
        if (ret != 0) {
                ath10k_err("Failed to get pcie state addr: %d\n", ret);
                return ret;
        }

        if (pcie_state_targ_addr == 0) {
                ret = -EIO;
                ath10k_err("Invalid pcie state addr\n");
                return ret;
        }

        ret = ath10k_pci_diag_read_access(ar, pcie_state_targ_addr +
                                          offsetof(struct pcie_state,
                                                   pipe_cfg_addr),
                                          &pipe_cfg_targ_addr);
        if (ret != 0) {
                ath10k_err("Failed to get pipe cfg addr: %d\n", ret);
                return ret;
        }

        if (pipe_cfg_targ_addr == 0) {
                ret = -EIO;
                ath10k_err("Invalid pipe cfg addr\n");
                return ret;
        }

        ret = ath10k_pci_diag_write_mem(ar, pipe_cfg_targ_addr,
                                 target_ce_config_wlan,
                                 sizeof(target_ce_config_wlan));

        if (ret != 0) {
                ath10k_err("Failed to write pipe cfg: %d\n", ret);
                return ret;
        }

        ret = ath10k_pci_diag_read_access(ar, pcie_state_targ_addr +
                                          offsetof(struct pcie_state,
                                                   svc_to_pipe_map),
                                          &svc_to_pipe_map);
        if (ret != 0) {
                ath10k_err("Failed to get svc/pipe map: %d\n", ret);
                return ret;
        }

        if (svc_to_pipe_map == 0) {
                ret = -EIO;
                ath10k_err("Invalid svc_to_pipe map\n");
                return ret;
        }

        ret = ath10k_pci_diag_write_mem(ar, svc_to_pipe_map,
                                 target_service_to_ce_map_wlan,
                                 sizeof(target_service_to_ce_map_wlan));
        if (ret != 0) {
                ath10k_err("Failed to write svc/pipe map: %d\n", ret);
                return ret;
        }

        ret = ath10k_pci_diag_read_access(ar, pcie_state_targ_addr +
                                          offsetof(struct pcie_state,
                                                   config_flags),
                                          &pcie_config_flags);
        if (ret != 0) {
                ath10k_err("Failed to get pcie config_flags: %d\n", ret);
                return ret;
        }

        pcie_config_flags &= ~PCIE_CONFIG_FLAG_ENABLE_L1;

        ret = ath10k_pci_diag_write_mem(ar, pcie_state_targ_addr +
                                 offsetof(struct pcie_state, config_flags),
                                 &pcie_config_flags,
                                 sizeof(pcie_config_flags));
        if (ret != 0) {
                ath10k_err("Failed to write pcie config_flags: %d\n", ret);
                return ret;
        }

        /* configure early allocation */
        ealloc_targ_addr = host_interest_item_address(HI_ITEM(hi_early_alloc));

        ret = ath10k_pci_diag_read_access(ar, ealloc_targ_addr, &ealloc_value);
        if (ret != 0) {
                ath10k_err("Failed to get early alloc val: %d\n", ret);
                return ret;
        }

        /* first bank is switched to IRAM */
        ealloc_value |= ((HI_EARLY_ALLOC_MAGIC << HI_EARLY_ALLOC_MAGIC_SHIFT) &
                         HI_EARLY_ALLOC_MAGIC_MASK);
        ealloc_value |= ((1 << HI_EARLY_ALLOC_IRAM_BANKS_SHIFT) &
                         HI_EARLY_ALLOC_IRAM_BANKS_MASK);

        ret = ath10k_pci_diag_write_access(ar, ealloc_targ_addr, ealloc_value);
        if (ret != 0) {
                ath10k_err("Failed to set early alloc val: %d\n", ret);
                return ret;
        }

        /* Tell Target to proceed with initialization */
        flag2_targ_addr = host_interest_item_address(HI_ITEM(hi_option_flag2));

        ret = ath10k_pci_diag_read_access(ar, flag2_targ_addr, &flag2_value);
        if (ret != 0) {
                ath10k_err("Failed to get option val: %d\n", ret);
                return ret;
        }

        flag2_value |= HI_OPTION_EARLY_CFG_DONE;

        ret = ath10k_pci_diag_write_access(ar, flag2_targ_addr, flag2_value);
        if (ret != 0) {
                ath10k_err("Failed to set option val: %d\n", ret);
                return ret;
        }

        return 0;
}

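/*
 * Set up every copy engine pipe according to host_ce_config_wlan. The
 * last CE is reserved for diagnostic window accesses rather than normal
 * traffic, and the BMI pipes initially get BMI-specific completion
 * handlers which are replaced once the BMI phase is over.
 */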
static int ath10k_pci_ce_init(struct ath10k *ar)
{
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        struct ath10k_pci_pipe *pipe_info;
        const struct ce_attr *attr;
        int pipe_num;

        for (pipe_num = 0; pipe_num < ar_pci->ce_count; pipe_num++) {
                pipe_info = &ar_pci->pipe_info[pipe_num];
                pipe_info->pipe_num = pipe_num;
                pipe_info->hif_ce_state = ar;
                attr = &host_ce_config_wlan[pipe_num];

                pipe_info->ce_hdl = ath10k_ce_init(ar, pipe_num, attr);
                if (pipe_info->ce_hdl == NULL) {
                        ath10k_err("Unable to initialize CE for pipe: %d\n",
                                   pipe_num);

                        /* It is safe to call it here. It checks if ce_hdl is
                         * valid for each pipe */
                        ath10k_pci_ce_deinit(ar);
                        return -ENOMEM;
                }

                if (pipe_num == ar_pci->ce_count - 1) {
                        /*
                         * Reserve the ultimate CE for
                         * diagnostic Window support
                         */
                        ar_pci->ce_diag = pipe_info->ce_hdl;
                        continue;
                }

                pipe_info->buf_sz = (size_t)(attr->src_sz_max);
        }

        /*
         * Initially, establish CE completion handlers for use with BMI.
         * These are overwritten with generic handlers after we exit BMI phase.
         */
        pipe_info = &ar_pci->pipe_info[BMI_CE_NUM_TO_TARG];
        ath10k_ce_send_cb_register(pipe_info->ce_hdl,
                                   ath10k_pci_bmi_send_done, 0);

        pipe_info = &ar_pci->pipe_info[BMI_CE_NUM_TO_HOST];
        ath10k_ce_recv_cb_register(pipe_info->ce_hdl,
                                   ath10k_pci_bmi_recv_data);

        return 0;
}

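/*
 * Handle a firmware interrupt: acknowledge FW_IND_EVENT_PENDING and, if
 * the HIF has been started, dump the firmware crash area. An event this
 * early otherwise only earns a warning.
 */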
static void ath10k_pci_fw_interrupt_handler(struct ath10k *ar)
{
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        u32 fw_indicator_address, fw_indicator;

        ath10k_pci_wake(ar);

        fw_indicator_address = ar_pci->fw_indicator_address;
        fw_indicator = ath10k_pci_read32(ar, fw_indicator_address);

        if (fw_indicator & FW_IND_EVENT_PENDING) {
                /* ACK: clear Target-side pending event */
                ath10k_pci_write32(ar, fw_indicator_address,
                                   fw_indicator & ~FW_IND_EVENT_PENDING);

                if (ar_pci->started) {
                        ath10k_pci_hif_dump_area(ar);
                } else {
                        /*
                         * Probable Target failure before we're prepared
                         * to handle it.  Generally unexpected.
                         */
                        ath10k_warn("early firmware event indicated\n");
                }
        }

        ath10k_pci_sleep(ar);
}

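/*
 * Power-up sequence: start interrupt handling, cold-reset the target to
 * recover it from any undefined state, wait for firmware
 * initialization, then bring up the copy engines, download the CE/pipe
 * configuration and wake the target CPU.
 */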
static int ath10k_pci_hif_power_up(struct ath10k *ar)
{
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        int ret;

        ret = ath10k_pci_start_intr(ar);
        if (ret) {
                ath10k_err("could not start interrupt handling (%d)\n", ret);
                goto err;
        }

        /*
         * Bring the target up cleanly.
         *
         * The target may be in an undefined state with an AUX-powered Target
         * and a Host in WoW mode. If the Host crashes, loses power, or is
         * restarted (without unloading the driver) then the Target is left
         * (aux) powered and running. On a subsequent driver load, the Target
         * is in an unexpected state. We try to catch that here in order to
         * reset the Target and retry the probe.
         */
        ath10k_pci_device_reset(ar);

        ret = ath10k_pci_reset_target(ar);
        if (ret)
                goto err_irq;

        if (!test_bit(ATH10K_PCI_FEATURE_SOC_POWER_SAVE, ar_pci->features))
                /* Force AWAKE forever */
                ath10k_do_pci_wake(ar);

        ret = ath10k_pci_ce_init(ar);
        if (ret)
                goto err_ps;

        ret = ath10k_pci_init_config(ar);
        if (ret)
                goto err_ce;

        ret = ath10k_pci_wake_target_cpu(ar);
        if (ret) {
                ath10k_err("could not wake up target CPU (%d)\n", ret);
                goto err_ce;
        }

        return 0;

err_ce:
        ath10k_pci_ce_deinit(ar);
err_ps:
        if (!test_bit(ATH10K_PCI_FEATURE_SOC_POWER_SAVE, ar_pci->features))
                ath10k_do_pci_sleep(ar);
err_irq:
        ath10k_pci_stop_intr(ar);
err:
        return ret;
}

static void ath10k_pci_hif_power_down(struct ath10k *ar)
{
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

        ath10k_pci_stop_intr(ar);

        ath10k_pci_ce_deinit(ar);
        if (!test_bit(ATH10K_PCI_FEATURE_SOC_POWER_SAVE, ar_pci->features))
                ath10k_do_pci_sleep(ar);
}

#ifdef CONFIG_PM

#define ATH10K_PCI_PM_CONTROL 0x44

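/*
 * ATH10K_PCI_PM_CONTROL (0x44) is presumably the PMCSR of the device's
 * PCI power management capability: writing 0x3 to the low byte moves
 * the device to D3hot on suspend, and clearing that byte restores D0 on
 * resume.
 */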
static int ath10k_pci_hif_suspend(struct ath10k *ar)
{
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        struct pci_dev *pdev = ar_pci->pdev;
        u32 val;

        pci_read_config_dword(pdev, ATH10K_PCI_PM_CONTROL, &val);

        if ((val & 0x000000ff) != 0x3) {
                pci_save_state(pdev);
                pci_disable_device(pdev);
                pci_write_config_dword(pdev, ATH10K_PCI_PM_CONTROL,
                                       (val & 0xffffff00) | 0x03);
        }

        return 0;
}

static int ath10k_pci_hif_resume(struct ath10k *ar)
{
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        struct pci_dev *pdev = ar_pci->pdev;
        u32 val;

        pci_read_config_dword(pdev, ATH10K_PCI_PM_CONTROL, &val);

        if ((val & 0x000000ff) != 0) {
                pci_restore_state(pdev);
                pci_write_config_dword(pdev, ATH10K_PCI_PM_CONTROL,
                                       val & 0xffffff00);
                /*
                 * Suspend/Resume resets the PCI configuration space,
                 * so we have to re-disable the RETRY_TIMEOUT register (0x41)
                 * to keep PCI Tx retries from interfering with C3 CPU state
                 */
                pci_read_config_dword(pdev, 0x40, &val);

                if ((val & 0x0000ff00) != 0)
                        pci_write_config_dword(pdev, 0x40, val & 0xffff00ff);
        }

        return 0;
}
#endif

static const struct ath10k_hif_ops ath10k_pci_hif_ops = {
        .send_head              = ath10k_pci_hif_send_head,
        .exchange_bmi_msg       = ath10k_pci_hif_exchange_bmi_msg,
        .start                  = ath10k_pci_hif_start,
        .stop                   = ath10k_pci_hif_stop,
        .map_service_to_pipe    = ath10k_pci_hif_map_service_to_pipe,
        .get_default_pipe       = ath10k_pci_hif_get_default_pipe,
        .send_complete_check    = ath10k_pci_hif_send_complete_check,
        .set_callbacks          = ath10k_pci_hif_set_callbacks,
        .get_free_queue_number  = ath10k_pci_hif_get_free_queue_number,
        .power_up               = ath10k_pci_hif_power_up,
        .power_down             = ath10k_pci_hif_power_down,
#ifdef CONFIG_PM
        .suspend                = ath10k_pci_hif_suspend,
        .resume                 = ath10k_pci_hif_resume,
#endif
};

static void ath10k_pci_ce_tasklet(unsigned long ptr)
{
        struct ath10k_pci_pipe *pipe = (struct ath10k_pci_pipe *)ptr;
        struct ath10k_pci *ar_pci = pipe->ar_pci;

        ath10k_ce_per_engine_service(ar_pci->ar, pipe->pipe_num);
}

static void ath10k_msi_err_tasklet(unsigned long data)
{
        struct ath10k *ar = (struct ath10k *)data;

        ath10k_pci_fw_interrupt_handler(ar);
}

/*
 * Handler for a per-engine interrupt on a PARTICULAR CE.
 * This is used in cases where each CE has a private MSI interrupt.
 */
static irqreturn_t ath10k_pci_per_engine_handler(int irq, void *arg)
{
        struct ath10k *ar = arg;
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        int ce_id = irq - ar_pci->pdev->irq - MSI_ASSIGN_CE_INITIAL;

        if (ce_id < 0 || ce_id >= ARRAY_SIZE(ar_pci->pipe_info)) {
                ath10k_warn("unexpected/invalid irq %d ce_id %d\n", irq, ce_id);
                return IRQ_HANDLED;
        }

        /*
         * NOTE: We are able to derive ce_id from irq because we
         * use a one-to-one mapping for CEs 0..5.
         * CEs 6 & 7 do not use interrupts at all.
         *
         * This mapping must be kept in sync with the mapping
         * used by firmware.
         */
        tasklet_schedule(&ar_pci->pipe_info[ce_id].intr);
        return IRQ_HANDLED;
}

static irqreturn_t ath10k_pci_msi_fw_handler(int irq, void *arg)
{
        struct ath10k *ar = arg;
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

        tasklet_schedule(&ar_pci->msi_fw_err);
        return IRQ_HANDLED;
}

/*
 * Top-level interrupt handler for all PCI interrupts from a Target.
 * When a block of MSI interrupts is allocated, this top-level handler
 * is not used; instead, we directly call the correct sub-handler.
 */
static irqreturn_t ath10k_pci_interrupt_handler(int irq, void *arg)
{
        struct ath10k *ar = arg;
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

        if (ar_pci->num_msi_intrs == 0) {
                /*
                 * IMPORTANT: the INTR_CLR register has to be set after
                 * INTR_ENABLE is set to 0; otherwise the interrupt cannot
                 * really be cleared.
                 */
                iowrite32(0, ar_pci->mem +
                          (SOC_CORE_BASE_ADDRESS |
                           PCIE_INTR_ENABLE_ADDRESS));
                iowrite32(PCIE_INTR_FIRMWARE_MASK |
                          PCIE_INTR_CE_MASK_ALL,
                          ar_pci->mem + (SOC_CORE_BASE_ADDRESS |
                                         PCIE_INTR_CLR_ADDRESS));
                /*
                 * IMPORTANT: this extra read transaction is required to
                 * flush the posted write buffer.
                 */
                (void) ioread32(ar_pci->mem +
                                (SOC_CORE_BASE_ADDRESS |
                                 PCIE_INTR_ENABLE_ADDRESS));
        }

        tasklet_schedule(&ar_pci->intr_tq);

        return IRQ_HANDLED;
}

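/*
 * Bottom half shared by MSI and legacy interrupts: service any pending
 * firmware event, let each copy engine process its completions and, for
 * legacy interrupts, re-enable the interrupt sources that the top-level
 * handler masked.
 */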
static void ath10k_pci_tasklet(unsigned long data)
{
        struct ath10k *ar = (struct ath10k *)data;
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

        ath10k_pci_fw_interrupt_handler(ar); /* FIXME: Handle FW error */
        ath10k_ce_per_engine_service_any(ar);

        if (ar_pci->num_msi_intrs == 0) {
                /* Enable Legacy PCI line interrupts */
                iowrite32(PCIE_INTR_FIRMWARE_MASK |
                          PCIE_INTR_CE_MASK_ALL,
                          ar_pci->mem + (SOC_CORE_BASE_ADDRESS |
                                         PCIE_INTR_ENABLE_ADDRESS));
                /*
                 * IMPORTANT: this extra read transaction is required to
                 * flush the posted write buffer
                 */
                (void) ioread32(ar_pci->mem +
                                (SOC_CORE_BASE_ADDRESS |
                                 PCIE_INTR_ENABLE_ADDRESS));
        }
}

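/*
 * Despite the "msix" in the name, this path allocates a block of MSI
 * vectors via pci_enable_msi_block(): vector MSI_ASSIGN_FW is dedicated
 * to firmware events and the remaining vectors map one-to-one onto the
 * interrupt-capable copy engines.
 */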
static int ath10k_pci_start_intr_msix(struct ath10k *ar, int num)
{
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        int ret;
        int i;

        ret = pci_enable_msi_block(ar_pci->pdev, num);
        if (ret)
                return ret;

        ret = request_irq(ar_pci->pdev->irq + MSI_ASSIGN_FW,
                          ath10k_pci_msi_fw_handler,
                          IRQF_SHARED, "ath10k_pci", ar);
        if (ret) {
                ath10k_warn("request_irq(%d) failed %d\n",
                            ar_pci->pdev->irq + MSI_ASSIGN_FW, ret);

                pci_disable_msi(ar_pci->pdev);
                return ret;
        }

        for (i = MSI_ASSIGN_CE_INITIAL; i <= MSI_ASSIGN_CE_MAX; i++) {
                ret = request_irq(ar_pci->pdev->irq + i,
                                  ath10k_pci_per_engine_handler,
                                  IRQF_SHARED, "ath10k_pci", ar);
                if (ret) {
                        ath10k_warn("request_irq(%d) failed %d\n",
                                    ar_pci->pdev->irq + i, ret);

                        for (i--; i >= MSI_ASSIGN_CE_INITIAL; i--)
                                free_irq(ar_pci->pdev->irq + i, ar);

                        free_irq(ar_pci->pdev->irq + MSI_ASSIGN_FW, ar);
                        pci_disable_msi(ar_pci->pdev);
                        return ret;
                }
        }

        ath10k_info("MSI-X interrupt handling (%d intrs)\n", num);
        return 0;
}

static int ath10k_pci_start_intr_msi(struct ath10k *ar)
{
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        int ret;

        ret = pci_enable_msi(ar_pci->pdev);
        if (ret < 0)
                return ret;

        ret = request_irq(ar_pci->pdev->irq,
                          ath10k_pci_interrupt_handler,
                          IRQF_SHARED, "ath10k_pci", ar);
        if (ret < 0) {
                pci_disable_msi(ar_pci->pdev);
                return ret;
        }

        ath10k_info("MSI interrupt handling\n");
        return 0;
}

static int ath10k_pci_start_intr_legacy(struct ath10k *ar)
{
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        int ret;

        ret = request_irq(ar_pci->pdev->irq,
                          ath10k_pci_interrupt_handler,
                          IRQF_SHARED, "ath10k_pci", ar);
        if (ret < 0)
                return ret;

        /*
         * Make sure to wake the Target before enabling Legacy
         * Interrupt.
         */
        iowrite32(PCIE_SOC_WAKE_V_MASK,
                  ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS +
                  PCIE_SOC_WAKE_ADDRESS);

        ath10k_pci_wait(ar);

        /*
         * A potential race occurs here: the CORE_BASE write depends on
         * the target correctly decoding the AXI address, but the host
         * won't know when the target writes BAR to CORE_CTRL. This
         * write might get lost if the target has NOT written BAR.
         * For now, fix the race by repeating the write in the
         * synchronization loop below.
         */
        iowrite32(PCIE_INTR_FIRMWARE_MASK |
                  PCIE_INTR_CE_MASK_ALL,
                  ar_pci->mem + (SOC_CORE_BASE_ADDRESS |
                                 PCIE_INTR_ENABLE_ADDRESS));
        iowrite32(PCIE_SOC_WAKE_RESET,
                  ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS +
                  PCIE_SOC_WAKE_ADDRESS);

        ath10k_info("legacy interrupt handling\n");
        return 0;
}

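/*
 * Interrupt setup with graceful degradation: try a full block of MSI
 * vectors first, fall back to a single MSI, and finally to the legacy
 * INTx line. num_msi_intrs records which mode ended up in use (0 means
 * legacy).
 */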
static int ath10k_pci_start_intr(struct ath10k *ar)
{
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        int num = MSI_NUM_REQUEST;
        int ret;
        int i;

        tasklet_init(&ar_pci->intr_tq, ath10k_pci_tasklet, (unsigned long) ar);
        tasklet_init(&ar_pci->msi_fw_err, ath10k_msi_err_tasklet,
                     (unsigned long) ar);

        for (i = 0; i < CE_COUNT; i++) {
                ar_pci->pipe_info[i].ar_pci = ar_pci;
                tasklet_init(&ar_pci->pipe_info[i].intr,
                             ath10k_pci_ce_tasklet,
                             (unsigned long)&ar_pci->pipe_info[i]);
        }

        if (!test_bit(ATH10K_PCI_FEATURE_MSI_X, ar_pci->features))
                num = 1;

        if (num > 1) {
                ret = ath10k_pci_start_intr_msix(ar, num);
                if (ret == 0)
                        goto exit;

                ath10k_warn("MSI-X didn't succeed (%d), trying MSI\n", ret);
                num = 1;
        }

        if (num == 1) {
                ret = ath10k_pci_start_intr_msi(ar);
                if (ret == 0)
                        goto exit;

                ath10k_warn("MSI didn't succeed (%d), trying legacy INTR\n",
                            ret);
                num = 0;
        }

        ret = ath10k_pci_start_intr_legacy(ar);

exit:
        ar_pci->num_msi_intrs = num;
        ar_pci->ce_count = CE_COUNT;
        return ret;
}

static void ath10k_pci_stop_intr(struct ath10k *ar)
{
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        int i;

        /* There's at least one interrupt regardless of whether it's legacy
         * INTR, MSI or MSI-X */
        for (i = 0; i < max(1, ar_pci->num_msi_intrs); i++)
                free_irq(ar_pci->pdev->irq + i, ar);

        if (ar_pci->num_msi_intrs > 0)
                pci_disable_msi(ar_pci->pdev);
}

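/*
 * Wait for the target to report FW_IND_INITIALIZED, polling in 10 ms
 * steps for up to ~3 seconds. With legacy interrupts the CORE_BASE
 * interrupt enable write is repeated on every iteration to paper over
 * the race described in ath10k_pci_start_intr_legacy().
 */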
static int ath10k_pci_reset_target(struct ath10k *ar)
{
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        int wait_limit = 300; /* 3 sec */

        /* Wait for Target to finish initialization before we proceed. */
        iowrite32(PCIE_SOC_WAKE_V_MASK,
                  ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS +
                  PCIE_SOC_WAKE_ADDRESS);

        ath10k_pci_wait(ar);

        while (wait_limit-- &&
               !(ioread32(ar_pci->mem + FW_INDICATOR_ADDRESS) &
                 FW_IND_INITIALIZED)) {
                if (ar_pci->num_msi_intrs == 0)
                        /* Fix potential race by repeating CORE_BASE writes */
                        iowrite32(PCIE_INTR_FIRMWARE_MASK |
                                  PCIE_INTR_CE_MASK_ALL,
                                  ar_pci->mem + (SOC_CORE_BASE_ADDRESS |
                                                 PCIE_INTR_ENABLE_ADDRESS));
                mdelay(10);
        }

        if (wait_limit < 0) {
                ath10k_err("Target stalled\n");
                iowrite32(PCIE_SOC_WAKE_RESET,
                          ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS +
                          PCIE_SOC_WAKE_ADDRESS);
                return -EIO;
        }

        iowrite32(PCIE_SOC_WAKE_RESET,
                  ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS +
                  PCIE_SOC_WAKE_ADDRESS);

        return 0;
}

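/*
 * Cold-reset the SoC (including its PCIe core) by toggling bit 0 of
 * SOC_GLOBAL_RESET while watching RTC_STATE for the cold-reset
 * indication, with the target held awake for the duration.
 */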
static void ath10k_pci_device_reset(struct ath10k *ar)
{
        int i;
        u32 val;

        if (!SOC_GLOBAL_RESET_ADDRESS)
                return;

        ath10k_pci_reg_write32(ar, PCIE_SOC_WAKE_ADDRESS,
                               PCIE_SOC_WAKE_V_MASK);
        for (i = 0; i < ATH_PCI_RESET_WAIT_MAX; i++) {
                if (ath10k_pci_target_is_awake(ar))
                        break;
                msleep(1);
        }

        /* Put Target, including PCIe, into RESET. */
        val = ath10k_pci_reg_read32(ar, SOC_GLOBAL_RESET_ADDRESS);
        val |= 1;
        ath10k_pci_reg_write32(ar, SOC_GLOBAL_RESET_ADDRESS, val);

        for (i = 0; i < ATH_PCI_RESET_WAIT_MAX; i++) {
                if (ath10k_pci_reg_read32(ar, RTC_STATE_ADDRESS) &
                                          RTC_STATE_COLD_RESET_MASK)
                        break;
                msleep(1);
        }

        /* Pull Target, including PCIe, out of RESET. */
        val &= ~1;
        ath10k_pci_reg_write32(ar, SOC_GLOBAL_RESET_ADDRESS, val);

        for (i = 0; i < ATH_PCI_RESET_WAIT_MAX; i++) {
                if (!(ath10k_pci_reg_read32(ar, RTC_STATE_ADDRESS) &
                                            RTC_STATE_COLD_RESET_MASK))
                        break;
                msleep(1);
        }

        ath10k_pci_reg_write32(ar, PCIE_SOC_WAKE_ADDRESS, PCIE_SOC_WAKE_RESET);
}

static void ath10k_pci_dump_features(struct ath10k_pci *ar_pci)
{
        int i;

        for (i = 0; i < ATH10K_PCI_FEATURE_COUNT; i++) {
                if (!test_bit(i, ar_pci->features))
                        continue;

                switch (i) {
                case ATH10K_PCI_FEATURE_MSI_X:
                        ath10k_dbg(ATH10K_DBG_PCI, "device supports MSI-X\n");
                        break;
                case ATH10K_PCI_FEATURE_SOC_POWER_SAVE:
                        ath10k_dbg(ATH10K_DBG_PCI, "QCA98XX SoC power save enabled\n");
                        break;
                }
        }
}

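/*
 * Probe sequence: allocate the per-device state, create the ath10k
 * core, claim and map the device BAR (BAR_NUM), restrict DMA to 32
 * bits, temporarily disable ASPM, read the chip id and register with
 * the core.
 */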
static int ath10k_pci_probe(struct pci_dev *pdev,
                            const struct pci_device_id *pci_dev)
{
        void __iomem *mem;
        int ret = 0;
        struct ath10k *ar;
        struct ath10k_pci *ar_pci;
        u32 lcr_val, chip_id;

        ath10k_dbg(ATH10K_DBG_PCI, "%s\n", __func__);

        ar_pci = kzalloc(sizeof(*ar_pci), GFP_KERNEL);
        if (ar_pci == NULL)
                return -ENOMEM;

        ar_pci->pdev = pdev;
        ar_pci->dev = &pdev->dev;

        switch (pci_dev->device) {
        case QCA988X_2_0_DEVICE_ID:
                set_bit(ATH10K_PCI_FEATURE_MSI_X, ar_pci->features);
                break;
        default:
                ret = -ENODEV;
                ath10k_err("Unknown device ID: %d\n", pci_dev->device);
                goto err_ar_pci;
        }

        if (ath10k_target_ps)
                set_bit(ATH10K_PCI_FEATURE_SOC_POWER_SAVE, ar_pci->features);

        ath10k_pci_dump_features(ar_pci);

        ar = ath10k_core_create(ar_pci, ar_pci->dev, &ath10k_pci_hif_ops);
        if (!ar) {
                ath10k_err("ath10k_core_create failed!\n");
                ret = -EINVAL;
                goto err_ar_pci;
        }

        ar_pci->ar = ar;
        ar_pci->fw_indicator_address = FW_INDICATOR_ADDRESS;
        atomic_set(&ar_pci->keep_awake_count, 0);

        pci_set_drvdata(pdev, ar);

        /*
         * Without any knowledge of the Host, the Target may have been reset or
         * power cycled and its Config Space may no longer reflect the PCI
         * address space that was assigned earlier by the PCI infrastructure.
         * Refresh it now.
         */
        ret = pci_assign_resource(pdev, BAR_NUM);
        if (ret) {
                ath10k_err("cannot assign PCI space: %d\n", ret);
                goto err_ar;
        }

        ret = pci_enable_device(pdev);
        if (ret) {
                ath10k_err("cannot enable PCI device: %d\n", ret);
                goto err_ar;
        }

        /* Request MMIO resources */
        ret = pci_request_region(pdev, BAR_NUM, "ath");
        if (ret) {
                ath10k_err("PCI MMIO reservation error: %d\n", ret);
                goto err_device;
        }

        /*
         * Target structures have a limit of 32 bit DMA pointers.
         * DMA pointers can be wider than 32 bits by default on some systems.
         */
        ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
        if (ret) {
                ath10k_err("32-bit DMA not available: %d\n", ret);
                goto err_region;
        }

        ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
        if (ret) {
                ath10k_err("cannot enable 32-bit consistent DMA\n");
                goto err_region;
        }

        /* Set bus master bit in PCI_COMMAND to enable DMA */
        pci_set_master(pdev);

        /*
         * Temporary FIX: disable ASPM
         * Will be removed after the OTP is programmed
         */
        pci_read_config_dword(pdev, 0x80, &lcr_val);
        pci_write_config_dword(pdev, 0x80, (lcr_val & 0xffffff00));

        /* Arrange for access to Target SoC registers. */
        mem = pci_iomap(pdev, BAR_NUM, 0);
        if (!mem) {
                ath10k_err("PCI iomap error\n");
                ret = -EIO;
                goto err_master;
        }

        ar_pci->mem = mem;

        spin_lock_init(&ar_pci->ce_lock);

        ret = ath10k_do_pci_wake(ar);
        if (ret) {
                ath10k_err("Failed to get chip id: %d\n", ret);
                /* unwind instead of leaking the mapping and PCI state */
                goto err_iomap;
        }

        chip_id = ath10k_pci_read32(ar,
                                    RTC_SOC_BASE_ADDRESS + SOC_CHIP_ID_ADDRESS);

        ath10k_do_pci_sleep(ar);

        ret = ath10k_core_register(ar, chip_id);
        if (ret) {
                ath10k_err("could not register driver core (%d)\n", ret);
                goto err_iomap;
        }

        return 0;

err_iomap:
        pci_iounmap(pdev, mem);
err_master:
        pci_clear_master(pdev);
err_region:
        pci_release_region(pdev, BAR_NUM);
err_device:
        pci_disable_device(pdev);
err_ar:
        pci_set_drvdata(pdev, NULL);
        ath10k_core_destroy(ar);
err_ar_pci:
        /* call HIF PCI free here */
        kfree(ar_pci);

        return ret;
}

static void ath10k_pci_remove(struct pci_dev *pdev)
{
        struct ath10k *ar = pci_get_drvdata(pdev);
        struct ath10k_pci *ar_pci;

        ath10k_dbg(ATH10K_DBG_PCI, "%s\n", __func__);

        if (!ar)
                return;

        ar_pci = ath10k_pci_priv(ar);

        if (!ar_pci)
                return;

        tasklet_kill(&ar_pci->msi_fw_err);

        ath10k_core_unregister(ar);

        pci_set_drvdata(pdev, NULL);
        pci_iounmap(pdev, ar_pci->mem);
        pci_release_region(pdev, BAR_NUM);
        pci_clear_master(pdev);
        pci_disable_device(pdev);

        ath10k_core_destroy(ar);
        kfree(ar_pci);
}

MODULE_DEVICE_TABLE(pci, ath10k_pci_id_table);

static struct pci_driver ath10k_pci_driver = {
        .name = "ath10k_pci",
        .id_table = ath10k_pci_id_table,
        .probe = ath10k_pci_probe,
        .remove = ath10k_pci_remove,
};

static int __init ath10k_pci_init(void)
{
        int ret;

        ret = pci_register_driver(&ath10k_pci_driver);
        if (ret)
                ath10k_err("pci_register_driver failed [%d]\n", ret);

        return ret;
}
module_init(ath10k_pci_init);

static void __exit ath10k_pci_exit(void)
{
        pci_unregister_driver(&ath10k_pci_driver);
}

module_exit(ath10k_pci_exit);

MODULE_AUTHOR("Qualcomm Atheros");
MODULE_DESCRIPTION("Driver support for Atheros QCA988X PCIe devices");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" QCA988X_HW_2_0_FW_FILE);
MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" QCA988X_HW_2_0_OTP_FILE);
MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" QCA988X_HW_2_0_BOARD_DATA_FILE);