ath10k: add boot debug messages to pci.c and ce.c
drivers/net/wireless/ath/ath10k/pci.c
/*
 * Copyright (c) 2005-2011 Atheros Communications Inc.
 * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <linux/pci.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>

#include "core.h"
#include "debug.h"

#include "targaddrs.h"
#include "bmi.h"

#include "hif.h"
#include "htc.h"

#include "ce.h"
#include "pci.h"

static unsigned int ath10k_target_ps;
module_param(ath10k_target_ps, uint, 0644);
MODULE_PARM_DESC(ath10k_target_ps, "Enable ath10k Target (SoC) PS option");
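
/*
 * Usage note (illustrative, not part of this file): assuming the PCI part
 * of the driver is built as ath10k_pci.ko, the option above can be set at
 * load time or toggled later through sysfs (the 0644 mode permits root
 * writes):
 *
 *   modprobe ath10k_pci ath10k_target_ps=1
 *   echo 1 > /sys/module/ath10k_pci/parameters/ath10k_target_ps
 */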

#define QCA988X_2_0_DEVICE_ID   (0x003c)

static DEFINE_PCI_DEVICE_TABLE(ath10k_pci_id_table) = {
        { PCI_VDEVICE(ATHEROS, QCA988X_2_0_DEVICE_ID) }, /* PCI-E QCA988X V2 */
        {0}
};

static int ath10k_pci_diag_read_access(struct ath10k *ar, u32 address,
                                       u32 *data);

static void ath10k_pci_process_ce(struct ath10k *ar);
static int ath10k_pci_post_rx(struct ath10k *ar);
static int ath10k_pci_post_rx_pipe(struct ath10k_pci_pipe *pipe_info,
                                   int num);
static void ath10k_pci_rx_pipe_cleanup(struct ath10k_pci_pipe *pipe_info);
static void ath10k_pci_stop_ce(struct ath10k *ar);
static void ath10k_pci_device_reset(struct ath10k *ar);
static int ath10k_pci_reset_target(struct ath10k *ar);
static int ath10k_pci_start_intr(struct ath10k *ar);
static void ath10k_pci_stop_intr(struct ath10k *ar);

static const struct ce_attr host_ce_config_wlan[] = {
        /* CE0: host->target HTC control and raw streams */
        {
                .flags = CE_ATTR_FLAGS,
                .src_nentries = 16,
                .src_sz_max = 256,
                .dest_nentries = 0,
        },

        /* CE1: target->host HTT + HTC control */
        {
                .flags = CE_ATTR_FLAGS,
                .src_nentries = 0,
                .src_sz_max = 512,
                .dest_nentries = 512,
        },

        /* CE2: target->host WMI */
        {
                .flags = CE_ATTR_FLAGS,
                .src_nentries = 0,
                .src_sz_max = 2048,
                .dest_nentries = 32,
        },

        /* CE3: host->target WMI */
        {
                .flags = CE_ATTR_FLAGS,
                .src_nentries = 32,
                .src_sz_max = 2048,
                .dest_nentries = 0,
        },

        /* CE4: host->target HTT */
        {
                .flags = CE_ATTR_FLAGS | CE_ATTR_DIS_INTR,
                .src_nentries = CE_HTT_H2T_MSG_SRC_NENTRIES,
                .src_sz_max = 256,
                .dest_nentries = 0,
        },

        /* CE5: unused */
        {
                .flags = CE_ATTR_FLAGS,
                .src_nentries = 0,
                .src_sz_max = 0,
                .dest_nentries = 0,
        },

        /* CE6: target autonomous hif_memcpy */
        {
                .flags = CE_ATTR_FLAGS,
                .src_nentries = 0,
                .src_sz_max = 0,
                .dest_nentries = 0,
        },

        /* CE7: ce_diag, the Diagnostic Window */
        {
                .flags = CE_ATTR_FLAGS,
                .src_nentries = 2,
                .src_sz_max = DIAG_TRANSFER_LIMIT,
                .dest_nentries = 2,
        },
};

/* Target firmware's Copy Engine configuration. */
static const struct ce_pipe_config target_ce_config_wlan[] = {
        /* CE0: host->target HTC control and raw streams */
        {
                .pipenum = 0,
                .pipedir = PIPEDIR_OUT,
                .nentries = 32,
                .nbytes_max = 256,
                .flags = CE_ATTR_FLAGS,
                .reserved = 0,
        },

        /* CE1: target->host HTT + HTC control */
        {
                .pipenum = 1,
                .pipedir = PIPEDIR_IN,
                .nentries = 32,
                .nbytes_max = 512,
                .flags = CE_ATTR_FLAGS,
                .reserved = 0,
        },

        /* CE2: target->host WMI */
        {
                .pipenum = 2,
                .pipedir = PIPEDIR_IN,
                .nentries = 32,
                .nbytes_max = 2048,
                .flags = CE_ATTR_FLAGS,
                .reserved = 0,
        },

        /* CE3: host->target WMI */
        {
                .pipenum = 3,
                .pipedir = PIPEDIR_OUT,
                .nentries = 32,
                .nbytes_max = 2048,
                .flags = CE_ATTR_FLAGS,
                .reserved = 0,
        },

        /* CE4: host->target HTT */
        {
                .pipenum = 4,
                .pipedir = PIPEDIR_OUT,
                .nentries = 256,
                .nbytes_max = 256,
                .flags = CE_ATTR_FLAGS,
                .reserved = 0,
        },

        /* NB: 50% of src nentries, since tx has 2 frags */

        /* CE5: unused */
        {
                .pipenum = 5,
                .pipedir = PIPEDIR_OUT,
                .nentries = 32,
                .nbytes_max = 2048,
                .flags = CE_ATTR_FLAGS,
                .reserved = 0,
        },

        /* CE6: Reserved for target autonomous hif_memcpy */
        {
                .pipenum = 6,
                .pipedir = PIPEDIR_INOUT,
                .nentries = 32,
                .nbytes_max = 4096,
                .flags = CE_ATTR_FLAGS,
                .reserved = 0,
        },

        /* CE7 used only by Host */
};

/*
 * Diagnostic read/write access is provided for startup/config/debug usage.
 * Caller must guarantee proper alignment, when applicable, and single user
 * at any moment.
 */
static int ath10k_pci_diag_read_mem(struct ath10k *ar, u32 address, void *data,
                                    int nbytes)
{
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        int ret = 0;
        u32 buf;
        unsigned int completed_nbytes, orig_nbytes, remaining_bytes;
        unsigned int id;
        unsigned int flags;
        struct ath10k_ce_pipe *ce_diag;
        /* Host buffer address in CE space */
        u32 ce_data;
        dma_addr_t ce_data_base = 0;
        void *data_buf = NULL;
        int i;

        /*
         * This code cannot handle reads to non-memory space. Redirect to the
         * register read fn but preserve the multi word read capability of
         * this fn
         */
        if (address < DRAM_BASE_ADDRESS) {
                if (!IS_ALIGNED(address, 4) ||
                    !IS_ALIGNED((unsigned long)data, 4))
                        return -EIO;

                while ((nbytes >= 4) && ((ret = ath10k_pci_diag_read_access(
                                          ar, address, (u32 *)data)) == 0)) {
                        nbytes -= sizeof(u32);
                        address += sizeof(u32);
                        data += sizeof(u32);
                }
                return ret;
        }

        ce_diag = ar_pci->ce_diag;

        /*
         * Allocate a temporary bounce buffer to hold caller's data
         * to be DMA'ed from Target. This guarantees
         *   1) 4-byte alignment
         *   2) Buffer in DMA-able space
         */
        orig_nbytes = nbytes;
        data_buf = (unsigned char *)pci_alloc_consistent(ar_pci->pdev,
                                                         orig_nbytes,
                                                         &ce_data_base);

        if (!data_buf) {
                ret = -ENOMEM;
                goto done;
        }
        memset(data_buf, 0, orig_nbytes);

        remaining_bytes = orig_nbytes;
        ce_data = ce_data_base;
        while (remaining_bytes) {
                nbytes = min_t(unsigned int, remaining_bytes,
                               DIAG_TRANSFER_LIMIT);

                ret = ath10k_ce_recv_buf_enqueue(ce_diag, NULL, ce_data);
                if (ret != 0)
                        goto done;

                /* Request CE to send from Target(!) address to Host buffer */
                /*
                 * The address supplied by the caller is in the
                 * Target CPU virtual address space.
                 *
                 * In order to use this address with the diagnostic CE,
                 * convert it from Target CPU virtual address space
                 * to CE address space
                 */
                ath10k_pci_wake(ar);
                address = TARG_CPU_SPACE_TO_CE_SPACE(ar, ar_pci->mem,
                                                     address);
                ath10k_pci_sleep(ar);

                ret = ath10k_ce_send(ce_diag, NULL, (u32)address, nbytes, 0,
                                     0);
                if (ret)
                        goto done;

                i = 0;
                while (ath10k_ce_completed_send_next(ce_diag, NULL, &buf,
                                                     &completed_nbytes,
                                                     &id) != 0) {
                        mdelay(1);
                        if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) {
                                ret = -EBUSY;
                                goto done;
                        }
                }

                if (nbytes != completed_nbytes) {
                        ret = -EIO;
                        goto done;
                }

                if (buf != (u32) address) {
                        ret = -EIO;
                        goto done;
                }

                i = 0;
                while (ath10k_ce_completed_recv_next(ce_diag, NULL, &buf,
                                                     &completed_nbytes,
                                                     &id, &flags) != 0) {
                        mdelay(1);

                        if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) {
                                ret = -EBUSY;
                                goto done;
                        }
                }

                if (nbytes != completed_nbytes) {
                        ret = -EIO;
                        goto done;
                }

                if (buf != ce_data) {
                        ret = -EIO;
                        goto done;
                }

                remaining_bytes -= nbytes;
                address += nbytes;
                ce_data += nbytes;
        }

done:
        if (ret == 0) {
                /* Copy data from allocated DMA buf to caller's buf */
                WARN_ON_ONCE(orig_nbytes & 3);
                for (i = 0; i < orig_nbytes / sizeof(__le32); i++) {
                        ((u32 *)data)[i] =
                                __le32_to_cpu(((__le32 *)data_buf)[i]);
                }
        } else {
                ath10k_dbg(ATH10K_DBG_PCI, "%s failure (0x%x)\n",
                           __func__, address);
        }

        if (data_buf)
                pci_free_consistent(ar_pci->pdev, orig_nbytes,
                                    data_buf, ce_data_base);

        return ret;
}
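
/*
 * Usage sketch (illustrative only, not called from this file): reading a
 * 32-bit word from target DRAM through the diagnostic CE. The address and
 * caller buffer are assumed to satisfy the alignment contract documented
 * above ath10k_pci_diag_read_mem().
 *
 *   u32 val;
 *   int ret;
 *
 *   ret = ath10k_pci_diag_read_mem(ar, DRAM_BASE_ADDRESS, &val,
 *                                  sizeof(val));
 *   if (ret)
 *       ath10k_warn("diag read failed (%d)\n", ret);
 */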

/* Read 4-byte aligned data from Target memory or register */
static int ath10k_pci_diag_read_access(struct ath10k *ar, u32 address,
                                       u32 *data)
{
        /* Assume range doesn't cross this boundary */
        if (address >= DRAM_BASE_ADDRESS)
                return ath10k_pci_diag_read_mem(ar, address, data, sizeof(u32));

        ath10k_pci_wake(ar);
        *data = ath10k_pci_read32(ar, address);
        ath10k_pci_sleep(ar);
        return 0;
}

static int ath10k_pci_diag_write_mem(struct ath10k *ar, u32 address,
                                     const void *data, int nbytes)
{
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        int ret = 0;
        u32 buf;
        unsigned int completed_nbytes, orig_nbytes, remaining_bytes;
        unsigned int id;
        unsigned int flags;
        struct ath10k_ce_pipe *ce_diag;
        void *data_buf = NULL;
        u32 ce_data;    /* Host buffer address in CE space */
        dma_addr_t ce_data_base = 0;
        int i;

        ce_diag = ar_pci->ce_diag;

        /*
         * Allocate a temporary bounce buffer to hold caller's data
         * to be DMA'ed to Target. This guarantees
         *   1) 4-byte alignment
         *   2) Buffer in DMA-able space
         */
        orig_nbytes = nbytes;
        data_buf = (unsigned char *)pci_alloc_consistent(ar_pci->pdev,
                                                         orig_nbytes,
                                                         &ce_data_base);
        if (!data_buf) {
                ret = -ENOMEM;
                goto done;
        }

        /* Copy caller's data to allocated DMA buf */
        WARN_ON_ONCE(orig_nbytes & 3);
        for (i = 0; i < orig_nbytes / sizeof(__le32); i++)
                ((__le32 *)data_buf)[i] = __cpu_to_le32(((u32 *)data)[i]);

        /*
         * The address supplied by the caller is in the
         * Target CPU virtual address space.
         *
         * In order to use this address with the diagnostic CE,
         * convert it from
         *    Target CPU virtual address space
         * to
         *    CE address space
         */
        ath10k_pci_wake(ar);
        address = TARG_CPU_SPACE_TO_CE_SPACE(ar, ar_pci->mem, address);
        ath10k_pci_sleep(ar);

        remaining_bytes = orig_nbytes;
        ce_data = ce_data_base;
        while (remaining_bytes) {
                /* FIXME: check cast */
                nbytes = min_t(int, remaining_bytes, DIAG_TRANSFER_LIMIT);

                /* Set up to receive directly into Target(!) address */
                ret = ath10k_ce_recv_buf_enqueue(ce_diag, NULL, address);
                if (ret != 0)
                        goto done;

                /*
                 * Request CE to send caller-supplied data that
                 * was copied to bounce buffer to Target(!) address.
                 */
                ret = ath10k_ce_send(ce_diag, NULL, (u32) ce_data,
                                     nbytes, 0, 0);
                if (ret != 0)
                        goto done;

                i = 0;
                while (ath10k_ce_completed_send_next(ce_diag, NULL, &buf,
                                                     &completed_nbytes,
                                                     &id) != 0) {
                        mdelay(1);

                        if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) {
                                ret = -EBUSY;
                                goto done;
                        }
                }

                if (nbytes != completed_nbytes) {
                        ret = -EIO;
                        goto done;
                }

                if (buf != ce_data) {
                        ret = -EIO;
                        goto done;
                }

                i = 0;
                while (ath10k_ce_completed_recv_next(ce_diag, NULL, &buf,
                                                     &completed_nbytes,
                                                     &id, &flags) != 0) {
                        mdelay(1);

                        if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) {
                                ret = -EBUSY;
                                goto done;
                        }
                }

                if (nbytes != completed_nbytes) {
                        ret = -EIO;
                        goto done;
                }

                if (buf != address) {
                        ret = -EIO;
                        goto done;
                }

                remaining_bytes -= nbytes;
                address += nbytes;
                ce_data += nbytes;
        }

done:
        if (data_buf) {
                pci_free_consistent(ar_pci->pdev, orig_nbytes, data_buf,
                                    ce_data_base);
        }

        if (ret != 0)
                ath10k_dbg(ATH10K_DBG_PCI, "%s failure (0x%x)\n", __func__,
                           address);

        return ret;
}

/* Write 4-byte aligned data to Target memory or register */
static int ath10k_pci_diag_write_access(struct ath10k *ar, u32 address,
                                        u32 data)
{
        /* Assume range doesn't cross this boundary */
        if (address >= DRAM_BASE_ADDRESS)
                return ath10k_pci_diag_write_mem(ar, address, &data,
                                                 sizeof(u32));

        ath10k_pci_wake(ar);
        ath10k_pci_write32(ar, address, data);
        ath10k_pci_sleep(ar);
        return 0;
}

static bool ath10k_pci_target_is_awake(struct ath10k *ar)
{
        void __iomem *mem = ath10k_pci_priv(ar)->mem;
        u32 val;
        val = ioread32(mem + PCIE_LOCAL_BASE_ADDRESS +
                       RTC_STATE_ADDRESS);
        return (RTC_STATE_V_GET(val) == RTC_STATE_V_ON);
}

static void ath10k_pci_wait(struct ath10k *ar)
{
        int n = 100;

        while (n-- && !ath10k_pci_target_is_awake(ar))
                msleep(10);

        if (n < 0)
                ath10k_warn("Unable to wake up target\n");
}

int ath10k_do_pci_wake(struct ath10k *ar)
{
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        void __iomem *pci_addr = ar_pci->mem;
        int tot_delay = 0;
        int curr_delay = 5;

        if (atomic_read(&ar_pci->keep_awake_count) == 0) {
                /* Force AWAKE */
                iowrite32(PCIE_SOC_WAKE_V_MASK,
                          pci_addr + PCIE_LOCAL_BASE_ADDRESS +
                          PCIE_SOC_WAKE_ADDRESS);
        }
        atomic_inc(&ar_pci->keep_awake_count);

        if (ar_pci->verified_awake)
                return 0;

        for (;;) {
                if (ath10k_pci_target_is_awake(ar)) {
                        ar_pci->verified_awake = true;
                        return 0;
                }

                if (tot_delay > PCIE_WAKE_TIMEOUT) {
                        ath10k_warn("target took longer than %d us to wake up (awake count %d)\n",
                                    PCIE_WAKE_TIMEOUT,
                                    atomic_read(&ar_pci->keep_awake_count));
                        return -ETIMEDOUT;
                }

                udelay(curr_delay);
                tot_delay += curr_delay;

                if (curr_delay < 50)
                        curr_delay += 5;
        }
}

void ath10k_do_pci_sleep(struct ath10k *ar)
{
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        void __iomem *pci_addr = ar_pci->mem;

        if (atomic_dec_and_test(&ar_pci->keep_awake_count)) {
                /* Allow sleep */
                ar_pci->verified_awake = false;
                iowrite32(PCIE_SOC_WAKE_RESET,
                          pci_addr + PCIE_LOCAL_BASE_ADDRESS +
                          PCIE_SOC_WAKE_ADDRESS);
        }
}
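
/*
 * The pair above is reference counted through keep_awake_count: every
 * successful ath10k_do_pci_wake() must be balanced by exactly one
 * ath10k_do_pci_sleep(). The intended bracketing around MMIO access (a
 * sketch; ath10k_pci_wake()/ath10k_pci_sleep() are presumed to be thin
 * wrappers from pci.h that honour the ath10k_target_ps option):
 *
 *   ath10k_pci_wake(ar);
 *   val = ath10k_pci_read32(ar, address);
 *   ath10k_pci_sleep(ar);
 */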

/*
 * FIXME: Handle OOM properly.
 */
static inline
struct ath10k_pci_compl *get_free_compl(struct ath10k_pci_pipe *pipe_info)
{
        struct ath10k_pci_compl *compl = NULL;

        spin_lock_bh(&pipe_info->pipe_lock);
        if (list_empty(&pipe_info->compl_free)) {
                ath10k_warn("Completion buffers are full\n");
                goto exit;
        }
        compl = list_first_entry(&pipe_info->compl_free,
                                 struct ath10k_pci_compl, list);
        list_del(&compl->list);
exit:
        spin_unlock_bh(&pipe_info->pipe_lock);
        return compl;
}

/* Called by lower (CE) layer when a send to Target completes. */
static void ath10k_pci_ce_send_done(struct ath10k_ce_pipe *ce_state)
{
        struct ath10k *ar = ce_state->ar;
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        struct ath10k_pci_pipe *pipe_info = &ar_pci->pipe_info[ce_state->id];
        struct ath10k_pci_compl *compl;
        void *transfer_context;
        u32 ce_data;
        unsigned int nbytes;
        unsigned int transfer_id;

        while (ath10k_ce_completed_send_next(ce_state, &transfer_context,
                                             &ce_data, &nbytes,
                                             &transfer_id) == 0) {
                /*
                 * For the send completion of an item in sendlist, just
                 * increment num_sends_allowed. The upper layer callback will
                 * be triggered when last fragment is done with send.
                 */
                if (transfer_context == CE_SENDLIST_ITEM_CTXT) {
                        spin_lock_bh(&pipe_info->pipe_lock);
                        pipe_info->num_sends_allowed++;
                        spin_unlock_bh(&pipe_info->pipe_lock);
                        continue;
                }

                compl = get_free_compl(pipe_info);
                if (!compl)
                        break;

                compl->state = ATH10K_PCI_COMPL_SEND;
                compl->ce_state = ce_state;
                compl->pipe_info = pipe_info;
                compl->skb = transfer_context;
                compl->nbytes = nbytes;
                compl->transfer_id = transfer_id;
                compl->flags = 0;

                /*
                 * Add the completion to the processing queue.
                 */
                spin_lock_bh(&ar_pci->compl_lock);
                list_add_tail(&compl->list, &ar_pci->compl_process);
                spin_unlock_bh(&ar_pci->compl_lock);
        }

        ath10k_pci_process_ce(ar);
}

/* Called by lower (CE) layer when data is received from the Target. */
static void ath10k_pci_ce_recv_data(struct ath10k_ce_pipe *ce_state)
{
        struct ath10k *ar = ce_state->ar;
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        struct ath10k_pci_pipe *pipe_info = &ar_pci->pipe_info[ce_state->id];
        struct ath10k_pci_compl *compl;
        struct sk_buff *skb;
        void *transfer_context;
        u32 ce_data;
        unsigned int nbytes;
        unsigned int transfer_id;
        unsigned int flags;

        while (ath10k_ce_completed_recv_next(ce_state, &transfer_context,
                                             &ce_data, &nbytes, &transfer_id,
                                             &flags) == 0) {
                compl = get_free_compl(pipe_info);
                if (!compl)
                        break;

                compl->state = ATH10K_PCI_COMPL_RECV;
                compl->ce_state = ce_state;
                compl->pipe_info = pipe_info;
                compl->skb = transfer_context;
                compl->nbytes = nbytes;
                compl->transfer_id = transfer_id;
                compl->flags = flags;

                skb = transfer_context;
                dma_unmap_single(ar->dev, ATH10K_SKB_CB(skb)->paddr,
                                 skb->len + skb_tailroom(skb),
                                 DMA_FROM_DEVICE);
                /*
                 * Add the completion to the processing queue.
                 */
                spin_lock_bh(&ar_pci->compl_lock);
                list_add_tail(&compl->list, &ar_pci->compl_process);
                spin_unlock_bh(&ar_pci->compl_lock);
        }

        ath10k_pci_process_ce(ar);
}

/* Send the first nbytes bytes of the buffer */
static int ath10k_pci_hif_send_head(struct ath10k *ar, u8 pipe_id,
                                    unsigned int transfer_id,
                                    unsigned int bytes, struct sk_buff *nbuf)
{
        struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(nbuf);
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        struct ath10k_pci_pipe *pipe_info = &(ar_pci->pipe_info[pipe_id]);
        struct ath10k_ce_pipe *ce_hdl = pipe_info->ce_hdl;
        struct ce_sendlist sendlist;
        unsigned int len;
        u32 flags = 0;
        int ret;

        memset(&sendlist, 0, sizeof(struct ce_sendlist));

        len = min(bytes, nbuf->len);
        bytes -= len;

        if (len & 3)
                ath10k_warn("skb not aligned to 4-byte boundary (%d)\n", len);

        ath10k_dbg(ATH10K_DBG_PCI,
                   "pci send data vaddr %p paddr 0x%llx len %d as %d bytes\n",
                   nbuf->data, (unsigned long long) skb_cb->paddr,
                   nbuf->len, len);
        ath10k_dbg_dump(ATH10K_DBG_PCI_DUMP, NULL,
                        "ath10k tx: data: ",
                        nbuf->data, nbuf->len);

        ath10k_ce_sendlist_buf_add(&sendlist, skb_cb->paddr, len, flags);

        /* Make sure we have resources to handle this request */
        spin_lock_bh(&pipe_info->pipe_lock);
        if (!pipe_info->num_sends_allowed) {
                ath10k_warn("Pipe: %d is full\n", pipe_id);
                spin_unlock_bh(&pipe_info->pipe_lock);
                return -ENOSR;
        }
        pipe_info->num_sends_allowed--;
        spin_unlock_bh(&pipe_info->pipe_lock);

        ret = ath10k_ce_sendlist_send(ce_hdl, nbuf, &sendlist, transfer_id);
        if (ret)
                ath10k_warn("CE send failed: %p\n", nbuf);

        return ret;
}

static u16 ath10k_pci_hif_get_free_queue_number(struct ath10k *ar, u8 pipe)
{
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        struct ath10k_pci_pipe *pipe_info = &(ar_pci->pipe_info[pipe]);
        int ret;

        spin_lock_bh(&pipe_info->pipe_lock);
        ret = pipe_info->num_sends_allowed;
        spin_unlock_bh(&pipe_info->pipe_lock);

        return ret;
}

static void ath10k_pci_hif_dump_area(struct ath10k *ar)
{
        u32 reg_dump_area = 0;
        u32 reg_dump_values[REG_DUMP_COUNT_QCA988X] = {};
        u32 host_addr;
        int ret;
        u32 i;

        ath10k_err("firmware crashed!\n");
        ath10k_err("hardware name %s version 0x%x\n",
                   ar->hw_params.name, ar->target_version);
        ath10k_err("firmware version: %u.%u.%u.%u\n", ar->fw_version_major,
                   ar->fw_version_minor, ar->fw_version_release,
                   ar->fw_version_build);

        host_addr = host_interest_item_address(HI_ITEM(hi_failure_state));
        if (ath10k_pci_diag_read_mem(ar, host_addr,
                                     &reg_dump_area, sizeof(u32)) != 0) {
                ath10k_warn("could not read hi_failure_state\n");
                return;
        }

        ath10k_err("target register dump location: 0x%08X\n", reg_dump_area);

        ret = ath10k_pci_diag_read_mem(ar, reg_dump_area,
                                       &reg_dump_values[0],
                                       REG_DUMP_COUNT_QCA988X * sizeof(u32));
        if (ret != 0) {
                ath10k_err("could not dump firmware register area\n");
                return;
        }

        BUILD_BUG_ON(REG_DUMP_COUNT_QCA988X % 4);

        ath10k_err("target register dump:\n");
        for (i = 0; i < REG_DUMP_COUNT_QCA988X; i += 4)
                ath10k_err("[%02d]: 0x%08X 0x%08X 0x%08X 0x%08X\n",
                           i,
                           reg_dump_values[i],
                           reg_dump_values[i + 1],
                           reg_dump_values[i + 2],
                           reg_dump_values[i + 3]);

        ieee80211_queue_work(ar->hw, &ar->restart_work);
}

static void ath10k_pci_hif_send_complete_check(struct ath10k *ar, u8 pipe,
                                               int force)
{
        if (!force) {
                int resources;
                /*
                 * Decide whether to actually poll for completions, or just
                 * wait for a later chance.
                 * If there seem to be plenty of resources left, then just wait
                 * since checking involves reading a CE register, which is a
                 * relatively expensive operation.
                 */
                resources = ath10k_pci_hif_get_free_queue_number(ar, pipe);

                /*
                 * If at least 50% of the total resources are still available,
                 * don't bother checking again yet.
                 */
                if (resources > (host_ce_config_wlan[pipe].src_nentries >> 1))
                        return;
        }
        ath10k_ce_per_engine_service(ar, pipe);
}
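
/*
 * Worked example of the 50% heuristic above: CE3 (host->target WMI) has
 * src_nentries = 32 in host_ce_config_wlan, so a non-forced check is
 * skipped as long as more than 16 send slots are still free.
 */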

static void ath10k_pci_hif_set_callbacks(struct ath10k *ar,
                                         struct ath10k_hif_cb *callbacks)
{
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

        ath10k_dbg(ATH10K_DBG_PCI, "%s\n", __func__);

        memcpy(&ar_pci->msg_callbacks_current, callbacks,
               sizeof(ar_pci->msg_callbacks_current));
}

static int ath10k_pci_start_ce(struct ath10k *ar)
{
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        struct ath10k_ce_pipe *ce_diag = ar_pci->ce_diag;
        const struct ce_attr *attr;
        struct ath10k_pci_pipe *pipe_info;
        struct ath10k_pci_compl *compl;
        int i, pipe_num, completions, disable_interrupts;

        spin_lock_init(&ar_pci->compl_lock);
        INIT_LIST_HEAD(&ar_pci->compl_process);

        for (pipe_num = 0; pipe_num < ar_pci->ce_count; pipe_num++) {
                pipe_info = &ar_pci->pipe_info[pipe_num];

                spin_lock_init(&pipe_info->pipe_lock);
                INIT_LIST_HEAD(&pipe_info->compl_free);

                /* Handle Diagnostic CE specially */
                if (pipe_info->ce_hdl == ce_diag)
                        continue;

                attr = &host_ce_config_wlan[pipe_num];
                completions = 0;

                if (attr->src_nentries) {
                        disable_interrupts = attr->flags & CE_ATTR_DIS_INTR;
                        ath10k_ce_send_cb_register(pipe_info->ce_hdl,
                                                   ath10k_pci_ce_send_done,
                                                   disable_interrupts);
                        completions += attr->src_nentries;
                        pipe_info->num_sends_allowed = attr->src_nentries - 1;
                }

                if (attr->dest_nentries) {
                        ath10k_ce_recv_cb_register(pipe_info->ce_hdl,
                                                   ath10k_pci_ce_recv_data);
                        completions += attr->dest_nentries;
                }

                if (completions == 0)
                        continue;

                for (i = 0; i < completions; i++) {
                        compl = kmalloc(sizeof(*compl), GFP_KERNEL);
                        if (!compl) {
                                ath10k_warn("No memory for completion state\n");
                                ath10k_pci_stop_ce(ar);
                                return -ENOMEM;
                        }

                        compl->state = ATH10K_PCI_COMPL_FREE;
                        list_add_tail(&compl->list, &pipe_info->compl_free);
                }
        }

        return 0;
}

static void ath10k_pci_stop_ce(struct ath10k *ar)
{
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        struct ath10k_pci_compl *compl;
        struct sk_buff *skb;
        int i;

        ath10k_ce_disable_interrupts(ar);

        /* Cancel the pending tasklet */
        tasklet_kill(&ar_pci->intr_tq);

        for (i = 0; i < CE_COUNT; i++)
                tasklet_kill(&ar_pci->pipe_info[i].intr);

        /* Mark pending completions as aborted, so that upper layers free up
         * their associated resources */
        spin_lock_bh(&ar_pci->compl_lock);
        list_for_each_entry(compl, &ar_pci->compl_process, list) {
                skb = compl->skb;
                ATH10K_SKB_CB(skb)->is_aborted = true;
        }
        spin_unlock_bh(&ar_pci->compl_lock);
}

static void ath10k_pci_cleanup_ce(struct ath10k *ar)
{
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        struct ath10k_pci_compl *compl, *tmp;
        struct ath10k_pci_pipe *pipe_info;
        struct sk_buff *netbuf;
        int pipe_num;

        /* Free pending completions. */
        spin_lock_bh(&ar_pci->compl_lock);
        if (!list_empty(&ar_pci->compl_process))
                ath10k_warn("pending completions still present! possible memory leaks.\n");

        list_for_each_entry_safe(compl, tmp, &ar_pci->compl_process, list) {
                list_del(&compl->list);
                netbuf = compl->skb;
                dev_kfree_skb_any(netbuf);
                kfree(compl);
        }
        spin_unlock_bh(&ar_pci->compl_lock);

        /* Free unused completions for each pipe. */
        for (pipe_num = 0; pipe_num < ar_pci->ce_count; pipe_num++) {
                pipe_info = &ar_pci->pipe_info[pipe_num];

                spin_lock_bh(&pipe_info->pipe_lock);
                list_for_each_entry_safe(compl, tmp,
                                         &pipe_info->compl_free, list) {
                        list_del(&compl->list);
                        kfree(compl);
                }
                spin_unlock_bh(&pipe_info->pipe_lock);
        }
}

static void ath10k_pci_process_ce(struct ath10k *ar)
{
        struct ath10k_pci *ar_pci = ar->hif.priv;
        struct ath10k_hif_cb *cb = &ar_pci->msg_callbacks_current;
        struct ath10k_pci_compl *compl;
        struct sk_buff *skb;
        unsigned int nbytes;
        int ret, send_done = 0;

        /* Upper layers aren't ready to handle tx/rx completions in parallel so
         * we must serialize all completion processing. */

        spin_lock_bh(&ar_pci->compl_lock);
        if (ar_pci->compl_processing) {
                spin_unlock_bh(&ar_pci->compl_lock);
                return;
        }
        ar_pci->compl_processing = true;
        spin_unlock_bh(&ar_pci->compl_lock);

        for (;;) {
                spin_lock_bh(&ar_pci->compl_lock);
                if (list_empty(&ar_pci->compl_process)) {
                        spin_unlock_bh(&ar_pci->compl_lock);
                        break;
                }
                compl = list_first_entry(&ar_pci->compl_process,
                                         struct ath10k_pci_compl, list);
                list_del(&compl->list);
                spin_unlock_bh(&ar_pci->compl_lock);

                switch (compl->state) {
                case ATH10K_PCI_COMPL_SEND:
                        cb->tx_completion(ar,
                                          compl->skb,
                                          compl->transfer_id);
                        send_done = 1;
                        break;
                case ATH10K_PCI_COMPL_RECV:
                        ret = ath10k_pci_post_rx_pipe(compl->pipe_info, 1);
                        if (ret) {
                                ath10k_warn("Unable to post recv buffer for pipe: %d\n",
                                            compl->pipe_info->pipe_num);
                                break;
                        }

                        skb = compl->skb;
                        nbytes = compl->nbytes;

                        ath10k_dbg(ATH10K_DBG_PCI,
                                   "ath10k_pci_ce_recv_data netbuf=%p  nbytes=%d\n",
                                   skb, nbytes);
                        ath10k_dbg_dump(ATH10K_DBG_PCI_DUMP, NULL,
                                        "ath10k rx: ", skb->data, nbytes);

                        if (skb->len + skb_tailroom(skb) >= nbytes) {
                                skb_trim(skb, 0);
                                skb_put(skb, nbytes);
                                cb->rx_completion(ar, skb,
                                                  compl->pipe_info->pipe_num);
                        } else {
                                ath10k_warn("rxed more than expected (nbytes %d, max %d)\n",
                                            nbytes,
                                            skb->len + skb_tailroom(skb));
                        }
                        break;
                case ATH10K_PCI_COMPL_FREE:
                        ath10k_warn("free completion cannot be processed\n");
                        break;
                default:
                        ath10k_warn("invalid completion state (%d)\n",
                                    compl->state);
                        break;
                }

                compl->state = ATH10K_PCI_COMPL_FREE;

                /*
                 * Add completion back to the pipe's free list.
                 */
                spin_lock_bh(&compl->pipe_info->pipe_lock);
                list_add_tail(&compl->list, &compl->pipe_info->compl_free);
                compl->pipe_info->num_sends_allowed += send_done;
                spin_unlock_bh(&compl->pipe_info->pipe_lock);
        }

        spin_lock_bh(&ar_pci->compl_lock);
        ar_pci->compl_processing = false;
        spin_unlock_bh(&ar_pci->compl_lock);
}

/* TODO - temporary mapping while we have too few CE's */
static int ath10k_pci_hif_map_service_to_pipe(struct ath10k *ar,
                                              u16 service_id, u8 *ul_pipe,
                                              u8 *dl_pipe, int *ul_is_polled,
                                              int *dl_is_polled)
{
        int ret = 0;

        /* polling for received messages not supported */
        *dl_is_polled = 0;

        switch (service_id) {
        case ATH10K_HTC_SVC_ID_HTT_DATA_MSG:
                /*
                 * Host->target HTT gets its own pipe, so it can be polled
                 * while other pipes are interrupt driven.
                 */
                *ul_pipe = 4;
                /*
                 * Use the same target->host pipe for HTC ctrl, HTC raw
                 * streams, and HTT.
                 */
                *dl_pipe = 1;
                break;

        case ATH10K_HTC_SVC_ID_RSVD_CTRL:
        case ATH10K_HTC_SVC_ID_TEST_RAW_STREAMS:
                /*
                 * Note: HTC_RAW_STREAMS_SVC is currently unused, and
                 * HTC_CTRL_RSVD_SVC could share the same pipe as the
                 * WMI services.  So, if another CE is needed, change
                 * this to *ul_pipe = 3, which frees up CE 0.
                 */
                /* *ul_pipe = 3; */
                *ul_pipe = 0;
                *dl_pipe = 1;
                break;

        case ATH10K_HTC_SVC_ID_WMI_DATA_BK:
        case ATH10K_HTC_SVC_ID_WMI_DATA_BE:
        case ATH10K_HTC_SVC_ID_WMI_DATA_VI:
        case ATH10K_HTC_SVC_ID_WMI_DATA_VO:

        case ATH10K_HTC_SVC_ID_WMI_CONTROL:
                *ul_pipe = 3;
                *dl_pipe = 2;
                break;

                /* pipe 5 unused   */
                /* pipe 6 reserved */
                /* pipe 7 reserved */

        default:
                ret = -1;
                break;
        }
        *ul_is_polled =
                (host_ce_config_wlan[*ul_pipe].flags & CE_ATTR_DIS_INTR) != 0;

        return ret;
}
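
/*
 * Example (illustrative): resolving the WMI control service against the
 * mapping above yields CE 3 for uplink and CE 2 for downlink, both
 * interrupt driven since CE_ATTR_DIS_INTR is only set on CE 4:
 *
 *   u8 ul_pipe, dl_pipe;
 *   int ul_polled, dl_polled;
 *
 *   ath10k_pci_hif_map_service_to_pipe(ar, ATH10K_HTC_SVC_ID_WMI_CONTROL,
 *                                      &ul_pipe, &dl_pipe,
 *                                      &ul_polled, &dl_polled);
 *
 * leaves ul_pipe == 3, dl_pipe == 2, ul_polled == 0 and dl_polled == 0.
 */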

static void ath10k_pci_hif_get_default_pipe(struct ath10k *ar,
                                            u8 *ul_pipe, u8 *dl_pipe)
{
        int ul_is_polled, dl_is_polled;

        (void)ath10k_pci_hif_map_service_to_pipe(ar,
                                                 ATH10K_HTC_SVC_ID_RSVD_CTRL,
                                                 ul_pipe,
                                                 dl_pipe,
                                                 &ul_is_polled,
                                                 &dl_is_polled);
}

static int ath10k_pci_post_rx_pipe(struct ath10k_pci_pipe *pipe_info,
                                   int num)
{
        struct ath10k *ar = pipe_info->hif_ce_state;
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        struct ath10k_ce_pipe *ce_state = pipe_info->ce_hdl;
        struct sk_buff *skb;
        dma_addr_t ce_data;
        int i, ret = 0;

        if (pipe_info->buf_sz == 0)
                return 0;

        for (i = 0; i < num; i++) {
                skb = dev_alloc_skb(pipe_info->buf_sz);
                if (!skb) {
                        ath10k_warn("could not allocate skbuff for pipe %d\n",
                                    pipe_info->pipe_num);
                        ret = -ENOMEM;
                        goto err;
                }

                WARN_ONCE((unsigned long)skb->data & 3, "unaligned skb");

                ce_data = dma_map_single(ar->dev, skb->data,
                                         skb->len + skb_tailroom(skb),
                                         DMA_FROM_DEVICE);

                if (unlikely(dma_mapping_error(ar->dev, ce_data))) {
                        ath10k_warn("could not dma map skbuff\n");
                        dev_kfree_skb_any(skb);
                        ret = -EIO;
                        goto err;
                }

                ATH10K_SKB_CB(skb)->paddr = ce_data;

                pci_dma_sync_single_for_device(ar_pci->pdev, ce_data,
                                               pipe_info->buf_sz,
                                               PCI_DMA_FROMDEVICE);

                ret = ath10k_ce_recv_buf_enqueue(ce_state, (void *)skb,
                                                 ce_data);
                if (ret) {
                        ath10k_warn("could not enqueue to pipe %d (%d)\n",
                                    pipe_info->pipe_num, ret);
                        goto err;
                }
        }

        return ret;

err:
        ath10k_pci_rx_pipe_cleanup(pipe_info);
        return ret;
}

static int ath10k_pci_post_rx(struct ath10k *ar)
{
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        struct ath10k_pci_pipe *pipe_info;
        const struct ce_attr *attr;
        int pipe_num, ret = 0;

        for (pipe_num = 0; pipe_num < ar_pci->ce_count; pipe_num++) {
                pipe_info = &ar_pci->pipe_info[pipe_num];
                attr = &host_ce_config_wlan[pipe_num];

                if (attr->dest_nentries == 0)
                        continue;

                ret = ath10k_pci_post_rx_pipe(pipe_info,
                                              attr->dest_nentries - 1);
                if (ret) {
                        ath10k_warn("Unable to replenish recv buffers for pipe: %d\n",
                                    pipe_num);

                        for (; pipe_num >= 0; pipe_num--) {
                                pipe_info = &ar_pci->pipe_info[pipe_num];
                                ath10k_pci_rx_pipe_cleanup(pipe_info);
                        }
                        return ret;
                }
        }

        return 0;
}

static int ath10k_pci_hif_start(struct ath10k *ar)
{
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        int ret;

        ret = ath10k_pci_start_ce(ar);
        if (ret) {
                ath10k_warn("could not start CE (%d)\n", ret);
                return ret;
        }

        /* Post buffers once to start things off. */
        ret = ath10k_pci_post_rx(ar);
        if (ret) {
                ath10k_warn("could not post rx pipes (%d)\n", ret);
                return ret;
        }

        ar_pci->started = 1;
        return 0;
}

static void ath10k_pci_rx_pipe_cleanup(struct ath10k_pci_pipe *pipe_info)
{
        struct ath10k *ar;
        struct ath10k_pci *ar_pci;
        struct ath10k_ce_pipe *ce_hdl;
        u32 buf_sz;
        struct sk_buff *netbuf;
        u32 ce_data;

        buf_sz = pipe_info->buf_sz;

        /* Unused Copy Engine */
        if (buf_sz == 0)
                return;

        ar = pipe_info->hif_ce_state;
        ar_pci = ath10k_pci_priv(ar);

        if (!ar_pci->started)
                return;

        ce_hdl = pipe_info->ce_hdl;

        while (ath10k_ce_revoke_recv_next(ce_hdl, (void **)&netbuf,
                                          &ce_data) == 0) {
                dma_unmap_single(ar->dev, ATH10K_SKB_CB(netbuf)->paddr,
                                 netbuf->len + skb_tailroom(netbuf),
                                 DMA_FROM_DEVICE);
                dev_kfree_skb_any(netbuf);
        }
}

static void ath10k_pci_tx_pipe_cleanup(struct ath10k_pci_pipe *pipe_info)
{
        struct ath10k *ar;
        struct ath10k_pci *ar_pci;
        struct ath10k_ce_pipe *ce_hdl;
        struct sk_buff *netbuf;
        u32 ce_data;
        unsigned int nbytes;
        unsigned int id;
        u32 buf_sz;

        buf_sz = pipe_info->buf_sz;

        /* Unused Copy Engine */
        if (buf_sz == 0)
                return;

        ar = pipe_info->hif_ce_state;
        ar_pci = ath10k_pci_priv(ar);

        if (!ar_pci->started)
                return;

        ce_hdl = pipe_info->ce_hdl;

        while (ath10k_ce_cancel_send_next(ce_hdl, (void **)&netbuf,
                                          &ce_data, &nbytes, &id) == 0) {
                if (netbuf != CE_SENDLIST_ITEM_CTXT) {
                        /*
                         * Indicate the completion to higher layer to free
                         * the buffer
                         */
                        ATH10K_SKB_CB(netbuf)->is_aborted = true;
                        ar_pci->msg_callbacks_current.tx_completion(ar,
                                                                    netbuf,
                                                                    id);
                }
        }
}
1317
1318 /*
1319  * Cleanup residual buffers for device shutdown:
1320  *    buffers that were enqueued for receive
1321  *    buffers that were to be sent
1322  * Note: Buffers that had completed but which were
1323  * not yet processed are on a completion queue. They
1324  * are handled when the completion thread shuts down.
1325  */
1326 static void ath10k_pci_buffer_cleanup(struct ath10k *ar)
1327 {
1328         struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1329         int pipe_num;
1330
1331         for (pipe_num = 0; pipe_num < ar_pci->ce_count; pipe_num++) {
1332                 struct ath10k_pci_pipe *pipe_info;
1333
1334                 pipe_info = &ar_pci->pipe_info[pipe_num];
1335                 ath10k_pci_rx_pipe_cleanup(pipe_info);
1336                 ath10k_pci_tx_pipe_cleanup(pipe_info);
1337         }
1338 }
1339
1340 static void ath10k_pci_ce_deinit(struct ath10k *ar)
1341 {
1342         struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1343         struct ath10k_pci_pipe *pipe_info;
1344         int pipe_num;
1345
1346         for (pipe_num = 0; pipe_num < ar_pci->ce_count; pipe_num++) {
1347                 pipe_info = &ar_pci->pipe_info[pipe_num];
1348                 if (pipe_info->ce_hdl) {
1349                         ath10k_ce_deinit(pipe_info->ce_hdl);
1350                         pipe_info->ce_hdl = NULL;
1351                         pipe_info->buf_sz = 0;
1352                 }
1353         }
1354 }
1355
1356 static void ath10k_pci_disable_irqs(struct ath10k *ar)
1357 {
1358         struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1359         int i;
1360
1361         for (i = 0; i < max(1, ar_pci->num_msi_intrs); i++)
1362                 disable_irq(ar_pci->pdev->irq + i);
1363 }
1364
1365 static void ath10k_pci_hif_stop(struct ath10k *ar)
1366 {
1367         struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1368
1369         ath10k_dbg(ATH10K_DBG_PCI, "%s\n", __func__);
1370
1371         /* Irqs are never explicitly re-enabled. They are implicitly re-enabled
1372          * by ath10k_pci_start_intr(). */
1373         ath10k_pci_disable_irqs(ar);
1374
1375         ath10k_pci_stop_ce(ar);
1376
1377         /* At this point, asynchronous threads are stopped, the target should
1378          * not DMA nor interrupt. We process the leftovers and then free
1379          * everything else up. */
1380
1381         ath10k_pci_process_ce(ar);
1382         ath10k_pci_cleanup_ce(ar);
1383         ath10k_pci_buffer_cleanup(ar);
1384
1385         ar_pci->started = 0;
1386 }
1387
1388 static int ath10k_pci_hif_exchange_bmi_msg(struct ath10k *ar,
1389                                            void *req, u32 req_len,
1390                                            void *resp, u32 *resp_len)
1391 {
1392         struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1393         struct ath10k_pci_pipe *pci_tx = &ar_pci->pipe_info[BMI_CE_NUM_TO_TARG];
1394         struct ath10k_pci_pipe *pci_rx = &ar_pci->pipe_info[BMI_CE_NUM_TO_HOST];
1395         struct ath10k_ce_pipe *ce_tx = pci_tx->ce_hdl;
1396         struct ath10k_ce_pipe *ce_rx = pci_rx->ce_hdl;
1397         dma_addr_t req_paddr = 0;
1398         dma_addr_t resp_paddr = 0;
1399         struct bmi_xfer xfer = {};
1400         void *treq, *tresp = NULL;
1401         int ret = 0;
1402
1403         if (resp && !resp_len)
1404                 return -EINVAL;
1405
1406         if (resp && resp_len && *resp_len == 0)
1407                 return -EINVAL;
1408
1409         treq = kmemdup(req, req_len, GFP_KERNEL);
1410         if (!treq)
1411                 return -ENOMEM;
1412
1413         req_paddr = dma_map_single(ar->dev, treq, req_len, DMA_TO_DEVICE);
1414         ret = dma_mapping_error(ar->dev, req_paddr);
1415         if (ret)
1416                 goto err_dma;
1417
1418         if (resp && resp_len) {
1419                 tresp = kzalloc(*resp_len, GFP_KERNEL);
1420                 if (!tresp) {
1421                         ret = -ENOMEM;
1422                         goto err_req;
1423                 }
1424
1425                 resp_paddr = dma_map_single(ar->dev, tresp, *resp_len,
1426                                             DMA_FROM_DEVICE);
1427                 ret = dma_mapping_error(ar->dev, resp_paddr);
1428                 if (ret)
1429                         goto err_req;
1430
1431                 xfer.wait_for_resp = true;
1432                 xfer.resp_len = 0;
1433
1434                 ath10k_ce_recv_buf_enqueue(ce_rx, &xfer, resp_paddr);
1435         }
1436
1437         init_completion(&xfer.done);
1438
1439         ret = ath10k_ce_send(ce_tx, &xfer, req_paddr, req_len, -1, 0);
1440         if (ret)
1441                 goto err_resp;
1442
1443         ret = wait_for_completion_timeout(&xfer.done,
1444                                           BMI_COMMUNICATION_TIMEOUT_HZ);
1445         if (ret <= 0) {
1446                 u32 unused_buffer;
1447                 unsigned int unused_nbytes;
1448                 unsigned int unused_id;
1449
1450                 ret = -ETIMEDOUT;
1451                 ath10k_ce_cancel_send_next(ce_tx, NULL, &unused_buffer,
1452                                            &unused_nbytes, &unused_id);
1453         } else {
1454                 /* non-zero means we did not time out */
1455                 ret = 0;
1456         }
1457
1458 err_resp:
1459         if (resp) {
1460                 u32 unused_buffer;
1461
1462                 ath10k_ce_revoke_recv_next(ce_rx, NULL, &unused_buffer);
1463                 dma_unmap_single(ar->dev, resp_paddr,
1464                                  *resp_len, DMA_FROM_DEVICE);
1465         }
1466 err_req:
1467         dma_unmap_single(ar->dev, req_paddr, req_len, DMA_TO_DEVICE);
1468
1469         if (ret == 0 && resp_len) {
1470                 *resp_len = min(*resp_len, xfer.resp_len);
1471                 memcpy(resp, tresp, *resp_len); /* don't overrun caller's buffer */
1472         }
1473 err_dma:
1474         kfree(treq);
1475         kfree(tresp);
1476
1477         return ret;
1478 }
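
/*
 * A minimal usage sketch (editorial; the helper name and command bytes are
 * hypothetical): how a caller might drive the BMI exchange above. The
 * request buffer is mandatory; the response buffer is optional, and
 * *resp_len is in/out -- capacity on entry, bytes received on return.
 */
#if 0	/* illustration only, not part of the driver */
static int example_bmi_roundtrip(struct ath10k *ar)
{
	u8 req[16] = { 0 };		/* hypothetical BMI command bytes */
	u8 resp[16];
	u32 resp_len = sizeof(resp);	/* in: capacity, out: bytes received */
	int ret;

	ret = ath10k_pci_hif_exchange_bmi_msg(ar, req, sizeof(req),
					      resp, &resp_len);
	if (ret)
		return ret;	/* e.g. -ETIMEDOUT if the target never answered */

	/* resp_len now holds min(capacity, response length) */
	return 0;
}
#endif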
1479
1480 static void ath10k_pci_bmi_send_done(struct ath10k_ce_pipe *ce_state)
1481 {
1482         struct bmi_xfer *xfer;
1483         u32 ce_data;
1484         unsigned int nbytes;
1485         unsigned int transfer_id;
1486
1487         if (ath10k_ce_completed_send_next(ce_state, (void **)&xfer, &ce_data,
1488                                           &nbytes, &transfer_id))
1489                 return;
1490
1491         if (xfer->wait_for_resp)
1492                 return;
1493
1494         complete(&xfer->done);
1495 }
1496
1497 static void ath10k_pci_bmi_recv_data(struct ath10k_ce_pipe *ce_state)
1498 {
1499         struct bmi_xfer *xfer;
1500         u32 ce_data;
1501         unsigned int nbytes;
1502         unsigned int transfer_id;
1503         unsigned int flags;
1504
1505         if (ath10k_ce_completed_recv_next(ce_state, (void **)&xfer, &ce_data,
1506                                           &nbytes, &transfer_id, &flags))
1507                 return;
1508
1509         if (!xfer->wait_for_resp) {
1510                 ath10k_warn("unexpected: BMI data received; ignoring\n");
1511                 return;
1512         }
1513
1514         xfer->resp_len = nbytes;
1515         complete(&xfer->done);
1516 }
1517
1518 /*
1519  * Map from service/endpoint to Copy Engine.
1520  * This table is derived from the CE_PCI TABLE, above.
1521  * It is passed to the Target at startup for use by firmware.
1522  */
1523 static const struct service_to_pipe target_service_to_ce_map_wlan[] = {
1524         {
1525                  ATH10K_HTC_SVC_ID_WMI_DATA_VO,
1526                  PIPEDIR_OUT,           /* out = UL = host -> target */
1527                  3,
1528         },
1529         {
1530                  ATH10K_HTC_SVC_ID_WMI_DATA_VO,
1531                  PIPEDIR_IN,            /* in = DL = target -> host */
1532                  2,
1533         },
1534         {
1535                  ATH10K_HTC_SVC_ID_WMI_DATA_BK,
1536                  PIPEDIR_OUT,           /* out = UL = host -> target */
1537                  3,
1538         },
1539         {
1540                  ATH10K_HTC_SVC_ID_WMI_DATA_BK,
1541                  PIPEDIR_IN,            /* in = DL = target -> host */
1542                  2,
1543         },
1544         {
1545                  ATH10K_HTC_SVC_ID_WMI_DATA_BE,
1546                  PIPEDIR_OUT,           /* out = UL = host -> target */
1547                  3,
1548         },
1549         {
1550                  ATH10K_HTC_SVC_ID_WMI_DATA_BE,
1551                  PIPEDIR_IN,            /* in = DL = target -> host */
1552                  2,
1553         },
1554         {
1555                  ATH10K_HTC_SVC_ID_WMI_DATA_VI,
1556                  PIPEDIR_OUT,           /* out = UL = host -> target */
1557                  3,
1558         },
1559         {
1560                  ATH10K_HTC_SVC_ID_WMI_DATA_VI,
1561                  PIPEDIR_IN,            /* in = DL = target -> host */
1562                  2,
1563         },
1564         {
1565                  ATH10K_HTC_SVC_ID_WMI_CONTROL,
1566                  PIPEDIR_OUT,           /* out = UL = host -> target */
1567                  3,
1568         },
1569         {
1570                  ATH10K_HTC_SVC_ID_WMI_CONTROL,
1571                  PIPEDIR_IN,            /* in = DL = target -> host */
1572                  2,
1573         },
1574         {
1575                  ATH10K_HTC_SVC_ID_RSVD_CTRL,
1576                  PIPEDIR_OUT,           /* out = UL = host -> target */
1577                  0,             /* could be moved to 3 (share with WMI) */
1578         },
1579         {
1580                  ATH10K_HTC_SVC_ID_RSVD_CTRL,
1581                  PIPEDIR_IN,            /* in = DL = target -> host */
1582                  1,
1583         },
1584         {
1585                  ATH10K_HTC_SVC_ID_TEST_RAW_STREAMS,    /* not currently used */
1586                  PIPEDIR_OUT,           /* out = UL = host -> target */
1587                  0,
1588         },
1589         {
1590                  ATH10K_HTC_SVC_ID_TEST_RAW_STREAMS,    /* not currently used */
1591                  PIPEDIR_IN,            /* in = DL = target -> host */
1592                  1,
1593         },
1594         {
1595                  ATH10K_HTC_SVC_ID_HTT_DATA_MSG,
1596                  PIPEDIR_OUT,           /* out = UL = host -> target */
1597                  4,
1598         },
1599         {
1600                  ATH10K_HTC_SVC_ID_HTT_DATA_MSG,
1601                  PIPEDIR_IN,            /* in = DL = target -> host */
1602                  1,
1603         },
1604
1605         /* (Additions here) */
1606
1607         {                               /* Must be last */
1608                  0,
1609                  0,
1610                  0,
1611         },
1612 };
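
/*
 * A minimal lookup sketch (editorial): resolving a service ID to its
 * UL (host->target) and DL (target->host) pipe numbers by walking the
 * table above, in the spirit of ath10k_pci_hif_map_service_to_pipe().
 * The field names (service_id/pipedir/pipenum) are assumed from the
 * initializers above.
 */
#if 0	/* illustration only, not part of the driver */
static void example_map_service(u16 service_id, u8 *ul_pipe, u8 *dl_pipe)
{
	const struct service_to_pipe *entry;

	/* the all-zero entry terminates the table */
	for (entry = target_service_to_ce_map_wlan;
	     entry->service_id != 0; entry++) {
		if (entry->service_id != service_id)
			continue;
		if (entry->pipedir == PIPEDIR_OUT)
			*ul_pipe = entry->pipenum;	/* host -> target */
		else if (entry->pipedir == PIPEDIR_IN)
			*dl_pipe = entry->pipenum;	/* target -> host */
	}
}
#endif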
1613
1614 /*
1615  * Send an interrupt to the device to wake up the Target CPU
1616  * so it has an opportunity to notice any changed state.
1617  */
1618 static int ath10k_pci_wake_target_cpu(struct ath10k *ar)
1619 {
1620         int ret;
1621         u32 core_ctrl;
1622
1623         ret = ath10k_pci_diag_read_access(ar, SOC_CORE_BASE_ADDRESS |
1624                                               CORE_CTRL_ADDRESS,
1625                                           &core_ctrl);
1626         if (ret) {
1627                 ath10k_warn("Unable to read core ctrl\n");
1628                 return ret;
1629         }
1630
1631         /* A_INUM_FIRMWARE interrupt to Target CPU */
1632         core_ctrl |= CORE_CTRL_CPU_INTR_MASK;
1633
1634         ret = ath10k_pci_diag_write_access(ar, SOC_CORE_BASE_ADDRESS |
1635                                                CORE_CTRL_ADDRESS,
1636                                            core_ctrl);
1637         if (ret)
1638                 ath10k_warn("Unable to set interrupt mask\n");
1639
1640         return ret;
1641 }
1642
1643 static int ath10k_pci_init_config(struct ath10k *ar)
1644 {
1645         u32 interconnect_targ_addr;
1646         u32 pcie_state_targ_addr = 0;
1647         u32 pipe_cfg_targ_addr = 0;
1648         u32 svc_to_pipe_map = 0;
1649         u32 pcie_config_flags = 0;
1650         u32 ealloc_value;
1651         u32 ealloc_targ_addr;
1652         u32 flag2_value;
1653         u32 flag2_targ_addr;
1654         int ret = 0;
1655
1656         /* Download to Target the CE Config and the service-to-CE map */
1657         interconnect_targ_addr =
1658                 host_interest_item_address(HI_ITEM(hi_interconnect_state));
1659
1660         /* Supply Target-side CE configuration */
1661         ret = ath10k_pci_diag_read_access(ar, interconnect_targ_addr,
1662                                           &pcie_state_targ_addr);
1663         if (ret != 0) {
1664                 ath10k_err("Failed to get pcie state addr: %d\n", ret);
1665                 return ret;
1666         }
1667
1668         if (pcie_state_targ_addr == 0) {
1669                 ret = -EIO;
1670                 ath10k_err("Invalid pcie state addr\n");
1671                 return ret;
1672         }
1673
1674         ret = ath10k_pci_diag_read_access(ar, pcie_state_targ_addr +
1675                                           offsetof(struct pcie_state,
1676                                                    pipe_cfg_addr),
1677                                           &pipe_cfg_targ_addr);
1678         if (ret != 0) {
1679                 ath10k_err("Failed to get pipe cfg addr: %d\n", ret);
1680                 return ret;
1681         }
1682
1683         if (pipe_cfg_targ_addr == 0) {
1684                 ret = -EIO;
1685                 ath10k_err("Invalid pipe cfg addr\n");
1686                 return ret;
1687         }
1688
1689         ret = ath10k_pci_diag_write_mem(ar, pipe_cfg_targ_addr,
1690                                  target_ce_config_wlan,
1691                                  sizeof(target_ce_config_wlan));
1692
1693         if (ret != 0) {
1694                 ath10k_err("Failed to write pipe cfg: %d\n", ret);
1695                 return ret;
1696         }
1697
1698         ret = ath10k_pci_diag_read_access(ar, pcie_state_targ_addr +
1699                                           offsetof(struct pcie_state,
1700                                                    svc_to_pipe_map),
1701                                           &svc_to_pipe_map);
1702         if (ret != 0) {
1703                 ath10k_err("Failed to get svc/pipe map: %d\n", ret);
1704                 return ret;
1705         }
1706
1707         if (svc_to_pipe_map == 0) {
1708                 ret = -EIO;
1709                 ath10k_err("Invalid svc_to_pipe map\n");
1710                 return ret;
1711         }
1712
1713         ret = ath10k_pci_diag_write_mem(ar, svc_to_pipe_map,
1714                                  target_service_to_ce_map_wlan,
1715                                  sizeof(target_service_to_ce_map_wlan));
1716         if (ret != 0) {
1717                 ath10k_err("Failed to write svc/pipe map: %d\n", ret);
1718                 return ret;
1719         }
1720
1721         ret = ath10k_pci_diag_read_access(ar, pcie_state_targ_addr +
1722                                           offsetof(struct pcie_state,
1723                                                    config_flags),
1724                                           &pcie_config_flags);
1725         if (ret != 0) {
1726                 ath10k_err("Failed to get pcie config_flags: %d\n", ret);
1727                 return ret;
1728         }
1729
1730         pcie_config_flags &= ~PCIE_CONFIG_FLAG_ENABLE_L1;
1731
1732         ret = ath10k_pci_diag_write_mem(ar, pcie_state_targ_addr +
1733                                  offsetof(struct pcie_state, config_flags),
1734                                  &pcie_config_flags,
1735                                  sizeof(pcie_config_flags));
1736         if (ret != 0) {
1737                 ath10k_err("Failed to write pcie config_flags: %d\n", ret);
1738                 return ret;
1739         }
1740
1741         /* configure early allocation */
1742         ealloc_targ_addr = host_interest_item_address(HI_ITEM(hi_early_alloc));
1743
1744         ret = ath10k_pci_diag_read_access(ar, ealloc_targ_addr, &ealloc_value);
1745         if (ret != 0) {
1746                 ath10k_err("Failed to get early alloc val: %d\n", ret);
1747                 return ret;
1748         }
1749
1750         /* first bank is switched to IRAM */
1751         ealloc_value |= ((HI_EARLY_ALLOC_MAGIC << HI_EARLY_ALLOC_MAGIC_SHIFT) &
1752                          HI_EARLY_ALLOC_MAGIC_MASK);
1753         ealloc_value |= ((1 << HI_EARLY_ALLOC_IRAM_BANKS_SHIFT) &
1754                          HI_EARLY_ALLOC_IRAM_BANKS_MASK);
1755
1756         ret = ath10k_pci_diag_write_access(ar, ealloc_targ_addr, ealloc_value);
1757         if (ret != 0) {
1758                 ath10k_err("Failed to set early alloc val: %d\n", ret);
1759                 return ret;
1760         }
1761
1762         /* Tell Target to proceed with initialization */
1763         flag2_targ_addr = host_interest_item_address(HI_ITEM(hi_option_flag2));
1764
1765         ret = ath10k_pci_diag_read_access(ar, flag2_targ_addr, &flag2_value);
1766         if (ret != 0) {
1767                 ath10k_err("Failed to get option val: %d\n", ret);
1768                 return ret;
1769         }
1770
1771         flag2_value |= HI_OPTION_EARLY_CFG_DONE;
1772
1773         ret = ath10k_pci_diag_write_access(ar, flag2_targ_addr, flag2_value);
1774         if (ret != 0) {
1775                 ath10k_err("Failed to set option val: %d\n", ret);
1776                 return ret;
1777         }
1778
1779         return 0;
1780 }
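
/*
 * The function above repeats one idiom: read a value over the diagnostic
 * window, OR in a flag, and write it back. A condensed sketch of that
 * read-modify-write step (editorial; the helper name is hypothetical):
 */
#if 0	/* illustration only, not part of the driver */
static int example_diag_set_flag(struct ath10k *ar, u32 addr, u32 flag)
{
	u32 val;
	int ret;

	ret = ath10k_pci_diag_read_access(ar, addr, &val);
	if (ret)
		return ret;

	val |= flag;	/* e.g. HI_OPTION_EARLY_CFG_DONE */

	return ath10k_pci_diag_write_access(ar, addr, val);
}
#endif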
1781
1782
1783
1784 static int ath10k_pci_ce_init(struct ath10k *ar)
1785 {
1786         struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1787         struct ath10k_pci_pipe *pipe_info;
1788         const struct ce_attr *attr;
1789         int pipe_num;
1790
1791         for (pipe_num = 0; pipe_num < ar_pci->ce_count; pipe_num++) {
1792                 pipe_info = &ar_pci->pipe_info[pipe_num];
1793                 pipe_info->pipe_num = pipe_num;
1794                 pipe_info->hif_ce_state = ar;
1795                 attr = &host_ce_config_wlan[pipe_num];
1796
1797                 pipe_info->ce_hdl = ath10k_ce_init(ar, pipe_num, attr);
1798                 if (pipe_info->ce_hdl == NULL) {
1799                         ath10k_err("Unable to initialize CE for pipe: %d\n",
1800                                    pipe_num);
1801
1802                         /* It is safe to call this here; it checks that
1803                          * ce_hdl is valid for each pipe. */
1804                         ath10k_pci_ce_deinit(ar);
1805                         return -ENOMEM;
1806                 }
1807
1808                 if (pipe_num == ar_pci->ce_count - 1) {
1809                         /*
1810                          * Reserve the last CE for
1811                          * Diagnostic Window support.
1812                          */
1813                         ar_pci->ce_diag =
1814                         ar_pci->pipe_info[ar_pci->ce_count - 1].ce_hdl;
1815                         continue;
1816                 }
1817
1818                 pipe_info->buf_sz = (size_t) (attr->src_sz_max);
1819         }
1820
1821         /*
1822          * Initially, establish CE completion handlers for use with BMI.
1823          * These are overwritten with generic handlers after we exit BMI phase.
1824          */
1825         pipe_info = &ar_pci->pipe_info[BMI_CE_NUM_TO_TARG];
1826         ath10k_ce_send_cb_register(pipe_info->ce_hdl,
1827                                    ath10k_pci_bmi_send_done, 0);
1828
1829         pipe_info = &ar_pci->pipe_info[BMI_CE_NUM_TO_HOST];
1830         ath10k_ce_recv_cb_register(pipe_info->ce_hdl,
1831                                    ath10k_pci_bmi_recv_data);
1832
1833         return 0;
1834 }
1835
1836 static void ath10k_pci_fw_interrupt_handler(struct ath10k *ar)
1837 {
1838         struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1839         u32 fw_indicator_address, fw_indicator;
1840
1841         ath10k_pci_wake(ar);
1842
1843         fw_indicator_address = ar_pci->fw_indicator_address;
1844         fw_indicator = ath10k_pci_read32(ar, fw_indicator_address);
1845
1846         if (fw_indicator & FW_IND_EVENT_PENDING) {
1847                 /* ACK: clear Target-side pending event */
1848                 ath10k_pci_write32(ar, fw_indicator_address,
1849                                    fw_indicator & ~FW_IND_EVENT_PENDING);
1850
1851                 if (ar_pci->started) {
1852                         ath10k_pci_hif_dump_area(ar);
1853                 } else {
1854                         /*
1855                          * Probable Target failure before we're prepared
1856                          * to handle it.  Generally unexpected.
1857                          */
1858                         ath10k_warn("early firmware event indicated\n");
1859                 }
1860         }
1861
1862         ath10k_pci_sleep(ar);
1863 }
1864
1865 static int ath10k_pci_hif_power_up(struct ath10k *ar)
1866 {
1867         struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1868         int ret;
1869
1870         ret = ath10k_pci_start_intr(ar);
1871         if (ret) {
1872                 ath10k_err("could not start interrupt handling (%d)\n", ret);
1873                 goto err;
1874         }
1875
1876         /*
1877          * Bring the target up cleanly.
1878          *
1879          * The target may be in an undefined state with an AUX-powered Target
1880          * and a Host in WoW mode. If the Host crashes, loses power, or is
1881          * restarted (without unloading the driver) then the Target is left
1882          * (aux) powered and running. On a subsequent driver load, the Target
1883          * is in an unexpected state. We try to catch that here in order to
1884          * reset the Target and retry the probe.
1885          */
1886         ath10k_pci_device_reset(ar);
1887
1888         ret = ath10k_pci_reset_target(ar);
1889         if (ret)
1890                 goto err_irq;
1891
1892         if (!test_bit(ATH10K_PCI_FEATURE_SOC_POWER_SAVE, ar_pci->features))
1893                 /* Force AWAKE forever */
1894                 ath10k_do_pci_wake(ar);
1895
1896         ret = ath10k_pci_ce_init(ar);
1897         if (ret)
1898                 goto err_ps;
1899
1900         ret = ath10k_pci_init_config(ar);
1901         if (ret)
1902                 goto err_ce;
1903
1904         ret = ath10k_pci_wake_target_cpu(ar);
1905         if (ret) {
1906                 ath10k_err("could not wake up target CPU (%d)\n", ret);
1907                 goto err_ce;
1908         }
1909
1910         return 0;
1911
1912 err_ce:
1913         ath10k_pci_ce_deinit(ar);
1914 err_ps:
1915         if (!test_bit(ATH10K_PCI_FEATURE_SOC_POWER_SAVE, ar_pci->features))
1916                 ath10k_do_pci_sleep(ar);
1917 err_irq:
1918         ath10k_pci_stop_intr(ar);
1919 err:
1920         return ret;
1921 }
1922
1923 static void ath10k_pci_hif_power_down(struct ath10k *ar)
1924 {
1925         struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1926
1927         ath10k_pci_stop_intr(ar);
1928
1929         ath10k_pci_ce_deinit(ar);
1930         if (!test_bit(ATH10K_PCI_FEATURE_SOC_POWER_SAVE, ar_pci->features))
1931                 ath10k_do_pci_sleep(ar);
1932 }
1933
1934 #ifdef CONFIG_PM
1935
1936 #define ATH10K_PCI_PM_CONTROL 0x44
1937
1938 static int ath10k_pci_hif_suspend(struct ath10k *ar)
1939 {
1940         struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1941         struct pci_dev *pdev = ar_pci->pdev;
1942         u32 val;
1943
1944         pci_read_config_dword(pdev, ATH10K_PCI_PM_CONTROL, &val);
1945
1946         if ((val & 0x000000ff) != 0x3) {
1947                 pci_save_state(pdev);
1948                 pci_disable_device(pdev);
1949                 pci_write_config_dword(pdev, ATH10K_PCI_PM_CONTROL,
1950                                        (val & 0xffffff00) | 0x03);
1951         }
1952
1953         return 0;
1954 }
1955
1956 static int ath10k_pci_hif_resume(struct ath10k *ar)
1957 {
1958         struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1959         struct pci_dev *pdev = ar_pci->pdev;
1960         u32 val;
1961
1962         pci_read_config_dword(pdev, ATH10K_PCI_PM_CONTROL, &val);
1963
1964         if ((val & 0x000000ff) != 0) {
1965                 pci_restore_state(pdev);
1966                 pci_write_config_dword(pdev, ATH10K_PCI_PM_CONTROL,
1967                                        val & 0xffffff00);
1968                 /*
1969                  * Suspend/Resume resets the PCI configuration space,
1970                  * so we have to re-disable the RETRY_TIMEOUT register (0x41)
1971                  * to keep PCI Tx retries from interfering with C3 CPU state
1972                  */
1973                 pci_read_config_dword(pdev, 0x40, &val);
1974
1975                 if ((val & 0x0000ff00) != 0)
1976                         pci_write_config_dword(pdev, 0x40, val & 0xffff00ff);
1977         }
1978
1979         return 0;
1980 }
1981 #endif
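
/*
 * Editorial note on the register poked above: offset 0x44
 * (ATH10K_PCI_PM_CONTROL) appears to be the device's PCI power
 * management control/status register, whose low two bits select the
 * power state -- 0x03 requests D3hot and 0x00 returns to D0. A minimal
 * sketch of the state check, assuming that layout:
 */
#if 0	/* illustration only, not part of the driver */
static bool example_is_d3hot(struct pci_dev *pdev)
{
	u32 val;

	pci_read_config_dword(pdev, ATH10K_PCI_PM_CONTROL, &val);
	return (val & 0x000000ff) == 0x3;	/* power state field */
}
#endif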
1982
1983 static const struct ath10k_hif_ops ath10k_pci_hif_ops = {
1984         .send_head              = ath10k_pci_hif_send_head,
1985         .exchange_bmi_msg       = ath10k_pci_hif_exchange_bmi_msg,
1986         .start                  = ath10k_pci_hif_start,
1987         .stop                   = ath10k_pci_hif_stop,
1988         .map_service_to_pipe    = ath10k_pci_hif_map_service_to_pipe,
1989         .get_default_pipe       = ath10k_pci_hif_get_default_pipe,
1990         .send_complete_check    = ath10k_pci_hif_send_complete_check,
1991         .set_callbacks          = ath10k_pci_hif_set_callbacks,
1992         .get_free_queue_number  = ath10k_pci_hif_get_free_queue_number,
1993         .power_up               = ath10k_pci_hif_power_up,
1994         .power_down             = ath10k_pci_hif_power_down,
1995 #ifdef CONFIG_PM
1996         .suspend                = ath10k_pci_hif_suspend,
1997         .resume                 = ath10k_pci_hif_resume,
1998 #endif
1999 };
2000
2001 static void ath10k_pci_ce_tasklet(unsigned long ptr)
2002 {
2003         struct ath10k_pci_pipe *pipe = (struct ath10k_pci_pipe *)ptr;
2004         struct ath10k_pci *ar_pci = pipe->ar_pci;
2005
2006         ath10k_ce_per_engine_service(ar_pci->ar, pipe->pipe_num);
2007 }
2008
2009 static void ath10k_msi_err_tasklet(unsigned long data)
2010 {
2011         struct ath10k *ar = (struct ath10k *)data;
2012
2013         ath10k_pci_fw_interrupt_handler(ar);
2014 }
2015
2016 /*
2017  * Handler for a per-engine interrupt on a PARTICULAR CE.
2018  * This is used in cases where each CE has a private MSI interrupt.
2019  */
2020 static irqreturn_t ath10k_pci_per_engine_handler(int irq, void *arg)
2021 {
2022         struct ath10k *ar = arg;
2023         struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2024         int ce_id = irq - ar_pci->pdev->irq - MSI_ASSIGN_CE_INITIAL;
2025
2026         if (ce_id < 0 || ce_id >= ARRAY_SIZE(ar_pci->pipe_info)) {
2027                 ath10k_warn("unexpected/invalid irq %d ce_id %d\n", irq, ce_id);
2028                 return IRQ_HANDLED;
2029         }
2030
2031         /*
2032          * NOTE: We are able to derive ce_id from irq because we
2033          * use a one-to-one mapping for CEs 0..5.
2034          * CEs 6 & 7 do not use interrupts at all.
2035          *
2036          * This mapping must be kept in sync with the mapping
2037          * used by firmware.
2038          */
2039         tasklet_schedule(&ar_pci->pipe_info[ce_id].intr);
2040         return IRQ_HANDLED;
2041 }
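
/*
 * Worked example of the irq -> ce_id arithmetic above (editorial):
 * with the MSI block based at pdev->irq and the first CE vector at
 * offset MSI_ASSIGN_CE_INITIAL, an interrupt arriving on
 * pdev->irq + MSI_ASSIGN_CE_INITIAL + 2 yields ce_id = 2 and
 * schedules the tasklet for CE2.
 */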
2042
2043 static irqreturn_t ath10k_pci_msi_fw_handler(int irq, void *arg)
2044 {
2045         struct ath10k *ar = arg;
2046         struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2047
2048         tasklet_schedule(&ar_pci->msi_fw_err);
2049         return IRQ_HANDLED;
2050 }
2051
2052 /*
2053  * Top-level interrupt handler for all PCI interrupts from a Target.
2054  * When a block of MSI interrupts is allocated, this top-level handler
2055  * is not used; instead, we directly call the correct sub-handler.
2056  */
2057 static irqreturn_t ath10k_pci_interrupt_handler(int irq, void *arg)
2058 {
2059         struct ath10k *ar = arg;
2060         struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2061
2062         if (ar_pci->num_msi_intrs == 0) {
2063                 /*
2064                  * IMPORTANT: the INTR_CLR register has to be written after
2065                  * INTR_ENABLE is set to 0; otherwise the interrupt is not
2066                  * really cleared.
2067                  */
2068                 iowrite32(0, ar_pci->mem +
2069                           (SOC_CORE_BASE_ADDRESS |
2070                            PCIE_INTR_ENABLE_ADDRESS));
2071                 iowrite32(PCIE_INTR_FIRMWARE_MASK |
2072                           PCIE_INTR_CE_MASK_ALL,
2073                           ar_pci->mem + (SOC_CORE_BASE_ADDRESS |
2074                                          PCIE_INTR_CLR_ADDRESS));
2075                 /*
2076                  * IMPORTANT: this extra read transaction is required to
2077                  * flush the posted write buffer.
2078                  */
2079                 (void) ioread32(ar_pci->mem +
2080                                 (SOC_CORE_BASE_ADDRESS |
2081                                  PCIE_INTR_ENABLE_ADDRESS));
2082         }
2083
2084         tasklet_schedule(&ar_pci->intr_tq);
2085
2086         return IRQ_HANDLED;
2087 }
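
/*
 * A condensed sketch (editorial) of the legacy-interrupt quiescing
 * sequence used above: mask first, then ack, then read back to flush
 * the posted writes over PCIe. The helper name is hypothetical.
 */
#if 0	/* illustration only, not part of the driver */
static void example_legacy_irq_quiesce(struct ath10k_pci *ar_pci)
{
	void __iomem *base = ar_pci->mem + SOC_CORE_BASE_ADDRESS;

	iowrite32(0, base + PCIE_INTR_ENABLE_ADDRESS);		/* mask  */
	iowrite32(PCIE_INTR_FIRMWARE_MASK | PCIE_INTR_CE_MASK_ALL,
		  base + PCIE_INTR_CLR_ADDRESS);		/* ack   */
	(void)ioread32(base + PCIE_INTR_ENABLE_ADDRESS);	/* flush */
}
#endif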
2088
2089 static void ath10k_pci_tasklet(unsigned long data)
2090 {
2091         struct ath10k *ar = (struct ath10k *)data;
2092         struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2093
2094         ath10k_pci_fw_interrupt_handler(ar); /* FIXME: Handle FW error */
2095         ath10k_ce_per_engine_service_any(ar);
2096
2097         if (ar_pci->num_msi_intrs == 0) {
2098                 /* Enable Legacy PCI line interrupts */
2099                 iowrite32(PCIE_INTR_FIRMWARE_MASK |
2100                           PCIE_INTR_CE_MASK_ALL,
2101                           ar_pci->mem + (SOC_CORE_BASE_ADDRESS |
2102                                          PCIE_INTR_ENABLE_ADDRESS));
2103                 /*
2104                  * IMPORTANT: this extra read transaction is required to
2105                  * flush the posted write buffer
2106                  */
2107                 (void) ioread32(ar_pci->mem +
2108                                 (SOC_CORE_BASE_ADDRESS |
2109                                  PCIE_INTR_ENABLE_ADDRESS));
2110         }
2111 }
2112
2113 static int ath10k_pci_start_intr_msix(struct ath10k *ar, int num)
2114 {
2115         struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2116         int ret;
2117         int i;
2118
2119         ret = pci_enable_msi_block(ar_pci->pdev, num);
2120         if (ret)
2121                 return ret;
2122
2123         ret = request_irq(ar_pci->pdev->irq + MSI_ASSIGN_FW,
2124                           ath10k_pci_msi_fw_handler,
2125                           IRQF_SHARED, "ath10k_pci", ar);
2126         if (ret) {
2127                 ath10k_warn("request_irq(%d) failed %d\n",
2128                             ar_pci->pdev->irq + MSI_ASSIGN_FW, ret);
2129
2130                 pci_disable_msi(ar_pci->pdev);
2131                 return ret;
2132         }
2133
2134         for (i = MSI_ASSIGN_CE_INITIAL; i <= MSI_ASSIGN_CE_MAX; i++) {
2135                 ret = request_irq(ar_pci->pdev->irq + i,
2136                                   ath10k_pci_per_engine_handler,
2137                                   IRQF_SHARED, "ath10k_pci", ar);
2138                 if (ret) {
2139                         ath10k_warn("request_irq(%d) failed %d\n",
2140                                     ar_pci->pdev->irq + i, ret);
2141
2142                         for (i--; i >= MSI_ASSIGN_CE_INITIAL; i--)
2143                                 free_irq(ar_pci->pdev->irq + i, ar);
2144
2145                         free_irq(ar_pci->pdev->irq + MSI_ASSIGN_FW, ar);
2146                         pci_disable_msi(ar_pci->pdev);
2147                         return ret;
2148                 }
2149         }
2150
2151         ath10k_info("MSI-X interrupt handling (%d intrs)\n", num);
2152         return 0;
2153 }
2154
2155 static int ath10k_pci_start_intr_msi(struct ath10k *ar)
2156 {
2157         struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2158         int ret;
2159
2160         ret = pci_enable_msi(ar_pci->pdev);
2161         if (ret < 0)
2162                 return ret;
2163
2164         ret = request_irq(ar_pci->pdev->irq,
2165                           ath10k_pci_interrupt_handler,
2166                           IRQF_SHARED, "ath10k_pci", ar);
2167         if (ret < 0) {
2168                 pci_disable_msi(ar_pci->pdev);
2169                 return ret;
2170         }
2171
2172         ath10k_info("MSI interrupt handling\n");
2173         return 0;
2174 }
2175
2176 static int ath10k_pci_start_intr_legacy(struct ath10k *ar)
2177 {
2178         struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2179         int ret;
2180
2181         ret = request_irq(ar_pci->pdev->irq,
2182                           ath10k_pci_interrupt_handler,
2183                           IRQF_SHARED, "ath10k_pci", ar);
2184         if (ret < 0)
2185                 return ret;
2186
2187         /*
2188          * Make sure to wake the Target before enabling Legacy
2189          * Interrupt.
2190          */
2191         iowrite32(PCIE_SOC_WAKE_V_MASK,
2192                   ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS +
2193                   PCIE_SOC_WAKE_ADDRESS);
2194
2195         ath10k_pci_wait(ar);
2196
2197         /*
2198          * A potential race occurs here: the CORE_BASE write depends
2199          * on the target correctly decoding the AXI address, but the
2200          * host has no way of knowing when the target has written its
2201          * BAR to CORE_CTRL. The write might be lost if the target has
2202          * NOT yet written the BAR. For now, work around the race by
2203          * repeating the write in the synchronization check below.
2204          */
2205         iowrite32(PCIE_INTR_FIRMWARE_MASK |
2206                   PCIE_INTR_CE_MASK_ALL,
2207                   ar_pci->mem + (SOC_CORE_BASE_ADDRESS |
2208                                  PCIE_INTR_ENABLE_ADDRESS));
2209         iowrite32(PCIE_SOC_WAKE_RESET,
2210                   ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS +
2211                   PCIE_SOC_WAKE_ADDRESS);
2212
2213         ath10k_info("legacy interrupt handling\n");
2214         return 0;
2215 }
2216
2217 static int ath10k_pci_start_intr(struct ath10k *ar)
2218 {
2219         struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2220         int num = MSI_NUM_REQUEST;
2221         int ret;
2222         int i;
2223
2224         tasklet_init(&ar_pci->intr_tq, ath10k_pci_tasklet, (unsigned long) ar);
2225         tasklet_init(&ar_pci->msi_fw_err, ath10k_msi_err_tasklet,
2226                      (unsigned long) ar);
2227
2228         for (i = 0; i < CE_COUNT; i++) {
2229                 ar_pci->pipe_info[i].ar_pci = ar_pci;
2230                 tasklet_init(&ar_pci->pipe_info[i].intr,
2231                              ath10k_pci_ce_tasklet,
2232                              (unsigned long)&ar_pci->pipe_info[i]);
2233         }
2234
2235         if (!test_bit(ATH10K_PCI_FEATURE_MSI_X, ar_pci->features))
2236                 num = 1;
2237
2238         if (num > 1) {
2239                 ret = ath10k_pci_start_intr_msix(ar, num);
2240                 if (ret == 0)
2241                         goto exit;
2242
2243                 ath10k_warn("MSI-X didn't succeed (%d), trying MSI\n", ret);
2244                 num = 1;
2245         }
2246
2247         if (num == 1) {
2248                 ret = ath10k_pci_start_intr_msi(ar);
2249                 if (ret == 0)
2250                         goto exit;
2251
2252                 ath10k_warn("MSI didn't succeed (%d), trying legacy INTR\n",
2253                             ret);
2254                 num = 0;
2255         }
2256
2257         ret = ath10k_pci_start_intr_legacy(ar);
2258
2259 exit:
2260         ar_pci->num_msi_intrs = num;
2261         ar_pci->ce_count = CE_COUNT;
2262         return ret;
2263 }
2264
2265 static void ath10k_pci_stop_intr(struct ath10k *ar)
2266 {
2267         struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2268         int i;
2269
2270         /* There's at least one interrupt regardless of whether it's legacy
2271          * INTR, MSI or MSI-X. */
2272         for (i = 0; i < max(1, ar_pci->num_msi_intrs); i++)
2273                 free_irq(ar_pci->pdev->irq + i, ar);
2274
2275         if (ar_pci->num_msi_intrs > 0)
2276                 pci_disable_msi(ar_pci->pdev);
2277 }
2278
2279 static int ath10k_pci_reset_target(struct ath10k *ar)
2280 {
2281         struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2282         int wait_limit = 300; /* 3 sec */
2283
2284         /* Wait for Target to finish initialization before we proceed. */
2285         iowrite32(PCIE_SOC_WAKE_V_MASK,
2286                   ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS +
2287                   PCIE_SOC_WAKE_ADDRESS);
2288
2289         ath10k_pci_wait(ar);
2290
2291         while (wait_limit-- &&
2292                !(ioread32(ar_pci->mem + FW_INDICATOR_ADDRESS) &
2293                  FW_IND_INITIALIZED)) {
2294                 if (ar_pci->num_msi_intrs == 0)
2295                         /* Fix potential race by repeating CORE_BASE writes */
2296                         iowrite32(PCIE_INTR_FIRMWARE_MASK |
2297                                   PCIE_INTR_CE_MASK_ALL,
2298                                   ar_pci->mem + (SOC_CORE_BASE_ADDRESS |
2299                                                  PCIE_INTR_ENABLE_ADDRESS));
2300                 mdelay(10);
2301         }
2302
2303         if (wait_limit < 0) {
2304                 ath10k_err("Target stalled\n");
2305                 iowrite32(PCIE_SOC_WAKE_RESET,
2306                           ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS +
2307                           PCIE_SOC_WAKE_ADDRESS);
2308                 return -EIO;
2309         }
2310
2311         iowrite32(PCIE_SOC_WAKE_RESET,
2312                   ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS +
2313                   PCIE_SOC_WAKE_ADDRESS);
2314
2315         return 0;
2316 }
2317
2318 static void ath10k_pci_device_reset(struct ath10k *ar)
2319 {
2320         int i;
2321         u32 val;
2322
2323         if (!SOC_GLOBAL_RESET_ADDRESS)
2324                 return;
2325
2326         ath10k_pci_reg_write32(ar, PCIE_SOC_WAKE_ADDRESS,
2327                                PCIE_SOC_WAKE_V_MASK);
2328         for (i = 0; i < ATH_PCI_RESET_WAIT_MAX; i++) {
2329                 if (ath10k_pci_target_is_awake(ar))
2330                         break;
2331                 msleep(1);
2332         }
2333
2334         /* Put Target, including PCIe, into RESET. */
2335         val = ath10k_pci_reg_read32(ar, SOC_GLOBAL_RESET_ADDRESS);
2336         val |= 1;
2337         ath10k_pci_reg_write32(ar, SOC_GLOBAL_RESET_ADDRESS, val);
2338
2339         for (i = 0; i < ATH_PCI_RESET_WAIT_MAX; i++) {
2340                 if (ath10k_pci_reg_read32(ar, RTC_STATE_ADDRESS) &
2341                                           RTC_STATE_COLD_RESET_MASK)
2342                         break;
2343                 msleep(1);
2344         }
2345
2346         /* Pull Target, including PCIe, out of RESET. */
2347         val &= ~1;
2348         ath10k_pci_reg_write32(ar, SOC_GLOBAL_RESET_ADDRESS, val);
2349
2350         for (i = 0; i < ATH_PCI_RESET_WAIT_MAX; i++) {
2351                 if (!(ath10k_pci_reg_read32(ar, RTC_STATE_ADDRESS) &
2352                                             RTC_STATE_COLD_RESET_MASK))
2353                         break;
2354                 msleep(1);
2355         }
2356
2357         ath10k_pci_reg_write32(ar, PCIE_SOC_WAKE_ADDRESS, PCIE_SOC_WAKE_RESET);
2358 }
2359
2360 static void ath10k_pci_dump_features(struct ath10k_pci *ar_pci)
2361 {
2362         int i;
2363
2364         for (i = 0; i < ATH10K_PCI_FEATURE_COUNT; i++) {
2365                 if (!test_bit(i, ar_pci->features))
2366                         continue;
2367
2368                 switch (i) {
2369                 case ATH10K_PCI_FEATURE_MSI_X:
2370                         ath10k_dbg(ATH10K_DBG_BOOT, "device supports MSI-X\n");
2371                         break;
2372                 case ATH10K_PCI_FEATURE_SOC_POWER_SAVE:
2373                         ath10k_dbg(ATH10K_DBG_BOOT, "QCA98XX SoC power save enabled\n");
2374                         break;
2375                 }
2376         }
2377 }
2378
2379 static int ath10k_pci_probe(struct pci_dev *pdev,
2380                             const struct pci_device_id *pci_dev)
2381 {
2382         void __iomem *mem;
2383         int ret = 0;
2384         struct ath10k *ar;
2385         struct ath10k_pci *ar_pci;
2386         u32 lcr_val, chip_id;
2387
2388         ath10k_dbg(ATH10K_DBG_PCI, "%s\n", __func__);
2389
2390         ar_pci = kzalloc(sizeof(*ar_pci), GFP_KERNEL);
2391         if (ar_pci == NULL)
2392                 return -ENOMEM;
2393
2394         ar_pci->pdev = pdev;
2395         ar_pci->dev = &pdev->dev;
2396
2397         switch (pci_dev->device) {
2398         case QCA988X_2_0_DEVICE_ID:
2399                 set_bit(ATH10K_PCI_FEATURE_MSI_X, ar_pci->features);
2400                 break;
2401         default:
2402                 ret = -ENODEV;
2403                 ath10k_err("Unknown device ID: %d\n", pci_dev->device);
2404                 goto err_ar_pci;
2405         }
2406
2407         if (ath10k_target_ps)
2408                 set_bit(ATH10K_PCI_FEATURE_SOC_POWER_SAVE, ar_pci->features);
2409
2410         ath10k_pci_dump_features(ar_pci);
2411
2412         ar = ath10k_core_create(ar_pci, ar_pci->dev, &ath10k_pci_hif_ops);
2413         if (!ar) {
2414                 ath10k_err("ath10k_core_create failed!\n");
2415                 ret = -EINVAL;
2416                 goto err_ar_pci;
2417         }
2418
2419         ar_pci->ar = ar;
2420         ar_pci->fw_indicator_address = FW_INDICATOR_ADDRESS;
2421         atomic_set(&ar_pci->keep_awake_count, 0);
2422
2423         pci_set_drvdata(pdev, ar);
2424
2425         /*
2426          * Without any knowledge of the Host, the Target may have been reset or
2427          * power cycled and its Config Space may no longer reflect the PCI
2428          * address space that was assigned earlier by the PCI infrastructure.
2429          * Refresh it now.
2430          */
2431         ret = pci_assign_resource(pdev, BAR_NUM);
2432         if (ret) {
2433                 ath10k_err("cannot assign PCI space: %d\n", ret);
2434                 goto err_ar;
2435         }
2436
2437         ret = pci_enable_device(pdev);
2438         if (ret) {
2439                 ath10k_err("cannot enable PCI device: %d\n", ret);
2440                 goto err_ar;
2441         }
2442
2443         /* Request MMIO resources */
2444         ret = pci_request_region(pdev, BAR_NUM, "ath");
2445         if (ret) {
2446                 ath10k_err("PCI MMIO reservation error: %d\n", ret);
2447                 goto err_device;
2448         }
2449
2450         /*
2451          * Target structures have a limit of 32 bit DMA pointers.
2452          * DMA pointers can be wider than 32 bits by default on some systems.
2453          */
2454         ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
2455         if (ret) {
2456                 ath10k_err("32-bit DMA not available: %d\n", ret);
2457                 goto err_region;
2458         }
2459
2460         ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
2461         if (ret) {
2462                 ath10k_err("cannot enable 32-bit consistent DMA: %d\n", ret);
2463                 goto err_region;
2464         }
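
	/*
	 * Editorial note: on later kernels the two mask calls above can be
	 * collapsed into a single
	 * dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)).
	 */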
2465
2466         /* Set bus master bit in PCI_COMMAND to enable DMA */
2467         pci_set_master(pdev);
2468
2469         /*
2470          * Temporary FIX: disable ASPM
2471          * Will be removed after the OTP is programmed
2472          */
2473         pci_read_config_dword(pdev, 0x80, &lcr_val);
2474         pci_write_config_dword(pdev, 0x80, (lcr_val & 0xffffff00));
2475
2476         /* Arrange for access to Target SoC registers. */
2477         mem = pci_iomap(pdev, BAR_NUM, 0);
2478         if (!mem) {
2479                 ath10k_err("PCI iomap error\n");
2480                 ret = -EIO;
2481                 goto err_master;
2482         }
2483
2484         ar_pci->mem = mem;
2485
2486         spin_lock_init(&ar_pci->ce_lock);
2487
2488         ret = ath10k_do_pci_wake(ar);
2489         if (ret) {
2490                 ath10k_err("Failed to get chip id: %d\n", ret);
2491                 goto err_iomap; /* unwind iomap/region instead of leaking them */
2492         }
2493
2494         chip_id = ath10k_pci_read32(ar,
2495                                     RTC_SOC_BASE_ADDRESS + SOC_CHIP_ID_ADDRESS);
2496
2497         ath10k_do_pci_sleep(ar);
2498
2499         ath10k_dbg(ATH10K_DBG_BOOT, "boot pci_mem 0x%p\n", ar_pci->mem);
2500
2501         ret = ath10k_core_register(ar, chip_id);
2502         if (ret) {
2503                 ath10k_err("could not register driver core (%d)\n", ret);
2504                 goto err_iomap;
2505         }
2506
2507         return 0;
2508
2509 err_iomap:
2510         pci_iounmap(pdev, mem);
2511 err_master:
2512         pci_clear_master(pdev);
2513 err_region:
2514         pci_release_region(pdev, BAR_NUM);
2515 err_device:
2516         pci_disable_device(pdev);
2517 err_ar:
2518         pci_set_drvdata(pdev, NULL);
2519         ath10k_core_destroy(ar);
2520 err_ar_pci:
2521         /* call HIF PCI free here */
2522         kfree(ar_pci);
2523
2524         return ret;
2525 }
2526
2527 static void ath10k_pci_remove(struct pci_dev *pdev)
2528 {
2529         struct ath10k *ar = pci_get_drvdata(pdev);
2530         struct ath10k_pci *ar_pci;
2531
2532         ath10k_dbg(ATH10K_DBG_PCI, "%s\n", __func__);
2533
2534         if (!ar)
2535                 return;
2536
2537         ar_pci = ath10k_pci_priv(ar);
2538
2539         if (!ar_pci)
2540                 return;
2541
2542         tasklet_kill(&ar_pci->msi_fw_err);
2543
2544         ath10k_core_unregister(ar);
2545
2546         pci_set_drvdata(pdev, NULL);
2547         pci_iounmap(pdev, ar_pci->mem);
2548         pci_release_region(pdev, BAR_NUM);
2549         pci_clear_master(pdev);
2550         pci_disable_device(pdev);
2551
2552         ath10k_core_destroy(ar);
2553         kfree(ar_pci);
2554 }
2555
2556 MODULE_DEVICE_TABLE(pci, ath10k_pci_id_table);
2557
2558 static struct pci_driver ath10k_pci_driver = {
2559         .name = "ath10k_pci",
2560         .id_table = ath10k_pci_id_table,
2561         .probe = ath10k_pci_probe,
2562         .remove = ath10k_pci_remove,
2563 };
2564
2565 static int __init ath10k_pci_init(void)
2566 {
2567         int ret;
2568
2569         ret = pci_register_driver(&ath10k_pci_driver);
2570         if (ret)
2571                 ath10k_err("pci_register_driver failed [%d]\n", ret);
2572
2573         return ret;
2574 }
2575 module_init(ath10k_pci_init);
2576
2577 static void __exit ath10k_pci_exit(void)
2578 {
2579         pci_unregister_driver(&ath10k_pci_driver);
2580 }
2581
2582 module_exit(ath10k_pci_exit);
2583
2584 MODULE_AUTHOR("Qualcomm Atheros");
2585 MODULE_DESCRIPTION("Driver support for Atheros QCA988X PCIe devices");
2586 MODULE_LICENSE("Dual BSD/GPL");
2587 MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" QCA988X_HW_2_0_FW_FILE);
2588 MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" QCA988X_HW_2_0_OTP_FILE);
2589 MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" QCA988X_HW_2_0_BOARD_DATA_FILE);