ath10k: remove unused ar_pci->cacheline_sz field
drivers/net/wireless/ath/ath10k/pci.c
/*
 * Copyright (c) 2005-2011 Atheros Communications Inc.
 * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <linux/pci.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>

#include "core.h"
#include "debug.h"

#include "targaddrs.h"
#include "bmi.h"

#include "hif.h"
#include "htc.h"

#include "ce.h"
#include "pci.h"

static unsigned int ath10k_target_ps;
module_param(ath10k_target_ps, uint, 0644);
MODULE_PARM_DESC(ath10k_target_ps, "Enable ath10k Target (SoC) PS option");

#define QCA988X_2_0_DEVICE_ID   (0x003c)

static DEFINE_PCI_DEVICE_TABLE(ath10k_pci_id_table) = {
        { PCI_VDEVICE(ATHEROS, QCA988X_2_0_DEVICE_ID) }, /* PCI-E QCA988X V2 */
        {0}
};

static int ath10k_pci_diag_read_access(struct ath10k *ar, u32 address,
                                       u32 *data);

static void ath10k_pci_process_ce(struct ath10k *ar);
static int ath10k_pci_post_rx(struct ath10k *ar);
static int ath10k_pci_post_rx_pipe(struct ath10k_pci_pipe *pipe_info,
                                   int num);
static void ath10k_pci_rx_pipe_cleanup(struct ath10k_pci_pipe *pipe_info);
static void ath10k_pci_stop_ce(struct ath10k *ar);
static void ath10k_pci_device_reset(struct ath10k *ar);
static int ath10k_pci_reset_target(struct ath10k *ar);
static int ath10k_pci_start_intr(struct ath10k *ar);
static void ath10k_pci_stop_intr(struct ath10k *ar);

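/*
 * Host-side Copy Engine configuration. The positional initializers below
 * fill struct ce_attr, which (per ce.h at this point in the tree) is
 * { flags, src_nentries, src_sz_max, dest_nentries }.
 */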
static const struct ce_attr host_ce_config_wlan[] = {
        /* host->target HTC control and raw streams */
        { /* CE0 */ CE_ATTR_FLAGS, 16, 256, 0 },
        /* could be moved to share CE3 */
        /* target->host HTT + HTC control */
        { /* CE1 */ CE_ATTR_FLAGS, 0, 512, 512 },
        /* target->host WMI */
        { /* CE2 */ CE_ATTR_FLAGS, 0, 2048, 32 },
        /* host->target WMI */
        { /* CE3 */ CE_ATTR_FLAGS, 32, 2048, 0 },
        /* host->target HTT */
        { /* CE4 */ CE_ATTR_FLAGS | CE_ATTR_DIS_INTR,
                    CE_HTT_H2T_MSG_SRC_NENTRIES, 256, 0 },
        /* unused */
        { /* CE5 */ CE_ATTR_FLAGS, 0, 0, 0 },
        /* Target autonomous hif_memcpy */
        { /* CE6 */ CE_ATTR_FLAGS, 0, 0, 0 },
        /* ce_diag, the Diagnostic Window */
        { /* CE7 */ CE_ATTR_FLAGS, 2, DIAG_TRANSFER_LIMIT, 2 },
};

/* Target firmware's Copy Engine configuration. */
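/*
 * Positional initializers below fill struct ce_pipe_config, which (per
 * ce.h at this point in the tree) is { pipenum, pipedir, nentries,
 * nbytes_max, flags, reserved }.
 */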
static const struct ce_pipe_config target_ce_config_wlan[] = {
        /* host->target HTC control and raw streams */
        { /* CE0 */ 0, PIPEDIR_OUT, 32, 256, CE_ATTR_FLAGS, 0,},
        /* target->host HTT + HTC control */
        { /* CE1 */ 1, PIPEDIR_IN, 32, 512, CE_ATTR_FLAGS, 0,},
        /* target->host WMI */
        { /* CE2 */ 2, PIPEDIR_IN, 32, 2048, CE_ATTR_FLAGS, 0,},
        /* host->target WMI */
        { /* CE3 */ 3, PIPEDIR_OUT, 32, 2048, CE_ATTR_FLAGS, 0,},
        /* host->target HTT */
        { /* CE4 */ 4, PIPEDIR_OUT, 256, 256, CE_ATTR_FLAGS, 0,},
        /* NB: 50% of src nentries, since tx has 2 frags */
        /* unused */
        { /* CE5 */ 5, PIPEDIR_OUT, 32, 2048, CE_ATTR_FLAGS, 0,},
        /* Reserved for target autonomous hif_memcpy */
        { /* CE6 */ 6, PIPEDIR_INOUT, 32, 4096, CE_ATTR_FLAGS, 0,},
        /* CE7 used only by Host */
};

/*
 * Diagnostic read/write access is provided for startup/config/debug usage.
 * Caller must guarantee proper alignment, when applicable, and single user
 * at any moment.
 */
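
/*
 * For illustration only: a hypothetical read of one 32-bit word from
 * target RAM via the diagnostic window (the offset is made up):
 *
 *      u32 val;
 *      int ret = ath10k_pci_diag_read_mem(ar, DRAM_BASE_ADDRESS + 0x1000,
 *                                         &val, sizeof(val));
 */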
static int ath10k_pci_diag_read_mem(struct ath10k *ar, u32 address, void *data,
                                    int nbytes)
{
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        int ret = 0;
        u32 buf;
        unsigned int completed_nbytes, orig_nbytes, remaining_bytes;
        unsigned int id;
        unsigned int flags;
        struct ath10k_ce_pipe *ce_diag;
        /* Host buffer address in CE space */
        u32 ce_data;
        dma_addr_t ce_data_base = 0;
        void *data_buf = NULL;
        int i;

        /*
         * This code cannot handle reads to non-memory space. Redirect to the
         * register read fn but preserve the multi word read capability of
         * this fn
         */
        if (address < DRAM_BASE_ADDRESS) {
                if (!IS_ALIGNED(address, 4) ||
                    !IS_ALIGNED((unsigned long)data, 4))
                        return -EIO;

                while ((nbytes >= 4) && ((ret = ath10k_pci_diag_read_access(
                                          ar, address, (u32 *)data)) == 0)) {
                        nbytes -= sizeof(u32);
                        address += sizeof(u32);
                        data += sizeof(u32);
                }
                return ret;
        }

        ce_diag = ar_pci->ce_diag;

        /*
         * Allocate a temporary bounce buffer to hold caller's data
         * to be DMA'ed from Target. This guarantees
         *   1) 4-byte alignment
         *   2) Buffer in DMA-able space
         */
        orig_nbytes = nbytes;
        data_buf = (unsigned char *)pci_alloc_consistent(ar_pci->pdev,
                                                         orig_nbytes,
                                                         &ce_data_base);

        if (!data_buf) {
                ret = -ENOMEM;
                goto done;
        }
        memset(data_buf, 0, orig_nbytes);

        remaining_bytes = orig_nbytes;
        ce_data = ce_data_base;
        while (remaining_bytes) {
                nbytes = min_t(unsigned int, remaining_bytes,
                               DIAG_TRANSFER_LIMIT);

                ret = ath10k_ce_recv_buf_enqueue(ce_diag, NULL, ce_data);
                if (ret != 0)
                        goto done;

                /* Request CE to send from Target(!) address to Host buffer */
                /*
                 * The address supplied by the caller is in the
                 * Target CPU virtual address space.
                 *
                 * In order to use this address with the diagnostic CE,
                 * convert it from Target CPU virtual address space
                 * to CE address space
                 */
                ath10k_pci_wake(ar);
                address = TARG_CPU_SPACE_TO_CE_SPACE(ar, ar_pci->mem,
                                                     address);
                ath10k_pci_sleep(ar);

                ret = ath10k_ce_send(ce_diag, NULL, (u32)address, nbytes, 0,
                                     0);
                if (ret)
                        goto done;

                i = 0;
                while (ath10k_ce_completed_send_next(ce_diag, NULL, &buf,
                                                     &completed_nbytes,
                                                     &id) != 0) {
                        mdelay(1);
                        if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) {
                                ret = -EBUSY;
                                goto done;
                        }
                }

                if (nbytes != completed_nbytes) {
                        ret = -EIO;
                        goto done;
                }

                if (buf != (u32)address) {
                        ret = -EIO;
                        goto done;
                }

                i = 0;
                while (ath10k_ce_completed_recv_next(ce_diag, NULL, &buf,
                                                     &completed_nbytes,
                                                     &id, &flags) != 0) {
                        mdelay(1);

                        if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) {
                                ret = -EBUSY;
                                goto done;
                        }
                }

                if (nbytes != completed_nbytes) {
                        ret = -EIO;
                        goto done;
                }

                if (buf != ce_data) {
                        ret = -EIO;
                        goto done;
                }

                remaining_bytes -= nbytes;
                address += nbytes;
                ce_data += nbytes;
        }

done:
        if (ret == 0) {
                /* Copy data from allocated DMA buf to caller's buf */
                WARN_ON_ONCE(orig_nbytes & 3);
                for (i = 0; i < orig_nbytes / sizeof(__le32); i++) {
                        ((u32 *)data)[i] =
                                __le32_to_cpu(((__le32 *)data_buf)[i]);
                }
        } else
                ath10k_dbg(ATH10K_DBG_PCI, "%s failure (0x%x)\n",
                           __func__, address);

        if (data_buf)
                pci_free_consistent(ar_pci->pdev, orig_nbytes,
                                    data_buf, ce_data_base);

        return ret;
}

/* Read 4-byte aligned data from Target memory or register */
static int ath10k_pci_diag_read_access(struct ath10k *ar, u32 address,
                                       u32 *data)
{
        /* Assume range doesn't cross this boundary */
        if (address >= DRAM_BASE_ADDRESS)
                return ath10k_pci_diag_read_mem(ar, address, data, sizeof(u32));

        ath10k_pci_wake(ar);
        *data = ath10k_pci_read32(ar, address);
        ath10k_pci_sleep(ar);
        return 0;
}

static int ath10k_pci_diag_write_mem(struct ath10k *ar, u32 address,
                                     const void *data, int nbytes)
{
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        int ret = 0;
        u32 buf;
        unsigned int completed_nbytes, orig_nbytes, remaining_bytes;
        unsigned int id;
        unsigned int flags;
        struct ath10k_ce_pipe *ce_diag;
        void *data_buf = NULL;
        u32 ce_data;    /* Host buffer address in CE space */
        dma_addr_t ce_data_base = 0;
        int i;

        ce_diag = ar_pci->ce_diag;

        /*
         * Allocate a temporary bounce buffer to hold caller's data
         * to be DMA'ed to Target. This guarantees
         *   1) 4-byte alignment
         *   2) Buffer in DMA-able space
         */
        orig_nbytes = nbytes;
        data_buf = (unsigned char *)pci_alloc_consistent(ar_pci->pdev,
                                                         orig_nbytes,
                                                         &ce_data_base);
        if (!data_buf) {
                ret = -ENOMEM;
                goto done;
        }

        /* Copy caller's data to allocated DMA buf */
        WARN_ON_ONCE(orig_nbytes & 3);
        for (i = 0; i < orig_nbytes / sizeof(__le32); i++)
                ((__le32 *)data_buf)[i] = __cpu_to_le32(((u32 *)data)[i]);

        /*
         * The address supplied by the caller is in the
         * Target CPU virtual address space.
         *
         * In order to use this address with the diagnostic CE,
         * convert it from
         *    Target CPU virtual address space
         * to
         *    CE address space
         */
        ath10k_pci_wake(ar);
        address = TARG_CPU_SPACE_TO_CE_SPACE(ar, ar_pci->mem, address);
        ath10k_pci_sleep(ar);

        remaining_bytes = orig_nbytes;
        ce_data = ce_data_base;
        while (remaining_bytes) {
                /* FIXME: check cast */
                nbytes = min_t(int, remaining_bytes, DIAG_TRANSFER_LIMIT);

                /* Set up to receive directly into Target(!) address */
                ret = ath10k_ce_recv_buf_enqueue(ce_diag, NULL, address);
                if (ret != 0)
                        goto done;

                /*
                 * Request CE to send caller-supplied data that
                 * was copied to bounce buffer to Target(!) address.
                 */
                ret = ath10k_ce_send(ce_diag, NULL, (u32)ce_data,
                                     nbytes, 0, 0);
                if (ret != 0)
                        goto done;

                i = 0;
                while (ath10k_ce_completed_send_next(ce_diag, NULL, &buf,
                                                     &completed_nbytes,
                                                     &id) != 0) {
                        mdelay(1);

                        if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) {
                                ret = -EBUSY;
                                goto done;
                        }
                }

                if (nbytes != completed_nbytes) {
                        ret = -EIO;
                        goto done;
                }

                if (buf != ce_data) {
                        ret = -EIO;
                        goto done;
                }

                i = 0;
                while (ath10k_ce_completed_recv_next(ce_diag, NULL, &buf,
                                                     &completed_nbytes,
                                                     &id, &flags) != 0) {
                        mdelay(1);

                        if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) {
                                ret = -EBUSY;
                                goto done;
                        }
                }

                if (nbytes != completed_nbytes) {
                        ret = -EIO;
                        goto done;
                }

                if (buf != address) {
                        ret = -EIO;
                        goto done;
                }

                remaining_bytes -= nbytes;
                address += nbytes;
                ce_data += nbytes;
        }

done:
        if (data_buf) {
                pci_free_consistent(ar_pci->pdev, orig_nbytes, data_buf,
                                    ce_data_base);
        }

        if (ret != 0)
                ath10k_dbg(ATH10K_DBG_PCI, "%s failure (0x%x)\n", __func__,
                           address);

        return ret;
}

/* Write 4B data to Target memory or register */
static int ath10k_pci_diag_write_access(struct ath10k *ar, u32 address,
                                        u32 data)
{
        /* Assume range doesn't cross this boundary */
        if (address >= DRAM_BASE_ADDRESS)
                return ath10k_pci_diag_write_mem(ar, address, &data,
                                                 sizeof(u32));

        ath10k_pci_wake(ar);
        ath10k_pci_write32(ar, address, data);
        ath10k_pci_sleep(ar);
        return 0;
}

static bool ath10k_pci_target_is_awake(struct ath10k *ar)
{
        void __iomem *mem = ath10k_pci_priv(ar)->mem;
        u32 val;
        val = ioread32(mem + PCIE_LOCAL_BASE_ADDRESS +
                       RTC_STATE_ADDRESS);
        return (RTC_STATE_V_GET(val) == RTC_STATE_V_ON);
}

static void ath10k_pci_wait(struct ath10k *ar)
{
        int n = 100;

        while (n-- && !ath10k_pci_target_is_awake(ar))
                msleep(10);

        if (n < 0)
                ath10k_warn("Unable to wakeup target\n");
}

void ath10k_do_pci_wake(struct ath10k *ar)
{
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        void __iomem *pci_addr = ar_pci->mem;
        int tot_delay = 0;
        int curr_delay = 5;

        if (atomic_read(&ar_pci->keep_awake_count) == 0) {
                /* Force AWAKE */
                iowrite32(PCIE_SOC_WAKE_V_MASK,
                          pci_addr + PCIE_LOCAL_BASE_ADDRESS +
                          PCIE_SOC_WAKE_ADDRESS);
        }
        atomic_inc(&ar_pci->keep_awake_count);

        if (ar_pci->verified_awake)
                return;

        for (;;) {
                if (ath10k_pci_target_is_awake(ar)) {
                        ar_pci->verified_awake = true;
                        break;
                }

                if (tot_delay > PCIE_WAKE_TIMEOUT) {
                        ath10k_warn("target takes too long to wake up (awake count %d)\n",
                                    atomic_read(&ar_pci->keep_awake_count));
                        break;
                }

                udelay(curr_delay);
                tot_delay += curr_delay;

                if (curr_delay < 50)
                        curr_delay += 5;
        }
}

void ath10k_do_pci_sleep(struct ath10k *ar)
{
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        void __iomem *pci_addr = ar_pci->mem;

        if (atomic_dec_and_test(&ar_pci->keep_awake_count)) {
                /* Allow sleep */
                ar_pci->verified_awake = false;
                iowrite32(PCIE_SOC_WAKE_RESET,
                          pci_addr + PCIE_LOCAL_BASE_ADDRESS +
                          PCIE_SOC_WAKE_ADDRESS);
        }
}

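/*
 * A minimal sketch of the intended usage of the wake/sleep pair, as
 * used for register accesses throughout this file:
 *
 *      ath10k_pci_wake(ar);
 *      val = ath10k_pci_read32(ar, address);
 *      ath10k_pci_sleep(ar);
 */
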
/*
 * FIXME: Handle OOM properly.
 */
static inline
struct ath10k_pci_compl *get_free_compl(struct ath10k_pci_pipe *pipe_info)
{
        struct ath10k_pci_compl *compl = NULL;

        spin_lock_bh(&pipe_info->pipe_lock);
        if (list_empty(&pipe_info->compl_free)) {
                ath10k_warn("Completion buffers are full\n");
                goto exit;
        }
        compl = list_first_entry(&pipe_info->compl_free,
                                 struct ath10k_pci_compl, list);
        list_del(&compl->list);
exit:
        spin_unlock_bh(&pipe_info->pipe_lock);
        return compl;
}

/* Called by lower (CE) layer when a send to Target completes. */
static void ath10k_pci_ce_send_done(struct ath10k_ce_pipe *ce_state,
                                    void *transfer_context,
                                    u32 ce_data,
                                    unsigned int nbytes,
                                    unsigned int transfer_id)
{
        struct ath10k *ar = ce_state->ar;
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        struct ath10k_pci_pipe *pipe_info = &ar_pci->pipe_info[ce_state->id];
        struct ath10k_pci_compl *compl;
        bool process = false;

        do {
                /*
                 * For the send completion of an item in sendlist, just
                 * increment num_sends_allowed. The upper layer callback will
                 * be triggered when last fragment is done with send.
                 */
                if (transfer_context == CE_SENDLIST_ITEM_CTXT) {
                        spin_lock_bh(&pipe_info->pipe_lock);
                        pipe_info->num_sends_allowed++;
                        spin_unlock_bh(&pipe_info->pipe_lock);
                        continue;
                }

                compl = get_free_compl(pipe_info);
                if (!compl)
                        break;

                compl->state = ATH10K_PCI_COMPL_SEND;
                compl->ce_state = ce_state;
                compl->pipe_info = pipe_info;
                compl->transfer_context = transfer_context;
                compl->nbytes = nbytes;
                compl->transfer_id = transfer_id;
                compl->flags = 0;

                /*
                 * Add the completion to the processing queue.
                 */
                spin_lock_bh(&ar_pci->compl_lock);
                list_add_tail(&compl->list, &ar_pci->compl_process);
                spin_unlock_bh(&ar_pci->compl_lock);

                process = true;
        } while (ath10k_ce_completed_send_next(ce_state,
                                               &transfer_context,
                                               &ce_data, &nbytes,
                                               &transfer_id) == 0);

        /*
         * If only some of the items within a sendlist have completed,
         * don't invoke completion processing until the entire sendlist
         * has been sent.
         */
        if (!process)
                return;

        ath10k_pci_process_ce(ar);
}

/* Called by lower (CE) layer when data is received from the Target. */
static void ath10k_pci_ce_recv_data(struct ath10k_ce_pipe *ce_state,
                                    void *transfer_context, u32 ce_data,
                                    unsigned int nbytes,
                                    unsigned int transfer_id,
                                    unsigned int flags)
{
        struct ath10k *ar = ce_state->ar;
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        struct ath10k_pci_pipe *pipe_info = &ar_pci->pipe_info[ce_state->id];
        struct ath10k_pci_compl *compl;
        struct sk_buff *skb;

        do {
                compl = get_free_compl(pipe_info);
                if (!compl)
                        break;

                compl->state = ATH10K_PCI_COMPL_RECV;
                compl->ce_state = ce_state;
                compl->pipe_info = pipe_info;
                compl->transfer_context = transfer_context;
                compl->nbytes = nbytes;
                compl->transfer_id = transfer_id;
                compl->flags = flags;

                skb = transfer_context;
                dma_unmap_single(ar->dev, ATH10K_SKB_CB(skb)->paddr,
                                 skb->len + skb_tailroom(skb),
                                 DMA_FROM_DEVICE);
                /*
                 * Add the completion to the processing queue.
                 */
                spin_lock_bh(&ar_pci->compl_lock);
                list_add_tail(&compl->list, &ar_pci->compl_process);
                spin_unlock_bh(&ar_pci->compl_lock);

        } while (ath10k_ce_completed_recv_next(ce_state,
                                               &transfer_context,
                                               &ce_data, &nbytes,
                                               &transfer_id,
                                               &flags) == 0);

        ath10k_pci_process_ce(ar);
}

/* Send the first nbytes bytes of the buffer */
static int ath10k_pci_hif_send_head(struct ath10k *ar, u8 pipe_id,
                                    unsigned int transfer_id,
                                    unsigned int bytes, struct sk_buff *nbuf)
{
        struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(nbuf);
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        struct ath10k_pci_pipe *pipe_info = &(ar_pci->pipe_info[pipe_id]);
        struct ath10k_ce_pipe *ce_hdl = pipe_info->ce_hdl;
        struct ce_sendlist sendlist;
        unsigned int len;
        u32 flags = 0;
        int ret;

        memset(&sendlist, 0, sizeof(struct ce_sendlist));

        len = min(bytes, nbuf->len);
        bytes -= len;

        if (len & 3)
                ath10k_warn("skb not aligned to 4-byte boundary (%d)\n", len);

        ath10k_dbg(ATH10K_DBG_PCI,
                   "pci send data vaddr %p paddr 0x%llx len %d as %d bytes\n",
                   nbuf->data, (unsigned long long) skb_cb->paddr,
                   nbuf->len, len);
        ath10k_dbg_dump(ATH10K_DBG_PCI_DUMP, NULL,
                        "ath10k tx: data: ",
                        nbuf->data, nbuf->len);

        ath10k_ce_sendlist_buf_add(&sendlist, skb_cb->paddr, len, flags);

        /* Make sure we have resources to handle this request */
        spin_lock_bh(&pipe_info->pipe_lock);
        if (!pipe_info->num_sends_allowed) {
                ath10k_warn("Pipe: %d is full\n", pipe_id);
                spin_unlock_bh(&pipe_info->pipe_lock);
                return -ENOSR;
        }
        pipe_info->num_sends_allowed--;
        spin_unlock_bh(&pipe_info->pipe_lock);

        ret = ath10k_ce_sendlist_send(ce_hdl, nbuf, &sendlist, transfer_id);
        if (ret)
                ath10k_warn("CE send failed: %p\n", nbuf);

        return ret;
}

static u16 ath10k_pci_hif_get_free_queue_number(struct ath10k *ar, u8 pipe)
{
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        struct ath10k_pci_pipe *pipe_info = &(ar_pci->pipe_info[pipe]);
        int ret;

        spin_lock_bh(&pipe_info->pipe_lock);
        ret = pipe_info->num_sends_allowed;
        spin_unlock_bh(&pipe_info->pipe_lock);

        return ret;
}

static void ath10k_pci_hif_dump_area(struct ath10k *ar)
{
        u32 reg_dump_area = 0;
        u32 reg_dump_values[REG_DUMP_COUNT_QCA988X] = {};
        u32 host_addr;
        int ret;
        u32 i;

        ath10k_err("firmware crashed!\n");
        ath10k_err("hardware name %s version 0x%x\n",
                   ar->hw_params.name, ar->target_version);
        ath10k_err("firmware version: %u.%u.%u.%u\n", ar->fw_version_major,
                   ar->fw_version_minor, ar->fw_version_release,
                   ar->fw_version_build);

        host_addr = host_interest_item_address(HI_ITEM(hi_failure_state));
        if (ath10k_pci_diag_read_mem(ar, host_addr,
                                     &reg_dump_area, sizeof(u32)) != 0) {
                ath10k_warn("could not read hi_failure_state\n");
                return;
        }

        ath10k_err("target register Dump Location: 0x%08X\n", reg_dump_area);

        ret = ath10k_pci_diag_read_mem(ar, reg_dump_area,
                                       &reg_dump_values[0],
                                       REG_DUMP_COUNT_QCA988X * sizeof(u32));
        if (ret != 0) {
                ath10k_err("could not dump FW Dump Area\n");
                return;
        }

        BUILD_BUG_ON(REG_DUMP_COUNT_QCA988X % 4);

        ath10k_err("target Register Dump\n");
        for (i = 0; i < REG_DUMP_COUNT_QCA988X; i += 4)
                ath10k_err("[%02d]: 0x%08X 0x%08X 0x%08X 0x%08X\n",
                           i,
                           reg_dump_values[i],
                           reg_dump_values[i + 1],
                           reg_dump_values[i + 2],
                           reg_dump_values[i + 3]);

        ieee80211_queue_work(ar->hw, &ar->restart_work);
}

static void ath10k_pci_hif_send_complete_check(struct ath10k *ar, u8 pipe,
                                               int force)
{
        if (!force) {
                int resources;
                /*
                 * Decide whether to actually poll for completions, or just
                 * wait for a later chance.
                 * If there seem to be plenty of resources left, then just wait
                 * since checking involves reading a CE register, which is a
                 * relatively expensive operation.
                 */
                resources = ath10k_pci_hif_get_free_queue_number(ar, pipe);

                /*
                 * If at least 50% of the total resources are still available,
                 * don't bother checking again yet.
                 */
                if (resources > (host_ce_config_wlan[pipe].src_nentries >> 1))
                        return;
        }
        ath10k_ce_per_engine_service(ar, pipe);
}

static void ath10k_pci_hif_set_callbacks(struct ath10k *ar,
                                         struct ath10k_hif_cb *callbacks)
{
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

        ath10k_dbg(ATH10K_DBG_PCI, "%s\n", __func__);

        memcpy(&ar_pci->msg_callbacks_current, callbacks,
               sizeof(ar_pci->msg_callbacks_current));
}

static int ath10k_pci_start_ce(struct ath10k *ar)
{
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        struct ath10k_ce_pipe *ce_diag = ar_pci->ce_diag;
        const struct ce_attr *attr;
        struct ath10k_pci_pipe *pipe_info;
        struct ath10k_pci_compl *compl;
        int i, pipe_num, completions, disable_interrupts;

        spin_lock_init(&ar_pci->compl_lock);
        INIT_LIST_HEAD(&ar_pci->compl_process);

        for (pipe_num = 0; pipe_num < ar_pci->ce_count; pipe_num++) {
                pipe_info = &ar_pci->pipe_info[pipe_num];

                spin_lock_init(&pipe_info->pipe_lock);
                INIT_LIST_HEAD(&pipe_info->compl_free);

                /* Handle Diagnostic CE specially */
                if (pipe_info->ce_hdl == ce_diag)
                        continue;

                attr = &host_ce_config_wlan[pipe_num];
                completions = 0;

                if (attr->src_nentries) {
                        disable_interrupts = attr->flags & CE_ATTR_DIS_INTR;
                        ath10k_ce_send_cb_register(pipe_info->ce_hdl,
                                                   ath10k_pci_ce_send_done,
                                                   disable_interrupts);
                        completions += attr->src_nentries;
                        pipe_info->num_sends_allowed = attr->src_nentries - 1;
                }

                if (attr->dest_nentries) {
                        ath10k_ce_recv_cb_register(pipe_info->ce_hdl,
                                                   ath10k_pci_ce_recv_data);
                        completions += attr->dest_nentries;
                }

                if (completions == 0)
                        continue;

                for (i = 0; i < completions; i++) {
                        compl = kmalloc(sizeof(*compl), GFP_KERNEL);
                        if (!compl) {
                                ath10k_warn("No memory for completion state\n");
                                ath10k_pci_stop_ce(ar);
                                return -ENOMEM;
                        }

                        compl->state = ATH10K_PCI_COMPL_FREE;
                        list_add_tail(&compl->list, &pipe_info->compl_free);
                }
        }

        return 0;
}

static void ath10k_pci_stop_ce(struct ath10k *ar)
{
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        struct ath10k_pci_compl *compl;
        struct sk_buff *skb;
        int i;

        ath10k_ce_disable_interrupts(ar);

        /* Cancel the pending tasklet */
        tasklet_kill(&ar_pci->intr_tq);

        for (i = 0; i < CE_COUNT; i++)
                tasklet_kill(&ar_pci->pipe_info[i].intr);

        /* Mark pending completions as aborted, so that upper layers free up
         * their associated resources */
        spin_lock_bh(&ar_pci->compl_lock);
        list_for_each_entry(compl, &ar_pci->compl_process, list) {
                skb = (struct sk_buff *)compl->transfer_context;
                ATH10K_SKB_CB(skb)->is_aborted = true;
        }
        spin_unlock_bh(&ar_pci->compl_lock);
}

static void ath10k_pci_cleanup_ce(struct ath10k *ar)
{
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        struct ath10k_pci_compl *compl, *tmp;
        struct ath10k_pci_pipe *pipe_info;
        struct sk_buff *netbuf;
        int pipe_num;

        /* Free pending completions. */
        spin_lock_bh(&ar_pci->compl_lock);
        if (!list_empty(&ar_pci->compl_process))
                ath10k_warn("pending completions still present! possible memory leaks.\n");

        list_for_each_entry_safe(compl, tmp, &ar_pci->compl_process, list) {
                list_del(&compl->list);
                netbuf = (struct sk_buff *)compl->transfer_context;
                dev_kfree_skb_any(netbuf);
                kfree(compl);
        }
        spin_unlock_bh(&ar_pci->compl_lock);

        /* Free unused completions for each pipe. */
        for (pipe_num = 0; pipe_num < ar_pci->ce_count; pipe_num++) {
                pipe_info = &ar_pci->pipe_info[pipe_num];

                spin_lock_bh(&pipe_info->pipe_lock);
                list_for_each_entry_safe(compl, tmp,
                                         &pipe_info->compl_free, list) {
                        list_del(&compl->list);
                        kfree(compl);
                }
                spin_unlock_bh(&pipe_info->pipe_lock);
        }
}

static void ath10k_pci_process_ce(struct ath10k *ar)
{
        struct ath10k_pci *ar_pci = ar->hif.priv;
        struct ath10k_hif_cb *cb = &ar_pci->msg_callbacks_current;
        struct ath10k_pci_compl *compl;
        struct sk_buff *skb;
        unsigned int nbytes;
        int ret, send_done = 0;

        /* Upper layers aren't ready to handle tx/rx completions in parallel so
         * we must serialize all completion processing. */

        spin_lock_bh(&ar_pci->compl_lock);
        if (ar_pci->compl_processing) {
                spin_unlock_bh(&ar_pci->compl_lock);
                return;
        }
        ar_pci->compl_processing = true;
        spin_unlock_bh(&ar_pci->compl_lock);

        for (;;) {
                spin_lock_bh(&ar_pci->compl_lock);
                if (list_empty(&ar_pci->compl_process)) {
                        spin_unlock_bh(&ar_pci->compl_lock);
                        break;
                }
                compl = list_first_entry(&ar_pci->compl_process,
                                         struct ath10k_pci_compl, list);
                list_del(&compl->list);
                spin_unlock_bh(&ar_pci->compl_lock);

                switch (compl->state) {
                case ATH10K_PCI_COMPL_SEND:
                        cb->tx_completion(ar,
                                          compl->transfer_context,
                                          compl->transfer_id);
                        send_done = 1;
                        break;
                case ATH10K_PCI_COMPL_RECV:
                        ret = ath10k_pci_post_rx_pipe(compl->pipe_info, 1);
                        if (ret) {
                                ath10k_warn("Unable to post recv buffer for pipe: %d\n",
                                            compl->pipe_info->pipe_num);
                                break;
                        }

                        skb = (struct sk_buff *)compl->transfer_context;
                        nbytes = compl->nbytes;

                        ath10k_dbg(ATH10K_DBG_PCI,
                                   "ath10k_pci_ce_recv_data netbuf=%p nbytes=%d\n",
                                   skb, nbytes);
                        ath10k_dbg_dump(ATH10K_DBG_PCI_DUMP, NULL,
                                        "ath10k rx: ", skb->data, nbytes);

                        if (skb->len + skb_tailroom(skb) >= nbytes) {
                                skb_trim(skb, 0);
                                skb_put(skb, nbytes);
                                cb->rx_completion(ar, skb,
                                                  compl->pipe_info->pipe_num);
                        } else {
                                ath10k_warn("rxed more than expected (nbytes %d, max %d)",
                                            nbytes,
                                            skb->len + skb_tailroom(skb));
                        }
                        break;
                case ATH10K_PCI_COMPL_FREE:
                        ath10k_warn("free completion cannot be processed\n");
                        break;
                default:
                        ath10k_warn("invalid completion state (%d)\n",
                                    compl->state);
                        break;
                }

                compl->state = ATH10K_PCI_COMPL_FREE;

                /*
                 * Add completion back to the pipe's free list.
                 */
                spin_lock_bh(&compl->pipe_info->pipe_lock);
                list_add_tail(&compl->list, &compl->pipe_info->compl_free);
                compl->pipe_info->num_sends_allowed += send_done;
                spin_unlock_bh(&compl->pipe_info->pipe_lock);
        }

        spin_lock_bh(&ar_pci->compl_lock);
        ar_pci->compl_processing = false;
        spin_unlock_bh(&ar_pci->compl_lock);
}

/* TODO - temporary mapping while we have too few CE's */
static int ath10k_pci_hif_map_service_to_pipe(struct ath10k *ar,
                                              u16 service_id, u8 *ul_pipe,
                                              u8 *dl_pipe, int *ul_is_polled,
                                              int *dl_is_polled)
{
        int ret = 0;

        /* polling for received messages not supported */
        *dl_is_polled = 0;

        switch (service_id) {
        case ATH10K_HTC_SVC_ID_HTT_DATA_MSG:
                /*
                 * Host->target HTT gets its own pipe, so it can be polled
                 * while other pipes are interrupt driven.
                 */
                *ul_pipe = 4;
                /*
                 * Use the same target->host pipe for HTC ctrl, HTC raw
                 * streams, and HTT.
                 */
                *dl_pipe = 1;
                break;

        case ATH10K_HTC_SVC_ID_RSVD_CTRL:
        case ATH10K_HTC_SVC_ID_TEST_RAW_STREAMS:
                /*
                 * Note: HTC_RAW_STREAMS_SVC is currently unused, and
                 * HTC_CTRL_RSVD_SVC could share the same pipe as the
                 * WMI services.  So, if another CE is needed, change
                 * this to *ul_pipe = 3, which frees up CE 0.
                 */
                /* *ul_pipe = 3; */
                *ul_pipe = 0;
                *dl_pipe = 1;
                break;

        case ATH10K_HTC_SVC_ID_WMI_DATA_BK:
        case ATH10K_HTC_SVC_ID_WMI_DATA_BE:
        case ATH10K_HTC_SVC_ID_WMI_DATA_VI:
        case ATH10K_HTC_SVC_ID_WMI_DATA_VO:

        case ATH10K_HTC_SVC_ID_WMI_CONTROL:
                *ul_pipe = 3;
                *dl_pipe = 2;
                break;

                /* pipe 5 unused   */
                /* pipe 6 reserved */
                /* pipe 7 reserved */

        default:
                ret = -1;
                break;
        }
        *ul_is_polled =
                (host_ce_config_wlan[*ul_pipe].flags & CE_ATTR_DIS_INTR) != 0;

        return ret;
}

static void ath10k_pci_hif_get_default_pipe(struct ath10k *ar,
                                            u8 *ul_pipe, u8 *dl_pipe)
{
        int ul_is_polled, dl_is_polled;

        (void)ath10k_pci_hif_map_service_to_pipe(ar,
                                                 ATH10K_HTC_SVC_ID_RSVD_CTRL,
                                                 ul_pipe,
                                                 dl_pipe,
                                                 &ul_is_polled,
                                                 &dl_is_polled);
}

static int ath10k_pci_post_rx_pipe(struct ath10k_pci_pipe *pipe_info,
                                   int num)
{
        struct ath10k *ar = pipe_info->hif_ce_state;
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        struct ath10k_ce_pipe *ce_state = pipe_info->ce_hdl;
        struct sk_buff *skb;
        dma_addr_t ce_data;
        int i, ret = 0;

        if (pipe_info->buf_sz == 0)
                return 0;

        for (i = 0; i < num; i++) {
                skb = dev_alloc_skb(pipe_info->buf_sz);
                if (!skb) {
                        ath10k_warn("could not allocate skbuff for pipe %d\n",
                                    num);
                        ret = -ENOMEM;
                        goto err;
                }

                WARN_ONCE((unsigned long)skb->data & 3, "unaligned skb");

                ce_data = dma_map_single(ar->dev, skb->data,
                                         skb->len + skb_tailroom(skb),
                                         DMA_FROM_DEVICE);

                if (unlikely(dma_mapping_error(ar->dev, ce_data))) {
                        ath10k_warn("could not dma map skbuff\n");
                        dev_kfree_skb_any(skb);
                        ret = -EIO;
                        goto err;
                }

                ATH10K_SKB_CB(skb)->paddr = ce_data;

                pci_dma_sync_single_for_device(ar_pci->pdev, ce_data,
                                               pipe_info->buf_sz,
                                               PCI_DMA_FROMDEVICE);

                ret = ath10k_ce_recv_buf_enqueue(ce_state, (void *)skb,
                                                 ce_data);
                if (ret) {
                        ath10k_warn("could not enqueue to pipe %d (%d)\n",
                                    num, ret);
                        goto err;
                }
        }

        return ret;

err:
        ath10k_pci_rx_pipe_cleanup(pipe_info);
        return ret;
}

static int ath10k_pci_post_rx(struct ath10k *ar)
{
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        struct ath10k_pci_pipe *pipe_info;
        const struct ce_attr *attr;
        int pipe_num, ret = 0;

        for (pipe_num = 0; pipe_num < ar_pci->ce_count; pipe_num++) {
                pipe_info = &ar_pci->pipe_info[pipe_num];
                attr = &host_ce_config_wlan[pipe_num];

                if (attr->dest_nentries == 0)
                        continue;

                ret = ath10k_pci_post_rx_pipe(pipe_info,
                                              attr->dest_nentries - 1);
                if (ret) {
                        ath10k_warn("Unable to replenish recv buffers for pipe: %d\n",
                                    pipe_num);

                        for (; pipe_num >= 0; pipe_num--) {
                                pipe_info = &ar_pci->pipe_info[pipe_num];
                                ath10k_pci_rx_pipe_cleanup(pipe_info);
                        }
                        return ret;
                }
        }

        return 0;
}

static int ath10k_pci_hif_start(struct ath10k *ar)
{
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        int ret;

        ret = ath10k_pci_start_ce(ar);
        if (ret) {
                ath10k_warn("could not start CE (%d)\n", ret);
                return ret;
        }

        /* Post buffers once to start things off. */
        ret = ath10k_pci_post_rx(ar);
        if (ret) {
                ath10k_warn("could not post rx pipes (%d)\n", ret);
                return ret;
        }

        ar_pci->started = 1;
        return 0;
}

static void ath10k_pci_rx_pipe_cleanup(struct ath10k_pci_pipe *pipe_info)
{
        struct ath10k *ar;
        struct ath10k_pci *ar_pci;
        struct ath10k_ce_pipe *ce_hdl;
        u32 buf_sz;
        struct sk_buff *netbuf;
        u32 ce_data;

        buf_sz = pipe_info->buf_sz;

        /* Unused Copy Engine */
        if (buf_sz == 0)
                return;

        ar = pipe_info->hif_ce_state;
        ar_pci = ath10k_pci_priv(ar);

        if (!ar_pci->started)
                return;

        ce_hdl = pipe_info->ce_hdl;

        while (ath10k_ce_revoke_recv_next(ce_hdl, (void **)&netbuf,
                                          &ce_data) == 0) {
                dma_unmap_single(ar->dev, ATH10K_SKB_CB(netbuf)->paddr,
                                 netbuf->len + skb_tailroom(netbuf),
                                 DMA_FROM_DEVICE);
                dev_kfree_skb_any(netbuf);
        }
}

static void ath10k_pci_tx_pipe_cleanup(struct ath10k_pci_pipe *pipe_info)
{
        struct ath10k *ar;
        struct ath10k_pci *ar_pci;
        struct ath10k_ce_pipe *ce_hdl;
        struct sk_buff *netbuf;
        u32 ce_data;
        unsigned int nbytes;
        unsigned int id;
        u32 buf_sz;

        buf_sz = pipe_info->buf_sz;

        /* Unused Copy Engine */
        if (buf_sz == 0)
                return;

        ar = pipe_info->hif_ce_state;
        ar_pci = ath10k_pci_priv(ar);

        if (!ar_pci->started)
                return;

        ce_hdl = pipe_info->ce_hdl;

        while (ath10k_ce_cancel_send_next(ce_hdl, (void **)&netbuf,
                                          &ce_data, &nbytes, &id) == 0) {
                if (netbuf != CE_SENDLIST_ITEM_CTXT) {
                        /*
                         * Indicate the completion to higher layers to free
                         * the buffer
                         */
                        ATH10K_SKB_CB(netbuf)->is_aborted = true;
                        ar_pci->msg_callbacks_current.tx_completion(ar,
                                                                    netbuf,
                                                                    id);
                }
        }
}

/*
 * Cleanup residual buffers for device shutdown:
 *    buffers that were enqueued for receive
 *    buffers that were to be sent
 * Note: Buffers that had completed but which were
 * not yet processed are on a completion queue. They
 * are handled when the completion thread shuts down.
 */
static void ath10k_pci_buffer_cleanup(struct ath10k *ar)
{
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        int pipe_num;

        for (pipe_num = 0; pipe_num < ar_pci->ce_count; pipe_num++) {
                struct ath10k_pci_pipe *pipe_info;

                pipe_info = &ar_pci->pipe_info[pipe_num];
                ath10k_pci_rx_pipe_cleanup(pipe_info);
                ath10k_pci_tx_pipe_cleanup(pipe_info);
        }
}

static void ath10k_pci_ce_deinit(struct ath10k *ar)
{
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        struct ath10k_pci_pipe *pipe_info;
        int pipe_num;

        for (pipe_num = 0; pipe_num < ar_pci->ce_count; pipe_num++) {
                pipe_info = &ar_pci->pipe_info[pipe_num];
                if (pipe_info->ce_hdl) {
                        ath10k_ce_deinit(pipe_info->ce_hdl);
                        pipe_info->ce_hdl = NULL;
                        pipe_info->buf_sz = 0;
                }
        }
}

static void ath10k_pci_disable_irqs(struct ath10k *ar)
{
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        int i;

        for (i = 0; i < max(1, ar_pci->num_msi_intrs); i++)
                disable_irq(ar_pci->pdev->irq + i);
}

static void ath10k_pci_hif_stop(struct ath10k *ar)
{
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

        ath10k_dbg(ATH10K_DBG_PCI, "%s\n", __func__);

        /* Irqs are never explicitly re-enabled. They are implicitly re-enabled
         * by ath10k_pci_start_intr(). */
        ath10k_pci_disable_irqs(ar);

        ath10k_pci_stop_ce(ar);

        /* At this point, asynchronous threads are stopped, the target should
         * not DMA nor interrupt. We process the leftovers and then free
         * everything else up. */

        ath10k_pci_process_ce(ar);
        ath10k_pci_cleanup_ce(ar);
        ath10k_pci_buffer_cleanup(ar);

        ar_pci->started = 0;
}

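/*
 * BMI exchange flow: the request is copied into a DMA-safe bounce buffer
 * and sent on the BMI TX copy engine; when a response is expected, a
 * receive buffer is posted on the BMI RX copy engine first, and the
 * send/recv callbacks below signal xfer.done.
 */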
static int ath10k_pci_hif_exchange_bmi_msg(struct ath10k *ar,
                                           void *req, u32 req_len,
                                           void *resp, u32 *resp_len)
{
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        struct ath10k_pci_pipe *pci_tx = &ar_pci->pipe_info[BMI_CE_NUM_TO_TARG];
        struct ath10k_pci_pipe *pci_rx = &ar_pci->pipe_info[BMI_CE_NUM_TO_HOST];
        struct ath10k_ce_pipe *ce_tx = pci_tx->ce_hdl;
        struct ath10k_ce_pipe *ce_rx = pci_rx->ce_hdl;
        dma_addr_t req_paddr = 0;
        dma_addr_t resp_paddr = 0;
        struct bmi_xfer xfer = {};
        void *treq, *tresp = NULL;
        int ret = 0;

        if (resp && !resp_len)
                return -EINVAL;

        if (resp && resp_len && *resp_len == 0)
                return -EINVAL;

        treq = kmemdup(req, req_len, GFP_KERNEL);
        if (!treq)
                return -ENOMEM;

        req_paddr = dma_map_single(ar->dev, treq, req_len, DMA_TO_DEVICE);
        ret = dma_mapping_error(ar->dev, req_paddr);
        if (ret)
                goto err_dma;

        if (resp && resp_len) {
                tresp = kzalloc(*resp_len, GFP_KERNEL);
                if (!tresp) {
                        ret = -ENOMEM;
                        goto err_req;
                }

                resp_paddr = dma_map_single(ar->dev, tresp, *resp_len,
                                            DMA_FROM_DEVICE);
                ret = dma_mapping_error(ar->dev, resp_paddr);
                if (ret)
                        goto err_req;

                xfer.wait_for_resp = true;
                xfer.resp_len = 0;

                ath10k_ce_recv_buf_enqueue(ce_rx, &xfer, resp_paddr);
        }

        init_completion(&xfer.done);

        ret = ath10k_ce_send(ce_tx, &xfer, req_paddr, req_len, -1, 0);
        if (ret)
                goto err_resp;

        ret = wait_for_completion_timeout(&xfer.done,
                                          BMI_COMMUNICATION_TIMEOUT_HZ);
        if (ret <= 0) {
                u32 unused_buffer;
                unsigned int unused_nbytes;
                unsigned int unused_id;

                ret = -ETIMEDOUT;
                ath10k_ce_cancel_send_next(ce_tx, NULL, &unused_buffer,
                                           &unused_nbytes, &unused_id);
        } else {
                /* non-zero means we did not time out */
                ret = 0;
        }

err_resp:
        if (resp) {
                u32 unused_buffer;

                ath10k_ce_revoke_recv_next(ce_rx, NULL, &unused_buffer);
                dma_unmap_single(ar->dev, resp_paddr,
                                 *resp_len, DMA_FROM_DEVICE);
        }
err_req:
        dma_unmap_single(ar->dev, req_paddr, req_len, DMA_TO_DEVICE);

        if (ret == 0 && resp_len) {
                *resp_len = min(*resp_len, xfer.resp_len);
                memcpy(resp, tresp, xfer.resp_len);
        }
err_dma:
        kfree(treq);
        kfree(tresp);

        return ret;
}

static void ath10k_pci_bmi_send_done(struct ath10k_ce_pipe *ce_state,
                                     void *transfer_context,
                                     u32 data,
                                     unsigned int nbytes,
                                     unsigned int transfer_id)
{
        struct bmi_xfer *xfer = transfer_context;

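        /*
         * If a response is expected, completion is signalled from
         * ath10k_pci_bmi_recv_data() once the reply arrives, not here.
         */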
        if (xfer->wait_for_resp)
                return;

        complete(&xfer->done);
}

static void ath10k_pci_bmi_recv_data(struct ath10k_ce_pipe *ce_state,
                                     void *transfer_context,
                                     u32 data,
                                     unsigned int nbytes,
                                     unsigned int transfer_id,
                                     unsigned int flags)
{
        struct bmi_xfer *xfer = transfer_context;

        if (!xfer->wait_for_resp) {
                ath10k_warn("unexpected: BMI data received; ignoring\n");
                return;
        }

        xfer->resp_len = nbytes;
        complete(&xfer->done);
}

/*
 * Map from service/endpoint to Copy Engine.
 * This table is derived from the CE_PCI TABLE, above.
 * It is passed to the Target at startup for use by firmware.
 */
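/*
 * For example, the ATH10K_HTC_SVC_ID_WMI_CONTROL entries below route
 * host->target WMI traffic to CE 3 and target->host WMI traffic to CE 2.
 */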
1427 static const struct service_to_pipe target_service_to_ce_map_wlan[] = {
1428         {
1429                  ATH10K_HTC_SVC_ID_WMI_DATA_VO,
1430                  PIPEDIR_OUT,           /* out = UL = host -> target */
1431                  3,
1432         },
1433         {
1434                  ATH10K_HTC_SVC_ID_WMI_DATA_VO,
1435                  PIPEDIR_IN,            /* in = DL = target -> host */
1436                  2,
1437         },
1438         {
1439                  ATH10K_HTC_SVC_ID_WMI_DATA_BK,
1440                  PIPEDIR_OUT,           /* out = UL = host -> target */
1441                  3,
1442         },
1443         {
1444                  ATH10K_HTC_SVC_ID_WMI_DATA_BK,
1445                  PIPEDIR_IN,            /* in = DL = target -> host */
1446                  2,
1447         },
1448         {
1449                  ATH10K_HTC_SVC_ID_WMI_DATA_BE,
1450                  PIPEDIR_OUT,           /* out = UL = host -> target */
1451                  3,
1452         },
1453         {
1454                  ATH10K_HTC_SVC_ID_WMI_DATA_BE,
1455                  PIPEDIR_IN,            /* in = DL = target -> host */
1456                  2,
1457         },
1458         {
1459                  ATH10K_HTC_SVC_ID_WMI_DATA_VI,
1460                  PIPEDIR_OUT,           /* out = UL = host -> target */
1461                  3,
1462         },
1463         {
1464                  ATH10K_HTC_SVC_ID_WMI_DATA_VI,
1465                  PIPEDIR_IN,            /* in = DL = target -> host */
1466                  2,
1467         },
1468         {
1469                  ATH10K_HTC_SVC_ID_WMI_CONTROL,
1470                  PIPEDIR_OUT,           /* out = UL = host -> target */
1471                  3,
1472         },
1473         {
1474                  ATH10K_HTC_SVC_ID_WMI_CONTROL,
1475                  PIPEDIR_IN,            /* in = DL = target -> host */
1476                  2,
1477         },
1478         {
1479                  ATH10K_HTC_SVC_ID_RSVD_CTRL,
1480                  PIPEDIR_OUT,           /* out = UL = host -> target */
1481                  0,             /* could be moved to 3 (share with WMI) */
1482         },
1483         {
1484                  ATH10K_HTC_SVC_ID_RSVD_CTRL,
1485                  PIPEDIR_IN,            /* in = DL = target -> host */
1486                  1,
1487         },
1488         {
1489                  ATH10K_HTC_SVC_ID_TEST_RAW_STREAMS,    /* not currently used */
1490                  PIPEDIR_OUT,           /* out = UL = host -> target */
1491                  0,
1492         },
1493         {
1494                  ATH10K_HTC_SVC_ID_TEST_RAW_STREAMS,    /* not currently used */
1495                  PIPEDIR_IN,            /* in = DL = target -> host */
1496                  1,
1497         },
1498         {
1499                  ATH10K_HTC_SVC_ID_HTT_DATA_MSG,
1500                  PIPEDIR_OUT,           /* out = UL = host -> target */
1501                  4,
1502         },
1503         {
1504                  ATH10K_HTC_SVC_ID_HTT_DATA_MSG,
1505                  PIPEDIR_IN,            /* in = DL = target -> host */
1506                  1,
1507         },
1508
1509         /* (Additions here) */
1510
1511         {                               /* Must be last */
1512                  0,
1513                  0,
1514                  0,
1515         },
1516 };
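
/*
 * Each entry above is { HTC service id, pipe direction, CE pipe number };
 * the all-zero final entry terminates the table.
 */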
1517
1518 /*
1519  * Send an interrupt to the device to wake up the Target CPU
1520  * so it has an opportunity to notice any changed state.
1521  */
1522 static int ath10k_pci_wake_target_cpu(struct ath10k *ar)
1523 {
1524         int ret;
1525         u32 core_ctrl;
1526
1527         ret = ath10k_pci_diag_read_access(ar, SOC_CORE_BASE_ADDRESS |
1528                                               CORE_CTRL_ADDRESS,
1529                                           &core_ctrl);
1530         if (ret) {
1531                 ath10k_warn("Unable to read core ctrl\n");
1532                 return ret;
1533         }
1534
1535         /* A_INUM_FIRMWARE interrupt to Target CPU */
1536         core_ctrl |= CORE_CTRL_CPU_INTR_MASK;
1537
1538         ret = ath10k_pci_diag_write_access(ar, SOC_CORE_BASE_ADDRESS |
1539                                                CORE_CTRL_ADDRESS,
1540                                            core_ctrl);
1541         if (ret)
1542                 ath10k_warn("Unable to set interrupt mask\n");
1543
1544         return ret;
1545 }
1546
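/*
 * Download the target-side configuration through the diagnostic window:
 * the CE pipe setup, the service-to-pipe map, PCIe config flags (with L1
 * disabled) and the early-allocation settings, then set
 * HI_OPTION_EARLY_CFG_DONE so the target proceeds with initialization.
 */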
1547 static int ath10k_pci_init_config(struct ath10k *ar)
1548 {
1549         u32 interconnect_targ_addr;
1550         u32 pcie_state_targ_addr = 0;
1551         u32 pipe_cfg_targ_addr = 0;
1552         u32 svc_to_pipe_map = 0;
1553         u32 pcie_config_flags = 0;
1554         u32 ealloc_value;
1555         u32 ealloc_targ_addr;
1556         u32 flag2_value;
1557         u32 flag2_targ_addr;
1558         int ret = 0;
1559
1560         /* Download to Target the CE Config and the service-to-CE map */
1561         interconnect_targ_addr =
1562                 host_interest_item_address(HI_ITEM(hi_interconnect_state));
1563
1564         /* Supply Target-side CE configuration */
1565         ret = ath10k_pci_diag_read_access(ar, interconnect_targ_addr,
1566                                           &pcie_state_targ_addr);
1567         if (ret != 0) {
1568                 ath10k_err("Failed to get pcie state addr: %d\n", ret);
1569                 return ret;
1570         }
1571
1572         if (pcie_state_targ_addr == 0) {
1573                 ret = -EIO;
1574                 ath10k_err("Invalid pcie state addr\n");
1575                 return ret;
1576         }
1577
1578         ret = ath10k_pci_diag_read_access(ar, pcie_state_targ_addr +
1579                                           offsetof(struct pcie_state,
1580                                                    pipe_cfg_addr),
1581                                           &pipe_cfg_targ_addr);
1582         if (ret != 0) {
1583                 ath10k_err("Failed to get pipe cfg addr: %d\n", ret);
1584                 return ret;
1585         }
1586
1587         if (pipe_cfg_targ_addr == 0) {
1588                 ret = -EIO;
1589                 ath10k_err("Invalid pipe cfg addr\n");
1590                 return ret;
1591         }
1592
1593         ret = ath10k_pci_diag_write_mem(ar, pipe_cfg_targ_addr,
1594                                  target_ce_config_wlan,
1595                                  sizeof(target_ce_config_wlan));
1596
1597         if (ret != 0) {
1598                 ath10k_err("Failed to write pipe cfg: %d\n", ret);
1599                 return ret;
1600         }
1601
1602         ret = ath10k_pci_diag_read_access(ar, pcie_state_targ_addr +
1603                                           offsetof(struct pcie_state,
1604                                                    svc_to_pipe_map),
1605                                           &svc_to_pipe_map);
1606         if (ret != 0) {
1607                 ath10k_err("Failed to get svc/pipe map: %d\n", ret);
1608                 return ret;
1609         }
1610
1611         if (svc_to_pipe_map == 0) {
1612                 ret = -EIO;
1613                 ath10k_err("Invalid svc_to_pipe map\n");
1614                 return ret;
1615         }
1616
1617         ret = ath10k_pci_diag_write_mem(ar, svc_to_pipe_map,
1618                                  target_service_to_ce_map_wlan,
1619                                  sizeof(target_service_to_ce_map_wlan));
1620         if (ret != 0) {
1621                 ath10k_err("Failed to write svc/pipe map: %d\n", ret);
1622                 return ret;
1623         }
1624
1625         ret = ath10k_pci_diag_read_access(ar, pcie_state_targ_addr +
1626                                           offsetof(struct pcie_state,
1627                                                    config_flags),
1628                                           &pcie_config_flags);
1629         if (ret != 0) {
1630                 ath10k_err("Failed to get pcie config_flags: %d\n", ret);
1631                 return ret;
1632         }
1633
1634         pcie_config_flags &= ~PCIE_CONFIG_FLAG_ENABLE_L1;
1635
1636         ret = ath10k_pci_diag_write_mem(ar, pcie_state_targ_addr +
1637                                  offsetof(struct pcie_state, config_flags),
1638                                  &pcie_config_flags,
1639                                  sizeof(pcie_config_flags));
1640         if (ret != 0) {
1641                 ath10k_err("Failed to write pcie config_flags: %d\n", ret);
1642                 return ret;
1643         }
1644
1645         /* configure early allocation */
1646         ealloc_targ_addr = host_interest_item_address(HI_ITEM(hi_early_alloc));
1647
1648         ret = ath10k_pci_diag_read_access(ar, ealloc_targ_addr, &ealloc_value);
1649         if (ret != 0) {
1650                 ath10k_err("Failed to get early alloc val: %d\n", ret);
1651                 return ret;
1652         }
1653
1654         /* first bank is switched to IRAM */
1655         ealloc_value |= ((HI_EARLY_ALLOC_MAGIC << HI_EARLY_ALLOC_MAGIC_SHIFT) &
1656                          HI_EARLY_ALLOC_MAGIC_MASK);
1657         ealloc_value |= ((1 << HI_EARLY_ALLOC_IRAM_BANKS_SHIFT) &
1658                          HI_EARLY_ALLOC_IRAM_BANKS_MASK);
1659
1660         ret = ath10k_pci_diag_write_access(ar, ealloc_targ_addr, ealloc_value);
1661         if (ret != 0) {
1662                 ath10k_err("Failed to set early alloc val: %d\n", ret);
1663                 return ret;
1664         }
1665
1666         /* Tell Target to proceed with initialization */
1667         flag2_targ_addr = host_interest_item_address(HI_ITEM(hi_option_flag2));
1668
1669         ret = ath10k_pci_diag_read_access(ar, flag2_targ_addr, &flag2_value);
1670         if (ret != 0) {
1671                 ath10k_err("Failed to get option val: %d\n", ret);
1672                 return ret;
1673         }
1674
1675         flag2_value |= HI_OPTION_EARLY_CFG_DONE;
1676
1677         ret = ath10k_pci_diag_write_access(ar, flag2_targ_addr, flag2_value);
1678         if (ret != 0) {
1679                 ath10k_err("Failed to set option val: %d\n", ret);
1680                 return ret;
1681         }
1682
1683         return 0;
1684 }
1687
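/*
 * Set up one Copy Engine per entry in host_ce_config_wlan, reserving the
 * last CE for the diagnostic window, and register the BMI completion
 * handlers on the BMI pipes.
 */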
1688 static int ath10k_pci_ce_init(struct ath10k *ar)
1689 {
1690         struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1691         struct ath10k_pci_pipe *pipe_info;
1692         const struct ce_attr *attr;
1693         int pipe_num;
1694
1695         for (pipe_num = 0; pipe_num < ar_pci->ce_count; pipe_num++) {
1696                 pipe_info = &ar_pci->pipe_info[pipe_num];
1697                 pipe_info->pipe_num = pipe_num;
1698                 pipe_info->hif_ce_state = ar;
1699                 attr = &host_ce_config_wlan[pipe_num];
1700
1701                 pipe_info->ce_hdl = ath10k_ce_init(ar, pipe_num, attr);
1702                 if (pipe_info->ce_hdl == NULL) {
1703                         ath10k_err("Unable to initialize CE for pipe: %d\n",
1704                                    pipe_num);
1705
1706                         /* It is safe to call ath10k_pci_ce_deinit()
1707                          * here: it checks that ce_hdl is valid for
1708                          * each pipe before tearing it down. */
1708                         ath10k_pci_ce_deinit(ar);
1709                         return -1;
1710                 }
1711
1712                 if (pipe_num == ar_pci->ce_count - 1) {
1713                         /*
1714                          * Reserve the ultimate CE for
1715                          * diagnostic Window support
1716                          */
1717                         ar_pci->ce_diag = pipe_info->ce_hdl;
1719                         continue;
1720                 }
1721
1722                 pipe_info->buf_sz = (size_t) (attr->src_sz_max);
1723         }
1724
1725         /*
1726          * Initially, establish CE completion handlers for use with BMI.
1727          * These are overwritten with generic handlers after we exit BMI phase.
1728          */
1729         pipe_info = &ar_pci->pipe_info[BMI_CE_NUM_TO_TARG];
1730         ath10k_ce_send_cb_register(pipe_info->ce_hdl,
1731                                    ath10k_pci_bmi_send_done, 0);
1732
1733         pipe_info = &ar_pci->pipe_info[BMI_CE_NUM_TO_HOST];
1734         ath10k_ce_recv_cb_register(pipe_info->ce_hdl,
1735                                    ath10k_pci_bmi_recv_data);
1736
1737         return 0;
1738 }
1739
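/*
 * Handle a firmware indication: acknowledge FW_IND_EVENT_PENDING and,
 * once the HIF has started, dump the firmware debug area.
 */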
1740 static void ath10k_pci_fw_interrupt_handler(struct ath10k *ar)
1741 {
1742         struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1743         u32 fw_indicator_address, fw_indicator;
1744
1745         ath10k_pci_wake(ar);
1746
1747         fw_indicator_address = ar_pci->fw_indicator_address;
1748         fw_indicator = ath10k_pci_read32(ar, fw_indicator_address);
1749
1750         if (fw_indicator & FW_IND_EVENT_PENDING) {
1751                 /* ACK: clear Target-side pending event */
1752                 ath10k_pci_write32(ar, fw_indicator_address,
1753                                    fw_indicator & ~FW_IND_EVENT_PENDING);
1754
1755                 if (ar_pci->started) {
1756                         ath10k_pci_hif_dump_area(ar);
1757                 } else {
1758                         /*
1759                          * Probable Target failure before we're prepared
1760                          * to handle it.  Generally unexpected.
1761                          */
1762                         ath10k_warn("early firmware event indicated\n");
1763                 }
1764         }
1765
1766         ath10k_pci_sleep(ar);
1767 }
1768
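/*
 * Power-up sequence: start interrupt handling, reset the device, wait for
 * the target to initialize, optionally force the SoC awake, then bring up
 * the Copy Engines, push the target configuration and wake the target CPU.
 * Failures unwind through the error labels in reverse order.
 */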
1769 static int ath10k_pci_hif_power_up(struct ath10k *ar)
1770 {
1771         struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1772         int ret;
1773
1774         ret = ath10k_pci_start_intr(ar);
1775         if (ret) {
1776                 ath10k_err("could not start interrupt handling (%d)\n", ret);
1777                 goto err;
1778         }
1779
1780         /*
1781          * Bring the target up cleanly.
1782          *
1783          * The target may be in an undefined state with an AUX-powered Target
1784          * and a Host in WoW mode. If the Host crashes, loses power, or is
1785          * restarted (without unloading the driver) then the Target is left
1786          * (aux) powered and running. On a subsequent driver load, the Target
1787          * is in an unexpected state. We try to catch that here in order to
1788          * reset the Target and retry the probe.
1789          */
1790         ath10k_pci_device_reset(ar);
1791
1792         ret = ath10k_pci_reset_target(ar);
1793         if (ret)
1794                 goto err_irq;
1795
1796         if (!test_bit(ATH10K_PCI_FEATURE_SOC_POWER_SAVE, ar_pci->features))
1797                 /* Force AWAKE forever */
1798                 ath10k_do_pci_wake(ar);
1799
1800         ret = ath10k_pci_ce_init(ar);
1801         if (ret)
1802                 goto err_ps;
1803
1804         ret = ath10k_pci_init_config(ar);
1805         if (ret)
1806                 goto err_ce;
1807
1808         ret = ath10k_pci_wake_target_cpu(ar);
1809         if (ret) {
1810                 ath10k_err("could not wake up target CPU (%d)\n", ret);
1811                 goto err_ce;
1812         }
1813
1814         return 0;
1815
1816 err_ce:
1817         ath10k_pci_ce_deinit(ar);
1818 err_ps:
1819         if (!test_bit(ATH10K_PCI_FEATURE_SOC_POWER_SAVE, ar_pci->features))
1820                 ath10k_do_pci_sleep(ar);
1821 err_irq:
1822         ath10k_pci_stop_intr(ar);
1823 err:
1824         return ret;
1825 }
1826
1827 static void ath10k_pci_hif_power_down(struct ath10k *ar)
1828 {
1829         struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1830
1831         ath10k_pci_stop_intr(ar);
1832
1833         ath10k_pci_ce_deinit(ar);
1834         if (!test_bit(ATH10K_PCI_FEATURE_SOC_POWER_SAVE, ar_pci->features))
1835                 ath10k_do_pci_sleep(ar);
1836 }
1837
1838 #ifdef CONFIG_PM
1839
1840 #define ATH10K_PCI_PM_CONTROL 0x44
1841
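/*
 * Offset 0x44 is assumed to hold this device's PCI power-management
 * control/status register (PMCSR): suspend writes 0x3 to the low bits to
 * enter D3hot, and resume clears them to return to D0.
 */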
1842 static int ath10k_pci_hif_suspend(struct ath10k *ar)
1843 {
1844         struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1845         struct pci_dev *pdev = ar_pci->pdev;
1846         u32 val;
1847
1848         pci_read_config_dword(pdev, ATH10K_PCI_PM_CONTROL, &val);
1849
1850         if ((val & 0x000000ff) != 0x3) {
1851                 pci_save_state(pdev);
1852                 pci_disable_device(pdev);
1853                 pci_write_config_dword(pdev, ATH10K_PCI_PM_CONTROL,
1854                                        (val & 0xffffff00) | 0x03);
1855         }
1856
1857         return 0;
1858 }
1859
1860 static int ath10k_pci_hif_resume(struct ath10k *ar)
1861 {
1862         struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1863         struct pci_dev *pdev = ar_pci->pdev;
1864         u32 val;
1865
1866         pci_read_config_dword(pdev, ATH10K_PCI_PM_CONTROL, &val);
1867
1868         if ((val & 0x000000ff) != 0) {
1869                 pci_restore_state(pdev);
1870                 pci_write_config_dword(pdev, ATH10K_PCI_PM_CONTROL,
1871                                        val & 0xffffff00);
1872                 /*
1873                  * Suspend/Resume resets the PCI configuration space,
1874                  * so we have to re-disable the RETRY_TIMEOUT register (0x41)
1875                  * to keep PCI Tx retries from interfering with C3 CPU state
1876                  */
1877                 pci_read_config_dword(pdev, 0x40, &val);
1878
1879                 if ((val & 0x0000ff00) != 0)
1880                         pci_write_config_dword(pdev, 0x40, val & 0xffff00ff);
1881         }
1882
1883         return 0;
1884 }
1885 #endif
1886
1887 static const struct ath10k_hif_ops ath10k_pci_hif_ops = {
1888         .send_head              = ath10k_pci_hif_send_head,
1889         .exchange_bmi_msg       = ath10k_pci_hif_exchange_bmi_msg,
1890         .start                  = ath10k_pci_hif_start,
1891         .stop                   = ath10k_pci_hif_stop,
1892         .map_service_to_pipe    = ath10k_pci_hif_map_service_to_pipe,
1893         .get_default_pipe       = ath10k_pci_hif_get_default_pipe,
1894         .send_complete_check    = ath10k_pci_hif_send_complete_check,
1895         .set_callbacks          = ath10k_pci_hif_set_callbacks,
1896         .get_free_queue_number  = ath10k_pci_hif_get_free_queue_number,
1897         .power_up               = ath10k_pci_hif_power_up,
1898         .power_down             = ath10k_pci_hif_power_down,
1899 #ifdef CONFIG_PM
1900         .suspend                = ath10k_pci_hif_suspend,
1901         .resume                 = ath10k_pci_hif_resume,
1902 #endif
1903 };
1904
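/* Per-CE tasklet: service completions on a single Copy Engine. */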
1905 static void ath10k_pci_ce_tasklet(unsigned long ptr)
1906 {
1907         struct ath10k_pci_pipe *pipe = (struct ath10k_pci_pipe *)ptr;
1908         struct ath10k_pci *ar_pci = pipe->ar_pci;
1909
1910         ath10k_ce_per_engine_service(ar_pci->ar, pipe->pipe_num);
1911 }
1912
1913 static void ath10k_msi_err_tasklet(unsigned long data)
1914 {
1915         struct ath10k *ar = (struct ath10k *)data;
1916
1917         ath10k_pci_fw_interrupt_handler(ar);
1918 }
1919
1920 /*
1921  * Handler for a per-engine interrupt on a PARTICULAR CE.
1922  * This is used in cases where each CE has a private MSI interrupt.
1923  */
1924 static irqreturn_t ath10k_pci_per_engine_handler(int irq, void *arg)
1925 {
1926         struct ath10k *ar = arg;
1927         struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1928         int ce_id = irq - ar_pci->pdev->irq - MSI_ASSIGN_CE_INITIAL;
1929
1930         if (ce_id < 0 || ce_id >= ARRAY_SIZE(ar_pci->pipe_info)) {
1931                 ath10k_warn("unexpected/invalid irq %d ce_id %d\n", irq, ce_id);
1932                 return IRQ_HANDLED;
1933         }
1934
1935         /*
1936          * NOTE: We are able to derive ce_id from irq because we
1937          * use a one-to-one mapping for CE's 0..5.
1938          * CE's 6 & 7 do not use interrupts at all.
1939          *
1940          * This mapping must be kept in sync with the mapping
1941          * used by firmware.
1942          */
1943         tasklet_schedule(&ar_pci->pipe_info[ce_id].intr);
1944         return IRQ_HANDLED;
1945 }
1946
1947 static irqreturn_t ath10k_pci_msi_fw_handler(int irq, void *arg)
1948 {
1949         struct ath10k *ar = arg;
1950         struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1951
1952         tasklet_schedule(&ar_pci->msi_fw_err);
1953         return IRQ_HANDLED;
1954 }
1955
1956 /*
1957  * Top-level interrupt handler for all PCI interrupts from a Target.
1958  * When a block of MSI interrupts is allocated, this top-level handler
1959  * is not used; instead, we directly call the correct sub-handler.
1960  */
1961 static irqreturn_t ath10k_pci_interrupt_handler(int irq, void *arg)
1962 {
1963         struct ath10k *ar = arg;
1964         struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1965
1966         if (ar_pci->num_msi_intrs == 0) {
1967                 /*
1968                  * IMPORTANT: the INTR_CLR register must be written after
1969                  * INTR_ENABLE has been set to 0; otherwise the interrupt
1970                  * is not actually cleared.
1971                  */
1972                 iowrite32(0, ar_pci->mem +
1973                           (SOC_CORE_BASE_ADDRESS |
1974                            PCIE_INTR_ENABLE_ADDRESS));
1975                 iowrite32(PCIE_INTR_FIRMWARE_MASK |
1976                           PCIE_INTR_CE_MASK_ALL,
1977                           ar_pci->mem + (SOC_CORE_BASE_ADDRESS |
1978                                          PCIE_INTR_CLR_ADDRESS));
1979                 /*
1980                  * IMPORTANT: this extra read transaction is required to
1981                  * flush the posted write buffer.
1982                  */
1983                 (void) ioread32(ar_pci->mem +
1984                                 (SOC_CORE_BASE_ADDRESS |
1985                                  PCIE_INTR_ENABLE_ADDRESS));
1986         }
1987
1988         tasklet_schedule(&ar_pci->intr_tq);
1989
1990         return IRQ_HANDLED;
1991 }
1992
1993 static void ath10k_pci_tasklet(unsigned long data)
1994 {
1995         struct ath10k *ar = (struct ath10k *)data;
1996         struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1997
1998         ath10k_pci_fw_interrupt_handler(ar); /* FIXME: Handle FW error */
1999         ath10k_ce_per_engine_service_any(ar);
2000
2001         if (ar_pci->num_msi_intrs == 0) {
2002                 /* Enable Legacy PCI line interrupts */
2003                 iowrite32(PCIE_INTR_FIRMWARE_MASK |
2004                           PCIE_INTR_CE_MASK_ALL,
2005                           ar_pci->mem + (SOC_CORE_BASE_ADDRESS |
2006                                          PCIE_INTR_ENABLE_ADDRESS));
2007                 /*
2008                  * IMPORTANT: this extra read transaction is required to
2009                  * flush the posted write buffer
2010                  */
2011                 (void) ioread32(ar_pci->mem +
2012                                 (SOC_CORE_BASE_ADDRESS |
2013                                  PCIE_INTR_ENABLE_ADDRESS));
2014         }
2015 }
2016
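/*
 * Despite the _msix name, this path allocates a block of plain MSI
 * vectors with pci_enable_msi_block(): one for firmware indications
 * (MSI_ASSIGN_FW) plus one per interrupt-driven Copy Engine.
 */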
2017 static int ath10k_pci_start_intr_msix(struct ath10k *ar, int num)
2018 {
2019         struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2020         int ret;
2021         int i;
2022
2023         ret = pci_enable_msi_block(ar_pci->pdev, num);
2024         if (ret)
2025                 return ret;
2026
2027         ret = request_irq(ar_pci->pdev->irq + MSI_ASSIGN_FW,
2028                           ath10k_pci_msi_fw_handler,
2029                           IRQF_SHARED, "ath10k_pci", ar);
2030         if (ret) {
2031                 ath10k_warn("request_irq(%d) failed %d\n",
2032                             ar_pci->pdev->irq + MSI_ASSIGN_FW, ret);
2033
2034                 pci_disable_msi(ar_pci->pdev);
2035                 return ret;
2036         }
2037
2038         for (i = MSI_ASSIGN_CE_INITIAL; i <= MSI_ASSIGN_CE_MAX; i++) {
2039                 ret = request_irq(ar_pci->pdev->irq + i,
2040                                   ath10k_pci_per_engine_handler,
2041                                   IRQF_SHARED, "ath10k_pci", ar);
2042                 if (ret) {
2043                         ath10k_warn("request_irq(%d) failed %d\n",
2044                                     ar_pci->pdev->irq + i, ret);
2045
2046                         for (i--; i >= MSI_ASSIGN_CE_INITIAL; i--)
2047                                 free_irq(ar_pci->pdev->irq + i, ar);
2048
2049                         free_irq(ar_pci->pdev->irq + MSI_ASSIGN_FW, ar);
2050                         pci_disable_msi(ar_pci->pdev);
2051                         return ret;
2052                 }
2053         }
2054
2055         ath10k_info("MSI-X interrupt handling (%d intrs)\n", num);
2056         return 0;
2057 }
2058
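/* Single shared MSI vector; all events funnel through the top-level handler. */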
2059 static int ath10k_pci_start_intr_msi(struct ath10k *ar)
2060 {
2061         struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2062         int ret;
2063
2064         ret = pci_enable_msi(ar_pci->pdev);
2065         if (ret < 0)
2066                 return ret;
2067
2068         ret = request_irq(ar_pci->pdev->irq,
2069                           ath10k_pci_interrupt_handler,
2070                           IRQF_SHARED, "ath10k_pci", ar);
2071         if (ret < 0) {
2072                 pci_disable_msi(ar_pci->pdev);
2073                 return ret;
2074         }
2075
2076         ath10k_info("MSI interrupt handling\n");
2077         return 0;
2078 }
2079
2080 static int ath10k_pci_start_intr_legacy(struct ath10k *ar)
2081 {
2082         struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2083         int ret;
2084
2085         ret = request_irq(ar_pci->pdev->irq,
2086                           ath10k_pci_interrupt_handler,
2087                           IRQF_SHARED, "ath10k_pci", ar);
2088         if (ret < 0)
2089                 return ret;
2090
2091         /*
2092          * Make sure to wake the Target before enabling Legacy
2093          * Interrupt.
2094          */
2095         iowrite32(PCIE_SOC_WAKE_V_MASK,
2096                   ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS +
2097                   PCIE_SOC_WAKE_ADDRESS);
2098
2099         ath10k_pci_wait(ar);
2100
2101         /*
2102          * A potential race exists here: the CORE_BASE write depends on
2103          * the target correctly decoding the AXI address, but the host
2104          * cannot know when the target has written its BAR to CORE_CTRL.
2105          * The write may be lost if the target has not yet done so. For
2106          * now, work around the race by repeating the write in the
2107          * synchronization loop below.
2108          */
2109         iowrite32(PCIE_INTR_FIRMWARE_MASK |
2110                   PCIE_INTR_CE_MASK_ALL,
2111                   ar_pci->mem + (SOC_CORE_BASE_ADDRESS |
2112                                  PCIE_INTR_ENABLE_ADDRESS));
2113         iowrite32(PCIE_SOC_WAKE_RESET,
2114                   ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS +
2115                   PCIE_SOC_WAKE_ADDRESS);
2116
2117         ath10k_info("legacy interrupt handling\n");
2118         return 0;
2119 }
2120
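/*
 * Set up interrupt handling with graceful fallback: multi-MSI when the
 * device supports it, then a single MSI, then legacy INTx. The final mode
 * is recorded in num_msi_intrs (0 means legacy).
 */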
2121 static int ath10k_pci_start_intr(struct ath10k *ar)
2122 {
2123         struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2124         int num = MSI_NUM_REQUEST;
2125         int ret;
2126         int i;
2127
2128         tasklet_init(&ar_pci->intr_tq, ath10k_pci_tasklet, (unsigned long) ar);
2129         tasklet_init(&ar_pci->msi_fw_err, ath10k_msi_err_tasklet,
2130                      (unsigned long) ar);
2131
2132         for (i = 0; i < CE_COUNT; i++) {
2133                 ar_pci->pipe_info[i].ar_pci = ar_pci;
2134                 tasklet_init(&ar_pci->pipe_info[i].intr,
2135                              ath10k_pci_ce_tasklet,
2136                              (unsigned long)&ar_pci->pipe_info[i]);
2137         }
2138
2139         if (!test_bit(ATH10K_PCI_FEATURE_MSI_X, ar_pci->features))
2140                 num = 1;
2141
2142         if (num > 1) {
2143                 ret = ath10k_pci_start_intr_msix(ar, num);
2144                 if (ret == 0)
2145                         goto exit;
2146
2147                 ath10k_warn("MSI-X didn't succeed (%d), trying MSI\n", ret);
2148                 num = 1;
2149         }
2150
2151         if (num == 1) {
2152                 ret = ath10k_pci_start_intr_msi(ar);
2153                 if (ret == 0)
2154                         goto exit;
2155
2156                 ath10k_warn("MSI didn't succeed (%d), trying legacy INTR\n",
2157                             ret);
2158                 num = 0;
2159         }
2160
2161         ret = ath10k_pci_start_intr_legacy(ar);
2162
2163 exit:
2164         ar_pci->num_msi_intrs = num;
2165         ar_pci->ce_count = CE_COUNT;
2166         return ret;
2167 }
2168
2169 static void ath10k_pci_stop_intr(struct ath10k *ar)
2170 {
2171         struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2172         int i;
2173
2174         /* There's at least one interrupt regardless of whether it's
2175          * legacy INTR, MSI or MSI-X. */
2176         for (i = 0; i < max(1, ar_pci->num_msi_intrs); i++)
2177                 free_irq(ar_pci->pdev->irq + i, ar);
2178
2179         if (ar_pci->num_msi_intrs > 0)
2180                 pci_disable_msi(ar_pci->pdev);
2181 }
2182
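/*
 * Poll the firmware indicator register for up to ~3 seconds waiting for
 * FW_IND_INITIALIZED. In legacy interrupt mode the interrupt-enable write
 * is repeated on every iteration to work around the race described in
 * ath10k_pci_start_intr_legacy().
 */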
2183 static int ath10k_pci_reset_target(struct ath10k *ar)
2184 {
2185         struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2186         int wait_limit = 300; /* 3 sec */
2187
2188         /* Wait for Target to finish initialization before we proceed. */
2189         iowrite32(PCIE_SOC_WAKE_V_MASK,
2190                   ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS +
2191                   PCIE_SOC_WAKE_ADDRESS);
2192
2193         ath10k_pci_wait(ar);
2194
2195         while (wait_limit-- &&
2196                !(ioread32(ar_pci->mem + FW_INDICATOR_ADDRESS) &
2197                  FW_IND_INITIALIZED)) {
2198                 if (ar_pci->num_msi_intrs == 0)
2199                         /* Fix potential race by repeating CORE_BASE writes */
2200                         iowrite32(PCIE_INTR_FIRMWARE_MASK |
2201                                   PCIE_INTR_CE_MASK_ALL,
2202                                   ar_pci->mem + (SOC_CORE_BASE_ADDRESS |
2203                                                  PCIE_INTR_ENABLE_ADDRESS));
2204                 mdelay(10);
2205         }
2206
2207         if (wait_limit < 0) {
2208                 ath10k_err("Target stalled\n");
2209                 iowrite32(PCIE_SOC_WAKE_RESET,
2210                           ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS +
2211                           PCIE_SOC_WAKE_ADDRESS);
2212                 return -EIO;
2213         }
2214
2215         iowrite32(PCIE_SOC_WAKE_RESET,
2216                   ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS +
2217                   PCIE_SOC_WAKE_ADDRESS);
2218
2219         return 0;
2220 }
2221
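/*
 * Cold-reset the SoC: wake the target, assert SOC_GLOBAL_RESET, wait for
 * RTC_STATE to report the cold reset, then deassert it and wait for the
 * state to clear again.
 */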
2222 static void ath10k_pci_device_reset(struct ath10k *ar)
2223 {
2224         struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2225         void __iomem *mem = ar_pci->mem;
2226         int i;
2227         u32 val;
2228
2229         if (!SOC_GLOBAL_RESET_ADDRESS)
2230                 return;
2231
2232         if (!mem)
2233                 return;
2234
2235         ath10k_pci_reg_write32(mem, PCIE_SOC_WAKE_ADDRESS,
2236                                PCIE_SOC_WAKE_V_MASK);
2237         for (i = 0; i < ATH_PCI_RESET_WAIT_MAX; i++) {
2238                 if (ath10k_pci_target_is_awake(ar))
2239                         break;
2240                 msleep(1);
2241         }
2242
2243         /* Put Target, including PCIe, into RESET. */
2244         val = ath10k_pci_reg_read32(mem, SOC_GLOBAL_RESET_ADDRESS);
2245         val |= 1;
2246         ath10k_pci_reg_write32(mem, SOC_GLOBAL_RESET_ADDRESS, val);
2247
2248         for (i = 0; i < ATH_PCI_RESET_WAIT_MAX; i++) {
2249                 if (ath10k_pci_reg_read32(mem, RTC_STATE_ADDRESS) &
2250                                           RTC_STATE_COLD_RESET_MASK)
2251                         break;
2252                 msleep(1);
2253         }
2254
2255         /* Pull Target, including PCIe, out of RESET. */
2256         val &= ~1;
2257         ath10k_pci_reg_write32(mem, SOC_GLOBAL_RESET_ADDRESS, val);
2258
2259         for (i = 0; i < ATH_PCI_RESET_WAIT_MAX; i++) {
2260                 if (!(ath10k_pci_reg_read32(mem, RTC_STATE_ADDRESS) &
2261                                             RTC_STATE_COLD_RESET_MASK))
2262                         break;
2263                 msleep(1);
2264         }
2265
2266         ath10k_pci_reg_write32(mem, PCIE_SOC_WAKE_ADDRESS, PCIE_SOC_WAKE_RESET);
2267 }
2268
2269 static void ath10k_pci_dump_features(struct ath10k_pci *ar_pci)
2270 {
2271         int i;
2272
2273         for (i = 0; i < ATH10K_PCI_FEATURE_COUNT; i++) {
2274                 if (!test_bit(i, ar_pci->features))
2275                         continue;
2276
2277                 switch (i) {
2278                 case ATH10K_PCI_FEATURE_MSI_X:
2279                         ath10k_dbg(ATH10K_DBG_PCI, "device supports MSI-X\n");
2280                         break;
2281                 case ATH10K_PCI_FEATURE_SOC_POWER_SAVE:
2282                         ath10k_dbg(ATH10K_DBG_PCI, "QCA98XX SoC power save enabled\n");
2283                         break;
2284                 }
2285         }
2286 }
2287
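/*
 * Probe: allocate the PCI-private state, derive chip features from the
 * device ID, create the core instance, then claim and map the BAR,
 * restrict DMA to 32 bits, disable ASPM and register with the core.
 */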
2288 static int ath10k_pci_probe(struct pci_dev *pdev,
2289                             const struct pci_device_id *pci_dev)
2290 {
2291         void __iomem *mem;
2292         int ret = 0;
2293         struct ath10k *ar;
2294         struct ath10k_pci *ar_pci;
2295         u32 lcr_val;
2296
2297         ath10k_dbg(ATH10K_DBG_PCI, "%s\n", __func__);
2298
2299         ar_pci = kzalloc(sizeof(*ar_pci), GFP_KERNEL);
2300         if (ar_pci == NULL)
2301                 return -ENOMEM;
2302
2303         ar_pci->pdev = pdev;
2304         ar_pci->dev = &pdev->dev;
2305
2306         switch (pci_dev->device) {
2307         case QCA988X_2_0_DEVICE_ID:
2308                 set_bit(ATH10K_PCI_FEATURE_MSI_X, ar_pci->features);
2309                 break;
2310         default:
2311                 ret = -ENODEV;
2312                 ath10k_err("Unknown device ID: %d\n", pci_dev->device);
2313                 goto err_ar_pci;
2314         }
2315
2316         if (ath10k_target_ps)
2317                 set_bit(ATH10K_PCI_FEATURE_SOC_POWER_SAVE, ar_pci->features);
2318
2319         ath10k_pci_dump_features(ar_pci);
2320
2321         ar = ath10k_core_create(ar_pci, ar_pci->dev, &ath10k_pci_hif_ops);
2322         if (!ar) {
2323                 ath10k_err("ath10k_core_create failed!\n");
2324                 ret = -EINVAL;
2325                 goto err_ar_pci;
2326         }
2327
2328         ar_pci->ar = ar;
2329         ar_pci->fw_indicator_address = FW_INDICATOR_ADDRESS;
2330         atomic_set(&ar_pci->keep_awake_count, 0);
2331
2332         pci_set_drvdata(pdev, ar);
2333
2334         /*
2335          * Without any knowledge of the Host, the Target may have been reset or
2336          * power cycled and its Config Space may no longer reflect the PCI
2337          * address space that was assigned earlier by the PCI infrastructure.
2338          * Refresh it now.
2339          */
2340         ret = pci_assign_resource(pdev, BAR_NUM);
2341         if (ret) {
2342                 ath10k_err("cannot assign PCI space: %d\n", ret);
2343                 goto err_ar;
2344         }
2345
2346         ret = pci_enable_device(pdev);
2347         if (ret) {
2348                 ath10k_err("cannot enable PCI device: %d\n", ret);
2349                 goto err_ar;
2350         }
2351
2352         /* Request MMIO resources */
2353         ret = pci_request_region(pdev, BAR_NUM, "ath");
2354         if (ret) {
2355                 ath10k_err("PCI MMIO reservation error: %d\n", ret);
2356                 goto err_device;
2357         }
2358
2359         /*
2360          * Target structures are limited to 32-bit DMA addresses, but
2361          * DMA addresses can be wider than 32 bits by default on some systems.
2362          */
2363         ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
2364         if (ret) {
2365                 ath10k_err("32-bit DMA not available: %d\n", ret);
2366                 goto err_region;
2367         }
2368
2369         ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
2370         if (ret) {
2371                 ath10k_err("cannot enable 32-bit consistent DMA: %d\n", ret);
2372                 goto err_region;
2373         }
2374
2375         /* Set bus master bit in PCI_COMMAND to enable DMA */
2376         pci_set_master(pdev);
2377
2378         /*
2379          * Temporary FIX: disable ASPM
2380          * Will be removed after the OTP is programmed
2381          */
2382         pci_read_config_dword(pdev, 0x80, &lcr_val);
2383         pci_write_config_dword(pdev, 0x80, (lcr_val & 0xffffff00));
2384
2385         /* Arrange for access to Target SoC registers. */
2386         mem = pci_iomap(pdev, BAR_NUM, 0);
2387         if (!mem) {
2388                 ath10k_err("PCI iomap error\n");
2389                 ret = -EIO;
2390                 goto err_master;
2391         }
2392
2393         ar_pci->mem = mem;
2394
2395         spin_lock_init(&ar_pci->ce_lock);
2396
2397         ret = ath10k_core_register(ar);
2398         if (ret) {
2399                 ath10k_err("could not register driver core (%d)\n", ret);
2400                 goto err_iomap;
2401         }
2402
2403         return 0;
2404
2405 err_iomap:
2406         pci_iounmap(pdev, mem);
2407 err_master:
2408         pci_clear_master(pdev);
2409 err_region:
2410         pci_release_region(pdev, BAR_NUM);
2411 err_device:
2412         pci_disable_device(pdev);
2413 err_ar:
2414         pci_set_drvdata(pdev, NULL);
2415         ath10k_core_destroy(ar);
2416 err_ar_pci:
2417         /* call HIF PCI free here */
2418         kfree(ar_pci);
2419
2420         return ret;
2421 }
2422
2423 static void ath10k_pci_remove(struct pci_dev *pdev)
2424 {
2425         struct ath10k *ar = pci_get_drvdata(pdev);
2426         struct ath10k_pci *ar_pci;
2427
2428         ath10k_dbg(ATH10K_DBG_PCI, "%s\n", __func__);
2429
2430         if (!ar)
2431                 return;
2432
2433         ar_pci = ath10k_pci_priv(ar);
2434
2435         if (!ar_pci)
2436                 return;
2437
2438         tasklet_kill(&ar_pci->msi_fw_err);
2439
2440         ath10k_core_unregister(ar);
2441
2442         pci_set_drvdata(pdev, NULL);
2443         pci_iounmap(pdev, ar_pci->mem);
2444         pci_release_region(pdev, BAR_NUM);
2445         pci_clear_master(pdev);
2446         pci_disable_device(pdev);
2447
2448         ath10k_core_destroy(ar);
2449         kfree(ar_pci);
2450 }
2451
2452 MODULE_DEVICE_TABLE(pci, ath10k_pci_id_table);
2453
2454 static struct pci_driver ath10k_pci_driver = {
2455         .name = "ath10k_pci",
2456         .id_table = ath10k_pci_id_table,
2457         .probe = ath10k_pci_probe,
2458         .remove = ath10k_pci_remove,
2459 };
2460
2461 static int __init ath10k_pci_init(void)
2462 {
2463         int ret;
2464
2465         ret = pci_register_driver(&ath10k_pci_driver);
2466         if (ret)
2467                 ath10k_err("pci_register_driver failed [%d]\n", ret);
2468
2469         return ret;
2470 }
2471 module_init(ath10k_pci_init);
2472
2473 static void __exit ath10k_pci_exit(void)
2474 {
2475         pci_unregister_driver(&ath10k_pci_driver);
2476 }
2477
2478 module_exit(ath10k_pci_exit);
2479
2480 MODULE_AUTHOR("Qualcomm Atheros");
2481 MODULE_DESCRIPTION("Driver support for Atheros QCA988X PCIe devices");
2482 MODULE_LICENSE("Dual BSD/GPL");
2483 MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" QCA988X_HW_2_0_FW_FILE);
2484 MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" QCA988X_HW_2_0_OTP_FILE);
2485 MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" QCA988X_HW_2_0_BOARD_DATA_FILE);