/*
 * Copyright (c) 2005-2011 Atheros Communications Inc.
 * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */
#include <linux/pci.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>

#include "core.h"
#include "debug.h"

#include "targaddrs.h"
#include "bmi.h"

#include "hif.h"
#include "htc.h"

#include "ce.h"
#include "pci.h"
static unsigned int ath10k_target_ps;
module_param(ath10k_target_ps, uint, 0644);
MODULE_PARM_DESC(ath10k_target_ps, "Enable ath10k Target (SoC) PS option");

#define QCA988X_2_0_DEVICE_ID	(0x003c)
static DEFINE_PCI_DEVICE_TABLE(ath10k_pci_id_table) = {
	{ PCI_VDEVICE(ATHEROS, QCA988X_2_0_DEVICE_ID) }, /* PCI-E QCA988X V2 */
	{0}
};
static int ath10k_pci_diag_read_access(struct ath10k *ar, u32 address,
				       u32 *data);

static void ath10k_pci_process_ce(struct ath10k *ar);
static int ath10k_pci_post_rx(struct ath10k *ar);
static int ath10k_pci_post_rx_pipe(struct hif_ce_pipe_info *pipe_info,
				   int num);
static void ath10k_pci_rx_pipe_cleanup(struct hif_ce_pipe_info *pipe_info);
static void ath10k_pci_stop_ce(struct ath10k *ar);
static void ath10k_pci_device_reset(struct ath10k *ar);
static int ath10k_pci_reset_target(struct ath10k *ar);
static int ath10k_pci_start_intr(struct ath10k *ar);
static void ath10k_pci_stop_intr(struct ath10k *ar);
static const struct ce_attr host_ce_config_wlan[] = {
	/* host->target HTC control and raw streams */
	{ /* CE0 */ CE_ATTR_FLAGS, 0, 16, 256, 0, NULL,},
	/* could be moved to share CE3 */
	/* target->host HTT + HTC control */
	{ /* CE1 */ CE_ATTR_FLAGS, 0, 0, 512, 512, NULL,},
	/* target->host WMI */
	{ /* CE2 */ CE_ATTR_FLAGS, 0, 0, 2048, 32, NULL,},
	/* host->target WMI */
	{ /* CE3 */ CE_ATTR_FLAGS, 0, 32, 2048, 0, NULL,},
	/* host->target HTT */
	{ /* CE4 */ CE_ATTR_FLAGS | CE_ATTR_DIS_INTR, 0,
		    CE_HTT_H2T_MSG_SRC_NENTRIES, 256, 0, NULL,},
	/* unused */
	{ /* CE5 */ CE_ATTR_FLAGS, 0, 0, 0, 0, NULL,},
	/* Target autonomous hif_memcpy */
	{ /* CE6 */ CE_ATTR_FLAGS, 0, 0, 0, 0, NULL,},
	/* ce_diag, the Diagnostic Window */
	{ /* CE7 */ CE_ATTR_FLAGS, 0, 2, DIAG_TRANSFER_LIMIT, 2, NULL,},
};
/* Target firmware's Copy Engine configuration. */
static const struct ce_pipe_config target_ce_config_wlan[] = {
	/* host->target HTC control and raw streams */
	{ /* CE0 */ 0, PIPEDIR_OUT, 32, 256, CE_ATTR_FLAGS, 0,},
	/* target->host HTT + HTC control */
	{ /* CE1 */ 1, PIPEDIR_IN, 32, 512, CE_ATTR_FLAGS, 0,},
	/* target->host WMI */
	{ /* CE2 */ 2, PIPEDIR_IN, 32, 2048, CE_ATTR_FLAGS, 0,},
	/* host->target WMI */
	{ /* CE3 */ 3, PIPEDIR_OUT, 32, 2048, CE_ATTR_FLAGS, 0,},
	/* host->target HTT */
	{ /* CE4 */ 4, PIPEDIR_OUT, 256, 256, CE_ATTR_FLAGS, 0,},
	/* NB: 50% of src nentries, since tx has 2 frags */
	/* unused */
	{ /* CE5 */ 5, PIPEDIR_OUT, 32, 2048, CE_ATTR_FLAGS, 0,},
	/* Reserved for target autonomous hif_memcpy */
	{ /* CE6 */ 6, PIPEDIR_INOUT, 32, 4096, CE_ATTR_FLAGS, 0,},
	/* CE7 used only by Host */
};
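
/*
 * Editor's illustration (compiled out, not upstream code): each entry in
 * host_ce_config_wlan above initializes struct ce_attr in field order
 * { flags, priority, src_nentries, src_sz_max, dest_nentries, reserved }.
 * A pipe with src_nentries == 0 is receive-only; dest_nentries == 0 means
 * send-only. The helper name below is hypothetical.
 */
#if 0
static bool example_pipe_is_recv_only(unsigned int pipe_num)
{
	const struct ce_attr *attr = &host_ce_config_wlan[pipe_num];

	return attr->src_nentries == 0 && attr->dest_nentries > 0;
}
#endif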
/*
 * Diagnostic read/write access is provided for startup/config/debug usage.
 * Caller must guarantee proper alignment, when applicable, and single user
 * at any moment.
 */
static int ath10k_pci_diag_read_mem(struct ath10k *ar, u32 address, void *data,
				    int nbytes)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	int ret = 0;
	u32 buf;
	unsigned int completed_nbytes, orig_nbytes, remaining_bytes;
	unsigned int id;
	unsigned int flags;
	struct ce_state *ce_diag;
	/* Host buffer address in CE space */
	u32 ce_data;
	dma_addr_t ce_data_base = 0;
	void *data_buf = NULL;
	int i;

	/*
	 * This code cannot handle reads to non-memory space. Redirect to the
	 * register read fn but preserve the multi word read capability of
	 * this fn.
	 */
	if (address < DRAM_BASE_ADDRESS) {
		if (!IS_ALIGNED(address, 4) ||
		    !IS_ALIGNED((unsigned long)data, 4))
			return -EIO;

		while ((nbytes >= 4) && ((ret = ath10k_pci_diag_read_access(
					  ar, address, (u32 *)data)) == 0)) {
			nbytes -= sizeof(u32);
			address += sizeof(u32);
			data += sizeof(u32);
		}
		return ret;
	}

	ce_diag = ar_pci->ce_diag;

	/*
	 * Allocate a temporary bounce buffer to hold caller's data
	 * to be DMA'ed from Target. This guarantees
	 *   1) 4-byte alignment
	 *   2) Buffer in DMA-able space
	 */
	orig_nbytes = nbytes;
	data_buf = (unsigned char *)pci_alloc_consistent(ar_pci->pdev,
							 orig_nbytes,
							 &ce_data_base);
	if (!data_buf) {
		ret = -ENOMEM;
		goto done;
	}
	memset(data_buf, 0, orig_nbytes);

	remaining_bytes = orig_nbytes;
	ce_data = ce_data_base;
	while (remaining_bytes) {
		nbytes = min_t(unsigned int, remaining_bytes,
			       DIAG_TRANSFER_LIMIT);

		ret = ath10k_ce_recv_buf_enqueue(ce_diag, NULL, ce_data);
		if (ret != 0)
			goto done;

		/* Request CE to send from Target(!) address to Host buffer */
		/*
		 * The address supplied by the caller is in the
		 * Target CPU virtual address space.
		 *
		 * In order to use this address with the diagnostic CE,
		 * convert it from Target CPU virtual address space
		 * to CE address space.
		 */
		ath10k_pci_wake(ar);
		address = TARG_CPU_SPACE_TO_CE_SPACE(ar, ar_pci->mem,
						     address);
		ath10k_pci_sleep(ar);

		ret = ath10k_ce_send(ce_diag, NULL, (u32)address, nbytes, 0,
				     0);
		if (ret)
			goto done;

		i = 0;
		while (ath10k_ce_completed_send_next(ce_diag, NULL, &buf,
						     &completed_nbytes,
						     &id) != 0) {
			mdelay(1);
			if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) {
				ret = -EBUSY;
				goto done;
			}
		}

		if (nbytes != completed_nbytes) {
			ret = -EIO;
			goto done;
		}

		if (buf != (u32) address) {
			ret = -EIO;
			goto done;
		}

		i = 0;
		while (ath10k_ce_completed_recv_next(ce_diag, NULL, &buf,
						     &completed_nbytes,
						     &id, &flags) != 0) {
			mdelay(1);

			if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) {
				ret = -EBUSY;
				goto done;
			}
		}

		if (nbytes != completed_nbytes) {
			ret = -EIO;
			goto done;
		}

		if (buf != ce_data) {
			ret = -EIO;
			goto done;
		}

		remaining_bytes -= nbytes;
		address += nbytes;
		ce_data += nbytes;
	}

done:
	if (ret == 0) {
		/* Copy data from allocated DMA buf to caller's buf */
		WARN_ON_ONCE(orig_nbytes & 3);
		for (i = 0; i < orig_nbytes / sizeof(__le32); i++) {
			((u32 *)data)[i] =
				__le32_to_cpu(((__le32 *)data_buf)[i]);
		}
	} else
		ath10k_dbg(ATH10K_DBG_PCI, "%s failure (0x%x)\n",
			   __func__, address);

	if (data_buf)
		pci_free_consistent(ar_pci->pdev, orig_nbytes,
				    data_buf, ce_data_base);

	return ret;
}
/* Read 4-byte aligned data from Target memory or register */
static int ath10k_pci_diag_read_access(struct ath10k *ar, u32 address,
				       u32 *data)
{
	/* Assume range doesn't cross this boundary */
	if (address >= DRAM_BASE_ADDRESS)
		return ath10k_pci_diag_read_mem(ar, address, data, sizeof(u32));

	ath10k_pci_wake(ar);
	*data = ath10k_pci_read32(ar, address);
	ath10k_pci_sleep(ar);
	return 0;
}
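
/*
 * Editor's illustration (compiled out): how the diag accessors above might
 * be used to peek a word of Target RAM and a SoC register. The wrapper
 * below is hypothetical; only the two ath10k_pci_diag_* calls are real.
 */
#if 0
static int example_diag_peek(struct ath10k *ar)
{
	u32 word, reg;
	int ret;

	/* >= DRAM_BASE_ADDRESS: serviced via the diagnostic CE window */
	ret = ath10k_pci_diag_read_mem(ar, DRAM_BASE_ADDRESS, &word,
				       sizeof(word));
	if (ret)
		return ret;

	/* < DRAM_BASE_ADDRESS: serviced as a direct register read */
	return ath10k_pci_diag_read_access(ar, SOC_CORE_BASE_ADDRESS |
					       CORE_CTRL_ADDRESS, &reg);
}
#endif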
static int ath10k_pci_diag_write_mem(struct ath10k *ar, u32 address,
				     const void *data, int nbytes)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	int ret = 0;
	u32 buf;
	unsigned int completed_nbytes, orig_nbytes, remaining_bytes;
	unsigned int id;
	unsigned int flags;
	struct ce_state *ce_diag;
	void *data_buf = NULL;
	u32 ce_data;	/* Host buffer address in CE space */
	dma_addr_t ce_data_base = 0;
	int i;

	ce_diag = ar_pci->ce_diag;

	/*
	 * Allocate a temporary bounce buffer to hold caller's data
	 * to be DMA'ed to Target. This guarantees
	 *   1) 4-byte alignment
	 *   2) Buffer in DMA-able space
	 */
	orig_nbytes = nbytes;
	data_buf = (unsigned char *)pci_alloc_consistent(ar_pci->pdev,
							 orig_nbytes,
							 &ce_data_base);
	if (!data_buf) {
		ret = -ENOMEM;
		goto done;
	}

	/* Copy caller's data to allocated DMA buf */
	WARN_ON_ONCE(orig_nbytes & 3);
	for (i = 0; i < orig_nbytes / sizeof(__le32); i++)
		((__le32 *)data_buf)[i] = __cpu_to_le32(((u32 *)data)[i]);

	/*
	 * The address supplied by the caller is in the
	 * Target CPU virtual address space.
	 *
	 * In order to use this address with the diagnostic CE,
	 * convert it from
	 *    Target CPU virtual address space
	 * to
	 *    CE address space
	 */
	ath10k_pci_wake(ar);
	address = TARG_CPU_SPACE_TO_CE_SPACE(ar, ar_pci->mem, address);
	ath10k_pci_sleep(ar);

	remaining_bytes = orig_nbytes;
	ce_data = ce_data_base;
	while (remaining_bytes) {
		/* FIXME: check cast */
		nbytes = min_t(int, remaining_bytes, DIAG_TRANSFER_LIMIT);

		/* Set up to receive directly into Target(!) address */
		ret = ath10k_ce_recv_buf_enqueue(ce_diag, NULL, address);
		if (ret != 0)
			goto done;

		/*
		 * Request CE to send caller-supplied data that
		 * was copied to bounce buffer to Target(!) address.
		 */
		ret = ath10k_ce_send(ce_diag, NULL, (u32) ce_data,
				     nbytes, 0, 0);
		if (ret != 0)
			goto done;

		i = 0;
		while (ath10k_ce_completed_send_next(ce_diag, NULL, &buf,
						     &completed_nbytes,
						     &id) != 0) {
			mdelay(1);

			if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) {
				ret = -EBUSY;
				goto done;
			}
		}

		if (nbytes != completed_nbytes) {
			ret = -EIO;
			goto done;
		}

		if (buf != ce_data) {
			ret = -EIO;
			goto done;
		}

		i = 0;
		while (ath10k_ce_completed_recv_next(ce_diag, NULL, &buf,
						     &completed_nbytes,
						     &id, &flags) != 0) {
			mdelay(1);

			if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) {
				ret = -EBUSY;
				goto done;
			}
		}

		if (nbytes != completed_nbytes) {
			ret = -EIO;
			goto done;
		}

		if (buf != address) {
			ret = -EIO;
			goto done;
		}

		remaining_bytes -= nbytes;
		address += nbytes;
		ce_data += nbytes;
	}

done:
	if (data_buf)
		pci_free_consistent(ar_pci->pdev, orig_nbytes, data_buf,
				    ce_data_base);

	if (ret != 0)
		ath10k_dbg(ATH10K_DBG_PCI, "%s failure (0x%x)\n", __func__,
			   address);

	return ret;
}
/* Write 4-byte aligned data to Target memory or register */
static int ath10k_pci_diag_write_access(struct ath10k *ar, u32 address,
					u32 data)
{
	/* Assume range doesn't cross this boundary */
	if (address >= DRAM_BASE_ADDRESS)
		return ath10k_pci_diag_write_mem(ar, address, &data,
						 sizeof(u32));

	ath10k_pci_wake(ar);
	ath10k_pci_write32(ar, address, data);
	ath10k_pci_sleep(ar);
	return 0;
}
static bool ath10k_pci_target_is_awake(struct ath10k *ar)
{
	void __iomem *mem = ath10k_pci_priv(ar)->mem;
	u32 val;
	val = ioread32(mem + PCIE_LOCAL_BASE_ADDRESS +
		       RTC_STATE_ADDRESS);
	return (RTC_STATE_V_GET(val) == RTC_STATE_V_ON);
}
static void ath10k_pci_wait(struct ath10k *ar)
{
	int n = 100;

	while (n-- && !ath10k_pci_target_is_awake(ar))
		msleep(10);

	if (n < 0)
		ath10k_warn("Unable to wake up target\n");
}
void ath10k_do_pci_wake(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	void __iomem *pci_addr = ar_pci->mem;
	int tot_delay = 0;
	int curr_delay = 5;

	if (atomic_read(&ar_pci->keep_awake_count) == 0) {
		/* Force AWAKE */
		iowrite32(PCIE_SOC_WAKE_V_MASK,
			  pci_addr + PCIE_LOCAL_BASE_ADDRESS +
			  PCIE_SOC_WAKE_ADDRESS);
	}
	atomic_inc(&ar_pci->keep_awake_count);

	if (ar_pci->verified_awake)
		return;

	for (;;) {
		if (ath10k_pci_target_is_awake(ar)) {
			ar_pci->verified_awake = true;
			break;
		}

		if (tot_delay > PCIE_WAKE_TIMEOUT) {
			ath10k_warn("target takes too long to wake up (awake count %d)\n",
				    atomic_read(&ar_pci->keep_awake_count));
			break;
		}

		udelay(curr_delay);
		tot_delay += curr_delay;

		if (curr_delay < 50)
			curr_delay += 5;
	}
}
void ath10k_do_pci_sleep(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	void __iomem *pci_addr = ar_pci->mem;

	if (atomic_dec_and_test(&ar_pci->keep_awake_count)) {
		/* Allow sleep */
		ar_pci->verified_awake = false;
		iowrite32(PCIE_SOC_WAKE_RESET,
			  pci_addr + PCIE_LOCAL_BASE_ADDRESS +
			  PCIE_SOC_WAKE_ADDRESS);
	}
}
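
/*
 * Editor's illustration (compiled out): keep_awake_count makes the wake and
 * sleep paths above reference counted, so nested awake sections are safe as
 * long as every wake is paired with exactly one sleep. The function below
 * is hypothetical.
 */
#if 0
static u32 example_read_while_awake(struct ath10k *ar, u32 addr)
{
	u32 val;

	ath10k_pci_wake(ar);		/* count 0 -> 1: forces AWAKE */
	val = ath10k_pci_read32(ar, addr);
	ath10k_pci_sleep(ar);		/* count 1 -> 0: allows sleep */

	return val;
}
#endif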
/*
 * FIXME: Handle OOM properly.
 */
static inline
struct ath10k_pci_compl *get_free_compl(struct hif_ce_pipe_info *pipe_info)
{
	struct ath10k_pci_compl *compl = NULL;

	spin_lock_bh(&pipe_info->pipe_lock);
	if (list_empty(&pipe_info->compl_free)) {
		ath10k_warn("Completion buffers are full\n");
		goto exit;
	}
	compl = list_first_entry(&pipe_info->compl_free,
				 struct ath10k_pci_compl, list);
	list_del(&compl->list);
exit:
	spin_unlock_bh(&pipe_info->pipe_lock);
	return compl;
}
/* Called by lower (CE) layer when a send to Target completes. */
static void ath10k_pci_ce_send_done(struct ce_state *ce_state,
				    void *transfer_context,
				    u32 ce_data,
				    unsigned int nbytes,
				    unsigned int transfer_id)
{
	struct ath10k *ar = ce_state->ar;
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	struct hif_ce_pipe_info *pipe_info = &ar_pci->pipe_info[ce_state->id];
	struct ath10k_pci_compl *compl;
	bool process = false;

	do {
		/*
		 * For the send completion of an item in sendlist, just
		 * increment num_sends_allowed. The upper layer callback will
		 * be triggered when the last fragment is done with send.
		 */
		if (transfer_context == CE_SENDLIST_ITEM_CTXT) {
			spin_lock_bh(&pipe_info->pipe_lock);
			pipe_info->num_sends_allowed++;
			spin_unlock_bh(&pipe_info->pipe_lock);
			continue;
		}

		compl = get_free_compl(pipe_info);
		if (!compl)
			break;

		compl->send_or_recv = HIF_CE_COMPLETE_SEND;
		compl->ce_state = ce_state;
		compl->pipe_info = pipe_info;
		compl->transfer_context = transfer_context;
		compl->nbytes = nbytes;
		compl->transfer_id = transfer_id;

		/*
		 * Add the completion to the processing queue.
		 */
		spin_lock_bh(&ar_pci->compl_lock);
		list_add_tail(&compl->list, &ar_pci->compl_process);
		spin_unlock_bh(&ar_pci->compl_lock);

		process = true;
	} while (ath10k_ce_completed_send_next(ce_state,
					       &transfer_context,
					       &ce_data, &nbytes,
					       &transfer_id) == 0);

	/*
	 * If only some of the items within a sendlist have completed,
	 * don't invoke completion processing until the entire sendlist
	 * has been sent.
	 */
	if (!process)
		return;

	ath10k_pci_process_ce(ar);
}
/* Called by lower (CE) layer when data is received from the Target. */
static void ath10k_pci_ce_recv_data(struct ce_state *ce_state,
				    void *transfer_context, u32 ce_data,
				    unsigned int nbytes,
				    unsigned int transfer_id,
				    unsigned int flags)
{
	struct ath10k *ar = ce_state->ar;
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	struct hif_ce_pipe_info *pipe_info = &ar_pci->pipe_info[ce_state->id];
	struct ath10k_pci_compl *compl;
	struct sk_buff *skb;

	do {
		compl = get_free_compl(pipe_info);
		if (!compl)
			break;

		compl->send_or_recv = HIF_CE_COMPLETE_RECV;
		compl->ce_state = ce_state;
		compl->pipe_info = pipe_info;
		compl->transfer_context = transfer_context;
		compl->nbytes = nbytes;
		compl->transfer_id = transfer_id;
		compl->flags = flags;

		skb = transfer_context;
		dma_unmap_single(ar->dev, ATH10K_SKB_CB(skb)->paddr,
				 skb->len + skb_tailroom(skb),
				 DMA_FROM_DEVICE);
		/*
		 * Add the completion to the processing queue.
		 */
		spin_lock_bh(&ar_pci->compl_lock);
		list_add_tail(&compl->list, &ar_pci->compl_process);
		spin_unlock_bh(&ar_pci->compl_lock);

	} while (ath10k_ce_completed_recv_next(ce_state,
					       &transfer_context,
					       &ce_data, &nbytes,
					       &transfer_id,
					       &flags) == 0);

	ath10k_pci_process_ce(ar);
}
/* Send the first nbytes bytes of the buffer */
static int ath10k_pci_hif_send_head(struct ath10k *ar, u8 pipe_id,
				    unsigned int transfer_id,
				    unsigned int bytes, struct sk_buff *nbuf)
{
	struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(nbuf);
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	struct hif_ce_pipe_info *pipe_info = &(ar_pci->pipe_info[pipe_id]);
	struct ce_state *ce_hdl = pipe_info->ce_hdl;
	struct ce_sendlist sendlist;
	unsigned int len;
	u32 flags = 0;
	int ret;

	memset(&sendlist, 0, sizeof(struct ce_sendlist));

	len = min(bytes, nbuf->len);
	bytes -= len;

	if (len & 3)
		ath10k_warn("skb not aligned to 4-byte boundary (%d)\n", len);

	ath10k_dbg(ATH10K_DBG_PCI,
		   "pci send data vaddr %p paddr 0x%llx len %d as %d bytes\n",
		   nbuf->data, (unsigned long long) skb_cb->paddr,
		   nbuf->len, len);
	ath10k_dbg_dump(ATH10K_DBG_PCI_DUMP, NULL,
			"ath10k tx: data: ",
			nbuf->data, nbuf->len);

	ath10k_ce_sendlist_buf_add(&sendlist, skb_cb->paddr, len, flags);

	/* Make sure we have resources to handle this request */
	spin_lock_bh(&pipe_info->pipe_lock);
	if (!pipe_info->num_sends_allowed) {
		ath10k_warn("Pipe: %d is full\n", pipe_id);
		spin_unlock_bh(&pipe_info->pipe_lock);
		return -ENOSR;
	}
	pipe_info->num_sends_allowed--;
	spin_unlock_bh(&pipe_info->pipe_lock);

	ret = ath10k_ce_sendlist_send(ce_hdl, nbuf, &sendlist, transfer_id);
	if (ret)
		ath10k_warn("CE send failed: %p\n", nbuf);

	return ret;
}
static u16 ath10k_pci_hif_get_free_queue_number(struct ath10k *ar, u8 pipe)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	struct hif_ce_pipe_info *pipe_info = &(ar_pci->pipe_info[pipe]);
	int ret;

	spin_lock_bh(&pipe_info->pipe_lock);
	ret = pipe_info->num_sends_allowed;
	spin_unlock_bh(&pipe_info->pipe_lock);

	return ret;
}
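
/*
 * Editor's illustration (compiled out): the HIF flow-control contract.
 * Callers may consult the free queue count before pushing a buffer;
 * send_head itself still fails safely when the pipe is full. The wrapper
 * below is hypothetical.
 */
#if 0
static int example_try_send(struct ath10k *ar, u8 pipe_id,
			    unsigned int xfer_id, struct sk_buff *skb)
{
	if (ath10k_pci_hif_get_free_queue_number(ar, pipe_id) == 0)
		return -EBUSY;	/* back off and retry later */

	return ath10k_pci_hif_send_head(ar, pipe_id, xfer_id, skb->len, skb);
}
#endif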
static void ath10k_pci_hif_dump_area(struct ath10k *ar)
{
	u32 reg_dump_area = 0;
	u32 reg_dump_values[REG_DUMP_COUNT_QCA988X] = {};
	u32 host_addr;
	int ret;
	u32 i;

	ath10k_err("firmware crashed!\n");
	ath10k_err("hardware name %s version 0x%x\n",
		   ar->hw_params.name, ar->target_version);
	ath10k_err("firmware version: %u.%u.%u.%u\n", ar->fw_version_major,
		   ar->fw_version_minor, ar->fw_version_release,
		   ar->fw_version_build);

	host_addr = host_interest_item_address(HI_ITEM(hi_failure_state));
	if (ath10k_pci_diag_read_mem(ar, host_addr,
				     &reg_dump_area, sizeof(u32)) != 0) {
		ath10k_warn("could not read hi_failure_state\n");
		return;
	}

	ath10k_err("target register Dump Location: 0x%08X\n", reg_dump_area);

	ret = ath10k_pci_diag_read_mem(ar, reg_dump_area,
				       &reg_dump_values[0],
				       REG_DUMP_COUNT_QCA988X * sizeof(u32));
	if (ret != 0) {
		ath10k_err("could not dump FW Dump Area\n");
		return;
	}

	BUILD_BUG_ON(REG_DUMP_COUNT_QCA988X % 4);

	ath10k_err("target Register Dump\n");
	for (i = 0; i < REG_DUMP_COUNT_QCA988X; i += 4)
		ath10k_err("[%02d]: 0x%08X 0x%08X 0x%08X 0x%08X\n",
			   i,
			   reg_dump_values[i],
			   reg_dump_values[i + 1],
			   reg_dump_values[i + 2],
			   reg_dump_values[i + 3]);

	ieee80211_queue_work(ar->hw, &ar->restart_work);
}
static void ath10k_pci_hif_send_complete_check(struct ath10k *ar, u8 pipe,
					       int force)
{
	if (!force) {
		int resources;
		/*
		 * Decide whether to actually poll for completions, or just
		 * wait for a later chance.
		 * If there seem to be plenty of resources left, then just
		 * wait, since checking involves reading a CE register, which
		 * is a relatively expensive operation.
		 */
		resources = ath10k_pci_hif_get_free_queue_number(ar, pipe);

		/*
		 * If at least 50% of the total resources are still available,
		 * don't bother checking again yet.
		 */
		if (resources > (host_ce_config_wlan[pipe].src_nentries >> 1))
			return;
	}
	ath10k_ce_per_engine_service(ar, pipe);
}
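
/*
 * Editor's note: e.g. for CE3 (src_nentries == 32 in the host CE table
 * above), the relatively expensive register poll in send_complete_check()
 * is skipped while more than 16 send slots remain free.
 */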
static void ath10k_pci_hif_set_callbacks(struct ath10k *ar,
					 struct ath10k_hif_cb *callbacks)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

	ath10k_dbg(ATH10K_DBG_PCI, "%s\n", __func__);

	memcpy(&ar_pci->msg_callbacks_current, callbacks,
	       sizeof(ar_pci->msg_callbacks_current));
}
static int ath10k_pci_start_ce(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	struct ce_state *ce_diag = ar_pci->ce_diag;
	const struct ce_attr *attr;
	struct hif_ce_pipe_info *pipe_info;
	struct ath10k_pci_compl *compl;
	int i, pipe_num, completions, disable_interrupts;

	spin_lock_init(&ar_pci->compl_lock);
	INIT_LIST_HEAD(&ar_pci->compl_process);

	for (pipe_num = 0; pipe_num < ar_pci->ce_count; pipe_num++) {
		pipe_info = &ar_pci->pipe_info[pipe_num];

		spin_lock_init(&pipe_info->pipe_lock);
		INIT_LIST_HEAD(&pipe_info->compl_free);

		/* Handle Diagnostic CE specially */
		if (pipe_info->ce_hdl == ce_diag)
			continue;

		attr = &host_ce_config_wlan[pipe_num];
		completions = 0;

		if (attr->src_nentries) {
			disable_interrupts = attr->flags & CE_ATTR_DIS_INTR;
			ath10k_ce_send_cb_register(pipe_info->ce_hdl,
						   ath10k_pci_ce_send_done,
						   disable_interrupts);
			completions += attr->src_nentries;
			pipe_info->num_sends_allowed = attr->src_nentries - 1;
		}

		if (attr->dest_nentries) {
			ath10k_ce_recv_cb_register(pipe_info->ce_hdl,
						   ath10k_pci_ce_recv_data);
			completions += attr->dest_nentries;
		}

		if (completions == 0)
			continue;

		for (i = 0; i < completions; i++) {
			compl = kmalloc(sizeof(struct ath10k_pci_compl),
					GFP_KERNEL);
			if (!compl) {
				ath10k_warn("No memory for completion state\n");
				ath10k_pci_stop_ce(ar);
				return -ENOMEM;
			}

			compl->send_or_recv = HIF_CE_COMPLETE_FREE;
			list_add_tail(&compl->list, &pipe_info->compl_free);
		}
	}

	return 0;
}
static void ath10k_pci_stop_ce(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	struct ath10k_pci_compl *compl;
	struct sk_buff *skb;
	int i;

	ath10k_ce_disable_interrupts(ar);

	/* Cancel the pending tasklet */
	tasklet_kill(&ar_pci->intr_tq);

	for (i = 0; i < CE_COUNT; i++)
		tasklet_kill(&ar_pci->pipe_info[i].intr);

	/* Mark pending completions as aborted, so that upper layers free up
	 * their associated resources */
	spin_lock_bh(&ar_pci->compl_lock);
	list_for_each_entry(compl, &ar_pci->compl_process, list) {
		skb = (struct sk_buff *)compl->transfer_context;
		ATH10K_SKB_CB(skb)->is_aborted = true;
	}
	spin_unlock_bh(&ar_pci->compl_lock);
}
static void ath10k_pci_cleanup_ce(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	struct ath10k_pci_compl *compl, *tmp;
	struct hif_ce_pipe_info *pipe_info;
	struct sk_buff *netbuf;
	int pipe_num;

	/* Free pending completions. */
	spin_lock_bh(&ar_pci->compl_lock);
	if (!list_empty(&ar_pci->compl_process))
		ath10k_warn("pending completions still present! possible memory leaks.\n");

	list_for_each_entry_safe(compl, tmp, &ar_pci->compl_process, list) {
		list_del(&compl->list);
		netbuf = (struct sk_buff *)compl->transfer_context;
		dev_kfree_skb_any(netbuf);
		kfree(compl);
	}
	spin_unlock_bh(&ar_pci->compl_lock);

	/* Free unused completions for each pipe. */
	for (pipe_num = 0; pipe_num < ar_pci->ce_count; pipe_num++) {
		pipe_info = &ar_pci->pipe_info[pipe_num];

		spin_lock_bh(&pipe_info->pipe_lock);
		list_for_each_entry_safe(compl, tmp,
					 &pipe_info->compl_free, list) {
			list_del(&compl->list);
			kfree(compl);
		}
		spin_unlock_bh(&pipe_info->pipe_lock);
	}
}
static void ath10k_pci_process_ce(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ar->hif.priv;
	struct ath10k_hif_cb *cb = &ar_pci->msg_callbacks_current;
	struct ath10k_pci_compl *compl;
	struct sk_buff *skb;
	unsigned int nbytes;
	int ret, send_done = 0;

	/* Upper layers aren't ready to handle tx/rx completions in parallel so
	 * we must serialize all completion processing. */

	spin_lock_bh(&ar_pci->compl_lock);
	if (ar_pci->compl_processing) {
		spin_unlock_bh(&ar_pci->compl_lock);
		return;
	}
	ar_pci->compl_processing = true;
	spin_unlock_bh(&ar_pci->compl_lock);

	do {
		spin_lock_bh(&ar_pci->compl_lock);
		if (list_empty(&ar_pci->compl_process)) {
			spin_unlock_bh(&ar_pci->compl_lock);
			break;
		}
		compl = list_first_entry(&ar_pci->compl_process,
					 struct ath10k_pci_compl, list);
		list_del(&compl->list);
		spin_unlock_bh(&ar_pci->compl_lock);

		if (compl->send_or_recv == HIF_CE_COMPLETE_SEND) {
			cb->tx_completion(ar,
					  compl->transfer_context,
					  compl->transfer_id);
			send_done = 1;
		} else {
			ret = ath10k_pci_post_rx_pipe(compl->pipe_info, 1);
			if (ret) {
				ath10k_warn("Unable to post recv buffer for pipe: %d\n",
					    compl->pipe_info->pipe_num);
				break;
			}

			skb = (struct sk_buff *)compl->transfer_context;
			nbytes = compl->nbytes;

			ath10k_dbg(ATH10K_DBG_PCI,
				   "ath10k_pci_ce_recv_data netbuf=%p nbytes=%d\n",
				   skb, nbytes);
			ath10k_dbg_dump(ATH10K_DBG_PCI_DUMP, NULL,
					"ath10k rx: ", skb->data, nbytes);

			if (skb->len + skb_tailroom(skb) >= nbytes) {
				skb_trim(skb, 0);
				skb_put(skb, nbytes);
				cb->rx_completion(ar, skb,
						  compl->pipe_info->pipe_num);
			} else {
				ath10k_warn("rxed more than expected (nbytes %d, max %d)",
					    nbytes,
					    skb->len + skb_tailroom(skb));
			}
		}

		compl->send_or_recv = HIF_CE_COMPLETE_FREE;

		/*
		 * Add completion back to the pipe's free list.
		 */
		spin_lock_bh(&compl->pipe_info->pipe_lock);
		list_add_tail(&compl->list, &compl->pipe_info->compl_free);
		compl->pipe_info->num_sends_allowed += send_done;
		spin_unlock_bh(&compl->pipe_info->pipe_lock);
	} while (1);

	spin_lock_bh(&ar_pci->compl_lock);
	ar_pci->compl_processing = false;
	spin_unlock_bh(&ar_pci->compl_lock);
}
/* TODO - temporary mapping while we have too few CE's */
static int ath10k_pci_hif_map_service_to_pipe(struct ath10k *ar,
					      u16 service_id, u8 *ul_pipe,
					      u8 *dl_pipe, int *ul_is_polled,
					      int *dl_is_polled)
{
	int ret = 0;

	/* polling for received messages not supported */
	*dl_is_polled = 0;

	switch (service_id) {
	case ATH10K_HTC_SVC_ID_HTT_DATA_MSG:
		/*
		 * Host->target HTT gets its own pipe, so it can be polled
		 * while other pipes are interrupt driven.
		 */
		*ul_pipe = 4;
		/*
		 * Use the same target->host pipe for HTC ctrl, HTC raw
		 * streams, and HTT.
		 */
		*dl_pipe = 1;
		break;

	case ATH10K_HTC_SVC_ID_RSVD_CTRL:
	case ATH10K_HTC_SVC_ID_TEST_RAW_STREAMS:
		/*
		 * Note: HTC_RAW_STREAMS_SVC is currently unused, and
		 * HTC_CTRL_RSVD_SVC could share the same pipe as the
		 * WMI services. So, if another CE is needed, change
		 * this to *ul_pipe = 3, which frees up CE 0.
		 */
		/* *ul_pipe = 3; */
		*ul_pipe = 0;
		*dl_pipe = 1;
		break;

	case ATH10K_HTC_SVC_ID_WMI_DATA_BK:
	case ATH10K_HTC_SVC_ID_WMI_DATA_BE:
	case ATH10K_HTC_SVC_ID_WMI_DATA_VI:
	case ATH10K_HTC_SVC_ID_WMI_DATA_VO:

	case ATH10K_HTC_SVC_ID_WMI_CONTROL:
		*ul_pipe = 3;
		*dl_pipe = 2;
		break;

		/* pipe 5 unused   */
		/* pipe 6 reserved */
		/* pipe 7 reserved */

	default:
		ret = -1;
		break;
	}
	*ul_is_polled =
		(host_ce_config_wlan[*ul_pipe].flags & CE_ATTR_DIS_INTR) != 0;

	return ret;
}
static void ath10k_pci_hif_get_default_pipe(struct ath10k *ar,
					    u8 *ul_pipe, u8 *dl_pipe)
{
	int ul_is_polled, dl_is_polled;

	(void)ath10k_pci_hif_map_service_to_pipe(ar,
						 ATH10K_HTC_SVC_ID_RSVD_CTRL,
						 ul_pipe,
						 dl_pipe,
						 &ul_is_polled,
						 &dl_is_polled);
}
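
/*
 * Editor's illustration (compiled out): resolving the pipe pair for a
 * service. Per the mapping above, ATH10K_HTC_SVC_ID_WMI_CONTROL yields
 * ul_pipe 3 / dl_pipe 2, matching the CE tables earlier in this file.
 * The snippet below is hypothetical.
 */
#if 0
static void example_lookup_wmi_pipes(struct ath10k *ar)
{
	u8 ul_pipe, dl_pipe;
	int ul_polled, dl_polled;

	ath10k_pci_hif_map_service_to_pipe(ar, ATH10K_HTC_SVC_ID_WMI_CONTROL,
					   &ul_pipe, &dl_pipe,
					   &ul_polled, &dl_polled);
}
#endif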
static int ath10k_pci_post_rx_pipe(struct hif_ce_pipe_info *pipe_info,
				   int num)
{
	struct ath10k *ar = pipe_info->hif_ce_state;
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	struct ce_state *ce_state = pipe_info->ce_hdl;
	struct sk_buff *skb;
	dma_addr_t ce_data;
	int i, ret = 0;

	if (pipe_info->buf_sz == 0)
		return 0;

	for (i = 0; i < num; i++) {
		skb = dev_alloc_skb(pipe_info->buf_sz);
		if (!skb) {
			ath10k_warn("could not allocate skbuff for pipe %d\n",
				    num);
			ret = -ENOMEM;
			goto err;
		}

		WARN_ONCE((unsigned long)skb->data & 3, "unaligned skb");

		ce_data = dma_map_single(ar->dev, skb->data,
					 skb->len + skb_tailroom(skb),
					 DMA_FROM_DEVICE);

		if (unlikely(dma_mapping_error(ar->dev, ce_data))) {
			ath10k_warn("could not dma map skbuff\n");
			dev_kfree_skb_any(skb);
			ret = -EIO;
			goto err;
		}

		ATH10K_SKB_CB(skb)->paddr = ce_data;

		pci_dma_sync_single_for_device(ar_pci->pdev, ce_data,
					       pipe_info->buf_sz,
					       PCI_DMA_FROMDEVICE);

		ret = ath10k_ce_recv_buf_enqueue(ce_state, (void *)skb,
						 ce_data);
		if (ret) {
			ath10k_warn("could not enqueue to pipe %d (%d)\n",
				    num, ret);
			goto err;
		}
	}

	return ret;

err:
	ath10k_pci_rx_pipe_cleanup(pipe_info);
	return ret;
}
static int ath10k_pci_post_rx(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	struct hif_ce_pipe_info *pipe_info;
	const struct ce_attr *attr;
	int pipe_num, ret = 0;

	for (pipe_num = 0; pipe_num < ar_pci->ce_count; pipe_num++) {
		pipe_info = &ar_pci->pipe_info[pipe_num];
		attr = &host_ce_config_wlan[pipe_num];

		if (attr->dest_nentries == 0)
			continue;

		ret = ath10k_pci_post_rx_pipe(pipe_info,
					      attr->dest_nentries - 1);
		if (ret) {
			ath10k_warn("Unable to replenish recv buffers for pipe: %d\n",
				    pipe_num);

			for (; pipe_num >= 0; pipe_num--) {
				pipe_info = &ar_pci->pipe_info[pipe_num];
				ath10k_pci_rx_pipe_cleanup(pipe_info);
			}
			return ret;
		}
	}

	return 0;
}
static int ath10k_pci_hif_start(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	int ret;

	ret = ath10k_pci_start_ce(ar);
	if (ret) {
		ath10k_warn("could not start CE (%d)\n", ret);
		return ret;
	}

	/* Post buffers once to start things off. */
	ret = ath10k_pci_post_rx(ar);
	if (ret) {
		ath10k_warn("could not post rx pipes (%d)\n", ret);
		return ret;
	}

	ar_pci->started = 1;
	return 0;
}
static void ath10k_pci_rx_pipe_cleanup(struct hif_ce_pipe_info *pipe_info)
{
	struct ath10k *ar;
	struct ath10k_pci *ar_pci;
	struct ce_state *ce_hdl;
	u32 buf_sz;
	struct sk_buff *netbuf;
	u32 ce_data;

	buf_sz = pipe_info->buf_sz;

	/* Unused Copy Engine */
	if (buf_sz == 0)
		return;

	ar = pipe_info->hif_ce_state;
	ar_pci = ath10k_pci_priv(ar);

	if (!ar_pci->started)
		return;

	ce_hdl = pipe_info->ce_hdl;

	while (ath10k_ce_revoke_recv_next(ce_hdl, (void **)&netbuf,
					  &ce_data) == 0) {
		dma_unmap_single(ar->dev, ATH10K_SKB_CB(netbuf)->paddr,
				 netbuf->len + skb_tailroom(netbuf),
				 DMA_FROM_DEVICE);
		dev_kfree_skb_any(netbuf);
	}
}
static void ath10k_pci_tx_pipe_cleanup(struct hif_ce_pipe_info *pipe_info)
{
	struct ath10k *ar;
	struct ath10k_pci *ar_pci;
	struct ce_state *ce_hdl;
	struct sk_buff *netbuf;
	u32 ce_data;
	unsigned int nbytes;
	unsigned int id;
	u32 buf_sz;

	buf_sz = pipe_info->buf_sz;

	/* Unused Copy Engine */
	if (buf_sz == 0)
		return;

	ar = pipe_info->hif_ce_state;
	ar_pci = ath10k_pci_priv(ar);

	if (!ar_pci->started)
		return;

	ce_hdl = pipe_info->ce_hdl;

	while (ath10k_ce_cancel_send_next(ce_hdl, (void **)&netbuf,
					  &ce_data, &nbytes, &id) == 0) {
		if (netbuf != CE_SENDLIST_ITEM_CTXT) {
			/*
			 * Indicate the completion to higher layer to free
			 * the buffer
			 */
			ATH10K_SKB_CB(netbuf)->is_aborted = true;
			ar_pci->msg_callbacks_current.tx_completion(ar,
								    netbuf,
								    id);
		}
	}
}
/*
 * Cleanup residual buffers for device shutdown:
 *    buffers that were enqueued for receive
 *    buffers that were to be sent
 * Note: Buffers that had completed but which were
 * not yet processed are on a completion queue. They
 * are handled when the completion thread shuts down.
 */
static void ath10k_pci_buffer_cleanup(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	int pipe_num;

	for (pipe_num = 0; pipe_num < ar_pci->ce_count; pipe_num++) {
		struct hif_ce_pipe_info *pipe_info;

		pipe_info = &ar_pci->pipe_info[pipe_num];
		ath10k_pci_rx_pipe_cleanup(pipe_info);
		ath10k_pci_tx_pipe_cleanup(pipe_info);
	}
}
static void ath10k_pci_ce_deinit(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	struct hif_ce_pipe_info *pipe_info;
	int pipe_num;

	for (pipe_num = 0; pipe_num < ar_pci->ce_count; pipe_num++) {
		pipe_info = &ar_pci->pipe_info[pipe_num];
		if (pipe_info->ce_hdl) {
			ath10k_ce_deinit(pipe_info->ce_hdl);
			pipe_info->ce_hdl = NULL;
			pipe_info->buf_sz = 0;
		}
	}
}
static void ath10k_pci_disable_irqs(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	int i;

	for (i = 0; i < max(1, ar_pci->num_msi_intrs); i++)
		disable_irq(ar_pci->pdev->irq + i);
}
static void ath10k_pci_hif_stop(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

	ath10k_dbg(ATH10K_DBG_PCI, "%s\n", __func__);

	/* Irqs are never explicitly re-enabled. They are implicitly re-enabled
	 * by ath10k_pci_start_intr(). */
	ath10k_pci_disable_irqs(ar);

	ath10k_pci_stop_ce(ar);

	/* At this point, asynchronous threads are stopped, the target should
	 * not DMA nor interrupt. We process the leftovers and then free
	 * everything else up. */

	ath10k_pci_process_ce(ar);
	ath10k_pci_cleanup_ce(ar);
	ath10k_pci_buffer_cleanup(ar);

	ar_pci->started = 0;
}
static int ath10k_pci_hif_exchange_bmi_msg(struct ath10k *ar,
					   void *req, u32 req_len,
					   void *resp, u32 *resp_len)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	struct ce_state *ce_tx = ar_pci->pipe_info[BMI_CE_NUM_TO_TARG].ce_hdl;
	struct ce_state *ce_rx = ar_pci->pipe_info[BMI_CE_NUM_TO_HOST].ce_hdl;
	dma_addr_t req_paddr = 0;
	dma_addr_t resp_paddr = 0;
	struct bmi_xfer xfer = {};
	void *treq, *tresp = NULL;
	int ret = 0;

	if (resp && !resp_len)
		return -EINVAL;

	if (resp && resp_len && *resp_len == 0)
		return -EINVAL;

	treq = kmemdup(req, req_len, GFP_KERNEL);
	if (!treq)
		return -ENOMEM;

	req_paddr = dma_map_single(ar->dev, treq, req_len, DMA_TO_DEVICE);
	ret = dma_mapping_error(ar->dev, req_paddr);
	if (ret)
		goto err_dma;

	if (resp && resp_len) {
		tresp = kzalloc(*resp_len, GFP_KERNEL);
		if (!tresp) {
			ret = -ENOMEM;
			goto err_req;
		}

		resp_paddr = dma_map_single(ar->dev, tresp, *resp_len,
					    DMA_FROM_DEVICE);
		ret = dma_mapping_error(ar->dev, resp_paddr);
		if (ret)
			goto err_req;

		xfer.wait_for_resp = true;

		ath10k_ce_recv_buf_enqueue(ce_rx, &xfer, resp_paddr);
	}

	init_completion(&xfer.done);

	ret = ath10k_ce_send(ce_tx, &xfer, req_paddr, req_len, -1, 0);
	if (ret)
		goto err_resp;

	ret = wait_for_completion_timeout(&xfer.done,
					  BMI_COMMUNICATION_TIMEOUT_HZ);
	if (ret <= 0) {
		u32 unused_buffer;
		unsigned int unused_nbytes;
		unsigned int unused_id;

		ret = -ETIMEDOUT;
		ath10k_ce_cancel_send_next(ce_tx, NULL, &unused_buffer,
					   &unused_nbytes, &unused_id);
	} else {
		/* non-zero means we did not time out */
		ret = 0;
	}

err_resp:
	if (resp) {
		u32 unused_buffer;

		ath10k_ce_revoke_recv_next(ce_rx, NULL, &unused_buffer);
		dma_unmap_single(ar->dev, resp_paddr,
				 *resp_len, DMA_FROM_DEVICE);
	}
err_req:
	dma_unmap_single(ar->dev, req_paddr, req_len, DMA_TO_DEVICE);

	if (ret == 0 && resp_len) {
		*resp_len = min(*resp_len, xfer.resp_len);
		memcpy(resp, tresp, xfer.resp_len);
	}
err_dma:
	kfree(treq);
	kfree(tresp);

	return ret;
}
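
/*
 * Editor's illustration (compiled out): a BMI exchange as driven by the
 * function above. Request and response layouts belong to bmi.h; the buffer
 * sizes here are placeholders, not real BMI message sizes.
 */
#if 0
static int example_bmi_exchange(struct ath10k *ar)
{
	u8 req[16] = {};	/* hypothetical serialized BMI command */
	u8 resp[16];
	u32 resp_len = sizeof(resp);

	/* Blocks up to BMI_COMMUNICATION_TIMEOUT_HZ for the target reply;
	 * on success resp_len is clamped to what was actually received. */
	return ath10k_pci_hif_exchange_bmi_msg(ar, req, sizeof(req),
					       resp, &resp_len);
}
#endif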
static void ath10k_pci_bmi_send_done(struct ce_state *ce_state,
				     void *transfer_context,
				     u32 data,
				     unsigned int nbytes,
				     unsigned int transfer_id)
{
	struct bmi_xfer *xfer = transfer_context;

	if (xfer->wait_for_resp)
		return;

	complete(&xfer->done);
}
static void ath10k_pci_bmi_recv_data(struct ce_state *ce_state,
				     void *transfer_context,
				     u32 data,
				     unsigned int nbytes,
				     unsigned int transfer_id,
				     unsigned int flags)
{
	struct bmi_xfer *xfer = transfer_context;

	if (!xfer->wait_for_resp) {
		ath10k_warn("unexpected: BMI data received; ignoring\n");
		return;
	}

	xfer->resp_len = nbytes;
	complete(&xfer->done);
}
/*
 * Map from service/endpoint to Copy Engine.
 * This table is derived from the CE_PCI TABLE, above.
 * It is passed to the Target at startup for use by firmware.
 */
static const struct service_to_pipe target_service_to_ce_map_wlan[] = {
	{
		 ATH10K_HTC_SVC_ID_WMI_DATA_VO,
		 PIPEDIR_OUT,		/* out = UL = host -> target */
		 3,
	},
	{
		 ATH10K_HTC_SVC_ID_WMI_DATA_VO,
		 PIPEDIR_IN,		/* in = DL = target -> host */
		 2,
	},
	{
		 ATH10K_HTC_SVC_ID_WMI_DATA_BK,
		 PIPEDIR_OUT,		/* out = UL = host -> target */
		 3,
	},
	{
		 ATH10K_HTC_SVC_ID_WMI_DATA_BK,
		 PIPEDIR_IN,		/* in = DL = target -> host */
		 2,
	},
	{
		 ATH10K_HTC_SVC_ID_WMI_DATA_BE,
		 PIPEDIR_OUT,		/* out = UL = host -> target */
		 3,
	},
	{
		 ATH10K_HTC_SVC_ID_WMI_DATA_BE,
		 PIPEDIR_IN,		/* in = DL = target -> host */
		 2,
	},
	{
		 ATH10K_HTC_SVC_ID_WMI_DATA_VI,
		 PIPEDIR_OUT,		/* out = UL = host -> target */
		 3,
	},
	{
		 ATH10K_HTC_SVC_ID_WMI_DATA_VI,
		 PIPEDIR_IN,		/* in = DL = target -> host */
		 2,
	},
	{
		 ATH10K_HTC_SVC_ID_WMI_CONTROL,
		 PIPEDIR_OUT,		/* out = UL = host -> target */
		 3,
	},
	{
		 ATH10K_HTC_SVC_ID_WMI_CONTROL,
		 PIPEDIR_IN,		/* in = DL = target -> host */
		 2,
	},
	{
		 ATH10K_HTC_SVC_ID_RSVD_CTRL,
		 PIPEDIR_OUT,		/* out = UL = host -> target */
		 0,		/* could be moved to 3 (share with WMI) */
	},
	{
		 ATH10K_HTC_SVC_ID_RSVD_CTRL,
		 PIPEDIR_IN,		/* in = DL = target -> host */
		 1,
	},
	{
		 ATH10K_HTC_SVC_ID_TEST_RAW_STREAMS,	/* not currently used */
		 PIPEDIR_OUT,		/* out = UL = host -> target */
		 0,
	},
	{
		 ATH10K_HTC_SVC_ID_TEST_RAW_STREAMS,	/* not currently used */
		 PIPEDIR_IN,		/* in = DL = target -> host */
		 1,
	},
	{
		 ATH10K_HTC_SVC_ID_HTT_DATA_MSG,
		 PIPEDIR_OUT,		/* out = UL = host -> target */
		 4,
	},
	{
		 ATH10K_HTC_SVC_ID_HTT_DATA_MSG,
		 PIPEDIR_IN,		/* in = DL = target -> host */
		 1,
	},

	/* (Additions here) */

	{				/* Must be last */
		 0,
		 0,
		 0,
	},
};
/*
 * Send an interrupt to the device to wake up the Target CPU
 * so it has an opportunity to notice any changed state.
 */
static int ath10k_pci_wake_target_cpu(struct ath10k *ar)
{
	int ret;
	u32 core_ctrl;

	ret = ath10k_pci_diag_read_access(ar, SOC_CORE_BASE_ADDRESS |
					      CORE_CTRL_ADDRESS,
					  &core_ctrl);
	if (ret) {
		ath10k_warn("Unable to read core ctrl\n");
		return ret;
	}

	/* A_INUM_FIRMWARE interrupt to Target CPU */
	core_ctrl |= CORE_CTRL_CPU_INTR_MASK;

	ret = ath10k_pci_diag_write_access(ar, SOC_CORE_BASE_ADDRESS |
					       CORE_CTRL_ADDRESS, core_ctrl);
	if (ret)
		ath10k_warn("Unable to set interrupt mask\n");

	return ret;
}
static int ath10k_pci_init_config(struct ath10k *ar)
{
	u32 interconnect_targ_addr;
	u32 pcie_state_targ_addr = 0;
	u32 pipe_cfg_targ_addr = 0;
	u32 svc_to_pipe_map = 0;
	u32 pcie_config_flags = 0;
	u32 ealloc_value;
	u32 ealloc_targ_addr;
	u32 flag2_value;
	u32 flag2_targ_addr;
	int ret = 0;

	/* Download to Target the CE Config and the service-to-CE map */
	interconnect_targ_addr =
		host_interest_item_address(HI_ITEM(hi_interconnect_state));

	/* Supply Target-side CE configuration */
	ret = ath10k_pci_diag_read_access(ar, interconnect_targ_addr,
					  &pcie_state_targ_addr);
	if (ret != 0) {
		ath10k_err("Failed to get pcie state addr: %d\n", ret);
		return ret;
	}

	if (pcie_state_targ_addr == 0) {
		ret = -EIO;
		ath10k_err("Invalid pcie state addr\n");
		return ret;
	}

	ret = ath10k_pci_diag_read_access(ar, pcie_state_targ_addr +
					  offsetof(struct pcie_state,
						   pipe_cfg_addr),
					  &pipe_cfg_targ_addr);
	if (ret != 0) {
		ath10k_err("Failed to get pipe cfg addr: %d\n", ret);
		return ret;
	}

	if (pipe_cfg_targ_addr == 0) {
		ret = -EIO;
		ath10k_err("Invalid pipe cfg addr\n");
		return ret;
	}

	ret = ath10k_pci_diag_write_mem(ar, pipe_cfg_targ_addr,
					target_ce_config_wlan,
					sizeof(target_ce_config_wlan));

	if (ret != 0) {
		ath10k_err("Failed to write pipe cfg: %d\n", ret);
		return ret;
	}

	ret = ath10k_pci_diag_read_access(ar, pcie_state_targ_addr +
					  offsetof(struct pcie_state,
						   svc_to_pipe_map),
					  &svc_to_pipe_map);
	if (ret != 0) {
		ath10k_err("Failed to get svc/pipe map: %d\n", ret);
		return ret;
	}

	if (svc_to_pipe_map == 0) {
		ret = -EIO;
		ath10k_err("Invalid svc_to_pipe map\n");
		return ret;
	}

	ret = ath10k_pci_diag_write_mem(ar, svc_to_pipe_map,
					target_service_to_ce_map_wlan,
					sizeof(target_service_to_ce_map_wlan));
	if (ret != 0) {
		ath10k_err("Failed to write svc/pipe map: %d\n", ret);
		return ret;
	}

	ret = ath10k_pci_diag_read_access(ar, pcie_state_targ_addr +
					  offsetof(struct pcie_state,
						   config_flags),
					  &pcie_config_flags);
	if (ret != 0) {
		ath10k_err("Failed to get pcie config_flags: %d\n", ret);
		return ret;
	}

	pcie_config_flags &= ~PCIE_CONFIG_FLAG_ENABLE_L1;

	ret = ath10k_pci_diag_write_mem(ar, pcie_state_targ_addr +
				 offsetof(struct pcie_state, config_flags),
					&pcie_config_flags,
					sizeof(pcie_config_flags));
	if (ret != 0) {
		ath10k_err("Failed to write pcie config_flags: %d\n", ret);
		return ret;
	}

	/* configure early allocation */
	ealloc_targ_addr = host_interest_item_address(HI_ITEM(hi_early_alloc));

	ret = ath10k_pci_diag_read_access(ar, ealloc_targ_addr, &ealloc_value);
	if (ret != 0) {
		ath10k_err("Failed to get early alloc val: %d\n", ret);
		return ret;
	}

	/* first bank is switched to IRAM */
	ealloc_value |= ((HI_EARLY_ALLOC_MAGIC << HI_EARLY_ALLOC_MAGIC_SHIFT) &
			 HI_EARLY_ALLOC_MAGIC_MASK);
	ealloc_value |= ((1 << HI_EARLY_ALLOC_IRAM_BANKS_SHIFT) &
			 HI_EARLY_ALLOC_IRAM_BANKS_MASK);

	ret = ath10k_pci_diag_write_access(ar, ealloc_targ_addr, ealloc_value);
	if (ret != 0) {
		ath10k_err("Failed to set early alloc val: %d\n", ret);
		return ret;
	}

	/* Tell Target to proceed with initialization */
	flag2_targ_addr = host_interest_item_address(HI_ITEM(hi_option_flag2));

	ret = ath10k_pci_diag_read_access(ar, flag2_targ_addr, &flag2_value);
	if (ret != 0) {
		ath10k_err("Failed to get option val: %d\n", ret);
		return ret;
	}

	flag2_value |= HI_OPTION_EARLY_CFG_DONE;

	ret = ath10k_pci_diag_write_access(ar, flag2_targ_addr, flag2_value);
	if (ret != 0) {
		ath10k_err("Failed to set option val: %d\n", ret);
		return ret;
	}

	return 0;
}
static int ath10k_pci_ce_init(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	struct hif_ce_pipe_info *pipe_info;
	const struct ce_attr *attr;
	int pipe_num;

	for (pipe_num = 0; pipe_num < ar_pci->ce_count; pipe_num++) {
		pipe_info = &ar_pci->pipe_info[pipe_num];
		pipe_info->pipe_num = pipe_num;
		pipe_info->hif_ce_state = ar;
		attr = &host_ce_config_wlan[pipe_num];

		pipe_info->ce_hdl = ath10k_ce_init(ar, pipe_num, attr);
		if (pipe_info->ce_hdl == NULL) {
			ath10k_err("Unable to initialize CE for pipe: %d\n",
				   pipe_num);

			/* It is safe to call it here. It checks if ce_hdl is
			 * valid for each pipe */
			ath10k_pci_ce_deinit(ar);
			return -1;
		}

		if (pipe_num == ar_pci->ce_count - 1) {
			/*
			 * Reserve the ultimate CE for
			 * diagnostic Window support
			 */
			ar_pci->ce_diag =
				ar_pci->pipe_info[ar_pci->ce_count - 1].ce_hdl;
			continue;
		}

		pipe_info->buf_sz = (size_t) (attr->src_sz_max);
	}

	/*
	 * Initially, establish CE completion handlers for use with BMI.
	 * These are overwritten with generic handlers after we exit BMI phase.
	 */
	pipe_info = &ar_pci->pipe_info[BMI_CE_NUM_TO_TARG];
	ath10k_ce_send_cb_register(pipe_info->ce_hdl,
				   ath10k_pci_bmi_send_done, 0);

	pipe_info = &ar_pci->pipe_info[BMI_CE_NUM_TO_HOST];
	ath10k_ce_recv_cb_register(pipe_info->ce_hdl,
				   ath10k_pci_bmi_recv_data);

	return 0;
}
static void ath10k_pci_fw_interrupt_handler(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	u32 fw_indicator_address, fw_indicator;

	ath10k_pci_wake(ar);

	fw_indicator_address = ar_pci->fw_indicator_address;
	fw_indicator = ath10k_pci_read32(ar, fw_indicator_address);

	if (fw_indicator & FW_IND_EVENT_PENDING) {
		/* ACK: clear Target-side pending event */
		ath10k_pci_write32(ar, fw_indicator_address,
				   fw_indicator & ~FW_IND_EVENT_PENDING);

		if (ar_pci->started) {
			ath10k_pci_hif_dump_area(ar);
		} else {
			/*
			 * Probable Target failure before we're prepared
			 * to handle it. Generally unexpected.
			 */
			ath10k_warn("early firmware event indicated\n");
		}
	}

	ath10k_pci_sleep(ar);
}
static int ath10k_pci_hif_power_up(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	int ret;

	ret = ath10k_pci_start_intr(ar);
	if (ret) {
		ath10k_err("could not start interrupt handling (%d)\n", ret);
		goto err;
	}

	/*
	 * Bring the target up cleanly.
	 *
	 * The target may be in an undefined state with an AUX-powered Target
	 * and a Host in WoW mode. If the Host crashes, loses power, or is
	 * restarted (without unloading the driver) then the Target is left
	 * (aux) powered and running. On a subsequent driver load, the Target
	 * is in an unexpected state. We try to catch that here in order to
	 * reset the Target and retry the probe.
	 */
	ath10k_pci_device_reset(ar);

	ret = ath10k_pci_reset_target(ar);
	if (ret)
		goto err_intr;

	if (!test_bit(ATH10K_PCI_FEATURE_SOC_POWER_SAVE, ar_pci->features))
		/* Force AWAKE forever */
		ath10k_do_pci_wake(ar);

	ret = ath10k_pci_ce_init(ar);
	if (ret)
		goto err_ps;

	ret = ath10k_pci_init_config(ar);
	if (ret)
		goto err_ce;

	ret = ath10k_pci_wake_target_cpu(ar);
	if (ret) {
		ath10k_err("could not wake up target CPU (%d)\n", ret);
		goto err_ce;
	}

	return 0;

err_ce:
	ath10k_pci_ce_deinit(ar);
err_ps:
	if (!test_bit(ATH10K_PCI_FEATURE_SOC_POWER_SAVE, ar_pci->features))
		ath10k_do_pci_sleep(ar);
err_intr:
	ath10k_pci_stop_intr(ar);
err:
	return ret;
}
static void ath10k_pci_hif_power_down(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

	ath10k_pci_stop_intr(ar);

	ath10k_pci_ce_deinit(ar);
	if (!test_bit(ATH10K_PCI_FEATURE_SOC_POWER_SAVE, ar_pci->features))
		ath10k_do_pci_sleep(ar);
}
#ifdef CONFIG_PM

#define ATH10K_PCI_PM_CONTROL 0x44

static int ath10k_pci_hif_suspend(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	struct pci_dev *pdev = ar_pci->pdev;
	u32 val;

	pci_read_config_dword(pdev, ATH10K_PCI_PM_CONTROL, &val);

	if ((val & 0x000000ff) != 0x3) {
		pci_save_state(pdev);
		pci_disable_device(pdev);
		pci_write_config_dword(pdev, ATH10K_PCI_PM_CONTROL,
				       (val & 0xffffff00) | 0x03);
	}

	return 0;
}
static int ath10k_pci_hif_resume(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	struct pci_dev *pdev = ar_pci->pdev;
	u32 val;

	pci_read_config_dword(pdev, ATH10K_PCI_PM_CONTROL, &val);

	if ((val & 0x000000ff) != 0) {
		pci_restore_state(pdev);
		pci_write_config_dword(pdev, ATH10K_PCI_PM_CONTROL,
				       val & 0xffffff00);
		/*
		 * Suspend/Resume resets the PCI configuration space,
		 * so we have to re-disable the RETRY_TIMEOUT register (0x41)
		 * to keep PCI Tx retries from interfering with C3 CPU state.
		 */
		pci_read_config_dword(pdev, 0x40, &val);

		if ((val & 0x0000ff00) != 0)
			pci_write_config_dword(pdev, 0x40, val & 0xffff00ff);
	}

	return 0;
}
#endif
static const struct ath10k_hif_ops ath10k_pci_hif_ops = {
	.send_head		= ath10k_pci_hif_send_head,
	.exchange_bmi_msg	= ath10k_pci_hif_exchange_bmi_msg,
	.start			= ath10k_pci_hif_start,
	.stop			= ath10k_pci_hif_stop,
	.map_service_to_pipe	= ath10k_pci_hif_map_service_to_pipe,
	.get_default_pipe	= ath10k_pci_hif_get_default_pipe,
	.send_complete_check	= ath10k_pci_hif_send_complete_check,
	.set_callbacks		= ath10k_pci_hif_set_callbacks,
	.get_free_queue_number	= ath10k_pci_hif_get_free_queue_number,
	.power_up		= ath10k_pci_hif_power_up,
	.power_down		= ath10k_pci_hif_power_down,
#ifdef CONFIG_PM
	.suspend		= ath10k_pci_hif_suspend,
	.resume			= ath10k_pci_hif_resume,
#endif
};
static void ath10k_pci_ce_tasklet(unsigned long ptr)
{
	struct hif_ce_pipe_info *pipe = (struct hif_ce_pipe_info *)ptr;
	struct ath10k_pci *ar_pci = pipe->ar_pci;

	ath10k_ce_per_engine_service(ar_pci->ar, pipe->pipe_num);
}

static void ath10k_msi_err_tasklet(unsigned long data)
{
	struct ath10k *ar = (struct ath10k *)data;

	ath10k_pci_fw_interrupt_handler(ar);
}
/*
 * Handler for a per-engine interrupt on a PARTICULAR CE.
 * This is used in cases where each CE has a private MSI interrupt.
 */
static irqreturn_t ath10k_pci_per_engine_handler(int irq, void *arg)
{
	struct ath10k *ar = arg;
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	int ce_id = irq - ar_pci->pdev->irq - MSI_ASSIGN_CE_INITIAL;

	if (ce_id < 0 || ce_id >= ARRAY_SIZE(ar_pci->pipe_info)) {
		ath10k_warn("unexpected/invalid irq %d ce_id %d\n", irq, ce_id);
		return IRQ_HANDLED;
	}

	/*
	 * NOTE: We are able to derive ce_id from irq because we
	 * use a one-to-one mapping for CE's 0..5.
	 * CE's 6 & 7 do not use interrupts at all.
	 *
	 * This mapping must be kept in sync with the mapping
	 * used by firmware.
	 */
	tasklet_schedule(&ar_pci->pipe_info[ce_id].intr);
	return IRQ_HANDLED;
}
static irqreturn_t ath10k_pci_msi_fw_handler(int irq, void *arg)
{
	struct ath10k *ar = arg;
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

	tasklet_schedule(&ar_pci->msi_fw_err);
	return IRQ_HANDLED;
}
/*
 * Top-level interrupt handler for all PCI interrupts from a Target.
 * When a block of MSI interrupts is allocated, this top-level handler
 * is not used; instead, we directly call the correct sub-handler.
 */
static irqreturn_t ath10k_pci_interrupt_handler(int irq, void *arg)
{
	struct ath10k *ar = arg;
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

	if (ar_pci->num_msi_intrs == 0) {
		/*
		 * IMPORTANT: the INTR_CLR register has to be set after
		 * INTR_ENABLE is set to 0, otherwise the interrupt cannot be
		 * really cleared.
		 */
		iowrite32(0, ar_pci->mem +
			  (SOC_CORE_BASE_ADDRESS |
			   PCIE_INTR_ENABLE_ADDRESS));
		iowrite32(PCIE_INTR_FIRMWARE_MASK |
			  PCIE_INTR_CE_MASK_ALL,
			  ar_pci->mem + (SOC_CORE_BASE_ADDRESS |
					 PCIE_INTR_CLR_ADDRESS));
		/*
		 * IMPORTANT: this extra read transaction is required to
		 * flush the posted write buffer.
		 */
		(void) ioread32(ar_pci->mem +
				(SOC_CORE_BASE_ADDRESS |
				 PCIE_INTR_ENABLE_ADDRESS));
	}

	tasklet_schedule(&ar_pci->intr_tq);

	return IRQ_HANDLED;
}
static void ath10k_pci_tasklet(unsigned long data)
{
	struct ath10k *ar = (struct ath10k *)data;
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

	ath10k_pci_fw_interrupt_handler(ar); /* FIXME: Handle FW error */
	ath10k_ce_per_engine_service_any(ar);

	if (ar_pci->num_msi_intrs == 0) {
		/* Enable Legacy PCI line interrupts */
		iowrite32(PCIE_INTR_FIRMWARE_MASK |
			  PCIE_INTR_CE_MASK_ALL,
			  ar_pci->mem + (SOC_CORE_BASE_ADDRESS |
					 PCIE_INTR_ENABLE_ADDRESS));
		/*
		 * IMPORTANT: this extra read transaction is required to
		 * flush the posted write buffer.
		 */
		(void) ioread32(ar_pci->mem +
				(SOC_CORE_BASE_ADDRESS |
				 PCIE_INTR_ENABLE_ADDRESS));
	}
}
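
/*
 * Editor's illustration (compiled out): the "write, then read back" idiom
 * used above. PCIe MMIO writes are posted, so a dummy read on the same BAR
 * forces them out before the CPU proceeds. The helper name is hypothetical.
 */
#if 0
static void example_flush_posted_write(void __iomem *mem, u32 addr, u32 val)
{
	iowrite32(val, mem + addr);	/* posted write */
	(void) ioread32(mem + addr);	/* non-posted read flushes it */
}
#endif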
static int ath10k_pci_start_intr_msix(struct ath10k *ar, int num)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	int ret;
	int i;

	ret = pci_enable_msi_block(ar_pci->pdev, num);
	if (ret)
		return ret;

	ret = request_irq(ar_pci->pdev->irq + MSI_ASSIGN_FW,
			  ath10k_pci_msi_fw_handler,
			  IRQF_SHARED, "ath10k_pci", ar);
	if (ret) {
		ath10k_warn("request_irq(%d) failed %d\n",
			    ar_pci->pdev->irq + MSI_ASSIGN_FW, ret);

		pci_disable_msi(ar_pci->pdev);
		return ret;
	}

	for (i = MSI_ASSIGN_CE_INITIAL; i <= MSI_ASSIGN_CE_MAX; i++) {
		ret = request_irq(ar_pci->pdev->irq + i,
				  ath10k_pci_per_engine_handler,
				  IRQF_SHARED, "ath10k_pci", ar);
		if (ret) {
			ath10k_warn("request_irq(%d) failed %d\n",
				    ar_pci->pdev->irq + i, ret);

			for (i--; i >= MSI_ASSIGN_CE_INITIAL; i--)
				free_irq(ar_pci->pdev->irq + i, ar);

			free_irq(ar_pci->pdev->irq + MSI_ASSIGN_FW, ar);
			pci_disable_msi(ar_pci->pdev);
			return ret;
		}
	}

	ath10k_info("MSI-X interrupt handling (%d intrs)\n", num);
	return 0;
}
static int ath10k_pci_start_intr_msi(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	int ret;

	ret = pci_enable_msi(ar_pci->pdev);
	if (ret < 0)
		return ret;

	ret = request_irq(ar_pci->pdev->irq,
			  ath10k_pci_interrupt_handler,
			  IRQF_SHARED, "ath10k_pci", ar);
	if (ret < 0) {
		pci_disable_msi(ar_pci->pdev);
		return ret;
	}

	ath10k_info("MSI interrupt handling\n");
	return 0;
}
static int ath10k_pci_start_intr_legacy(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	int ret;

	ret = request_irq(ar_pci->pdev->irq,
			  ath10k_pci_interrupt_handler,
			  IRQF_SHARED, "ath10k_pci", ar);
	if (ret < 0)
		return ret;

	/*
	 * Make sure to wake the Target before enabling Legacy
	 * Interrupt.
	 */
	iowrite32(PCIE_SOC_WAKE_V_MASK,
		  ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS +
		  PCIE_SOC_WAKE_ADDRESS);

	ath10k_pci_wait(ar);

	/*
	 * A potential race occurs here: The CORE_BASE write
	 * depends on target correctly decoding AXI address but
	 * host won't know when target writes BAR to CORE_CTRL.
	 * This write might get lost if target has NOT written BAR.
	 * For now, fix the race by repeating the write in below
	 * synchronization checking.
	 */
	iowrite32(PCIE_INTR_FIRMWARE_MASK |
		  PCIE_INTR_CE_MASK_ALL,
		  ar_pci->mem + (SOC_CORE_BASE_ADDRESS |
				 PCIE_INTR_ENABLE_ADDRESS));
	iowrite32(PCIE_SOC_WAKE_RESET,
		  ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS +
		  PCIE_SOC_WAKE_ADDRESS);

	ath10k_info("legacy interrupt handling\n");
	return 0;
}
static int ath10k_pci_start_intr(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	int num = MSI_NUM_REQUEST;
	int ret;
	int i;

	tasklet_init(&ar_pci->intr_tq, ath10k_pci_tasklet, (unsigned long) ar);
	tasklet_init(&ar_pci->msi_fw_err, ath10k_msi_err_tasklet,
		     (unsigned long) ar);

	for (i = 0; i < CE_COUNT; i++) {
		ar_pci->pipe_info[i].ar_pci = ar_pci;
		tasklet_init(&ar_pci->pipe_info[i].intr,
			     ath10k_pci_ce_tasklet,
			     (unsigned long)&ar_pci->pipe_info[i]);
	}

	if (!test_bit(ATH10K_PCI_FEATURE_MSI_X, ar_pci->features))
		num = 1;

	if (num > 1) {
		ret = ath10k_pci_start_intr_msix(ar, num);
		if (ret == 0)
			goto exit;

		ath10k_warn("MSI-X didn't succeed (%d), trying MSI\n", ret);
		num = 1;
	}

	if (num == 1) {
		ret = ath10k_pci_start_intr_msi(ar);
		if (ret == 0)
			goto exit;

		ath10k_warn("MSI didn't succeed (%d), trying legacy INTR\n",
			    ret);
		num = 0;
	}

	ret = ath10k_pci_start_intr_legacy(ar);

exit:
	ar_pci->num_msi_intrs = num;
	ar_pci->ce_count = CE_COUNT;
	return ret;
}
static void ath10k_pci_stop_intr(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	int i;

	/* There's at least one interrupt regardless of whether it's legacy
	 * INTR or MSI or MSI-X */
	for (i = 0; i < max(1, ar_pci->num_msi_intrs); i++)
		free_irq(ar_pci->pdev->irq + i, ar);

	if (ar_pci->num_msi_intrs > 0)
		pci_disable_msi(ar_pci->pdev);
}
static int ath10k_pci_reset_target(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	int wait_limit = 300; /* 3 sec */

	/* Wait for Target to finish initialization before we proceed. */
	iowrite32(PCIE_SOC_WAKE_V_MASK,
		  ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS +
		  PCIE_SOC_WAKE_ADDRESS);

	ath10k_pci_wait(ar);

	while (wait_limit-- &&
	       !(ioread32(ar_pci->mem + FW_INDICATOR_ADDRESS) &
		 FW_IND_INITIALIZED)) {
		if (ar_pci->num_msi_intrs == 0)
			/* Fix potential race by repeating CORE_BASE writes */
			iowrite32(PCIE_INTR_FIRMWARE_MASK |
				  PCIE_INTR_CE_MASK_ALL,
				  ar_pci->mem + (SOC_CORE_BASE_ADDRESS |
						 PCIE_INTR_ENABLE_ADDRESS));
		mdelay(10);
	}

	if (wait_limit < 0) {
		ath10k_err("Target stalled\n");
		iowrite32(PCIE_SOC_WAKE_RESET,
			  ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS +
			  PCIE_SOC_WAKE_ADDRESS);
		return -EIO;
	}

	iowrite32(PCIE_SOC_WAKE_RESET,
		  ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS +
		  PCIE_SOC_WAKE_ADDRESS);

	return 0;
}
static void ath10k_pci_device_reset(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	void __iomem *mem = ar_pci->mem;
	int i;
	u32 val;

	if (!SOC_GLOBAL_RESET_ADDRESS)
		return;

	if (!mem)
		return;

	ath10k_pci_reg_write32(mem, PCIE_SOC_WAKE_ADDRESS,
			       PCIE_SOC_WAKE_V_MASK);
	for (i = 0; i < ATH_PCI_RESET_WAIT_MAX; i++) {
		if (ath10k_pci_target_is_awake(ar))
			break;
		msleep(1);
	}

	/* Put Target, including PCIe, into RESET. */
	val = ath10k_pci_reg_read32(mem, SOC_GLOBAL_RESET_ADDRESS);
	val |= 1;
	ath10k_pci_reg_write32(mem, SOC_GLOBAL_RESET_ADDRESS, val);

	for (i = 0; i < ATH_PCI_RESET_WAIT_MAX; i++) {
		if (ath10k_pci_reg_read32(mem, RTC_STATE_ADDRESS) &
					  RTC_STATE_COLD_RESET_MASK)
			break;
		msleep(1);
	}

	/* Pull Target, including PCIe, out of RESET. */
	val &= ~1;
	ath10k_pci_reg_write32(mem, SOC_GLOBAL_RESET_ADDRESS, val);

	for (i = 0; i < ATH_PCI_RESET_WAIT_MAX; i++) {
		if (!(ath10k_pci_reg_read32(mem, RTC_STATE_ADDRESS) &
					    RTC_STATE_COLD_RESET_MASK))
			break;
		msleep(1);
	}

	ath10k_pci_reg_write32(mem, PCIE_SOC_WAKE_ADDRESS, PCIE_SOC_WAKE_RESET);
}
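
/*
 * Editor's sketch (compiled out): the poll-with-timeout idiom used by
 * ath10k_pci_device_reset() above, factored out for clarity. The helper
 * name and callback type are hypothetical, not part of the driver.
 */
#if 0
static bool example_poll_cond(bool (*cond)(struct ath10k *ar),
			      struct ath10k *ar, int max_ms)
{
	int i;

	for (i = 0; i < max_ms; i++) {
		if (cond(ar))
			return true;
		msleep(1);
	}
	return false;
}
#endif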
static void ath10k_pci_dump_features(struct ath10k_pci *ar_pci)
{
	int i;

	for (i = 0; i < ATH10K_PCI_FEATURE_COUNT; i++) {
		if (!test_bit(i, ar_pci->features))
			continue;

		switch (i) {
		case ATH10K_PCI_FEATURE_MSI_X:
			ath10k_dbg(ATH10K_DBG_PCI, "device supports MSI-X\n");
			break;
		case ATH10K_PCI_FEATURE_SOC_POWER_SAVE:
			ath10k_dbg(ATH10K_DBG_PCI, "QCA98XX SoC power save enabled\n");
			break;
		}
	}
}
static int ath10k_pci_probe(struct pci_dev *pdev,
			    const struct pci_device_id *pci_dev)
{
	void __iomem *mem;
	int ret = 0;
	struct ath10k *ar;
	struct ath10k_pci *ar_pci;
	u32 lcr_val;

	ath10k_dbg(ATH10K_DBG_PCI, "%s\n", __func__);

	ar_pci = kzalloc(sizeof(*ar_pci), GFP_KERNEL);
	if (ar_pci == NULL)
		return -ENOMEM;

	ar_pci->pdev = pdev;
	ar_pci->dev = &pdev->dev;

	switch (pci_dev->device) {
	case QCA988X_2_0_DEVICE_ID:
		set_bit(ATH10K_PCI_FEATURE_MSI_X, ar_pci->features);
		break;
	default:
		ret = -ENODEV;
		ath10k_err("Unknown device ID: %d\n", pci_dev->device);
		goto err_ar_pci;
	}

	if (ath10k_target_ps)
		set_bit(ATH10K_PCI_FEATURE_SOC_POWER_SAVE, ar_pci->features);

	ath10k_pci_dump_features(ar_pci);

	ar = ath10k_core_create(ar_pci, ar_pci->dev, &ath10k_pci_hif_ops);
	if (!ar) {
		ath10k_err("ath10k_core_create failed!\n");
		ret = -EINVAL;
		goto err_ar_pci;
	}

	ar_pci->ar = ar;
	ar_pci->fw_indicator_address = FW_INDICATOR_ADDRESS;
	atomic_set(&ar_pci->keep_awake_count, 0);

	pci_set_drvdata(pdev, ar);

	/*
	 * Without any knowledge of the Host, the Target may have been reset or
	 * power cycled and its Config Space may no longer reflect the PCI
	 * address space that was assigned earlier by the PCI infrastructure.
	 * Refresh it now.
	 */
	ret = pci_assign_resource(pdev, BAR_NUM);
	if (ret) {
		ath10k_err("cannot assign PCI space: %d\n", ret);
		goto err_ar;
	}

	ret = pci_enable_device(pdev);
	if (ret) {
		ath10k_err("cannot enable PCI device: %d\n", ret);
		goto err_ar;
	}

	/* Request MMIO resources */
	ret = pci_request_region(pdev, BAR_NUM, "ath");
	if (ret) {
		ath10k_err("PCI MMIO reservation error: %d\n", ret);
		goto err_device;
	}

	/*
	 * Target structures have a limit of 32 bit DMA pointers.
	 * DMA pointers can be wider than 32 bits by default on some systems.
	 */
	ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
	if (ret) {
		ath10k_err("32-bit DMA not available: %d\n", ret);
		goto err_region;
	}

	ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
	if (ret) {
		ath10k_err("cannot enable 32-bit consistent DMA\n");
		goto err_region;
	}

	/* Set bus master bit in PCI_COMMAND to enable DMA */
	pci_set_master(pdev);

	/*
	 * Temporary FIX: disable ASPM
	 * Will be removed after the OTP is programmed
	 */
	pci_read_config_dword(pdev, 0x80, &lcr_val);
	pci_write_config_dword(pdev, 0x80, (lcr_val & 0xffffff00));

	/* Arrange for access to Target SoC registers. */
	mem = pci_iomap(pdev, BAR_NUM, 0);
	if (!mem) {
		ath10k_err("PCI iomap error\n");
		ret = -EIO;
		goto err_master;
	}

	ar_pci->mem = mem;

	spin_lock_init(&ar_pci->ce_lock);

	ar_pci->cacheline_sz = dma_get_cache_alignment();

	ret = ath10k_core_register(ar);
	if (ret) {
		ath10k_err("could not register driver core (%d)\n", ret);
		goto err_iomap;
	}

	return 0;

err_iomap:
	pci_iounmap(pdev, mem);
err_master:
	pci_clear_master(pdev);
err_region:
	pci_release_region(pdev, BAR_NUM);
err_device:
	pci_disable_device(pdev);
err_ar:
	pci_set_drvdata(pdev, NULL);
	ath10k_core_destroy(ar);
err_ar_pci:
	/* call HIF PCI free here */
	kfree(ar_pci);

	return ret;
}
static void ath10k_pci_remove(struct pci_dev *pdev)
{
	struct ath10k *ar = pci_get_drvdata(pdev);
	struct ath10k_pci *ar_pci;

	ath10k_dbg(ATH10K_DBG_PCI, "%s\n", __func__);

	if (!ar)
		return;

	ar_pci = ath10k_pci_priv(ar);

	if (!ar_pci)
		return;

	tasklet_kill(&ar_pci->msi_fw_err);

	ath10k_core_unregister(ar);

	pci_set_drvdata(pdev, NULL);
	pci_iounmap(pdev, ar_pci->mem);
	pci_release_region(pdev, BAR_NUM);
	pci_clear_master(pdev);
	pci_disable_device(pdev);

	ath10k_core_destroy(ar);
	kfree(ar_pci);
}
MODULE_DEVICE_TABLE(pci, ath10k_pci_id_table);

static struct pci_driver ath10k_pci_driver = {
	.name = "ath10k_pci",
	.id_table = ath10k_pci_id_table,
	.probe = ath10k_pci_probe,
	.remove = ath10k_pci_remove,
};
static int __init ath10k_pci_init(void)
{
	int ret;

	ret = pci_register_driver(&ath10k_pci_driver);
	if (ret)
		ath10k_err("pci_register_driver failed [%d]\n", ret);

	return ret;
}
module_init(ath10k_pci_init);
static void __exit ath10k_pci_exit(void)
{
	pci_unregister_driver(&ath10k_pci_driver);
}

module_exit(ath10k_pci_exit);
MODULE_AUTHOR("Qualcomm Atheros");
MODULE_DESCRIPTION("Driver support for Atheros QCA988X PCIe devices");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" QCA988X_HW_2_0_FW_FILE);
MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" QCA988X_HW_2_0_OTP_FILE);
MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" QCA988X_HW_2_0_BOARD_DATA_FILE);