/*
 * Copyright (c) 2005-2011 Atheros Communications Inc.
 * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */
#include <linux/pci.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>

#include "targaddrs.h"
static unsigned int ath10k_target_ps;
module_param(ath10k_target_ps, uint, 0644);
MODULE_PARM_DESC(ath10k_target_ps, "Enable ath10k Target (SoC) PS option");
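/*
 * Note: with ath10k_target_ps left at 0 (the default), the probe path never
 * sets ATH10K_PCI_FEATURE_SOC_POWER_SAVE, so hif_power_up() below forces the
 * SoC AWAKE until hif_power_down() instead of letting it sleep between
 * register accesses.
 */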
#define QCA988X_2_0_DEVICE_ID	(0x003c)

static DEFINE_PCI_DEVICE_TABLE(ath10k_pci_id_table) = {
	{ PCI_VDEVICE(ATHEROS, QCA988X_2_0_DEVICE_ID) }, /* PCI-E QCA988X V2 */
	{0}
};
static int ath10k_pci_diag_read_access(struct ath10k *ar, u32 address,
				       u32 *data);

static void ath10k_pci_process_ce(struct ath10k *ar);
static int ath10k_pci_post_rx(struct ath10k *ar);
static int ath10k_pci_post_rx_pipe(struct ath10k_pci_pipe *pipe_info,
				   int num);
static void ath10k_pci_rx_pipe_cleanup(struct ath10k_pci_pipe *pipe_info);
static void ath10k_pci_stop_ce(struct ath10k *ar);
static void ath10k_pci_device_reset(struct ath10k *ar);
static int ath10k_pci_reset_target(struct ath10k *ar);
static int ath10k_pci_start_intr(struct ath10k *ar);
static void ath10k_pci_stop_intr(struct ath10k *ar);
static const struct ce_attr host_ce_config_wlan[] = {
	/* CE0: host->target HTC control and raw streams */
		.flags = CE_ATTR_FLAGS,

	/* CE1: target->host HTT + HTC control */
		.flags = CE_ATTR_FLAGS,

	/* CE2: target->host WMI */
		.flags = CE_ATTR_FLAGS,

	/* CE3: host->target WMI */
		.flags = CE_ATTR_FLAGS,

	/* CE4: host->target HTT */
		.flags = CE_ATTR_FLAGS | CE_ATTR_DIS_INTR,
		.src_nentries = CE_HTT_H2T_MSG_SRC_NENTRIES,

		.flags = CE_ATTR_FLAGS,

	/* CE6: target autonomous hif_memcpy */
		.flags = CE_ATTR_FLAGS,

	/* CE7: ce_diag, the Diagnostic Window */
		.flags = CE_ATTR_FLAGS,
		.src_sz_max = DIAG_TRANSFER_LIMIT,
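/*
 * Each host-side ce_attr above describes one Copy Engine ring pair:
 * .src_nentries sizes the host->target send ring, .dest_nentries the
 * target->host receive ring, and .src_sz_max bounds a single transfer.
 * CE_ATTR_DIS_INTR on CE4 leaves that pipe's send completions to be polled
 * via ath10k_pci_hif_send_complete_check() rather than interrupt driven.
 */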
/* Target firmware's Copy Engine configuration. */
static const struct ce_pipe_config target_ce_config_wlan[] = {
	/* host->target HTC control and raw streams */
	{ /* CE0 */ 0, PIPEDIR_OUT, 32, 256, CE_ATTR_FLAGS, 0,},

	/* target->host HTT + HTC control */
	{ /* CE1 */ 1, PIPEDIR_IN, 32, 512, CE_ATTR_FLAGS, 0,},

	/* target->host WMI */
	{ /* CE2 */ 2, PIPEDIR_IN, 32, 2048, CE_ATTR_FLAGS, 0,},

	/* host->target WMI */
	{ /* CE3 */ 3, PIPEDIR_OUT, 32, 2048, CE_ATTR_FLAGS, 0,},

	/* host->target HTT */
	{ /* CE4 */ 4, PIPEDIR_OUT, 256, 256, CE_ATTR_FLAGS, 0,},

	/* NB: 50% of src nentries, since tx has 2 frags */
	{ /* CE5 */ 5, PIPEDIR_OUT, 32, 2048, CE_ATTR_FLAGS, 0,},

	/* Reserved for target autonomous hif_memcpy */
	{ /* CE6 */ 6, PIPEDIR_INOUT, 32, 4096, CE_ATTR_FLAGS, 0,},

	/* CE7 used only by Host */
/*
 * Diagnostic read/write access is provided for startup/config/debug usage.
 * Caller must guarantee proper alignment, when applicable, and single user
 * at a time.
 */
static int ath10k_pci_diag_read_mem(struct ath10k *ar, u32 address, void *data,
				    int nbytes)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	int ret = 0;
	u32 buf;
	unsigned int completed_nbytes, orig_nbytes, remaining_bytes;
	unsigned int id;
	unsigned int flags;
	struct ath10k_ce_pipe *ce_diag;
	/* Host buffer address in CE space */
	u32 ce_data;
	dma_addr_t ce_data_base = 0;
	void *data_buf = NULL;
	int i;

	/*
	 * This code cannot handle reads to non-memory space. Redirect to the
	 * register read fn but preserve the multi word read capability of
	 * this fn.
	 */
	if (address < DRAM_BASE_ADDRESS) {
		if (!IS_ALIGNED(address, 4) ||
		    !IS_ALIGNED((unsigned long)data, 4))
			return -EIO;

		while ((nbytes >= 4) && ((ret = ath10k_pci_diag_read_access(
					   ar, address, (u32 *)data)) == 0)) {
			nbytes -= sizeof(u32);
			address += sizeof(u32);
			data += sizeof(u32);
		}
		return ret;
	}

	ce_diag = ar_pci->ce_diag;

	/*
	 * Allocate a temporary bounce buffer to hold caller's data
	 * to be DMA'ed from Target. This guarantees
	 *   1) 4-byte alignment
	 *   2) Buffer in DMA-able space
	 */
	orig_nbytes = nbytes;
	data_buf = (unsigned char *)pci_alloc_consistent(ar_pci->pdev,
							 orig_nbytes,
							 &ce_data_base);
	if (!data_buf) {
		ret = -ENOMEM;
		goto done;
	}
	memset(data_buf, 0, orig_nbytes);

	remaining_bytes = orig_nbytes;
	ce_data = ce_data_base;
	while (remaining_bytes) {
		nbytes = min_t(unsigned int, remaining_bytes,
			       DIAG_TRANSFER_LIMIT);

		ret = ath10k_ce_recv_buf_enqueue(ce_diag, NULL, ce_data);
		if (ret != 0)
			goto done;

		/* Request CE to send from Target(!) address to Host buffer */
		/*
		 * The address supplied by the caller is in the
		 * Target CPU virtual address space.
		 *
		 * In order to use this address with the diagnostic CE,
		 * convert it from Target CPU virtual address space
		 * to CE address space.
		 */
		ath10k_pci_wake(ar);
		address = TARG_CPU_SPACE_TO_CE_SPACE(ar, ar_pci->mem,
						     address);
		ath10k_pci_sleep(ar);

		ret = ath10k_ce_send(ce_diag, NULL, (u32)address, nbytes, 0,
				     0);
		if (ret)
			goto done;

		i = 0;
		while (ath10k_ce_completed_send_next(ce_diag, NULL, &buf,
						     &completed_nbytes,
						     &id) != 0) {
			mdelay(1);
			if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) {
				ret = -EBUSY;
				goto done;
			}
		}

		if (nbytes != completed_nbytes) {
			ret = -EIO;
			goto done;
		}

		if (buf != (u32) address) {
			ret = -EIO;
			goto done;
		}

		i = 0;
		while (ath10k_ce_completed_recv_next(ce_diag, NULL, &buf,
						     &completed_nbytes,
						     &id, &flags) != 0) {
			mdelay(1);
			if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) {
				ret = -EBUSY;
				goto done;
			}
		}

		if (nbytes != completed_nbytes) {
			ret = -EIO;
			goto done;
		}

		if (buf != ce_data) {
			ret = -EIO;
			goto done;
		}

		remaining_bytes -= nbytes;
		address += nbytes;
		ce_data += nbytes;
	}

done:
	if (ret == 0) {
		/* Copy data from allocated DMA buf to caller's buf */
		WARN_ON_ONCE(orig_nbytes & 3);
		for (i = 0; i < orig_nbytes / sizeof(__le32); i++) {
			((u32 *)data)[i] =
				__le32_to_cpu(((__le32 *)data_buf)[i]);
		}
	} else
		ath10k_dbg(ATH10K_DBG_PCI, "%s failure (0x%x)\n",
			   __func__, address);

	if (data_buf)
		pci_free_consistent(ar_pci->pdev, orig_nbytes,
				    data_buf, ce_data_base);

	return ret;
}
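/*
 * Illustrative (hypothetical) caller, e.g. from a debugfs hook -- not part
 * of this driver: read one word of Target RAM through the Diagnostic CE.
 *
 *	u32 word;
 *	int ret = ath10k_pci_diag_read_mem(ar, DRAM_BASE_ADDRESS + 0x1000,
 *					   &word, sizeof(word));
 *	if (ret)
 *		ath10k_warn("diag read failed: %d\n", ret);
 */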
/* Read 4-byte aligned data from Target memory or register */
static int ath10k_pci_diag_read_access(struct ath10k *ar, u32 address,
				       u32 *data)
{
	/* Assume range doesn't cross this boundary */
	if (address >= DRAM_BASE_ADDRESS)
		return ath10k_pci_diag_read_mem(ar, address, data, sizeof(u32));

	ath10k_pci_wake(ar);
	*data = ath10k_pci_read32(ar, address);
	ath10k_pci_sleep(ar);
	return 0;
}
static int ath10k_pci_diag_write_mem(struct ath10k *ar, u32 address,
				     const void *data, int nbytes)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	int ret = 0;
	u32 buf;
	unsigned int completed_nbytes, orig_nbytes, remaining_bytes;
	unsigned int id;
	unsigned int flags;
	struct ath10k_ce_pipe *ce_diag;
	void *data_buf = NULL;
	u32 ce_data;	/* Host buffer address in CE space */
	dma_addr_t ce_data_base = 0;
	int i;

	ce_diag = ar_pci->ce_diag;

	/*
	 * Allocate a temporary bounce buffer to hold caller's data
	 * to be DMA'ed to Target. This guarantees
	 *   1) 4-byte alignment
	 *   2) Buffer in DMA-able space
	 */
	orig_nbytes = nbytes;
	data_buf = (unsigned char *)pci_alloc_consistent(ar_pci->pdev,
							 orig_nbytes,
							 &ce_data_base);
	if (!data_buf) {
		ret = -ENOMEM;
		goto done;
	}

	/* Copy caller's data to allocated DMA buf */
	WARN_ON_ONCE(orig_nbytes & 3);
	for (i = 0; i < orig_nbytes / sizeof(__le32); i++)
		((__le32 *)data_buf)[i] = __cpu_to_le32(((u32 *)data)[i]);

	/*
	 * The address supplied by the caller is in the
	 * Target CPU virtual address space.
	 *
	 * In order to use this address with the diagnostic CE,
	 * convert it from Target CPU virtual address space
	 * to CE address space.
	 */
	ath10k_pci_wake(ar);
	address = TARG_CPU_SPACE_TO_CE_SPACE(ar, ar_pci->mem, address);
	ath10k_pci_sleep(ar);

	remaining_bytes = orig_nbytes;
	ce_data = ce_data_base;
	while (remaining_bytes) {
		/* FIXME: check cast */
		nbytes = min_t(int, remaining_bytes, DIAG_TRANSFER_LIMIT);

		/* Set up to receive directly into Target(!) address */
		ret = ath10k_ce_recv_buf_enqueue(ce_diag, NULL, address);
		if (ret != 0)
			goto done;

		/*
		 * Request CE to send caller-supplied data that
		 * was copied to bounce buffer to Target(!) address.
		 */
		ret = ath10k_ce_send(ce_diag, NULL, (u32) ce_data,
				     nbytes, 0, 0);
		if (ret != 0)
			goto done;

		i = 0;
		while (ath10k_ce_completed_send_next(ce_diag, NULL, &buf,
						     &completed_nbytes,
						     &id) != 0) {
			mdelay(1);
			if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) {
				ret = -EBUSY;
				goto done;
			}
		}

		if (nbytes != completed_nbytes) {
			ret = -EIO;
			goto done;
		}

		if (buf != ce_data) {
			ret = -EIO;
			goto done;
		}

		i = 0;
		while (ath10k_ce_completed_recv_next(ce_diag, NULL, &buf,
						     &completed_nbytes,
						     &id, &flags) != 0) {
			mdelay(1);
			if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) {
				ret = -EBUSY;
				goto done;
			}
		}

		if (nbytes != completed_nbytes) {
			ret = -EIO;
			goto done;
		}

		if (buf != address) {
			ret = -EIO;
			goto done;
		}

		remaining_bytes -= nbytes;
		address += nbytes;
		ce_data += nbytes;
	}

done:
	if (data_buf)
		pci_free_consistent(ar_pci->pdev, orig_nbytes, data_buf,
				    ce_data_base);

	if (ret != 0)
		ath10k_dbg(ATH10K_DBG_PCI, "%s failure (0x%x)\n", __func__,
			   address);

	return ret;
}
/* Write 4-byte aligned data to Target memory or register */
static int ath10k_pci_diag_write_access(struct ath10k *ar, u32 address,
					u32 data)
{
	/* Assume range doesn't cross this boundary */
	if (address >= DRAM_BASE_ADDRESS)
		return ath10k_pci_diag_write_mem(ar, address, &data,
						 sizeof(u32));

	ath10k_pci_wake(ar);
	ath10k_pci_write32(ar, address, data);
	ath10k_pci_sleep(ar);
	return 0;
}
static bool ath10k_pci_target_is_awake(struct ath10k *ar)
{
	void __iomem *mem = ath10k_pci_priv(ar)->mem;
	u32 val;

	val = ioread32(mem + PCIE_LOCAL_BASE_ADDRESS +
		       RTC_STATE_ADDRESS);
	return (RTC_STATE_V_GET(val) == RTC_STATE_V_ON);
}

static void ath10k_pci_wait(struct ath10k *ar)
{
	int n = 100;

	while (n-- && !ath10k_pci_target_is_awake(ar))
		msleep(10);

	if (n < 0)
		ath10k_warn("Unable to wake up target\n");
}
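/*
 * Keep-awake accounting: every ath10k_do_pci_wake() must be paired with an
 * ath10k_do_pci_sleep(). The first waker forces the SoC AWAKE and then spins
 * (bounded by PCIE_WAKE_TIMEOUT) until RTC_STATE confirms it; only when the
 * keep_awake_count refcount drops back to zero is the SoC allowed to sleep
 * again.
 */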
void ath10k_do_pci_wake(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	void __iomem *pci_addr = ar_pci->mem;
	int tot_delay = 0;
	int curr_delay = 5;

	if (atomic_read(&ar_pci->keep_awake_count) == 0) {
		/* Force AWAKE */
		iowrite32(PCIE_SOC_WAKE_V_MASK,
			  pci_addr + PCIE_LOCAL_BASE_ADDRESS +
			  PCIE_SOC_WAKE_ADDRESS);
	}
	atomic_inc(&ar_pci->keep_awake_count);

	if (ar_pci->verified_awake)
		return;

	for (;;) {
		if (ath10k_pci_target_is_awake(ar)) {
			ar_pci->verified_awake = true;
			break;
		}

		if (tot_delay > PCIE_WAKE_TIMEOUT) {
			ath10k_warn("target takes too long to wake up (awake count %d)\n",
				    atomic_read(&ar_pci->keep_awake_count));
			break;
		}

		udelay(curr_delay);
		tot_delay += curr_delay;

		if (curr_delay < 50)
			curr_delay += 5;
	}
}

void ath10k_do_pci_sleep(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	void __iomem *pci_addr = ar_pci->mem;

	if (atomic_dec_and_test(&ar_pci->keep_awake_count)) {
		/* Allow sleep */
		ar_pci->verified_awake = false;
		iowrite32(PCIE_SOC_WAKE_RESET,
			  pci_addr + PCIE_LOCAL_BASE_ADDRESS +
			  PCIE_SOC_WAKE_ADDRESS);
	}
}
/*
 * FIXME: Handle OOM properly.
 */
static inline
struct ath10k_pci_compl *get_free_compl(struct ath10k_pci_pipe *pipe_info)
{
	struct ath10k_pci_compl *compl = NULL;

	spin_lock_bh(&pipe_info->pipe_lock);
	if (list_empty(&pipe_info->compl_free)) {
		ath10k_warn("Completion buffers are full\n");
		goto exit;
	}
	compl = list_first_entry(&pipe_info->compl_free,
				 struct ath10k_pci_compl, list);
	list_del(&compl->list);
exit:
	spin_unlock_bh(&pipe_info->pipe_lock);
	return compl;
}
/* Called by lower (CE) layer when a send to Target completes. */
static void ath10k_pci_ce_send_done(struct ath10k_ce_pipe *ce_state,
				    void *transfer_context,
				    u32 ce_data,
				    unsigned int nbytes,
				    unsigned int transfer_id)
{
	struct ath10k *ar = ce_state->ar;
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	struct ath10k_pci_pipe *pipe_info = &ar_pci->pipe_info[ce_state->id];
	struct ath10k_pci_compl *compl;
	bool process = false;

	do {
		/*
		 * For the send completion of an item in sendlist, just
		 * increment num_sends_allowed. The upper layer callback will
		 * be triggered when last fragment is done with send.
		 */
		if (transfer_context == CE_SENDLIST_ITEM_CTXT) {
			spin_lock_bh(&pipe_info->pipe_lock);
			pipe_info->num_sends_allowed++;
			spin_unlock_bh(&pipe_info->pipe_lock);
			continue;
		}

		compl = get_free_compl(pipe_info);
		if (!compl)
			break;

		compl->state = ATH10K_PCI_COMPL_SEND;
		compl->ce_state = ce_state;
		compl->pipe_info = pipe_info;
		compl->transfer_context = transfer_context;
		compl->nbytes = nbytes;
		compl->transfer_id = transfer_id;

		/*
		 * Add the completion to the processing queue.
		 */
		spin_lock_bh(&ar_pci->compl_lock);
		list_add_tail(&compl->list, &ar_pci->compl_process);
		spin_unlock_bh(&ar_pci->compl_lock);

		process = true;
	} while (ath10k_ce_completed_send_next(ce_state,
					       &transfer_context,
					       &ce_data, &nbytes,
					       &transfer_id) == 0);

	/*
	 * If only some of the items within a sendlist have completed,
	 * don't invoke completion processing until the entire sendlist
	 * has been sent.
	 */
	if (!process)
		return;

	ath10k_pci_process_ce(ar);
}
/* Called by lower (CE) layer when data is received from the Target. */
static void ath10k_pci_ce_recv_data(struct ath10k_ce_pipe *ce_state,
				    void *transfer_context, u32 ce_data,
				    unsigned int nbytes,
				    unsigned int transfer_id,
				    unsigned int flags)
{
	struct ath10k *ar = ce_state->ar;
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	struct ath10k_pci_pipe *pipe_info = &ar_pci->pipe_info[ce_state->id];
	struct ath10k_pci_compl *compl;
	struct sk_buff *skb;

	do {
		compl = get_free_compl(pipe_info);
		if (!compl)
			break;

		compl->state = ATH10K_PCI_COMPL_RECV;
		compl->ce_state = ce_state;
		compl->pipe_info = pipe_info;
		compl->transfer_context = transfer_context;
		compl->nbytes = nbytes;
		compl->transfer_id = transfer_id;
		compl->flags = flags;

		skb = transfer_context;
		dma_unmap_single(ar->dev, ATH10K_SKB_CB(skb)->paddr,
				 skb->len + skb_tailroom(skb),
				 DMA_FROM_DEVICE);
		/*
		 * Add the completion to the processing queue.
		 */
		spin_lock_bh(&ar_pci->compl_lock);
		list_add_tail(&compl->list, &ar_pci->compl_process);
		spin_unlock_bh(&ar_pci->compl_lock);
	} while (ath10k_ce_completed_recv_next(ce_state,
					       &transfer_context,
					       &ce_data, &nbytes,
					       &transfer_id,
					       &flags) == 0);

	ath10k_pci_process_ce(ar);
}
/* Send the first nbytes bytes of the buffer */
static int ath10k_pci_hif_send_head(struct ath10k *ar, u8 pipe_id,
				    unsigned int transfer_id,
				    unsigned int bytes, struct sk_buff *nbuf)
{
	struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(nbuf);
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	struct ath10k_pci_pipe *pipe_info = &(ar_pci->pipe_info[pipe_id]);
	struct ath10k_ce_pipe *ce_hdl = pipe_info->ce_hdl;
	struct ce_sendlist sendlist;
	unsigned int len;
	u32 flags = 0;
	int ret;

	memset(&sendlist, 0, sizeof(struct ce_sendlist));

	len = min(bytes, nbuf->len);
	bytes -= len;

	if (len & 3)
		ath10k_warn("skb not aligned to 4-byte boundary (%d)\n", len);

	ath10k_dbg(ATH10K_DBG_PCI,
		   "pci send data vaddr %p paddr 0x%llx len %d as %d bytes\n",
		   nbuf->data, (unsigned long long) skb_cb->paddr,
		   nbuf->len, len);
	ath10k_dbg_dump(ATH10K_DBG_PCI_DUMP, NULL,
			"ath10k tx: data: ",
			nbuf->data, nbuf->len);

	ath10k_ce_sendlist_buf_add(&sendlist, skb_cb->paddr, len, flags);

	/* Make sure we have resources to handle this request */
	spin_lock_bh(&pipe_info->pipe_lock);
	if (!pipe_info->num_sends_allowed) {
		ath10k_warn("Pipe: %d is full\n", pipe_id);
		spin_unlock_bh(&pipe_info->pipe_lock);
		return -ENOSR;
	}
	pipe_info->num_sends_allowed--;
	spin_unlock_bh(&pipe_info->pipe_lock);

	ret = ath10k_ce_sendlist_send(ce_hdl, nbuf, &sendlist, transfer_id);
	if (ret)
		ath10k_warn("CE send failed: %p\n", nbuf);

	return ret;
}
static u16 ath10k_pci_hif_get_free_queue_number(struct ath10k *ar, u8 pipe)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	struct ath10k_pci_pipe *pipe_info = &(ar_pci->pipe_info[pipe]);
	u16 ret;

	spin_lock_bh(&pipe_info->pipe_lock);
	ret = pipe_info->num_sends_allowed;
	spin_unlock_bh(&pipe_info->pipe_lock);

	return ret;
}
static void ath10k_pci_hif_dump_area(struct ath10k *ar)
{
	u32 reg_dump_area = 0;
	u32 reg_dump_values[REG_DUMP_COUNT_QCA988X] = {};
	u32 host_addr;
	int ret;
	u32 i;

	ath10k_err("firmware crashed!\n");
	ath10k_err("hardware name %s version 0x%x\n",
		   ar->hw_params.name, ar->target_version);
	ath10k_err("firmware version: %u.%u.%u.%u\n", ar->fw_version_major,
		   ar->fw_version_minor, ar->fw_version_release,
		   ar->fw_version_build);

	host_addr = host_interest_item_address(HI_ITEM(hi_failure_state));
	if (ath10k_pci_diag_read_mem(ar, host_addr,
				     &reg_dump_area, sizeof(u32)) != 0) {
		ath10k_warn("could not read hi_failure_state\n");
		return;
	}

	ath10k_err("target register Dump Location: 0x%08X\n", reg_dump_area);

	ret = ath10k_pci_diag_read_mem(ar, reg_dump_area,
				       &reg_dump_values[0],
				       REG_DUMP_COUNT_QCA988X * sizeof(u32));
	if (ret != 0) {
		ath10k_err("could not dump FW Dump Area\n");
		return;
	}

	BUILD_BUG_ON(REG_DUMP_COUNT_QCA988X % 4);

	ath10k_err("target Register Dump\n");
	for (i = 0; i < REG_DUMP_COUNT_QCA988X; i += 4)
		ath10k_err("[%02d]: 0x%08X 0x%08X 0x%08X 0x%08X\n",
			   i,
			   reg_dump_values[i],
			   reg_dump_values[i + 1],
			   reg_dump_values[i + 2],
			   reg_dump_values[i + 3]);

	ieee80211_queue_work(ar->hw, &ar->restart_work);
}
static void ath10k_pci_hif_send_complete_check(struct ath10k *ar, u8 pipe,
					       int force)
{
	if (!force) {
		int resources;
		/*
		 * Decide whether to actually poll for completions, or just
		 * wait for a later chance.
		 * If there seem to be plenty of resources left, then just
		 * wait, since checking involves reading a CE register, which
		 * is a relatively expensive operation.
		 */
		resources = ath10k_pci_hif_get_free_queue_number(ar, pipe);

		/*
		 * If at least 50% of the total resources are still available,
		 * don't bother checking again yet.
		 */
		if (resources > (host_ce_config_wlan[pipe].src_nentries >> 1))
			return;
	}
	ath10k_ce_per_engine_service(ar, pipe);
}
static void ath10k_pci_hif_set_callbacks(struct ath10k *ar,
					 struct ath10k_hif_cb *callbacks)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

	ath10k_dbg(ATH10K_DBG_PCI, "%s\n", __func__);

	memcpy(&ar_pci->msg_callbacks_current, callbacks,
	       sizeof(ar_pci->msg_callbacks_current));
}
static int ath10k_pci_start_ce(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	struct ath10k_ce_pipe *ce_diag = ar_pci->ce_diag;
	const struct ce_attr *attr;
	struct ath10k_pci_pipe *pipe_info;
	struct ath10k_pci_compl *compl;
	int i, pipe_num, completions, disable_interrupts;

	spin_lock_init(&ar_pci->compl_lock);
	INIT_LIST_HEAD(&ar_pci->compl_process);

	for (pipe_num = 0; pipe_num < ar_pci->ce_count; pipe_num++) {
		pipe_info = &ar_pci->pipe_info[pipe_num];

		spin_lock_init(&pipe_info->pipe_lock);
		INIT_LIST_HEAD(&pipe_info->compl_free);

		/* Handle Diagnostic CE specially */
		if (pipe_info->ce_hdl == ce_diag)
			continue;

		attr = &host_ce_config_wlan[pipe_num];
		completions = 0;

		if (attr->src_nentries) {
			disable_interrupts = attr->flags & CE_ATTR_DIS_INTR;
			ath10k_ce_send_cb_register(pipe_info->ce_hdl,
						   ath10k_pci_ce_send_done,
						   disable_interrupts);
			completions += attr->src_nentries;
			pipe_info->num_sends_allowed = attr->src_nentries - 1;
		}

		if (attr->dest_nentries) {
			ath10k_ce_recv_cb_register(pipe_info->ce_hdl,
						   ath10k_pci_ce_recv_data);
			completions += attr->dest_nentries;
		}

		if (completions == 0)
			continue;

		for (i = 0; i < completions; i++) {
			compl = kmalloc(sizeof(*compl), GFP_KERNEL);
			if (!compl) {
				ath10k_warn("No memory for completion state\n");
				ath10k_pci_stop_ce(ar);
				return -ENOMEM;
			}

			compl->state = ATH10K_PCI_COMPL_FREE;
			list_add_tail(&compl->list, &pipe_info->compl_free);
		}
	}

	return 0;
}
static void ath10k_pci_stop_ce(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	struct ath10k_pci_compl *compl;
	struct sk_buff *skb;
	int i;

	ath10k_ce_disable_interrupts(ar);

	/* Cancel the pending tasklet */
	tasklet_kill(&ar_pci->intr_tq);

	for (i = 0; i < CE_COUNT; i++)
		tasklet_kill(&ar_pci->pipe_info[i].intr);

	/* Mark pending completions as aborted, so that upper layers free up
	 * their associated resources */
	spin_lock_bh(&ar_pci->compl_lock);
	list_for_each_entry(compl, &ar_pci->compl_process, list) {
		skb = (struct sk_buff *)compl->transfer_context;
		ATH10K_SKB_CB(skb)->is_aborted = true;
	}
	spin_unlock_bh(&ar_pci->compl_lock);
}
static void ath10k_pci_cleanup_ce(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	struct ath10k_pci_compl *compl, *tmp;
	struct ath10k_pci_pipe *pipe_info;
	struct sk_buff *netbuf;
	int pipe_num;

	/* Free pending completions. */
	spin_lock_bh(&ar_pci->compl_lock);
	if (!list_empty(&ar_pci->compl_process))
		ath10k_warn("pending completions still present! possible memory leaks.\n");

	list_for_each_entry_safe(compl, tmp, &ar_pci->compl_process, list) {
		list_del(&compl->list);
		netbuf = (struct sk_buff *)compl->transfer_context;
		dev_kfree_skb_any(netbuf);
		kfree(compl);
	}
	spin_unlock_bh(&ar_pci->compl_lock);

	/* Free unused completions for each pipe. */
	for (pipe_num = 0; pipe_num < ar_pci->ce_count; pipe_num++) {
		pipe_info = &ar_pci->pipe_info[pipe_num];

		spin_lock_bh(&pipe_info->pipe_lock);
		list_for_each_entry_safe(compl, tmp,
					 &pipe_info->compl_free, list) {
			list_del(&compl->list);
			kfree(compl);
		}
		spin_unlock_bh(&pipe_info->pipe_lock);
	}
}
static void ath10k_pci_process_ce(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ar->hif.priv;
	struct ath10k_hif_cb *cb = &ar_pci->msg_callbacks_current;
	struct ath10k_pci_compl *compl;
	struct sk_buff *skb;
	unsigned int nbytes;
	int ret, send_done = 0;

	/* Upper layers aren't ready to handle tx/rx completions in parallel so
	 * we must serialize all completion processing. */

	spin_lock_bh(&ar_pci->compl_lock);
	if (ar_pci->compl_processing) {
		spin_unlock_bh(&ar_pci->compl_lock);
		return;
	}
	ar_pci->compl_processing = true;
	spin_unlock_bh(&ar_pci->compl_lock);

	for (;;) {
		spin_lock_bh(&ar_pci->compl_lock);
		if (list_empty(&ar_pci->compl_process)) {
			spin_unlock_bh(&ar_pci->compl_lock);
			break;
		}
		compl = list_first_entry(&ar_pci->compl_process,
					 struct ath10k_pci_compl, list);
		list_del(&compl->list);
		spin_unlock_bh(&ar_pci->compl_lock);

		switch (compl->state) {
		case ATH10K_PCI_COMPL_SEND:
			cb->tx_completion(ar,
					  compl->transfer_context,
					  compl->transfer_id);
			send_done = 1;
			break;
		case ATH10K_PCI_COMPL_RECV:
			ret = ath10k_pci_post_rx_pipe(compl->pipe_info, 1);
			if (ret) {
				ath10k_warn("Unable to post recv buffer for pipe: %d\n",
					    compl->pipe_info->pipe_num);
				break;
			}

			skb = (struct sk_buff *)compl->transfer_context;
			nbytes = compl->nbytes;

			ath10k_dbg(ATH10K_DBG_PCI,
				   "ath10k_pci_ce_recv_data netbuf=%p nbytes=%d\n",
				   skb, nbytes);
			ath10k_dbg_dump(ATH10K_DBG_PCI_DUMP, NULL,
					"ath10k rx: ", skb->data, nbytes);

			if (skb->len + skb_tailroom(skb) >= nbytes) {
				skb_trim(skb, 0);
				skb_put(skb, nbytes);
				cb->rx_completion(ar, skb,
						  compl->pipe_info->pipe_num);
			} else {
				ath10k_warn("rxed more than expected (nbytes %d, max %d)",
					    nbytes,
					    skb->len + skb_tailroom(skb));
			}
			break;
		case ATH10K_PCI_COMPL_FREE:
			ath10k_warn("free completion cannot be processed\n");
			break;
		default:
			ath10k_warn("invalid completion state (%d)\n",
				    compl->state);
			break;
		}

		compl->state = ATH10K_PCI_COMPL_FREE;

		/*
		 * Add completion back to the pipe's free list.
		 */
		spin_lock_bh(&compl->pipe_info->pipe_lock);
		list_add_tail(&compl->list, &compl->pipe_info->compl_free);
		compl->pipe_info->num_sends_allowed += send_done;
		spin_unlock_bh(&compl->pipe_info->pipe_lock);
	}

	spin_lock_bh(&ar_pci->compl_lock);
	ar_pci->compl_processing = false;
	spin_unlock_bh(&ar_pci->compl_lock);
}
/* TODO - temporary mapping while we have too few CE's */
static int ath10k_pci_hif_map_service_to_pipe(struct ath10k *ar,
					      u16 service_id, u8 *ul_pipe,
					      u8 *dl_pipe, int *ul_is_polled,
					      int *dl_is_polled)
{
	int ret = 0;

	/* polling for received messages not supported */
	*dl_is_polled = 0;

	switch (service_id) {
	case ATH10K_HTC_SVC_ID_HTT_DATA_MSG:
		/*
		 * Host->target HTT gets its own pipe, so it can be polled
		 * while other pipes are interrupt driven.
		 */
		*ul_pipe = 4;
		/*
		 * Use the same target->host pipe for HTC ctrl, HTC raw
		 * streams, and WMI data.
		 */
		*dl_pipe = 1;
		break;

	case ATH10K_HTC_SVC_ID_RSVD_CTRL:
	case ATH10K_HTC_SVC_ID_TEST_RAW_STREAMS:
		/*
		 * Note: HTC_RAW_STREAMS_SVC is currently unused, and
		 * HTC_CTRL_RSVD_SVC could share the same pipe as the
		 * WMI services. So, if another CE is needed, change
		 * this to *ul_pipe = 3, which frees up CE 0.
		 */
		*ul_pipe = 0;
		*dl_pipe = 1;
		break;

	case ATH10K_HTC_SVC_ID_WMI_DATA_BK:
	case ATH10K_HTC_SVC_ID_WMI_DATA_BE:
	case ATH10K_HTC_SVC_ID_WMI_DATA_VI:
	case ATH10K_HTC_SVC_ID_WMI_DATA_VO:

	case ATH10K_HTC_SVC_ID_WMI_CONTROL:
		*ul_pipe = 3;
		*dl_pipe = 2;
		break;

		/* pipe 6 reserved */
		/* pipe 7 reserved */

	default:
		ret = -1;
		break;
	}
	*ul_is_polled =
		(host_ce_config_wlan[*ul_pipe].flags & CE_ATTR_DIS_INTR) != 0;

	return ret;
}

static void ath10k_pci_hif_get_default_pipe(struct ath10k *ar,
					    u8 *ul_pipe, u8 *dl_pipe)
{
	int ul_is_polled, dl_is_polled;

	(void)ath10k_pci_hif_map_service_to_pipe(ar,
						 ATH10K_HTC_SVC_ID_RSVD_CTRL,
						 ul_pipe,
						 dl_pipe,
						 &ul_is_polled,
						 &dl_is_polled);
}
static int ath10k_pci_post_rx_pipe(struct ath10k_pci_pipe *pipe_info,
				   int num)
{
	struct ath10k *ar = pipe_info->hif_ce_state;
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	struct ath10k_ce_pipe *ce_state = pipe_info->ce_hdl;
	struct sk_buff *skb;
	dma_addr_t ce_data;
	int i, ret = 0;

	if (pipe_info->buf_sz == 0)
		return 0;

	for (i = 0; i < num; i++) {
		skb = dev_alloc_skb(pipe_info->buf_sz);
		if (!skb) {
			ath10k_warn("could not allocate skbuff for pipe %d\n",
				    num);
			ret = -ENOMEM;
			goto err;
		}

		WARN_ONCE((unsigned long)skb->data & 3, "unaligned skb");

		ce_data = dma_map_single(ar->dev, skb->data,
					 skb->len + skb_tailroom(skb),
					 DMA_FROM_DEVICE);

		if (unlikely(dma_mapping_error(ar->dev, ce_data))) {
			ath10k_warn("could not dma map skbuff\n");
			dev_kfree_skb_any(skb);
			ret = -EIO;
			goto err;
		}

		ATH10K_SKB_CB(skb)->paddr = ce_data;

		pci_dma_sync_single_for_device(ar_pci->pdev, ce_data,
					       pipe_info->buf_sz,
					       PCI_DMA_FROMDEVICE);

		ret = ath10k_ce_recv_buf_enqueue(ce_state, (void *)skb,
						 ce_data);
		if (ret) {
			ath10k_warn("could not enqueue to pipe %d (%d)\n",
				    num, ret);
			goto err;
		}
	}

	return ret;

err:
	ath10k_pci_rx_pipe_cleanup(pipe_info);
	return ret;
}

static int ath10k_pci_post_rx(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	struct ath10k_pci_pipe *pipe_info;
	const struct ce_attr *attr;
	int pipe_num, ret = 0;

	for (pipe_num = 0; pipe_num < ar_pci->ce_count; pipe_num++) {
		pipe_info = &ar_pci->pipe_info[pipe_num];
		attr = &host_ce_config_wlan[pipe_num];

		if (attr->dest_nentries == 0)
			continue;

		ret = ath10k_pci_post_rx_pipe(pipe_info,
					      attr->dest_nentries - 1);
		if (ret) {
			ath10k_warn("Unable to replenish recv buffers for pipe: %d\n",
				    pipe_num);

			for (; pipe_num >= 0; pipe_num--) {
				pipe_info = &ar_pci->pipe_info[pipe_num];
				ath10k_pci_rx_pipe_cleanup(pipe_info);
			}
			return ret;
		}
	}

	return 0;
}
static int ath10k_pci_hif_start(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	int ret;

	ret = ath10k_pci_start_ce(ar);
	if (ret) {
		ath10k_warn("could not start CE (%d)\n", ret);
		return ret;
	}

	/* Post buffers once to start things off. */
	ret = ath10k_pci_post_rx(ar);
	if (ret) {
		ath10k_warn("could not post rx pipes (%d)\n", ret);
		return ret;
	}

	ar_pci->started = 1;
	return 0;
}
static void ath10k_pci_rx_pipe_cleanup(struct ath10k_pci_pipe *pipe_info)
{
	struct ath10k *ar;
	struct ath10k_pci *ar_pci;
	struct ath10k_ce_pipe *ce_hdl;
	u32 buf_sz;
	struct sk_buff *netbuf;
	u32 ce_data;

	buf_sz = pipe_info->buf_sz;

	/* Unused Copy Engine */
	if (buf_sz == 0)
		return;

	ar = pipe_info->hif_ce_state;
	ar_pci = ath10k_pci_priv(ar);

	if (!ar_pci->started)
		return;

	ce_hdl = pipe_info->ce_hdl;

	while (ath10k_ce_revoke_recv_next(ce_hdl, (void **)&netbuf,
					  &ce_data) == 0) {
		dma_unmap_single(ar->dev, ATH10K_SKB_CB(netbuf)->paddr,
				 netbuf->len + skb_tailroom(netbuf),
				 DMA_FROM_DEVICE);
		dev_kfree_skb_any(netbuf);
	}
}
static void ath10k_pci_tx_pipe_cleanup(struct ath10k_pci_pipe *pipe_info)
{
	struct ath10k *ar;
	struct ath10k_pci *ar_pci;
	struct ath10k_ce_pipe *ce_hdl;
	struct sk_buff *netbuf;
	u32 ce_data;
	unsigned int nbytes;
	unsigned int id;
	u32 buf_sz;

	buf_sz = pipe_info->buf_sz;

	/* Unused Copy Engine */
	if (buf_sz == 0)
		return;

	ar = pipe_info->hif_ce_state;
	ar_pci = ath10k_pci_priv(ar);

	if (!ar_pci->started)
		return;

	ce_hdl = pipe_info->ce_hdl;

	while (ath10k_ce_cancel_send_next(ce_hdl, (void **)&netbuf,
					  &ce_data, &nbytes, &id) == 0) {
		if (netbuf != CE_SENDLIST_ITEM_CTXT) {
			/*
			 * Indicate the completion to higher layer to free
			 * the buffer.
			 */
			ATH10K_SKB_CB(netbuf)->is_aborted = true;
			ar_pci->msg_callbacks_current.tx_completion(ar,
								    netbuf,
								    id);
		}
	}
}
/*
 * Cleanup residual buffers for device shutdown:
 *    buffers that were enqueued for receive
 *    buffers that were to be sent
 * Note: Buffers that had completed but which were
 * not yet processed are on a completion queue. They
 * are handled when the completion thread shuts down.
 */
static void ath10k_pci_buffer_cleanup(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	int pipe_num;

	for (pipe_num = 0; pipe_num < ar_pci->ce_count; pipe_num++) {
		struct ath10k_pci_pipe *pipe_info;

		pipe_info = &ar_pci->pipe_info[pipe_num];
		ath10k_pci_rx_pipe_cleanup(pipe_info);
		ath10k_pci_tx_pipe_cleanup(pipe_info);
	}
}
static void ath10k_pci_ce_deinit(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	struct ath10k_pci_pipe *pipe_info;
	int pipe_num;

	for (pipe_num = 0; pipe_num < ar_pci->ce_count; pipe_num++) {
		pipe_info = &ar_pci->pipe_info[pipe_num];
		if (pipe_info->ce_hdl) {
			ath10k_ce_deinit(pipe_info->ce_hdl);
			pipe_info->ce_hdl = NULL;
			pipe_info->buf_sz = 0;
		}
	}
}

static void ath10k_pci_disable_irqs(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	int i;

	for (i = 0; i < max(1, ar_pci->num_msi_intrs); i++)
		disable_irq(ar_pci->pdev->irq + i);
}
static void ath10k_pci_hif_stop(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

	ath10k_dbg(ATH10K_DBG_PCI, "%s\n", __func__);

	/* Irqs are never explicitly re-enabled. They are implicitly re-enabled
	 * by ath10k_pci_start_intr(). */
	ath10k_pci_disable_irqs(ar);

	ath10k_pci_stop_ce(ar);

	/* At this point, asynchronous threads are stopped, the target should
	 * not DMA nor interrupt. We process the leftovers and then free
	 * everything else up. */

	ath10k_pci_process_ce(ar);
	ath10k_pci_cleanup_ce(ar);
	ath10k_pci_buffer_cleanup(ar);

	ar_pci->started = 0;
}
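/*
 * BMI (Bootloader Messaging Interface) exchange below: the request is
 * bounced through a DMA-mapped copy, a response buffer is posted on the BMI
 * rx CE first, the request is then sent on the BMI tx CE, and we block up to
 * BMI_COMMUNICATION_TIMEOUT_HZ for the CE completion callbacks
 * (ath10k_pci_bmi_send_done/ath10k_pci_bmi_recv_data) to signal xfer.done.
 */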
static int ath10k_pci_hif_exchange_bmi_msg(struct ath10k *ar,
					   void *req, u32 req_len,
					   void *resp, u32 *resp_len)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	struct ath10k_pci_pipe *pci_tx = &ar_pci->pipe_info[BMI_CE_NUM_TO_TARG];
	struct ath10k_pci_pipe *pci_rx = &ar_pci->pipe_info[BMI_CE_NUM_TO_HOST];
	struct ath10k_ce_pipe *ce_tx = pci_tx->ce_hdl;
	struct ath10k_ce_pipe *ce_rx = pci_rx->ce_hdl;
	dma_addr_t req_paddr = 0;
	dma_addr_t resp_paddr = 0;
	struct bmi_xfer xfer = {};
	void *treq, *tresp = NULL;
	int ret = 0;

	if (resp && !resp_len)
		return -EINVAL;

	if (resp && resp_len && *resp_len == 0)
		return -EINVAL;

	treq = kmemdup(req, req_len, GFP_KERNEL);
	if (!treq)
		return -ENOMEM;

	req_paddr = dma_map_single(ar->dev, treq, req_len, DMA_TO_DEVICE);
	ret = dma_mapping_error(ar->dev, req_paddr);
	if (ret)
		goto err_dma;

	if (resp && resp_len) {
		tresp = kzalloc(*resp_len, GFP_KERNEL);
		if (!tresp) {
			ret = -ENOMEM;
			goto err_req;
		}

		resp_paddr = dma_map_single(ar->dev, tresp, *resp_len,
					    DMA_FROM_DEVICE);
		ret = dma_mapping_error(ar->dev, resp_paddr);
		if (ret)
			goto err_req;

		xfer.wait_for_resp = true;
		xfer.resp_len = 0;

		ath10k_ce_recv_buf_enqueue(ce_rx, &xfer, resp_paddr);
	}

	init_completion(&xfer.done);

	ret = ath10k_ce_send(ce_tx, &xfer, req_paddr, req_len, -1, 0);
	if (ret)
		goto err_resp;

	ret = wait_for_completion_timeout(&xfer.done,
					  BMI_COMMUNICATION_TIMEOUT_HZ);
	if (ret <= 0) {
		u32 unused_buffer;
		unsigned int unused_nbytes;
		unsigned int unused_id;

		ret = -ETIMEDOUT;
		ath10k_ce_cancel_send_next(ce_tx, NULL, &unused_buffer,
					   &unused_nbytes, &unused_id);
	} else {
		/* non-zero means we did not time out */
		ret = 0;
	}

err_resp:
	if (resp) {
		u32 unused_buffer;

		ath10k_ce_revoke_recv_next(ce_rx, NULL, &unused_buffer);
		dma_unmap_single(ar->dev, resp_paddr,
				 *resp_len, DMA_FROM_DEVICE);
	}
err_req:
	dma_unmap_single(ar->dev, req_paddr, req_len, DMA_TO_DEVICE);

	if (ret == 0 && resp_len) {
		*resp_len = min(*resp_len, xfer.resp_len);
		memcpy(resp, tresp, xfer.resp_len);
	}
err_dma:
	kfree(treq);
	kfree(tresp);

	return ret;
}
static void ath10k_pci_bmi_send_done(struct ath10k_ce_pipe *ce_state,
				     void *transfer_context,
				     u32 data,
				     unsigned int nbytes,
				     unsigned int transfer_id)
{
	struct bmi_xfer *xfer = transfer_context;

	if (xfer->wait_for_resp)
		return;

	complete(&xfer->done);
}

static void ath10k_pci_bmi_recv_data(struct ath10k_ce_pipe *ce_state,
				     void *transfer_context,
				     u32 data,
				     unsigned int nbytes,
				     unsigned int transfer_id,
				     unsigned int flags)
{
	struct bmi_xfer *xfer = transfer_context;

	if (!xfer->wait_for_resp) {
		ath10k_warn("unexpected: BMI data received; ignoring\n");
		return;
	}

	xfer->resp_len = nbytes;
	complete(&xfer->done);
}
/*
 * Map from service/endpoint to Copy Engine.
 * This table is derived from the CE_PCI TABLE, above.
 * It is passed to the Target at startup for use by firmware.
 */
static const struct service_to_pipe target_service_to_ce_map_wlan[] = {
	{
		 ATH10K_HTC_SVC_ID_WMI_DATA_VO,
		 PIPEDIR_OUT,		/* out = UL = host -> target */
		 3,
	},
	{
		 ATH10K_HTC_SVC_ID_WMI_DATA_VO,
		 PIPEDIR_IN,		/* in = DL = target -> host */
		 2,
	},
	{
		 ATH10K_HTC_SVC_ID_WMI_DATA_BK,
		 PIPEDIR_OUT,		/* out = UL = host -> target */
		 3,
	},
	{
		 ATH10K_HTC_SVC_ID_WMI_DATA_BK,
		 PIPEDIR_IN,		/* in = DL = target -> host */
		 2,
	},
	{
		 ATH10K_HTC_SVC_ID_WMI_DATA_BE,
		 PIPEDIR_OUT,		/* out = UL = host -> target */
		 3,
	},
	{
		 ATH10K_HTC_SVC_ID_WMI_DATA_BE,
		 PIPEDIR_IN,		/* in = DL = target -> host */
		 2,
	},
	{
		 ATH10K_HTC_SVC_ID_WMI_DATA_VI,
		 PIPEDIR_OUT,		/* out = UL = host -> target */
		 3,
	},
	{
		 ATH10K_HTC_SVC_ID_WMI_DATA_VI,
		 PIPEDIR_IN,		/* in = DL = target -> host */
		 2,
	},
	{
		 ATH10K_HTC_SVC_ID_WMI_CONTROL,
		 PIPEDIR_OUT,		/* out = UL = host -> target */
		 3,
	},
	{
		 ATH10K_HTC_SVC_ID_WMI_CONTROL,
		 PIPEDIR_IN,		/* in = DL = target -> host */
		 2,
	},
	{
		 ATH10K_HTC_SVC_ID_RSVD_CTRL,
		 PIPEDIR_OUT,		/* out = UL = host -> target */
		 0,		/* could be moved to 3 (share with WMI) */
	},
	{
		 ATH10K_HTC_SVC_ID_RSVD_CTRL,
		 PIPEDIR_IN,		/* in = DL = target -> host */
		 1,
	},
	{
		 ATH10K_HTC_SVC_ID_TEST_RAW_STREAMS,	/* not currently used */
		 PIPEDIR_OUT,		/* out = UL = host -> target */
		 0,
	},
	{
		 ATH10K_HTC_SVC_ID_TEST_RAW_STREAMS,	/* not currently used */
		 PIPEDIR_IN,		/* in = DL = target -> host */
		 1,
	},
	{
		 ATH10K_HTC_SVC_ID_HTT_DATA_MSG,
		 PIPEDIR_OUT,		/* out = UL = host -> target */
		 4,
	},
	{
		 ATH10K_HTC_SVC_ID_HTT_DATA_MSG,
		 PIPEDIR_IN,		/* in = DL = target -> host */
		 1,
	},

	/* (Additions here) */

	{ /* Must be last */
		 0,
		 0,
		 0,
	},
};
/*
 * Send an interrupt to the device to wake up the Target CPU
 * so it has an opportunity to notice any changed state.
 */
static int ath10k_pci_wake_target_cpu(struct ath10k *ar)
{
	int ret;
	u32 core_ctrl;

	ret = ath10k_pci_diag_read_access(ar, SOC_CORE_BASE_ADDRESS |
					  CORE_CTRL_ADDRESS,
					  &core_ctrl);
	if (ret) {
		ath10k_warn("Unable to read core ctrl\n");
		return ret;
	}

	/* A_INUM_FIRMWARE interrupt to Target CPU */
	core_ctrl |= CORE_CTRL_CPU_INTR_MASK;

	ret = ath10k_pci_diag_write_access(ar, SOC_CORE_BASE_ADDRESS |
					   CORE_CTRL_ADDRESS, core_ctrl);
	if (ret)
		ath10k_warn("Unable to set interrupt mask\n");

	return ret;
}
static int ath10k_pci_init_config(struct ath10k *ar)
{
	u32 interconnect_targ_addr;
	u32 pcie_state_targ_addr = 0;
	u32 pipe_cfg_targ_addr = 0;
	u32 svc_to_pipe_map = 0;
	u32 pcie_config_flags = 0;
	u32 ealloc_value;
	u32 ealloc_targ_addr;
	u32 flag2_value;
	u32 flag2_targ_addr;
	int ret = 0;

	/* Download to Target the CE Config and the service-to-CE map */
	interconnect_targ_addr =
		host_interest_item_address(HI_ITEM(hi_interconnect_state));

	/* Supply Target-side CE configuration */
	ret = ath10k_pci_diag_read_access(ar, interconnect_targ_addr,
					  &pcie_state_targ_addr);
	if (ret != 0) {
		ath10k_err("Failed to get pcie state addr: %d\n", ret);
		return ret;
	}

	if (pcie_state_targ_addr == 0) {
		ret = -EIO;
		ath10k_err("Invalid pcie state addr\n");
		return ret;
	}

	ret = ath10k_pci_diag_read_access(ar, pcie_state_targ_addr +
					  offsetof(struct pcie_state,
						   pipe_cfg_addr),
					  &pipe_cfg_targ_addr);
	if (ret != 0) {
		ath10k_err("Failed to get pipe cfg addr: %d\n", ret);
		return ret;
	}

	if (pipe_cfg_targ_addr == 0) {
		ret = -EIO;
		ath10k_err("Invalid pipe cfg addr\n");
		return ret;
	}

	ret = ath10k_pci_diag_write_mem(ar, pipe_cfg_targ_addr,
					target_ce_config_wlan,
					sizeof(target_ce_config_wlan));

	if (ret != 0) {
		ath10k_err("Failed to write pipe cfg: %d\n", ret);
		return ret;
	}

	ret = ath10k_pci_diag_read_access(ar, pcie_state_targ_addr +
					  offsetof(struct pcie_state,
						   svc_to_pipe_map),
					  &svc_to_pipe_map);
	if (ret != 0) {
		ath10k_err("Failed to get svc/pipe map: %d\n", ret);
		return ret;
	}

	if (svc_to_pipe_map == 0) {
		ret = -EIO;
		ath10k_err("Invalid svc_to_pipe map\n");
		return ret;
	}

	ret = ath10k_pci_diag_write_mem(ar, svc_to_pipe_map,
					target_service_to_ce_map_wlan,
					sizeof(target_service_to_ce_map_wlan));
	if (ret != 0) {
		ath10k_err("Failed to write svc/pipe map: %d\n", ret);
		return ret;
	}

	ret = ath10k_pci_diag_read_access(ar, pcie_state_targ_addr +
					  offsetof(struct pcie_state,
						   config_flags),
					  &pcie_config_flags);
	if (ret != 0) {
		ath10k_err("Failed to get pcie config_flags: %d\n", ret);
		return ret;
	}

	pcie_config_flags &= ~PCIE_CONFIG_FLAG_ENABLE_L1;

	ret = ath10k_pci_diag_write_mem(ar, pcie_state_targ_addr +
					offsetof(struct pcie_state,
						 config_flags),
					&pcie_config_flags,
					sizeof(pcie_config_flags));
	if (ret != 0) {
		ath10k_err("Failed to write pcie config_flags: %d\n", ret);
		return ret;
	}

	/* configure early allocation */
	ealloc_targ_addr = host_interest_item_address(HI_ITEM(hi_early_alloc));

	ret = ath10k_pci_diag_read_access(ar, ealloc_targ_addr, &ealloc_value);
	if (ret != 0) {
		ath10k_err("Failed to get early alloc val: %d\n", ret);
		return ret;
	}

	/* first bank is switched to IRAM */
	ealloc_value |= ((HI_EARLY_ALLOC_MAGIC << HI_EARLY_ALLOC_MAGIC_SHIFT) &
			 HI_EARLY_ALLOC_MAGIC_MASK);
	ealloc_value |= ((1 << HI_EARLY_ALLOC_IRAM_BANKS_SHIFT) &
			 HI_EARLY_ALLOC_IRAM_BANKS_MASK);

	ret = ath10k_pci_diag_write_access(ar, ealloc_targ_addr, ealloc_value);
	if (ret != 0) {
		ath10k_err("Failed to set early alloc val: %d\n", ret);
		return ret;
	}

	/* Tell Target to proceed with initialization */
	flag2_targ_addr = host_interest_item_address(HI_ITEM(hi_option_flag2));

	ret = ath10k_pci_diag_read_access(ar, flag2_targ_addr, &flag2_value);
	if (ret != 0) {
		ath10k_err("Failed to get option val: %d\n", ret);
		return ret;
	}

	flag2_value |= HI_OPTION_EARLY_CFG_DONE;

	ret = ath10k_pci_diag_write_access(ar, flag2_targ_addr, flag2_value);
	if (ret != 0) {
		ath10k_err("Failed to set option val: %d\n", ret);
		return ret;
	}

	return 0;
}
static int ath10k_pci_ce_init(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	struct ath10k_pci_pipe *pipe_info;
	const struct ce_attr *attr;
	int pipe_num;

	for (pipe_num = 0; pipe_num < ar_pci->ce_count; pipe_num++) {
		pipe_info = &ar_pci->pipe_info[pipe_num];
		pipe_info->pipe_num = pipe_num;
		pipe_info->hif_ce_state = ar;
		attr = &host_ce_config_wlan[pipe_num];

		pipe_info->ce_hdl = ath10k_ce_init(ar, pipe_num, attr);
		if (pipe_info->ce_hdl == NULL) {
			ath10k_err("Unable to initialize CE for pipe: %d\n",
				   pipe_num);

			/* It is safe to call it here. It checks if ce_hdl is
			 * valid for each pipe */
			ath10k_pci_ce_deinit(ar);
			return -1;
		}

		if (pipe_num == ar_pci->ce_count - 1) {
			/*
			 * Reserve the ultimate CE for
			 * diagnostic Window support
			 */
			ar_pci->ce_diag =
				ar_pci->pipe_info[ar_pci->ce_count - 1].ce_hdl;
			continue;
		}

		pipe_info->buf_sz = (size_t) (attr->src_sz_max);
	}

	/*
	 * Initially, establish CE completion handlers for use with BMI.
	 * These are overwritten with generic handlers after we exit BMI phase.
	 */
	pipe_info = &ar_pci->pipe_info[BMI_CE_NUM_TO_TARG];
	ath10k_ce_send_cb_register(pipe_info->ce_hdl,
				   ath10k_pci_bmi_send_done, 0);

	pipe_info = &ar_pci->pipe_info[BMI_CE_NUM_TO_HOST];
	ath10k_ce_recv_cb_register(pipe_info->ce_hdl,
				   ath10k_pci_bmi_recv_data);

	return 0;
}
static void ath10k_pci_fw_interrupt_handler(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	u32 fw_indicator_address, fw_indicator;

	ath10k_pci_wake(ar);

	fw_indicator_address = ar_pci->fw_indicator_address;
	fw_indicator = ath10k_pci_read32(ar, fw_indicator_address);

	if (fw_indicator & FW_IND_EVENT_PENDING) {
		/* ACK: clear Target-side pending event */
		ath10k_pci_write32(ar, fw_indicator_address,
				   fw_indicator & ~FW_IND_EVENT_PENDING);

		if (ar_pci->started) {
			ath10k_pci_hif_dump_area(ar);
		} else {
			/*
			 * Probable Target failure before we're prepared
			 * to handle it. Generally unexpected.
			 */
			ath10k_warn("early firmware event indicated\n");
		}
	}

	ath10k_pci_sleep(ar);
}
static int ath10k_pci_hif_power_up(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	int ret;

	ret = ath10k_pci_start_intr(ar);
	if (ret) {
		ath10k_err("could not start interrupt handling (%d)\n", ret);
		goto err;
	}

	/*
	 * Bring the target up cleanly.
	 *
	 * The target may be in an undefined state with an AUX-powered Target
	 * and a Host in WoW mode. If the Host crashes, loses power, or is
	 * restarted (without unloading the driver) then the Target is left
	 * (aux) powered and running. On a subsequent driver load, the Target
	 * is in an unexpected state. We try to catch that here in order to
	 * reset the Target and retry the probe.
	 */
	ath10k_pci_device_reset(ar);

	ret = ath10k_pci_reset_target(ar);
	if (ret)
		goto err_irq;

	if (!test_bit(ATH10K_PCI_FEATURE_SOC_POWER_SAVE, ar_pci->features))
		/* Force AWAKE forever */
		ath10k_do_pci_wake(ar);

	ret = ath10k_pci_ce_init(ar);
	if (ret)
		goto err_ps;

	ret = ath10k_pci_init_config(ar);
	if (ret)
		goto err_ce;

	ret = ath10k_pci_wake_target_cpu(ar);
	if (ret) {
		ath10k_err("could not wake up target CPU (%d)\n", ret);
		goto err_ce;
	}

	return 0;

err_ce:
	ath10k_pci_ce_deinit(ar);
err_ps:
	if (!test_bit(ATH10K_PCI_FEATURE_SOC_POWER_SAVE, ar_pci->features))
		ath10k_do_pci_sleep(ar);
err_irq:
	ath10k_pci_stop_intr(ar);
err:
	return ret;
}
static void ath10k_pci_hif_power_down(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

	ath10k_pci_stop_intr(ar);

	ath10k_pci_ce_deinit(ar);
	if (!test_bit(ATH10K_PCI_FEATURE_SOC_POWER_SAVE, ar_pci->features))
		ath10k_do_pci_sleep(ar);
}
#define ATH10K_PCI_PM_CONTROL 0x44
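/*
 * Offset 0x44 appears to be this device's PCI power management
 * control/status register (PMCSR) in config space; per the PCI PM spec its
 * low two bits select the power state, where 0x0 is D0 and 0x3 is D3hot,
 * which is how suspend/resume below interpret it.
 */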
static int ath10k_pci_hif_suspend(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	struct pci_dev *pdev = ar_pci->pdev;
	u32 val;

	pci_read_config_dword(pdev, ATH10K_PCI_PM_CONTROL, &val);

	if ((val & 0x000000ff) != 0x3) {
		pci_save_state(pdev);
		pci_disable_device(pdev);
		pci_write_config_dword(pdev, ATH10K_PCI_PM_CONTROL,
				       (val & 0xffffff00) | 0x03);
	}

	return 0;
}

static int ath10k_pci_hif_resume(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	struct pci_dev *pdev = ar_pci->pdev;
	u32 val;

	pci_read_config_dword(pdev, ATH10K_PCI_PM_CONTROL, &val);

	if ((val & 0x000000ff) != 0) {
		pci_restore_state(pdev);
		pci_write_config_dword(pdev, ATH10K_PCI_PM_CONTROL,
				       val & 0xffffff00);
		/*
		 * Suspend/Resume resets the PCI configuration space,
		 * so we have to re-disable the RETRY_TIMEOUT register (0x41)
		 * to keep PCI Tx retries from interfering with C3 CPU state.
		 */
		pci_read_config_dword(pdev, 0x40, &val);

		if ((val & 0x0000ff00) != 0)
			pci_write_config_dword(pdev, 0x40, val & 0xffff00ff);
	}

	return 0;
}
static const struct ath10k_hif_ops ath10k_pci_hif_ops = {
	.send_head = ath10k_pci_hif_send_head,
	.exchange_bmi_msg = ath10k_pci_hif_exchange_bmi_msg,
	.start = ath10k_pci_hif_start,
	.stop = ath10k_pci_hif_stop,
	.map_service_to_pipe = ath10k_pci_hif_map_service_to_pipe,
	.get_default_pipe = ath10k_pci_hif_get_default_pipe,
	.send_complete_check = ath10k_pci_hif_send_complete_check,
	.set_callbacks = ath10k_pci_hif_set_callbacks,
	.get_free_queue_number = ath10k_pci_hif_get_free_queue_number,
	.power_up = ath10k_pci_hif_power_up,
	.power_down = ath10k_pci_hif_power_down,
	.suspend = ath10k_pci_hif_suspend,
	.resume = ath10k_pci_hif_resume,
};
static void ath10k_pci_ce_tasklet(unsigned long ptr)
{
	struct ath10k_pci_pipe *pipe = (struct ath10k_pci_pipe *)ptr;
	struct ath10k_pci *ar_pci = pipe->ar_pci;

	ath10k_ce_per_engine_service(ar_pci->ar, pipe->pipe_num);
}

static void ath10k_msi_err_tasklet(unsigned long data)
{
	struct ath10k *ar = (struct ath10k *)data;

	ath10k_pci_fw_interrupt_handler(ar);
}

/*
 * Handler for a per-engine interrupt on a PARTICULAR CE.
 * This is used in cases where each CE has a private MSI interrupt.
 */
static irqreturn_t ath10k_pci_per_engine_handler(int irq, void *arg)
{
	struct ath10k *ar = arg;
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	int ce_id = irq - ar_pci->pdev->irq - MSI_ASSIGN_CE_INITIAL;

	if (ce_id < 0 || ce_id >= ARRAY_SIZE(ar_pci->pipe_info)) {
		ath10k_warn("unexpected/invalid irq %d ce_id %d\n", irq, ce_id);
		return IRQ_HANDLED;
	}

	/*
	 * NOTE: We are able to derive ce_id from irq because we
	 * use a one-to-one mapping for CE's 0..5.
	 * CE's 6 & 7 do not use interrupts at all.
	 *
	 * This mapping must be kept in sync with the mapping
	 * used by firmware.
	 */
	tasklet_schedule(&ar_pci->pipe_info[ce_id].intr);
	return IRQ_HANDLED;
}

static irqreturn_t ath10k_pci_msi_fw_handler(int irq, void *arg)
{
	struct ath10k *ar = arg;
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

	tasklet_schedule(&ar_pci->msi_fw_err);

	return IRQ_HANDLED;
}
/*
 * Top-level interrupt handler for all PCI interrupts from a Target.
 * When a block of MSI interrupts is allocated, this top-level handler
 * is not used; instead, we directly call the correct sub-handler.
 */
static irqreturn_t ath10k_pci_interrupt_handler(int irq, void *arg)
{
	struct ath10k *ar = arg;
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

	if (ar_pci->num_msi_intrs == 0) {
		/*
		 * IMPORTANT: INTR_CLR register has to be set after
		 * INTR_ENABLE is set to 0, otherwise interrupt can not be
		 * really cleared.
		 */
		iowrite32(0, ar_pci->mem +
			  (SOC_CORE_BASE_ADDRESS |
			   PCIE_INTR_ENABLE_ADDRESS));
		iowrite32(PCIE_INTR_FIRMWARE_MASK |
			  PCIE_INTR_CE_MASK_ALL,
			  ar_pci->mem + (SOC_CORE_BASE_ADDRESS |
					 PCIE_INTR_CLR_ADDRESS));
		/*
		 * IMPORTANT: this extra read transaction is required to
		 * flush the posted write buffer.
		 */
		(void) ioread32(ar_pci->mem +
				(SOC_CORE_BASE_ADDRESS |
				 PCIE_INTR_ENABLE_ADDRESS));
	}

	tasklet_schedule(&ar_pci->intr_tq);

	return IRQ_HANDLED;
}

static void ath10k_pci_tasklet(unsigned long data)
{
	struct ath10k *ar = (struct ath10k *)data;
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

	ath10k_pci_fw_interrupt_handler(ar); /* FIXME: Handle FW error */
	ath10k_ce_per_engine_service_any(ar);

	if (ar_pci->num_msi_intrs == 0) {
		/* Enable Legacy PCI line interrupts */
		iowrite32(PCIE_INTR_FIRMWARE_MASK |
			  PCIE_INTR_CE_MASK_ALL,
			  ar_pci->mem + (SOC_CORE_BASE_ADDRESS |
					 PCIE_INTR_ENABLE_ADDRESS));
		/*
		 * IMPORTANT: this extra read transaction is required to
		 * flush the posted write buffer.
		 */
		(void) ioread32(ar_pci->mem +
				(SOC_CORE_BASE_ADDRESS |
				 PCIE_INTR_ENABLE_ADDRESS));
	}
}
static int ath10k_pci_start_intr_msix(struct ath10k *ar, int num)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	int ret;
	int i;

	ret = pci_enable_msi_block(ar_pci->pdev, num);
	if (ret)
		return ret;

	ret = request_irq(ar_pci->pdev->irq + MSI_ASSIGN_FW,
			  ath10k_pci_msi_fw_handler,
			  IRQF_SHARED, "ath10k_pci", ar);
	if (ret) {
		ath10k_warn("request_irq(%d) failed %d\n",
			    ar_pci->pdev->irq + MSI_ASSIGN_FW, ret);

		pci_disable_msi(ar_pci->pdev);
		return ret;
	}

	for (i = MSI_ASSIGN_CE_INITIAL; i <= MSI_ASSIGN_CE_MAX; i++) {
		ret = request_irq(ar_pci->pdev->irq + i,
				  ath10k_pci_per_engine_handler,
				  IRQF_SHARED, "ath10k_pci", ar);
		if (ret) {
			ath10k_warn("request_irq(%d) failed %d\n",
				    ar_pci->pdev->irq + i, ret);

			for (i--; i >= MSI_ASSIGN_CE_INITIAL; i--)
				free_irq(ar_pci->pdev->irq + i, ar);

			free_irq(ar_pci->pdev->irq + MSI_ASSIGN_FW, ar);
			pci_disable_msi(ar_pci->pdev);
			return ret;
		}
	}

	ath10k_info("MSI-X interrupt handling (%d intrs)\n", num);
	return 0;
}

static int ath10k_pci_start_intr_msi(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	int ret;

	ret = pci_enable_msi(ar_pci->pdev);
	if (ret < 0)
		return ret;

	ret = request_irq(ar_pci->pdev->irq,
			  ath10k_pci_interrupt_handler,
			  IRQF_SHARED, "ath10k_pci", ar);
	if (ret < 0) {
		pci_disable_msi(ar_pci->pdev);
		return ret;
	}

	ath10k_info("MSI interrupt handling\n");
	return 0;
}

static int ath10k_pci_start_intr_legacy(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	int ret;

	ret = request_irq(ar_pci->pdev->irq,
			  ath10k_pci_interrupt_handler,
			  IRQF_SHARED, "ath10k_pci", ar);
	if (ret < 0)
		return ret;

	/*
	 * Make sure to wake the Target before enabling Legacy
	 * Interrupt.
	 */
	iowrite32(PCIE_SOC_WAKE_V_MASK,
		  ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS +
		  PCIE_SOC_WAKE_ADDRESS);

	ath10k_pci_wait(ar);

	/*
	 * A potential race occurs here: The CORE_BASE write
	 * depends on target correctly decoding AXI address but
	 * host won't know when target writes BAR to CORE_CTRL.
	 * This write might get lost if target has NOT written BAR.
	 * For now, fix the race by repeating the write in below
	 * synchronization checking.
	 */
	iowrite32(PCIE_INTR_FIRMWARE_MASK |
		  PCIE_INTR_CE_MASK_ALL,
		  ar_pci->mem + (SOC_CORE_BASE_ADDRESS |
				 PCIE_INTR_ENABLE_ADDRESS));
	iowrite32(PCIE_SOC_WAKE_RESET,
		  ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS +
		  PCIE_SOC_WAKE_ADDRESS);

	ath10k_info("legacy interrupt handling\n");
	return 0;
}
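/*
 * Interrupt setup tries the richest mode first and degrades gracefully:
 * a block of MSI interrupts (one per CE plus one for firmware events) when
 * ATH10K_PCI_FEATURE_MSI_X is set, then a single MSI, then shared legacy
 * INTx. ar_pci->num_msi_intrs records which mode won (0 means legacy).
 */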
static int ath10k_pci_start_intr(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	int num = MSI_NUM_REQUEST;
	int ret;
	int i;

	tasklet_init(&ar_pci->intr_tq, ath10k_pci_tasklet, (unsigned long) ar);
	tasklet_init(&ar_pci->msi_fw_err, ath10k_msi_err_tasklet,
		     (unsigned long) ar);

	for (i = 0; i < CE_COUNT; i++) {
		ar_pci->pipe_info[i].ar_pci = ar_pci;
		tasklet_init(&ar_pci->pipe_info[i].intr,
			     ath10k_pci_ce_tasklet,
			     (unsigned long)&ar_pci->pipe_info[i]);
	}

	if (!test_bit(ATH10K_PCI_FEATURE_MSI_X, ar_pci->features))
		num = 1;

	if (num > 1) {
		ret = ath10k_pci_start_intr_msix(ar, num);
		if (ret == 0)
			goto exit;

		ath10k_warn("MSI-X didn't succeed (%d), trying MSI\n", ret);
		num = 1;
	}

	if (num == 1) {
		ret = ath10k_pci_start_intr_msi(ar);
		if (ret == 0)
			goto exit;

		ath10k_warn("MSI didn't succeed (%d), trying legacy INTR\n",
			    ret);
		num = 0;
	}

	ret = ath10k_pci_start_intr_legacy(ar);

exit:
	ar_pci->num_msi_intrs = num;
	ar_pci->ce_count = CE_COUNT;
	return ret;
}
static void ath10k_pci_stop_intr(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	int i;

	/* There's at least one interrupt regardless of whether it's legacy
	 * INTR or MSI or MSI-X */
	for (i = 0; i < max(1, ar_pci->num_msi_intrs); i++)
		free_irq(ar_pci->pdev->irq + i, ar);

	if (ar_pci->num_msi_intrs > 0)
		pci_disable_msi(ar_pci->pdev);
}
static int ath10k_pci_reset_target(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	int wait_limit = 300; /* 3 sec */

	/* Wait for Target to finish initialization before we proceed. */
	iowrite32(PCIE_SOC_WAKE_V_MASK,
		  ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS +
		  PCIE_SOC_WAKE_ADDRESS);

	ath10k_pci_wait(ar);

	while (wait_limit-- &&
	       !(ioread32(ar_pci->mem + FW_INDICATOR_ADDRESS) &
		 FW_IND_INITIALIZED)) {
		if (ar_pci->num_msi_intrs == 0)
			/* Fix potential race by repeating CORE_BASE writes */
			iowrite32(PCIE_INTR_FIRMWARE_MASK |
				  PCIE_INTR_CE_MASK_ALL,
				  ar_pci->mem + (SOC_CORE_BASE_ADDRESS |
						 PCIE_INTR_ENABLE_ADDRESS));
		mdelay(10);
	}

	if (wait_limit < 0) {
		ath10k_err("Target stalled\n");
		iowrite32(PCIE_SOC_WAKE_RESET,
			  ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS +
			  PCIE_SOC_WAKE_ADDRESS);
		return -EIO;
	}

	iowrite32(PCIE_SOC_WAKE_RESET,
		  ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS +
		  PCIE_SOC_WAKE_ADDRESS);

	return 0;
}
static void ath10k_pci_device_reset(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	void __iomem *mem = ar_pci->mem;
	int i;
	u32 val;

	if (!SOC_GLOBAL_RESET_ADDRESS)
		return;

	if (!mem)
		return;

	ath10k_pci_reg_write32(mem, PCIE_SOC_WAKE_ADDRESS,
			       PCIE_SOC_WAKE_V_MASK);
	for (i = 0; i < ATH_PCI_RESET_WAIT_MAX; i++) {
		if (ath10k_pci_target_is_awake(ar))
			break;
		msleep(1);
	}

	/* Put Target, including PCIe, into RESET. */
	val = ath10k_pci_reg_read32(mem, SOC_GLOBAL_RESET_ADDRESS);
	val |= 1;
	ath10k_pci_reg_write32(mem, SOC_GLOBAL_RESET_ADDRESS, val);

	for (i = 0; i < ATH_PCI_RESET_WAIT_MAX; i++) {
		if (ath10k_pci_reg_read32(mem, RTC_STATE_ADDRESS) &
					  RTC_STATE_COLD_RESET_MASK)
			break;
		msleep(1);
	}

	/* Pull Target, including PCIe, out of RESET. */
	val &= ~1;
	ath10k_pci_reg_write32(mem, SOC_GLOBAL_RESET_ADDRESS, val);

	for (i = 0; i < ATH_PCI_RESET_WAIT_MAX; i++) {
		if (!(ath10k_pci_reg_read32(mem, RTC_STATE_ADDRESS) &
		      RTC_STATE_COLD_RESET_MASK))
			break;
		msleep(1);
	}

	ath10k_pci_reg_write32(mem, PCIE_SOC_WAKE_ADDRESS, PCIE_SOC_WAKE_RESET);
}
static void ath10k_pci_dump_features(struct ath10k_pci *ar_pci)
{
	int i;

	for (i = 0; i < ATH10K_PCI_FEATURE_COUNT; i++) {
		if (!test_bit(i, ar_pci->features))
			continue;

		switch (i) {
		case ATH10K_PCI_FEATURE_MSI_X:
			ath10k_dbg(ATH10K_DBG_PCI, "device supports MSI-X\n");
			break;
		case ATH10K_PCI_FEATURE_SOC_POWER_SAVE:
			ath10k_dbg(ATH10K_DBG_PCI, "QCA98XX SoC power save enabled\n");
			break;
		}
	}
}
static int ath10k_pci_probe(struct pci_dev *pdev,
			    const struct pci_device_id *pci_dev)
{
	void __iomem *mem;
	int ret = 0;
	struct ath10k *ar;
	struct ath10k_pci *ar_pci;
	u32 lcr_val;

	ath10k_dbg(ATH10K_DBG_PCI, "%s\n", __func__);

	ar_pci = kzalloc(sizeof(*ar_pci), GFP_KERNEL);
	if (ar_pci == NULL)
		return -ENOMEM;

	ar_pci->pdev = pdev;
	ar_pci->dev = &pdev->dev;

	switch (pci_dev->device) {
	case QCA988X_2_0_DEVICE_ID:
		set_bit(ATH10K_PCI_FEATURE_MSI_X, ar_pci->features);
		break;
	default:
		ret = -ENODEV;
		ath10k_err("Unknown device ID: %d\n", pci_dev->device);
		goto err_ar_pci;
	}

	if (ath10k_target_ps)
		set_bit(ATH10K_PCI_FEATURE_SOC_POWER_SAVE, ar_pci->features);

	ath10k_pci_dump_features(ar_pci);

	ar = ath10k_core_create(ar_pci, ar_pci->dev, &ath10k_pci_hif_ops);
	if (!ar) {
		ath10k_err("ath10k_core_create failed!\n");
		ret = -EINVAL;
		goto err_ar_pci;
	}

	ar_pci->ar = ar;
	ar_pci->fw_indicator_address = FW_INDICATOR_ADDRESS;
	atomic_set(&ar_pci->keep_awake_count, 0);

	pci_set_drvdata(pdev, ar);

	/*
	 * Without any knowledge of the Host, the Target may have been reset or
	 * power cycled and its Config Space may no longer reflect the PCI
	 * address space that was assigned earlier by the PCI infrastructure.
	 * Refresh it now.
	 */
	ret = pci_assign_resource(pdev, BAR_NUM);
	if (ret) {
		ath10k_err("cannot assign PCI space: %d\n", ret);
		goto err_ar;
	}

	ret = pci_enable_device(pdev);
	if (ret) {
		ath10k_err("cannot enable PCI device: %d\n", ret);
		goto err_ar;
	}

	/* Request MMIO resources */
	ret = pci_request_region(pdev, BAR_NUM, "ath");
	if (ret) {
		ath10k_err("PCI MMIO reservation error: %d\n", ret);
		goto err_device;
	}

	/*
	 * Target structures have a limit of 32 bit DMA pointers.
	 * DMA pointers can be wider than 32 bits by default on some systems.
	 */
	ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
	if (ret) {
		ath10k_err("32-bit DMA not available: %d\n", ret);
		goto err_region;
	}

	ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
	if (ret) {
		ath10k_err("cannot enable 32-bit consistent DMA\n");
		goto err_region;
	}

	/* Set bus master bit in PCI_COMMAND to enable DMA */
	pci_set_master(pdev);

	/*
	 * Temporary FIX: disable ASPM
	 * Will be removed after the OTP is programmed.
	 */
	pci_read_config_dword(pdev, 0x80, &lcr_val);
	pci_write_config_dword(pdev, 0x80, (lcr_val & 0xffffff00));

	/* Arrange for access to Target SoC registers. */
	mem = pci_iomap(pdev, BAR_NUM, 0);
	if (!mem) {
		ath10k_err("PCI iomap error\n");
		ret = -EIO;
		goto err_master;
	}

	ar_pci->mem = mem;

	spin_lock_init(&ar_pci->ce_lock);

	ret = ath10k_core_register(ar);
	if (ret) {
		ath10k_err("could not register driver core (%d)\n", ret);
		goto err_iomap;
	}

	return 0;

err_iomap:
	pci_iounmap(pdev, mem);
err_master:
	pci_clear_master(pdev);
err_region:
	pci_release_region(pdev, BAR_NUM);
err_device:
	pci_disable_device(pdev);
err_ar:
	pci_set_drvdata(pdev, NULL);
	ath10k_core_destroy(ar);
err_ar_pci:
	/* call HIF PCI free here */
	kfree(ar_pci);

	return ret;
}
static void ath10k_pci_remove(struct pci_dev *pdev)
{
	struct ath10k *ar = pci_get_drvdata(pdev);
	struct ath10k_pci *ar_pci;

	ath10k_dbg(ATH10K_DBG_PCI, "%s\n", __func__);

	if (!ar)
		return;

	ar_pci = ath10k_pci_priv(ar);

	if (!ar_pci)
		return;

	tasklet_kill(&ar_pci->msi_fw_err);

	ath10k_core_unregister(ar);

	pci_set_drvdata(pdev, NULL);
	pci_iounmap(pdev, ar_pci->mem);
	pci_release_region(pdev, BAR_NUM);
	pci_clear_master(pdev);
	pci_disable_device(pdev);

	ath10k_core_destroy(ar);
	kfree(ar_pci);
}
MODULE_DEVICE_TABLE(pci, ath10k_pci_id_table);

static struct pci_driver ath10k_pci_driver = {
	.name = "ath10k_pci",
	.id_table = ath10k_pci_id_table,
	.probe = ath10k_pci_probe,
	.remove = ath10k_pci_remove,
};

static int __init ath10k_pci_init(void)
{
	int ret;

	ret = pci_register_driver(&ath10k_pci_driver);
	if (ret)
		ath10k_err("pci_register_driver failed [%d]\n", ret);

	return ret;
}
module_init(ath10k_pci_init);

static void __exit ath10k_pci_exit(void)
{
	pci_unregister_driver(&ath10k_pci_driver);
}
module_exit(ath10k_pci_exit);

MODULE_AUTHOR("Qualcomm Atheros");
MODULE_DESCRIPTION("Driver support for Atheros QCA988X PCIe devices");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" QCA988X_HW_2_0_FW_FILE);
MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" QCA988X_HW_2_0_OTP_FILE);
MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" QCA988X_HW_2_0_BOARD_DATA_FILE);