/*
 * Copyright (c) 2005-2011 Atheros Communications Inc.
 * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */
#include <linux/pci.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>

#include "targaddrs.h"
static unsigned int ath10k_target_ps;
module_param(ath10k_target_ps, uint, 0644);
MODULE_PARM_DESC(ath10k_target_ps, "Enable ath10k Target (SoC) PS option");
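/*
 * Usage sketch (illustrative, not part of this file): the power-save knob
 * can be given at load time or flipped later through sysfs, assuming the
 * module is built as ath10k_pci:
 *
 *    modprobe ath10k_pci ath10k_target_ps=1
 *    echo 0 > /sys/module/ath10k_pci/parameters/ath10k_target_ps
 */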
#define QCA988X_2_0_DEVICE_ID (0x003c)

static DEFINE_PCI_DEVICE_TABLE(ath10k_pci_id_table) = {
    { PCI_VDEVICE(ATHEROS, QCA988X_2_0_DEVICE_ID) }, /* PCI-E QCA988X V2 */
    {0}
};
static int ath10k_pci_diag_read_access(struct ath10k *ar, u32 address,
                                       u32 *data);

static void ath10k_pci_process_ce(struct ath10k *ar);
static int ath10k_pci_post_rx(struct ath10k *ar);
static int ath10k_pci_post_rx_pipe(struct ath10k_pci_pipe *pipe_info,
                                   int num);
static void ath10k_pci_rx_pipe_cleanup(struct ath10k_pci_pipe *pipe_info);
static void ath10k_pci_stop_ce(struct ath10k *ar);
static void ath10k_pci_device_reset(struct ath10k *ar);
static int ath10k_pci_reset_target(struct ath10k *ar);
static int ath10k_pci_start_intr(struct ath10k *ar);
static void ath10k_pci_stop_intr(struct ath10k *ar);
static const struct ce_attr host_ce_config_wlan[] = {
    /* CE0: host->target HTC control and raw streams */
        .flags = CE_ATTR_FLAGS,

    /* CE1: target->host HTT + HTC control */
        .flags = CE_ATTR_FLAGS,

    /* CE2: target->host WMI */
        .flags = CE_ATTR_FLAGS,

    /* CE3: host->target WMI */
        .flags = CE_ATTR_FLAGS,

    /* CE4: host->target HTT */
        .flags = CE_ATTR_FLAGS | CE_ATTR_DIS_INTR,
        .src_nentries = CE_HTT_H2T_MSG_SRC_NENTRIES,

    /* CE5: unused */
        .flags = CE_ATTR_FLAGS,

    /* CE6: target autonomous hif_memcpy */
        .flags = CE_ATTR_FLAGS,

    /* CE7: ce_diag, the Diagnostic Window */
        .flags = CE_ATTR_FLAGS,
        .src_sz_max = DIAG_TRANSFER_LIMIT,
/* Target firmware's Copy Engine configuration. */
static const struct ce_pipe_config target_ce_config_wlan[] = {
    /* CE0: host->target HTC control and raw streams */
        .pipedir = PIPEDIR_OUT,
        .flags = CE_ATTR_FLAGS,

    /* CE1: target->host HTT + HTC control */
        .pipedir = PIPEDIR_IN,
        .flags = CE_ATTR_FLAGS,

    /* CE2: target->host WMI */
        .pipedir = PIPEDIR_IN,
        .flags = CE_ATTR_FLAGS,

    /* CE3: host->target WMI */
        .pipedir = PIPEDIR_OUT,
        .flags = CE_ATTR_FLAGS,

    /* CE4: host->target HTT */
        .pipedir = PIPEDIR_OUT,
        .flags = CE_ATTR_FLAGS,

    /* NB: 50% of src nentries, since tx has 2 frags */

    /* CE5: unused */
        .pipedir = PIPEDIR_OUT,
        .flags = CE_ATTR_FLAGS,

    /* CE6: Reserved for target autonomous hif_memcpy */
        .pipedir = PIPEDIR_INOUT,
        .flags = CE_ATTR_FLAGS,

    /* CE7 used only by Host */
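/*
 * The two tables serve different sides of the link: host_ce_config_wlan
 * programs the host-side CE rings, while target_ce_config_wlan is copied
 * into target memory by ath10k_pci_init_config() below, so firmware and
 * host agree on each pipe's direction.
 */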
/*
 * Diagnostic read/write access is provided for startup/config/debug usage.
 * Caller must guarantee proper alignment, when applicable, and single user
 * at any moment.
 */
static int ath10k_pci_diag_read_mem(struct ath10k *ar, u32 address, void *data,
                                    int nbytes)
{
    struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
    unsigned int completed_nbytes, orig_nbytes, remaining_bytes;
    struct ath10k_ce_pipe *ce_diag;
    /* Host buffer address in CE space */
    u32 ce_data;
    dma_addr_t ce_data_base = 0;
    void *data_buf = NULL;

    /*
     * This code cannot handle reads to non-memory space. Redirect to the
     * register read fn but preserve the multi-word read capability of
     * this fn.
     */
    if (address < DRAM_BASE_ADDRESS) {
        if (!IS_ALIGNED(address, 4) ||
            !IS_ALIGNED((unsigned long)data, 4))
            return -EIO;

        while ((nbytes >= 4) && ((ret = ath10k_pci_diag_read_access(
                                  ar, address, (u32 *)data)) == 0)) {
            nbytes -= sizeof(u32);
            address += sizeof(u32);

    ce_diag = ar_pci->ce_diag;

    /*
     * Allocate a temporary bounce buffer to hold caller's data
     * to be DMA'ed from Target. This guarantees
     *   1) 4-byte alignment
     *   2) Buffer in DMA-able space
     */
    orig_nbytes = nbytes;
    data_buf = (unsigned char *)pci_alloc_consistent(ar_pci->pdev,
                                                     orig_nbytes,
                                                     &ce_data_base);

    memset(data_buf, 0, orig_nbytes);

    remaining_bytes = orig_nbytes;
    ce_data = ce_data_base;
    while (remaining_bytes) {
        nbytes = min_t(unsigned int, remaining_bytes,
                       DIAG_TRANSFER_LIMIT);

        ret = ath10k_ce_recv_buf_enqueue(ce_diag, NULL, ce_data);

        /* Request CE to send from Target(!) address to Host buffer */
        /*
         * The address supplied by the caller is in the
         * Target CPU virtual address space.
         *
         * In order to use this address with the diagnostic CE,
         * convert it from Target CPU virtual address space
         * to CE address space
         */
        address = TARG_CPU_SPACE_TO_CE_SPACE(ar, ar_pci->mem,
                                             address);
        ath10k_pci_sleep(ar);

        ret = ath10k_ce_send(ce_diag, NULL, (u32)address, nbytes, 0,
                             0);

        while (ath10k_ce_completed_send_next(ce_diag, NULL, &buf,
                                             &completed_nbytes,
                                             &id) != 0) {
            if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) {

        if (nbytes != completed_nbytes) {

        if (buf != (u32) address) {

        while (ath10k_ce_completed_recv_next(ce_diag, NULL, &buf,
                                             &completed_nbytes,
                                             &id, &flags) != 0) {
            if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) {

        if (nbytes != completed_nbytes) {

        if (buf != ce_data) {

        remaining_bytes -= nbytes;

    /* Copy data from allocated DMA buf to caller's buf */
    WARN_ON_ONCE(orig_nbytes & 3);
    for (i = 0; i < orig_nbytes / sizeof(__le32); i++) {
        ((u32 *)data)[i] =
            __le32_to_cpu(((__le32 *)data_buf)[i]);

    ath10k_dbg(ATH10K_DBG_PCI, "%s failure (0x%x)\n",
               __func__, address);

    pci_free_consistent(ar_pci->pdev, orig_nbytes,
                        data_buf, ce_data_base);
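/*
 * Illustrative use of the diagnostic window (a sketch, not code from this
 * driver); it assumes a powered-up ar and a 4-byte aligned buffer:
 *
 *    u32 val;
 *    int ret = ath10k_pci_diag_read_mem(ar, DRAM_BASE_ADDRESS, &val,
 *                                       sizeof(val));
 *    if (ret)
 *        ath10k_warn("diag read failed: %d\n", ret);
 */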
/* Read 4-byte aligned data from Target memory or register */
static int ath10k_pci_diag_read_access(struct ath10k *ar, u32 address,
                                       u32 *data)
{
    /* Assume range doesn't cross this boundary */
    if (address >= DRAM_BASE_ADDRESS)
        return ath10k_pci_diag_read_mem(ar, address, data, sizeof(u32));

    *data = ath10k_pci_read32(ar, address);
    ath10k_pci_sleep(ar);
static int ath10k_pci_diag_write_mem(struct ath10k *ar, u32 address,
                                     const void *data, int nbytes)
{
    struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
    unsigned int completed_nbytes, orig_nbytes, remaining_bytes;
    struct ath10k_ce_pipe *ce_diag;
    void *data_buf = NULL;
    u32 ce_data; /* Host buffer address in CE space */
    dma_addr_t ce_data_base = 0;

    ce_diag = ar_pci->ce_diag;

    /*
     * Allocate a temporary bounce buffer to hold caller's data
     * to be DMA'ed to Target. This guarantees
     *   1) 4-byte alignment
     *   2) Buffer in DMA-able space
     */
    orig_nbytes = nbytes;
    data_buf = (unsigned char *)pci_alloc_consistent(ar_pci->pdev,
                                                     orig_nbytes,
                                                     &ce_data_base);

    /* Copy caller's data to allocated DMA buf */
    WARN_ON_ONCE(orig_nbytes & 3);
    for (i = 0; i < orig_nbytes / sizeof(__le32); i++)
        ((__le32 *)data_buf)[i] = __cpu_to_le32(((u32 *)data)[i]);

    /*
     * The address supplied by the caller is in the
     * Target CPU virtual address space.
     *
     * In order to use this address with the diagnostic CE,
     * convert it from
     *    Target CPU virtual address space
     * to
     *    CE address space
     */
    address = TARG_CPU_SPACE_TO_CE_SPACE(ar, ar_pci->mem, address);
    ath10k_pci_sleep(ar);

    remaining_bytes = orig_nbytes;
    ce_data = ce_data_base;
    while (remaining_bytes) {
        /* FIXME: check cast */
        nbytes = min_t(int, remaining_bytes, DIAG_TRANSFER_LIMIT);

        /* Set up to receive directly into Target(!) address */
        ret = ath10k_ce_recv_buf_enqueue(ce_diag, NULL, address);

        /*
         * Request CE to send caller-supplied data that
         * was copied to bounce buffer to Target(!) address.
         */
        ret = ath10k_ce_send(ce_diag, NULL, (u32) ce_data,
                             nbytes, 0, 0);

        while (ath10k_ce_completed_send_next(ce_diag, NULL, &buf,
                                             &completed_nbytes,
                                             &id) != 0) {
            if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) {

        if (nbytes != completed_nbytes) {

        if (buf != ce_data) {

        while (ath10k_ce_completed_recv_next(ce_diag, NULL, &buf,
                                             &completed_nbytes,
                                             &id, &flags) != 0) {
            if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) {

        if (nbytes != completed_nbytes) {

        if (buf != address) {

        remaining_bytes -= nbytes;

    pci_free_consistent(ar_pci->pdev, orig_nbytes, data_buf,
                        ce_data_base);

    ath10k_dbg(ATH10K_DBG_PCI, "%s failure (0x%x)\n", __func__,
               address);

/* Write 4-byte aligned data to Target memory or register */
static int ath10k_pci_diag_write_access(struct ath10k *ar, u32 address,
                                        u32 data)
{
    /* Assume range doesn't cross this boundary */
    if (address >= DRAM_BASE_ADDRESS)
        return ath10k_pci_diag_write_mem(ar, address, &data,
                                         sizeof(u32));

    ath10k_pci_write32(ar, address, data);
    ath10k_pci_sleep(ar);
static bool ath10k_pci_target_is_awake(struct ath10k *ar)
{
    void __iomem *mem = ath10k_pci_priv(ar)->mem;
    u32 val;

    val = ioread32(mem + PCIE_LOCAL_BASE_ADDRESS +
                   RTC_STATE_ADDRESS);
    return (RTC_STATE_V_GET(val) == RTC_STATE_V_ON);
}

static void ath10k_pci_wait(struct ath10k *ar)
{
    while (n-- && !ath10k_pci_target_is_awake(ar))
        msleep(10);

    if (n < 0)
        ath10k_warn("Unable to wake up target\n");
}
int ath10k_do_pci_wake(struct ath10k *ar)
{
    struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
    void __iomem *pci_addr = ar_pci->mem;

    if (atomic_read(&ar_pci->keep_awake_count) == 0) {
        iowrite32(PCIE_SOC_WAKE_V_MASK,
                  pci_addr + PCIE_LOCAL_BASE_ADDRESS +
                  PCIE_SOC_WAKE_ADDRESS);
    }
    atomic_inc(&ar_pci->keep_awake_count);

    if (ar_pci->verified_awake)
        return 0;

    if (ath10k_pci_target_is_awake(ar)) {
        ar_pci->verified_awake = true;
        return 0;
    }

    if (tot_delay > PCIE_WAKE_TIMEOUT) {
        ath10k_warn("target took longer than %d us to wake up (awake count %d)\n",
                    PCIE_WAKE_TIMEOUT,
                    atomic_read(&ar_pci->keep_awake_count));
    }

    tot_delay += curr_delay;
}

void ath10k_do_pci_sleep(struct ath10k *ar)
{
    struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
    void __iomem *pci_addr = ar_pci->mem;

    if (atomic_dec_and_test(&ar_pci->keep_awake_count)) {
        ar_pci->verified_awake = false;
        iowrite32(PCIE_SOC_WAKE_RESET,
                  pci_addr + PCIE_LOCAL_BASE_ADDRESS +
                  PCIE_SOC_WAKE_ADDRESS);
    }
}
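/*
 * Pairing sketch (illustrative, not code from this file): every successful
 * ath10k_do_pci_wake() must be balanced by ath10k_do_pci_sleep();
 * keep_awake_count holds the SoC awake while any caller still has a wake
 * reference:
 *
 *    if (ath10k_do_pci_wake(ar) == 0) {
 *        val = ath10k_pci_read32(ar, addr);
 *        ath10k_do_pci_sleep(ar);
 *    }
 */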
/*
 * FIXME: Handle OOM properly.
 */
struct ath10k_pci_compl *get_free_compl(struct ath10k_pci_pipe *pipe_info)
{
    struct ath10k_pci_compl *compl = NULL;

    spin_lock_bh(&pipe_info->pipe_lock);
    if (list_empty(&pipe_info->compl_free)) {
        ath10k_warn("Completion buffers are full\n");

    compl = list_first_entry(&pipe_info->compl_free,
                             struct ath10k_pci_compl, list);
    list_del(&compl->list);

    spin_unlock_bh(&pipe_info->pipe_lock);
/* Called by lower (CE) layer when a send to Target completes. */
static void ath10k_pci_ce_send_done(struct ath10k_ce_pipe *ce_state,
                                    void *transfer_context,
                                    unsigned int transfer_id)
{
    struct ath10k *ar = ce_state->ar;
    struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
    struct ath10k_pci_pipe *pipe_info = &ar_pci->pipe_info[ce_state->id];
    struct ath10k_pci_compl *compl;
    bool process = false;

    /*
     * For the send completion of an item in sendlist, just
     * increment num_sends_allowed. The upper layer callback will
     * be triggered when last fragment is done with send.
     */
    if (transfer_context == CE_SENDLIST_ITEM_CTXT) {
        spin_lock_bh(&pipe_info->pipe_lock);
        pipe_info->num_sends_allowed++;
        spin_unlock_bh(&pipe_info->pipe_lock);
    }

    compl = get_free_compl(pipe_info);

    compl->state = ATH10K_PCI_COMPL_SEND;
    compl->ce_state = ce_state;
    compl->pipe_info = pipe_info;
    compl->skb = transfer_context;
    compl->nbytes = nbytes;
    compl->transfer_id = transfer_id;

    /*
     * Add the completion to the processing queue.
     */
    spin_lock_bh(&ar_pci->compl_lock);
    list_add_tail(&compl->list, &ar_pci->compl_process);
    spin_unlock_bh(&ar_pci->compl_lock);

    } while (ath10k_ce_completed_send_next(ce_state,

    /*
     * If only some of the items within a sendlist have completed,
     * don't invoke completion processing until the entire sendlist
     * has been processed.
     */

    ath10k_pci_process_ce(ar);
/* Called by lower (CE) layer when data is received from the Target. */
static void ath10k_pci_ce_recv_data(struct ath10k_ce_pipe *ce_state,
                                    void *transfer_context, u32 ce_data,
                                    unsigned int transfer_id,
{
    struct ath10k *ar = ce_state->ar;
    struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
    struct ath10k_pci_pipe *pipe_info = &ar_pci->pipe_info[ce_state->id];
    struct ath10k_pci_compl *compl;

    compl = get_free_compl(pipe_info);

    compl->state = ATH10K_PCI_COMPL_RECV;
    compl->ce_state = ce_state;
    compl->pipe_info = pipe_info;
    compl->skb = transfer_context;
    compl->nbytes = nbytes;
    compl->transfer_id = transfer_id;
    compl->flags = flags;

    skb = transfer_context;
    dma_unmap_single(ar->dev, ATH10K_SKB_CB(skb)->paddr,
                     skb->len + skb_tailroom(skb),
                     DMA_FROM_DEVICE);

    /*
     * Add the completion to the processing queue.
     */
    spin_lock_bh(&ar_pci->compl_lock);
    list_add_tail(&compl->list, &ar_pci->compl_process);
    spin_unlock_bh(&ar_pci->compl_lock);

    } while (ath10k_ce_completed_recv_next(ce_state,

    ath10k_pci_process_ce(ar);
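/*
 * Both the send and receive callbacks above only queue work: completions
 * land on ar_pci->compl_process and are drained by ath10k_pci_process_ce(),
 * which serializes all completion handling for the upper layers.
 */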
/* Send the first nbytes bytes of the buffer */
static int ath10k_pci_hif_send_head(struct ath10k *ar, u8 pipe_id,
                                    unsigned int transfer_id,
                                    unsigned int bytes, struct sk_buff *nbuf)
{
    struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(nbuf);
    struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
    struct ath10k_pci_pipe *pipe_info = &(ar_pci->pipe_info[pipe_id]);
    struct ath10k_ce_pipe *ce_hdl = pipe_info->ce_hdl;
    struct ce_sendlist sendlist;

    memset(&sendlist, 0, sizeof(struct ce_sendlist));

    len = min(bytes, nbuf->len);

    ath10k_warn("skb not aligned to 4-byte boundary (%d)\n", len);

    ath10k_dbg(ATH10K_DBG_PCI,
               "pci send data vaddr %p paddr 0x%llx len %d as %d bytes\n",
               nbuf->data, (unsigned long long) skb_cb->paddr,
               nbuf->len, len);
    ath10k_dbg_dump(ATH10K_DBG_PCI_DUMP, NULL,
                    "ath10k tx: ",
                    nbuf->data, nbuf->len);

    ath10k_ce_sendlist_buf_add(&sendlist, skb_cb->paddr, len, flags);

    /* Make sure we have resources to handle this request */
    spin_lock_bh(&pipe_info->pipe_lock);
    if (!pipe_info->num_sends_allowed) {
        ath10k_warn("Pipe: %d is full\n", pipe_id);
        spin_unlock_bh(&pipe_info->pipe_lock);
    }
    pipe_info->num_sends_allowed--;
    spin_unlock_bh(&pipe_info->pipe_lock);

    ret = ath10k_ce_sendlist_send(ce_hdl, nbuf, &sendlist, transfer_id);
    if (ret)
        ath10k_warn("CE send failed: %p\n", nbuf);
static u16 ath10k_pci_hif_get_free_queue_number(struct ath10k *ar, u8 pipe)
{
    struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
    struct ath10k_pci_pipe *pipe_info = &(ar_pci->pipe_info[pipe]);

    spin_lock_bh(&pipe_info->pipe_lock);
    ret = pipe_info->num_sends_allowed;
    spin_unlock_bh(&pipe_info->pipe_lock);
static void ath10k_pci_hif_dump_area(struct ath10k *ar)
{
    u32 reg_dump_area = 0;
    u32 reg_dump_values[REG_DUMP_COUNT_QCA988X] = {};

    ath10k_err("firmware crashed!\n");
    ath10k_err("hardware name %s version 0x%x\n",
               ar->hw_params.name, ar->target_version);
    ath10k_err("firmware version: %u.%u.%u.%u\n", ar->fw_version_major,
               ar->fw_version_minor, ar->fw_version_release,
               ar->fw_version_build);

    host_addr = host_interest_item_address(HI_ITEM(hi_failure_state));
    if (ath10k_pci_diag_read_mem(ar, host_addr,
                                 &reg_dump_area, sizeof(u32)) != 0) {
        ath10k_warn("could not read hi_failure_state\n");

    ath10k_err("target register Dump Location: 0x%08X\n", reg_dump_area);

    ret = ath10k_pci_diag_read_mem(ar, reg_dump_area,
                                   REG_DUMP_COUNT_QCA988X * sizeof(u32));
        ath10k_err("could not dump FW Dump Area\n");

    BUILD_BUG_ON(REG_DUMP_COUNT_QCA988X % 4);

    ath10k_err("target Register Dump\n");
    for (i = 0; i < REG_DUMP_COUNT_QCA988X; i += 4)
        ath10k_err("[%02d]: 0x%08X 0x%08X 0x%08X 0x%08X\n",
                   reg_dump_values[i + 1],
                   reg_dump_values[i + 2],
                   reg_dump_values[i + 3]);

    ieee80211_queue_work(ar->hw, &ar->restart_work);
static void ath10k_pci_hif_send_complete_check(struct ath10k *ar, u8 pipe,
{
    /*
     * Decide whether to actually poll for completions, or just
     * wait for a later chance.
     * If there seem to be plenty of resources left, then just wait
     * since checking involves reading a CE register, which is a
     * relatively expensive operation.
     */
    resources = ath10k_pci_hif_get_free_queue_number(ar, pipe);

    /*
     * If at least 50% of the total resources are still available,
     * don't bother checking again yet.
     */
    if (resources > (host_ce_config_wlan[pipe].src_nentries >> 1))
        return;

    ath10k_ce_per_engine_service(ar, pipe);
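/*
 * Example of the heuristic: with src_nentries = 32 the CE register is only
 * read once 16 or fewer send slots remain.
 */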
static void ath10k_pci_hif_set_callbacks(struct ath10k *ar,
                                         struct ath10k_hif_cb *callbacks)
{
    struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

    ath10k_dbg(ATH10K_DBG_PCI, "%s\n", __func__);

    memcpy(&ar_pci->msg_callbacks_current, callbacks,
           sizeof(ar_pci->msg_callbacks_current));
static int ath10k_pci_start_ce(struct ath10k *ar)
{
    struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
    struct ath10k_ce_pipe *ce_diag = ar_pci->ce_diag;
    const struct ce_attr *attr;
    struct ath10k_pci_pipe *pipe_info;
    struct ath10k_pci_compl *compl;
    int i, pipe_num, completions, disable_interrupts;

    spin_lock_init(&ar_pci->compl_lock);
    INIT_LIST_HEAD(&ar_pci->compl_process);

    for (pipe_num = 0; pipe_num < ar_pci->ce_count; pipe_num++) {
        pipe_info = &ar_pci->pipe_info[pipe_num];

        spin_lock_init(&pipe_info->pipe_lock);
        INIT_LIST_HEAD(&pipe_info->compl_free);

        /* Handle Diagnostic CE specially */
        if (pipe_info->ce_hdl == ce_diag)

        attr = &host_ce_config_wlan[pipe_num];

        if (attr->src_nentries) {
            disable_interrupts = attr->flags & CE_ATTR_DIS_INTR;
            ath10k_ce_send_cb_register(pipe_info->ce_hdl,
                                       ath10k_pci_ce_send_done,
                                       disable_interrupts);
            completions += attr->src_nentries;
            pipe_info->num_sends_allowed = attr->src_nentries - 1;
        }

        if (attr->dest_nentries) {
            ath10k_ce_recv_cb_register(pipe_info->ce_hdl,
                                       ath10k_pci_ce_recv_data);
            completions += attr->dest_nentries;
        }

        if (completions == 0)

        for (i = 0; i < completions; i++) {
            compl = kmalloc(sizeof(*compl), GFP_KERNEL);
                ath10k_warn("No memory for completion state\n");
                ath10k_pci_stop_ce(ar);

            compl->state = ATH10K_PCI_COMPL_FREE;
            list_add_tail(&compl->list, &pipe_info->compl_free);
static void ath10k_pci_stop_ce(struct ath10k *ar)
{
    struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
    struct ath10k_pci_compl *compl;

    ath10k_ce_disable_interrupts(ar);

    /* Cancel the pending tasklet */
    tasklet_kill(&ar_pci->intr_tq);

    for (i = 0; i < CE_COUNT; i++)
        tasklet_kill(&ar_pci->pipe_info[i].intr);

    /* Mark pending completions as aborted, so that upper layers free up
     * their associated resources */
    spin_lock_bh(&ar_pci->compl_lock);
    list_for_each_entry(compl, &ar_pci->compl_process, list) {
        skb = (struct sk_buff *)compl->skb;
        ATH10K_SKB_CB(skb)->is_aborted = true;
    }
    spin_unlock_bh(&ar_pci->compl_lock);
static void ath10k_pci_cleanup_ce(struct ath10k *ar)
{
    struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
    struct ath10k_pci_compl *compl, *tmp;
    struct ath10k_pci_pipe *pipe_info;
    struct sk_buff *netbuf;

    /* Free pending completions. */
    spin_lock_bh(&ar_pci->compl_lock);
    if (!list_empty(&ar_pci->compl_process))
        ath10k_warn("pending completions still present! possible memory leaks.\n");

    list_for_each_entry_safe(compl, tmp, &ar_pci->compl_process, list) {
        list_del(&compl->list);
        netbuf = (struct sk_buff *)compl->skb;
        dev_kfree_skb_any(netbuf);
    }
    spin_unlock_bh(&ar_pci->compl_lock);

    /* Free unused completions for each pipe. */
    for (pipe_num = 0; pipe_num < ar_pci->ce_count; pipe_num++) {
        pipe_info = &ar_pci->pipe_info[pipe_num];

        spin_lock_bh(&pipe_info->pipe_lock);
        list_for_each_entry_safe(compl, tmp,
                                 &pipe_info->compl_free, list) {
            list_del(&compl->list);
        }
        spin_unlock_bh(&pipe_info->pipe_lock);
static void ath10k_pci_process_ce(struct ath10k *ar)
{
    struct ath10k_pci *ar_pci = ar->hif.priv;
    struct ath10k_hif_cb *cb = &ar_pci->msg_callbacks_current;
    struct ath10k_pci_compl *compl;
    struct sk_buff *skb;
    unsigned int nbytes;
    int ret, send_done = 0;

    /* Upper layers aren't ready to handle tx/rx completions in parallel so
     * we must serialize all completion processing. */

    spin_lock_bh(&ar_pci->compl_lock);
    if (ar_pci->compl_processing) {
        spin_unlock_bh(&ar_pci->compl_lock);
        return;
    }
    ar_pci->compl_processing = true;
    spin_unlock_bh(&ar_pci->compl_lock);

    spin_lock_bh(&ar_pci->compl_lock);
    if (list_empty(&ar_pci->compl_process)) {
        spin_unlock_bh(&ar_pci->compl_lock);
    }
    compl = list_first_entry(&ar_pci->compl_process,
                             struct ath10k_pci_compl, list);
    list_del(&compl->list);
    spin_unlock_bh(&ar_pci->compl_lock);

    switch (compl->state) {
    case ATH10K_PCI_COMPL_SEND:
        cb->tx_completion(ar,
                          compl->skb,
                          compl->transfer_id);
        break;

    case ATH10K_PCI_COMPL_RECV:
        ret = ath10k_pci_post_rx_pipe(compl->pipe_info, 1);
        if (ret)
            ath10k_warn("Unable to post recv buffer for pipe: %d\n",
                        compl->pipe_info->pipe_num);

        skb = (struct sk_buff *)compl->skb;
        nbytes = compl->nbytes;

        ath10k_dbg(ATH10K_DBG_PCI,
                   "ath10k_pci_ce_recv_data netbuf=%p nbytes=%d\n",
                   skb, nbytes);
        ath10k_dbg_dump(ATH10K_DBG_PCI_DUMP, NULL,
                        "ath10k rx: ", skb->data, nbytes);

        if (skb->len + skb_tailroom(skb) >= nbytes) {
            skb_put(skb, nbytes);
            cb->rx_completion(ar, skb,
                              compl->pipe_info->pipe_num);
        } else {
            ath10k_warn("rxed more than expected (nbytes %d, max %d)",
                        nbytes,
                        skb->len + skb_tailroom(skb));
        }
        break;

    case ATH10K_PCI_COMPL_FREE:
        ath10k_warn("free completion cannot be processed\n");
        break;

    default:
        ath10k_warn("invalid completion state (%d)\n",
                    compl->state);
        break;
    }

    compl->state = ATH10K_PCI_COMPL_FREE;

    /*
     * Add completion back to the pipe's free list.
     */
    spin_lock_bh(&compl->pipe_info->pipe_lock);
    list_add_tail(&compl->list, &compl->pipe_info->compl_free);
    compl->pipe_info->num_sends_allowed += send_done;
    spin_unlock_bh(&compl->pipe_info->pipe_lock);

    spin_lock_bh(&ar_pci->compl_lock);
    ar_pci->compl_processing = false;
    spin_unlock_bh(&ar_pci->compl_lock);
/* TODO - temporary mapping while we have too few CE's */
static int ath10k_pci_hif_map_service_to_pipe(struct ath10k *ar,
                                              u16 service_id, u8 *ul_pipe,
                                              u8 *dl_pipe, int *ul_is_polled,
                                              int *dl_is_polled)
{
    /* polling for received messages not supported */

    switch (service_id) {
    case ATH10K_HTC_SVC_ID_HTT_DATA_MSG:
        /*
         * Host->target HTT gets its own pipe, so it can be polled
         * while other pipes are interrupt driven.
         */

        /*
         * Use the same target->host pipe for HTC ctrl, HTC raw
         * streams, and WMI.
         */

    case ATH10K_HTC_SVC_ID_RSVD_CTRL:
    case ATH10K_HTC_SVC_ID_TEST_RAW_STREAMS:
        /*
         * Note: HTC_RAW_STREAMS_SVC is currently unused, and
         * HTC_CTRL_RSVD_SVC could share the same pipe as the
         * WMI services. So, if another CE is needed, change
         * this to *ul_pipe = 3, which frees up CE 0.
         */

    case ATH10K_HTC_SVC_ID_WMI_DATA_BK:
    case ATH10K_HTC_SVC_ID_WMI_DATA_BE:
    case ATH10K_HTC_SVC_ID_WMI_DATA_VI:
    case ATH10K_HTC_SVC_ID_WMI_DATA_VO:

    case ATH10K_HTC_SVC_ID_WMI_CONTROL:

    /* pipe 6 reserved */
    /* pipe 7 reserved */

    *ul_is_polled =
        (host_ce_config_wlan[*ul_pipe].flags & CE_ATTR_DIS_INTR) != 0;
static void ath10k_pci_hif_get_default_pipe(struct ath10k *ar,
                                            u8 *ul_pipe, u8 *dl_pipe)
{
    int ul_is_polled, dl_is_polled;

    (void)ath10k_pci_hif_map_service_to_pipe(ar,
                                             ATH10K_HTC_SVC_ID_RSVD_CTRL,
                                             ul_pipe,
                                             dl_pipe,
                                             &ul_is_polled,
                                             &dl_is_polled);
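/*
 * Illustrative lookup (a sketch, not from this file): resolve the WMI
 * control service to its UL/DL pipe pair:
 *
 *    u8 ul_pipe, dl_pipe;
 *    int ul_polled, dl_polled;
 *
 *    ath10k_pci_hif_map_service_to_pipe(ar, ATH10K_HTC_SVC_ID_WMI_CONTROL,
 *                                       &ul_pipe, &dl_pipe,
 *                                       &ul_polled, &dl_polled);
 */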
static int ath10k_pci_post_rx_pipe(struct ath10k_pci_pipe *pipe_info,
                                   int num)
{
    struct ath10k *ar = pipe_info->hif_ce_state;
    struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
    struct ath10k_ce_pipe *ce_state = pipe_info->ce_hdl;
    struct sk_buff *skb;

    if (pipe_info->buf_sz == 0)
        return 0;

    for (i = 0; i < num; i++) {
        skb = dev_alloc_skb(pipe_info->buf_sz);
            ath10k_warn("could not allocate skbuff for pipe %d\n",

        WARN_ONCE((unsigned long)skb->data & 3, "unaligned skb");

        ce_data = dma_map_single(ar->dev, skb->data,
                                 skb->len + skb_tailroom(skb),
                                 DMA_FROM_DEVICE);

        if (unlikely(dma_mapping_error(ar->dev, ce_data))) {
            ath10k_warn("could not dma map skbuff\n");
            dev_kfree_skb_any(skb);

        ATH10K_SKB_CB(skb)->paddr = ce_data;

        pci_dma_sync_single_for_device(ar_pci->pdev, ce_data,
                                       pipe_info->buf_sz,
                                       PCI_DMA_FROMDEVICE);

        ret = ath10k_ce_recv_buf_enqueue(ce_state, (void *)skb,
            ath10k_warn("could not enqueue to pipe %d (%d)\n",

    ath10k_pci_rx_pipe_cleanup(pipe_info);
static int ath10k_pci_post_rx(struct ath10k *ar)
{
    struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
    struct ath10k_pci_pipe *pipe_info;
    const struct ce_attr *attr;
    int pipe_num, ret = 0;

    for (pipe_num = 0; pipe_num < ar_pci->ce_count; pipe_num++) {
        pipe_info = &ar_pci->pipe_info[pipe_num];
        attr = &host_ce_config_wlan[pipe_num];

        if (attr->dest_nentries == 0)

        ret = ath10k_pci_post_rx_pipe(pipe_info,
                                      attr->dest_nentries - 1);
            ath10k_warn("Unable to replenish recv buffers for pipe: %d\n",
                        pipe_num);

            for (; pipe_num >= 0; pipe_num--) {
                pipe_info = &ar_pci->pipe_info[pipe_num];
                ath10k_pci_rx_pipe_cleanup(pipe_info);
static int ath10k_pci_hif_start(struct ath10k *ar)
{
    struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

    ret = ath10k_pci_start_ce(ar);
        ath10k_warn("could not start CE (%d)\n", ret);

    /* Post buffers once to start things off. */
    ret = ath10k_pci_post_rx(ar);
        ath10k_warn("could not post rx pipes (%d)\n", ret);

    ar_pci->started = 1;
static void ath10k_pci_rx_pipe_cleanup(struct ath10k_pci_pipe *pipe_info)
{
    struct ath10k_pci *ar_pci;
    struct ath10k_ce_pipe *ce_hdl;
    struct sk_buff *netbuf;

    buf_sz = pipe_info->buf_sz;

    /* Unused Copy Engine */

    ar = pipe_info->hif_ce_state;
    ar_pci = ath10k_pci_priv(ar);

    if (!ar_pci->started)

    ce_hdl = pipe_info->ce_hdl;

    while (ath10k_ce_revoke_recv_next(ce_hdl, (void **)&netbuf,
                                      &ce_data) == 0) {
        dma_unmap_single(ar->dev, ATH10K_SKB_CB(netbuf)->paddr,
                         netbuf->len + skb_tailroom(netbuf),
                         DMA_FROM_DEVICE);
        dev_kfree_skb_any(netbuf);
static void ath10k_pci_tx_pipe_cleanup(struct ath10k_pci_pipe *pipe_info)
{
    struct ath10k_pci *ar_pci;
    struct ath10k_ce_pipe *ce_hdl;
    struct sk_buff *netbuf;
    unsigned int nbytes;

    buf_sz = pipe_info->buf_sz;

    /* Unused Copy Engine */

    ar = pipe_info->hif_ce_state;
    ar_pci = ath10k_pci_priv(ar);

    if (!ar_pci->started)

    ce_hdl = pipe_info->ce_hdl;

    while (ath10k_ce_cancel_send_next(ce_hdl, (void **)&netbuf,
                                      &ce_data, &nbytes, &id) == 0) {
        if (netbuf != CE_SENDLIST_ITEM_CTXT)
            /*
             * Indicate the completion to higher layer to free
             * the buffer.
             */
            ATH10K_SKB_CB(netbuf)->is_aborted = true;
            ar_pci->msg_callbacks_current.tx_completion(ar,
/*
 * Cleanup residual buffers for device shutdown:
 *    buffers that were enqueued for receive
 *    buffers that were to be sent
 * Note: Buffers that had completed but which were
 * not yet processed are on a completion queue. They
 * are handled when the completion thread shuts down.
 */
static void ath10k_pci_buffer_cleanup(struct ath10k *ar)
{
    struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

    for (pipe_num = 0; pipe_num < ar_pci->ce_count; pipe_num++) {
        struct ath10k_pci_pipe *pipe_info;

        pipe_info = &ar_pci->pipe_info[pipe_num];
        ath10k_pci_rx_pipe_cleanup(pipe_info);
        ath10k_pci_tx_pipe_cleanup(pipe_info);
static void ath10k_pci_ce_deinit(struct ath10k *ar)
{
    struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
    struct ath10k_pci_pipe *pipe_info;

    for (pipe_num = 0; pipe_num < ar_pci->ce_count; pipe_num++) {
        pipe_info = &ar_pci->pipe_info[pipe_num];
        if (pipe_info->ce_hdl) {
            ath10k_ce_deinit(pipe_info->ce_hdl);
            pipe_info->ce_hdl = NULL;
            pipe_info->buf_sz = 0;
static void ath10k_pci_disable_irqs(struct ath10k *ar)
{
    struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

    for (i = 0; i < max(1, ar_pci->num_msi_intrs); i++)
        disable_irq(ar_pci->pdev->irq + i);
static void ath10k_pci_hif_stop(struct ath10k *ar)
{
    struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

    ath10k_dbg(ATH10K_DBG_PCI, "%s\n", __func__);

    /* Irqs are never explicitly re-enabled. They are implicitly re-enabled
     * by ath10k_pci_start_intr(). */
    ath10k_pci_disable_irqs(ar);

    ath10k_pci_stop_ce(ar);

    /* At this point, asynchronous threads are stopped, the target should
     * not DMA nor interrupt. We process the leftovers and then free
     * everything else up. */

    ath10k_pci_process_ce(ar);
    ath10k_pci_cleanup_ce(ar);
    ath10k_pci_buffer_cleanup(ar);

    ar_pci->started = 0;
static int ath10k_pci_hif_exchange_bmi_msg(struct ath10k *ar,
                                           void *req, u32 req_len,
                                           void *resp, u32 *resp_len)
{
    struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
    struct ath10k_pci_pipe *pci_tx = &ar_pci->pipe_info[BMI_CE_NUM_TO_TARG];
    struct ath10k_pci_pipe *pci_rx = &ar_pci->pipe_info[BMI_CE_NUM_TO_HOST];
    struct ath10k_ce_pipe *ce_tx = pci_tx->ce_hdl;
    struct ath10k_ce_pipe *ce_rx = pci_rx->ce_hdl;
    dma_addr_t req_paddr = 0;
    dma_addr_t resp_paddr = 0;
    struct bmi_xfer xfer = {};
    void *treq, *tresp = NULL;

    if (resp && !resp_len)
        return -EINVAL;

    if (resp && resp_len && *resp_len == 0)
        return -EINVAL;

    treq = kmemdup(req, req_len, GFP_KERNEL);

    req_paddr = dma_map_single(ar->dev, treq, req_len, DMA_TO_DEVICE);
    ret = dma_mapping_error(ar->dev, req_paddr);

    if (resp && resp_len) {
        tresp = kzalloc(*resp_len, GFP_KERNEL);

        resp_paddr = dma_map_single(ar->dev, tresp, *resp_len,
                                    DMA_FROM_DEVICE);
        ret = dma_mapping_error(ar->dev, resp_paddr);

        xfer.wait_for_resp = true;

        ath10k_ce_recv_buf_enqueue(ce_rx, &xfer, resp_paddr);
    }

    init_completion(&xfer.done);

    ret = ath10k_ce_send(ce_tx, &xfer, req_paddr, req_len, -1, 0);

    ret = wait_for_completion_timeout(&xfer.done,
                                      BMI_COMMUNICATION_TIMEOUT_HZ);
        unsigned int unused_nbytes;
        unsigned int unused_id;

        ath10k_ce_cancel_send_next(ce_tx, NULL, &unused_buffer,
                                   &unused_nbytes, &unused_id);
        /* non-zero means we did not time out */

    ath10k_ce_revoke_recv_next(ce_rx, NULL, &unused_buffer);
    dma_unmap_single(ar->dev, resp_paddr,
                     *resp_len, DMA_FROM_DEVICE);

    dma_unmap_single(ar->dev, req_paddr, req_len, DMA_TO_DEVICE);

    if (ret == 0 && resp_len) {
        *resp_len = min(*resp_len, xfer.resp_len);
        memcpy(resp, tresp, xfer.resp_len);
    }
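/*
 * Illustrative BMI round trip (a sketch, not from this file), assuming
 * caller-owned req/resp buffers and a target still in the BMI phase:
 *
 *    u32 resp_len = sizeof(resp);
 *    ret = ath10k_pci_hif_exchange_bmi_msg(ar, &req, sizeof(req),
 *                                          &resp, &resp_len);
 */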
static void ath10k_pci_bmi_send_done(struct ath10k_ce_pipe *ce_state,
                                     void *transfer_context,
                                     unsigned int nbytes,
                                     unsigned int transfer_id)
{
    struct bmi_xfer *xfer = transfer_context;

    if (xfer->wait_for_resp)
        return;

    complete(&xfer->done);
}

static void ath10k_pci_bmi_recv_data(struct ath10k_ce_pipe *ce_state,
                                     void *transfer_context,
                                     unsigned int nbytes,
                                     unsigned int transfer_id,
                                     unsigned int flags)
{
    struct bmi_xfer *xfer = transfer_context;

    if (!xfer->wait_for_resp) {
        ath10k_warn("unexpected: BMI data received; ignoring\n");
        return;
    }

    xfer->resp_len = nbytes;
    complete(&xfer->done);
}
/*
 * Map from service/endpoint to Copy Engine.
 * This table is derived from the CE_PCI TABLE, above.
 * It is passed to the Target at startup for use by firmware.
 */
static const struct service_to_pipe target_service_to_ce_map_wlan[] = {
        ATH10K_HTC_SVC_ID_WMI_DATA_VO,
        PIPEDIR_OUT,        /* out = UL = host -> target */

        ATH10K_HTC_SVC_ID_WMI_DATA_VO,
        PIPEDIR_IN,         /* in = DL = target -> host */

        ATH10K_HTC_SVC_ID_WMI_DATA_BK,
        PIPEDIR_OUT,        /* out = UL = host -> target */

        ATH10K_HTC_SVC_ID_WMI_DATA_BK,
        PIPEDIR_IN,         /* in = DL = target -> host */

        ATH10K_HTC_SVC_ID_WMI_DATA_BE,
        PIPEDIR_OUT,        /* out = UL = host -> target */

        ATH10K_HTC_SVC_ID_WMI_DATA_BE,
        PIPEDIR_IN,         /* in = DL = target -> host */

        ATH10K_HTC_SVC_ID_WMI_DATA_VI,
        PIPEDIR_OUT,        /* out = UL = host -> target */

        ATH10K_HTC_SVC_ID_WMI_DATA_VI,
        PIPEDIR_IN,         /* in = DL = target -> host */

        ATH10K_HTC_SVC_ID_WMI_CONTROL,
        PIPEDIR_OUT,        /* out = UL = host -> target */

        ATH10K_HTC_SVC_ID_WMI_CONTROL,
        PIPEDIR_IN,         /* in = DL = target -> host */

        ATH10K_HTC_SVC_ID_RSVD_CTRL,
        PIPEDIR_OUT,        /* out = UL = host -> target */
        0,                  /* could be moved to 3 (share with WMI) */

        ATH10K_HTC_SVC_ID_RSVD_CTRL,
        PIPEDIR_IN,         /* in = DL = target -> host */

        ATH10K_HTC_SVC_ID_TEST_RAW_STREAMS, /* not currently used */
        PIPEDIR_OUT,        /* out = UL = host -> target */

        ATH10K_HTC_SVC_ID_TEST_RAW_STREAMS, /* not currently used */
        PIPEDIR_IN,         /* in = DL = target -> host */

        ATH10K_HTC_SVC_ID_HTT_DATA_MSG,
        PIPEDIR_OUT,        /* out = UL = host -> target */

        ATH10K_HTC_SVC_ID_HTT_DATA_MSG,
        PIPEDIR_IN,         /* in = DL = target -> host */

    /* (Additions here) */

    { /* Must be last */
/*
 * Send an interrupt to the device to wake up the Target CPU
 * so it has an opportunity to notice any changed state.
 */
static int ath10k_pci_wake_target_cpu(struct ath10k *ar)
{
    ret = ath10k_pci_diag_read_access(ar, SOC_CORE_BASE_ADDRESS |
                                      CORE_CTRL_ADDRESS,
                                      &core_ctrl);
        ath10k_warn("Unable to read core ctrl\n");

    /* A_INUM_FIRMWARE interrupt to Target CPU */
    core_ctrl |= CORE_CTRL_CPU_INTR_MASK;

    ret = ath10k_pci_diag_write_access(ar, SOC_CORE_BASE_ADDRESS |
                                       CORE_CTRL_ADDRESS, core_ctrl);
        ath10k_warn("Unable to set interrupt mask\n");
static int ath10k_pci_init_config(struct ath10k *ar)
{
    u32 interconnect_targ_addr;
    u32 pcie_state_targ_addr = 0;
    u32 pipe_cfg_targ_addr = 0;
    u32 svc_to_pipe_map = 0;
    u32 pcie_config_flags = 0;
    u32 ealloc_targ_addr;
    u32 flag2_targ_addr;

    /* Download to Target the CE Config and the service-to-CE map */
    interconnect_targ_addr =
        host_interest_item_address(HI_ITEM(hi_interconnect_state));

    /* Supply Target-side CE configuration */
    ret = ath10k_pci_diag_read_access(ar, interconnect_targ_addr,
                                      &pcie_state_targ_addr);
        ath10k_err("Failed to get pcie state addr: %d\n", ret);

    if (pcie_state_targ_addr == 0) {
        ath10k_err("Invalid pcie state addr\n");

    ret = ath10k_pci_diag_read_access(ar, pcie_state_targ_addr +
                                      offsetof(struct pcie_state,
                                               pipe_cfg_addr),
                                      &pipe_cfg_targ_addr);
        ath10k_err("Failed to get pipe cfg addr: %d\n", ret);

    if (pipe_cfg_targ_addr == 0) {
        ath10k_err("Invalid pipe cfg addr\n");

    ret = ath10k_pci_diag_write_mem(ar, pipe_cfg_targ_addr,
                                    target_ce_config_wlan,
                                    sizeof(target_ce_config_wlan));
        ath10k_err("Failed to write pipe cfg: %d\n", ret);

    ret = ath10k_pci_diag_read_access(ar, pcie_state_targ_addr +
                                      offsetof(struct pcie_state,
                                               svc_to_pipe_map),
                                      &svc_to_pipe_map);
        ath10k_err("Failed to get svc/pipe map: %d\n", ret);

    if (svc_to_pipe_map == 0) {
        ath10k_err("Invalid svc_to_pipe map\n");

    ret = ath10k_pci_diag_write_mem(ar, svc_to_pipe_map,
                                    target_service_to_ce_map_wlan,
                                    sizeof(target_service_to_ce_map_wlan));
        ath10k_err("Failed to write svc/pipe map: %d\n", ret);

    ret = ath10k_pci_diag_read_access(ar, pcie_state_targ_addr +
                                      offsetof(struct pcie_state,
                                               config_flags),
                                      &pcie_config_flags);
        ath10k_err("Failed to get pcie config_flags: %d\n", ret);

    pcie_config_flags &= ~PCIE_CONFIG_FLAG_ENABLE_L1;
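    /*
     * Clearing PCIE_CONFIG_FLAG_ENABLE_L1 presumably keeps the target-side
     * PCIe link from entering the L1 power state; this reading is inferred
     * from the flag name rather than documented here.
     */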
    ret = ath10k_pci_diag_write_mem(ar, pcie_state_targ_addr +
                                    offsetof(struct pcie_state, config_flags),
                                    &pcie_config_flags,
                                    sizeof(pcie_config_flags));
        ath10k_err("Failed to write pcie config_flags: %d\n", ret);

    /* configure early allocation */
    ealloc_targ_addr = host_interest_item_address(HI_ITEM(hi_early_alloc));

    ret = ath10k_pci_diag_read_access(ar, ealloc_targ_addr, &ealloc_value);
        ath10k_err("Failed to get early alloc val: %d\n", ret);

    /* first bank is switched to IRAM */
    ealloc_value |= ((HI_EARLY_ALLOC_MAGIC << HI_EARLY_ALLOC_MAGIC_SHIFT) &
                     HI_EARLY_ALLOC_MAGIC_MASK);
    ealloc_value |= ((1 << HI_EARLY_ALLOC_IRAM_BANKS_SHIFT) &
                     HI_EARLY_ALLOC_IRAM_BANKS_MASK);

    ret = ath10k_pci_diag_write_access(ar, ealloc_targ_addr, ealloc_value);
        ath10k_err("Failed to set early alloc val: %d\n", ret);

    /* Tell Target to proceed with initialization */
    flag2_targ_addr = host_interest_item_address(HI_ITEM(hi_option_flag2));

    ret = ath10k_pci_diag_read_access(ar, flag2_targ_addr, &flag2_value);
        ath10k_err("Failed to get option val: %d\n", ret);

    flag2_value |= HI_OPTION_EARLY_CFG_DONE;

    ret = ath10k_pci_diag_write_access(ar, flag2_targ_addr, flag2_value);
        ath10k_err("Failed to set option val: %d\n", ret);
static int ath10k_pci_ce_init(struct ath10k *ar)
{
    struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
    struct ath10k_pci_pipe *pipe_info;
    const struct ce_attr *attr;

    for (pipe_num = 0; pipe_num < ar_pci->ce_count; pipe_num++) {
        pipe_info = &ar_pci->pipe_info[pipe_num];
        pipe_info->pipe_num = pipe_num;
        pipe_info->hif_ce_state = ar;
        attr = &host_ce_config_wlan[pipe_num];

        pipe_info->ce_hdl = ath10k_ce_init(ar, pipe_num, attr);
        if (pipe_info->ce_hdl == NULL) {
            ath10k_err("Unable to initialize CE for pipe: %d\n",
                       pipe_num);

            /* It is safe to call it here. It checks if ce_hdl is
             * valid for each pipe */
            ath10k_pci_ce_deinit(ar);

        if (pipe_num == ar_pci->ce_count - 1) {
            /*
             * Reserve the ultimate CE for
             * diagnostic Window support
             */
            ar_pci->ce_diag =
                ar_pci->pipe_info[ar_pci->ce_count - 1].ce_hdl;

        pipe_info->buf_sz = (size_t) (attr->src_sz_max);

    /*
     * Initially, establish CE completion handlers for use with BMI.
     * These are overwritten with generic handlers after we exit BMI phase.
     */
    pipe_info = &ar_pci->pipe_info[BMI_CE_NUM_TO_TARG];
    ath10k_ce_send_cb_register(pipe_info->ce_hdl,
                               ath10k_pci_bmi_send_done, 0);

    pipe_info = &ar_pci->pipe_info[BMI_CE_NUM_TO_HOST];
    ath10k_ce_recv_cb_register(pipe_info->ce_hdl,
                               ath10k_pci_bmi_recv_data);
static void ath10k_pci_fw_interrupt_handler(struct ath10k *ar)
{
    struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
    u32 fw_indicator_address, fw_indicator;

    ath10k_pci_wake(ar);

    fw_indicator_address = ar_pci->fw_indicator_address;
    fw_indicator = ath10k_pci_read32(ar, fw_indicator_address);

    if (fw_indicator & FW_IND_EVENT_PENDING) {
        /* ACK: clear Target-side pending event */
        ath10k_pci_write32(ar, fw_indicator_address,
                           fw_indicator & ~FW_IND_EVENT_PENDING);

        if (ar_pci->started) {
            ath10k_pci_hif_dump_area(ar);
            /*
             * Probable Target failure before we're prepared
             * to handle it. Generally unexpected.
             */
            ath10k_warn("early firmware event indicated\n");

    ath10k_pci_sleep(ar);
static int ath10k_pci_hif_power_up(struct ath10k *ar)
{
    struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

    ret = ath10k_pci_start_intr(ar);
        ath10k_err("could not start interrupt handling (%d)\n", ret);

    /*
     * Bring the target up cleanly.
     *
     * The target may be in an undefined state with an AUX-powered Target
     * and a Host in WoW mode. If the Host crashes, loses power, or is
     * restarted (without unloading the driver) then the Target is left
     * (aux) powered and running. On a subsequent driver load, the Target
     * is in an unexpected state. We try to catch that here in order to
     * reset the Target and retry the probe.
     */
    ath10k_pci_device_reset(ar);

    ret = ath10k_pci_reset_target(ar);

    if (!test_bit(ATH10K_PCI_FEATURE_SOC_POWER_SAVE, ar_pci->features))
        /* Force AWAKE forever */
        ath10k_do_pci_wake(ar);

    ret = ath10k_pci_ce_init(ar);

    ret = ath10k_pci_init_config(ar);

    ret = ath10k_pci_wake_target_cpu(ar);
        ath10k_err("could not wake up target CPU (%d)\n", ret);

    ath10k_pci_ce_deinit(ar);

    if (!test_bit(ATH10K_PCI_FEATURE_SOC_POWER_SAVE, ar_pci->features))
        ath10k_do_pci_sleep(ar);

    ath10k_pci_stop_intr(ar);
static void ath10k_pci_hif_power_down(struct ath10k *ar)
{
    struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

    ath10k_pci_stop_intr(ar);

    ath10k_pci_ce_deinit(ar);
    if (!test_bit(ATH10K_PCI_FEATURE_SOC_POWER_SAVE, ar_pci->features))
        ath10k_do_pci_sleep(ar);
#define ATH10K_PCI_PM_CONTROL 0x44
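/*
 * 0x44 is this device's PCI power-management control/status register; per
 * the PCI PM spec the low two bits select the power state (0x0 = D0,
 * 0x3 = D3hot), which is what suspend/resume below toggle.
 */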
static int ath10k_pci_hif_suspend(struct ath10k *ar)
{
    struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
    struct pci_dev *pdev = ar_pci->pdev;

    pci_read_config_dword(pdev, ATH10K_PCI_PM_CONTROL, &val);

    if ((val & 0x000000ff) != 0x3) {
        pci_save_state(pdev);
        pci_disable_device(pdev);
        pci_write_config_dword(pdev, ATH10K_PCI_PM_CONTROL,
                               (val & 0xffffff00) | 0x03);
    }
}

static int ath10k_pci_hif_resume(struct ath10k *ar)
{
    struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
    struct pci_dev *pdev = ar_pci->pdev;

    pci_read_config_dword(pdev, ATH10K_PCI_PM_CONTROL, &val);

    if ((val & 0x000000ff) != 0) {
        pci_restore_state(pdev);
        pci_write_config_dword(pdev, ATH10K_PCI_PM_CONTROL,
                               val & 0xffffff00);
        /*
         * Suspend/Resume resets the PCI configuration space,
         * so we have to re-disable the RETRY_TIMEOUT register (0x41)
         * to keep PCI Tx retries from interfering with C3 CPU state
         */
        pci_read_config_dword(pdev, 0x40, &val);

        if ((val & 0x0000ff00) != 0)
            pci_write_config_dword(pdev, 0x40, val & 0xffff00ff);
    }
}
static const struct ath10k_hif_ops ath10k_pci_hif_ops = {
    .send_head = ath10k_pci_hif_send_head,
    .exchange_bmi_msg = ath10k_pci_hif_exchange_bmi_msg,
    .start = ath10k_pci_hif_start,
    .stop = ath10k_pci_hif_stop,
    .map_service_to_pipe = ath10k_pci_hif_map_service_to_pipe,
    .get_default_pipe = ath10k_pci_hif_get_default_pipe,
    .send_complete_check = ath10k_pci_hif_send_complete_check,
    .set_callbacks = ath10k_pci_hif_set_callbacks,
    .get_free_queue_number = ath10k_pci_hif_get_free_queue_number,
    .power_up = ath10k_pci_hif_power_up,
    .power_down = ath10k_pci_hif_power_down,
    .suspend = ath10k_pci_hif_suspend,
    .resume = ath10k_pci_hif_resume,
};
static void ath10k_pci_ce_tasklet(unsigned long ptr)
{
    struct ath10k_pci_pipe *pipe = (struct ath10k_pci_pipe *)ptr;
    struct ath10k_pci *ar_pci = pipe->ar_pci;

    ath10k_ce_per_engine_service(ar_pci->ar, pipe->pipe_num);
}

static void ath10k_msi_err_tasklet(unsigned long data)
{
    struct ath10k *ar = (struct ath10k *)data;

    ath10k_pci_fw_interrupt_handler(ar);
}
/*
 * Handler for a per-engine interrupt on a PARTICULAR CE.
 * This is used in cases where each CE has a private MSI interrupt.
 */
static irqreturn_t ath10k_pci_per_engine_handler(int irq, void *arg)
{
    struct ath10k *ar = arg;
    struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
    int ce_id = irq - ar_pci->pdev->irq - MSI_ASSIGN_CE_INITIAL;

    if (ce_id < 0 || ce_id >= ARRAY_SIZE(ar_pci->pipe_info)) {
        ath10k_warn("unexpected/invalid irq %d ce_id %d\n", irq, ce_id);

    /*
     * NOTE: We are able to derive ce_id from irq because we
     * use a one-to-one mapping for CEs 0..5.
     * CEs 6 & 7 do not use interrupts at all.
     *
     * This mapping must be kept in sync with the mapping
     * used by firmware.
     */
    tasklet_schedule(&ar_pci->pipe_info[ce_id].intr);
static irqreturn_t ath10k_pci_msi_fw_handler(int irq, void *arg)
{
    struct ath10k *ar = arg;
    struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

    tasklet_schedule(&ar_pci->msi_fw_err);
/*
 * Top-level interrupt handler for all PCI interrupts from a Target.
 * When a block of MSI interrupts is allocated, this top-level handler
 * is not used; instead, we directly call the correct sub-handler.
 */
static irqreturn_t ath10k_pci_interrupt_handler(int irq, void *arg)
{
    struct ath10k *ar = arg;
    struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

    if (ar_pci->num_msi_intrs == 0) {
        /*
         * IMPORTANT: the INTR_CLR register has to be set after
         * INTR_ENABLE is set to 0, otherwise the interrupt cannot be
         * cleared.
         */
        iowrite32(0, ar_pci->mem +
                  (SOC_CORE_BASE_ADDRESS |
                   PCIE_INTR_ENABLE_ADDRESS));
        iowrite32(PCIE_INTR_FIRMWARE_MASK |
                  PCIE_INTR_CE_MASK_ALL,
                  ar_pci->mem + (SOC_CORE_BASE_ADDRESS |
                                 PCIE_INTR_CLR_ADDRESS));
        /*
         * IMPORTANT: this extra read transaction is required to
         * flush the posted write buffer.
         */
        (void) ioread32(ar_pci->mem +
                        (SOC_CORE_BASE_ADDRESS |
                         PCIE_INTR_ENABLE_ADDRESS));

    tasklet_schedule(&ar_pci->intr_tq);
static void ath10k_pci_tasklet(unsigned long data)
{
    struct ath10k *ar = (struct ath10k *)data;
    struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

    ath10k_pci_fw_interrupt_handler(ar); /* FIXME: Handle FW error */
    ath10k_ce_per_engine_service_any(ar);

    if (ar_pci->num_msi_intrs == 0) {
        /* Enable Legacy PCI line interrupts */
        iowrite32(PCIE_INTR_FIRMWARE_MASK |
                  PCIE_INTR_CE_MASK_ALL,
                  ar_pci->mem + (SOC_CORE_BASE_ADDRESS |
                                 PCIE_INTR_ENABLE_ADDRESS));
        /*
         * IMPORTANT: this extra read transaction is required to
         * flush the posted write buffer
         */
        (void) ioread32(ar_pci->mem +
                        (SOC_CORE_BASE_ADDRESS |
                         PCIE_INTR_ENABLE_ADDRESS));
static int ath10k_pci_start_intr_msix(struct ath10k *ar, int num)
{
    struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

    ret = pci_enable_msi_block(ar_pci->pdev, num);

    ret = request_irq(ar_pci->pdev->irq + MSI_ASSIGN_FW,
                      ath10k_pci_msi_fw_handler,
                      IRQF_SHARED, "ath10k_pci", ar);
        ath10k_warn("request_irq(%d) failed %d\n",
                    ar_pci->pdev->irq + MSI_ASSIGN_FW, ret);

        pci_disable_msi(ar_pci->pdev);

    for (i = MSI_ASSIGN_CE_INITIAL; i <= MSI_ASSIGN_CE_MAX; i++) {
        ret = request_irq(ar_pci->pdev->irq + i,
                          ath10k_pci_per_engine_handler,
                          IRQF_SHARED, "ath10k_pci", ar);
            ath10k_warn("request_irq(%d) failed %d\n",
                        ar_pci->pdev->irq + i, ret);

            for (i--; i >= MSI_ASSIGN_CE_INITIAL; i--)
                free_irq(ar_pci->pdev->irq + i, ar);

            free_irq(ar_pci->pdev->irq + MSI_ASSIGN_FW, ar);
            pci_disable_msi(ar_pci->pdev);

    ath10k_info("MSI-X interrupt handling (%d intrs)\n", num);
static int ath10k_pci_start_intr_msi(struct ath10k *ar)
{
    struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

    ret = pci_enable_msi(ar_pci->pdev);

    ret = request_irq(ar_pci->pdev->irq,
                      ath10k_pci_interrupt_handler,
                      IRQF_SHARED, "ath10k_pci", ar);
        pci_disable_msi(ar_pci->pdev);

    ath10k_info("MSI interrupt handling\n");
static int ath10k_pci_start_intr_legacy(struct ath10k *ar)
{
    struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

    ret = request_irq(ar_pci->pdev->irq,
                      ath10k_pci_interrupt_handler,
                      IRQF_SHARED, "ath10k_pci", ar);

    /*
     * Make sure to wake the Target before enabling Legacy
     * Interrupt.
     */
    iowrite32(PCIE_SOC_WAKE_V_MASK,
              ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS +
              PCIE_SOC_WAKE_ADDRESS);

    ath10k_pci_wait(ar);

    /*
     * A potential race occurs here: The CORE_BASE write
     * depends on target correctly decoding AXI address but
     * host won't know when target writes BAR to CORE_CTRL.
     * This write might get lost if target has NOT written BAR.
     * For now, fix the race by repeating the write in the
     * synchronization checking below.
     */
    iowrite32(PCIE_INTR_FIRMWARE_MASK |
              PCIE_INTR_CE_MASK_ALL,
              ar_pci->mem + (SOC_CORE_BASE_ADDRESS |
                             PCIE_INTR_ENABLE_ADDRESS));
    iowrite32(PCIE_SOC_WAKE_RESET,
              ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS +
              PCIE_SOC_WAKE_ADDRESS);

    ath10k_info("legacy interrupt handling\n");
static int ath10k_pci_start_intr(struct ath10k *ar)
{
    struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
    int num = MSI_NUM_REQUEST;

    tasklet_init(&ar_pci->intr_tq, ath10k_pci_tasklet, (unsigned long) ar);
    tasklet_init(&ar_pci->msi_fw_err, ath10k_msi_err_tasklet,
                 (unsigned long) ar);

    for (i = 0; i < CE_COUNT; i++) {
        ar_pci->pipe_info[i].ar_pci = ar_pci;
        tasklet_init(&ar_pci->pipe_info[i].intr,
                     ath10k_pci_ce_tasklet,
                     (unsigned long)&ar_pci->pipe_info[i]);
    }

    if (!test_bit(ATH10K_PCI_FEATURE_MSI_X, ar_pci->features))

    ret = ath10k_pci_start_intr_msix(ar, num);

    ath10k_warn("MSI-X didn't succeed (%d), trying MSI\n", ret);

    ret = ath10k_pci_start_intr_msi(ar);

    ath10k_warn("MSI didn't succeed (%d), trying legacy INTR\n",
                ret);

    ret = ath10k_pci_start_intr_legacy(ar);

    ar_pci->num_msi_intrs = num;
    ar_pci->ce_count = CE_COUNT;
static void ath10k_pci_stop_intr(struct ath10k *ar)
{
    struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

    /* There's at least one interrupt regardless of whether it's legacy INTR
     * or MSI or MSI-X */
    for (i = 0; i < max(1, ar_pci->num_msi_intrs); i++)
        free_irq(ar_pci->pdev->irq + i, ar);

    if (ar_pci->num_msi_intrs > 0)
        pci_disable_msi(ar_pci->pdev);
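/*
 * This undoes ath10k_pci_start_intr(): one free_irq() per vector that was
 * requested, followed by MSI teardown if MSI/MSI-X had been enabled.
 */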
static int ath10k_pci_reset_target(struct ath10k *ar)
{
    struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
    int wait_limit = 300; /* 3 sec */

    /* Wait for Target to finish initialization before we proceed. */
    iowrite32(PCIE_SOC_WAKE_V_MASK,
              ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS +
              PCIE_SOC_WAKE_ADDRESS);

    ath10k_pci_wait(ar);

    while (wait_limit-- &&
           !(ioread32(ar_pci->mem + FW_INDICATOR_ADDRESS) &
             FW_IND_INITIALIZED)) {
        if (ar_pci->num_msi_intrs == 0)
            /* Fix potential race by repeating CORE_BASE writes */
            iowrite32(PCIE_INTR_FIRMWARE_MASK |
                      PCIE_INTR_CE_MASK_ALL,
                      ar_pci->mem + (SOC_CORE_BASE_ADDRESS |
                                     PCIE_INTR_ENABLE_ADDRESS));
    }

    if (wait_limit < 0) {
        ath10k_err("Target stalled\n");
        iowrite32(PCIE_SOC_WAKE_RESET,
                  ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS +
                  PCIE_SOC_WAKE_ADDRESS);
    }

    iowrite32(PCIE_SOC_WAKE_RESET,
              ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS +
              PCIE_SOC_WAKE_ADDRESS);
static void ath10k_pci_device_reset(struct ath10k *ar)
{
    if (!SOC_GLOBAL_RESET_ADDRESS)
        return;

    ath10k_pci_reg_write32(ar, PCIE_SOC_WAKE_ADDRESS,
                           PCIE_SOC_WAKE_V_MASK);
    for (i = 0; i < ATH_PCI_RESET_WAIT_MAX; i++) {
        if (ath10k_pci_target_is_awake(ar))
            break;
    }

    /* Put Target, including PCIe, into RESET. */
    val = ath10k_pci_reg_read32(ar, SOC_GLOBAL_RESET_ADDRESS);
    val |= 1;
    ath10k_pci_reg_write32(ar, SOC_GLOBAL_RESET_ADDRESS, val);

    for (i = 0; i < ATH_PCI_RESET_WAIT_MAX; i++) {
        if (ath10k_pci_reg_read32(ar, RTC_STATE_ADDRESS) &
            RTC_STATE_COLD_RESET_MASK)
            break;
    }

    /* Pull Target, including PCIe, out of RESET. */
    val &= ~1;
    ath10k_pci_reg_write32(ar, SOC_GLOBAL_RESET_ADDRESS, val);

    for (i = 0; i < ATH_PCI_RESET_WAIT_MAX; i++) {
        if (!(ath10k_pci_reg_read32(ar, RTC_STATE_ADDRESS) &
              RTC_STATE_COLD_RESET_MASK))
            break;
    }

    ath10k_pci_reg_write32(ar, PCIE_SOC_WAKE_ADDRESS, PCIE_SOC_WAKE_RESET);
}
static void ath10k_pci_dump_features(struct ath10k_pci *ar_pci)
{
    for (i = 0; i < ATH10K_PCI_FEATURE_COUNT; i++) {
        if (!test_bit(i, ar_pci->features))

        case ATH10K_PCI_FEATURE_MSI_X:
            ath10k_dbg(ATH10K_DBG_PCI, "device supports MSI-X\n");
        case ATH10K_PCI_FEATURE_SOC_POWER_SAVE:
            ath10k_dbg(ATH10K_DBG_PCI, "QCA98XX SoC power save enabled\n");
static int ath10k_pci_probe(struct pci_dev *pdev,
                            const struct pci_device_id *pci_dev)
{
    struct ath10k_pci *ar_pci;

    ath10k_dbg(ATH10K_DBG_PCI, "%s\n", __func__);

    ar_pci = kzalloc(sizeof(*ar_pci), GFP_KERNEL);

    ar_pci->pdev = pdev;
    ar_pci->dev = &pdev->dev;

    switch (pci_dev->device) {
    case QCA988X_2_0_DEVICE_ID:
        set_bit(ATH10K_PCI_FEATURE_MSI_X, ar_pci->features);

        ath10k_err("Unknown device ID: %d\n", pci_dev->device);

    if (ath10k_target_ps)
        set_bit(ATH10K_PCI_FEATURE_SOC_POWER_SAVE, ar_pci->features);

    ath10k_pci_dump_features(ar_pci);

    ar = ath10k_core_create(ar_pci, ar_pci->dev, &ath10k_pci_hif_ops);
        ath10k_err("ath10k_core_create failed!\n");

    ar_pci->fw_indicator_address = FW_INDICATOR_ADDRESS;
    atomic_set(&ar_pci->keep_awake_count, 0);

    pci_set_drvdata(pdev, ar);

    /*
     * Without any knowledge of the Host, the Target may have been reset or
     * power cycled and its Config Space may no longer reflect the PCI
     * address space that was assigned earlier by the PCI infrastructure.
     */
    ret = pci_assign_resource(pdev, BAR_NUM);
        ath10k_err("cannot assign PCI space: %d\n", ret);

    ret = pci_enable_device(pdev);
        ath10k_err("cannot enable PCI device: %d\n", ret);

    /* Request MMIO resources */
    ret = pci_request_region(pdev, BAR_NUM, "ath");
        ath10k_err("PCI MMIO reservation error: %d\n", ret);

    /*
     * Target structures have a limit of 32-bit DMA pointers.
     * DMA pointers can be wider than 32 bits by default on some systems.
     */
    ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
        ath10k_err("32-bit DMA not available: %d\n", ret);

    ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
        ath10k_err("cannot enable 32-bit consistent DMA\n");

    /* Set bus master bit in PCI_COMMAND to enable DMA */
    pci_set_master(pdev);

    /*
     * Temporary FIX: disable ASPM
     * Will be removed after the OTP is programmed
     */
    pci_read_config_dword(pdev, 0x80, &lcr_val);
    pci_write_config_dword(pdev, 0x80, (lcr_val & 0xffffff00));

    /* Arrange for access to Target SoC registers. */
    mem = pci_iomap(pdev, BAR_NUM, 0);
        ath10k_err("PCI iomap error\n");

    spin_lock_init(&ar_pci->ce_lock);

    ret = ath10k_core_register(ar);
        ath10k_err("could not register driver core (%d)\n", ret);

    pci_iounmap(pdev, mem);

    pci_clear_master(pdev);

    pci_release_region(pdev, BAR_NUM);

    pci_disable_device(pdev);

    pci_set_drvdata(pdev, NULL);
    ath10k_core_destroy(ar);

    /* call HIF PCI free here */
static void ath10k_pci_remove(struct pci_dev *pdev)
{
    struct ath10k *ar = pci_get_drvdata(pdev);
    struct ath10k_pci *ar_pci;

    ath10k_dbg(ATH10K_DBG_PCI, "%s\n", __func__);

    ar_pci = ath10k_pci_priv(ar);

    tasklet_kill(&ar_pci->msi_fw_err);

    ath10k_core_unregister(ar);

    pci_set_drvdata(pdev, NULL);
    pci_iounmap(pdev, ar_pci->mem);
    pci_release_region(pdev, BAR_NUM);
    pci_clear_master(pdev);
    pci_disable_device(pdev);

    ath10k_core_destroy(ar);
MODULE_DEVICE_TABLE(pci, ath10k_pci_id_table);

static struct pci_driver ath10k_pci_driver = {
    .name = "ath10k_pci",
    .id_table = ath10k_pci_id_table,
    .probe = ath10k_pci_probe,
    .remove = ath10k_pci_remove,
};
static int __init ath10k_pci_init(void)
{
    ret = pci_register_driver(&ath10k_pci_driver);
        ath10k_err("pci_register_driver failed [%d]\n", ret);
}
module_init(ath10k_pci_init);

static void __exit ath10k_pci_exit(void)
{
    pci_unregister_driver(&ath10k_pci_driver);
}
module_exit(ath10k_pci_exit);
MODULE_AUTHOR("Qualcomm Atheros");
MODULE_DESCRIPTION("Driver support for Atheros QCA988X PCIe devices");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" QCA988X_HW_2_0_FW_FILE);
MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" QCA988X_HW_2_0_OTP_FILE);
MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" QCA988X_HW_2_0_BOARD_DATA_FILE);