/*
 * Copyright (c) 2005-2011 Atheros Communications Inc.
 * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */
#include <linux/pci.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/bitops.h>

#include "targaddrs.h"
enum ath10k_pci_irq_mode {
	ATH10K_PCI_IRQ_AUTO = 0,
	ATH10K_PCI_IRQ_LEGACY = 1,
	ATH10K_PCI_IRQ_MSI = 2,
};

static unsigned int ath10k_target_ps;
static unsigned int ath10k_pci_irq_mode = ATH10K_PCI_IRQ_AUTO;

module_param(ath10k_target_ps, uint, 0644);
MODULE_PARM_DESC(ath10k_target_ps, "Enable ath10k Target (SoC) PS option");

module_param_named(irq_mode, ath10k_pci_irq_mode, uint, 0644);
MODULE_PARM_DESC(irq_mode, "0: auto, 1: legacy, 2: msi (default: 0)");
#define QCA988X_2_0_DEVICE_ID	(0x003c)

static const struct pci_device_id ath10k_pci_id_table[] = {
	{ PCI_VDEVICE(ATHEROS, QCA988X_2_0_DEVICE_ID) }, /* PCI-E QCA988X V2 */
static int ath10k_pci_diag_read_access(struct ath10k *ar, u32 address,
				       u32 *data);

static int ath10k_pci_post_rx(struct ath10k *ar);
static int ath10k_pci_post_rx_pipe(struct ath10k_pci_pipe *pipe_info,
				   int num);
static void ath10k_pci_rx_pipe_cleanup(struct ath10k_pci_pipe *pipe_info);
static int ath10k_pci_cold_reset(struct ath10k *ar);
static int ath10k_pci_warm_reset(struct ath10k *ar);
static int ath10k_pci_wait_for_target_init(struct ath10k *ar);
static int ath10k_pci_init_irq(struct ath10k *ar);
static int ath10k_pci_deinit_irq(struct ath10k *ar);
static int ath10k_pci_request_irq(struct ath10k *ar);
static void ath10k_pci_free_irq(struct ath10k *ar);
static int ath10k_pci_bmi_wait(struct ath10k_ce_pipe *tx_pipe,
			       struct ath10k_ce_pipe *rx_pipe,
			       struct bmi_xfer *xfer);
static const struct ce_attr host_ce_config_wlan[] = {
	/* CE0: host->target HTC control and raw streams */
		.flags = CE_ATTR_FLAGS,

	/* CE1: target->host HTT + HTC control */
		.flags = CE_ATTR_FLAGS,

	/* CE2: target->host WMI */
		.flags = CE_ATTR_FLAGS,

	/* CE3: host->target WMI */
		.flags = CE_ATTR_FLAGS,

	/* CE4: host->target HTT */
		.flags = CE_ATTR_FLAGS | CE_ATTR_DIS_INTR,
		.src_nentries = CE_HTT_H2T_MSG_SRC_NENTRIES,

	/* CE5: unused */
		.flags = CE_ATTR_FLAGS,

	/* CE6: target autonomous hif_memcpy */
		.flags = CE_ATTR_FLAGS,

	/* CE7: ce_diag, the Diagnostic Window */
		.flags = CE_ATTR_FLAGS,
		.src_sz_max = DIAG_TRANSFER_LIMIT,
/* Target firmware's Copy Engine configuration. */
static const struct ce_pipe_config target_ce_config_wlan[] = {
	/* CE0: host->target HTC control and raw streams */
		.pipedir = PIPEDIR_OUT,
		.flags = CE_ATTR_FLAGS,

	/* CE1: target->host HTT + HTC control */
		.pipedir = PIPEDIR_IN,
		.flags = CE_ATTR_FLAGS,

	/* CE2: target->host WMI */
		.pipedir = PIPEDIR_IN,
		.flags = CE_ATTR_FLAGS,

	/* CE3: host->target WMI */
		.pipedir = PIPEDIR_OUT,
		.flags = CE_ATTR_FLAGS,

	/* CE4: host->target HTT */
		.pipedir = PIPEDIR_OUT,
		.flags = CE_ATTR_FLAGS,

	/* NB: 50% of src nentries, since tx has 2 frags */
	/* CE5: unused */
		.pipedir = PIPEDIR_OUT,
		.flags = CE_ATTR_FLAGS,

	/* CE6: Reserved for target autonomous hif_memcpy */
		.pipedir = PIPEDIR_INOUT,
		.flags = CE_ATTR_FLAGS,

	/* CE7 used only by Host */
static bool ath10k_pci_irq_pending(struct ath10k *ar)
	/* Check if the shared legacy irq is for us */
	cause = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
				  PCIE_INTR_CAUSE_ADDRESS);
	if (cause & (PCIE_INTR_FIRMWARE_MASK | PCIE_INTR_CE_MASK_ALL))
		return true;

	return false;

static void ath10k_pci_disable_and_clear_legacy_irq(struct ath10k *ar)
	/* IMPORTANT: INTR_CLR register has to be set after
	 * INTR_ENABLE is set to 0, otherwise the interrupt cannot be
	 * cleared. */
	ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + PCIE_INTR_ENABLE_ADDRESS,
			   0);
	ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + PCIE_INTR_CLR_ADDRESS,
			   PCIE_INTR_FIRMWARE_MASK | PCIE_INTR_CE_MASK_ALL);

	/* IMPORTANT: this extra read transaction is required to
	 * flush the posted write buffer. */
	(void)ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
				PCIE_INTR_ENABLE_ADDRESS);

static void ath10k_pci_enable_legacy_irq(struct ath10k *ar)
	ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS +
			   PCIE_INTR_ENABLE_ADDRESS,
			   PCIE_INTR_FIRMWARE_MASK | PCIE_INTR_CE_MASK_ALL);

	/* IMPORTANT: this extra read transaction is required to
	 * flush the posted write buffer. */
	(void)ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
				PCIE_INTR_ENABLE_ADDRESS);
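
/*
 * Illustrative sketch (not in the original source): the write-then-read-back
 * idiom used by the two functions above, generalized. PCI MMIO writes are
 * posted, so a dummy read from the same BAR forces them out to the device.
 * The helper name below is hypothetical.
 */
static inline void __maybe_unused ath10k_pci_write32_flush(struct ath10k *ar,
							   u32 offset, u32 value)
{
	ath10k_pci_write32(ar, offset, value);

	/* Dummy read back from the same register to flush the posted write */
	(void)ath10k_pci_read32(ar, offset);
}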
static irqreturn_t ath10k_pci_early_irq_handler(int irq, void *arg)
	struct ath10k *ar = arg;
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

	if (ar_pci->num_msi_intrs == 0) {
		if (!ath10k_pci_irq_pending(ar))
			return IRQ_NONE;

		ath10k_pci_disable_and_clear_legacy_irq(ar);
	}

	tasklet_schedule(&ar_pci->early_irq_tasklet);

	return IRQ_HANDLED;

static int ath10k_pci_request_early_irq(struct ath10k *ar)
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	int ret;

	/* Regardless of whether MSI-X/MSI/legacy irqs have been set up, the
	 * first interrupt from the irq vector is triggered in all cases for
	 * FW indication/errors. */
	ret = request_irq(ar_pci->pdev->irq, ath10k_pci_early_irq_handler,
			  IRQF_SHARED, "ath10k_pci (early)", ar);
	if (ret)
		ath10k_warn("failed to request early irq: %d\n", ret);

	return ret;

static void ath10k_pci_free_early_irq(struct ath10k *ar)
	free_irq(ath10k_pci_priv(ar)->pdev->irq, ar);
/*
 * Diagnostic read/write access is provided for startup/config/debug usage.
 * Caller must guarantee proper alignment, when applicable, and single user
 * at a time.
 */
static int ath10k_pci_diag_read_mem(struct ath10k *ar, u32 address, void *data,
				    int nbytes)
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	int ret = 0;
	u32 buf;
	unsigned int completed_nbytes, orig_nbytes, remaining_bytes;
	unsigned int id;
	unsigned int flags;
	struct ath10k_ce_pipe *ce_diag;
	/* Host buffer address in CE space */
	u32 ce_data;
	dma_addr_t ce_data_base = 0;
	void *data_buf = NULL;
	int i;

	/*
	 * This code cannot handle reads to non-memory space. Redirect to the
	 * register read fn but preserve the multi word read capability of
	 * this fn.
	 */
	if (address < DRAM_BASE_ADDRESS) {
		if (!IS_ALIGNED(address, 4) ||
		    !IS_ALIGNED((unsigned long)data, 4))
			return -EIO;

		while ((nbytes >= 4) && ((ret = ath10k_pci_diag_read_access(
					  ar, address, (u32 *)data)) == 0)) {
			nbytes -= sizeof(u32);
			address += sizeof(u32);
	ce_diag = ar_pci->ce_diag;

	/*
	 * Allocate a temporary bounce buffer to hold caller's data
	 * to be DMA'ed from Target. This guarantees
	 *   1) 4-byte alignment
	 *   2) Buffer in DMA-able space
	 */
	orig_nbytes = nbytes;
	data_buf = (unsigned char *)pci_alloc_consistent(ar_pci->pdev,
							 orig_nbytes,
							 &ce_data_base);
	memset(data_buf, 0, orig_nbytes);

	remaining_bytes = orig_nbytes;
	ce_data = ce_data_base;
	while (remaining_bytes) {
		nbytes = min_t(unsigned int, remaining_bytes,
			       DIAG_TRANSFER_LIMIT);

		ret = ath10k_ce_recv_buf_enqueue(ce_diag, NULL, ce_data);
		if (ret != 0)
			goto done;

		/* Request CE to send from Target(!) address to Host buffer */
		/*
		 * The address supplied by the caller is in the
		 * Target CPU virtual address space.
		 *
		 * In order to use this address with the diagnostic CE,
		 * convert it from Target CPU virtual address space
		 * to CE address space.
		 */
		address = TARG_CPU_SPACE_TO_CE_SPACE(ar, ar_pci->mem,
						     address);
		ath10k_pci_sleep(ar);

		ret = ath10k_ce_send(ce_diag, NULL, (u32)address, nbytes, 0,
				     0);
		if (ret)
			goto done;

		i = 0;
		while (ath10k_ce_completed_send_next(ce_diag, NULL, &buf,
						     &completed_nbytes,
						     &id) != 0) {
			if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) {
				ret = -EBUSY;
				goto done;
			}
		}

		if (nbytes != completed_nbytes) {
			ret = -EIO;
			goto done;
		}

		if (buf != (u32)address) {
			ret = -EIO;
			goto done;
		}

		i = 0;
		while (ath10k_ce_completed_recv_next(ce_diag, NULL, &buf,
						     &completed_nbytes,
						     &id, &flags) != 0) {
			if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) {
				ret = -EBUSY;
				goto done;
			}
		}

		if (nbytes != completed_nbytes) {
			ret = -EIO;
			goto done;
		}

		if (buf != ce_data) {
			ret = -EIO;
			goto done;
		}

		remaining_bytes -= nbytes;
	}

done:
	if (ret == 0) {
		/* Copy data from allocated DMA buf to caller's buf */
		WARN_ON_ONCE(orig_nbytes & 3);
		for (i = 0; i < orig_nbytes / sizeof(__le32); i++) {
			((u32 *)data)[i] =
				__le32_to_cpu(((__le32 *)data_buf)[i]);
		}
	} else
		ath10k_dbg(ATH10K_DBG_PCI, "%s failure (0x%x)\n",
			   __func__, address);

	if (data_buf)
		pci_free_consistent(ar_pci->pdev, orig_nbytes,
				    data_buf, ce_data_base);

	return ret;
/* Read 4-byte aligned data from Target memory or register */
static int ath10k_pci_diag_read_access(struct ath10k *ar, u32 address,
				       u32 *data)
	/* Assume range doesn't cross this boundary */
	if (address >= DRAM_BASE_ADDRESS)
		return ath10k_pci_diag_read_mem(ar, address, data, sizeof(u32));

	ath10k_pci_wake(ar);
	*data = ath10k_pci_read32(ar, address);
	ath10k_pci_sleep(ar);

	return 0;
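
/*
 * Illustrative usage sketch (not part of the original driver): pulling a
 * single host_interest item back through the diagnostic interface. The
 * helper name is hypothetical; host_interest_item_address() and HI_ITEM()
 * are the same accessors this file uses in ath10k_pci_hif_dump_area().
 */
static int __maybe_unused ath10k_pci_diag_read_hi_example(struct ath10k *ar,
							  u32 *value)
{
	u32 host_addr;

	/* Resolve the target-side address of hi_failure_state, then read
	 * 4 bytes back over the diagnostic CE. */
	host_addr = host_interest_item_address(HI_ITEM(hi_failure_state));
	return ath10k_pci_diag_read_access(ar, host_addr, value);
}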
static int ath10k_pci_diag_write_mem(struct ath10k *ar, u32 address,
				     const void *data, int nbytes)
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	int ret = 0;
	u32 buf;
	unsigned int completed_nbytes, orig_nbytes, remaining_bytes;
	unsigned int id;
	unsigned int flags;
	struct ath10k_ce_pipe *ce_diag;
	void *data_buf = NULL;
	u32 ce_data;	/* Host buffer address in CE space */
	dma_addr_t ce_data_base = 0;
	int i;

	ce_diag = ar_pci->ce_diag;

	/*
	 * Allocate a temporary bounce buffer to hold caller's data
	 * to be DMA'ed to Target. This guarantees
	 *   1) 4-byte alignment
	 *   2) Buffer in DMA-able space
	 */
	orig_nbytes = nbytes;
	data_buf = (unsigned char *)pci_alloc_consistent(ar_pci->pdev,
							 orig_nbytes,
							 &ce_data_base);

	/* Copy caller's data to allocated DMA buf */
	WARN_ON_ONCE(orig_nbytes & 3);
	for (i = 0; i < orig_nbytes / sizeof(__le32); i++)
		((__le32 *)data_buf)[i] = __cpu_to_le32(((u32 *)data)[i]);

	/*
	 * The address supplied by the caller is in the
	 * Target CPU virtual address space.
	 *
	 * In order to use this address with the diagnostic CE,
	 * convert it from Target CPU virtual address space
	 * to CE address space.
	 */
	address = TARG_CPU_SPACE_TO_CE_SPACE(ar, ar_pci->mem, address);
	ath10k_pci_sleep(ar);

	remaining_bytes = orig_nbytes;
	ce_data = ce_data_base;
	while (remaining_bytes) {
		/* FIXME: check cast */
		nbytes = min_t(int, remaining_bytes, DIAG_TRANSFER_LIMIT);

		/* Set up to receive directly into Target(!) address */
		ret = ath10k_ce_recv_buf_enqueue(ce_diag, NULL, address);
		if (ret != 0)
			goto done;

		/*
		 * Request CE to send caller-supplied data that
		 * was copied to bounce buffer to Target(!) address.
		 */
		ret = ath10k_ce_send(ce_diag, NULL, (u32)ce_data,
				     nbytes, 0, 0);
		if (ret != 0)
			goto done;

		i = 0;
		while (ath10k_ce_completed_send_next(ce_diag, NULL, &buf,
						     &completed_nbytes,
						     &id) != 0) {
			if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) {
				ret = -EBUSY;
				goto done;
			}
		}

		if (nbytes != completed_nbytes) {
			ret = -EIO;
			goto done;
		}

		if (buf != ce_data) {
			ret = -EIO;
			goto done;
		}

		i = 0;
		while (ath10k_ce_completed_recv_next(ce_diag, NULL, &buf,
						     &completed_nbytes,
						     &id, &flags) != 0) {
			if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) {
				ret = -EBUSY;
				goto done;
			}
		}

		if (nbytes != completed_nbytes) {
			ret = -EIO;
			goto done;
		}

		if (buf != address) {
			ret = -EIO;
			goto done;
		}

		remaining_bytes -= nbytes;
	}

done:
	if (data_buf)
		pci_free_consistent(ar_pci->pdev, orig_nbytes, data_buf,
				    ce_data_base);

	if (ret != 0)
		ath10k_dbg(ATH10K_DBG_PCI, "%s failure (0x%x)\n", __func__,
			   address);

	return ret;
/* Write 4-byte aligned data to Target memory or register */
static int ath10k_pci_diag_write_access(struct ath10k *ar, u32 address,
					u32 data)
	/* Assume range doesn't cross this boundary */
	if (address >= DRAM_BASE_ADDRESS)
		return ath10k_pci_diag_write_mem(ar, address, &data,
						 sizeof(u32));

	ath10k_pci_wake(ar);
	ath10k_pci_write32(ar, address, data);
	ath10k_pci_sleep(ar);

	return 0;
static bool ath10k_pci_target_is_awake(struct ath10k *ar)
	void __iomem *mem = ath10k_pci_priv(ar)->mem;
	u32 val;

	val = ioread32(mem + PCIE_LOCAL_BASE_ADDRESS +
		       RTC_STATE_ADDRESS);
	return (RTC_STATE_V_GET(val) == RTC_STATE_V_ON);

int ath10k_do_pci_wake(struct ath10k *ar)
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	void __iomem *pci_addr = ar_pci->mem;
	int tot_delay = 0;
	int curr_delay = 5;

	if (atomic_read(&ar_pci->keep_awake_count) == 0) {
		/* Force AWAKE */
		iowrite32(PCIE_SOC_WAKE_V_MASK,
			  pci_addr + PCIE_LOCAL_BASE_ADDRESS +
			  PCIE_SOC_WAKE_ADDRESS);
	}
	atomic_inc(&ar_pci->keep_awake_count);

	if (ar_pci->verified_awake)
		return 0;

	for (;;) {
		if (ath10k_pci_target_is_awake(ar)) {
			ar_pci->verified_awake = true;
			return 0;
		}

		if (tot_delay > PCIE_WAKE_TIMEOUT) {
			ath10k_warn("target took longer than %d us to wake up (awake count %d)\n",
				    PCIE_WAKE_TIMEOUT,
				    atomic_read(&ar_pci->keep_awake_count));
			return -ETIMEDOUT;
		}

		udelay(curr_delay);
		tot_delay += curr_delay;
	}

void ath10k_do_pci_sleep(struct ath10k *ar)
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	void __iomem *pci_addr = ar_pci->mem;

	if (atomic_dec_and_test(&ar_pci->keep_awake_count)) {
		/* Allow sleep */
		ar_pci->verified_awake = false;
		iowrite32(PCIE_SOC_WAKE_RESET,
			  pci_addr + PCIE_LOCAL_BASE_ADDRESS +
			  PCIE_SOC_WAKE_ADDRESS);
	}
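
/*
 * Illustrative sketch (not from the original driver): the wake/sleep pair
 * above is reference counted, so callers bracket register accesses with it.
 * A hypothetical helper reading one SoC register might look like this:
 */
static int __maybe_unused ath10k_pci_read_reg_example(struct ath10k *ar,
						      u32 offset, u32 *val)
{
	int ret;

	ret = ath10k_do_pci_wake(ar);	/* takes a keep_awake reference */
	if (ret)
		return ret;

	*val = ath10k_pci_read32(ar, offset);

	ath10k_do_pci_sleep(ar);	/* drops the reference; may re-arm PS */
	return 0;
}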
/* Called by lower (CE) layer when a send to Target completes. */
static void ath10k_pci_ce_send_done(struct ath10k_ce_pipe *ce_state)
	struct ath10k *ar = ce_state->ar;
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	struct ath10k_hif_cb *cb = &ar_pci->msg_callbacks_current;
	void *transfer_context;
	u32 ce_data;
	unsigned int nbytes;
	unsigned int transfer_id;

	while (ath10k_ce_completed_send_next(ce_state, &transfer_context,
					     &ce_data, &nbytes,
					     &transfer_id) == 0) {
		/* no need to call tx completion for NULL pointers */
		if (transfer_context == NULL)
			continue;

		cb->tx_completion(ar, transfer_context, transfer_id);
	}

/* Called by lower (CE) layer when data is received from the Target. */
static void ath10k_pci_ce_recv_data(struct ath10k_ce_pipe *ce_state)
	struct ath10k *ar = ce_state->ar;
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	struct ath10k_pci_pipe *pipe_info = &ar_pci->pipe_info[ce_state->id];
	struct ath10k_hif_cb *cb = &ar_pci->msg_callbacks_current;
	struct sk_buff *skb;
	void *transfer_context;
	u32 ce_data;
	unsigned int nbytes, max_nbytes;
	unsigned int transfer_id;
	unsigned int flags;
	int err;

	while (ath10k_ce_completed_recv_next(ce_state, &transfer_context,
					     &ce_data, &nbytes, &transfer_id,
					     &flags) == 0) {
		err = ath10k_pci_post_rx_pipe(pipe_info, 1);
		if (unlikely(err))
			ath10k_warn("failed to replenish CE rx ring %d: %d\n",
				    pipe_info->pipe_num, err);

		skb = transfer_context;
		max_nbytes = skb->len + skb_tailroom(skb);
		dma_unmap_single(ar->dev, ATH10K_SKB_CB(skb)->paddr,
				 max_nbytes, DMA_FROM_DEVICE);

		if (unlikely(max_nbytes < nbytes)) {
			ath10k_warn("rxed more than expected (nbytes %d, max %d)\n",
				    nbytes, max_nbytes);
			dev_kfree_skb_any(skb);
			continue;
		}

		skb_put(skb, nbytes);
		cb->rx_completion(ar, skb, pipe_info->pipe_num);
	}
static int ath10k_pci_hif_tx_sg(struct ath10k *ar, u8 pipe_id,
				struct ath10k_hif_sg_item *items, int n_items)
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	struct ath10k_pci_pipe *pci_pipe = &ar_pci->pipe_info[pipe_id];
	struct ath10k_ce_pipe *ce_pipe = pci_pipe->ce_hdl;
	struct ath10k_ce_ring *src_ring = ce_pipe->src_ring;
	unsigned int nentries_mask = src_ring->nentries_mask;
	unsigned int sw_index = src_ring->sw_index;
	unsigned int write_index = src_ring->write_index;
	int err, i;

	spin_lock_bh(&ar_pci->ce_lock);

	if (unlikely(CE_RING_DELTA(nentries_mask,
				   write_index, sw_index - 1) < n_items)) {
		err = -ENOBUFS;
		goto unlock;
	}

	for (i = 0; i < n_items - 1; i++) {
		ath10k_dbg(ATH10K_DBG_PCI,
			   "pci tx item %d paddr 0x%08x len %d n_items %d\n",
			   i, items[i].paddr, items[i].len, n_items);
		ath10k_dbg_dump(ATH10K_DBG_PCI_DUMP, NULL, "item data: ",
				items[i].vaddr, items[i].len);

		err = ath10k_ce_send_nolock(ce_pipe,
					    items[i].transfer_context,
					    items[i].paddr,
					    items[i].len,
					    items[i].transfer_id,
					    CE_SEND_FLAG_GATHER);
		if (err)
			goto unlock;
	}

	/* `i` is equal to `n_items - 1` after for() */
	ath10k_dbg(ATH10K_DBG_PCI,
		   "pci tx item %d paddr 0x%08x len %d n_items %d\n",
		   i, items[i].paddr, items[i].len, n_items);
	ath10k_dbg_dump(ATH10K_DBG_PCI_DUMP, NULL, "item data: ",
			items[i].vaddr, items[i].len);

	err = ath10k_ce_send_nolock(ce_pipe,
				    items[i].transfer_context,
				    items[i].paddr,
				    items[i].len,
				    items[i].transfer_id,
				    0);
	if (err)
		goto unlock;

	err = 0;
unlock:
	spin_unlock_bh(&ar_pci->ce_lock);
	return err;
static u16 ath10k_pci_hif_get_free_queue_number(struct ath10k *ar, u8 pipe)
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

	return ath10k_ce_num_free_src_entries(ar_pci->pipe_info[pipe].ce_hdl);
static void ath10k_pci_hif_dump_area(struct ath10k *ar)
	u32 reg_dump_area = 0;
	u32 reg_dump_values[REG_DUMP_COUNT_QCA988X] = {};
	u32 host_addr;
	int ret;
	int i;

	ath10k_err("firmware crashed!\n");
	ath10k_err("hardware name %s version 0x%x\n",
		   ar->hw_params.name, ar->target_version);
	ath10k_err("firmware version: %s\n", ar->hw->wiphy->fw_version);

	host_addr = host_interest_item_address(HI_ITEM(hi_failure_state));
	ret = ath10k_pci_diag_read_mem(ar, host_addr,
				       &reg_dump_area, sizeof(u32));
	if (ret)
		ath10k_err("failed to read FW dump area address: %d\n", ret);

	ath10k_err("target register dump location: 0x%08X\n", reg_dump_area);

	ret = ath10k_pci_diag_read_mem(ar, reg_dump_area,
				       &reg_dump_values[0],
				       REG_DUMP_COUNT_QCA988X * sizeof(u32));
	if (ret)
		ath10k_err("failed to read FW dump area: %d\n", ret);

	BUILD_BUG_ON(REG_DUMP_COUNT_QCA988X % 4);

	ath10k_err("target register dump:\n");
	for (i = 0; i < REG_DUMP_COUNT_QCA988X; i += 4)
		ath10k_err("[%02d]: 0x%08X 0x%08X 0x%08X 0x%08X\n",
			   i,
			   reg_dump_values[i],
			   reg_dump_values[i + 1],
			   reg_dump_values[i + 2],
			   reg_dump_values[i + 3]);

	queue_work(ar->workqueue, &ar->restart_work);
static void ath10k_pci_hif_send_complete_check(struct ath10k *ar, u8 pipe,
					       int force)
	/*
	 * Decide whether to actually poll for completions, or just
	 * wait for a later chance.
	 * If there seem to be plenty of resources left, then just
	 * wait, since checking involves reading a CE register, which
	 * is a relatively expensive operation.
	 */
	resources = ath10k_pci_hif_get_free_queue_number(ar, pipe);

	/*
	 * If at least 50% of the total resources are still available,
	 * don't bother checking again yet.
	 */
	if (resources > (host_ce_config_wlan[pipe].src_nentries >> 1))
		return;

	ath10k_ce_per_engine_service(ar, pipe);
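
/*
 * Worked example of the 50% heuristic above (illustrative): CE4 is created
 * with CE_HTT_H2T_MSG_SRC_NENTRIES source entries. If that constant is,
 * say, 256, the (expensive) CE register read is skipped as long as more
 * than 128 entries are still free, and completions are only polled once
 * fewer than half remain.
 */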
static void ath10k_pci_hif_set_callbacks(struct ath10k *ar,
					 struct ath10k_hif_cb *callbacks)
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

	ath10k_dbg(ATH10K_DBG_PCI, "%s\n", __func__);

	memcpy(&ar_pci->msg_callbacks_current, callbacks,
	       sizeof(ar_pci->msg_callbacks_current));
static int ath10k_pci_setup_ce_irq(struct ath10k *ar)
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	const struct ce_attr *attr;
	struct ath10k_pci_pipe *pipe_info;
	int pipe_num, disable_interrupts;

	for (pipe_num = 0; pipe_num < CE_COUNT; pipe_num++) {
		pipe_info = &ar_pci->pipe_info[pipe_num];

		/* Handle Diagnostic CE specially */
		if (pipe_info->ce_hdl == ar_pci->ce_diag)
			continue;

		attr = &host_ce_config_wlan[pipe_num];

		if (attr->src_nentries) {
			disable_interrupts = attr->flags & CE_ATTR_DIS_INTR;
			ath10k_ce_send_cb_register(pipe_info->ce_hdl,
						   ath10k_pci_ce_send_done,
						   disable_interrupts);
		}

		if (attr->dest_nentries)
			ath10k_ce_recv_cb_register(pipe_info->ce_hdl,
						   ath10k_pci_ce_recv_data);
	}

	return 0;
static void ath10k_pci_kill_tasklet(struct ath10k *ar)
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	int i;

	tasklet_kill(&ar_pci->intr_tq);
	tasklet_kill(&ar_pci->msi_fw_err);
	tasklet_kill(&ar_pci->early_irq_tasklet);

	for (i = 0; i < CE_COUNT; i++)
		tasklet_kill(&ar_pci->pipe_info[i].intr);
/* TODO - temporary mapping while we have too few CEs */
static int ath10k_pci_hif_map_service_to_pipe(struct ath10k *ar,
					      u16 service_id, u8 *ul_pipe,
					      u8 *dl_pipe, int *ul_is_polled,
					      int *dl_is_polled)
	/* polling for received messages not supported */
	*dl_is_polled = 0;

	switch (service_id) {
	case ATH10K_HTC_SVC_ID_HTT_DATA_MSG:
		/*
		 * Host->target HTT gets its own pipe, so it can be polled
		 * while other pipes are interrupt driven.
		 */
		*ul_pipe = 4;
		/*
		 * Use the same target->host pipe for HTC ctrl, HTC raw
		 * streams, and WMI messages.
		 */
		*dl_pipe = 1;
		break;

	case ATH10K_HTC_SVC_ID_RSVD_CTRL:
	case ATH10K_HTC_SVC_ID_TEST_RAW_STREAMS:
		/*
		 * Note: HTC_RAW_STREAMS_SVC is currently unused, and
		 * HTC_CTRL_RSVD_SVC could share the same pipe as the
		 * WMI services. So, if another CE is needed, change
		 * this to *ul_pipe = 3, which frees up CE 0.
		 */
		*ul_pipe = 0;
		*dl_pipe = 1;
		break;

	case ATH10K_HTC_SVC_ID_WMI_DATA_BK:
	case ATH10K_HTC_SVC_ID_WMI_DATA_BE:
	case ATH10K_HTC_SVC_ID_WMI_DATA_VI:
	case ATH10K_HTC_SVC_ID_WMI_DATA_VO:
	case ATH10K_HTC_SVC_ID_WMI_CONTROL:
		*ul_pipe = 3;
		*dl_pipe = 2;
		break;

	/* pipe 6 reserved */
	/* pipe 7 reserved */
	default:
		return -1;
	}

	*ul_is_polled =
		(host_ce_config_wlan[*ul_pipe].flags & CE_ATTR_DIS_INTR) != 0;

	return 0;
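
/*
 * Illustrative lookup (not in the original source): resolving the WMI
 * control service with the mapping above yields the pipes from the CE
 * tables at the top of this file (CE3 = host->target WMI, CE2 =
 * target->host WMI).
 */
static void __maybe_unused ath10k_pci_map_wmi_example(struct ath10k *ar)
{
	u8 ul_pipe, dl_pipe;
	int ul_is_polled, dl_is_polled;

	ath10k_pci_hif_map_service_to_pipe(ar, ATH10K_HTC_SVC_ID_WMI_CONTROL,
					   &ul_pipe, &dl_pipe,
					   &ul_is_polled, &dl_is_polled);

	/* Expected per the tables above: ul_pipe == 3, dl_pipe == 2 */
}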
static void ath10k_pci_hif_get_default_pipe(struct ath10k *ar,
					    u8 *ul_pipe, u8 *dl_pipe)
	int ul_is_polled, dl_is_polled;

	(void)ath10k_pci_hif_map_service_to_pipe(ar,
						 ATH10K_HTC_SVC_ID_RSVD_CTRL,
						 ul_pipe, dl_pipe,
						 &ul_is_polled, &dl_is_polled);
static int ath10k_pci_post_rx_pipe(struct ath10k_pci_pipe *pipe_info,
				   int num)
	struct ath10k *ar = pipe_info->hif_ce_state;
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	struct ath10k_ce_pipe *ce_state = pipe_info->ce_hdl;
	struct sk_buff *skb;
	u32 ce_data;
	int i, ret = 0;

	if (pipe_info->buf_sz == 0)
		return 0;

	for (i = 0; i < num; i++) {
		skb = dev_alloc_skb(pipe_info->buf_sz);
		if (!skb) {
			ath10k_warn("failed to allocate skbuff for pipe %d\n",
				    num);
			ret = -ENOMEM;
			goto err;
		}

		WARN_ONCE((unsigned long)skb->data & 3, "unaligned skb");

		ce_data = dma_map_single(ar->dev, skb->data,
					 skb->len + skb_tailroom(skb),
					 DMA_FROM_DEVICE);

		if (unlikely(dma_mapping_error(ar->dev, ce_data))) {
			ath10k_warn("failed to DMA map sk_buff\n");
			dev_kfree_skb_any(skb);
			ret = -EIO;
			goto err;
		}

		ATH10K_SKB_CB(skb)->paddr = ce_data;

		pci_dma_sync_single_for_device(ar_pci->pdev, ce_data,
					       pipe_info->buf_sz,
					       PCI_DMA_FROMDEVICE);

		ret = ath10k_ce_recv_buf_enqueue(ce_state, (void *)skb,
						 ce_data);
		if (ret) {
			ath10k_warn("failed to enqueue to pipe %d: %d\n",
				    num, ret);
			goto err;
		}
	}

	return ret;

err:
	ath10k_pci_rx_pipe_cleanup(pipe_info);
	return ret;
static int ath10k_pci_post_rx(struct ath10k *ar)
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	struct ath10k_pci_pipe *pipe_info;
	const struct ce_attr *attr;
	int pipe_num, ret = 0;

	for (pipe_num = 0; pipe_num < CE_COUNT; pipe_num++) {
		pipe_info = &ar_pci->pipe_info[pipe_num];
		attr = &host_ce_config_wlan[pipe_num];

		if (attr->dest_nentries == 0)
			continue;

		ret = ath10k_pci_post_rx_pipe(pipe_info,
					      attr->dest_nentries - 1);
		if (ret) {
			ath10k_warn("failed to post RX buffer for pipe %d: %d\n",
				    pipe_num, ret);

			for (; pipe_num >= 0; pipe_num--) {
				pipe_info = &ar_pci->pipe_info[pipe_num];
				ath10k_pci_rx_pipe_cleanup(pipe_info);
			}
			return ret;
		}
	}

	return 0;
static int ath10k_pci_hif_start(struct ath10k *ar)
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	int ret, ret_early;

	ath10k_pci_free_early_irq(ar);
	ath10k_pci_kill_tasklet(ar);

	ret = ath10k_pci_request_irq(ar);
	if (ret) {
		ath10k_warn("failed to request irqs: %d\n", ret);
		goto err_early_irq;
	}

	ret = ath10k_pci_setup_ce_irq(ar);
	if (ret) {
		ath10k_warn("failed to setup CE interrupts: %d\n", ret);
		goto err_stop;
	}

	/* Post buffers once to start things off. */
	ret = ath10k_pci_post_rx(ar);
	if (ret) {
		ath10k_warn("failed to post RX buffers for all pipes: %d\n",
			    ret);
		goto err_stop;
	}

	ar_pci->started = 1;
	return 0;

err_stop:
	ath10k_ce_disable_interrupts(ar);
	ath10k_pci_free_irq(ar);
	ath10k_pci_kill_tasklet(ar);
err_early_irq:
	/* Though there should be no interrupts (device was reset)
	 * power_down() expects the early IRQ to be installed as per the
	 * driver lifecycle. */
	ret_early = ath10k_pci_request_early_irq(ar);
	if (ret_early)
		ath10k_warn("failed to re-enable early irq: %d\n", ret_early);

	return ret;
static void ath10k_pci_rx_pipe_cleanup(struct ath10k_pci_pipe *pipe_info)
	struct ath10k *ar;
	struct ath10k_pci *ar_pci;
	struct ath10k_ce_pipe *ce_hdl;
	u32 buf_sz;
	struct sk_buff *netbuf;
	u32 ce_data;

	buf_sz = pipe_info->buf_sz;

	/* Unused Copy Engine */
	if (buf_sz == 0)
		return;

	ar = pipe_info->hif_ce_state;
	ar_pci = ath10k_pci_priv(ar);

	if (!ar_pci->started)
		return;

	ce_hdl = pipe_info->ce_hdl;

	while (ath10k_ce_revoke_recv_next(ce_hdl, (void **)&netbuf,
					  &ce_data) == 0) {
		dma_unmap_single(ar->dev, ATH10K_SKB_CB(netbuf)->paddr,
				 netbuf->len + skb_tailroom(netbuf),
				 DMA_FROM_DEVICE);
		dev_kfree_skb_any(netbuf);
	}
static void ath10k_pci_tx_pipe_cleanup(struct ath10k_pci_pipe *pipe_info)
	struct ath10k *ar;
	struct ath10k_pci *ar_pci;
	struct ath10k_ce_pipe *ce_hdl;
	struct sk_buff *netbuf;
	u32 ce_data;
	unsigned int nbytes;
	unsigned int id;
	u32 buf_sz;

	buf_sz = pipe_info->buf_sz;

	/* Unused Copy Engine */
	if (buf_sz == 0)
		return;

	ar = pipe_info->hif_ce_state;
	ar_pci = ath10k_pci_priv(ar);

	if (!ar_pci->started)
		return;

	ce_hdl = pipe_info->ce_hdl;

	while (ath10k_ce_cancel_send_next(ce_hdl, (void **)&netbuf,
					  &ce_data, &nbytes, &id) == 0) {
		/* no need to call tx completion for NULL pointers */
		if (!netbuf)
			continue;

		ar_pci->msg_callbacks_current.tx_completion(ar,
							    netbuf,
							    id);
	}
/*
 * Cleanup residual buffers for device shutdown:
 *    buffers that were enqueued for receive
 *    buffers that were to be sent
 * Note: Buffers that had completed but which were
 * not yet processed are on a completion queue. They
 * are handled when the completion thread shuts down.
 */
static void ath10k_pci_buffer_cleanup(struct ath10k *ar)
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	int pipe_num;

	for (pipe_num = 0; pipe_num < CE_COUNT; pipe_num++) {
		struct ath10k_pci_pipe *pipe_info;

		pipe_info = &ar_pci->pipe_info[pipe_num];
		ath10k_pci_rx_pipe_cleanup(pipe_info);
		ath10k_pci_tx_pipe_cleanup(pipe_info);
	}
static void ath10k_pci_ce_deinit(struct ath10k *ar)
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	struct ath10k_pci_pipe *pipe_info;
	int pipe_num;

	for (pipe_num = 0; pipe_num < CE_COUNT; pipe_num++) {
		pipe_info = &ar_pci->pipe_info[pipe_num];
		if (pipe_info->ce_hdl) {
			ath10k_ce_deinit(pipe_info->ce_hdl);
			pipe_info->ce_hdl = NULL;
			pipe_info->buf_sz = 0;
		}
	}
static void ath10k_pci_hif_stop(struct ath10k *ar)
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	int ret;

	ath10k_dbg(ATH10K_DBG_PCI, "%s\n", __func__);

	ret = ath10k_ce_disable_interrupts(ar);
	if (ret)
		ath10k_warn("failed to disable CE interrupts: %d\n", ret);

	ath10k_pci_free_irq(ar);
	ath10k_pci_kill_tasklet(ar);

	ret = ath10k_pci_request_early_irq(ar);
	if (ret)
		ath10k_warn("failed to re-enable early irq: %d\n", ret);

	/* At this point, asynchronous threads are stopped, the target should
	 * not DMA nor interrupt. We process the leftovers and then free
	 * everything else up. */

	ath10k_pci_buffer_cleanup(ar);

	/* Make sure the device won't access any structures on the host by
	 * resetting it. The device was fed with PCI CE ringbuffer
	 * configuration during init. If ringbuffers are freed and the device
	 * were to access them this could lead to memory corruption on the
	 * host. */
	ath10k_pci_warm_reset(ar);

	ar_pci->started = 0;
static int ath10k_pci_hif_exchange_bmi_msg(struct ath10k *ar,
					   void *req, u32 req_len,
					   void *resp, u32 *resp_len)
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	struct ath10k_pci_pipe *pci_tx = &ar_pci->pipe_info[BMI_CE_NUM_TO_TARG];
	struct ath10k_pci_pipe *pci_rx = &ar_pci->pipe_info[BMI_CE_NUM_TO_HOST];
	struct ath10k_ce_pipe *ce_tx = pci_tx->ce_hdl;
	struct ath10k_ce_pipe *ce_rx = pci_rx->ce_hdl;
	dma_addr_t req_paddr = 0;
	dma_addr_t resp_paddr = 0;
	struct bmi_xfer xfer = {};
	void *treq, *tresp = NULL;
	int ret = 0;

	if (resp && !resp_len)
		return -EINVAL;

	if (resp && resp_len && *resp_len == 0)
		return -EINVAL;

	treq = kmemdup(req, req_len, GFP_KERNEL);
	if (!treq)
		return -ENOMEM;

	req_paddr = dma_map_single(ar->dev, treq, req_len, DMA_TO_DEVICE);
	ret = dma_mapping_error(ar->dev, req_paddr);
	if (ret)
		goto err_dma;

	if (resp && resp_len) {
		tresp = kzalloc(*resp_len, GFP_KERNEL);
		if (!tresp) {
			ret = -ENOMEM;
			goto err_req;
		}

		resp_paddr = dma_map_single(ar->dev, tresp, *resp_len,
					    DMA_FROM_DEVICE);
		ret = dma_mapping_error(ar->dev, resp_paddr);
		if (ret)
			goto err_req;

		xfer.wait_for_resp = true;

		ath10k_ce_recv_buf_enqueue(ce_rx, &xfer, resp_paddr);
	}

	init_completion(&xfer.done);

	ret = ath10k_ce_send(ce_tx, &xfer, req_paddr, req_len, -1, 0);
	if (ret)
		goto err_resp;

	ret = ath10k_pci_bmi_wait(ce_tx, ce_rx, &xfer);
	if (ret) {
		u32 unused_buffer;
		unsigned int unused_nbytes;
		unsigned int unused_id;

		ath10k_ce_cancel_send_next(ce_tx, NULL, &unused_buffer,
					   &unused_nbytes, &unused_id);
	}

	/* non-zero means we did not time out */
err_resp:
	if (resp) {
		u32 unused_buffer;

		ath10k_ce_revoke_recv_next(ce_rx, NULL, &unused_buffer);
		dma_unmap_single(ar->dev, resp_paddr,
				 *resp_len, DMA_FROM_DEVICE);
	}
err_req:
	dma_unmap_single(ar->dev, req_paddr, req_len, DMA_TO_DEVICE);

	if (ret == 0 && resp_len) {
		*resp_len = min(*resp_len, xfer.resp_len);
		memcpy(resp, tresp, xfer.resp_len);
	}
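err_dma:

/*
 * Illustrative caller sketch (not from the original driver): exchanging a
 * BMI command via the function above. The command layout comes from bmi.h;
 * BMI_GET_TARGET_INFO is assumed to be a valid opcode there, and the
 * response buffer size is a placeholder.
 */
static int __maybe_unused ath10k_pci_bmi_example(struct ath10k *ar)
{
	__le32 cmd = __cpu_to_le32(BMI_GET_TARGET_INFO); /* assumed opcode */
	u8 resp[64] = {};	/* hypothetical response buffer */
	u32 resp_len = sizeof(resp);

	/* Request/response lengths are byte counts; resp_len is updated to
	 * the number of bytes actually returned by the target. */
	return ath10k_pci_hif_exchange_bmi_msg(ar, &cmd, sizeof(cmd),
					       resp, &resp_len);
}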
static void ath10k_pci_bmi_send_done(struct ath10k_ce_pipe *ce_state)
	struct bmi_xfer *xfer;
	u32 ce_data;
	unsigned int nbytes;
	unsigned int transfer_id;

	if (ath10k_ce_completed_send_next(ce_state, (void **)&xfer, &ce_data,
					  &nbytes, &transfer_id))
		return;

	if (xfer->wait_for_resp)
		return;

	complete(&xfer->done);

static void ath10k_pci_bmi_recv_data(struct ath10k_ce_pipe *ce_state)
	struct bmi_xfer *xfer;
	u32 ce_data;
	unsigned int nbytes;
	unsigned int transfer_id;
	unsigned int flags;

	if (ath10k_ce_completed_recv_next(ce_state, (void **)&xfer, &ce_data,
					  &nbytes, &transfer_id, &flags))
		return;

	if (!xfer->wait_for_resp) {
		ath10k_warn("unexpected: BMI data received; ignoring\n");
		return;
	}

	xfer->resp_len = nbytes;
	complete(&xfer->done);
static int ath10k_pci_bmi_wait(struct ath10k_ce_pipe *tx_pipe,
			       struct ath10k_ce_pipe *rx_pipe,
			       struct bmi_xfer *xfer)
	unsigned long timeout = jiffies + BMI_COMMUNICATION_TIMEOUT_HZ;

	while (time_before_eq(jiffies, timeout)) {
		ath10k_pci_bmi_send_done(tx_pipe);
		ath10k_pci_bmi_recv_data(rx_pipe);

		if (completion_done(&xfer->done))
			return 0;

		schedule();
	}

	return -ETIMEDOUT;
/*
 * Map from service/endpoint to Copy Engine.
 * This table is derived from the CE_PCI TABLE, above.
 * It is passed to the Target at startup for use by firmware.
 */
static const struct service_to_pipe target_service_to_ce_map_wlan[] = {
	{
		ATH10K_HTC_SVC_ID_WMI_DATA_VO,
		PIPEDIR_OUT,		/* out = UL = host -> target */
		3,
	},
	{
		ATH10K_HTC_SVC_ID_WMI_DATA_VO,
		PIPEDIR_IN,		/* in = DL = target -> host */
		2,
	},
	{
		ATH10K_HTC_SVC_ID_WMI_DATA_BK,
		PIPEDIR_OUT,		/* out = UL = host -> target */
		3,
	},
	{
		ATH10K_HTC_SVC_ID_WMI_DATA_BK,
		PIPEDIR_IN,		/* in = DL = target -> host */
		2,
	},
	{
		ATH10K_HTC_SVC_ID_WMI_DATA_BE,
		PIPEDIR_OUT,		/* out = UL = host -> target */
		3,
	},
	{
		ATH10K_HTC_SVC_ID_WMI_DATA_BE,
		PIPEDIR_IN,		/* in = DL = target -> host */
		2,
	},
	{
		ATH10K_HTC_SVC_ID_WMI_DATA_VI,
		PIPEDIR_OUT,		/* out = UL = host -> target */
		3,
	},
	{
		ATH10K_HTC_SVC_ID_WMI_DATA_VI,
		PIPEDIR_IN,		/* in = DL = target -> host */
		2,
	},
	{
		ATH10K_HTC_SVC_ID_WMI_CONTROL,
		PIPEDIR_OUT,		/* out = UL = host -> target */
		3,
	},
	{
		ATH10K_HTC_SVC_ID_WMI_CONTROL,
		PIPEDIR_IN,		/* in = DL = target -> host */
		2,
	},
	{
		ATH10K_HTC_SVC_ID_RSVD_CTRL,
		PIPEDIR_OUT,		/* out = UL = host -> target */
		0,			/* could be moved to 3 (share with WMI) */
	},
	{
		ATH10K_HTC_SVC_ID_RSVD_CTRL,
		PIPEDIR_IN,		/* in = DL = target -> host */
		1,
	},
	{
		ATH10K_HTC_SVC_ID_TEST_RAW_STREAMS,	/* not currently used */
		PIPEDIR_OUT,		/* out = UL = host -> target */
		0,
	},
	{
		ATH10K_HTC_SVC_ID_TEST_RAW_STREAMS,	/* not currently used */
		PIPEDIR_IN,		/* in = DL = target -> host */
		1,
	},
	{
		ATH10K_HTC_SVC_ID_HTT_DATA_MSG,
		PIPEDIR_OUT,		/* out = UL = host -> target */
		4,
	},
	{
		ATH10K_HTC_SVC_ID_HTT_DATA_MSG,
		PIPEDIR_IN,		/* in = DL = target -> host */
		1,
	},

	/* (Additions here) */

	{ /* Must be last */
		0,
		0,
		0,
	},
};
/*
 * Send an interrupt to the device to wake up the Target CPU
 * so it has an opportunity to notice any changed state.
 */
static int ath10k_pci_wake_target_cpu(struct ath10k *ar)
	u32 core_ctrl;
	int ret;

	ret = ath10k_pci_diag_read_access(ar, SOC_CORE_BASE_ADDRESS |
					  CORE_CTRL_ADDRESS,
					  &core_ctrl);
	if (ret) {
		ath10k_warn("failed to read core_ctrl: %d\n", ret);
		return ret;
	}

	/* A_INUM_FIRMWARE interrupt to Target CPU */
	core_ctrl |= CORE_CTRL_CPU_INTR_MASK;

	ret = ath10k_pci_diag_write_access(ar, SOC_CORE_BASE_ADDRESS |
					   CORE_CTRL_ADDRESS,
					   core_ctrl);
	if (ret)
		ath10k_warn("failed to set target CPU interrupt mask: %d\n",
			    ret);

	return ret;
static int ath10k_pci_init_config(struct ath10k *ar)
	u32 interconnect_targ_addr;
	u32 pcie_state_targ_addr = 0;
	u32 pipe_cfg_targ_addr = 0;
	u32 svc_to_pipe_map = 0;
	u32 pcie_config_flags = 0;
	u32 ealloc_value;
	u32 ealloc_targ_addr;
	u32 flag2_value;
	u32 flag2_targ_addr;
	int ret = 0;

	/* Download to Target the CE Config and the service-to-CE map */
	interconnect_targ_addr =
		host_interest_item_address(HI_ITEM(hi_interconnect_state));

	/* Supply Target-side CE configuration */
	ret = ath10k_pci_diag_read_access(ar, interconnect_targ_addr,
					  &pcie_state_targ_addr);
	if (ret != 0) {
		ath10k_err("Failed to get pcie state addr: %d\n", ret);
		return ret;
	}

	if (pcie_state_targ_addr == 0) {
		ret = -EIO;
		ath10k_err("Invalid pcie state addr\n");
		return ret;
	}

	ret = ath10k_pci_diag_read_access(ar, pcie_state_targ_addr +
					  offsetof(struct pcie_state,
						   pipe_cfg_addr),
					  &pipe_cfg_targ_addr);
	if (ret != 0) {
		ath10k_err("Failed to get pipe cfg addr: %d\n", ret);
		return ret;
	}

	if (pipe_cfg_targ_addr == 0) {
		ret = -EIO;
		ath10k_err("Invalid pipe cfg addr\n");
		return ret;
	}

	ret = ath10k_pci_diag_write_mem(ar, pipe_cfg_targ_addr,
					target_ce_config_wlan,
					sizeof(target_ce_config_wlan));
	if (ret != 0) {
		ath10k_err("Failed to write pipe cfg: %d\n", ret);
		return ret;
	}

	ret = ath10k_pci_diag_read_access(ar, pcie_state_targ_addr +
					  offsetof(struct pcie_state,
						   svc_to_pipe_map),
					  &svc_to_pipe_map);
	if (ret != 0) {
		ath10k_err("Failed to get svc/pipe map: %d\n", ret);
		return ret;
	}

	if (svc_to_pipe_map == 0) {
		ret = -EIO;
		ath10k_err("Invalid svc_to_pipe map\n");
		return ret;
	}

	ret = ath10k_pci_diag_write_mem(ar, svc_to_pipe_map,
					target_service_to_ce_map_wlan,
					sizeof(target_service_to_ce_map_wlan));
	if (ret != 0) {
		ath10k_err("Failed to write svc/pipe map: %d\n", ret);
		return ret;
	}

	ret = ath10k_pci_diag_read_access(ar, pcie_state_targ_addr +
					  offsetof(struct pcie_state,
						   config_flags),
					  &pcie_config_flags);
	if (ret != 0) {
		ath10k_err("Failed to get pcie config_flags: %d\n", ret);
		return ret;
	}

	pcie_config_flags &= ~PCIE_CONFIG_FLAG_ENABLE_L1;

	ret = ath10k_pci_diag_write_mem(ar, pcie_state_targ_addr +
					offsetof(struct pcie_state, config_flags),
					&pcie_config_flags,
					sizeof(pcie_config_flags));
	if (ret != 0) {
		ath10k_err("Failed to write pcie config_flags: %d\n", ret);
		return ret;
	}

	/* configure early allocation */
	ealloc_targ_addr = host_interest_item_address(HI_ITEM(hi_early_alloc));

	ret = ath10k_pci_diag_read_access(ar, ealloc_targ_addr, &ealloc_value);
	if (ret != 0) {
		ath10k_err("Failed to get early alloc val: %d\n", ret);
		return ret;
	}

	/* first bank is switched to IRAM */
	ealloc_value |= ((HI_EARLY_ALLOC_MAGIC << HI_EARLY_ALLOC_MAGIC_SHIFT) &
			 HI_EARLY_ALLOC_MAGIC_MASK);
	ealloc_value |= ((1 << HI_EARLY_ALLOC_IRAM_BANKS_SHIFT) &
			 HI_EARLY_ALLOC_IRAM_BANKS_MASK);

	ret = ath10k_pci_diag_write_access(ar, ealloc_targ_addr, ealloc_value);
	if (ret != 0) {
		ath10k_err("Failed to set early alloc val: %d\n", ret);
		return ret;
	}

	/* Tell Target to proceed with initialization */
	flag2_targ_addr = host_interest_item_address(HI_ITEM(hi_option_flag2));

	ret = ath10k_pci_diag_read_access(ar, flag2_targ_addr, &flag2_value);
	if (ret != 0) {
		ath10k_err("Failed to get option val: %d\n", ret);
		return ret;
	}

	flag2_value |= HI_OPTION_EARLY_CFG_DONE;

	ret = ath10k_pci_diag_write_access(ar, flag2_targ_addr, flag2_value);
	if (ret != 0) {
		ath10k_err("Failed to set option val: %d\n", ret);
		return ret;
	}

	return 0;
static int ath10k_pci_ce_init(struct ath10k *ar)
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	struct ath10k_pci_pipe *pipe_info;
	const struct ce_attr *attr;
	int pipe_num;

	for (pipe_num = 0; pipe_num < CE_COUNT; pipe_num++) {
		pipe_info = &ar_pci->pipe_info[pipe_num];
		pipe_info->pipe_num = pipe_num;
		pipe_info->hif_ce_state = ar;
		attr = &host_ce_config_wlan[pipe_num];

		pipe_info->ce_hdl = ath10k_ce_init(ar, pipe_num, attr);
		if (pipe_info->ce_hdl == NULL) {
			ath10k_err("failed to initialize CE for pipe: %d\n",
				   pipe_num);

			/* It is safe to call it here. It checks if ce_hdl is
			 * valid for each pipe */
			ath10k_pci_ce_deinit(ar);
			return -1;
		}

		if (pipe_num == CE_COUNT - 1) {
			/*
			 * Reserve the last CE for diagnostic window support.
			 */
			ar_pci->ce_diag = pipe_info->ce_hdl;
			continue;
		}

		pipe_info->buf_sz = (size_t)(attr->src_sz_max);
	}

	return 0;
static void ath10k_pci_fw_interrupt_handler(struct ath10k *ar)
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	u32 fw_indicator_address, fw_indicator;

	ath10k_pci_wake(ar);

	fw_indicator_address = ar_pci->fw_indicator_address;
	fw_indicator = ath10k_pci_read32(ar, fw_indicator_address);

	if (fw_indicator & FW_IND_EVENT_PENDING) {
		/* ACK: clear Target-side pending event */
		ath10k_pci_write32(ar, fw_indicator_address,
				   fw_indicator & ~FW_IND_EVENT_PENDING);

		if (ar_pci->started) {
			ath10k_pci_hif_dump_area(ar);
		} else {
			/*
			 * Probable Target failure before we're prepared
			 * to handle it. Generally unexpected.
			 */
			ath10k_warn("early firmware event indicated\n");
		}
	}

	ath10k_pci_sleep(ar);
static int ath10k_pci_warm_reset(struct ath10k *ar)
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	int ret = 0;
	u32 val;

	ath10k_dbg(ATH10K_DBG_BOOT, "boot performing warm chip reset\n");

	ret = ath10k_do_pci_wake(ar);
	if (ret) {
		ath10k_err("failed to wake up target: %d\n", ret);
		return ret;
	}

	val = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
				PCIE_INTR_CAUSE_ADDRESS);
	ath10k_dbg(ATH10K_DBG_BOOT, "boot host cpu intr cause: 0x%08x\n", val);

	val = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
				CPU_INTR_ADDRESS);
	ath10k_dbg(ATH10K_DBG_BOOT, "boot target cpu intr cause: 0x%08x\n",
		   val);

	/* disable pending irqs */
	ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS +
			   PCIE_INTR_ENABLE_ADDRESS, 0);

	ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS +
			   PCIE_INTR_CLR_ADDRESS, ~0);

	msleep(100);

	/* clear fw indicator */
	ath10k_pci_write32(ar, ar_pci->fw_indicator_address, 0);

	/* clear target LF timer interrupts */
	val = ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS +
				SOC_LF_TIMER_CONTROL0_ADDRESS);
	ath10k_pci_write32(ar, RTC_SOC_BASE_ADDRESS +
			   SOC_LF_TIMER_CONTROL0_ADDRESS,
			   val & ~SOC_LF_TIMER_CONTROL0_ENABLE_MASK);

	val = ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS +
				SOC_RESET_CONTROL_ADDRESS);
	ath10k_pci_write32(ar, RTC_SOC_BASE_ADDRESS + SOC_RESET_CONTROL_ADDRESS,
			   val | SOC_RESET_CONTROL_CE_RST_MASK);
	val = ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS +
				SOC_RESET_CONTROL_ADDRESS);
	msleep(10);

	ath10k_pci_write32(ar, RTC_SOC_BASE_ADDRESS + SOC_RESET_CONTROL_ADDRESS,
			   val & ~SOC_RESET_CONTROL_CE_RST_MASK);
	val = ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS +
				SOC_RESET_CONTROL_ADDRESS);
	msleep(10);

	val = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
				PCIE_INTR_CAUSE_ADDRESS);
	ath10k_dbg(ATH10K_DBG_BOOT, "boot host cpu intr cause: 0x%08x\n", val);

	val = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
				CPU_INTR_ADDRESS);
	ath10k_dbg(ATH10K_DBG_BOOT, "boot target cpu intr cause: 0x%08x\n",
		   val);

	/* CPU warm reset */
	val = ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS +
				SOC_RESET_CONTROL_ADDRESS);
	ath10k_pci_write32(ar, RTC_SOC_BASE_ADDRESS + SOC_RESET_CONTROL_ADDRESS,
			   val | SOC_RESET_CONTROL_CPU_WARM_RST_MASK);

	val = ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS +
				SOC_RESET_CONTROL_ADDRESS);
	ath10k_dbg(ATH10K_DBG_BOOT, "boot target reset state: 0x%08x\n", val);

	msleep(100);

	ath10k_dbg(ATH10K_DBG_BOOT, "boot warm reset complete\n");

	ath10k_do_pci_sleep(ar);
	return ret;
static int __ath10k_pci_hif_power_up(struct ath10k *ar, bool cold_reset)
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	const char *irq_mode;
	int ret;

	/*
	 * Bring the target up cleanly.
	 *
	 * The target may be in an undefined state with an AUX-powered Target
	 * and a Host in WoW mode. If the Host crashes, loses power, or is
	 * restarted (without unloading the driver) then the Target is left
	 * (aux) powered and running. On a subsequent driver load, the Target
	 * is in an unexpected state. We try to catch that here in order to
	 * reset the Target and retry the probe.
	 */
	if (cold_reset)
		ret = ath10k_pci_cold_reset(ar);
	else
		ret = ath10k_pci_warm_reset(ar);

	if (ret) {
		ath10k_err("failed to reset target: %d\n", ret);
		goto err;
	}

	if (!test_bit(ATH10K_PCI_FEATURE_SOC_POWER_SAVE, ar_pci->features))
		/* Force AWAKE forever */
		ath10k_do_pci_wake(ar);

	ret = ath10k_pci_ce_init(ar);
	if (ret) {
		ath10k_err("failed to initialize CE: %d\n", ret);
		goto err_ps;
	}

	ret = ath10k_ce_disable_interrupts(ar);
	if (ret) {
		ath10k_err("failed to disable CE interrupts: %d\n", ret);
		goto err_ce;
	}

	ret = ath10k_pci_init_irq(ar);
	if (ret) {
		ath10k_err("failed to init irqs: %d\n", ret);
		goto err_ce;
	}

	ret = ath10k_pci_request_early_irq(ar);
	if (ret) {
		ath10k_err("failed to request early irq: %d\n", ret);
		goto err_deinit_irq;
	}

	ret = ath10k_pci_wait_for_target_init(ar);
	if (ret) {
		ath10k_err("failed to wait for target to init: %d\n", ret);
		goto err_free_early_irq;
	}

	ret = ath10k_pci_init_config(ar);
	if (ret) {
		ath10k_err("failed to setup init config: %d\n", ret);
		goto err_free_early_irq;
	}

	ret = ath10k_pci_wake_target_cpu(ar);
	if (ret) {
		ath10k_err("could not wake up target CPU: %d\n", ret);
		goto err_free_early_irq;
	}

	if (ar_pci->num_msi_intrs > 1)
		irq_mode = "MSI-X";
	else if (ar_pci->num_msi_intrs == 1)
		irq_mode = "MSI";
	else
		irq_mode = "legacy";

	if (!test_bit(ATH10K_FLAG_FIRST_BOOT_DONE, &ar->dev_flags))
		ath10k_info("pci irq %s\n", irq_mode);

	return 0;

err_free_early_irq:
	ath10k_pci_free_early_irq(ar);
err_deinit_irq:
	ath10k_pci_deinit_irq(ar);
err_ce:
	ath10k_pci_ce_deinit(ar);
	ath10k_pci_warm_reset(ar);
err_ps:
	if (!test_bit(ATH10K_PCI_FEATURE_SOC_POWER_SAVE, ar_pci->features))
		ath10k_do_pci_sleep(ar);
err:
	return ret;
static int ath10k_pci_hif_power_up(struct ath10k *ar)
	int ret;

	/*
	 * Hardware CUS232 version 2 has some issues with cold reset and the
	 * preferred (and safer) way to perform a device reset is through a
	 * warm reset.
	 *
	 * Warm reset doesn't always work though (notably after a firmware
	 * crash) so fall back to cold reset if necessary.
	 */
	ret = __ath10k_pci_hif_power_up(ar, false);
	if (ret) {
		ath10k_warn("failed to power up target using warm reset (%d), trying cold reset\n",
			    ret);

		ret = __ath10k_pci_hif_power_up(ar, true);
		if (ret) {
			ath10k_err("failed to power up target using cold reset too (%d)\n",
				   ret);
			return ret;
		}
	}

	return 0;
static void ath10k_pci_hif_power_down(struct ath10k *ar)
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

	ath10k_pci_free_early_irq(ar);
	ath10k_pci_kill_tasklet(ar);
	ath10k_pci_deinit_irq(ar);
	ath10k_pci_warm_reset(ar);

	ath10k_pci_ce_deinit(ar);
	if (!test_bit(ATH10K_PCI_FEATURE_SOC_POWER_SAVE, ar_pci->features))
		ath10k_do_pci_sleep(ar);
#ifdef CONFIG_PM

#define ATH10K_PCI_PM_CONTROL 0x44

static int ath10k_pci_hif_suspend(struct ath10k *ar)
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	struct pci_dev *pdev = ar_pci->pdev;
	u32 val;

	pci_read_config_dword(pdev, ATH10K_PCI_PM_CONTROL, &val);

	if ((val & 0x000000ff) != 0x3) {
		pci_save_state(pdev);
		pci_disable_device(pdev);
		pci_write_config_dword(pdev, ATH10K_PCI_PM_CONTROL,
				       (val & 0xffffff00) | 0x03);
	}

	return 0;

static int ath10k_pci_hif_resume(struct ath10k *ar)
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	struct pci_dev *pdev = ar_pci->pdev;
	u32 val;

	pci_read_config_dword(pdev, ATH10K_PCI_PM_CONTROL, &val);

	if ((val & 0x000000ff) != 0) {
		pci_restore_state(pdev);
		pci_write_config_dword(pdev, ATH10K_PCI_PM_CONTROL,
				       val & 0xffffff00);

		/*
		 * Suspend/Resume resets the PCI configuration space,
		 * so we have to re-disable the RETRY_TIMEOUT register (0x41)
		 * to keep PCI Tx retries from interfering with C3 CPU state.
		 */
		pci_read_config_dword(pdev, 0x40, &val);

		if ((val & 0x0000ff00) != 0)
			pci_write_config_dword(pdev, 0x40, val & 0xffff00ff);
	}

	return 0;

#endif /* CONFIG_PM */
static const struct ath10k_hif_ops ath10k_pci_hif_ops = {
	.tx_sg			= ath10k_pci_hif_tx_sg,
	.exchange_bmi_msg	= ath10k_pci_hif_exchange_bmi_msg,
	.start			= ath10k_pci_hif_start,
	.stop			= ath10k_pci_hif_stop,
	.map_service_to_pipe	= ath10k_pci_hif_map_service_to_pipe,
	.get_default_pipe	= ath10k_pci_hif_get_default_pipe,
	.send_complete_check	= ath10k_pci_hif_send_complete_check,
	.set_callbacks		= ath10k_pci_hif_set_callbacks,
	.get_free_queue_number	= ath10k_pci_hif_get_free_queue_number,
	.power_up		= ath10k_pci_hif_power_up,
	.power_down		= ath10k_pci_hif_power_down,
#ifdef CONFIG_PM
	.suspend		= ath10k_pci_hif_suspend,
	.resume			= ath10k_pci_hif_resume,
#endif
};
static void ath10k_pci_ce_tasklet(unsigned long ptr)
	struct ath10k_pci_pipe *pipe = (struct ath10k_pci_pipe *)ptr;
	struct ath10k_pci *ar_pci = pipe->ar_pci;

	ath10k_ce_per_engine_service(ar_pci->ar, pipe->pipe_num);

static void ath10k_msi_err_tasklet(unsigned long data)
	struct ath10k *ar = (struct ath10k *)data;

	ath10k_pci_fw_interrupt_handler(ar);
/*
 * Handler for a per-engine interrupt on a PARTICULAR CE.
 * This is used in cases where each CE has a private MSI interrupt.
 */
static irqreturn_t ath10k_pci_per_engine_handler(int irq, void *arg)
	struct ath10k *ar = arg;
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	int ce_id = irq - ar_pci->pdev->irq - MSI_ASSIGN_CE_INITIAL;

	if (ce_id < 0 || ce_id >= ARRAY_SIZE(ar_pci->pipe_info)) {
		ath10k_warn("unexpected/invalid irq %d ce_id %d\n", irq, ce_id);
		return IRQ_HANDLED;
	}

	/*
	 * NOTE: We are able to derive ce_id from irq because we
	 * use a one-to-one mapping for CEs 0..5.
	 * CEs 6 & 7 do not use interrupts at all.
	 *
	 * This mapping must be kept in sync with the mapping
	 * used by firmware.
	 */
	tasklet_schedule(&ar_pci->pipe_info[ce_id].intr);
	return IRQ_HANDLED;

static irqreturn_t ath10k_pci_msi_fw_handler(int irq, void *arg)
	struct ath10k *ar = arg;
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

	tasklet_schedule(&ar_pci->msi_fw_err);
	return IRQ_HANDLED;
/*
 * Top-level interrupt handler for all PCI interrupts from a Target.
 * When a block of MSI interrupts is allocated, this top-level handler
 * is not used; instead, we directly call the correct sub-handler.
 */
static irqreturn_t ath10k_pci_interrupt_handler(int irq, void *arg)
	struct ath10k *ar = arg;
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

	if (ar_pci->num_msi_intrs == 0) {
		if (!ath10k_pci_irq_pending(ar))
			return IRQ_NONE;

		ath10k_pci_disable_and_clear_legacy_irq(ar);
	}

	tasklet_schedule(&ar_pci->intr_tq);

	return IRQ_HANDLED;
static void ath10k_pci_early_irq_tasklet(unsigned long data)
	struct ath10k *ar = (struct ath10k *)data;
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	u32 fw_ind;
	int ret;

	ret = ath10k_pci_wake(ar);
	if (ret) {
		ath10k_warn("failed to wake target in early irq tasklet: %d\n",
			    ret);
		return;
	}

	fw_ind = ath10k_pci_read32(ar, ar_pci->fw_indicator_address);
	if (fw_ind & FW_IND_EVENT_PENDING) {
		ath10k_pci_write32(ar, ar_pci->fw_indicator_address,
				   fw_ind & ~FW_IND_EVENT_PENDING);

		/* Some structures are unavailable during early boot or at
		 * driver teardown so just print that the device has crashed. */
		ath10k_warn("device crashed - no diagnostics available\n");
	}

	ath10k_pci_sleep(ar);
	ath10k_pci_enable_legacy_irq(ar);
static void ath10k_pci_tasklet(unsigned long data)
	struct ath10k *ar = (struct ath10k *)data;
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

	ath10k_pci_fw_interrupt_handler(ar); /* FIXME: Handle FW error */
	ath10k_ce_per_engine_service_any(ar);

	/* Re-enable legacy irq that was disabled in the irq handler */
	if (ar_pci->num_msi_intrs == 0)
		ath10k_pci_enable_legacy_irq(ar);
static int ath10k_pci_request_irq_msix(struct ath10k *ar)
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	int ret, i;

	ret = request_irq(ar_pci->pdev->irq + MSI_ASSIGN_FW,
			  ath10k_pci_msi_fw_handler,
			  IRQF_SHARED, "ath10k_pci", ar);
	if (ret) {
		ath10k_warn("failed to request MSI-X fw irq %d: %d\n",
			    ar_pci->pdev->irq + MSI_ASSIGN_FW, ret);
		return ret;
	}

	for (i = MSI_ASSIGN_CE_INITIAL; i <= MSI_ASSIGN_CE_MAX; i++) {
		ret = request_irq(ar_pci->pdev->irq + i,
				  ath10k_pci_per_engine_handler,
				  IRQF_SHARED, "ath10k_pci", ar);
		if (ret) {
			ath10k_warn("failed to request MSI-X ce irq %d: %d\n",
				    ar_pci->pdev->irq + i, ret);

			for (i--; i >= MSI_ASSIGN_CE_INITIAL; i--)
				free_irq(ar_pci->pdev->irq + i, ar);

			free_irq(ar_pci->pdev->irq + MSI_ASSIGN_FW, ar);
			return ret;
		}
	}

	return 0;

static int ath10k_pci_request_irq_msi(struct ath10k *ar)
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	int ret;

	ret = request_irq(ar_pci->pdev->irq,
			  ath10k_pci_interrupt_handler,
			  IRQF_SHARED, "ath10k_pci", ar);
	if (ret) {
		ath10k_warn("failed to request MSI irq %d: %d\n",
			    ar_pci->pdev->irq, ret);
		return ret;
	}

	return 0;

static int ath10k_pci_request_irq_legacy(struct ath10k *ar)
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	int ret;

	ret = request_irq(ar_pci->pdev->irq,
			  ath10k_pci_interrupt_handler,
			  IRQF_SHARED, "ath10k_pci", ar);
	if (ret) {
		ath10k_warn("failed to request legacy irq %d: %d\n",
			    ar_pci->pdev->irq, ret);
		return ret;
	}

	return 0;

static int ath10k_pci_request_irq(struct ath10k *ar)
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

	switch (ar_pci->num_msi_intrs) {
	case 0:
		return ath10k_pci_request_irq_legacy(ar);
	case 1:
		return ath10k_pci_request_irq_msi(ar);
	case MSI_NUM_REQUEST:
		return ath10k_pci_request_irq_msix(ar);
	}

	ath10k_warn("unknown irq configuration upon request\n");
	return -EINVAL;
static void ath10k_pci_free_irq(struct ath10k *ar)
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	int i;

	/* There's at least one interrupt regardless of whether it's a legacy
	 * INTR or MSI or MSI-X */
	for (i = 0; i < max(1, ar_pci->num_msi_intrs); i++)
		free_irq(ar_pci->pdev->irq + i, ar);
static void ath10k_pci_init_irq_tasklets(struct ath10k *ar)
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	int i;

	tasklet_init(&ar_pci->intr_tq, ath10k_pci_tasklet, (unsigned long)ar);
	tasklet_init(&ar_pci->msi_fw_err, ath10k_msi_err_tasklet,
		     (unsigned long)ar);
	tasklet_init(&ar_pci->early_irq_tasklet, ath10k_pci_early_irq_tasklet,
		     (unsigned long)ar);

	for (i = 0; i < CE_COUNT; i++) {
		ar_pci->pipe_info[i].ar_pci = ar_pci;
		tasklet_init(&ar_pci->pipe_info[i].intr, ath10k_pci_ce_tasklet,
			     (unsigned long)&ar_pci->pipe_info[i]);
	}
static int ath10k_pci_init_irq(struct ath10k *ar)
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	bool msix_supported = test_bit(ATH10K_PCI_FEATURE_MSI_X,
				       ar_pci->features);
	int ret;

	ath10k_pci_init_irq_tasklets(ar);

	if (ath10k_pci_irq_mode != ATH10K_PCI_IRQ_AUTO &&
	    !test_bit(ATH10K_FLAG_FIRST_BOOT_DONE, &ar->dev_flags))
		ath10k_info("limiting irq mode to: %d\n", ath10k_pci_irq_mode);

	/* Try MSI-X */
	if (ath10k_pci_irq_mode == ATH10K_PCI_IRQ_AUTO && msix_supported) {
		ar_pci->num_msi_intrs = MSI_NUM_REQUEST;
		ret = pci_enable_msi_range(ar_pci->pdev, ar_pci->num_msi_intrs,
					   ar_pci->num_msi_intrs);
		if (ret > 0)
			return 0;
	}

	/* Try MSI */
	if (ath10k_pci_irq_mode != ATH10K_PCI_IRQ_LEGACY) {
		ar_pci->num_msi_intrs = 1;
		ret = pci_enable_msi(ar_pci->pdev);
		if (ret == 0)
			return 0;
	}

	/* Try legacy irq
	 *
	 * A potential race occurs here: The CORE_BASE write
	 * depends on target correctly decoding AXI address but
	 * host won't know when target writes BAR to CORE_CTRL.
	 * This write might get lost if target has NOT written BAR.
	 * For now, fix the race by repeating the write in the
	 * synchronization check below. */
	ar_pci->num_msi_intrs = 0;

	ret = ath10k_pci_wake(ar);
	if (ret) {
		ath10k_warn("failed to wake target: %d\n", ret);
		return ret;
	}

	ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + PCIE_INTR_ENABLE_ADDRESS,
			   PCIE_INTR_FIRMWARE_MASK | PCIE_INTR_CE_MASK_ALL);
	ath10k_pci_sleep(ar);

	return 0;
static int ath10k_pci_deinit_irq_legacy(struct ath10k *ar)
	int ret;

	ret = ath10k_pci_wake(ar);
	if (ret) {
		ath10k_warn("failed to wake target: %d\n", ret);
		return ret;
	}

	ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + PCIE_INTR_ENABLE_ADDRESS,
			   0);
	ath10k_pci_sleep(ar);

	return 0;
static int ath10k_pci_deinit_irq(struct ath10k *ar)
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

	switch (ar_pci->num_msi_intrs) {
	case 0:
		return ath10k_pci_deinit_irq_legacy(ar);
	case 1:
		/* fall-through */
	case MSI_NUM_REQUEST:
		pci_disable_msi(ar_pci->pdev);
		return 0;
	default:
		pci_disable_msi(ar_pci->pdev);
	}

	ath10k_warn("unknown irq configuration upon deinit\n");
	return -EINVAL;
static int ath10k_pci_wait_for_target_init(struct ath10k *ar)
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	int wait_limit = 300; /* 3 sec */
	int ret;

	ret = ath10k_pci_wake(ar);
	if (ret) {
		ath10k_err("failed to wake up target: %d\n", ret);
		return ret;
	}

	while (wait_limit-- &&
	       !(ioread32(ar_pci->mem + FW_INDICATOR_ADDRESS) &
		 FW_IND_INITIALIZED)) {
		if (ar_pci->num_msi_intrs == 0)
			/* Fix potential race by repeating CORE_BASE writes */
			iowrite32(PCIE_INTR_FIRMWARE_MASK |
				  PCIE_INTR_CE_MASK_ALL,
				  ar_pci->mem + (SOC_CORE_BASE_ADDRESS |
						 PCIE_INTR_ENABLE_ADDRESS));

		mdelay(10);
	}

	if (wait_limit < 0) {
		ath10k_err("target stalled\n");
		ret = -EIO;
	}

	ath10k_pci_sleep(ar);
	return ret;
static int ath10k_pci_cold_reset(struct ath10k *ar)
	int i, ret;
	u32 val;

	ret = ath10k_do_pci_wake(ar);
	if (ret) {
		ath10k_err("failed to wake up target: %d\n",
			   ret);
		return ret;
	}

	/* Put Target, including PCIe, into RESET. */
	val = ath10k_pci_reg_read32(ar, SOC_GLOBAL_RESET_ADDRESS);
	val |= 1;
	ath10k_pci_reg_write32(ar, SOC_GLOBAL_RESET_ADDRESS, val);

	for (i = 0; i < ATH_PCI_RESET_WAIT_MAX; i++) {
		if (ath10k_pci_reg_read32(ar, RTC_STATE_ADDRESS) &
		    RTC_STATE_COLD_RESET_MASK)
			break;

		msleep(1);
	}

	/* Pull Target, including PCIe, out of RESET. */
	val &= ~1;
	ath10k_pci_reg_write32(ar, SOC_GLOBAL_RESET_ADDRESS, val);

	for (i = 0; i < ATH_PCI_RESET_WAIT_MAX; i++) {
		if (!(ath10k_pci_reg_read32(ar, RTC_STATE_ADDRESS) &
		      RTC_STATE_COLD_RESET_MASK))
			break;

		msleep(1);
	}

	ath10k_do_pci_sleep(ar);
	return 0;
static void ath10k_pci_dump_features(struct ath10k_pci *ar_pci)
	int i;

	for (i = 0; i < ATH10K_PCI_FEATURE_COUNT; i++) {
		if (!test_bit(i, ar_pci->features))
			continue;

		switch (i) {
		case ATH10K_PCI_FEATURE_MSI_X:
			ath10k_dbg(ATH10K_DBG_BOOT, "device supports MSI-X\n");
			break;
		case ATH10K_PCI_FEATURE_SOC_POWER_SAVE:
			ath10k_dbg(ATH10K_DBG_BOOT, "QCA98XX SoC power save enabled\n");
			break;
		}
	}
static int ath10k_pci_probe(struct pci_dev *pdev,
			    const struct pci_device_id *pci_dev)
	int ret = 0;
	struct ath10k *ar;
	struct ath10k_pci *ar_pci;
	void __iomem *mem;
	u32 lcr_val, chip_id;

	ath10k_dbg(ATH10K_DBG_PCI, "%s\n", __func__);

	ar_pci = kzalloc(sizeof(*ar_pci), GFP_KERNEL);
	if (ar_pci == NULL)
		return -ENOMEM;

	ar_pci->pdev = pdev;
	ar_pci->dev = &pdev->dev;

	switch (pci_dev->device) {
	case QCA988X_2_0_DEVICE_ID:
		set_bit(ATH10K_PCI_FEATURE_MSI_X, ar_pci->features);
		break;
	default:
		ret = -ENODEV;
		ath10k_err("Unknown device ID: %d\n", pci_dev->device);
		goto err_ar_pci;
	}

	if (ath10k_target_ps)
		set_bit(ATH10K_PCI_FEATURE_SOC_POWER_SAVE, ar_pci->features);

	ath10k_pci_dump_features(ar_pci);

	ar = ath10k_core_create(ar_pci, ar_pci->dev, &ath10k_pci_hif_ops);
	if (!ar) {
		ath10k_err("failed to create driver core\n");
		ret = -EINVAL;
		goto err_ar_pci;
	}

	ar_pci->ar = ar;
	ar_pci->fw_indicator_address = FW_INDICATOR_ADDRESS;
	atomic_set(&ar_pci->keep_awake_count, 0);

	pci_set_drvdata(pdev, ar);

	/*
	 * Without any knowledge of the Host, the Target may have been reset or
	 * power cycled and its Config Space may no longer reflect the PCI
	 * address space that was assigned earlier by the PCI infrastructure.
	 * Refresh it now.
	 */
	ret = pci_assign_resource(pdev, BAR_NUM);
	if (ret) {
		ath10k_err("failed to assign PCI space: %d\n", ret);
		goto err_ar;
	}

	ret = pci_enable_device(pdev);
	if (ret) {
		ath10k_err("failed to enable PCI device: %d\n", ret);
		goto err_ar;
	}

	/* Request MMIO resources */
	ret = pci_request_region(pdev, BAR_NUM, "ath");
	if (ret) {
		ath10k_err("failed to request MMIO region: %d\n", ret);
		goto err_device;
	}

	/*
	 * Target structures have a limit of 32 bit DMA pointers.
	 * DMA pointers can be wider than 32 bits by default on some systems.
	 */
	ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
	if (ret) {
		ath10k_err("failed to set DMA mask to 32-bit: %d\n", ret);
		goto err_region;
	}

	ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
	if (ret) {
		ath10k_err("failed to set consistent DMA mask to 32-bit\n");
		goto err_region;
	}

	/* Set bus master bit in PCI_COMMAND to enable DMA */
	pci_set_master(pdev);

	/*
	 * Temporary FIX: disable ASPM
	 * Will be removed after the OTP is programmed
	 */
	pci_read_config_dword(pdev, 0x80, &lcr_val);
	pci_write_config_dword(pdev, 0x80, (lcr_val & 0xffffff00));

	/* Arrange for access to Target SoC registers. */
	mem = pci_iomap(pdev, BAR_NUM, 0);
	if (!mem) {
		ath10k_err("failed to perform IOMAP for BAR%d\n", BAR_NUM);
		ret = -EIO;
		goto err_master;
	}

	ar_pci->mem = mem;

	spin_lock_init(&ar_pci->ce_lock);

	ret = ath10k_do_pci_wake(ar);
	if (ret) {
		ath10k_err("Failed to get chip id: %d\n", ret);
		goto err_iomap;
	}

	chip_id = ath10k_pci_soc_read32(ar, SOC_CHIP_ID_ADDRESS);

	ath10k_do_pci_sleep(ar);

	ath10k_dbg(ATH10K_DBG_BOOT, "boot pci_mem 0x%p\n", ar_pci->mem);

	ret = ath10k_core_register(ar, chip_id);
	if (ret) {
		ath10k_err("failed to register driver core: %d\n", ret);
		goto err_iomap;
	}

	return 0;

err_iomap:
	pci_iounmap(pdev, mem);
err_master:
	pci_clear_master(pdev);
err_region:
	pci_release_region(pdev, BAR_NUM);
err_device:
	pci_disable_device(pdev);
err_ar:
	ath10k_core_destroy(ar);
err_ar_pci:
	/* call HIF PCI free here */
	kfree(ar_pci);

	return ret;
static void ath10k_pci_remove(struct pci_dev *pdev)
	struct ath10k *ar = pci_get_drvdata(pdev);
	struct ath10k_pci *ar_pci;

	ath10k_dbg(ATH10K_DBG_PCI, "%s\n", __func__);

	if (!ar)
		return;

	ar_pci = ath10k_pci_priv(ar);

	if (!ar_pci)
		return;

	tasklet_kill(&ar_pci->msi_fw_err);

	ath10k_core_unregister(ar);

	pci_iounmap(pdev, ar_pci->mem);
	pci_release_region(pdev, BAR_NUM);
	pci_clear_master(pdev);
	pci_disable_device(pdev);

	ath10k_core_destroy(ar);
	kfree(ar_pci);
MODULE_DEVICE_TABLE(pci, ath10k_pci_id_table);

static struct pci_driver ath10k_pci_driver = {
	.name = "ath10k_pci",
	.id_table = ath10k_pci_id_table,
	.probe = ath10k_pci_probe,
	.remove = ath10k_pci_remove,
};

static int __init ath10k_pci_init(void)
	int ret;

	ret = pci_register_driver(&ath10k_pci_driver);
	if (ret)
		ath10k_err("failed to register PCI driver: %d\n", ret);

	return ret;
module_init(ath10k_pci_init);

static void __exit ath10k_pci_exit(void)
	pci_unregister_driver(&ath10k_pci_driver);
module_exit(ath10k_pci_exit);

MODULE_AUTHOR("Qualcomm Atheros");
MODULE_DESCRIPTION("Driver support for Atheros QCA988X PCIe devices");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" QCA988X_HW_2_0_FW_FILE);
MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" QCA988X_HW_2_0_OTP_FILE);
MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" QCA988X_HW_2_0_BOARD_DATA_FILE);