/*
 * Copyright (c) 2005-2011 Atheros Communications Inc.
 * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */
#include <linux/pci.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/bitops.h>

#include "targaddrs.h"
enum ath10k_pci_irq_mode {
	ATH10K_PCI_IRQ_AUTO = 0,
	ATH10K_PCI_IRQ_LEGACY = 1,
	ATH10K_PCI_IRQ_MSI = 2,
};

enum ath10k_pci_reset_mode {
	ATH10K_PCI_RESET_AUTO = 0,
	ATH10K_PCI_RESET_WARM_ONLY = 1,
};
static unsigned int ath10k_pci_target_ps;
static unsigned int ath10k_pci_irq_mode = ATH10K_PCI_IRQ_AUTO;
static unsigned int ath10k_pci_reset_mode = ATH10K_PCI_RESET_AUTO;

module_param_named(target_ps, ath10k_pci_target_ps, uint, 0644);
MODULE_PARM_DESC(target_ps, "Enable ath10k Target (SoC) PS option");

module_param_named(irq_mode, ath10k_pci_irq_mode, uint, 0644);
MODULE_PARM_DESC(irq_mode, "0: auto, 1: legacy, 2: msi (default: 0)");

module_param_named(reset_mode, ath10k_pci_reset_mode, uint, 0644);
MODULE_PARM_DESC(reset_mode, "0: auto, 1: warm only (default: 0)");
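/*
 * Usage note: these knobs are ordinary module parameters, so they can be
 * set at load time, e.g. (illustrative invocation, values taken from the
 * descriptions above):
 *
 *	modprobe ath10k_pci irq_mode=2 reset_mode=0
 *
 * or inspected/changed afterwards via
 * /sys/module/ath10k_pci/parameters/.
 */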
/* how long to wait for the target to initialise, in ms */
#define ATH10K_PCI_TARGET_WAIT 3000

#define QCA988X_2_0_DEVICE_ID	(0x003c)

static DEFINE_PCI_DEVICE_TABLE(ath10k_pci_id_table) = {
	{ PCI_VDEVICE(ATHEROS, QCA988X_2_0_DEVICE_ID) }, /* PCI-E QCA988X V2 */
	{0}
};
static int ath10k_pci_diag_read_access(struct ath10k *ar, u32 address,
				       u32 *data);

static int ath10k_pci_post_rx(struct ath10k *ar);
static int ath10k_pci_post_rx_pipe(struct ath10k_pci_pipe *pipe_info,
				   int num);
static void ath10k_pci_rx_pipe_cleanup(struct ath10k_pci_pipe *pipe_info);
static int ath10k_pci_cold_reset(struct ath10k *ar);
static int ath10k_pci_warm_reset(struct ath10k *ar);
static int ath10k_pci_wait_for_target_init(struct ath10k *ar);
static int ath10k_pci_init_irq(struct ath10k *ar);
static int ath10k_pci_deinit_irq(struct ath10k *ar);
static int ath10k_pci_request_irq(struct ath10k *ar);
static void ath10k_pci_free_irq(struct ath10k *ar);
static int ath10k_pci_bmi_wait(struct ath10k_ce_pipe *tx_pipe,
			       struct ath10k_ce_pipe *rx_pipe,
			       struct bmi_xfer *xfer);
static const struct ce_attr host_ce_config_wlan[] = {
	/* CE0: host->target HTC control and raw streams */
	{
		.flags = CE_ATTR_FLAGS,
	},

	/* CE1: target->host HTT + HTC control */
	{
		.flags = CE_ATTR_FLAGS,
		.dest_nentries = 512,
	},

	/* CE2: target->host WMI */
	{
		.flags = CE_ATTR_FLAGS,
	},

	/* CE3: host->target WMI */
	{
		.flags = CE_ATTR_FLAGS,
	},

	/* CE4: host->target HTT */
	{
		.flags = CE_ATTR_FLAGS | CE_ATTR_DIS_INTR,
		.src_nentries = CE_HTT_H2T_MSG_SRC_NENTRIES,
	},

	/* CE5: unused */
	{
		.flags = CE_ATTR_FLAGS,
	},

	/* CE6: target autonomous hif_memcpy */
	{
		.flags = CE_ATTR_FLAGS,
	},

	/* CE7: ce_diag, the Diagnostic Window */
	{
		.flags = CE_ATTR_FLAGS,
		.src_sz_max = DIAG_TRANSFER_LIMIT,
	},
};
/* Target firmware's Copy Engine configuration. */
static const struct ce_pipe_config target_ce_config_wlan[] = {
	/* CE0: host->target HTC control and raw streams */
	{
		.pipedir = PIPEDIR_OUT,
		.flags = CE_ATTR_FLAGS,
	},

	/* CE1: target->host HTT + HTC control */
	{
		.pipedir = PIPEDIR_IN,
		.flags = CE_ATTR_FLAGS,
	},

	/* CE2: target->host WMI */
	{
		.pipedir = PIPEDIR_IN,
		.flags = CE_ATTR_FLAGS,
	},

	/* CE3: host->target WMI */
	{
		.pipedir = PIPEDIR_OUT,
		.flags = CE_ATTR_FLAGS,
	},

	/* CE4: host->target HTT */
	{
		.pipedir = PIPEDIR_OUT,
		.flags = CE_ATTR_FLAGS,
	},

	/* NB: 50% of src nentries, since tx has 2 frags */

	/* CE5: unused */
	{
		.pipedir = PIPEDIR_OUT,
		.flags = CE_ATTR_FLAGS,
	},

	/* CE6: Reserved for target autonomous hif_memcpy */
	{
		.pipedir = PIPEDIR_INOUT,
		.flags = CE_ATTR_FLAGS,
	},

	/* CE7 used only by Host */
};
static bool ath10k_pci_irq_pending(struct ath10k *ar)
{
	u32 cause;

	/* Check if the shared legacy irq is for us */
	cause = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
				  PCIE_INTR_CAUSE_ADDRESS);
	if (cause & (PCIE_INTR_FIRMWARE_MASK | PCIE_INTR_CE_MASK_ALL))
		return true;

	return false;
}

static void ath10k_pci_disable_and_clear_legacy_irq(struct ath10k *ar)
{
	/* IMPORTANT: INTR_CLR register has to be set after
	 * INTR_ENABLE is set to 0, otherwise the interrupt cannot be
	 * deasserted. */
	ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + PCIE_INTR_ENABLE_ADDRESS,
			   0);
	ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + PCIE_INTR_CLR_ADDRESS,
			   PCIE_INTR_FIRMWARE_MASK | PCIE_INTR_CE_MASK_ALL);

	/* IMPORTANT: this extra read transaction is required to
	 * flush the posted write buffer. */
	(void)ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
				PCIE_INTR_ENABLE_ADDRESS);
}
static void ath10k_pci_enable_legacy_irq(struct ath10k *ar)
{
	ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS +
			   PCIE_INTR_ENABLE_ADDRESS,
			   PCIE_INTR_FIRMWARE_MASK | PCIE_INTR_CE_MASK_ALL);

	/* IMPORTANT: this extra read transaction is required to
	 * flush the posted write buffer. */
	(void)ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
				PCIE_INTR_ENABLE_ADDRESS);
}
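/*
 * Aside: the disable/enable pair above relies on a standard MMIO idiom.
 * PCIe writes are posted (buffered), so a driver that needs the device to
 * have actually observed a write before proceeding issues a dummy read
 * from the same register space; the read cannot complete until the
 * earlier writes have landed. A minimal sketch of the pattern, using a
 * hypothetical helper name:
 */
#if 0	/* illustrative only, not part of the driver */
static inline void example_flush_posted_writes(struct ath10k *ar)
{
	/* The result is deliberately discarded; only the read
	 * transaction itself matters. */
	(void)ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
				PCIE_INTR_ENABLE_ADDRESS);
}
#endif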
static irqreturn_t ath10k_pci_early_irq_handler(int irq, void *arg)
{
	struct ath10k *ar = arg;
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

	if (ar_pci->num_msi_intrs == 0) {
		if (!ath10k_pci_irq_pending(ar))
			return IRQ_NONE;

		ath10k_pci_disable_and_clear_legacy_irq(ar);
	}

	tasklet_schedule(&ar_pci->early_irq_tasklet);

	return IRQ_HANDLED;
}

static int ath10k_pci_request_early_irq(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	int ret;

	/* Regardless of whether MSI-X/MSI/legacy irqs have been set up, the
	 * first interrupt from the irq vector is triggered in all cases for
	 * FW indication/errors. */
	ret = request_irq(ar_pci->pdev->irq, ath10k_pci_early_irq_handler,
			  IRQF_SHARED, "ath10k_pci (early)", ar);
	if (ret) {
		ath10k_warn("failed to request early irq: %d\n", ret);
		return ret;
	}

	return 0;
}

static void ath10k_pci_free_early_irq(struct ath10k *ar)
{
	free_irq(ath10k_pci_priv(ar)->pdev->irq, ar);
}
/*
 * Diagnostic read/write access is provided for startup/config/debug usage.
 * Caller must guarantee proper alignment, when applicable, and single user
 * at any moment.
 */
static int ath10k_pci_diag_read_mem(struct ath10k *ar, u32 address, void *data,
				    int nbytes)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	int ret = 0;
	u32 buf;
	unsigned int completed_nbytes, orig_nbytes, remaining_bytes;
	unsigned int id, flags;
	struct ath10k_ce_pipe *ce_diag;
	/* Host buffer address in CE space */
	u32 ce_data;
	dma_addr_t ce_data_base = 0;
	void *data_buf = NULL;
	int i;

	/*
	 * This code cannot handle reads to non-memory space. Redirect to the
	 * register read fn but preserve the multi word read capability of
	 * this fn.
	 */
	if (address < DRAM_BASE_ADDRESS) {
		if (!IS_ALIGNED(address, 4) ||
		    !IS_ALIGNED((unsigned long)data, 4))
			return -EIO;

		while ((nbytes >= 4) && ((ret = ath10k_pci_diag_read_access(
					   ar, address, (u32 *)data)) == 0)) {
			nbytes -= sizeof(u32);
			address += sizeof(u32);
			data += sizeof(u32);
		}
		return ret;
	}

	ce_diag = ar_pci->ce_diag;

	/*
	 * Allocate a temporary bounce buffer to hold caller's data
	 * to be DMA'ed from Target. This guarantees
	 *   1) 4-byte alignment
	 *   2) Buffer in DMA-able space
	 */
	orig_nbytes = nbytes;
	data_buf = (unsigned char *)dma_alloc_coherent(ar->dev, orig_nbytes,
						       &ce_data_base,
						       GFP_ATOMIC);
	if (!data_buf) {
		ret = -ENOMEM;
		goto done;
	}
	memset(data_buf, 0, orig_nbytes);

	remaining_bytes = orig_nbytes;
	ce_data = ce_data_base;
	while (remaining_bytes) {
		nbytes = min_t(unsigned int, remaining_bytes,
			       DIAG_TRANSFER_LIMIT);

		ret = ath10k_ce_recv_buf_enqueue(ce_diag, NULL, ce_data);
		if (ret != 0)
			goto done;

		/* Request CE to send from Target(!) address to Host buffer */
		/*
		 * The address supplied by the caller is in the
		 * Target CPU virtual address space.
		 *
		 * In order to use this address with the diagnostic CE,
		 * convert it from Target CPU virtual address space
		 * to CE address space.
		 */
		ath10k_pci_wake(ar);
		address = TARG_CPU_SPACE_TO_CE_SPACE(ar, ar_pci->mem,
						     address);
		ath10k_pci_sleep(ar);

		ret = ath10k_ce_send(ce_diag, NULL, (u32)address, nbytes, 0,
				     0);
		if (ret)
			goto done;

		i = 0;
		while (ath10k_ce_completed_send_next(ce_diag, NULL, &buf,
						     &completed_nbytes,
						     &id) != 0) {
			mdelay(1);
			if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) {
				ret = -EBUSY;
				goto done;
			}
		}

		if (nbytes != completed_nbytes) {
			ret = -EIO;
			goto done;
		}

		if (buf != (u32)address) {
			ret = -EIO;
			goto done;
		}

		i = 0;
		while (ath10k_ce_completed_recv_next(ce_diag, NULL, &buf,
						     &completed_nbytes, &id,
						     &flags) != 0) {
			mdelay(1);
			if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) {
				ret = -EBUSY;
				goto done;
			}
		}

		if (nbytes != completed_nbytes) {
			ret = -EIO;
			goto done;
		}

		if (buf != ce_data) {
			ret = -EIO;
			goto done;
		}

		remaining_bytes -= nbytes;
		address += nbytes;
		ce_data += nbytes;
	}

done:
	if (ret == 0) {
		/* Copy data from allocated DMA buf to caller's buf */
		WARN_ON_ONCE(orig_nbytes & 3);
		for (i = 0; i < orig_nbytes / sizeof(__le32); i++)
			((u32 *)data)[i] =
				__le32_to_cpu(((__le32 *)data_buf)[i]);
	} else {
		ath10k_warn("failed to read diag value at 0x%x: %d\n",
			    address, ret);
	}

	if (data_buf)
		dma_free_coherent(ar->dev, orig_nbytes, data_buf,
				  ce_data_base);

	return ret;
}
/* Read 4-byte aligned data from Target memory or register */
static int ath10k_pci_diag_read_access(struct ath10k *ar, u32 address,
				       u32 *data)
{
	/* Assume range doesn't cross this boundary */
	if (address >= DRAM_BASE_ADDRESS)
		return ath10k_pci_diag_read_mem(ar, address, data, sizeof(u32));

	ath10k_pci_wake(ar);
	*data = ath10k_pci_read32(ar, address);
	ath10k_pci_sleep(ar);

	return 0;
}
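/*
 * Example: the diagnostic window is how the host peeks at target RAM
 * without firmware cooperation. A minimal sketch (hypothetical helper;
 * ath10k_pci_hif_dump_area() below does the same thing for real):
 */
#if 0	/* illustrative only, not part of the driver */
static int example_read_failure_state(struct ath10k *ar, u32 *state)
{
	/* host_interest_item_address() maps a host-interest item into
	 * the target's address space; the diag machinery does the rest. */
	u32 addr = host_interest_item_address(HI_ITEM(hi_failure_state));

	return ath10k_pci_diag_read_access(ar, addr, state);
}
#endif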
static int ath10k_pci_diag_write_mem(struct ath10k *ar, u32 address,
				     const void *data, int nbytes)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	int ret = 0;
	u32 buf;
	unsigned int completed_nbytes, orig_nbytes, remaining_bytes;
	unsigned int id, flags;
	struct ath10k_ce_pipe *ce_diag;
	void *data_buf = NULL;
	u32 ce_data;	/* Host buffer address in CE space */
	dma_addr_t ce_data_base = 0;
	int i;

	ce_diag = ar_pci->ce_diag;

	/*
	 * Allocate a temporary bounce buffer to hold caller's data
	 * to be DMA'ed to Target. This guarantees
	 *   1) 4-byte alignment
	 *   2) Buffer in DMA-able space
	 */
	orig_nbytes = nbytes;
	data_buf = (unsigned char *)dma_alloc_coherent(ar->dev, orig_nbytes,
						       &ce_data_base,
						       GFP_ATOMIC);
	if (!data_buf) {
		ret = -ENOMEM;
		goto done;
	}

	/* Copy caller's data to allocated DMA buf */
	WARN_ON_ONCE(orig_nbytes & 3);
	for (i = 0; i < orig_nbytes / sizeof(__le32); i++)
		((__le32 *)data_buf)[i] = __cpu_to_le32(((u32 *)data)[i]);

	/*
	 * The address supplied by the caller is in the
	 * Target CPU virtual address space.
	 *
	 * In order to use this address with the diagnostic CE,
	 * convert it from Target CPU virtual address space
	 * to CE address space.
	 */
	ath10k_pci_wake(ar);
	address = TARG_CPU_SPACE_TO_CE_SPACE(ar, ar_pci->mem, address);
	ath10k_pci_sleep(ar);

	remaining_bytes = orig_nbytes;
	ce_data = ce_data_base;
	while (remaining_bytes) {
		/* FIXME: check cast */
		nbytes = min_t(int, remaining_bytes, DIAG_TRANSFER_LIMIT);

		/* Set up to receive directly into Target(!) address */
		ret = ath10k_ce_recv_buf_enqueue(ce_diag, NULL, address);
		if (ret != 0)
			goto done;

		/*
		 * Request CE to send caller-supplied data that
		 * was copied to bounce buffer to Target(!) address.
		 */
		ret = ath10k_ce_send(ce_diag, NULL, (u32)ce_data,
				     nbytes, 0, 0);
		if (ret != 0)
			goto done;

		i = 0;
		while (ath10k_ce_completed_send_next(ce_diag, NULL, &buf,
						     &completed_nbytes,
						     &id) != 0) {
			mdelay(1);
			if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) {
				ret = -EBUSY;
				goto done;
			}
		}

		if (nbytes != completed_nbytes) {
			ret = -EIO;
			goto done;
		}

		if (buf != ce_data) {
			ret = -EIO;
			goto done;
		}

		i = 0;
		while (ath10k_ce_completed_recv_next(ce_diag, NULL, &buf,
						     &completed_nbytes, &id,
						     &flags) != 0) {
			mdelay(1);
			if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) {
				ret = -EBUSY;
				goto done;
			}
		}

		if (nbytes != completed_nbytes) {
			ret = -EIO;
			goto done;
		}

		if (buf != address) {
			ret = -EIO;
			goto done;
		}

		remaining_bytes -= nbytes;
		address += nbytes;
		ce_data += nbytes;
	}

done:
	if (data_buf)
		dma_free_coherent(ar->dev, orig_nbytes, data_buf,
				  ce_data_base);

	if (ret != 0)
		ath10k_warn("failed to write diag value at 0x%x: %d\n",
			    address, ret);

	return ret;
}

/* Write 4B data to Target memory or register */
static int ath10k_pci_diag_write_access(struct ath10k *ar, u32 address,
					u32 data)
{
	/* Assume range doesn't cross this boundary */
	if (address >= DRAM_BASE_ADDRESS)
		return ath10k_pci_diag_write_mem(ar, address, &data,
						 sizeof(u32));

	ath10k_pci_wake(ar);
	ath10k_pci_write32(ar, address, data);
	ath10k_pci_sleep(ar);

	return 0;
}
static bool ath10k_pci_target_is_awake(struct ath10k *ar)
{
	void __iomem *mem = ath10k_pci_priv(ar)->mem;
	u32 val;

	val = ioread32(mem + PCIE_LOCAL_BASE_ADDRESS +
		       RTC_STATE_ADDRESS);
	return (RTC_STATE_V_GET(val) == RTC_STATE_V_ON);
}

int ath10k_do_pci_wake(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	void __iomem *pci_addr = ar_pci->mem;
	int tot_delay = 0;
	int curr_delay = 5;

	if (atomic_read(&ar_pci->keep_awake_count) == 0) {
		/* Force AWAKE */
		iowrite32(PCIE_SOC_WAKE_V_MASK,
			  pci_addr + PCIE_LOCAL_BASE_ADDRESS +
			  PCIE_SOC_WAKE_ADDRESS);
	}
	atomic_inc(&ar_pci->keep_awake_count);

	if (ar_pci->verified_awake)
		return 0;

	for (;;) {
		if (ath10k_pci_target_is_awake(ar)) {
			ar_pci->verified_awake = true;
			return 0;
		}

		if (tot_delay > PCIE_WAKE_TIMEOUT) {
			ath10k_warn("target took longer than %d us to wake up (awake count %d)\n",
				    PCIE_WAKE_TIMEOUT,
				    atomic_read(&ar_pci->keep_awake_count));
			return -ETIMEDOUT;
		}

		udelay(curr_delay);
		tot_delay += curr_delay;

		if (curr_delay < 50)
			curr_delay += 5;
	}
}

void ath10k_do_pci_sleep(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	void __iomem *pci_addr = ar_pci->mem;

	if (atomic_dec_and_test(&ar_pci->keep_awake_count)) {
		/* Allow sleep */
		ar_pci->verified_awake = false;
		iowrite32(PCIE_SOC_WAKE_RESET,
			  pci_addr + PCIE_LOCAL_BASE_ADDRESS +
			  PCIE_SOC_WAKE_ADDRESS);
	}
}
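/*
 * Usage note: wake/sleep calls nest. Every ath10k_do_pci_wake() must be
 * paired with an ath10k_do_pci_sleep(); the device is only allowed to
 * doze again when keep_awake_count drops back to zero. A typical
 * register access therefore follows this pattern:
 *
 *	ath10k_pci_wake(ar);
 *	val = ath10k_pci_read32(ar, addr);
 *	ath10k_pci_sleep(ar);
 */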
/* Called by lower (CE) layer when a send to Target completes. */
static void ath10k_pci_ce_send_done(struct ath10k_ce_pipe *ce_state)
{
	struct ath10k *ar = ce_state->ar;
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	struct ath10k_hif_cb *cb = &ar_pci->msg_callbacks_current;
	void *transfer_context;
	u32 ce_data;
	unsigned int nbytes;
	unsigned int transfer_id;

	while (ath10k_ce_completed_send_next(ce_state, &transfer_context,
					     &ce_data, &nbytes,
					     &transfer_id) == 0) {
		/* no need to call tx completion for NULL pointers */
		if (transfer_context == NULL)
			continue;

		cb->tx_completion(ar, transfer_context, transfer_id);
	}
}

/* Called by lower (CE) layer when data is received from the Target. */
static void ath10k_pci_ce_recv_data(struct ath10k_ce_pipe *ce_state)
{
	struct ath10k *ar = ce_state->ar;
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	struct ath10k_pci_pipe *pipe_info =  &ar_pci->pipe_info[ce_state->id];
	struct ath10k_hif_cb *cb = &ar_pci->msg_callbacks_current;
	struct sk_buff *skb;
	void *transfer_context;
	u32 ce_data;
	unsigned int nbytes, max_nbytes;
	unsigned int transfer_id;
	unsigned int flags;
	int err;

	while (ath10k_ce_completed_recv_next(ce_state, &transfer_context,
					     &ce_data, &nbytes, &transfer_id,
					     &flags) == 0) {
		err = ath10k_pci_post_rx_pipe(pipe_info, 1);
		if (unlikely(err)) {
			/* FIXME: retry */
			ath10k_warn("failed to replenish CE rx ring %d: %d\n",
				    pipe_info->pipe_num, err);
		}

		skb = transfer_context;
		max_nbytes = skb->len + skb_tailroom(skb);
		dma_unmap_single(ar->dev, ATH10K_SKB_CB(skb)->paddr,
				 max_nbytes, DMA_FROM_DEVICE);

		if (unlikely(max_nbytes < nbytes)) {
			ath10k_warn("rxed more than expected (nbytes %d, max %d)",
				    nbytes, max_nbytes);
			dev_kfree_skb_any(skb);
			continue;
		}

		skb_put(skb, nbytes);
		cb->rx_completion(ar, skb, pipe_info->pipe_num);
	}
}
static int ath10k_pci_hif_tx_sg(struct ath10k *ar, u8 pipe_id,
				struct ath10k_hif_sg_item *items, int n_items)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	struct ath10k_pci_pipe *pci_pipe = &ar_pci->pipe_info[pipe_id];
	struct ath10k_ce_pipe *ce_pipe = pci_pipe->ce_hdl;
	struct ath10k_ce_ring *src_ring = ce_pipe->src_ring;
	unsigned int nentries_mask = src_ring->nentries_mask;
	unsigned int sw_index = src_ring->sw_index;
	unsigned int write_index = src_ring->write_index;
	int err, i;

	spin_lock_bh(&ar_pci->ce_lock);

	if (unlikely(CE_RING_DELTA(nentries_mask,
				   write_index, sw_index - 1) < n_items)) {
		err = -ENOBUFS;
		goto unlock;
	}

	for (i = 0; i < n_items - 1; i++) {
		ath10k_dbg(ATH10K_DBG_PCI,
			   "pci tx item %d paddr 0x%08x len %d n_items %d\n",
			   i, items[i].paddr, items[i].len, n_items);
		ath10k_dbg_dump(ATH10K_DBG_PCI_DUMP, NULL, "item data: ",
				items[i].vaddr, items[i].len);

		err = ath10k_ce_send_nolock(ce_pipe,
					    items[i].transfer_context,
					    items[i].paddr,
					    items[i].len,
					    items[i].transfer_id,
					    CE_SEND_FLAG_GATHER);
		if (err)
			goto unlock;
	}

	/* `i` is equal to `n_items - 1` after for() */

	ath10k_dbg(ATH10K_DBG_PCI,
		   "pci tx item %d paddr 0x%08x len %d n_items %d\n",
		   i, items[i].paddr, items[i].len, n_items);
	ath10k_dbg_dump(ATH10K_DBG_PCI_DUMP, NULL, "item data: ",
			items[i].vaddr, items[i].len);

	err = ath10k_ce_send_nolock(ce_pipe,
				    items[i].transfer_context,
				    items[i].paddr,
				    items[i].len,
				    items[i].transfer_id,
				    0);
	if (err)
		goto unlock;

	err = 0;
unlock:
	spin_unlock_bh(&ar_pci->ce_lock);
	return err;
}
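/*
 * Design note on the loop above: every fragment except the last is
 * submitted with CE_SEND_FLAG_GATHER, and the final fragment goes out
 * with flags 0. The cleared flag is what tells the copy engine that the
 * scatter-gather chain is complete and the whole batch can be fired.
 */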
static u16 ath10k_pci_hif_get_free_queue_number(struct ath10k *ar, u8 pipe)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

	ath10k_dbg(ATH10K_DBG_PCI, "pci hif get free queue number\n");

	return ath10k_ce_num_free_src_entries(ar_pci->pipe_info[pipe].ce_hdl);
}

static void ath10k_pci_hif_dump_area(struct ath10k *ar)
{
	u32 reg_dump_area = 0;
	u32 reg_dump_values[REG_DUMP_COUNT_QCA988X] = {};
	u32 host_addr;
	int ret;
	u32 i;

	ath10k_err("firmware crashed!\n");
	ath10k_err("hardware name %s version 0x%x\n",
		   ar->hw_params.name, ar->target_version);
	ath10k_err("firmware version: %s\n", ar->hw->wiphy->fw_version);

	host_addr = host_interest_item_address(HI_ITEM(hi_failure_state));
	ret = ath10k_pci_diag_read_mem(ar, host_addr,
				       &reg_dump_area, sizeof(u32));
	if (ret) {
		ath10k_err("failed to read FW dump area address: %d\n", ret);
		return;
	}

	ath10k_err("target register dump location: 0x%08X\n", reg_dump_area);

	ret = ath10k_pci_diag_read_mem(ar, reg_dump_area,
				       &reg_dump_values[0],
				       REG_DUMP_COUNT_QCA988X * sizeof(u32));
	if (ret != 0) {
		ath10k_err("failed to read FW dump area: %d\n", ret);
		return;
	}

	BUILD_BUG_ON(REG_DUMP_COUNT_QCA988X % 4);

	ath10k_err("target register dump:\n");
	for (i = 0; i < REG_DUMP_COUNT_QCA988X; i += 4)
		ath10k_err("[%02d]: 0x%08X 0x%08X 0x%08X 0x%08X\n",
			   i,
			   reg_dump_values[i],
			   reg_dump_values[i + 1],
			   reg_dump_values[i + 2],
			   reg_dump_values[i + 3]);

	queue_work(ar->workqueue, &ar->restart_work);
}
static void ath10k_pci_hif_send_complete_check(struct ath10k *ar, u8 pipe,
					       int force)
{
	ath10k_dbg(ATH10K_DBG_PCI, "pci hif send complete check\n");

	if (!force) {
		int resources;
		/*
		 * Decide whether to actually poll for completions, or just
		 * wait for a later chance.
		 * If there seem to be plenty of resources left, then just
		 * wait, since checking involves reading a CE register, which
		 * is a relatively expensive operation.
		 */
		resources = ath10k_pci_hif_get_free_queue_number(ar, pipe);

		/*
		 * If at least 50% of the total resources are still available,
		 * don't bother checking again yet.
		 */
		if (resources > (host_ce_config_wlan[pipe].src_nentries >> 1))
			return;
	}
	ath10k_ce_per_engine_service(ar, pipe);
}
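/*
 * Worked example for the threshold above: for a pipe with, say,
 * src_nentries = 16, the right shift gives (16 >> 1) = 8, so completion
 * polling is skipped while more than 8 send slots remain free and only
 * happens once the ring is at least half occupied. Halving via ">> 1"
 * keeps the check a single cheap integer operation.
 */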
static void ath10k_pci_hif_set_callbacks(struct ath10k *ar,
					 struct ath10k_hif_cb *callbacks)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

	ath10k_dbg(ATH10K_DBG_PCI, "pci hif set callbacks\n");

	memcpy(&ar_pci->msg_callbacks_current, callbacks,
	       sizeof(ar_pci->msg_callbacks_current));
}

static int ath10k_pci_setup_ce_irq(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	const struct ce_attr *attr;
	struct ath10k_pci_pipe *pipe_info;
	int pipe_num, disable_interrupts;

	for (pipe_num = 0; pipe_num < CE_COUNT; pipe_num++) {
		pipe_info = &ar_pci->pipe_info[pipe_num];

		/* Handle Diagnostic CE specially */
		if (pipe_info->ce_hdl == ar_pci->ce_diag)
			continue;

		attr = &host_ce_config_wlan[pipe_num];

		if (attr->src_nentries) {
			disable_interrupts = attr->flags & CE_ATTR_DIS_INTR;
			ath10k_ce_send_cb_register(pipe_info->ce_hdl,
						   ath10k_pci_ce_send_done,
						   disable_interrupts);
		}

		if (attr->dest_nentries)
			ath10k_ce_recv_cb_register(pipe_info->ce_hdl,
						   ath10k_pci_ce_recv_data);
	}

	return 0;
}

static void ath10k_pci_kill_tasklet(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	int i;

	tasklet_kill(&ar_pci->intr_tq);
	tasklet_kill(&ar_pci->msi_fw_err);
	tasklet_kill(&ar_pci->early_irq_tasklet);

	for (i = 0; i < CE_COUNT; i++)
		tasklet_kill(&ar_pci->pipe_info[i].intr);
}
/* TODO - temporary mapping while we have too few CE's */
static int ath10k_pci_hif_map_service_to_pipe(struct ath10k *ar,
					      u16 service_id, u8 *ul_pipe,
					      u8 *dl_pipe, int *ul_is_polled,
					      int *dl_is_polled)
{
	int ret = 0;

	ath10k_dbg(ATH10K_DBG_PCI, "pci hif map service\n");

	/* polling for received messages not supported */
	*dl_is_polled = 0;

	switch (service_id) {
	case ATH10K_HTC_SVC_ID_HTT_DATA_MSG:
		/*
		 * Host->target HTT gets its own pipe, so it can be polled
		 * while other pipes are interrupt driven.
		 */
		*ul_pipe = 4;
		/*
		 * Use the same target->host pipe for HTC ctrl, HTC raw
		 * streams, and HTT.
		 */
		*dl_pipe = 1;
		break;

	case ATH10K_HTC_SVC_ID_RSVD_CTRL:
	case ATH10K_HTC_SVC_ID_TEST_RAW_STREAMS:
		/*
		 * Note: HTC_RAW_STREAMS_SVC is currently unused, and
		 * HTC_CTRL_RSVD_SVC could share the same pipe as the
		 * WMI services. So, if another CE is needed, change
		 * this to *ul_pipe = 3, which frees up CE 0.
		 */
		*ul_pipe = 0;
		*dl_pipe = 1;
		break;

	case ATH10K_HTC_SVC_ID_WMI_DATA_BK:
	case ATH10K_HTC_SVC_ID_WMI_DATA_BE:
	case ATH10K_HTC_SVC_ID_WMI_DATA_VI:
	case ATH10K_HTC_SVC_ID_WMI_DATA_VO:
	case ATH10K_HTC_SVC_ID_WMI_CONTROL:
		*ul_pipe = 3;
		*dl_pipe = 2;
		break;

	/* pipe 6 reserved */
	/* pipe 7 reserved */

	default:
		ret = -1;
		break;
	}
	*ul_is_polled =
		(host_ce_config_wlan[*ul_pipe].flags & CE_ATTR_DIS_INTR) != 0;

	return ret;
}
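/*
 * Example of how a caller consumes this mapping (sketch; the real caller
 * lives in the HTC code): resolve the WMI control service to its
 * upload/download pipe pair before connecting the endpoint.
 */
#if 0	/* illustrative only, not part of the driver */
	u8 ul_pipe, dl_pipe;
	int ul_polled, dl_polled;

	ath10k_pci_hif_map_service_to_pipe(ar, ATH10K_HTC_SVC_ID_WMI_CONTROL,
					   &ul_pipe, &dl_pipe,
					   &ul_polled, &dl_polled);
	/* ul_pipe/dl_pipe now name the CE pipes to use for WMI traffic */
#endif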
static void ath10k_pci_hif_get_default_pipe(struct ath10k *ar,
					    u8 *ul_pipe, u8 *dl_pipe)
{
	int ul_is_polled, dl_is_polled;

	ath10k_dbg(ATH10K_DBG_PCI, "pci hif get default pipe\n");

	(void)ath10k_pci_hif_map_service_to_pipe(ar,
						 ATH10K_HTC_SVC_ID_RSVD_CTRL,
						 ul_pipe,
						 dl_pipe,
						 &ul_is_polled,
						 &dl_is_polled);
}
static int ath10k_pci_post_rx_pipe(struct ath10k_pci_pipe *pipe_info,
				   int num)
{
	struct ath10k *ar = pipe_info->hif_ce_state;
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	struct ath10k_ce_pipe *ce_state = pipe_info->ce_hdl;
	struct sk_buff *skb;
	dma_addr_t ce_data;
	int i, ret = 0;

	if (pipe_info->buf_sz == 0)
		return 0;

	for (i = 0; i < num; i++) {
		skb = dev_alloc_skb(pipe_info->buf_sz);
		if (!skb) {
			ath10k_warn("failed to allocate skbuff for pipe %d\n",
				    pipe_info->pipe_num);
			ret = -ENOMEM;
			goto err;
		}

		WARN_ONCE((unsigned long)skb->data & 3, "unaligned skb");

		ce_data = dma_map_single(ar->dev, skb->data,
					 skb->len + skb_tailroom(skb),
					 DMA_FROM_DEVICE);

		if (unlikely(dma_mapping_error(ar->dev, ce_data))) {
			ath10k_warn("failed to DMA map sk_buff\n");
			dev_kfree_skb_any(skb);
			ret = -EIO;
			goto err;
		}

		ATH10K_SKB_CB(skb)->paddr = ce_data;

		pci_dma_sync_single_for_device(ar_pci->pdev, ce_data,
					       pipe_info->buf_sz,
					       PCI_DMA_FROMDEVICE);

		ret = ath10k_ce_recv_buf_enqueue(ce_state, (void *)skb,
						 ce_data);
		if (ret) {
			ath10k_warn("failed to enqueue to pipe %d: %d\n",
				    pipe_info->pipe_num, ret);
			goto err;
		}
	}

	return ret;

err:
	ath10k_pci_rx_pipe_cleanup(pipe_info);
	return ret;
}

static int ath10k_pci_post_rx(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	struct ath10k_pci_pipe *pipe_info;
	const struct ce_attr *attr;
	int pipe_num, ret = 0;

	for (pipe_num = 0; pipe_num < CE_COUNT; pipe_num++) {
		pipe_info = &ar_pci->pipe_info[pipe_num];
		attr = &host_ce_config_wlan[pipe_num];

		if (attr->dest_nentries == 0)
			continue;

		ret = ath10k_pci_post_rx_pipe(pipe_info,
					      attr->dest_nentries - 1);
		if (ret) {
			ath10k_warn("failed to post RX buffer for pipe %d: %d\n",
				    pipe_num, ret);

			for (; pipe_num >= 0; pipe_num--) {
				pipe_info = &ar_pci->pipe_info[pipe_num];
				ath10k_pci_rx_pipe_cleanup(pipe_info);
			}
			return ret;
		}
	}

	return 0;
}
static int ath10k_pci_hif_start(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	int ret, ret_early;

	ath10k_dbg(ATH10K_DBG_BOOT, "boot hif start\n");

	ath10k_pci_free_early_irq(ar);
	ath10k_pci_kill_tasklet(ar);

	ret = ath10k_pci_request_irq(ar);
	if (ret) {
		ath10k_warn("failed to request irqs: %d\n", ret);
		goto err_early_irq;
	}

	ret = ath10k_pci_setup_ce_irq(ar);
	if (ret) {
		ath10k_warn("failed to setup CE interrupts: %d\n", ret);
		goto err_stop;
	}

	/* Post buffers once to start things off. */
	ret = ath10k_pci_post_rx(ar);
	if (ret) {
		ath10k_warn("failed to post RX buffers for all pipes: %d\n",
			    ret);
		goto err_stop;
	}

	ar_pci->started = 1;
	return 0;

err_stop:
	ath10k_ce_disable_interrupts(ar);
	ath10k_pci_free_irq(ar);
	ath10k_pci_kill_tasklet(ar);
err_early_irq:
	/* Though there should be no interrupts (device was reset)
	 * power_down() expects the early IRQ to be installed as per the
	 * driver lifecycle. */
	ret_early = ath10k_pci_request_early_irq(ar);
	if (ret_early)
		ath10k_warn("failed to re-enable early irq: %d\n", ret_early);

	return ret;
}
static void ath10k_pci_rx_pipe_cleanup(struct ath10k_pci_pipe *pipe_info)
{
	struct ath10k *ar;
	struct ath10k_pci *ar_pci;
	struct ath10k_ce_pipe *ce_hdl;
	u32 buf_sz;
	struct sk_buff *netbuf;
	u32 ce_data;

	buf_sz = pipe_info->buf_sz;

	/* Unused Copy Engine */
	if (buf_sz == 0)
		return;

	ar = pipe_info->hif_ce_state;
	ar_pci = ath10k_pci_priv(ar);

	if (!ar_pci->started)
		return;

	ce_hdl = pipe_info->ce_hdl;

	while (ath10k_ce_revoke_recv_next(ce_hdl, (void **)&netbuf,
					  &ce_data) == 0) {
		dma_unmap_single(ar->dev, ATH10K_SKB_CB(netbuf)->paddr,
				 netbuf->len + skb_tailroom(netbuf),
				 DMA_FROM_DEVICE);
		dev_kfree_skb_any(netbuf);
	}
}

static void ath10k_pci_tx_pipe_cleanup(struct ath10k_pci_pipe *pipe_info)
{
	struct ath10k *ar;
	struct ath10k_pci *ar_pci;
	struct ath10k_ce_pipe *ce_hdl;
	struct sk_buff *netbuf;
	u32 ce_data;
	unsigned int nbytes;
	unsigned int id;
	u32 buf_sz;

	buf_sz = pipe_info->buf_sz;

	/* Unused Copy Engine */
	if (buf_sz == 0)
		return;

	ar = pipe_info->hif_ce_state;
	ar_pci = ath10k_pci_priv(ar);

	if (!ar_pci->started)
		return;

	ce_hdl = pipe_info->ce_hdl;

	while (ath10k_ce_cancel_send_next(ce_hdl, (void **)&netbuf,
					  &ce_data, &nbytes, &id) == 0) {
		/* no need to call tx completion for NULL pointers */
		if (!netbuf)
			continue;

		ar_pci->msg_callbacks_current.tx_completion(ar,
							    netbuf,
							    id);
	}
}
/*
 * Cleanup residual buffers for device shutdown:
 *    buffers that were enqueued for receive
 *    buffers that were to be sent
 * Note: Buffers that had completed but which were
 * not yet processed are on a completion queue. They
 * are handled when the completion thread shuts down.
 */
static void ath10k_pci_buffer_cleanup(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	int pipe_num;

	for (pipe_num = 0; pipe_num < CE_COUNT; pipe_num++) {
		struct ath10k_pci_pipe *pipe_info;

		pipe_info = &ar_pci->pipe_info[pipe_num];
		ath10k_pci_rx_pipe_cleanup(pipe_info);
		ath10k_pci_tx_pipe_cleanup(pipe_info);
	}
}

static void ath10k_pci_ce_deinit(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	struct ath10k_pci_pipe *pipe_info;
	int pipe_num;

	for (pipe_num = 0; pipe_num < CE_COUNT; pipe_num++) {
		pipe_info = &ar_pci->pipe_info[pipe_num];
		if (pipe_info->ce_hdl) {
			ath10k_ce_deinit(pipe_info->ce_hdl);
			pipe_info->ce_hdl = NULL;
			pipe_info->buf_sz = 0;
		}
	}
}
static void ath10k_pci_hif_stop(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	int ret;

	ath10k_dbg(ATH10K_DBG_BOOT, "boot hif stop\n");

	ret = ath10k_ce_disable_interrupts(ar);
	if (ret)
		ath10k_warn("failed to disable CE interrupts: %d\n", ret);

	ath10k_pci_free_irq(ar);
	ath10k_pci_kill_tasklet(ar);

	ret = ath10k_pci_request_early_irq(ar);
	if (ret)
		ath10k_warn("failed to re-enable early irq: %d\n", ret);

	/* At this point, asynchronous threads are stopped, the target should
	 * not DMA nor interrupt. We process the leftovers and then free
	 * everything else up. */

	ath10k_pci_buffer_cleanup(ar);

	/* Make sure the device won't access any structures on the host by
	 * resetting it. The device was fed with PCI CE ringbuffer
	 * configuration during init. If ringbuffers are freed and the device
	 * were to access them this could lead to memory corruption on the
	 * host. */
	ath10k_pci_warm_reset(ar);

	ar_pci->started = 0;
}
static int ath10k_pci_hif_exchange_bmi_msg(struct ath10k *ar,
					   void *req, u32 req_len,
					   void *resp, u32 *resp_len)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	struct ath10k_pci_pipe *pci_tx = &ar_pci->pipe_info[BMI_CE_NUM_TO_TARG];
	struct ath10k_pci_pipe *pci_rx = &ar_pci->pipe_info[BMI_CE_NUM_TO_HOST];
	struct ath10k_ce_pipe *ce_tx = pci_tx->ce_hdl;
	struct ath10k_ce_pipe *ce_rx = pci_rx->ce_hdl;
	dma_addr_t req_paddr = 0;
	dma_addr_t resp_paddr = 0;
	struct bmi_xfer xfer = {};
	void *treq, *tresp = NULL;
	int ret = 0;

	if (resp && !resp_len)
		return -EINVAL;

	if (resp && resp_len && *resp_len == 0)
		return -EINVAL;

	treq = kmemdup(req, req_len, GFP_KERNEL);
	if (!treq)
		return -ENOMEM;

	req_paddr = dma_map_single(ar->dev, treq, req_len, DMA_TO_DEVICE);
	ret = dma_mapping_error(ar->dev, req_paddr);
	if (ret)
		goto err_dma;

	if (resp && resp_len) {
		tresp = kzalloc(*resp_len, GFP_KERNEL);
		if (!tresp) {
			ret = -ENOMEM;
			goto err_req;
		}

		resp_paddr = dma_map_single(ar->dev, tresp, *resp_len,
					    DMA_FROM_DEVICE);
		ret = dma_mapping_error(ar->dev, resp_paddr);
		if (ret)
			goto err_req;

		xfer.wait_for_resp = true;
		xfer.resp_len = 0;

		ath10k_ce_recv_buf_enqueue(ce_rx, &xfer, resp_paddr);
	}

	init_completion(&xfer.done);

	ret = ath10k_ce_send(ce_tx, &xfer, req_paddr, req_len, -1, 0);
	if (ret)
		goto err_resp;

	ret = ath10k_pci_bmi_wait(ce_tx, ce_rx, &xfer);
	if (ret) {
		u32 unused_buffer;
		unsigned int unused_nbytes;
		unsigned int unused_id;

		ath10k_ce_cancel_send_next(ce_tx, NULL, &unused_buffer,
					   &unused_nbytes, &unused_id);
	} else {
		/* non-zero means we did not time out */
		ret = 0;
	}

err_resp:
	if (resp) {
		u32 unused_buffer;

		ath10k_ce_revoke_recv_next(ce_rx, NULL, &unused_buffer);
		dma_unmap_single(ar->dev, resp_paddr,
				 *resp_len, DMA_FROM_DEVICE);
	}
err_req:
	dma_unmap_single(ar->dev, req_paddr, req_len, DMA_TO_DEVICE);

	if (ret == 0 && resp_len) {
		*resp_len = min(*resp_len, xfer.resp_len);
		memcpy(resp, tresp, xfer.resp_len);
	}
err_dma:
	kfree(treq);
	kfree(tresp);

	return ret;
}
static void ath10k_pci_bmi_send_done(struct ath10k_ce_pipe *ce_state)
{
	struct bmi_xfer *xfer;
	u32 ce_data;
	unsigned int nbytes;
	unsigned int transfer_id;

	if (ath10k_ce_completed_send_next(ce_state, (void **)&xfer, &ce_data,
					  &nbytes, &transfer_id))
		return;

	if (xfer->wait_for_resp)
		return;

	complete(&xfer->done);
}

static void ath10k_pci_bmi_recv_data(struct ath10k_ce_pipe *ce_state)
{
	struct bmi_xfer *xfer;
	u32 ce_data;
	unsigned int nbytes;
	unsigned int transfer_id;
	unsigned int flags;

	if (ath10k_ce_completed_recv_next(ce_state, (void **)&xfer, &ce_data,
					  &nbytes, &transfer_id, &flags))
		return;

	if (!xfer->wait_for_resp) {
		ath10k_warn("unexpected: BMI data received; ignoring\n");
		return;
	}

	xfer->resp_len = nbytes;
	complete(&xfer->done);
}

static int ath10k_pci_bmi_wait(struct ath10k_ce_pipe *tx_pipe,
			       struct ath10k_ce_pipe *rx_pipe,
			       struct bmi_xfer *xfer)
{
	unsigned long timeout = jiffies + BMI_COMMUNICATION_TIMEOUT_HZ;

	while (time_before_eq(jiffies, timeout)) {
		ath10k_pci_bmi_send_done(tx_pipe);
		ath10k_pci_bmi_recv_data(rx_pipe);

		if (completion_done(&xfer->done))
			return 0;

		schedule();
	}

	return -ETIMEDOUT;
}
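/*
 * Design note: BMI (Boot Messaging Interface) runs before interrupts are
 * usable, so the wait loop above polls both completion rings by hand and
 * yields the CPU between iterations instead of sleeping on an
 * IRQ-driven completion.
 */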
/*
 * Map from service/endpoint to Copy Engine.
 * This table is derived from the CE_PCI TABLE, above.
 * It is passed to the Target at startup for use by firmware.
 */
static const struct service_to_pipe target_service_to_ce_map_wlan[] = {
	{
		 ATH10K_HTC_SVC_ID_WMI_DATA_VO,
		 PIPEDIR_OUT,		/* out = UL = host -> target */
		 3,
	},
	{
		 ATH10K_HTC_SVC_ID_WMI_DATA_VO,
		 PIPEDIR_IN,		/* in = DL = target -> host */
		 2,
	},
	{
		 ATH10K_HTC_SVC_ID_WMI_DATA_BK,
		 PIPEDIR_OUT,		/* out = UL = host -> target */
		 3,
	},
	{
		 ATH10K_HTC_SVC_ID_WMI_DATA_BK,
		 PIPEDIR_IN,		/* in = DL = target -> host */
		 2,
	},
	{
		 ATH10K_HTC_SVC_ID_WMI_DATA_BE,
		 PIPEDIR_OUT,		/* out = UL = host -> target */
		 3,
	},
	{
		 ATH10K_HTC_SVC_ID_WMI_DATA_BE,
		 PIPEDIR_IN,		/* in = DL = target -> host */
		 2,
	},
	{
		 ATH10K_HTC_SVC_ID_WMI_DATA_VI,
		 PIPEDIR_OUT,		/* out = UL = host -> target */
		 3,
	},
	{
		 ATH10K_HTC_SVC_ID_WMI_DATA_VI,
		 PIPEDIR_IN,		/* in = DL = target -> host */
		 2,
	},
	{
		 ATH10K_HTC_SVC_ID_WMI_CONTROL,
		 PIPEDIR_OUT,		/* out = UL = host -> target */
		 3,
	},
	{
		 ATH10K_HTC_SVC_ID_WMI_CONTROL,
		 PIPEDIR_IN,		/* in = DL = target -> host */
		 2,
	},
	{
		 ATH10K_HTC_SVC_ID_RSVD_CTRL,
		 PIPEDIR_OUT,		/* out = UL = host -> target */
		 0,		/* could be moved to 3 (share with WMI) */
	},
	{
		 ATH10K_HTC_SVC_ID_RSVD_CTRL,
		 PIPEDIR_IN,		/* in = DL = target -> host */
		 1,
	},
	{
		 ATH10K_HTC_SVC_ID_TEST_RAW_STREAMS,	/* not currently used */
		 PIPEDIR_OUT,		/* out = UL = host -> target */
		 0,
	},
	{
		 ATH10K_HTC_SVC_ID_TEST_RAW_STREAMS,	/* not currently used */
		 PIPEDIR_IN,		/* in = DL = target -> host */
		 1,
	},
	{
		 ATH10K_HTC_SVC_ID_HTT_DATA_MSG,
		 PIPEDIR_OUT,		/* out = UL = host -> target */
		 4,
	},
	{
		 ATH10K_HTC_SVC_ID_HTT_DATA_MSG,
		 PIPEDIR_IN,		/* in = DL = target -> host */
		 1,
	},

	/* (Additions here) */

	{ /* Must be last */
		 0,
		 0,
		 0,
	},
};
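/*
 * Reading this table: each entry is (service id, direction, CE pipe
 * number), and every service appears twice, once per direction, because
 * upload and download travel over different copy engines. The pipe
 * numbers must stay consistent with target_ce_config_wlan above.
 */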
/*
 * Send an interrupt to the device to wake up the Target CPU
 * so it has an opportunity to notice any changed state.
 */
static int ath10k_pci_wake_target_cpu(struct ath10k *ar)
{
	int ret;
	u32 core_ctrl;

	ret = ath10k_pci_diag_read_access(ar, SOC_CORE_BASE_ADDRESS |
					  CORE_CTRL_ADDRESS,
					  &core_ctrl);
	if (ret) {
		ath10k_warn("failed to read core_ctrl: %d\n", ret);
		return ret;
	}

	/* A_INUM_FIRMWARE interrupt to Target CPU */
	core_ctrl |= CORE_CTRL_CPU_INTR_MASK;

	ret = ath10k_pci_diag_write_access(ar, SOC_CORE_BASE_ADDRESS |
					   CORE_CTRL_ADDRESS, core_ctrl);
	if (ret) {
		ath10k_warn("failed to set target CPU interrupt mask: %d\n",
			    ret);
		return ret;
	}

	return 0;
}
static int ath10k_pci_init_config(struct ath10k *ar)
{
	u32 interconnect_targ_addr;
	u32 pcie_state_targ_addr = 0;
	u32 pipe_cfg_targ_addr = 0;
	u32 svc_to_pipe_map = 0;
	u32 pcie_config_flags = 0;
	u32 ealloc_value;
	u32 ealloc_targ_addr;
	u32 flag2_value;
	u32 flag2_targ_addr;
	int ret = 0;

	/* Download to Target the CE Config and the service-to-CE map */
	interconnect_targ_addr =
		host_interest_item_address(HI_ITEM(hi_interconnect_state));

	/* Supply Target-side CE configuration */
	ret = ath10k_pci_diag_read_access(ar, interconnect_targ_addr,
					  &pcie_state_targ_addr);
	if (ret != 0) {
		ath10k_err("Failed to get pcie state addr: %d\n", ret);
		return ret;
	}

	if (pcie_state_targ_addr == 0) {
		ret = -EIO;
		ath10k_err("Invalid pcie state addr\n");
		return ret;
	}

	ret = ath10k_pci_diag_read_access(ar, pcie_state_targ_addr +
					  offsetof(struct pcie_state,
						   pipe_cfg_addr),
					  &pipe_cfg_targ_addr);
	if (ret != 0) {
		ath10k_err("Failed to get pipe cfg addr: %d\n", ret);
		return ret;
	}

	if (pipe_cfg_targ_addr == 0) {
		ret = -EIO;
		ath10k_err("Invalid pipe cfg addr\n");
		return ret;
	}

	ret = ath10k_pci_diag_write_mem(ar, pipe_cfg_targ_addr,
					target_ce_config_wlan,
					sizeof(target_ce_config_wlan));

	if (ret != 0) {
		ath10k_err("Failed to write pipe cfg: %d\n", ret);
		return ret;
	}

	ret = ath10k_pci_diag_read_access(ar, pcie_state_targ_addr +
					  offsetof(struct pcie_state,
						   svc_to_pipe_map),
					  &svc_to_pipe_map);
	if (ret != 0) {
		ath10k_err("Failed to get svc/pipe map: %d\n", ret);
		return ret;
	}

	if (svc_to_pipe_map == 0) {
		ret = -EIO;
		ath10k_err("Invalid svc_to_pipe map\n");
		return ret;
	}

	ret = ath10k_pci_diag_write_mem(ar, svc_to_pipe_map,
					target_service_to_ce_map_wlan,
					sizeof(target_service_to_ce_map_wlan));
	if (ret != 0) {
		ath10k_err("Failed to write svc/pipe map: %d\n", ret);
		return ret;
	}

	ret = ath10k_pci_diag_read_access(ar, pcie_state_targ_addr +
					  offsetof(struct pcie_state,
						   config_flags),
					  &pcie_config_flags);
	if (ret != 0) {
		ath10k_err("Failed to get pcie config_flags: %d\n", ret);
		return ret;
	}

	pcie_config_flags &= ~PCIE_CONFIG_FLAG_ENABLE_L1;

	ret = ath10k_pci_diag_write_mem(ar, pcie_state_targ_addr +
					offsetof(struct pcie_state,
						 config_flags),
					&pcie_config_flags,
					sizeof(pcie_config_flags));
	if (ret != 0) {
		ath10k_err("Failed to write pcie config_flags: %d\n", ret);
		return ret;
	}

	/* configure early allocation */
	ealloc_targ_addr = host_interest_item_address(HI_ITEM(hi_early_alloc));

	ret = ath10k_pci_diag_read_access(ar, ealloc_targ_addr, &ealloc_value);
	if (ret != 0) {
		ath10k_err("Failed to get early alloc val: %d\n", ret);
		return ret;
	}

	/* first bank is switched to IRAM */
	ealloc_value |= ((HI_EARLY_ALLOC_MAGIC << HI_EARLY_ALLOC_MAGIC_SHIFT) &
			 HI_EARLY_ALLOC_MAGIC_MASK);
	ealloc_value |= ((1 << HI_EARLY_ALLOC_IRAM_BANKS_SHIFT) &
			 HI_EARLY_ALLOC_IRAM_BANKS_MASK);

	ret = ath10k_pci_diag_write_access(ar, ealloc_targ_addr, ealloc_value);
	if (ret != 0) {
		ath10k_err("Failed to set early alloc val: %d\n", ret);
		return ret;
	}

	/* Tell Target to proceed with initialization */
	flag2_targ_addr = host_interest_item_address(HI_ITEM(hi_option_flag2));

	ret = ath10k_pci_diag_read_access(ar, flag2_targ_addr, &flag2_value);
	if (ret != 0) {
		ath10k_err("Failed to get option val: %d\n", ret);
		return ret;
	}

	flag2_value |= HI_OPTION_EARLY_CFG_DONE;

	ret = ath10k_pci_diag_write_access(ar, flag2_targ_addr, flag2_value);
	if (ret != 0) {
		ath10k_err("Failed to set option val: %d\n", ret);
		return ret;
	}

	return 0;
}
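/*
 * To recap the handshake above: the host reads hi_interconnect_state to
 * locate the target's pcie_state struct, writes the CE pipe
 * configuration and the service-to-pipe map into target memory, disables
 * PCIe L1, arranges early IRAM allocation, and finally sets
 * HI_OPTION_EARLY_CFG_DONE so the target firmware continues booting.
 */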
static int ath10k_pci_ce_init(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	struct ath10k_pci_pipe *pipe_info;
	const struct ce_attr *attr;
	int pipe_num;

	for (pipe_num = 0; pipe_num < CE_COUNT; pipe_num++) {
		pipe_info = &ar_pci->pipe_info[pipe_num];
		pipe_info->pipe_num = pipe_num;
		pipe_info->hif_ce_state = ar;
		attr = &host_ce_config_wlan[pipe_num];

		pipe_info->ce_hdl = ath10k_ce_init(ar, pipe_num, attr);
		if (pipe_info->ce_hdl == NULL) {
			ath10k_err("failed to initialize CE for pipe: %d\n",
				   pipe_num);

			/* It is safe to call it here. It checks if ce_hdl is
			 * valid for each pipe */
			ath10k_pci_ce_deinit(ar);
			return -1;
		}

		if (pipe_num == CE_COUNT - 1) {
			/*
			 * Reserve the ultimate CE for
			 * diagnostic Window support
			 */
			ar_pci->ce_diag = pipe_info->ce_hdl;
			continue;
		}

		pipe_info->buf_sz = (size_t)(attr->src_sz_max);
	}

	return 0;
}
static void ath10k_pci_fw_interrupt_handler(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	u32 fw_indicator;

	ath10k_pci_wake(ar);

	fw_indicator = ath10k_pci_read32(ar, FW_INDICATOR_ADDRESS);

	if (fw_indicator & FW_IND_EVENT_PENDING) {
		/* ACK: clear Target-side pending event */
		ath10k_pci_write32(ar, FW_INDICATOR_ADDRESS,
				   fw_indicator & ~FW_IND_EVENT_PENDING);

		if (ar_pci->started) {
			ath10k_pci_hif_dump_area(ar);
		} else {
			/*
			 * Probable Target failure before we're prepared
			 * to handle it. Generally unexpected.
			 */
			ath10k_warn("early firmware event indicated\n");
		}
	}

	ath10k_pci_sleep(ar);
}
static int ath10k_pci_warm_reset(struct ath10k *ar)
{
	int ret = 0;
	u32 val;

	ath10k_dbg(ATH10K_DBG_BOOT, "boot warm reset\n");

	ret = ath10k_do_pci_wake(ar);
	if (ret) {
		ath10k_err("failed to wake up target: %d\n", ret);
		return ret;
	}

	/* debug */
	val = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
				PCIE_INTR_CAUSE_ADDRESS);
	ath10k_dbg(ATH10K_DBG_BOOT, "boot host cpu intr cause: 0x%08x\n", val);

	val = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
				CPU_INTR_ADDRESS);
	ath10k_dbg(ATH10K_DBG_BOOT, "boot target cpu intr cause: 0x%08x\n",
		   val);

	/* disable pending irqs */
	ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS +
			   PCIE_INTR_ENABLE_ADDRESS, 0);

	ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS +
			   PCIE_INTR_CLR_ADDRESS, ~0);

	msleep(100);

	/* clear fw indicator */
	ath10k_pci_write32(ar, FW_INDICATOR_ADDRESS, 0);

	/* clear target LF timer interrupts */
	val = ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS +
				SOC_LF_TIMER_CONTROL0_ADDRESS);
	ath10k_pci_write32(ar, RTC_SOC_BASE_ADDRESS +
			   SOC_LF_TIMER_CONTROL0_ADDRESS,
			   val & ~SOC_LF_TIMER_CONTROL0_ENABLE_MASK);

	/* reset CE */
	val = ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS +
				SOC_RESET_CONTROL_ADDRESS);
	ath10k_pci_write32(ar, RTC_SOC_BASE_ADDRESS + SOC_RESET_CONTROL_ADDRESS,
			   val | SOC_RESET_CONTROL_CE_RST_MASK);
	val = ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS +
				SOC_RESET_CONTROL_ADDRESS);
	msleep(10);

	/* unreset CE */
	ath10k_pci_write32(ar, RTC_SOC_BASE_ADDRESS + SOC_RESET_CONTROL_ADDRESS,
			   val & ~SOC_RESET_CONTROL_CE_RST_MASK);
	val = ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS +
				SOC_RESET_CONTROL_ADDRESS);
	msleep(10);

	/* debug */
	val = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
				PCIE_INTR_CAUSE_ADDRESS);
	ath10k_dbg(ATH10K_DBG_BOOT, "boot host cpu intr cause: 0x%08x\n", val);

	val = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
				CPU_INTR_ADDRESS);
	ath10k_dbg(ATH10K_DBG_BOOT, "boot target cpu intr cause: 0x%08x\n",
		   val);

	/* CPU warm reset */
	val = ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS +
				SOC_RESET_CONTROL_ADDRESS);
	ath10k_pci_write32(ar, RTC_SOC_BASE_ADDRESS + SOC_RESET_CONTROL_ADDRESS,
			   val | SOC_RESET_CONTROL_CPU_WARM_RST_MASK);

	val = ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS +
				SOC_RESET_CONTROL_ADDRESS);
	ath10k_dbg(ATH10K_DBG_BOOT, "boot target reset state: 0x%08x\n", val);

	msleep(100);

	ath10k_dbg(ATH10K_DBG_BOOT, "boot warm reset complete\n");

	ath10k_do_pci_sleep(ar);
	return ret;
}
static int __ath10k_pci_hif_power_up(struct ath10k *ar, bool cold_reset)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	const char *irq_mode;
	int ret;

	/*
	 * Bring the target up cleanly.
	 *
	 * The target may be in an undefined state with an AUX-powered Target
	 * and a Host in WoW mode. If the Host crashes, loses power, or is
	 * restarted (without unloading the driver) then the Target is left
	 * (aux) powered and running. On a subsequent driver load, the Target
	 * is in an unexpected state. We try to catch that here in order to
	 * reset the Target and retry the probe.
	 */
	if (cold_reset)
		ret = ath10k_pci_cold_reset(ar);
	else
		ret = ath10k_pci_warm_reset(ar);

	if (ret) {
		ath10k_err("failed to reset target: %d\n", ret);
		goto err;
	}

	if (!test_bit(ATH10K_PCI_FEATURE_SOC_POWER_SAVE, ar_pci->features))
		/* Force AWAKE forever */
		ath10k_do_pci_wake(ar);

	ret = ath10k_pci_ce_init(ar);
	if (ret) {
		ath10k_err("failed to initialize CE: %d\n", ret);
		goto err_ps;
	}

	ret = ath10k_ce_disable_interrupts(ar);
	if (ret) {
		ath10k_err("failed to disable CE interrupts: %d\n", ret);
		goto err_ce;
	}

	ret = ath10k_pci_init_irq(ar);
	if (ret) {
		ath10k_err("failed to init irqs: %d\n", ret);
		goto err_ce;
	}

	ret = ath10k_pci_request_early_irq(ar);
	if (ret) {
		ath10k_err("failed to request early irq: %d\n", ret);
		goto err_deinit_irq;
	}

	ret = ath10k_pci_wait_for_target_init(ar);
	if (ret) {
		ath10k_err("failed to wait for target to init: %d\n", ret);
		goto err_free_early_irq;
	}

	ret = ath10k_pci_init_config(ar);
	if (ret) {
		ath10k_err("failed to setup init config: %d\n", ret);
		goto err_free_early_irq;
	}

	ret = ath10k_pci_wake_target_cpu(ar);
	if (ret) {
		ath10k_err("could not wake up target CPU: %d\n", ret);
		goto err_free_early_irq;
	}

	if (ar_pci->num_msi_intrs > 1)
		irq_mode = "MSI-X";
	else if (ar_pci->num_msi_intrs == 1)
		irq_mode = "MSI";
	else
		irq_mode = "legacy";

	if (!test_bit(ATH10K_FLAG_FIRST_BOOT_DONE, &ar->dev_flags))
		ath10k_info("pci irq %s irq_mode %d reset_mode %d\n",
			    irq_mode, ath10k_pci_irq_mode,
			    ath10k_pci_reset_mode);

	return 0;

err_free_early_irq:
	ath10k_pci_free_early_irq(ar);
err_deinit_irq:
	ath10k_pci_deinit_irq(ar);
err_ce:
	ath10k_pci_ce_deinit(ar);
	ath10k_pci_warm_reset(ar);
err_ps:
	if (!test_bit(ATH10K_PCI_FEATURE_SOC_POWER_SAVE, ar_pci->features))
		ath10k_do_pci_sleep(ar);
err:
	return ret;
}
static int ath10k_pci_hif_power_up(struct ath10k *ar)
{
	int ret;

	ath10k_dbg(ATH10K_DBG_BOOT, "boot hif power up\n");

	/*
	 * Hardware CUS232 version 2 has some issues with cold reset and the
	 * preferred (and safer) way to perform a device reset is through a
	 * warm reset.
	 *
	 * Warm reset doesn't always work though (notably after a firmware
	 * crash) so fall back to cold reset if necessary.
	 */
	ret = __ath10k_pci_hif_power_up(ar, false);
	if (ret) {
		ath10k_warn("failed to power up target using warm reset: %d\n",
			    ret);

		if (ath10k_pci_reset_mode == ATH10K_PCI_RESET_WARM_ONLY)
			return ret;

		ath10k_warn("trying cold reset\n");

		ret = __ath10k_pci_hif_power_up(ar, true);
		if (ret) {
			ath10k_err("failed to power up target using cold reset too (%d)\n",
				   ret);
			return ret;
		}
	}

	return 0;
}

static void ath10k_pci_hif_power_down(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

	ath10k_dbg(ATH10K_DBG_BOOT, "boot hif power down\n");

	ath10k_pci_free_early_irq(ar);
	ath10k_pci_kill_tasklet(ar);
	ath10k_pci_deinit_irq(ar);
	ath10k_pci_warm_reset(ar);

	ath10k_pci_ce_deinit(ar);
	if (!test_bit(ATH10K_PCI_FEATURE_SOC_POWER_SAVE, ar_pci->features))
		ath10k_do_pci_sleep(ar);
}
#ifdef CONFIG_PM

#define ATH10K_PCI_PM_CONTROL 0x44

static int ath10k_pci_hif_suspend(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	struct pci_dev *pdev = ar_pci->pdev;
	u32 val;

	pci_read_config_dword(pdev, ATH10K_PCI_PM_CONTROL, &val);

	if ((val & 0x000000ff) != 0x3) {
		pci_save_state(pdev);
		pci_disable_device(pdev);
		pci_write_config_dword(pdev, ATH10K_PCI_PM_CONTROL,
				       (val & 0xffffff00) | 0x03);
	}

	return 0;
}

static int ath10k_pci_hif_resume(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	struct pci_dev *pdev = ar_pci->pdev;
	u32 val;

	pci_read_config_dword(pdev, ATH10K_PCI_PM_CONTROL, &val);

	if ((val & 0x000000ff) != 0) {
		pci_restore_state(pdev);
		pci_write_config_dword(pdev, ATH10K_PCI_PM_CONTROL,
				       val & 0xffffff00);
		/*
		 * Suspend/Resume resets the PCI configuration space,
		 * so we have to re-disable the RETRY_TIMEOUT register (0x41)
		 * to keep PCI Tx retries from interfering with C3 CPU state.
		 */
		pci_read_config_dword(pdev, 0x40, &val);

		if ((val & 0x0000ff00) != 0)
			pci_write_config_dword(pdev, 0x40, val & 0xffff00ff);
	}

	return 0;
}
#endif
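/*
 * Note on the magic numbers above: offset 0x44 is this device's PCI
 * power-management control/status register (PMCSR), whose low bits hold
 * the power state (0x0 = D0, 0x3 = D3hot). Suspend therefore writes 0x03
 * into the low byte, and resume clears it back to D0 before restoring
 * the saved config space.
 */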
static const struct ath10k_hif_ops ath10k_pci_hif_ops = {
	.tx_sg			= ath10k_pci_hif_tx_sg,
	.exchange_bmi_msg	= ath10k_pci_hif_exchange_bmi_msg,
	.start			= ath10k_pci_hif_start,
	.stop			= ath10k_pci_hif_stop,
	.map_service_to_pipe	= ath10k_pci_hif_map_service_to_pipe,
	.get_default_pipe	= ath10k_pci_hif_get_default_pipe,
	.send_complete_check	= ath10k_pci_hif_send_complete_check,
	.set_callbacks		= ath10k_pci_hif_set_callbacks,
	.get_free_queue_number	= ath10k_pci_hif_get_free_queue_number,
	.power_up		= ath10k_pci_hif_power_up,
	.power_down		= ath10k_pci_hif_power_down,
#ifdef CONFIG_PM
	.suspend		= ath10k_pci_hif_suspend,
	.resume			= ath10k_pci_hif_resume,
#endif
};
static void ath10k_pci_ce_tasklet(unsigned long ptr)
{
	struct ath10k_pci_pipe *pipe = (struct ath10k_pci_pipe *)ptr;
	struct ath10k_pci *ar_pci = pipe->ar_pci;

	ath10k_ce_per_engine_service(ar_pci->ar, pipe->pipe_num);
}

static void ath10k_msi_err_tasklet(unsigned long data)
{
	struct ath10k *ar = (struct ath10k *)data;

	ath10k_pci_fw_interrupt_handler(ar);
}

/*
 * Handler for a per-engine interrupt on a PARTICULAR CE.
 * This is used in cases where each CE has a private MSI interrupt.
 */
static irqreturn_t ath10k_pci_per_engine_handler(int irq, void *arg)
{
	struct ath10k *ar = arg;
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	int ce_id = irq - ar_pci->pdev->irq - MSI_ASSIGN_CE_INITIAL;

	if (ce_id < 0 || ce_id >= ARRAY_SIZE(ar_pci->pipe_info)) {
		ath10k_warn("unexpected/invalid irq %d ce_id %d\n", irq, ce_id);
		return IRQ_HANDLED;
	}

	/*
	 * NOTE: We are able to derive ce_id from irq because we
	 * use a one-to-one mapping for CE's 0..5.
	 * CE's 6 & 7 do not use interrupts at all.
	 *
	 * This mapping must be kept in sync with the mapping
	 * used by firmware.
	 */
	tasklet_schedule(&ar_pci->pipe_info[ce_id].intr);
	return IRQ_HANDLED;
}

static irqreturn_t ath10k_pci_msi_fw_handler(int irq, void *arg)
{
	struct ath10k *ar = arg;
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

	tasklet_schedule(&ar_pci->msi_fw_err);
	return IRQ_HANDLED;
}

/*
 * Top-level interrupt handler for all PCI interrupts from a Target.
 * When a block of MSI interrupts is allocated, this top-level handler
 * is not used; instead, we directly call the correct sub-handler.
 */
static irqreturn_t ath10k_pci_interrupt_handler(int irq, void *arg)
{
	struct ath10k *ar = arg;
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

	if (ar_pci->num_msi_intrs == 0) {
		if (!ath10k_pci_irq_pending(ar))
			return IRQ_NONE;

		ath10k_pci_disable_and_clear_legacy_irq(ar);
	}

	tasklet_schedule(&ar_pci->intr_tq);

	return IRQ_HANDLED;
}
static void ath10k_pci_early_irq_tasklet(unsigned long data)
{
	struct ath10k *ar = (struct ath10k *)data;
	u32 fw_ind;
	int ret;

	ret = ath10k_pci_wake(ar);
	if (ret) {
		ath10k_warn("failed to wake target in early irq tasklet: %d\n",
			    ret);
		return;
	}

	fw_ind = ath10k_pci_read32(ar, FW_INDICATOR_ADDRESS);
	if (fw_ind & FW_IND_EVENT_PENDING) {
		ath10k_pci_write32(ar, FW_INDICATOR_ADDRESS,
				   fw_ind & ~FW_IND_EVENT_PENDING);

		/* Some structures are unavailable during early boot or at
		 * driver teardown so just print that the device has crashed. */
		ath10k_warn("device crashed - no diagnostics available\n");
	}

	ath10k_pci_sleep(ar);
	ath10k_pci_enable_legacy_irq(ar);
}

static void ath10k_pci_tasklet(unsigned long data)
{
	struct ath10k *ar = (struct ath10k *)data;
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

	ath10k_pci_fw_interrupt_handler(ar); /* FIXME: Handle FW error */
	ath10k_ce_per_engine_service_any(ar);

	/* Re-enable legacy irq that was disabled in the irq handler */
	if (ar_pci->num_msi_intrs == 0)
		ath10k_pci_enable_legacy_irq(ar);
}
static int ath10k_pci_request_irq_msix(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	int ret, i;

	ret = request_irq(ar_pci->pdev->irq + MSI_ASSIGN_FW,
			  ath10k_pci_msi_fw_handler,
			  IRQF_SHARED, "ath10k_pci", ar);
	if (ret) {
		ath10k_warn("failed to request MSI-X fw irq %d: %d\n",
			    ar_pci->pdev->irq + MSI_ASSIGN_FW, ret);
		return ret;
	}

	for (i = MSI_ASSIGN_CE_INITIAL; i <= MSI_ASSIGN_CE_MAX; i++) {
		ret = request_irq(ar_pci->pdev->irq + i,
				  ath10k_pci_per_engine_handler,
				  IRQF_SHARED, "ath10k_pci", ar);
		if (ret) {
			ath10k_warn("failed to request MSI-X ce irq %d: %d\n",
				    ar_pci->pdev->irq + i, ret);

			for (i--; i >= MSI_ASSIGN_CE_INITIAL; i--)
				free_irq(ar_pci->pdev->irq + i, ar);

			free_irq(ar_pci->pdev->irq + MSI_ASSIGN_FW, ar);
			return ret;
		}
	}

	return 0;
}

static int ath10k_pci_request_irq_msi(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	int ret;

	ret = request_irq(ar_pci->pdev->irq,
			  ath10k_pci_interrupt_handler,
			  IRQF_SHARED, "ath10k_pci", ar);
	if (ret) {
		ath10k_warn("failed to request MSI irq %d: %d\n",
			    ar_pci->pdev->irq, ret);
		return ret;
	}

	return 0;
}
static int ath10k_pci_request_irq_legacy(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	int ret;

	ret = request_irq(ar_pci->pdev->irq,
			  ath10k_pci_interrupt_handler,
			  IRQF_SHARED, "ath10k_pci", ar);
	if (ret) {
		ath10k_warn("failed to request legacy irq %d: %d\n",
			    ar_pci->pdev->irq, ret);
		return ret;
	}

	return 0;
}

static int ath10k_pci_request_irq(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

	switch (ar_pci->num_msi_intrs) {
	case 0:
		return ath10k_pci_request_irq_legacy(ar);
	case 1:
		return ath10k_pci_request_irq_msi(ar);
	case MSI_NUM_REQUEST:
		return ath10k_pci_request_irq_msix(ar);
	}

	ath10k_warn("unknown irq configuration upon request\n");
	return -EINVAL;
}

static void ath10k_pci_free_irq(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	int i;

	/* There's at least one interrupt regardless of whether it's a legacy
	 * INTR or MSI or MSI-X */
	for (i = 0; i < max(1, ar_pci->num_msi_intrs); i++)
		free_irq(ar_pci->pdev->irq + i, ar);
}
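/*
 * The num_msi_intrs field doubles as the mode selector throughout this
 * file: 0 means shared legacy INTx, 1 means a single MSI, and
 * MSI_NUM_REQUEST means one vector per source (firmware plus one per
 * CE), which is why request and free both iterate over max(1, n)
 * vectors.
 */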
static void ath10k_pci_init_irq_tasklets(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	int i;

	tasklet_init(&ar_pci->intr_tq, ath10k_pci_tasklet, (unsigned long)ar);
	tasklet_init(&ar_pci->msi_fw_err, ath10k_msi_err_tasklet,
		     (unsigned long)ar);
	tasklet_init(&ar_pci->early_irq_tasklet, ath10k_pci_early_irq_tasklet,
		     (unsigned long)ar);

	for (i = 0; i < CE_COUNT; i++) {
		ar_pci->pipe_info[i].ar_pci = ar_pci;
		tasklet_init(&ar_pci->pipe_info[i].intr, ath10k_pci_ce_tasklet,
			     (unsigned long)&ar_pci->pipe_info[i]);
	}
}
static int ath10k_pci_init_irq(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	bool msix_supported = test_bit(ATH10K_PCI_FEATURE_MSI_X,
				       ar_pci->features);
	int ret;

	ath10k_pci_init_irq_tasklets(ar);

	if (ath10k_pci_irq_mode != ATH10K_PCI_IRQ_AUTO &&
	    !test_bit(ATH10K_FLAG_FIRST_BOOT_DONE, &ar->dev_flags))
		ath10k_info("limiting irq mode to: %d\n", ath10k_pci_irq_mode);

	/* try MSI-X */
	if (ath10k_pci_irq_mode == ATH10K_PCI_IRQ_AUTO && msix_supported) {
		ar_pci->num_msi_intrs = MSI_NUM_REQUEST;
		ret = pci_enable_msi_range(ar_pci->pdev, ar_pci->num_msi_intrs,
					   ar_pci->num_msi_intrs);
		if (ret > 0)
			return 0;

		/* fall-through */
	}

	/* try MSI */
	if (ath10k_pci_irq_mode != ATH10K_PCI_IRQ_LEGACY) {
		ar_pci->num_msi_intrs = 1;
		ret = pci_enable_msi(ar_pci->pdev);
		if (ret == 0)
			return 0;

		/* fall-through */
	}

	/* try legacy irq
	 *
	 * A potential race occurs here: The CORE_BASE write
	 * depends on target correctly decoding AXI address but
	 * host won't know when target writes BAR to CORE_CTRL.
	 * This write might get lost if target has NOT written BAR.
	 * For now, fix the race by repeating the write in below
	 * synchronization checking. */
	ar_pci->num_msi_intrs = 0;

	ret = ath10k_pci_wake(ar);
	if (ret) {
		ath10k_warn("failed to wake target: %d\n", ret);
		return ret;
	}

	ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + PCIE_INTR_ENABLE_ADDRESS,
			   PCIE_INTR_FIRMWARE_MASK | PCIE_INTR_CE_MASK_ALL);
	ath10k_pci_sleep(ar);

	return 0;
}
static int ath10k_pci_deinit_irq_legacy(struct ath10k *ar)
{
	int ret;

	ret = ath10k_pci_wake(ar);
	if (ret) {
		ath10k_warn("failed to wake target: %d\n", ret);
		return ret;
	}

	ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + PCIE_INTR_ENABLE_ADDRESS,
			   0);
	ath10k_pci_sleep(ar);

	return 0;
}

static int ath10k_pci_deinit_irq(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

	switch (ar_pci->num_msi_intrs) {
	case 0:
		return ath10k_pci_deinit_irq_legacy(ar);
	case 1:
		/* fall-through */
	case MSI_NUM_REQUEST:
		pci_disable_msi(ar_pci->pdev);
		return 0;
	default:
		pci_disable_msi(ar_pci->pdev);
	}

	ath10k_warn("unknown irq configuration upon deinit\n");
	return -EINVAL;
}
static int ath10k_pci_wait_for_target_init(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	unsigned long timeout;
	int ret;
	u32 val;

	ath10k_dbg(ATH10K_DBG_BOOT, "boot waiting target to initialise\n");

	ret = ath10k_pci_wake(ar);
	if (ret) {
		ath10k_err("failed to wake up target for init: %d\n", ret);
		return ret;
	}

	timeout = jiffies + msecs_to_jiffies(ATH10K_PCI_TARGET_WAIT);

	do {
		val = ath10k_pci_read32(ar, FW_INDICATOR_ADDRESS);

		ath10k_dbg(ATH10K_DBG_BOOT, "boot target indicator %x\n", val);

		/* target should never return this */
		if (val == 0xffffffff)
			continue;

		if (val & FW_IND_INITIALIZED)
			break;

		if (ar_pci->num_msi_intrs == 0)
			/* Fix potential race by repeating CORE_BASE writes */
			ath10k_pci_soc_write32(ar, PCIE_INTR_ENABLE_ADDRESS,
					       PCIE_INTR_FIRMWARE_MASK |
					       PCIE_INTR_CE_MASK_ALL);

		mdelay(10);
	} while (time_before(jiffies, timeout));

	if (val == 0xffffffff || !(val & FW_IND_INITIALIZED)) {
		ath10k_err("failed to receive initialized event from target: %08x\n",
			   val);
		ret = -ETIMEDOUT;
		goto out;
	}

	ath10k_dbg(ATH10K_DBG_BOOT, "boot target initialised\n");

out:
	ath10k_pci_sleep(ar);
	return ret;
}
static int ath10k_pci_cold_reset(struct ath10k *ar)
{
	int i, ret;
	u32 val;

	ath10k_dbg(ATH10K_DBG_BOOT, "boot cold reset\n");

	ret = ath10k_do_pci_wake(ar);
	if (ret) {
		ath10k_err("failed to wake up target: %d\n",
			   ret);
		return ret;
	}

	/* Put Target, including PCIe, into RESET. */
	val = ath10k_pci_reg_read32(ar, SOC_GLOBAL_RESET_ADDRESS);
	val |= 1;
	ath10k_pci_reg_write32(ar, SOC_GLOBAL_RESET_ADDRESS, val);

	for (i = 0; i < ATH_PCI_RESET_WAIT_MAX; i++) {
		if (ath10k_pci_reg_read32(ar, RTC_STATE_ADDRESS) &
					  RTC_STATE_COLD_RESET_MASK)
			break;
		msleep(1);
	}

	/* Pull Target, including PCIe, out of RESET. */
	val &= ~1;
	ath10k_pci_reg_write32(ar, SOC_GLOBAL_RESET_ADDRESS, val);

	for (i = 0; i < ATH_PCI_RESET_WAIT_MAX; i++) {
		if (!(ath10k_pci_reg_read32(ar, RTC_STATE_ADDRESS) &
		      RTC_STATE_COLD_RESET_MASK))
			break;
		msleep(1);
	}

	ath10k_do_pci_sleep(ar);

	ath10k_dbg(ATH10K_DBG_BOOT, "boot cold reset complete\n");

	return 0;
}
static void ath10k_pci_dump_features(struct ath10k_pci *ar_pci)
{
	int i;

	for (i = 0; i < ATH10K_PCI_FEATURE_COUNT; i++) {
		if (!test_bit(i, ar_pci->features))
			continue;

		switch (i) {
		case ATH10K_PCI_FEATURE_MSI_X:
			ath10k_dbg(ATH10K_DBG_BOOT, "device supports MSI-X\n");
			break;
		case ATH10K_PCI_FEATURE_SOC_POWER_SAVE:
			ath10k_dbg(ATH10K_DBG_BOOT, "QCA98XX SoC power save enabled\n");
			break;
		}
	}
}
static int ath10k_pci_probe(struct pci_dev *pdev,
			    const struct pci_device_id *pci_dev)
{
	void __iomem *mem;
	int ret = 0;
	struct ath10k *ar;
	struct ath10k_pci *ar_pci;
	u32 lcr_val, chip_id;

	ath10k_dbg(ATH10K_DBG_PCI, "pci probe\n");

	ar_pci = kzalloc(sizeof(*ar_pci), GFP_KERNEL);
	if (ar_pci == NULL)
		return -ENOMEM;

	ar_pci->pdev = pdev;
	ar_pci->dev = &pdev->dev;

	switch (pci_dev->device) {
	case QCA988X_2_0_DEVICE_ID:
		set_bit(ATH10K_PCI_FEATURE_MSI_X, ar_pci->features);
		break;
	default:
		ret = -ENODEV;
		ath10k_err("Unknown device ID: %d\n", pci_dev->device);
		goto err_ar_pci;
	}

	if (ath10k_pci_target_ps)
		set_bit(ATH10K_PCI_FEATURE_SOC_POWER_SAVE, ar_pci->features);

	ath10k_pci_dump_features(ar_pci);

	ar = ath10k_core_create(ar_pci, ar_pci->dev, &ath10k_pci_hif_ops);
	if (!ar) {
		ath10k_err("failed to create driver core\n");
		ret = -EINVAL;
		goto err_ar_pci;
	}

	ar_pci->ar = ar;
	atomic_set(&ar_pci->keep_awake_count, 0);

	pci_set_drvdata(pdev, ar);

	/*
	 * Without any knowledge of the Host, the Target may have been reset or
	 * power cycled and its Config Space may no longer reflect the PCI
	 * address space that was assigned earlier by the PCI infrastructure.
	 * Refresh it now.
	 */
	ret = pci_assign_resource(pdev, BAR_NUM);
	if (ret) {
		ath10k_err("failed to assign PCI space: %d\n", ret);
		goto err_ar;
	}

	ret = pci_enable_device(pdev);
	if (ret) {
		ath10k_err("failed to enable PCI device: %d\n", ret);
		goto err_ar;
	}

	/* Request MMIO resources */
	ret = pci_request_region(pdev, BAR_NUM, "ath");
	if (ret) {
		ath10k_err("failed to request MMIO region: %d\n", ret);
		goto err_device;
	}

	/*
	 * Target structures have a limit of 32 bit DMA pointers.
	 * DMA pointers can be wider than 32 bits by default on some systems.
	 */
	ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
	if (ret) {
		ath10k_err("failed to set DMA mask to 32-bit: %d\n", ret);
		goto err_region;
	}

	ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
	if (ret) {
		ath10k_err("failed to set consistent DMA mask to 32-bit\n");
		goto err_region;
	}

	/* Set bus master bit in PCI_COMMAND to enable DMA */
	pci_set_master(pdev);

	/*
	 * Temporary FIX: disable ASPM
	 * Will be removed after the OTP is programmed
	 */
	pci_read_config_dword(pdev, 0x80, &lcr_val);
	pci_write_config_dword(pdev, 0x80, (lcr_val & 0xffffff00));

	/* Arrange for access to Target SoC registers. */
	mem = pci_iomap(pdev, BAR_NUM, 0);
	if (!mem) {
		ath10k_err("failed to perform IOMAP for BAR%d\n", BAR_NUM);
		ret = -EIO;
		goto err_master;
	}

	ar_pci->mem = mem;

	spin_lock_init(&ar_pci->ce_lock);

	ret = ath10k_do_pci_wake(ar);
	if (ret) {
		ath10k_err("Failed to get chip id: %d\n", ret);
		goto err_iomap;
	}

	chip_id = ath10k_pci_soc_read32(ar, SOC_CHIP_ID_ADDRESS);

	ath10k_do_pci_sleep(ar);

	ath10k_dbg(ATH10K_DBG_BOOT, "boot pci_mem 0x%p\n", ar_pci->mem);

	ret = ath10k_core_register(ar, chip_id);
	if (ret) {
		ath10k_err("failed to register driver core: %d\n", ret);
		goto err_iomap;
	}

	return 0;

err_iomap:
	pci_iounmap(pdev, mem);
err_master:
	pci_clear_master(pdev);
err_region:
	pci_release_region(pdev, BAR_NUM);
err_device:
	pci_disable_device(pdev);
err_ar:
	ath10k_core_destroy(ar);
err_ar_pci:
	/* call HIF PCI free here */
	kfree(ar_pci);

	return ret;
}
static void ath10k_pci_remove(struct pci_dev *pdev)
{
	struct ath10k *ar = pci_get_drvdata(pdev);
	struct ath10k_pci *ar_pci;

	ath10k_dbg(ATH10K_DBG_PCI, "pci remove\n");

	if (!ar)
		return;

	ar_pci = ath10k_pci_priv(ar);

	if (!ar_pci)
		return;

	tasklet_kill(&ar_pci->msi_fw_err);

	ath10k_core_unregister(ar);

	pci_iounmap(pdev, ar_pci->mem);
	pci_release_region(pdev, BAR_NUM);
	pci_clear_master(pdev);
	pci_disable_device(pdev);

	ath10k_core_destroy(ar);
	kfree(ar_pci);
}
MODULE_DEVICE_TABLE(pci, ath10k_pci_id_table);

static struct pci_driver ath10k_pci_driver = {
	.name = "ath10k_pci",
	.id_table = ath10k_pci_id_table,
	.probe = ath10k_pci_probe,
	.remove = ath10k_pci_remove,
};

static int __init ath10k_pci_init(void)
{
	int ret;

	ret = pci_register_driver(&ath10k_pci_driver);
	if (ret)
		ath10k_err("failed to register PCI driver: %d\n", ret);

	return ret;
}
module_init(ath10k_pci_init);

static void __exit ath10k_pci_exit(void)
{
	pci_unregister_driver(&ath10k_pci_driver);
}
module_exit(ath10k_pci_exit);

MODULE_AUTHOR("Qualcomm Atheros");
MODULE_DESCRIPTION("Driver support for Atheros QCA988X PCIe devices");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" QCA988X_HW_2_0_FW_2_FILE);
MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" QCA988X_HW_2_0_BOARD_DATA_FILE);