2 * Copyright (c) 2005-2011 Atheros Communications Inc.
3 * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
5 * Permission to use, copy, modify, and/or distribute this software for any
6 * purpose with or without fee is hereby granted, provided that the above
7 * copyright notice and this permission notice appear in all copies.
9 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
10 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
11 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
12 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
13 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
14 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
15 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
18 #include <linux/pci.h>
19 #include <linux/module.h>
20 #include <linux/interrupt.h>
21 #include <linux/spinlock.h>
22 #include <linux/bitops.h>
27 #include "targaddrs.h"
36 enum ath10k_pci_irq_mode {
37 ATH10K_PCI_IRQ_AUTO = 0,
38 ATH10K_PCI_IRQ_LEGACY = 1,
39 ATH10K_PCI_IRQ_MSI = 2,
42 enum ath10k_pci_reset_mode {
43 ATH10K_PCI_RESET_AUTO = 0,
44 ATH10K_PCI_RESET_WARM_ONLY = 1,
47 static unsigned int ath10k_pci_target_ps;
48 static unsigned int ath10k_pci_irq_mode = ATH10K_PCI_IRQ_AUTO;
49 static unsigned int ath10k_pci_reset_mode = ATH10K_PCI_RESET_AUTO;
51 module_param_named(target_ps, ath10k_pci_target_ps, uint, 0644);
52 MODULE_PARM_DESC(target_ps, "Enable ath10k Target (SoC) PS option");
54 module_param_named(irq_mode, ath10k_pci_irq_mode, uint, 0644);
55 MODULE_PARM_DESC(irq_mode, "0: auto, 1: legacy, 2: msi (default: 0)");
57 module_param_named(reset_mode, ath10k_pci_reset_mode, uint, 0644);
58 MODULE_PARM_DESC(reset_mode, "0: auto, 1: warm only (default: 0)");
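/* Usage sketch (illustrative, not taken from the source): the parameters above
 * become options of the ath10k_pci module, e.g.
 *   modprobe ath10k_pci irq_mode=2 reset_mode=0 target_ps=0
 * which would force MSI interrupts, allow the auto reset policy and leave
 * target power save disabled. */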
60 /* how long to wait for the target to initialise, in ms */
61 #define ATH10K_PCI_TARGET_WAIT 3000
63 #define QCA988X_2_0_DEVICE_ID (0x003c)
65 static DEFINE_PCI_DEVICE_TABLE(ath10k_pci_id_table) = {
66 { PCI_VDEVICE(ATHEROS, QCA988X_2_0_DEVICE_ID) }, /* PCI-E QCA988X V2 */
70 static int ath10k_pci_diag_read_access(struct ath10k *ar, u32 address,
73 static int ath10k_pci_post_rx(struct ath10k *ar);
74 static int ath10k_pci_post_rx_pipe(struct ath10k_pci_pipe *pipe_info,
76 static void ath10k_pci_rx_pipe_cleanup(struct ath10k_pci_pipe *pipe_info);
77 static int ath10k_pci_cold_reset(struct ath10k *ar);
78 static int ath10k_pci_warm_reset(struct ath10k *ar);
79 static int ath10k_pci_wait_for_target_init(struct ath10k *ar);
80 static int ath10k_pci_init_irq(struct ath10k *ar);
81 static int ath10k_pci_deinit_irq(struct ath10k *ar);
82 static int ath10k_pci_request_irq(struct ath10k *ar);
83 static void ath10k_pci_free_irq(struct ath10k *ar);
84 static int ath10k_pci_bmi_wait(struct ath10k_ce_pipe *tx_pipe,
85 struct ath10k_ce_pipe *rx_pipe,
86 struct bmi_xfer *xfer);
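/* Note on the table below: each struct ce_attr describes one Copy Engine as
 * seen from the host. As the per-CE comments suggest, src_nentries sizes the
 * host->target (send) ring, dest_nentries the target->host (receive) ring,
 * and src_sz_max bounds a single transfer. CE_ATTR_DIS_INTR (used on the HTT
 * tx pipe) appears to leave send-completion interrupts disabled so the host
 * polls for completions instead. */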
88 static const struct ce_attr host_ce_config_wlan[] = {
89 /* CE0: host->target HTC control and raw streams */
91 .flags = CE_ATTR_FLAGS,
97 /* CE1: target->host HTT + HTC control */
99 .flags = CE_ATTR_FLAGS,
102 .dest_nentries = 512,
105 /* CE2: target->host WMI */
107 .flags = CE_ATTR_FLAGS,
113 /* CE3: host->target WMI */
115 .flags = CE_ATTR_FLAGS,
121 /* CE4: host->target HTT */
123 .flags = CE_ATTR_FLAGS | CE_ATTR_DIS_INTR,
124 .src_nentries = CE_HTT_H2T_MSG_SRC_NENTRIES,
131 .flags = CE_ATTR_FLAGS,
137 /* CE6: target autonomous hif_memcpy */
139 .flags = CE_ATTR_FLAGS,
145 /* CE7: ce_diag, the Diagnostic Window */
147 .flags = CE_ATTR_FLAGS,
149 .src_sz_max = DIAG_TRANSFER_LIMIT,
154 /* Target firmware's Copy Engine configuration. */
155 static const struct ce_pipe_config target_ce_config_wlan[] = {
156 /* CE0: host->target HTC control and raw streams */
159 .pipedir = PIPEDIR_OUT,
162 .flags = CE_ATTR_FLAGS,
166 /* CE1: target->host HTT + HTC control */
169 .pipedir = PIPEDIR_IN,
172 .flags = CE_ATTR_FLAGS,
176 /* CE2: target->host WMI */
179 .pipedir = PIPEDIR_IN,
182 .flags = CE_ATTR_FLAGS,
186 /* CE3: host->target WMI */
189 .pipedir = PIPEDIR_OUT,
192 .flags = CE_ATTR_FLAGS,
196 /* CE4: host->target HTT */
199 .pipedir = PIPEDIR_OUT,
202 .flags = CE_ATTR_FLAGS,
206 /* NB: 50% of src nentries, since tx has 2 frags */
211 .pipedir = PIPEDIR_OUT,
214 .flags = CE_ATTR_FLAGS,
218 /* CE6: Reserved for target autonomous hif_memcpy */
221 .pipedir = PIPEDIR_INOUT,
224 .flags = CE_ATTR_FLAGS,
228 /* CE7 used only by Host */
231 static bool ath10k_pci_irq_pending(struct ath10k *ar)
235 /* Check if the shared legacy irq is for us */
236 cause = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
237 PCIE_INTR_CAUSE_ADDRESS);
238 if (cause & (PCIE_INTR_FIRMWARE_MASK | PCIE_INTR_CE_MASK_ALL))
244 static void ath10k_pci_disable_and_clear_legacy_irq(struct ath10k *ar)
246 /* IMPORTANT: INTR_CLR register has to be set after
247 * INTR_ENABLE is set to 0, otherwise the interrupt cannot be cleared. */
249 ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + PCIE_INTR_ENABLE_ADDRESS,
251 ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + PCIE_INTR_CLR_ADDRESS,
252 PCIE_INTR_FIRMWARE_MASK | PCIE_INTR_CE_MASK_ALL);
254 /* IMPORTANT: this extra read transaction is required to
255 * flush the posted write buffer. */
256 (void) ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
257 PCIE_INTR_ENABLE_ADDRESS);
260 static void ath10k_pci_enable_legacy_irq(struct ath10k *ar)
262 ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS +
263 PCIE_INTR_ENABLE_ADDRESS,
264 PCIE_INTR_FIRMWARE_MASK | PCIE_INTR_CE_MASK_ALL);
266 /* IMPORTANT: this extra read transaction is required to
267 * flush the posted write buffer. */
268 (void) ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
269 PCIE_INTR_ENABLE_ADDRESS);
272 static irqreturn_t ath10k_pci_early_irq_handler(int irq, void *arg)
274 struct ath10k *ar = arg;
275 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
277 if (ar_pci->num_msi_intrs == 0) {
278 if (!ath10k_pci_irq_pending(ar))
281 ath10k_pci_disable_and_clear_legacy_irq(ar);
284 tasklet_schedule(&ar_pci->early_irq_tasklet);
289 static int ath10k_pci_request_early_irq(struct ath10k *ar)
291 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
294 /* Regardless of whether MSI-X/MSI/legacy irqs have been set up, the first
295 * interrupt from the irq vector is triggered in all cases for FW
296 * indication/errors */
297 ret = request_irq(ar_pci->pdev->irq, ath10k_pci_early_irq_handler,
298 IRQF_SHARED, "ath10k_pci (early)", ar);
300 ath10k_warn("failed to request early irq: %d\n", ret);
307 static void ath10k_pci_free_early_irq(struct ath10k *ar)
309 free_irq(ath10k_pci_priv(ar)->pdev->irq, ar);
313 * Diagnostic read/write access is provided for startup/config/debug usage.
314 * Caller must guarantee proper alignment, when applicable, and single user at a given time.
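*
* A rough sketch of what ath10k_pci_diag_read_mem() below does: allocate a
* DMA-coherent bounce buffer, then for each DIAG_TRANSFER_LIMIT sized chunk
* post the bounce buffer as a receive buffer on the diagnostic CE, convert
* the caller's Target CPU address to CE address space, ath10k_ce_send() from
* that address, and poll the send and receive completion rings (bounded by
* DIAG_ACCESS_CE_TIMEOUT_MS) before copying the little-endian result back to
* the caller's buffer.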
317 static int ath10k_pci_diag_read_mem(struct ath10k *ar, u32 address, void *data,
320 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
323 unsigned int completed_nbytes, orig_nbytes, remaining_bytes;
326 struct ath10k_ce_pipe *ce_diag;
327 /* Host buffer address in CE space */
329 dma_addr_t ce_data_base = 0;
330 void *data_buf = NULL;
334 * This code cannot handle reads to non-memory space. Redirect to the
335 * register read fn but preserve the multi-word read capability of this fn.
338 if (address < DRAM_BASE_ADDRESS) {
339 if (!IS_ALIGNED(address, 4) ||
340 !IS_ALIGNED((unsigned long)data, 4))
343 while ((nbytes >= 4) && ((ret = ath10k_pci_diag_read_access(
344 ar, address, (u32 *)data)) == 0)) {
345 nbytes -= sizeof(u32);
346 address += sizeof(u32);
352 ce_diag = ar_pci->ce_diag;
355 * Allocate a temporary bounce buffer to hold caller's data
356 * to be DMA'ed from Target. This guarantees
357 * 1) 4-byte alignment
358 * 2) Buffer in DMA-able space
360 orig_nbytes = nbytes;
361 data_buf = (unsigned char *)pci_alloc_consistent(ar_pci->pdev,
369 memset(data_buf, 0, orig_nbytes);
371 remaining_bytes = orig_nbytes;
372 ce_data = ce_data_base;
373 while (remaining_bytes) {
374 nbytes = min_t(unsigned int, remaining_bytes,
375 DIAG_TRANSFER_LIMIT);
377 ret = ath10k_ce_recv_buf_enqueue(ce_diag, NULL, ce_data);
381 /* Request CE to send from Target(!) address to Host buffer */
383 * The address supplied by the caller is in the
384 * Target CPU virtual address space.
386 * In order to use this address with the diagnostic CE,
387 * convert it from Target CPU virtual address space
388 * to CE address space
391 address = TARG_CPU_SPACE_TO_CE_SPACE(ar, ar_pci->mem,
393 ath10k_pci_sleep(ar);
395 ret = ath10k_ce_send(ce_diag, NULL, (u32)address, nbytes, 0,
401 while (ath10k_ce_completed_send_next(ce_diag, NULL, &buf,
405 if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) {
411 if (nbytes != completed_nbytes) {
416 if (buf != (u32) address) {
422 while (ath10k_ce_completed_recv_next(ce_diag, NULL, &buf,
427 if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) {
433 if (nbytes != completed_nbytes) {
438 if (buf != ce_data) {
443 remaining_bytes -= nbytes;
450 /* Copy data from allocated DMA buf to caller's buf */
451 WARN_ON_ONCE(orig_nbytes & 3);
452 for (i = 0; i < orig_nbytes / sizeof(__le32); i++) {
454 __le32_to_cpu(((__le32 *)data_buf)[i]);
457 ath10k_dbg(ATH10K_DBG_PCI, "%s failure (0x%x)\n",
461 pci_free_consistent(ar_pci->pdev, orig_nbytes,
462 data_buf, ce_data_base);
467 /* Read 4-byte aligned data from Target memory or register */
468 static int ath10k_pci_diag_read_access(struct ath10k *ar, u32 address,
471 /* Assume range doesn't cross this boundary */
472 if (address >= DRAM_BASE_ADDRESS)
473 return ath10k_pci_diag_read_mem(ar, address, data, sizeof(u32));
476 *data = ath10k_pci_read32(ar, address);
477 ath10k_pci_sleep(ar);
481 static int ath10k_pci_diag_write_mem(struct ath10k *ar, u32 address,
482 const void *data, int nbytes)
484 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
487 unsigned int completed_nbytes, orig_nbytes, remaining_bytes;
490 struct ath10k_ce_pipe *ce_diag;
491 void *data_buf = NULL;
492 u32 ce_data; /* Host buffer address in CE space */
493 dma_addr_t ce_data_base = 0;
496 ce_diag = ar_pci->ce_diag;
499 * Allocate a temporary bounce buffer to hold caller's data
500 * to be DMA'ed to Target. This guarantees
501 * 1) 4-byte alignment
502 * 2) Buffer in DMA-able space
504 orig_nbytes = nbytes;
505 data_buf = (unsigned char *)pci_alloc_consistent(ar_pci->pdev,
513 /* Copy caller's data to allocated DMA buf */
514 WARN_ON_ONCE(orig_nbytes & 3);
515 for (i = 0; i < orig_nbytes / sizeof(__le32); i++)
516 ((__le32 *)data_buf)[i] = __cpu_to_le32(((u32 *)data)[i]);
519 * The address supplied by the caller is in the
520 * Target CPU virtual address space.
522 * In order to use this address with the diagnostic CE,
524 * Target CPU virtual address space
529 address = TARG_CPU_SPACE_TO_CE_SPACE(ar, ar_pci->mem, address);
530 ath10k_pci_sleep(ar);
532 remaining_bytes = orig_nbytes;
533 ce_data = ce_data_base;
534 while (remaining_bytes) {
535 /* FIXME: check cast */
536 nbytes = min_t(int, remaining_bytes, DIAG_TRANSFER_LIMIT);
538 /* Set up to receive directly into Target(!) address */
539 ret = ath10k_ce_recv_buf_enqueue(ce_diag, NULL, address);
544 * Request CE to send caller-supplied data that
545 * was copied to bounce buffer to Target(!) address.
547 ret = ath10k_ce_send(ce_diag, NULL, (u32) ce_data,
553 while (ath10k_ce_completed_send_next(ce_diag, NULL, &buf,
558 if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) {
564 if (nbytes != completed_nbytes) {
569 if (buf != ce_data) {
575 while (ath10k_ce_completed_recv_next(ce_diag, NULL, &buf,
580 if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) {
586 if (nbytes != completed_nbytes) {
591 if (buf != address) {
596 remaining_bytes -= nbytes;
603 pci_free_consistent(ar_pci->pdev, orig_nbytes, data_buf,
608 ath10k_dbg(ATH10K_DBG_PCI, "%s failure (0x%x)\n", __func__,
614 /* Write 4-byte aligned data to Target memory or register */
615 static int ath10k_pci_diag_write_access(struct ath10k *ar, u32 address,
618 /* Assume range doesn't cross this boundary */
619 if (address >= DRAM_BASE_ADDRESS)
620 return ath10k_pci_diag_write_mem(ar, address, &data,
624 ath10k_pci_write32(ar, address, data);
625 ath10k_pci_sleep(ar);
629 static bool ath10k_pci_target_is_awake(struct ath10k *ar)
631 void __iomem *mem = ath10k_pci_priv(ar)->mem;
633 val = ioread32(mem + PCIE_LOCAL_BASE_ADDRESS +
635 return (RTC_STATE_V_GET(val) == RTC_STATE_V_ON);
638 int ath10k_do_pci_wake(struct ath10k *ar)
640 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
641 void __iomem *pci_addr = ar_pci->mem;
645 if (atomic_read(&ar_pci->keep_awake_count) == 0) {
647 iowrite32(PCIE_SOC_WAKE_V_MASK,
648 pci_addr + PCIE_LOCAL_BASE_ADDRESS +
649 PCIE_SOC_WAKE_ADDRESS);
651 atomic_inc(&ar_pci->keep_awake_count);
653 if (ar_pci->verified_awake)
657 if (ath10k_pci_target_is_awake(ar)) {
658 ar_pci->verified_awake = true;
662 if (tot_delay > PCIE_WAKE_TIMEOUT) {
663 ath10k_warn("target took longer %d us to wake up (awake count %d)\n",
665 atomic_read(&ar_pci->keep_awake_count));
670 tot_delay += curr_delay;
677 void ath10k_do_pci_sleep(struct ath10k *ar)
679 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
680 void __iomem *pci_addr = ar_pci->mem;
682 if (atomic_dec_and_test(&ar_pci->keep_awake_count)) {
684 ar_pci->verified_awake = false;
685 iowrite32(PCIE_SOC_WAKE_RESET,
686 pci_addr + PCIE_LOCAL_BASE_ADDRESS +
687 PCIE_SOC_WAKE_ADDRESS);
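/* ath10k_do_pci_wake()/ath10k_do_pci_sleep() above are reference counted via
 * keep_awake_count: the first waker writes PCIE_SOC_WAKE_V_MASK and then
 * polls RTC_STATE (up to PCIE_WAKE_TIMEOUT) until the SoC reports it is
 * awake; only when the last user sleeps is PCIE_SOC_WAKE_RESET written,
 * allowing the target to power-gate again. */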
691 /* Called by lower (CE) layer when a send to Target completes. */
692 static void ath10k_pci_ce_send_done(struct ath10k_ce_pipe *ce_state)
694 struct ath10k *ar = ce_state->ar;
695 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
696 struct ath10k_hif_cb *cb = &ar_pci->msg_callbacks_current;
697 void *transfer_context;
700 unsigned int transfer_id;
702 while (ath10k_ce_completed_send_next(ce_state, &transfer_context,
704 &transfer_id) == 0) {
705 /* no need to call tx completion for NULL pointers */
706 if (transfer_context == NULL)
709 cb->tx_completion(ar, transfer_context, transfer_id);
713 /* Called by lower (CE) layer when data is received from the Target. */
714 static void ath10k_pci_ce_recv_data(struct ath10k_ce_pipe *ce_state)
716 struct ath10k *ar = ce_state->ar;
717 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
718 struct ath10k_pci_pipe *pipe_info = &ar_pci->pipe_info[ce_state->id];
719 struct ath10k_hif_cb *cb = &ar_pci->msg_callbacks_current;
721 void *transfer_context;
723 unsigned int nbytes, max_nbytes;
724 unsigned int transfer_id;
728 while (ath10k_ce_completed_recv_next(ce_state, &transfer_context,
729 &ce_data, &nbytes, &transfer_id,
731 err = ath10k_pci_post_rx_pipe(pipe_info, 1);
734 ath10k_warn("failed to replenish CE rx ring %d: %d\n",
735 pipe_info->pipe_num, err);
738 skb = transfer_context;
739 max_nbytes = skb->len + skb_tailroom(skb);
740 dma_unmap_single(ar->dev, ATH10K_SKB_CB(skb)->paddr,
741 max_nbytes, DMA_FROM_DEVICE);
743 if (unlikely(max_nbytes < nbytes)) {
744 ath10k_warn("rxed more than expected (nbytes %d, max %d)",
746 dev_kfree_skb_any(skb);
750 skb_put(skb, nbytes);
751 cb->rx_completion(ar, skb, pipe_info->pipe_num);
755 static int ath10k_pci_hif_tx_sg(struct ath10k *ar, u8 pipe_id,
756 struct ath10k_hif_sg_item *items, int n_items)
758 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
759 struct ath10k_pci_pipe *pci_pipe = &ar_pci->pipe_info[pipe_id];
760 struct ath10k_ce_pipe *ce_pipe = pci_pipe->ce_hdl;
761 struct ath10k_ce_ring *src_ring = ce_pipe->src_ring;
762 unsigned int nentries_mask = src_ring->nentries_mask;
763 unsigned int sw_index = src_ring->sw_index;
764 unsigned int write_index = src_ring->write_index;
767 spin_lock_bh(&ar_pci->ce_lock);
769 if (unlikely(CE_RING_DELTA(nentries_mask,
770 write_index, sw_index - 1) < n_items)) {
775 for (i = 0; i < n_items - 1; i++) {
776 ath10k_dbg(ATH10K_DBG_PCI,
777 "pci tx item %d paddr 0x%08x len %d n_items %d\n",
778 i, items[i].paddr, items[i].len, n_items);
779 ath10k_dbg_dump(ATH10K_DBG_PCI_DUMP, NULL, "item data: ",
780 items[i].vaddr, items[i].len);
782 err = ath10k_ce_send_nolock(ce_pipe,
783 items[i].transfer_context,
786 items[i].transfer_id,
787 CE_SEND_FLAG_GATHER);
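/* All but the last fragment are queued with CE_SEND_FLAG_GATHER; the final
 * fragment, sent below without the flag, marks the end of the scatter/gather
 * sequence for this transfer. */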
792 /* `i` is equal to `n_items - 1` after for() */
794 ath10k_dbg(ATH10K_DBG_PCI,
795 "pci tx item %d paddr 0x%08x len %d n_items %d\n",
796 i, items[i].paddr, items[i].len, n_items);
797 ath10k_dbg_dump(ATH10K_DBG_PCI_DUMP, NULL, "item data: ",
798 items[i].vaddr, items[i].len);
800 err = ath10k_ce_send_nolock(ce_pipe,
801 items[i].transfer_context,
804 items[i].transfer_id,
811 spin_unlock_bh(&ar_pci->ce_lock);
815 static u16 ath10k_pci_hif_get_free_queue_number(struct ath10k *ar, u8 pipe)
817 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
818 return ath10k_ce_num_free_src_entries(ar_pci->pipe_info[pipe].ce_hdl);
821 static void ath10k_pci_hif_dump_area(struct ath10k *ar)
823 u32 reg_dump_area = 0;
824 u32 reg_dump_values[REG_DUMP_COUNT_QCA988X] = {};
829 ath10k_err("firmware crashed!\n");
830 ath10k_err("hardware name %s version 0x%x\n",
831 ar->hw_params.name, ar->target_version);
832 ath10k_err("firmware version: %s\n", ar->hw->wiphy->fw_version);
834 host_addr = host_interest_item_address(HI_ITEM(hi_failure_state));
835 ret = ath10k_pci_diag_read_mem(ar, host_addr,
836 &reg_dump_area, sizeof(u32));
838 ath10k_err("failed to read FW dump area address: %d\n", ret);
842 ath10k_err("target register Dump Location: 0x%08X\n", reg_dump_area);
844 ret = ath10k_pci_diag_read_mem(ar, reg_dump_area,
846 REG_DUMP_COUNT_QCA988X * sizeof(u32));
848 ath10k_err("failed to read FW dump area: %d\n", ret);
852 BUILD_BUG_ON(REG_DUMP_COUNT_QCA988X % 4);
854 ath10k_err("target Register Dump\n");
855 for (i = 0; i < REG_DUMP_COUNT_QCA988X; i += 4)
856 ath10k_err("[%02d]: 0x%08X 0x%08X 0x%08X 0x%08X\n",
859 reg_dump_values[i + 1],
860 reg_dump_values[i + 2],
861 reg_dump_values[i + 3]);
863 queue_work(ar->workqueue, &ar->restart_work);
866 static void ath10k_pci_hif_send_complete_check(struct ath10k *ar, u8 pipe,
872 * Decide whether to actually poll for completions, or just
873 * wait for a later chance.
874 * If there seem to be plenty of resources left, then just wait
875 * since checking involves reading a CE register, which is a
876 * relatively expensive operation.
878 resources = ath10k_pci_hif_get_free_queue_number(ar, pipe);
881 * If at least 50% of the total resources are still available,
882 * don't bother checking again yet.
884 if (resources > (host_ce_config_wlan[pipe].src_nentries >> 1))
887 ath10k_ce_per_engine_service(ar, pipe);
890 static void ath10k_pci_hif_set_callbacks(struct ath10k *ar,
891 struct ath10k_hif_cb *callbacks)
893 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
895 ath10k_dbg(ATH10K_DBG_PCI, "%s\n", __func__);
897 memcpy(&ar_pci->msg_callbacks_current, callbacks,
898 sizeof(ar_pci->msg_callbacks_current));
901 static int ath10k_pci_setup_ce_irq(struct ath10k *ar)
903 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
904 const struct ce_attr *attr;
905 struct ath10k_pci_pipe *pipe_info;
906 int pipe_num, disable_interrupts;
908 for (pipe_num = 0; pipe_num < CE_COUNT; pipe_num++) {
909 pipe_info = &ar_pci->pipe_info[pipe_num];
911 /* Handle Diagnostic CE specially */
912 if (pipe_info->ce_hdl == ar_pci->ce_diag)
915 attr = &host_ce_config_wlan[pipe_num];
917 if (attr->src_nentries) {
918 disable_interrupts = attr->flags & CE_ATTR_DIS_INTR;
919 ath10k_ce_send_cb_register(pipe_info->ce_hdl,
920 ath10k_pci_ce_send_done,
924 if (attr->dest_nentries)
925 ath10k_ce_recv_cb_register(pipe_info->ce_hdl,
926 ath10k_pci_ce_recv_data);
932 static void ath10k_pci_kill_tasklet(struct ath10k *ar)
934 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
937 tasklet_kill(&ar_pci->intr_tq);
938 tasklet_kill(&ar_pci->msi_fw_err);
939 tasklet_kill(&ar_pci->early_irq_tasklet);
941 for (i = 0; i < CE_COUNT; i++)
942 tasklet_kill(&ar_pci->pipe_info[i].intr);
945 /* TODO - temporary mapping while we have too few CEs */
946 static int ath10k_pci_hif_map_service_to_pipe(struct ath10k *ar,
947 u16 service_id, u8 *ul_pipe,
948 u8 *dl_pipe, int *ul_is_polled,
953 /* polling for received messages not supported */
956 switch (service_id) {
957 case ATH10K_HTC_SVC_ID_HTT_DATA_MSG:
959 * Host->target HTT gets its own pipe, so it can be polled
960 * while other pipes are interrupt driven.
964 * Use the same target->host pipe for HTC ctrl, HTC raw
970 case ATH10K_HTC_SVC_ID_RSVD_CTRL:
971 case ATH10K_HTC_SVC_ID_TEST_RAW_STREAMS:
973 * Note: HTC_RAW_STREAMS_SVC is currently unused, and
974 * HTC_CTRL_RSVD_SVC could share the same pipe as the
975 * WMI services. So, if another CE is needed, change
976 * this to *ul_pipe = 3, which frees up CE 0.
983 case ATH10K_HTC_SVC_ID_WMI_DATA_BK:
984 case ATH10K_HTC_SVC_ID_WMI_DATA_BE:
985 case ATH10K_HTC_SVC_ID_WMI_DATA_VI:
986 case ATH10K_HTC_SVC_ID_WMI_DATA_VO:
988 case ATH10K_HTC_SVC_ID_WMI_CONTROL:
994 /* pipe 6 reserved */
995 /* pipe 7 reserved */
1002 (host_ce_config_wlan[*ul_pipe].flags & CE_ATTR_DIS_INTR) != 0;
1007 static void ath10k_pci_hif_get_default_pipe(struct ath10k *ar,
1008 u8 *ul_pipe, u8 *dl_pipe)
1010 int ul_is_polled, dl_is_polled;
1012 (void)ath10k_pci_hif_map_service_to_pipe(ar,
1013 ATH10K_HTC_SVC_ID_RSVD_CTRL,
1020 static int ath10k_pci_post_rx_pipe(struct ath10k_pci_pipe *pipe_info,
1023 struct ath10k *ar = pipe_info->hif_ce_state;
1024 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1025 struct ath10k_ce_pipe *ce_state = pipe_info->ce_hdl;
1026 struct sk_buff *skb;
1030 if (pipe_info->buf_sz == 0)
1033 for (i = 0; i < num; i++) {
1034 skb = dev_alloc_skb(pipe_info->buf_sz);
1036 ath10k_warn("failed to allocate skbuff for pipe %d\n",
1042 WARN_ONCE((unsigned long)skb->data & 3, "unaligned skb");
1044 ce_data = dma_map_single(ar->dev, skb->data,
1045 skb->len + skb_tailroom(skb),
1048 if (unlikely(dma_mapping_error(ar->dev, ce_data))) {
1049 ath10k_warn("failed to DMA map sk_buff\n");
1050 dev_kfree_skb_any(skb);
1055 ATH10K_SKB_CB(skb)->paddr = ce_data;
1057 pci_dma_sync_single_for_device(ar_pci->pdev, ce_data,
1059 PCI_DMA_FROMDEVICE);
1061 ret = ath10k_ce_recv_buf_enqueue(ce_state, (void *)skb,
1064 ath10k_warn("failed to enqueue to pipe %d: %d\n",
1073 ath10k_pci_rx_pipe_cleanup(pipe_info);
1077 static int ath10k_pci_post_rx(struct ath10k *ar)
1079 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1080 struct ath10k_pci_pipe *pipe_info;
1081 const struct ce_attr *attr;
1082 int pipe_num, ret = 0;
1084 for (pipe_num = 0; pipe_num < CE_COUNT; pipe_num++) {
1085 pipe_info = &ar_pci->pipe_info[pipe_num];
1086 attr = &host_ce_config_wlan[pipe_num];
1088 if (attr->dest_nentries == 0)
1091 ret = ath10k_pci_post_rx_pipe(pipe_info,
1092 attr->dest_nentries - 1);
1094 ath10k_warn("failed to post RX buffer for pipe %d: %d\n",
1097 for (; pipe_num >= 0; pipe_num--) {
1098 pipe_info = &ar_pci->pipe_info[pipe_num];
1099 ath10k_pci_rx_pipe_cleanup(pipe_info);
1108 static int ath10k_pci_hif_start(struct ath10k *ar)
1110 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1113 ath10k_pci_free_early_irq(ar);
1114 ath10k_pci_kill_tasklet(ar);
1116 ret = ath10k_pci_request_irq(ar);
1118 ath10k_warn("failed to post RX buffers for all pipes: %d\n",
1123 ret = ath10k_pci_setup_ce_irq(ar);
1125 ath10k_warn("failed to setup CE interrupts: %d\n", ret);
1129 /* Post buffers once to start things off. */
1130 ret = ath10k_pci_post_rx(ar);
1132 ath10k_warn("failed to post RX buffers for all pipes: %d\n",
1137 ar_pci->started = 1;
1141 ath10k_ce_disable_interrupts(ar);
1142 ath10k_pci_free_irq(ar);
1143 ath10k_pci_kill_tasklet(ar);
1145 /* Though there should be no interrupts (device was reset)
1146 * power_down() expects the early IRQ to be installed as per the
1147 * driver lifecycle. */
1148 ret_early = ath10k_pci_request_early_irq(ar);
1150 ath10k_warn("failed to re-enable early irq: %d\n", ret_early);
1155 static void ath10k_pci_rx_pipe_cleanup(struct ath10k_pci_pipe *pipe_info)
1158 struct ath10k_pci *ar_pci;
1159 struct ath10k_ce_pipe *ce_hdl;
1161 struct sk_buff *netbuf;
1164 buf_sz = pipe_info->buf_sz;
1166 /* Unused Copy Engine */
1170 ar = pipe_info->hif_ce_state;
1171 ar_pci = ath10k_pci_priv(ar);
1173 if (!ar_pci->started)
1176 ce_hdl = pipe_info->ce_hdl;
1178 while (ath10k_ce_revoke_recv_next(ce_hdl, (void **)&netbuf,
1180 dma_unmap_single(ar->dev, ATH10K_SKB_CB(netbuf)->paddr,
1181 netbuf->len + skb_tailroom(netbuf),
1183 dev_kfree_skb_any(netbuf);
1187 static void ath10k_pci_tx_pipe_cleanup(struct ath10k_pci_pipe *pipe_info)
1190 struct ath10k_pci *ar_pci;
1191 struct ath10k_ce_pipe *ce_hdl;
1192 struct sk_buff *netbuf;
1194 unsigned int nbytes;
1198 buf_sz = pipe_info->buf_sz;
1200 /* Unused Copy Engine */
1204 ar = pipe_info->hif_ce_state;
1205 ar_pci = ath10k_pci_priv(ar);
1207 if (!ar_pci->started)
1210 ce_hdl = pipe_info->ce_hdl;
1212 while (ath10k_ce_cancel_send_next(ce_hdl, (void **)&netbuf,
1213 &ce_data, &nbytes, &id) == 0) {
1214 /* no need to call tx completion for NULL pointers */
1218 ar_pci->msg_callbacks_current.tx_completion(ar,
1225 * Cleanup residual buffers for device shutdown:
1226 *  - buffers that were enqueued for receive
1227 *  - buffers that were to be sent
1228 * Note: Buffers that had completed but which were
1229 * not yet processed are on a completion queue. They
1230 * are handled when the completion thread shuts down.
1232 static void ath10k_pci_buffer_cleanup(struct ath10k *ar)
1234 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1237 for (pipe_num = 0; pipe_num < CE_COUNT; pipe_num++) {
1238 struct ath10k_pci_pipe *pipe_info;
1240 pipe_info = &ar_pci->pipe_info[pipe_num];
1241 ath10k_pci_rx_pipe_cleanup(pipe_info);
1242 ath10k_pci_tx_pipe_cleanup(pipe_info);
1246 static void ath10k_pci_ce_deinit(struct ath10k *ar)
1248 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1249 struct ath10k_pci_pipe *pipe_info;
1252 for (pipe_num = 0; pipe_num < CE_COUNT; pipe_num++) {
1253 pipe_info = &ar_pci->pipe_info[pipe_num];
1254 if (pipe_info->ce_hdl) {
1255 ath10k_ce_deinit(pipe_info->ce_hdl);
1256 pipe_info->ce_hdl = NULL;
1257 pipe_info->buf_sz = 0;
1262 static void ath10k_pci_hif_stop(struct ath10k *ar)
1264 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1267 ath10k_dbg(ATH10K_DBG_PCI, "%s\n", __func__);
1269 ret = ath10k_ce_disable_interrupts(ar);
1271 ath10k_warn("failed to disable CE interrupts: %d\n", ret);
1273 ath10k_pci_free_irq(ar);
1274 ath10k_pci_kill_tasklet(ar);
1276 ret = ath10k_pci_request_early_irq(ar);
1278 ath10k_warn("failed to re-enable early irq: %d\n", ret);
1280 /* At this point, asynchronous threads are stopped, the target should
1281 * not DMA nor interrupt. We process the leftovers and then free
1282 * everything else up. */
1284 ath10k_pci_buffer_cleanup(ar);
1286 /* Make sure the device won't access any structures on the host by
1287 * resetting it. The device was fed with PCI CE ringbuffer
1288 * configuration during init. If ringbuffers are freed and the device
1289 * were to access them this could lead to memory corruption on the host. */
1291 ath10k_pci_warm_reset(ar);
1293 ar_pci->started = 0;
1296 static int ath10k_pci_hif_exchange_bmi_msg(struct ath10k *ar,
1297 void *req, u32 req_len,
1298 void *resp, u32 *resp_len)
1300 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1301 struct ath10k_pci_pipe *pci_tx = &ar_pci->pipe_info[BMI_CE_NUM_TO_TARG];
1302 struct ath10k_pci_pipe *pci_rx = &ar_pci->pipe_info[BMI_CE_NUM_TO_HOST];
1303 struct ath10k_ce_pipe *ce_tx = pci_tx->ce_hdl;
1304 struct ath10k_ce_pipe *ce_rx = pci_rx->ce_hdl;
1305 dma_addr_t req_paddr = 0;
1306 dma_addr_t resp_paddr = 0;
1307 struct bmi_xfer xfer = {};
1308 void *treq, *tresp = NULL;
1313 if (resp && !resp_len)
1316 if (resp && resp_len && *resp_len == 0)
1319 treq = kmemdup(req, req_len, GFP_KERNEL);
1323 req_paddr = dma_map_single(ar->dev, treq, req_len, DMA_TO_DEVICE);
1324 ret = dma_mapping_error(ar->dev, req_paddr);
1328 if (resp && resp_len) {
1329 tresp = kzalloc(*resp_len, GFP_KERNEL);
1335 resp_paddr = dma_map_single(ar->dev, tresp, *resp_len,
1337 ret = dma_mapping_error(ar->dev, resp_paddr);
1341 xfer.wait_for_resp = true;
1344 ath10k_ce_recv_buf_enqueue(ce_rx, &xfer, resp_paddr);
1347 init_completion(&xfer.done);
1349 ret = ath10k_ce_send(ce_tx, &xfer, req_paddr, req_len, -1, 0);
1353 ret = ath10k_pci_bmi_wait(ce_tx, ce_rx, &xfer);
1356 unsigned int unused_nbytes;
1357 unsigned int unused_id;
1359 ath10k_ce_cancel_send_next(ce_tx, NULL, &unused_buffer,
1360 &unused_nbytes, &unused_id);
1362 /* non-zero means we did not time out */
1370 ath10k_ce_revoke_recv_next(ce_rx, NULL, &unused_buffer);
1371 dma_unmap_single(ar->dev, resp_paddr,
1372 *resp_len, DMA_FROM_DEVICE);
1375 dma_unmap_single(ar->dev, req_paddr, req_len, DMA_TO_DEVICE);
1377 if (ret == 0 && resp_len) {
1378 *resp_len = min(*resp_len, xfer.resp_len);
1379 memcpy(resp, tresp, xfer.resp_len);
1388 static void ath10k_pci_bmi_send_done(struct ath10k_ce_pipe *ce_state)
1390 struct bmi_xfer *xfer;
1392 unsigned int nbytes;
1393 unsigned int transfer_id;
1395 if (ath10k_ce_completed_send_next(ce_state, (void **)&xfer, &ce_data,
1396 &nbytes, &transfer_id))
1399 if (xfer->wait_for_resp)
1402 complete(&xfer->done);
1405 static void ath10k_pci_bmi_recv_data(struct ath10k_ce_pipe *ce_state)
1407 struct bmi_xfer *xfer;
1409 unsigned int nbytes;
1410 unsigned int transfer_id;
1413 if (ath10k_ce_completed_recv_next(ce_state, (void **)&xfer, &ce_data,
1414 &nbytes, &transfer_id, &flags))
1417 if (!xfer->wait_for_resp) {
1418 ath10k_warn("unexpected: BMI data received; ignoring\n");
1422 xfer->resp_len = nbytes;
1423 complete(&xfer->done);
1426 static int ath10k_pci_bmi_wait(struct ath10k_ce_pipe *tx_pipe,
1427 struct ath10k_ce_pipe *rx_pipe,
1428 struct bmi_xfer *xfer)
1430 unsigned long timeout = jiffies + BMI_COMMUNICATION_TIMEOUT_HZ;
1432 while (time_before_eq(jiffies, timeout)) {
1433 ath10k_pci_bmi_send_done(tx_pipe);
1434 ath10k_pci_bmi_recv_data(rx_pipe);
1436 if (completion_done(&xfer->done))
1446 * Map from service/endpoint to Copy Engine.
1447 * This table is derived from the CE_PCI TABLE, above.
1448 * It is passed to the Target at startup for use by firmware.
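* Each entry below is understood to be { service id, pipe direction, CE pipe
* number }; PIPEDIR_OUT entries describe the host->target (upload) pipe for a
* service and PIPEDIR_IN entries the target->host (download) pipe.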
1450 static const struct service_to_pipe target_service_to_ce_map_wlan[] = {
1452 ATH10K_HTC_SVC_ID_WMI_DATA_VO,
1453 PIPEDIR_OUT, /* out = UL = host -> target */
1457 ATH10K_HTC_SVC_ID_WMI_DATA_VO,
1458 PIPEDIR_IN, /* in = DL = target -> host */
1462 ATH10K_HTC_SVC_ID_WMI_DATA_BK,
1463 PIPEDIR_OUT, /* out = UL = host -> target */
1467 ATH10K_HTC_SVC_ID_WMI_DATA_BK,
1468 PIPEDIR_IN, /* in = DL = target -> host */
1472 ATH10K_HTC_SVC_ID_WMI_DATA_BE,
1473 PIPEDIR_OUT, /* out = UL = host -> target */
1477 ATH10K_HTC_SVC_ID_WMI_DATA_BE,
1478 PIPEDIR_IN, /* in = DL = target -> host */
1482 ATH10K_HTC_SVC_ID_WMI_DATA_VI,
1483 PIPEDIR_OUT, /* out = UL = host -> target */
1487 ATH10K_HTC_SVC_ID_WMI_DATA_VI,
1488 PIPEDIR_IN, /* in = DL = target -> host */
1492 ATH10K_HTC_SVC_ID_WMI_CONTROL,
1493 PIPEDIR_OUT, /* out = UL = host -> target */
1497 ATH10K_HTC_SVC_ID_WMI_CONTROL,
1498 PIPEDIR_IN, /* in = DL = target -> host */
1502 ATH10K_HTC_SVC_ID_RSVD_CTRL,
1503 PIPEDIR_OUT, /* out = UL = host -> target */
1504 0, /* could be moved to 3 (share with WMI) */
1507 ATH10K_HTC_SVC_ID_RSVD_CTRL,
1508 PIPEDIR_IN, /* in = DL = target -> host */
1512 ATH10K_HTC_SVC_ID_TEST_RAW_STREAMS, /* not currently used */
1513 PIPEDIR_OUT, /* out = UL = host -> target */
1517 ATH10K_HTC_SVC_ID_TEST_RAW_STREAMS, /* not currently used */
1518 PIPEDIR_IN, /* in = DL = target -> host */
1522 ATH10K_HTC_SVC_ID_HTT_DATA_MSG,
1523 PIPEDIR_OUT, /* out = UL = host -> target */
1527 ATH10K_HTC_SVC_ID_HTT_DATA_MSG,
1528 PIPEDIR_IN, /* in = DL = target -> host */
1532 /* (Additions here) */
1534 { /* Must be last */
1542 * Send an interrupt to the device to wake up the Target CPU
1543 * so it has an opportunity to notice any changed state.
1545 static int ath10k_pci_wake_target_cpu(struct ath10k *ar)
1550 ret = ath10k_pci_diag_read_access(ar, SOC_CORE_BASE_ADDRESS |
1554 ath10k_warn("failed to read core_ctrl: %d\n", ret);
1558 /* A_INUM_FIRMWARE interrupt to Target CPU */
1559 core_ctrl |= CORE_CTRL_CPU_INTR_MASK;
1561 ret = ath10k_pci_diag_write_access(ar, SOC_CORE_BASE_ADDRESS |
1565 ath10k_warn("failed to set target CPU interrupt mask: %d\n",
1573 static int ath10k_pci_init_config(struct ath10k *ar)
1575 u32 interconnect_targ_addr;
1576 u32 pcie_state_targ_addr = 0;
1577 u32 pipe_cfg_targ_addr = 0;
1578 u32 svc_to_pipe_map = 0;
1579 u32 pcie_config_flags = 0;
1581 u32 ealloc_targ_addr;
1583 u32 flag2_targ_addr;
1586 /* Download to Target the CE Config and the service-to-CE map */
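/* In outline, ath10k_pci_init_config() below: reads hi_interconnect_state to
 * locate the target's struct pcie_state, writes target_ce_config_wlan to the
 * pipe config address and target_service_to_ce_map_wlan to the service map
 * address found there, clears PCIE_CONFIG_FLAG_ENABLE_L1 in config_flags,
 * programs the early-allocation magic plus one IRAM bank into hi_early_alloc,
 * and finally sets HI_OPTION_EARLY_CFG_DONE in hi_option_flag2 so the target
 * proceeds with initialisation. */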
1587 interconnect_targ_addr =
1588 host_interest_item_address(HI_ITEM(hi_interconnect_state));
1590 /* Supply Target-side CE configuration */
1591 ret = ath10k_pci_diag_read_access(ar, interconnect_targ_addr,
1592 &pcie_state_targ_addr);
1594 ath10k_err("Failed to get pcie state addr: %d\n", ret);
1598 if (pcie_state_targ_addr == 0) {
1600 ath10k_err("Invalid pcie state addr\n");
1604 ret = ath10k_pci_diag_read_access(ar, pcie_state_targ_addr +
1605 offsetof(struct pcie_state,
1607 &pipe_cfg_targ_addr);
1609 ath10k_err("Failed to get pipe cfg addr: %d\n", ret);
1613 if (pipe_cfg_targ_addr == 0) {
1615 ath10k_err("Invalid pipe cfg addr\n");
1619 ret = ath10k_pci_diag_write_mem(ar, pipe_cfg_targ_addr,
1620 target_ce_config_wlan,
1621 sizeof(target_ce_config_wlan));
1624 ath10k_err("Failed to write pipe cfg: %d\n", ret);
1628 ret = ath10k_pci_diag_read_access(ar, pcie_state_targ_addr +
1629 offsetof(struct pcie_state,
1633 ath10k_err("Failed to get svc/pipe map: %d\n", ret);
1637 if (svc_to_pipe_map == 0) {
1639 ath10k_err("Invalid svc_to_pipe map\n");
1643 ret = ath10k_pci_diag_write_mem(ar, svc_to_pipe_map,
1644 target_service_to_ce_map_wlan,
1645 sizeof(target_service_to_ce_map_wlan));
1647 ath10k_err("Failed to write svc/pipe map: %d\n", ret);
1651 ret = ath10k_pci_diag_read_access(ar, pcie_state_targ_addr +
1652 offsetof(struct pcie_state,
1654 &pcie_config_flags);
1656 ath10k_err("Failed to get pcie config_flags: %d\n", ret);
1660 pcie_config_flags &= ~PCIE_CONFIG_FLAG_ENABLE_L1;
1662 ret = ath10k_pci_diag_write_mem(ar, pcie_state_targ_addr +
1663 offsetof(struct pcie_state, config_flags),
1665 sizeof(pcie_config_flags));
1667 ath10k_err("Failed to write pcie config_flags: %d\n", ret);
1671 /* configure early allocation */
1672 ealloc_targ_addr = host_interest_item_address(HI_ITEM(hi_early_alloc));
1674 ret = ath10k_pci_diag_read_access(ar, ealloc_targ_addr, &ealloc_value);
1676 ath10k_err("Faile to get early alloc val: %d\n", ret);
1680 /* first bank is switched to IRAM */
1681 ealloc_value |= ((HI_EARLY_ALLOC_MAGIC << HI_EARLY_ALLOC_MAGIC_SHIFT) &
1682 HI_EARLY_ALLOC_MAGIC_MASK);
1683 ealloc_value |= ((1 << HI_EARLY_ALLOC_IRAM_BANKS_SHIFT) &
1684 HI_EARLY_ALLOC_IRAM_BANKS_MASK);
1686 ret = ath10k_pci_diag_write_access(ar, ealloc_targ_addr, ealloc_value);
1688 ath10k_err("Failed to set early alloc val: %d\n", ret);
1692 /* Tell Target to proceed with initialization */
1693 flag2_targ_addr = host_interest_item_address(HI_ITEM(hi_option_flag2));
1695 ret = ath10k_pci_diag_read_access(ar, flag2_targ_addr, &flag2_value);
1697 ath10k_err("Failed to get option val: %d\n", ret);
1701 flag2_value |= HI_OPTION_EARLY_CFG_DONE;
1703 ret = ath10k_pci_diag_write_access(ar, flag2_targ_addr, flag2_value);
1705 ath10k_err("Failed to set option val: %d\n", ret);
1714 static int ath10k_pci_ce_init(struct ath10k *ar)
1716 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1717 struct ath10k_pci_pipe *pipe_info;
1718 const struct ce_attr *attr;
1721 for (pipe_num = 0; pipe_num < CE_COUNT; pipe_num++) {
1722 pipe_info = &ar_pci->pipe_info[pipe_num];
1723 pipe_info->pipe_num = pipe_num;
1724 pipe_info->hif_ce_state = ar;
1725 attr = &host_ce_config_wlan[pipe_num];
1727 pipe_info->ce_hdl = ath10k_ce_init(ar, pipe_num, attr);
1728 if (pipe_info->ce_hdl == NULL) {
1729 ath10k_err("failed to initialize CE for pipe: %d\n",
1732 /* It is safe to call it here. It checks if ce_hdl is
1733 * valid for each pipe */
1734 ath10k_pci_ce_deinit(ar);
1738 if (pipe_num == CE_COUNT - 1) {
1740 * Reserve the ultimate CE for
1741 * diagnostic Window support
1743 ar_pci->ce_diag = pipe_info->ce_hdl;
1747 pipe_info->buf_sz = (size_t) (attr->src_sz_max);
1753 static void ath10k_pci_fw_interrupt_handler(struct ath10k *ar)
1755 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1758 ath10k_pci_wake(ar);
1760 fw_indicator = ath10k_pci_read32(ar, FW_INDICATOR_ADDRESS);
1762 if (fw_indicator & FW_IND_EVENT_PENDING) {
1763 /* ACK: clear Target-side pending event */
1764 ath10k_pci_write32(ar, FW_INDICATOR_ADDRESS,
1765 fw_indicator & ~FW_IND_EVENT_PENDING);
1767 if (ar_pci->started) {
1768 ath10k_pci_hif_dump_area(ar);
1771 * Probable Target failure before we're prepared
1772 * to handle it. Generally unexpected.
1774 ath10k_warn("early firmware event indicated\n");
1778 ath10k_pci_sleep(ar);
1781 static int ath10k_pci_warm_reset(struct ath10k *ar)
1786 ath10k_dbg(ATH10K_DBG_BOOT, "boot performing warm chip reset\n");
1788 ret = ath10k_do_pci_wake(ar);
1790 ath10k_err("failed to wake up target: %d\n", ret);
1795 val = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
1796 PCIE_INTR_CAUSE_ADDRESS);
1797 ath10k_dbg(ATH10K_DBG_BOOT, "boot host cpu intr cause: 0x%08x\n", val);
1799 val = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
1801 ath10k_dbg(ATH10K_DBG_BOOT, "boot target cpu intr cause: 0x%08x\n",
1804 /* disable pending irqs */
1805 ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS +
1806 PCIE_INTR_ENABLE_ADDRESS, 0);
1808 ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS +
1809 PCIE_INTR_CLR_ADDRESS, ~0);
1813 /* clear fw indicator */
1814 ath10k_pci_write32(ar, FW_INDICATOR_ADDRESS, 0);
1816 /* clear target LF timer interrupts */
1817 val = ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS +
1818 SOC_LF_TIMER_CONTROL0_ADDRESS);
1819 ath10k_pci_write32(ar, RTC_SOC_BASE_ADDRESS +
1820 SOC_LF_TIMER_CONTROL0_ADDRESS,
1821 val & ~SOC_LF_TIMER_CONTROL0_ENABLE_MASK);
1824 val = ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS +
1825 SOC_RESET_CONTROL_ADDRESS);
1826 ath10k_pci_write32(ar, RTC_SOC_BASE_ADDRESS + SOC_RESET_CONTROL_ADDRESS,
1827 val | SOC_RESET_CONTROL_CE_RST_MASK);
1828 val = ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS +
1829 SOC_RESET_CONTROL_ADDRESS);
1833 ath10k_pci_write32(ar, RTC_SOC_BASE_ADDRESS + SOC_RESET_CONTROL_ADDRESS,
1834 val & ~SOC_RESET_CONTROL_CE_RST_MASK);
1835 val = ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS +
1836 SOC_RESET_CONTROL_ADDRESS);
1840 val = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
1841 PCIE_INTR_CAUSE_ADDRESS);
1842 ath10k_dbg(ATH10K_DBG_BOOT, "boot host cpu intr cause: 0x%08x\n", val);
1844 val = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
1846 ath10k_dbg(ATH10K_DBG_BOOT, "boot target cpu intr cause: 0x%08x\n",
1849 /* CPU warm reset */
1850 val = ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS +
1851 SOC_RESET_CONTROL_ADDRESS);
1852 ath10k_pci_write32(ar, RTC_SOC_BASE_ADDRESS + SOC_RESET_CONTROL_ADDRESS,
1853 val | SOC_RESET_CONTROL_CPU_WARM_RST_MASK);
1855 val = ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS +
1856 SOC_RESET_CONTROL_ADDRESS);
1857 ath10k_dbg(ATH10K_DBG_BOOT, "boot target reset state: 0x%08x\n", val);
1861 ath10k_dbg(ATH10K_DBG_BOOT, "boot warm reset complete\n");
1863 ath10k_do_pci_sleep(ar);
1867 static int __ath10k_pci_hif_power_up(struct ath10k *ar, bool cold_reset)
1869 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1870 const char *irq_mode;
1874 * Bring the target up cleanly.
1876 * The target may be in an undefined state with an AUX-powered Target
1877 * and a Host in WoW mode. If the Host crashes, loses power, or is
1878 * restarted (without unloading the driver) then the Target is left
1879 * (aux) powered and running. On a subsequent driver load, the Target
1880 * is in an unexpected state. We try to catch that here in order to
1881 * reset the Target and retry the probe.
1884 ret = ath10k_pci_cold_reset(ar);
1886 ret = ath10k_pci_warm_reset(ar);
1889 ath10k_err("failed to reset target: %d\n", ret);
1893 if (!test_bit(ATH10K_PCI_FEATURE_SOC_POWER_SAVE, ar_pci->features))
1894 /* Force AWAKE forever */
1895 ath10k_do_pci_wake(ar);
1897 ret = ath10k_pci_ce_init(ar);
1899 ath10k_err("failed to initialize CE: %d\n", ret);
1903 ret = ath10k_ce_disable_interrupts(ar);
1905 ath10k_err("failed to disable CE interrupts: %d\n", ret);
1909 ret = ath10k_pci_init_irq(ar);
1911 ath10k_err("failed to init irqs: %d\n", ret);
1915 ret = ath10k_pci_request_early_irq(ar);
1917 ath10k_err("failed to request early irq: %d\n", ret);
1918 goto err_deinit_irq;
1921 ret = ath10k_pci_wait_for_target_init(ar);
1923 ath10k_err("failed to wait for target to init: %d\n", ret);
1924 goto err_free_early_irq;
1927 ret = ath10k_pci_init_config(ar);
1929 ath10k_err("failed to setup init config: %d\n", ret);
1930 goto err_free_early_irq;
1933 ret = ath10k_pci_wake_target_cpu(ar);
1935 ath10k_err("could not wake up target CPU: %d\n", ret);
1936 goto err_free_early_irq;
1939 if (ar_pci->num_msi_intrs > 1)
1941 else if (ar_pci->num_msi_intrs == 1)
1944 irq_mode = "legacy";
1946 if (!test_bit(ATH10K_FLAG_FIRST_BOOT_DONE, &ar->dev_flags))
1947 ath10k_info("pci irq %s\n", irq_mode);
1952 ath10k_pci_free_early_irq(ar);
1954 ath10k_pci_deinit_irq(ar);
1956 ath10k_pci_ce_deinit(ar);
1957 ath10k_pci_warm_reset(ar);
1959 if (!test_bit(ATH10K_PCI_FEATURE_SOC_POWER_SAVE, ar_pci->features))
1960 ath10k_do_pci_sleep(ar);
1965 static int ath10k_pci_hif_power_up(struct ath10k *ar)
1970 * Hardware CUS232 version 2 has some issues with cold reset and the
1971 * preferred (and safer) way to perform a device reset is through a warm reset.
1974 * Warm reset doesn't always work though (notably after a firmware
1975 * crash) so fall back to cold reset if necessary.
1977 ret = __ath10k_pci_hif_power_up(ar, false);
1979 ath10k_warn("failed to power up target using warm reset: %d\n",
1982 if (ath10k_pci_reset_mode == ATH10K_PCI_RESET_WARM_ONLY)
1985 ath10k_warn("trying cold reset\n");
1987 ret = __ath10k_pci_hif_power_up(ar, true);
1989 ath10k_err("failed to power up target using cold reset too (%d)\n",
1998 static void ath10k_pci_hif_power_down(struct ath10k *ar)
2000 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2002 ath10k_pci_free_early_irq(ar);
2003 ath10k_pci_kill_tasklet(ar);
2004 ath10k_pci_deinit_irq(ar);
2005 ath10k_pci_warm_reset(ar);
2007 ath10k_pci_ce_deinit(ar);
2008 if (!test_bit(ATH10K_PCI_FEATURE_SOC_POWER_SAVE, ar_pci->features))
2009 ath10k_do_pci_sleep(ar);
2014 #define ATH10K_PCI_PM_CONTROL 0x44
2016 static int ath10k_pci_hif_suspend(struct ath10k *ar)
2018 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2019 struct pci_dev *pdev = ar_pci->pdev;
2022 pci_read_config_dword(pdev, ATH10K_PCI_PM_CONTROL, &val);
2024 if ((val & 0x000000ff) != 0x3) {
2025 pci_save_state(pdev);
2026 pci_disable_device(pdev);
2027 pci_write_config_dword(pdev, ATH10K_PCI_PM_CONTROL,
2028 (val & 0xffffff00) | 0x03);
2034 static int ath10k_pci_hif_resume(struct ath10k *ar)
2036 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2037 struct pci_dev *pdev = ar_pci->pdev;
2040 pci_read_config_dword(pdev, ATH10K_PCI_PM_CONTROL, &val);
2042 if ((val & 0x000000ff) != 0) {
2043 pci_restore_state(pdev);
2044 pci_write_config_dword(pdev, ATH10K_PCI_PM_CONTROL,
2047 * Suspend/Resume resets the PCI configuration space,
2048 * so we have to re-disable the RETRY_TIMEOUT register (0x41)
2049 * to keep PCI Tx retries from interfering with C3 CPU state
2051 pci_read_config_dword(pdev, 0x40, &val);
2053 if ((val & 0x0000ff00) != 0)
2054 pci_write_config_dword(pdev, 0x40, val & 0xffff00ff);
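/* The dword read at offset 0x40 covers the RETRY_TIMEOUT register at 0x41;
 * masking with 0xffff00ff zeroes that byte, i.e. disables the retry timeout
 * as described in the comment above. */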
2061 static const struct ath10k_hif_ops ath10k_pci_hif_ops = {
2062 .tx_sg = ath10k_pci_hif_tx_sg,
2063 .exchange_bmi_msg = ath10k_pci_hif_exchange_bmi_msg,
2064 .start = ath10k_pci_hif_start,
2065 .stop = ath10k_pci_hif_stop,
2066 .map_service_to_pipe = ath10k_pci_hif_map_service_to_pipe,
2067 .get_default_pipe = ath10k_pci_hif_get_default_pipe,
2068 .send_complete_check = ath10k_pci_hif_send_complete_check,
2069 .set_callbacks = ath10k_pci_hif_set_callbacks,
2070 .get_free_queue_number = ath10k_pci_hif_get_free_queue_number,
2071 .power_up = ath10k_pci_hif_power_up,
2072 .power_down = ath10k_pci_hif_power_down,
2074 .suspend = ath10k_pci_hif_suspend,
2075 .resume = ath10k_pci_hif_resume,
2079 static void ath10k_pci_ce_tasklet(unsigned long ptr)
2081 struct ath10k_pci_pipe *pipe = (struct ath10k_pci_pipe *)ptr;
2082 struct ath10k_pci *ar_pci = pipe->ar_pci;
2084 ath10k_ce_per_engine_service(ar_pci->ar, pipe->pipe_num);
2087 static void ath10k_msi_err_tasklet(unsigned long data)
2089 struct ath10k *ar = (struct ath10k *)data;
2091 ath10k_pci_fw_interrupt_handler(ar);
2095 * Handler for a per-engine interrupt on a PARTICULAR CE.
2096 * This is used in cases where each CE has a private MSI interrupt.
2098 static irqreturn_t ath10k_pci_per_engine_handler(int irq, void *arg)
2100 struct ath10k *ar = arg;
2101 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2102 int ce_id = irq - ar_pci->pdev->irq - MSI_ASSIGN_CE_INITIAL;
2104 if (ce_id < 0 || ce_id >= ARRAY_SIZE(ar_pci->pipe_info)) {
2105 ath10k_warn("unexpected/invalid irq %d ce_id %d\n", irq, ce_id);
2110 * NOTE: We are able to derive ce_id from irq because we
2111 * use a one-to-one mapping for CEs 0..5.
2112 * CEs 6 & 7 do not use interrupts at all.
2114 * This mapping must be kept in sync with the mapping
2117 tasklet_schedule(&ar_pci->pipe_info[ce_id].intr);
2121 static irqreturn_t ath10k_pci_msi_fw_handler(int irq, void *arg)
2123 struct ath10k *ar = arg;
2124 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2126 tasklet_schedule(&ar_pci->msi_fw_err);
2131 * Top-level interrupt handler for all PCI interrupts from a Target.
2132 * When a block of MSI interrupts is allocated, this top-level handler
2133 * is not used; instead, we directly call the correct sub-handler.
2135 static irqreturn_t ath10k_pci_interrupt_handler(int irq, void *arg)
2137 struct ath10k *ar = arg;
2138 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2140 if (ar_pci->num_msi_intrs == 0) {
2141 if (!ath10k_pci_irq_pending(ar))
2144 ath10k_pci_disable_and_clear_legacy_irq(ar);
2147 tasklet_schedule(&ar_pci->intr_tq);
2152 static void ath10k_pci_early_irq_tasklet(unsigned long data)
2154 struct ath10k *ar = (struct ath10k *)data;
2158 ret = ath10k_pci_wake(ar);
2160 ath10k_warn("failed to wake target in early irq tasklet: %d\n",
2165 fw_ind = ath10k_pci_read32(ar, FW_INDICATOR_ADDRESS);
2166 if (fw_ind & FW_IND_EVENT_PENDING) {
2167 ath10k_pci_write32(ar, FW_INDICATOR_ADDRESS,
2168 fw_ind & ~FW_IND_EVENT_PENDING);
2170 /* Some structures are unavailable during early boot or at
2171 * driver teardown so just print that the device has crashed. */
2172 ath10k_warn("device crashed - no diagnostics available\n");
2175 ath10k_pci_sleep(ar);
2176 ath10k_pci_enable_legacy_irq(ar);
2179 static void ath10k_pci_tasklet(unsigned long data)
2181 struct ath10k *ar = (struct ath10k *)data;
2182 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2184 ath10k_pci_fw_interrupt_handler(ar); /* FIXME: Handle FW error */
2185 ath10k_ce_per_engine_service_any(ar);
2187 /* Re-enable legacy irq that was disabled in the irq handler */
2188 if (ar_pci->num_msi_intrs == 0)
2189 ath10k_pci_enable_legacy_irq(ar);
2192 static int ath10k_pci_request_irq_msix(struct ath10k *ar)
2194 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2197 ret = request_irq(ar_pci->pdev->irq + MSI_ASSIGN_FW,
2198 ath10k_pci_msi_fw_handler,
2199 IRQF_SHARED, "ath10k_pci", ar);
2201 ath10k_warn("failed to request MSI-X fw irq %d: %d\n",
2202 ar_pci->pdev->irq + MSI_ASSIGN_FW, ret);
2206 for (i = MSI_ASSIGN_CE_INITIAL; i <= MSI_ASSIGN_CE_MAX; i++) {
2207 ret = request_irq(ar_pci->pdev->irq + i,
2208 ath10k_pci_per_engine_handler,
2209 IRQF_SHARED, "ath10k_pci", ar);
2211 ath10k_warn("failed to request MSI-X ce irq %d: %d\n",
2212 ar_pci->pdev->irq + i, ret);
2214 for (i--; i >= MSI_ASSIGN_CE_INITIAL; i--)
2215 free_irq(ar_pci->pdev->irq + i, ar);
2217 free_irq(ar_pci->pdev->irq + MSI_ASSIGN_FW, ar);
2225 static int ath10k_pci_request_irq_msi(struct ath10k *ar)
2227 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2230 ret = request_irq(ar_pci->pdev->irq,
2231 ath10k_pci_interrupt_handler,
2232 IRQF_SHARED, "ath10k_pci", ar);
2234 ath10k_warn("failed to request MSI irq %d: %d\n",
2235 ar_pci->pdev->irq, ret);
2242 static int ath10k_pci_request_irq_legacy(struct ath10k *ar)
2244 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2247 ret = request_irq(ar_pci->pdev->irq,
2248 ath10k_pci_interrupt_handler,
2249 IRQF_SHARED, "ath10k_pci", ar);
2251 ath10k_warn("failed to request legacy irq %d: %d\n",
2252 ar_pci->pdev->irq, ret);
2259 static int ath10k_pci_request_irq(struct ath10k *ar)
2261 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2263 switch (ar_pci->num_msi_intrs) {
2265 return ath10k_pci_request_irq_legacy(ar);
2267 return ath10k_pci_request_irq_msi(ar);
2268 case MSI_NUM_REQUEST:
2269 return ath10k_pci_request_irq_msix(ar);
2272 ath10k_warn("unknown irq configuration upon request\n");
2276 static void ath10k_pci_free_irq(struct ath10k *ar)
2278 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2281 /* There's at least one interrupt regardless of whether it's a legacy INTR
2282 * or MSI or MSI-X */
2283 for (i = 0; i < max(1, ar_pci->num_msi_intrs); i++)
2284 free_irq(ar_pci->pdev->irq + i, ar);
2287 static void ath10k_pci_init_irq_tasklets(struct ath10k *ar)
2289 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2292 tasklet_init(&ar_pci->intr_tq, ath10k_pci_tasklet, (unsigned long)ar);
2293 tasklet_init(&ar_pci->msi_fw_err, ath10k_msi_err_tasklet,
2295 tasklet_init(&ar_pci->early_irq_tasklet, ath10k_pci_early_irq_tasklet,
2298 for (i = 0; i < CE_COUNT; i++) {
2299 ar_pci->pipe_info[i].ar_pci = ar_pci;
2300 tasklet_init(&ar_pci->pipe_info[i].intr, ath10k_pci_ce_tasklet,
2301 (unsigned long)&ar_pci->pipe_info[i]);
2305 static int ath10k_pci_init_irq(struct ath10k *ar)
2307 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2308 bool msix_supported = test_bit(ATH10K_PCI_FEATURE_MSI_X,
2312 ath10k_pci_init_irq_tasklets(ar);
2314 if (ath10k_pci_irq_mode != ATH10K_PCI_IRQ_AUTO &&
2315 !test_bit(ATH10K_FLAG_FIRST_BOOT_DONE, &ar->dev_flags))
2316 ath10k_info("limiting irq mode to: %d\n", ath10k_pci_irq_mode);
2319 if (ath10k_pci_irq_mode == ATH10K_PCI_IRQ_AUTO && msix_supported) {
2320 ar_pci->num_msi_intrs = MSI_NUM_REQUEST;
2321 ret = pci_enable_msi_range(ar_pci->pdev, ar_pci->num_msi_intrs,
2322 ar_pci->num_msi_intrs);
2330 if (ath10k_pci_irq_mode != ATH10K_PCI_IRQ_LEGACY) {
2331 ar_pci->num_msi_intrs = 1;
2332 ret = pci_enable_msi(ar_pci->pdev);
2341 * A potential race occurs here: The CORE_BASE write
2342 * depends on target correctly decoding AXI address but
2343 * host won't know when target writes BAR to CORE_CTRL.
2344 * This write might get lost if target has NOT written BAR.
2345 * For now, fix the race by repeating the write in the
2346 * synchronization check below. */
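/* See ath10k_pci_wait_for_target_init(), which keeps re-writing
 * PCIE_INTR_ENABLE until FW_IND_INITIALIZED is observed, papering over the
 * race described above. */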
2347 ar_pci->num_msi_intrs = 0;
2349 ret = ath10k_pci_wake(ar);
2351 ath10k_warn("failed to wake target: %d\n", ret);
2355 ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + PCIE_INTR_ENABLE_ADDRESS,
2356 PCIE_INTR_FIRMWARE_MASK | PCIE_INTR_CE_MASK_ALL);
2357 ath10k_pci_sleep(ar);
2362 static int ath10k_pci_deinit_irq_legacy(struct ath10k *ar)
2366 ret = ath10k_pci_wake(ar);
2368 ath10k_warn("failed to wake target: %d\n", ret);
2372 ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + PCIE_INTR_ENABLE_ADDRESS,
2374 ath10k_pci_sleep(ar);
2379 static int ath10k_pci_deinit_irq(struct ath10k *ar)
2381 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2383 switch (ar_pci->num_msi_intrs) {
2385 return ath10k_pci_deinit_irq_legacy(ar);
2388 case MSI_NUM_REQUEST:
2389 pci_disable_msi(ar_pci->pdev);
2392 pci_disable_msi(ar_pci->pdev);
2395 ath10k_warn("unknown irq configuration upon deinit\n");
2399 static int ath10k_pci_wait_for_target_init(struct ath10k *ar)
2401 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2402 unsigned long timeout;
2406 ret = ath10k_pci_wake(ar);
2408 ath10k_err("failed to wake up target for init: %d\n", ret);
2412 timeout = jiffies + msecs_to_jiffies(ATH10K_PCI_TARGET_WAIT);
2415 val = ath10k_pci_read32(ar, FW_INDICATOR_ADDRESS);
2417 /* target should never return this */
2418 if (val == 0xffffffff)
2421 if (val & FW_IND_INITIALIZED)
2424 if (ar_pci->num_msi_intrs == 0)
2425 /* Fix potential race by repeating CORE_BASE writes */
2426 ath10k_pci_soc_write32(ar, PCIE_INTR_ENABLE_ADDRESS,
2427 PCIE_INTR_FIRMWARE_MASK |
2428 PCIE_INTR_CE_MASK_ALL);
2431 } while (time_before(jiffies, timeout));
2433 if (val == 0xffffffff || !(val & FW_IND_INITIALIZED)) {
2434 ath10k_err("failed to receive initialized event from target: %08x\n",
2441 ath10k_pci_sleep(ar);
2445 static int ath10k_pci_cold_reset(struct ath10k *ar)
2450 ret = ath10k_do_pci_wake(ar);
2452 ath10k_err("failed to wake up target: %d\n",
2457 /* Put Target, including PCIe, into RESET. */
2458 val = ath10k_pci_reg_read32(ar, SOC_GLOBAL_RESET_ADDRESS);
2460 ath10k_pci_reg_write32(ar, SOC_GLOBAL_RESET_ADDRESS, val);
2462 for (i = 0; i < ATH_PCI_RESET_WAIT_MAX; i++) {
2463 if (ath10k_pci_reg_read32(ar, RTC_STATE_ADDRESS) &
2464 RTC_STATE_COLD_RESET_MASK)
2469 /* Pull Target, including PCIe, out of RESET. */
2471 ath10k_pci_reg_write32(ar, SOC_GLOBAL_RESET_ADDRESS, val);
2473 for (i = 0; i < ATH_PCI_RESET_WAIT_MAX; i++) {
2474 if (!(ath10k_pci_reg_read32(ar, RTC_STATE_ADDRESS) &
2475 RTC_STATE_COLD_RESET_MASK))
2480 ath10k_do_pci_sleep(ar);
2484 static void ath10k_pci_dump_features(struct ath10k_pci *ar_pci)
2488 for (i = 0; i < ATH10K_PCI_FEATURE_COUNT; i++) {
2489 if (!test_bit(i, ar_pci->features))
2493 case ATH10K_PCI_FEATURE_MSI_X:
2494 ath10k_dbg(ATH10K_DBG_BOOT, "device supports MSI-X\n");
2496 case ATH10K_PCI_FEATURE_SOC_POWER_SAVE:
2497 ath10k_dbg(ATH10K_DBG_BOOT, "QCA98XX SoC power save enabled\n");
2503 static int ath10k_pci_probe(struct pci_dev *pdev,
2504 const struct pci_device_id *pci_dev)
2509 struct ath10k_pci *ar_pci;
2510 u32 lcr_val, chip_id;
2512 ath10k_dbg(ATH10K_DBG_PCI, "%s\n", __func__);
2514 ar_pci = kzalloc(sizeof(*ar_pci), GFP_KERNEL);
2518 ar_pci->pdev = pdev;
2519 ar_pci->dev = &pdev->dev;
2521 switch (pci_dev->device) {
2522 case QCA988X_2_0_DEVICE_ID:
2523 set_bit(ATH10K_PCI_FEATURE_MSI_X, ar_pci->features);
2527 ath10k_err("Unknown device ID: %d\n", pci_dev->device);
2531 if (ath10k_pci_target_ps)
2532 set_bit(ATH10K_PCI_FEATURE_SOC_POWER_SAVE, ar_pci->features);
2534 ath10k_pci_dump_features(ar_pci);
2536 ar = ath10k_core_create(ar_pci, ar_pci->dev, &ath10k_pci_hif_ops);
2538 ath10k_err("failed to create driver core\n");
2544 atomic_set(&ar_pci->keep_awake_count, 0);
2546 pci_set_drvdata(pdev, ar);
2549 * Without any knowledge of the Host, the Target may have been reset or
2550 * power cycled and its Config Space may no longer reflect the PCI
2551 * address space that was assigned earlier by the PCI infrastructure.
2554 ret = pci_assign_resource(pdev, BAR_NUM);
2556 ath10k_err("failed to assign PCI space: %d\n", ret);
2560 ret = pci_enable_device(pdev);
2562 ath10k_err("failed to enable PCI device: %d\n", ret);
2566 /* Request MMIO resources */
2567 ret = pci_request_region(pdev, BAR_NUM, "ath");
2569 ath10k_err("failed to request MMIO region: %d\n", ret);
2574 * Target structures have a limit of 32-bit DMA pointers.
2575 * DMA pointers can be wider than 32 bits by default on some systems.
2577 ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
2579 ath10k_err("failed to set DMA mask to 32-bit: %d\n", ret);
2583 ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
2585 ath10k_err("failed to set consistent DMA mask to 32-bit\n");
2589 /* Set bus master bit in PCI_COMMAND to enable DMA */
2590 pci_set_master(pdev);
2593 * Temporary FIX: disable ASPM
2594 * Will be removed after the OTP is programmed
2596 pci_read_config_dword(pdev, 0x80, &lcr_val);
2597 pci_write_config_dword(pdev, 0x80, (lcr_val & 0xffffff00));
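/* Presumably offset 0x80 is this device's PCIe Link Control register;
 * clearing the low byte clears (among other bits) the ASPM L0s/L1 enable
 * bits, which is what "disable ASPM" above refers to. */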
2599 /* Arrange for access to Target SoC registers. */
2600 mem = pci_iomap(pdev, BAR_NUM, 0);
2602 ath10k_err("failed to perform IOMAP for BAR%d\n", BAR_NUM);
2609 spin_lock_init(&ar_pci->ce_lock);
2611 ret = ath10k_do_pci_wake(ar);
2613 ath10k_err("Failed to get chip id: %d\n", ret);
2617 chip_id = ath10k_pci_soc_read32(ar, SOC_CHIP_ID_ADDRESS);
2619 ath10k_do_pci_sleep(ar);
2621 ath10k_dbg(ATH10K_DBG_BOOT, "boot pci_mem 0x%p\n", ar_pci->mem);
2623 ret = ath10k_core_register(ar, chip_id);
2625 ath10k_err("failed to register driver core: %d\n", ret);
2632 pci_iounmap(pdev, mem);
2634 pci_clear_master(pdev);
2636 pci_release_region(pdev, BAR_NUM);
2638 pci_disable_device(pdev);
2640 ath10k_core_destroy(ar);
2642 /* call HIF PCI free here */
2648 static void ath10k_pci_remove(struct pci_dev *pdev)
2650 struct ath10k *ar = pci_get_drvdata(pdev);
2651 struct ath10k_pci *ar_pci;
2653 ath10k_dbg(ATH10K_DBG_PCI, "%s\n", __func__);
2658 ar_pci = ath10k_pci_priv(ar);
2663 tasklet_kill(&ar_pci->msi_fw_err);
2665 ath10k_core_unregister(ar);
2667 pci_iounmap(pdev, ar_pci->mem);
2668 pci_release_region(pdev, BAR_NUM);
2669 pci_clear_master(pdev);
2670 pci_disable_device(pdev);
2672 ath10k_core_destroy(ar);
2676 MODULE_DEVICE_TABLE(pci, ath10k_pci_id_table);
2678 static struct pci_driver ath10k_pci_driver = {
2679 .name = "ath10k_pci",
2680 .id_table = ath10k_pci_id_table,
2681 .probe = ath10k_pci_probe,
2682 .remove = ath10k_pci_remove,
2685 static int __init ath10k_pci_init(void)
2689 ret = pci_register_driver(&ath10k_pci_driver);
2691 ath10k_err("failed to register PCI driver: %d\n", ret);
2695 module_init(ath10k_pci_init);
2697 static void __exit ath10k_pci_exit(void)
2699 pci_unregister_driver(&ath10k_pci_driver);
2702 module_exit(ath10k_pci_exit);
2704 MODULE_AUTHOR("Qualcomm Atheros");
2705 MODULE_DESCRIPTION("Driver support for Atheros QCA988X PCIe devices");
2706 MODULE_LICENSE("Dual BSD/GPL");
2707 MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" QCA988X_HW_2_0_FW_2_FILE);
2708 MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" QCA988X_HW_2_0_BOARD_DATA_FILE);