/*
 * Copyright (c) 2005-2011 Atheros Communications Inc.
 * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */
#include <linux/pci.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/bitops.h>

/* NOTE(review): the driver-local includes below were dropped by the listing
 * extraction; restored per the upstream ath10k pci.c — verify against tree. */
#include "core.h"
#include "debug.h"

#include "targaddrs.h"
#include "bmi.h"

#include "hif.h"
#include "htc.h"

#include "ce.h"
#include "pci.h"
36 enum ath10k_pci_irq_mode {
37 ATH10K_PCI_IRQ_AUTO = 0,
38 ATH10K_PCI_IRQ_LEGACY = 1,
39 ATH10K_PCI_IRQ_MSI = 2,
42 enum ath10k_pci_reset_mode {
43 ATH10K_PCI_RESET_AUTO = 0,
44 ATH10K_PCI_RESET_WARM_ONLY = 1,
47 static unsigned int ath10k_pci_target_ps;
48 static unsigned int ath10k_pci_irq_mode = ATH10K_PCI_IRQ_AUTO;
49 static unsigned int ath10k_pci_reset_mode = ATH10K_PCI_RESET_AUTO;
51 module_param_named(target_ps, ath10k_pci_target_ps, uint, 0644);
52 MODULE_PARM_DESC(target_ps, "Enable ath10k Target (SoC) PS option");
54 module_param_named(irq_mode, ath10k_pci_irq_mode, uint, 0644);
55 MODULE_PARM_DESC(irq_mode, "0: auto, 1: legacy, 2: msi (default: 0)");
57 module_param_named(reset_mode, ath10k_pci_reset_mode, uint, 0644);
58 MODULE_PARM_DESC(reset_mode, "0: auto, 1: warm only (default: 0)");
60 /* how long wait to wait for target to initialise, in ms */
61 #define ATH10K_PCI_TARGET_WAIT 3000
63 #define QCA988X_2_0_DEVICE_ID (0x003c)
65 static DEFINE_PCI_DEVICE_TABLE(ath10k_pci_id_table) = {
66 { PCI_VDEVICE(ATHEROS, QCA988X_2_0_DEVICE_ID) }, /* PCI-E QCA988X V2 */
70 static int ath10k_pci_diag_read_access(struct ath10k *ar, u32 address,
73 static int ath10k_pci_post_rx(struct ath10k *ar);
74 static int ath10k_pci_post_rx_pipe(struct ath10k_pci_pipe *pipe_info,
76 static void ath10k_pci_rx_pipe_cleanup(struct ath10k_pci_pipe *pipe_info);
77 static int ath10k_pci_cold_reset(struct ath10k *ar);
78 static int ath10k_pci_warm_reset(struct ath10k *ar);
79 static int ath10k_pci_wait_for_target_init(struct ath10k *ar);
80 static int ath10k_pci_init_irq(struct ath10k *ar);
81 static int ath10k_pci_deinit_irq(struct ath10k *ar);
82 static int ath10k_pci_request_irq(struct ath10k *ar);
83 static void ath10k_pci_free_irq(struct ath10k *ar);
84 static int ath10k_pci_bmi_wait(struct ath10k_ce_pipe *tx_pipe,
85 struct ath10k_ce_pipe *rx_pipe,
86 struct bmi_xfer *xfer);
88 static const struct ce_attr host_ce_config_wlan[] = {
89 /* CE0: host->target HTC control and raw streams */
91 .flags = CE_ATTR_FLAGS,
97 /* CE1: target->host HTT + HTC control */
99 .flags = CE_ATTR_FLAGS,
102 .dest_nentries = 512,
105 /* CE2: target->host WMI */
107 .flags = CE_ATTR_FLAGS,
113 /* CE3: host->target WMI */
115 .flags = CE_ATTR_FLAGS,
121 /* CE4: host->target HTT */
123 .flags = CE_ATTR_FLAGS | CE_ATTR_DIS_INTR,
124 .src_nentries = CE_HTT_H2T_MSG_SRC_NENTRIES,
131 .flags = CE_ATTR_FLAGS,
137 /* CE6: target autonomous hif_memcpy */
139 .flags = CE_ATTR_FLAGS,
145 /* CE7: ce_diag, the Diagnostic Window */
147 .flags = CE_ATTR_FLAGS,
149 .src_sz_max = DIAG_TRANSFER_LIMIT,
154 /* Target firmware's Copy Engine configuration. */
155 static const struct ce_pipe_config target_ce_config_wlan[] = {
156 /* CE0: host->target HTC control and raw streams */
159 .pipedir = PIPEDIR_OUT,
162 .flags = CE_ATTR_FLAGS,
166 /* CE1: target->host HTT + HTC control */
169 .pipedir = PIPEDIR_IN,
172 .flags = CE_ATTR_FLAGS,
176 /* CE2: target->host WMI */
179 .pipedir = PIPEDIR_IN,
182 .flags = CE_ATTR_FLAGS,
186 /* CE3: host->target WMI */
189 .pipedir = PIPEDIR_OUT,
192 .flags = CE_ATTR_FLAGS,
196 /* CE4: host->target HTT */
199 .pipedir = PIPEDIR_OUT,
202 .flags = CE_ATTR_FLAGS,
206 /* NB: 50% of src nentries, since tx has 2 frags */
211 .pipedir = PIPEDIR_OUT,
214 .flags = CE_ATTR_FLAGS,
218 /* CE6: Reserved for target autonomous hif_memcpy */
221 .pipedir = PIPEDIR_INOUT,
224 .flags = CE_ATTR_FLAGS,
228 /* CE7 used only by Host */
231 static bool ath10k_pci_irq_pending(struct ath10k *ar)
235 /* Check if the shared legacy irq is for us */
236 cause = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
237 PCIE_INTR_CAUSE_ADDRESS);
238 if (cause & (PCIE_INTR_FIRMWARE_MASK | PCIE_INTR_CE_MASK_ALL))
244 static void ath10k_pci_disable_and_clear_legacy_irq(struct ath10k *ar)
246 /* IMPORTANT: INTR_CLR register has to be set after
247 * INTR_ENABLE is set to 0, otherwise interrupt can not be
249 ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + PCIE_INTR_ENABLE_ADDRESS,
251 ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + PCIE_INTR_CLR_ADDRESS,
252 PCIE_INTR_FIRMWARE_MASK | PCIE_INTR_CE_MASK_ALL);
254 /* IMPORTANT: this extra read transaction is required to
255 * flush the posted write buffer. */
256 (void) ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
257 PCIE_INTR_ENABLE_ADDRESS);
260 static void ath10k_pci_enable_legacy_irq(struct ath10k *ar)
262 ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS +
263 PCIE_INTR_ENABLE_ADDRESS,
264 PCIE_INTR_FIRMWARE_MASK | PCIE_INTR_CE_MASK_ALL);
266 /* IMPORTANT: this extra read transaction is required to
267 * flush the posted write buffer. */
268 (void) ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
269 PCIE_INTR_ENABLE_ADDRESS);
272 static irqreturn_t ath10k_pci_early_irq_handler(int irq, void *arg)
274 struct ath10k *ar = arg;
275 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
277 if (ar_pci->num_msi_intrs == 0) {
278 if (!ath10k_pci_irq_pending(ar))
281 ath10k_pci_disable_and_clear_legacy_irq(ar);
284 tasklet_schedule(&ar_pci->early_irq_tasklet);
289 static int ath10k_pci_request_early_irq(struct ath10k *ar)
291 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
294 /* Regardless whether MSI-X/MSI/legacy irqs have been set up the first
295 * interrupt from irq vector is triggered in all cases for FW
296 * indication/errors */
297 ret = request_irq(ar_pci->pdev->irq, ath10k_pci_early_irq_handler,
298 IRQF_SHARED, "ath10k_pci (early)", ar);
300 ath10k_warn("failed to request early irq: %d\n", ret);
307 static void ath10k_pci_free_early_irq(struct ath10k *ar)
309 free_irq(ath10k_pci_priv(ar)->pdev->irq, ar);
313 * Diagnostic read/write access is provided for startup/config/debug usage.
314 * Caller must guarantee proper alignment, when applicable, and single user
317 static int ath10k_pci_diag_read_mem(struct ath10k *ar, u32 address, void *data,
320 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
323 unsigned int completed_nbytes, orig_nbytes, remaining_bytes;
326 struct ath10k_ce_pipe *ce_diag;
327 /* Host buffer address in CE space */
329 dma_addr_t ce_data_base = 0;
330 void *data_buf = NULL;
334 * This code cannot handle reads to non-memory space. Redirect to the
335 * register read fn but preserve the multi word read capability of
338 if (address < DRAM_BASE_ADDRESS) {
339 if (!IS_ALIGNED(address, 4) ||
340 !IS_ALIGNED((unsigned long)data, 4))
343 while ((nbytes >= 4) && ((ret = ath10k_pci_diag_read_access(
344 ar, address, (u32 *)data)) == 0)) {
345 nbytes -= sizeof(u32);
346 address += sizeof(u32);
352 ce_diag = ar_pci->ce_diag;
355 * Allocate a temporary bounce buffer to hold caller's data
356 * to be DMA'ed from Target. This guarantees
357 * 1) 4-byte alignment
358 * 2) Buffer in DMA-able space
360 orig_nbytes = nbytes;
361 data_buf = (unsigned char *)pci_alloc_consistent(ar_pci->pdev,
369 memset(data_buf, 0, orig_nbytes);
371 remaining_bytes = orig_nbytes;
372 ce_data = ce_data_base;
373 while (remaining_bytes) {
374 nbytes = min_t(unsigned int, remaining_bytes,
375 DIAG_TRANSFER_LIMIT);
377 ret = ath10k_ce_recv_buf_enqueue(ce_diag, NULL, ce_data);
381 /* Request CE to send from Target(!) address to Host buffer */
383 * The address supplied by the caller is in the
384 * Target CPU virtual address space.
386 * In order to use this address with the diagnostic CE,
387 * convert it from Target CPU virtual address space
388 * to CE address space
391 address = TARG_CPU_SPACE_TO_CE_SPACE(ar, ar_pci->mem,
393 ath10k_pci_sleep(ar);
395 ret = ath10k_ce_send(ce_diag, NULL, (u32)address, nbytes, 0,
401 while (ath10k_ce_completed_send_next(ce_diag, NULL, &buf,
405 if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) {
411 if (nbytes != completed_nbytes) {
416 if (buf != (u32) address) {
422 while (ath10k_ce_completed_recv_next(ce_diag, NULL, &buf,
427 if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) {
433 if (nbytes != completed_nbytes) {
438 if (buf != ce_data) {
443 remaining_bytes -= nbytes;
450 /* Copy data from allocated DMA buf to caller's buf */
451 WARN_ON_ONCE(orig_nbytes & 3);
452 for (i = 0; i < orig_nbytes / sizeof(__le32); i++) {
454 __le32_to_cpu(((__le32 *)data_buf)[i]);
457 ath10k_warn("failed to read diag value at 0x%x: %d\n",
461 pci_free_consistent(ar_pci->pdev, orig_nbytes,
462 data_buf, ce_data_base);
467 /* Read 4-byte aligned data from Target memory or register */
468 static int ath10k_pci_diag_read_access(struct ath10k *ar, u32 address,
471 /* Assume range doesn't cross this boundary */
472 if (address >= DRAM_BASE_ADDRESS)
473 return ath10k_pci_diag_read_mem(ar, address, data, sizeof(u32));
476 *data = ath10k_pci_read32(ar, address);
477 ath10k_pci_sleep(ar);
481 static int ath10k_pci_diag_write_mem(struct ath10k *ar, u32 address,
482 const void *data, int nbytes)
484 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
487 unsigned int completed_nbytes, orig_nbytes, remaining_bytes;
490 struct ath10k_ce_pipe *ce_diag;
491 void *data_buf = NULL;
492 u32 ce_data; /* Host buffer address in CE space */
493 dma_addr_t ce_data_base = 0;
496 ce_diag = ar_pci->ce_diag;
499 * Allocate a temporary bounce buffer to hold caller's data
500 * to be DMA'ed to Target. This guarantees
501 * 1) 4-byte alignment
502 * 2) Buffer in DMA-able space
504 orig_nbytes = nbytes;
505 data_buf = (unsigned char *)pci_alloc_consistent(ar_pci->pdev,
513 /* Copy caller's data to allocated DMA buf */
514 WARN_ON_ONCE(orig_nbytes & 3);
515 for (i = 0; i < orig_nbytes / sizeof(__le32); i++)
516 ((__le32 *)data_buf)[i] = __cpu_to_le32(((u32 *)data)[i]);
519 * The address supplied by the caller is in the
520 * Target CPU virtual address space.
522 * In order to use this address with the diagnostic CE,
524 * Target CPU virtual address space
529 address = TARG_CPU_SPACE_TO_CE_SPACE(ar, ar_pci->mem, address);
530 ath10k_pci_sleep(ar);
532 remaining_bytes = orig_nbytes;
533 ce_data = ce_data_base;
534 while (remaining_bytes) {
535 /* FIXME: check cast */
536 nbytes = min_t(int, remaining_bytes, DIAG_TRANSFER_LIMIT);
538 /* Set up to receive directly into Target(!) address */
539 ret = ath10k_ce_recv_buf_enqueue(ce_diag, NULL, address);
544 * Request CE to send caller-supplied data that
545 * was copied to bounce buffer to Target(!) address.
547 ret = ath10k_ce_send(ce_diag, NULL, (u32) ce_data,
553 while (ath10k_ce_completed_send_next(ce_diag, NULL, &buf,
558 if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) {
564 if (nbytes != completed_nbytes) {
569 if (buf != ce_data) {
575 while (ath10k_ce_completed_recv_next(ce_diag, NULL, &buf,
580 if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) {
586 if (nbytes != completed_nbytes) {
591 if (buf != address) {
596 remaining_bytes -= nbytes;
603 pci_free_consistent(ar_pci->pdev, orig_nbytes, data_buf,
608 ath10k_warn("failed to write diag value at 0x%x: %d\n",
614 /* Write 4B data to Target memory or register */
615 static int ath10k_pci_diag_write_access(struct ath10k *ar, u32 address,
618 /* Assume range doesn't cross this boundary */
619 if (address >= DRAM_BASE_ADDRESS)
620 return ath10k_pci_diag_write_mem(ar, address, &data,
624 ath10k_pci_write32(ar, address, data);
625 ath10k_pci_sleep(ar);
629 static bool ath10k_pci_target_is_awake(struct ath10k *ar)
631 void __iomem *mem = ath10k_pci_priv(ar)->mem;
633 val = ioread32(mem + PCIE_LOCAL_BASE_ADDRESS +
635 return (RTC_STATE_V_GET(val) == RTC_STATE_V_ON);
638 int ath10k_do_pci_wake(struct ath10k *ar)
640 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
641 void __iomem *pci_addr = ar_pci->mem;
645 if (atomic_read(&ar_pci->keep_awake_count) == 0) {
647 iowrite32(PCIE_SOC_WAKE_V_MASK,
648 pci_addr + PCIE_LOCAL_BASE_ADDRESS +
649 PCIE_SOC_WAKE_ADDRESS);
651 atomic_inc(&ar_pci->keep_awake_count);
653 if (ar_pci->verified_awake)
657 if (ath10k_pci_target_is_awake(ar)) {
658 ar_pci->verified_awake = true;
662 if (tot_delay > PCIE_WAKE_TIMEOUT) {
663 ath10k_warn("target took longer %d us to wake up (awake count %d)\n",
665 atomic_read(&ar_pci->keep_awake_count));
670 tot_delay += curr_delay;
677 void ath10k_do_pci_sleep(struct ath10k *ar)
679 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
680 void __iomem *pci_addr = ar_pci->mem;
682 if (atomic_dec_and_test(&ar_pci->keep_awake_count)) {
684 ar_pci->verified_awake = false;
685 iowrite32(PCIE_SOC_WAKE_RESET,
686 pci_addr + PCIE_LOCAL_BASE_ADDRESS +
687 PCIE_SOC_WAKE_ADDRESS);
691 /* Called by lower (CE) layer when a send to Target completes. */
692 static void ath10k_pci_ce_send_done(struct ath10k_ce_pipe *ce_state)
694 struct ath10k *ar = ce_state->ar;
695 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
696 struct ath10k_hif_cb *cb = &ar_pci->msg_callbacks_current;
697 void *transfer_context;
700 unsigned int transfer_id;
702 while (ath10k_ce_completed_send_next(ce_state, &transfer_context,
704 &transfer_id) == 0) {
705 /* no need to call tx completion for NULL pointers */
706 if (transfer_context == NULL)
709 cb->tx_completion(ar, transfer_context, transfer_id);
713 /* Called by lower (CE) layer when data is received from the Target. */
714 static void ath10k_pci_ce_recv_data(struct ath10k_ce_pipe *ce_state)
716 struct ath10k *ar = ce_state->ar;
717 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
718 struct ath10k_pci_pipe *pipe_info = &ar_pci->pipe_info[ce_state->id];
719 struct ath10k_hif_cb *cb = &ar_pci->msg_callbacks_current;
721 void *transfer_context;
723 unsigned int nbytes, max_nbytes;
724 unsigned int transfer_id;
728 while (ath10k_ce_completed_recv_next(ce_state, &transfer_context,
729 &ce_data, &nbytes, &transfer_id,
731 err = ath10k_pci_post_rx_pipe(pipe_info, 1);
734 ath10k_warn("failed to replenish CE rx ring %d: %d\n",
735 pipe_info->pipe_num, err);
738 skb = transfer_context;
739 max_nbytes = skb->len + skb_tailroom(skb);
740 dma_unmap_single(ar->dev, ATH10K_SKB_CB(skb)->paddr,
741 max_nbytes, DMA_FROM_DEVICE);
743 if (unlikely(max_nbytes < nbytes)) {
744 ath10k_warn("rxed more than expected (nbytes %d, max %d)",
746 dev_kfree_skb_any(skb);
750 skb_put(skb, nbytes);
751 cb->rx_completion(ar, skb, pipe_info->pipe_num);
755 static int ath10k_pci_hif_tx_sg(struct ath10k *ar, u8 pipe_id,
756 struct ath10k_hif_sg_item *items, int n_items)
758 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
759 struct ath10k_pci_pipe *pci_pipe = &ar_pci->pipe_info[pipe_id];
760 struct ath10k_ce_pipe *ce_pipe = pci_pipe->ce_hdl;
761 struct ath10k_ce_ring *src_ring = ce_pipe->src_ring;
762 unsigned int nentries_mask = src_ring->nentries_mask;
763 unsigned int sw_index = src_ring->sw_index;
764 unsigned int write_index = src_ring->write_index;
767 spin_lock_bh(&ar_pci->ce_lock);
769 if (unlikely(CE_RING_DELTA(nentries_mask,
770 write_index, sw_index - 1) < n_items)) {
775 for (i = 0; i < n_items - 1; i++) {
776 ath10k_dbg(ATH10K_DBG_PCI,
777 "pci tx item %d paddr 0x%08x len %d n_items %d\n",
778 i, items[i].paddr, items[i].len, n_items);
779 ath10k_dbg_dump(ATH10K_DBG_PCI_DUMP, NULL, "item data: ",
780 items[i].vaddr, items[i].len);
782 err = ath10k_ce_send_nolock(ce_pipe,
783 items[i].transfer_context,
786 items[i].transfer_id,
787 CE_SEND_FLAG_GATHER);
792 /* `i` is equal to `n_items -1` after for() */
794 ath10k_dbg(ATH10K_DBG_PCI,
795 "pci tx item %d paddr 0x%08x len %d n_items %d\n",
796 i, items[i].paddr, items[i].len, n_items);
797 ath10k_dbg_dump(ATH10K_DBG_PCI_DUMP, NULL, "item data: ",
798 items[i].vaddr, items[i].len);
800 err = ath10k_ce_send_nolock(ce_pipe,
801 items[i].transfer_context,
804 items[i].transfer_id,
811 spin_unlock_bh(&ar_pci->ce_lock);
815 static u16 ath10k_pci_hif_get_free_queue_number(struct ath10k *ar, u8 pipe)
817 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
819 ath10k_dbg(ATH10K_DBG_PCI, "pci hif get free queue number\n");
821 return ath10k_ce_num_free_src_entries(ar_pci->pipe_info[pipe].ce_hdl);
824 static void ath10k_pci_hif_dump_area(struct ath10k *ar)
826 u32 reg_dump_area = 0;
827 u32 reg_dump_values[REG_DUMP_COUNT_QCA988X] = {};
832 ath10k_err("firmware crashed!\n");
833 ath10k_err("hardware name %s version 0x%x\n",
834 ar->hw_params.name, ar->target_version);
835 ath10k_err("firmware version: %s\n", ar->hw->wiphy->fw_version);
837 host_addr = host_interest_item_address(HI_ITEM(hi_failure_state));
838 ret = ath10k_pci_diag_read_mem(ar, host_addr,
839 ®_dump_area, sizeof(u32));
841 ath10k_err("failed to read FW dump area address: %d\n", ret);
845 ath10k_err("target register Dump Location: 0x%08X\n", reg_dump_area);
847 ret = ath10k_pci_diag_read_mem(ar, reg_dump_area,
849 REG_DUMP_COUNT_QCA988X * sizeof(u32));
851 ath10k_err("failed to read FW dump area: %d\n", ret);
855 BUILD_BUG_ON(REG_DUMP_COUNT_QCA988X % 4);
857 ath10k_err("target Register Dump\n");
858 for (i = 0; i < REG_DUMP_COUNT_QCA988X; i += 4)
859 ath10k_err("[%02d]: 0x%08X 0x%08X 0x%08X 0x%08X\n",
862 reg_dump_values[i + 1],
863 reg_dump_values[i + 2],
864 reg_dump_values[i + 3]);
866 queue_work(ar->workqueue, &ar->restart_work);
869 static void ath10k_pci_hif_send_complete_check(struct ath10k *ar, u8 pipe,
872 ath10k_dbg(ATH10K_DBG_PCI, "pci hif send complete check\n");
877 * Decide whether to actually poll for completions, or just
878 * wait for a later chance.
879 * If there seem to be plenty of resources left, then just wait
880 * since checking involves reading a CE register, which is a
881 * relatively expensive operation.
883 resources = ath10k_pci_hif_get_free_queue_number(ar, pipe);
886 * If at least 50% of the total resources are still available,
887 * don't bother checking again yet.
889 if (resources > (host_ce_config_wlan[pipe].src_nentries >> 1))
892 ath10k_ce_per_engine_service(ar, pipe);
895 static void ath10k_pci_hif_set_callbacks(struct ath10k *ar,
896 struct ath10k_hif_cb *callbacks)
898 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
900 ath10k_dbg(ATH10K_DBG_PCI, "pci hif set callbacks\n");
902 memcpy(&ar_pci->msg_callbacks_current, callbacks,
903 sizeof(ar_pci->msg_callbacks_current));
906 static int ath10k_pci_setup_ce_irq(struct ath10k *ar)
908 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
909 const struct ce_attr *attr;
910 struct ath10k_pci_pipe *pipe_info;
911 int pipe_num, disable_interrupts;
913 for (pipe_num = 0; pipe_num < CE_COUNT; pipe_num++) {
914 pipe_info = &ar_pci->pipe_info[pipe_num];
916 /* Handle Diagnostic CE specially */
917 if (pipe_info->ce_hdl == ar_pci->ce_diag)
920 attr = &host_ce_config_wlan[pipe_num];
922 if (attr->src_nentries) {
923 disable_interrupts = attr->flags & CE_ATTR_DIS_INTR;
924 ath10k_ce_send_cb_register(pipe_info->ce_hdl,
925 ath10k_pci_ce_send_done,
929 if (attr->dest_nentries)
930 ath10k_ce_recv_cb_register(pipe_info->ce_hdl,
931 ath10k_pci_ce_recv_data);
937 static void ath10k_pci_kill_tasklet(struct ath10k *ar)
939 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
942 tasklet_kill(&ar_pci->intr_tq);
943 tasklet_kill(&ar_pci->msi_fw_err);
944 tasklet_kill(&ar_pci->early_irq_tasklet);
946 for (i = 0; i < CE_COUNT; i++)
947 tasklet_kill(&ar_pci->pipe_info[i].intr);
950 /* TODO - temporary mapping while we have too few CE's */
951 static int ath10k_pci_hif_map_service_to_pipe(struct ath10k *ar,
952 u16 service_id, u8 *ul_pipe,
953 u8 *dl_pipe, int *ul_is_polled,
958 ath10k_dbg(ATH10K_DBG_PCI, "pci hif map service\n");
960 /* polling for received messages not supported */
963 switch (service_id) {
964 case ATH10K_HTC_SVC_ID_HTT_DATA_MSG:
966 * Host->target HTT gets its own pipe, so it can be polled
967 * while other pipes are interrupt driven.
971 * Use the same target->host pipe for HTC ctrl, HTC raw
977 case ATH10K_HTC_SVC_ID_RSVD_CTRL:
978 case ATH10K_HTC_SVC_ID_TEST_RAW_STREAMS:
980 * Note: HTC_RAW_STREAMS_SVC is currently unused, and
981 * HTC_CTRL_RSVD_SVC could share the same pipe as the
982 * WMI services. So, if another CE is needed, change
983 * this to *ul_pipe = 3, which frees up CE 0.
990 case ATH10K_HTC_SVC_ID_WMI_DATA_BK:
991 case ATH10K_HTC_SVC_ID_WMI_DATA_BE:
992 case ATH10K_HTC_SVC_ID_WMI_DATA_VI:
993 case ATH10K_HTC_SVC_ID_WMI_DATA_VO:
995 case ATH10K_HTC_SVC_ID_WMI_CONTROL:
1001 /* pipe 6 reserved */
1002 /* pipe 7 reserved */
1009 (host_ce_config_wlan[*ul_pipe].flags & CE_ATTR_DIS_INTR) != 0;
1014 static void ath10k_pci_hif_get_default_pipe(struct ath10k *ar,
1015 u8 *ul_pipe, u8 *dl_pipe)
1017 int ul_is_polled, dl_is_polled;
1019 ath10k_dbg(ATH10K_DBG_PCI, "pci hif get default pipe\n");
1021 (void)ath10k_pci_hif_map_service_to_pipe(ar,
1022 ATH10K_HTC_SVC_ID_RSVD_CTRL,
1029 static int ath10k_pci_post_rx_pipe(struct ath10k_pci_pipe *pipe_info,
1032 struct ath10k *ar = pipe_info->hif_ce_state;
1033 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1034 struct ath10k_ce_pipe *ce_state = pipe_info->ce_hdl;
1035 struct sk_buff *skb;
1039 if (pipe_info->buf_sz == 0)
1042 for (i = 0; i < num; i++) {
1043 skb = dev_alloc_skb(pipe_info->buf_sz);
1045 ath10k_warn("failed to allocate skbuff for pipe %d\n",
1051 WARN_ONCE((unsigned long)skb->data & 3, "unaligned skb");
1053 ce_data = dma_map_single(ar->dev, skb->data,
1054 skb->len + skb_tailroom(skb),
1057 if (unlikely(dma_mapping_error(ar->dev, ce_data))) {
1058 ath10k_warn("failed to DMA map sk_buff\n");
1059 dev_kfree_skb_any(skb);
1064 ATH10K_SKB_CB(skb)->paddr = ce_data;
1066 pci_dma_sync_single_for_device(ar_pci->pdev, ce_data,
1068 PCI_DMA_FROMDEVICE);
1070 ret = ath10k_ce_recv_buf_enqueue(ce_state, (void *)skb,
1073 ath10k_warn("failed to enqueue to pipe %d: %d\n",
1082 ath10k_pci_rx_pipe_cleanup(pipe_info);
1086 static int ath10k_pci_post_rx(struct ath10k *ar)
1088 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1089 struct ath10k_pci_pipe *pipe_info;
1090 const struct ce_attr *attr;
1091 int pipe_num, ret = 0;
1093 for (pipe_num = 0; pipe_num < CE_COUNT; pipe_num++) {
1094 pipe_info = &ar_pci->pipe_info[pipe_num];
1095 attr = &host_ce_config_wlan[pipe_num];
1097 if (attr->dest_nentries == 0)
1100 ret = ath10k_pci_post_rx_pipe(pipe_info,
1101 attr->dest_nentries - 1);
1103 ath10k_warn("failed to post RX buffer for pipe %d: %d\n",
1106 for (; pipe_num >= 0; pipe_num--) {
1107 pipe_info = &ar_pci->pipe_info[pipe_num];
1108 ath10k_pci_rx_pipe_cleanup(pipe_info);
1117 static int ath10k_pci_hif_start(struct ath10k *ar)
1119 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1122 ath10k_dbg(ATH10K_DBG_BOOT, "boot hif start\n");
1124 ath10k_pci_free_early_irq(ar);
1125 ath10k_pci_kill_tasklet(ar);
1127 ret = ath10k_pci_request_irq(ar);
1129 ath10k_warn("failed to post RX buffers for all pipes: %d\n",
1134 ret = ath10k_pci_setup_ce_irq(ar);
1136 ath10k_warn("failed to setup CE interrupts: %d\n", ret);
1140 /* Post buffers once to start things off. */
1141 ret = ath10k_pci_post_rx(ar);
1143 ath10k_warn("failed to post RX buffers for all pipes: %d\n",
1148 ar_pci->started = 1;
1152 ath10k_ce_disable_interrupts(ar);
1153 ath10k_pci_free_irq(ar);
1154 ath10k_pci_kill_tasklet(ar);
1156 /* Though there should be no interrupts (device was reset)
1157 * power_down() expects the early IRQ to be installed as per the
1158 * driver lifecycle. */
1159 ret_early = ath10k_pci_request_early_irq(ar);
1161 ath10k_warn("failed to re-enable early irq: %d\n", ret_early);
1166 static void ath10k_pci_rx_pipe_cleanup(struct ath10k_pci_pipe *pipe_info)
1169 struct ath10k_pci *ar_pci;
1170 struct ath10k_ce_pipe *ce_hdl;
1172 struct sk_buff *netbuf;
1175 buf_sz = pipe_info->buf_sz;
1177 /* Unused Copy Engine */
1181 ar = pipe_info->hif_ce_state;
1182 ar_pci = ath10k_pci_priv(ar);
1184 if (!ar_pci->started)
1187 ce_hdl = pipe_info->ce_hdl;
1189 while (ath10k_ce_revoke_recv_next(ce_hdl, (void **)&netbuf,
1191 dma_unmap_single(ar->dev, ATH10K_SKB_CB(netbuf)->paddr,
1192 netbuf->len + skb_tailroom(netbuf),
1194 dev_kfree_skb_any(netbuf);
1198 static void ath10k_pci_tx_pipe_cleanup(struct ath10k_pci_pipe *pipe_info)
1201 struct ath10k_pci *ar_pci;
1202 struct ath10k_ce_pipe *ce_hdl;
1203 struct sk_buff *netbuf;
1205 unsigned int nbytes;
1209 buf_sz = pipe_info->buf_sz;
1211 /* Unused Copy Engine */
1215 ar = pipe_info->hif_ce_state;
1216 ar_pci = ath10k_pci_priv(ar);
1218 if (!ar_pci->started)
1221 ce_hdl = pipe_info->ce_hdl;
1223 while (ath10k_ce_cancel_send_next(ce_hdl, (void **)&netbuf,
1224 &ce_data, &nbytes, &id) == 0) {
1225 /* no need to call tx completion for NULL pointers */
1229 ar_pci->msg_callbacks_current.tx_completion(ar,
1236 * Cleanup residual buffers for device shutdown:
1237 * buffers that were enqueued for receive
1238 * buffers that were to be sent
1239 * Note: Buffers that had completed but which were
1240 * not yet processed are on a completion queue. They
1241 * are handled when the completion thread shuts down.
1243 static void ath10k_pci_buffer_cleanup(struct ath10k *ar)
1245 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1248 for (pipe_num = 0; pipe_num < CE_COUNT; pipe_num++) {
1249 struct ath10k_pci_pipe *pipe_info;
1251 pipe_info = &ar_pci->pipe_info[pipe_num];
1252 ath10k_pci_rx_pipe_cleanup(pipe_info);
1253 ath10k_pci_tx_pipe_cleanup(pipe_info);
1257 static void ath10k_pci_ce_deinit(struct ath10k *ar)
1259 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1260 struct ath10k_pci_pipe *pipe_info;
1263 for (pipe_num = 0; pipe_num < CE_COUNT; pipe_num++) {
1264 pipe_info = &ar_pci->pipe_info[pipe_num];
1265 if (pipe_info->ce_hdl) {
1266 ath10k_ce_deinit(pipe_info->ce_hdl);
1267 pipe_info->ce_hdl = NULL;
1268 pipe_info->buf_sz = 0;
1273 static void ath10k_pci_hif_stop(struct ath10k *ar)
1275 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1278 ath10k_dbg(ATH10K_DBG_BOOT, "boot hif stop\n");
1280 ret = ath10k_ce_disable_interrupts(ar);
1282 ath10k_warn("failed to disable CE interrupts: %d\n", ret);
1284 ath10k_pci_free_irq(ar);
1285 ath10k_pci_kill_tasklet(ar);
1287 ret = ath10k_pci_request_early_irq(ar);
1289 ath10k_warn("failed to re-enable early irq: %d\n", ret);
1291 /* At this point, asynchronous threads are stopped, the target should
1292 * not DMA nor interrupt. We process the leftovers and then free
1293 * everything else up. */
1295 ath10k_pci_buffer_cleanup(ar);
1297 /* Make the sure the device won't access any structures on the host by
1298 * resetting it. The device was fed with PCI CE ringbuffer
1299 * configuration during init. If ringbuffers are freed and the device
1300 * were to access them this could lead to memory corruption on the
1302 ath10k_pci_warm_reset(ar);
1304 ar_pci->started = 0;
1307 static int ath10k_pci_hif_exchange_bmi_msg(struct ath10k *ar,
1308 void *req, u32 req_len,
1309 void *resp, u32 *resp_len)
1311 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1312 struct ath10k_pci_pipe *pci_tx = &ar_pci->pipe_info[BMI_CE_NUM_TO_TARG];
1313 struct ath10k_pci_pipe *pci_rx = &ar_pci->pipe_info[BMI_CE_NUM_TO_HOST];
1314 struct ath10k_ce_pipe *ce_tx = pci_tx->ce_hdl;
1315 struct ath10k_ce_pipe *ce_rx = pci_rx->ce_hdl;
1316 dma_addr_t req_paddr = 0;
1317 dma_addr_t resp_paddr = 0;
1318 struct bmi_xfer xfer = {};
1319 void *treq, *tresp = NULL;
1324 if (resp && !resp_len)
1327 if (resp && resp_len && *resp_len == 0)
1330 treq = kmemdup(req, req_len, GFP_KERNEL);
1334 req_paddr = dma_map_single(ar->dev, treq, req_len, DMA_TO_DEVICE);
1335 ret = dma_mapping_error(ar->dev, req_paddr);
1339 if (resp && resp_len) {
1340 tresp = kzalloc(*resp_len, GFP_KERNEL);
1346 resp_paddr = dma_map_single(ar->dev, tresp, *resp_len,
1348 ret = dma_mapping_error(ar->dev, resp_paddr);
1352 xfer.wait_for_resp = true;
1355 ath10k_ce_recv_buf_enqueue(ce_rx, &xfer, resp_paddr);
1358 init_completion(&xfer.done);
1360 ret = ath10k_ce_send(ce_tx, &xfer, req_paddr, req_len, -1, 0);
1364 ret = ath10k_pci_bmi_wait(ce_tx, ce_rx, &xfer);
1367 unsigned int unused_nbytes;
1368 unsigned int unused_id;
1370 ath10k_ce_cancel_send_next(ce_tx, NULL, &unused_buffer,
1371 &unused_nbytes, &unused_id);
1373 /* non-zero means we did not time out */
1381 ath10k_ce_revoke_recv_next(ce_rx, NULL, &unused_buffer);
1382 dma_unmap_single(ar->dev, resp_paddr,
1383 *resp_len, DMA_FROM_DEVICE);
1386 dma_unmap_single(ar->dev, req_paddr, req_len, DMA_TO_DEVICE);
1388 if (ret == 0 && resp_len) {
1389 *resp_len = min(*resp_len, xfer.resp_len);
1390 memcpy(resp, tresp, xfer.resp_len);
1399 static void ath10k_pci_bmi_send_done(struct ath10k_ce_pipe *ce_state)
1401 struct bmi_xfer *xfer;
1403 unsigned int nbytes;
1404 unsigned int transfer_id;
1406 if (ath10k_ce_completed_send_next(ce_state, (void **)&xfer, &ce_data,
1407 &nbytes, &transfer_id))
1410 if (xfer->wait_for_resp)
1413 complete(&xfer->done);
1416 static void ath10k_pci_bmi_recv_data(struct ath10k_ce_pipe *ce_state)
1418 struct bmi_xfer *xfer;
1420 unsigned int nbytes;
1421 unsigned int transfer_id;
1424 if (ath10k_ce_completed_recv_next(ce_state, (void **)&xfer, &ce_data,
1425 &nbytes, &transfer_id, &flags))
1428 if (!xfer->wait_for_resp) {
1429 ath10k_warn("unexpected: BMI data received; ignoring\n");
1433 xfer->resp_len = nbytes;
1434 complete(&xfer->done);
1437 static int ath10k_pci_bmi_wait(struct ath10k_ce_pipe *tx_pipe,
1438 struct ath10k_ce_pipe *rx_pipe,
1439 struct bmi_xfer *xfer)
1441 unsigned long timeout = jiffies + BMI_COMMUNICATION_TIMEOUT_HZ;
1443 while (time_before_eq(jiffies, timeout)) {
1444 ath10k_pci_bmi_send_done(tx_pipe);
1445 ath10k_pci_bmi_recv_data(rx_pipe);
1447 if (completion_done(&xfer->done))
1457 * Map from service/endpoint to Copy Engine.
1458 * This table is derived from the CE_PCI TABLE, above.
1459 * It is passed to the Target at startup for use by firmware.
1461 static const struct service_to_pipe target_service_to_ce_map_wlan[] = {
1463 ATH10K_HTC_SVC_ID_WMI_DATA_VO,
1464 PIPEDIR_OUT, /* out = UL = host -> target */
1468 ATH10K_HTC_SVC_ID_WMI_DATA_VO,
1469 PIPEDIR_IN, /* in = DL = target -> host */
1473 ATH10K_HTC_SVC_ID_WMI_DATA_BK,
1474 PIPEDIR_OUT, /* out = UL = host -> target */
1478 ATH10K_HTC_SVC_ID_WMI_DATA_BK,
1479 PIPEDIR_IN, /* in = DL = target -> host */
1483 ATH10K_HTC_SVC_ID_WMI_DATA_BE,
1484 PIPEDIR_OUT, /* out = UL = host -> target */
1488 ATH10K_HTC_SVC_ID_WMI_DATA_BE,
1489 PIPEDIR_IN, /* in = DL = target -> host */
1493 ATH10K_HTC_SVC_ID_WMI_DATA_VI,
1494 PIPEDIR_OUT, /* out = UL = host -> target */
1498 ATH10K_HTC_SVC_ID_WMI_DATA_VI,
1499 PIPEDIR_IN, /* in = DL = target -> host */
1503 ATH10K_HTC_SVC_ID_WMI_CONTROL,
1504 PIPEDIR_OUT, /* out = UL = host -> target */
1508 ATH10K_HTC_SVC_ID_WMI_CONTROL,
1509 PIPEDIR_IN, /* in = DL = target -> host */
1513 ATH10K_HTC_SVC_ID_RSVD_CTRL,
1514 PIPEDIR_OUT, /* out = UL = host -> target */
1515 0, /* could be moved to 3 (share with WMI) */
1518 ATH10K_HTC_SVC_ID_RSVD_CTRL,
1519 PIPEDIR_IN, /* in = DL = target -> host */
1523 ATH10K_HTC_SVC_ID_TEST_RAW_STREAMS, /* not currently used */
1524 PIPEDIR_OUT, /* out = UL = host -> target */
1528 ATH10K_HTC_SVC_ID_TEST_RAW_STREAMS, /* not currently used */
1529 PIPEDIR_IN, /* in = DL = target -> host */
1533 ATH10K_HTC_SVC_ID_HTT_DATA_MSG,
1534 PIPEDIR_OUT, /* out = UL = host -> target */
1538 ATH10K_HTC_SVC_ID_HTT_DATA_MSG,
1539 PIPEDIR_IN, /* in = DL = target -> host */
1543 /* (Additions here) */
1545 { /* Must be last */
1553 * Send an interrupt to the device to wake up the Target CPU
1554 * so it has an opportunity to notice any changed state.
1556 static int ath10k_pci_wake_target_cpu(struct ath10k *ar)
/* Read-modify-write of CORE_CTRL over the diagnostic window: OR in
 * CORE_CTRL_CPU_INTR_MASK to raise the A_INUM_FIRMWARE interrupt on the
 * target CPU.  Presumably returns 0 on success or the negative diag
 * read/write error — return statements are not visible in this excerpt. */
1561 ret = ath10k_pci_diag_read_access(ar, SOC_CORE_BASE_ADDRESS |
1565 ath10k_warn("failed to read core_ctrl: %d\n", ret);
1569 /* A_INUM_FIRMWARE interrupt to Target CPU */
1570 core_ctrl |= CORE_CTRL_CPU_INTR_MASK;
1572 ret = ath10k_pci_diag_write_access(ar, SOC_CORE_BASE_ADDRESS |
1576 ath10k_warn("failed to set target CPU interrupt mask: %d\n",
/* Download the host's CE pipe configuration and service-to-CE map into
 * target memory, disable PCIe L1, configure early IRAM allocation, and
 * finally set HI_OPTION_EARLY_CFG_DONE so the target proceeds with its
 * own initialisation.  All accesses go through the diagnostic window. */
1584 static int ath10k_pci_init_config(struct ath10k *ar)
1586 u32 interconnect_targ_addr;
1587 u32 pcie_state_targ_addr = 0;
1588 u32 pipe_cfg_targ_addr = 0;
1589 u32 svc_to_pipe_map = 0;
1590 u32 pcie_config_flags = 0;
1592 u32 ealloc_targ_addr;
1594 u32 flag2_targ_addr;
1597 /* Download to Target the CE Config and the service-to-CE map */
1598 interconnect_targ_addr =
1599 host_interest_item_address(HI_ITEM(hi_interconnect_state));
1601 /* Supply Target-side CE configuration */
1602 ret = ath10k_pci_diag_read_access(ar, interconnect_targ_addr,
1603 &pcie_state_targ_addr);
1605 ath10k_err("Failed to get pcie state addr: %d\n", ret);
1609 if (pcie_state_targ_addr == 0) {
1611 ath10k_err("Invalid pcie state addr\n");
1615 ret = ath10k_pci_diag_read_access(ar, pcie_state_targ_addr +
1616 offsetof(struct pcie_state,
1618 &pipe_cfg_targ_addr);
1620 ath10k_err("Failed to get pipe cfg addr: %d\n", ret);
1624 if (pipe_cfg_targ_addr == 0) {
1626 ath10k_err("Invalid pipe cfg addr\n");
1630 ret = ath10k_pci_diag_write_mem(ar, pipe_cfg_targ_addr,
1631 target_ce_config_wlan,
1632 sizeof(target_ce_config_wlan));
1635 ath10k_err("Failed to write pipe cfg: %d\n", ret);
1639 ret = ath10k_pci_diag_read_access(ar, pcie_state_targ_addr +
1640 offsetof(struct pcie_state,
1644 ath10k_err("Failed to get svc/pipe map: %d\n", ret);
1648 if (svc_to_pipe_map == 0) {
1650 ath10k_err("Invalid svc_to_pipe map\n");
1654 ret = ath10k_pci_diag_write_mem(ar, svc_to_pipe_map,
1655 target_service_to_ce_map_wlan,
1656 sizeof(target_service_to_ce_map_wlan));
1658 ath10k_err("Failed to write svc/pipe map: %d\n", ret);
1662 ret = ath10k_pci_diag_read_access(ar, pcie_state_targ_addr +
1663 offsetof(struct pcie_state,
1665 &pcie_config_flags);
1667 ath10k_err("Failed to get pcie config_flags: %d\n", ret);
/* Clear the L1 ASPM enable flag before writing the config back. */
1671 pcie_config_flags &= ~PCIE_CONFIG_FLAG_ENABLE_L1;
1673 ret = ath10k_pci_diag_write_mem(ar, pcie_state_targ_addr +
1674 offsetof(struct pcie_state, config_flags),
1676 sizeof(pcie_config_flags));
1678 ath10k_err("Failed to write pcie config_flags: %d\n", ret);
1682 /* configure early allocation */
1683 ealloc_targ_addr = host_interest_item_address(HI_ITEM(hi_early_alloc));
1685 ret = ath10k_pci_diag_read_access(ar, ealloc_targ_addr, &ealloc_value);
/* NOTE(review): "Faile" below is a typo for "Failed" in the original
 * error string; left untouched here as it is runtime-visible text. */
1687 ath10k_err("Faile to get early alloc val: %d\n", ret);
1691 /* first bank is switched to IRAM */
1692 ealloc_value |= ((HI_EARLY_ALLOC_MAGIC << HI_EARLY_ALLOC_MAGIC_SHIFT) &
1693 HI_EARLY_ALLOC_MAGIC_MASK);
1694 ealloc_value |= ((1 << HI_EARLY_ALLOC_IRAM_BANKS_SHIFT) &
1695 HI_EARLY_ALLOC_IRAM_BANKS_MASK);
1697 ret = ath10k_pci_diag_write_access(ar, ealloc_targ_addr, ealloc_value);
1699 ath10k_err("Failed to set early alloc val: %d\n", ret);
1703 /* Tell Target to proceed with initialization */
1704 flag2_targ_addr = host_interest_item_address(HI_ITEM(hi_option_flag2));
1706 ret = ath10k_pci_diag_read_access(ar, flag2_targ_addr, &flag2_value);
1708 ath10k_err("Failed to get option val: %d\n", ret);
1712 flag2_value |= HI_OPTION_EARLY_CFG_DONE;
1714 ret = ath10k_pci_diag_write_access(ar, flag2_targ_addr, flag2_value);
1716 ath10k_err("Failed to set option val: %d\n", ret);
/* Initialise every Copy Engine (0..CE_COUNT-1) with its host-side
 * attributes.  On any failure, previously-initialised CEs are torn down
 * via ath10k_pci_ce_deinit().  The last CE is reserved for diagnostic
 * window access (ar_pci->ce_diag). */
1725 static int ath10k_pci_ce_init(struct ath10k *ar)
1727 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1728 struct ath10k_pci_pipe *pipe_info;
1729 const struct ce_attr *attr;
1732 for (pipe_num = 0; pipe_num < CE_COUNT; pipe_num++) {
1733 pipe_info = &ar_pci->pipe_info[pipe_num];
1734 pipe_info->pipe_num = pipe_num;
1735 pipe_info->hif_ce_state = ar;
1736 attr = &host_ce_config_wlan[pipe_num];
1738 pipe_info->ce_hdl = ath10k_ce_init(ar, pipe_num, attr);
1739 if (pipe_info->ce_hdl == NULL) {
1740 ath10k_err("failed to initialize CE for pipe: %d\n",
1743 /* It is safe to call it here. It checks if ce_hdl is
1744 * valid for each pipe */
1745 ath10k_pci_ce_deinit(ar);
1749 if (pipe_num == CE_COUNT - 1) {
1751 * Reserve the ultimate CE for
1752 * diagnostic Window support
1754 ar_pci->ce_diag = pipe_info->ce_hdl;
/* Cache the max source buffer size for this pipe. */
1758 pipe_info->buf_sz = (size_t) (attr->src_sz_max);
/* Service a firmware (target-crash) indication: if FW_IND_EVENT_PENDING
 * is set, acknowledge it and either dump diagnostics (if HIF has been
 * started) or just warn about an early event.  Wakes the target around
 * the register accesses. */
1764 static void ath10k_pci_fw_interrupt_handler(struct ath10k *ar)
1766 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1769 ath10k_pci_wake(ar);
1771 fw_indicator = ath10k_pci_read32(ar, FW_INDICATOR_ADDRESS);
1773 if (fw_indicator & FW_IND_EVENT_PENDING) {
1774 /* ACK: clear Target-side pending event */
1775 ath10k_pci_write32(ar, FW_INDICATOR_ADDRESS,
1776 fw_indicator & ~FW_IND_EVENT_PENDING);
1778 if (ar_pci->started) {
1779 ath10k_pci_hif_dump_area(ar);
1782 * Probable Target failure before we're prepared
1783 * to handle it. Generally unexpected.
1785 ath10k_warn("early firmware event indicated\n");
1789 ath10k_pci_sleep(ar);
/* Warm-reset the target without a full PCIe cold reset: disable and clear
 * pending interrupts, clear the fw indicator, stop the LF timer, pulse
 * the CE reset bit, then assert the CPU warm-reset bit in
 * SOC_RESET_CONTROL.  Interrupt-cause registers are logged before and
 * after for debugging.  Wakes the target first and sleeps it at the end. */
1792 static int ath10k_pci_warm_reset(struct ath10k *ar)
1797 ath10k_dbg(ATH10K_DBG_BOOT, "boot warm reset\n");
1799 ret = ath10k_do_pci_wake(ar);
1801 ath10k_err("failed to wake up target: %d\n", ret);
1806 val = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
1807 PCIE_INTR_CAUSE_ADDRESS);
1808 ath10k_dbg(ATH10K_DBG_BOOT, "boot host cpu intr cause: 0x%08x\n", val);
1810 val = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
1812 ath10k_dbg(ATH10K_DBG_BOOT, "boot target cpu intr cause: 0x%08x\n",
1815 /* disable pending irqs */
1816 ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS +
1817 PCIE_INTR_ENABLE_ADDRESS, 0);
1819 ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS +
1820 PCIE_INTR_CLR_ADDRESS, ~0);
1824 /* clear fw indicator */
1825 ath10k_pci_write32(ar, FW_INDICATOR_ADDRESS, 0);
1827 /* clear target LF timer interrupts */
1828 val = ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS +
1829 SOC_LF_TIMER_CONTROL0_ADDRESS);
1830 ath10k_pci_write32(ar, RTC_SOC_BASE_ADDRESS +
1831 SOC_LF_TIMER_CONTROL0_ADDRESS,
1832 val & ~SOC_LF_TIMER_CONTROL0_ENABLE_MASK);
/* Assert, then de-assert, the Copy Engine reset bit. */
1835 val = ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS +
1836 SOC_RESET_CONTROL_ADDRESS);
1837 ath10k_pci_write32(ar, RTC_SOC_BASE_ADDRESS + SOC_RESET_CONTROL_ADDRESS,
1838 val | SOC_RESET_CONTROL_CE_RST_MASK);
1839 val = ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS +
1840 SOC_RESET_CONTROL_ADDRESS);
1844 ath10k_pci_write32(ar, RTC_SOC_BASE_ADDRESS + SOC_RESET_CONTROL_ADDRESS,
1845 val & ~SOC_RESET_CONTROL_CE_RST_MASK);
1846 val = ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS +
1847 SOC_RESET_CONTROL_ADDRESS);
1851 val = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
1852 PCIE_INTR_CAUSE_ADDRESS);
1853 ath10k_dbg(ATH10K_DBG_BOOT, "boot host cpu intr cause: 0x%08x\n", val);
1855 val = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
1857 ath10k_dbg(ATH10K_DBG_BOOT, "boot target cpu intr cause: 0x%08x\n",
1860 /* CPU warm reset */
1861 val = ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS +
1862 SOC_RESET_CONTROL_ADDRESS);
1863 ath10k_pci_write32(ar, RTC_SOC_BASE_ADDRESS + SOC_RESET_CONTROL_ADDRESS,
1864 val | SOC_RESET_CONTROL_CPU_WARM_RST_MASK);
1866 val = ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS +
1867 SOC_RESET_CONTROL_ADDRESS);
1868 ath10k_dbg(ATH10K_DBG_BOOT, "boot target reset state: 0x%08x\n", val);
1872 ath10k_dbg(ATH10K_DBG_BOOT, "boot warm reset complete\n");
1874 ath10k_do_pci_sleep(ar);
/* Bring the target up: reset it (cold or warm per @cold_reset), force it
 * awake unless SoC power-save is enabled, initialise CEs, mask CE irqs,
 * set up host interrupts, wait for firmware init and push the boot-time
 * config.  Error paths unwind in reverse order (early irq -> irq ->
 * CE -> warm reset -> sleep). */
1878 static int __ath10k_pci_hif_power_up(struct ath10k *ar, bool cold_reset)
1880 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1881 const char *irq_mode;
1885 * Bring the target up cleanly.
1887 * The target may be in an undefined state with an AUX-powered Target
1888 * and a Host in WoW mode. If the Host crashes, loses power, or is
1889 * restarted (without unloading the driver) then the Target is left
1890 * (aux) powered and running. On a subsequent driver load, the Target
1891 * is in an unexpected state. We try to catch that here in order to
1892 * reset the Target and retry the probe.
1895 ret = ath10k_pci_cold_reset(ar);
1897 ret = ath10k_pci_warm_reset(ar);
1900 ath10k_err("failed to reset target: %d\n", ret);
1904 if (!test_bit(ATH10K_PCI_FEATURE_SOC_POWER_SAVE, ar_pci->features))
1905 /* Force AWAKE forever */
1906 ath10k_do_pci_wake(ar);
1908 ret = ath10k_pci_ce_init(ar);
1910 ath10k_err("failed to initialize CE: %d\n", ret);
1914 ret = ath10k_ce_disable_interrupts(ar);
1916 ath10k_err("failed to disable CE interrupts: %d\n", ret);
1920 ret = ath10k_pci_init_irq(ar);
1922 ath10k_err("failed to init irqs: %d\n", ret);
1926 ret = ath10k_pci_request_early_irq(ar);
1928 ath10k_err("failed to request early irq: %d\n", ret);
1929 goto err_deinit_irq;
1932 ret = ath10k_pci_wait_for_target_init(ar);
1934 ath10k_err("failed to wait for target to init: %d\n", ret);
1935 goto err_free_early_irq;
1938 ret = ath10k_pci_init_config(ar);
1940 ath10k_err("failed to setup init config: %d\n", ret);
1941 goto err_free_early_irq;
1944 ret = ath10k_pci_wake_target_cpu(ar);
1946 ath10k_err("could not wake up target CPU: %d\n", ret);
1947 goto err_free_early_irq;
/* Map num_msi_intrs to a human-readable irq-mode string for the
 * one-time boot info message below. */
1950 if (ar_pci->num_msi_intrs > 1)
1952 else if (ar_pci->num_msi_intrs == 1)
1955 irq_mode = "legacy";
1957 if (!test_bit(ATH10K_FLAG_FIRST_BOOT_DONE, &ar->dev_flags))
1958 ath10k_info("pci irq %s irq_mode %d reset_mode %d\n",
1959 irq_mode, ath10k_pci_irq_mode,
1960 ath10k_pci_reset_mode);
/* Error unwind labels (goto targets) follow. */
1965 ath10k_pci_free_early_irq(ar);
1967 ath10k_pci_deinit_irq(ar);
1969 ath10k_pci_ce_deinit(ar);
1970 ath10k_pci_warm_reset(ar);
1972 if (!test_bit(ATH10K_PCI_FEATURE_SOC_POWER_SAVE, ar_pci->features))
1973 ath10k_do_pci_sleep(ar);
/* HIF power-up entry point: try a warm reset first (safer on CUS232 v2
 * hardware), then fall back to cold reset unless the reset_mode module
 * parameter forbids it. */
1978 static int ath10k_pci_hif_power_up(struct ath10k *ar)
1982 ath10k_dbg(ATH10K_DBG_BOOT, "boot hif power up\n");
1985 * Hardware CUS232 version 2 has some issues with cold reset and the
1986 * preferred (and safer) way to perform a device reset is through a
1989 * Warm reset doesn't always work though (notably after a firmware
1990 * crash) so fall back to cold reset if necessary.
1992 ret = __ath10k_pci_hif_power_up(ar, false);
1994 ath10k_warn("failed to power up target using warm reset: %d\n",
1997 if (ath10k_pci_reset_mode == ATH10K_PCI_RESET_WARM_ONLY)
2000 ath10k_warn("trying cold reset\n");
2002 ret = __ath10k_pci_hif_power_up(ar, true);
2004 ath10k_err("failed to power up target using cold reset too (%d)\n",
/* HIF power-down: release the early irq, kill tasklets, tear down irqs,
 * warm-reset the target, deinit the CEs, and (if force-awake was used)
 * let the SoC sleep again. */
2013 static void ath10k_pci_hif_power_down(struct ath10k *ar)
2015 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2017 ath10k_dbg(ATH10K_DBG_BOOT, "boot hif power down\n");
2019 ath10k_pci_free_early_irq(ar);
2020 ath10k_pci_kill_tasklet(ar);
2021 ath10k_pci_deinit_irq(ar);
2022 ath10k_pci_warm_reset(ar);
2024 ath10k_pci_ce_deinit(ar);
2025 if (!test_bit(ATH10K_PCI_FEATURE_SOC_POWER_SAVE, ar_pci->features))
2026 ath10k_do_pci_sleep(ar);
2031 #define ATH10K_PCI_PM_CONTROL 0x44
/* HIF suspend: if the device is not already in power state D3 (0x3 in
 * the low byte of the PM control register at config offset 0x44), save
 * PCI state, disable the device, and write D3hot into the PM register. */
2033 static int ath10k_pci_hif_suspend(struct ath10k *ar)
2035 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2036 struct pci_dev *pdev = ar_pci->pdev;
2039 pci_read_config_dword(pdev, ATH10K_PCI_PM_CONTROL, &val);
2041 if ((val & 0x000000ff) != 0x3) {
2042 pci_save_state(pdev);
2043 pci_disable_device(pdev);
2044 pci_write_config_dword(pdev, ATH10K_PCI_PM_CONTROL,
2045 (val & 0xffffff00) | 0x03);
/* HIF resume: if the device is not in D0 (low PM byte non-zero), restore
 * PCI state and clear the power state, then re-disable the RETRY_TIMEOUT
 * register (config offset 0x41, read via the dword at 0x40) which a
 * suspend/resume cycle resets. */
2053 static int ath10k_pci_hif_resume(struct ath10k *ar)
2055 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2056 struct pci_dev *pdev = ar_pci->pdev;
2057 pci_read_config_dword(pdev, ATH10K_PCI_PM_CONTROL, &val);
2059 if ((val & 0x000000ff) != 0) {
2060 pci_restore_state(pdev);
2061 pci_write_config_dword(pdev, ATH10K_PCI_PM_CONTROL,
2064 * Suspend/Resume resets the PCI configuration space,
2065 * so we have to re-disable the RETRY_TIMEOUT register (0x41)
2066 * to keep PCI Tx retries from interfering with C3 CPU state
2068 pci_read_config_dword(pdev, 0x40, &val);
2070 if ((val & 0x0000ff00) != 0)
2071 pci_write_config_dword(pdev, 0x40, val & 0xffff00ff);
/* HIF operation table wiring the generic ath10k core to the PCI
 * transport implementations in this file. */
2078 static const struct ath10k_hif_ops ath10k_pci_hif_ops = {
2079 .tx_sg = ath10k_pci_hif_tx_sg,
2080 .exchange_bmi_msg = ath10k_pci_hif_exchange_bmi_msg,
2081 .start = ath10k_pci_hif_start,
2082 .stop = ath10k_pci_hif_stop,
2083 .map_service_to_pipe = ath10k_pci_hif_map_service_to_pipe,
2084 .get_default_pipe = ath10k_pci_hif_get_default_pipe,
2085 .send_complete_check = ath10k_pci_hif_send_complete_check,
2086 .set_callbacks = ath10k_pci_hif_set_callbacks,
2087 .get_free_queue_number = ath10k_pci_hif_get_free_queue_number,
2088 .power_up = ath10k_pci_hif_power_up,
2089 .power_down = ath10k_pci_hif_power_down,
2091 .suspend = ath10k_pci_hif_suspend,
2092 .resume = ath10k_pci_hif_resume,
/* Per-CE tasklet: @ptr is the ath10k_pci_pipe whose engine needs
 * servicing. */
2096 static void ath10k_pci_ce_tasklet(unsigned long ptr)
2098 struct ath10k_pci_pipe *pipe = (struct ath10k_pci_pipe *)ptr;
2099 struct ath10k_pci *ar_pci = pipe->ar_pci;
2101 ath10k_ce_per_engine_service(ar_pci->ar, pipe->pipe_num);
/* Tasklet scheduled from the MSI firmware-error vector; defers the
 * firmware-indication handling out of hard-irq context. */
2104 static void ath10k_msi_err_tasklet(unsigned long data)
2106 struct ath10k *ar = (struct ath10k *)data;
2108 ath10k_pci_fw_interrupt_handler(ar);
2112 * Handler for a per-engine interrupt on a PARTICULAR CE.
2113 * This is used in cases where each CE has a private MSI interrupt.
2115 static irqreturn_t ath10k_pci_per_engine_handler(int irq, void *arg)
2117 struct ath10k *ar = arg;
2118 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
/* Derive the CE id from the MSI vector offset relative to the base irq. */
2119 int ce_id = irq - ar_pci->pdev->irq - MSI_ASSIGN_CE_INITIAL;
2121 if (ce_id < 0 || ce_id >= ARRAY_SIZE(ar_pci->pipe_info)) {
2122 ath10k_warn("unexpected/invalid irq %d ce_id %d\n", irq, ce_id);
2127 * NOTE: We are able to derive ce_id from irq because we
2128 * use a one-to-one mapping for CE's 0..5.
2129 * CE's 6 & 7 do not use interrupts at all.
2131 * This mapping must be kept in sync with the mapping
2134 tasklet_schedule(&ar_pci->pipe_info[ce_id].intr);
/* Hard-irq handler for the dedicated MSI firmware vector: just schedule
 * the error tasklet. */
2138 static irqreturn_t ath10k_pci_msi_fw_handler(int irq, void *arg)
2140 struct ath10k *ar = arg;
2141 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2143 tasklet_schedule(&ar_pci->msi_fw_err);
2148 * Top-level interrupt handler for all PCI interrupts from a Target.
2149 * When a block of MSI interrupts is allocated, this top-level handler
2150 * is not used; instead, we directly call the correct sub-handler.
2152 static irqreturn_t ath10k_pci_interrupt_handler(int irq, void *arg)
2154 struct ath10k *ar = arg;
2155 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
/* Legacy (shared line) mode: check the irq is really ours, then mask
 * and clear it; it is re-enabled by ath10k_pci_tasklet(). */
2157 if (ar_pci->num_msi_intrs == 0) {
2158 if (!ath10k_pci_irq_pending(ar))
2161 ath10k_pci_disable_and_clear_legacy_irq(ar);
2164 tasklet_schedule(&ar_pci->intr_tq);
/* Boot-time interrupt tasklet: only acknowledges a pending firmware
 * crash indication (full diagnostic structures are not available yet),
 * then re-enables the legacy irq. */
2169 static void ath10k_pci_early_irq_tasklet(unsigned long data)
2171 struct ath10k *ar = (struct ath10k *)data;
2175 ret = ath10k_pci_wake(ar);
2177 ath10k_warn("failed to wake target in early irq tasklet: %d\n",
2182 fw_ind = ath10k_pci_read32(ar, FW_INDICATOR_ADDRESS);
2183 if (fw_ind & FW_IND_EVENT_PENDING) {
2184 ath10k_pci_write32(ar, FW_INDICATOR_ADDRESS,
2185 fw_ind & ~FW_IND_EVENT_PENDING);
2187 /* Some structures are unavailable during early boot or at
2188 * driver teardown so just print that the device has crashed. */
2189 ath10k_warn("device crashed - no diagnostics available\n");
2192 ath10k_pci_sleep(ar);
2193 ath10k_pci_enable_legacy_irq(ar);
/* Main interrupt tasklet: handle any firmware indication, service all
 * copy engines, and re-enable the legacy irq that the hard-irq handler
 * masked (legacy mode only). */
2196 static void ath10k_pci_tasklet(unsigned long data)
2198 struct ath10k *ar = (struct ath10k *)data;
2199 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2201 ath10k_pci_fw_interrupt_handler(ar); /* FIXME: Handle FW error */
2202 ath10k_ce_per_engine_service_any(ar);
2204 /* Re-enable legacy irq that was disabled in the irq handler */
2205 if (ar_pci->num_msi_intrs == 0)
2206 ath10k_pci_enable_legacy_irq(ar);
/* Request the full MSI-X-style block of vectors: one firmware vector
 * (MSI_ASSIGN_FW) plus one per CE.  On failure mid-loop, every vector
 * requested so far is freed in reverse before bailing out. */
2209 static int ath10k_pci_request_irq_msix(struct ath10k *ar)
2211 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2214 ret = request_irq(ar_pci->pdev->irq + MSI_ASSIGN_FW,
2215 ath10k_pci_msi_fw_handler,
2216 IRQF_SHARED, "ath10k_pci", ar);
2218 ath10k_warn("failed to request MSI-X fw irq %d: %d\n",
2219 ar_pci->pdev->irq + MSI_ASSIGN_FW, ret);
2223 for (i = MSI_ASSIGN_CE_INITIAL; i <= MSI_ASSIGN_CE_MAX; i++) {
2224 ret = request_irq(ar_pci->pdev->irq + i,
2225 ath10k_pci_per_engine_handler,
2226 IRQF_SHARED, "ath10k_pci", ar);
2228 ath10k_warn("failed to request MSI-X ce irq %d: %d\n",
2229 ar_pci->pdev->irq + i, ret);
/* Unwind: free the CE vectors granted so far, then the fw vector. */
2231 for (i--; i >= MSI_ASSIGN_CE_INITIAL; i--)
2232 free_irq(ar_pci->pdev->irq + i, ar);
2234 free_irq(ar_pci->pdev->irq + MSI_ASSIGN_FW, ar);
/* Request the single shared MSI vector routed to the top-level handler. */
2242 static int ath10k_pci_request_irq_msi(struct ath10k *ar)
2244 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2247 ret = request_irq(ar_pci->pdev->irq,
2248 ath10k_pci_interrupt_handler,
2249 IRQF_SHARED, "ath10k_pci", ar);
2251 ath10k_warn("failed to request MSI irq %d: %d\n",
2252 ar_pci->pdev->irq, ret);
/* Request the shared legacy (INTx) line routed to the top-level handler. */
2259 static int ath10k_pci_request_irq_legacy(struct ath10k *ar)
2261 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2264 ret = request_irq(ar_pci->pdev->irq,
2265 ath10k_pci_interrupt_handler,
2266 IRQF_SHARED, "ath10k_pci", ar);
2268 ath10k_warn("failed to request legacy irq %d: %d\n",
2269 ar_pci->pdev->irq, ret);
/* Dispatch irq registration by the number of MSI vectors that
 * ath10k_pci_init_irq() managed to allocate (0 = legacy, 1 = MSI,
 * MSI_NUM_REQUEST = per-CE block). */
2276 static int ath10k_pci_request_irq(struct ath10k *ar)
2278 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2280 switch (ar_pci->num_msi_intrs) {
2282 return ath10k_pci_request_irq_legacy(ar);
2284 return ath10k_pci_request_irq_msi(ar);
2285 case MSI_NUM_REQUEST:
2286 return ath10k_pci_request_irq_msix(ar);
2289 ath10k_warn("unknown irq configuration upon request\n");
/* Free every irq vector registered by ath10k_pci_request_irq(). */
2293 static void ath10k_pci_free_irq(struct ath10k *ar)
2295 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2298 /* There's at least one interrupt regardless of whether it's legacy INTR
2299 * or MSI or MSI-X */
2300 for (i = 0; i < max(1, ar_pci->num_msi_intrs); i++)
2301 free_irq(ar_pci->pdev->irq + i, ar);
/* Initialise the main, firmware-error and early-boot tasklets, plus one
 * per-pipe CE tasklet. */
2304 static void ath10k_pci_init_irq_tasklets(struct ath10k *ar)
2306 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2309 tasklet_init(&ar_pci->intr_tq, ath10k_pci_tasklet, (unsigned long)ar);
2310 tasklet_init(&ar_pci->msi_fw_err, ath10k_msi_err_tasklet,
2312 tasklet_init(&ar_pci->early_irq_tasklet, ath10k_pci_early_irq_tasklet,
2315 for (i = 0; i < CE_COUNT; i++) {
2316 ar_pci->pipe_info[i].ar_pci = ar_pci;
2317 tasklet_init(&ar_pci->pipe_info[i].intr, ath10k_pci_ce_tasklet,
2318 (unsigned long)&ar_pci->pipe_info[i]);
/* Choose and enable an interrupt delivery mode, honouring the irq_mode
 * module parameter: try the full MSI vector block (when MSI-X feature is
 * set), then single MSI, then fall back to legacy INTx.  In legacy mode,
 * enable fw + CE interrupt sources in PCIE_INTR_ENABLE. */
2322 static int ath10k_pci_init_irq(struct ath10k *ar)
2324 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2325 bool msix_supported = test_bit(ATH10K_PCI_FEATURE_MSI_X,
2329 ath10k_pci_init_irq_tasklets(ar);
2331 if (ath10k_pci_irq_mode != ATH10K_PCI_IRQ_AUTO &&
2332 !test_bit(ATH10K_FLAG_FIRST_BOOT_DONE, &ar->dev_flags))
2333 ath10k_info("limiting irq mode to: %d\n", ath10k_pci_irq_mode);
2336 if (ath10k_pci_irq_mode == ATH10K_PCI_IRQ_AUTO && msix_supported) {
2337 ar_pci->num_msi_intrs = MSI_NUM_REQUEST;
2338 ret = pci_enable_msi_range(ar_pci->pdev, ar_pci->num_msi_intrs,
2339 ar_pci->num_msi_intrs);
2347 if (ath10k_pci_irq_mode != ATH10K_PCI_IRQ_LEGACY) {
2348 ar_pci->num_msi_intrs = 1;
2349 ret = pci_enable_msi(ar_pci->pdev);
2358 * A potential race occurs here: The CORE_BASE write
2359 * depends on target correctly decoding AXI address but
2360 * host won't know when target writes BAR to CORE_CTRL.
2361 * This write might get lost if target has NOT written BAR.
2362 * For now, fix the race by repeating the write in below
2363 * synchronization checking. */
2364 ar_pci->num_msi_intrs = 0;
2366 ret = ath10k_pci_wake(ar);
2368 ath10k_warn("failed to wake target: %d\n", ret);
2372 ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + PCIE_INTR_ENABLE_ADDRESS,
2373 PCIE_INTR_FIRMWARE_MASK | PCIE_INTR_CE_MASK_ALL);
2374 ath10k_pci_sleep(ar);
/* Legacy-mode irq teardown: clear the PCIE_INTR_ENABLE register on the
 * target (requires a wake/sleep bracket). */
2379 static int ath10k_pci_deinit_irq_legacy(struct ath10k *ar)
2383 ret = ath10k_pci_wake(ar);
2385 ath10k_warn("failed to wake target: %d\n", ret);
2389 ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + PCIE_INTR_ENABLE_ADDRESS,
2391 ath10k_pci_sleep(ar);
/* Undo ath10k_pci_init_irq() according to the mode that was enabled:
 * legacy uses the register teardown above; MSI modes disable MSI on the
 * PCI device. */
2396 static int ath10k_pci_deinit_irq(struct ath10k *ar)
2398 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2400 switch (ar_pci->num_msi_intrs) {
2402 return ath10k_pci_deinit_irq_legacy(ar);
2405 case MSI_NUM_REQUEST:
2406 pci_disable_msi(ar_pci->pdev);
2409 pci_disable_msi(ar_pci->pdev);
2412 ath10k_warn("unknown irq configuration upon deinit\n");
/* Poll FW_INDICATOR until the firmware sets FW_IND_INITIALIZED, up to
 * ATH10K_PCI_TARGET_WAIT ms.  0xffffffff means the target is not
 * decoding reads (treated as fatal).  In legacy irq mode the
 * PCIE_INTR_ENABLE write is repeated each iteration to paper over the
 * CORE_BASE write race documented in ath10k_pci_init_irq(). */
2416 static int ath10k_pci_wait_for_target_init(struct ath10k *ar)
2418 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2419 unsigned long timeout;
2423 ath10k_dbg(ATH10K_DBG_BOOT, "boot waiting target to initialise\n");
2425 ret = ath10k_pci_wake(ar);
2427 ath10k_err("failed to wake up target for init: %d\n", ret);
2431 timeout = jiffies + msecs_to_jiffies(ATH10K_PCI_TARGET_WAIT);
2434 val = ath10k_pci_read32(ar, FW_INDICATOR_ADDRESS);
2436 ath10k_dbg(ATH10K_DBG_BOOT, "boot target indicator %x\n", val);
2438 /* target should never return this */
2439 if (val == 0xffffffff)
2442 if (val & FW_IND_INITIALIZED)
2445 if (ar_pci->num_msi_intrs == 0)
2446 /* Fix potential race by repeating CORE_BASE writes */
2447 ath10k_pci_soc_write32(ar, PCIE_INTR_ENABLE_ADDRESS,
2448 PCIE_INTR_FIRMWARE_MASK |
2449 PCIE_INTR_CE_MASK_ALL);
2452 } while (time_before(jiffies, timeout));
2454 if (val == 0xffffffff || !(val & FW_IND_INITIALIZED)) {
2455 ath10k_err("failed to receive initialized event from target: %08x\n",
2461 ath10k_dbg(ATH10K_DBG_BOOT, "boot target initialised\n");
2464 ath10k_pci_sleep(ar);
/* Full cold reset: assert SOC_GLOBAL_RESET (resets the whole target
 * including PCIe), poll RTC_STATE for the cold-reset bit, then de-assert
 * and poll for it to clear.  Bracketed by a forced wake/sleep. */
2468 static int ath10k_pci_cold_reset(struct ath10k *ar)
2473 ath10k_dbg(ATH10K_DBG_BOOT, "boot cold reset\n");
2475 ret = ath10k_do_pci_wake(ar);
2477 ath10k_err("failed to wake up target: %d\n",
2482 /* Put Target, including PCIe, into RESET. */
2483 val = ath10k_pci_reg_read32(ar, SOC_GLOBAL_RESET_ADDRESS);
2485 ath10k_pci_reg_write32(ar, SOC_GLOBAL_RESET_ADDRESS, val);
2487 for (i = 0; i < ATH_PCI_RESET_WAIT_MAX; i++) {
2488 if (ath10k_pci_reg_read32(ar, RTC_STATE_ADDRESS) &
2489 RTC_STATE_COLD_RESET_MASK)
2494 /* Pull Target, including PCIe, out of RESET. */
2496 ath10k_pci_reg_write32(ar, SOC_GLOBAL_RESET_ADDRESS, val)
2498 for (i = 0; i < ATH_PCI_RESET_WAIT_MAX; i++) {
2499 if (!(ath10k_pci_reg_read32(ar, RTC_STATE_ADDRESS) &
2500 RTC_STATE_COLD_RESET_MASK))
2505 ath10k_do_pci_sleep(ar);
2507 ath10k_dbg(ATH10K_DBG_BOOT, "boot cold reset complete\n");
/* Log (at BOOT debug level) every feature bit set in ar_pci->features. */
2512 static void ath10k_pci_dump_features(struct ath10k_pci *ar_pci)
2516 for (i = 0; i < ATH10K_PCI_FEATURE_COUNT; i++) {
2517 if (!test_bit(i, ar_pci->features))
2521 case ATH10K_PCI_FEATURE_MSI_X:
2522 ath10k_dbg(ATH10K_DBG_BOOT, "device supports MSI-X\n");
2524 case ATH10K_PCI_FEATURE_SOC_POWER_SAVE:
2525 ath10k_dbg(ATH10K_DBG_BOOT, "QCA98XX SoC power save enabled\n");
/* PCI probe: allocate the per-device ath10k_pci, set feature bits from
 * the device ID and module parameters, create the ath10k core, claim and
 * map BAR resources, restrict DMA to 32 bits, disable ASPM via config
 * space, read the chip id, and register with the core.  Error labels at
 * the bottom unwind in reverse acquisition order. */
2531 static int ath10k_pci_probe(struct pci_dev *pdev,
2532 const struct pci_device_id *pci_dev)
2537 struct ath10k_pci *ar_pci;
2538 u32 lcr_val, chip_id;
2540 ath10k_dbg(ATH10K_DBG_PCI, "pci probe\n");
2542 ar_pci = kzalloc(sizeof(*ar_pci), GFP_KERNEL);
2546 ar_pci->pdev = pdev;
2547 ar_pci->dev = &pdev->dev;
2549 switch (pci_dev->device) {
2550 case QCA988X_2_0_DEVICE_ID:
2551 set_bit(ATH10K_PCI_FEATURE_MSI_X, ar_pci->features);
2555 ath10k_err("Unknown device ID: %d\n", pci_dev->device);
2559 if (ath10k_pci_target_ps)
2560 set_bit(ATH10K_PCI_FEATURE_SOC_POWER_SAVE, ar_pci->features);
2562 ath10k_pci_dump_features(ar_pci);
2564 ar = ath10k_core_create(ar_pci, ar_pci->dev, &ath10k_pci_hif_ops);
2566 ath10k_err("failed to create driver core\n");
2572 atomic_set(&ar_pci->keep_awake_count, 0);
2574 pci_set_drvdata(pdev, ar);
2577 * Without any knowledge of the Host, the Target may have been reset or
2578 * power cycled and its Config Space may no longer reflect the PCI
2579 * address space that was assigned earlier by the PCI infrastructure.
2582 ret = pci_assign_resource(pdev, BAR_NUM);
2584 ath10k_err("failed to assign PCI space: %d\n", ret);
2588 ret = pci_enable_device(pdev);
2590 ath10k_err("failed to enable PCI device: %d\n", ret);
2594 /* Request MMIO resources */
2595 ret = pci_request_region(pdev, BAR_NUM, "ath");
2597 ath10k_err("failed to request MMIO region: %d\n", ret);
2602 * Target structures have a limit of 32 bit DMA pointers.
2603 * DMA pointers can be wider than 32 bits by default on some systems.
2605 ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
2607 ath10k_err("failed to set DMA mask to 32-bit: %d\n", ret);
2611 ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
2613 ath10k_err("failed to set consistent DMA mask to 32-bit\n");
2617 /* Set bus master bit in PCI_COMMAND to enable DMA */
2618 pci_set_master(pdev);
2621 * Temporary FIX: disable ASPM
2622 * Will be removed after the OTP is programmed
2624 pci_read_config_dword(pdev, 0x80, &lcr_val);
2625 pci_write_config_dword(pdev, 0x80, (lcr_val & 0xffffff00));
2627 /* Arrange for access to Target SoC registers. */
2628 mem = pci_iomap(pdev, BAR_NUM, 0);
2630 ath10k_err("failed to perform IOMAP for BAR%d\n", BAR_NUM);
2637 spin_lock_init(&ar_pci->ce_lock);
2639 ret = ath10k_do_pci_wake(ar);
2641 ath10k_err("Failed to get chip id: %d\n", ret);
2645 chip_id = ath10k_pci_soc_read32(ar, SOC_CHIP_ID_ADDRESS);
2647 ath10k_do_pci_sleep(ar);
2649 ath10k_dbg(ATH10K_DBG_BOOT, "boot pci_mem 0x%p\n", ar_pci->mem);
2651 ret = ath10k_core_register(ar, chip_id);
2653 ath10k_err("failed to register driver core: %d\n", ret);
/* Error unwind labels (goto targets) follow, in reverse order of
 * resource acquisition. */
2660 pci_iounmap(pdev, mem);
2662 pci_clear_master(pdev);
2664 pci_release_region(pdev, BAR_NUM);
2666 pci_disable_device(pdev);
2668 ath10k_core_destroy(ar);
2670 /* call HIF PCI free here */
/* PCI remove: kill the firmware-error tasklet, unregister from the core,
 * then release MMIO mapping, region, bus mastering and the device, and
 * finally destroy the core object. */
2676 static void ath10k_pci_remove(struct pci_dev *pdev)
2678 struct ath10k *ar = pci_get_drvdata(pdev);
2679 struct ath10k_pci *ar_pci;
2681 ath10k_dbg(ATH10K_DBG_PCI, "pci remove\n");
2686 ar_pci = ath10k_pci_priv(ar);
2691 tasklet_kill(&ar_pci->msi_fw_err);
2693 ath10k_core_unregister(ar);
2695 pci_iounmap(pdev, ar_pci->mem);
2696 pci_release_region(pdev, BAR_NUM);
2697 pci_clear_master(pdev);
2698 pci_disable_device(pdev);
2700 ath10k_core_destroy(ar);
2704 MODULE_DEVICE_TABLE(pci, ath10k_pci_id_table);
/* PCI driver descriptor registered in ath10k_pci_init(). */
2706 static struct pci_driver ath10k_pci_driver = {
2707 .name = "ath10k_pci",
2708 .id_table = ath10k_pci_id_table,
2709 .probe = ath10k_pci_probe,
2710 .remove = ath10k_pci_remove,
/* Module init: register the PCI driver; log on failure. */
2713 static int __init ath10k_pci_init(void)
2717 ret = pci_register_driver(&ath10k_pci_driver);
2719 ath10k_err("failed to register PCI driver: %d\n", ret);
2723 module_init(ath10k_pci_init);
/* Module exit: unregister the PCI driver. */
2725 static void __exit ath10k_pci_exit(void)
2727 pci_unregister_driver(&ath10k_pci_driver);
2730 module_exit(ath10k_pci_exit);
2732 MODULE_AUTHOR("Qualcomm Atheros");
2733 MODULE_DESCRIPTION("Driver support for Atheros QCA988X PCIe devices");
2734 MODULE_LICENSE("Dual BSD/GPL");
2735 MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" QCA988X_HW_2_0_FW_2_FILE);
2736 MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" QCA988X_HW_2_0_BOARD_DATA_FILE);