/******************************************************************************
 *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2007 - 2015 Intel Corporation. All rights reserved.
 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
 * USA
 *
 * The full GNU General Public License is included in this distribution
 * in the file called COPYING.
 *
 * Contact Information:
 *  Intel Linux Wireless <ilw@linux.intel.com>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 * BSD LICENSE
 *
 * Copyright(c) 2005 - 2015 Intel Corporation. All rights reserved.
 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  * Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  * Neither the name Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 *****************************************************************************/
#include <linux/pci.h>
#include <linux/pci-aspm.h>
#include <linux/interrupt.h>
#include <linux/debugfs.h>
#include <linux/sched.h>
#include <linux/bitops.h>
#include <linux/gfp.h>
#include <linux/vmalloc.h>

#include "iwl-drv.h"
#include "iwl-trans.h"
#include "iwl-csr.h"
#include "iwl-prph.h"
#include "iwl-scd.h"
#include "iwl-agn-hw.h"
#include "iwl-fw-error-dump.h"
#include "internal.h"
/* extended range in FW SRAM */
#define IWL_FW_MEM_EXTENDED_START	0x40000
#define IWL_FW_MEM_EXTENDED_END		0x57FFF
static void iwl_pcie_free_fw_monitor(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	if (!trans_pcie->fw_mon_page)
		return;

	dma_unmap_page(trans->dev, trans_pcie->fw_mon_phys,
		       trans_pcie->fw_mon_size, DMA_FROM_DEVICE);
	__free_pages(trans_pcie->fw_mon_page,
		     get_order(trans_pcie->fw_mon_size));
	trans_pcie->fw_mon_page = NULL;
	trans_pcie->fw_mon_phys = 0;
	trans_pcie->fw_mon_size = 0;
}
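
/*
 * Allocate the DRAM buffer the firmware debug monitor writes into. The
 * buffer size is a power of two between 2^11 and 2^26 bytes; if a
 * contiguous allocation (or its DMA mapping) fails at the requested
 * power, the loop below retries with the next smaller power of two.
 */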
static void iwl_pcie_alloc_fw_monitor(struct iwl_trans *trans, u8 max_power)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct page *page = NULL;
	dma_addr_t phys;
	u32 size = 0;
	u8 power;

	if (!max_power) {
		/* default max_power is maximum */
		max_power = 26;
	} else {
		max_power += 11;
	}

	if (WARN(max_power > 26,
		 "External buffer size for monitor is too big %d, check the FW TLV\n",
		 max_power))
		return;

	if (trans_pcie->fw_mon_page) {
		dma_sync_single_for_device(trans->dev, trans_pcie->fw_mon_phys,
					   trans_pcie->fw_mon_size,
					   DMA_FROM_DEVICE);
		return;
	}

	phys = 0;
	for (power = max_power; power >= 11; power--) {
		int order;

		size = BIT(power);
		order = get_order(size);
		page = alloc_pages(__GFP_COMP | __GFP_NOWARN | __GFP_ZERO,
				   order);
		if (!page)
			continue;

		phys = dma_map_page(trans->dev, page, 0, PAGE_SIZE << order,
				    DMA_FROM_DEVICE);
		if (dma_mapping_error(trans->dev, phys)) {
			__free_pages(page, order);
			page = NULL;
			continue;
		}
		IWL_INFO(trans,
			 "Allocated 0x%08x bytes (order %d) for firmware monitor.\n",
			 size, order);
		break;
	}

	if (WARN_ON_ONCE(!page))
		return;

	if (power != max_power)
		IWL_ERR(trans,
			"Sorry - debug buffer is only %luK while you requested %luK\n",
			(unsigned long)BIT(power - 10),
			(unsigned long)BIT(max_power - 10));

	trans_pcie->fw_mon_page = page;
	trans_pcie->fw_mon_phys = phys;
	trans_pcie->fw_mon_size = size;
}
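
/*
 * Shared (SHR) block registers are not directly memory-mapped; they are
 * reached through the HEEP control/data register pair, with an opcode in
 * the top bits of the control word (2 for a read, 3 for a write, as used
 * below).
 */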
static u32 iwl_trans_pcie_read_shr(struct iwl_trans *trans, u32 reg)
{
	iwl_write32(trans, HEEP_CTRL_WRD_PCIEX_CTRL_REG,
		    ((reg & 0x0000ffff) | (2 << 28)));
	return iwl_read32(trans, HEEP_CTRL_WRD_PCIEX_DATA_REG);
}

static void iwl_trans_pcie_write_shr(struct iwl_trans *trans, u32 reg, u32 val)
{
	iwl_write32(trans, HEEP_CTRL_WRD_PCIEX_DATA_REG, val);
	iwl_write32(trans, HEEP_CTRL_WRD_PCIEX_CTRL_REG,
		    ((reg & 0x0000ffff) | (3 << 28)));
}
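
/*
 * Select the APMG power source: VAUX when requested and the PCI device
 * can assert PME from D3cold, otherwise VMAIN.
 */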
static void iwl_pcie_set_pwr(struct iwl_trans *trans, bool vaux)
{
	if (trans->cfg->apmg_not_supported)
		return;

	if (vaux && pci_pme_capable(to_pci_dev(trans->dev), PCI_D3cold))
		iwl_set_bits_mask_prph(trans, APMG_PS_CTRL_REG,
				       APMG_PS_CTRL_VAL_PWR_SRC_VAUX,
				       ~APMG_PS_CTRL_MSK_PWR_SRC);
	else
		iwl_set_bits_mask_prph(trans, APMG_PS_CTRL_REG,
				       APMG_PS_CTRL_VAL_PWR_SRC_VMAIN,
				       ~APMG_PS_CTRL_MSK_PWR_SRC);
}
/* PCI registers */
#define PCI_CFG_RETRY_TIMEOUT	0x041

static void iwl_pcie_apm_config(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	u16 lctl;
	u16 cap;

	/*
	 * HW bug W/A for instability in PCIe bus L0S->L1 transition.
	 * Check if BIOS (or OS) enabled L1-ASPM on this device.
	 * If so (likely), disable L0S, so device moves directly L0->L1;
	 *    costs negligible amount of power savings.
	 * If not (unlikely), enable L0S, so there is at least some
	 *    power savings, even without L1.
	 */
	pcie_capability_read_word(trans_pcie->pci_dev, PCI_EXP_LNKCTL, &lctl);
	if (lctl & PCI_EXP_LNKCTL_ASPM_L1)
		iwl_set_bit(trans, CSR_GIO_REG, CSR_GIO_REG_VAL_L0S_ENABLED);
	else
		iwl_clear_bit(trans, CSR_GIO_REG, CSR_GIO_REG_VAL_L0S_ENABLED);
	trans->pm_support = !(lctl & PCI_EXP_LNKCTL_ASPM_L0S);

	pcie_capability_read_word(trans_pcie->pci_dev, PCI_EXP_DEVCTL2, &cap);
	trans->ltr_enabled = cap & PCI_EXP_DEVCTL2_LTR_EN;
	dev_info(trans->dev, "L1 %sabled - LTR %sabled\n",
		 (lctl & PCI_EXP_LNKCTL_ASPM_L1) ? "En" : "Dis",
		 trans->ltr_enabled ? "En" : "Dis");
}
/*
 * Start up NIC's basic functionality after it has been reset
 * (e.g. after platform boot, or shutdown via iwl_pcie_apm_stop())
 * NOTE:  This does not load uCode nor start the embedded processor
 */
static int iwl_pcie_apm_init(struct iwl_trans *trans)
{
	int ret = 0;

	IWL_DEBUG_INFO(trans, "Init card's basic functions\n");

	/*
	 * Use "set_bit" below rather than "write", to preserve any hardware
	 * bits already set by default after reset.
	 */

	/* Disable L0S exit timer (platform NMI Work/Around) */
	if (trans->cfg->device_family != IWL_DEVICE_FAMILY_8000)
		iwl_set_bit(trans, CSR_GIO_CHICKEN_BITS,
			    CSR_GIO_CHICKEN_BITS_REG_BIT_DIS_L0S_EXIT_TIMER);

	/*
	 * Disable L0s without affecting L1;
	 * don't wait for ICH L0s (ICH bug W/A)
	 */
	iwl_set_bit(trans, CSR_GIO_CHICKEN_BITS,
		    CSR_GIO_CHICKEN_BITS_REG_BIT_L1A_NO_L0S_RX);

	/* Set FH wait threshold to maximum (HW error during stress W/A) */
	iwl_set_bit(trans, CSR_DBG_HPET_MEM_REG, CSR_DBG_HPET_MEM_REG_VAL);

	/*
	 * Enable HAP INTA (interrupt from management bus) to
	 * wake device's PCI Express link L1a -> L0s
	 */
	iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
		    CSR_HW_IF_CONFIG_REG_BIT_HAP_WAKE_L1A);

	iwl_pcie_apm_config(trans);

	/* Configure analog phase-lock-loop before activating to D0A */
	if (trans->cfg->base_params->pll_cfg_val)
		iwl_set_bit(trans, CSR_ANA_PLL_CFG,
			    trans->cfg->base_params->pll_cfg_val);

	/*
	 * Set "initialization complete" bit to move adapter from
	 * D0U* --> D0A* (powered-up active) state.
	 */
	iwl_set_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);

	/*
	 * Wait for clock stabilization; once stabilized, access to
	 * device-internal resources is supported, e.g. iwl_write_prph()
	 * and accesses to uCode SRAM.
	 */
	ret = iwl_poll_bit(trans, CSR_GP_CNTRL,
			   CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
			   CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 25000);
	if (ret < 0) {
		IWL_DEBUG_INFO(trans, "Failed to init the card\n");
		goto out;
	}

	if (trans->cfg->host_interrupt_operation_mode) {
		/*
		 * This is a bit of an abuse - this workaround is needed for
		 * 7260 / 3160 only, so we check host_interrupt_operation_mode
		 * even though it is not strictly related to it.
		 *
		 * Enable the oscillator to count wake up time for L1 exit. This
		 * consumes slightly more power (100uA) - but allows to be sure
		 * that we wake up from L1 on time.
		 *
		 * This looks weird: read twice the same register, discard the
		 * value, set a bit, and yet again, read that same register
		 * just to discard the value. But that's the way the hardware
		 * seems to like it.
		 */
		iwl_read_prph(trans, OSC_CLK);
		iwl_read_prph(trans, OSC_CLK);
		iwl_set_bits_prph(trans, OSC_CLK, OSC_CLK_FORCE_CONTROL);
		iwl_read_prph(trans, OSC_CLK);
		iwl_read_prph(trans, OSC_CLK);
	}

	/*
	 * Enable DMA clock and wait for it to stabilize.
	 *
	 * Write to "CLK_EN_REG"; "1" bits enable clocks, while "0"
	 * bits do not disable clocks.  This preserves any hardware
	 * bits already set by default in "CLK_CTRL_REG" after reset.
	 */
	if (!trans->cfg->apmg_not_supported) {
		iwl_write_prph(trans, APMG_CLK_EN_REG,
			       APMG_CLK_VAL_DMA_CLK_RQT);
		udelay(20);

		/* Disable L1-Active */
		iwl_set_bits_prph(trans, APMG_PCIDEV_STT_REG,
				  APMG_PCIDEV_STT_VAL_L1_ACT_DIS);

		/* Clear the interrupt in APMG if the NIC is in RFKILL */
		iwl_write_prph(trans, APMG_RTC_INT_STT_REG,
			       APMG_RTC_INT_STT_RFKILL);
	}

	set_bit(STATUS_DEVICE_ENABLED, &trans->status);

out:
	return ret;
}
/*
 * Enable LP XTAL to avoid HW bug where device may consume much power if
 * FW is not loaded after device reset. LP XTAL is disabled by default
 * after device HW reset. Do it only if XTAL is fed by internal source.
 * Configure device's "persistence" mode to avoid resetting XTAL again when
 * SHRD_HW_RST occurs in S3.
 */
static void iwl_pcie_apm_lp_xtal_enable(struct iwl_trans *trans)
{
	int ret;
	u32 apmg_gp1_reg;
	u32 apmg_xtal_cfg_reg;
	u32 dl_cfg_reg;

	/* Force XTAL ON */
	__iwl_trans_pcie_set_bit(trans, CSR_GP_CNTRL,
				 CSR_GP_CNTRL_REG_FLAG_XTAL_ON);

	/* Reset entire device - do controller reset (results in SHRD_HW_RST) */
	iwl_set_bit(trans, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET);

	udelay(10);

	/*
	 * Set "initialization complete" bit to move adapter from
	 * D0U* --> D0A* (powered-up active) state.
	 */
	iwl_set_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);

	/*
	 * Wait for clock stabilization; once stabilized, access to
	 * device-internal resources is possible.
	 */
	ret = iwl_poll_bit(trans, CSR_GP_CNTRL,
			   CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
			   CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
			   25000);
	if (WARN_ON(ret < 0)) {
		IWL_ERR(trans, "Access time out - failed to enable LP XTAL\n");
		/* Release XTAL ON request */
		__iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL,
					   CSR_GP_CNTRL_REG_FLAG_XTAL_ON);
		return;
	}

	/*
	 * Clear "disable persistence" to avoid LP XTAL resetting when
	 * SHRD_HW_RST is applied in S3.
	 */
	iwl_clear_bits_prph(trans, APMG_PCIDEV_STT_REG,
			    APMG_PCIDEV_STT_VAL_PERSIST_DIS);

	/*
	 * Force APMG XTAL to be active to prevent its disabling by HW
	 * caused by APMG idle state.
	 */
	apmg_xtal_cfg_reg = iwl_trans_pcie_read_shr(trans,
						    SHR_APMG_XTAL_CFG_REG);
	iwl_trans_pcie_write_shr(trans, SHR_APMG_XTAL_CFG_REG,
				 apmg_xtal_cfg_reg |
				 SHR_APMG_XTAL_CFG_XTAL_ON_REQ);

	/*
	 * Reset entire device again - do controller reset (results in
	 * SHRD_HW_RST). Turn MAC off before proceeding.
	 */
	iwl_set_bit(trans, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET);

	udelay(10);

	/* Enable LP XTAL by indirect access through CSR */
	apmg_gp1_reg = iwl_trans_pcie_read_shr(trans, SHR_APMG_GP1_REG);
	iwl_trans_pcie_write_shr(trans, SHR_APMG_GP1_REG, apmg_gp1_reg |
				 SHR_APMG_GP1_WF_XTAL_LP_EN |
				 SHR_APMG_GP1_CHICKEN_BIT_SELECT);

	/* Clear delay line clock power up */
	dl_cfg_reg = iwl_trans_pcie_read_shr(trans, SHR_APMG_DL_CFG_REG);
	iwl_trans_pcie_write_shr(trans, SHR_APMG_DL_CFG_REG, dl_cfg_reg &
				 ~SHR_APMG_DL_CFG_DL_CLOCK_POWER_UP);

	/*
	 * Enable persistence mode to avoid LP XTAL resetting when
	 * SHRD_HW_RST is applied in S3.
	 */
	iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
		    CSR_HW_IF_CONFIG_REG_PERSIST_MODE);

	/*
	 * Clear "initialization complete" bit to move adapter from
	 * D0A* (powered-up Active) --> D0U* (Uninitialized) state.
	 */
	iwl_clear_bit(trans, CSR_GP_CNTRL,
		      CSR_GP_CNTRL_REG_FLAG_INIT_DONE);

	/* Activates XTAL resources monitor */
	__iwl_trans_pcie_set_bit(trans, CSR_MONITOR_CFG_REG,
				 CSR_MONITOR_XTAL_RESOURCES);

	/* Release XTAL ON request */
	__iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL,
				   CSR_GP_CNTRL_REG_FLAG_XTAL_ON);
	udelay(10);

	/* Release APMG XTAL */
	iwl_trans_pcie_write_shr(trans, SHR_APMG_XTAL_CFG_REG,
				 apmg_xtal_cfg_reg &
				 ~SHR_APMG_XTAL_CFG_XTAL_ON_REQ);
}
static int iwl_pcie_apm_stop_master(struct iwl_trans *trans)
{
	int ret = 0;

	/* stop device's busmaster DMA activity */
	iwl_set_bit(trans, CSR_RESET, CSR_RESET_REG_FLAG_STOP_MASTER);

	ret = iwl_poll_bit(trans, CSR_RESET,
			   CSR_RESET_REG_FLAG_MASTER_DISABLED,
			   CSR_RESET_REG_FLAG_MASTER_DISABLED, 100);
	if (ret < 0)
		IWL_WARN(trans, "Master Disable Timed Out, 100 usec\n");

	IWL_DEBUG_INFO(trans, "stop master\n");

	return ret;
}
static void iwl_pcie_apm_stop(struct iwl_trans *trans, bool op_mode_leave)
{
	IWL_DEBUG_INFO(trans, "Stop card, put in low power state\n");

	if (op_mode_leave) {
		if (!test_bit(STATUS_DEVICE_ENABLED, &trans->status))
			iwl_pcie_apm_init(trans);

		/* inform ME that we are leaving */
		if (trans->cfg->device_family == IWL_DEVICE_FAMILY_7000)
			iwl_set_bits_prph(trans, APMG_PCIDEV_STT_REG,
					  APMG_PCIDEV_STT_VAL_WAKE_ME);
		else if (trans->cfg->device_family == IWL_DEVICE_FAMILY_8000)
			iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
				    CSR_HW_IF_CONFIG_REG_PREPARE |
				    CSR_HW_IF_CONFIG_REG_ENABLE_PME);
	}

	clear_bit(STATUS_DEVICE_ENABLED, &trans->status);

	/* Stop device's DMA activity */
	iwl_pcie_apm_stop_master(trans);

	if (trans->cfg->lp_xtal_workaround) {
		iwl_pcie_apm_lp_xtal_enable(trans);
		return;
	}

	/* Reset the entire device */
	iwl_set_bit(trans, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET);

	udelay(10);

	/*
	 * Clear "initialization complete" bit to move adapter from
	 * D0A* (powered-up Active) --> D0U* (Uninitialized) state.
	 */
	iwl_clear_bit(trans, CSR_GP_CNTRL,
		      CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
}
static int iwl_pcie_nic_init(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	/* nic_init */
	spin_lock(&trans_pcie->irq_lock);
	iwl_pcie_apm_init(trans);
	spin_unlock(&trans_pcie->irq_lock);

	iwl_pcie_set_pwr(trans, false);

	iwl_op_mode_nic_config(trans->op_mode);

	/* Allocate the RX queue, or reset if it is already allocated */
	iwl_pcie_rx_init(trans);

	/* Allocate or reset and init all Tx and Command queues */
	if (iwl_pcie_tx_init(trans))
		return -ENOMEM;

	if (trans->cfg->base_params->shadow_reg_enable) {
		/* enable shadow regs in HW */
		iwl_set_bit(trans, CSR_MAC_SHADOW_REG_CTRL, 0x800FFFFF);
		IWL_DEBUG_INFO(trans, "Enabling shadow registers in device\n");
	}

	return 0;
}
#define HW_READY_TIMEOUT (50)

/* Note: returns poll_bit return value, which is >= 0 if success */
static int iwl_pcie_set_hw_ready(struct iwl_trans *trans)
{
	int ret;

	iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
		    CSR_HW_IF_CONFIG_REG_BIT_NIC_READY);

	/* See if we got it */
	ret = iwl_poll_bit(trans, CSR_HW_IF_CONFIG_REG,
			   CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
			   CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
			   HW_READY_TIMEOUT);

	if (ret >= 0)
		iwl_set_bit(trans, CSR_MBOX_SET_REG, CSR_MBOX_SET_REG_OS_ALIVE);

	IWL_DEBUG_INFO(trans, "hardware%s ready\n", ret < 0 ? " not" : "");
	return ret;
}
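
/*
 * The NIC may be held by another agent (e.g. the ME/AMT firmware), in
 * which case NIC_READY does not assert on the first attempt; keep
 * re-asserting PREPARE and re-polling NIC_READY before giving up.
 */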
/* Note: returns standard 0/-ERROR code */
static int iwl_pcie_prepare_card_hw(struct iwl_trans *trans)
{
	int ret;
	int t = 0;
	int iter;

	IWL_DEBUG_INFO(trans, "iwl_trans_prepare_card_hw enter\n");

	ret = iwl_pcie_set_hw_ready(trans);
	/* If the card is ready, exit 0 */
	if (ret >= 0)
		return 0;

	for (iter = 0; iter < 10; iter++) {
		/* If HW is not ready, prepare the conditions to check again */
		iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
			    CSR_HW_IF_CONFIG_REG_PREPARE);

		do {
			ret = iwl_pcie_set_hw_ready(trans);
			if (ret >= 0)
				return 0;

			usleep_range(200, 1000);
			t += 200;
		} while (t < 150000);
		msleep(25);
	}

	IWL_ERR(trans, "Couldn't prepare the card\n");

	return ret;
}
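
/*
 * Load one firmware chunk: program the FH service channel to DMA the
 * host buffer at phy_addr into device SRAM at dst_addr, then sleep
 * until the ISR reports ucode_write_complete (or the 5 s timeout hits).
 */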
static int iwl_pcie_load_firmware_chunk(struct iwl_trans *trans, u32 dst_addr,
					dma_addr_t phy_addr, u32 byte_cnt)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int ret;

	trans_pcie->ucode_write_complete = false;

	iwl_write_direct32(trans,
			   FH_TCSR_CHNL_TX_CONFIG_REG(FH_SRVC_CHNL),
			   FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE);

	iwl_write_direct32(trans,
			   FH_SRVC_CHNL_SRAM_ADDR_REG(FH_SRVC_CHNL),
			   dst_addr);

	iwl_write_direct32(trans,
			   FH_TFDIB_CTRL0_REG(FH_SRVC_CHNL),
			   phy_addr & FH_MEM_TFDIB_DRAM_ADDR_LSB_MSK);

	iwl_write_direct32(trans,
			   FH_TFDIB_CTRL1_REG(FH_SRVC_CHNL),
			   (iwl_get_dma_hi_addr(phy_addr)
				<< FH_MEM_TFDIB_REG1_ADDR_BITSHIFT) | byte_cnt);

	iwl_write_direct32(trans,
			   FH_TCSR_CHNL_TX_BUF_STS_REG(FH_SRVC_CHNL),
			   1 << FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_NUM |
			   1 << FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_IDX |
			   FH_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_VALID);

	iwl_write_direct32(trans,
			   FH_TCSR_CHNL_TX_CONFIG_REG(FH_SRVC_CHNL),
			   FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
			   FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_DISABLE |
			   FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_ENDTFD);

	ret = wait_event_timeout(trans_pcie->ucode_write_waitq,
				 trans_pcie->ucode_write_complete, 5 * HZ);
	if (!ret) {
		IWL_ERR(trans, "Failed to load firmware chunk!\n");
		return -ETIMEDOUT;
	}

	return 0;
}
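
/*
 * Load a complete uCode section, splitting it into DMA-able chunks of
 * at most FH_MEM_TB_MAX_LENGTH bytes. Chunks that land in the extended
 * SRAM range are bracketed by the LMPM_CHICK extended-address-space bit.
 */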
static int iwl_pcie_load_section(struct iwl_trans *trans, u8 section_num,
				 const struct fw_desc *section)
{
	u8 *v_addr;
	dma_addr_t p_addr;
	u32 offset, chunk_sz = min_t(u32, FH_MEM_TB_MAX_LENGTH, section->len);
	int ret = 0;

	IWL_DEBUG_FW(trans, "[%d] uCode section being loaded...\n",
		     section_num);

	v_addr = dma_alloc_coherent(trans->dev, chunk_sz, &p_addr,
				    GFP_KERNEL | __GFP_NOWARN);
	if (!v_addr) {
		IWL_DEBUG_INFO(trans, "Falling back to small chunks of DMA\n");
		chunk_sz = PAGE_SIZE;
		v_addr = dma_alloc_coherent(trans->dev, chunk_sz,
					    &p_addr, GFP_KERNEL);
		if (!v_addr)
			return -ENOMEM;
	}

	for (offset = 0; offset < section->len; offset += chunk_sz) {
		u32 copy_size, dst_addr;
		bool extended_addr = false;

		copy_size = min_t(u32, chunk_sz, section->len - offset);
		dst_addr = section->offset + offset;

		if (dst_addr >= IWL_FW_MEM_EXTENDED_START &&
		    dst_addr <= IWL_FW_MEM_EXTENDED_END)
			extended_addr = true;

		if (extended_addr)
			iwl_set_bits_prph(trans, LMPM_CHICK,
					  LMPM_CHICK_EXTENDED_ADDR_SPACE);

		memcpy(v_addr, (u8 *)section->data + offset, copy_size);
		ret = iwl_pcie_load_firmware_chunk(trans, dst_addr, p_addr,
						   copy_size);

		if (extended_addr)
			iwl_clear_bits_prph(trans, LMPM_CHICK,
					    LMPM_CHICK_EXTENDED_ADDR_SPACE);

		if (ret) {
			IWL_ERR(trans,
				"Could not load the [%d] uCode section\n",
				section_num);
			break;
		}
	}

	dma_free_coherent(trans->dev, chunk_sz, v_addr, p_addr);
	return ret;
}
/*
 * The driver takes ownership of the secure machine before FW load to
 * prevent a race with the BT load.
 * W/A for a ROM bug (should be removed in the next Si step).
 */
static int iwl_pcie_rsa_race_bug_wa(struct iwl_trans *trans)
{
	u32 val, loop = 1000;

	/*
	 * Check the RSA semaphore is accessible.
	 * If the HW isn't locked and the rsa semaphore isn't accessible,
	 * we are in trouble.
	 */
	val = iwl_read_prph(trans, PREG_AUX_BUS_WPROT_0);
	if (val & (BIT(1) | BIT(17))) {
		IWL_INFO(trans,
			 "can't access the RSA semaphore - it is write protected\n");
		return 0;
	}

	/* take ownership on the AUX IF */
	iwl_write_prph(trans, WFPM_CTRL_REG, WFPM_AUX_CTL_AUX_IF_MAC_OWNER_MSK);
	iwl_write_prph(trans, AUX_MISC_MASTER1_EN, AUX_MISC_MASTER1_EN_SBE_MSK);

	do {
		iwl_write_prph(trans, AUX_MISC_MASTER1_SMPHR_STATUS, 0x1);
		val = iwl_read_prph(trans, AUX_MISC_MASTER1_SMPHR_STATUS);
		if (val == 0x1) {
			iwl_write_prph(trans, RSA_ENABLE, 0);
			return 0;
		}

		udelay(10);
		loop--;
	} while (loop > 0);

	IWL_ERR(trans, "Failed to take ownership on secure machine\n");
	return -EIO;
}
static int iwl_pcie_load_cpu_sections_8000(struct iwl_trans *trans,
					   const struct fw_img *image,
					   int cpu,
					   int *first_ucode_section)
{
	int shift_param;
	int i, ret = 0, sec_num = 0x1;
	u32 val, last_read_idx = 0;

	if (cpu == 1) {
		shift_param = 0;
		*first_ucode_section = 0;
	} else {
		shift_param = 16;
		(*first_ucode_section)++;
	}

	for (i = *first_ucode_section; i < IWL_UCODE_SECTION_MAX; i++) {
		last_read_idx = i;

		/*
		 * CPU1_CPU2_SEPARATOR_SECTION delimiter - separate between
		 * CPU1 to CPU2.
		 * PAGING_SEPARATOR_SECTION delimiter - separate between
		 * CPU2 non paged to CPU2 paging sec.
		 */
		if (!image->sec[i].data ||
		    image->sec[i].offset == CPU1_CPU2_SEPARATOR_SECTION ||
		    image->sec[i].offset == PAGING_SEPARATOR_SECTION) {
			IWL_DEBUG_FW(trans,
				     "Break since Data not valid or Empty section, sec = %d\n",
				     i);
			break;
		}

		ret = iwl_pcie_load_section(trans, i, &image->sec[i]);
		if (ret)
			return ret;

		/* Notify the ucode of the loaded section number and status */
		val = iwl_read_direct32(trans, FH_UCODE_LOAD_STATUS);
		val = val | (sec_num << shift_param);
		iwl_write_direct32(trans, FH_UCODE_LOAD_STATUS, val);
		sec_num = (sec_num << 1) | 0x1;
	}

	*first_ucode_section = last_read_idx;

	if (cpu == 1)
		iwl_write_direct32(trans, FH_UCODE_LOAD_STATUS, 0xFFFF);
	else
		iwl_write_direct32(trans, FH_UCODE_LOAD_STATUS, 0xFFFFFFFF);

	return 0;
}
static int iwl_pcie_load_cpu_sections(struct iwl_trans *trans,
				      const struct fw_img *image,
				      int cpu,
				      int *first_ucode_section)
{
	int shift_param;
	int i, ret = 0;
	u32 last_read_idx = 0;

	if (cpu == 1) {
		shift_param = 0;
		*first_ucode_section = 0;
	} else {
		shift_param = 16;
		(*first_ucode_section)++;
	}

	for (i = *first_ucode_section; i < IWL_UCODE_SECTION_MAX; i++) {
		last_read_idx = i;

		/*
		 * CPU1_CPU2_SEPARATOR_SECTION delimiter - separate between
		 * CPU1 to CPU2.
		 * PAGING_SEPARATOR_SECTION delimiter - separate between
		 * CPU2 non paged to CPU2 paging sec.
		 */
		if (!image->sec[i].data ||
		    image->sec[i].offset == CPU1_CPU2_SEPARATOR_SECTION ||
		    image->sec[i].offset == PAGING_SEPARATOR_SECTION) {
			IWL_DEBUG_FW(trans,
				     "Break since Data not valid or Empty section, sec = %d\n",
				     i);
			break;
		}

		ret = iwl_pcie_load_section(trans, i, &image->sec[i]);
		if (ret)
			return ret;
	}

	if (trans->cfg->device_family == IWL_DEVICE_FAMILY_8000)
		iwl_set_bits_prph(trans,
				  CSR_UCODE_LOAD_STATUS_ADDR,
				  (LMPM_CPU_UCODE_LOADING_COMPLETED |
				   LMPM_CPU_HDRS_LOADING_COMPLETED |
				   LMPM_CPU_UCODE_LOADING_STARTED) <<
					shift_param);

	*first_ucode_section = last_read_idx;

	return 0;
}
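
/*
 * Apply the debug destination TLV shipped with the firmware file: set up
 * the monitor buffer and replay the register operations (CSR/PRPH
 * writes, set/clear bits) that the TLV describes.
 */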
static void iwl_pcie_apply_destination(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	const struct iwl_fw_dbg_dest_tlv *dest = trans->dbg_dest_tlv;
	int i;

	if (dest->version)
		IWL_ERR(trans,
			"DBG DEST version is %d - expect issues\n",
			dest->version);

	IWL_INFO(trans, "Applying debug destination %s\n",
		 get_fw_dbg_mode_string(dest->monitor_mode));

	if (dest->monitor_mode == EXTERNAL_MODE)
		iwl_pcie_alloc_fw_monitor(trans, dest->size_power);
	else
		IWL_WARN(trans, "PCI should have external buffer debug\n");

	for (i = 0; i < trans->dbg_dest_reg_num; i++) {
		u32 addr = le32_to_cpu(dest->reg_ops[i].addr);
		u32 val = le32_to_cpu(dest->reg_ops[i].val);

		switch (dest->reg_ops[i].op) {
		case CSR_ASSIGN:
			iwl_write32(trans, addr, val);
			break;
		case CSR_SETBIT:
			iwl_set_bit(trans, addr, BIT(val));
			break;
		case CSR_CLEARBIT:
			iwl_clear_bit(trans, addr, BIT(val));
			break;
		case PRPH_ASSIGN:
			iwl_write_prph(trans, addr, val);
			break;
		case PRPH_SETBIT:
			iwl_set_bits_prph(trans, addr, BIT(val));
			break;
		case PRPH_CLEARBIT:
			iwl_clear_bits_prph(trans, addr, BIT(val));
			break;
		case PRPH_BLOCKBIT:
			if (iwl_read_prph(trans, addr) & BIT(val)) {
				IWL_ERR(trans,
					"BIT(%u) in address 0x%x is 1, stopping FW configuration\n",
					val, addr);
				goto monitor;
			}
			break;
		default:
			IWL_ERR(trans, "FW debug - unknown OP %d\n",
				dest->reg_ops[i].op);
			break;
		}
	}

monitor:
	if (dest->monitor_mode == EXTERNAL_MODE && trans_pcie->fw_mon_size) {
		iwl_write_prph(trans, le32_to_cpu(dest->base_reg),
			       trans_pcie->fw_mon_phys >> dest->base_shift);
		iwl_write_prph(trans, le32_to_cpu(dest->end_reg),
			       (trans_pcie->fw_mon_phys +
				trans_pcie->fw_mon_size) >> dest->end_shift);
	}
}
static int iwl_pcie_load_given_ucode(struct iwl_trans *trans,
				     const struct fw_img *image)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int ret = 0;
	int first_ucode_section;

	IWL_DEBUG_FW(trans, "working with %s CPU\n",
		     image->is_dual_cpus ? "Dual" : "Single");

	/* load to FW the binary non secured sections of CPU1 */
	ret = iwl_pcie_load_cpu_sections(trans, image, 1, &first_ucode_section);
	if (ret)
		return ret;

	if (image->is_dual_cpus) {
		/* set CPU2 header address */
		iwl_write_prph(trans,
			       LMPM_SECURE_UCODE_LOAD_CPU2_HDR_ADDR,
			       LMPM_SECURE_CPU2_HDR_MEM_SPACE);

		/* load to FW the binary sections of CPU2 */
		ret = iwl_pcie_load_cpu_sections(trans, image, 2,
						 &first_ucode_section);
		if (ret)
			return ret;
	}

	/* supported for 7000 only for the moment */
	if (iwlwifi_mod_params.fw_monitor &&
	    trans->cfg->device_family == IWL_DEVICE_FAMILY_7000) {
		iwl_pcie_alloc_fw_monitor(trans, 0);

		if (trans_pcie->fw_mon_size) {
			iwl_write_prph(trans, MON_BUFF_BASE_ADDR,
				       trans_pcie->fw_mon_phys >> 4);
			iwl_write_prph(trans, MON_BUFF_END_ADDR,
				       (trans_pcie->fw_mon_phys +
					trans_pcie->fw_mon_size) >> 4);
		}
	} else if (trans->dbg_dest_tlv) {
		iwl_pcie_apply_destination(trans);
	}

	/* release CPU reset */
	iwl_write32(trans, CSR_RESET, 0);

	return 0;
}
static int iwl_pcie_load_given_ucode_8000(struct iwl_trans *trans,
					  const struct fw_img *image)
{
	int ret = 0;
	int first_ucode_section;

	IWL_DEBUG_FW(trans, "working with %s CPU\n",
		     image->is_dual_cpus ? "Dual" : "Single");

	if (trans->dbg_dest_tlv)
		iwl_pcie_apply_destination(trans);

	/* TODO: remove in the next Si step */
	ret = iwl_pcie_rsa_race_bug_wa(trans);
	if (ret)
		return ret;

	/* configure the ucode to be ready to get the secured image */
	/* release CPU reset */
	iwl_write_prph(trans, RELEASE_CPU_RESET, RELEASE_CPU_RESET_BIT);

	/* load to FW the binary Secured sections of CPU1 */
	ret = iwl_pcie_load_cpu_sections_8000(trans, image, 1,
					      &first_ucode_section);
	if (ret)
		return ret;

	/* load to FW the binary sections of CPU2 */
	return iwl_pcie_load_cpu_sections_8000(trans, image, 2,
					       &first_ucode_section);
}
static int iwl_trans_pcie_start_fw(struct iwl_trans *trans,
				   const struct fw_img *fw, bool run_in_rfkill)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	bool hw_rfkill;
	int ret;

	mutex_lock(&trans_pcie->mutex);

	/* Someone called stop_device, don't try to start_fw */
	if (trans_pcie->is_down) {
		IWL_WARN(trans,
			 "Can't start_fw since the HW hasn't been started\n");
		ret = -EIO;
		goto out;
	}

	/* This may fail if AMT took ownership of the device */
	if (iwl_pcie_prepare_card_hw(trans)) {
		IWL_WARN(trans, "Exit HW not ready\n");
		ret = -EIO;
		goto out;
	}

	iwl_enable_rfkill_int(trans);

	/* If platform's RF_KILL switch is NOT set to KILL */
	hw_rfkill = iwl_is_rfkill_set(trans);
	if (hw_rfkill)
		set_bit(STATUS_RFKILL, &trans->status);
	else
		clear_bit(STATUS_RFKILL, &trans->status);
	iwl_trans_pcie_rf_kill(trans, hw_rfkill);
	if (hw_rfkill && !run_in_rfkill) {
		ret = -ERFKILL;
		goto out;
	}

	iwl_write32(trans, CSR_INT, 0xFFFFFFFF);

	ret = iwl_pcie_nic_init(trans);
	if (ret) {
		IWL_ERR(trans, "Unable to init nic\n");
		goto out;
	}

	/* make sure rfkill handshake bits are cleared */
	iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
	iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR,
		    CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);

	/* clear (again), then enable host interrupts */
	iwl_write32(trans, CSR_INT, 0xFFFFFFFF);
	iwl_enable_interrupts(trans);

	/* really make sure rfkill handshake bits are cleared */
	iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
	iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);

	/* Load the given image to the HW */
	if (trans->cfg->device_family == IWL_DEVICE_FAMILY_8000)
		ret = iwl_pcie_load_given_ucode_8000(trans, fw);
	else
		ret = iwl_pcie_load_given_ucode(trans, fw);

out:
	mutex_unlock(&trans_pcie->mutex);
	return ret;
}
static void iwl_trans_pcie_fw_alive(struct iwl_trans *trans, u32 scd_addr)
{
	iwl_pcie_reset_ict(trans);
	iwl_pcie_tx_start(trans, scd_addr);
}
static void _iwl_trans_pcie_stop_device(struct iwl_trans *trans, bool low_power)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	bool hw_rfkill, was_hw_rfkill;

	lockdep_assert_held(&trans_pcie->mutex);

	if (trans_pcie->is_down)
		return;

	trans_pcie->is_down = true;

	was_hw_rfkill = iwl_is_rfkill_set(trans);

	/* tell the device to stop sending interrupts */
	spin_lock(&trans_pcie->irq_lock);
	iwl_disable_interrupts(trans);
	spin_unlock(&trans_pcie->irq_lock);

	/* device going down, Stop using ICT table */
	iwl_pcie_disable_ict(trans);

	/*
	 * If a HW restart happens during firmware loading,
	 * then the firmware loading might call this function
	 * and later it might be called again due to the
	 * restart. So don't process again if the device is
	 * already dead.
	 */
	if (test_and_clear_bit(STATUS_DEVICE_ENABLED, &trans->status)) {
		IWL_DEBUG_INFO(trans, "DEVICE_ENABLED bit was set and is now cleared\n");
		iwl_pcie_tx_stop(trans);
		iwl_pcie_rx_stop(trans);

		/* Power-down device's busmaster DMA clocks */
		if (!trans->cfg->apmg_not_supported) {
			iwl_write_prph(trans, APMG_CLK_DIS_REG,
				       APMG_CLK_VAL_DMA_CLK_RQT);
			udelay(5);
		}
	}

	/* Make sure (redundant) we've released our request to stay awake */
	iwl_clear_bit(trans, CSR_GP_CNTRL,
		      CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);

	/* Stop the device, and put it in low power state */
	iwl_pcie_apm_stop(trans, false);

	/* stop and reset the on-board processor */
	iwl_write32(trans, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET);
	udelay(20);

	/*
	 * Upon stop, the APM issues an interrupt if HW RF kill is set.
	 * This is a bug in certain versions of the hardware.
	 * Certain devices also keep sending HW RF kill interrupt all
	 * the time, unless the interrupt is ACKed even if the interrupt
	 * should be masked. Re-ACK all the interrupts here.
	 */
	spin_lock(&trans_pcie->irq_lock);
	iwl_disable_interrupts(trans);
	spin_unlock(&trans_pcie->irq_lock);

	/* clear all status bits */
	clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
	clear_bit(STATUS_INT_ENABLED, &trans->status);
	clear_bit(STATUS_TPOWER_PMI, &trans->status);
	clear_bit(STATUS_RFKILL, &trans->status);

	/*
	 * Even if we stop the HW, we still want the RF kill
	 * interrupt
	 */
	iwl_enable_rfkill_int(trans);

	/*
	 * Check again since the RF kill state may have changed while
	 * all the interrupts were disabled, in this case we couldn't
	 * receive the RF kill interrupt and update the state in the
	 * op_mode.
	 * Don't call the op_mode if the rfkill state hasn't changed.
	 * This allows the op_mode to call stop_device from the rfkill
	 * notification without endless recursion. Under very rare
	 * circumstances, we might have a small recursion if the rfkill
	 * state changed exactly now while we were called from stop_device.
	 * This is very unlikely but can happen and is supported.
	 */
	hw_rfkill = iwl_is_rfkill_set(trans);
	if (hw_rfkill)
		set_bit(STATUS_RFKILL, &trans->status);
	else
		clear_bit(STATUS_RFKILL, &trans->status);
	if (hw_rfkill != was_hw_rfkill)
		iwl_trans_pcie_rf_kill(trans, hw_rfkill);

	/* re-take ownership to prevent other users from stealing the device */
	iwl_pcie_prepare_card_hw(trans);
}
static void iwl_trans_pcie_stop_device(struct iwl_trans *trans, bool low_power)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	mutex_lock(&trans_pcie->mutex);
	_iwl_trans_pcie_stop_device(trans, low_power);
	mutex_unlock(&trans_pcie->mutex);
}

void iwl_trans_pcie_rf_kill(struct iwl_trans *trans, bool state)
{
	struct iwl_trans_pcie __maybe_unused *trans_pcie =
		IWL_TRANS_GET_PCIE_TRANS(trans);

	lockdep_assert_held(&trans_pcie->mutex);

	if (iwl_op_mode_hw_rf_kill(trans->op_mode, state))
		_iwl_trans_pcie_stop_device(trans, true);
}
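
/*
 * D3 (WoWLAN) entry: in d0i3 mode the device is put in persistence mode
 * so it is not reset across suspend; otherwise the TX queues are reset,
 * since some of their registers do not survive S3.
 */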
static void iwl_trans_pcie_d3_suspend(struct iwl_trans *trans, bool test)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	if (trans->wowlan_d0i3) {
		/* Enable persistence mode to avoid reset */
		iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
			    CSR_HW_IF_CONFIG_REG_PERSIST_MODE);
	}

	iwl_disable_interrupts(trans);

	/*
	 * in testing mode, the host stays awake and the
	 * hardware won't be reset (not even partially)
	 */
	if (test)
		return;

	iwl_pcie_disable_ict(trans);

	synchronize_irq(trans_pcie->pci_dev->irq);

	iwl_clear_bit(trans, CSR_GP_CNTRL,
		      CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
	iwl_clear_bit(trans, CSR_GP_CNTRL,
		      CSR_GP_CNTRL_REG_FLAG_INIT_DONE);

	if (!trans->wowlan_d0i3) {
		/*
		 * reset TX queues -- some of their registers reset during S3
		 * so if we don't reset everything here the D3 image would try
		 * to execute some invalid memory upon resume
		 */
		iwl_trans_pcie_tx_reset(trans);
	}

	iwl_pcie_set_pwr(trans, true);
}
static int iwl_trans_pcie_d3_resume(struct iwl_trans *trans,
				    enum iwl_d3_status *status,
				    bool test)
{
	u32 val;
	int ret;

	if (test) {
		iwl_enable_interrupts(trans);
		*status = IWL_D3_STATUS_ALIVE;
		return 0;
	}

	/*
	 * Also enables interrupts - none will happen as the device doesn't
	 * know we're waking it up, only when the opmode actually tells it
	 * after this resumes.
	 */
	iwl_pcie_reset_ict(trans);

	iwl_set_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
	iwl_set_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);

	if (trans->cfg->device_family == IWL_DEVICE_FAMILY_8000)
		udelay(2);

	ret = iwl_poll_bit(trans, CSR_GP_CNTRL,
			   CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
			   CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
			   25000);
	if (ret < 0) {
		IWL_ERR(trans, "Failed to resume the device (mac ready)\n");
		return ret;
	}

	iwl_pcie_set_pwr(trans, false);

	if (trans->wowlan_d0i3) {
		iwl_clear_bit(trans, CSR_GP_CNTRL,
			      CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
	} else {
		iwl_trans_pcie_tx_reset(trans);

		ret = iwl_pcie_rx_init(trans);
		if (ret) {
			IWL_ERR(trans,
				"Failed to resume the device (RX reset)\n");
			return ret;
		}
	}

	val = iwl_read32(trans, CSR_RESET);
	if (val & CSR_RESET_REG_FLAG_NEVO_RESET)
		*status = IWL_D3_STATUS_RESET;
	else
		*status = IWL_D3_STATUS_ALIVE;

	return 0;
}
static int _iwl_trans_pcie_start_hw(struct iwl_trans *trans, bool low_power)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	bool hw_rfkill;
	int err;

	lockdep_assert_held(&trans_pcie->mutex);

	err = iwl_pcie_prepare_card_hw(trans);
	if (err) {
		IWL_ERR(trans, "Error while preparing HW: %d\n", err);
		return err;
	}

	/* Reset the entire device */
	iwl_write32(trans, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET);

	usleep_range(10, 15);

	iwl_pcie_apm_init(trans);

	/* From now on, the op_mode will be kept updated about RF kill state */
	iwl_enable_rfkill_int(trans);

	/* Set is_down to false here so that...*/
	trans_pcie->is_down = false;

	hw_rfkill = iwl_is_rfkill_set(trans);
	if (hw_rfkill)
		set_bit(STATUS_RFKILL, &trans->status);
	else
		clear_bit(STATUS_RFKILL, &trans->status);
	/* ... rfkill can call stop_device and set it false if needed */
	iwl_trans_pcie_rf_kill(trans, hw_rfkill);

	return 0;
}

static int iwl_trans_pcie_start_hw(struct iwl_trans *trans, bool low_power)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int ret;

	mutex_lock(&trans_pcie->mutex);
	ret = _iwl_trans_pcie_start_hw(trans, low_power);
	mutex_unlock(&trans_pcie->mutex);

	return ret;
}
static void iwl_trans_pcie_op_mode_leave(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	mutex_lock(&trans_pcie->mutex);

	/* disable interrupts - don't enable HW RF kill interrupt */
	spin_lock(&trans_pcie->irq_lock);
	iwl_disable_interrupts(trans);
	spin_unlock(&trans_pcie->irq_lock);

	iwl_pcie_apm_stop(trans, true);

	spin_lock(&trans_pcie->irq_lock);
	iwl_disable_interrupts(trans);
	spin_unlock(&trans_pcie->irq_lock);

	iwl_pcie_disable_ict(trans);

	mutex_unlock(&trans_pcie->mutex);

	synchronize_irq(trans_pcie->pci_dev->irq);
}
static void iwl_trans_pcie_write8(struct iwl_trans *trans, u32 ofs, u8 val)
{
	writeb(val, IWL_TRANS_GET_PCIE_TRANS(trans)->hw_base + ofs);
}

static void iwl_trans_pcie_write32(struct iwl_trans *trans, u32 ofs, u32 val)
{
	writel(val, IWL_TRANS_GET_PCIE_TRANS(trans)->hw_base + ofs);
}

static u32 iwl_trans_pcie_read32(struct iwl_trans *trans, u32 ofs)
{
	return readl(IWL_TRANS_GET_PCIE_TRANS(trans)->hw_base + ofs);
}
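
/*
 * Periphery (PRPH) registers are reached indirectly through the HBUS
 * target address/data ports; the (3 << 24) tag in the address word
 * appears to be the byte-enable mask selecting a full 4-byte access.
 */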
static u32 iwl_trans_pcie_read_prph(struct iwl_trans *trans, u32 reg)
{
	iwl_trans_pcie_write32(trans, HBUS_TARG_PRPH_RADDR,
			       ((reg & 0x000FFFFF) | (3 << 24)));
	return iwl_trans_pcie_read32(trans, HBUS_TARG_PRPH_RDAT);
}

static void iwl_trans_pcie_write_prph(struct iwl_trans *trans, u32 addr,
				      u32 val)
{
	iwl_trans_pcie_write32(trans, HBUS_TARG_PRPH_WADDR,
			       ((addr & 0x000FFFFF) | (3 << 24)));
	iwl_trans_pcie_write32(trans, HBUS_TARG_PRPH_WDAT, val);
}
static int iwl_pcie_dummy_napi_poll(struct napi_struct *napi, int budget)
{
	WARN_ON(1);
	return 0;
}
static void iwl_trans_pcie_configure(struct iwl_trans *trans,
				     const struct iwl_trans_config *trans_cfg)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	trans_pcie->cmd_queue = trans_cfg->cmd_queue;
	trans_pcie->cmd_fifo = trans_cfg->cmd_fifo;
	trans_pcie->cmd_q_wdg_timeout = trans_cfg->cmd_q_wdg_timeout;
	if (WARN_ON(trans_cfg->n_no_reclaim_cmds > MAX_NO_RECLAIM_CMDS))
		trans_pcie->n_no_reclaim_cmds = 0;
	else
		trans_pcie->n_no_reclaim_cmds = trans_cfg->n_no_reclaim_cmds;
	if (trans_pcie->n_no_reclaim_cmds)
		memcpy(trans_pcie->no_reclaim_cmds, trans_cfg->no_reclaim_cmds,
		       trans_pcie->n_no_reclaim_cmds * sizeof(u8));

	trans_pcie->rx_buf_size_8k = trans_cfg->rx_buf_size_8k;
	if (trans_pcie->rx_buf_size_8k)
		trans_pcie->rx_page_order = get_order(8 * 1024);
	else
		trans_pcie->rx_page_order = get_order(4 * 1024);

	trans_pcie->wide_cmd_header = trans_cfg->wide_cmd_header;
	trans_pcie->command_names = trans_cfg->command_names;
	trans_pcie->bc_table_dword = trans_cfg->bc_table_dword;
	trans_pcie->scd_set_active = trans_cfg->scd_set_active;

	/* init ref_count to 1 (should be cleared when ucode is loaded) */
	trans_pcie->ref_count = 1;

	/* Initialize NAPI here - it should be before registering to mac80211
	 * in the opmode but after the HW struct is allocated.
	 * As this function may be called again in some corner cases don't
	 * do anything if NAPI was already initialized.
	 */
	if (!trans_pcie->napi.poll && trans->op_mode->ops->napi_add) {
		init_dummy_netdev(&trans_pcie->napi_dev);
		iwl_op_mode_napi_add(trans->op_mode, &trans_pcie->napi,
				     &trans_pcie->napi_dev,
				     iwl_pcie_dummy_napi_poll, 64);
	}
}
void iwl_trans_pcie_free(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	synchronize_irq(trans_pcie->pci_dev->irq);

	iwl_pcie_tx_free(trans);
	iwl_pcie_rx_free(trans);

	free_irq(trans_pcie->pci_dev->irq, trans);
	iwl_pcie_free_ict(trans);

	pci_disable_msi(trans_pcie->pci_dev);
	iounmap(trans_pcie->hw_base);
	pci_release_regions(trans_pcie->pci_dev);
	pci_disable_device(trans_pcie->pci_dev);

	if (trans_pcie->napi.poll)
		netif_napi_del(&trans_pcie->napi);

	iwl_pcie_free_fw_monitor(trans);

	iwl_trans_free(trans);
}

static void iwl_trans_pcie_set_pmi(struct iwl_trans *trans, bool state)
{
	if (state)
		set_bit(STATUS_TPOWER_PMI, &trans->status);
	else
		clear_bit(STATUS_TPOWER_PMI, &trans->status);
}
static bool iwl_trans_pcie_grab_nic_access(struct iwl_trans *trans, bool silent,
					   unsigned long *flags)
{
	int ret;
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	spin_lock_irqsave(&trans_pcie->reg_lock, *flags);

	if (trans_pcie->cmd_hold_nic_awake)
		goto out;

	/* this bit wakes up the NIC */
	__iwl_trans_pcie_set_bit(trans, CSR_GP_CNTRL,
				 CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
	if (trans->cfg->device_family == IWL_DEVICE_FAMILY_8000)
		udelay(2);

	/*
	 * These bits say the device is running, and should keep running for
	 * at least a short while (at least as long as MAC_ACCESS_REQ stays 1),
	 * but they do not indicate that embedded SRAM is restored yet;
	 * 3945 and 4965 have volatile SRAM, and must save/restore contents
	 * to/from host DRAM when sleeping/waking for power-saving.
	 * Each direction takes approximately 1/4 millisecond; with this
	 * overhead, it's a good idea to grab and hold MAC_ACCESS_REQUEST if a
	 * series of register accesses are expected (e.g. reading Event Log),
	 * to keep device from sleeping.
	 *
	 * CSR_UCODE_DRV_GP1 register bit MAC_SLEEP == 0 indicates that
	 * SRAM is okay/restored.  We don't check that here because this call
	 * is just for hardware register access; but GP1 MAC_SLEEP check is a
	 * good idea before accessing 3945/4965 SRAM (e.g. reading Event Log).
	 *
	 * 5000 series and later (including 1000 series) have non-volatile SRAM,
	 * and do not save/restore SRAM when power cycling.
	 */
	ret = iwl_poll_bit(trans, CSR_GP_CNTRL,
			   CSR_GP_CNTRL_REG_VAL_MAC_ACCESS_EN,
			   (CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY |
			    CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP), 15000);
	if (unlikely(ret < 0)) {
		iwl_write32(trans, CSR_RESET, CSR_RESET_REG_FLAG_FORCE_NMI);
		if (!silent) {
			u32 val = iwl_read32(trans, CSR_GP_CNTRL);

			WARN_ONCE(1,
				  "Timeout waiting for hardware access (CSR_GP_CNTRL 0x%08x)\n",
				  val);
			spin_unlock_irqrestore(&trans_pcie->reg_lock, *flags);
			return false;
		}
	}

out:
	/*
	 * Fool sparse by faking we release the lock - sparse will
	 * track nic_access anyway.
	 */
	__release(&trans_pcie->reg_lock);
	return true;
}
static void iwl_trans_pcie_release_nic_access(struct iwl_trans *trans,
					      unsigned long *flags)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	lockdep_assert_held(&trans_pcie->reg_lock);

	/*
	 * Fool sparse by faking we acquire the lock - sparse will
	 * track nic_access anyway.
	 */
	__acquire(&trans_pcie->reg_lock);

	if (trans_pcie->cmd_hold_nic_awake)
		goto out;

	__iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL,
				   CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
	/*
	 * Above we read the CSR_GP_CNTRL register, which will flush
	 * any previous writes, but we need the write that clears the
	 * MAC_ACCESS_REQ bit to be performed before any other writes
	 * scheduled on different CPUs (after we drop reg_lock).
	 */
	mmiowb();
out:
	spin_unlock_irqrestore(&trans_pcie->reg_lock, *flags);
}
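
/*
 * Bulk SRAM access: HBUS_TARG_MEM_RADDR/WADDR latch the start address,
 * and repeated accesses to the RDAT/WDAT ports then step through
 * consecutive dwords, so only the first address needs to be programmed.
 */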
static int iwl_trans_pcie_read_mem(struct iwl_trans *trans, u32 addr,
				   void *buf, int dwords)
{
	unsigned long flags;
	int offs, ret = 0;
	u32 *vals = buf;

	if (iwl_trans_grab_nic_access(trans, false, &flags)) {
		iwl_write32(trans, HBUS_TARG_MEM_RADDR, addr);
		for (offs = 0; offs < dwords; offs++)
			vals[offs] = iwl_read32(trans, HBUS_TARG_MEM_RDAT);
		iwl_trans_release_nic_access(trans, &flags);
	} else {
		ret = -EBUSY;
	}
	return ret;
}

static int iwl_trans_pcie_write_mem(struct iwl_trans *trans, u32 addr,
				    const void *buf, int dwords)
{
	unsigned long flags;
	int offs, ret = 0;
	const u32 *vals = buf;

	if (iwl_trans_grab_nic_access(trans, false, &flags)) {
		iwl_write32(trans, HBUS_TARG_MEM_WADDR, addr);
		for (offs = 0; offs < dwords; offs++)
			iwl_write32(trans, HBUS_TARG_MEM_WDAT,
				    vals ? vals[offs] : 0);
		iwl_trans_release_nic_access(trans, &flags);
	} else {
		ret = -EBUSY;
	}
	return ret;
}
static void iwl_trans_pcie_freeze_txq_timer(struct iwl_trans *trans,
					    unsigned long txqs,
					    bool freeze)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int queue;

	for_each_set_bit(queue, &txqs, BITS_PER_LONG) {
		struct iwl_txq *txq = &trans_pcie->txq[queue];
		unsigned long now;

		spin_lock_bh(&txq->lock);

		now = jiffies;

		if (txq->frozen == freeze)
			goto next_queue;

		IWL_DEBUG_TX_QUEUES(trans, "%s TXQ %d\n",
				    freeze ? "Freezing" : "Waking", queue);

		txq->frozen = freeze;

		if (txq->q.read_ptr == txq->q.write_ptr)
			goto next_queue;

		if (freeze) {
			if (unlikely(time_after(now,
						txq->stuck_timer.expires))) {
				/*
				 * The timer should have fired, maybe it is
				 * spinning right now on the lock.
				 */
				goto next_queue;
			}
			/* remember how long until the timer fires */
			txq->frozen_expiry_remainder =
				txq->stuck_timer.expires - now;
			del_timer(&txq->stuck_timer);
			goto next_queue;
		}

		/*
		 * Wake a non-empty queue -> arm timer with the
		 * remainder before it froze
		 */
		mod_timer(&txq->stuck_timer,
			  now + txq->frozen_expiry_remainder);

next_queue:
		spin_unlock_bh(&txq->lock);
	}
}
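
/*
 * Wait for the selected TX queues to drain. On timeout, dump the
 * scheduler state, the FH TRB registers and the per-queue status to
 * help debug the stuck queue.
 */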
#define IWL_FLUSH_WAIT_MS	2000

static int iwl_trans_pcie_wait_txq_empty(struct iwl_trans *trans, u32 txq_bm)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_txq *txq;
	struct iwl_queue *q;
	int cnt;
	unsigned long now = jiffies;
	u32 scd_sram_addr;
	u8 buf[16];
	int ret = 0;

	/* waiting for all the tx frames complete might take a while */
	for (cnt = 0; cnt < trans->cfg->base_params->num_of_queues; cnt++) {
		u8 wr_ptr;

		if (cnt == trans_pcie->cmd_queue)
			continue;
		if (!test_bit(cnt, trans_pcie->queue_used))
			continue;
		if (!(BIT(cnt) & txq_bm))
			continue;

		IWL_DEBUG_TX_QUEUES(trans, "Emptying queue %d...\n", cnt);
		txq = &trans_pcie->txq[cnt];
		q = &txq->q;
		wr_ptr = ACCESS_ONCE(q->write_ptr);

		while (q->read_ptr != ACCESS_ONCE(q->write_ptr) &&
		       !time_after(jiffies,
				   now + msecs_to_jiffies(IWL_FLUSH_WAIT_MS))) {
			u8 write_ptr = ACCESS_ONCE(q->write_ptr);

			if (WARN_ONCE(wr_ptr != write_ptr,
				      "WR pointer moved while flushing %d -> %d\n",
				      wr_ptr, write_ptr))
				return -ETIMEDOUT;
			msleep(1);
		}

		if (q->read_ptr != q->write_ptr) {
			IWL_ERR(trans,
				"fail to flush all tx fifo queues Q %d\n", cnt);
			ret = -ETIMEDOUT;
			break;
		}
		IWL_DEBUG_TX_QUEUES(trans, "Queue %d is now empty.\n", cnt);
	}

	if (!ret)
		return 0;

	IWL_ERR(trans, "Current SW read_ptr %d write_ptr %d\n",
		txq->q.read_ptr, txq->q.write_ptr);

	scd_sram_addr = trans_pcie->scd_base_addr +
			SCD_TX_STTS_QUEUE_OFFSET(txq->q.id);
	iwl_trans_read_mem_bytes(trans, scd_sram_addr, buf, sizeof(buf));

	iwl_print_hex_error(trans, buf, sizeof(buf));

	for (cnt = 0; cnt < FH_TCSR_CHNL_NUM; cnt++)
		IWL_ERR(trans, "FH TRBs(%d) = 0x%08x\n", cnt,
			iwl_read_direct32(trans, FH_TX_TRB_REG(cnt)));

	for (cnt = 0; cnt < trans->cfg->base_params->num_of_queues; cnt++) {
		u32 status = iwl_read_prph(trans, SCD_QUEUE_STATUS_BITS(cnt));
		u8 fifo = (status >> SCD_QUEUE_STTS_REG_POS_TXF) & 0x7;
		bool active = !!(status & BIT(SCD_QUEUE_STTS_REG_POS_ACTIVE));
		u32 tbl_dw =
			iwl_trans_read_mem32(trans, trans_pcie->scd_base_addr +
					     SCD_TRANS_TBL_OFFSET_QUEUE(cnt));

		if (cnt & 0x1)
			tbl_dw = (tbl_dw & 0xFFFF0000) >> 16;
		else
			tbl_dw = tbl_dw & 0x0000FFFF;

		IWL_ERR(trans,
			"Q %d is %sactive and mapped to fifo %d ra_tid 0x%04x [%d,%d]\n",
			cnt, active ? "" : "in", fifo, tbl_dw,
			iwl_read_prph(trans, SCD_QUEUE_RDPTR(cnt)) &
				(TFD_QUEUE_SIZE_MAX - 1),
			iwl_read_prph(trans, SCD_QUEUE_WRPTR(cnt)));
	}

	return ret;
}
static void iwl_trans_pcie_set_bits_mask(struct iwl_trans *trans, u32 reg,
					 u32 mask, u32 value)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	unsigned long flags;

	spin_lock_irqsave(&trans_pcie->reg_lock, flags);
	__iwl_trans_pcie_set_bits_mask(trans, reg, mask, value);
	spin_unlock_irqrestore(&trans_pcie->reg_lock, flags);
}
void iwl_trans_pcie_ref(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	unsigned long flags;

	if (iwlwifi_mod_params.d0i3_disable)
		return;

	spin_lock_irqsave(&trans_pcie->ref_lock, flags);
	IWL_DEBUG_RPM(trans, "ref_counter: %d\n", trans_pcie->ref_count);
	trans_pcie->ref_count++;
	spin_unlock_irqrestore(&trans_pcie->ref_lock, flags);
}

void iwl_trans_pcie_unref(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	unsigned long flags;

	if (iwlwifi_mod_params.d0i3_disable)
		return;

	spin_lock_irqsave(&trans_pcie->ref_lock, flags);
	IWL_DEBUG_RPM(trans, "ref_counter: %d\n", trans_pcie->ref_count);
	if (WARN_ON_ONCE(trans_pcie->ref_count == 0)) {
		spin_unlock_irqrestore(&trans_pcie->ref_lock, flags);
		return;
	}
	trans_pcie->ref_count--;
	spin_unlock_irqrestore(&trans_pcie->ref_lock, flags);
}
static const char *get_csr_string(int cmd)
{
#define IWL_CMD(x) case x: return #x
	switch (cmd) {
	IWL_CMD(CSR_HW_IF_CONFIG_REG);
	IWL_CMD(CSR_INT_COALESCING);
	IWL_CMD(CSR_INT);
	IWL_CMD(CSR_INT_MASK);
	IWL_CMD(CSR_FH_INT_STATUS);
	IWL_CMD(CSR_GPIO_IN);
	IWL_CMD(CSR_RESET);
	IWL_CMD(CSR_GP_CNTRL);
	IWL_CMD(CSR_HW_REV);
	IWL_CMD(CSR_EEPROM_REG);
	IWL_CMD(CSR_EEPROM_GP);
	IWL_CMD(CSR_OTP_GP_REG);
	IWL_CMD(CSR_GIO_REG);
	IWL_CMD(CSR_GP_UCODE_REG);
	IWL_CMD(CSR_GP_DRIVER_REG);
	IWL_CMD(CSR_UCODE_DRV_GP1);
	IWL_CMD(CSR_UCODE_DRV_GP2);
	IWL_CMD(CSR_LED_REG);
	IWL_CMD(CSR_DRAM_INT_TBL_REG);
	IWL_CMD(CSR_GIO_CHICKEN_BITS);
	IWL_CMD(CSR_ANA_PLL_CFG);
	IWL_CMD(CSR_HW_REV_WA_REG);
	IWL_CMD(CSR_MONITOR_STATUS_REG);
	IWL_CMD(CSR_DBG_HPET_MEM_REG);
	default:
		return "UNKNOWN";
	}
#undef IWL_CMD
}

void iwl_pcie_dump_csr(struct iwl_trans *trans)
{
	int i;
	static const u32 csr_tbl[] = {
		CSR_HW_IF_CONFIG_REG,
		CSR_INT_COALESCING,
		CSR_INT,
		CSR_INT_MASK,
		CSR_FH_INT_STATUS,
		CSR_GPIO_IN,
		CSR_RESET,
		CSR_GP_CNTRL,
		CSR_HW_REV,
		CSR_EEPROM_REG,
		CSR_EEPROM_GP,
		CSR_OTP_GP_REG,
		CSR_GIO_REG,
		CSR_GP_UCODE_REG,
		CSR_GP_DRIVER_REG,
		CSR_UCODE_DRV_GP1,
		CSR_UCODE_DRV_GP2,
		CSR_LED_REG,
		CSR_DRAM_INT_TBL_REG,
		CSR_GIO_CHICKEN_BITS,
		CSR_ANA_PLL_CFG,
		CSR_MONITOR_STATUS_REG,
		CSR_HW_REV_WA_REG,
		CSR_DBG_HPET_MEM_REG
	};
	IWL_ERR(trans, "CSR values:\n");
	IWL_ERR(trans, "(2nd byte of CSR_INT_COALESCING is "
		"CSR_INT_PERIODIC_REG)\n");
	for (i = 0; i < ARRAY_SIZE(csr_tbl); i++) {
		IWL_ERR(trans, "  %25s: 0X%08x\n",
			get_csr_string(csr_tbl[i]),
			iwl_read32(trans, csr_tbl[i]));
	}
}
#ifdef CONFIG_IWLWIFI_DEBUGFS
/* create and remove of files */
#define DEBUGFS_ADD_FILE(name, parent, mode) do {			\
	if (!debugfs_create_file(#name, mode, parent, trans,		\
				 &iwl_dbgfs_##name##_ops))		\
		goto err;						\
} while (0)

/* file operation */
#define DEBUGFS_READ_FILE_OPS(name)					\
static const struct file_operations iwl_dbgfs_##name##_ops = {		\
	.read = iwl_dbgfs_##name##_read,				\
	.open = simple_open,						\
	.llseek = generic_file_llseek,					\
};

#define DEBUGFS_WRITE_FILE_OPS(name)					\
static const struct file_operations iwl_dbgfs_##name##_ops = {		\
	.write = iwl_dbgfs_##name##_write,				\
	.open = simple_open,						\
	.llseek = generic_file_llseek,					\
};

#define DEBUGFS_READ_WRITE_FILE_OPS(name)				\
static const struct file_operations iwl_dbgfs_##name##_ops = {		\
	.write = iwl_dbgfs_##name##_write,				\
	.read = iwl_dbgfs_##name##_read,				\
	.open = simple_open,						\
	.llseek = generic_file_llseek,					\
};
static ssize_t iwl_dbgfs_tx_queue_read(struct file *file,
				       char __user *user_buf,
				       size_t count, loff_t *ppos)
{
	struct iwl_trans *trans = file->private_data;
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_txq *txq;
	struct iwl_queue *q;
	char *buf;
	int pos = 0;
	int cnt;
	int ret;
	size_t bufsz;

	bufsz = sizeof(char) * 75 * trans->cfg->base_params->num_of_queues;

	if (!trans_pcie->txq)
		return -EAGAIN;

	buf = kzalloc(bufsz, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	for (cnt = 0; cnt < trans->cfg->base_params->num_of_queues; cnt++) {
		txq = &trans_pcie->txq[cnt];
		q = &txq->q;
		pos += scnprintf(buf + pos, bufsz - pos,
				 "hwq %.2d: read=%u write=%u use=%d stop=%d need_update=%d frozen=%d%s\n",
				 cnt, q->read_ptr, q->write_ptr,
				 !!test_bit(cnt, trans_pcie->queue_used),
				 !!test_bit(cnt, trans_pcie->queue_stopped),
				 txq->need_update, txq->frozen,
				 (cnt == trans_pcie->cmd_queue ? " HCMD" : ""));
	}
	ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
	kfree(buf);
	return ret;
}
static ssize_t iwl_dbgfs_rx_queue_read(struct file *file,
				       char __user *user_buf,
				       size_t count, loff_t *ppos)
{
	struct iwl_trans *trans = file->private_data;
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rxq *rxq = &trans_pcie->rxq;
	char buf[256];
	int pos = 0;
	const size_t bufsz = sizeof(buf);

	pos += scnprintf(buf + pos, bufsz - pos, "read: %u\n",
			 rxq->read);
	pos += scnprintf(buf + pos, bufsz - pos, "write: %u\n",
			 rxq->write);
	pos += scnprintf(buf + pos, bufsz - pos, "write_actual: %u\n",
			 rxq->write_actual);
	pos += scnprintf(buf + pos, bufsz - pos, "need_update: %d\n",
			 rxq->need_update);
	pos += scnprintf(buf + pos, bufsz - pos, "free_count: %u\n",
			 rxq->free_count);
	if (rxq->rb_stts) {
		pos += scnprintf(buf + pos, bufsz - pos, "closed_rb_num: %u\n",
				 le16_to_cpu(rxq->rb_stts->closed_rb_num) & 0x0FFF);
	} else {
		pos += scnprintf(buf + pos, bufsz - pos,
				 "closed_rb_num: Not Allocated\n");
	}
	return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
}
static ssize_t iwl_dbgfs_interrupt_read(struct file *file,
					char __user *user_buf,
					size_t count, loff_t *ppos)
{
	struct iwl_trans *trans = file->private_data;
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct isr_statistics *isr_stats = &trans_pcie->isr_stats;

	int pos = 0;
	char *buf;
	int bufsz = 24 * 64; /* 24 items * 64 char per item */
	ssize_t ret;

	buf = kzalloc(bufsz, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	pos += scnprintf(buf + pos, bufsz - pos,
			 "Interrupt Statistics Report:\n");

	pos += scnprintf(buf + pos, bufsz - pos, "HW Error:\t\t\t %u\n",
			 isr_stats->hw);
	pos += scnprintf(buf + pos, bufsz - pos, "SW Error:\t\t\t %u\n",
			 isr_stats->sw);
	if (isr_stats->sw || isr_stats->hw) {
		pos += scnprintf(buf + pos, bufsz - pos,
				 "\tLast Restarting Code: 0x%X\n",
				 isr_stats->err_code);
	}
#ifdef CONFIG_IWLWIFI_DEBUG
	pos += scnprintf(buf + pos, bufsz - pos, "Frame transmitted:\t\t %u\n",
			 isr_stats->sch);
	pos += scnprintf(buf + pos, bufsz - pos, "Alive interrupt:\t\t %u\n",
			 isr_stats->alive);
#endif
	pos += scnprintf(buf + pos, bufsz - pos,
			 "HW RF KILL switch toggled:\t %u\n", isr_stats->rfkill);

	pos += scnprintf(buf + pos, bufsz - pos, "CT KILL:\t\t\t %u\n",
			 isr_stats->ctkill);

	pos += scnprintf(buf + pos, bufsz - pos, "Wakeup Interrupt:\t\t %u\n",
			 isr_stats->wakeup);

	pos += scnprintf(buf + pos, bufsz - pos,
			 "Rx command responses:\t\t %u\n", isr_stats->rx);

	pos += scnprintf(buf + pos, bufsz - pos, "Tx/FH interrupt:\t\t %u\n",
			 isr_stats->tx);

	pos += scnprintf(buf + pos, bufsz - pos, "Unexpected INTA:\t\t %u\n",
			 isr_stats->unhandled);

	ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
	kfree(buf);
	return ret;
}
static ssize_t iwl_dbgfs_interrupt_write(struct file *file,
					 const char __user *user_buf,
					 size_t count, loff_t *ppos)
{
	struct iwl_trans *trans = file->private_data;
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct isr_statistics *isr_stats = &trans_pcie->isr_stats;

	char buf[8];
	int buf_size;
	u32 reset_flag;

	memset(buf, 0, sizeof(buf));
	buf_size = min(count, sizeof(buf) - 1);
	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;
	if (sscanf(buf, "%x", &reset_flag) != 1)
		return -EFAULT;
	if (reset_flag == 0)
		memset(isr_stats, 0, sizeof(*isr_stats));

	return count;
}

static ssize_t iwl_dbgfs_csr_write(struct file *file,
				   const char __user *user_buf,
				   size_t count, loff_t *ppos)
{
	struct iwl_trans *trans = file->private_data;
	char buf[8];
	int buf_size;
	int csr;

	memset(buf, 0, sizeof(buf));
	buf_size = min(count, sizeof(buf) - 1);
	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;
	if (sscanf(buf, "%d", &csr) != 1)
		return -EFAULT;

	iwl_pcie_dump_csr(trans);

	return count;
}
static ssize_t iwl_dbgfs_fh_reg_read(struct file *file,
				     char __user *user_buf,
				     size_t count, loff_t *ppos)
{
	struct iwl_trans *trans = file->private_data;
	char *buf = NULL;
	ssize_t ret;

	ret = iwl_dump_fh(trans, &buf);
	if (ret < 0)
		return ret;
	if (!buf)
		return -EINVAL;
	ret = simple_read_from_buffer(user_buf, count, ppos, buf, ret);
	kfree(buf);
	return ret;
}

DEBUGFS_READ_WRITE_FILE_OPS(interrupt);
DEBUGFS_READ_FILE_OPS(fh_reg);
DEBUGFS_READ_FILE_OPS(rx_queue);
DEBUGFS_READ_FILE_OPS(tx_queue);
DEBUGFS_WRITE_FILE_OPS(csr);
/*
 * Create the debugfs files and directories
 */
static int iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans,
					 struct dentry *dir)
{
	DEBUGFS_ADD_FILE(rx_queue, dir, S_IRUSR);
	DEBUGFS_ADD_FILE(tx_queue, dir, S_IRUSR);
	DEBUGFS_ADD_FILE(interrupt, dir, S_IWUSR | S_IRUSR);
	DEBUGFS_ADD_FILE(csr, dir, S_IWUSR);
	DEBUGFS_ADD_FILE(fh_reg, dir, S_IRUSR);
	return 0;

err:
	IWL_ERR(trans, "failed to create the trans debugfs entry\n");
	return -ENOMEM;
}
#else
static int iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans,
					 struct dentry *dir)
{
	return 0;
}
#endif /*CONFIG_IWLWIFI_DEBUGFS */
static u32 iwl_trans_pcie_get_cmdlen(struct iwl_tfd *tfd)
{
	u32 cmdlen = 0;
	int i;

	for (i = 0; i < IWL_NUM_OF_TBS; i++)
		cmdlen += iwl_pcie_tfd_tb_get_len(tfd, i);

	return cmdlen;
}
2140 static const struct {
2142 } iwl_prph_dump_addr[] = {
	{ .start = 0x00a00000, .end = 0x00a00000 },
	{ .start = 0x00a0000c, .end = 0x00a00024 },
	{ .start = 0x00a0002c, .end = 0x00a0003c },
	{ .start = 0x00a00410, .end = 0x00a00418 },
	{ .start = 0x00a00420, .end = 0x00a00420 },
	{ .start = 0x00a00428, .end = 0x00a00428 },
	{ .start = 0x00a00430, .end = 0x00a0043c },
	{ .start = 0x00a00444, .end = 0x00a00444 },
	{ .start = 0x00a004c0, .end = 0x00a004cc },
	{ .start = 0x00a004d8, .end = 0x00a004d8 },
	{ .start = 0x00a004e0, .end = 0x00a004f0 },
	{ .start = 0x00a00840, .end = 0x00a00840 },
	{ .start = 0x00a00850, .end = 0x00a00858 },
	{ .start = 0x00a01004, .end = 0x00a01008 },
	{ .start = 0x00a01010, .end = 0x00a01010 },
	{ .start = 0x00a01018, .end = 0x00a01018 },
	{ .start = 0x00a01024, .end = 0x00a01024 },
	{ .start = 0x00a0102c, .end = 0x00a01034 },
	{ .start = 0x00a0103c, .end = 0x00a01040 },
	{ .start = 0x00a01048, .end = 0x00a01094 },
	{ .start = 0x00a01c00, .end = 0x00a01c20 },
	{ .start = 0x00a01c58, .end = 0x00a01c58 },
	{ .start = 0x00a01c7c, .end = 0x00a01c7c },
	{ .start = 0x00a01c28, .end = 0x00a01c54 },
	{ .start = 0x00a01c5c, .end = 0x00a01c5c },
	{ .start = 0x00a01c60, .end = 0x00a01cdc },
	{ .start = 0x00a01ce0, .end = 0x00a01d0c },
	{ .start = 0x00a01d18, .end = 0x00a01d20 },
	{ .start = 0x00a01d2c, .end = 0x00a01d30 },
	{ .start = 0x00a01d40, .end = 0x00a01d5c },
	{ .start = 0x00a01d80, .end = 0x00a01d80 },
	{ .start = 0x00a01d98, .end = 0x00a01d9c },
	{ .start = 0x00a01da8, .end = 0x00a01da8 },
	{ .start = 0x00a01db8, .end = 0x00a01df4 },
	{ .start = 0x00a01dc0, .end = 0x00a01dfc },
	{ .start = 0x00a01e00, .end = 0x00a01e2c },
	{ .start = 0x00a01e40, .end = 0x00a01e60 },
	{ .start = 0x00a01e68, .end = 0x00a01e6c },
	{ .start = 0x00a01e74, .end = 0x00a01e74 },
	{ .start = 0x00a01e84, .end = 0x00a01e90 },
	{ .start = 0x00a01e9c, .end = 0x00a01ec4 },
	{ .start = 0x00a01ed0, .end = 0x00a01ee0 },
	{ .start = 0x00a01f00, .end = 0x00a01f1c },
	{ .start = 0x00a01f44, .end = 0x00a01ffc },
	{ .start = 0x00a02000, .end = 0x00a02048 },
	{ .start = 0x00a02068, .end = 0x00a020f0 },
	{ .start = 0x00a02100, .end = 0x00a02118 },
	{ .start = 0x00a02140, .end = 0x00a0214c },
	{ .start = 0x00a02168, .end = 0x00a0218c },
	{ .start = 0x00a021c0, .end = 0x00a021c0 },
	{ .start = 0x00a02400, .end = 0x00a02410 },
	{ .start = 0x00a02418, .end = 0x00a02420 },
	{ .start = 0x00a02428, .end = 0x00a0242c },
	{ .start = 0x00a02434, .end = 0x00a02434 },
	{ .start = 0x00a02440, .end = 0x00a02460 },
	{ .start = 0x00a02468, .end = 0x00a024b0 },
	{ .start = 0x00a024c8, .end = 0x00a024cc },
	{ .start = 0x00a02500, .end = 0x00a02504 },
	{ .start = 0x00a0250c, .end = 0x00a02510 },
	{ .start = 0x00a02540, .end = 0x00a02554 },
	{ .start = 0x00a02580, .end = 0x00a025f4 },
	{ .start = 0x00a02600, .end = 0x00a0260c },
	{ .start = 0x00a02648, .end = 0x00a02650 },
	{ .start = 0x00a02680, .end = 0x00a02680 },
	{ .start = 0x00a026c0, .end = 0x00a026d0 },
	{ .start = 0x00a02700, .end = 0x00a0270c },
	{ .start = 0x00a02804, .end = 0x00a02804 },
	{ .start = 0x00a02818, .end = 0x00a0281c },
	{ .start = 0x00a02c00, .end = 0x00a02db4 },
	{ .start = 0x00a02df4, .end = 0x00a02fb0 },
	{ .start = 0x00a03000, .end = 0x00a03014 },
	{ .start = 0x00a0301c, .end = 0x00a0302c },
	{ .start = 0x00a03034, .end = 0x00a03038 },
	{ .start = 0x00a03040, .end = 0x00a03048 },
	{ .start = 0x00a03060, .end = 0x00a03068 },
	{ .start = 0x00a03070, .end = 0x00a03074 },
	{ .start = 0x00a0307c, .end = 0x00a0307c },
	{ .start = 0x00a03080, .end = 0x00a03084 },
	{ .start = 0x00a0308c, .end = 0x00a03090 },
	{ .start = 0x00a03098, .end = 0x00a03098 },
	{ .start = 0x00a030a0, .end = 0x00a030a0 },
	{ .start = 0x00a030a8, .end = 0x00a030b4 },
	{ .start = 0x00a030bc, .end = 0x00a030bc },
	{ .start = 0x00a030c0, .end = 0x00a0312c },
	{ .start = 0x00a03c00, .end = 0x00a03c5c },
	{ .start = 0x00a04400, .end = 0x00a04454 },
	{ .start = 0x00a04460, .end = 0x00a04474 },
	{ .start = 0x00a044c0, .end = 0x00a044ec },
	{ .start = 0x00a04500, .end = 0x00a04504 },
	{ .start = 0x00a04510, .end = 0x00a04538 },
	{ .start = 0x00a04540, .end = 0x00a04548 },
	{ .start = 0x00a04560, .end = 0x00a0457c },
	{ .start = 0x00a04590, .end = 0x00a04598 },
	{ .start = 0x00a045c0, .end = 0x00a045f4 },
};
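/*
 * Each range above is inclusive on both ends, hence the "+ 4" when sizing a
 * chunk below. For example, the range 0x00a0000c..0x00a00024 covers
 * (0x24 - 0x0c) + 4 = 0x1c bytes, i.e. seven 32-bit periphery registers.
 */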
static u32 iwl_trans_pcie_dump_prph(struct iwl_trans *trans,
				    struct iwl_fw_error_dump_data **data)
{
	struct iwl_fw_error_dump_prph *prph;
	unsigned long flags;
	u32 prph_len = 0, i;

	if (!iwl_trans_grab_nic_access(trans, false, &flags))
		return 0;

	for (i = 0; i < ARRAY_SIZE(iwl_prph_dump_addr); i++) {
		/* The range includes both boundaries */
		int num_bytes_in_chunk = iwl_prph_dump_addr[i].end -
			iwl_prph_dump_addr[i].start + 4;
		int reg;
		__le32 *val;

		prph_len += sizeof(**data) + sizeof(*prph) + num_bytes_in_chunk;

		(*data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_PRPH);
		(*data)->len = cpu_to_le32(sizeof(*prph) +
					   num_bytes_in_chunk);
		prph = (void *)(*data)->data;
		prph->prph_start = cpu_to_le32(iwl_prph_dump_addr[i].start);
		val = (void *)prph->data;

		for (reg = iwl_prph_dump_addr[i].start;
		     reg <= iwl_prph_dump_addr[i].end;
		     reg += 4)
			*val++ = cpu_to_le32(iwl_trans_pcie_read_prph(trans,
								      reg));

		*data = iwl_fw_error_next_data(*data);
	}

	iwl_trans_release_nic_access(trans, &flags);

	return prph_len;
}
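/*
 * The dump is a chain of (type, len, data[]) records: iwl_fw_error_next_data()
 * advances *data past the current record's header and payload, so each helper
 * fills one or more records in place and leaves *data at the next free slot.
 * A consumer would walk it the same way, roughly (a sketch, not driver code):
 *
 *	while ((u8 *)data < (u8 *)dump_data->data + dump_data->len) {
 *		handle(le32_to_cpu(data->type), data->data,
 *		       le32_to_cpu(data->len));
 *		data = iwl_fw_error_next_data(data);
 *	}
 */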
static u32 iwl_trans_pcie_dump_rbs(struct iwl_trans *trans,
				   struct iwl_fw_error_dump_data **data,
				   int allocated_rb_nums)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int max_len = PAGE_SIZE << trans_pcie->rx_page_order;
	struct iwl_rxq *rxq = &trans_pcie->rxq;
	u32 i, r, j, rb_len = 0;

	spin_lock(&rxq->lock);

	r = le16_to_cpu(ACCESS_ONCE(rxq->rb_stts->closed_rb_num)) & 0x0FFF;

	for (i = rxq->read, j = 0;
	     i != r && j < allocated_rb_nums;
	     i = (i + 1) & RX_QUEUE_MASK, j++) {
		struct iwl_rx_mem_buffer *rxb = rxq->queue[i];
		struct iwl_fw_error_dump_rb *rb;

		dma_unmap_page(trans->dev, rxb->page_dma, max_len,
			       DMA_FROM_DEVICE);

		rb_len += sizeof(**data) + sizeof(*rb) + max_len;

		(*data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_RB);
		(*data)->len = cpu_to_le32(sizeof(*rb) + max_len);
		rb = (void *)(*data)->data;
		rb->index = cpu_to_le32(i);
		memcpy(rb->data, page_address(rxb->page), max_len);

		/* remap the page so it can be handed back to the device */
		rxb->page_dma = dma_map_page(trans->dev, rxb->page, 0,
					     max_len,
					     DMA_FROM_DEVICE);

		*data = iwl_fw_error_next_data(*data);
	}

	spin_unlock(&rxq->lock);

	return rb_len;
}
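/*
 * The loop above walks the RX ring from the driver's read pointer up to the
 * device's closed_rb_num, with wraparound handled by the power-of-two mask:
 * e.g. with a 256-entry ring (RX_QUEUE_MASK == 0xFF), read == 250 and
 * closed_rb_num == 4, it dumps entries 250..255 and then 0..3.
 */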
#define IWL_CSR_TO_DUMP (0x250)

static u32 iwl_trans_pcie_dump_csr(struct iwl_trans *trans,
				   struct iwl_fw_error_dump_data **data)
{
	u32 csr_len = sizeof(**data) + IWL_CSR_TO_DUMP;
	__le32 *val;
	int i;

	(*data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_CSR);
	(*data)->len = cpu_to_le32(IWL_CSR_TO_DUMP);
	val = (void *)(*data)->data;

	for (i = 0; i < IWL_CSR_TO_DUMP; i += 4)
		*val++ = cpu_to_le32(iwl_trans_pcie_read32(trans, i));

	*data = iwl_fw_error_next_data(*data);

	return csr_len;
}
static u32 iwl_trans_pcie_fh_regs_dump(struct iwl_trans *trans,
				       struct iwl_fw_error_dump_data **data)
{
	u32 fh_regs_len = FH_MEM_UPPER_BOUND - FH_MEM_LOWER_BOUND;
	unsigned long flags;
	__le32 *val;
	int i;

	if (!iwl_trans_grab_nic_access(trans, false, &flags))
		return 0;

	(*data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_FH_REGS);
	(*data)->len = cpu_to_le32(fh_regs_len);
	val = (void *)(*data)->data;

	for (i = FH_MEM_LOWER_BOUND; i < FH_MEM_UPPER_BOUND; i += sizeof(u32))
		*val++ = cpu_to_le32(iwl_trans_pcie_read32(trans, i));

	iwl_trans_release_nic_access(trans, &flags);

	*data = iwl_fw_error_next_data(*data);

	return sizeof(**data) + fh_regs_len;
}
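/*
 * MARBH mode: the monitor buffer lives on the device and is read back one
 * dword at a time through a pair of periphery registers. Writing 1 to
 * MON_DMARB_RD_CTL_ADDR arms the read logic, each subsequent read of
 * MON_DMARB_RD_DATA_ADDR returns the next buffered dword, and writing 0
 * disarms it again, which is exactly what the function below does.
 */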
static u32
iwl_trans_pci_dump_marbh_monitor(struct iwl_trans *trans,
				 struct iwl_fw_error_dump_fw_mon *fw_mon_data,
				 u32 monitor_len)
{
	u32 buf_size_in_dwords = (monitor_len >> 2);
	u32 *buffer = (u32 *)fw_mon_data->data;
	unsigned long flags;
	u32 i;

	if (!iwl_trans_grab_nic_access(trans, false, &flags))
		return 0;

	__iwl_write_prph(trans, MON_DMARB_RD_CTL_ADDR, 0x1);
	for (i = 0; i < buf_size_in_dwords; i++)
		buffer[i] = __iwl_read_prph(trans, MON_DMARB_RD_DATA_ADDR);
	__iwl_write_prph(trans, MON_DMARB_RD_CTL_ADDR, 0x0);

	iwl_trans_release_nic_access(trans, &flags);

	return monitor_len;
}
static
struct iwl_trans_dump_data *iwl_trans_pcie_dump_data(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_fw_error_dump_data *data;
	struct iwl_txq *cmdq = &trans_pcie->txq[trans_pcie->cmd_queue];
	struct iwl_fw_error_dump_txcmd *txcmd;
	struct iwl_trans_dump_data *dump_data;
	u32 len, num_rbs = 0;
	u32 monitor_len;
	int i, ptr;
	bool dump_rbs = test_bit(STATUS_FW_ERROR, &trans->status);

	/* transport dump header */
	len = sizeof(*dump_data);

	/* host commands */
	len += sizeof(*data) +
		cmdq->q.n_window * (sizeof(*txcmd) + TFD_MAX_PAYLOAD_SIZE);

	/* CSR registers */
	len += sizeof(*data) + IWL_CSR_TO_DUMP;

	/* PRPH registers */
	for (i = 0; i < ARRAY_SIZE(iwl_prph_dump_addr); i++) {
		/* The range includes both boundaries */
		int num_bytes_in_chunk = iwl_prph_dump_addr[i].end -
			iwl_prph_dump_addr[i].start + 4;

		len += sizeof(*data) + sizeof(struct iwl_fw_error_dump_prph) +
		       num_bytes_in_chunk;
	}

	/* FH registers */
	len += sizeof(*data) + (FH_MEM_UPPER_BOUND - FH_MEM_LOWER_BOUND);

	if (dump_rbs) {
		/* RBs */
		num_rbs = le16_to_cpu(ACCESS_ONCE(
				      trans_pcie->rxq.rb_stts->closed_rb_num))
				      & 0x0FFF;
		num_rbs = (num_rbs - trans_pcie->rxq.read) & RX_QUEUE_MASK;
		len += num_rbs * (sizeof(*data) +
				  sizeof(struct iwl_fw_error_dump_rb) +
				  (PAGE_SIZE << trans_pcie->rx_page_order));
	}
	/* FW monitor */
	if (trans_pcie->fw_mon_page) {
		len += sizeof(*data) + sizeof(struct iwl_fw_error_dump_fw_mon) +
		       trans_pcie->fw_mon_size;
		monitor_len = trans_pcie->fw_mon_size;
	} else if (trans->dbg_dest_tlv) {
		u32 base, end;

		base = le32_to_cpu(trans->dbg_dest_tlv->base_reg);
		end = le32_to_cpu(trans->dbg_dest_tlv->end_reg);

		base = iwl_read_prph(trans, base) <<
		       trans->dbg_dest_tlv->base_shift;
		end = iwl_read_prph(trans, end) <<
		      trans->dbg_dest_tlv->end_shift;

		/* Make "end" point to the actual end */
		if (trans->cfg->device_family == IWL_DEVICE_FAMILY_8000 ||
		    trans->dbg_dest_tlv->monitor_mode == MARBH_MODE)
			end += (1 << trans->dbg_dest_tlv->end_shift);
		monitor_len = end - base;
		len += sizeof(*data) + sizeof(struct iwl_fw_error_dump_fw_mon) +
		       monitor_len;
	} else {
		monitor_len = 0;
	}

	dump_data = vzalloc(len);
	if (!dump_data)
		return NULL;
	len = 0;
	data = (void *)dump_data->data;
	data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_TXCMD);
	txcmd = (void *)data->data;
	spin_lock_bh(&cmdq->lock);
	ptr = cmdq->q.write_ptr;
	for (i = 0; i < cmdq->q.n_window; i++) {
		u8 idx = get_cmd_index(&cmdq->q, ptr);
		u32 caplen, cmdlen;

		cmdlen = iwl_trans_pcie_get_cmdlen(&cmdq->tfds[ptr]);
		caplen = min_t(u32, TFD_MAX_PAYLOAD_SIZE, cmdlen);

		if (cmdlen) {
			len += sizeof(*txcmd) + caplen;
			txcmd->cmdlen = cpu_to_le32(cmdlen);
			txcmd->caplen = cpu_to_le32(caplen);
			memcpy(txcmd->data, cmdq->entries[idx].cmd, caplen);
			txcmd = (void *)((u8 *)txcmd->data + caplen);
		}

		ptr = iwl_queue_dec_wrap(ptr);
	}
	spin_unlock_bh(&cmdq->lock);

	data->len = cpu_to_le32(len);
	len += sizeof(*data);
	data = iwl_fw_error_next_data(data);

	len += iwl_trans_pcie_dump_prph(trans, &data);
	len += iwl_trans_pcie_dump_csr(trans, &data);
	len += iwl_trans_pcie_fh_regs_dump(trans, &data);
	if (dump_rbs)
		len += iwl_trans_pcie_dump_rbs(trans, &data, num_rbs);
	/* data is already pointing to the next section */
	if ((trans_pcie->fw_mon_page &&
	     trans->cfg->device_family == IWL_DEVICE_FAMILY_7000) ||
	    trans->dbg_dest_tlv) {
		struct iwl_fw_error_dump_fw_mon *fw_mon_data;
		u32 base, write_ptr, wrap_cnt;

		/* If there was a dest TLV - use the values from there */
		if (trans->dbg_dest_tlv) {
			write_ptr =
				le32_to_cpu(trans->dbg_dest_tlv->write_ptr_reg);
			wrap_cnt = le32_to_cpu(trans->dbg_dest_tlv->wrap_count);
			base = le32_to_cpu(trans->dbg_dest_tlv->base_reg);
		} else {
			base = MON_BUFF_BASE_ADDR;
			write_ptr = MON_BUFF_WRPTR;
			wrap_cnt = MON_BUFF_CYCLE_CNT;
		}

		data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_FW_MONITOR);
		fw_mon_data = (void *)data->data;
		fw_mon_data->fw_mon_wr_ptr =
			cpu_to_le32(iwl_read_prph(trans, write_ptr));
		fw_mon_data->fw_mon_cycle_cnt =
			cpu_to_le32(iwl_read_prph(trans, wrap_cnt));
		fw_mon_data->fw_mon_base_ptr =
			cpu_to_le32(iwl_read_prph(trans, base));

		len += sizeof(*data) + sizeof(*fw_mon_data);
		if (trans_pcie->fw_mon_page) {
			/*
			 * The firmware is now asserted, it won't write
			 * anything to the buffer. The CPU can take ownership
			 * to fetch the data. The buffer will be handed back
			 * to the device before the firmware is restarted.
			 */
			dma_sync_single_for_cpu(trans->dev,
						trans_pcie->fw_mon_phys,
						trans_pcie->fw_mon_size,
						DMA_FROM_DEVICE);
			memcpy(fw_mon_data->data,
			       page_address(trans_pcie->fw_mon_page),
			       trans_pcie->fw_mon_size);

			monitor_len = trans_pcie->fw_mon_size;
		} else if (trans->dbg_dest_tlv->monitor_mode == SMEM_MODE) {
			/*
			 * Update pointers to reflect actual values after
			 * shifting
			 */
			base = iwl_read_prph(trans, base) <<
			       trans->dbg_dest_tlv->base_shift;
			iwl_trans_read_mem(trans, base, fw_mon_data->data,
					   monitor_len / sizeof(u32));
		} else if (trans->dbg_dest_tlv->monitor_mode == MARBH_MODE) {
			monitor_len =
				iwl_trans_pci_dump_marbh_monitor(trans,
								 fw_mon_data,
								 monitor_len);
		} else {
			/* Didn't match anything - output no monitor data */
			monitor_len = 0;
		}

		len += monitor_len;
		data->len = cpu_to_le32(monitor_len + sizeof(*fw_mon_data));
	}

	dump_data->len = len;

	return dump_data;
}
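/*
 * Resulting record order in the dump produced above: TXCMD (the command
 * queue payloads), then the PRPH, CSR and FH register blocks, then the RBs
 * when the firmware crashed, and finally the firmware monitor, if one exists.
 */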
static const struct iwl_trans_ops trans_ops_pcie = {
	.start_hw = iwl_trans_pcie_start_hw,
	.op_mode_leave = iwl_trans_pcie_op_mode_leave,
	.fw_alive = iwl_trans_pcie_fw_alive,
	.start_fw = iwl_trans_pcie_start_fw,
	.stop_device = iwl_trans_pcie_stop_device,

	.d3_suspend = iwl_trans_pcie_d3_suspend,
	.d3_resume = iwl_trans_pcie_d3_resume,

	.send_cmd = iwl_trans_pcie_send_hcmd,

	.tx = iwl_trans_pcie_tx,
	.reclaim = iwl_trans_pcie_reclaim,

	.txq_disable = iwl_trans_pcie_txq_disable,
	.txq_enable = iwl_trans_pcie_txq_enable,

	.dbgfs_register = iwl_trans_pcie_dbgfs_register,

	.wait_tx_queue_empty = iwl_trans_pcie_wait_txq_empty,
	.freeze_txq_timer = iwl_trans_pcie_freeze_txq_timer,

	.write8 = iwl_trans_pcie_write8,
	.write32 = iwl_trans_pcie_write32,
	.read32 = iwl_trans_pcie_read32,
	.read_prph = iwl_trans_pcie_read_prph,
	.write_prph = iwl_trans_pcie_write_prph,
	.read_mem = iwl_trans_pcie_read_mem,
	.write_mem = iwl_trans_pcie_write_mem,
	.configure = iwl_trans_pcie_configure,
	.set_pmi = iwl_trans_pcie_set_pmi,
	.grab_nic_access = iwl_trans_pcie_grab_nic_access,
	.release_nic_access = iwl_trans_pcie_release_nic_access,
	.set_bits_mask = iwl_trans_pcie_set_bits_mask,

	.ref = iwl_trans_pcie_ref,
	.unref = iwl_trans_pcie_unref,

	.dump_data = iwl_trans_pcie_dump_data,
};
struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
				       const struct pci_device_id *ent,
				       const struct iwl_cfg *cfg)
{
	struct iwl_trans_pcie *trans_pcie;
	struct iwl_trans *trans;
	u16 pci_cmd;
	int err;

	trans = iwl_trans_alloc(sizeof(struct iwl_trans_pcie),
				&pdev->dev, cfg, &trans_ops_pcie, 0);
	if (!trans)
		return ERR_PTR(-ENOMEM);

	trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	trans_pcie->trans = trans;
	spin_lock_init(&trans_pcie->irq_lock);
	spin_lock_init(&trans_pcie->reg_lock);
	spin_lock_init(&trans_pcie->ref_lock);
	mutex_init(&trans_pcie->mutex);
	init_waitqueue_head(&trans_pcie->ucode_write_waitq);

	err = pci_enable_device(pdev);
	if (err)
		goto out_no_pci;

	if (!cfg->base_params->pcie_l1_allowed) {
		/*
		 * W/A - seems to solve weird behavior. We need to remove this
		 * if we don't want to stay in L1 all the time. This wastes a
		 * lot of power.
		 */
		pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S |
				       PCIE_LINK_STATE_L1 |
				       PCIE_LINK_STATE_CLKPM);
	}

	pci_set_master(pdev);

	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(36));
	if (!err)
		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(36));
	if (err) {
		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (!err)
			err = pci_set_consistent_dma_mask(pdev,
							  DMA_BIT_MASK(32));
		/* both attempts failed: */
		if (err) {
			dev_err(&pdev->dev, "No suitable DMA available\n");
			goto out_pci_disable_device;
		}
	}
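	/*
	 * DMA_BIT_MASK(n) is ((1ULL << n) - 1): the 36-bit mask above lets
	 * the device address the low 64 GiB of memory; if the platform
	 * cannot honor that, the code falls back to a 32-bit (4 GiB) mask
	 * before giving up.
	 */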
	err = pci_request_regions(pdev, DRV_NAME);
	if (err) {
		dev_err(&pdev->dev, "pci_request_regions failed\n");
		goto out_pci_disable_device;
	}

	trans_pcie->hw_base = pci_ioremap_bar(pdev, 0);
	if (!trans_pcie->hw_base) {
		dev_err(&pdev->dev, "pci_ioremap_bar failed\n");
		err = -ENODEV;
		goto out_pci_release_regions;
	}

	/*
	 * We disable the RETRY_TIMEOUT register (0x41) to keep
	 * PCI Tx retries from interfering with C3 CPU state.
	 */
	pci_write_config_byte(pdev, PCI_CFG_RETRY_TIMEOUT, 0x00);

	trans->dev = &pdev->dev;
	trans_pcie->pci_dev = pdev;
	iwl_disable_interrupts(trans);

	err = pci_enable_msi(pdev);
	if (err) {
		dev_err(&pdev->dev, "pci_enable_msi failed (0x%x)\n", err);
		/* enable rfkill interrupt: hw bug w/a */
		pci_read_config_word(pdev, PCI_COMMAND, &pci_cmd);
		if (pci_cmd & PCI_COMMAND_INTX_DISABLE) {
			pci_cmd &= ~PCI_COMMAND_INTX_DISABLE;
			pci_write_config_word(pdev, PCI_COMMAND, pci_cmd);
		}
	}
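	/*
	 * If MSI could not be enabled, the driver falls back to legacy INTx
	 * interrupts; clearing PCI_COMMAND_INTX_DISABLE above re-enables INTx
	 * assertion so that at least the rfkill interrupt can still be
	 * delivered.
	 */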
	trans->hw_rev = iwl_read32(trans, CSR_HW_REV);
	/*
	 * In the 8000 HW family the format of the 4 bytes of CSR_HW_REV has
	 * changed, and the revision step now also includes bits 0-1 (there
	 * is no more "dash" value). To keep hw_rev backwards compatible, we
	 * store it in the old format.
	 */
	if (trans->cfg->device_family == IWL_DEVICE_FAMILY_8000) {
		unsigned long flags;
		int ret;

		trans->hw_rev = (trans->hw_rev & 0xfff0) |
				(CSR_HW_REV_STEP(trans->hw_rev << 2) << 2);
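		/*
		 * Assuming CSR_HW_REV_STEP() extracts bits 2-3 of its
		 * argument, the statement above moves the step value from
		 * bits 0-1 into bits 2-3, where the older families keep it:
		 * e.g. a raw hw_rev of 0x0122 becomes 0x0128 (step 2).
		 */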
		/*
		 * In order to recognize a C step, the driver should read the
		 * chip version id located in the AUX bus MISC address space.
		 */
		iwl_set_bit(trans, CSR_GP_CNTRL,
			    CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
		udelay(2);

		ret = iwl_poll_bit(trans, CSR_GP_CNTRL,
				   CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
				   CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
				   25000);
		if (ret < 0) {
			IWL_DEBUG_INFO(trans, "Failed to wake up the nic\n");
			err = ret;
			goto out_pci_disable_msi;
		}

		if (iwl_trans_grab_nic_access(trans, false, &flags)) {
			u32 hw_step;

			hw_step = __iwl_read_prph(trans, WFPM_CTRL_REG);
			hw_step |= ENABLE_WFPM;
			__iwl_write_prph(trans, WFPM_CTRL_REG, hw_step);
			hw_step = __iwl_read_prph(trans, AUX_MISC_REG);
			hw_step = (hw_step >> HW_STEP_LOCATION_BITS) & 0xF;
			if (hw_step == 0x3)
				trans->hw_rev = (trans->hw_rev & 0xFFFFFFF3) |
						(SILICON_C_STEP << 2);
			iwl_trans_release_nic_access(trans, &flags);
		}
	}
	trans->hw_id = (pdev->device << 16) + pdev->subsystem_device;
	snprintf(trans->hw_id_str, sizeof(trans->hw_id_str),
		 "PCI ID: 0x%04X:0x%04X", pdev->device, pdev->subsystem_device);

	/* Initialize the wait queue for commands */
	init_waitqueue_head(&trans_pcie->wait_command_queue);

	err = iwl_pcie_alloc_ict(trans);
	if (err)
		goto out_pci_disable_msi;

	err = request_threaded_irq(pdev->irq, iwl_pcie_isr,
				   iwl_pcie_irq_handler,
				   IRQF_SHARED, DRV_NAME, trans);
	if (err) {
		IWL_ERR(trans, "Error allocating IRQ %d\n", pdev->irq);
		goto out_free_ict;
	}

	trans_pcie->inta_mask = CSR_INI_SET_MASK;
	trans->d0i3_mode = IWL_D0I3_MODE_ON_SUSPEND;

	return trans;

out_free_ict:
	iwl_pcie_free_ict(trans);
out_pci_disable_msi:
	pci_disable_msi(pdev);
out_pci_release_regions:
	pci_release_regions(pdev);
out_pci_disable_device:
	pci_disable_device(pdev);
out_no_pci:
	iwl_trans_free(trans);
	return ERR_PTR(err);
}