2 * PCIe host controller driver for Tegra SoCs
4 * Copyright (c) 2010, CompuLab, Ltd.
5 * Author: Mike Rapoport <mike@compulab.co.il>
7 * Based on NVIDIA PCIe driver
8 * Copyright (c) 2008-2009, NVIDIA Corporation.
10 * Bits taken from arch/arm/mach-dove/pcie.c
12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of the GNU General Public License as published by
14 * the Free Software Foundation; either version 2 of the License, or
15 * (at your option) any later version.
17 * This program is distributed in the hope that it will be useful, but WITHOUT
18 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
19 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
22 * You should have received a copy of the GNU General Public License along
23 * with this program; if not, write to the Free Software Foundation, Inc.,
24 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
27 #include <linux/clk.h>
28 #include <linux/debugfs.h>
29 #include <linux/delay.h>
30 #include <linux/export.h>
31 #include <linux/interrupt.h>
32 #include <linux/irq.h>
33 #include <linux/irqdomain.h>
34 #include <linux/kernel.h>
35 #include <linux/module.h>
36 #include <linux/msi.h>
37 #include <linux/of_address.h>
38 #include <linux/of_pci.h>
39 #include <linux/of_platform.h>
40 #include <linux/pci.h>
41 #include <linux/phy/phy.h>
42 #include <linux/platform_device.h>
43 #include <linux/reset.h>
44 #include <linux/sizes.h>
45 #include <linux/slab.h>
46 #include <linux/vmalloc.h>
47 #include <linux/regulator/consumer.h>
49 #include <soc/tegra/cpuidle.h>
50 #include <soc/tegra/pmc.h>
52 #include <asm/mach/irq.h>
53 #include <asm/mach/map.h>
54 #include <asm/mach/pci.h>
56 #define INT_PCI_MSI_NR (8 * 32)
58 /* register definitions */
60 #define AFI_AXI_BAR0_SZ 0x00
61 #define AFI_AXI_BAR1_SZ 0x04
62 #define AFI_AXI_BAR2_SZ 0x08
63 #define AFI_AXI_BAR3_SZ 0x0c
64 #define AFI_AXI_BAR4_SZ 0x10
65 #define AFI_AXI_BAR5_SZ 0x14
67 #define AFI_AXI_BAR0_START 0x18
68 #define AFI_AXI_BAR1_START 0x1c
69 #define AFI_AXI_BAR2_START 0x20
70 #define AFI_AXI_BAR3_START 0x24
71 #define AFI_AXI_BAR4_START 0x28
72 #define AFI_AXI_BAR5_START 0x2c
74 #define AFI_FPCI_BAR0 0x30
75 #define AFI_FPCI_BAR1 0x34
76 #define AFI_FPCI_BAR2 0x38
77 #define AFI_FPCI_BAR3 0x3c
78 #define AFI_FPCI_BAR4 0x40
79 #define AFI_FPCI_BAR5 0x44
81 #define AFI_CACHE_BAR0_SZ 0x48
82 #define AFI_CACHE_BAR0_ST 0x4c
83 #define AFI_CACHE_BAR1_SZ 0x50
84 #define AFI_CACHE_BAR1_ST 0x54
86 #define AFI_MSI_BAR_SZ 0x60
87 #define AFI_MSI_FPCI_BAR_ST 0x64
88 #define AFI_MSI_AXI_BAR_ST 0x68
90 #define AFI_MSI_VEC0 0x6c
91 #define AFI_MSI_VEC1 0x70
92 #define AFI_MSI_VEC2 0x74
93 #define AFI_MSI_VEC3 0x78
94 #define AFI_MSI_VEC4 0x7c
95 #define AFI_MSI_VEC5 0x80
96 #define AFI_MSI_VEC6 0x84
97 #define AFI_MSI_VEC7 0x88
99 #define AFI_MSI_EN_VEC0 0x8c
100 #define AFI_MSI_EN_VEC1 0x90
101 #define AFI_MSI_EN_VEC2 0x94
102 #define AFI_MSI_EN_VEC3 0x98
103 #define AFI_MSI_EN_VEC4 0x9c
104 #define AFI_MSI_EN_VEC5 0xa0
105 #define AFI_MSI_EN_VEC6 0xa4
106 #define AFI_MSI_EN_VEC7 0xa8
108 #define AFI_CONFIGURATION 0xac
109 #define AFI_CONFIGURATION_EN_FPCI (1 << 0)
111 #define AFI_FPCI_ERROR_MASKS 0xb0
113 #define AFI_INTR_MASK 0xb4
114 #define AFI_INTR_MASK_INT_MASK (1 << 0)
115 #define AFI_INTR_MASK_MSI_MASK (1 << 8)
117 #define AFI_INTR_CODE 0xb8
118 #define AFI_INTR_CODE_MASK 0xf
119 #define AFI_INTR_INI_SLAVE_ERROR 1
120 #define AFI_INTR_INI_DECODE_ERROR 2
121 #define AFI_INTR_TARGET_ABORT 3
122 #define AFI_INTR_MASTER_ABORT 4
123 #define AFI_INTR_INVALID_WRITE 5
124 #define AFI_INTR_LEGACY 6
125 #define AFI_INTR_FPCI_DECODE_ERROR 7
126 #define AFI_INTR_AXI_DECODE_ERROR 8
127 #define AFI_INTR_FPCI_TIMEOUT 9
128 #define AFI_INTR_PE_PRSNT_SENSE 10
129 #define AFI_INTR_PE_CLKREQ_SENSE 11
130 #define AFI_INTR_CLKCLAMP_SENSE 12
131 #define AFI_INTR_RDY4PD_SENSE 13
132 #define AFI_INTR_P2P_ERROR 14
134 #define AFI_INTR_SIGNATURE 0xbc
135 #define AFI_UPPER_FPCI_ADDRESS 0xc0
136 #define AFI_SM_INTR_ENABLE 0xc4
137 #define AFI_SM_INTR_INTA_ASSERT (1 << 0)
138 #define AFI_SM_INTR_INTB_ASSERT (1 << 1)
139 #define AFI_SM_INTR_INTC_ASSERT (1 << 2)
140 #define AFI_SM_INTR_INTD_ASSERT (1 << 3)
141 #define AFI_SM_INTR_INTA_DEASSERT (1 << 4)
142 #define AFI_SM_INTR_INTB_DEASSERT (1 << 5)
143 #define AFI_SM_INTR_INTC_DEASSERT (1 << 6)
144 #define AFI_SM_INTR_INTD_DEASSERT (1 << 7)
146 #define AFI_AFI_INTR_ENABLE 0xc8
147 #define AFI_INTR_EN_INI_SLVERR (1 << 0)
148 #define AFI_INTR_EN_INI_DECERR (1 << 1)
149 #define AFI_INTR_EN_TGT_SLVERR (1 << 2)
150 #define AFI_INTR_EN_TGT_DECERR (1 << 3)
151 #define AFI_INTR_EN_TGT_WRERR (1 << 4)
152 #define AFI_INTR_EN_DFPCI_DECERR (1 << 5)
153 #define AFI_INTR_EN_AXI_DECERR (1 << 6)
154 #define AFI_INTR_EN_FPCI_TIMEOUT (1 << 7)
155 #define AFI_INTR_EN_PRSNT_SENSE (1 << 8)
157 #define AFI_PCIE_CONFIG 0x0f8
158 #define AFI_PCIE_CONFIG_PCIE_DISABLE(x) (1 << ((x) + 1))
159 #define AFI_PCIE_CONFIG_PCIE_DISABLE_ALL 0xe
160 #define AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_MASK (0xf << 20)
161 #define AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_SINGLE (0x0 << 20)
162 #define AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_420 (0x0 << 20)
163 #define AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_X2_X1 (0x0 << 20)
164 #define AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_DUAL (0x1 << 20)
165 #define AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_222 (0x1 << 20)
166 #define AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_X4_X1 (0x1 << 20)
167 #define AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_411 (0x2 << 20)
169 #define AFI_FUSE 0x104
170 #define AFI_FUSE_PCIE_T0_GEN2_DIS (1 << 2)
172 #define AFI_PEX0_CTRL 0x110
173 #define AFI_PEX1_CTRL 0x118
174 #define AFI_PEX2_CTRL 0x128
175 #define AFI_PEX_CTRL_RST (1 << 0)
176 #define AFI_PEX_CTRL_CLKREQ_EN (1 << 1)
177 #define AFI_PEX_CTRL_REFCLK_EN (1 << 3)
178 #define AFI_PEX_CTRL_OVERRIDE_EN (1 << 4)
180 #define AFI_PLLE_CONTROL 0x160
181 #define AFI_PLLE_CONTROL_BYPASS_PADS2PLLE_CONTROL (1 << 9)
182 #define AFI_PLLE_CONTROL_PADS2PLLE_CONTROL_EN (1 << 1)
184 #define AFI_PEXBIAS_CTRL_0 0x168
186 #define RP_VEND_XP 0x00000F00
187 #define RP_VEND_XP_DL_UP (1 << 30)
189 #define RP_PRIV_MISC 0x00000FE0
190 #define RP_PRIV_MISC_PRSNT_MAP_EP_PRSNT (0xE << 0)
191 #define RP_PRIV_MISC_PRSNT_MAP_EP_ABSNT (0xF << 0)
193 #define RP_LINK_CONTROL_STATUS 0x00000090
194 #define RP_LINK_CONTROL_STATUS_DL_LINK_ACTIVE 0x20000000
195 #define RP_LINK_CONTROL_STATUS_LINKSTAT_MASK 0x3fff0000
197 #define PADS_CTL_SEL 0x0000009C
199 #define PADS_CTL 0x000000A0
200 #define PADS_CTL_IDDQ_1L (1 << 0)
201 #define PADS_CTL_TX_DATA_EN_1L (1 << 6)
202 #define PADS_CTL_RX_DATA_EN_1L (1 << 10)
204 #define PADS_PLL_CTL_TEGRA20 0x000000B8
205 #define PADS_PLL_CTL_TEGRA30 0x000000B4
206 #define PADS_PLL_CTL_RST_B4SM (1 << 1)
207 #define PADS_PLL_CTL_LOCKDET (1 << 8)
208 #define PADS_PLL_CTL_REFCLK_MASK (0x3 << 16)
209 #define PADS_PLL_CTL_REFCLK_INTERNAL_CML (0 << 16)
210 #define PADS_PLL_CTL_REFCLK_INTERNAL_CMOS (1 << 16)
211 #define PADS_PLL_CTL_REFCLK_EXTERNAL (2 << 16)
212 #define PADS_PLL_CTL_TXCLKREF_MASK (0x1 << 20)
213 #define PADS_PLL_CTL_TXCLKREF_DIV10 (0 << 20)
214 #define PADS_PLL_CTL_TXCLKREF_DIV5 (1 << 20)
215 #define PADS_PLL_CTL_TXCLKREF_BUF_EN (1 << 22)
217 #define PADS_REFCLK_CFG0 0x000000C8
218 #define PADS_REFCLK_CFG1 0x000000CC
219 #define PADS_REFCLK_BIAS 0x000000D0
222 * Fields in PADS_REFCLK_CFG*. Those registers form an array of 16-bit
223 * entries, one entry per PCIe port. These field definitions and desired
224 * values aren't in the TRM, but do come from NVIDIA.
226 #define PADS_REFCLK_CFG_TERM_SHIFT 2 /* 6:2 */
227 #define PADS_REFCLK_CFG_E_TERM_SHIFT 7
228 #define PADS_REFCLK_CFG_PREDI_SHIFT 8 /* 11:8 */
229 #define PADS_REFCLK_CFG_DRVI_SHIFT 12 /* 15:12 */
231 /* Default value provided by HW engineering is 0xfa5c */
232 #define PADS_REFCLK_CFG_VALUE \
234 (0x17 << PADS_REFCLK_CFG_TERM_SHIFT) | \
235 (0 << PADS_REFCLK_CFG_E_TERM_SHIFT) | \
236 (0xa << PADS_REFCLK_CFG_PREDI_SHIFT) | \
237 (0xf << PADS_REFCLK_CFG_DRVI_SHIFT) \
/*
 * Per-controller MSI state. NOTE(review): the enclosing
 * "struct tegra_msi {" line and trailing members are not visible in
 * this extract.
 */
241 struct msi_chip chip;
/* one bit per MSI vector; set = vector currently allocated */
242 DECLARE_BITMAP(used, INT_PCI_MSI_NR);
/* linear IRQ domain translating MSI vector numbers to Linux virqs */
243 struct irq_domain *domain;
249 /* used to differentiate between Tegra SoC generations */
250 struct tegra_pcie_soc_data {
251 unsigned int num_ports;
/* per-SoC shift applied to the MSI base — usage not visible here */
252 unsigned int msi_base_shift;
/* feature flags: which control knobs exist on this generation */
255 bool has_pex_clkreq_en;
256 bool has_pex_bias_ctrl;
257 bool has_intr_prsnt_sense;
262 static inline struct tegra_msi *to_tegra_msi(struct msi_chip *chip)
264 return container_of(chip, struct tegra_msi, chip);
/*
 * Main driver state. NOTE(review): the struct header and several
 * members are not visible in this extract.
 */
274 struct list_head buses;
/* host bridge apertures handed to the PCI core */
280 struct resource prefetch;
281 struct resource busn;
/* reset lines for the PEX, AFI and PCIe-X blocks */
288 struct reset_control *pex_rst;
289 struct reset_control *afi_rst;
290 struct reset_control *pcie_xrst;
294 struct tegra_msi msi;
/* root ports on this controller */
296 struct list_head ports;
297 unsigned int num_ports;
/* bulk regulator handles powering the controller */
300 struct regulator_bulk_data *supplies;
301 unsigned int num_supplies;
303 const struct tegra_pcie_soc_data *soc_data;
304 struct dentry *debugfs;
/* One PCIe root port; linked into tegra_pcie.ports. */
307 struct tegra_pcie_port {
308 struct tegra_pcie *pcie;
/* node in tegra_pcie.ports */
309 struct list_head list;
/* physical range of this port's register window */
310 struct resource regs;
/* Per-bus record of the on-demand config space mapping. */
316 struct tegra_pcie_bus {
/* 1 MiB virtual area mapping this bus' configuration space */
317 struct vm_struct *area;
/* node in tegra_pcie.buses */
318 struct list_head list;
322 static inline struct tegra_pcie *sys_to_pcie(struct pci_sys_data *sys)
324 return sys->private_data;
327 static inline void afi_writel(struct tegra_pcie *pcie, u32 value,
328 unsigned long offset)
330 writel(value, pcie->afi + offset);
333 static inline u32 afi_readl(struct tegra_pcie *pcie, unsigned long offset)
335 return readl(pcie->afi + offset);
338 static inline void pads_writel(struct tegra_pcie *pcie, u32 value,
339 unsigned long offset)
341 writel(value, pcie->pads + offset);
344 static inline u32 pads_readl(struct tegra_pcie *pcie, unsigned long offset)
346 return readl(pcie->pads + offset);
350 * The configuration space mapping on Tegra is somewhat similar to the ECAM
351 * defined by PCIe. However it deviates a bit in how the 4 bits for extended
352 * register accesses are mapped:
354 * [27:24] extended register number
356 * [15:11] device number
357 * [10: 8] function number
358 * [ 7: 0] register number
360 * Mapping the whole extended configuration space would require 256 MiB of
361 * virtual address space, only a small part of which will actually be used.
362 * To work around this, a 1 MiB of virtual addresses are allocated per bus
363 * when the bus is first accessed. When the physical range is mapped, the
364 * bus number bits are hidden so that the extended register number bits
365 * appear as bits [19:16]. Therefore the virtual mapping looks like this:
367 * [19:16] extended register number
368 * [15:11] device number
369 * [10: 8] function number
370 * [ 7: 0] register number
372 * This is achieved by stitching together 16 chunks of 64 KiB of physical
373 * address space via the MMU.
/*
 * Compute the offset into the per-bus remapped configuration window for
 * a given devfn/register pair; the extended register bits land at
 * [19:16] per the layout comment above.
 */
static unsigned long tegra_pcie_conf_offset(unsigned int devfn, int where)
{
	unsigned long ext = (where & 0xf00) << 8;	/* bits [19:16] */
	unsigned long dev = PCI_SLOT(devfn) << 11;	/* bits [15:11] */
	unsigned long fn = PCI_FUNC(devfn) << 8;	/* bits [10: 8] */

	return ext | dev | fn | (where & 0xfc);
}
/*
 * Allocate and map a 1 MiB virtual window for one bus' extended config
 * space by stitching together 16 x 64 KiB physical chunks (see the
 * layout comment above tegra_pcie_conf_offset()). Returns the new
 * tegra_pcie_bus or an ERR_PTR. NOTE(review): several error-path lines
 * are not visible in this extract.
 */
381 static struct tegra_pcie_bus *tegra_pcie_bus_alloc(struct tegra_pcie *pcie,
/* device-type, shared, execute-never page protection for MMIO */
384 pgprot_t prot = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY | L_PTE_XN |
385 L_PTE_MT_DEV_SHARED | L_PTE_SHARED;
386 phys_addr_t cs = pcie->cs->start;
387 struct tegra_pcie_bus *bus;
391 bus = kzalloc(sizeof(*bus), GFP_KERNEL);
393 return ERR_PTR(-ENOMEM);
395 INIT_LIST_HEAD(&bus->list);
398 /* allocate 1 MiB of virtual addresses */
399 bus->area = get_vm_area(SZ_1M, VM_IOREMAP);
405 /* map each of the 16 chunks of 64 KiB each */
406 for (i = 0; i < 16; i++) {
407 unsigned long virt = (unsigned long)bus->area->addr +
/* chunk i of bus busnr lives at cs + i*16M + busnr*64K */
409 phys_addr_t phys = cs + i * SZ_16M + busnr * SZ_64K;
411 err = ioremap_page_range(virt, virt + SZ_64K, phys, prot);
413 dev_err(pcie->dev, "ioremap_page_range() failed: %d\n",
/* error unwind: tear down whatever was mapped so far */
422 vunmap(bus->area->addr);
429 * Look up a virtual address mapping for the specified bus number. If no such
430 * mapping exists, try to create one.
432 static void __iomem *tegra_pcie_bus_map(struct tegra_pcie *pcie,
435 struct tegra_pcie_bus *bus;
/* fast path: reuse an existing mapping for this bus number */
437 list_for_each_entry(bus, &pcie->buses, list)
438 if (bus->nr == busnr)
439 return (void __iomem *)bus->area->addr;
/* slow path: create the mapping and remember it for next time */
441 bus = tegra_pcie_bus_alloc(pcie, busnr);
445 list_add_tail(&bus->list, &pcie->buses);
447 return (void __iomem *)bus->area->addr;
/*
 * Translate (bus, devfn, where) into a CPU virtual address for a config
 * access, or NULL when no root port / mapping matches.
 */
450 static void __iomem *tegra_pcie_conf_address(struct pci_bus *bus,
454 struct tegra_pcie *pcie = sys_to_pcie(bus->sysdata);
455 void __iomem *addr = NULL;
/* bus 0: root port registers are accessed directly, one slot per port */
457 if (bus->number == 0) {
458 unsigned int slot = PCI_SLOT(devfn);
459 struct tegra_pcie_port *port;
461 list_for_each_entry(port, &pcie->ports, list) {
/* port N answers for slot N+1 on the root bus */
462 if (port->index + 1 == slot) {
463 addr = port->base + (where & ~3);
/* downstream buses go through the remapped config window */
468 addr = tegra_pcie_bus_map(pcie, bus->number);
471 "failed to map cfg. space for bus %u\n",
476 addr += tegra_pcie_conf_offset(devfn, where);
/*
 * pci_ops.read: perform a 1/2/4-byte config space read. Always reads a
 * full 32-bit word and shifts/masks out the requested bytes.
 */
482 static int tegra_pcie_read_conf(struct pci_bus *bus, unsigned int devfn,
483 int where, int size, u32 *value)
487 addr = tegra_pcie_conf_address(bus, devfn, where);
490 return PCIBIOS_DEVICE_NOT_FOUND;
493 *value = readl(addr);
/* extract the requested byte / half-word from the 32-bit word */
496 *value = (*value >> (8 * (where & 3))) & 0xff;
498 *value = (*value >> (8 * (where & 3))) & 0xffff;
500 return PCIBIOS_SUCCESSFUL;
/*
 * pci_ops.write: perform a 1/2/4-byte config space write. Sub-word
 * writes are done as read-modify-write on the containing 32-bit word.
 */
503 static int tegra_pcie_write_conf(struct pci_bus *bus, unsigned int devfn,
504 int where, int size, u32 value)
509 addr = tegra_pcie_conf_address(bus, devfn, where);
511 return PCIBIOS_DEVICE_NOT_FOUND;
/* 32-bit writes go straight through */
515 return PCIBIOS_SUCCESSFUL;
/* build a mask that clears only the bytes being written */
519 mask = ~(0xffff << ((where & 0x3) * 8));
521 mask = ~(0xff << ((where & 0x3) * 8));
523 return PCIBIOS_BAD_REGISTER_NUMBER;
525 tmp = readl(addr) & mask;
526 tmp |= value << ((where & 0x3) * 8);
529 return PCIBIOS_SUCCESSFUL;
/* Config-space accessors used by the PCI core for this host bridge. */
532 static struct pci_ops tegra_pcie_ops = {
533 .read = tegra_pcie_read_conf,
534 .write = tegra_pcie_write_conf,
/*
 * Map a root port index to its AFI_PEXn_CTRL register offset.
 * NOTE(review): the switch cases are not visible in this extract;
 * presumably they select AFI_PEX0/1/2_CTRL — confirm against the full
 * source.
 */
537 static unsigned long tegra_pcie_port_get_pex_ctrl(struct tegra_pcie_port *port)
539 unsigned long ret = 0;
541 switch (port->index) {
558 static void tegra_pcie_port_reset(struct tegra_pcie_port *port)
560 unsigned long ctrl = tegra_pcie_port_get_pex_ctrl(port);
563 /* pulse reset signal */
564 value = afi_readl(port->pcie, ctrl);
565 value &= ~AFI_PEX_CTRL_RST;
566 afi_writel(port->pcie, value, ctrl);
568 usleep_range(1000, 2000);
570 value = afi_readl(port->pcie, ctrl);
571 value |= AFI_PEX_CTRL_RST;
572 afi_writel(port->pcie, value, ctrl);
/*
 * Enable @port: turn on its reference clock (and CLKREQ where the SoC
 * supports it), then pulse PERST# so the link trains.
 */
575 static void tegra_pcie_port_enable(struct tegra_pcie_port *port)
577 const struct tegra_pcie_soc_data *soc = port->pcie->soc_data;
578 unsigned long ctrl = tegra_pcie_port_get_pex_ctrl(port);
581 /* enable reference clock */
582 value = afi_readl(port->pcie, ctrl);
583 value |= AFI_PEX_CTRL_REFCLK_EN;
585 if (soc->has_pex_clkreq_en)
586 value |= AFI_PEX_CTRL_CLKREQ_EN;
/*
 * NOTE(review): one original line is missing above this statement —
 * cannot tell from this extract whether OVERRIDE_EN is unconditional
 * or guarded by a SoC flag; confirm against the full source.
 */
588 value |= AFI_PEX_CTRL_OVERRIDE_EN;
590 afi_writel(port->pcie, value, ctrl);
592 tegra_pcie_port_reset(port);
/*
 * Disable @port: hold it in reset and gate its reference clock
 * (mirror image of tegra_pcie_port_enable()).
 */
595 static void tegra_pcie_port_disable(struct tegra_pcie_port *port)
597 const struct tegra_pcie_soc_data *soc = port->pcie->soc_data;
598 unsigned long ctrl = tegra_pcie_port_get_pex_ctrl(port);
601 /* assert port reset */
602 value = afi_readl(port->pcie, ctrl);
603 value &= ~AFI_PEX_CTRL_RST;
604 afi_writel(port->pcie, value, ctrl);
606 /* disable reference clock */
607 value = afi_readl(port->pcie, ctrl);
609 if (soc->has_pex_clkreq_en)
610 value &= ~AFI_PEX_CTRL_CLKREQ_EN;
612 value &= ~AFI_PEX_CTRL_REFCLK_EN;
613 afi_writel(port->pcie, value, ctrl);
616 static void tegra_pcie_port_free(struct tegra_pcie_port *port)
618 struct tegra_pcie *pcie = port->pcie;
620 devm_iounmap(pcie->dev, port->base);
621 devm_release_mem_region(pcie->dev, port->regs.start,
622 resource_size(&port->regs));
623 list_del(&port->list);
624 devm_kfree(pcie->dev, port);
627 static void tegra_pcie_fixup_bridge(struct pci_dev *dev)
631 if ((dev->class >> 16) == PCI_BASE_CLASS_BRIDGE) {
632 pci_read_config_word(dev, PCI_COMMAND, ®);
633 reg |= (PCI_COMMAND_IO | PCI_COMMAND_MEMORY |
634 PCI_COMMAND_MASTER | PCI_COMMAND_SERR);
635 pci_write_config_word(dev, PCI_COMMAND, reg);
638 DECLARE_PCI_FIXUP_FINAL(PCI_ANY_ID, PCI_ANY_ID, tegra_pcie_fixup_bridge);
640 /* Tegra PCIE root complex wrongly reports device class */
641 static void tegra_pcie_fixup_class(struct pci_dev *dev)
643 dev->class = PCI_CLASS_BRIDGE_PCI << 8;
645 DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_NVIDIA, 0x0bf0, tegra_pcie_fixup_class);
646 DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_NVIDIA, 0x0bf1, tegra_pcie_fixup_class);
647 DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_NVIDIA, 0x0e1c, tegra_pcie_fixup_class);
648 DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_NVIDIA, 0x0e1d, tegra_pcie_fixup_class);
650 /* Tegra PCIE requires relaxed ordering */
651 static void tegra_pcie_relax_enable(struct pci_dev *dev)
653 pcie_capability_set_word(dev, PCI_EXP_DEVCTL, PCI_EXP_DEVCTL_RELAX_EN);
655 DECLARE_PCI_FIXUP_FINAL(PCI_ANY_ID, PCI_ANY_ID, tegra_pcie_relax_enable);
/*
 * hw_pci .setup hook: claim the memory apertures and hand the
 * mem/prefetch/busn resources plus the I/O window to the PCI core.
 * NOTE(review): the error-check lines after each request are not
 * visible in this extract.
 */
657 static int tegra_pcie_setup(int nr, struct pci_sys_data *sys)
659 struct tegra_pcie *pcie = sys_to_pcie(sys);
661 phys_addr_t io_start;
663 err = devm_request_resource(pcie->dev, &pcie->all, &pcie->mem);
667 err = devm_request_resource(pcie->dev, &pcie->all, &pcie->prefetch);
671 io_start = pci_pio_to_address(pcie->io.start);
673 pci_add_resource_offset(&sys->resources, &pcie->mem, sys->mem_offset);
674 pci_add_resource_offset(&sys->resources, &pcie->prefetch,
676 pci_add_resource(&sys->resources, &pcie->busn);
/* map this controller's I/O window at a fixed 64 KiB slot */
678 pci_ioremap_io(nr * SZ_64K, io_start);
/*
 * hw_pci .map_irq hook: resolve a device's legacy INTx pin to a Linux
 * IRQ via the device tree.
 */
683 static int tegra_pcie_map_irq(const struct pci_dev *pdev, u8 slot, u8 pin)
685 struct tegra_pcie *pcie = sys_to_pcie(pdev->bus->sysdata);
/* let the Tegra cpuidle driver know legacy PCIe IRQs are in use */
688 tegra_cpuidle_pcie_irqs_in_use();
690 irq = of_irq_parse_and_map_pci(pdev, slot, pin);
697 static void tegra_pcie_add_bus(struct pci_bus *bus)
699 if (IS_ENABLED(CONFIG_PCI_MSI)) {
700 struct tegra_pcie *pcie = sys_to_pcie(bus->sysdata);
702 bus->msi = &pcie->msi.chip;
/*
 * hw_pci .scan hook: create the root bus with tegra_pcie_ops and scan
 * everything below it.
 */
706 static struct pci_bus *tegra_pcie_scan_bus(int nr, struct pci_sys_data *sys)
708 struct tegra_pcie *pcie = sys_to_pcie(sys);
711 bus = pci_create_root_bus(pcie->dev, sys->busnr, &tegra_pcie_ops, sys,
716 pci_scan_child_bus(bus);
/*
 * Top-level AFI interrupt handler: decode the error code, log it (at
 * debug level for the very common master aborts seen during
 * enumeration) and, for abort/decode errors, also log the failing FPCI
 * address. NOTE(review): the first entries of err_msg[] and a few
 * control-flow lines are not visible in this extract.
 */
721 static irqreturn_t tegra_pcie_isr(int irq, void *arg)
723 const char *err_msg[] = {
731 "Response decoding error",
732 "AXI response decoding error",
733 "Transaction timeout",
734 "Slot present pin change",
735 "Slot clock request change",
736 "TMS clock ramp change",
737 "TMS ready for power down",
740 struct tegra_pcie *pcie = arg;
/* read and ack the interrupt code; keep the signature for logging */
743 code = afi_readl(pcie, AFI_INTR_CODE) & AFI_INTR_CODE_MASK;
744 signature = afi_readl(pcie, AFI_INTR_SIGNATURE);
745 afi_writel(pcie, 0, AFI_INTR_CODE);
747 if (code == AFI_INTR_LEGACY)
750 if (code >= ARRAY_SIZE(err_msg))
754 * do not pollute kernel log with master abort reports since they
755 * happen a lot during enumeration
757 if (code == AFI_INTR_MASTER_ABORT)
758 dev_dbg(pcie->dev, "%s, signature: %08x\n", err_msg[code],
761 dev_err(pcie->dev, "%s, signature: %08x\n", err_msg[code],
764 if (code == AFI_INTR_TARGET_ABORT || code == AFI_INTR_MASTER_ABORT ||
765 code == AFI_INTR_FPCI_DECODE_ERROR) {
/* reconstruct the 40-bit FPCI address from the upper byte + signature */
766 u32 fpci = afi_readl(pcie, AFI_UPPER_FPCI_ADDRESS) & 0xff;
767 u64 address = (u64)fpci << 32 | (signature & 0xfffffffc);
769 if (code == AFI_INTR_MASTER_ABORT)
770 dev_dbg(pcie->dev, " FPCI address: %10llx\n", address);
772 dev_err(pcie->dev, " FPCI address: %10llx\n", address);
779 * FPCI map is as follows:
780 * - 0xfdfc000000: I/O space
781 * - 0xfdfe000000: type 0 configuration space
782 * - 0xfdff000000: type 1 configuration space
783 * - 0xfe00000000: type 0 extended configuration space
784 * - 0xfe10000000: type 1 extended configuration space
/*
 * Program the AFI AXI->FPCI address translation BARs for config space,
 * I/O and the two memory apertures, then clear the unused BARs and the
 * cache/MSI translation registers.
 */
786 static void tegra_pcie_setup_translations(struct tegra_pcie *pcie)
788 u32 fpci_bar, size, axi_address;
789 phys_addr_t io_start = pci_pio_to_address(pcie->io.start);
791 /* Bar 0: type 1 extended configuration space */
792 fpci_bar = 0xfe100000;
793 size = resource_size(pcie->cs);
794 axi_address = pcie->cs->start;
795 afi_writel(pcie, axi_address, AFI_AXI_BAR0_START);
796 afi_writel(pcie, size >> 12, AFI_AXI_BAR0_SZ);
797 afi_writel(pcie, fpci_bar, AFI_FPCI_BAR0);
799 /* Bar 1: downstream IO bar */
800 fpci_bar = 0xfdfc0000;
801 size = resource_size(&pcie->io);
802 axi_address = io_start;
803 afi_writel(pcie, axi_address, AFI_AXI_BAR1_START);
804 afi_writel(pcie, size >> 12, AFI_AXI_BAR1_SZ);
805 afi_writel(pcie, fpci_bar, AFI_FPCI_BAR1);
807 /* Bar 2: prefetchable memory BAR */
808 fpci_bar = (((pcie->prefetch.start >> 12) & 0x0fffffff) << 4) | 0x1;
809 size = resource_size(&pcie->prefetch);
810 axi_address = pcie->prefetch.start;
811 afi_writel(pcie, axi_address, AFI_AXI_BAR2_START);
812 afi_writel(pcie, size >> 12, AFI_AXI_BAR2_SZ);
813 afi_writel(pcie, fpci_bar, AFI_FPCI_BAR2);
815 /* Bar 3: non prefetchable memory BAR */
816 fpci_bar = (((pcie->mem.start >> 12) & 0x0fffffff) << 4) | 0x1;
817 size = resource_size(&pcie->mem);
818 axi_address = pcie->mem.start;
819 afi_writel(pcie, axi_address, AFI_AXI_BAR3_START);
820 afi_writel(pcie, size >> 12, AFI_AXI_BAR3_SZ);
821 afi_writel(pcie, fpci_bar, AFI_FPCI_BAR3);
823 /* NULL out the remaining BARs as they are not used */
824 afi_writel(pcie, 0, AFI_AXI_BAR4_START);
825 afi_writel(pcie, 0, AFI_AXI_BAR4_SZ);
826 afi_writel(pcie, 0, AFI_FPCI_BAR4);
828 afi_writel(pcie, 0, AFI_AXI_BAR5_START);
829 afi_writel(pcie, 0, AFI_AXI_BAR5_SZ);
830 afi_writel(pcie, 0, AFI_FPCI_BAR5);
832 /* map all upstream transactions as uncached */
833 afi_writel(pcie, PHYS_OFFSET, AFI_CACHE_BAR0_ST);
834 afi_writel(pcie, 0, AFI_CACHE_BAR0_SZ);
835 afi_writel(pcie, 0, AFI_CACHE_BAR1_ST);
836 afi_writel(pcie, 0, AFI_CACHE_BAR1_SZ);
838 /* MSI translations are setup only when needed */
839 afi_writel(pcie, 0, AFI_MSI_FPCI_BAR_ST);
840 afi_writel(pcie, 0, AFI_MSI_BAR_SZ);
841 afi_writel(pcie, 0, AFI_MSI_AXI_BAR_ST);
/* NOTE(review): AFI_MSI_BAR_SZ is cleared twice; the second write looks
 * redundant (harmless, but worth confirming against the TRM). */
842 afi_writel(pcie, 0, AFI_MSI_BAR_SZ);
/*
 * Poll the PADS PLL lock-detect bit until it sets or @timeout
 * milliseconds elapse. NOTE(review): the sleep and return statements
 * are not visible in this extract.
 */
845 static int tegra_pcie_pll_wait(struct tegra_pcie *pcie, unsigned long timeout)
847 const struct tegra_pcie_soc_data *soc = pcie->soc_data;
/* convert the ms budget into an absolute jiffies deadline */
850 timeout = jiffies + msecs_to_jiffies(timeout);
852 while (time_before(jiffies, timeout)) {
853 value = pads_readl(pcie, soc->pads_pll_ctl);
854 if (value & PADS_PLL_CTL_LOCKDET)
/*
 * Bring up the legacy (register-driven) PCIe PHY: select pad function,
 * program the PLL inputs, pulse the PLL reset, configure the refclk
 * drivers, wait for lock, then release IDDQ and enable TX/RX.
 */
861 static int tegra_pcie_phy_enable(struct tegra_pcie *pcie)
863 const struct tegra_pcie_soc_data *soc = pcie->soc_data;
867 /* initialize internal PHY, enable up to 16 PCIE lanes */
868 pads_writel(pcie, 0x0, PADS_CTL_SEL);
870 /* override IDDQ to 1 on all 4 lanes */
871 value = pads_readl(pcie, PADS_CTL);
872 value |= PADS_CTL_IDDQ_1L;
873 pads_writel(pcie, value, PADS_CTL);
876 * Set up PHY PLL inputs select PLLE output as refclock,
877 * set TX ref sel to div10 (not div5).
879 value = pads_readl(pcie, soc->pads_pll_ctl);
880 value &= ~(PADS_PLL_CTL_REFCLK_MASK | PADS_PLL_CTL_TXCLKREF_MASK);
881 value |= PADS_PLL_CTL_REFCLK_INTERNAL_CML | soc->tx_ref_sel;
882 pads_writel(pcie, value, soc->pads_pll_ctl);
/* hold the PLL in reset while the inputs settle */
885 value = pads_readl(pcie, soc->pads_pll_ctl);
886 value &= ~PADS_PLL_CTL_RST_B4SM;
887 pads_writel(pcie, value, soc->pads_pll_ctl);
889 usleep_range(20, 100);
891 /* take PLL out of reset */
892 value = pads_readl(pcie, soc->pads_pll_ctl);
893 value |= PADS_PLL_CTL_RST_B4SM;
894 pads_writel(pcie, value, soc->pads_pll_ctl);
896 /* Configure the reference clock driver */
897 value = PADS_REFCLK_CFG_VALUE | (PADS_REFCLK_CFG_VALUE << 16);
898 pads_writel(pcie, value, PADS_REFCLK_CFG0);
/* CFG1 only carries the third port's settings on >2-port SoCs */
899 if (soc->num_ports > 2)
900 pads_writel(pcie, PADS_REFCLK_CFG_VALUE, PADS_REFCLK_CFG1);
902 /* wait for the PLL to lock */
903 err = tegra_pcie_pll_wait(pcie, 500);
905 dev_err(pcie->dev, "PLL failed to lock: %d\n", err);
909 /* turn off IDDQ override */
910 value = pads_readl(pcie, PADS_CTL);
911 value &= ~PADS_CTL_IDDQ_1L;
912 pads_writel(pcie, value, PADS_CTL);
914 /* enable TX/RX data */
915 value = pads_readl(pcie, PADS_CTL);
916 value |= PADS_CTL_TX_DATA_EN_1L | PADS_CTL_RX_DATA_EN_1L;
917 pads_writel(pcie, value, PADS_CTL);
/*
 * One-time controller initialization: PLLE power-down handshake, lane
 * crossbar configuration, per-port enables, PHY bring-up, and finally
 * interrupt enables. Runs after power/clocks/resets are up.
 */
922 static int tegra_pcie_enable_controller(struct tegra_pcie *pcie)
924 const struct tegra_pcie_soc_data *soc = pcie->soc_data;
925 struct tegra_pcie_port *port;
929 /* enable PLL power down */
931 value = afi_readl(pcie, AFI_PLLE_CONTROL);
932 value &= ~AFI_PLLE_CONTROL_BYPASS_PADS2PLLE_CONTROL;
933 value |= AFI_PLLE_CONTROL_PADS2PLLE_CONTROL_EN;
934 afi_writel(pcie, value, AFI_PLLE_CONTROL);
937 /* power down PCIe slot clock bias pad */
938 if (soc->has_pex_bias_ctrl)
939 afi_writel(pcie, 0, AFI_PEXBIAS_CTRL_0);
941 /* configure mode and disable all ports */
942 value = afi_readl(pcie, AFI_PCIE_CONFIG);
943 value &= ~AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_MASK;
944 value |= AFI_PCIE_CONFIG_PCIE_DISABLE_ALL | pcie->xbar_config;
/* re-enable only the ports that were probed from DT */
946 list_for_each_entry(port, &pcie->ports, list)
947 value &= ~AFI_PCIE_CONFIG_PCIE_DISABLE(port->index);
949 afi_writel(pcie, value, AFI_PCIE_CONFIG);
/*
 * NOTE(review): the two AFI_FUSE sequences below first clear then set
 * the Gen2 disable bit; the guarding if/else lines are not visible in
 * this extract — presumably selected by a SoC capability flag.
 */
952 value = afi_readl(pcie, AFI_FUSE);
953 value &= ~AFI_FUSE_PCIE_T0_GEN2_DIS;
954 afi_writel(pcie, value, AFI_FUSE);
956 value = afi_readl(pcie, AFI_FUSE);
957 value |= AFI_FUSE_PCIE_T0_GEN2_DIS;
958 afi_writel(pcie, value, AFI_FUSE);
/* bring up the PHY, via registers or the generic PHY framework */
962 err = tegra_pcie_phy_enable(pcie);
964 err = phy_power_on(pcie->phy);
967 dev_err(pcie->dev, "failed to power on PHY: %d\n", err);
971 /* take the PCIe interface module out of reset */
972 reset_control_deassert(pcie->pcie_xrst);
974 /* finally enable PCIe */
975 value = afi_readl(pcie, AFI_CONFIGURATION);
976 value |= AFI_CONFIGURATION_EN_FPCI;
977 afi_writel(pcie, value, AFI_CONFIGURATION);
979 value = AFI_INTR_EN_INI_SLVERR | AFI_INTR_EN_INI_DECERR |
980 AFI_INTR_EN_TGT_SLVERR | AFI_INTR_EN_TGT_DECERR |
981 AFI_INTR_EN_TGT_WRERR | AFI_INTR_EN_DFPCI_DECERR;
983 if (soc->has_intr_prsnt_sense)
984 value |= AFI_INTR_EN_PRSNT_SENSE;
986 afi_writel(pcie, value, AFI_AFI_INTR_ENABLE);
987 afi_writel(pcie, 0xffffffff, AFI_SM_INTR_ENABLE);
989 /* don't enable MSI for now, only when needed */
990 afi_writel(pcie, AFI_INTR_MASK_INT_MASK, AFI_INTR_MASK);
992 /* disable all exceptions */
993 afi_writel(pcie, 0, AFI_FPCI_ERROR_MASKS);
/*
 * Power the controller down: PHY off, all resets asserted, power domain
 * gated, regulators released. Failures are logged but not propagated.
 */
998 static void tegra_pcie_power_off(struct tegra_pcie *pcie)
1002 /* TODO: disable and unprepare clocks? */
1004 err = phy_power_off(pcie->phy);
1006 dev_warn(pcie->dev, "failed to power off PHY: %d\n", err);
1008 reset_control_assert(pcie->pcie_xrst);
1009 reset_control_assert(pcie->afi_rst);
1010 reset_control_assert(pcie->pex_rst);
1012 tegra_powergate_power_off(TEGRA_POWERGATE_PCIE);
1014 err = regulator_bulk_disable(pcie->num_supplies, pcie->supplies);
1016 dev_warn(pcie->dev, "failed to disable regulators: %d\n", err);
/*
 * Power the controller up from a known-off state: assert all resets,
 * gate the domain, then enable regulators, run the powergate power-up
 * sequence and turn on the AFI/CML/PLLE clocks. NOTE(review): the
 * error-return lines after each step are not visible in this extract.
 */
1019 static int tegra_pcie_power_on(struct tegra_pcie *pcie)
1021 const struct tegra_pcie_soc_data *soc = pcie->soc_data;
/* start from a clean slate: everything in reset, domain off */
1024 reset_control_assert(pcie->pcie_xrst);
1025 reset_control_assert(pcie->afi_rst);
1026 reset_control_assert(pcie->pex_rst);
1028 tegra_powergate_power_off(TEGRA_POWERGATE_PCIE);
1030 /* enable regulators */
1031 err = regulator_bulk_enable(pcie->num_supplies, pcie->supplies);
1033 dev_err(pcie->dev, "failed to enable regulators: %d\n", err);
1035 err = tegra_powergate_sequence_power_up(TEGRA_POWERGATE_PCIE,
1039 dev_err(pcie->dev, "powerup sequence failed: %d\n", err);
1043 reset_control_deassert(pcie->afi_rst);
1045 err = clk_prepare_enable(pcie->afi_clk);
1047 dev_err(pcie->dev, "failed to enable AFI clock: %d\n", err);
1051 if (soc->has_cml_clk) {
1052 err = clk_prepare_enable(pcie->cml_clk);
1054 dev_err(pcie->dev, "failed to enable CML clock: %d\n",
1060 err = clk_prepare_enable(pcie->pll_e);
1062 dev_err(pcie->dev, "failed to enable PLLE clock: %d\n", err);
1069 static int tegra_pcie_clocks_get(struct tegra_pcie *pcie)
1071 const struct tegra_pcie_soc_data *soc = pcie->soc_data;
1073 pcie->pex_clk = devm_clk_get(pcie->dev, "pex");
1074 if (IS_ERR(pcie->pex_clk))
1075 return PTR_ERR(pcie->pex_clk);
1077 pcie->afi_clk = devm_clk_get(pcie->dev, "afi");
1078 if (IS_ERR(pcie->afi_clk))
1079 return PTR_ERR(pcie->afi_clk);
1081 pcie->pll_e = devm_clk_get(pcie->dev, "pll_e");
1082 if (IS_ERR(pcie->pll_e))
1083 return PTR_ERR(pcie->pll_e);
1085 if (soc->has_cml_clk) {
1086 pcie->cml_clk = devm_clk_get(pcie->dev, "cml");
1087 if (IS_ERR(pcie->cml_clk))
1088 return PTR_ERR(pcie->cml_clk);
1094 static int tegra_pcie_resets_get(struct tegra_pcie *pcie)
1096 pcie->pex_rst = devm_reset_control_get(pcie->dev, "pex");
1097 if (IS_ERR(pcie->pex_rst))
1098 return PTR_ERR(pcie->pex_rst);
1100 pcie->afi_rst = devm_reset_control_get(pcie->dev, "afi");
1101 if (IS_ERR(pcie->afi_rst))
1102 return PTR_ERR(pcie->afi_rst);
1104 pcie->pcie_xrst = devm_reset_control_get(pcie->dev, "pcie_x");
1105 if (IS_ERR(pcie->pcie_xrst))
1106 return PTR_ERR(pcie->pcie_xrst);
/*
 * Acquire every resource the driver needs: clocks, resets, (optional)
 * PHY, power, the "pads"/"afi" register windows, the "cs" config-space
 * region (remapped on demand later) and the error interrupt.
 * NOTE(review): the error-return/goto lines after most steps are not
 * visible in this extract; the tail powers the controller back off on
 * failure.
 */
1111 static int tegra_pcie_get_resources(struct tegra_pcie *pcie)
1113 struct platform_device *pdev = to_platform_device(pcie->dev);
1114 struct resource *pads, *afi, *res;
1117 err = tegra_pcie_clocks_get(pcie);
1119 dev_err(&pdev->dev, "failed to get clocks: %d\n", err);
1123 err = tegra_pcie_resets_get(pcie);
1125 dev_err(&pdev->dev, "failed to get resets: %d\n", err);
/* PHY is optional: devm_phy_optional_get() returns NULL when absent */
1129 pcie->phy = devm_phy_optional_get(pcie->dev, "pcie");
1130 if (IS_ERR(pcie->phy)) {
1131 err = PTR_ERR(pcie->phy);
1132 dev_err(&pdev->dev, "failed to get PHY: %d\n", err);
1136 err = phy_init(pcie->phy);
1138 dev_err(&pdev->dev, "failed to initialize PHY: %d\n", err);
1142 err = tegra_pcie_power_on(pcie);
1144 dev_err(&pdev->dev, "failed to power up: %d\n", err);
1148 pads = platform_get_resource_byname(pdev, IORESOURCE_MEM, "pads");
1149 pcie->pads = devm_ioremap_resource(&pdev->dev, pads);
1150 if (IS_ERR(pcie->pads)) {
1151 err = PTR_ERR(pcie->pads);
1155 afi = platform_get_resource_byname(pdev, IORESOURCE_MEM, "afi");
1156 pcie->afi = devm_ioremap_resource(&pdev->dev, afi);
1157 if (IS_ERR(pcie->afi)) {
1158 err = PTR_ERR(pcie->afi);
1162 /* request configuration space, but remap later, on demand */
1163 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "cs");
1165 err = -EADDRNOTAVAIL;
1169 pcie->cs = devm_request_mem_region(pcie->dev, res->start,
1170 resource_size(res), res->name);
1172 err = -EADDRNOTAVAIL;
1176 /* request interrupt */
1177 err = platform_get_irq_byname(pdev, "intr");
1179 dev_err(&pdev->dev, "failed to get IRQ: %d\n", err);
1185 err = request_irq(pcie->irq, tegra_pcie_isr, IRQF_SHARED, "PCIE", pcie);
1187 dev_err(&pdev->dev, "failed to register IRQ: %d\n", err);
/* error unwind: undo tegra_pcie_power_on() */
1194 tegra_pcie_power_off(pcie);
/*
 * Release the resources acquired by tegra_pcie_get_resources(): free
 * the error IRQ, power the controller off and tear down the PHY.
 * NOTE(review): the return statement and an apparent guard around
 * free_irq() are not visible in this extract.
 */
1198 static int tegra_pcie_put_resources(struct tegra_pcie *pcie)
1203 free_irq(pcie->irq, pcie);
1205 tegra_pcie_power_off(pcie);
1207 err = phy_exit(pcie->phy);
1209 dev_err(pcie->dev, "failed to teardown PHY: %d\n", err);
/*
 * Allocate a free MSI vector number from the bitmap under the chip
 * lock. NOTE(review): the failure branch (no free bit) and the return
 * of the chosen vector are not visible in this extract.
 */
1214 static int tegra_msi_alloc(struct tegra_msi *chip)
1218 mutex_lock(&chip->lock);
1220 msi = find_first_zero_bit(chip->used, INT_PCI_MSI_NR);
1221 if (msi < INT_PCI_MSI_NR)
1222 set_bit(msi, chip->used);
1226 mutex_unlock(&chip->lock);
/*
 * Return MSI vector @irq to the bitmap under the chip lock, warning if
 * it was not marked as allocated.
 */
1231 static void tegra_msi_free(struct tegra_msi *chip, unsigned long irq)
1233 struct device *dev = chip->chip.dev;
1235 mutex_lock(&chip->lock);
1237 if (!test_bit(irq, chip->used))
1238 dev_err(dev, "trying to free unused MSI#%lu\n", irq);
1240 clear_bit(irq, chip->used);
1242 mutex_unlock(&chip->lock);
1245 static irqreturn_t tegra_pcie_msi_irq(int irq, void *data)
1247 struct tegra_pcie *pcie = data;
1248 struct tegra_msi *msi = &pcie->msi;
1249 unsigned int i, processed = 0;
1251 for (i = 0; i < 8; i++) {
1252 unsigned long reg = afi_readl(pcie, AFI_MSI_VEC0 + i * 4);
1255 unsigned int offset = find_first_bit(®, 32);
1256 unsigned int index = i * 32 + offset;
1259 /* clear the interrupt */
1260 afi_writel(pcie, 1 << offset, AFI_MSI_VEC0 + i * 4);
1262 irq = irq_find_mapping(msi->domain, index);
1264 if (test_bit(index, msi->used))
1265 generic_handle_irq(irq);
1267 dev_info(pcie->dev, "unhandled MSI\n");
1270 * that's weird who triggered this?
1273 dev_info(pcie->dev, "unexpected MSI\n");
1276 /* see if there's any more pending in this vector */
1277 reg = afi_readl(pcie, AFI_MSI_VEC0 + i * 4);
1283 return processed > 0 ? IRQ_HANDLED : IRQ_NONE;
/*
 * msi_chip .setup_irq hook: allocate a hardware MSI vector, map it to a
 * Linux IRQ and program the MSI message (address/data) for the device.
 * NOTE(review): the error-check lines and msg.data assignment are not
 * visible in this extract.
 */
1286 static int tegra_msi_setup_irq(struct msi_chip *chip, struct pci_dev *pdev,
1287 struct msi_desc *desc)
1289 struct tegra_msi *msi = to_tegra_msi(chip);
1294 hwirq = tegra_msi_alloc(msi);
1298 irq = irq_create_mapping(msi->domain, hwirq);
/* mapping failed: give the hardware vector back */
1300 tegra_msi_free(msi, hwirq);
1304 irq_set_msi_desc(irq, desc);
/* target address is the dedicated MSI page allocated at enable time */
1306 msg.address_lo = virt_to_phys((void *)msi->pages);
1307 /* 32 bit address only */
1311 write_msi_msg(irq, &msg);
1316 static void tegra_msi_teardown_irq(struct msi_chip *chip, unsigned int irq)
1318 struct tegra_msi *msi = to_tegra_msi(chip);
1319 struct irq_data *d = irq_get_irq_data(irq);
1320 irq_hw_number_t hwirq = irqd_to_hwirq(d);
1322 irq_dispose_mapping(irq);
1323 tegra_msi_free(msi, hwirq);
1326 static struct irq_chip tegra_msi_irq_chip = {
1327 .name = "Tegra PCIe MSI",
1328 .irq_enable = unmask_msi_irq,
1329 .irq_disable = mask_msi_irq,
1330 .irq_mask = mask_msi_irq,
1331 .irq_unmask = unmask_msi_irq,
1334 static int tegra_msi_map(struct irq_domain *domain, unsigned int irq,
1335 irq_hw_number_t hwirq)
1337 irq_set_chip_and_handler(irq, &tegra_msi_irq_chip, handle_simple_irq);
1338 irq_set_chip_data(irq, domain->host_data);
1339 set_irq_flags(irq, IRQF_VALID);
1341 tegra_cpuidle_pcie_irqs_in_use();
1346 static const struct irq_domain_ops msi_domain_ops = {
1347 .map = tegra_msi_map,
1350 static int tegra_pcie_enable_msi(struct tegra_pcie *pcie)
1352 struct platform_device *pdev = to_platform_device(pcie->dev);
1353 const struct tegra_pcie_soc_data *soc = pcie->soc_data;
1354 struct tegra_msi *msi = &pcie->msi;
1359 mutex_init(&msi->lock);
1361 msi->chip.dev = pcie->dev;
1362 msi->chip.setup_irq = tegra_msi_setup_irq;
1363 msi->chip.teardown_irq = tegra_msi_teardown_irq;
1365 msi->domain = irq_domain_add_linear(pcie->dev->of_node, INT_PCI_MSI_NR,
1366 &msi_domain_ops, &msi->chip);
1368 dev_err(&pdev->dev, "failed to create IRQ domain\n");
1372 err = platform_get_irq_byname(pdev, "msi");
1374 dev_err(&pdev->dev, "failed to get IRQ: %d\n", err);
1380 err = request_irq(msi->irq, tegra_pcie_msi_irq, 0,
1381 tegra_msi_irq_chip.name, pcie);
1383 dev_err(&pdev->dev, "failed to request IRQ: %d\n", err);
1387 /* setup AFI/FPCI range */
1388 msi->pages = __get_free_pages(GFP_KERNEL, 0);
1389 base = virt_to_phys((void *)msi->pages);
1391 afi_writel(pcie, base >> soc->msi_base_shift, AFI_MSI_FPCI_BAR_ST);
1392 afi_writel(pcie, base, AFI_MSI_AXI_BAR_ST);
1393 /* this register is in 4K increments */
1394 afi_writel(pcie, 1, AFI_MSI_BAR_SZ);
1396 /* enable all MSI vectors */
1397 afi_writel(pcie, 0xffffffff, AFI_MSI_EN_VEC0);
1398 afi_writel(pcie, 0xffffffff, AFI_MSI_EN_VEC1);
1399 afi_writel(pcie, 0xffffffff, AFI_MSI_EN_VEC2);
1400 afi_writel(pcie, 0xffffffff, AFI_MSI_EN_VEC3);
1401 afi_writel(pcie, 0xffffffff, AFI_MSI_EN_VEC4);
1402 afi_writel(pcie, 0xffffffff, AFI_MSI_EN_VEC5);
1403 afi_writel(pcie, 0xffffffff, AFI_MSI_EN_VEC6);
1404 afi_writel(pcie, 0xffffffff, AFI_MSI_EN_VEC7);
1406 /* and unmask the MSI interrupt */
1407 reg = afi_readl(pcie, AFI_INTR_MASK);
1408 reg |= AFI_INTR_MASK_MSI_MASK;
1409 afi_writel(pcie, reg, AFI_INTR_MASK);
1414 irq_domain_remove(msi->domain);
1418 static int tegra_pcie_disable_msi(struct tegra_pcie *pcie)
1420 struct tegra_msi *msi = &pcie->msi;
1421 unsigned int i, irq;
1424 /* mask the MSI interrupt */
1425 value = afi_readl(pcie, AFI_INTR_MASK);
1426 value &= ~AFI_INTR_MASK_MSI_MASK;
1427 afi_writel(pcie, value, AFI_INTR_MASK);
1429 /* disable all MSI vectors */
1430 afi_writel(pcie, 0, AFI_MSI_EN_VEC0);
1431 afi_writel(pcie, 0, AFI_MSI_EN_VEC1);
1432 afi_writel(pcie, 0, AFI_MSI_EN_VEC2);
1433 afi_writel(pcie, 0, AFI_MSI_EN_VEC3);
1434 afi_writel(pcie, 0, AFI_MSI_EN_VEC4);
1435 afi_writel(pcie, 0, AFI_MSI_EN_VEC5);
1436 afi_writel(pcie, 0, AFI_MSI_EN_VEC6);
1437 afi_writel(pcie, 0, AFI_MSI_EN_VEC7);
1439 free_pages(msi->pages, 0);
1442 free_irq(msi->irq, pcie);
1444 for (i = 0; i < INT_PCI_MSI_NR; i++) {
1445 irq = irq_find_mapping(msi->domain, i);
1447 irq_dispose_mapping(irq);
1450 irq_domain_remove(msi->domain);
1455 static int tegra_pcie_get_xbar_config(struct tegra_pcie *pcie, u32 lanes,
1458 struct device_node *np = pcie->dev->of_node;
1460 if (of_device_is_compatible(np, "nvidia,tegra124-pcie")) {
1463 dev_info(pcie->dev, "4x1, 1x1 configuration\n");
1464 *xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_X4_X1;
1468 dev_info(pcie->dev, "2x1, 1x1 configuration\n");
1469 *xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_X2_X1;
1472 } else if (of_device_is_compatible(np, "nvidia,tegra30-pcie")) {
1475 dev_info(pcie->dev, "4x1, 2x1 configuration\n");
1476 *xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_420;
1480 dev_info(pcie->dev, "2x3 configuration\n");
1481 *xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_222;
1485 dev_info(pcie->dev, "4x1, 1x2 configuration\n");
1486 *xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_411;
1489 } else if (of_device_is_compatible(np, "nvidia,tegra20-pcie")) {
1492 dev_info(pcie->dev, "single-mode configuration\n");
1493 *xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_SINGLE;
1497 dev_info(pcie->dev, "dual-mode configuration\n");
1498 *xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_DUAL;
1507 * Check whether a given set of supplies is available in a device tree node.
1508 * This is used to check whether the new or the legacy device tree bindings
1511 static bool of_regulator_bulk_available(struct device_node *np,
1512 struct regulator_bulk_data *supplies,
1513 unsigned int num_supplies)
1518 for (i = 0; i < num_supplies; i++) {
1519 snprintf(property, 32, "%s-supply", supplies[i].supply);
1521 if (of_find_property(np, property, NULL) == NULL)
1529 * Old versions of the device tree binding for this device used a set of power
1530 * supplies that didn't match the hardware inputs. This happened to work for a
1531 * number of cases but is not future proof. However to preserve backwards-
1532 * compatibility with old device trees, this function will try to use the old
1535 static int tegra_pcie_get_legacy_regulators(struct tegra_pcie *pcie)
1537 struct device_node *np = pcie->dev->of_node;
1539 if (of_device_is_compatible(np, "nvidia,tegra30-pcie"))
1540 pcie->num_supplies = 3;
1541 else if (of_device_is_compatible(np, "nvidia,tegra20-pcie"))
1542 pcie->num_supplies = 2;
1544 if (pcie->num_supplies == 0) {
1545 dev_err(pcie->dev, "device %s not supported in legacy mode\n",
1550 pcie->supplies = devm_kcalloc(pcie->dev, pcie->num_supplies,
1551 sizeof(*pcie->supplies),
1553 if (!pcie->supplies)
1556 pcie->supplies[0].supply = "pex-clk";
1557 pcie->supplies[1].supply = "vdd";
1559 if (pcie->num_supplies > 2)
1560 pcie->supplies[2].supply = "avdd";
1562 return devm_regulator_bulk_get(pcie->dev, pcie->num_supplies,
1567 * Obtains the list of regulators required for a particular generation of the
1570 * This would've been nice to do simply by providing static tables for use
1571 * with the regulator_bulk_*() API, but unfortunately Tegra30 is a bit quirky
1572 * in that it has two pairs or AVDD_PEX and VDD_PEX supplies (PEXA and PEXB)
1573 * and either seems to be optional depending on which ports are being used.
1575 static int tegra_pcie_get_regulators(struct tegra_pcie *pcie, u32 lane_mask)
1577 struct device_node *np = pcie->dev->of_node;
1580 if (of_device_is_compatible(np, "nvidia,tegra124-pcie")) {
1581 pcie->num_supplies = 7;
1583 pcie->supplies = devm_kcalloc(pcie->dev, pcie->num_supplies,
1584 sizeof(*pcie->supplies),
1586 if (!pcie->supplies)
1589 pcie->supplies[i++].supply = "avddio-pex";
1590 pcie->supplies[i++].supply = "dvddio-pex";
1591 pcie->supplies[i++].supply = "avdd-pex-pll";
1592 pcie->supplies[i++].supply = "hvdd-pex";
1593 pcie->supplies[i++].supply = "hvdd-pex-pll-e";
1594 pcie->supplies[i++].supply = "vddio-pex-ctl";
1595 pcie->supplies[i++].supply = "avdd-pll-erefe";
1596 } else if (of_device_is_compatible(np, "nvidia,tegra30-pcie")) {
1597 bool need_pexa = false, need_pexb = false;
1599 /* VDD_PEXA and AVDD_PEXA supply lanes 0 to 3 */
1600 if (lane_mask & 0x0f)
1603 /* VDD_PEXB and AVDD_PEXB supply lanes 4 to 5 */
1604 if (lane_mask & 0x30)
1607 pcie->num_supplies = 4 + (need_pexa ? 2 : 0) +
1608 (need_pexb ? 2 : 0);
1610 pcie->supplies = devm_kcalloc(pcie->dev, pcie->num_supplies,
1611 sizeof(*pcie->supplies),
1613 if (!pcie->supplies)
1616 pcie->supplies[i++].supply = "avdd-pex-pll";
1617 pcie->supplies[i++].supply = "hvdd-pex";
1618 pcie->supplies[i++].supply = "vddio-pex-ctl";
1619 pcie->supplies[i++].supply = "avdd-plle";
1622 pcie->supplies[i++].supply = "avdd-pexa";
1623 pcie->supplies[i++].supply = "vdd-pexa";
1627 pcie->supplies[i++].supply = "avdd-pexb";
1628 pcie->supplies[i++].supply = "vdd-pexb";
1630 } else if (of_device_is_compatible(np, "nvidia,tegra20-pcie")) {
1631 pcie->num_supplies = 5;
1633 pcie->supplies = devm_kcalloc(pcie->dev, pcie->num_supplies,
1634 sizeof(*pcie->supplies),
1636 if (!pcie->supplies)
1639 pcie->supplies[0].supply = "avdd-pex";
1640 pcie->supplies[1].supply = "vdd-pex";
1641 pcie->supplies[2].supply = "avdd-pex-pll";
1642 pcie->supplies[3].supply = "avdd-plle";
1643 pcie->supplies[4].supply = "vddio-pex-clk";
1646 if (of_regulator_bulk_available(pcie->dev->of_node, pcie->supplies,
1647 pcie->num_supplies))
1648 return devm_regulator_bulk_get(pcie->dev, pcie->num_supplies,
1652 * If not all regulators are available for this new scheme, assume
1653 * that the device tree complies with an older version of the device
1656 dev_info(pcie->dev, "using legacy DT binding for power supplies\n");
1658 devm_kfree(pcie->dev, pcie->supplies);
1659 pcie->num_supplies = 0;
1661 return tegra_pcie_get_legacy_regulators(pcie);
1664 static int tegra_pcie_parse_dt(struct tegra_pcie *pcie)
1666 const struct tegra_pcie_soc_data *soc = pcie->soc_data;
1667 struct device_node *np = pcie->dev->of_node, *port;
1668 struct of_pci_range_parser parser;
1669 struct of_pci_range range;
1670 u32 lanes = 0, mask = 0;
1671 unsigned int lane = 0;
1672 struct resource res;
1675 memset(&pcie->all, 0, sizeof(pcie->all));
1676 pcie->all.flags = IORESOURCE_MEM;
1677 pcie->all.name = np->full_name;
1678 pcie->all.start = ~0;
1681 if (of_pci_range_parser_init(&parser, np)) {
1682 dev_err(pcie->dev, "missing \"ranges\" property\n");
1686 for_each_of_pci_range(&parser, &range) {
1687 err = of_pci_range_to_resource(&range, np, &res);
1691 switch (res.flags & IORESOURCE_TYPE_BITS) {
1693 memcpy(&pcie->io, &res, sizeof(res));
1694 pcie->io.name = np->full_name;
1697 case IORESOURCE_MEM:
1698 if (res.flags & IORESOURCE_PREFETCH) {
1699 memcpy(&pcie->prefetch, &res, sizeof(res));
1700 pcie->prefetch.name = "prefetchable";
1702 memcpy(&pcie->mem, &res, sizeof(res));
1703 pcie->mem.name = "non-prefetchable";
1708 if (res.start <= pcie->all.start)
1709 pcie->all.start = res.start;
1711 if (res.end >= pcie->all.end)
1712 pcie->all.end = res.end;
1715 err = devm_request_resource(pcie->dev, &iomem_resource, &pcie->all);
1719 err = of_pci_parse_bus_range(np, &pcie->busn);
1721 dev_err(pcie->dev, "failed to parse ranges property: %d\n",
1723 pcie->busn.name = np->name;
1724 pcie->busn.start = 0;
1725 pcie->busn.end = 0xff;
1726 pcie->busn.flags = IORESOURCE_BUS;
1729 /* parse root ports */
1730 for_each_child_of_node(np, port) {
1731 struct tegra_pcie_port *rp;
1735 err = of_pci_get_devfn(port);
1737 dev_err(pcie->dev, "failed to parse address: %d\n",
1742 index = PCI_SLOT(err);
1744 if (index < 1 || index > soc->num_ports) {
1745 dev_err(pcie->dev, "invalid port number: %d\n", index);
1751 err = of_property_read_u32(port, "nvidia,num-lanes", &value);
1753 dev_err(pcie->dev, "failed to parse # of lanes: %d\n",
1759 dev_err(pcie->dev, "invalid # of lanes: %u\n", value);
1763 lanes |= value << (index << 3);
1765 if (!of_device_is_available(port)) {
1770 mask |= ((1 << value) - 1) << lane;
1773 rp = devm_kzalloc(pcie->dev, sizeof(*rp), GFP_KERNEL);
1777 err = of_address_to_resource(port, 0, &rp->regs);
1779 dev_err(pcie->dev, "failed to parse address: %d\n",
1784 INIT_LIST_HEAD(&rp->list);
1789 rp->base = devm_ioremap_resource(pcie->dev, &rp->regs);
1790 if (IS_ERR(rp->base))
1791 return PTR_ERR(rp->base);
1793 list_add_tail(&rp->list, &pcie->ports);
1796 err = tegra_pcie_get_xbar_config(pcie, lanes, &pcie->xbar_config);
1798 dev_err(pcie->dev, "invalid lane configuration\n");
1802 err = tegra_pcie_get_regulators(pcie, mask);
1810 * FIXME: If there are no PCIe cards attached, then calling this function
1811 * can result in the increase of the bootup time as there are big timeout
1814 #define TEGRA_PCIE_LINKUP_TIMEOUT 200 /* up to 1.2 seconds */
1815 static bool tegra_pcie_port_check_link(struct tegra_pcie_port *port)
1817 unsigned int retries = 3;
1818 unsigned long value;
1820 /* override presence detection */
1821 value = readl(port->base + RP_PRIV_MISC);
1822 value &= ~RP_PRIV_MISC_PRSNT_MAP_EP_ABSNT;
1823 value |= RP_PRIV_MISC_PRSNT_MAP_EP_PRSNT;
1824 writel(value, port->base + RP_PRIV_MISC);
1827 unsigned int timeout = TEGRA_PCIE_LINKUP_TIMEOUT;
1830 value = readl(port->base + RP_VEND_XP);
1832 if (value & RP_VEND_XP_DL_UP)
1835 usleep_range(1000, 2000);
1836 } while (--timeout);
1839 dev_err(port->pcie->dev, "link %u down, retrying\n",
1844 timeout = TEGRA_PCIE_LINKUP_TIMEOUT;
1847 value = readl(port->base + RP_LINK_CONTROL_STATUS);
1849 if (value & RP_LINK_CONTROL_STATUS_DL_LINK_ACTIVE)
1852 usleep_range(1000, 2000);
1853 } while (--timeout);
1856 tegra_pcie_port_reset(port);
1857 } while (--retries);
1862 static int tegra_pcie_enable(struct tegra_pcie *pcie)
1864 struct tegra_pcie_port *port, *tmp;
1867 list_for_each_entry_safe(port, tmp, &pcie->ports, list) {
1868 dev_info(pcie->dev, "probing port %u, using %u lanes\n",
1869 port->index, port->lanes);
1871 tegra_pcie_port_enable(port);
1873 if (tegra_pcie_port_check_link(port))
1876 dev_info(pcie->dev, "link %u down, ignoring\n", port->index);
1878 tegra_pcie_port_disable(port);
1879 tegra_pcie_port_free(port);
1882 memset(&hw, 0, sizeof(hw));
1884 hw.nr_controllers = 1;
1885 hw.private_data = (void **)&pcie;
1886 hw.setup = tegra_pcie_setup;
1887 hw.map_irq = tegra_pcie_map_irq;
1888 hw.add_bus = tegra_pcie_add_bus;
1889 hw.scan = tegra_pcie_scan_bus;
1890 hw.ops = &tegra_pcie_ops;
1892 pci_common_init_dev(pcie->dev, &hw);
1897 static const struct tegra_pcie_soc_data tegra20_pcie_data = {
1899 .msi_base_shift = 0,
1900 .pads_pll_ctl = PADS_PLL_CTL_TEGRA20,
1901 .tx_ref_sel = PADS_PLL_CTL_TXCLKREF_DIV10,
1902 .has_pex_clkreq_en = false,
1903 .has_pex_bias_ctrl = false,
1904 .has_intr_prsnt_sense = false,
1905 .has_cml_clk = false,
1909 static const struct tegra_pcie_soc_data tegra30_pcie_data = {
1911 .msi_base_shift = 8,
1912 .pads_pll_ctl = PADS_PLL_CTL_TEGRA30,
1913 .tx_ref_sel = PADS_PLL_CTL_TXCLKREF_BUF_EN,
1914 .has_pex_clkreq_en = true,
1915 .has_pex_bias_ctrl = true,
1916 .has_intr_prsnt_sense = true,
1917 .has_cml_clk = true,
1921 static const struct tegra_pcie_soc_data tegra124_pcie_data = {
1923 .msi_base_shift = 8,
1924 .pads_pll_ctl = PADS_PLL_CTL_TEGRA30,
1925 .tx_ref_sel = PADS_PLL_CTL_TXCLKREF_BUF_EN,
1926 .has_pex_clkreq_en = true,
1927 .has_pex_bias_ctrl = true,
1928 .has_intr_prsnt_sense = true,
1929 .has_cml_clk = true,
1933 static const struct of_device_id tegra_pcie_of_match[] = {
1934 { .compatible = "nvidia,tegra124-pcie", .data = &tegra124_pcie_data },
1935 { .compatible = "nvidia,tegra30-pcie", .data = &tegra30_pcie_data },
1936 { .compatible = "nvidia,tegra20-pcie", .data = &tegra20_pcie_data },
1939 MODULE_DEVICE_TABLE(of, tegra_pcie_of_match);
1941 static void *tegra_pcie_ports_seq_start(struct seq_file *s, loff_t *pos)
1943 struct tegra_pcie *pcie = s->private;
1945 if (list_empty(&pcie->ports))
1948 seq_printf(s, "Index Status\n");
1950 return seq_list_start(&pcie->ports, *pos);
1953 static void *tegra_pcie_ports_seq_next(struct seq_file *s, void *v, loff_t *pos)
1955 struct tegra_pcie *pcie = s->private;
1957 return seq_list_next(v, &pcie->ports, pos);
/* seq_file .stop: nothing to release */
static void tegra_pcie_ports_seq_stop(struct seq_file *s, void *v)
{
}
1964 static int tegra_pcie_ports_seq_show(struct seq_file *s, void *v)
1966 bool up = false, active = false;
1967 struct tegra_pcie_port *port;
1970 port = list_entry(v, struct tegra_pcie_port, list);
1972 value = readl(port->base + RP_VEND_XP);
1974 if (value & RP_VEND_XP_DL_UP)
1977 value = readl(port->base + RP_LINK_CONTROL_STATUS);
1979 if (value & RP_LINK_CONTROL_STATUS_DL_LINK_ACTIVE)
1982 seq_printf(s, "%2u ", port->index);
1985 seq_printf(s, "up");
1989 seq_printf(s, ", ");
1991 seq_printf(s, "active");
1994 seq_printf(s, "\n");
1998 static const struct seq_operations tegra_pcie_ports_seq_ops = {
1999 .start = tegra_pcie_ports_seq_start,
2000 .next = tegra_pcie_ports_seq_next,
2001 .stop = tegra_pcie_ports_seq_stop,
2002 .show = tegra_pcie_ports_seq_show,
2005 static int tegra_pcie_ports_open(struct inode *inode, struct file *file)
2007 struct tegra_pcie *pcie = inode->i_private;
2011 err = seq_open(file, &tegra_pcie_ports_seq_ops);
2015 s = file->private_data;
2021 static const struct file_operations tegra_pcie_ports_ops = {
2022 .owner = THIS_MODULE,
2023 .open = tegra_pcie_ports_open,
2025 .llseek = seq_lseek,
2026 .release = seq_release,
2029 static int tegra_pcie_debugfs_init(struct tegra_pcie *pcie)
2031 struct dentry *file;
2033 pcie->debugfs = debugfs_create_dir("pcie", NULL);
2037 file = debugfs_create_file("ports", S_IFREG | S_IRUGO, pcie->debugfs,
2038 pcie, &tegra_pcie_ports_ops);
2045 debugfs_remove_recursive(pcie->debugfs);
2046 pcie->debugfs = NULL;
2050 static int tegra_pcie_probe(struct platform_device *pdev)
2052 const struct of_device_id *match;
2053 struct tegra_pcie *pcie;
2056 match = of_match_device(tegra_pcie_of_match, &pdev->dev);
2060 pcie = devm_kzalloc(&pdev->dev, sizeof(*pcie), GFP_KERNEL);
2064 INIT_LIST_HEAD(&pcie->buses);
2065 INIT_LIST_HEAD(&pcie->ports);
2066 pcie->soc_data = match->data;
2067 pcie->dev = &pdev->dev;
2069 err = tegra_pcie_parse_dt(pcie);
2073 pcibios_min_mem = 0;
2075 err = tegra_pcie_get_resources(pcie);
2077 dev_err(&pdev->dev, "failed to request resources: %d\n", err);
2081 err = tegra_pcie_enable_controller(pcie);
2085 /* setup the AFI address translations */
2086 tegra_pcie_setup_translations(pcie);
2088 if (IS_ENABLED(CONFIG_PCI_MSI)) {
2089 err = tegra_pcie_enable_msi(pcie);
2092 "failed to enable MSI support: %d\n",
2098 err = tegra_pcie_enable(pcie);
2100 dev_err(&pdev->dev, "failed to enable PCIe ports: %d\n", err);
2104 if (IS_ENABLED(CONFIG_DEBUG_FS)) {
2105 err = tegra_pcie_debugfs_init(pcie);
2107 dev_err(&pdev->dev, "failed to setup debugfs: %d\n",
2111 platform_set_drvdata(pdev, pcie);
2115 if (IS_ENABLED(CONFIG_PCI_MSI))
2116 tegra_pcie_disable_msi(pcie);
2118 tegra_pcie_put_resources(pcie);
2122 static struct platform_driver tegra_pcie_driver = {
2124 .name = "tegra-pcie",
2125 .owner = THIS_MODULE,
2126 .of_match_table = tegra_pcie_of_match,
2127 .suppress_bind_attrs = true,
2129 .probe = tegra_pcie_probe,
2131 module_platform_driver(tegra_pcie_driver);
2133 MODULE_AUTHOR("Thierry Reding <treding@nvidia.com>");
2134 MODULE_DESCRIPTION("NVIDIA Tegra PCIe driver");
2135 MODULE_LICENSE("GPL v2");