2 * PCIe host controller driver for Tegra SoCs
4 * Copyright (c) 2010, CompuLab, Ltd.
5 * Author: Mike Rapoport <mike@compulab.co.il>
7 * Based on NVIDIA PCIe driver
8 * Copyright (c) 2008-2009, NVIDIA Corporation.
10 * Bits taken from arch/arm/mach-dove/pcie.c
12 * Author: Thierry Reding <treding@nvidia.com>
14 * This program is free software; you can redistribute it and/or modify
15 * it under the terms of the GNU General Public License as published by
16 * the Free Software Foundation; either version 2 of the License, or
17 * (at your option) any later version.
19 * This program is distributed in the hope that it will be useful, but WITHOUT
20 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
21 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
24 * You should have received a copy of the GNU General Public License along
25 * with this program; if not, write to the Free Software Foundation, Inc.,
26 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
29 #include <linux/clk.h>
30 #include <linux/debugfs.h>
31 #include <linux/delay.h>
32 #include <linux/export.h>
33 #include <linux/interrupt.h>
34 #include <linux/irq.h>
35 #include <linux/irqdomain.h>
36 #include <linux/kernel.h>
37 #include <linux/init.h>
38 #include <linux/msi.h>
39 #include <linux/of_address.h>
40 #include <linux/of_pci.h>
41 #include <linux/of_platform.h>
42 #include <linux/pci.h>
43 #include <linux/phy/phy.h>
44 #include <linux/platform_device.h>
45 #include <linux/reset.h>
46 #include <linux/sizes.h>
47 #include <linux/slab.h>
48 #include <linux/vmalloc.h>
49 #include <linux/regulator/consumer.h>
51 #include <soc/tegra/cpuidle.h>
52 #include <soc/tegra/pmc.h>
54 #define INT_PCI_MSI_NR (8 * 32)
56 /* register definitions */
58 #define AFI_AXI_BAR0_SZ 0x00
59 #define AFI_AXI_BAR1_SZ 0x04
60 #define AFI_AXI_BAR2_SZ 0x08
61 #define AFI_AXI_BAR3_SZ 0x0c
62 #define AFI_AXI_BAR4_SZ 0x10
63 #define AFI_AXI_BAR5_SZ 0x14
65 #define AFI_AXI_BAR0_START 0x18
66 #define AFI_AXI_BAR1_START 0x1c
67 #define AFI_AXI_BAR2_START 0x20
68 #define AFI_AXI_BAR3_START 0x24
69 #define AFI_AXI_BAR4_START 0x28
70 #define AFI_AXI_BAR5_START 0x2c
72 #define AFI_FPCI_BAR0 0x30
73 #define AFI_FPCI_BAR1 0x34
74 #define AFI_FPCI_BAR2 0x38
75 #define AFI_FPCI_BAR3 0x3c
76 #define AFI_FPCI_BAR4 0x40
77 #define AFI_FPCI_BAR5 0x44
79 #define AFI_CACHE_BAR0_SZ 0x48
80 #define AFI_CACHE_BAR0_ST 0x4c
81 #define AFI_CACHE_BAR1_SZ 0x50
82 #define AFI_CACHE_BAR1_ST 0x54
84 #define AFI_MSI_BAR_SZ 0x60
85 #define AFI_MSI_FPCI_BAR_ST 0x64
86 #define AFI_MSI_AXI_BAR_ST 0x68
88 #define AFI_MSI_VEC0 0x6c
89 #define AFI_MSI_VEC1 0x70
90 #define AFI_MSI_VEC2 0x74
91 #define AFI_MSI_VEC3 0x78
92 #define AFI_MSI_VEC4 0x7c
93 #define AFI_MSI_VEC5 0x80
94 #define AFI_MSI_VEC6 0x84
95 #define AFI_MSI_VEC7 0x88
97 #define AFI_MSI_EN_VEC0 0x8c
98 #define AFI_MSI_EN_VEC1 0x90
99 #define AFI_MSI_EN_VEC2 0x94
100 #define AFI_MSI_EN_VEC3 0x98
101 #define AFI_MSI_EN_VEC4 0x9c
102 #define AFI_MSI_EN_VEC5 0xa0
103 #define AFI_MSI_EN_VEC6 0xa4
104 #define AFI_MSI_EN_VEC7 0xa8
106 #define AFI_CONFIGURATION 0xac
107 #define AFI_CONFIGURATION_EN_FPCI (1 << 0)
109 #define AFI_FPCI_ERROR_MASKS 0xb0
111 #define AFI_INTR_MASK 0xb4
112 #define AFI_INTR_MASK_INT_MASK (1 << 0)
113 #define AFI_INTR_MASK_MSI_MASK (1 << 8)
115 #define AFI_INTR_CODE 0xb8
116 #define AFI_INTR_CODE_MASK 0xf
117 #define AFI_INTR_INI_SLAVE_ERROR 1
118 #define AFI_INTR_INI_DECODE_ERROR 2
119 #define AFI_INTR_TARGET_ABORT 3
120 #define AFI_INTR_MASTER_ABORT 4
121 #define AFI_INTR_INVALID_WRITE 5
122 #define AFI_INTR_LEGACY 6
123 #define AFI_INTR_FPCI_DECODE_ERROR 7
124 #define AFI_INTR_AXI_DECODE_ERROR 8
125 #define AFI_INTR_FPCI_TIMEOUT 9
126 #define AFI_INTR_PE_PRSNT_SENSE 10
127 #define AFI_INTR_PE_CLKREQ_SENSE 11
128 #define AFI_INTR_CLKCLAMP_SENSE 12
129 #define AFI_INTR_RDY4PD_SENSE 13
130 #define AFI_INTR_P2P_ERROR 14
132 #define AFI_INTR_SIGNATURE 0xbc
133 #define AFI_UPPER_FPCI_ADDRESS 0xc0
134 #define AFI_SM_INTR_ENABLE 0xc4
135 #define AFI_SM_INTR_INTA_ASSERT (1 << 0)
136 #define AFI_SM_INTR_INTB_ASSERT (1 << 1)
137 #define AFI_SM_INTR_INTC_ASSERT (1 << 2)
138 #define AFI_SM_INTR_INTD_ASSERT (1 << 3)
139 #define AFI_SM_INTR_INTA_DEASSERT (1 << 4)
140 #define AFI_SM_INTR_INTB_DEASSERT (1 << 5)
141 #define AFI_SM_INTR_INTC_DEASSERT (1 << 6)
142 #define AFI_SM_INTR_INTD_DEASSERT (1 << 7)
144 #define AFI_AFI_INTR_ENABLE 0xc8
145 #define AFI_INTR_EN_INI_SLVERR (1 << 0)
146 #define AFI_INTR_EN_INI_DECERR (1 << 1)
147 #define AFI_INTR_EN_TGT_SLVERR (1 << 2)
148 #define AFI_INTR_EN_TGT_DECERR (1 << 3)
149 #define AFI_INTR_EN_TGT_WRERR (1 << 4)
150 #define AFI_INTR_EN_DFPCI_DECERR (1 << 5)
151 #define AFI_INTR_EN_AXI_DECERR (1 << 6)
152 #define AFI_INTR_EN_FPCI_TIMEOUT (1 << 7)
153 #define AFI_INTR_EN_PRSNT_SENSE (1 << 8)
155 #define AFI_PCIE_CONFIG 0x0f8
156 #define AFI_PCIE_CONFIG_PCIE_DISABLE(x) (1 << ((x) + 1))
157 #define AFI_PCIE_CONFIG_PCIE_DISABLE_ALL 0xe
158 #define AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_MASK (0xf << 20)
159 #define AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_SINGLE (0x0 << 20)
160 #define AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_420 (0x0 << 20)
161 #define AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_X2_X1 (0x0 << 20)
162 #define AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_401 (0x0 << 20)
163 #define AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_DUAL (0x1 << 20)
164 #define AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_222 (0x1 << 20)
165 #define AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_X4_X1 (0x1 << 20)
166 #define AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_211 (0x1 << 20)
167 #define AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_411 (0x2 << 20)
168 #define AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_111 (0x2 << 20)
170 #define AFI_FUSE 0x104
171 #define AFI_FUSE_PCIE_T0_GEN2_DIS (1 << 2)
173 #define AFI_PEX0_CTRL 0x110
174 #define AFI_PEX1_CTRL 0x118
175 #define AFI_PEX2_CTRL 0x128
176 #define AFI_PEX_CTRL_RST (1 << 0)
177 #define AFI_PEX_CTRL_CLKREQ_EN (1 << 1)
178 #define AFI_PEX_CTRL_REFCLK_EN (1 << 3)
179 #define AFI_PEX_CTRL_OVERRIDE_EN (1 << 4)
181 #define AFI_PLLE_CONTROL 0x160
182 #define AFI_PLLE_CONTROL_BYPASS_PADS2PLLE_CONTROL (1 << 9)
183 #define AFI_PLLE_CONTROL_PADS2PLLE_CONTROL_EN (1 << 1)
185 #define AFI_PEXBIAS_CTRL_0 0x168
187 #define RP_VEND_XP 0x00000f00
188 #define RP_VEND_XP_DL_UP (1 << 30)
190 #define RP_VEND_CTL2 0x00000fa8
191 #define RP_VEND_CTL2_PCA_ENABLE (1 << 7)
193 #define RP_PRIV_MISC 0x00000fe0
194 #define RP_PRIV_MISC_PRSNT_MAP_EP_PRSNT (0xe << 0)
195 #define RP_PRIV_MISC_PRSNT_MAP_EP_ABSNT (0xf << 0)
197 #define RP_LINK_CONTROL_STATUS 0x00000090
198 #define RP_LINK_CONTROL_STATUS_DL_LINK_ACTIVE 0x20000000
199 #define RP_LINK_CONTROL_STATUS_LINKSTAT_MASK 0x3fff0000
201 #define PADS_CTL_SEL 0x0000009c
203 #define PADS_CTL 0x000000a0
204 #define PADS_CTL_IDDQ_1L (1 << 0)
205 #define PADS_CTL_TX_DATA_EN_1L (1 << 6)
206 #define PADS_CTL_RX_DATA_EN_1L (1 << 10)
208 #define PADS_PLL_CTL_TEGRA20 0x000000b8
209 #define PADS_PLL_CTL_TEGRA30 0x000000b4
210 #define PADS_PLL_CTL_RST_B4SM (1 << 1)
211 #define PADS_PLL_CTL_LOCKDET (1 << 8)
212 #define PADS_PLL_CTL_REFCLK_MASK (0x3 << 16)
213 #define PADS_PLL_CTL_REFCLK_INTERNAL_CML (0 << 16)
214 #define PADS_PLL_CTL_REFCLK_INTERNAL_CMOS (1 << 16)
215 #define PADS_PLL_CTL_REFCLK_EXTERNAL (2 << 16)
216 #define PADS_PLL_CTL_TXCLKREF_MASK (0x1 << 20)
217 #define PADS_PLL_CTL_TXCLKREF_DIV10 (0 << 20)
218 #define PADS_PLL_CTL_TXCLKREF_DIV5 (1 << 20)
219 #define PADS_PLL_CTL_TXCLKREF_BUF_EN (1 << 22)
221 #define PADS_REFCLK_CFG0 0x000000c8
222 #define PADS_REFCLK_CFG1 0x000000cc
223 #define PADS_REFCLK_BIAS 0x000000d0
226 * Fields in PADS_REFCLK_CFG*. Those registers form an array of 16-bit
227 * entries, one entry per PCIe port. These field definitions and desired
228 * values aren't in the TRM, but do come from NVIDIA.
230 #define PADS_REFCLK_CFG_TERM_SHIFT 2 /* 6:2 */
231 #define PADS_REFCLK_CFG_E_TERM_SHIFT 7
232 #define PADS_REFCLK_CFG_PREDI_SHIFT 8 /* 11:8 */
233 #define PADS_REFCLK_CFG_DRVI_SHIFT 12 /* 15:12 */
236 struct msi_controller chip
;
237 DECLARE_BITMAP(used
, INT_PCI_MSI_NR
);
238 struct irq_domain
*domain
;
245 /* used to differentiate between Tegra SoC generations */
246 struct tegra_pcie_soc
{
247 unsigned int num_ports
;
248 unsigned int msi_base_shift
;
251 u32 pads_refclk_cfg0
;
252 u32 pads_refclk_cfg1
;
253 bool has_pex_clkreq_en
;
254 bool has_pex_bias_ctrl
;
255 bool has_intr_prsnt_sense
;
258 bool force_pca_enable
;
262 static inline struct tegra_msi
*to_tegra_msi(struct msi_controller
*chip
)
264 return container_of(chip
, struct tegra_msi
, chip
);
274 struct list_head buses
;
280 struct resource prefetch
;
281 struct resource busn
;
293 struct reset_control
*pex_rst
;
294 struct reset_control
*afi_rst
;
295 struct reset_control
*pcie_xrst
;
300 struct tegra_msi msi
;
302 struct list_head ports
;
305 struct regulator_bulk_data
*supplies
;
306 unsigned int num_supplies
;
308 const struct tegra_pcie_soc
*soc
;
309 struct dentry
*debugfs
;
312 struct tegra_pcie_port
{
313 struct tegra_pcie
*pcie
;
314 struct device_node
*np
;
315 struct list_head list
;
316 struct resource regs
;
324 struct tegra_pcie_bus
{
325 struct vm_struct
*area
;
326 struct list_head list
;
330 static inline void afi_writel(struct tegra_pcie
*pcie
, u32 value
,
331 unsigned long offset
)
333 writel(value
, pcie
->afi
+ offset
);
336 static inline u32
afi_readl(struct tegra_pcie
*pcie
, unsigned long offset
)
338 return readl(pcie
->afi
+ offset
);
341 static inline void pads_writel(struct tegra_pcie
*pcie
, u32 value
,
342 unsigned long offset
)
344 writel(value
, pcie
->pads
+ offset
);
347 static inline u32
pads_readl(struct tegra_pcie
*pcie
, unsigned long offset
)
349 return readl(pcie
->pads
+ offset
);
353 * The configuration space mapping on Tegra is somewhat similar to the ECAM
354 * defined by PCIe. However it deviates a bit in how the 4 bits for extended
355 * register accesses are mapped:
357 * [27:24] extended register number
359 * [15:11] device number
360 * [10: 8] function number
361 * [ 7: 0] register number
363 * Mapping the whole extended configuration space would require 256 MiB of
364 * virtual address space, only a small part of which will actually be used.
365 * To work around this, a 1 MiB of virtual addresses are allocated per bus
366 * when the bus is first accessed. When the physical range is mapped, the
367 * the bus number bits are hidden so that the extended register number bits
368 * appear as bits [19:16]. Therefore the virtual mapping looks like this:
370 * [19:16] extended register number
371 * [15:11] device number
372 * [10: 8] function number
373 * [ 7: 0] register number
375 * This is achieved by stitching together 16 chunks of 64 KiB of physical
376 * address space via the MMU.
/*
 * Compute the offset of a config-space register inside the remapped
 * 1 MiB per-bus window: bits [19:16] carry the extended register
 * number, [15:11] the device, [10:8] the function and [7:0] the
 * dword-aligned register number.
 */
static unsigned long tegra_pcie_conf_offset(unsigned int devfn, int where)
{
	unsigned long offset;

	offset = (where & 0xf00) << 8;		/* extended register bits */
	offset |= PCI_SLOT(devfn) << 11;	/* device number */
	offset |= PCI_FUNC(devfn) << 8;		/* function number */
	offset |= where & 0xfc;			/* dword-aligned register */

	return offset;
}
384 static struct tegra_pcie_bus
*tegra_pcie_bus_alloc(struct tegra_pcie
*pcie
,
387 struct device
*dev
= pcie
->dev
;
388 pgprot_t prot
= pgprot_noncached(PAGE_KERNEL
);
389 phys_addr_t cs
= pcie
->cs
->start
;
390 struct tegra_pcie_bus
*bus
;
394 bus
= kzalloc(sizeof(*bus
), GFP_KERNEL
);
396 return ERR_PTR(-ENOMEM
);
398 INIT_LIST_HEAD(&bus
->list
);
401 /* allocate 1 MiB of virtual addresses */
402 bus
->area
= get_vm_area(SZ_1M
, VM_IOREMAP
);
408 /* map each of the 16 chunks of 64 KiB each */
409 for (i
= 0; i
< 16; i
++) {
410 unsigned long virt
= (unsigned long)bus
->area
->addr
+
412 phys_addr_t phys
= cs
+ i
* SZ_16M
+ busnr
* SZ_64K
;
414 err
= ioremap_page_range(virt
, virt
+ SZ_64K
, phys
, prot
);
416 dev_err(dev
, "ioremap_page_range() failed: %d\n", err
);
424 vunmap(bus
->area
->addr
);
430 static int tegra_pcie_add_bus(struct pci_bus
*bus
)
432 struct pci_host_bridge
*host
= pci_find_host_bridge(bus
);
433 struct tegra_pcie
*pcie
= pci_host_bridge_priv(host
);
434 struct tegra_pcie_bus
*b
;
436 b
= tegra_pcie_bus_alloc(pcie
, bus
->number
);
440 list_add_tail(&b
->list
, &pcie
->buses
);
445 static void tegra_pcie_remove_bus(struct pci_bus
*child
)
447 struct pci_host_bridge
*host
= pci_find_host_bridge(child
);
448 struct tegra_pcie
*pcie
= pci_host_bridge_priv(host
);
449 struct tegra_pcie_bus
*bus
, *tmp
;
451 list_for_each_entry_safe(bus
, tmp
, &pcie
->buses
, list
) {
452 if (bus
->nr
== child
->number
) {
453 vunmap(bus
->area
->addr
);
454 list_del(&bus
->list
);
461 static void __iomem
*tegra_pcie_map_bus(struct pci_bus
*bus
,
465 struct pci_host_bridge
*host
= pci_find_host_bridge(bus
);
466 struct tegra_pcie
*pcie
= pci_host_bridge_priv(host
);
467 struct device
*dev
= pcie
->dev
;
468 void __iomem
*addr
= NULL
;
470 if (bus
->number
== 0) {
471 unsigned int slot
= PCI_SLOT(devfn
);
472 struct tegra_pcie_port
*port
;
474 list_for_each_entry(port
, &pcie
->ports
, list
) {
475 if (port
->index
+ 1 == slot
) {
476 addr
= port
->base
+ (where
& ~3);
481 struct tegra_pcie_bus
*b
;
483 list_for_each_entry(b
, &pcie
->buses
, list
)
484 if (b
->nr
== bus
->number
)
485 addr
= (void __iomem
*)b
->area
->addr
;
488 dev_err(dev
, "failed to map cfg. space for bus %u\n",
493 addr
+= tegra_pcie_conf_offset(devfn
, where
);
499 static int tegra_pcie_config_read(struct pci_bus
*bus
, unsigned int devfn
,
500 int where
, int size
, u32
*value
)
502 if (bus
->number
== 0)
503 return pci_generic_config_read32(bus
, devfn
, where
, size
,
506 return pci_generic_config_read(bus
, devfn
, where
, size
, value
);
509 static int tegra_pcie_config_write(struct pci_bus
*bus
, unsigned int devfn
,
510 int where
, int size
, u32 value
)
512 if (bus
->number
== 0)
513 return pci_generic_config_write32(bus
, devfn
, where
, size
,
516 return pci_generic_config_write(bus
, devfn
, where
, size
, value
);
519 static struct pci_ops tegra_pcie_ops
= {
520 .add_bus
= tegra_pcie_add_bus
,
521 .remove_bus
= tegra_pcie_remove_bus
,
522 .map_bus
= tegra_pcie_map_bus
,
523 .read
= tegra_pcie_config_read
,
524 .write
= tegra_pcie_config_write
,
527 static unsigned long tegra_pcie_port_get_pex_ctrl(struct tegra_pcie_port
*port
)
529 unsigned long ret
= 0;
531 switch (port
->index
) {
548 static void tegra_pcie_port_reset(struct tegra_pcie_port
*port
)
550 unsigned long ctrl
= tegra_pcie_port_get_pex_ctrl(port
);
553 /* pulse reset signal */
554 value
= afi_readl(port
->pcie
, ctrl
);
555 value
&= ~AFI_PEX_CTRL_RST
;
556 afi_writel(port
->pcie
, value
, ctrl
);
558 usleep_range(1000, 2000);
560 value
= afi_readl(port
->pcie
, ctrl
);
561 value
|= AFI_PEX_CTRL_RST
;
562 afi_writel(port
->pcie
, value
, ctrl
);
565 static void tegra_pcie_port_enable(struct tegra_pcie_port
*port
)
567 unsigned long ctrl
= tegra_pcie_port_get_pex_ctrl(port
);
568 const struct tegra_pcie_soc
*soc
= port
->pcie
->soc
;
571 /* enable reference clock */
572 value
= afi_readl(port
->pcie
, ctrl
);
573 value
|= AFI_PEX_CTRL_REFCLK_EN
;
575 if (soc
->has_pex_clkreq_en
)
576 value
|= AFI_PEX_CTRL_CLKREQ_EN
;
578 value
|= AFI_PEX_CTRL_OVERRIDE_EN
;
580 afi_writel(port
->pcie
, value
, ctrl
);
582 tegra_pcie_port_reset(port
);
584 if (soc
->force_pca_enable
) {
585 value
= readl(port
->base
+ RP_VEND_CTL2
);
586 value
|= RP_VEND_CTL2_PCA_ENABLE
;
587 writel(value
, port
->base
+ RP_VEND_CTL2
);
591 static void tegra_pcie_port_disable(struct tegra_pcie_port
*port
)
593 unsigned long ctrl
= tegra_pcie_port_get_pex_ctrl(port
);
594 const struct tegra_pcie_soc
*soc
= port
->pcie
->soc
;
597 /* assert port reset */
598 value
= afi_readl(port
->pcie
, ctrl
);
599 value
&= ~AFI_PEX_CTRL_RST
;
600 afi_writel(port
->pcie
, value
, ctrl
);
602 /* disable reference clock */
603 value
= afi_readl(port
->pcie
, ctrl
);
605 if (soc
->has_pex_clkreq_en
)
606 value
&= ~AFI_PEX_CTRL_CLKREQ_EN
;
608 value
&= ~AFI_PEX_CTRL_REFCLK_EN
;
609 afi_writel(port
->pcie
, value
, ctrl
);
612 static void tegra_pcie_port_free(struct tegra_pcie_port
*port
)
614 struct tegra_pcie
*pcie
= port
->pcie
;
615 struct device
*dev
= pcie
->dev
;
617 devm_iounmap(dev
, port
->base
);
618 devm_release_mem_region(dev
, port
->regs
.start
,
619 resource_size(&port
->regs
));
620 list_del(&port
->list
);
621 devm_kfree(dev
, port
);
624 /* Tegra PCIE root complex wrongly reports device class */
625 static void tegra_pcie_fixup_class(struct pci_dev
*dev
)
627 dev
->class = PCI_CLASS_BRIDGE_PCI
<< 8;
629 DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_NVIDIA
, 0x0bf0, tegra_pcie_fixup_class
);
630 DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_NVIDIA
, 0x0bf1, tegra_pcie_fixup_class
);
631 DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_NVIDIA
, 0x0e1c, tegra_pcie_fixup_class
);
632 DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_NVIDIA
, 0x0e1d, tegra_pcie_fixup_class
);
634 /* Tegra PCIE requires relaxed ordering */
635 static void tegra_pcie_relax_enable(struct pci_dev
*dev
)
637 pcie_capability_set_word(dev
, PCI_EXP_DEVCTL
, PCI_EXP_DEVCTL_RELAX_EN
);
639 DECLARE_PCI_FIXUP_FINAL(PCI_ANY_ID
, PCI_ANY_ID
, tegra_pcie_relax_enable
);
641 static int tegra_pcie_request_resources(struct tegra_pcie
*pcie
)
643 struct pci_host_bridge
*host
= pci_host_bridge_from_priv(pcie
);
644 struct list_head
*windows
= &host
->windows
;
645 struct device
*dev
= pcie
->dev
;
648 pci_add_resource_offset(windows
, &pcie
->pio
, pcie
->offset
.io
);
649 pci_add_resource_offset(windows
, &pcie
->mem
, pcie
->offset
.mem
);
650 pci_add_resource_offset(windows
, &pcie
->prefetch
, pcie
->offset
.mem
);
651 pci_add_resource(windows
, &pcie
->busn
);
653 err
= devm_request_pci_bus_resources(dev
, windows
);
657 pci_remap_iospace(&pcie
->pio
, pcie
->io
.start
);
662 static int tegra_pcie_map_irq(const struct pci_dev
*pdev
, u8 slot
, u8 pin
)
664 struct pci_host_bridge
*host
= pci_find_host_bridge(pdev
->bus
);
665 struct tegra_pcie
*pcie
= pci_host_bridge_priv(host
);
668 tegra_cpuidle_pcie_irqs_in_use();
670 irq
= of_irq_parse_and_map_pci(pdev
, slot
, pin
);
677 static irqreturn_t
tegra_pcie_isr(int irq
, void *arg
)
679 const char *err_msg
[] = {
687 "Response decoding error",
688 "AXI response decoding error",
689 "Transaction timeout",
690 "Slot present pin change",
691 "Slot clock request change",
692 "TMS clock ramp change",
693 "TMS ready for power down",
696 struct tegra_pcie
*pcie
= arg
;
697 struct device
*dev
= pcie
->dev
;
700 code
= afi_readl(pcie
, AFI_INTR_CODE
) & AFI_INTR_CODE_MASK
;
701 signature
= afi_readl(pcie
, AFI_INTR_SIGNATURE
);
702 afi_writel(pcie
, 0, AFI_INTR_CODE
);
704 if (code
== AFI_INTR_LEGACY
)
707 if (code
>= ARRAY_SIZE(err_msg
))
711 * do not pollute kernel log with master abort reports since they
712 * happen a lot during enumeration
714 if (code
== AFI_INTR_MASTER_ABORT
)
715 dev_dbg(dev
, "%s, signature: %08x\n", err_msg
[code
], signature
);
717 dev_err(dev
, "%s, signature: %08x\n", err_msg
[code
], signature
);
719 if (code
== AFI_INTR_TARGET_ABORT
|| code
== AFI_INTR_MASTER_ABORT
||
720 code
== AFI_INTR_FPCI_DECODE_ERROR
) {
721 u32 fpci
= afi_readl(pcie
, AFI_UPPER_FPCI_ADDRESS
) & 0xff;
722 u64 address
= (u64
)fpci
<< 32 | (signature
& 0xfffffffc);
724 if (code
== AFI_INTR_MASTER_ABORT
)
725 dev_dbg(dev
, " FPCI address: %10llx\n", address
);
727 dev_err(dev
, " FPCI address: %10llx\n", address
);
734 * FPCI map is as follows:
735 * - 0xfdfc000000: I/O space
736 * - 0xfdfe000000: type 0 configuration space
737 * - 0xfdff000000: type 1 configuration space
738 * - 0xfe00000000: type 0 extended configuration space
739 * - 0xfe10000000: type 1 extended configuration space
741 static void tegra_pcie_setup_translations(struct tegra_pcie
*pcie
)
743 u32 fpci_bar
, size
, axi_address
;
745 /* Bar 0: type 1 extended configuration space */
746 fpci_bar
= 0xfe100000;
747 size
= resource_size(pcie
->cs
);
748 axi_address
= pcie
->cs
->start
;
749 afi_writel(pcie
, axi_address
, AFI_AXI_BAR0_START
);
750 afi_writel(pcie
, size
>> 12, AFI_AXI_BAR0_SZ
);
751 afi_writel(pcie
, fpci_bar
, AFI_FPCI_BAR0
);
753 /* Bar 1: downstream IO bar */
754 fpci_bar
= 0xfdfc0000;
755 size
= resource_size(&pcie
->io
);
756 axi_address
= pcie
->io
.start
;
757 afi_writel(pcie
, axi_address
, AFI_AXI_BAR1_START
);
758 afi_writel(pcie
, size
>> 12, AFI_AXI_BAR1_SZ
);
759 afi_writel(pcie
, fpci_bar
, AFI_FPCI_BAR1
);
761 /* Bar 2: prefetchable memory BAR */
762 fpci_bar
= (((pcie
->prefetch
.start
>> 12) & 0x0fffffff) << 4) | 0x1;
763 size
= resource_size(&pcie
->prefetch
);
764 axi_address
= pcie
->prefetch
.start
;
765 afi_writel(pcie
, axi_address
, AFI_AXI_BAR2_START
);
766 afi_writel(pcie
, size
>> 12, AFI_AXI_BAR2_SZ
);
767 afi_writel(pcie
, fpci_bar
, AFI_FPCI_BAR2
);
769 /* Bar 3: non prefetchable memory BAR */
770 fpci_bar
= (((pcie
->mem
.start
>> 12) & 0x0fffffff) << 4) | 0x1;
771 size
= resource_size(&pcie
->mem
);
772 axi_address
= pcie
->mem
.start
;
773 afi_writel(pcie
, axi_address
, AFI_AXI_BAR3_START
);
774 afi_writel(pcie
, size
>> 12, AFI_AXI_BAR3_SZ
);
775 afi_writel(pcie
, fpci_bar
, AFI_FPCI_BAR3
);
777 /* NULL out the remaining BARs as they are not used */
778 afi_writel(pcie
, 0, AFI_AXI_BAR4_START
);
779 afi_writel(pcie
, 0, AFI_AXI_BAR4_SZ
);
780 afi_writel(pcie
, 0, AFI_FPCI_BAR4
);
782 afi_writel(pcie
, 0, AFI_AXI_BAR5_START
);
783 afi_writel(pcie
, 0, AFI_AXI_BAR5_SZ
);
784 afi_writel(pcie
, 0, AFI_FPCI_BAR5
);
786 /* map all upstream transactions as uncached */
787 afi_writel(pcie
, 0, AFI_CACHE_BAR0_ST
);
788 afi_writel(pcie
, 0, AFI_CACHE_BAR0_SZ
);
789 afi_writel(pcie
, 0, AFI_CACHE_BAR1_ST
);
790 afi_writel(pcie
, 0, AFI_CACHE_BAR1_SZ
);
792 /* MSI translations are setup only when needed */
793 afi_writel(pcie
, 0, AFI_MSI_FPCI_BAR_ST
);
794 afi_writel(pcie
, 0, AFI_MSI_BAR_SZ
);
795 afi_writel(pcie
, 0, AFI_MSI_AXI_BAR_ST
);
796 afi_writel(pcie
, 0, AFI_MSI_BAR_SZ
);
799 static int tegra_pcie_pll_wait(struct tegra_pcie
*pcie
, unsigned long timeout
)
801 const struct tegra_pcie_soc
*soc
= pcie
->soc
;
804 timeout
= jiffies
+ msecs_to_jiffies(timeout
);
806 while (time_before(jiffies
, timeout
)) {
807 value
= pads_readl(pcie
, soc
->pads_pll_ctl
);
808 if (value
& PADS_PLL_CTL_LOCKDET
)
815 static int tegra_pcie_phy_enable(struct tegra_pcie
*pcie
)
817 struct device
*dev
= pcie
->dev
;
818 const struct tegra_pcie_soc
*soc
= pcie
->soc
;
822 /* initialize internal PHY, enable up to 16 PCIE lanes */
823 pads_writel(pcie
, 0x0, PADS_CTL_SEL
);
825 /* override IDDQ to 1 on all 4 lanes */
826 value
= pads_readl(pcie
, PADS_CTL
);
827 value
|= PADS_CTL_IDDQ_1L
;
828 pads_writel(pcie
, value
, PADS_CTL
);
831 * Set up PHY PLL inputs select PLLE output as refclock,
832 * set TX ref sel to div10 (not div5).
834 value
= pads_readl(pcie
, soc
->pads_pll_ctl
);
835 value
&= ~(PADS_PLL_CTL_REFCLK_MASK
| PADS_PLL_CTL_TXCLKREF_MASK
);
836 value
|= PADS_PLL_CTL_REFCLK_INTERNAL_CML
| soc
->tx_ref_sel
;
837 pads_writel(pcie
, value
, soc
->pads_pll_ctl
);
840 value
= pads_readl(pcie
, soc
->pads_pll_ctl
);
841 value
&= ~PADS_PLL_CTL_RST_B4SM
;
842 pads_writel(pcie
, value
, soc
->pads_pll_ctl
);
844 usleep_range(20, 100);
846 /* take PLL out of reset */
847 value
= pads_readl(pcie
, soc
->pads_pll_ctl
);
848 value
|= PADS_PLL_CTL_RST_B4SM
;
849 pads_writel(pcie
, value
, soc
->pads_pll_ctl
);
851 /* wait for the PLL to lock */
852 err
= tegra_pcie_pll_wait(pcie
, 500);
854 dev_err(dev
, "PLL failed to lock: %d\n", err
);
858 /* turn off IDDQ override */
859 value
= pads_readl(pcie
, PADS_CTL
);
860 value
&= ~PADS_CTL_IDDQ_1L
;
861 pads_writel(pcie
, value
, PADS_CTL
);
863 /* enable TX/RX data */
864 value
= pads_readl(pcie
, PADS_CTL
);
865 value
|= PADS_CTL_TX_DATA_EN_1L
| PADS_CTL_RX_DATA_EN_1L
;
866 pads_writel(pcie
, value
, PADS_CTL
);
871 static int tegra_pcie_phy_disable(struct tegra_pcie
*pcie
)
873 const struct tegra_pcie_soc
*soc
= pcie
->soc
;
876 /* disable TX/RX data */
877 value
= pads_readl(pcie
, PADS_CTL
);
878 value
&= ~(PADS_CTL_TX_DATA_EN_1L
| PADS_CTL_RX_DATA_EN_1L
);
879 pads_writel(pcie
, value
, PADS_CTL
);
882 value
= pads_readl(pcie
, PADS_CTL
);
883 value
|= PADS_CTL_IDDQ_1L
;
884 pads_writel(pcie
, value
, PADS_CTL
);
887 value
= pads_readl(pcie
, soc
->pads_pll_ctl
);
888 value
&= ~PADS_PLL_CTL_RST_B4SM
;
889 pads_writel(pcie
, value
, soc
->pads_pll_ctl
);
891 usleep_range(20, 100);
/*
 * Power on one PHY per configured lane of @port via the generic PHY
 * framework.
 * NOTE(review): the counter/err declarations and the error-return path
 * fall on source lines missing from this extraction -- restore from the
 * upstream driver before compiling.
 */
896 static int tegra_pcie_port_phy_power_on(struct tegra_pcie_port
*port
)
898 struct device
*dev
= port
->pcie
->dev
;
/* walk every lane's PHY */
902 for (i
= 0; i
< port
->lanes
; i
++) {
903 err
= phy_power_on(port
->phys
[i
]);
/* report which lane's PHY failed to power on */
905 dev_err(dev
, "failed to power on PHY#%u: %d\n", i
, err
);
/*
 * Power off one PHY per configured lane of @port; mirror image of
 * tegra_pcie_port_phy_power_on().
 * NOTE(review): declarations and the error/return lines are missing
 * from this extraction -- restore from the upstream driver.
 */
913 static int tegra_pcie_port_phy_power_off(struct tegra_pcie_port
*port
)
915 struct device
*dev
= port
->pcie
->dev
;
/* walk every lane's PHY */
919 for (i
= 0; i
< port
->lanes
; i
++) {
920 err
= phy_power_off(port
->phys
[i
]);
/* report which lane's PHY failed to power off */
922 dev_err(dev
, "failed to power off PHY#%u: %d\n", i
,
931 static int tegra_pcie_phy_power_on(struct tegra_pcie
*pcie
)
933 struct device
*dev
= pcie
->dev
;
934 const struct tegra_pcie_soc
*soc
= pcie
->soc
;
935 struct tegra_pcie_port
*port
;
938 if (pcie
->legacy_phy
) {
940 err
= phy_power_on(pcie
->phy
);
942 err
= tegra_pcie_phy_enable(pcie
);
945 dev_err(dev
, "failed to power on PHY: %d\n", err
);
950 list_for_each_entry(port
, &pcie
->ports
, list
) {
951 err
= tegra_pcie_port_phy_power_on(port
);
954 "failed to power on PCIe port %u PHY: %d\n",
960 /* Configure the reference clock driver */
961 pads_writel(pcie
, soc
->pads_refclk_cfg0
, PADS_REFCLK_CFG0
);
963 if (soc
->num_ports
> 2)
964 pads_writel(pcie
, soc
->pads_refclk_cfg1
, PADS_REFCLK_CFG1
);
969 static int tegra_pcie_phy_power_off(struct tegra_pcie
*pcie
)
971 struct device
*dev
= pcie
->dev
;
972 struct tegra_pcie_port
*port
;
975 if (pcie
->legacy_phy
) {
977 err
= phy_power_off(pcie
->phy
);
979 err
= tegra_pcie_phy_disable(pcie
);
982 dev_err(dev
, "failed to power off PHY: %d\n", err
);
987 list_for_each_entry(port
, &pcie
->ports
, list
) {
988 err
= tegra_pcie_port_phy_power_off(port
);
991 "failed to power off PCIe port %u PHY: %d\n",
1000 static int tegra_pcie_enable_controller(struct tegra_pcie
*pcie
)
1002 struct device
*dev
= pcie
->dev
;
1003 const struct tegra_pcie_soc
*soc
= pcie
->soc
;
1004 struct tegra_pcie_port
*port
;
1005 unsigned long value
;
1008 /* enable PLL power down */
1010 value
= afi_readl(pcie
, AFI_PLLE_CONTROL
);
1011 value
&= ~AFI_PLLE_CONTROL_BYPASS_PADS2PLLE_CONTROL
;
1012 value
|= AFI_PLLE_CONTROL_PADS2PLLE_CONTROL_EN
;
1013 afi_writel(pcie
, value
, AFI_PLLE_CONTROL
);
1016 /* power down PCIe slot clock bias pad */
1017 if (soc
->has_pex_bias_ctrl
)
1018 afi_writel(pcie
, 0, AFI_PEXBIAS_CTRL_0
);
1020 /* configure mode and disable all ports */
1021 value
= afi_readl(pcie
, AFI_PCIE_CONFIG
);
1022 value
&= ~AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_MASK
;
1023 value
|= AFI_PCIE_CONFIG_PCIE_DISABLE_ALL
| pcie
->xbar_config
;
1025 list_for_each_entry(port
, &pcie
->ports
, list
)
1026 value
&= ~AFI_PCIE_CONFIG_PCIE_DISABLE(port
->index
);
1028 afi_writel(pcie
, value
, AFI_PCIE_CONFIG
);
1030 if (soc
->has_gen2
) {
1031 value
= afi_readl(pcie
, AFI_FUSE
);
1032 value
&= ~AFI_FUSE_PCIE_T0_GEN2_DIS
;
1033 afi_writel(pcie
, value
, AFI_FUSE
);
1035 value
= afi_readl(pcie
, AFI_FUSE
);
1036 value
|= AFI_FUSE_PCIE_T0_GEN2_DIS
;
1037 afi_writel(pcie
, value
, AFI_FUSE
);
1040 if (soc
->program_uphy
) {
1041 err
= tegra_pcie_phy_power_on(pcie
);
1043 dev_err(dev
, "failed to power on PHY(s): %d\n", err
);
1048 /* take the PCIe interface module out of reset */
1049 reset_control_deassert(pcie
->pcie_xrst
);
1051 /* finally enable PCIe */
1052 value
= afi_readl(pcie
, AFI_CONFIGURATION
);
1053 value
|= AFI_CONFIGURATION_EN_FPCI
;
1054 afi_writel(pcie
, value
, AFI_CONFIGURATION
);
1056 value
= AFI_INTR_EN_INI_SLVERR
| AFI_INTR_EN_INI_DECERR
|
1057 AFI_INTR_EN_TGT_SLVERR
| AFI_INTR_EN_TGT_DECERR
|
1058 AFI_INTR_EN_TGT_WRERR
| AFI_INTR_EN_DFPCI_DECERR
;
1060 if (soc
->has_intr_prsnt_sense
)
1061 value
|= AFI_INTR_EN_PRSNT_SENSE
;
1063 afi_writel(pcie
, value
, AFI_AFI_INTR_ENABLE
);
1064 afi_writel(pcie
, 0xffffffff, AFI_SM_INTR_ENABLE
);
1066 /* don't enable MSI for now, only when needed */
1067 afi_writel(pcie
, AFI_INTR_MASK_INT_MASK
, AFI_INTR_MASK
);
1069 /* disable all exceptions */
1070 afi_writel(pcie
, 0, AFI_FPCI_ERROR_MASKS
);
1075 static void tegra_pcie_power_off(struct tegra_pcie
*pcie
)
1077 struct device
*dev
= pcie
->dev
;
1078 const struct tegra_pcie_soc
*soc
= pcie
->soc
;
1081 /* TODO: disable and unprepare clocks? */
1083 if (soc
->program_uphy
) {
1084 err
= tegra_pcie_phy_power_off(pcie
);
1086 dev_err(dev
, "failed to power off PHY(s): %d\n", err
);
1089 reset_control_assert(pcie
->pcie_xrst
);
1090 reset_control_assert(pcie
->afi_rst
);
1091 reset_control_assert(pcie
->pex_rst
);
1093 if (!dev
->pm_domain
)
1094 tegra_powergate_power_off(TEGRA_POWERGATE_PCIE
);
1096 err
= regulator_bulk_disable(pcie
->num_supplies
, pcie
->supplies
);
1098 dev_warn(dev
, "failed to disable regulators: %d\n", err
);
1101 static int tegra_pcie_power_on(struct tegra_pcie
*pcie
)
1103 struct device
*dev
= pcie
->dev
;
1104 const struct tegra_pcie_soc
*soc
= pcie
->soc
;
1107 reset_control_assert(pcie
->pcie_xrst
);
1108 reset_control_assert(pcie
->afi_rst
);
1109 reset_control_assert(pcie
->pex_rst
);
1111 if (!dev
->pm_domain
)
1112 tegra_powergate_power_off(TEGRA_POWERGATE_PCIE
);
1114 /* enable regulators */
1115 err
= regulator_bulk_enable(pcie
->num_supplies
, pcie
->supplies
);
1117 dev_err(dev
, "failed to enable regulators: %d\n", err
);
1119 if (dev
->pm_domain
) {
1120 err
= clk_prepare_enable(pcie
->pex_clk
);
1122 dev_err(dev
, "failed to enable PEX clock: %d\n", err
);
1125 reset_control_deassert(pcie
->pex_rst
);
1127 err
= tegra_powergate_sequence_power_up(TEGRA_POWERGATE_PCIE
,
1131 dev_err(dev
, "powerup sequence failed: %d\n", err
);
1136 reset_control_deassert(pcie
->afi_rst
);
1138 err
= clk_prepare_enable(pcie
->afi_clk
);
1140 dev_err(dev
, "failed to enable AFI clock: %d\n", err
);
1144 if (soc
->has_cml_clk
) {
1145 err
= clk_prepare_enable(pcie
->cml_clk
);
1147 dev_err(dev
, "failed to enable CML clock: %d\n", err
);
1152 err
= clk_prepare_enable(pcie
->pll_e
);
1154 dev_err(dev
, "failed to enable PLLE clock: %d\n", err
);
1161 static int tegra_pcie_clocks_get(struct tegra_pcie
*pcie
)
1163 struct device
*dev
= pcie
->dev
;
1164 const struct tegra_pcie_soc
*soc
= pcie
->soc
;
1166 pcie
->pex_clk
= devm_clk_get(dev
, "pex");
1167 if (IS_ERR(pcie
->pex_clk
))
1168 return PTR_ERR(pcie
->pex_clk
);
1170 pcie
->afi_clk
= devm_clk_get(dev
, "afi");
1171 if (IS_ERR(pcie
->afi_clk
))
1172 return PTR_ERR(pcie
->afi_clk
);
1174 pcie
->pll_e
= devm_clk_get(dev
, "pll_e");
1175 if (IS_ERR(pcie
->pll_e
))
1176 return PTR_ERR(pcie
->pll_e
);
1178 if (soc
->has_cml_clk
) {
1179 pcie
->cml_clk
= devm_clk_get(dev
, "cml");
1180 if (IS_ERR(pcie
->cml_clk
))
1181 return PTR_ERR(pcie
->cml_clk
);
1187 static int tegra_pcie_resets_get(struct tegra_pcie
*pcie
)
1189 struct device
*dev
= pcie
->dev
;
1191 pcie
->pex_rst
= devm_reset_control_get_exclusive(dev
, "pex");
1192 if (IS_ERR(pcie
->pex_rst
))
1193 return PTR_ERR(pcie
->pex_rst
);
1195 pcie
->afi_rst
= devm_reset_control_get_exclusive(dev
, "afi");
1196 if (IS_ERR(pcie
->afi_rst
))
1197 return PTR_ERR(pcie
->afi_rst
);
1199 pcie
->pcie_xrst
= devm_reset_control_get_exclusive(dev
, "pcie_x");
1200 if (IS_ERR(pcie
->pcie_xrst
))
1201 return PTR_ERR(pcie
->pcie_xrst
);
1206 static int tegra_pcie_phys_get_legacy(struct tegra_pcie
*pcie
)
1208 struct device
*dev
= pcie
->dev
;
1211 pcie
->phy
= devm_phy_optional_get(dev
, "pcie");
1212 if (IS_ERR(pcie
->phy
)) {
1213 err
= PTR_ERR(pcie
->phy
);
1214 dev_err(dev
, "failed to get PHY: %d\n", err
);
1218 err
= phy_init(pcie
->phy
);
1220 dev_err(dev
, "failed to initialize PHY: %d\n", err
);
1224 pcie
->legacy_phy
= true;
1229 static struct phy
*devm_of_phy_optional_get_index(struct device
*dev
,
1230 struct device_node
*np
,
1231 const char *consumer
,
1237 name
= kasprintf(GFP_KERNEL
, "%s-%u", consumer
, index
);
1239 return ERR_PTR(-ENOMEM
);
1241 phy
= devm_of_phy_get(dev
, np
, name
);
1244 if (IS_ERR(phy
) && PTR_ERR(phy
) == -ENODEV
)
1250 static int tegra_pcie_port_get_phys(struct tegra_pcie_port
*port
)
1252 struct device
*dev
= port
->pcie
->dev
;
1257 port
->phys
= devm_kcalloc(dev
, sizeof(phy
), port
->lanes
, GFP_KERNEL
);
1261 for (i
= 0; i
< port
->lanes
; i
++) {
1262 phy
= devm_of_phy_optional_get_index(dev
, port
->np
, "pcie", i
);
1264 dev_err(dev
, "failed to get PHY#%u: %ld\n", i
,
1266 return PTR_ERR(phy
);
1269 err
= phy_init(phy
);
1271 dev_err(dev
, "failed to initialize PHY#%u: %d\n", i
,
1276 port
->phys
[i
] = phy
;
1282 static int tegra_pcie_phys_get(struct tegra_pcie
*pcie
)
1284 const struct tegra_pcie_soc
*soc
= pcie
->soc
;
1285 struct device_node
*np
= pcie
->dev
->of_node
;
1286 struct tegra_pcie_port
*port
;
1289 if (!soc
->has_gen2
|| of_find_property(np
, "phys", NULL
) != NULL
)
1290 return tegra_pcie_phys_get_legacy(pcie
);
1292 list_for_each_entry(port
, &pcie
->ports
, list
) {
1293 err
= tegra_pcie_port_get_phys(port
);
1301 static int tegra_pcie_get_resources(struct tegra_pcie
*pcie
)
1303 struct device
*dev
= pcie
->dev
;
1304 struct platform_device
*pdev
= to_platform_device(dev
);
1305 struct resource
*pads
, *afi
, *res
;
1306 const struct tegra_pcie_soc
*soc
= pcie
->soc
;
1309 err
= tegra_pcie_clocks_get(pcie
);
1311 dev_err(dev
, "failed to get clocks: %d\n", err
);
1315 err
= tegra_pcie_resets_get(pcie
);
1317 dev_err(dev
, "failed to get resets: %d\n", err
);
1321 if (soc
->program_uphy
) {
1322 err
= tegra_pcie_phys_get(pcie
);
1324 dev_err(dev
, "failed to get PHYs: %d\n", err
);
1329 err
= tegra_pcie_power_on(pcie
);
1331 dev_err(dev
, "failed to power up: %d\n", err
);
1335 pads
= platform_get_resource_byname(pdev
, IORESOURCE_MEM
, "pads");
1336 pcie
->pads
= devm_ioremap_resource(dev
, pads
);
1337 if (IS_ERR(pcie
->pads
)) {
1338 err
= PTR_ERR(pcie
->pads
);
1342 afi
= platform_get_resource_byname(pdev
, IORESOURCE_MEM
, "afi");
1343 pcie
->afi
= devm_ioremap_resource(dev
, afi
);
1344 if (IS_ERR(pcie
->afi
)) {
1345 err
= PTR_ERR(pcie
->afi
);
1349 /* request configuration space, but remap later, on demand */
1350 res
= platform_get_resource_byname(pdev
, IORESOURCE_MEM
, "cs");
1352 err
= -EADDRNOTAVAIL
;
1356 pcie
->cs
= devm_request_mem_region(dev
, res
->start
,
1357 resource_size(res
), res
->name
);
1359 err
= -EADDRNOTAVAIL
;
1363 /* request interrupt */
1364 err
= platform_get_irq_byname(pdev
, "intr");
1366 dev_err(dev
, "failed to get IRQ: %d\n", err
);
1372 err
= request_irq(pcie
->irq
, tegra_pcie_isr
, IRQF_SHARED
, "PCIE", pcie
);
1374 dev_err(dev
, "failed to register IRQ: %d\n", err
);
1381 tegra_pcie_power_off(pcie
);
1385 static int tegra_pcie_put_resources(struct tegra_pcie
*pcie
)
1387 struct device
*dev
= pcie
->dev
;
1388 const struct tegra_pcie_soc
*soc
= pcie
->soc
;
1392 free_irq(pcie
->irq
, pcie
);
1394 tegra_pcie_power_off(pcie
);
1396 if (soc
->program_uphy
) {
1397 err
= phy_exit(pcie
->phy
);
1399 dev_err(dev
, "failed to teardown PHY: %d\n", err
);
1405 static int tegra_msi_alloc(struct tegra_msi
*chip
)
1409 mutex_lock(&chip
->lock
);
1411 msi
= find_first_zero_bit(chip
->used
, INT_PCI_MSI_NR
);
1412 if (msi
< INT_PCI_MSI_NR
)
1413 set_bit(msi
, chip
->used
);
1417 mutex_unlock(&chip
->lock
);
1422 static void tegra_msi_free(struct tegra_msi
*chip
, unsigned long irq
)
1424 struct device
*dev
= chip
->chip
.dev
;
1426 mutex_lock(&chip
->lock
);
1428 if (!test_bit(irq
, chip
->used
))
1429 dev_err(dev
, "trying to free unused MSI#%lu\n", irq
);
1431 clear_bit(irq
, chip
->used
);
1433 mutex_unlock(&chip
->lock
);
1436 static irqreturn_t
tegra_pcie_msi_irq(int irq
, void *data
)
1438 struct tegra_pcie
*pcie
= data
;
1439 struct device
*dev
= pcie
->dev
;
1440 struct tegra_msi
*msi
= &pcie
->msi
;
1441 unsigned int i
, processed
= 0;
1443 for (i
= 0; i
< 8; i
++) {
1444 unsigned long reg
= afi_readl(pcie
, AFI_MSI_VEC0
+ i
* 4);
1447 unsigned int offset
= find_first_bit(®
, 32);
1448 unsigned int index
= i
* 32 + offset
;
1451 /* clear the interrupt */
1452 afi_writel(pcie
, 1 << offset
, AFI_MSI_VEC0
+ i
* 4);
1454 irq
= irq_find_mapping(msi
->domain
, index
);
1456 if (test_bit(index
, msi
->used
))
1457 generic_handle_irq(irq
);
1459 dev_info(dev
, "unhandled MSI\n");
1462 * that's weird who triggered this?
1465 dev_info(dev
, "unexpected MSI\n");
1468 /* see if there's any more pending in this vector */
1469 reg
= afi_readl(pcie
, AFI_MSI_VEC0
+ i
* 4);
1475 return processed
> 0 ? IRQ_HANDLED
: IRQ_NONE
;
1478 static int tegra_msi_setup_irq(struct msi_controller
*chip
,
1479 struct pci_dev
*pdev
, struct msi_desc
*desc
)
1481 struct tegra_msi
*msi
= to_tegra_msi(chip
);
1486 hwirq
= tegra_msi_alloc(msi
);
1490 irq
= irq_create_mapping(msi
->domain
, hwirq
);
1492 tegra_msi_free(msi
, hwirq
);
1496 irq_set_msi_desc(irq
, desc
);
1498 msg
.address_lo
= lower_32_bits(msi
->phys
);
1499 msg
.address_hi
= upper_32_bits(msi
->phys
);
1502 pci_write_msi_msg(irq
, &msg
);
1507 static void tegra_msi_teardown_irq(struct msi_controller
*chip
,
1510 struct tegra_msi
*msi
= to_tegra_msi(chip
);
1511 struct irq_data
*d
= irq_get_irq_data(irq
);
1512 irq_hw_number_t hwirq
= irqd_to_hwirq(d
);
1514 irq_dispose_mapping(irq
);
1515 tegra_msi_free(msi
, hwirq
);
1518 static struct irq_chip tegra_msi_irq_chip
= {
1519 .name
= "Tegra PCIe MSI",
1520 .irq_enable
= pci_msi_unmask_irq
,
1521 .irq_disable
= pci_msi_mask_irq
,
1522 .irq_mask
= pci_msi_mask_irq
,
1523 .irq_unmask
= pci_msi_unmask_irq
,
1526 static int tegra_msi_map(struct irq_domain
*domain
, unsigned int irq
,
1527 irq_hw_number_t hwirq
)
1529 irq_set_chip_and_handler(irq
, &tegra_msi_irq_chip
, handle_simple_irq
);
1530 irq_set_chip_data(irq
, domain
->host_data
);
1532 tegra_cpuidle_pcie_irqs_in_use();
1537 static const struct irq_domain_ops msi_domain_ops
= {
1538 .map
= tegra_msi_map
,
1541 static int tegra_pcie_enable_msi(struct tegra_pcie
*pcie
)
1543 struct pci_host_bridge
*host
= pci_host_bridge_from_priv(pcie
);
1544 struct platform_device
*pdev
= to_platform_device(pcie
->dev
);
1545 const struct tegra_pcie_soc
*soc
= pcie
->soc
;
1546 struct tegra_msi
*msi
= &pcie
->msi
;
1547 struct device
*dev
= pcie
->dev
;
1551 mutex_init(&msi
->lock
);
1553 msi
->chip
.dev
= dev
;
1554 msi
->chip
.setup_irq
= tegra_msi_setup_irq
;
1555 msi
->chip
.teardown_irq
= tegra_msi_teardown_irq
;
1557 msi
->domain
= irq_domain_add_linear(dev
->of_node
, INT_PCI_MSI_NR
,
1558 &msi_domain_ops
, &msi
->chip
);
1560 dev_err(dev
, "failed to create IRQ domain\n");
1564 err
= platform_get_irq_byname(pdev
, "msi");
1566 dev_err(dev
, "failed to get IRQ: %d\n", err
);
1572 err
= request_irq(msi
->irq
, tegra_pcie_msi_irq
, IRQF_NO_THREAD
,
1573 tegra_msi_irq_chip
.name
, pcie
);
1575 dev_err(dev
, "failed to request IRQ: %d\n", err
);
1579 /* setup AFI/FPCI range */
1580 msi
->pages
= __get_free_pages(GFP_KERNEL
, 0);
1581 msi
->phys
= virt_to_phys((void *)msi
->pages
);
1583 afi_writel(pcie
, msi
->phys
>> soc
->msi_base_shift
, AFI_MSI_FPCI_BAR_ST
);
1584 afi_writel(pcie
, msi
->phys
, AFI_MSI_AXI_BAR_ST
);
1585 /* this register is in 4K increments */
1586 afi_writel(pcie
, 1, AFI_MSI_BAR_SZ
);
1588 /* enable all MSI vectors */
1589 afi_writel(pcie
, 0xffffffff, AFI_MSI_EN_VEC0
);
1590 afi_writel(pcie
, 0xffffffff, AFI_MSI_EN_VEC1
);
1591 afi_writel(pcie
, 0xffffffff, AFI_MSI_EN_VEC2
);
1592 afi_writel(pcie
, 0xffffffff, AFI_MSI_EN_VEC3
);
1593 afi_writel(pcie
, 0xffffffff, AFI_MSI_EN_VEC4
);
1594 afi_writel(pcie
, 0xffffffff, AFI_MSI_EN_VEC5
);
1595 afi_writel(pcie
, 0xffffffff, AFI_MSI_EN_VEC6
);
1596 afi_writel(pcie
, 0xffffffff, AFI_MSI_EN_VEC7
);
1598 /* and unmask the MSI interrupt */
1599 reg
= afi_readl(pcie
, AFI_INTR_MASK
);
1600 reg
|= AFI_INTR_MASK_MSI_MASK
;
1601 afi_writel(pcie
, reg
, AFI_INTR_MASK
);
1603 host
->msi
= &msi
->chip
;
1608 irq_domain_remove(msi
->domain
);
1612 static int tegra_pcie_disable_msi(struct tegra_pcie
*pcie
)
1614 struct tegra_msi
*msi
= &pcie
->msi
;
1615 unsigned int i
, irq
;
1618 /* mask the MSI interrupt */
1619 value
= afi_readl(pcie
, AFI_INTR_MASK
);
1620 value
&= ~AFI_INTR_MASK_MSI_MASK
;
1621 afi_writel(pcie
, value
, AFI_INTR_MASK
);
1623 /* disable all MSI vectors */
1624 afi_writel(pcie
, 0, AFI_MSI_EN_VEC0
);
1625 afi_writel(pcie
, 0, AFI_MSI_EN_VEC1
);
1626 afi_writel(pcie
, 0, AFI_MSI_EN_VEC2
);
1627 afi_writel(pcie
, 0, AFI_MSI_EN_VEC3
);
1628 afi_writel(pcie
, 0, AFI_MSI_EN_VEC4
);
1629 afi_writel(pcie
, 0, AFI_MSI_EN_VEC5
);
1630 afi_writel(pcie
, 0, AFI_MSI_EN_VEC6
);
1631 afi_writel(pcie
, 0, AFI_MSI_EN_VEC7
);
1633 free_pages(msi
->pages
, 0);
1636 free_irq(msi
->irq
, pcie
);
1638 for (i
= 0; i
< INT_PCI_MSI_NR
; i
++) {
1639 irq
= irq_find_mapping(msi
->domain
, i
);
1641 irq_dispose_mapping(irq
);
1644 irq_domain_remove(msi
->domain
);
1649 static int tegra_pcie_get_xbar_config(struct tegra_pcie
*pcie
, u32 lanes
,
1652 struct device
*dev
= pcie
->dev
;
1653 struct device_node
*np
= dev
->of_node
;
1655 if (of_device_is_compatible(np
, "nvidia,tegra186-pcie")) {
1658 dev_info(dev
, "4x1, 1x1 configuration\n");
1659 *xbar
= AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_401
;
1663 dev_info(dev
, "2x1, 1X1, 1x1 configuration\n");
1664 *xbar
= AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_211
;
1668 dev_info(dev
, "1x1, 1x1, 1x1 configuration\n");
1669 *xbar
= AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_111
;
1673 dev_info(dev
, "wrong configuration updated in DT, "
1674 "switching to default 2x1, 1x1, 1x1 "
1676 *xbar
= AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_211
;
1679 } else if (of_device_is_compatible(np
, "nvidia,tegra124-pcie") ||
1680 of_device_is_compatible(np
, "nvidia,tegra210-pcie")) {
1683 dev_info(dev
, "4x1, 1x1 configuration\n");
1684 *xbar
= AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_X4_X1
;
1688 dev_info(dev
, "2x1, 1x1 configuration\n");
1689 *xbar
= AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_X2_X1
;
1692 } else if (of_device_is_compatible(np
, "nvidia,tegra30-pcie")) {
1695 dev_info(dev
, "4x1, 2x1 configuration\n");
1696 *xbar
= AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_420
;
1700 dev_info(dev
, "2x3 configuration\n");
1701 *xbar
= AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_222
;
1705 dev_info(dev
, "4x1, 1x2 configuration\n");
1706 *xbar
= AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_411
;
1709 } else if (of_device_is_compatible(np
, "nvidia,tegra20-pcie")) {
1712 dev_info(dev
, "single-mode configuration\n");
1713 *xbar
= AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_SINGLE
;
1717 dev_info(dev
, "dual-mode configuration\n");
1718 *xbar
= AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_DUAL
;
1727 * Check whether a given set of supplies is available in a device tree node.
1728 * This is used to check whether the new or the legacy device tree bindings
1731 static bool of_regulator_bulk_available(struct device_node
*np
,
1732 struct regulator_bulk_data
*supplies
,
1733 unsigned int num_supplies
)
1738 for (i
= 0; i
< num_supplies
; i
++) {
1739 snprintf(property
, 32, "%s-supply", supplies
[i
].supply
);
1741 if (of_find_property(np
, property
, NULL
) == NULL
)
1749 * Old versions of the device tree binding for this device used a set of power
1750 * supplies that didn't match the hardware inputs. This happened to work for a
1751 * number of cases but is not future proof. However to preserve backwards-
1752 * compatibility with old device trees, this function will try to use the old
1755 static int tegra_pcie_get_legacy_regulators(struct tegra_pcie
*pcie
)
1757 struct device
*dev
= pcie
->dev
;
1758 struct device_node
*np
= dev
->of_node
;
1760 if (of_device_is_compatible(np
, "nvidia,tegra30-pcie"))
1761 pcie
->num_supplies
= 3;
1762 else if (of_device_is_compatible(np
, "nvidia,tegra20-pcie"))
1763 pcie
->num_supplies
= 2;
1765 if (pcie
->num_supplies
== 0) {
1766 dev_err(dev
, "device %pOF not supported in legacy mode\n", np
);
1770 pcie
->supplies
= devm_kcalloc(dev
, pcie
->num_supplies
,
1771 sizeof(*pcie
->supplies
),
1773 if (!pcie
->supplies
)
1776 pcie
->supplies
[0].supply
= "pex-clk";
1777 pcie
->supplies
[1].supply
= "vdd";
1779 if (pcie
->num_supplies
> 2)
1780 pcie
->supplies
[2].supply
= "avdd";
1782 return devm_regulator_bulk_get(dev
, pcie
->num_supplies
, pcie
->supplies
);
1786 * Obtains the list of regulators required for a particular generation of the
1789 * This would've been nice to do simply by providing static tables for use
1790 * with the regulator_bulk_*() API, but unfortunately Tegra30 is a bit quirky
1791 * in that it has two pairs or AVDD_PEX and VDD_PEX supplies (PEXA and PEXB)
1792 * and either seems to be optional depending on which ports are being used.
1794 static int tegra_pcie_get_regulators(struct tegra_pcie
*pcie
, u32 lane_mask
)
1796 struct device
*dev
= pcie
->dev
;
1797 struct device_node
*np
= dev
->of_node
;
1800 if (of_device_is_compatible(np
, "nvidia,tegra186-pcie")) {
1801 pcie
->num_supplies
= 4;
1803 pcie
->supplies
= devm_kcalloc(pcie
->dev
, pcie
->num_supplies
,
1804 sizeof(*pcie
->supplies
),
1806 if (!pcie
->supplies
)
1809 pcie
->supplies
[i
++].supply
= "dvdd-pex";
1810 pcie
->supplies
[i
++].supply
= "hvdd-pex-pll";
1811 pcie
->supplies
[i
++].supply
= "hvdd-pex";
1812 pcie
->supplies
[i
++].supply
= "vddio-pexctl-aud";
1813 } else if (of_device_is_compatible(np
, "nvidia,tegra210-pcie")) {
1814 pcie
->num_supplies
= 6;
1816 pcie
->supplies
= devm_kcalloc(pcie
->dev
, pcie
->num_supplies
,
1817 sizeof(*pcie
->supplies
),
1819 if (!pcie
->supplies
)
1822 pcie
->supplies
[i
++].supply
= "avdd-pll-uerefe";
1823 pcie
->supplies
[i
++].supply
= "hvddio-pex";
1824 pcie
->supplies
[i
++].supply
= "dvddio-pex";
1825 pcie
->supplies
[i
++].supply
= "dvdd-pex-pll";
1826 pcie
->supplies
[i
++].supply
= "hvdd-pex-pll-e";
1827 pcie
->supplies
[i
++].supply
= "vddio-pex-ctl";
1828 } else if (of_device_is_compatible(np
, "nvidia,tegra124-pcie")) {
1829 pcie
->num_supplies
= 7;
1831 pcie
->supplies
= devm_kcalloc(dev
, pcie
->num_supplies
,
1832 sizeof(*pcie
->supplies
),
1834 if (!pcie
->supplies
)
1837 pcie
->supplies
[i
++].supply
= "avddio-pex";
1838 pcie
->supplies
[i
++].supply
= "dvddio-pex";
1839 pcie
->supplies
[i
++].supply
= "avdd-pex-pll";
1840 pcie
->supplies
[i
++].supply
= "hvdd-pex";
1841 pcie
->supplies
[i
++].supply
= "hvdd-pex-pll-e";
1842 pcie
->supplies
[i
++].supply
= "vddio-pex-ctl";
1843 pcie
->supplies
[i
++].supply
= "avdd-pll-erefe";
1844 } else if (of_device_is_compatible(np
, "nvidia,tegra30-pcie")) {
1845 bool need_pexa
= false, need_pexb
= false;
1847 /* VDD_PEXA and AVDD_PEXA supply lanes 0 to 3 */
1848 if (lane_mask
& 0x0f)
1851 /* VDD_PEXB and AVDD_PEXB supply lanes 4 to 5 */
1852 if (lane_mask
& 0x30)
1855 pcie
->num_supplies
= 4 + (need_pexa
? 2 : 0) +
1856 (need_pexb
? 2 : 0);
1858 pcie
->supplies
= devm_kcalloc(dev
, pcie
->num_supplies
,
1859 sizeof(*pcie
->supplies
),
1861 if (!pcie
->supplies
)
1864 pcie
->supplies
[i
++].supply
= "avdd-pex-pll";
1865 pcie
->supplies
[i
++].supply
= "hvdd-pex";
1866 pcie
->supplies
[i
++].supply
= "vddio-pex-ctl";
1867 pcie
->supplies
[i
++].supply
= "avdd-plle";
1870 pcie
->supplies
[i
++].supply
= "avdd-pexa";
1871 pcie
->supplies
[i
++].supply
= "vdd-pexa";
1875 pcie
->supplies
[i
++].supply
= "avdd-pexb";
1876 pcie
->supplies
[i
++].supply
= "vdd-pexb";
1878 } else if (of_device_is_compatible(np
, "nvidia,tegra20-pcie")) {
1879 pcie
->num_supplies
= 5;
1881 pcie
->supplies
= devm_kcalloc(dev
, pcie
->num_supplies
,
1882 sizeof(*pcie
->supplies
),
1884 if (!pcie
->supplies
)
1887 pcie
->supplies
[0].supply
= "avdd-pex";
1888 pcie
->supplies
[1].supply
= "vdd-pex";
1889 pcie
->supplies
[2].supply
= "avdd-pex-pll";
1890 pcie
->supplies
[3].supply
= "avdd-plle";
1891 pcie
->supplies
[4].supply
= "vddio-pex-clk";
1894 if (of_regulator_bulk_available(dev
->of_node
, pcie
->supplies
,
1895 pcie
->num_supplies
))
1896 return devm_regulator_bulk_get(dev
, pcie
->num_supplies
,
1900 * If not all regulators are available for this new scheme, assume
1901 * that the device tree complies with an older version of the device
1904 dev_info(dev
, "using legacy DT binding for power supplies\n");
1906 devm_kfree(dev
, pcie
->supplies
);
1907 pcie
->num_supplies
= 0;
1909 return tegra_pcie_get_legacy_regulators(pcie
);
1912 static int tegra_pcie_parse_dt(struct tegra_pcie
*pcie
)
1914 struct device
*dev
= pcie
->dev
;
1915 struct device_node
*np
= dev
->of_node
, *port
;
1916 const struct tegra_pcie_soc
*soc
= pcie
->soc
;
1917 struct of_pci_range_parser parser
;
1918 struct of_pci_range range
;
1919 u32 lanes
= 0, mask
= 0;
1920 unsigned int lane
= 0;
1921 struct resource res
;
1924 if (of_pci_range_parser_init(&parser
, np
)) {
1925 dev_err(dev
, "missing \"ranges\" property\n");
1929 for_each_of_pci_range(&parser
, &range
) {
1930 err
= of_pci_range_to_resource(&range
, np
, &res
);
1934 switch (res
.flags
& IORESOURCE_TYPE_BITS
) {
1936 /* Track the bus -> CPU I/O mapping offset. */
1937 pcie
->offset
.io
= res
.start
- range
.pci_addr
;
1939 memcpy(&pcie
->pio
, &res
, sizeof(res
));
1940 pcie
->pio
.name
= np
->full_name
;
1943 * The Tegra PCIe host bridge uses this to program the
1944 * mapping of the I/O space to the physical address,
1945 * so we override the .start and .end fields here that
1946 * of_pci_range_to_resource() converted to I/O space.
1947 * We also set the IORESOURCE_MEM type to clarify that
1948 * the resource is in the physical memory space.
1950 pcie
->io
.start
= range
.cpu_addr
;
1951 pcie
->io
.end
= range
.cpu_addr
+ range
.size
- 1;
1952 pcie
->io
.flags
= IORESOURCE_MEM
;
1953 pcie
->io
.name
= "I/O";
1955 memcpy(&res
, &pcie
->io
, sizeof(res
));
1958 case IORESOURCE_MEM
:
1960 * Track the bus -> CPU memory mapping offset. This
1961 * assumes that the prefetchable and non-prefetchable
1962 * regions will be the last of type IORESOURCE_MEM in
1963 * the ranges property.
1965 pcie
->offset
.mem
= res
.start
- range
.pci_addr
;
1967 if (res
.flags
& IORESOURCE_PREFETCH
) {
1968 memcpy(&pcie
->prefetch
, &res
, sizeof(res
));
1969 pcie
->prefetch
.name
= "prefetchable";
1971 memcpy(&pcie
->mem
, &res
, sizeof(res
));
1972 pcie
->mem
.name
= "non-prefetchable";
1978 err
= of_pci_parse_bus_range(np
, &pcie
->busn
);
1980 dev_err(dev
, "failed to parse ranges property: %d\n", err
);
1981 pcie
->busn
.name
= np
->name
;
1982 pcie
->busn
.start
= 0;
1983 pcie
->busn
.end
= 0xff;
1984 pcie
->busn
.flags
= IORESOURCE_BUS
;
1987 /* parse root ports */
1988 for_each_child_of_node(np
, port
) {
1989 struct tegra_pcie_port
*rp
;
1993 err
= of_pci_get_devfn(port
);
1995 dev_err(dev
, "failed to parse address: %d\n", err
);
1999 index
= PCI_SLOT(err
);
2001 if (index
< 1 || index
> soc
->num_ports
) {
2002 dev_err(dev
, "invalid port number: %d\n", index
);
2008 err
= of_property_read_u32(port
, "nvidia,num-lanes", &value
);
2010 dev_err(dev
, "failed to parse # of lanes: %d\n",
2016 dev_err(dev
, "invalid # of lanes: %u\n", value
);
2020 lanes
|= value
<< (index
<< 3);
2022 if (!of_device_is_available(port
)) {
2027 mask
|= ((1 << value
) - 1) << lane
;
2030 rp
= devm_kzalloc(dev
, sizeof(*rp
), GFP_KERNEL
);
2034 err
= of_address_to_resource(port
, 0, &rp
->regs
);
2036 dev_err(dev
, "failed to parse address: %d\n", err
);
2040 INIT_LIST_HEAD(&rp
->list
);
2046 rp
->base
= devm_pci_remap_cfg_resource(dev
, &rp
->regs
);
2047 if (IS_ERR(rp
->base
))
2048 return PTR_ERR(rp
->base
);
2050 list_add_tail(&rp
->list
, &pcie
->ports
);
2053 err
= tegra_pcie_get_xbar_config(pcie
, lanes
, &pcie
->xbar_config
);
2055 dev_err(dev
, "invalid lane configuration\n");
2059 err
= tegra_pcie_get_regulators(pcie
, mask
);
2067 * FIXME: If there are no PCIe cards attached, then calling this function
2068 * can result in the increase of the bootup time as there are big timeout
2071 #define TEGRA_PCIE_LINKUP_TIMEOUT 200 /* up to 1.2 seconds */
2072 static bool tegra_pcie_port_check_link(struct tegra_pcie_port
*port
)
2074 struct device
*dev
= port
->pcie
->dev
;
2075 unsigned int retries
= 3;
2076 unsigned long value
;
2078 /* override presence detection */
2079 value
= readl(port
->base
+ RP_PRIV_MISC
);
2080 value
&= ~RP_PRIV_MISC_PRSNT_MAP_EP_ABSNT
;
2081 value
|= RP_PRIV_MISC_PRSNT_MAP_EP_PRSNT
;
2082 writel(value
, port
->base
+ RP_PRIV_MISC
);
2085 unsigned int timeout
= TEGRA_PCIE_LINKUP_TIMEOUT
;
2088 value
= readl(port
->base
+ RP_VEND_XP
);
2090 if (value
& RP_VEND_XP_DL_UP
)
2093 usleep_range(1000, 2000);
2094 } while (--timeout
);
2097 dev_err(dev
, "link %u down, retrying\n", port
->index
);
2101 timeout
= TEGRA_PCIE_LINKUP_TIMEOUT
;
2104 value
= readl(port
->base
+ RP_LINK_CONTROL_STATUS
);
2106 if (value
& RP_LINK_CONTROL_STATUS_DL_LINK_ACTIVE
)
2109 usleep_range(1000, 2000);
2110 } while (--timeout
);
2113 tegra_pcie_port_reset(port
);
2114 } while (--retries
);
2119 static void tegra_pcie_enable_ports(struct tegra_pcie
*pcie
)
2121 struct device
*dev
= pcie
->dev
;
2122 struct tegra_pcie_port
*port
, *tmp
;
2124 list_for_each_entry_safe(port
, tmp
, &pcie
->ports
, list
) {
2125 dev_info(dev
, "probing port %u, using %u lanes\n",
2126 port
->index
, port
->lanes
);
2128 tegra_pcie_port_enable(port
);
2130 if (tegra_pcie_port_check_link(port
))
2133 dev_info(dev
, "link %u down, ignoring\n", port
->index
);
2135 tegra_pcie_port_disable(port
);
2136 tegra_pcie_port_free(port
);
2140 static const struct tegra_pcie_soc tegra20_pcie
= {
2142 .msi_base_shift
= 0,
2143 .pads_pll_ctl
= PADS_PLL_CTL_TEGRA20
,
2144 .tx_ref_sel
= PADS_PLL_CTL_TXCLKREF_DIV10
,
2145 .pads_refclk_cfg0
= 0xfa5cfa5c,
2146 .has_pex_clkreq_en
= false,
2147 .has_pex_bias_ctrl
= false,
2148 .has_intr_prsnt_sense
= false,
2149 .has_cml_clk
= false,
2151 .force_pca_enable
= false,
2152 .program_uphy
= true,
2155 static const struct tegra_pcie_soc tegra30_pcie
= {
2157 .msi_base_shift
= 8,
2158 .pads_pll_ctl
= PADS_PLL_CTL_TEGRA30
,
2159 .tx_ref_sel
= PADS_PLL_CTL_TXCLKREF_BUF_EN
,
2160 .pads_refclk_cfg0
= 0xfa5cfa5c,
2161 .pads_refclk_cfg1
= 0xfa5cfa5c,
2162 .has_pex_clkreq_en
= true,
2163 .has_pex_bias_ctrl
= true,
2164 .has_intr_prsnt_sense
= true,
2165 .has_cml_clk
= true,
2167 .force_pca_enable
= false,
2168 .program_uphy
= true,
2171 static const struct tegra_pcie_soc tegra124_pcie
= {
2173 .msi_base_shift
= 8,
2174 .pads_pll_ctl
= PADS_PLL_CTL_TEGRA30
,
2175 .tx_ref_sel
= PADS_PLL_CTL_TXCLKREF_BUF_EN
,
2176 .pads_refclk_cfg0
= 0x44ac44ac,
2177 .has_pex_clkreq_en
= true,
2178 .has_pex_bias_ctrl
= true,
2179 .has_intr_prsnt_sense
= true,
2180 .has_cml_clk
= true,
2182 .force_pca_enable
= false,
2183 .program_uphy
= true,
2186 static const struct tegra_pcie_soc tegra210_pcie
= {
2188 .msi_base_shift
= 8,
2189 .pads_pll_ctl
= PADS_PLL_CTL_TEGRA30
,
2190 .tx_ref_sel
= PADS_PLL_CTL_TXCLKREF_BUF_EN
,
2191 .pads_refclk_cfg0
= 0x90b890b8,
2192 .has_pex_clkreq_en
= true,
2193 .has_pex_bias_ctrl
= true,
2194 .has_intr_prsnt_sense
= true,
2195 .has_cml_clk
= true,
2197 .force_pca_enable
= true,
2198 .program_uphy
= true,
2201 static const struct tegra_pcie_soc tegra186_pcie
= {
2203 .msi_base_shift
= 8,
2204 .pads_pll_ctl
= PADS_PLL_CTL_TEGRA30
,
2205 .tx_ref_sel
= PADS_PLL_CTL_TXCLKREF_BUF_EN
,
2206 .pads_refclk_cfg0
= 0x80b880b8,
2207 .pads_refclk_cfg1
= 0x000480b8,
2208 .has_pex_clkreq_en
= true,
2209 .has_pex_bias_ctrl
= true,
2210 .has_intr_prsnt_sense
= true,
2211 .has_cml_clk
= false,
2213 .force_pca_enable
= false,
2214 .program_uphy
= false,
2217 static const struct of_device_id tegra_pcie_of_match
[] = {
2218 { .compatible
= "nvidia,tegra186-pcie", .data
= &tegra186_pcie
},
2219 { .compatible
= "nvidia,tegra210-pcie", .data
= &tegra210_pcie
},
2220 { .compatible
= "nvidia,tegra124-pcie", .data
= &tegra124_pcie
},
2221 { .compatible
= "nvidia,tegra30-pcie", .data
= &tegra30_pcie
},
2222 { .compatible
= "nvidia,tegra20-pcie", .data
= &tegra20_pcie
},
2226 static void *tegra_pcie_ports_seq_start(struct seq_file
*s
, loff_t
*pos
)
2228 struct tegra_pcie
*pcie
= s
->private;
2230 if (list_empty(&pcie
->ports
))
2233 seq_printf(s
, "Index Status\n");
2235 return seq_list_start(&pcie
->ports
, *pos
);
2238 static void *tegra_pcie_ports_seq_next(struct seq_file
*s
, void *v
, loff_t
*pos
)
2240 struct tegra_pcie
*pcie
= s
->private;
2242 return seq_list_next(v
, &pcie
->ports
, pos
);
/* seq_file .stop: nothing to clean up. */
static void tegra_pcie_ports_seq_stop(struct seq_file *s, void *v)
{
}
2249 static int tegra_pcie_ports_seq_show(struct seq_file
*s
, void *v
)
2251 bool up
= false, active
= false;
2252 struct tegra_pcie_port
*port
;
2255 port
= list_entry(v
, struct tegra_pcie_port
, list
);
2257 value
= readl(port
->base
+ RP_VEND_XP
);
2259 if (value
& RP_VEND_XP_DL_UP
)
2262 value
= readl(port
->base
+ RP_LINK_CONTROL_STATUS
);
2264 if (value
& RP_LINK_CONTROL_STATUS_DL_LINK_ACTIVE
)
2267 seq_printf(s
, "%2u ", port
->index
);
2270 seq_printf(s
, "up");
2274 seq_printf(s
, ", ");
2276 seq_printf(s
, "active");
2279 seq_printf(s
, "\n");
2283 static const struct seq_operations tegra_pcie_ports_seq_ops
= {
2284 .start
= tegra_pcie_ports_seq_start
,
2285 .next
= tegra_pcie_ports_seq_next
,
2286 .stop
= tegra_pcie_ports_seq_stop
,
2287 .show
= tegra_pcie_ports_seq_show
,
2290 static int tegra_pcie_ports_open(struct inode
*inode
, struct file
*file
)
2292 struct tegra_pcie
*pcie
= inode
->i_private
;
2296 err
= seq_open(file
, &tegra_pcie_ports_seq_ops
);
2300 s
= file
->private_data
;
2306 static const struct file_operations tegra_pcie_ports_ops
= {
2307 .owner
= THIS_MODULE
,
2308 .open
= tegra_pcie_ports_open
,
2310 .llseek
= seq_lseek
,
2311 .release
= seq_release
,
2314 static int tegra_pcie_debugfs_init(struct tegra_pcie
*pcie
)
2316 struct dentry
*file
;
2318 pcie
->debugfs
= debugfs_create_dir("pcie", NULL
);
2322 file
= debugfs_create_file("ports", S_IFREG
| S_IRUGO
, pcie
->debugfs
,
2323 pcie
, &tegra_pcie_ports_ops
);
2330 debugfs_remove_recursive(pcie
->debugfs
);
2331 pcie
->debugfs
= NULL
;
2335 static int tegra_pcie_probe(struct platform_device
*pdev
)
2337 struct device
*dev
= &pdev
->dev
;
2338 struct pci_host_bridge
*host
;
2339 struct tegra_pcie
*pcie
;
2340 struct pci_bus
*child
;
2343 host
= devm_pci_alloc_host_bridge(dev
, sizeof(*pcie
));
2347 pcie
= pci_host_bridge_priv(host
);
2349 pcie
->soc
= of_device_get_match_data(dev
);
2350 INIT_LIST_HEAD(&pcie
->buses
);
2351 INIT_LIST_HEAD(&pcie
->ports
);
2354 err
= tegra_pcie_parse_dt(pcie
);
2358 err
= tegra_pcie_get_resources(pcie
);
2360 dev_err(dev
, "failed to request resources: %d\n", err
);
2364 err
= tegra_pcie_enable_controller(pcie
);
2368 err
= tegra_pcie_request_resources(pcie
);
2372 /* setup the AFI address translations */
2373 tegra_pcie_setup_translations(pcie
);
2375 if (IS_ENABLED(CONFIG_PCI_MSI
)) {
2376 err
= tegra_pcie_enable_msi(pcie
);
2378 dev_err(dev
, "failed to enable MSI support: %d\n", err
);
2383 tegra_pcie_enable_ports(pcie
);
2385 pci_add_flags(PCI_REASSIGN_ALL_RSRC
| PCI_REASSIGN_ALL_BUS
);
2386 host
->busnr
= pcie
->busn
.start
;
2387 host
->dev
.parent
= &pdev
->dev
;
2388 host
->ops
= &tegra_pcie_ops
;
2389 host
->map_irq
= tegra_pcie_map_irq
;
2390 host
->swizzle_irq
= pci_common_swizzle
;
2392 err
= pci_scan_root_bus_bridge(host
);
2394 dev_err(dev
, "failed to register host: %d\n", err
);
2398 pci_bus_size_bridges(host
->bus
);
2399 pci_bus_assign_resources(host
->bus
);
2401 list_for_each_entry(child
, &host
->bus
->children
, node
)
2402 pcie_bus_configure_settings(child
);
2404 pci_bus_add_devices(host
->bus
);
2406 if (IS_ENABLED(CONFIG_DEBUG_FS
)) {
2407 err
= tegra_pcie_debugfs_init(pcie
);
2409 dev_err(dev
, "failed to setup debugfs: %d\n", err
);
2415 if (IS_ENABLED(CONFIG_PCI_MSI
))
2416 tegra_pcie_disable_msi(pcie
);
2418 tegra_pcie_put_resources(pcie
);
2422 static struct platform_driver tegra_pcie_driver
= {
2424 .name
= "tegra-pcie",
2425 .of_match_table
= tegra_pcie_of_match
,
2426 .suppress_bind_attrs
= true,
2428 .probe
= tegra_pcie_probe
,
2430 builtin_platform_driver(tegra_pcie_driver
);