]> git.proxmox.com Git - mirror_ubuntu-bionic-kernel.git/blob - drivers/pci/host/pci-tegra.c
Merge branch 'linus' of git://git.kernel.org/pub/scm/linux/kernel/git/herbert/crypto-2.6
[mirror_ubuntu-bionic-kernel.git] / drivers / pci / host / pci-tegra.c
1 /*
2 * PCIe host controller driver for Tegra SoCs
3 *
4 * Copyright (c) 2010, CompuLab, Ltd.
5 * Author: Mike Rapoport <mike@compulab.co.il>
6 *
7 * Based on NVIDIA PCIe driver
8 * Copyright (c) 2008-2009, NVIDIA Corporation.
9 *
10 * Bits taken from arch/arm/mach-dove/pcie.c
11 *
12 * Author: Thierry Reding <treding@nvidia.com>
13 *
14 * This program is free software; you can redistribute it and/or modify
15 * it under the terms of the GNU General Public License as published by
16 * the Free Software Foundation; either version 2 of the License, or
17 * (at your option) any later version.
18 *
19 * This program is distributed in the hope that it will be useful, but WITHOUT
20 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
21 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
22 * more details.
23 *
24 * You should have received a copy of the GNU General Public License along
25 * with this program; if not, write to the Free Software Foundation, Inc.,
26 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
27 */
28
29 #include <linux/clk.h>
30 #include <linux/debugfs.h>
31 #include <linux/delay.h>
32 #include <linux/export.h>
33 #include <linux/interrupt.h>
34 #include <linux/irq.h>
35 #include <linux/irqdomain.h>
36 #include <linux/kernel.h>
37 #include <linux/init.h>
38 #include <linux/msi.h>
39 #include <linux/of_address.h>
40 #include <linux/of_pci.h>
41 #include <linux/of_platform.h>
42 #include <linux/pci.h>
43 #include <linux/phy/phy.h>
44 #include <linux/platform_device.h>
45 #include <linux/reset.h>
46 #include <linux/sizes.h>
47 #include <linux/slab.h>
48 #include <linux/vmalloc.h>
49 #include <linux/regulator/consumer.h>
50
51 #include <soc/tegra/cpuidle.h>
52 #include <soc/tegra/pmc.h>
53
54 #define INT_PCI_MSI_NR (8 * 32)
55
56 /* register definitions */
57
58 #define AFI_AXI_BAR0_SZ 0x00
59 #define AFI_AXI_BAR1_SZ 0x04
60 #define AFI_AXI_BAR2_SZ 0x08
61 #define AFI_AXI_BAR3_SZ 0x0c
62 #define AFI_AXI_BAR4_SZ 0x10
63 #define AFI_AXI_BAR5_SZ 0x14
64
65 #define AFI_AXI_BAR0_START 0x18
66 #define AFI_AXI_BAR1_START 0x1c
67 #define AFI_AXI_BAR2_START 0x20
68 #define AFI_AXI_BAR3_START 0x24
69 #define AFI_AXI_BAR4_START 0x28
70 #define AFI_AXI_BAR5_START 0x2c
71
72 #define AFI_FPCI_BAR0 0x30
73 #define AFI_FPCI_BAR1 0x34
74 #define AFI_FPCI_BAR2 0x38
75 #define AFI_FPCI_BAR3 0x3c
76 #define AFI_FPCI_BAR4 0x40
77 #define AFI_FPCI_BAR5 0x44
78
79 #define AFI_CACHE_BAR0_SZ 0x48
80 #define AFI_CACHE_BAR0_ST 0x4c
81 #define AFI_CACHE_BAR1_SZ 0x50
82 #define AFI_CACHE_BAR1_ST 0x54
83
84 #define AFI_MSI_BAR_SZ 0x60
85 #define AFI_MSI_FPCI_BAR_ST 0x64
86 #define AFI_MSI_AXI_BAR_ST 0x68
87
88 #define AFI_MSI_VEC0 0x6c
89 #define AFI_MSI_VEC1 0x70
90 #define AFI_MSI_VEC2 0x74
91 #define AFI_MSI_VEC3 0x78
92 #define AFI_MSI_VEC4 0x7c
93 #define AFI_MSI_VEC5 0x80
94 #define AFI_MSI_VEC6 0x84
95 #define AFI_MSI_VEC7 0x88
96
97 #define AFI_MSI_EN_VEC0 0x8c
98 #define AFI_MSI_EN_VEC1 0x90
99 #define AFI_MSI_EN_VEC2 0x94
100 #define AFI_MSI_EN_VEC3 0x98
101 #define AFI_MSI_EN_VEC4 0x9c
102 #define AFI_MSI_EN_VEC5 0xa0
103 #define AFI_MSI_EN_VEC6 0xa4
104 #define AFI_MSI_EN_VEC7 0xa8
105
106 #define AFI_CONFIGURATION 0xac
107 #define AFI_CONFIGURATION_EN_FPCI (1 << 0)
108
109 #define AFI_FPCI_ERROR_MASKS 0xb0
110
111 #define AFI_INTR_MASK 0xb4
112 #define AFI_INTR_MASK_INT_MASK (1 << 0)
113 #define AFI_INTR_MASK_MSI_MASK (1 << 8)
114
115 #define AFI_INTR_CODE 0xb8
116 #define AFI_INTR_CODE_MASK 0xf
117 #define AFI_INTR_INI_SLAVE_ERROR 1
118 #define AFI_INTR_INI_DECODE_ERROR 2
119 #define AFI_INTR_TARGET_ABORT 3
120 #define AFI_INTR_MASTER_ABORT 4
121 #define AFI_INTR_INVALID_WRITE 5
122 #define AFI_INTR_LEGACY 6
123 #define AFI_INTR_FPCI_DECODE_ERROR 7
124 #define AFI_INTR_AXI_DECODE_ERROR 8
125 #define AFI_INTR_FPCI_TIMEOUT 9
126 #define AFI_INTR_PE_PRSNT_SENSE 10
127 #define AFI_INTR_PE_CLKREQ_SENSE 11
128 #define AFI_INTR_CLKCLAMP_SENSE 12
129 #define AFI_INTR_RDY4PD_SENSE 13
130 #define AFI_INTR_P2P_ERROR 14
131
132 #define AFI_INTR_SIGNATURE 0xbc
133 #define AFI_UPPER_FPCI_ADDRESS 0xc0
134 #define AFI_SM_INTR_ENABLE 0xc4
135 #define AFI_SM_INTR_INTA_ASSERT (1 << 0)
136 #define AFI_SM_INTR_INTB_ASSERT (1 << 1)
137 #define AFI_SM_INTR_INTC_ASSERT (1 << 2)
138 #define AFI_SM_INTR_INTD_ASSERT (1 << 3)
139 #define AFI_SM_INTR_INTA_DEASSERT (1 << 4)
140 #define AFI_SM_INTR_INTB_DEASSERT (1 << 5)
141 #define AFI_SM_INTR_INTC_DEASSERT (1 << 6)
142 #define AFI_SM_INTR_INTD_DEASSERT (1 << 7)
143
144 #define AFI_AFI_INTR_ENABLE 0xc8
145 #define AFI_INTR_EN_INI_SLVERR (1 << 0)
146 #define AFI_INTR_EN_INI_DECERR (1 << 1)
147 #define AFI_INTR_EN_TGT_SLVERR (1 << 2)
148 #define AFI_INTR_EN_TGT_DECERR (1 << 3)
149 #define AFI_INTR_EN_TGT_WRERR (1 << 4)
150 #define AFI_INTR_EN_DFPCI_DECERR (1 << 5)
151 #define AFI_INTR_EN_AXI_DECERR (1 << 6)
152 #define AFI_INTR_EN_FPCI_TIMEOUT (1 << 7)
153 #define AFI_INTR_EN_PRSNT_SENSE (1 << 8)
154
155 #define AFI_PCIE_CONFIG 0x0f8
156 #define AFI_PCIE_CONFIG_PCIE_DISABLE(x) (1 << ((x) + 1))
157 #define AFI_PCIE_CONFIG_PCIE_DISABLE_ALL 0xe
158 #define AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_MASK (0xf << 20)
159 #define AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_SINGLE (0x0 << 20)
160 #define AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_420 (0x0 << 20)
161 #define AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_X2_X1 (0x0 << 20)
162 #define AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_401 (0x0 << 20)
163 #define AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_DUAL (0x1 << 20)
164 #define AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_222 (0x1 << 20)
165 #define AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_X4_X1 (0x1 << 20)
166 #define AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_211 (0x1 << 20)
167 #define AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_411 (0x2 << 20)
168 #define AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_111 (0x2 << 20)
169
170 #define AFI_FUSE 0x104
171 #define AFI_FUSE_PCIE_T0_GEN2_DIS (1 << 2)
172
173 #define AFI_PEX0_CTRL 0x110
174 #define AFI_PEX1_CTRL 0x118
175 #define AFI_PEX2_CTRL 0x128
176 #define AFI_PEX_CTRL_RST (1 << 0)
177 #define AFI_PEX_CTRL_CLKREQ_EN (1 << 1)
178 #define AFI_PEX_CTRL_REFCLK_EN (1 << 3)
179 #define AFI_PEX_CTRL_OVERRIDE_EN (1 << 4)
180
181 #define AFI_PLLE_CONTROL 0x160
182 #define AFI_PLLE_CONTROL_BYPASS_PADS2PLLE_CONTROL (1 << 9)
183 #define AFI_PLLE_CONTROL_PADS2PLLE_CONTROL_EN (1 << 1)
184
185 #define AFI_PEXBIAS_CTRL_0 0x168
186
187 #define RP_VEND_XP 0x00000f00
188 #define RP_VEND_XP_DL_UP (1 << 30)
189
190 #define RP_VEND_CTL2 0x00000fa8
191 #define RP_VEND_CTL2_PCA_ENABLE (1 << 7)
192
193 #define RP_PRIV_MISC 0x00000fe0
194 #define RP_PRIV_MISC_PRSNT_MAP_EP_PRSNT (0xe << 0)
195 #define RP_PRIV_MISC_PRSNT_MAP_EP_ABSNT (0xf << 0)
196
197 #define RP_LINK_CONTROL_STATUS 0x00000090
198 #define RP_LINK_CONTROL_STATUS_DL_LINK_ACTIVE 0x20000000
199 #define RP_LINK_CONTROL_STATUS_LINKSTAT_MASK 0x3fff0000
200
201 #define PADS_CTL_SEL 0x0000009c
202
203 #define PADS_CTL 0x000000a0
204 #define PADS_CTL_IDDQ_1L (1 << 0)
205 #define PADS_CTL_TX_DATA_EN_1L (1 << 6)
206 #define PADS_CTL_RX_DATA_EN_1L (1 << 10)
207
208 #define PADS_PLL_CTL_TEGRA20 0x000000b8
209 #define PADS_PLL_CTL_TEGRA30 0x000000b4
210 #define PADS_PLL_CTL_RST_B4SM (1 << 1)
211 #define PADS_PLL_CTL_LOCKDET (1 << 8)
212 #define PADS_PLL_CTL_REFCLK_MASK (0x3 << 16)
213 #define PADS_PLL_CTL_REFCLK_INTERNAL_CML (0 << 16)
214 #define PADS_PLL_CTL_REFCLK_INTERNAL_CMOS (1 << 16)
215 #define PADS_PLL_CTL_REFCLK_EXTERNAL (2 << 16)
216 #define PADS_PLL_CTL_TXCLKREF_MASK (0x1 << 20)
217 #define PADS_PLL_CTL_TXCLKREF_DIV10 (0 << 20)
218 #define PADS_PLL_CTL_TXCLKREF_DIV5 (1 << 20)
219 #define PADS_PLL_CTL_TXCLKREF_BUF_EN (1 << 22)
220
221 #define PADS_REFCLK_CFG0 0x000000c8
222 #define PADS_REFCLK_CFG1 0x000000cc
223 #define PADS_REFCLK_BIAS 0x000000d0
224
225 /*
226 * Fields in PADS_REFCLK_CFG*. Those registers form an array of 16-bit
227 * entries, one entry per PCIe port. These field definitions and desired
228 * values aren't in the TRM, but do come from NVIDIA.
229 */
230 #define PADS_REFCLK_CFG_TERM_SHIFT 2 /* 6:2 */
231 #define PADS_REFCLK_CFG_E_TERM_SHIFT 7
232 #define PADS_REFCLK_CFG_PREDI_SHIFT 8 /* 11:8 */
233 #define PADS_REFCLK_CFG_DRVI_SHIFT 12 /* 15:12 */
234
/*
 * MSI controller state: one instance per PCIe controller. Tracks which of
 * the INT_PCI_MSI_NR (8 * 32) vectors are in use and the memory page used
 * as the MSI target address.
 */
struct tegra_msi {
	struct msi_controller chip;
	DECLARE_BITMAP(used, INT_PCI_MSI_NR);	/* allocated MSI vectors */
	struct irq_domain *domain;
	unsigned long pages;	/* backing page(s) for the MSI target */
	struct mutex lock;	/* protects the "used" bitmap */
	u64 phys;		/* physical MSI target address */
	int irq;		/* parent interrupt shared by all MSI vectors */
};
244
245 /* used to differentiate between Tegra SoC generations */
246 struct tegra_pcie_soc {
247 unsigned int num_ports;
248 unsigned int msi_base_shift;
249 u32 pads_pll_ctl;
250 u32 tx_ref_sel;
251 u32 pads_refclk_cfg0;
252 u32 pads_refclk_cfg1;
253 bool has_pex_clkreq_en;
254 bool has_pex_bias_ctrl;
255 bool has_intr_prsnt_sense;
256 bool has_cml_clk;
257 bool has_gen2;
258 bool force_pca_enable;
259 bool program_uphy;
260 };
261
/* convert an msi_controller pointer back to its containing tegra_msi */
static inline struct tegra_msi *to_tegra_msi(struct msi_controller *chip)
{
	return container_of(chip, struct tegra_msi, chip);
}
266
/* per-controller driver state */
struct tegra_pcie {
	struct device *dev;

	void __iomem *pads;	/* PADS register block (PHY/PLL control) */
	void __iomem *afi;	/* AFI register block */
	int irq;		/* controller interrupt; also INTx fallback */

	struct list_head buses;	/* tegra_pcie_bus config-space mappings */
	struct resource *cs;	/* extended configuration space aperture */

	struct resource io;	/* physical I/O aperture */
	struct resource pio;	/* I/O port window exposed to the PCI core */
	struct resource mem;	/* non-prefetchable memory window */
	struct resource prefetch;	/* prefetchable memory window */
	struct resource busn;	/* bus number range */

	struct {
		resource_size_t mem;	/* CPU->PCI offset for memory windows */
		resource_size_t io;	/* CPU->PCI offset for the I/O window */
	} offset;

	struct clk *pex_clk;
	struct clk *afi_clk;
	struct clk *pll_e;
	struct clk *cml_clk;

	struct reset_control *pex_rst;
	struct reset_control *afi_rst;
	struct reset_control *pcie_xrst;

	bool legacy_phy;	/* single legacy PHY vs. per-lane PHYs */
	struct phy *phy;	/* legacy PHY (may be NULL) */

	struct tegra_msi msi;

	struct list_head ports;	/* enabled tegra_pcie_port instances */
	u32 xbar_config;	/* lane crossbar setting for AFI_PCIE_CONFIG */

	struct regulator_bulk_data *supplies;
	unsigned int num_supplies;

	const struct tegra_pcie_soc *soc;	/* per-generation parameters */
	struct dentry *debugfs;
};
311
/* state for a single root port */
struct tegra_pcie_port {
	struct tegra_pcie *pcie;	/* owning controller */
	struct device_node *np;		/* device tree node for this port */
	struct list_head list;		/* entry in tegra_pcie.ports */
	struct resource regs;		/* root port register aperture */
	void __iomem *base;		/* mapped root port registers */
	unsigned int index;		/* port number (selects AFI_PEXn_CTRL) */
	unsigned int lanes;		/* number of lanes assigned to the port */

	struct phy **phys;		/* one PHY per lane (non-legacy mode) */
};
323
/* per-bus virtual mapping of configuration space (see tegra_pcie_bus_alloc) */
struct tegra_pcie_bus {
	struct vm_struct *area;	/* 1 MiB virtual area mapping the bus's config space */
	struct list_head list;	/* entry in tegra_pcie.buses */
	unsigned int nr;	/* bus number this mapping belongs to */
};
329
/* write a 32-bit value to a register in the AFI block */
static inline void afi_writel(struct tegra_pcie *pcie, u32 value,
			      unsigned long offset)
{
	writel(value, pcie->afi + offset);
}
335
/* read a 32-bit register from the AFI block */
static inline u32 afi_readl(struct tegra_pcie *pcie, unsigned long offset)
{
	return readl(pcie->afi + offset);
}
340
/* write a 32-bit value to a register in the PADS block */
static inline void pads_writel(struct tegra_pcie *pcie, u32 value,
			       unsigned long offset)
{
	writel(value, pcie->pads + offset);
}
346
/* read a 32-bit register from the PADS block */
static inline u32 pads_readl(struct tegra_pcie *pcie, unsigned long offset)
{
	return readl(pcie->pads + offset);
}
351
352 /*
353 * The configuration space mapping on Tegra is somewhat similar to the ECAM
354 * defined by PCIe. However it deviates a bit in how the 4 bits for extended
355 * register accesses are mapped:
356 *
357 * [27:24] extended register number
358 * [23:16] bus number
359 * [15:11] device number
360 * [10: 8] function number
361 * [ 7: 0] register number
362 *
363 * Mapping the whole extended configuration space would require 256 MiB of
364 * virtual address space, only a small part of which will actually be used.
365 * To work around this, a 1 MiB of virtual addresses are allocated per bus
366 * when the bus is first accessed. When the physical range is mapped, the
367 * the bus number bits are hidden so that the extended register number bits
368 * appear as bits [19:16]. Therefore the virtual mapping looks like this:
369 *
370 * [19:16] extended register number
371 * [15:11] device number
372 * [10: 8] function number
373 * [ 7: 0] register number
374 *
375 * This is achieved by stitching together 16 chunks of 64 KiB of physical
376 * address space via the MMU.
377 */
static unsigned long tegra_pcie_conf_offset(unsigned int devfn, int where)
{
	unsigned long offset;

	/* extended register number moves to bits [19:16] of the window */
	offset = (where & 0xf00) << 8;
	/* device [15:11] and function [10:8] */
	offset |= PCI_SLOT(devfn) << 11;
	offset |= PCI_FUNC(devfn) << 8;
	/* dword-aligned register number [7:0] */
	offset |= where & 0xfc;

	return offset;
}
383
/*
 * Build the 1 MiB virtual mapping for bus @busnr's configuration space as
 * described in the comment above tegra_pcie_conf_offset(): sixteen 64 KiB
 * chunks of the physical aperture (one per extended register group) are
 * stitched into one contiguous, uncached virtual area.
 *
 * Returns the new mapping or an ERR_PTR() on failure.
 */
static struct tegra_pcie_bus *tegra_pcie_bus_alloc(struct tegra_pcie *pcie,
						   unsigned int busnr)
{
	struct device *dev = pcie->dev;
	pgprot_t prot = pgprot_noncached(PAGE_KERNEL);
	phys_addr_t cs = pcie->cs->start;
	struct tegra_pcie_bus *bus;
	unsigned int i;
	int err;

	bus = kzalloc(sizeof(*bus), GFP_KERNEL);
	if (!bus)
		return ERR_PTR(-ENOMEM);

	INIT_LIST_HEAD(&bus->list);
	bus->nr = busnr;

	/* allocate 1 MiB of virtual addresses */
	bus->area = get_vm_area(SZ_1M, VM_IOREMAP);
	if (!bus->area) {
		err = -ENOMEM;
		goto free;
	}

	/* map each of the 16 chunks of 64 KiB each */
	for (i = 0; i < 16; i++) {
		unsigned long virt = (unsigned long)bus->area->addr +
				     i * SZ_64K;
		/*
		 * Each extended register group lives SZ_16M apart in the
		 * physical aperture; the bus number selects a 64 KiB slice
		 * within each group.
		 */
		phys_addr_t phys = cs + i * SZ_16M + busnr * SZ_64K;

		err = ioremap_page_range(virt, virt + SZ_64K, phys, prot);
		if (err < 0) {
			dev_err(dev, "ioremap_page_range() failed: %d\n", err);
			goto unmap;
		}
	}

	return bus;

unmap:
	/* vunmap() also tears down any page ranges already mapped above */
	vunmap(bus->area->addr);
free:
	kfree(bus);
	return ERR_PTR(err);
}
429
430 static int tegra_pcie_add_bus(struct pci_bus *bus)
431 {
432 struct pci_host_bridge *host = pci_find_host_bridge(bus);
433 struct tegra_pcie *pcie = pci_host_bridge_priv(host);
434 struct tegra_pcie_bus *b;
435
436 b = tegra_pcie_bus_alloc(pcie, bus->number);
437 if (IS_ERR(b))
438 return PTR_ERR(b);
439
440 list_add_tail(&b->list, &pcie->buses);
441
442 return 0;
443 }
444
445 static void tegra_pcie_remove_bus(struct pci_bus *child)
446 {
447 struct pci_host_bridge *host = pci_find_host_bridge(child);
448 struct tegra_pcie *pcie = pci_host_bridge_priv(host);
449 struct tegra_pcie_bus *bus, *tmp;
450
451 list_for_each_entry_safe(bus, tmp, &pcie->buses, list) {
452 if (bus->nr == child->number) {
453 vunmap(bus->area->addr);
454 list_del(&bus->list);
455 kfree(bus);
456 break;
457 }
458 }
459 }
460
461 static void __iomem *tegra_pcie_map_bus(struct pci_bus *bus,
462 unsigned int devfn,
463 int where)
464 {
465 struct pci_host_bridge *host = pci_find_host_bridge(bus);
466 struct tegra_pcie *pcie = pci_host_bridge_priv(host);
467 struct device *dev = pcie->dev;
468 void __iomem *addr = NULL;
469
470 if (bus->number == 0) {
471 unsigned int slot = PCI_SLOT(devfn);
472 struct tegra_pcie_port *port;
473
474 list_for_each_entry(port, &pcie->ports, list) {
475 if (port->index + 1 == slot) {
476 addr = port->base + (where & ~3);
477 break;
478 }
479 }
480 } else {
481 struct tegra_pcie_bus *b;
482
483 list_for_each_entry(b, &pcie->buses, list)
484 if (b->nr == bus->number)
485 addr = (void __iomem *)b->area->addr;
486
487 if (!addr) {
488 dev_err(dev, "failed to map cfg. space for bus %u\n",
489 bus->number);
490 return NULL;
491 }
492
493 addr += tegra_pcie_conf_offset(devfn, where);
494 }
495
496 return addr;
497 }
498
499 static int tegra_pcie_config_read(struct pci_bus *bus, unsigned int devfn,
500 int where, int size, u32 *value)
501 {
502 if (bus->number == 0)
503 return pci_generic_config_read32(bus, devfn, where, size,
504 value);
505
506 return pci_generic_config_read(bus, devfn, where, size, value);
507 }
508
509 static int tegra_pcie_config_write(struct pci_bus *bus, unsigned int devfn,
510 int where, int size, u32 value)
511 {
512 if (bus->number == 0)
513 return pci_generic_config_write32(bus, devfn, where, size,
514 value);
515
516 return pci_generic_config_write(bus, devfn, where, size, value);
517 }
518
/* configuration-space accessors and bus setup/teardown hooks */
static struct pci_ops tegra_pcie_ops = {
	.add_bus = tegra_pcie_add_bus,
	.remove_bus = tegra_pcie_remove_bus,
	.map_bus = tegra_pcie_map_bus,
	.read = tegra_pcie_config_read,
	.write = tegra_pcie_config_write,
};
526
527 static unsigned long tegra_pcie_port_get_pex_ctrl(struct tegra_pcie_port *port)
528 {
529 unsigned long ret = 0;
530
531 switch (port->index) {
532 case 0:
533 ret = AFI_PEX0_CTRL;
534 break;
535
536 case 1:
537 ret = AFI_PEX1_CTRL;
538 break;
539
540 case 2:
541 ret = AFI_PEX2_CTRL;
542 break;
543 }
544
545 return ret;
546 }
547
/*
 * Pulse the port's reset line: clear AFI_PEX_CTRL_RST (asserts reset, see
 * tegra_pcie_port_disable()), hold for 1-2 ms, then set it again to
 * release the downstream device from reset.
 */
static void tegra_pcie_port_reset(struct tegra_pcie_port *port)
{
	unsigned long ctrl = tegra_pcie_port_get_pex_ctrl(port);
	unsigned long value;

	/* pulse reset signal */
	value = afi_readl(port->pcie, ctrl);
	value &= ~AFI_PEX_CTRL_RST;
	afi_writel(port->pcie, value, ctrl);

	usleep_range(1000, 2000);

	value = afi_readl(port->pcie, ctrl);
	value |= AFI_PEX_CTRL_RST;
	afi_writel(port->pcie, value, ctrl);
}
564
/*
 * Enable @port: turn on its reference clock (and CLKREQ where supported),
 * pulse its reset line, and work around the PCA erratum on SoCs that
 * require it.
 */
static void tegra_pcie_port_enable(struct tegra_pcie_port *port)
{
	unsigned long ctrl = tegra_pcie_port_get_pex_ctrl(port);
	const struct tegra_pcie_soc *soc = port->pcie->soc;
	unsigned long value;

	/* enable reference clock */
	value = afi_readl(port->pcie, ctrl);
	value |= AFI_PEX_CTRL_REFCLK_EN;

	if (soc->has_pex_clkreq_en)
		value |= AFI_PEX_CTRL_CLKREQ_EN;

	value |= AFI_PEX_CTRL_OVERRIDE_EN;

	afi_writel(port->pcie, value, ctrl);

	tegra_pcie_port_reset(port);

	if (soc->force_pca_enable) {
		/* NOTE: direct writel - RP_VEND_CTL2 is a root port register */
		value = readl(port->base + RP_VEND_CTL2);
		value |= RP_VEND_CTL2_PCA_ENABLE;
		writel(value, port->base + RP_VEND_CTL2);
	}
}
590
/*
 * Disable @port: assert its reset line, then gate its reference clock
 * (and CLKREQ where supported). The inverse of tegra_pcie_port_enable().
 */
static void tegra_pcie_port_disable(struct tegra_pcie_port *port)
{
	unsigned long ctrl = tegra_pcie_port_get_pex_ctrl(port);
	const struct tegra_pcie_soc *soc = port->pcie->soc;
	unsigned long value;

	/* assert port reset */
	value = afi_readl(port->pcie, ctrl);
	value &= ~AFI_PEX_CTRL_RST;
	afi_writel(port->pcie, value, ctrl);

	/* disable reference clock */
	value = afi_readl(port->pcie, ctrl);

	if (soc->has_pex_clkreq_en)
		value &= ~AFI_PEX_CTRL_CLKREQ_EN;

	value &= ~AFI_PEX_CTRL_REFCLK_EN;
	afi_writel(port->pcie, value, ctrl);
}
611
/*
 * Release all resources held by @port: unmap its registers, give back the
 * memory region, unlink it from the controller's port list and free it.
 */
static void tegra_pcie_port_free(struct tegra_pcie_port *port)
{
	struct tegra_pcie *pcie = port->pcie;
	struct device *dev = pcie->dev;

	devm_iounmap(dev, port->base);
	devm_release_mem_region(dev, port->regs.start,
				resource_size(&port->regs));
	list_del(&port->list);
	devm_kfree(dev, port);
}
623
/* Tegra PCIE root complex wrongly reports device class */
static void tegra_pcie_fixup_class(struct pci_dev *dev)
{
	/* force the correct PCI-to-PCI bridge class code */
	dev->class = PCI_CLASS_BRIDGE_PCI << 8;
}
/* the device IDs below cover the Tegra20 and Tegra30 root ports */
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_NVIDIA, 0x0bf0, tegra_pcie_fixup_class);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_NVIDIA, 0x0bf1, tegra_pcie_fixup_class);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_NVIDIA, 0x0e1c, tegra_pcie_fixup_class);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_NVIDIA, 0x0e1d, tegra_pcie_fixup_class);
633
/* Tegra PCIE requires relaxed ordering */
static void tegra_pcie_relax_enable(struct pci_dev *dev)
{
	/* set the Enable Relaxed Ordering bit in the device control register */
	pcie_capability_set_word(dev, PCI_EXP_DEVCTL, PCI_EXP_DEVCTL_RELAX_EN);
}
/* applied to every device on the bus, hence PCI_ANY_ID */
DECLARE_PCI_FIXUP_FINAL(PCI_ANY_ID, PCI_ANY_ID, tegra_pcie_relax_enable);
640
641 static int tegra_pcie_request_resources(struct tegra_pcie *pcie)
642 {
643 struct pci_host_bridge *host = pci_host_bridge_from_priv(pcie);
644 struct list_head *windows = &host->windows;
645 struct device *dev = pcie->dev;
646 int err;
647
648 pci_add_resource_offset(windows, &pcie->pio, pcie->offset.io);
649 pci_add_resource_offset(windows, &pcie->mem, pcie->offset.mem);
650 pci_add_resource_offset(windows, &pcie->prefetch, pcie->offset.mem);
651 pci_add_resource(windows, &pcie->busn);
652
653 err = devm_request_pci_bus_resources(dev, windows);
654 if (err < 0)
655 return err;
656
657 pci_remap_iospace(&pcie->pio, pcie->io.start);
658
659 return 0;
660 }
661
662 static int tegra_pcie_map_irq(const struct pci_dev *pdev, u8 slot, u8 pin)
663 {
664 struct pci_host_bridge *host = pci_find_host_bridge(pdev->bus);
665 struct tegra_pcie *pcie = pci_host_bridge_priv(host);
666 int irq;
667
668 tegra_cpuidle_pcie_irqs_in_use();
669
670 irq = of_irq_parse_and_map_pci(pdev, slot, pin);
671 if (!irq)
672 irq = pcie->irq;
673
674 return irq;
675 }
676
677 static irqreturn_t tegra_pcie_isr(int irq, void *arg)
678 {
679 const char *err_msg[] = {
680 "Unknown",
681 "AXI slave error",
682 "AXI decode error",
683 "Target abort",
684 "Master abort",
685 "Invalid write",
686 "Legacy interrupt",
687 "Response decoding error",
688 "AXI response decoding error",
689 "Transaction timeout",
690 "Slot present pin change",
691 "Slot clock request change",
692 "TMS clock ramp change",
693 "TMS ready for power down",
694 "Peer2Peer error",
695 };
696 struct tegra_pcie *pcie = arg;
697 struct device *dev = pcie->dev;
698 u32 code, signature;
699
700 code = afi_readl(pcie, AFI_INTR_CODE) & AFI_INTR_CODE_MASK;
701 signature = afi_readl(pcie, AFI_INTR_SIGNATURE);
702 afi_writel(pcie, 0, AFI_INTR_CODE);
703
704 if (code == AFI_INTR_LEGACY)
705 return IRQ_NONE;
706
707 if (code >= ARRAY_SIZE(err_msg))
708 code = 0;
709
710 /*
711 * do not pollute kernel log with master abort reports since they
712 * happen a lot during enumeration
713 */
714 if (code == AFI_INTR_MASTER_ABORT)
715 dev_dbg(dev, "%s, signature: %08x\n", err_msg[code], signature);
716 else
717 dev_err(dev, "%s, signature: %08x\n", err_msg[code], signature);
718
719 if (code == AFI_INTR_TARGET_ABORT || code == AFI_INTR_MASTER_ABORT ||
720 code == AFI_INTR_FPCI_DECODE_ERROR) {
721 u32 fpci = afi_readl(pcie, AFI_UPPER_FPCI_ADDRESS) & 0xff;
722 u64 address = (u64)fpci << 32 | (signature & 0xfffffffc);
723
724 if (code == AFI_INTR_MASTER_ABORT)
725 dev_dbg(dev, " FPCI address: %10llx\n", address);
726 else
727 dev_err(dev, " FPCI address: %10llx\n", address);
728 }
729
730 return IRQ_HANDLED;
731 }
732
733 /*
734 * FPCI map is as follows:
735 * - 0xfdfc000000: I/O space
736 * - 0xfdfe000000: type 0 configuration space
737 * - 0xfdff000000: type 1 configuration space
738 * - 0xfe00000000: type 0 extended configuration space
739 * - 0xfe10000000: type 1 extended configuration space
740 */
/*
 * Program the AFI address translation BARs that map AXI (CPU) address
 * ranges onto the FPCI map documented in the comment above: BAR0 covers
 * extended configuration space, BAR1 the I/O window, BAR2/BAR3 the
 * prefetchable and non-prefetchable memory windows. Sizes are programmed
 * in 4 KiB units (hence the ">> 12" shifts).
 */
static void tegra_pcie_setup_translations(struct tegra_pcie *pcie)
{
	u32 fpci_bar, size, axi_address;

	/* Bar 0: type 1 extended configuration space */
	fpci_bar = 0xfe100000;
	size = resource_size(pcie->cs);
	axi_address = pcie->cs->start;
	afi_writel(pcie, axi_address, AFI_AXI_BAR0_START);
	afi_writel(pcie, size >> 12, AFI_AXI_BAR0_SZ);
	afi_writel(pcie, fpci_bar, AFI_FPCI_BAR0);

	/* Bar 1: downstream IO bar */
	fpci_bar = 0xfdfc0000;
	size = resource_size(&pcie->io);
	axi_address = pcie->io.start;
	afi_writel(pcie, axi_address, AFI_AXI_BAR1_START);
	afi_writel(pcie, size >> 12, AFI_AXI_BAR1_SZ);
	afi_writel(pcie, fpci_bar, AFI_FPCI_BAR1);

	/* Bar 2: prefetchable memory BAR */
	fpci_bar = (((pcie->prefetch.start >> 12) & 0x0fffffff) << 4) | 0x1;
	size = resource_size(&pcie->prefetch);
	axi_address = pcie->prefetch.start;
	afi_writel(pcie, axi_address, AFI_AXI_BAR2_START);
	afi_writel(pcie, size >> 12, AFI_AXI_BAR2_SZ);
	afi_writel(pcie, fpci_bar, AFI_FPCI_BAR2);

	/* Bar 3: non prefetchable memory BAR */
	fpci_bar = (((pcie->mem.start >> 12) & 0x0fffffff) << 4) | 0x1;
	size = resource_size(&pcie->mem);
	axi_address = pcie->mem.start;
	afi_writel(pcie, axi_address, AFI_AXI_BAR3_START);
	afi_writel(pcie, size >> 12, AFI_AXI_BAR3_SZ);
	afi_writel(pcie, fpci_bar, AFI_FPCI_BAR3);

	/* NULL out the remaining BARs as they are not used */
	afi_writel(pcie, 0, AFI_AXI_BAR4_START);
	afi_writel(pcie, 0, AFI_AXI_BAR4_SZ);
	afi_writel(pcie, 0, AFI_FPCI_BAR4);

	afi_writel(pcie, 0, AFI_AXI_BAR5_START);
	afi_writel(pcie, 0, AFI_AXI_BAR5_SZ);
	afi_writel(pcie, 0, AFI_FPCI_BAR5);

	/* map all upstream transactions as uncached */
	afi_writel(pcie, 0, AFI_CACHE_BAR0_ST);
	afi_writel(pcie, 0, AFI_CACHE_BAR0_SZ);
	afi_writel(pcie, 0, AFI_CACHE_BAR1_ST);
	afi_writel(pcie, 0, AFI_CACHE_BAR1_SZ);

	/* MSI translations are setup only when needed */
	afi_writel(pcie, 0, AFI_MSI_FPCI_BAR_ST);
	afi_writel(pcie, 0, AFI_MSI_BAR_SZ);
	afi_writel(pcie, 0, AFI_MSI_AXI_BAR_ST);
	/* NOTE(review): AFI_MSI_BAR_SZ is cleared twice; the second write
	 * looks redundant (AFI_MSI_AXI_BAR_ST is already cleared above) but
	 * is harmless -- confirm against the TRM before changing.
	 */
	afi_writel(pcie, 0, AFI_MSI_BAR_SZ);
}
798
799 static int tegra_pcie_pll_wait(struct tegra_pcie *pcie, unsigned long timeout)
800 {
801 const struct tegra_pcie_soc *soc = pcie->soc;
802 u32 value;
803
804 timeout = jiffies + msecs_to_jiffies(timeout);
805
806 while (time_before(jiffies, timeout)) {
807 value = pads_readl(pcie, soc->pads_pll_ctl);
808 if (value & PADS_PLL_CTL_LOCKDET)
809 return 0;
810 }
811
812 return -ETIMEDOUT;
813 }
814
/*
 * Bring up the internal (legacy) PCIe PHY: select pad function, configure
 * and lock the PADS PLL, then enable the TX/RX data paths. The register
 * sequence (IDDQ override -> PLL setup -> PLL reset pulse -> lock wait ->
 * IDDQ release -> data enable) is order-sensitive.
 *
 * Returns 0 on success or -ETIMEDOUT if the PLL fails to lock.
 */
static int tegra_pcie_phy_enable(struct tegra_pcie *pcie)
{
	struct device *dev = pcie->dev;
	const struct tegra_pcie_soc *soc = pcie->soc;
	u32 value;
	int err;

	/* initialize internal PHY, enable up to 16 PCIE lanes */
	pads_writel(pcie, 0x0, PADS_CTL_SEL);

	/* override IDDQ to 1 on all 4 lanes */
	value = pads_readl(pcie, PADS_CTL);
	value |= PADS_CTL_IDDQ_1L;
	pads_writel(pcie, value, PADS_CTL);

	/*
	 * Set up PHY PLL inputs select PLLE output as refclock,
	 * set TX ref sel to div10 (not div5).
	 */
	value = pads_readl(pcie, soc->pads_pll_ctl);
	value &= ~(PADS_PLL_CTL_REFCLK_MASK | PADS_PLL_CTL_TXCLKREF_MASK);
	value |= PADS_PLL_CTL_REFCLK_INTERNAL_CML | soc->tx_ref_sel;
	pads_writel(pcie, value, soc->pads_pll_ctl);

	/* reset PLL */
	value = pads_readl(pcie, soc->pads_pll_ctl);
	value &= ~PADS_PLL_CTL_RST_B4SM;
	pads_writel(pcie, value, soc->pads_pll_ctl);

	usleep_range(20, 100);

	/* take PLL out of reset */
	value = pads_readl(pcie, soc->pads_pll_ctl);
	value |= PADS_PLL_CTL_RST_B4SM;
	pads_writel(pcie, value, soc->pads_pll_ctl);

	/* wait for the PLL to lock */
	err = tegra_pcie_pll_wait(pcie, 500);
	if (err < 0) {
		dev_err(dev, "PLL failed to lock: %d\n", err);
		return err;
	}

	/* turn off IDDQ override */
	value = pads_readl(pcie, PADS_CTL);
	value &= ~PADS_CTL_IDDQ_1L;
	pads_writel(pcie, value, PADS_CTL);

	/* enable TX/RX data */
	value = pads_readl(pcie, PADS_CTL);
	value |= PADS_CTL_TX_DATA_EN_1L | PADS_CTL_RX_DATA_EN_1L;
	pads_writel(pcie, value, PADS_CTL);

	return 0;
}
870
/*
 * Shut down the internal (legacy) PCIe PHY: disable the data paths,
 * re-assert IDDQ and hold the PADS PLL in reset. Inverse of
 * tegra_pcie_phy_enable(). Always returns 0.
 */
static int tegra_pcie_phy_disable(struct tegra_pcie *pcie)
{
	const struct tegra_pcie_soc *soc = pcie->soc;
	u32 value;

	/* disable TX/RX data */
	value = pads_readl(pcie, PADS_CTL);
	value &= ~(PADS_CTL_TX_DATA_EN_1L | PADS_CTL_RX_DATA_EN_1L);
	pads_writel(pcie, value, PADS_CTL);

	/* override IDDQ */
	value = pads_readl(pcie, PADS_CTL);
	value |= PADS_CTL_IDDQ_1L;
	pads_writel(pcie, value, PADS_CTL);

	/* reset PLL */
	value = pads_readl(pcie, soc->pads_pll_ctl);
	value &= ~PADS_PLL_CTL_RST_B4SM;
	pads_writel(pcie, value, soc->pads_pll_ctl);

	usleep_range(20, 100);

	return 0;
}
895
896 static int tegra_pcie_port_phy_power_on(struct tegra_pcie_port *port)
897 {
898 struct device *dev = port->pcie->dev;
899 unsigned int i;
900 int err;
901
902 for (i = 0; i < port->lanes; i++) {
903 err = phy_power_on(port->phys[i]);
904 if (err < 0) {
905 dev_err(dev, "failed to power on PHY#%u: %d\n", i, err);
906 return err;
907 }
908 }
909
910 return 0;
911 }
912
913 static int tegra_pcie_port_phy_power_off(struct tegra_pcie_port *port)
914 {
915 struct device *dev = port->pcie->dev;
916 unsigned int i;
917 int err;
918
919 for (i = 0; i < port->lanes; i++) {
920 err = phy_power_off(port->phys[i]);
921 if (err < 0) {
922 dev_err(dev, "failed to power off PHY#%u: %d\n", i,
923 err);
924 return err;
925 }
926 }
927
928 return 0;
929 }
930
/*
 * Power on all PHYs for the controller. On legacy-PHY SoCs either the
 * single PHY (if provided) or the internal pad sequence is used; otherwise
 * every port's per-lane PHYs are powered on and the reference clock
 * drivers are configured.
 *
 * Returns 0 on success or a negative error code.
 */
static int tegra_pcie_phy_power_on(struct tegra_pcie *pcie)
{
	struct device *dev = pcie->dev;
	const struct tegra_pcie_soc *soc = pcie->soc;
	struct tegra_pcie_port *port;
	int err;

	if (pcie->legacy_phy) {
		if (pcie->phy)
			err = phy_power_on(pcie->phy);
		else
			err = tegra_pcie_phy_enable(pcie);

		if (err < 0)
			dev_err(dev, "failed to power on PHY: %d\n", err);

		return err;
	}

	list_for_each_entry(port, &pcie->ports, list) {
		err = tegra_pcie_port_phy_power_on(port);
		if (err < 0) {
			dev_err(dev,
				"failed to power on PCIe port %u PHY: %d\n",
				port->index, err);
			return err;
		}
	}

	/* Configure the reference clock driver */
	pads_writel(pcie, soc->pads_refclk_cfg0, PADS_REFCLK_CFG0);

	/* CFG1 only exists on SoCs with more than two ports */
	if (soc->num_ports > 2)
		pads_writel(pcie, soc->pads_refclk_cfg1, PADS_REFCLK_CFG1);

	return 0;
}
968
/*
 * Power off all PHYs for the controller; mirrors tegra_pcie_phy_power_on().
 *
 * Returns 0 on success or a negative error code.
 */
static int tegra_pcie_phy_power_off(struct tegra_pcie *pcie)
{
	struct device *dev = pcie->dev;
	struct tegra_pcie_port *port;
	int err;

	if (pcie->legacy_phy) {
		if (pcie->phy)
			err = phy_power_off(pcie->phy);
		else
			err = tegra_pcie_phy_disable(pcie);

		if (err < 0)
			dev_err(dev, "failed to power off PHY: %d\n", err);

		return err;
	}

	list_for_each_entry(port, &pcie->ports, list) {
		err = tegra_pcie_port_phy_power_off(port);
		if (err < 0) {
			dev_err(dev,
				"failed to power off PCIe port %u PHY: %d\n",
				port->index, err);
			return err;
		}
	}

	return 0;
}
999
/*
 * Bring the whole controller out of reset and into an operational state:
 * PLLE power-down handshake, crossbar/port configuration, Gen2 fuse,
 * PHY power-on, FPCI enable and error interrupt setup. The register
 * sequence is order-sensitive.
 *
 * Returns 0 on success or a negative error code from PHY power-on.
 */
static int tegra_pcie_enable_controller(struct tegra_pcie *pcie)
{
	struct device *dev = pcie->dev;
	const struct tegra_pcie_soc *soc = pcie->soc;
	struct tegra_pcie_port *port;
	unsigned long value;
	int err;

	/* enable PLL power down */
	if (pcie->phy) {
		value = afi_readl(pcie, AFI_PLLE_CONTROL);
		value &= ~AFI_PLLE_CONTROL_BYPASS_PADS2PLLE_CONTROL;
		value |= AFI_PLLE_CONTROL_PADS2PLLE_CONTROL_EN;
		afi_writel(pcie, value, AFI_PLLE_CONTROL);
	}

	/* power down PCIe slot clock bias pad */
	if (soc->has_pex_bias_ctrl)
		afi_writel(pcie, 0, AFI_PEXBIAS_CTRL_0);

	/* configure mode and disable all ports */
	value = afi_readl(pcie, AFI_PCIE_CONFIG);
	value &= ~AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_MASK;
	value |= AFI_PCIE_CONFIG_PCIE_DISABLE_ALL | pcie->xbar_config;

	/* then re-enable only the ports that are actually in use */
	list_for_each_entry(port, &pcie->ports, list)
		value &= ~AFI_PCIE_CONFIG_PCIE_DISABLE(port->index);

	afi_writel(pcie, value, AFI_PCIE_CONFIG);

	if (soc->has_gen2) {
		value = afi_readl(pcie, AFI_FUSE);
		value &= ~AFI_FUSE_PCIE_T0_GEN2_DIS;
		afi_writel(pcie, value, AFI_FUSE);
	} else {
		value = afi_readl(pcie, AFI_FUSE);
		value |= AFI_FUSE_PCIE_T0_GEN2_DIS;
		afi_writel(pcie, value, AFI_FUSE);
	}

	if (soc->program_uphy) {
		err = tegra_pcie_phy_power_on(pcie);
		if (err < 0) {
			dev_err(dev, "failed to power on PHY(s): %d\n", err);
			return err;
		}
	}

	/* take the PCIe interface module out of reset */
	reset_control_deassert(pcie->pcie_xrst);

	/* finally enable PCIe */
	value = afi_readl(pcie, AFI_CONFIGURATION);
	value |= AFI_CONFIGURATION_EN_FPCI;
	afi_writel(pcie, value, AFI_CONFIGURATION);

	value = AFI_INTR_EN_INI_SLVERR | AFI_INTR_EN_INI_DECERR |
		AFI_INTR_EN_TGT_SLVERR | AFI_INTR_EN_TGT_DECERR |
		AFI_INTR_EN_TGT_WRERR | AFI_INTR_EN_DFPCI_DECERR;

	if (soc->has_intr_prsnt_sense)
		value |= AFI_INTR_EN_PRSNT_SENSE;

	afi_writel(pcie, value, AFI_AFI_INTR_ENABLE);
	afi_writel(pcie, 0xffffffff, AFI_SM_INTR_ENABLE);

	/* don't enable MSI for now, only when needed */
	afi_writel(pcie, AFI_INTR_MASK_INT_MASK, AFI_INTR_MASK);

	/* disable all exceptions */
	afi_writel(pcie, 0, AFI_FPCI_ERROR_MASKS);

	return 0;
}
1074
/*
 * tegra_pcie_power_off() - power down the PCIe controller
 * @pcie: Tegra PCIe controller state
 *
 * Reverse of tegra_pcie_power_on(): optionally powers off the PHY(s),
 * asserts all controller resets, powers off the PCIe partition (unless a
 * generic PM domain manages it) and disables the supply regulators.
 * Failures of individual steps are only logged; power-off continues
 * regardless so as much of the hardware as possible is quiesced.
 */
static void tegra_pcie_power_off(struct tegra_pcie *pcie)
{
	struct device *dev = pcie->dev;
	const struct tegra_pcie_soc *soc = pcie->soc;
	int err;

	/* TODO: disable and unprepare clocks? */

	if (soc->program_uphy) {
		err = tegra_pcie_phy_power_off(pcie);
		if (err < 0)
			dev_err(dev, "failed to power off PHY(s): %d\n", err);
	}

	/* hold the whole controller in reset while it is unpowered */
	reset_control_assert(pcie->pcie_xrst);
	reset_control_assert(pcie->afi_rst);
	reset_control_assert(pcie->pex_rst);

	/* with a PM domain attached, the domain powers the partition off */
	if (!dev->pm_domain)
		tegra_powergate_power_off(TEGRA_POWERGATE_PCIE);

	err = regulator_bulk_disable(pcie->num_supplies, pcie->supplies);
	if (err < 0)
		dev_warn(dev, "failed to disable regulators: %d\n", err);
}
1100
1101 static int tegra_pcie_power_on(struct tegra_pcie *pcie)
1102 {
1103 struct device *dev = pcie->dev;
1104 const struct tegra_pcie_soc *soc = pcie->soc;
1105 int err;
1106
1107 reset_control_assert(pcie->pcie_xrst);
1108 reset_control_assert(pcie->afi_rst);
1109 reset_control_assert(pcie->pex_rst);
1110
1111 if (!dev->pm_domain)
1112 tegra_powergate_power_off(TEGRA_POWERGATE_PCIE);
1113
1114 /* enable regulators */
1115 err = regulator_bulk_enable(pcie->num_supplies, pcie->supplies);
1116 if (err < 0)
1117 dev_err(dev, "failed to enable regulators: %d\n", err);
1118
1119 if (dev->pm_domain) {
1120 err = clk_prepare_enable(pcie->pex_clk);
1121 if (err) {
1122 dev_err(dev, "failed to enable PEX clock: %d\n", err);
1123 return err;
1124 }
1125 reset_control_deassert(pcie->pex_rst);
1126 } else {
1127 err = tegra_powergate_sequence_power_up(TEGRA_POWERGATE_PCIE,
1128 pcie->pex_clk,
1129 pcie->pex_rst);
1130 if (err) {
1131 dev_err(dev, "powerup sequence failed: %d\n", err);
1132 return err;
1133 }
1134 }
1135
1136 reset_control_deassert(pcie->afi_rst);
1137
1138 err = clk_prepare_enable(pcie->afi_clk);
1139 if (err < 0) {
1140 dev_err(dev, "failed to enable AFI clock: %d\n", err);
1141 return err;
1142 }
1143
1144 if (soc->has_cml_clk) {
1145 err = clk_prepare_enable(pcie->cml_clk);
1146 if (err < 0) {
1147 dev_err(dev, "failed to enable CML clock: %d\n", err);
1148 return err;
1149 }
1150 }
1151
1152 err = clk_prepare_enable(pcie->pll_e);
1153 if (err < 0) {
1154 dev_err(dev, "failed to enable PLLE clock: %d\n", err);
1155 return err;
1156 }
1157
1158 return 0;
1159 }
1160
1161 static int tegra_pcie_clocks_get(struct tegra_pcie *pcie)
1162 {
1163 struct device *dev = pcie->dev;
1164 const struct tegra_pcie_soc *soc = pcie->soc;
1165
1166 pcie->pex_clk = devm_clk_get(dev, "pex");
1167 if (IS_ERR(pcie->pex_clk))
1168 return PTR_ERR(pcie->pex_clk);
1169
1170 pcie->afi_clk = devm_clk_get(dev, "afi");
1171 if (IS_ERR(pcie->afi_clk))
1172 return PTR_ERR(pcie->afi_clk);
1173
1174 pcie->pll_e = devm_clk_get(dev, "pll_e");
1175 if (IS_ERR(pcie->pll_e))
1176 return PTR_ERR(pcie->pll_e);
1177
1178 if (soc->has_cml_clk) {
1179 pcie->cml_clk = devm_clk_get(dev, "cml");
1180 if (IS_ERR(pcie->cml_clk))
1181 return PTR_ERR(pcie->cml_clk);
1182 }
1183
1184 return 0;
1185 }
1186
1187 static int tegra_pcie_resets_get(struct tegra_pcie *pcie)
1188 {
1189 struct device *dev = pcie->dev;
1190
1191 pcie->pex_rst = devm_reset_control_get_exclusive(dev, "pex");
1192 if (IS_ERR(pcie->pex_rst))
1193 return PTR_ERR(pcie->pex_rst);
1194
1195 pcie->afi_rst = devm_reset_control_get_exclusive(dev, "afi");
1196 if (IS_ERR(pcie->afi_rst))
1197 return PTR_ERR(pcie->afi_rst);
1198
1199 pcie->pcie_xrst = devm_reset_control_get_exclusive(dev, "pcie_x");
1200 if (IS_ERR(pcie->pcie_xrst))
1201 return PTR_ERR(pcie->pcie_xrst);
1202
1203 return 0;
1204 }
1205
1206 static int tegra_pcie_phys_get_legacy(struct tegra_pcie *pcie)
1207 {
1208 struct device *dev = pcie->dev;
1209 int err;
1210
1211 pcie->phy = devm_phy_optional_get(dev, "pcie");
1212 if (IS_ERR(pcie->phy)) {
1213 err = PTR_ERR(pcie->phy);
1214 dev_err(dev, "failed to get PHY: %d\n", err);
1215 return err;
1216 }
1217
1218 err = phy_init(pcie->phy);
1219 if (err < 0) {
1220 dev_err(dev, "failed to initialize PHY: %d\n", err);
1221 return err;
1222 }
1223
1224 pcie->legacy_phy = true;
1225
1226 return 0;
1227 }
1228
1229 static struct phy *devm_of_phy_optional_get_index(struct device *dev,
1230 struct device_node *np,
1231 const char *consumer,
1232 unsigned int index)
1233 {
1234 struct phy *phy;
1235 char *name;
1236
1237 name = kasprintf(GFP_KERNEL, "%s-%u", consumer, index);
1238 if (!name)
1239 return ERR_PTR(-ENOMEM);
1240
1241 phy = devm_of_phy_get(dev, np, name);
1242 kfree(name);
1243
1244 if (IS_ERR(phy) && PTR_ERR(phy) == -ENODEV)
1245 phy = NULL;
1246
1247 return phy;
1248 }
1249
1250 static int tegra_pcie_port_get_phys(struct tegra_pcie_port *port)
1251 {
1252 struct device *dev = port->pcie->dev;
1253 struct phy *phy;
1254 unsigned int i;
1255 int err;
1256
1257 port->phys = devm_kcalloc(dev, sizeof(phy), port->lanes, GFP_KERNEL);
1258 if (!port->phys)
1259 return -ENOMEM;
1260
1261 for (i = 0; i < port->lanes; i++) {
1262 phy = devm_of_phy_optional_get_index(dev, port->np, "pcie", i);
1263 if (IS_ERR(phy)) {
1264 dev_err(dev, "failed to get PHY#%u: %ld\n", i,
1265 PTR_ERR(phy));
1266 return PTR_ERR(phy);
1267 }
1268
1269 err = phy_init(phy);
1270 if (err < 0) {
1271 dev_err(dev, "failed to initialize PHY#%u: %d\n", i,
1272 err);
1273 return err;
1274 }
1275
1276 port->phys[i] = phy;
1277 }
1278
1279 return 0;
1280 }
1281
1282 static int tegra_pcie_phys_get(struct tegra_pcie *pcie)
1283 {
1284 const struct tegra_pcie_soc *soc = pcie->soc;
1285 struct device_node *np = pcie->dev->of_node;
1286 struct tegra_pcie_port *port;
1287 int err;
1288
1289 if (!soc->has_gen2 || of_find_property(np, "phys", NULL) != NULL)
1290 return tegra_pcie_phys_get_legacy(pcie);
1291
1292 list_for_each_entry(port, &pcie->ports, list) {
1293 err = tegra_pcie_port_get_phys(port);
1294 if (err < 0)
1295 return err;
1296 }
1297
1298 return 0;
1299 }
1300
/*
 * tegra_pcie_get_resources() - acquire clocks, resets, PHYs, regions and IRQ
 * @pcie: Tegra PCIe controller state
 *
 * Looks up all resources the controller needs, powers it up and maps the
 * "pads" and "afi" register regions. The configuration space ("cs")
 * region is only requested here and remapped on demand later. Failures
 * after power-up branch to the poweroff label so the controller is
 * powered back down.
 *
 * Returns 0 on success or a negative error code.
 */
static int tegra_pcie_get_resources(struct tegra_pcie *pcie)
{
	struct device *dev = pcie->dev;
	struct platform_device *pdev = to_platform_device(dev);
	struct resource *pads, *afi, *res;
	const struct tegra_pcie_soc *soc = pcie->soc;
	int err;

	err = tegra_pcie_clocks_get(pcie);
	if (err) {
		dev_err(dev, "failed to get clocks: %d\n", err);
		return err;
	}

	err = tegra_pcie_resets_get(pcie);
	if (err) {
		dev_err(dev, "failed to get resets: %d\n", err);
		return err;
	}

	/* only SoCs that program the UPHY themselves need the PHYs */
	if (soc->program_uphy) {
		err = tegra_pcie_phys_get(pcie);
		if (err < 0) {
			dev_err(dev, "failed to get PHYs: %d\n", err);
			return err;
		}
	}

	err = tegra_pcie_power_on(pcie);
	if (err) {
		dev_err(dev, "failed to power up: %d\n", err);
		return err;
	}

	pads = platform_get_resource_byname(pdev, IORESOURCE_MEM, "pads");
	pcie->pads = devm_ioremap_resource(dev, pads);
	if (IS_ERR(pcie->pads)) {
		err = PTR_ERR(pcie->pads);
		goto poweroff;
	}

	afi = platform_get_resource_byname(pdev, IORESOURCE_MEM, "afi");
	pcie->afi = devm_ioremap_resource(dev, afi);
	if (IS_ERR(pcie->afi)) {
		err = PTR_ERR(pcie->afi);
		goto poweroff;
	}

	/* request configuration space, but remap later, on demand */
	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "cs");
	if (!res) {
		err = -EADDRNOTAVAIL;
		goto poweroff;
	}

	pcie->cs = devm_request_mem_region(dev, res->start,
					   resource_size(res), res->name);
	if (!pcie->cs) {
		err = -EADDRNOTAVAIL;
		goto poweroff;
	}

	/* request interrupt */
	err = platform_get_irq_byname(pdev, "intr");
	if (err < 0) {
		dev_err(dev, "failed to get IRQ: %d\n", err);
		goto poweroff;
	}

	pcie->irq = err;

	err = request_irq(pcie->irq, tegra_pcie_isr, IRQF_SHARED, "PCIE", pcie);
	if (err) {
		dev_err(dev, "failed to register IRQ: %d\n", err);
		goto poweroff;
	}

	return 0;

poweroff:
	tegra_pcie_power_off(pcie);
	return err;
}
1384
/*
 * tegra_pcie_put_resources() - release the IRQ, power down and tear down PHY
 * @pcie: Tegra PCIe controller state
 *
 * Frees the controller interrupt if one was requested, powers the
 * controller down and exits the PHY on SoCs that program the UPHY.
 *
 * NOTE(review): only pcie->phy (the legacy single PHY) is passed to
 * phy_exit() here; per-port PHYs acquired via tegra_pcie_port_get_phys()
 * do not appear to be exited on this path - verify against the probe
 * error handling.
 *
 * Always returns 0.
 */
static int tegra_pcie_put_resources(struct tegra_pcie *pcie)
{
	struct device *dev = pcie->dev;
	const struct tegra_pcie_soc *soc = pcie->soc;
	int err;

	if (pcie->irq > 0)
		free_irq(pcie->irq, pcie);

	tegra_pcie_power_off(pcie);

	if (soc->program_uphy) {
		err = phy_exit(pcie->phy);
		if (err < 0)
			dev_err(dev, "failed to teardown PHY: %d\n", err);
	}

	return 0;
}
1404
1405 static int tegra_msi_alloc(struct tegra_msi *chip)
1406 {
1407 int msi;
1408
1409 mutex_lock(&chip->lock);
1410
1411 msi = find_first_zero_bit(chip->used, INT_PCI_MSI_NR);
1412 if (msi < INT_PCI_MSI_NR)
1413 set_bit(msi, chip->used);
1414 else
1415 msi = -ENOSPC;
1416
1417 mutex_unlock(&chip->lock);
1418
1419 return msi;
1420 }
1421
1422 static void tegra_msi_free(struct tegra_msi *chip, unsigned long irq)
1423 {
1424 struct device *dev = chip->chip.dev;
1425
1426 mutex_lock(&chip->lock);
1427
1428 if (!test_bit(irq, chip->used))
1429 dev_err(dev, "trying to free unused MSI#%lu\n", irq);
1430 else
1431 clear_bit(irq, chip->used);
1432
1433 mutex_unlock(&chip->lock);
1434 }
1435
/*
 * tegra_pcie_msi_irq() - handler for the controller's MSI interrupt
 * @irq: Linux interrupt number (unused)
 * @data: the struct tegra_pcie passed to request_irq()
 *
 * Pending MSI vectors are latched in eight 32-bit AFI_MSI_VEC registers.
 * For every set bit: acknowledge it in hardware first, then dispatch the
 * Linux interrupt mapped to that hardware vector, if any. The register is
 * re-read after each dispatch so vectors that fire while handling are
 * picked up in the same invocation.
 *
 * Returns IRQ_HANDLED if at least one vector was processed, IRQ_NONE
 * otherwise.
 */
static irqreturn_t tegra_pcie_msi_irq(int irq, void *data)
{
	struct tegra_pcie *pcie = data;
	struct device *dev = pcie->dev;
	struct tegra_msi *msi = &pcie->msi;
	unsigned int i, processed = 0;

	for (i = 0; i < 8; i++) {
		unsigned long reg = afi_readl(pcie, AFI_MSI_VEC0 + i * 4);

		while (reg) {
			unsigned int offset = find_first_bit(&reg, 32);
			unsigned int index = i * 32 + offset;
			unsigned int irq;

			/* clear the interrupt */
			afi_writel(pcie, 1 << offset, AFI_MSI_VEC0 + i * 4);

			irq = irq_find_mapping(msi->domain, index);
			if (irq) {
				/* only dispatch vectors we actually handed out */
				if (test_bit(index, msi->used))
					generic_handle_irq(irq);
				else
					dev_info(dev, "unhandled MSI\n");
			} else {
				/*
				 * that's weird who triggered this?
				 * just clear it
				 */
				dev_info(dev, "unexpected MSI\n");
			}

			/* see if there's any more pending in this vector */
			reg = afi_readl(pcie, AFI_MSI_VEC0 + i * 4);

			processed++;
		}
	}

	return processed > 0 ? IRQ_HANDLED : IRQ_NONE;
}
1477
1478 static int tegra_msi_setup_irq(struct msi_controller *chip,
1479 struct pci_dev *pdev, struct msi_desc *desc)
1480 {
1481 struct tegra_msi *msi = to_tegra_msi(chip);
1482 struct msi_msg msg;
1483 unsigned int irq;
1484 int hwirq;
1485
1486 hwirq = tegra_msi_alloc(msi);
1487 if (hwirq < 0)
1488 return hwirq;
1489
1490 irq = irq_create_mapping(msi->domain, hwirq);
1491 if (!irq) {
1492 tegra_msi_free(msi, hwirq);
1493 return -EINVAL;
1494 }
1495
1496 irq_set_msi_desc(irq, desc);
1497
1498 msg.address_lo = lower_32_bits(msi->phys);
1499 msg.address_hi = upper_32_bits(msi->phys);
1500 msg.data = hwirq;
1501
1502 pci_write_msi_msg(irq, &msg);
1503
1504 return 0;
1505 }
1506
1507 static void tegra_msi_teardown_irq(struct msi_controller *chip,
1508 unsigned int irq)
1509 {
1510 struct tegra_msi *msi = to_tegra_msi(chip);
1511 struct irq_data *d = irq_get_irq_data(irq);
1512 irq_hw_number_t hwirq = irqd_to_hwirq(d);
1513
1514 irq_dispose_mapping(irq);
1515 tegra_msi_free(msi, hwirq);
1516 }
1517
/* irq_chip for MSI interrupts: mask/unmask via the generic PCI MSI helpers */
static struct irq_chip tegra_msi_irq_chip = {
	.name = "Tegra PCIe MSI",
	.irq_enable = pci_msi_unmask_irq,
	.irq_disable = pci_msi_mask_irq,
	.irq_mask = pci_msi_mask_irq,
	.irq_unmask = pci_msi_unmask_irq,
};
1525
/*
 * tegra_msi_map() - irq_domain .map callback for the MSI domain
 *
 * Installs the MSI irq_chip with a simple-IRQ flow handler on the newly
 * mapped interrupt and stores the domain's host data (the tegra_msi
 * chip) as chip data. Also tells the cpuidle driver that PCIe interrupts
 * are now in use.
 */
static int tegra_msi_map(struct irq_domain *domain, unsigned int irq,
			 irq_hw_number_t hwirq)
{
	irq_set_chip_and_handler(irq, &tegra_msi_irq_chip, handle_simple_irq);
	irq_set_chip_data(irq, domain->host_data);

	tegra_cpuidle_pcie_irqs_in_use();

	return 0;
}
1536
/* only .map is needed; mappings are created on demand in setup_irq */
static const struct irq_domain_ops msi_domain_ops = {
	.map = tegra_msi_map,
};
1540
1541 static int tegra_pcie_enable_msi(struct tegra_pcie *pcie)
1542 {
1543 struct pci_host_bridge *host = pci_host_bridge_from_priv(pcie);
1544 struct platform_device *pdev = to_platform_device(pcie->dev);
1545 const struct tegra_pcie_soc *soc = pcie->soc;
1546 struct tegra_msi *msi = &pcie->msi;
1547 struct device *dev = pcie->dev;
1548 int err;
1549 u32 reg;
1550
1551 mutex_init(&msi->lock);
1552
1553 msi->chip.dev = dev;
1554 msi->chip.setup_irq = tegra_msi_setup_irq;
1555 msi->chip.teardown_irq = tegra_msi_teardown_irq;
1556
1557 msi->domain = irq_domain_add_linear(dev->of_node, INT_PCI_MSI_NR,
1558 &msi_domain_ops, &msi->chip);
1559 if (!msi->domain) {
1560 dev_err(dev, "failed to create IRQ domain\n");
1561 return -ENOMEM;
1562 }
1563
1564 err = platform_get_irq_byname(pdev, "msi");
1565 if (err < 0) {
1566 dev_err(dev, "failed to get IRQ: %d\n", err);
1567 goto err;
1568 }
1569
1570 msi->irq = err;
1571
1572 err = request_irq(msi->irq, tegra_pcie_msi_irq, IRQF_NO_THREAD,
1573 tegra_msi_irq_chip.name, pcie);
1574 if (err < 0) {
1575 dev_err(dev, "failed to request IRQ: %d\n", err);
1576 goto err;
1577 }
1578
1579 /* setup AFI/FPCI range */
1580 msi->pages = __get_free_pages(GFP_KERNEL, 0);
1581 msi->phys = virt_to_phys((void *)msi->pages);
1582
1583 afi_writel(pcie, msi->phys >> soc->msi_base_shift, AFI_MSI_FPCI_BAR_ST);
1584 afi_writel(pcie, msi->phys, AFI_MSI_AXI_BAR_ST);
1585 /* this register is in 4K increments */
1586 afi_writel(pcie, 1, AFI_MSI_BAR_SZ);
1587
1588 /* enable all MSI vectors */
1589 afi_writel(pcie, 0xffffffff, AFI_MSI_EN_VEC0);
1590 afi_writel(pcie, 0xffffffff, AFI_MSI_EN_VEC1);
1591 afi_writel(pcie, 0xffffffff, AFI_MSI_EN_VEC2);
1592 afi_writel(pcie, 0xffffffff, AFI_MSI_EN_VEC3);
1593 afi_writel(pcie, 0xffffffff, AFI_MSI_EN_VEC4);
1594 afi_writel(pcie, 0xffffffff, AFI_MSI_EN_VEC5);
1595 afi_writel(pcie, 0xffffffff, AFI_MSI_EN_VEC6);
1596 afi_writel(pcie, 0xffffffff, AFI_MSI_EN_VEC7);
1597
1598 /* and unmask the MSI interrupt */
1599 reg = afi_readl(pcie, AFI_INTR_MASK);
1600 reg |= AFI_INTR_MASK_MSI_MASK;
1601 afi_writel(pcie, reg, AFI_INTR_MASK);
1602
1603 host->msi = &msi->chip;
1604
1605 return 0;
1606
1607 err:
1608 irq_domain_remove(msi->domain);
1609 return err;
1610 }
1611
/*
 * tegra_pcie_disable_msi() - tear down MSI support
 * @pcie: Tegra PCIe controller state
 *
 * Masks the MSI interrupt in the AFI, clears all eight vector enable
 * registers, frees the MSI target page and the MSI IRQ, disposes of
 * every remaining domain mapping and removes the IRQ domain.
 *
 * Always returns 0.
 */
static int tegra_pcie_disable_msi(struct tegra_pcie *pcie)
{
	struct tegra_msi *msi = &pcie->msi;
	unsigned int i, irq;
	u32 value;

	/* mask the MSI interrupt */
	value = afi_readl(pcie, AFI_INTR_MASK);
	value &= ~AFI_INTR_MASK_MSI_MASK;
	afi_writel(pcie, value, AFI_INTR_MASK);

	/* disable all MSI vectors */
	afi_writel(pcie, 0, AFI_MSI_EN_VEC0);
	afi_writel(pcie, 0, AFI_MSI_EN_VEC1);
	afi_writel(pcie, 0, AFI_MSI_EN_VEC2);
	afi_writel(pcie, 0, AFI_MSI_EN_VEC3);
	afi_writel(pcie, 0, AFI_MSI_EN_VEC4);
	afi_writel(pcie, 0, AFI_MSI_EN_VEC5);
	afi_writel(pcie, 0, AFI_MSI_EN_VEC6);
	afi_writel(pcie, 0, AFI_MSI_EN_VEC7);

	free_pages(msi->pages, 0);

	if (msi->irq > 0)
		free_irq(msi->irq, pcie);

	/* drop any mappings that are still live before removing the domain */
	for (i = 0; i < INT_PCI_MSI_NR; i++) {
		irq = irq_find_mapping(msi->domain, i);
		if (irq > 0)
			irq_dispose_mapping(irq);
	}

	irq_domain_remove(msi->domain);

	return 0;
}
1648
/*
 * tegra_pcie_get_xbar_config() - translate lane counts into an XBAR setting
 * @pcie: Tegra PCIe controller state
 * @lanes: packed per-port lane counts, one byte per root port index
 * @xbar: output; AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_* value on success
 *
 * The supported lane distributions - and the register values encoding
 * them - differ per SoC generation, so dispatch on the compatible string.
 * Tegra186 falls back to a default configuration for unknown values;
 * all other SoCs treat an unknown distribution as an error.
 *
 * Returns 0 if @lanes matched a supported configuration, -EINVAL
 * otherwise.
 */
static int tegra_pcie_get_xbar_config(struct tegra_pcie *pcie, u32 lanes,
				      u32 *xbar)
{
	struct device *dev = pcie->dev;
	struct device_node *np = dev->of_node;

	if (of_device_is_compatible(np, "nvidia,tegra186-pcie")) {
		switch (lanes) {
		case 0x010004:
			dev_info(dev, "4x1, 1x1 configuration\n");
			*xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_401;
			return 0;

		case 0x010102:
			dev_info(dev, "2x1, 1X1, 1x1 configuration\n");
			*xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_211;
			return 0;

		case 0x010101:
			dev_info(dev, "1x1, 1x1, 1x1 configuration\n");
			*xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_111;
			return 0;

		default:
			dev_info(dev, "wrong configuration updated in DT, "
				 "switching to default 2x1, 1x1, 1x1 "
				 "configuration\n");
			*xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_211;
			return 0;
		}
	} else if (of_device_is_compatible(np, "nvidia,tegra124-pcie") ||
		   of_device_is_compatible(np, "nvidia,tegra210-pcie")) {
		switch (lanes) {
		case 0x0000104:
			dev_info(dev, "4x1, 1x1 configuration\n");
			*xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_X4_X1;
			return 0;

		case 0x0000102:
			dev_info(dev, "2x1, 1x1 configuration\n");
			*xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_X2_X1;
			return 0;
		}
	} else if (of_device_is_compatible(np, "nvidia,tegra30-pcie")) {
		switch (lanes) {
		case 0x00000204:
			dev_info(dev, "4x1, 2x1 configuration\n");
			*xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_420;
			return 0;

		case 0x00020202:
			dev_info(dev, "2x3 configuration\n");
			*xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_222;
			return 0;

		case 0x00010104:
			dev_info(dev, "4x1, 1x2 configuration\n");
			*xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_411;
			return 0;
		}
	} else if (of_device_is_compatible(np, "nvidia,tegra20-pcie")) {
		switch (lanes) {
		case 0x00000004:
			dev_info(dev, "single-mode configuration\n");
			*xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_SINGLE;
			return 0;

		case 0x00000202:
			dev_info(dev, "dual-mode configuration\n");
			*xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_DUAL;
			return 0;
		}
	}

	return -EINVAL;
}
1725
1726 /*
1727 * Check whether a given set of supplies is available in a device tree node.
1728 * This is used to check whether the new or the legacy device tree bindings
1729 * should be used.
1730 */
1731 static bool of_regulator_bulk_available(struct device_node *np,
1732 struct regulator_bulk_data *supplies,
1733 unsigned int num_supplies)
1734 {
1735 char property[32];
1736 unsigned int i;
1737
1738 for (i = 0; i < num_supplies; i++) {
1739 snprintf(property, 32, "%s-supply", supplies[i].supply);
1740
1741 if (of_find_property(np, property, NULL) == NULL)
1742 return false;
1743 }
1744
1745 return true;
1746 }
1747
1748 /*
1749 * Old versions of the device tree binding for this device used a set of power
1750 * supplies that didn't match the hardware inputs. This happened to work for a
1751 * number of cases but is not future proof. However to preserve backwards-
1752 * compatibility with old device trees, this function will try to use the old
1753 * set of supplies.
1754 */
1755 static int tegra_pcie_get_legacy_regulators(struct tegra_pcie *pcie)
1756 {
1757 struct device *dev = pcie->dev;
1758 struct device_node *np = dev->of_node;
1759
1760 if (of_device_is_compatible(np, "nvidia,tegra30-pcie"))
1761 pcie->num_supplies = 3;
1762 else if (of_device_is_compatible(np, "nvidia,tegra20-pcie"))
1763 pcie->num_supplies = 2;
1764
1765 if (pcie->num_supplies == 0) {
1766 dev_err(dev, "device %pOF not supported in legacy mode\n", np);
1767 return -ENODEV;
1768 }
1769
1770 pcie->supplies = devm_kcalloc(dev, pcie->num_supplies,
1771 sizeof(*pcie->supplies),
1772 GFP_KERNEL);
1773 if (!pcie->supplies)
1774 return -ENOMEM;
1775
1776 pcie->supplies[0].supply = "pex-clk";
1777 pcie->supplies[1].supply = "vdd";
1778
1779 if (pcie->num_supplies > 2)
1780 pcie->supplies[2].supply = "avdd";
1781
1782 return devm_regulator_bulk_get(dev, pcie->num_supplies, pcie->supplies);
1783 }
1784
1785 /*
1786 * Obtains the list of regulators required for a particular generation of the
1787 * IP block.
1788 *
1789 * This would've been nice to do simply by providing static tables for use
1790 * with the regulator_bulk_*() API, but unfortunately Tegra30 is a bit quirky
1791 * in that it has two pairs or AVDD_PEX and VDD_PEX supplies (PEXA and PEXB)
1792 * and either seems to be optional depending on which ports are being used.
1793 */
1794 static int tegra_pcie_get_regulators(struct tegra_pcie *pcie, u32 lane_mask)
1795 {
1796 struct device *dev = pcie->dev;
1797 struct device_node *np = dev->of_node;
1798 unsigned int i = 0;
1799
1800 if (of_device_is_compatible(np, "nvidia,tegra186-pcie")) {
1801 pcie->num_supplies = 4;
1802
1803 pcie->supplies = devm_kcalloc(pcie->dev, pcie->num_supplies,
1804 sizeof(*pcie->supplies),
1805 GFP_KERNEL);
1806 if (!pcie->supplies)
1807 return -ENOMEM;
1808
1809 pcie->supplies[i++].supply = "dvdd-pex";
1810 pcie->supplies[i++].supply = "hvdd-pex-pll";
1811 pcie->supplies[i++].supply = "hvdd-pex";
1812 pcie->supplies[i++].supply = "vddio-pexctl-aud";
1813 } else if (of_device_is_compatible(np, "nvidia,tegra210-pcie")) {
1814 pcie->num_supplies = 6;
1815
1816 pcie->supplies = devm_kcalloc(pcie->dev, pcie->num_supplies,
1817 sizeof(*pcie->supplies),
1818 GFP_KERNEL);
1819 if (!pcie->supplies)
1820 return -ENOMEM;
1821
1822 pcie->supplies[i++].supply = "avdd-pll-uerefe";
1823 pcie->supplies[i++].supply = "hvddio-pex";
1824 pcie->supplies[i++].supply = "dvddio-pex";
1825 pcie->supplies[i++].supply = "dvdd-pex-pll";
1826 pcie->supplies[i++].supply = "hvdd-pex-pll-e";
1827 pcie->supplies[i++].supply = "vddio-pex-ctl";
1828 } else if (of_device_is_compatible(np, "nvidia,tegra124-pcie")) {
1829 pcie->num_supplies = 7;
1830
1831 pcie->supplies = devm_kcalloc(dev, pcie->num_supplies,
1832 sizeof(*pcie->supplies),
1833 GFP_KERNEL);
1834 if (!pcie->supplies)
1835 return -ENOMEM;
1836
1837 pcie->supplies[i++].supply = "avddio-pex";
1838 pcie->supplies[i++].supply = "dvddio-pex";
1839 pcie->supplies[i++].supply = "avdd-pex-pll";
1840 pcie->supplies[i++].supply = "hvdd-pex";
1841 pcie->supplies[i++].supply = "hvdd-pex-pll-e";
1842 pcie->supplies[i++].supply = "vddio-pex-ctl";
1843 pcie->supplies[i++].supply = "avdd-pll-erefe";
1844 } else if (of_device_is_compatible(np, "nvidia,tegra30-pcie")) {
1845 bool need_pexa = false, need_pexb = false;
1846
1847 /* VDD_PEXA and AVDD_PEXA supply lanes 0 to 3 */
1848 if (lane_mask & 0x0f)
1849 need_pexa = true;
1850
1851 /* VDD_PEXB and AVDD_PEXB supply lanes 4 to 5 */
1852 if (lane_mask & 0x30)
1853 need_pexb = true;
1854
1855 pcie->num_supplies = 4 + (need_pexa ? 2 : 0) +
1856 (need_pexb ? 2 : 0);
1857
1858 pcie->supplies = devm_kcalloc(dev, pcie->num_supplies,
1859 sizeof(*pcie->supplies),
1860 GFP_KERNEL);
1861 if (!pcie->supplies)
1862 return -ENOMEM;
1863
1864 pcie->supplies[i++].supply = "avdd-pex-pll";
1865 pcie->supplies[i++].supply = "hvdd-pex";
1866 pcie->supplies[i++].supply = "vddio-pex-ctl";
1867 pcie->supplies[i++].supply = "avdd-plle";
1868
1869 if (need_pexa) {
1870 pcie->supplies[i++].supply = "avdd-pexa";
1871 pcie->supplies[i++].supply = "vdd-pexa";
1872 }
1873
1874 if (need_pexb) {
1875 pcie->supplies[i++].supply = "avdd-pexb";
1876 pcie->supplies[i++].supply = "vdd-pexb";
1877 }
1878 } else if (of_device_is_compatible(np, "nvidia,tegra20-pcie")) {
1879 pcie->num_supplies = 5;
1880
1881 pcie->supplies = devm_kcalloc(dev, pcie->num_supplies,
1882 sizeof(*pcie->supplies),
1883 GFP_KERNEL);
1884 if (!pcie->supplies)
1885 return -ENOMEM;
1886
1887 pcie->supplies[0].supply = "avdd-pex";
1888 pcie->supplies[1].supply = "vdd-pex";
1889 pcie->supplies[2].supply = "avdd-pex-pll";
1890 pcie->supplies[3].supply = "avdd-plle";
1891 pcie->supplies[4].supply = "vddio-pex-clk";
1892 }
1893
1894 if (of_regulator_bulk_available(dev->of_node, pcie->supplies,
1895 pcie->num_supplies))
1896 return devm_regulator_bulk_get(dev, pcie->num_supplies,
1897 pcie->supplies);
1898
1899 /*
1900 * If not all regulators are available for this new scheme, assume
1901 * that the device tree complies with an older version of the device
1902 * tree binding.
1903 */
1904 dev_info(dev, "using legacy DT binding for power supplies\n");
1905
1906 devm_kfree(dev, pcie->supplies);
1907 pcie->num_supplies = 0;
1908
1909 return tegra_pcie_get_legacy_regulators(pcie);
1910 }
1911
1912 static int tegra_pcie_parse_dt(struct tegra_pcie *pcie)
1913 {
1914 struct device *dev = pcie->dev;
1915 struct device_node *np = dev->of_node, *port;
1916 const struct tegra_pcie_soc *soc = pcie->soc;
1917 struct of_pci_range_parser parser;
1918 struct of_pci_range range;
1919 u32 lanes = 0, mask = 0;
1920 unsigned int lane = 0;
1921 struct resource res;
1922 int err;
1923
1924 if (of_pci_range_parser_init(&parser, np)) {
1925 dev_err(dev, "missing \"ranges\" property\n");
1926 return -EINVAL;
1927 }
1928
1929 for_each_of_pci_range(&parser, &range) {
1930 err = of_pci_range_to_resource(&range, np, &res);
1931 if (err < 0)
1932 return err;
1933
1934 switch (res.flags & IORESOURCE_TYPE_BITS) {
1935 case IORESOURCE_IO:
1936 /* Track the bus -> CPU I/O mapping offset. */
1937 pcie->offset.io = res.start - range.pci_addr;
1938
1939 memcpy(&pcie->pio, &res, sizeof(res));
1940 pcie->pio.name = np->full_name;
1941
1942 /*
1943 * The Tegra PCIe host bridge uses this to program the
1944 * mapping of the I/O space to the physical address,
1945 * so we override the .start and .end fields here that
1946 * of_pci_range_to_resource() converted to I/O space.
1947 * We also set the IORESOURCE_MEM type to clarify that
1948 * the resource is in the physical memory space.
1949 */
1950 pcie->io.start = range.cpu_addr;
1951 pcie->io.end = range.cpu_addr + range.size - 1;
1952 pcie->io.flags = IORESOURCE_MEM;
1953 pcie->io.name = "I/O";
1954
1955 memcpy(&res, &pcie->io, sizeof(res));
1956 break;
1957
1958 case IORESOURCE_MEM:
1959 /*
1960 * Track the bus -> CPU memory mapping offset. This
1961 * assumes that the prefetchable and non-prefetchable
1962 * regions will be the last of type IORESOURCE_MEM in
1963 * the ranges property.
1964 * */
1965 pcie->offset.mem = res.start - range.pci_addr;
1966
1967 if (res.flags & IORESOURCE_PREFETCH) {
1968 memcpy(&pcie->prefetch, &res, sizeof(res));
1969 pcie->prefetch.name = "prefetchable";
1970 } else {
1971 memcpy(&pcie->mem, &res, sizeof(res));
1972 pcie->mem.name = "non-prefetchable";
1973 }
1974 break;
1975 }
1976 }
1977
1978 err = of_pci_parse_bus_range(np, &pcie->busn);
1979 if (err < 0) {
1980 dev_err(dev, "failed to parse ranges property: %d\n", err);
1981 pcie->busn.name = np->name;
1982 pcie->busn.start = 0;
1983 pcie->busn.end = 0xff;
1984 pcie->busn.flags = IORESOURCE_BUS;
1985 }
1986
1987 /* parse root ports */
1988 for_each_child_of_node(np, port) {
1989 struct tegra_pcie_port *rp;
1990 unsigned int index;
1991 u32 value;
1992
1993 err = of_pci_get_devfn(port);
1994 if (err < 0) {
1995 dev_err(dev, "failed to parse address: %d\n", err);
1996 return err;
1997 }
1998
1999 index = PCI_SLOT(err);
2000
2001 if (index < 1 || index > soc->num_ports) {
2002 dev_err(dev, "invalid port number: %d\n", index);
2003 return -EINVAL;
2004 }
2005
2006 index--;
2007
2008 err = of_property_read_u32(port, "nvidia,num-lanes", &value);
2009 if (err < 0) {
2010 dev_err(dev, "failed to parse # of lanes: %d\n",
2011 err);
2012 return err;
2013 }
2014
2015 if (value > 16) {
2016 dev_err(dev, "invalid # of lanes: %u\n", value);
2017 return -EINVAL;
2018 }
2019
2020 lanes |= value << (index << 3);
2021
2022 if (!of_device_is_available(port)) {
2023 lane += value;
2024 continue;
2025 }
2026
2027 mask |= ((1 << value) - 1) << lane;
2028 lane += value;
2029
2030 rp = devm_kzalloc(dev, sizeof(*rp), GFP_KERNEL);
2031 if (!rp)
2032 return -ENOMEM;
2033
2034 err = of_address_to_resource(port, 0, &rp->regs);
2035 if (err < 0) {
2036 dev_err(dev, "failed to parse address: %d\n", err);
2037 return err;
2038 }
2039
2040 INIT_LIST_HEAD(&rp->list);
2041 rp->index = index;
2042 rp->lanes = value;
2043 rp->pcie = pcie;
2044 rp->np = port;
2045
2046 rp->base = devm_pci_remap_cfg_resource(dev, &rp->regs);
2047 if (IS_ERR(rp->base))
2048 return PTR_ERR(rp->base);
2049
2050 list_add_tail(&rp->list, &pcie->ports);
2051 }
2052
2053 err = tegra_pcie_get_xbar_config(pcie, lanes, &pcie->xbar_config);
2054 if (err < 0) {
2055 dev_err(dev, "invalid lane configuration\n");
2056 return err;
2057 }
2058
2059 err = tegra_pcie_get_regulators(pcie, mask);
2060 if (err < 0)
2061 return err;
2062
2063 return 0;
2064 }
2065
2066 /*
2067 * FIXME: If there are no PCIe cards attached, then calling this function
2068 * can result in the increase of the bootup time as there are big timeout
2069 * loops.
2070 */
2071 #define TEGRA_PCIE_LINKUP_TIMEOUT 200 /* up to 1.2 seconds */
/*
 * tegra_pcie_port_check_link() - wait for a root port's link to train
 * @port: root port to check
 *
 * Overrides presence detection first so training proceeds regardless of
 * the sideband presence signal. Then polls, up to
 * TEGRA_PCIE_LINKUP_TIMEOUT iterations (1-2 ms apart each), first for
 * the data link layer to come up in RP_VEND_XP, then for DL_LINK_ACTIVE
 * in RP_LINK_CONTROL_STATUS. On a timeout the port is reset and the
 * whole sequence retried, up to three attempts in total.
 *
 * Returns true if the link came up, false otherwise.
 */
static bool tegra_pcie_port_check_link(struct tegra_pcie_port *port)
{
	struct device *dev = port->pcie->dev;
	unsigned int retries = 3;
	unsigned long value;

	/* override presence detection */
	value = readl(port->base + RP_PRIV_MISC);
	value &= ~RP_PRIV_MISC_PRSNT_MAP_EP_ABSNT;
	value |= RP_PRIV_MISC_PRSNT_MAP_EP_PRSNT;
	writel(value, port->base + RP_PRIV_MISC);

	do {
		unsigned int timeout = TEGRA_PCIE_LINKUP_TIMEOUT;

		do {
			value = readl(port->base + RP_VEND_XP);

			if (value & RP_VEND_XP_DL_UP)
				break;

			usleep_range(1000, 2000);
		} while (--timeout);

		if (!timeout) {
			dev_err(dev, "link %u down, retrying\n", port->index);
			goto retry;
		}

		timeout = TEGRA_PCIE_LINKUP_TIMEOUT;

		do {
			value = readl(port->base + RP_LINK_CONTROL_STATUS);

			if (value & RP_LINK_CONTROL_STATUS_DL_LINK_ACTIVE)
				return true;

			usleep_range(1000, 2000);
		} while (--timeout);

retry:
		tegra_pcie_port_reset(port);
	} while (--retries);

	return false;
}
2118
/*
 * tegra_pcie_enable_ports() - bring up every root port and prune dead links
 * @pcie: Tegra PCIe controller state
 *
 * Enables each port and waits for its link to train; ports whose link
 * never comes up are disabled and freed. The _safe iterator is required
 * because tegra_pcie_port_free() removes the port from the list.
 */
static void tegra_pcie_enable_ports(struct tegra_pcie *pcie)
{
	struct device *dev = pcie->dev;
	struct tegra_pcie_port *port, *tmp;

	list_for_each_entry_safe(port, tmp, &pcie->ports, list) {
		dev_info(dev, "probing port %u, using %u lanes\n",
			 port->index, port->lanes);

		tegra_pcie_port_enable(port);

		if (tegra_pcie_port_check_link(port))
			continue;

		dev_info(dev, "link %u down, ignoring\n", port->index);

		tegra_pcie_port_disable(port);
		tegra_pcie_port_free(port);
	}
}
2139
2140 static const struct tegra_pcie_soc tegra20_pcie = {
2141 .num_ports = 2,
2142 .msi_base_shift = 0,
2143 .pads_pll_ctl = PADS_PLL_CTL_TEGRA20,
2144 .tx_ref_sel = PADS_PLL_CTL_TXCLKREF_DIV10,
2145 .pads_refclk_cfg0 = 0xfa5cfa5c,
2146 .has_pex_clkreq_en = false,
2147 .has_pex_bias_ctrl = false,
2148 .has_intr_prsnt_sense = false,
2149 .has_cml_clk = false,
2150 .has_gen2 = false,
2151 .force_pca_enable = false,
2152 .program_uphy = true,
2153 };
2154
2155 static const struct tegra_pcie_soc tegra30_pcie = {
2156 .num_ports = 3,
2157 .msi_base_shift = 8,
2158 .pads_pll_ctl = PADS_PLL_CTL_TEGRA30,
2159 .tx_ref_sel = PADS_PLL_CTL_TXCLKREF_BUF_EN,
2160 .pads_refclk_cfg0 = 0xfa5cfa5c,
2161 .pads_refclk_cfg1 = 0xfa5cfa5c,
2162 .has_pex_clkreq_en = true,
2163 .has_pex_bias_ctrl = true,
2164 .has_intr_prsnt_sense = true,
2165 .has_cml_clk = true,
2166 .has_gen2 = false,
2167 .force_pca_enable = false,
2168 .program_uphy = true,
2169 };
2170
2171 static const struct tegra_pcie_soc tegra124_pcie = {
2172 .num_ports = 2,
2173 .msi_base_shift = 8,
2174 .pads_pll_ctl = PADS_PLL_CTL_TEGRA30,
2175 .tx_ref_sel = PADS_PLL_CTL_TXCLKREF_BUF_EN,
2176 .pads_refclk_cfg0 = 0x44ac44ac,
2177 .has_pex_clkreq_en = true,
2178 .has_pex_bias_ctrl = true,
2179 .has_intr_prsnt_sense = true,
2180 .has_cml_clk = true,
2181 .has_gen2 = true,
2182 .force_pca_enable = false,
2183 .program_uphy = true,
2184 };
2185
2186 static const struct tegra_pcie_soc tegra210_pcie = {
2187 .num_ports = 2,
2188 .msi_base_shift = 8,
2189 .pads_pll_ctl = PADS_PLL_CTL_TEGRA30,
2190 .tx_ref_sel = PADS_PLL_CTL_TXCLKREF_BUF_EN,
2191 .pads_refclk_cfg0 = 0x90b890b8,
2192 .has_pex_clkreq_en = true,
2193 .has_pex_bias_ctrl = true,
2194 .has_intr_prsnt_sense = true,
2195 .has_cml_clk = true,
2196 .has_gen2 = true,
2197 .force_pca_enable = true,
2198 .program_uphy = true,
2199 };
2200
/*
 * Tegra186 SoC data: three root ports (two refclk config words), no CML
 * clock, and — uniquely in this table — program_uphy is false: the UPHY
 * lanes are brought up externally (by the PHY driver/firmware) rather
 * than by this driver.
 */
static const struct tegra_pcie_soc tegra186_pcie = {
	.num_ports = 3,
	.msi_base_shift = 8,
	.pads_pll_ctl = PADS_PLL_CTL_TEGRA30,
	.tx_ref_sel = PADS_PLL_CTL_TXCLKREF_BUF_EN,
	.pads_refclk_cfg0 = 0x80b880b8,
	.pads_refclk_cfg1 = 0x000480b8,
	.has_pex_clkreq_en = true,
	.has_pex_bias_ctrl = true,
	.has_intr_prsnt_sense = true,
	.has_cml_clk = false,
	.has_gen2 = true,
	.force_pca_enable = false,
	.program_uphy = false,
};
2216
/*
 * Device-tree match table, newest SoC first; each entry's .data points at
 * the per-SoC configuration retrieved via of_device_get_match_data() in
 * probe.  Terminated by the empty sentinel entry.
 */
static const struct of_device_id tegra_pcie_of_match[] = {
	{ .compatible = "nvidia,tegra186-pcie", .data = &tegra186_pcie },
	{ .compatible = "nvidia,tegra210-pcie", .data = &tegra210_pcie },
	{ .compatible = "nvidia,tegra124-pcie", .data = &tegra124_pcie },
	{ .compatible = "nvidia,tegra30-pcie", .data = &tegra30_pcie },
	{ .compatible = "nvidia,tegra20-pcie", .data = &tegra20_pcie },
	{ },
};
2225
2226 static void *tegra_pcie_ports_seq_start(struct seq_file *s, loff_t *pos)
2227 {
2228 struct tegra_pcie *pcie = s->private;
2229
2230 if (list_empty(&pcie->ports))
2231 return NULL;
2232
2233 seq_printf(s, "Index Status\n");
2234
2235 return seq_list_start(&pcie->ports, *pos);
2236 }
2237
2238 static void *tegra_pcie_ports_seq_next(struct seq_file *s, void *v, loff_t *pos)
2239 {
2240 struct tegra_pcie *pcie = s->private;
2241
2242 return seq_list_next(v, &pcie->ports, pos);
2243 }
2244
/* seq_file stop callback: nothing to release, iteration holds no locks. */
static void tegra_pcie_ports_seq_stop(struct seq_file *s, void *v)
{
}
2248
2249 static int tegra_pcie_ports_seq_show(struct seq_file *s, void *v)
2250 {
2251 bool up = false, active = false;
2252 struct tegra_pcie_port *port;
2253 unsigned int value;
2254
2255 port = list_entry(v, struct tegra_pcie_port, list);
2256
2257 value = readl(port->base + RP_VEND_XP);
2258
2259 if (value & RP_VEND_XP_DL_UP)
2260 up = true;
2261
2262 value = readl(port->base + RP_LINK_CONTROL_STATUS);
2263
2264 if (value & RP_LINK_CONTROL_STATUS_DL_LINK_ACTIVE)
2265 active = true;
2266
2267 seq_printf(s, "%2u ", port->index);
2268
2269 if (up)
2270 seq_printf(s, "up");
2271
2272 if (active) {
2273 if (up)
2274 seq_printf(s, ", ");
2275
2276 seq_printf(s, "active");
2277 }
2278
2279 seq_printf(s, "\n");
2280 return 0;
2281 }
2282
/* seq_file iterator ops backing the debugfs "ports" file. */
static const struct seq_operations tegra_pcie_ports_seq_ops = {
	.start = tegra_pcie_ports_seq_start,
	.next = tegra_pcie_ports_seq_next,
	.stop = tegra_pcie_ports_seq_stop,
	.show = tegra_pcie_ports_seq_show,
};
2289
2290 static int tegra_pcie_ports_open(struct inode *inode, struct file *file)
2291 {
2292 struct tegra_pcie *pcie = inode->i_private;
2293 struct seq_file *s;
2294 int err;
2295
2296 err = seq_open(file, &tegra_pcie_ports_seq_ops);
2297 if (err)
2298 return err;
2299
2300 s = file->private_data;
2301 s->private = pcie;
2302
2303 return 0;
2304 }
2305
/* file_operations for debugfs "ports": standard seq_file plumbing. */
static const struct file_operations tegra_pcie_ports_ops = {
	.owner = THIS_MODULE,
	.open = tegra_pcie_ports_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release,
};
2313
2314 static int tegra_pcie_debugfs_init(struct tegra_pcie *pcie)
2315 {
2316 struct dentry *file;
2317
2318 pcie->debugfs = debugfs_create_dir("pcie", NULL);
2319 if (!pcie->debugfs)
2320 return -ENOMEM;
2321
2322 file = debugfs_create_file("ports", S_IFREG | S_IRUGO, pcie->debugfs,
2323 pcie, &tegra_pcie_ports_ops);
2324 if (!file)
2325 goto remove;
2326
2327 return 0;
2328
2329 remove:
2330 debugfs_remove_recursive(pcie->debugfs);
2331 pcie->debugfs = NULL;
2332 return -ENOMEM;
2333 }
2334
/*
 * Platform probe: allocate the host bridge, parse DT, acquire and program
 * hardware resources, optionally enable MSI, bring up the root ports and
 * register/scan the PCI bus.  The acquisition order matters and failures
 * unwind via the goto labels at the bottom in reverse order.
 *
 * Returns 0 on success or a negative errno; resources taken before the
 * failing step are released on the error paths.
 */
static int tegra_pcie_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct pci_host_bridge *host;
	struct tegra_pcie *pcie;
	struct pci_bus *child;
	int err;

	/* pcie is carved out of the bridge's private area (devm-managed) */
	host = devm_pci_alloc_host_bridge(dev, sizeof(*pcie));
	if (!host)
		return -ENOMEM;

	pcie = pci_host_bridge_priv(host);

	/* per-SoC configuration selected by the DT compatible string */
	pcie->soc = of_device_get_match_data(dev);
	INIT_LIST_HEAD(&pcie->buses);
	INIT_LIST_HEAD(&pcie->ports);
	pcie->dev = dev;

	err = tegra_pcie_parse_dt(pcie);
	if (err < 0)
		return err;

	err = tegra_pcie_get_resources(pcie);
	if (err < 0) {
		dev_err(dev, "failed to request resources: %d\n", err);
		return err;
	}

	err = tegra_pcie_enable_controller(pcie);
	if (err)
		goto put_resources;

	err = tegra_pcie_request_resources(pcie);
	if (err)
		goto put_resources;

	/* setup the AFI address translations */
	tegra_pcie_setup_translations(pcie);

	if (IS_ENABLED(CONFIG_PCI_MSI)) {
		err = tegra_pcie_enable_msi(pcie);
		if (err < 0) {
			dev_err(dev, "failed to enable MSI support: %d\n", err);
			goto put_resources;
		}
	}

	/* bring up the link on each available root port */
	tegra_pcie_enable_ports(pcie);

	pci_add_flags(PCI_REASSIGN_ALL_RSRC | PCI_REASSIGN_ALL_BUS);
	host->busnr = pcie->busn.start;
	host->dev.parent = &pdev->dev;
	host->ops = &tegra_pcie_ops;
	host->map_irq = tegra_pcie_map_irq;
	host->swizzle_irq = pci_common_swizzle;

	err = pci_scan_root_bus_bridge(host);
	if (err < 0) {
		dev_err(dev, "failed to register host: %d\n", err);
		goto disable_msi;
	}

	pci_bus_size_bridges(host->bus);
	pci_bus_assign_resources(host->bus);

	/* apply MPS/MRRS settings down each subordinate bus */
	list_for_each_entry(child, &host->bus->children, node)
		pcie_bus_configure_settings(child);

	pci_bus_add_devices(host->bus);

	/* debugfs is best-effort: a failure is logged but not fatal */
	if (IS_ENABLED(CONFIG_DEBUG_FS)) {
		err = tegra_pcie_debugfs_init(pcie);
		if (err < 0)
			dev_err(dev, "failed to setup debugfs: %d\n", err);
	}

	return 0;

disable_msi:
	if (IS_ENABLED(CONFIG_PCI_MSI))
		tegra_pcie_disable_msi(pcie);
put_resources:
	tegra_pcie_put_resources(pcie);
	return err;
}
2421
/*
 * Built-in platform driver (no remove callback); suppress_bind_attrs
 * prevents manual unbind via sysfs since the controller cannot be
 * torn down at runtime.
 */
static struct platform_driver tegra_pcie_driver = {
	.driver = {
		.name = "tegra-pcie",
		.of_match_table = tegra_pcie_of_match,
		.suppress_bind_attrs = true,
	},
	.probe = tegra_pcie_probe,
};
builtin_platform_driver(tegra_pcie_driver);