]> git.proxmox.com Git - mirror_ubuntu-artful-kernel.git/blob - drivers/pci/host/pci-tegra.c
Merge tag 'cleanup-for-3.17' of git://git.kernel.org/pub/scm/linux/kernel/git/arm...
[mirror_ubuntu-artful-kernel.git] / drivers / pci / host / pci-tegra.c
1 /*
2 * PCIe host controller driver for Tegra SoCs
3 *
4 * Copyright (c) 2010, CompuLab, Ltd.
5 * Author: Mike Rapoport <mike@compulab.co.il>
6 *
7 * Based on NVIDIA PCIe driver
8 * Copyright (c) 2008-2009, NVIDIA Corporation.
9 *
10 * Bits taken from arch/arm/mach-dove/pcie.c
11 *
12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of the GNU General Public License as published by
14 * the Free Software Foundation; either version 2 of the License, or
15 * (at your option) any later version.
16 *
17 * This program is distributed in the hope that it will be useful, but WITHOUT
18 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
19 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
20 * more details.
21 *
22 * You should have received a copy of the GNU General Public License along
23 * with this program; if not, write to the Free Software Foundation, Inc.,
24 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
25 */
26
27 #include <linux/clk.h>
28 #include <linux/delay.h>
29 #include <linux/export.h>
30 #include <linux/interrupt.h>
31 #include <linux/irq.h>
32 #include <linux/irqdomain.h>
33 #include <linux/kernel.h>
34 #include <linux/module.h>
35 #include <linux/msi.h>
36 #include <linux/of_address.h>
37 #include <linux/of_pci.h>
38 #include <linux/of_platform.h>
39 #include <linux/pci.h>
40 #include <linux/platform_device.h>
41 #include <linux/reset.h>
42 #include <linux/sizes.h>
43 #include <linux/slab.h>
44 #include <linux/vmalloc.h>
45 #include <linux/regulator/consumer.h>
46
47 #include <soc/tegra/cpuidle.h>
48 #include <soc/tegra/pmc.h>
49
50 #include <asm/mach/irq.h>
51 #include <asm/mach/map.h>
52 #include <asm/mach/pci.h>
53
54 #define INT_PCI_MSI_NR (8 * 32)
55
56 /* register definitions */
57
58 #define AFI_AXI_BAR0_SZ 0x00
59 #define AFI_AXI_BAR1_SZ 0x04
60 #define AFI_AXI_BAR2_SZ 0x08
61 #define AFI_AXI_BAR3_SZ 0x0c
62 #define AFI_AXI_BAR4_SZ 0x10
63 #define AFI_AXI_BAR5_SZ 0x14
64
65 #define AFI_AXI_BAR0_START 0x18
66 #define AFI_AXI_BAR1_START 0x1c
67 #define AFI_AXI_BAR2_START 0x20
68 #define AFI_AXI_BAR3_START 0x24
69 #define AFI_AXI_BAR4_START 0x28
70 #define AFI_AXI_BAR5_START 0x2c
71
72 #define AFI_FPCI_BAR0 0x30
73 #define AFI_FPCI_BAR1 0x34
74 #define AFI_FPCI_BAR2 0x38
75 #define AFI_FPCI_BAR3 0x3c
76 #define AFI_FPCI_BAR4 0x40
77 #define AFI_FPCI_BAR5 0x44
78
79 #define AFI_CACHE_BAR0_SZ 0x48
80 #define AFI_CACHE_BAR0_ST 0x4c
81 #define AFI_CACHE_BAR1_SZ 0x50
82 #define AFI_CACHE_BAR1_ST 0x54
83
84 #define AFI_MSI_BAR_SZ 0x60
85 #define AFI_MSI_FPCI_BAR_ST 0x64
86 #define AFI_MSI_AXI_BAR_ST 0x68
87
88 #define AFI_MSI_VEC0 0x6c
89 #define AFI_MSI_VEC1 0x70
90 #define AFI_MSI_VEC2 0x74
91 #define AFI_MSI_VEC3 0x78
92 #define AFI_MSI_VEC4 0x7c
93 #define AFI_MSI_VEC5 0x80
94 #define AFI_MSI_VEC6 0x84
95 #define AFI_MSI_VEC7 0x88
96
97 #define AFI_MSI_EN_VEC0 0x8c
98 #define AFI_MSI_EN_VEC1 0x90
99 #define AFI_MSI_EN_VEC2 0x94
100 #define AFI_MSI_EN_VEC3 0x98
101 #define AFI_MSI_EN_VEC4 0x9c
102 #define AFI_MSI_EN_VEC5 0xa0
103 #define AFI_MSI_EN_VEC6 0xa4
104 #define AFI_MSI_EN_VEC7 0xa8
105
106 #define AFI_CONFIGURATION 0xac
107 #define AFI_CONFIGURATION_EN_FPCI (1 << 0)
108
109 #define AFI_FPCI_ERROR_MASKS 0xb0
110
111 #define AFI_INTR_MASK 0xb4
112 #define AFI_INTR_MASK_INT_MASK (1 << 0)
113 #define AFI_INTR_MASK_MSI_MASK (1 << 8)
114
115 #define AFI_INTR_CODE 0xb8
116 #define AFI_INTR_CODE_MASK 0xf
117 #define AFI_INTR_AXI_SLAVE_ERROR 1
118 #define AFI_INTR_AXI_DECODE_ERROR 2
119 #define AFI_INTR_TARGET_ABORT 3
120 #define AFI_INTR_MASTER_ABORT 4
121 #define AFI_INTR_INVALID_WRITE 5
122 #define AFI_INTR_LEGACY 6
123 #define AFI_INTR_FPCI_DECODE_ERROR 7
124
125 #define AFI_INTR_SIGNATURE 0xbc
126 #define AFI_UPPER_FPCI_ADDRESS 0xc0
127 #define AFI_SM_INTR_ENABLE 0xc4
128 #define AFI_SM_INTR_INTA_ASSERT (1 << 0)
129 #define AFI_SM_INTR_INTB_ASSERT (1 << 1)
130 #define AFI_SM_INTR_INTC_ASSERT (1 << 2)
131 #define AFI_SM_INTR_INTD_ASSERT (1 << 3)
132 #define AFI_SM_INTR_INTA_DEASSERT (1 << 4)
133 #define AFI_SM_INTR_INTB_DEASSERT (1 << 5)
134 #define AFI_SM_INTR_INTC_DEASSERT (1 << 6)
135 #define AFI_SM_INTR_INTD_DEASSERT (1 << 7)
136
137 #define AFI_AFI_INTR_ENABLE 0xc8
138 #define AFI_INTR_EN_INI_SLVERR (1 << 0)
139 #define AFI_INTR_EN_INI_DECERR (1 << 1)
140 #define AFI_INTR_EN_TGT_SLVERR (1 << 2)
141 #define AFI_INTR_EN_TGT_DECERR (1 << 3)
142 #define AFI_INTR_EN_TGT_WRERR (1 << 4)
143 #define AFI_INTR_EN_DFPCI_DECERR (1 << 5)
144 #define AFI_INTR_EN_AXI_DECERR (1 << 6)
145 #define AFI_INTR_EN_FPCI_TIMEOUT (1 << 7)
146 #define AFI_INTR_EN_PRSNT_SENSE (1 << 8)
147
148 #define AFI_PCIE_CONFIG 0x0f8
149 #define AFI_PCIE_CONFIG_PCIE_DISABLE(x) (1 << ((x) + 1))
150 #define AFI_PCIE_CONFIG_PCIE_DISABLE_ALL 0xe
151 #define AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_MASK (0xf << 20)
152 #define AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_SINGLE (0x0 << 20)
153 #define AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_420 (0x0 << 20)
154 #define AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_DUAL (0x1 << 20)
155 #define AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_222 (0x1 << 20)
156 #define AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_411 (0x2 << 20)
157
158 #define AFI_FUSE 0x104
159 #define AFI_FUSE_PCIE_T0_GEN2_DIS (1 << 2)
160
161 #define AFI_PEX0_CTRL 0x110
162 #define AFI_PEX1_CTRL 0x118
163 #define AFI_PEX2_CTRL 0x128
164 #define AFI_PEX_CTRL_RST (1 << 0)
165 #define AFI_PEX_CTRL_CLKREQ_EN (1 << 1)
166 #define AFI_PEX_CTRL_REFCLK_EN (1 << 3)
167
168 #define AFI_PEXBIAS_CTRL_0 0x168
169
170 #define RP_VEND_XP 0x00000F00
171 #define RP_VEND_XP_DL_UP (1 << 30)
172
173 #define RP_LINK_CONTROL_STATUS 0x00000090
174 #define RP_LINK_CONTROL_STATUS_DL_LINK_ACTIVE 0x20000000
175 #define RP_LINK_CONTROL_STATUS_LINKSTAT_MASK 0x3fff0000
176
177 #define PADS_CTL_SEL 0x0000009C
178
179 #define PADS_CTL 0x000000A0
180 #define PADS_CTL_IDDQ_1L (1 << 0)
181 #define PADS_CTL_TX_DATA_EN_1L (1 << 6)
182 #define PADS_CTL_RX_DATA_EN_1L (1 << 10)
183
184 #define PADS_PLL_CTL_TEGRA20 0x000000B8
185 #define PADS_PLL_CTL_TEGRA30 0x000000B4
186 #define PADS_PLL_CTL_RST_B4SM (1 << 1)
187 #define PADS_PLL_CTL_LOCKDET (1 << 8)
188 #define PADS_PLL_CTL_REFCLK_MASK (0x3 << 16)
189 #define PADS_PLL_CTL_REFCLK_INTERNAL_CML (0 << 16)
190 #define PADS_PLL_CTL_REFCLK_INTERNAL_CMOS (1 << 16)
191 #define PADS_PLL_CTL_REFCLK_EXTERNAL (2 << 16)
192 #define PADS_PLL_CTL_TXCLKREF_MASK (0x1 << 20)
193 #define PADS_PLL_CTL_TXCLKREF_DIV10 (0 << 20)
194 #define PADS_PLL_CTL_TXCLKREF_DIV5 (1 << 20)
195 #define PADS_PLL_CTL_TXCLKREF_BUF_EN (1 << 22)
196
197 #define PADS_REFCLK_CFG0 0x000000C8
198 #define PADS_REFCLK_CFG1 0x000000CC
199
200 /*
201 * Fields in PADS_REFCLK_CFG*. Those registers form an array of 16-bit
202 * entries, one entry per PCIe port. These field definitions and desired
203 * values aren't in the TRM, but do come from NVIDIA.
204 */
205 #define PADS_REFCLK_CFG_TERM_SHIFT 2 /* 6:2 */
206 #define PADS_REFCLK_CFG_E_TERM_SHIFT 7
207 #define PADS_REFCLK_CFG_PREDI_SHIFT 8 /* 11:8 */
208 #define PADS_REFCLK_CFG_DRVI_SHIFT 12 /* 15:12 */
209
210 /* Default value provided by HW engineering is 0xfa5c */
211 #define PADS_REFCLK_CFG_VALUE \
212 ( \
213 (0x17 << PADS_REFCLK_CFG_TERM_SHIFT) | \
214 (0 << PADS_REFCLK_CFG_E_TERM_SHIFT) | \
215 (0xa << PADS_REFCLK_CFG_PREDI_SHIFT) | \
216 (0xf << PADS_REFCLK_CFG_DRVI_SHIFT) \
217 )
218
/* MSI controller state for the AFI's 8 x 32 MSI vectors */
struct tegra_msi {
	struct msi_chip chip;
	DECLARE_BITMAP(used, INT_PCI_MSI_NR);	/* allocated vectors */
	struct irq_domain *domain;
	unsigned long pages;	/* MSI target memory — allocation site outside this view, TODO confirm */
	struct mutex lock;	/* protects 'used' (see tegra_msi_alloc/free) */
	int irq;		/* Linux IRQ delivering MSIs from the AFI */
};
227
/* used to differentiate between Tegra SoC generations */
struct tegra_pcie_soc_data {
	unsigned int num_ports;		/* number of root ports on this SoC */
	unsigned int msi_base_shift;	/* MSI base address shift — used outside this view */
	u32 pads_pll_ctl;		/* offset of the PADS PLL control register */
	u32 tx_ref_sel;			/* TXCLKREF divider selection for the PHY PLL */
	bool has_pex_clkreq_en;		/* CLKREQ enable bit exists in AFI_PEXn_CTRL */
	bool has_pex_bias_ctrl;		/* AFI_PEXBIAS_CTRL_0 register present */
	bool has_intr_prsnt_sense;	/* PRSNT_SENSE interrupt available */
	bool has_avdd_supply;		/* separate AVDD regulator required */
	bool has_cml_clk;		/* separate CML clock required */
};
240
/* convert an msi_chip pointer back to its containing tegra_msi */
static inline struct tegra_msi *to_tegra_msi(struct msi_chip *chip)
{
	return container_of(chip, struct tegra_msi, chip);
}
245
/* driver-private state for one PCIe host controller instance */
struct tegra_pcie {
	struct device *dev;

	void __iomem *pads;	/* PADS (PHY) register window */
	void __iomem *afi;	/* AFI (AXI-to-FPCI bridge) register window */
	int irq;		/* AFI error interrupt */

	struct list_head buses;	/* tegra_pcie_bus config-space mappings */
	struct resource *cs;	/* configuration-space aperture ("cs") */

	struct resource io;		/* downstream I/O window */
	struct resource mem;		/* non-prefetchable memory window */
	struct resource prefetch;	/* prefetchable memory window */
	struct resource busn;		/* bus number range */

	struct clk *pex_clk;
	struct clk *afi_clk;
	struct clk *pll_e;
	struct clk *cml_clk;	/* only when soc_data->has_cml_clk */

	struct reset_control *pex_rst;
	struct reset_control *afi_rst;
	struct reset_control *pcie_xrst;

	struct tegra_msi msi;

	struct list_head ports;	/* tegra_pcie_port instances */
	unsigned int num_ports;
	u32 xbar_config;	/* lane multiplexing (AFI_PCIE_CONFIG XBAR bits) */

	struct regulator *pex_clk_supply;
	struct regulator *vdd_supply;
	struct regulator *avdd_supply;	/* only when soc_data->has_avdd_supply */

	const struct tegra_pcie_soc_data *soc_data;
};
282
/* state for one root port (one AFI_PEXn_CTRL instance) */
struct tegra_pcie_port {
	struct tegra_pcie *pcie;	/* parent controller */
	struct list_head list;		/* entry in tegra_pcie.ports */
	struct resource regs;		/* root port register window */
	void __iomem *base;		/* mapped root port registers */
	unsigned int index;		/* port number (0-based) */
	unsigned int lanes;		/* number of lanes */
};
291
/* per-bus 1 MiB virtual mapping of configuration space */
struct tegra_pcie_bus {
	struct vm_struct *area;	/* VM area built by tegra_pcie_bus_alloc() */
	struct list_head list;	/* entry in tegra_pcie.buses */
	unsigned int nr;	/* bus number */
};
297
/* retrieve the driver-private tegra_pcie from the ARM pci_sys_data */
static inline struct tegra_pcie *sys_to_pcie(struct pci_sys_data *sys)
{
	return sys->private_data;
}
302
/* write @value to the AFI register at @offset */
static inline void afi_writel(struct tegra_pcie *pcie, u32 value,
			      unsigned long offset)
{
	writel(value, pcie->afi + offset);
}
308
/* read the AFI register at @offset */
static inline u32 afi_readl(struct tegra_pcie *pcie, unsigned long offset)
{
	return readl(pcie->afi + offset);
}
313
/* write @value to the PADS (PHY) register at @offset */
static inline void pads_writel(struct tegra_pcie *pcie, u32 value,
			       unsigned long offset)
{
	writel(value, pcie->pads + offset);
}
319
/* read the PADS (PHY) register at @offset */
static inline u32 pads_readl(struct tegra_pcie *pcie, unsigned long offset)
{
	return readl(pcie->pads + offset);
}
324
325 /*
326 * The configuration space mapping on Tegra is somewhat similar to the ECAM
327 * defined by PCIe. However it deviates a bit in how the 4 bits for extended
328 * register accesses are mapped:
329 *
330 * [27:24] extended register number
331 * [23:16] bus number
332 * [15:11] device number
333 * [10: 8] function number
334 * [ 7: 0] register number
335 *
336 * Mapping the whole extended configuration space would require 256 MiB of
337 * virtual address space, only a small part of which will actually be used.
338 * To work around this, a 1 MiB of virtual addresses are allocated per bus
 * when the bus is first accessed. When the physical range is mapped, the
 * bus number bits are hidden so that the extended register number bits
341 * appear as bits [19:16]. Therefore the virtual mapping looks like this:
342 *
343 * [19:16] extended register number
344 * [15:11] device number
345 * [10: 8] function number
346 * [ 7: 0] register number
347 *
348 * This is achieved by stitching together 16 chunks of 64 KiB of physical
349 * address space via the MMU.
350 */
/*
 * Compute the offset of a configuration access inside the remapped
 * 1 MiB per-bus window: bits [19:16] carry the extended register
 * number, [15:11] the device, [10:8] the function and [7:0] the
 * dword-aligned register number (see the mapping description above).
 */
static unsigned long tegra_pcie_conf_offset(unsigned int devfn, int where)
{
	unsigned long offset;

	offset = (where & 0xf00) << 8;		/* extended register bits */
	offset |= PCI_SLOT(devfn) << 11;	/* device number */
	offset |= PCI_FUNC(devfn) << 8;		/* function number */
	offset |= where & 0xfc;			/* dword-aligned register */

	return offset;
}
356
/*
 * Create the 1 MiB virtual window for configuration accesses to @busnr by
 * stitching together the 16 discontiguous 64 KiB physical chunks described
 * in the comment above. Returns the new descriptor or an ERR_PTR().
 */
static struct tegra_pcie_bus *tegra_pcie_bus_alloc(struct tegra_pcie *pcie,
						   unsigned int busnr)
{
	/* device-type, non-executable, shared mapping attributes */
	pgprot_t prot = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY | L_PTE_XN |
			L_PTE_MT_DEV_SHARED | L_PTE_SHARED;
	phys_addr_t cs = pcie->cs->start;
	struct tegra_pcie_bus *bus;
	unsigned int i;
	int err;

	bus = kzalloc(sizeof(*bus), GFP_KERNEL);
	if (!bus)
		return ERR_PTR(-ENOMEM);

	INIT_LIST_HEAD(&bus->list);
	bus->nr = busnr;

	/* allocate 1 MiB of virtual addresses */
	bus->area = get_vm_area(SZ_1M, VM_IOREMAP);
	if (!bus->area) {
		err = -ENOMEM;
		goto free;
	}

	/* map each of the 16 chunks of 64 KiB each */
	for (i = 0; i < 16; i++) {
		unsigned long virt = (unsigned long)bus->area->addr +
				     i * SZ_64K;
		/* chunk i holds extended-register page i for this bus */
		phys_addr_t phys = cs + i * SZ_1M + busnr * SZ_64K;

		err = ioremap_page_range(virt, virt + SZ_64K, phys, prot);
		if (err < 0) {
			dev_err(pcie->dev, "ioremap_page_range() failed: %d\n",
				err);
			goto unmap;
		}
	}

	return bus;

unmap:
	vunmap(bus->area->addr);
free:
	kfree(bus);
	return ERR_PTR(err);
}
403
404 /*
405 * Look up a virtual address mapping for the specified bus number. If no such
406 * mapping exists, try to create one.
407 */
408 static void __iomem *tegra_pcie_bus_map(struct tegra_pcie *pcie,
409 unsigned int busnr)
410 {
411 struct tegra_pcie_bus *bus;
412
413 list_for_each_entry(bus, &pcie->buses, list)
414 if (bus->nr == busnr)
415 return (void __iomem *)bus->area->addr;
416
417 bus = tegra_pcie_bus_alloc(pcie, busnr);
418 if (IS_ERR(bus))
419 return NULL;
420
421 list_add_tail(&bus->list, &pcie->buses);
422
423 return (void __iomem *)bus->area->addr;
424 }
425
426 static void __iomem *tegra_pcie_conf_address(struct pci_bus *bus,
427 unsigned int devfn,
428 int where)
429 {
430 struct tegra_pcie *pcie = sys_to_pcie(bus->sysdata);
431 void __iomem *addr = NULL;
432
433 if (bus->number == 0) {
434 unsigned int slot = PCI_SLOT(devfn);
435 struct tegra_pcie_port *port;
436
437 list_for_each_entry(port, &pcie->ports, list) {
438 if (port->index + 1 == slot) {
439 addr = port->base + (where & ~3);
440 break;
441 }
442 }
443 } else {
444 addr = tegra_pcie_bus_map(pcie, bus->number);
445 if (!addr) {
446 dev_err(pcie->dev,
447 "failed to map cfg. space for bus %u\n",
448 bus->number);
449 return NULL;
450 }
451
452 addr += tegra_pcie_conf_offset(devfn, where);
453 }
454
455 return addr;
456 }
457
458 static int tegra_pcie_read_conf(struct pci_bus *bus, unsigned int devfn,
459 int where, int size, u32 *value)
460 {
461 void __iomem *addr;
462
463 addr = tegra_pcie_conf_address(bus, devfn, where);
464 if (!addr) {
465 *value = 0xffffffff;
466 return PCIBIOS_DEVICE_NOT_FOUND;
467 }
468
469 *value = readl(addr);
470
471 if (size == 1)
472 *value = (*value >> (8 * (where & 3))) & 0xff;
473 else if (size == 2)
474 *value = (*value >> (8 * (where & 3))) & 0xffff;
475
476 return PCIBIOS_SUCCESSFUL;
477 }
478
479 static int tegra_pcie_write_conf(struct pci_bus *bus, unsigned int devfn,
480 int where, int size, u32 value)
481 {
482 void __iomem *addr;
483 u32 mask, tmp;
484
485 addr = tegra_pcie_conf_address(bus, devfn, where);
486 if (!addr)
487 return PCIBIOS_DEVICE_NOT_FOUND;
488
489 if (size == 4) {
490 writel(value, addr);
491 return PCIBIOS_SUCCESSFUL;
492 }
493
494 if (size == 2)
495 mask = ~(0xffff << ((where & 0x3) * 8));
496 else if (size == 1)
497 mask = ~(0xff << ((where & 0x3) * 8));
498 else
499 return PCIBIOS_BAD_REGISTER_NUMBER;
500
501 tmp = readl(addr) & mask;
502 tmp |= value << ((where & 0x3) * 8);
503 writel(tmp, addr);
504
505 return PCIBIOS_SUCCESSFUL;
506 }
507
/* configuration-space accessors for all buses behind this host bridge */
static struct pci_ops tegra_pcie_ops = {
	.read = tegra_pcie_read_conf,
	.write = tegra_pcie_write_conf,
};
512
513 static unsigned long tegra_pcie_port_get_pex_ctrl(struct tegra_pcie_port *port)
514 {
515 unsigned long ret = 0;
516
517 switch (port->index) {
518 case 0:
519 ret = AFI_PEX0_CTRL;
520 break;
521
522 case 1:
523 ret = AFI_PEX1_CTRL;
524 break;
525
526 case 2:
527 ret = AFI_PEX2_CTRL;
528 break;
529 }
530
531 return ret;
532 }
533
/* pulse the port's PEX reset line (clearing AFI_PEX_CTRL_RST asserts it) */
static void tegra_pcie_port_reset(struct tegra_pcie_port *port)
{
	unsigned long ctrl = tegra_pcie_port_get_pex_ctrl(port);
	unsigned long value;

	/* pulse reset signal */
	value = afi_readl(port->pcie, ctrl);
	value &= ~AFI_PEX_CTRL_RST;
	afi_writel(port->pcie, value, ctrl);

	/* hold reset asserted for 1-2 ms */
	usleep_range(1000, 2000);

	value = afi_readl(port->pcie, ctrl);
	value |= AFI_PEX_CTRL_RST;
	afi_writel(port->pcie, value, ctrl);
}
550
/*
 * Enable a root port: turn on its reference clock (and CLKREQ where the
 * SoC supports it) and take the port through a reset pulse.
 */
static void tegra_pcie_port_enable(struct tegra_pcie_port *port)
{
	const struct tegra_pcie_soc_data *soc = port->pcie->soc_data;
	unsigned long ctrl = tegra_pcie_port_get_pex_ctrl(port);
	unsigned long value;

	/* enable reference clock */
	value = afi_readl(port->pcie, ctrl);
	value |= AFI_PEX_CTRL_REFCLK_EN;

	if (soc->has_pex_clkreq_en)
		value |= AFI_PEX_CTRL_CLKREQ_EN;

	afi_writel(port->pcie, value, ctrl);

	tegra_pcie_port_reset(port);
}
568
/* disable a root port: hold it in reset and gate its reference clock */
static void tegra_pcie_port_disable(struct tegra_pcie_port *port)
{
	unsigned long ctrl = tegra_pcie_port_get_pex_ctrl(port);
	unsigned long value;

	/* assert port reset */
	value = afi_readl(port->pcie, ctrl);
	value &= ~AFI_PEX_CTRL_RST;
	afi_writel(port->pcie, value, ctrl);

	/* disable reference clock */
	value = afi_readl(port->pcie, ctrl);
	value &= ~AFI_PEX_CTRL_REFCLK_EN;
	afi_writel(port->pcie, value, ctrl);
}
584
/* unmap and release a port's register window and free its descriptor */
static void tegra_pcie_port_free(struct tegra_pcie_port *port)
{
	struct tegra_pcie *pcie = port->pcie;

	devm_iounmap(pcie->dev, port->base);
	devm_release_mem_region(pcie->dev, port->regs.start,
				resource_size(&port->regs));
	list_del(&port->list);
	devm_kfree(pcie->dev, port);
}
595
596 static void tegra_pcie_fixup_bridge(struct pci_dev *dev)
597 {
598 u16 reg;
599
600 if ((dev->class >> 16) == PCI_BASE_CLASS_BRIDGE) {
601 pci_read_config_word(dev, PCI_COMMAND, &reg);
602 reg |= (PCI_COMMAND_IO | PCI_COMMAND_MEMORY |
603 PCI_COMMAND_MASTER | PCI_COMMAND_SERR);
604 pci_write_config_word(dev, PCI_COMMAND, reg);
605 }
606 }
607 DECLARE_PCI_FIXUP_FINAL(PCI_ANY_ID, PCI_ANY_ID, tegra_pcie_fixup_bridge);
608
/* Tegra PCIE root complex wrongly reports device class */
static void tegra_pcie_fixup_class(struct pci_dev *dev)
{
	/* force the class code to PCI-to-PCI bridge */
	dev->class = PCI_CLASS_BRIDGE_PCI << 8;
}
/* applied to the NVIDIA root port device IDs */
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_NVIDIA, 0x0bf0, tegra_pcie_fixup_class);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_NVIDIA, 0x0bf1, tegra_pcie_fixup_class);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_NVIDIA, 0x0e1c, tegra_pcie_fixup_class);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_NVIDIA, 0x0e1d, tegra_pcie_fixup_class);
618
/* Tegra PCIE requires relaxed ordering */
static void tegra_pcie_relax_enable(struct pci_dev *dev)
{
	/* set the Relaxed Ordering enable bit in Device Control */
	pcie_capability_set_word(dev, PCI_EXP_DEVCTL, PCI_EXP_DEVCTL_RELAX_EN);
}
DECLARE_PCI_FIXUP_FINAL(PCI_ANY_ID, PCI_ANY_ID, tegra_pcie_relax_enable);
625
/*
 * Register this controller's memory, prefetchable and bus-number
 * resources with the core and map its I/O window. Returns 1 so the
 * core proceeds with scanning this controller.
 */
static int tegra_pcie_setup(int nr, struct pci_sys_data *sys)
{
	struct tegra_pcie *pcie = sys_to_pcie(sys);

	pci_add_resource_offset(&sys->resources, &pcie->mem, sys->mem_offset);
	pci_add_resource_offset(&sys->resources, &pcie->prefetch,
				sys->mem_offset);
	pci_add_resource(&sys->resources, &pcie->busn);

	/* each controller gets a 64 KiB slice of the I/O space */
	pci_ioremap_io(nr * SZ_64K, pcie->io.start);

	return 1;
}
639
640 static int tegra_pcie_map_irq(const struct pci_dev *pdev, u8 slot, u8 pin)
641 {
642 struct tegra_pcie *pcie = sys_to_pcie(pdev->bus->sysdata);
643 int irq;
644
645 tegra_cpuidle_pcie_irqs_in_use();
646
647 irq = of_irq_parse_and_map_pci(pdev, slot, pin);
648 if (!irq)
649 irq = pcie->irq;
650
651 return irq;
652 }
653
/* attach the MSI chip to each newly-added bus when MSI is enabled */
static void tegra_pcie_add_bus(struct pci_bus *bus)
{
	if (IS_ENABLED(CONFIG_PCI_MSI)) {
		struct tegra_pcie *pcie = sys_to_pcie(bus->sysdata);

		bus->msi = &pcie->msi.chip;
	}
}
662
663 static struct pci_bus *tegra_pcie_scan_bus(int nr, struct pci_sys_data *sys)
664 {
665 struct tegra_pcie *pcie = sys_to_pcie(sys);
666 struct pci_bus *bus;
667
668 bus = pci_create_root_bus(pcie->dev, sys->busnr, &tegra_pcie_ops, sys,
669 &sys->resources);
670 if (!bus)
671 return NULL;
672
673 pci_scan_child_bus(bus);
674
675 return bus;
676 }
677
678 static irqreturn_t tegra_pcie_isr(int irq, void *arg)
679 {
680 const char *err_msg[] = {
681 "Unknown",
682 "AXI slave error",
683 "AXI decode error",
684 "Target abort",
685 "Master abort",
686 "Invalid write",
687 "Response decoding error",
688 "AXI response decoding error",
689 "Transaction timeout",
690 };
691 struct tegra_pcie *pcie = arg;
692 u32 code, signature;
693
694 code = afi_readl(pcie, AFI_INTR_CODE) & AFI_INTR_CODE_MASK;
695 signature = afi_readl(pcie, AFI_INTR_SIGNATURE);
696 afi_writel(pcie, 0, AFI_INTR_CODE);
697
698 if (code == AFI_INTR_LEGACY)
699 return IRQ_NONE;
700
701 if (code >= ARRAY_SIZE(err_msg))
702 code = 0;
703
704 /*
705 * do not pollute kernel log with master abort reports since they
706 * happen a lot during enumeration
707 */
708 if (code == AFI_INTR_MASTER_ABORT)
709 dev_dbg(pcie->dev, "%s, signature: %08x\n", err_msg[code],
710 signature);
711 else
712 dev_err(pcie->dev, "%s, signature: %08x\n", err_msg[code],
713 signature);
714
715 if (code == AFI_INTR_TARGET_ABORT || code == AFI_INTR_MASTER_ABORT ||
716 code == AFI_INTR_FPCI_DECODE_ERROR) {
717 u32 fpci = afi_readl(pcie, AFI_UPPER_FPCI_ADDRESS) & 0xff;
718 u64 address = (u64)fpci << 32 | (signature & 0xfffffffc);
719
720 if (code == AFI_INTR_MASTER_ABORT)
721 dev_dbg(pcie->dev, " FPCI address: %10llx\n", address);
722 else
723 dev_err(pcie->dev, " FPCI address: %10llx\n", address);
724 }
725
726 return IRQ_HANDLED;
727 }
728
/*
 * FPCI map is as follows:
 * - 0xfdfc000000: I/O space
 * - 0xfdfe000000: type 0 configuration space
 * - 0xfdff000000: type 1 configuration space
 * - 0xfe00000000: type 0 extended configuration space
 * - 0xfe10000000: type 1 extended configuration space
 *
 * Program the six AFI AXI BARs to translate the AXI apertures
 * (configuration space, I/O, prefetchable and non-prefetchable memory)
 * into the FPCI address map above. BAR sizes are in 4 KiB pages.
 */
static void tegra_pcie_setup_translations(struct tegra_pcie *pcie)
{
	u32 fpci_bar, size, axi_address;

	/* Bar 0: type 1 extended configuration space */
	fpci_bar = 0xfe100000;
	size = resource_size(pcie->cs);
	axi_address = pcie->cs->start;
	afi_writel(pcie, axi_address, AFI_AXI_BAR0_START);
	afi_writel(pcie, size >> 12, AFI_AXI_BAR0_SZ);
	afi_writel(pcie, fpci_bar, AFI_FPCI_BAR0);

	/* Bar 1: downstream IO bar */
	fpci_bar = 0xfdfc0000;
	size = resource_size(&pcie->io);
	axi_address = pcie->io.start;
	afi_writel(pcie, axi_address, AFI_AXI_BAR1_START);
	afi_writel(pcie, size >> 12, AFI_AXI_BAR1_SZ);
	afi_writel(pcie, fpci_bar, AFI_FPCI_BAR1);

	/* Bar 2: prefetchable memory BAR */
	fpci_bar = (((pcie->prefetch.start >> 12) & 0x0fffffff) << 4) | 0x1;
	size = resource_size(&pcie->prefetch);
	axi_address = pcie->prefetch.start;
	afi_writel(pcie, axi_address, AFI_AXI_BAR2_START);
	afi_writel(pcie, size >> 12, AFI_AXI_BAR2_SZ);
	afi_writel(pcie, fpci_bar, AFI_FPCI_BAR2);

	/* Bar 3: non prefetchable memory BAR */
	fpci_bar = (((pcie->mem.start >> 12) & 0x0fffffff) << 4) | 0x1;
	size = resource_size(&pcie->mem);
	axi_address = pcie->mem.start;
	afi_writel(pcie, axi_address, AFI_AXI_BAR3_START);
	afi_writel(pcie, size >> 12, AFI_AXI_BAR3_SZ);
	afi_writel(pcie, fpci_bar, AFI_FPCI_BAR3);

	/* NULL out the remaining BARs as they are not used */
	afi_writel(pcie, 0, AFI_AXI_BAR4_START);
	afi_writel(pcie, 0, AFI_AXI_BAR4_SZ);
	afi_writel(pcie, 0, AFI_FPCI_BAR4);

	afi_writel(pcie, 0, AFI_AXI_BAR5_START);
	afi_writel(pcie, 0, AFI_AXI_BAR5_SZ);
	afi_writel(pcie, 0, AFI_FPCI_BAR5);

	/* map all upstream transactions as uncached */
	afi_writel(pcie, PHYS_OFFSET, AFI_CACHE_BAR0_ST);
	afi_writel(pcie, 0, AFI_CACHE_BAR0_SZ);
	afi_writel(pcie, 0, AFI_CACHE_BAR1_ST);
	afi_writel(pcie, 0, AFI_CACHE_BAR1_SZ);

	/* MSI translations are setup only when needed */
	afi_writel(pcie, 0, AFI_MSI_FPCI_BAR_ST);
	afi_writel(pcie, 0, AFI_MSI_BAR_SZ);
	afi_writel(pcie, 0, AFI_MSI_AXI_BAR_ST);
	/*
	 * NOTE(review): AFI_MSI_BAR_SZ is written twice here; the second
	 * write looks like it may have been intended for another register.
	 * Harmless as-is (both write 0), but confirm against the TRM.
	 */
	afi_writel(pcie, 0, AFI_MSI_BAR_SZ);
}
794
/*
 * Bring up the AFI/PADS side of the controller: select the lane/port
 * multiplexing, configure and lock the PHY PLL, program the reference
 * clock drivers, enable FPCI operation and unmask the AFI error
 * interrupts. Returns 0 on success or -EBUSY if the PLL never locks.
 */
static int tegra_pcie_enable_controller(struct tegra_pcie *pcie)
{
	const struct tegra_pcie_soc_data *soc = pcie->soc_data;
	struct tegra_pcie_port *port;
	unsigned int timeout;
	unsigned long value;

	/* power down PCIe slot clock bias pad */
	if (soc->has_pex_bias_ctrl)
		afi_writel(pcie, 0, AFI_PEXBIAS_CTRL_0);

	/* configure mode and disable all ports */
	value = afi_readl(pcie, AFI_PCIE_CONFIG);
	value &= ~AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_MASK;
	value |= AFI_PCIE_CONFIG_PCIE_DISABLE_ALL | pcie->xbar_config;

	/* re-enable only the ports that were actually found */
	list_for_each_entry(port, &pcie->ports, list)
		value &= ~AFI_PCIE_CONFIG_PCIE_DISABLE(port->index);

	afi_writel(pcie, value, AFI_PCIE_CONFIG);

	/* NOTE(review): this disables Gen2 on port 0 — confirm intent */
	value = afi_readl(pcie, AFI_FUSE);
	value |= AFI_FUSE_PCIE_T0_GEN2_DIS;
	afi_writel(pcie, value, AFI_FUSE);

	/* initialize internal PHY, enable up to 16 PCIE lanes */
	pads_writel(pcie, 0x0, PADS_CTL_SEL);

	/* override IDDQ to 1 on all 4 lanes */
	value = pads_readl(pcie, PADS_CTL);
	value |= PADS_CTL_IDDQ_1L;
	pads_writel(pcie, value, PADS_CTL);

	/*
	 * Set up PHY PLL inputs select PLLE output as refclock,
	 * set TX ref sel to div10 (not div5).
	 */
	value = pads_readl(pcie, soc->pads_pll_ctl);
	value &= ~(PADS_PLL_CTL_REFCLK_MASK | PADS_PLL_CTL_TXCLKREF_MASK);
	value |= PADS_PLL_CTL_REFCLK_INTERNAL_CML | soc->tx_ref_sel;
	pads_writel(pcie, value, soc->pads_pll_ctl);

	/* take PLL out of reset */
	value = pads_readl(pcie, soc->pads_pll_ctl);
	value |= PADS_PLL_CTL_RST_B4SM;
	pads_writel(pcie, value, soc->pads_pll_ctl);

	/* Configure the reference clock driver */
	value = PADS_REFCLK_CFG_VALUE | (PADS_REFCLK_CFG_VALUE << 16);
	pads_writel(pcie, value, PADS_REFCLK_CFG0);
	if (soc->num_ports > 2)
		pads_writel(pcie, PADS_REFCLK_CFG_VALUE, PADS_REFCLK_CFG1);

	/* wait for the PLL to lock (up to ~300-600 ms) */
	timeout = 300;
	do {
		value = pads_readl(pcie, soc->pads_pll_ctl);
		usleep_range(1000, 2000);
		if (--timeout == 0) {
			pr_err("Tegra PCIe error: timeout waiting for PLL\n");
			return -EBUSY;
		}
	} while (!(value & PADS_PLL_CTL_LOCKDET));

	/* turn off IDDQ override */
	value = pads_readl(pcie, PADS_CTL);
	value &= ~PADS_CTL_IDDQ_1L;
	pads_writel(pcie, value, PADS_CTL);

	/* enable TX/RX data */
	value = pads_readl(pcie, PADS_CTL);
	value |= PADS_CTL_TX_DATA_EN_1L | PADS_CTL_RX_DATA_EN_1L;
	pads_writel(pcie, value, PADS_CTL);

	/* take the PCIe interface module out of reset */
	reset_control_deassert(pcie->pcie_xrst);

	/* finally enable PCIe */
	value = afi_readl(pcie, AFI_CONFIGURATION);
	value |= AFI_CONFIGURATION_EN_FPCI;
	afi_writel(pcie, value, AFI_CONFIGURATION);

	value = AFI_INTR_EN_INI_SLVERR | AFI_INTR_EN_INI_DECERR |
		AFI_INTR_EN_TGT_SLVERR | AFI_INTR_EN_TGT_DECERR |
		AFI_INTR_EN_TGT_WRERR | AFI_INTR_EN_DFPCI_DECERR;

	if (soc->has_intr_prsnt_sense)
		value |= AFI_INTR_EN_PRSNT_SENSE;

	afi_writel(pcie, value, AFI_AFI_INTR_ENABLE);
	afi_writel(pcie, 0xffffffff, AFI_SM_INTR_ENABLE);

	/* don't enable MSI for now, only when needed */
	afi_writel(pcie, AFI_INTR_MASK_INT_MASK, AFI_INTR_MASK);

	/* disable all exceptions */
	afi_writel(pcie, 0, AFI_FPCI_ERROR_MASKS);

	return 0;
}
895
/*
 * Power the controller down: assert all resets, turn the PCIe power
 * partition off and disable the supplies enabled by
 * tegra_pcie_power_on(). Regulator failures are only warned about so
 * the remaining supplies are still released.
 */
static void tegra_pcie_power_off(struct tegra_pcie *pcie)
{
	const struct tegra_pcie_soc_data *soc = pcie->soc_data;
	int err;

	/* TODO: disable and unprepare clocks? */

	reset_control_assert(pcie->pcie_xrst);
	reset_control_assert(pcie->afi_rst);
	reset_control_assert(pcie->pex_rst);

	tegra_powergate_power_off(TEGRA_POWERGATE_PCIE);

	if (soc->has_avdd_supply) {
		err = regulator_disable(pcie->avdd_supply);
		if (err < 0)
			dev_warn(pcie->dev,
				 "failed to disable AVDD regulator: %d\n",
				 err);
	}

	err = regulator_disable(pcie->pex_clk_supply);
	if (err < 0)
		dev_warn(pcie->dev, "failed to disable pex-clk regulator: %d\n",
			 err);

	err = regulator_disable(pcie->vdd_supply);
	if (err < 0)
		dev_warn(pcie->dev, "failed to disable VDD regulator: %d\n",
			 err);
}
927
928 static int tegra_pcie_power_on(struct tegra_pcie *pcie)
929 {
930 const struct tegra_pcie_soc_data *soc = pcie->soc_data;
931 int err;
932
933 reset_control_assert(pcie->pcie_xrst);
934 reset_control_assert(pcie->afi_rst);
935 reset_control_assert(pcie->pex_rst);
936
937 tegra_powergate_power_off(TEGRA_POWERGATE_PCIE);
938
939 /* enable regulators */
940 err = regulator_enable(pcie->vdd_supply);
941 if (err < 0) {
942 dev_err(pcie->dev, "failed to enable VDD regulator: %d\n", err);
943 return err;
944 }
945
946 err = regulator_enable(pcie->pex_clk_supply);
947 if (err < 0) {
948 dev_err(pcie->dev, "failed to enable pex-clk regulator: %d\n",
949 err);
950 return err;
951 }
952
953 if (soc->has_avdd_supply) {
954 err = regulator_enable(pcie->avdd_supply);
955 if (err < 0) {
956 dev_err(pcie->dev,
957 "failed to enable AVDD regulator: %d\n",
958 err);
959 return err;
960 }
961 }
962
963 err = tegra_powergate_sequence_power_up(TEGRA_POWERGATE_PCIE,
964 pcie->pex_clk,
965 pcie->pex_rst);
966 if (err) {
967 dev_err(pcie->dev, "powerup sequence failed: %d\n", err);
968 return err;
969 }
970
971 reset_control_deassert(pcie->afi_rst);
972
973 err = clk_prepare_enable(pcie->afi_clk);
974 if (err < 0) {
975 dev_err(pcie->dev, "failed to enable AFI clock: %d\n", err);
976 return err;
977 }
978
979 if (soc->has_cml_clk) {
980 err = clk_prepare_enable(pcie->cml_clk);
981 if (err < 0) {
982 dev_err(pcie->dev, "failed to enable CML clock: %d\n",
983 err);
984 return err;
985 }
986 }
987
988 err = clk_prepare_enable(pcie->pll_e);
989 if (err < 0) {
990 dev_err(pcie->dev, "failed to enable PLLE clock: %d\n", err);
991 return err;
992 }
993
994 return 0;
995 }
996
997 static int tegra_pcie_clocks_get(struct tegra_pcie *pcie)
998 {
999 const struct tegra_pcie_soc_data *soc = pcie->soc_data;
1000
1001 pcie->pex_clk = devm_clk_get(pcie->dev, "pex");
1002 if (IS_ERR(pcie->pex_clk))
1003 return PTR_ERR(pcie->pex_clk);
1004
1005 pcie->afi_clk = devm_clk_get(pcie->dev, "afi");
1006 if (IS_ERR(pcie->afi_clk))
1007 return PTR_ERR(pcie->afi_clk);
1008
1009 pcie->pll_e = devm_clk_get(pcie->dev, "pll_e");
1010 if (IS_ERR(pcie->pll_e))
1011 return PTR_ERR(pcie->pll_e);
1012
1013 if (soc->has_cml_clk) {
1014 pcie->cml_clk = devm_clk_get(pcie->dev, "cml");
1015 if (IS_ERR(pcie->cml_clk))
1016 return PTR_ERR(pcie->cml_clk);
1017 }
1018
1019 return 0;
1020 }
1021
1022 static int tegra_pcie_resets_get(struct tegra_pcie *pcie)
1023 {
1024 pcie->pex_rst = devm_reset_control_get(pcie->dev, "pex");
1025 if (IS_ERR(pcie->pex_rst))
1026 return PTR_ERR(pcie->pex_rst);
1027
1028 pcie->afi_rst = devm_reset_control_get(pcie->dev, "afi");
1029 if (IS_ERR(pcie->afi_rst))
1030 return PTR_ERR(pcie->afi_rst);
1031
1032 pcie->pcie_xrst = devm_reset_control_get(pcie->dev, "pcie_x");
1033 if (IS_ERR(pcie->pcie_xrst))
1034 return PTR_ERR(pcie->pcie_xrst);
1035
1036 return 0;
1037 }
1038
/*
 * Acquire everything the driver needs: clocks, resets, power, the PADS
 * and AFI register windows, the configuration-space aperture and the
 * AFI interrupt. Failures after power-up unwind via
 * tegra_pcie_power_off().
 */
static int tegra_pcie_get_resources(struct tegra_pcie *pcie)
{
	struct platform_device *pdev = to_platform_device(pcie->dev);
	struct resource *pads, *afi, *res;
	int err;

	err = tegra_pcie_clocks_get(pcie);
	if (err) {
		dev_err(&pdev->dev, "failed to get clocks: %d\n", err);
		return err;
	}

	err = tegra_pcie_resets_get(pcie);
	if (err) {
		dev_err(&pdev->dev, "failed to get resets: %d\n", err);
		return err;
	}

	err = tegra_pcie_power_on(pcie);
	if (err) {
		dev_err(&pdev->dev, "failed to power up: %d\n", err);
		return err;
	}

	pads = platform_get_resource_byname(pdev, IORESOURCE_MEM, "pads");
	pcie->pads = devm_ioremap_resource(&pdev->dev, pads);
	if (IS_ERR(pcie->pads)) {
		err = PTR_ERR(pcie->pads);
		goto poweroff;
	}

	afi = platform_get_resource_byname(pdev, IORESOURCE_MEM, "afi");
	pcie->afi = devm_ioremap_resource(&pdev->dev, afi);
	if (IS_ERR(pcie->afi)) {
		err = PTR_ERR(pcie->afi);
		goto poweroff;
	}

	/* request configuration space, but remap later, on demand */
	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "cs");
	if (!res) {
		err = -EADDRNOTAVAIL;
		goto poweroff;
	}

	pcie->cs = devm_request_mem_region(pcie->dev, res->start,
					   resource_size(res), res->name);
	if (!pcie->cs) {
		err = -EADDRNOTAVAIL;
		goto poweroff;
	}

	/* request interrupt */
	err = platform_get_irq_byname(pdev, "intr");
	if (err < 0) {
		dev_err(&pdev->dev, "failed to get IRQ: %d\n", err);
		goto poweroff;
	}

	pcie->irq = err;

	err = request_irq(pcie->irq, tegra_pcie_isr, IRQF_SHARED, "PCIE", pcie);
	if (err) {
		dev_err(&pdev->dev, "failed to register IRQ: %d\n", err);
		goto poweroff;
	}

	return 0;

poweroff:
	tegra_pcie_power_off(pcie);
	return err;
}
1112
/*
 * Undo tegra_pcie_get_resources(): release the controller interrupt (if
 * one was obtained) and power the controller back down. Memory regions,
 * clocks and resets are devm-managed and released automatically.
 *
 * Always returns 0.
 */
static int tegra_pcie_put_resources(struct tegra_pcie *pcie)
{
	if (pcie->irq > 0)
		free_irq(pcie->irq, pcie);

	tegra_pcie_power_off(pcie);
	return 0;
}
1121
1122 static int tegra_msi_alloc(struct tegra_msi *chip)
1123 {
1124 int msi;
1125
1126 mutex_lock(&chip->lock);
1127
1128 msi = find_first_zero_bit(chip->used, INT_PCI_MSI_NR);
1129 if (msi < INT_PCI_MSI_NR)
1130 set_bit(msi, chip->used);
1131 else
1132 msi = -ENOSPC;
1133
1134 mutex_unlock(&chip->lock);
1135
1136 return msi;
1137 }
1138
1139 static void tegra_msi_free(struct tegra_msi *chip, unsigned long irq)
1140 {
1141 struct device *dev = chip->chip.dev;
1142
1143 mutex_lock(&chip->lock);
1144
1145 if (!test_bit(irq, chip->used))
1146 dev_err(dev, "trying to free unused MSI#%lu\n", irq);
1147 else
1148 clear_bit(irq, chip->used);
1149
1150 mutex_unlock(&chip->lock);
1151 }
1152
/*
 * Top-level MSI interrupt handler. The AFI exposes eight 32-bit vector
 * registers (AFI_MSI_VEC0..7), one bit per MSI vector, 256 vectors total.
 * Each pending bit is acknowledged in hardware and then dispatched to the
 * virtual IRQ mapped for that vector.
 */
static irqreturn_t tegra_pcie_msi_irq(int irq, void *data)
{
	struct tegra_pcie *pcie = data;
	struct tegra_msi *msi = &pcie->msi;
	unsigned int i, processed = 0;

	for (i = 0; i < 8; i++) {
		unsigned long reg = afi_readl(pcie, AFI_MSI_VEC0 + i * 4);

		while (reg) {
			unsigned int offset = find_first_bit(&reg, 32);
			unsigned int index = i * 32 + offset;
			unsigned int irq;

			/* clear the interrupt */
			afi_writel(pcie, 1 << offset, AFI_MSI_VEC0 + i * 4);

			irq = irq_find_mapping(msi->domain, index);
			if (irq) {
				/*
				 * Only dispatch vectors that are actually
				 * allocated; a set bit for an unallocated
				 * vector is reported but otherwise ignored.
				 */
				if (test_bit(index, msi->used))
					generic_handle_irq(irq);
				else
					dev_info(pcie->dev, "unhandled MSI\n");
			} else {
				/*
				 * that's weird who triggered this?
				 * just clear it
				 */
				dev_info(pcie->dev, "unexpected MSI\n");
			}

			/* see if there's any more pending in this vector */
			reg = afi_readl(pcie, AFI_MSI_VEC0 + i * 4);

			processed++;
		}
	}

	/* IRQ_NONE lets the core detect a spurious/stuck interrupt line */
	return processed > 0 ? IRQ_HANDLED : IRQ_NONE;
}
1193
1194 static int tegra_msi_setup_irq(struct msi_chip *chip, struct pci_dev *pdev,
1195 struct msi_desc *desc)
1196 {
1197 struct tegra_msi *msi = to_tegra_msi(chip);
1198 struct msi_msg msg;
1199 unsigned int irq;
1200 int hwirq;
1201
1202 hwirq = tegra_msi_alloc(msi);
1203 if (hwirq < 0)
1204 return hwirq;
1205
1206 irq = irq_create_mapping(msi->domain, hwirq);
1207 if (!irq)
1208 return -EINVAL;
1209
1210 irq_set_msi_desc(irq, desc);
1211
1212 msg.address_lo = virt_to_phys((void *)msi->pages);
1213 /* 32 bit address only */
1214 msg.address_hi = 0;
1215 msg.data = hwirq;
1216
1217 write_msi_msg(irq, &msg);
1218
1219 return 0;
1220 }
1221
1222 static void tegra_msi_teardown_irq(struct msi_chip *chip, unsigned int irq)
1223 {
1224 struct tegra_msi *msi = to_tegra_msi(chip);
1225 struct irq_data *d = irq_get_irq_data(irq);
1226
1227 tegra_msi_free(msi, d->hwirq);
1228 }
1229
/*
 * irq_chip for per-vector MSI IRQs. Enable/disable simply reuse the
 * generic MSI mask/unmask helpers, which operate on the device's MSI
 * capability rather than on controller registers.
 */
static struct irq_chip tegra_msi_irq_chip = {
	.name = "Tegra PCIe MSI",
	.irq_enable = unmask_msi_irq,
	.irq_disable = mask_msi_irq,
	.irq_mask = mask_msi_irq,
	.irq_unmask = unmask_msi_irq,
};
1237
/*
 * irq_domain .map callback: bind a freshly created virtual IRQ to the
 * Tegra MSI irq_chip with the simple-IRQ flow handler, and stash the
 * domain's host data (the tegra_msi chip) as chip data.
 */
static int tegra_msi_map(struct irq_domain *domain, unsigned int irq,
			 irq_hw_number_t hwirq)
{
	irq_set_chip_and_handler(irq, &tegra_msi_irq_chip, handle_simple_irq);
	irq_set_chip_data(irq, domain->host_data);
	/* ARM: mark the descriptor valid so request_irq() can succeed */
	set_irq_flags(irq, IRQF_VALID);

	/* NOTE(review): presumably tells the cpuidle driver that PCIe IRQs
	 * are live so it can adjust its idle states — see soc/tegra/cpuidle.h
	 */
	tegra_cpuidle_pcie_irqs_in_use();

	return 0;
}
1249
/* Linear IRQ domain ops for the MSI vectors; only .map is needed. */
static const struct irq_domain_ops msi_domain_ops = {
	.map = tegra_msi_map,
};
1253
1254 static int tegra_pcie_enable_msi(struct tegra_pcie *pcie)
1255 {
1256 struct platform_device *pdev = to_platform_device(pcie->dev);
1257 const struct tegra_pcie_soc_data *soc = pcie->soc_data;
1258 struct tegra_msi *msi = &pcie->msi;
1259 unsigned long base;
1260 int err;
1261 u32 reg;
1262
1263 mutex_init(&msi->lock);
1264
1265 msi->chip.dev = pcie->dev;
1266 msi->chip.setup_irq = tegra_msi_setup_irq;
1267 msi->chip.teardown_irq = tegra_msi_teardown_irq;
1268
1269 msi->domain = irq_domain_add_linear(pcie->dev->of_node, INT_PCI_MSI_NR,
1270 &msi_domain_ops, &msi->chip);
1271 if (!msi->domain) {
1272 dev_err(&pdev->dev, "failed to create IRQ domain\n");
1273 return -ENOMEM;
1274 }
1275
1276 err = platform_get_irq_byname(pdev, "msi");
1277 if (err < 0) {
1278 dev_err(&pdev->dev, "failed to get IRQ: %d\n", err);
1279 goto err;
1280 }
1281
1282 msi->irq = err;
1283
1284 err = request_irq(msi->irq, tegra_pcie_msi_irq, 0,
1285 tegra_msi_irq_chip.name, pcie);
1286 if (err < 0) {
1287 dev_err(&pdev->dev, "failed to request IRQ: %d\n", err);
1288 goto err;
1289 }
1290
1291 /* setup AFI/FPCI range */
1292 msi->pages = __get_free_pages(GFP_KERNEL, 0);
1293 base = virt_to_phys((void *)msi->pages);
1294
1295 afi_writel(pcie, base >> soc->msi_base_shift, AFI_MSI_FPCI_BAR_ST);
1296 afi_writel(pcie, base, AFI_MSI_AXI_BAR_ST);
1297 /* this register is in 4K increments */
1298 afi_writel(pcie, 1, AFI_MSI_BAR_SZ);
1299
1300 /* enable all MSI vectors */
1301 afi_writel(pcie, 0xffffffff, AFI_MSI_EN_VEC0);
1302 afi_writel(pcie, 0xffffffff, AFI_MSI_EN_VEC1);
1303 afi_writel(pcie, 0xffffffff, AFI_MSI_EN_VEC2);
1304 afi_writel(pcie, 0xffffffff, AFI_MSI_EN_VEC3);
1305 afi_writel(pcie, 0xffffffff, AFI_MSI_EN_VEC4);
1306 afi_writel(pcie, 0xffffffff, AFI_MSI_EN_VEC5);
1307 afi_writel(pcie, 0xffffffff, AFI_MSI_EN_VEC6);
1308 afi_writel(pcie, 0xffffffff, AFI_MSI_EN_VEC7);
1309
1310 /* and unmask the MSI interrupt */
1311 reg = afi_readl(pcie, AFI_INTR_MASK);
1312 reg |= AFI_INTR_MASK_MSI_MASK;
1313 afi_writel(pcie, reg, AFI_INTR_MASK);
1314
1315 return 0;
1316
1317 err:
1318 irq_domain_remove(msi->domain);
1319 return err;
1320 }
1321
/*
 * Tear down MSI support in the reverse order of tegra_pcie_enable_msi():
 * mask the top-level MSI interrupt, disable all vectors in the AFI, free
 * the target page, release the interrupt and finally dispose of all IRQ
 * mappings before removing the domain.
 *
 * Always returns 0.
 */
static int tegra_pcie_disable_msi(struct tegra_pcie *pcie)
{
	struct tegra_msi *msi = &pcie->msi;
	unsigned int i, irq;
	u32 value;

	/* mask the MSI interrupt */
	value = afi_readl(pcie, AFI_INTR_MASK);
	value &= ~AFI_INTR_MASK_MSI_MASK;
	afi_writel(pcie, value, AFI_INTR_MASK);

	/* disable all MSI vectors */
	afi_writel(pcie, 0, AFI_MSI_EN_VEC0);
	afi_writel(pcie, 0, AFI_MSI_EN_VEC1);
	afi_writel(pcie, 0, AFI_MSI_EN_VEC2);
	afi_writel(pcie, 0, AFI_MSI_EN_VEC3);
	afi_writel(pcie, 0, AFI_MSI_EN_VEC4);
	afi_writel(pcie, 0, AFI_MSI_EN_VEC5);
	afi_writel(pcie, 0, AFI_MSI_EN_VEC6);
	afi_writel(pcie, 0, AFI_MSI_EN_VEC7);

	free_pages(msi->pages, 0);

	if (msi->irq > 0)
		free_irq(msi->irq, pcie);

	/* dispose of any virtual IRQs still mapped onto the domain */
	for (i = 0; i < INT_PCI_MSI_NR; i++) {
		irq = irq_find_mapping(msi->domain, i);
		if (irq > 0)
			irq_dispose_mapping(irq);
	}

	irq_domain_remove(msi->domain);

	return 0;
}
1358
1359 static int tegra_pcie_get_xbar_config(struct tegra_pcie *pcie, u32 lanes,
1360 u32 *xbar)
1361 {
1362 struct device_node *np = pcie->dev->of_node;
1363
1364 if (of_device_is_compatible(np, "nvidia,tegra30-pcie")) {
1365 switch (lanes) {
1366 case 0x00000204:
1367 dev_info(pcie->dev, "4x1, 2x1 configuration\n");
1368 *xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_420;
1369 return 0;
1370
1371 case 0x00020202:
1372 dev_info(pcie->dev, "2x3 configuration\n");
1373 *xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_222;
1374 return 0;
1375
1376 case 0x00010104:
1377 dev_info(pcie->dev, "4x1, 1x2 configuration\n");
1378 *xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_411;
1379 return 0;
1380 }
1381 } else if (of_device_is_compatible(np, "nvidia,tegra20-pcie")) {
1382 switch (lanes) {
1383 case 0x00000004:
1384 dev_info(pcie->dev, "single-mode configuration\n");
1385 *xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_SINGLE;
1386 return 0;
1387
1388 case 0x00000202:
1389 dev_info(pcie->dev, "dual-mode configuration\n");
1390 *xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_DUAL;
1391 return 0;
1392 }
1393 }
1394
1395 return -EINVAL;
1396 }
1397
/*
 * Parse the controller's device tree node: the "ranges" property (I/O,
 * prefetchable and non-prefetchable memory apertures), the supply
 * regulators, the bus-range property and one child node per root port.
 * Also assembles the packed lane configuration and resolves it into the
 * XBAR setting via tegra_pcie_get_xbar_config().
 *
 * Returns 0 on success or a negative error code.
 */
static int tegra_pcie_parse_dt(struct tegra_pcie *pcie)
{
	const struct tegra_pcie_soc_data *soc = pcie->soc_data;
	struct device_node *np = pcie->dev->of_node, *port;
	struct of_pci_range_parser parser;
	struct of_pci_range range;
	struct resource res;
	u32 lanes = 0;
	int err;

	if (of_pci_range_parser_init(&parser, np)) {
		dev_err(pcie->dev, "missing \"ranges\" property\n");
		return -EINVAL;
	}

	pcie->vdd_supply = devm_regulator_get(pcie->dev, "vdd");
	if (IS_ERR(pcie->vdd_supply))
		return PTR_ERR(pcie->vdd_supply);

	pcie->pex_clk_supply = devm_regulator_get(pcie->dev, "pex-clk");
	if (IS_ERR(pcie->pex_clk_supply))
		return PTR_ERR(pcie->pex_clk_supply);

	/* the AVDD supply only exists on some SoC generations */
	if (soc->has_avdd_supply) {
		pcie->avdd_supply = devm_regulator_get(pcie->dev, "avdd");
		if (IS_ERR(pcie->avdd_supply))
			return PTR_ERR(pcie->avdd_supply);
	}

	/* sort the "ranges" entries into the I/O, prefetch and mem windows */
	for_each_of_pci_range(&parser, &range) {
		of_pci_range_to_resource(&range, np, &res);

		switch (res.flags & IORESOURCE_TYPE_BITS) {
		case IORESOURCE_IO:
			memcpy(&pcie->io, &res, sizeof(res));
			pcie->io.name = "I/O";
			break;

		case IORESOURCE_MEM:
			if (res.flags & IORESOURCE_PREFETCH) {
				memcpy(&pcie->prefetch, &res, sizeof(res));
				pcie->prefetch.name = "PREFETCH";
			} else {
				memcpy(&pcie->mem, &res, sizeof(res));
				pcie->mem.name = "MEM";
			}
			break;
		}
	}

	/* a missing bus-range property falls back to the full 0-0xff range */
	err = of_pci_parse_bus_range(np, &pcie->busn);
	if (err < 0) {
		dev_err(pcie->dev, "failed to parse ranges property: %d\n",
			err);
		pcie->busn.name = np->name;
		pcie->busn.start = 0;
		pcie->busn.end = 0xff;
		pcie->busn.flags = IORESOURCE_BUS;
	}

	/* parse root ports */
	for_each_child_of_node(np, port) {
		struct tegra_pcie_port *rp;
		unsigned int index;
		u32 value;

		/* on success, of_pci_get_devfn() returns the devfn value */
		err = of_pci_get_devfn(port);
		if (err < 0) {
			dev_err(pcie->dev, "failed to parse address: %d\n",
				err);
			return err;
		}

		/* root ports are numbered 1..num_ports in the device tree */
		index = PCI_SLOT(err);

		if (index < 1 || index > soc->num_ports) {
			dev_err(pcie->dev, "invalid port number: %d\n", index);
			return -EINVAL;
		}

		index--;

		err = of_property_read_u32(port, "nvidia,num-lanes", &value);
		if (err < 0) {
			dev_err(pcie->dev, "failed to parse # of lanes: %d\n",
				err);
			return err;
		}

		if (value > 16) {
			dev_err(pcie->dev, "invalid # of lanes: %u\n", value);
			return -EINVAL;
		}

		/* pack the lane count into one byte per port */
		lanes |= value << (index << 3);

		/* disabled ports still contribute to the lane configuration */
		if (!of_device_is_available(port))
			continue;

		rp = devm_kzalloc(pcie->dev, sizeof(*rp), GFP_KERNEL);
		if (!rp)
			return -ENOMEM;

		err = of_address_to_resource(port, 0, &rp->regs);
		if (err < 0) {
			dev_err(pcie->dev, "failed to parse address: %d\n",
				err);
			return err;
		}

		INIT_LIST_HEAD(&rp->list);
		rp->index = index;
		rp->lanes = value;
		rp->pcie = pcie;

		rp->base = devm_ioremap_resource(pcie->dev, &rp->regs);
		if (IS_ERR(rp->base))
			return PTR_ERR(rp->base);

		list_add_tail(&rp->list, &pcie->ports);
	}

	err = tegra_pcie_get_xbar_config(pcie, lanes, &pcie->xbar_config);
	if (err < 0) {
		dev_err(pcie->dev, "invalid lane configuration\n");
		return err;
	}

	return 0;
}
1528
/*
 * FIXME: If there are no PCIe cards attached, then calling this function
 * can result in the increase of the bootup time as there are big timeout
 * loops.
 */
#define TEGRA_PCIE_LINKUP_TIMEOUT 200	/* up to 1.2 seconds */
/*
 * Poll a root port until its link trains. First wait for the data link
 * layer to come up (RP_VEND_XP_DL_UP), then for the link to become active
 * (RP_LINK_CONTROL_STATUS_DL_LINK_ACTIVE). Each poll sleeps 1-2 ms for up
 * to TEGRA_PCIE_LINKUP_TIMEOUT iterations; on timeout the port is reset
 * and the whole sequence is retried up to three times.
 *
 * Returns true if the link came up, false otherwise.
 */
static bool tegra_pcie_port_check_link(struct tegra_pcie_port *port)
{
	unsigned int retries = 3;
	unsigned long value;

	do {
		unsigned int timeout = TEGRA_PCIE_LINKUP_TIMEOUT;

		do {
			value = readl(port->base + RP_VEND_XP);

			if (value & RP_VEND_XP_DL_UP)
				break;

			usleep_range(1000, 2000);
		} while (--timeout);

		if (!timeout) {
			dev_err(port->pcie->dev, "link %u down, retrying\n",
				port->index);
			goto retry;
		}

		timeout = TEGRA_PCIE_LINKUP_TIMEOUT;

		do {
			value = readl(port->base + RP_LINK_CONTROL_STATUS);

			if (value & RP_LINK_CONTROL_STATUS_DL_LINK_ACTIVE)
				return true;

			usleep_range(1000, 2000);
		} while (--timeout);

retry:
		tegra_pcie_port_reset(port);
	} while (--retries);

	return false;
}
1575
/*
 * Bring up all parsed root ports, prune the ones whose link never trains,
 * and register the host controller with the ARM PCI core.
 *
 * Always returns 0 (pci_common_init_dev() reports no error).
 */
static int tegra_pcie_enable(struct tegra_pcie *pcie)
{
	struct tegra_pcie_port *port, *tmp;
	struct hw_pci hw;

	/* _safe iteration: ports with a dead link are removed from the list */
	list_for_each_entry_safe(port, tmp, &pcie->ports, list) {
		dev_info(pcie->dev, "probing port %u, using %u lanes\n",
			 port->index, port->lanes);

		tegra_pcie_port_enable(port);

		if (tegra_pcie_port_check_link(port))
			continue;

		dev_info(pcie->dev, "link %u down, ignoring\n", port->index);

		tegra_pcie_port_disable(port);
		tegra_pcie_port_free(port);
	}

	memset(&hw, 0, sizeof(hw));

	hw.nr_controllers = 1;
	hw.private_data = (void **)&pcie;
	hw.setup = tegra_pcie_setup;
	hw.map_irq = tegra_pcie_map_irq;
	hw.add_bus = tegra_pcie_add_bus;
	hw.scan = tegra_pcie_scan_bus;
	hw.ops = &tegra_pcie_ops;

	pci_common_init_dev(pcie->dev, &hw);

	return 0;
}
1610
/* Tegra20 SoC parameters: two root ports, no CML clock or AVDD supply. */
static const struct tegra_pcie_soc_data tegra20_pcie_data = {
	.num_ports = 2,
	.msi_base_shift = 0,	/* FPCI MSI BAR address is used unshifted */
	.pads_pll_ctl = PADS_PLL_CTL_TEGRA20,
	.tx_ref_sel = PADS_PLL_CTL_TXCLKREF_DIV10,
	.has_pex_clkreq_en = false,
	.has_pex_bias_ctrl = false,
	.has_intr_prsnt_sense = false,
	.has_avdd_supply = false,
	.has_cml_clk = false,
};
1622
/* Tegra30 SoC parameters: three root ports, CML clock and AVDD required. */
static const struct tegra_pcie_soc_data tegra30_pcie_data = {
	.num_ports = 3,
	.msi_base_shift = 8,	/* FPCI MSI BAR address is shifted right by 8 */
	.pads_pll_ctl = PADS_PLL_CTL_TEGRA30,
	.tx_ref_sel = PADS_PLL_CTL_TXCLKREF_BUF_EN,
	.has_pex_clkreq_en = true,
	.has_pex_bias_ctrl = true,
	.has_intr_prsnt_sense = true,
	.has_avdd_supply = true,
	.has_cml_clk = true,
};
1634
/* Supported compatibles; per-SoC parameters are attached via .data. */
static const struct of_device_id tegra_pcie_of_match[] = {
	{ .compatible = "nvidia,tegra30-pcie", .data = &tegra30_pcie_data },
	{ .compatible = "nvidia,tegra20-pcie", .data = &tegra20_pcie_data },
	{ },
};
MODULE_DEVICE_TABLE(of, tegra_pcie_of_match);
1641
/*
 * Driver probe: parse the device tree, acquire clocks/resets/regions and
 * power, initialize the controller, optionally enable MSI support, and
 * finally bring up the root ports and register with the PCI core.
 *
 * Error unwinding is via the labels at the bottom: MSI teardown (when it
 * was enabled) and then resource release / power-off.
 */
static int tegra_pcie_probe(struct platform_device *pdev)
{
	const struct of_device_id *match;
	struct tegra_pcie *pcie;
	int err;

	match = of_match_device(tegra_pcie_of_match, &pdev->dev);
	if (!match)
		return -ENODEV;

	pcie = devm_kzalloc(&pdev->dev, sizeof(*pcie), GFP_KERNEL);
	if (!pcie)
		return -ENOMEM;

	INIT_LIST_HEAD(&pcie->buses);
	INIT_LIST_HEAD(&pcie->ports);
	pcie->soc_data = match->data;
	pcie->dev = &pdev->dev;

	err = tegra_pcie_parse_dt(pcie);
	if (err < 0)
		return err;

	pcibios_min_mem = 0;

	err = tegra_pcie_get_resources(pcie);
	if (err < 0) {
		dev_err(&pdev->dev, "failed to request resources: %d\n", err);
		return err;
	}

	err = tegra_pcie_enable_controller(pcie);
	if (err)
		goto put_resources;

	/* setup the AFI address translations */
	tegra_pcie_setup_translations(pcie);

	if (IS_ENABLED(CONFIG_PCI_MSI)) {
		err = tegra_pcie_enable_msi(pcie);
		if (err < 0) {
			dev_err(&pdev->dev,
				"failed to enable MSI support: %d\n",
				err);
			goto put_resources;
		}
	}

	err = tegra_pcie_enable(pcie);
	if (err < 0) {
		dev_err(&pdev->dev, "failed to enable PCIe ports: %d\n", err);
		goto disable_msi;
	}

	platform_set_drvdata(pdev, pcie);
	return 0;

disable_msi:
	if (IS_ENABLED(CONFIG_PCI_MSI))
		tegra_pcie_disable_msi(pcie);
put_resources:
	tegra_pcie_put_resources(pcie);
	return err;
}
1706
/*
 * Platform driver definition. No .remove callback is provided, so unbind
 * via sysfs is suppressed with suppress_bind_attrs.
 */
static struct platform_driver tegra_pcie_driver = {
	.driver = {
		.name = "tegra-pcie",
		.owner = THIS_MODULE,
		.of_match_table = tegra_pcie_of_match,
		.suppress_bind_attrs = true,
	},
	.probe = tegra_pcie_probe,
};
module_platform_driver(tegra_pcie_driver);
1717
1718 MODULE_AUTHOR("Thierry Reding <treding@nvidia.com>");
1719 MODULE_DESCRIPTION("NVIDIA Tegra PCIe driver");
1720 MODULE_LICENSE("GPL v2");