/*
 * PCIe driver for Marvell Armada 370 and Armada XP SoCs
 *
 * Author: Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
 *
 * This file is licensed under the terms of the GNU General Public
 * License version 2. This program is licensed "as is" without any
 * warranty of any kind, whether express or implied.
 */

#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/gpio.h>
#include <linux/init.h>
#include <linux/mbus.h>
#include <linux/msi.h>
#include <linux/slab.h>
#include <linux/platform_device.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_gpio.h>
#include <linux/of_pci.h>
#include <linux/of_platform.h>

/*
 * PCIe unit register offsets.
 */
#define PCIE_DEV_ID_OFF		0x0000
#define PCIE_CMD_OFF		0x0004
#define PCIE_DEV_REV_OFF	0x0008
#define PCIE_BAR_LO_OFF(n)	(0x0010 + ((n) << 3))
#define PCIE_BAR_HI_OFF(n)	(0x0014 + ((n) << 3))
#define PCIE_CAP_PCIEXP		0x0060
#define PCIE_HEADER_LOG_4_OFF	0x0128
#define PCIE_BAR_CTRL_OFF(n)	(0x1804 + (((n) - 1) * 4))
#define PCIE_WIN04_CTRL_OFF(n)	(0x1820 + ((n) << 4))
#define PCIE_WIN04_BASE_OFF(n)	(0x1824 + ((n) << 4))
#define PCIE_WIN04_REMAP_OFF(n)	(0x182c + ((n) << 4))
#define PCIE_WIN5_CTRL_OFF	0x1880
#define PCIE_WIN5_BASE_OFF	0x1884
#define PCIE_WIN5_REMAP_OFF	0x188c
#define PCIE_CONF_ADDR_OFF	0x18f8
#define PCIE_CONF_ADDR_EN	0x80000000
#define PCIE_CONF_REG(r)	((((r) & 0xf00) << 16) | ((r) & 0xfc))
#define PCIE_CONF_BUS(b)	(((b) & 0xff) << 16)
#define PCIE_CONF_DEV(d)	(((d) & 0x1f) << 11)
#define PCIE_CONF_FUNC(f)	(((f) & 0x7) << 8)
#define PCIE_CONF_ADDR(bus, devfn, where) \
	(PCIE_CONF_BUS(bus) | PCIE_CONF_DEV(PCI_SLOT(devfn)) | \
	 PCIE_CONF_FUNC(PCI_FUNC(devfn)) | PCIE_CONF_REG(where) | \
	 PCIE_CONF_ADDR_EN)
#define PCIE_CONF_DATA_OFF	0x18fc
#define PCIE_MASK_OFF		0x1910
#define PCIE_MASK_ENABLE_INTS	0x0f000000
#define PCIE_CTRL_OFF		0x1a00
#define PCIE_CTRL_X1_MODE	0x0001
#define PCIE_STAT_OFF		0x1a04
#define PCIE_STAT_BUS		0xff00
#define PCIE_STAT_DEV		0x1f0000
#define PCIE_STAT_LINK_DOWN	BIT(0)
#define PCIE_RC_RTSTA		0x1a14
#define PCIE_DEBUG_CTRL		0x1a60
#define PCIE_DEBUG_SOFT_RESET	BIT(20)

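/*
 * Offsets of the emulated PCI Express capability registers in the
 * software PCI-to-PCI bridge configuration space: the capability is
 * placed right after the standard type 1 header, at PCISWCAP
 * (PCI_BRIDGE_CONTROL + 2), and the usual PCI_EXP_* offsets are added
 * on top of it.
 */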
enum {
	PCISWCAP = PCI_BRIDGE_CONTROL + 2,
	PCISWCAP_EXP_LIST_ID = PCISWCAP + PCI_CAP_LIST_ID,
	PCISWCAP_EXP_DEVCAP = PCISWCAP + PCI_EXP_DEVCAP,
	PCISWCAP_EXP_DEVCTL = PCISWCAP + PCI_EXP_DEVCTL,
	PCISWCAP_EXP_LNKCAP = PCISWCAP + PCI_EXP_LNKCAP,
	PCISWCAP_EXP_LNKCTL = PCISWCAP + PCI_EXP_LNKCTL,
	PCISWCAP_EXP_SLTCAP = PCISWCAP + PCI_EXP_SLTCAP,
	PCISWCAP_EXP_SLTCTL = PCISWCAP + PCI_EXP_SLTCTL,
	PCISWCAP_EXP_RTCTL = PCISWCAP + PCI_EXP_RTCTL,
	PCISWCAP_EXP_RTSTA = PCISWCAP + PCI_EXP_RTSTA,
	PCISWCAP_EXP_DEVCAP2 = PCISWCAP + PCI_EXP_DEVCAP2,
	PCISWCAP_EXP_DEVCTL2 = PCISWCAP + PCI_EXP_DEVCTL2,
	PCISWCAP_EXP_LNKCAP2 = PCISWCAP + PCI_EXP_LNKCAP2,
	PCISWCAP_EXP_LNKCTL2 = PCISWCAP + PCI_EXP_LNKCTL2,
	PCISWCAP_EXP_SLTCAP2 = PCISWCAP + PCI_EXP_SLTCAP2,
	PCISWCAP_EXP_SLTCTL2 = PCISWCAP + PCI_EXP_SLTCTL2,
};

/* PCI configuration space of a PCI-to-PCI bridge */
struct mvebu_sw_pci_bridge {
	u16 vendor;
	u16 device;
	u16 command;
	u16 status;
	u16 class;
	u8 interface;
	u8 revision;
	u8 bist;
	u8 header_type;
	u8 latency_timer;
	u8 cache_line_size;
	u32 bar[2];
	u8 primary_bus;
	u8 secondary_bus;
	u8 subordinate_bus;
	u8 secondary_latency_timer;
	u8 iobase;
	u8 iolimit;
	u16 secondary_status;
	u16 membase;
	u16 memlimit;
	u16 iobaseupper;
	u16 iolimitupper;
	u32 romaddr;
	u8 intline;
	u8 intpin;
	u16 bridgectrl;

	/* PCI express capability */
	u32 pcie_sltcap;
	u16 pcie_devctl;
	u16 pcie_rtctl;
};

struct mvebu_pcie_port;

/* Structure representing all PCIe interfaces */
struct mvebu_pcie {
	struct platform_device *pdev;
	struct mvebu_pcie_port *ports;
	struct msi_controller *msi;
	struct resource io;
	struct resource realio;
	struct resource mem;
	struct resource busn;
	int nports;
};

/* Structure representing one PCIe interface */
struct mvebu_pcie_port {
	char *name;
	void __iomem *base;
	u32 port;
	u32 lane;
	int devfn;
	unsigned int mem_target;
	unsigned int mem_attr;
	unsigned int io_target;
	unsigned int io_attr;
	struct clk *clk;
	struct gpio_desc *reset_gpio;
	char *reset_name;
	struct mvebu_sw_pci_bridge bridge;
	struct device_node *dn;
	struct mvebu_pcie *pcie;
	phys_addr_t memwin_base;
	size_t memwin_size;
	phys_addr_t iowin_base;
	size_t iowin_size;
	u32 saved_pcie_stat;
};

static inline void mvebu_writel(struct mvebu_pcie_port *port, u32 val, u32 reg)
{
	writel(val, port->base + reg);
}

static inline u32 mvebu_readl(struct mvebu_pcie_port *port, u32 reg)
{
	return readl(port->base + reg);
}

static inline bool mvebu_has_ioport(struct mvebu_pcie_port *port)
{
	return port->io_target != -1 && port->io_attr != -1;
}

static bool mvebu_pcie_link_up(struct mvebu_pcie_port *port)
{
	return !(mvebu_readl(port, PCIE_STAT_OFF) & PCIE_STAT_LINK_DOWN);
}

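/*
 * The PCIE_STAT register holds the local (root port) bus number in
 * bits [15:8] and the local device number in bits [20:16]; the two
 * helpers below update those fields without touching the rest of the
 * register.
 */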
static void mvebu_pcie_set_local_bus_nr(struct mvebu_pcie_port *port, int nr)
{
	u32 stat;

	stat = mvebu_readl(port, PCIE_STAT_OFF);
	stat &= ~PCIE_STAT_BUS;
	stat |= nr << 8;
	mvebu_writel(port, stat, PCIE_STAT_OFF);
}

static void mvebu_pcie_set_local_dev_nr(struct mvebu_pcie_port *port, int nr)
{
	u32 stat;

	stat = mvebu_readl(port, PCIE_STAT_OFF);
	stat &= ~PCIE_STAT_DEV;
	stat |= nr << 16;
	mvebu_writel(port, stat, PCIE_STAT_OFF);
}

/*
 * Setup PCIE BARs and Address Decode Wins:
 * BAR[0,2] -> disabled, BAR[1] -> covers all DRAM banks
 * WIN[0-3] -> DRAM bank[0-3]
 */
static void mvebu_pcie_setup_wins(struct mvebu_pcie_port *port)
{
	const struct mbus_dram_target_info *dram;
	u32 size;
	int i;

	dram = mv_mbus_dram_info();

	/* First, disable and clear BARs and windows. */
	for (i = 1; i < 3; i++) {
		mvebu_writel(port, 0, PCIE_BAR_CTRL_OFF(i));
		mvebu_writel(port, 0, PCIE_BAR_LO_OFF(i));
		mvebu_writel(port, 0, PCIE_BAR_HI_OFF(i));
	}

	for (i = 0; i < 5; i++) {
		mvebu_writel(port, 0, PCIE_WIN04_CTRL_OFF(i));
		mvebu_writel(port, 0, PCIE_WIN04_BASE_OFF(i));
		mvebu_writel(port, 0, PCIE_WIN04_REMAP_OFF(i));
	}

	mvebu_writel(port, 0, PCIE_WIN5_CTRL_OFF);
	mvebu_writel(port, 0, PCIE_WIN5_BASE_OFF);
	mvebu_writel(port, 0, PCIE_WIN5_REMAP_OFF);

	/* Setup windows for DDR banks. Count total DDR size on the fly. */
	size = 0;
	for (i = 0; i < dram->num_cs; i++) {
		const struct mbus_dram_window *cs = dram->cs + i;

		mvebu_writel(port, cs->base & 0xffff0000,
			     PCIE_WIN04_BASE_OFF(i));
		mvebu_writel(port, 0, PCIE_WIN04_REMAP_OFF(i));
		mvebu_writel(port,
			     ((cs->size - 1) & 0xffff0000) |
			     (cs->mbus_attr << 8) |
			     (dram->mbus_dram_target_id << 4) | 1,
			     PCIE_WIN04_CTRL_OFF(i));

		size += cs->size;
	}

	/* Round up 'size' to the nearest power of two. */
	if ((size & (size - 1)) != 0)
		size = 1 << fls(size);

	/* Setup BAR[1] to all DRAM banks. */
	mvebu_writel(port, dram->cs[0].base, PCIE_BAR_LO_OFF(1));
	mvebu_writel(port, 0, PCIE_BAR_HI_OFF(1));
	mvebu_writel(port, ((size - 1) & 0xffff0000) | 1,
		     PCIE_BAR_CTRL_OFF(1));
}

static void mvebu_pcie_setup_hw(struct mvebu_pcie_port *port)
{
	u32 cmd, mask;

	/* Point PCIe unit MBUS decode windows to DRAM space. */
	mvebu_pcie_setup_wins(port);

	/* Master + slave enable. */
	cmd = mvebu_readl(port, PCIE_CMD_OFF);
	cmd |= PCI_COMMAND_IO;
	cmd |= PCI_COMMAND_MEMORY;
	cmd |= PCI_COMMAND_MASTER;
	mvebu_writel(port, cmd, PCIE_CMD_OFF);

	/* Enable interrupt lines A-D. */
	mask = mvebu_readl(port, PCIE_MASK_OFF);
	mask |= PCIE_MASK_ENABLE_INTS;
	mvebu_writel(port, mask, PCIE_MASK_OFF);
}

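/*
 * Hardware configuration space accesses use an indirect scheme: the
 * bus/device/function/register address is first programmed into
 * PCIE_CONF_ADDR (with the enable bit set), then the data is read from
 * or written to PCIE_CONF_DATA at the byte offset matching the access
 * size.
 */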
static int mvebu_pcie_hw_rd_conf(struct mvebu_pcie_port *port,
				 struct pci_bus *bus,
				 u32 devfn, int where, int size, u32 *val)
{
	void __iomem *conf_data = port->base + PCIE_CONF_DATA_OFF;

	mvebu_writel(port, PCIE_CONF_ADDR(bus->number, devfn, where),
		     PCIE_CONF_ADDR_OFF);

	switch (size) {
	case 1:
		*val = readb_relaxed(conf_data + (where & 3));
		break;
	case 2:
		*val = readw_relaxed(conf_data + (where & 2));
		break;
	case 4:
		*val = readl_relaxed(conf_data);
		break;
	}

	return PCIBIOS_SUCCESSFUL;
}

static int mvebu_pcie_hw_wr_conf(struct mvebu_pcie_port *port,
				 struct pci_bus *bus,
				 u32 devfn, int where, int size, u32 val)
{
	void __iomem *conf_data = port->base + PCIE_CONF_DATA_OFF;

	mvebu_writel(port, PCIE_CONF_ADDR(bus->number, devfn, where),
		     PCIE_CONF_ADDR_OFF);

	switch (size) {
	case 1:
		writeb(val, conf_data + (where & 3));
		break;
	case 2:
		writew(val, conf_data + (where & 2));
		break;
	case 4:
		writel(val, conf_data);
		break;
	default:
		return PCIBIOS_BAD_REGISTER_NUMBER;
	}

	return PCIBIOS_SUCCESSFUL;
}

/*
 * Remove windows, starting from the largest ones to the smallest
 * ones.
 */
static void mvebu_pcie_del_windows(struct mvebu_pcie_port *port,
				   phys_addr_t base, size_t size)
{
	while (size) {
		size_t sz = 1 << (fls(size) - 1);

		mvebu_mbus_del_window(base, sz);
		base += sz;
		size -= sz;
	}
}

/*
 * MBus windows can only have a power of two size, but PCI BARs do not
 * have this constraint. Therefore, we have to split the PCI BAR into
 * areas each having a power of two size. We start from the largest
 * one (i.e highest order bit set in the size).
 */
static void mvebu_pcie_add_windows(struct mvebu_pcie_port *port,
				   unsigned int target, unsigned int attribute,
				   phys_addr_t base, size_t size,
				   phys_addr_t remap)
{
	size_t size_mapped = 0;

	while (size) {
		size_t sz = 1 << (fls(size) - 1);
		int ret;

		ret = mvebu_mbus_add_window_remap_by_id(target, attribute, base,
							sz, remap);
		if (ret) {
			phys_addr_t end = base + sz - 1;

			dev_err(&port->pcie->pdev->dev,
				"Could not create MBus window at [mem %pa-%pa]: %d\n",
				&base, &end, ret);
			mvebu_pcie_del_windows(port, base - size_mapped,
					       size_mapped);
			return;
		}

		size -= sz;
		size_mapped += sz;
		base += sz;
		if (remap != MVEBU_MBUS_NO_REMAP)
			remap += sz;
	}
}

static void mvebu_pcie_handle_iobase_change(struct mvebu_pcie_port *port)
{
	phys_addr_t iobase;

	/* Are the new iobase/iolimit values invalid? */
	if (port->bridge.iolimit < port->bridge.iobase ||
	    port->bridge.iolimitupper < port->bridge.iobaseupper ||
	    !(port->bridge.command & PCI_COMMAND_IO)) {

		/* If a window was configured, remove it */
		if (port->iowin_base) {
			mvebu_pcie_del_windows(port, port->iowin_base,
					       port->iowin_size);
			port->iowin_base = 0;
			port->iowin_size = 0;
		}

		return;
	}

	if (!mvebu_has_ioport(port)) {
		dev_WARN(&port->pcie->pdev->dev,
			 "Attempt to set IO when IO is disabled\n");
		return;
	}

	/*
	 * We read the PCI-to-PCI bridge emulated registers, and
	 * calculate the base address and size of the address decoding
	 * window to setup, according to the PCI-to-PCI bridge
	 * specifications. iobase is the bus address, port->iowin_base
	 * is the CPU address.
	 */
	iobase = ((port->bridge.iobase & 0xF0) << 8) |
		 (port->bridge.iobaseupper << 16);
	port->iowin_base = port->pcie->io.start + iobase;
	port->iowin_size = ((0xFFF | ((port->bridge.iolimit & 0xF0) << 8) |
			     (port->bridge.iolimitupper << 16)) -
			    iobase) + 1;

	mvebu_pcie_add_windows(port, port->io_target, port->io_attr,
			       port->iowin_base, port->iowin_size,
			       iobase);
}

static void mvebu_pcie_handle_membase_change(struct mvebu_pcie_port *port)
{
	/* Are the new membase/memlimit values invalid? */
	if (port->bridge.memlimit < port->bridge.membase ||
	    !(port->bridge.command & PCI_COMMAND_MEMORY)) {

		/* If a window was configured, remove it */
		if (port->memwin_base) {
			mvebu_pcie_del_windows(port, port->memwin_base,
					       port->memwin_size);
			port->memwin_base = 0;
			port->memwin_size = 0;
		}

		return;
	}

	/*
	 * We read the PCI-to-PCI bridge emulated registers, and
	 * calculate the base address and size of the address decoding
	 * window to setup, according to the PCI-to-PCI bridge
	 * specifications.
	 */
	port->memwin_base = ((port->bridge.membase & 0xFFF0) << 16);
	port->memwin_size =
		(((port->bridge.memlimit & 0xFFF0) << 16) | 0xFFFFF) -
		port->memwin_base + 1;

	mvebu_pcie_add_windows(port, port->mem_target, port->mem_attr,
			       port->memwin_base, port->memwin_size,
			       MVEBU_MBUS_NO_REMAP);
}

/*
 * Initialize the configuration space of the PCI-to-PCI bridge
 * associated with the given PCIe interface.
 */
static void mvebu_sw_pci_bridge_init(struct mvebu_pcie_port *port)
{
	struct mvebu_sw_pci_bridge *bridge = &port->bridge;

	memset(bridge, 0, sizeof(struct mvebu_sw_pci_bridge));

	bridge->class = PCI_CLASS_BRIDGE_PCI;
	bridge->vendor = PCI_VENDOR_ID_MARVELL;
	bridge->device = mvebu_readl(port, PCIE_DEV_ID_OFF) >> 16;
	bridge->revision = mvebu_readl(port, PCIE_DEV_REV_OFF) & 0xff;
	bridge->header_type = PCI_HEADER_TYPE_BRIDGE;
	bridge->cache_line_size = 0x10;

	/* We support 32 bits I/O addressing */
	bridge->iobase = PCI_IO_RANGE_TYPE_32;
	bridge->iolimit = PCI_IO_RANGE_TYPE_32;

	/* Add capabilities */
	bridge->status = PCI_STATUS_CAP_LIST;
}

/*
 * Read the configuration space of the PCI-to-PCI bridge associated
 * with the given PCIe interface.
 */
static int mvebu_sw_pci_bridge_read(struct mvebu_pcie_port *port,
				    unsigned int where, int size, u32 *value)
{
	struct mvebu_sw_pci_bridge *bridge = &port->bridge;

	switch (where & ~3) {
	case PCI_VENDOR_ID:
		*value = bridge->device << 16 | bridge->vendor;
		break;

	case PCI_COMMAND:
		*value = bridge->command | bridge->status << 16;
		break;

	case PCI_CLASS_REVISION:
		*value = bridge->class << 16 | bridge->interface << 8 |
			 bridge->revision;
		break;

	case PCI_CACHE_LINE_SIZE:
		*value = bridge->bist << 24 | bridge->header_type << 16 |
			 bridge->latency_timer << 8 | bridge->cache_line_size;
		break;

	case PCI_BASE_ADDRESS_0 ... PCI_BASE_ADDRESS_1:
		*value = bridge->bar[((where & ~3) - PCI_BASE_ADDRESS_0) / 4];
		break;

	case PCI_PRIMARY_BUS:
		*value = (bridge->secondary_latency_timer << 24 |
			  bridge->subordinate_bus << 16 |
			  bridge->secondary_bus << 8 |
			  bridge->primary_bus);
		break;

	case PCI_IO_BASE:
		if (!mvebu_has_ioport(port))
			*value = bridge->secondary_status << 16;
		else
			*value = (bridge->secondary_status << 16 |
				  bridge->iolimit << 8 |
				  bridge->iobase);
		break;

	case PCI_MEMORY_BASE:
		*value = (bridge->memlimit << 16 | bridge->membase);
		break;

	case PCI_PREF_MEMORY_BASE:
		*value = 0;
		break;

	case PCI_IO_BASE_UPPER16:
		*value = (bridge->iolimitupper << 16 | bridge->iobaseupper);
		break;

	case PCI_CAPABILITY_LIST:
		*value = PCISWCAP;
		break;

	case PCI_ROM_ADDRESS1:
		*value = 0;
		break;

	case PCI_INTERRUPT_LINE:
		/* LINE PIN MIN_GNT MAX_LAT */
		*value = 0;
		break;

	case PCISWCAP_EXP_LIST_ID:
		/* Set PCIe v2, root port, slot support */
		*value = (PCI_EXP_TYPE_ROOT_PORT << 4 | 2 |
			  PCI_EXP_FLAGS_SLOT) << 16 | PCI_CAP_ID_EXP;
		break;

	case PCISWCAP_EXP_DEVCAP:
		*value = mvebu_readl(port, PCIE_CAP_PCIEXP + PCI_EXP_DEVCAP);
		break;

	case PCISWCAP_EXP_DEVCTL:
		*value = mvebu_readl(port, PCIE_CAP_PCIEXP + PCI_EXP_DEVCTL) &
			 ~(PCI_EXP_DEVCTL_URRE | PCI_EXP_DEVCTL_FERE |
			   PCI_EXP_DEVCTL_NFERE | PCI_EXP_DEVCTL_CERE);
		*value |= bridge->pcie_devctl;
		break;

	case PCISWCAP_EXP_LNKCAP:
		/*
		 * PCIe requires the clock power management capability to be
		 * hard-wired to zero for downstream ports
		 */
		*value = mvebu_readl(port, PCIE_CAP_PCIEXP + PCI_EXP_LNKCAP) &
			 ~PCI_EXP_LNKCAP_CLKPM;
		break;

	case PCISWCAP_EXP_LNKCTL:
		*value = mvebu_readl(port, PCIE_CAP_PCIEXP + PCI_EXP_LNKCTL);
		break;

	case PCISWCAP_EXP_SLTCAP:
		*value = bridge->pcie_sltcap;
		break;

	case PCISWCAP_EXP_SLTCTL:
		*value = PCI_EXP_SLTSTA_PDS << 16;
		break;

	case PCISWCAP_EXP_RTCTL:
		*value = bridge->pcie_rtctl;
		break;

	case PCISWCAP_EXP_RTSTA:
		*value = mvebu_readl(port, PCIE_RC_RTSTA);
		break;

	/* PCIe requires the v2 fields to be hard-wired to zero */
	case PCISWCAP_EXP_DEVCAP2:
	case PCISWCAP_EXP_DEVCTL2:
	case PCISWCAP_EXP_LNKCAP2:
	case PCISWCAP_EXP_LNKCTL2:
	case PCISWCAP_EXP_SLTCAP2:
	case PCISWCAP_EXP_SLTCTL2:
	default:
		/*
		 * PCI defines configuration read accesses to reserved or
		 * unimplemented registers to read as zero and complete
		 * normally.
		 */
		*value = 0;
		return PCIBIOS_SUCCESSFUL;
	}

	if (size == 2)
		*value = (*value >> (8 * (where & 3))) & 0xffff;
	else if (size == 1)
		*value = (*value >> (8 * (where & 3))) & 0xff;

	return PCIBIOS_SUCCESSFUL;
}

/* Write to the PCI-to-PCI bridge configuration space */
static int mvebu_sw_pci_bridge_write(struct mvebu_pcie_port *port,
				     unsigned int where, int size, u32 value)
{
	struct mvebu_sw_pci_bridge *bridge = &port->bridge;
	u32 mask, reg;
	int err;

	if (size == 4)
		mask = 0x0;
	else if (size == 2)
		mask = ~(0xffff << ((where & 3) * 8));
	else if (size == 1)
		mask = ~(0xff << ((where & 3) * 8));
	else
		return PCIBIOS_BAD_REGISTER_NUMBER;

	err = mvebu_sw_pci_bridge_read(port, where & ~3, 4, &reg);
	if (err)
		return err;

	value = (reg & mask) | value << ((where & 3) * 8);

	switch (where & ~3) {
	case PCI_COMMAND:
	{
		u32 old = bridge->command;

		if (!mvebu_has_ioport(port))
			value &= ~PCI_COMMAND_IO;

		bridge->command = value & 0xffff;
		if ((old ^ bridge->command) & PCI_COMMAND_IO)
			mvebu_pcie_handle_iobase_change(port);
		if ((old ^ bridge->command) & PCI_COMMAND_MEMORY)
			mvebu_pcie_handle_membase_change(port);
		break;
	}

	case PCI_BASE_ADDRESS_0 ... PCI_BASE_ADDRESS_1:
		bridge->bar[((where & ~3) - PCI_BASE_ADDRESS_0) / 4] = value;
		break;

	case PCI_IO_BASE:
		/*
		 * We also keep bit 1 set, it is a read-only bit that
		 * indicates we support 32 bits addressing for the
		 * I/O
		 */
		bridge->iobase = (value & 0xff) | PCI_IO_RANGE_TYPE_32;
		bridge->iolimit = ((value >> 8) & 0xff) | PCI_IO_RANGE_TYPE_32;
		mvebu_pcie_handle_iobase_change(port);
		break;

	case PCI_MEMORY_BASE:
		bridge->membase = value & 0xffff;
		bridge->memlimit = value >> 16;
		mvebu_pcie_handle_membase_change(port);
		break;

	case PCI_IO_BASE_UPPER16:
		bridge->iobaseupper = value & 0xffff;
		bridge->iolimitupper = value >> 16;
		mvebu_pcie_handle_iobase_change(port);
		break;

	case PCI_PRIMARY_BUS:
		bridge->primary_bus = value & 0xff;
		bridge->secondary_bus = (value >> 8) & 0xff;
		bridge->subordinate_bus = (value >> 16) & 0xff;
		bridge->secondary_latency_timer = (value >> 24) & 0xff;
		mvebu_pcie_set_local_bus_nr(port, bridge->secondary_bus);
		break;

	case PCISWCAP_EXP_DEVCTL:
		/*
		 * The Armada 370 documentation says these bits must
		 * always be zero when in root complex mode.
		 */
		value &= ~(PCI_EXP_DEVCTL_URRE | PCI_EXP_DEVCTL_FERE |
			   PCI_EXP_DEVCTL_NFERE | PCI_EXP_DEVCTL_CERE);

		/*
		 * If the mask is 0xffff0000, then we only want to write
		 * the device control register, rather than clearing the
		 * RW1C bits in the device status register. Mask out the
		 * status register bits.
		 */
		if (mask == 0xffff0000)
			value &= 0xffff;

		mvebu_writel(port, value, PCIE_CAP_PCIEXP + PCI_EXP_DEVCTL);
		break;

	case PCISWCAP_EXP_LNKCTL:
		/*
		 * If we don't support CLKREQ, we must ensure that the
		 * CLKREQ enable bit always reads zero. Since we haven't
		 * had this capability, and it's dependent on board wiring,
		 * disable it for the time being.
		 */
		value &= ~PCI_EXP_LNKCTL_CLKREQ_EN;

		/*
		 * If the mask is 0xffff0000, then we only want to write
		 * the link control register, rather than clearing the
		 * RW1C bits in the link status register. Mask out the
		 * status register bits.
		 */
		if (mask == 0xffff0000)
			value &= 0xffff;

		mvebu_writel(port, value, PCIE_CAP_PCIEXP + PCI_EXP_LNKCTL);
		break;

	case PCISWCAP_EXP_RTSTA:
		mvebu_writel(port, value, PCIE_RC_RTSTA);
		break;

	default:
		break;
	}

	return PCIBIOS_SUCCESSFUL;
}

static inline struct mvebu_pcie *sys_to_pcie(struct pci_sys_data *sys)
{
	return sys->private_data;
}

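/*
 * Map a config access to a port: accesses on bus 0 target the emulated
 * PCI-to-PCI bridges and are matched by devfn, while accesses on any
 * other bus are matched against the secondary..subordinate bus range
 * programmed into each bridge.
 */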
static struct mvebu_pcie_port *mvebu_pcie_find_port(struct mvebu_pcie *pcie,
						    struct pci_bus *bus,
						    int devfn)
{
	int i;

	for (i = 0; i < pcie->nports; i++) {
		struct mvebu_pcie_port *port = &pcie->ports[i];

		if (bus->number == 0 && port->devfn == devfn)
			return port;
		if (bus->number != 0 &&
		    bus->number >= port->bridge.secondary_bus &&
		    bus->number <= port->bridge.subordinate_bus)
			return port;
	}

	return NULL;
}

/* PCI configuration space write function */
static int mvebu_pcie_wr_conf(struct pci_bus *bus, u32 devfn,
			      int where, int size, u32 val)
{
	struct mvebu_pcie *pcie = sys_to_pcie(bus->sysdata);
	struct mvebu_pcie_port *port;
	int ret;

	port = mvebu_pcie_find_port(pcie, bus, devfn);
	if (!port)
		return PCIBIOS_DEVICE_NOT_FOUND;

	/* Access the emulated PCI-to-PCI bridge */
	if (bus->number == 0)
		return mvebu_sw_pci_bridge_write(port, where, size, val);

	if (!mvebu_pcie_link_up(port))
		return PCIBIOS_DEVICE_NOT_FOUND;

	/* Access the real PCIe interface */
	ret = mvebu_pcie_hw_wr_conf(port, bus, devfn,
				    where, size, val);

	return ret;
}

/* PCI configuration space read function */
static int mvebu_pcie_rd_conf(struct pci_bus *bus, u32 devfn, int where,
			      int size, u32 *val)
{
	struct mvebu_pcie *pcie = sys_to_pcie(bus->sysdata);
	struct mvebu_pcie_port *port;
	int ret;

	port = mvebu_pcie_find_port(pcie, bus, devfn);
	if (!port) {
		*val = 0xffffffff;
		return PCIBIOS_DEVICE_NOT_FOUND;
	}

	/* Access the emulated PCI-to-PCI bridge */
	if (bus->number == 0)
		return mvebu_sw_pci_bridge_read(port, where, size, val);

	if (!mvebu_pcie_link_up(port)) {
		*val = 0xffffffff;
		return PCIBIOS_DEVICE_NOT_FOUND;
	}

	/* Access the real PCIe interface */
	ret = mvebu_pcie_hw_rd_conf(port, bus, devfn,
				    where, size, val);

	return ret;
}

static struct pci_ops mvebu_pcie_ops = {
	.read = mvebu_pcie_rd_conf,
	.write = mvebu_pcie_wr_conf,
};

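/*
 * hw_pci .setup callback: registers the I/O, memory and bus number
 * resources with the PCI core and programs the decoding windows of
 * every port that was successfully mapped.
 */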
static int mvebu_pcie_setup(int nr, struct pci_sys_data *sys)
{
	struct mvebu_pcie *pcie = sys_to_pcie(sys);
	int err, i;

	pcie->mem.name = "PCI MEM";
	pcie->realio.name = "PCI I/O";

	if (resource_size(&pcie->realio) != 0)
		pci_add_resource_offset(&sys->resources, &pcie->realio,
					sys->io_offset);

	pci_add_resource_offset(&sys->resources, &pcie->mem, sys->mem_offset);
	pci_add_resource(&sys->resources, &pcie->busn);

	err = devm_request_pci_bus_resources(&pcie->pdev->dev, &sys->resources);
	if (err)
		return 0;

	for (i = 0; i < pcie->nports; i++) {
		struct mvebu_pcie_port *port = &pcie->ports[i];

		if (!port->base)
			continue;
		mvebu_pcie_setup_hw(port);
	}

	return 1;
}

static resource_size_t mvebu_pcie_align_resource(struct pci_dev *dev,
						 const struct resource *res,
						 resource_size_t start,
						 resource_size_t size,
						 resource_size_t align)
{
	if (dev->bus->number != 0)
		return start;

	/*
	 * On the PCI-to-PCI bridge side, the I/O windows must have at
	 * least a 64 KB size and the memory windows must have at
	 * least a 1 MB size. Moreover, MBus windows need to have a
	 * base address aligned on their size, and their size must be
	 * a power of two. This means that if the BAR doesn't have a
	 * power of two size, several MBus windows will actually be
	 * created. We need to ensure that the biggest MBus window
	 * (which will be the first one) is aligned on its size, which
	 * explains the rounddown_pow_of_two() being done here.
	 */
	if (res->flags & IORESOURCE_IO)
		return round_up(start, max_t(resource_size_t, SZ_64K,
					     rounddown_pow_of_two(size)));
	else if (res->flags & IORESOURCE_MEM)
		return round_up(start, max_t(resource_size_t, SZ_1M,
					     rounddown_pow_of_two(size)));
	else
		return start;
}

static void mvebu_pcie_enable(struct mvebu_pcie *pcie)
{
	struct hw_pci hw;

	memset(&hw, 0, sizeof(hw));

#ifdef CONFIG_PCI_MSI
	hw.msi_ctrl = pcie->msi;
#endif

	hw.nr_controllers = 1;
	hw.private_data = (void **)&pcie;
	hw.setup = mvebu_pcie_setup;
	hw.map_irq = of_irq_parse_and_map_pci;
	hw.ops = &mvebu_pcie_ops;
	hw.align_resource = mvebu_pcie_align_resource;

	pci_common_init_dev(&pcie->pdev->dev, &hw);
}

/*
 * Looks up the list of register addresses encoded into the reg =
 * <...> property for one that matches the given port/lane. Once
 * found, maps it.
 */
static void __iomem *mvebu_pcie_map_registers(struct platform_device *pdev,
					      struct device_node *np,
					      struct mvebu_pcie_port *port)
{
	struct resource regs;
	int ret = 0;

	ret = of_address_to_resource(np, 0, &regs);
	if (ret)
		return ERR_PTR(ret);

	return devm_ioremap_resource(&pdev->dev, &regs);
}

#define DT_FLAGS_TO_TYPE(flags)		(((flags) >> 24) & 0x03)
#define DT_TYPE_IO			0x1
#define DT_TYPE_MEM32			0x2
#define DT_CPUADDR_TO_TARGET(cpuaddr)	(((cpuaddr) >> 56) & 0xFF)
#define DT_CPUADDR_TO_ATTR(cpuaddr)	(((cpuaddr) >> 48) & 0xFF)

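/*
 * Walk the parent node's "ranges" property looking for the entry that
 * matches this port's PCI slot and resource type, and extract the MBus
 * target and attribute encoded in the top two bytes of the parent CPU
 * address.
 */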
static int mvebu_get_tgt_attr(struct device_node *np, int devfn,
			      unsigned long type,
			      unsigned int *tgt,
			      unsigned int *attr)
{
	const int na = 3, ns = 2;
	const __be32 *range;
	int rlen, nranges, rangesz, pna, i;

	*tgt = -1;
	*attr = -1;

	range = of_get_property(np, "ranges", &rlen);
	if (!range)
		return -EINVAL;

	pna = of_n_addr_cells(np);
	rangesz = pna + na + ns;
	nranges = rlen / sizeof(__be32) / rangesz;

	for (i = 0; i < nranges; i++, range += rangesz) {
		u32 flags = of_read_number(range, 1);
		u32 slot = of_read_number(range + 1, 1);
		u64 cpuaddr = of_read_number(range + na, pna);
		unsigned long rtype;

		if (DT_FLAGS_TO_TYPE(flags) == DT_TYPE_IO)
			rtype = IORESOURCE_IO;
		else if (DT_FLAGS_TO_TYPE(flags) == DT_TYPE_MEM32)
			rtype = IORESOURCE_MEM;
		else
			continue;

		if (slot == PCI_SLOT(devfn) && type == rtype) {
			*tgt = DT_CPUADDR_TO_TARGET(cpuaddr);
			*attr = DT_CPUADDR_TO_ATTR(cpuaddr);
			return 0;
		}
	}

	return -ENOENT;
}

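/*
 * If an "msi-parent" phandle is present and an MSI controller has been
 * registered for that node, attach it to this PCIe controller.
 */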
static void mvebu_pcie_msi_enable(struct mvebu_pcie *pcie)
{
	struct device_node *msi_node;

	msi_node = of_parse_phandle(pcie->pdev->dev.of_node,
				    "msi-parent", 0);
	if (!msi_node)
		return;

	pcie->msi = of_pci_find_msi_chip_by_node(msi_node);
	of_node_put(msi_node);

	if (pcie->msi)
		pcie->msi->dev = &pcie->pdev->dev;
}

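/*
 * Suspend saves each port's PCIE_STAT register; resume restores it and
 * re-runs the full hardware setup (MBus windows, command and mask
 * registers).
 */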
#ifdef CONFIG_PM_SLEEP
static int mvebu_pcie_suspend(struct device *dev)
{
	struct mvebu_pcie *pcie;
	int i;

	pcie = dev_get_drvdata(dev);
	for (i = 0; i < pcie->nports; i++) {
		struct mvebu_pcie_port *port = pcie->ports + i;
		port->saved_pcie_stat = mvebu_readl(port, PCIE_STAT_OFF);
	}

	return 0;
}

static int mvebu_pcie_resume(struct device *dev)
{
	struct mvebu_pcie *pcie;
	int i;

	pcie = dev_get_drvdata(dev);
	for (i = 0; i < pcie->nports; i++) {
		struct mvebu_pcie_port *port = pcie->ports + i;
		mvebu_writel(port, port->saved_pcie_stat, PCIE_STAT_OFF);
		mvebu_pcie_setup_hw(port);
	}

	return 0;
}
#endif

static void mvebu_pcie_port_clk_put(void *data)
{
	struct mvebu_pcie_port *port = data;

	clk_put(port->clk);
}

static int mvebu_pcie_parse_port(struct mvebu_pcie *pcie,
				 struct mvebu_pcie_port *port, struct device_node *child)
{
	struct device *dev = &pcie->pdev->dev;
	enum of_gpio_flags flags;
	int reset_gpio, ret;

	port->pcie = pcie;

	if (of_property_read_u32(child, "marvell,pcie-port", &port->port)) {
		dev_warn(dev, "ignoring %s, missing pcie-port property\n",
			 of_node_full_name(child));
		goto skip;
	}

	if (of_property_read_u32(child, "marvell,pcie-lane", &port->lane))
		port->lane = 0;

	port->name = devm_kasprintf(dev, GFP_KERNEL, "pcie%d.%d", port->port,
				    port->lane);
	if (!port->name) {
		ret = -ENOMEM;
		goto err;
	}

	port->devfn = of_pci_get_devfn(child);
	if (port->devfn < 0)
		goto skip;

	ret = mvebu_get_tgt_attr(dev->of_node, port->devfn, IORESOURCE_MEM,
				 &port->mem_target, &port->mem_attr);
	if (ret < 0) {
		dev_err(dev, "%s: cannot get tgt/attr for mem window\n",
			port->name);
		goto skip;
	}

	if (resource_size(&pcie->io) != 0) {
		mvebu_get_tgt_attr(dev->of_node, port->devfn, IORESOURCE_IO,
				   &port->io_target, &port->io_attr);
	} else {
		port->io_target = -1;
		port->io_attr = -1;
	}

	reset_gpio = of_get_named_gpio_flags(child, "reset-gpios", 0, &flags);
	if (reset_gpio == -EPROBE_DEFER) {
		ret = reset_gpio;
		goto err;
	}

	if (gpio_is_valid(reset_gpio)) {
		unsigned long gpio_flags;

		port->reset_name = devm_kasprintf(dev, GFP_KERNEL, "%s-reset",
						  port->name);
		if (!port->reset_name) {
			ret = -ENOMEM;
			goto err;
		}

		if (flags & OF_GPIO_ACTIVE_LOW) {
			dev_info(dev, "%s: reset gpio is active low\n",
				 of_node_full_name(child));
			gpio_flags = GPIOF_ACTIVE_LOW |
				     GPIOF_OUT_INIT_LOW;
		} else {
			gpio_flags = GPIOF_OUT_INIT_HIGH;
		}

		ret = devm_gpio_request_one(dev, reset_gpio, gpio_flags,
					    port->reset_name);
		if (ret) {
			if (ret == -EPROBE_DEFER)
				goto err;
			goto skip;
		}

		port->reset_gpio = gpio_to_desc(reset_gpio);
	}

	port->clk = of_clk_get_by_name(child, NULL);
	if (IS_ERR(port->clk)) {
		dev_err(dev, "%s: cannot get clock\n", port->name);
		goto skip;
	}

	ret = devm_add_action(dev, mvebu_pcie_port_clk_put, port);
	if (ret < 0) {
		clk_put(port->clk);
		goto err;
	}

	return 1;

skip:
	ret = 0;

	/* In the case of skipping, we need to free these */
	devm_kfree(dev, port->reset_name);
	port->reset_name = NULL;
	devm_kfree(dev, port->name);
	port->name = NULL;

err:
	return ret;
}

/*
 * Power up a PCIe port. PCIe requires the refclk to be stable for 100µs
 * prior to releasing PERST. See table 2-4 in section 2.6.2 AC Specifications
 * of the PCI Express Card Electromechanical Specification, 1.1.
 */
static int mvebu_pcie_powerup(struct mvebu_pcie_port *port)
{
	int ret;

	ret = clk_prepare_enable(port->clk);
	if (ret < 0)
		return ret;

	if (port->reset_gpio) {
		u32 reset_udelay = 20000;

		of_property_read_u32(port->dn, "reset-delay-us",
				     &reset_udelay);

		udelay(100);

		gpiod_set_value_cansleep(port->reset_gpio, 0);
		msleep(reset_udelay / 1000);
	}

	return 0;
}

/*
 * Power down a PCIe port. Strictly, PCIe requires us to place the card
 * in D3hot state before asserting PERST#.
 */
static void mvebu_pcie_powerdown(struct mvebu_pcie_port *port)
{
	if (port->reset_gpio)
		gpiod_set_value_cansleep(port->reset_gpio, 1);

	clk_disable_unprepare(port->clk);
}

static int mvebu_pcie_probe(struct platform_device *pdev)
{
	struct mvebu_pcie *pcie;
	struct device_node *np = pdev->dev.of_node;
	struct device_node *child;
	int num, i, ret;

	pcie = devm_kzalloc(&pdev->dev, sizeof(struct mvebu_pcie),
			    GFP_KERNEL);
	if (!pcie)
		return -ENOMEM;

	pcie->pdev = pdev;
	platform_set_drvdata(pdev, pcie);

	/* Get the PCIe memory and I/O aperture */
	mvebu_mbus_get_pcie_mem_aperture(&pcie->mem);
	if (resource_size(&pcie->mem) == 0) {
		dev_err(&pdev->dev, "invalid memory aperture size\n");
		return -EINVAL;
	}

	mvebu_mbus_get_pcie_io_aperture(&pcie->io);

	if (resource_size(&pcie->io) != 0) {
		pcie->realio.flags = pcie->io.flags;
		pcie->realio.start = PCIBIOS_MIN_IO;
		pcie->realio.end = min_t(resource_size_t,
					 IO_SPACE_LIMIT,
					 resource_size(&pcie->io));
	} else
		pcie->realio = pcie->io;

	/* Get the bus range */
	ret = of_pci_parse_bus_range(np, &pcie->busn);
	if (ret) {
		dev_err(&pdev->dev, "failed to parse bus-range property: %d\n",
			ret);
		return ret;
	}

	num = of_get_available_child_count(pdev->dev.of_node);

	pcie->ports = devm_kcalloc(&pdev->dev, num, sizeof(*pcie->ports),
				   GFP_KERNEL);
	if (!pcie->ports)
		return -ENOMEM;

	i = 0;
	for_each_available_child_of_node(pdev->dev.of_node, child) {
		struct mvebu_pcie_port *port = &pcie->ports[i];

		ret = mvebu_pcie_parse_port(pcie, port, child);
		if (ret < 0) {
			of_node_put(child);
			return ret;
		} else if (ret == 0) {
			continue;
		}

		port->dn = child;
		i++;
	}
	pcie->nports = i;

	for (i = 0; i < pcie->nports; i++) {
		struct mvebu_pcie_port *port = &pcie->ports[i];

		child = port->dn;
		if (!child)
			continue;

		ret = mvebu_pcie_powerup(port);
		if (ret < 0)
			continue;

		port->base = mvebu_pcie_map_registers(pdev, child, port);
		if (IS_ERR(port->base)) {
			dev_err(&pdev->dev, "%s: cannot map registers\n",
				port->name);
			port->base = NULL;
			mvebu_pcie_powerdown(port);
			continue;
		}

		mvebu_pcie_set_local_dev_nr(port, 1);
		mvebu_sw_pci_bridge_init(port);
	}

	pcie->nports = i;

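	/*
	 * Map the fixed virtual PCI I/O space onto the physical PCIe
	 * I/O aperture, one 64 KB chunk at a time.
	 */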
	for (i = 0; i < (IO_SPACE_LIMIT - SZ_64K); i += SZ_64K)
		pci_ioremap_io(i, pcie->io.start + i);

	mvebu_pcie_msi_enable(pcie);
	mvebu_pcie_enable(pcie);

	platform_set_drvdata(pdev, pcie);

	return 0;
}

static const struct of_device_id mvebu_pcie_of_match_table[] = {
	{ .compatible = "marvell,armada-xp-pcie", },
	{ .compatible = "marvell,armada-370-pcie", },
	{ .compatible = "marvell,dove-pcie", },
	{ .compatible = "marvell,kirkwood-pcie", },
	{},
};

static const struct dev_pm_ops mvebu_pcie_pm_ops = {
	SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(mvebu_pcie_suspend, mvebu_pcie_resume)
};

static struct platform_driver mvebu_pcie_driver = {
	.driver = {
		.name = "mvebu-pcie",
		.of_match_table = mvebu_pcie_of_match_table,
		/* driver unloading/unbinding currently not supported */
		.suppress_bind_attrs = true,
		.pm = &mvebu_pcie_pm_ops,
	},
	.probe = mvebu_pcie_probe,
};
builtin_platform_driver(mvebu_pcie_driver);