]> git.proxmox.com Git - mirror_ubuntu-bionic-kernel.git/blob - drivers/pci/probe.c
PCI: Restore config space on runtime resume despite being unbound
[mirror_ubuntu-bionic-kernel.git] / drivers / pci / probe.c
1 /*
2 * probe.c - PCI detection and setup code
3 */
4
5 #include <linux/kernel.h>
6 #include <linux/delay.h>
7 #include <linux/init.h>
8 #include <linux/pci.h>
9 #include <linux/of_device.h>
10 #include <linux/of_pci.h>
11 #include <linux/pci_hotplug.h>
12 #include <linux/slab.h>
13 #include <linux/module.h>
14 #include <linux/cpumask.h>
15 #include <linux/pci-aspm.h>
16 #include <linux/aer.h>
17 #include <linux/acpi.h>
18 #include <linux/irqdomain.h>
19 #include <linux/pm_runtime.h>
20 #include "pci.h"
21
22 #define CARDBUS_LATENCY_TIMER 176 /* secondary latency timer */
23 #define CARDBUS_RESERVE_BUSNR 3
24
25 static struct resource busn_resource = {
26 .name = "PCI busn",
27 .start = 0,
28 .end = 255,
29 .flags = IORESOURCE_BUS,
30 };
31
32 /* Ugh. Need to stop exporting this to modules. */
33 LIST_HEAD(pci_root_buses);
34 EXPORT_SYMBOL(pci_root_buses);
35
36 static LIST_HEAD(pci_domain_busn_res_list);
37
38 struct pci_domain_busn_res {
39 struct list_head list;
40 struct resource res;
41 int domain_nr;
42 };
43
44 static struct resource *get_pci_domain_busn_res(int domain_nr)
45 {
46 struct pci_domain_busn_res *r;
47
48 list_for_each_entry(r, &pci_domain_busn_res_list, list)
49 if (r->domain_nr == domain_nr)
50 return &r->res;
51
52 r = kzalloc(sizeof(*r), GFP_KERNEL);
53 if (!r)
54 return NULL;
55
56 r->domain_nr = domain_nr;
57 r->res.start = 0;
58 r->res.end = 0xff;
59 r->res.flags = IORESOURCE_BUS | IORESOURCE_PCI_FIXED;
60
61 list_add_tail(&r->list, &pci_domain_busn_res_list);
62
63 return &r->res;
64 }
65
66 static int find_anything(struct device *dev, void *data)
67 {
68 return 1;
69 }
70
71 /*
72 * Some device drivers need know if pci is initiated.
73 * Basically, we think pci is not initiated when there
74 * is no device to be found on the pci_bus_type.
75 */
76 int no_pci_devices(void)
77 {
78 struct device *dev;
79 int no_devices;
80
81 dev = bus_find_device(&pci_bus_type, NULL, NULL, find_anything);
82 no_devices = (dev == NULL);
83 put_device(dev);
84 return no_devices;
85 }
86 EXPORT_SYMBOL(no_pci_devices);
87
88 /*
89 * PCI Bus Class
90 */
91 static void release_pcibus_dev(struct device *dev)
92 {
93 struct pci_bus *pci_bus = to_pci_bus(dev);
94
95 put_device(pci_bus->bridge);
96 pci_bus_remove_resources(pci_bus);
97 pci_release_bus_of_node(pci_bus);
98 kfree(pci_bus);
99 }
100
101 static struct class pcibus_class = {
102 .name = "pci_bus",
103 .dev_release = &release_pcibus_dev,
104 .dev_groups = pcibus_groups,
105 };
106
107 static int __init pcibus_class_init(void)
108 {
109 return class_register(&pcibus_class);
110 }
111 postcore_initcall(pcibus_class_init);
112
113 static u64 pci_size(u64 base, u64 maxbase, u64 mask)
114 {
115 u64 size = mask & maxbase; /* Find the significant bits */
116 if (!size)
117 return 0;
118
119 /* Get the lowest of them to find the decode size, and
120 from that the extent. */
121 size = (size & ~(size-1)) - 1;
122
123 /* base == maxbase can be valid only if the BAR has
124 already been programmed with all 1s. */
125 if (base == maxbase && ((base | size) & mask) != mask)
126 return 0;
127
128 return size;
129 }
130
131 static inline unsigned long decode_bar(struct pci_dev *dev, u32 bar)
132 {
133 u32 mem_type;
134 unsigned long flags;
135
136 if ((bar & PCI_BASE_ADDRESS_SPACE) == PCI_BASE_ADDRESS_SPACE_IO) {
137 flags = bar & ~PCI_BASE_ADDRESS_IO_MASK;
138 flags |= IORESOURCE_IO;
139 return flags;
140 }
141
142 flags = bar & ~PCI_BASE_ADDRESS_MEM_MASK;
143 flags |= IORESOURCE_MEM;
144 if (flags & PCI_BASE_ADDRESS_MEM_PREFETCH)
145 flags |= IORESOURCE_PREFETCH;
146
147 mem_type = bar & PCI_BASE_ADDRESS_MEM_TYPE_MASK;
148 switch (mem_type) {
149 case PCI_BASE_ADDRESS_MEM_TYPE_32:
150 break;
151 case PCI_BASE_ADDRESS_MEM_TYPE_1M:
152 /* 1M mem BAR treated as 32-bit BAR */
153 break;
154 case PCI_BASE_ADDRESS_MEM_TYPE_64:
155 flags |= IORESOURCE_MEM_64;
156 break;
157 default:
158 /* mem unknown type treated as 32-bit BAR */
159 break;
160 }
161 return flags;
162 }
163
164 #define PCI_COMMAND_DECODE_ENABLE (PCI_COMMAND_MEMORY | PCI_COMMAND_IO)
165
166 /**
167 * pci_read_base - read a PCI BAR
168 * @dev: the PCI device
169 * @type: type of the BAR
170 * @res: resource buffer to be filled in
171 * @pos: BAR position in the config space
172 *
173 * Returns 1 if the BAR is 64-bit, or 0 if 32-bit.
174 */
175 int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type,
176 struct resource *res, unsigned int pos)
177 {
178 u32 l = 0, sz = 0, mask;
179 u64 l64, sz64, mask64;
180 u16 orig_cmd;
181 struct pci_bus_region region, inverted_region;
182
183 mask = type ? PCI_ROM_ADDRESS_MASK : ~0;
184
185 /* No printks while decoding is disabled! */
186 if (!dev->mmio_always_on) {
187 pci_read_config_word(dev, PCI_COMMAND, &orig_cmd);
188 if (orig_cmd & PCI_COMMAND_DECODE_ENABLE) {
189 pci_write_config_word(dev, PCI_COMMAND,
190 orig_cmd & ~PCI_COMMAND_DECODE_ENABLE);
191 }
192 }
193
194 res->name = pci_name(dev);
195
196 pci_read_config_dword(dev, pos, &l);
197 pci_write_config_dword(dev, pos, l | mask);
198 pci_read_config_dword(dev, pos, &sz);
199 pci_write_config_dword(dev, pos, l);
200
201 /*
202 * All bits set in sz means the device isn't working properly.
203 * If the BAR isn't implemented, all bits must be 0. If it's a
204 * memory BAR or a ROM, bit 0 must be clear; if it's an io BAR, bit
205 * 1 must be clear.
206 */
207 if (sz == 0xffffffff)
208 sz = 0;
209
210 /*
211 * I don't know how l can have all bits set. Copied from old code.
212 * Maybe it fixes a bug on some ancient platform.
213 */
214 if (l == 0xffffffff)
215 l = 0;
216
217 if (type == pci_bar_unknown) {
218 res->flags = decode_bar(dev, l);
219 res->flags |= IORESOURCE_SIZEALIGN;
220 if (res->flags & IORESOURCE_IO) {
221 l64 = l & PCI_BASE_ADDRESS_IO_MASK;
222 sz64 = sz & PCI_BASE_ADDRESS_IO_MASK;
223 mask64 = PCI_BASE_ADDRESS_IO_MASK & (u32)IO_SPACE_LIMIT;
224 } else {
225 l64 = l & PCI_BASE_ADDRESS_MEM_MASK;
226 sz64 = sz & PCI_BASE_ADDRESS_MEM_MASK;
227 mask64 = (u32)PCI_BASE_ADDRESS_MEM_MASK;
228 }
229 } else {
230 if (l & PCI_ROM_ADDRESS_ENABLE)
231 res->flags |= IORESOURCE_ROM_ENABLE;
232 l64 = l & PCI_ROM_ADDRESS_MASK;
233 sz64 = sz & PCI_ROM_ADDRESS_MASK;
234 mask64 = PCI_ROM_ADDRESS_MASK;
235 }
236
237 if (res->flags & IORESOURCE_MEM_64) {
238 pci_read_config_dword(dev, pos + 4, &l);
239 pci_write_config_dword(dev, pos + 4, ~0);
240 pci_read_config_dword(dev, pos + 4, &sz);
241 pci_write_config_dword(dev, pos + 4, l);
242
243 l64 |= ((u64)l << 32);
244 sz64 |= ((u64)sz << 32);
245 mask64 |= ((u64)~0 << 32);
246 }
247
248 if (!dev->mmio_always_on && (orig_cmd & PCI_COMMAND_DECODE_ENABLE))
249 pci_write_config_word(dev, PCI_COMMAND, orig_cmd);
250
251 if (!sz64)
252 goto fail;
253
254 sz64 = pci_size(l64, sz64, mask64);
255 if (!sz64) {
256 dev_info(&dev->dev, FW_BUG "reg 0x%x: invalid BAR (can't size)\n",
257 pos);
258 goto fail;
259 }
260
261 if (res->flags & IORESOURCE_MEM_64) {
262 if ((sizeof(pci_bus_addr_t) < 8 || sizeof(resource_size_t) < 8)
263 && sz64 > 0x100000000ULL) {
264 res->flags |= IORESOURCE_UNSET | IORESOURCE_DISABLED;
265 res->start = 0;
266 res->end = 0;
267 dev_err(&dev->dev, "reg 0x%x: can't handle BAR larger than 4GB (size %#010llx)\n",
268 pos, (unsigned long long)sz64);
269 goto out;
270 }
271
272 if ((sizeof(pci_bus_addr_t) < 8) && l) {
273 /* Above 32-bit boundary; try to reallocate */
274 res->flags |= IORESOURCE_UNSET;
275 res->start = 0;
276 res->end = sz64;
277 dev_info(&dev->dev, "reg 0x%x: can't handle BAR above 4GB (bus address %#010llx)\n",
278 pos, (unsigned long long)l64);
279 goto out;
280 }
281 }
282
283 region.start = l64;
284 region.end = l64 + sz64;
285
286 pcibios_bus_to_resource(dev->bus, res, &region);
287 pcibios_resource_to_bus(dev->bus, &inverted_region, res);
288
289 /*
290 * If "A" is a BAR value (a bus address), "bus_to_resource(A)" is
291 * the corresponding resource address (the physical address used by
292 * the CPU. Converting that resource address back to a bus address
293 * should yield the original BAR value:
294 *
295 * resource_to_bus(bus_to_resource(A)) == A
296 *
297 * If it doesn't, CPU accesses to "bus_to_resource(A)" will not
298 * be claimed by the device.
299 */
300 if (inverted_region.start != region.start) {
301 res->flags |= IORESOURCE_UNSET;
302 res->start = 0;
303 res->end = region.end - region.start;
304 dev_info(&dev->dev, "reg 0x%x: initial BAR value %#010llx invalid\n",
305 pos, (unsigned long long)region.start);
306 }
307
308 goto out;
309
310
311 fail:
312 res->flags = 0;
313 out:
314 if (res->flags)
315 dev_printk(KERN_DEBUG, &dev->dev, "reg 0x%x: %pR\n", pos, res);
316
317 return (res->flags & IORESOURCE_MEM_64) ? 1 : 0;
318 }
319
320 static void pci_read_bases(struct pci_dev *dev, unsigned int howmany, int rom)
321 {
322 unsigned int pos, reg;
323
324 if (dev->non_compliant_bars)
325 return;
326
327 for (pos = 0; pos < howmany; pos++) {
328 struct resource *res = &dev->resource[pos];
329 reg = PCI_BASE_ADDRESS_0 + (pos << 2);
330 pos += __pci_read_base(dev, pci_bar_unknown, res, reg);
331 }
332
333 if (rom) {
334 struct resource *res = &dev->resource[PCI_ROM_RESOURCE];
335 dev->rom_base_reg = rom;
336 res->flags = IORESOURCE_MEM | IORESOURCE_PREFETCH |
337 IORESOURCE_READONLY | IORESOURCE_SIZEALIGN;
338 __pci_read_base(dev, pci_bar_mem32, res, rom);
339 }
340 }
341
342 static void pci_read_bridge_io(struct pci_bus *child)
343 {
344 struct pci_dev *dev = child->self;
345 u8 io_base_lo, io_limit_lo;
346 unsigned long io_mask, io_granularity, base, limit;
347 struct pci_bus_region region;
348 struct resource *res;
349
350 io_mask = PCI_IO_RANGE_MASK;
351 io_granularity = 0x1000;
352 if (dev->io_window_1k) {
353 /* Support 1K I/O space granularity */
354 io_mask = PCI_IO_1K_RANGE_MASK;
355 io_granularity = 0x400;
356 }
357
358 res = child->resource[0];
359 pci_read_config_byte(dev, PCI_IO_BASE, &io_base_lo);
360 pci_read_config_byte(dev, PCI_IO_LIMIT, &io_limit_lo);
361 base = (io_base_lo & io_mask) << 8;
362 limit = (io_limit_lo & io_mask) << 8;
363
364 if ((io_base_lo & PCI_IO_RANGE_TYPE_MASK) == PCI_IO_RANGE_TYPE_32) {
365 u16 io_base_hi, io_limit_hi;
366
367 pci_read_config_word(dev, PCI_IO_BASE_UPPER16, &io_base_hi);
368 pci_read_config_word(dev, PCI_IO_LIMIT_UPPER16, &io_limit_hi);
369 base |= ((unsigned long) io_base_hi << 16);
370 limit |= ((unsigned long) io_limit_hi << 16);
371 }
372
373 if (base <= limit) {
374 res->flags = (io_base_lo & PCI_IO_RANGE_TYPE_MASK) | IORESOURCE_IO;
375 region.start = base;
376 region.end = limit + io_granularity - 1;
377 pcibios_bus_to_resource(dev->bus, res, &region);
378 dev_printk(KERN_DEBUG, &dev->dev, " bridge window %pR\n", res);
379 }
380 }
381
382 static void pci_read_bridge_mmio(struct pci_bus *child)
383 {
384 struct pci_dev *dev = child->self;
385 u16 mem_base_lo, mem_limit_lo;
386 unsigned long base, limit;
387 struct pci_bus_region region;
388 struct resource *res;
389
390 res = child->resource[1];
391 pci_read_config_word(dev, PCI_MEMORY_BASE, &mem_base_lo);
392 pci_read_config_word(dev, PCI_MEMORY_LIMIT, &mem_limit_lo);
393 base = ((unsigned long) mem_base_lo & PCI_MEMORY_RANGE_MASK) << 16;
394 limit = ((unsigned long) mem_limit_lo & PCI_MEMORY_RANGE_MASK) << 16;
395 if (base <= limit) {
396 res->flags = (mem_base_lo & PCI_MEMORY_RANGE_TYPE_MASK) | IORESOURCE_MEM;
397 region.start = base;
398 region.end = limit + 0xfffff;
399 pcibios_bus_to_resource(dev->bus, res, &region);
400 dev_printk(KERN_DEBUG, &dev->dev, " bridge window %pR\n", res);
401 }
402 }
403
404 static void pci_read_bridge_mmio_pref(struct pci_bus *child)
405 {
406 struct pci_dev *dev = child->self;
407 u16 mem_base_lo, mem_limit_lo;
408 u64 base64, limit64;
409 pci_bus_addr_t base, limit;
410 struct pci_bus_region region;
411 struct resource *res;
412
413 res = child->resource[2];
414 pci_read_config_word(dev, PCI_PREF_MEMORY_BASE, &mem_base_lo);
415 pci_read_config_word(dev, PCI_PREF_MEMORY_LIMIT, &mem_limit_lo);
416 base64 = (mem_base_lo & PCI_PREF_RANGE_MASK) << 16;
417 limit64 = (mem_limit_lo & PCI_PREF_RANGE_MASK) << 16;
418
419 if ((mem_base_lo & PCI_PREF_RANGE_TYPE_MASK) == PCI_PREF_RANGE_TYPE_64) {
420 u32 mem_base_hi, mem_limit_hi;
421
422 pci_read_config_dword(dev, PCI_PREF_BASE_UPPER32, &mem_base_hi);
423 pci_read_config_dword(dev, PCI_PREF_LIMIT_UPPER32, &mem_limit_hi);
424
425 /*
426 * Some bridges set the base > limit by default, and some
427 * (broken) BIOSes do not initialize them. If we find
428 * this, just assume they are not being used.
429 */
430 if (mem_base_hi <= mem_limit_hi) {
431 base64 |= (u64) mem_base_hi << 32;
432 limit64 |= (u64) mem_limit_hi << 32;
433 }
434 }
435
436 base = (pci_bus_addr_t) base64;
437 limit = (pci_bus_addr_t) limit64;
438
439 if (base != base64) {
440 dev_err(&dev->dev, "can't handle bridge window above 4GB (bus address %#010llx)\n",
441 (unsigned long long) base64);
442 return;
443 }
444
445 if (base <= limit) {
446 res->flags = (mem_base_lo & PCI_PREF_RANGE_TYPE_MASK) |
447 IORESOURCE_MEM | IORESOURCE_PREFETCH;
448 if (res->flags & PCI_PREF_RANGE_TYPE_64)
449 res->flags |= IORESOURCE_MEM_64;
450 region.start = base;
451 region.end = limit + 0xfffff;
452 pcibios_bus_to_resource(dev->bus, res, &region);
453 dev_printk(KERN_DEBUG, &dev->dev, " bridge window %pR\n", res);
454 }
455 }
456
457 void pci_read_bridge_bases(struct pci_bus *child)
458 {
459 struct pci_dev *dev = child->self;
460 struct resource *res;
461 int i;
462
463 if (pci_is_root_bus(child)) /* It's a host bus, nothing to read */
464 return;
465
466 dev_info(&dev->dev, "PCI bridge to %pR%s\n",
467 &child->busn_res,
468 dev->transparent ? " (subtractive decode)" : "");
469
470 pci_bus_remove_resources(child);
471 for (i = 0; i < PCI_BRIDGE_RESOURCE_NUM; i++)
472 child->resource[i] = &dev->resource[PCI_BRIDGE_RESOURCES+i];
473
474 pci_read_bridge_io(child);
475 pci_read_bridge_mmio(child);
476 pci_read_bridge_mmio_pref(child);
477
478 if (dev->transparent) {
479 pci_bus_for_each_resource(child->parent, res, i) {
480 if (res && res->flags) {
481 pci_bus_add_resource(child, res,
482 PCI_SUBTRACTIVE_DECODE);
483 dev_printk(KERN_DEBUG, &dev->dev,
484 " bridge window %pR (subtractive decode)\n",
485 res);
486 }
487 }
488 }
489 }
490
491 static struct pci_bus *pci_alloc_bus(struct pci_bus *parent)
492 {
493 struct pci_bus *b;
494
495 b = kzalloc(sizeof(*b), GFP_KERNEL);
496 if (!b)
497 return NULL;
498
499 INIT_LIST_HEAD(&b->node);
500 INIT_LIST_HEAD(&b->children);
501 INIT_LIST_HEAD(&b->devices);
502 INIT_LIST_HEAD(&b->slots);
503 INIT_LIST_HEAD(&b->resources);
504 b->max_bus_speed = PCI_SPEED_UNKNOWN;
505 b->cur_bus_speed = PCI_SPEED_UNKNOWN;
506 #ifdef CONFIG_PCI_DOMAINS_GENERIC
507 if (parent)
508 b->domain_nr = parent->domain_nr;
509 #endif
510 return b;
511 }
512
513 static void devm_pci_release_host_bridge_dev(struct device *dev)
514 {
515 struct pci_host_bridge *bridge = to_pci_host_bridge(dev);
516
517 if (bridge->release_fn)
518 bridge->release_fn(bridge);
519 }
520
521 static void pci_release_host_bridge_dev(struct device *dev)
522 {
523 devm_pci_release_host_bridge_dev(dev);
524 pci_free_host_bridge(to_pci_host_bridge(dev));
525 }
526
527 struct pci_host_bridge *pci_alloc_host_bridge(size_t priv)
528 {
529 struct pci_host_bridge *bridge;
530
531 bridge = kzalloc(sizeof(*bridge) + priv, GFP_KERNEL);
532 if (!bridge)
533 return NULL;
534
535 INIT_LIST_HEAD(&bridge->windows);
536 bridge->dev.release = pci_release_host_bridge_dev;
537
538 return bridge;
539 }
540 EXPORT_SYMBOL(pci_alloc_host_bridge);
541
542 struct pci_host_bridge *devm_pci_alloc_host_bridge(struct device *dev,
543 size_t priv)
544 {
545 struct pci_host_bridge *bridge;
546
547 bridge = devm_kzalloc(dev, sizeof(*bridge) + priv, GFP_KERNEL);
548 if (!bridge)
549 return NULL;
550
551 INIT_LIST_HEAD(&bridge->windows);
552 bridge->dev.release = devm_pci_release_host_bridge_dev;
553
554 return bridge;
555 }
556 EXPORT_SYMBOL(devm_pci_alloc_host_bridge);
557
558 void pci_free_host_bridge(struct pci_host_bridge *bridge)
559 {
560 pci_free_resource_list(&bridge->windows);
561
562 kfree(bridge);
563 }
564 EXPORT_SYMBOL(pci_free_host_bridge);
565
566 static const unsigned char pcix_bus_speed[] = {
567 PCI_SPEED_UNKNOWN, /* 0 */
568 PCI_SPEED_66MHz_PCIX, /* 1 */
569 PCI_SPEED_100MHz_PCIX, /* 2 */
570 PCI_SPEED_133MHz_PCIX, /* 3 */
571 PCI_SPEED_UNKNOWN, /* 4 */
572 PCI_SPEED_66MHz_PCIX_ECC, /* 5 */
573 PCI_SPEED_100MHz_PCIX_ECC, /* 6 */
574 PCI_SPEED_133MHz_PCIX_ECC, /* 7 */
575 PCI_SPEED_UNKNOWN, /* 8 */
576 PCI_SPEED_66MHz_PCIX_266, /* 9 */
577 PCI_SPEED_100MHz_PCIX_266, /* A */
578 PCI_SPEED_133MHz_PCIX_266, /* B */
579 PCI_SPEED_UNKNOWN, /* C */
580 PCI_SPEED_66MHz_PCIX_533, /* D */
581 PCI_SPEED_100MHz_PCIX_533, /* E */
582 PCI_SPEED_133MHz_PCIX_533 /* F */
583 };
584
585 const unsigned char pcie_link_speed[] = {
586 PCI_SPEED_UNKNOWN, /* 0 */
587 PCIE_SPEED_2_5GT, /* 1 */
588 PCIE_SPEED_5_0GT, /* 2 */
589 PCIE_SPEED_8_0GT, /* 3 */
590 PCIE_SPEED_16_0GT, /* 4 */
591 PCI_SPEED_UNKNOWN, /* 5 */
592 PCI_SPEED_UNKNOWN, /* 6 */
593 PCI_SPEED_UNKNOWN, /* 7 */
594 PCI_SPEED_UNKNOWN, /* 8 */
595 PCI_SPEED_UNKNOWN, /* 9 */
596 PCI_SPEED_UNKNOWN, /* A */
597 PCI_SPEED_UNKNOWN, /* B */
598 PCI_SPEED_UNKNOWN, /* C */
599 PCI_SPEED_UNKNOWN, /* D */
600 PCI_SPEED_UNKNOWN, /* E */
601 PCI_SPEED_UNKNOWN /* F */
602 };
603
604 void pcie_update_link_speed(struct pci_bus *bus, u16 linksta)
605 {
606 bus->cur_bus_speed = pcie_link_speed[linksta & PCI_EXP_LNKSTA_CLS];
607 }
608 EXPORT_SYMBOL_GPL(pcie_update_link_speed);
609
610 static unsigned char agp_speeds[] = {
611 AGP_UNKNOWN,
612 AGP_1X,
613 AGP_2X,
614 AGP_4X,
615 AGP_8X
616 };
617
618 static enum pci_bus_speed agp_speed(int agp3, int agpstat)
619 {
620 int index = 0;
621
622 if (agpstat & 4)
623 index = 3;
624 else if (agpstat & 2)
625 index = 2;
626 else if (agpstat & 1)
627 index = 1;
628 else
629 goto out;
630
631 if (agp3) {
632 index += 2;
633 if (index == 5)
634 index = 0;
635 }
636
637 out:
638 return agp_speeds[index];
639 }
640
641 static void pci_set_bus_speed(struct pci_bus *bus)
642 {
643 struct pci_dev *bridge = bus->self;
644 int pos;
645
646 pos = pci_find_capability(bridge, PCI_CAP_ID_AGP);
647 if (!pos)
648 pos = pci_find_capability(bridge, PCI_CAP_ID_AGP3);
649 if (pos) {
650 u32 agpstat, agpcmd;
651
652 pci_read_config_dword(bridge, pos + PCI_AGP_STATUS, &agpstat);
653 bus->max_bus_speed = agp_speed(agpstat & 8, agpstat & 7);
654
655 pci_read_config_dword(bridge, pos + PCI_AGP_COMMAND, &agpcmd);
656 bus->cur_bus_speed = agp_speed(agpstat & 8, agpcmd & 7);
657 }
658
659 pos = pci_find_capability(bridge, PCI_CAP_ID_PCIX);
660 if (pos) {
661 u16 status;
662 enum pci_bus_speed max;
663
664 pci_read_config_word(bridge, pos + PCI_X_BRIDGE_SSTATUS,
665 &status);
666
667 if (status & PCI_X_SSTATUS_533MHZ) {
668 max = PCI_SPEED_133MHz_PCIX_533;
669 } else if (status & PCI_X_SSTATUS_266MHZ) {
670 max = PCI_SPEED_133MHz_PCIX_266;
671 } else if (status & PCI_X_SSTATUS_133MHZ) {
672 if ((status & PCI_X_SSTATUS_VERS) == PCI_X_SSTATUS_V2)
673 max = PCI_SPEED_133MHz_PCIX_ECC;
674 else
675 max = PCI_SPEED_133MHz_PCIX;
676 } else {
677 max = PCI_SPEED_66MHz_PCIX;
678 }
679
680 bus->max_bus_speed = max;
681 bus->cur_bus_speed = pcix_bus_speed[
682 (status & PCI_X_SSTATUS_FREQ) >> 6];
683
684 return;
685 }
686
687 if (pci_is_pcie(bridge)) {
688 u32 linkcap;
689 u16 linksta;
690
691 pcie_capability_read_dword(bridge, PCI_EXP_LNKCAP, &linkcap);
692 bus->max_bus_speed = pcie_link_speed[linkcap & PCI_EXP_LNKCAP_SLS];
693
694 pcie_capability_read_word(bridge, PCI_EXP_LNKSTA, &linksta);
695 pcie_update_link_speed(bus, linksta);
696 }
697 }
698
699 static struct irq_domain *pci_host_bridge_msi_domain(struct pci_bus *bus)
700 {
701 struct irq_domain *d;
702
703 /*
704 * Any firmware interface that can resolve the msi_domain
705 * should be called from here.
706 */
707 d = pci_host_bridge_of_msi_domain(bus);
708 if (!d)
709 d = pci_host_bridge_acpi_msi_domain(bus);
710
711 #ifdef CONFIG_PCI_MSI_IRQ_DOMAIN
712 /*
713 * If no IRQ domain was found via the OF tree, try looking it up
714 * directly through the fwnode_handle.
715 */
716 if (!d) {
717 struct fwnode_handle *fwnode = pci_root_bus_fwnode(bus);
718
719 if (fwnode)
720 d = irq_find_matching_fwnode(fwnode,
721 DOMAIN_BUS_PCI_MSI);
722 }
723 #endif
724
725 return d;
726 }
727
728 static void pci_set_bus_msi_domain(struct pci_bus *bus)
729 {
730 struct irq_domain *d;
731 struct pci_bus *b;
732
733 /*
734 * The bus can be a root bus, a subordinate bus, or a virtual bus
735 * created by an SR-IOV device. Walk up to the first bridge device
736 * found or derive the domain from the host bridge.
737 */
738 for (b = bus, d = NULL; !d && !pci_is_root_bus(b); b = b->parent) {
739 if (b->self)
740 d = dev_get_msi_domain(&b->self->dev);
741 }
742
743 if (!d)
744 d = pci_host_bridge_msi_domain(b);
745
746 dev_set_msi_domain(&bus->dev, d);
747 }
748
749 static int pci_register_host_bridge(struct pci_host_bridge *bridge)
750 {
751 struct device *parent = bridge->dev.parent;
752 struct resource_entry *window, *n;
753 struct pci_bus *bus, *b;
754 resource_size_t offset;
755 LIST_HEAD(resources);
756 struct resource *res;
757 char addr[64], *fmt;
758 const char *name;
759 int err;
760
761 bus = pci_alloc_bus(NULL);
762 if (!bus)
763 return -ENOMEM;
764
765 bridge->bus = bus;
766
767 /* temporarily move resources off the list */
768 list_splice_init(&bridge->windows, &resources);
769 bus->sysdata = bridge->sysdata;
770 bus->msi = bridge->msi;
771 bus->ops = bridge->ops;
772 bus->number = bus->busn_res.start = bridge->busnr;
773 #ifdef CONFIG_PCI_DOMAINS_GENERIC
774 bus->domain_nr = pci_bus_find_domain_nr(bus, parent);
775 #endif
776
777 b = pci_find_bus(pci_domain_nr(bus), bridge->busnr);
778 if (b) {
779 /* If we already got to this bus through a different bridge, ignore it */
780 dev_dbg(&b->dev, "bus already known\n");
781 err = -EEXIST;
782 goto free;
783 }
784
785 dev_set_name(&bridge->dev, "pci%04x:%02x", pci_domain_nr(bus),
786 bridge->busnr);
787
788 err = pcibios_root_bridge_prepare(bridge);
789 if (err)
790 goto free;
791
792 err = device_register(&bridge->dev);
793 if (err)
794 put_device(&bridge->dev);
795
796 bus->bridge = get_device(&bridge->dev);
797 device_enable_async_suspend(bus->bridge);
798 pci_set_bus_of_node(bus);
799 pci_set_bus_msi_domain(bus);
800
801 if (!parent)
802 set_dev_node(bus->bridge, pcibus_to_node(bus));
803
804 bus->dev.class = &pcibus_class;
805 bus->dev.parent = bus->bridge;
806
807 dev_set_name(&bus->dev, "%04x:%02x", pci_domain_nr(bus), bus->number);
808 name = dev_name(&bus->dev);
809
810 err = device_register(&bus->dev);
811 if (err)
812 goto unregister;
813
814 pcibios_add_bus(bus);
815
816 /* Create legacy_io and legacy_mem files for this bus */
817 pci_create_legacy_files(bus);
818
819 if (parent)
820 dev_info(parent, "PCI host bridge to bus %s\n", name);
821 else
822 pr_info("PCI host bridge to bus %s\n", name);
823
824 /* Add initial resources to the bus */
825 resource_list_for_each_entry_safe(window, n, &resources) {
826 list_move_tail(&window->node, &bridge->windows);
827 offset = window->offset;
828 res = window->res;
829
830 if (res->flags & IORESOURCE_BUS)
831 pci_bus_insert_busn_res(bus, bus->number, res->end);
832 else
833 pci_bus_add_resource(bus, res, 0);
834
835 if (offset) {
836 if (resource_type(res) == IORESOURCE_IO)
837 fmt = " (bus address [%#06llx-%#06llx])";
838 else
839 fmt = " (bus address [%#010llx-%#010llx])";
840
841 snprintf(addr, sizeof(addr), fmt,
842 (unsigned long long)(res->start - offset),
843 (unsigned long long)(res->end - offset));
844 } else
845 addr[0] = '\0';
846
847 dev_info(&bus->dev, "root bus resource %pR%s\n", res, addr);
848 }
849
850 down_write(&pci_bus_sem);
851 list_add_tail(&bus->node, &pci_root_buses);
852 up_write(&pci_bus_sem);
853
854 return 0;
855
856 unregister:
857 put_device(&bridge->dev);
858 device_unregister(&bridge->dev);
859
860 free:
861 kfree(bus);
862 return err;
863 }
864
865 static struct pci_bus *pci_alloc_child_bus(struct pci_bus *parent,
866 struct pci_dev *bridge, int busnr)
867 {
868 struct pci_bus *child;
869 int i;
870 int ret;
871
872 /*
873 * Allocate a new bus, and inherit stuff from the parent..
874 */
875 child = pci_alloc_bus(parent);
876 if (!child)
877 return NULL;
878
879 child->parent = parent;
880 child->ops = parent->ops;
881 child->msi = parent->msi;
882 child->sysdata = parent->sysdata;
883 child->bus_flags = parent->bus_flags;
884
885 /* initialize some portions of the bus device, but don't register it
886 * now as the parent is not properly set up yet.
887 */
888 child->dev.class = &pcibus_class;
889 dev_set_name(&child->dev, "%04x:%02x", pci_domain_nr(child), busnr);
890
891 /*
892 * Set up the primary, secondary and subordinate
893 * bus numbers.
894 */
895 child->number = child->busn_res.start = busnr;
896 child->primary = parent->busn_res.start;
897 child->busn_res.end = 0xff;
898
899 if (!bridge) {
900 child->dev.parent = parent->bridge;
901 goto add_dev;
902 }
903
904 child->self = bridge;
905 child->bridge = get_device(&bridge->dev);
906 child->dev.parent = child->bridge;
907 pci_set_bus_of_node(child);
908 pci_set_bus_speed(child);
909
910 /* Set up default resource pointers and names.. */
911 for (i = 0; i < PCI_BRIDGE_RESOURCE_NUM; i++) {
912 child->resource[i] = &bridge->resource[PCI_BRIDGE_RESOURCES+i];
913 child->resource[i]->name = child->name;
914 }
915 bridge->subordinate = child;
916
917 add_dev:
918 pci_set_bus_msi_domain(child);
919 ret = device_register(&child->dev);
920 WARN_ON(ret < 0);
921
922 pcibios_add_bus(child);
923
924 if (child->ops->add_bus) {
925 ret = child->ops->add_bus(child);
926 if (WARN_ON(ret < 0))
927 dev_err(&child->dev, "failed to add bus: %d\n", ret);
928 }
929
930 /* Create legacy_io and legacy_mem files for this bus */
931 pci_create_legacy_files(child);
932
933 return child;
934 }
935
936 struct pci_bus *pci_add_new_bus(struct pci_bus *parent, struct pci_dev *dev,
937 int busnr)
938 {
939 struct pci_bus *child;
940
941 child = pci_alloc_child_bus(parent, dev, busnr);
942 if (child) {
943 down_write(&pci_bus_sem);
944 list_add_tail(&child->node, &parent->children);
945 up_write(&pci_bus_sem);
946 }
947 return child;
948 }
949 EXPORT_SYMBOL(pci_add_new_bus);
950
951 static void pci_enable_crs(struct pci_dev *pdev)
952 {
953 u16 root_cap = 0;
954
955 /* Enable CRS Software Visibility if supported */
956 pcie_capability_read_word(pdev, PCI_EXP_RTCAP, &root_cap);
957 if (root_cap & PCI_EXP_RTCAP_CRSVIS)
958 pcie_capability_set_word(pdev, PCI_EXP_RTCTL,
959 PCI_EXP_RTCTL_CRSSVE);
960 }
961
962 static unsigned int pci_scan_child_bus_extend(struct pci_bus *bus,
963 unsigned int available_buses);
964
965 /*
966 * pci_scan_bridge_extend() - Scan buses behind a bridge
967 * @bus: Parent bus the bridge is on
968 * @dev: Bridge itself
969 * @max: Starting subordinate number of buses behind this bridge
970 * @available_buses: Total number of buses available for this bridge and
971 * the devices below. After the minimal bus space has
972 * been allocated the remaining buses will be
973 * distributed equally between hotplug-capable bridges.
974 * @pass: Either %0 (scan already configured bridges) or %1 (scan bridges
975 * that need to be reconfigured.
976 *
977 * If it's a bridge, configure it and scan the bus behind it.
978 * For CardBus bridges, we don't scan behind as the devices will
979 * be handled by the bridge driver itself.
980 *
981 * We need to process bridges in two passes -- first we scan those
982 * already configured by the BIOS and after we are done with all of
983 * them, we proceed to assigning numbers to the remaining buses in
984 * order to avoid overlaps between old and new bus numbers.
985 */
986 static int pci_scan_bridge_extend(struct pci_bus *bus, struct pci_dev *dev,
987 int max, unsigned int available_buses,
988 int pass)
989 {
990 struct pci_bus *child;
991 int is_cardbus = (dev->hdr_type == PCI_HEADER_TYPE_CARDBUS);
992 u32 buses, i, j = 0;
993 u16 bctl;
994 u8 primary, secondary, subordinate;
995 int broken = 0;
996
997 /*
998 * Make sure the bridge is powered on to be able to access config
999 * space of devices below it.
1000 */
1001 pm_runtime_get_sync(&dev->dev);
1002
1003 pci_read_config_dword(dev, PCI_PRIMARY_BUS, &buses);
1004 primary = buses & 0xFF;
1005 secondary = (buses >> 8) & 0xFF;
1006 subordinate = (buses >> 16) & 0xFF;
1007
1008 dev_dbg(&dev->dev, "scanning [bus %02x-%02x] behind bridge, pass %d\n",
1009 secondary, subordinate, pass);
1010
1011 if (!primary && (primary != bus->number) && secondary && subordinate) {
1012 dev_warn(&dev->dev, "Primary bus is hard wired to 0\n");
1013 primary = bus->number;
1014 }
1015
1016 /* Check if setup is sensible at all */
1017 if (!pass &&
1018 (primary != bus->number || secondary <= bus->number ||
1019 secondary > subordinate)) {
1020 dev_info(&dev->dev, "bridge configuration invalid ([bus %02x-%02x]), reconfiguring\n",
1021 secondary, subordinate);
1022 broken = 1;
1023 }
1024
1025 /* Disable MasterAbortMode during probing to avoid reporting
1026 of bus errors (in some architectures) */
1027 pci_read_config_word(dev, PCI_BRIDGE_CONTROL, &bctl);
1028 pci_write_config_word(dev, PCI_BRIDGE_CONTROL,
1029 bctl & ~PCI_BRIDGE_CTL_MASTER_ABORT);
1030
1031 pci_enable_crs(dev);
1032
1033 if ((secondary || subordinate) && !pcibios_assign_all_busses() &&
1034 !is_cardbus && !broken) {
1035 unsigned int cmax;
1036 /*
1037 * Bus already configured by firmware, process it in the first
1038 * pass and just note the configuration.
1039 */
1040 if (pass)
1041 goto out;
1042
1043 /*
1044 * The bus might already exist for two reasons: Either we are
1045 * rescanning the bus or the bus is reachable through more than
1046 * one bridge. The second case can happen with the i450NX
1047 * chipset.
1048 */
1049 child = pci_find_bus(pci_domain_nr(bus), secondary);
1050 if (!child) {
1051 child = pci_add_new_bus(bus, dev, secondary);
1052 if (!child)
1053 goto out;
1054 child->primary = primary;
1055 pci_bus_insert_busn_res(child, secondary, subordinate);
1056 child->bridge_ctl = bctl;
1057 }
1058
1059 cmax = pci_scan_child_bus(child);
1060 if (cmax > subordinate)
1061 dev_warn(&dev->dev, "bridge has subordinate %02x but max busn %02x\n",
1062 subordinate, cmax);
1063 /* subordinate should equal child->busn_res.end */
1064 if (subordinate > max)
1065 max = subordinate;
1066 } else {
1067 /*
1068 * We need to assign a number to this bus which we always
1069 * do in the second pass.
1070 */
1071 if (!pass) {
1072 if (pcibios_assign_all_busses() || broken || is_cardbus)
1073 /* Temporarily disable forwarding of the
1074 configuration cycles on all bridges in
1075 this bus segment to avoid possible
1076 conflicts in the second pass between two
1077 bridges programmed with overlapping
1078 bus ranges. */
1079 pci_write_config_dword(dev, PCI_PRIMARY_BUS,
1080 buses & ~0xffffff);
1081 goto out;
1082 }
1083
1084 /* Clear errors */
1085 pci_write_config_word(dev, PCI_STATUS, 0xffff);
1086
1087 /* Prevent assigning a bus number that already exists.
1088 * This can happen when a bridge is hot-plugged, so in
1089 * this case we only re-scan this bus. */
1090 child = pci_find_bus(pci_domain_nr(bus), max+1);
1091 if (!child) {
1092 child = pci_add_new_bus(bus, dev, max+1);
1093 if (!child)
1094 goto out;
1095 pci_bus_insert_busn_res(child, max+1,
1096 bus->busn_res.end);
1097 }
1098 max++;
1099 if (available_buses)
1100 available_buses--;
1101
1102 buses = (buses & 0xff000000)
1103 | ((unsigned int)(child->primary) << 0)
1104 | ((unsigned int)(child->busn_res.start) << 8)
1105 | ((unsigned int)(child->busn_res.end) << 16);
1106
1107 /*
1108 * yenta.c forces a secondary latency timer of 176.
1109 * Copy that behaviour here.
1110 */
1111 if (is_cardbus) {
1112 buses &= ~0xff000000;
1113 buses |= CARDBUS_LATENCY_TIMER << 24;
1114 }
1115
1116 /*
1117 * We need to blast all three values with a single write.
1118 */
1119 pci_write_config_dword(dev, PCI_PRIMARY_BUS, buses);
1120
1121 if (!is_cardbus) {
1122 child->bridge_ctl = bctl;
1123 max = pci_scan_child_bus_extend(child, available_buses);
1124 } else {
1125 /*
1126 * For CardBus bridges, we leave 4 bus numbers
1127 * as cards with a PCI-to-PCI bridge can be
1128 * inserted later.
1129 */
1130 for (i = 0; i < CARDBUS_RESERVE_BUSNR; i++) {
1131 struct pci_bus *parent = bus;
1132 if (pci_find_bus(pci_domain_nr(bus),
1133 max+i+1))
1134 break;
1135 while (parent->parent) {
1136 if ((!pcibios_assign_all_busses()) &&
1137 (parent->busn_res.end > max) &&
1138 (parent->busn_res.end <= max+i)) {
1139 j = 1;
1140 }
1141 parent = parent->parent;
1142 }
1143 if (j) {
1144 /*
1145 * Often, there are two cardbus bridges
1146 * -- try to leave one valid bus number
1147 * for each one.
1148 */
1149 i /= 2;
1150 break;
1151 }
1152 }
1153 max += i;
1154 }
1155 /*
1156 * Set the subordinate bus number to its real value.
1157 */
1158 pci_bus_update_busn_res_end(child, max);
1159 pci_write_config_byte(dev, PCI_SUBORDINATE_BUS, max);
1160 }
1161
1162 sprintf(child->name,
1163 (is_cardbus ? "PCI CardBus %04x:%02x" : "PCI Bus %04x:%02x"),
1164 pci_domain_nr(bus), child->number);
1165
1166 /* Has only triggered on CardBus, fixup is in yenta_socket */
1167 while (bus->parent) {
1168 if ((child->busn_res.end > bus->busn_res.end) ||
1169 (child->number > bus->busn_res.end) ||
1170 (child->number < bus->number) ||
1171 (child->busn_res.end < bus->number)) {
1172 dev_info(&child->dev, "%pR %s hidden behind%s bridge %s %pR\n",
1173 &child->busn_res,
1174 (bus->number > child->busn_res.end &&
1175 bus->busn_res.end < child->number) ?
1176 "wholly" : "partially",
1177 bus->self->transparent ? " transparent" : "",
1178 dev_name(&bus->dev),
1179 &bus->busn_res);
1180 }
1181 bus = bus->parent;
1182 }
1183
1184 out:
1185 pci_write_config_word(dev, PCI_BRIDGE_CONTROL, bctl);
1186
1187 pm_runtime_put(&dev->dev);
1188
1189 return max;
1190 }
1191
1192 /*
1193 * pci_scan_bridge() - Scan buses behind a bridge
1194 * @bus: Parent bus the bridge is on
1195 * @dev: Bridge itself
1196 * @max: Starting subordinate number of buses behind this bridge
1197 * @pass: Either %0 (scan already configured bridges) or %1 (scan bridges
1198 * that need to be reconfigured.
1199 *
1200 * If it's a bridge, configure it and scan the bus behind it.
1201 * For CardBus bridges, we don't scan behind as the devices will
1202 * be handled by the bridge driver itself.
1203 *
1204 * We need to process bridges in two passes -- first we scan those
1205 * already configured by the BIOS and after we are done with all of
1206 * them, we proceed to assigning numbers to the remaining buses in
1207 * order to avoid overlaps between old and new bus numbers.
1208 */
1209 int pci_scan_bridge(struct pci_bus *bus, struct pci_dev *dev, int max, int pass)
1210 {
1211 return pci_scan_bridge_extend(bus, dev, max, 0, pass);
1212 }
1213 EXPORT_SYMBOL(pci_scan_bridge);
1214
1215 /*
1216 * Read interrupt line and base address registers.
1217 * The architecture-dependent code can tweak these, of course.
1218 */
1219 static void pci_read_irq(struct pci_dev *dev)
1220 {
1221 unsigned char irq;
1222
1223 pci_read_config_byte(dev, PCI_INTERRUPT_PIN, &irq);
1224 dev->pin = irq;
1225 if (irq)
1226 pci_read_config_byte(dev, PCI_INTERRUPT_LINE, &irq);
1227 dev->irq = irq;
1228 }
1229
1230 void set_pcie_port_type(struct pci_dev *pdev)
1231 {
1232 int pos;
1233 u16 reg16;
1234 int type;
1235 struct pci_dev *parent;
1236
1237 pos = pci_find_capability(pdev, PCI_CAP_ID_EXP);
1238 if (!pos)
1239 return;
1240
1241 pdev->pcie_cap = pos;
1242 pci_read_config_word(pdev, pos + PCI_EXP_FLAGS, &reg16);
1243 pdev->pcie_flags_reg = reg16;
1244 pci_read_config_word(pdev, pos + PCI_EXP_DEVCAP, &reg16);
1245 pdev->pcie_mpss = reg16 & PCI_EXP_DEVCAP_PAYLOAD;
1246
1247 /*
1248 * A Root Port or a PCI-to-PCIe bridge is always the upstream end
1249 * of a Link. No PCIe component has two Links. Two Links are
1250 * connected by a Switch that has a Port on each Link and internal
1251 * logic to connect the two Ports.
1252 */
1253 type = pci_pcie_type(pdev);
1254 if (type == PCI_EXP_TYPE_ROOT_PORT ||
1255 type == PCI_EXP_TYPE_PCIE_BRIDGE)
1256 pdev->has_secondary_link = 1;
1257 else if (type == PCI_EXP_TYPE_UPSTREAM ||
1258 type == PCI_EXP_TYPE_DOWNSTREAM) {
1259 parent = pci_upstream_bridge(pdev);
1260
1261 /*
1262 * Usually there's an upstream device (Root Port or Switch
1263 * Downstream Port), but we can't assume one exists.
1264 */
1265 if (parent && !parent->has_secondary_link)
1266 pdev->has_secondary_link = 1;
1267 }
1268 }
1269
1270 void set_pcie_hotplug_bridge(struct pci_dev *pdev)
1271 {
1272 u32 reg32;
1273
1274 pcie_capability_read_dword(pdev, PCI_EXP_SLTCAP, &reg32);
1275 if (reg32 & PCI_EXP_SLTCAP_HPC)
1276 pdev->is_hotplug_bridge = 1;
1277 }
1278
1279 static void set_pcie_thunderbolt(struct pci_dev *dev)
1280 {
1281 int vsec = 0;
1282 u32 header;
1283
1284 while ((vsec = pci_find_next_ext_capability(dev, vsec,
1285 PCI_EXT_CAP_ID_VNDR))) {
1286 pci_read_config_dword(dev, vsec + PCI_VNDR_HEADER, &header);
1287
1288 /* Is the device part of a Thunderbolt controller? */
1289 if (dev->vendor == PCI_VENDOR_ID_INTEL &&
1290 PCI_VNDR_HEADER_ID(header) == PCI_VSEC_ID_INTEL_TBT) {
1291 dev->is_thunderbolt = 1;
1292 return;
1293 }
1294 }
1295 }
1296
1297 /**
1298 * pci_ext_cfg_is_aliased - is ext config space just an alias of std config?
1299 * @dev: PCI device
1300 *
1301 * PCI Express to PCI/PCI-X Bridge Specification, rev 1.0, 4.1.4 says that
1302 * when forwarding a type1 configuration request the bridge must check that
1303 * the extended register address field is zero. The bridge is not permitted
1304 * to forward the transactions and must handle it as an Unsupported Request.
1305 * Some bridges do not follow this rule and simply drop the extended register
1306 * bits, resulting in the standard config space being aliased, every 256
1307 * bytes across the entire configuration space. Test for this condition by
1308 * comparing the first dword of each potential alias to the vendor/device ID.
1309 * Known offenders:
1310 * ASM1083/1085 PCIe-to-PCI Reversible Bridge (1b21:1080, rev 01 & 03)
1311 * AMD/ATI SBx00 PCI to PCI Bridge (1002:4384, rev 40)
1312 */
1313 static bool pci_ext_cfg_is_aliased(struct pci_dev *dev)
1314 {
1315 #ifdef CONFIG_PCI_QUIRKS
1316 int pos;
1317 u32 header, tmp;
1318
1319 pci_read_config_dword(dev, PCI_VENDOR_ID, &header);
1320
1321 for (pos = PCI_CFG_SPACE_SIZE;
1322 pos < PCI_CFG_SPACE_EXP_SIZE; pos += PCI_CFG_SPACE_SIZE) {
1323 if (pci_read_config_dword(dev, pos, &tmp) != PCIBIOS_SUCCESSFUL
1324 || header != tmp)
1325 return false;
1326 }
1327
1328 return true;
1329 #else
1330 return false;
1331 #endif
1332 }
1333
1334 /**
1335 * pci_cfg_space_size - get the configuration space size of the PCI device.
1336 * @dev: PCI device
1337 *
1338 * Regular PCI devices have 256 bytes, but PCI-X 2 and PCI Express devices
1339 * have 4096 bytes. Even if the device is capable, that doesn't mean we can
1340 * access it. Maybe we don't have a way to generate extended config space
1341 * accesses, or the device is behind a reverse Express bridge. So we try
1342 * reading the dword at 0x100 which must either be 0 or a valid extended
1343 * capability header.
1344 */
1345 static int pci_cfg_space_size_ext(struct pci_dev *dev)
1346 {
1347 u32 status;
1348 int pos = PCI_CFG_SPACE_SIZE;
1349
1350 if (pci_read_config_dword(dev, pos, &status) != PCIBIOS_SUCCESSFUL)
1351 return PCI_CFG_SPACE_SIZE;
1352 if (status == 0xffffffff || pci_ext_cfg_is_aliased(dev))
1353 return PCI_CFG_SPACE_SIZE;
1354
1355 return PCI_CFG_SPACE_EXP_SIZE;
1356 }
1357
1358 int pci_cfg_space_size(struct pci_dev *dev)
1359 {
1360 int pos;
1361 u32 status;
1362 u16 class;
1363
1364 class = dev->class >> 8;
1365 if (class == PCI_CLASS_BRIDGE_HOST)
1366 return pci_cfg_space_size_ext(dev);
1367
1368 if (pci_is_pcie(dev))
1369 return pci_cfg_space_size_ext(dev);
1370
1371 pos = pci_find_capability(dev, PCI_CAP_ID_PCIX);
1372 if (!pos)
1373 return PCI_CFG_SPACE_SIZE;
1374
1375 pci_read_config_dword(dev, pos + PCI_X_STATUS, &status);
1376 if (status & (PCI_X_STATUS_266MHZ | PCI_X_STATUS_533MHZ))
1377 return pci_cfg_space_size_ext(dev);
1378
1379 return PCI_CFG_SPACE_SIZE;
1380 }
1381
1382 #define LEGACY_IO_RESOURCE (IORESOURCE_IO | IORESOURCE_PCI_FIXED)
1383
1384 static void pci_msi_setup_pci_dev(struct pci_dev *dev)
1385 {
1386 /*
1387 * Disable the MSI hardware to avoid screaming interrupts
1388 * during boot. This is the power on reset default so
1389 * usually this should be a noop.
1390 */
1391 dev->msi_cap = pci_find_capability(dev, PCI_CAP_ID_MSI);
1392 if (dev->msi_cap)
1393 pci_msi_set_enable(dev, 0);
1394
1395 dev->msix_cap = pci_find_capability(dev, PCI_CAP_ID_MSIX);
1396 if (dev->msix_cap)
1397 pci_msix_clear_and_set_ctrl(dev, PCI_MSIX_FLAGS_ENABLE, 0);
1398 }
1399
1400 /**
1401 * pci_intx_mask_broken - test PCI_COMMAND_INTX_DISABLE writability
1402 * @dev: PCI device
1403 *
1404 * Test whether PCI_COMMAND_INTX_DISABLE is writable for @dev. Check this
1405 * at enumeration-time to avoid modifying PCI_COMMAND at run-time.
1406 */
1407 static int pci_intx_mask_broken(struct pci_dev *dev)
1408 {
1409 u16 orig, toggle, new;
1410
1411 pci_read_config_word(dev, PCI_COMMAND, &orig);
1412 toggle = orig ^ PCI_COMMAND_INTX_DISABLE;
1413 pci_write_config_word(dev, PCI_COMMAND, toggle);
1414 pci_read_config_word(dev, PCI_COMMAND, &new);
1415
1416 pci_write_config_word(dev, PCI_COMMAND, orig);
1417
1418 /*
1419 * PCI_COMMAND_INTX_DISABLE was reserved and read-only prior to PCI
1420 * r2.3, so strictly speaking, a device is not *broken* if it's not
1421 * writable. But we'll live with the misnomer for now.
1422 */
1423 if (new != toggle)
1424 return 1;
1425 return 0;
1426 }
1427
1428 /**
1429 * pci_setup_device - fill in class and map information of a device
1430 * @dev: the device structure to fill
1431 *
1432 * Initialize the device structure with information about the device's
1433 * vendor,class,memory and IO-space addresses,IRQ lines etc.
1434 * Called at initialisation of the PCI subsystem and by CardBus services.
1435 * Returns 0 on success and negative if unknown type of device (not normal,
1436 * bridge or CardBus).
1437 */
1438 int pci_setup_device(struct pci_dev *dev)
1439 {
1440 u32 class;
1441 u16 cmd;
1442 u8 hdr_type;
1443 int pos = 0;
1444 struct pci_bus_region region;
1445 struct resource *res;
1446
1447 if (pci_read_config_byte(dev, PCI_HEADER_TYPE, &hdr_type))
1448 return -EIO;
1449
1450 dev->sysdata = dev->bus->sysdata;
1451 dev->dev.parent = dev->bus->bridge;
1452 dev->dev.bus = &pci_bus_type;
1453 dev->hdr_type = hdr_type & 0x7f;
1454 dev->multifunction = !!(hdr_type & 0x80);
1455 dev->error_state = pci_channel_io_normal;
1456 set_pcie_port_type(dev);
1457
1458 pci_dev_assign_slot(dev);
1459 /* Assume 32-bit PCI; let 64-bit PCI cards (which are far rarer)
1460 set this higher, assuming the system even supports it. */
1461 dev->dma_mask = 0xffffffff;
1462
1463 dev_set_name(&dev->dev, "%04x:%02x:%02x.%d", pci_domain_nr(dev->bus),
1464 dev->bus->number, PCI_SLOT(dev->devfn),
1465 PCI_FUNC(dev->devfn));
1466
1467 pci_read_config_dword(dev, PCI_CLASS_REVISION, &class);
1468 dev->revision = class & 0xff;
1469 dev->class = class >> 8; /* upper 3 bytes */
1470
1471 dev_printk(KERN_DEBUG, &dev->dev, "[%04x:%04x] type %02x class %#08x\n",
1472 dev->vendor, dev->device, dev->hdr_type, dev->class);
1473
1474 /* need to have dev->class ready */
1475 dev->cfg_size = pci_cfg_space_size(dev);
1476
1477 /* need to have dev->cfg_size ready */
1478 set_pcie_thunderbolt(dev);
1479
1480 /* "Unknown power state" */
1481 dev->current_state = PCI_UNKNOWN;
1482
1483 /* Early fixups, before probing the BARs */
1484 pci_fixup_device(pci_fixup_early, dev);
1485 /* device class may be changed after fixup */
1486 class = dev->class >> 8;
1487
1488 if (dev->non_compliant_bars) {
1489 pci_read_config_word(dev, PCI_COMMAND, &cmd);
1490 if (cmd & (PCI_COMMAND_IO | PCI_COMMAND_MEMORY)) {
1491 dev_info(&dev->dev, "device has non-compliant BARs; disabling IO/MEM decoding\n");
1492 cmd &= ~PCI_COMMAND_IO;
1493 cmd &= ~PCI_COMMAND_MEMORY;
1494 pci_write_config_word(dev, PCI_COMMAND, cmd);
1495 }
1496 }
1497
1498 dev->broken_intx_masking = pci_intx_mask_broken(dev);
1499
1500 switch (dev->hdr_type) { /* header type */
1501 case PCI_HEADER_TYPE_NORMAL: /* standard header */
1502 if (class == PCI_CLASS_BRIDGE_PCI)
1503 goto bad;
1504 pci_read_irq(dev);
1505 pci_read_bases(dev, 6, PCI_ROM_ADDRESS);
1506 pci_read_config_word(dev, PCI_SUBSYSTEM_VENDOR_ID, &dev->subsystem_vendor);
1507 pci_read_config_word(dev, PCI_SUBSYSTEM_ID, &dev->subsystem_device);
1508
1509 /*
1510 * Do the ugly legacy mode stuff here rather than broken chip
1511 * quirk code. Legacy mode ATA controllers have fixed
1512 * addresses. These are not always echoed in BAR0-3, and
1513 * BAR0-3 in a few cases contain junk!
1514 */
1515 if (class == PCI_CLASS_STORAGE_IDE) {
1516 u8 progif;
1517 pci_read_config_byte(dev, PCI_CLASS_PROG, &progif);
1518 if ((progif & 1) == 0) {
1519 region.start = 0x1F0;
1520 region.end = 0x1F7;
1521 res = &dev->resource[0];
1522 res->flags = LEGACY_IO_RESOURCE;
1523 pcibios_bus_to_resource(dev->bus, res, &region);
1524 dev_info(&dev->dev, "legacy IDE quirk: reg 0x10: %pR\n",
1525 res);
1526 region.start = 0x3F6;
1527 region.end = 0x3F6;
1528 res = &dev->resource[1];
1529 res->flags = LEGACY_IO_RESOURCE;
1530 pcibios_bus_to_resource(dev->bus, res, &region);
1531 dev_info(&dev->dev, "legacy IDE quirk: reg 0x14: %pR\n",
1532 res);
1533 }
1534 if ((progif & 4) == 0) {
1535 region.start = 0x170;
1536 region.end = 0x177;
1537 res = &dev->resource[2];
1538 res->flags = LEGACY_IO_RESOURCE;
1539 pcibios_bus_to_resource(dev->bus, res, &region);
1540 dev_info(&dev->dev, "legacy IDE quirk: reg 0x18: %pR\n",
1541 res);
1542 region.start = 0x376;
1543 region.end = 0x376;
1544 res = &dev->resource[3];
1545 res->flags = LEGACY_IO_RESOURCE;
1546 pcibios_bus_to_resource(dev->bus, res, &region);
1547 dev_info(&dev->dev, "legacy IDE quirk: reg 0x1c: %pR\n",
1548 res);
1549 }
1550 }
1551 break;
1552
1553 case PCI_HEADER_TYPE_BRIDGE: /* bridge header */
1554 if (class != PCI_CLASS_BRIDGE_PCI)
1555 goto bad;
1556 /* The PCI-to-PCI bridge spec requires that subtractive
1557 decoding (i.e. transparent) bridge must have programming
1558 interface code of 0x01. */
1559 pci_read_irq(dev);
1560 dev->transparent = ((dev->class & 0xff) == 1);
1561 pci_read_bases(dev, 2, PCI_ROM_ADDRESS1);
1562 set_pcie_hotplug_bridge(dev);
1563 pos = pci_find_capability(dev, PCI_CAP_ID_SSVID);
1564 if (pos) {
1565 pci_read_config_word(dev, pos + PCI_SSVID_VENDOR_ID, &dev->subsystem_vendor);
1566 pci_read_config_word(dev, pos + PCI_SSVID_DEVICE_ID, &dev->subsystem_device);
1567 }
1568 break;
1569
1570 case PCI_HEADER_TYPE_CARDBUS: /* CardBus bridge header */
1571 if (class != PCI_CLASS_BRIDGE_CARDBUS)
1572 goto bad;
1573 pci_read_irq(dev);
1574 pci_read_bases(dev, 1, 0);
1575 pci_read_config_word(dev, PCI_CB_SUBSYSTEM_VENDOR_ID, &dev->subsystem_vendor);
1576 pci_read_config_word(dev, PCI_CB_SUBSYSTEM_ID, &dev->subsystem_device);
1577 break;
1578
1579 default: /* unknown header */
1580 dev_err(&dev->dev, "unknown header type %02x, ignoring device\n",
1581 dev->hdr_type);
1582 return -EIO;
1583
1584 bad:
1585 dev_err(&dev->dev, "ignoring class %#08x (doesn't match header type %02x)\n",
1586 dev->class, dev->hdr_type);
1587 dev->class = PCI_CLASS_NOT_DEFINED << 8;
1588 }
1589
1590 /* We found a fine healthy device, go go go... */
1591 return 0;
1592 }
1593
1594 static void pci_configure_mps(struct pci_dev *dev)
1595 {
1596 struct pci_dev *bridge = pci_upstream_bridge(dev);
1597 int mps, p_mps, rc;
1598
1599 if (!pci_is_pcie(dev) || !bridge || !pci_is_pcie(bridge))
1600 return;
1601
1602 mps = pcie_get_mps(dev);
1603 p_mps = pcie_get_mps(bridge);
1604
1605 if (mps == p_mps)
1606 return;
1607
1608 if (pcie_bus_config == PCIE_BUS_TUNE_OFF) {
1609 dev_warn(&dev->dev, "Max Payload Size %d, but upstream %s set to %d; if necessary, use \"pci=pcie_bus_safe\" and report a bug\n",
1610 mps, pci_name(bridge), p_mps);
1611 return;
1612 }
1613
1614 /*
1615 * Fancier MPS configuration is done later by
1616 * pcie_bus_configure_settings()
1617 */
1618 if (pcie_bus_config != PCIE_BUS_DEFAULT)
1619 return;
1620
1621 rc = pcie_set_mps(dev, p_mps);
1622 if (rc) {
1623 dev_warn(&dev->dev, "can't set Max Payload Size to %d; if necessary, use \"pci=pcie_bus_safe\" and report a bug\n",
1624 p_mps);
1625 return;
1626 }
1627
1628 dev_info(&dev->dev, "Max Payload Size set to %d (was %d, max %d)\n",
1629 p_mps, mps, 128 << dev->pcie_mpss);
1630 }
1631
1632 static struct hpp_type0 pci_default_type0 = {
1633 .revision = 1,
1634 .cache_line_size = 8,
1635 .latency_timer = 0x40,
1636 .enable_serr = 0,
1637 .enable_perr = 0,
1638 };
1639
1640 static void program_hpp_type0(struct pci_dev *dev, struct hpp_type0 *hpp)
1641 {
1642 u16 pci_cmd, pci_bctl;
1643
1644 if (!hpp)
1645 hpp = &pci_default_type0;
1646
1647 if (hpp->revision > 1) {
1648 dev_warn(&dev->dev,
1649 "PCI settings rev %d not supported; using defaults\n",
1650 hpp->revision);
1651 hpp = &pci_default_type0;
1652 }
1653
1654 pci_write_config_byte(dev, PCI_CACHE_LINE_SIZE, hpp->cache_line_size);
1655 pci_write_config_byte(dev, PCI_LATENCY_TIMER, hpp->latency_timer);
1656 pci_read_config_word(dev, PCI_COMMAND, &pci_cmd);
1657 if (hpp->enable_serr)
1658 pci_cmd |= PCI_COMMAND_SERR;
1659 if (hpp->enable_perr)
1660 pci_cmd |= PCI_COMMAND_PARITY;
1661 pci_write_config_word(dev, PCI_COMMAND, pci_cmd);
1662
1663 /* Program bridge control value */
1664 if ((dev->class >> 8) == PCI_CLASS_BRIDGE_PCI) {
1665 pci_write_config_byte(dev, PCI_SEC_LATENCY_TIMER,
1666 hpp->latency_timer);
1667 pci_read_config_word(dev, PCI_BRIDGE_CONTROL, &pci_bctl);
1668 if (hpp->enable_serr)
1669 pci_bctl |= PCI_BRIDGE_CTL_SERR;
1670 if (hpp->enable_perr)
1671 pci_bctl |= PCI_BRIDGE_CTL_PARITY;
1672 pci_write_config_word(dev, PCI_BRIDGE_CONTROL, pci_bctl);
1673 }
1674 }
1675
1676 static void program_hpp_type1(struct pci_dev *dev, struct hpp_type1 *hpp)
1677 {
1678 int pos;
1679
1680 if (!hpp)
1681 return;
1682
1683 pos = pci_find_capability(dev, PCI_CAP_ID_PCIX);
1684 if (!pos)
1685 return;
1686
1687 dev_warn(&dev->dev, "PCI-X settings not supported\n");
1688 }
1689
1690 static bool pcie_root_rcb_set(struct pci_dev *dev)
1691 {
1692 struct pci_dev *rp = pcie_find_root_port(dev);
1693 u16 lnkctl;
1694
1695 if (!rp)
1696 return false;
1697
1698 pcie_capability_read_word(rp, PCI_EXP_LNKCTL, &lnkctl);
1699 if (lnkctl & PCI_EXP_LNKCTL_RCB)
1700 return true;
1701
1702 return false;
1703 }
1704
1705 static void program_hpp_type2(struct pci_dev *dev, struct hpp_type2 *hpp)
1706 {
1707 int pos;
1708 u32 reg32;
1709
1710 if (!hpp)
1711 return;
1712
1713 if (!pci_is_pcie(dev))
1714 return;
1715
1716 if (hpp->revision > 1) {
1717 dev_warn(&dev->dev, "PCIe settings rev %d not supported\n",
1718 hpp->revision);
1719 return;
1720 }
1721
1722 /*
1723 * Don't allow _HPX to change MPS or MRRS settings. We manage
1724 * those to make sure they're consistent with the rest of the
1725 * platform.
1726 */
1727 hpp->pci_exp_devctl_and |= PCI_EXP_DEVCTL_PAYLOAD |
1728 PCI_EXP_DEVCTL_READRQ;
1729 hpp->pci_exp_devctl_or &= ~(PCI_EXP_DEVCTL_PAYLOAD |
1730 PCI_EXP_DEVCTL_READRQ);
1731
1732 /* Initialize Device Control Register */
1733 pcie_capability_clear_and_set_word(dev, PCI_EXP_DEVCTL,
1734 ~hpp->pci_exp_devctl_and, hpp->pci_exp_devctl_or);
1735
1736 /* Initialize Link Control Register */
1737 if (pcie_cap_has_lnkctl(dev)) {
1738
1739 /*
1740 * If the Root Port supports Read Completion Boundary of
1741 * 128, set RCB to 128. Otherwise, clear it.
1742 */
1743 hpp->pci_exp_lnkctl_and |= PCI_EXP_LNKCTL_RCB;
1744 hpp->pci_exp_lnkctl_or &= ~PCI_EXP_LNKCTL_RCB;
1745 if (pcie_root_rcb_set(dev))
1746 hpp->pci_exp_lnkctl_or |= PCI_EXP_LNKCTL_RCB;
1747
1748 pcie_capability_clear_and_set_word(dev, PCI_EXP_LNKCTL,
1749 ~hpp->pci_exp_lnkctl_and, hpp->pci_exp_lnkctl_or);
1750 }
1751
1752 /* Find Advanced Error Reporting Enhanced Capability */
1753 pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR);
1754 if (!pos)
1755 return;
1756
1757 /* Initialize Uncorrectable Error Mask Register */
1758 pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_MASK, &reg32);
1759 reg32 = (reg32 & hpp->unc_err_mask_and) | hpp->unc_err_mask_or;
1760 pci_write_config_dword(dev, pos + PCI_ERR_UNCOR_MASK, reg32);
1761
1762 /* Initialize Uncorrectable Error Severity Register */
1763 pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_SEVER, &reg32);
1764 reg32 = (reg32 & hpp->unc_err_sever_and) | hpp->unc_err_sever_or;
1765 pci_write_config_dword(dev, pos + PCI_ERR_UNCOR_SEVER, reg32);
1766
1767 /* Initialize Correctable Error Mask Register */
1768 pci_read_config_dword(dev, pos + PCI_ERR_COR_MASK, &reg32);
1769 reg32 = (reg32 & hpp->cor_err_mask_and) | hpp->cor_err_mask_or;
1770 pci_write_config_dword(dev, pos + PCI_ERR_COR_MASK, reg32);
1771
1772 /* Initialize Advanced Error Capabilities and Control Register */
1773 pci_read_config_dword(dev, pos + PCI_ERR_CAP, &reg32);
1774 reg32 = (reg32 & hpp->adv_err_cap_and) | hpp->adv_err_cap_or;
1775 /* Don't enable ECRC generation or checking if unsupported */
1776 if (!(reg32 & PCI_ERR_CAP_ECRC_GENC))
1777 reg32 &= ~PCI_ERR_CAP_ECRC_GENE;
1778 if (!(reg32 & PCI_ERR_CAP_ECRC_CHKC))
1779 reg32 &= ~PCI_ERR_CAP_ECRC_CHKE;
1780 pci_write_config_dword(dev, pos + PCI_ERR_CAP, reg32);
1781
1782 /*
1783 * FIXME: The following two registers are not supported yet.
1784 *
1785 * o Secondary Uncorrectable Error Severity Register
1786 * o Secondary Uncorrectable Error Mask Register
1787 */
1788 }
1789
1790 int pci_configure_extended_tags(struct pci_dev *dev, void *ign)
1791 {
1792 struct pci_host_bridge *host;
1793 u32 cap;
1794 u16 ctl;
1795 int ret;
1796
1797 if (!pci_is_pcie(dev))
1798 return 0;
1799
1800 ret = pcie_capability_read_dword(dev, PCI_EXP_DEVCAP, &cap);
1801 if (ret)
1802 return 0;
1803
1804 if (!(cap & PCI_EXP_DEVCAP_EXT_TAG))
1805 return 0;
1806
1807 ret = pcie_capability_read_word(dev, PCI_EXP_DEVCTL, &ctl);
1808 if (ret)
1809 return 0;
1810
1811 host = pci_find_host_bridge(dev->bus);
1812 if (!host)
1813 return 0;
1814
1815 /*
1816 * If some device in the hierarchy doesn't handle Extended Tags
1817 * correctly, make sure they're disabled.
1818 */
1819 if (host->no_ext_tags) {
1820 if (ctl & PCI_EXP_DEVCTL_EXT_TAG) {
1821 dev_info(&dev->dev, "disabling Extended Tags\n");
1822 pcie_capability_clear_word(dev, PCI_EXP_DEVCTL,
1823 PCI_EXP_DEVCTL_EXT_TAG);
1824 }
1825 return 0;
1826 }
1827
1828 if (!(ctl & PCI_EXP_DEVCTL_EXT_TAG)) {
1829 dev_info(&dev->dev, "enabling Extended Tags\n");
1830 pcie_capability_set_word(dev, PCI_EXP_DEVCTL,
1831 PCI_EXP_DEVCTL_EXT_TAG);
1832 }
1833 return 0;
1834 }
1835
1836 /**
1837 * pcie_relaxed_ordering_enabled - Probe for PCIe relaxed ordering enable
1838 * @dev: PCI device to query
1839 *
1840 * Returns true if the device has enabled relaxed ordering attribute.
1841 */
1842 bool pcie_relaxed_ordering_enabled(struct pci_dev *dev)
1843 {
1844 u16 v;
1845
1846 pcie_capability_read_word(dev, PCI_EXP_DEVCTL, &v);
1847
1848 return !!(v & PCI_EXP_DEVCTL_RELAX_EN);
1849 }
1850 EXPORT_SYMBOL(pcie_relaxed_ordering_enabled);
1851
1852 static void pci_configure_relaxed_ordering(struct pci_dev *dev)
1853 {
1854 struct pci_dev *root;
1855
1856 /* PCI_EXP_DEVICE_RELAX_EN is RsvdP in VFs */
1857 if (dev->is_virtfn)
1858 return;
1859
1860 if (!pcie_relaxed_ordering_enabled(dev))
1861 return;
1862
1863 /*
1864 * For now, we only deal with Relaxed Ordering issues with Root
1865 * Ports. Peer-to-Peer DMA is another can of worms.
1866 */
1867 root = pci_find_pcie_root_port(dev);
1868 if (!root)
1869 return;
1870
1871 if (root->dev_flags & PCI_DEV_FLAGS_NO_RELAXED_ORDERING) {
1872 pcie_capability_clear_word(dev, PCI_EXP_DEVCTL,
1873 PCI_EXP_DEVCTL_RELAX_EN);
1874 dev_info(&dev->dev, "Disable Relaxed Ordering because the Root Port didn't support it\n");
1875 }
1876 }
1877
1878 static void pci_configure_ltr(struct pci_dev *dev)
1879 {
1880 #ifdef CONFIG_PCIEASPM
1881 u32 cap;
1882 struct pci_dev *bridge;
1883
1884 if (!pci_is_pcie(dev))
1885 return;
1886
1887 pcie_capability_read_dword(dev, PCI_EXP_DEVCAP2, &cap);
1888 if (!(cap & PCI_EXP_DEVCAP2_LTR))
1889 return;
1890
1891 /*
1892 * Software must not enable LTR in an Endpoint unless the Root
1893 * Complex and all intermediate Switches indicate support for LTR.
1894 * PCIe r3.1, sec 6.18.
1895 */
1896 if (pci_pcie_type(dev) == PCI_EXP_TYPE_ROOT_PORT) {
1897 dev->ltr_path = 1;
1898 } else {
1899 bridge = pci_upstream_bridge(dev);
1900 if (bridge && bridge->ltr_path)
1901 dev->ltr_path = 1;
1902 }
1903
1904 if (dev->ltr_path)
1905 pcie_capability_set_word(dev, PCI_EXP_DEVCTL2,
1906 PCI_EXP_DEVCTL2_LTR_EN);
1907 #endif
1908 }
1909
1910 static void pci_configure_device(struct pci_dev *dev)
1911 {
1912 struct hotplug_params hpp;
1913 int ret;
1914
1915 pci_configure_mps(dev);
1916 pci_configure_extended_tags(dev, NULL);
1917 pci_configure_relaxed_ordering(dev);
1918 pci_configure_ltr(dev);
1919
1920 memset(&hpp, 0, sizeof(hpp));
1921 ret = pci_get_hp_params(dev, &hpp);
1922 if (ret)
1923 return;
1924
1925 program_hpp_type2(dev, hpp.t2);
1926 program_hpp_type1(dev, hpp.t1);
1927 program_hpp_type0(dev, hpp.t0);
1928 }
1929
1930 static void pci_release_capabilities(struct pci_dev *dev)
1931 {
1932 pci_vpd_release(dev);
1933 pci_iov_release(dev);
1934 pci_free_cap_save_buffers(dev);
1935 }
1936
1937 /**
1938 * pci_release_dev - free a pci device structure when all users of it are finished.
1939 * @dev: device that's been disconnected
1940 *
1941 * Will be called only by the device core when all users of this pci device are
1942 * done.
1943 */
1944 static void pci_release_dev(struct device *dev)
1945 {
1946 struct pci_dev *pci_dev;
1947
1948 pci_dev = to_pci_dev(dev);
1949 pci_release_capabilities(pci_dev);
1950 pci_release_of_node(pci_dev);
1951 pcibios_release_device(pci_dev);
1952 pci_bus_put(pci_dev->bus);
1953 kfree(pci_dev->driver_override);
1954 kfree(pci_dev->dma_alias_mask);
1955 kfree(pci_dev);
1956 }
1957
1958 struct pci_dev *pci_alloc_dev(struct pci_bus *bus)
1959 {
1960 struct pci_dev *dev;
1961
1962 dev = kzalloc(sizeof(struct pci_dev), GFP_KERNEL);
1963 if (!dev)
1964 return NULL;
1965
1966 INIT_LIST_HEAD(&dev->bus_list);
1967 dev->dev.type = &pci_dev_type;
1968 dev->bus = pci_bus_get(bus);
1969
1970 return dev;
1971 }
1972 EXPORT_SYMBOL(pci_alloc_dev);
1973
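/*
 * With CRS Software Visibility enabled, a Root Port that gets a
 * Completion with Configuration Request Retry Status for a Vendor ID
 * read synthesizes the reserved value 0x0001 in the low 16 bits, so
 * software can tell that the device simply needs more time.
 */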
1974 static bool pci_bus_crs_vendor_id(u32 l)
1975 {
1976 return (l & 0xffff) == 0x0001;
1977 }
1978
1979 static bool pci_bus_wait_crs(struct pci_bus *bus, int devfn, u32 *l,
1980 int timeout)
1981 {
1982 int delay = 1;
1983
1984 if (!pci_bus_crs_vendor_id(*l))
1985 return true; /* not a CRS completion */
1986
1987 if (!timeout)
1988 return false; /* CRS, but caller doesn't want to wait */
1989
1990 /*
1991 * We got the reserved Vendor ID that indicates a completion with
1992 * Configuration Request Retry Status (CRS). Retry until we get a
1993 * valid Vendor ID or we time out.
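 * The delay doubles on every retry (1, 2, 4, ... ms), so the total
 * time slept can approach twice the requested timeout before we
 * give up.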
1994 */
1995 while (pci_bus_crs_vendor_id(*l)) {
1996 if (delay > timeout) {
1997 pr_warn("pci %04x:%02x:%02x.%d: not ready after %dms; giving up\n",
1998 pci_domain_nr(bus), bus->number,
1999 PCI_SLOT(devfn), PCI_FUNC(devfn), delay - 1);
2000
2001 return false;
2002 }
2003 if (delay >= 1000)
2004 pr_info("pci %04x:%02x:%02x.%d: not ready after %dms; waiting\n",
2005 pci_domain_nr(bus), bus->number,
2006 PCI_SLOT(devfn), PCI_FUNC(devfn), delay - 1);
2007
2008 msleep(delay);
2009 delay *= 2;
2010
2011 if (pci_bus_read_config_dword(bus, devfn, PCI_VENDOR_ID, l))
2012 return false;
2013 }
2014
2015 if (delay >= 1000)
2016 pr_info("pci %04x:%02x:%02x.%d: ready after %dms\n",
2017 pci_domain_nr(bus), bus->number,
2018 PCI_SLOT(devfn), PCI_FUNC(devfn), delay - 1);
2019
2020 return true;
2021 }
2022
2023 bool pci_bus_read_dev_vendor_id(struct pci_bus *bus, int devfn, u32 *l,
2024 int timeout)
2025 {
2026 if (pci_bus_read_config_dword(bus, devfn, PCI_VENDOR_ID, l))
2027 return false;
2028
2029 /* some broken boards return 0 or ~0 if a slot is empty: */
2030 if (*l == 0xffffffff || *l == 0x00000000 ||
2031 *l == 0x0000ffff || *l == 0xffff0000)
2032 return false;
2033
2034 if (pci_bus_crs_vendor_id(*l))
2035 return pci_bus_wait_crs(bus, devfn, l, timeout);
2036
2037 return true;
2038 }
2039 EXPORT_SYMBOL(pci_bus_read_dev_vendor_id);
2040
2041 /*
2042 * Read the config data for a PCI device, sanity-check it
2043 * and fill in the dev structure...
2044 */
2045 static struct pci_dev *pci_scan_device(struct pci_bus *bus, int devfn)
2046 {
2047 struct pci_dev *dev;
2048 u32 l;
2049
2050 if (!pci_bus_read_dev_vendor_id(bus, devfn, &l, 60*1000))
2051 return NULL;
2052
2053 dev = pci_alloc_dev(bus);
2054 if (!dev)
2055 return NULL;
2056
2057 dev->devfn = devfn;
2058 dev->vendor = l & 0xffff;
2059 dev->device = (l >> 16) & 0xffff;
2060
2061 pci_set_of_node(dev);
2062
2063 if (pci_setup_device(dev)) {
2064 pci_bus_put(dev->bus);
2065 kfree(dev);
2066 return NULL;
2067 }
2068
2069 return dev;
2070 }
2071
2072 static void pci_init_capabilities(struct pci_dev *dev)
2073 {
2074 /* Enhanced Allocation */
2075 pci_ea_init(dev);
2076
2077 /* Setup MSI caps & disable MSI/MSI-X interrupts */
2078 pci_msi_setup_pci_dev(dev);
2079
2080 /* Buffers for saving PCIe and PCI-X capabilities */
2081 pci_allocate_cap_save_buffers(dev);
2082
2083 /* Power Management */
2084 pci_pm_init(dev);
2085
2086 /* Vital Product Data */
2087 pci_vpd_init(dev);
2088
2089 /* Alternative Routing-ID Forwarding */
2090 pci_configure_ari(dev);
2091
2092 /* Single Root I/O Virtualization */
2093 pci_iov_init(dev);
2094
2095 /* Address Translation Services */
2096 pci_ats_init(dev);
2097
2098 /* Enable ACS P2P upstream forwarding */
2099 pci_enable_acs(dev);
2100
2101 /* Precision Time Measurement */
2102 pci_ptm_init(dev);
2103
2104 /* Advanced Error Reporting */
2105 pci_aer_init(dev);
2106 }
2107
2108 /*
2109 * This is the device-level equivalent of pci_host_bridge_msi_domain().
2110 * Firmware interfaces that can select the MSI domain on a per-device
2111 * basis should be called from here.
2112 */
2113 static struct irq_domain *pci_dev_msi_domain(struct pci_dev *dev)
2114 {
2115 struct irq_domain *d;
2116
2117 /*
2118 * If a domain has been set through the pcibios_add_device
2119 * callback, then this is the one (platform code knows best).
2120 */
2121 d = dev_get_msi_domain(&dev->dev);
2122 if (d)
2123 return d;
2124
2125 /*
2126 * Let's see if we have a firmware interface able to provide
2127 * the domain.
2128 */
2129 d = pci_msi_get_device_domain(dev);
2130 if (d)
2131 return d;
2132
2133 return NULL;
2134 }
2135
2136 static void pci_set_msi_domain(struct pci_dev *dev)
2137 {
2138 struct irq_domain *d;
2139
2140 /*
2141 * If the platform or firmware interfaces cannot supply a
2142 * device-specific MSI domain, then inherit the default domain
2143 * from the host bridge itself.
2144 */
2145 d = pci_dev_msi_domain(dev);
2146 if (!d)
2147 d = dev_get_msi_domain(&dev->bus->dev);
2148
2149 dev_set_msi_domain(&dev->dev, d);
2150 }
2151
2152 void pci_device_add(struct pci_dev *dev, struct pci_bus *bus)
2153 {
2154 int ret;
2155
2156 pci_configure_device(dev);
2157
2158 device_initialize(&dev->dev);
2159 dev->dev.release = pci_release_dev;
2160
2161 set_dev_node(&dev->dev, pcibus_to_node(bus));
2162 dev->dev.dma_mask = &dev->dma_mask;
2163 dev->dev.dma_parms = &dev->dma_parms;
2164 dev->dev.coherent_dma_mask = 0xffffffffull;
2165
2166 pci_set_dma_max_seg_size(dev, 65536);
2167 pci_set_dma_seg_boundary(dev, 0xffffffff);
2168
2169 /* Fix up broken headers */
2170 pci_fixup_device(pci_fixup_header, dev);
2171
2172 /* Moved out of the header fixup quirks: apply any requested resource alignment */
2173 pci_reassigndev_resource_alignment(dev);
2174
2175 /* Clear the state_saved flag. */
2176 dev->state_saved = false;
2177
2178 /* Initialize various capabilities */
2179 pci_init_capabilities(dev);
2180
2181 /*
2182 * Add the device to our list of discovered devices
2183 * and the bus list for fixup functions, etc.
2184 */
2185 down_write(&pci_bus_sem);
2186 list_add_tail(&dev->bus_list, &bus->devices);
2187 up_write(&pci_bus_sem);
2188
2189 ret = pcibios_add_device(dev);
2190 WARN_ON(ret < 0);
2191
2192 /* Setup MSI irq domain */
2193 pci_set_msi_domain(dev);
2194
2195 /* Notifiers called from device_add() may use PCI capabilities; defer driver matching until pci_bus_add_device() */
2196 dev->match_driver = false;
2197 ret = device_add(&dev->dev);
2198 WARN_ON(ret < 0);
2199 }
2200
2201 struct pci_dev *pci_scan_single_device(struct pci_bus *bus, int devfn)
2202 {
2203 struct pci_dev *dev;
2204
2205 dev = pci_get_slot(bus, devfn);
2206 if (dev) {
2207 pci_dev_put(dev);
2208 return dev;
2209 }
2210
2211 dev = pci_scan_device(bus, devfn);
2212 if (!dev)
2213 return NULL;
2214
2215 pci_device_add(dev, bus);
2216
2217 return dev;
2218 }
2219 EXPORT_SYMBOL(pci_scan_single_device);
2220
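/*
 * With ARI, the functions of a device form a linked list: each
 * function's ARI capability carries a Next Function Number, and 0
 * terminates the list.  Without ARI we step through functions 0-7
 * of a multifunction device.
 */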
2221 static unsigned next_fn(struct pci_bus *bus, struct pci_dev *dev, unsigned fn)
2222 {
2223 int pos;
2224 u16 cap = 0;
2225 unsigned next_fn;
2226
2227 if (pci_ari_enabled(bus)) {
2228 if (!dev)
2229 return 0;
2230 pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ARI);
2231 if (!pos)
2232 return 0;
2233
2234 pci_read_config_word(dev, pos + PCI_ARI_CAP, &cap);
2235 next_fn = PCI_ARI_CAP_NFN(cap);
2236 if (next_fn <= fn)
2237 return 0; /* protect against malformed list */
2238
2239 return next_fn;
2240 }
2241
2242 /* dev may be NULL for non-contiguous multifunction devices */
2243 if (!dev || dev->multifunction)
2244 return (fn + 1) % 8;
2245
2246 return 0;
2247 }
2248
2249 static int only_one_child(struct pci_bus *bus)
2250 {
2251 struct pci_dev *bridge = bus->self;
2252
2253 /*
2254 * Systems with unusual topologies set PCI_SCAN_ALL_PCIE_DEVS so
2255 * we scan for all possible devices, not just Device 0.
2256 */
2257 if (pci_has_flag(PCI_SCAN_ALL_PCIE_DEVS))
2258 return 0;
2259
2260 /*
2261 * A PCIe Downstream Port normally leads to a Link with only Device
2262 * 0 on it (PCIe spec r3.1, sec 7.3.1). As an optimization, scan
2263 * only for Device 0 in that situation.
2264 *
2265 * Checking has_secondary_link is a hack to identify Downstream
2266 * Ports because sometimes Switches are configured such that the
2267 * PCIe Port Type labels are backwards.
2268 */
2269 if (bridge && pci_is_pcie(bridge) && bridge->has_secondary_link)
2270 return 1;
2271
2272 return 0;
2273 }
2274
2275 /**
2276 * pci_scan_slot - scan a PCI slot on a bus for devices.
2277 * @bus: PCI bus to scan
2278 * @devfn: slot number to scan (must have zero function)
2279 *
2280 * Scan a PCI slot on the specified PCI bus for devices, adding
2281 * discovered devices to the @bus->devices list. New devices
2282 * will not have is_added set.
2283 *
2284 * Returns the number of new devices found.
2285 */
2286 int pci_scan_slot(struct pci_bus *bus, int devfn)
2287 {
2288 unsigned fn, nr = 0;
2289 struct pci_dev *dev;
2290
2291 if (only_one_child(bus) && (devfn > 0))
2292 return 0; /* Already scanned the entire slot */
2293
2294 dev = pci_scan_single_device(bus, devfn);
2295 if (!dev)
2296 return 0;
2297 if (!dev->is_added)
2298 nr++;
2299
2300 for (fn = next_fn(bus, dev, 0); fn > 0; fn = next_fn(bus, dev, fn)) {
2301 dev = pci_scan_single_device(bus, devfn + fn);
2302 if (dev) {
2303 if (!dev->is_added)
2304 nr++;
2305 dev->multifunction = 1;
2306 }
2307 }
2308
2309 /* Initialize the link ASPM state if devices were found behind the bridge */
2310 if (bus->self && nr)
2311 pcie_aspm_init_link_state(bus->self);
2312
2313 return nr;
2314 }
2315 EXPORT_SYMBOL(pci_scan_slot);
2316
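/*
 * pcie_bus_config is selected on the kernel command line with
 * pci=pcie_bus_tune_off, pcie_bus_safe, pcie_bus_perf or
 * pcie_bus_peer2peer; see Documentation/admin-guide/kernel-parameters.txt.
 */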
2317 static int pcie_find_smpss(struct pci_dev *dev, void *data)
2318 {
2319 u8 *smpss = data;
2320
2321 if (!pci_is_pcie(dev))
2322 return 0;
2323
2324 /*
2325 * We don't have a way to change MPS settings on devices that have
2326 * drivers attached. A hot-added device might support only the minimum
2327 * MPS setting (MPS=128). Therefore, if the fabric contains a bridge
2328 * where devices may be hot-added, we limit the fabric MPS to 128 so
2329 * hot-added devices will work correctly.
2330 *
2331 * However, if we hot-add a device to a slot directly below a Root
2332 * Port, it's impossible for there to be other existing devices below
2333 * the port. We don't limit the MPS in this case because we can
2334 * reconfigure MPS on both the Root Port and the hot-added device,
2335 * and there are no other devices involved.
2336 *
2337 * Note that this PCIE_BUS_SAFE path assumes no peer-to-peer DMA.
2338 */
2339 if (dev->is_hotplug_bridge &&
2340 pci_pcie_type(dev) != PCI_EXP_TYPE_ROOT_PORT)
2341 *smpss = 0;
2342
2343 if (*smpss > dev->pcie_mpss)
2344 *smpss = dev->pcie_mpss;
2345
2346 return 0;
2347 }
2348
2349 static void pcie_write_mps(struct pci_dev *dev, int mps)
2350 {
2351 int rc;
2352
2353 if (pcie_bus_config == PCIE_BUS_PERFORMANCE) {
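/*
 * pcie_mpss caches the 3-bit Max_Payload_Size Supported field from
 * Device Capabilities; the supported size in bytes is 128 << pcie_mpss
 * (0 => 128B up to 5 => 4096B).
 */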
2354 mps = 128 << dev->pcie_mpss;
2355
2356 if (pci_pcie_type(dev) != PCI_EXP_TYPE_ROOT_PORT &&
2357 dev->bus->self)
2358 /* For "Performance", the assumption is made that
2359 * downstream communication will never be larger than
2360 * the MRRS. So, the MPS only needs to be configured
2361 * for the upstream communication. This being the case,
2362 * walk from the top down and set the MPS of the child
2363 * to that of the parent bus.
2364 *
2365 * Configure the device MPS with the smaller of the
2366 * device MPSS or the bridge MPS (which is assumed to be
2367 * properly configured at this point to the largest
2368 * allowable MPS based on its parent bus).
2369 */
2370 mps = min(mps, pcie_get_mps(dev->bus->self));
2371 }
2372
2373 rc = pcie_set_mps(dev, mps);
2374 if (rc)
2375 dev_err(&dev->dev, "Failed attempting to set the MPS\n");
2376 }
2377
2378 static void pcie_write_mrrs(struct pci_dev *dev)
2379 {
2380 int rc, mrrs;
2381
2382 /* In the "safe" case, do not configure the MRRS. There appear to be
2383 * issues with setting MRRS to 0 on a number of devices.
2384 */
2385 if (pcie_bus_config != PCIE_BUS_PERFORMANCE)
2386 return;
2387
2388 /* For Max performance, the MRRS must be set to the largest supported
2389 * value. However, it cannot be configured larger than the MPS the
2390 * device or the bus can support. This should already be properly
2391 * configured by a prior call to pcie_write_mps.
2392 */
2393 mrrs = pcie_get_mps(dev);
2394
2395 /* MRRS is an R/W register. Invalid values can be written, but a
2396 * subsequent read will verify if the value is acceptable or not.
2397 * If the MRRS value provided is not acceptable (e.g., too large),
2398 * shrink the value until it is acceptable to the HW.
2399 */
2400 while (mrrs != pcie_get_readrq(dev) && mrrs >= 128) {
2401 rc = pcie_set_readrq(dev, mrrs);
2402 if (!rc)
2403 break;
2404
2405 dev_warn(&dev->dev, "Failed attempting to set the MRRS\n");
2406 mrrs /= 2;
2407 }
2408
2409 if (mrrs < 128)
2410 dev_err(&dev->dev, "MRRS was unable to be configured with a safe value. If problems are experienced, try running with pci=pcie_bus_safe\n");
2411 }
2412
2413 static int pcie_bus_configure_set(struct pci_dev *dev, void *data)
2414 {
2415 int mps, orig_mps;
2416
2417 if (!pci_is_pcie(dev))
2418 return 0;
2419
2420 if (pcie_bus_config == PCIE_BUS_TUNE_OFF ||
2421 pcie_bus_config == PCIE_BUS_DEFAULT)
2422 return 0;
2423
2424 mps = 128 << *(u8 *)data;
2425 orig_mps = pcie_get_mps(dev);
2426
2427 pcie_write_mps(dev, mps);
2428 pcie_write_mrrs(dev);
2429
2430 dev_info(&dev->dev, "Max Payload Size set to %4d/%4d (was %4d), Max Read Rq %4d\n",
2431 pcie_get_mps(dev), 128 << dev->pcie_mpss,
2432 orig_mps, pcie_get_readrq(dev));
2433
2434 return 0;
2435 }
2436
2437 /* pcie_bus_configure_settings requires that pci_walk_bus work in a top-down,
2438 * parents then children fashion. If this changes, then this code will not
2439 * work as designed.
2440 */
2441 void pcie_bus_configure_settings(struct pci_bus *bus)
2442 {
2443 u8 smpss = 0;
2444
2445 if (!bus->self)
2446 return;
2447
2448 if (!pci_is_pcie(bus->self))
2449 return;
2450
2451 /* FIXME - Peer to peer DMA is possible, though the endpoint would need
2452 * to be aware of the MPS of the destination. To work around this,
2453 * simply force the MPS of the entire system to the smallest possible.
2454 */
2455 if (pcie_bus_config == PCIE_BUS_PEER2PEER)
2456 smpss = 0;
2457
2458 if (pcie_bus_config == PCIE_BUS_SAFE) {
2459 smpss = bus->self->pcie_mpss;
2460
2461 pcie_find_smpss(bus->self, &smpss);
2462 pci_walk_bus(bus, pcie_find_smpss, &smpss);
2463 }
2464
2465 pcie_bus_configure_set(bus->self, &smpss);
2466 pci_walk_bus(bus, pcie_bus_configure_set, &smpss);
2467 }
2468 EXPORT_SYMBOL_GPL(pcie_bus_configure_settings);
2469
2470 /*
2471 * Called after each bus is probed, but before its children are examined. This
2472 * is marked as __weak because multiple architectures define it.
2473 */
2474 void __weak pcibios_fixup_bus(struct pci_bus *bus)
2475 {
2476 /* nothing to do, expected to be removed in the future */
2477 }
2478
2479 /**
2480 * pci_scan_child_bus_extend() - Scan devices below a bus
2481 * @bus: Bus to scan for devices
2482 * @available_buses: Total number of buses available (%0 means do not
2483 * try to extend beyond the minimum)
2484 *
2485 * Scans devices below @bus, including subordinate buses. Returns the
2486 * new subordinate bus number covering all discovered devices. Passing
2487 * @available_buses causes the remaining bus space to be distributed
2488 * equally between hotplug-capable bridges to allow future extension of the
2489 * hierarchy.
2490 */
2491 static unsigned int pci_scan_child_bus_extend(struct pci_bus *bus,
2492 unsigned int available_buses)
2493 {
2494 unsigned int used_buses, normal_bridges = 0, hotplug_bridges = 0;
2495 unsigned int start = bus->busn_res.start;
2496 unsigned int devfn, cmax, max = start;
2497 struct pci_dev *dev;
2498
2499 dev_dbg(&bus->dev, "scanning bus\n");
2500
2501 /* Go find them, Rover! */
2502 for (devfn = 0; devfn < 0x100; devfn += 8)
2503 pci_scan_slot(bus, devfn);
2504
2505 /* Reserve buses for SR-IOV capability. */
2506 used_buses = pci_iov_bus_range(bus);
2507 max += used_buses;
2508
2509 /*
2510 * After performing arch-dependent fixup of the bus, look behind
2511 * all PCI-to-PCI bridges on this bus.
2512 */
2513 if (!bus->is_added) {
2514 dev_dbg(&bus->dev, "fixups for bus\n");
2515 pcibios_fixup_bus(bus);
2516 bus->is_added = 1;
2517 }
2518
2519 /*
2520 * Calculate how many hotplug bridges and normal bridges there
2521 * are on this bus. We will distribute the additional available
2522 * buses between hotplug bridges.
2523 */
2524 for_each_pci_bridge(dev, bus) {
2525 if (dev->is_hotplug_bridge)
2526 hotplug_bridges++;
2527 else
2528 normal_bridges++;
2529 }
2530
2531 /*
2532 * Scan bridges that are already configured. We don't touch them
2533 * unless they are misconfigured (which will be done in the second
2534 * scan below).
2535 */
2536 for_each_pci_bridge(dev, bus) {
2537 cmax = max;
2538 max = pci_scan_bridge_extend(bus, dev, max, 0, 0);
2539 used_buses += max - cmax;
2540 }
2541
2542 /* Scan bridges that need to be reconfigured */
2543 for_each_pci_bridge(dev, bus) {
2544 unsigned int buses = 0;
2545
2546 if (!hotplug_bridges && normal_bridges == 1) {
2547 /*
2548 * There is only one bridge on the bus (upstream
2549 * port) so it gets all available buses which it
2550 * can then distribute to the possible hotplug
2551 * bridges below.
2552 */
2553 buses = available_buses;
2554 } else if (dev->is_hotplug_bridge) {
2555 /*
2556 * Distribute the extra buses between hotplug
2557 * bridges if any.
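 * For example, with 12 spare buses and 3 hotplug
 * bridges, each bridge can grow by up to 4 buses,
 * further capped by the number still unused.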
2558 */
2559 buses = available_buses / hotplug_bridges;
2560 buses = min(buses, available_buses - used_buses);
2561 }
2562
2563 cmax = max;
2564 max = pci_scan_bridge_extend(bus, dev, cmax, buses, 1);
2565 used_buses += max - cmax;
2566 }
2567
2568 /*
2569 * Make sure a hotplug bridge has at least the minimum requested
2570 * number of buses but allow it to grow up to the maximum available
2571 * bus number if there is room.
2572 */
2573 if (bus->self && bus->self->is_hotplug_bridge) {
2574 used_buses = max_t(unsigned int, available_buses,
2575 pci_hotplug_bus_size - 1);
2576 if (max - start < used_buses) {
2577 max = start + used_buses;
2578
2579 /* Do not allocate more buses than we have room left for */
2580 if (max > bus->busn_res.end)
2581 max = bus->busn_res.end;
2582
2583 dev_dbg(&bus->dev, "%pR extended by %#02x\n",
2584 &bus->busn_res, max - start);
2585 }
2586 }
2587
2588 /*
2589 * We've scanned the bus and so we know all about what's on
2590 * the other side of any bridges that may be on this bus plus
2591 * any devices.
2592 *
2593 * Return how far we've got finding sub-buses.
2594 */
2595 dev_dbg(&bus->dev, "bus scan returning with max=%02x\n", max);
2596 return max;
2597 }
2598
2599 /**
2600 * pci_scan_child_bus() - Scan devices below a bus
2601 * @bus: Bus to scan for devices
2602 *
2603 * Scans devices below @bus, including subordinate buses. Returns the
2604 * new subordinate bus number covering all discovered devices.
2605 */
2606 unsigned int pci_scan_child_bus(struct pci_bus *bus)
2607 {
2608 return pci_scan_child_bus_extend(bus, 0);
2609 }
2610 EXPORT_SYMBOL_GPL(pci_scan_child_bus);
2611
2612 /**
2613 * pcibios_root_bridge_prepare - Platform-specific host bridge setup.
2614 * @bridge: Host bridge to set up.
2615 *
2616 * Default empty implementation. Replace with an architecture-specific setup
2617 * routine, if necessary.
2618 */
2619 int __weak pcibios_root_bridge_prepare(struct pci_host_bridge *bridge)
2620 {
2621 return 0;
2622 }
2623
2624 void __weak pcibios_add_bus(struct pci_bus *bus)
2625 {
2626 }
2627
2628 void __weak pcibios_remove_bus(struct pci_bus *bus)
2629 {
2630 }
2631
2632 struct pci_bus *pci_create_root_bus(struct device *parent, int bus,
2633 struct pci_ops *ops, void *sysdata, struct list_head *resources)
2634 {
2635 int error;
2636 struct pci_host_bridge *bridge;
2637
2638 bridge = pci_alloc_host_bridge(0);
2639 if (!bridge)
2640 return NULL;
2641
2642 bridge->dev.parent = parent;
2643
2644 list_splice_init(resources, &bridge->windows);
2645 bridge->sysdata = sysdata;
2646 bridge->busnr = bus;
2647 bridge->ops = ops;
2648
2649 error = pci_register_host_bridge(bridge);
2650 if (error < 0)
2651 goto err_out;
2652
2653 return bridge->bus;
2654
2655 err_out:
2656 kfree(bridge);
2657 return NULL;
2658 }
2659 EXPORT_SYMBOL_GPL(pci_create_root_bus);
2660
2661 int pci_bus_insert_busn_res(struct pci_bus *b, int bus, int bus_max)
2662 {
2663 struct resource *res = &b->busn_res;
2664 struct resource *parent_res, *conflict;
2665
2666 res->start = bus;
2667 res->end = bus_max;
2668 res->flags = IORESOURCE_BUS;
2669
2670 if (!pci_is_root_bus(b))
2671 parent_res = &b->parent->busn_res;
2672 else {
2673 parent_res = get_pci_domain_busn_res(pci_domain_nr(b));
2674 res->flags |= IORESOURCE_PCI_FIXED;
2675 }
2676
2677 conflict = request_resource_conflict(parent_res, res);
2678
2679 if (conflict)
2680 dev_printk(KERN_DEBUG, &b->dev,
2681 "busn_res: can not insert %pR under %s%pR (conflicts with %s %pR)\n",
2682 res, pci_is_root_bus(b) ? "domain " : "",
2683 parent_res, conflict->name, conflict);
2684
2685 return conflict == NULL;
2686 }
2687
2688 int pci_bus_update_busn_res_end(struct pci_bus *b, int bus_max)
2689 {
2690 struct resource *res = &b->busn_res;
2691 struct resource old_res = *res;
2692 resource_size_t size;
2693 int ret;
2694
2695 if (res->start > bus_max)
2696 return -EINVAL;
2697
2698 size = bus_max - res->start + 1;
2699 ret = adjust_resource(res, res->start, size);
2700 dev_printk(KERN_DEBUG, &b->dev,
2701 "busn_res: %pR end %s updated to %02x\n",
2702 &old_res, ret ? "can not be" : "is", bus_max);
2703
2704 if (!ret && !res->parent)
2705 pci_bus_insert_busn_res(b, res->start, res->end);
2706
2707 return ret;
2708 }
2709
2710 void pci_bus_release_busn_res(struct pci_bus *b)
2711 {
2712 struct resource *res = &b->busn_res;
2713 int ret;
2714
2715 if (!res->flags || !res->parent)
2716 return;
2717
2718 ret = release_resource(res);
2719 dev_printk(KERN_DEBUG, &b->dev,
2720 "busn_res: %pR %s released\n",
2721 res, ret ? "can not be" : "is");
2722 }
2723
2724 int pci_scan_root_bus_bridge(struct pci_host_bridge *bridge)
2725 {
2726 struct resource_entry *window;
2727 bool found = false;
2728 struct pci_bus *b;
2729 int max, bus, ret;
2730
2731 if (!bridge)
2732 return -EINVAL;
2733
2734 resource_list_for_each_entry(window, &bridge->windows)
2735 if (window->res->flags & IORESOURCE_BUS) {
2736 found = true;
2737 break;
2738 }
2739
2740 ret = pci_register_host_bridge(bridge);
2741 if (ret < 0)
2742 return ret;
2743
2744 b = bridge->bus;
2745 bus = bridge->busnr;
2746
2747 if (!found) {
2748 dev_info(&b->dev,
2749 "No busn resource found for root bus, will use [bus %02x-ff]\n",
2750 bus);
2751 pci_bus_insert_busn_res(b, bus, 255);
2752 }
2753
2754 max = pci_scan_child_bus(b);
2755
2756 if (!found)
2757 pci_bus_update_busn_res_end(b, max);
2758
2759 return 0;
2760 }
2761 EXPORT_SYMBOL(pci_scan_root_bus_bridge);
2762
2763 struct pci_bus *pci_scan_root_bus(struct device *parent, int bus,
2764 struct pci_ops *ops, void *sysdata, struct list_head *resources)
2765 {
2766 struct resource_entry *window;
2767 bool found = false;
2768 struct pci_bus *b;
2769 int max;
2770
2771 resource_list_for_each_entry(window, resources)
2772 if (window->res->flags & IORESOURCE_BUS) {
2773 found = true;
2774 break;
2775 }
2776
2777 b = pci_create_root_bus(parent, bus, ops, sysdata, resources);
2778 if (!b)
2779 return NULL;
2780
2781 if (!found) {
2782 dev_info(&b->dev,
2783 "No busn resource found for root bus, will use [bus %02x-ff]\n",
2784 bus);
2785 pci_bus_insert_busn_res(b, bus, 255);
2786 }
2787
2788 max = pci_scan_child_bus(b);
2789
2790 if (!found)
2791 pci_bus_update_busn_res_end(b, max);
2792
2793 return b;
2794 }
2795 EXPORT_SYMBOL(pci_scan_root_bus);
2796
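/*
 * A typical host bridge driver (sketch; "my_pci_ops", "bus_range" and
 * "priv" are illustrative, not from this file) does roughly:
 *
 *	LIST_HEAD(resources);
 *	pci_add_resource(&resources, &ioport_resource);
 *	pci_add_resource(&resources, &iomem_resource);
 *	pci_add_resource(&resources, &bus_range);
 *	b = pci_scan_root_bus(parent, 0, &my_pci_ops, priv, &resources);
 *	if (b)
 *		pci_bus_add_devices(b);
 *
 * pci_scan_bus() below is the legacy wrapper that does the same with
 * the global I/O, memory and bus number resources.
 */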
2797 struct pci_bus *pci_scan_bus(int bus, struct pci_ops *ops,
2798 void *sysdata)
2799 {
2800 LIST_HEAD(resources);
2801 struct pci_bus *b;
2802
2803 pci_add_resource(&resources, &ioport_resource);
2804 pci_add_resource(&resources, &iomem_resource);
2805 pci_add_resource(&resources, &busn_resource);
2806 b = pci_create_root_bus(NULL, bus, ops, sysdata, &resources);
2807 if (b) {
2808 pci_scan_child_bus(b);
2809 } else {
2810 pci_free_resource_list(&resources);
2811 }
2812 return b;
2813 }
2814 EXPORT_SYMBOL(pci_scan_bus);
2815
2816 /**
2817 * pci_rescan_bus_bridge_resize - scan a PCI bus for devices.
2818 * @bridge: PCI bridge for the bus to scan
2819 *
2820 * Scan a PCI bus and child buses for new devices, add them,
2821 * and enable them, resizing bridge MMIO/IO resources if necessary
2822 * and possible. The caller must ensure the child devices are already
2823 * removed for resizing to occur.
2824 *
2825 * Returns the highest subordinate bus number discovered.
2826 */
2827 unsigned int pci_rescan_bus_bridge_resize(struct pci_dev *bridge)
2828 {
2829 unsigned int max;
2830 struct pci_bus *bus = bridge->subordinate;
2831
2832 max = pci_scan_child_bus(bus);
2833
2834 pci_assign_unassigned_bridge_resources(bridge);
2835
2836 pci_bus_add_devices(bus);
2837
2838 return max;
2839 }
2840
2841 /**
2842 * pci_rescan_bus - scan a PCI bus for devices.
2843 * @bus: PCI bus to scan
2844 *
2845 * Scan a PCI bus and child buses for new devices, add them,
2846 * and enable them.
2847 *
2848 * Returns the highest subordinate bus number discovered.
2849 */
2850 unsigned int pci_rescan_bus(struct pci_bus *bus)
2851 {
2852 unsigned int max;
2853
2854 max = pci_scan_child_bus(bus);
2855 pci_assign_unassigned_bus_resources(bus);
2856 pci_bus_add_devices(bus);
2857
2858 return max;
2859 }
2860 EXPORT_SYMBOL_GPL(pci_rescan_bus);
2861
2862 /*
2863 * pci_rescan_bus(), pci_rescan_bus_bridge_resize() and PCI device removal
2864 * routines should always be executed under this mutex.
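 *
 * A caller (for example a hotplug driver or the sysfs "rescan"
 * attribute) typically brackets the operation:
 *
 *	pci_lock_rescan_remove();
 *	pci_rescan_bus(bus);
 *	pci_unlock_rescan_remove();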
2865 */
2866 static DEFINE_MUTEX(pci_rescan_remove_lock);
2867
2868 void pci_lock_rescan_remove(void)
2869 {
2870 mutex_lock(&pci_rescan_remove_lock);
2871 }
2872 EXPORT_SYMBOL_GPL(pci_lock_rescan_remove);
2873
2874 void pci_unlock_rescan_remove(void)
2875 {
2876 mutex_unlock(&pci_rescan_remove_lock);
2877 }
2878 EXPORT_SYMBOL_GPL(pci_unlock_rescan_remove);
2879
2880 static int __init pci_sort_bf_cmp(const struct device *d_a,
2881 const struct device *d_b)
2882 {
2883 const struct pci_dev *a = to_pci_dev(d_a);
2884 const struct pci_dev *b = to_pci_dev(d_b);
2885
2886 if (pci_domain_nr(a->bus) < pci_domain_nr(b->bus)) return -1;
2887 else if (pci_domain_nr(a->bus) > pci_domain_nr(b->bus)) return 1;
2888
2889 if (a->bus->number < b->bus->number) return -1;
2890 else if (a->bus->number > b->bus->number) return 1;
2891
2892 if (a->devfn < b->devfn) return -1;
2893 else if (a->devfn > b->devfn) return 1;
2894
2895 return 0;
2896 }
2897
2898 void __init pci_sort_breadthfirst(void)
2899 {
2900 bus_sort_breadthfirst(&pci_bus_type, &pci_sort_bf_cmp);
2901 }
2902
2903 int pci_hp_add_bridge(struct pci_dev *dev)
2904 {
2905 struct pci_bus *parent = dev->bus;
2906 int busnr, start = parent->busn_res.start;
2907 unsigned int available_buses = 0;
2908 int end = parent->busn_res.end;
2909
2910 for (busnr = start; busnr <= end; busnr++) {
2911 if (!pci_find_bus(pci_domain_nr(parent), busnr))
2912 break;
2913 }
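/*
 * busnr is now either the first free bus number, or end + 1 if none
 * is free.  The post-decrement leaves busnr at the highest number
 * already in use, which is what pci_scan_bridge() expects as its
 * current max.
 */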
2914 if (busnr-- > end) {
2915 dev_err(&dev->dev, "No bus number available for hot-added bridge\n");
2916 return -1;
2917 }
2918
2919 /* Scan bridges that are already configured */
2920 busnr = pci_scan_bridge(parent, dev, busnr, 0);
2921
2922 /*
2923 * Distribute the available bus numbers between hotplug-capable
2924 * bridges to make extending the chain later possible.
2925 */
2926 available_buses = end - busnr;
2927
2928 /* Scan bridges that need to be reconfigured */
2929 pci_scan_bridge_extend(parent, dev, busnr, available_buses, 1);
2930
2931 if (!dev->subordinate)
2932 return -1;
2933
2934 return 0;
2935 }
2936 EXPORT_SYMBOL_GPL(pci_hp_add_bridge);