1 /*
2 * probe.c - PCI detection and setup code
3 */
4
5 #include <linux/kernel.h>
6 #include <linux/delay.h>
7 #include <linux/init.h>
8 #include <linux/pci.h>
9 #include <linux/of_device.h>
10 #include <linux/of_pci.h>
11 #include <linux/pci_hotplug.h>
12 #include <linux/slab.h>
13 #include <linux/module.h>
14 #include <linux/cpumask.h>
15 #include <linux/pci-aspm.h>
16 #include <linux/aer.h>
17 #include <linux/acpi.h>
18 #include <linux/irqdomain.h>
19 #include <linux/pm_runtime.h>
20 #include "pci.h"
21
22 #define CARDBUS_LATENCY_TIMER 176 /* secondary latency timer */
23 #define CARDBUS_RESERVE_BUSNR 3
24
25 static struct resource busn_resource = {
26 .name = "PCI busn",
27 .start = 0,
28 .end = 255,
29 .flags = IORESOURCE_BUS,
30 };
31
32 /* Ugh. Need to stop exporting this to modules. */
33 LIST_HEAD(pci_root_buses);
34 EXPORT_SYMBOL(pci_root_buses);
35
36 static LIST_HEAD(pci_domain_busn_res_list);
37
38 struct pci_domain_busn_res {
39 struct list_head list;
40 struct resource res;
41 int domain_nr;
42 };
43
44 static struct resource *get_pci_domain_busn_res(int domain_nr)
45 {
46 struct pci_domain_busn_res *r;
47
48 list_for_each_entry(r, &pci_domain_busn_res_list, list)
49 if (r->domain_nr == domain_nr)
50 return &r->res;
51
52 r = kzalloc(sizeof(*r), GFP_KERNEL);
53 if (!r)
54 return NULL;
55
56 r->domain_nr = domain_nr;
57 r->res.start = 0;
58 r->res.end = 0xff;
59 r->res.flags = IORESOURCE_BUS | IORESOURCE_PCI_FIXED;
60
61 list_add_tail(&r->list, &pci_domain_busn_res_list);
62
63 return &r->res;
64 }
65
66 static int find_anything(struct device *dev, void *data)
67 {
68 return 1;
69 }
70
/*
 * Some device drivers need to know if PCI is initialized.
 * Basically, we consider PCI uninitialized when no device can
 * be found on the pci_bus_type bus.
 */
76 int no_pci_devices(void)
77 {
78 struct device *dev;
79 int no_devices;
80
81 dev = bus_find_device(&pci_bus_type, NULL, NULL, find_anything);
82 no_devices = (dev == NULL);
83 put_device(dev);
84 return no_devices;
85 }
86 EXPORT_SYMBOL(no_pci_devices);
87
88 /*
89 * PCI Bus Class
90 */
91 static void release_pcibus_dev(struct device *dev)
92 {
93 struct pci_bus *pci_bus = to_pci_bus(dev);
94
95 put_device(pci_bus->bridge);
96 pci_bus_remove_resources(pci_bus);
97 pci_release_bus_of_node(pci_bus);
98 kfree(pci_bus);
99 }
100
101 static struct class pcibus_class = {
102 .name = "pci_bus",
103 .dev_release = &release_pcibus_dev,
104 .dev_groups = pcibus_groups,
105 };
106
107 static int __init pcibus_class_init(void)
108 {
109 return class_register(&pcibus_class);
110 }
111 postcore_initcall(pcibus_class_init);
112
113 static u64 pci_size(u64 base, u64 maxbase, u64 mask)
114 {
115 u64 size = mask & maxbase; /* Find the significant bits */
116 if (!size)
117 return 0;
118
/*
 * Find the least significant set bit to get the decode size,
 * and from that the extent.
 */
121 size = (size & ~(size-1)) - 1;
122
123 /* base == maxbase can be valid only if the BAR has
124 already been programmed with all 1s. */
125 if (base == maxbase && ((base | size) & mask) != mask)
126 return 0;
127
128 return size;
129 }
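
/*
 * Worked example: sizing a 1 MB 32-bit memory BAR writes all 1s and
 * reads back 0xfff00000 in the address bits (the device hardwires
 * the low 20 address bits to zero).  The least significant set bit,
 * 0x00100000, is the decode size, so pci_size() returns the extent
 * 0x000fffff.
 */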
130
131 static inline unsigned long decode_bar(struct pci_dev *dev, u32 bar)
132 {
133 u32 mem_type;
134 unsigned long flags;
135
136 if ((bar & PCI_BASE_ADDRESS_SPACE) == PCI_BASE_ADDRESS_SPACE_IO) {
137 flags = bar & ~PCI_BASE_ADDRESS_IO_MASK;
138 flags |= IORESOURCE_IO;
139 return flags;
140 }
141
142 flags = bar & ~PCI_BASE_ADDRESS_MEM_MASK;
143 flags |= IORESOURCE_MEM;
144 if (flags & PCI_BASE_ADDRESS_MEM_PREFETCH)
145 flags |= IORESOURCE_PREFETCH;
146
147 mem_type = bar & PCI_BASE_ADDRESS_MEM_TYPE_MASK;
148 switch (mem_type) {
149 case PCI_BASE_ADDRESS_MEM_TYPE_32:
150 break;
151 case PCI_BASE_ADDRESS_MEM_TYPE_1M:
152 /* 1M mem BAR treated as 32-bit BAR */
153 break;
154 case PCI_BASE_ADDRESS_MEM_TYPE_64:
155 flags |= IORESOURCE_MEM_64;
156 break;
157 default:
158 /* mem unknown type treated as 32-bit BAR */
159 break;
160 }
161 return flags;
162 }
163
164 #define PCI_COMMAND_DECODE_ENABLE (PCI_COMMAND_MEMORY | PCI_COMMAND_IO)
165
166 /**
 * __pci_read_base - read a PCI BAR
168 * @dev: the PCI device
169 * @type: type of the BAR
170 * @res: resource buffer to be filled in
171 * @pos: BAR position in the config space
172 *
173 * Returns 1 if the BAR is 64-bit, or 0 if 32-bit.
174 */
175 int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type,
176 struct resource *res, unsigned int pos)
177 {
178 u32 l = 0, sz = 0, mask;
179 u64 l64, sz64, mask64;
180 u16 orig_cmd;
181 struct pci_bus_region region, inverted_region;
182
183 mask = type ? PCI_ROM_ADDRESS_MASK : ~0;
184
185 /* No printks while decoding is disabled! */
186 if (!dev->mmio_always_on) {
187 pci_read_config_word(dev, PCI_COMMAND, &orig_cmd);
188 if (orig_cmd & PCI_COMMAND_DECODE_ENABLE) {
189 pci_write_config_word(dev, PCI_COMMAND,
190 orig_cmd & ~PCI_COMMAND_DECODE_ENABLE);
191 }
192 }
193
194 res->name = pci_name(dev);
195
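/*
 * Classic BAR sizing: remember the original value, write all 1s,
 * read back the size mask (the device hardwires the low address
 * bits to zero), then restore the original value.
 */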
196 pci_read_config_dword(dev, pos, &l);
197 pci_write_config_dword(dev, pos, l | mask);
198 pci_read_config_dword(dev, pos, &sz);
199 pci_write_config_dword(dev, pos, l);
200
201 /*
202 * All bits set in sz means the device isn't working properly.
203 * If the BAR isn't implemented, all bits must be 0. If it's a
204 * memory BAR or a ROM, bit 0 must be clear; if it's an io BAR, bit
205 * 1 must be clear.
206 */
207 if (sz == 0xffffffff)
208 sz = 0;
209
210 /*
211 * I don't know how l can have all bits set. Copied from old code.
212 * Maybe it fixes a bug on some ancient platform.
213 */
214 if (l == 0xffffffff)
215 l = 0;
216
217 if (type == pci_bar_unknown) {
218 res->flags = decode_bar(dev, l);
219 res->flags |= IORESOURCE_SIZEALIGN;
220 if (res->flags & IORESOURCE_IO) {
221 l64 = l & PCI_BASE_ADDRESS_IO_MASK;
222 sz64 = sz & PCI_BASE_ADDRESS_IO_MASK;
223 mask64 = PCI_BASE_ADDRESS_IO_MASK & (u32)IO_SPACE_LIMIT;
224 } else {
225 l64 = l & PCI_BASE_ADDRESS_MEM_MASK;
226 sz64 = sz & PCI_BASE_ADDRESS_MEM_MASK;
227 mask64 = (u32)PCI_BASE_ADDRESS_MEM_MASK;
228 }
229 } else {
230 if (l & PCI_ROM_ADDRESS_ENABLE)
231 res->flags |= IORESOURCE_ROM_ENABLE;
232 l64 = l & PCI_ROM_ADDRESS_MASK;
233 sz64 = sz & PCI_ROM_ADDRESS_MASK;
234 mask64 = PCI_ROM_ADDRESS_MASK;
235 }
236
237 if (res->flags & IORESOURCE_MEM_64) {
238 pci_read_config_dword(dev, pos + 4, &l);
239 pci_write_config_dword(dev, pos + 4, ~0);
240 pci_read_config_dword(dev, pos + 4, &sz);
241 pci_write_config_dword(dev, pos + 4, l);
242
243 l64 |= ((u64)l << 32);
244 sz64 |= ((u64)sz << 32);
245 mask64 |= ((u64)~0 << 32);
246 }
247
248 if (!dev->mmio_always_on && (orig_cmd & PCI_COMMAND_DECODE_ENABLE))
249 pci_write_config_word(dev, PCI_COMMAND, orig_cmd);
250
251 if (!sz64)
252 goto fail;
253
254 sz64 = pci_size(l64, sz64, mask64);
255 if (!sz64) {
256 dev_info(&dev->dev, FW_BUG "reg 0x%x: invalid BAR (can't size)\n",
257 pos);
258 goto fail;
259 }
260
261 if (res->flags & IORESOURCE_MEM_64) {
262 if ((sizeof(pci_bus_addr_t) < 8 || sizeof(resource_size_t) < 8)
263 && sz64 > 0x100000000ULL) {
264 res->flags |= IORESOURCE_UNSET | IORESOURCE_DISABLED;
265 res->start = 0;
266 res->end = 0;
267 dev_err(&dev->dev, "reg 0x%x: can't handle BAR larger than 4GB (size %#010llx)\n",
268 pos, (unsigned long long)sz64);
269 goto out;
270 }
271
272 if ((sizeof(pci_bus_addr_t) < 8) && l) {
273 /* Above 32-bit boundary; try to reallocate */
274 res->flags |= IORESOURCE_UNSET;
275 res->start = 0;
276 res->end = sz64;
277 dev_info(&dev->dev, "reg 0x%x: can't handle BAR above 4GB (bus address %#010llx)\n",
278 pos, (unsigned long long)l64);
279 goto out;
280 }
281 }
282
283 region.start = l64;
284 region.end = l64 + sz64;
285
286 pcibios_bus_to_resource(dev->bus, res, &region);
287 pcibios_resource_to_bus(dev->bus, &inverted_region, res);
288
289 /*
 * If "A" is a BAR value (a bus address), "bus_to_resource(A)" is
 * the corresponding resource address (the physical address used by
 * the CPU).  Converting that resource address back to a bus address
293 * should yield the original BAR value:
294 *
295 * resource_to_bus(bus_to_resource(A)) == A
296 *
297 * If it doesn't, CPU accesses to "bus_to_resource(A)" will not
298 * be claimed by the device.
299 */
300 if (inverted_region.start != region.start) {
301 res->flags |= IORESOURCE_UNSET;
302 res->start = 0;
303 res->end = region.end - region.start;
304 dev_info(&dev->dev, "reg 0x%x: initial BAR value %#010llx invalid\n",
305 pos, (unsigned long long)region.start);
306 }
307
308 goto out;
309
311 fail:
312 res->flags = 0;
313 out:
314 if (res->flags)
315 dev_printk(KERN_DEBUG, &dev->dev, "reg 0x%x: %pR\n", pos, res);
316
317 return (res->flags & IORESOURCE_MEM_64) ? 1 : 0;
318 }
319
320 static void pci_read_bases(struct pci_dev *dev, unsigned int howmany, int rom)
321 {
322 unsigned int pos, reg;
323
324 if (dev->non_compliant_bars)
325 return;
326
327 for (pos = 0; pos < howmany; pos++) {
328 struct resource *res = &dev->resource[pos];
329 reg = PCI_BASE_ADDRESS_0 + (pos << 2);
330 pos += __pci_read_base(dev, pci_bar_unknown, res, reg);
331 }
332
333 if (rom) {
334 struct resource *res = &dev->resource[PCI_ROM_RESOURCE];
335 dev->rom_base_reg = rom;
336 res->flags = IORESOURCE_MEM | IORESOURCE_PREFETCH |
337 IORESOURCE_READONLY | IORESOURCE_SIZEALIGN;
338 __pci_read_base(dev, pci_bar_mem32, res, rom);
339 }
340 }
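
/*
 * pci_setup_device() passes @howmany by header type: 6 BARs for
 * type 0 (normal) devices, 2 for type 1 (PCI-to-PCI bridges) and 1
 * for type 2 (CardBus).  A 64-bit BAR occupies two slots, which is
 * why the loop also advances pos by __pci_read_base()'s return
 * value.
 */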
341
342 static void pci_read_bridge_io(struct pci_bus *child)
343 {
344 struct pci_dev *dev = child->self;
345 u8 io_base_lo, io_limit_lo;
346 unsigned long io_mask, io_granularity, base, limit;
347 struct pci_bus_region region;
348 struct resource *res;
349
350 io_mask = PCI_IO_RANGE_MASK;
351 io_granularity = 0x1000;
352 if (dev->io_window_1k) {
353 /* Support 1K I/O space granularity */
354 io_mask = PCI_IO_1K_RANGE_MASK;
355 io_granularity = 0x400;
356 }
357
358 res = child->resource[0];
359 pci_read_config_byte(dev, PCI_IO_BASE, &io_base_lo);
360 pci_read_config_byte(dev, PCI_IO_LIMIT, &io_limit_lo);
361 base = (io_base_lo & io_mask) << 8;
362 limit = (io_limit_lo & io_mask) << 8;
363
364 if ((io_base_lo & PCI_IO_RANGE_TYPE_MASK) == PCI_IO_RANGE_TYPE_32) {
365 u16 io_base_hi, io_limit_hi;
366
367 pci_read_config_word(dev, PCI_IO_BASE_UPPER16, &io_base_hi);
368 pci_read_config_word(dev, PCI_IO_LIMIT_UPPER16, &io_limit_hi);
369 base |= ((unsigned long) io_base_hi << 16);
370 limit |= ((unsigned long) io_limit_hi << 16);
371 }
372
373 if (base <= limit) {
374 res->flags = (io_base_lo & PCI_IO_RANGE_TYPE_MASK) | IORESOURCE_IO;
375 region.start = base;
376 region.end = limit + io_granularity - 1;
377 pcibios_bus_to_resource(dev->bus, res, &region);
378 dev_printk(KERN_DEBUG, &dev->dev, " bridge window %pR\n", res);
379 }
380 }
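
/*
 * Example: with the default 4K granularity, io_base_lo = 0x20 and
 * io_limit_lo = 0x30 decode to [io 0x2000-0x3fff]; bits [7:4] of
 * each register supply address bits [15:12], and the limit is
 * inclusive of its whole granule.
 */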
381
382 static void pci_read_bridge_mmio(struct pci_bus *child)
383 {
384 struct pci_dev *dev = child->self;
385 u16 mem_base_lo, mem_limit_lo;
386 unsigned long base, limit;
387 struct pci_bus_region region;
388 struct resource *res;
389
390 res = child->resource[1];
391 pci_read_config_word(dev, PCI_MEMORY_BASE, &mem_base_lo);
392 pci_read_config_word(dev, PCI_MEMORY_LIMIT, &mem_limit_lo);
393 base = ((unsigned long) mem_base_lo & PCI_MEMORY_RANGE_MASK) << 16;
394 limit = ((unsigned long) mem_limit_lo & PCI_MEMORY_RANGE_MASK) << 16;
395 if (base <= limit) {
396 res->flags = (mem_base_lo & PCI_MEMORY_RANGE_TYPE_MASK) | IORESOURCE_MEM;
397 region.start = base;
398 region.end = limit + 0xfffff;
399 pcibios_bus_to_resource(dev->bus, res, &region);
400 dev_printk(KERN_DEBUG, &dev->dev, " bridge window %pR\n", res);
401 }
402 }
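
/*
 * Example: mem_base_lo = 0xa000 and mem_limit_lo = 0xa000 decode to
 * [mem 0xa0000000-0xa00fffff]; bits [15:4] supply address bits
 * [31:20], so the window always covers whole 1 MB granules.
 */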
403
404 static void pci_read_bridge_mmio_pref(struct pci_bus *child)
405 {
406 struct pci_dev *dev = child->self;
407 u16 mem_base_lo, mem_limit_lo;
408 u64 base64, limit64;
409 pci_bus_addr_t base, limit;
410 struct pci_bus_region region;
411 struct resource *res;
412
413 res = child->resource[2];
414 pci_read_config_word(dev, PCI_PREF_MEMORY_BASE, &mem_base_lo);
415 pci_read_config_word(dev, PCI_PREF_MEMORY_LIMIT, &mem_limit_lo);
416 base64 = (mem_base_lo & PCI_PREF_RANGE_MASK) << 16;
417 limit64 = (mem_limit_lo & PCI_PREF_RANGE_MASK) << 16;
418
419 if ((mem_base_lo & PCI_PREF_RANGE_TYPE_MASK) == PCI_PREF_RANGE_TYPE_64) {
420 u32 mem_base_hi, mem_limit_hi;
421
422 pci_read_config_dword(dev, PCI_PREF_BASE_UPPER32, &mem_base_hi);
423 pci_read_config_dword(dev, PCI_PREF_LIMIT_UPPER32, &mem_limit_hi);
424
425 /*
426 * Some bridges set the base > limit by default, and some
427 * (broken) BIOSes do not initialize them. If we find
428 * this, just assume they are not being used.
429 */
430 if (mem_base_hi <= mem_limit_hi) {
431 base64 |= (u64) mem_base_hi << 32;
432 limit64 |= (u64) mem_limit_hi << 32;
433 }
434 }
435
436 base = (pci_bus_addr_t) base64;
437 limit = (pci_bus_addr_t) limit64;
438
439 if (base != base64) {
440 dev_err(&dev->dev, "can't handle bridge window above 4GB (bus address %#010llx)\n",
441 (unsigned long long) base64);
442 return;
443 }
444
445 if (base <= limit) {
446 res->flags = (mem_base_lo & PCI_PREF_RANGE_TYPE_MASK) |
447 IORESOURCE_MEM | IORESOURCE_PREFETCH;
448 if (res->flags & PCI_PREF_RANGE_TYPE_64)
449 res->flags |= IORESOURCE_MEM_64;
450 region.start = base;
451 region.end = limit + 0xfffff;
452 pcibios_bus_to_resource(dev->bus, res, &region);
453 dev_printk(KERN_DEBUG, &dev->dev, " bridge window %pR\n", res);
454 }
455 }
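
/*
 * Unlike the I/O and non-prefetchable memory windows, the
 * prefetchable window may be 64 bits wide; the UPPER32 registers
 * are meaningful only when the low word advertises
 * PCI_PREF_RANGE_TYPE_64.
 */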
456
457 void pci_read_bridge_bases(struct pci_bus *child)
458 {
459 struct pci_dev *dev = child->self;
460 struct resource *res;
461 int i;
462
463 if (pci_is_root_bus(child)) /* It's a host bus, nothing to read */
464 return;
465
466 dev_info(&dev->dev, "PCI bridge to %pR%s\n",
467 &child->busn_res,
468 dev->transparent ? " (subtractive decode)" : "");
469
470 pci_bus_remove_resources(child);
471 for (i = 0; i < PCI_BRIDGE_RESOURCE_NUM; i++)
472 child->resource[i] = &dev->resource[PCI_BRIDGE_RESOURCES+i];
473
474 pci_read_bridge_io(child);
475 pci_read_bridge_mmio(child);
476 pci_read_bridge_mmio_pref(child);
477
478 if (dev->transparent) {
479 pci_bus_for_each_resource(child->parent, res, i) {
480 if (res && res->flags) {
481 pci_bus_add_resource(child, res,
482 PCI_SUBTRACTIVE_DECODE);
483 dev_printk(KERN_DEBUG, &dev->dev,
484 " bridge window %pR (subtractive decode)\n",
485 res);
486 }
487 }
488 }
489 }
490
491 static struct pci_bus *pci_alloc_bus(struct pci_bus *parent)
492 {
493 struct pci_bus *b;
494
495 b = kzalloc(sizeof(*b), GFP_KERNEL);
496 if (!b)
497 return NULL;
498
499 INIT_LIST_HEAD(&b->node);
500 INIT_LIST_HEAD(&b->children);
501 INIT_LIST_HEAD(&b->devices);
502 INIT_LIST_HEAD(&b->slots);
503 INIT_LIST_HEAD(&b->resources);
504 b->max_bus_speed = PCI_SPEED_UNKNOWN;
505 b->cur_bus_speed = PCI_SPEED_UNKNOWN;
506 #ifdef CONFIG_PCI_DOMAINS_GENERIC
507 if (parent)
508 b->domain_nr = parent->domain_nr;
509 #endif
510 return b;
511 }
512
513 static void devm_pci_release_host_bridge_dev(struct device *dev)
514 {
515 struct pci_host_bridge *bridge = to_pci_host_bridge(dev);
516
517 if (bridge->release_fn)
518 bridge->release_fn(bridge);
519
520 pci_free_resource_list(&bridge->windows);
521 }
522
523 static void pci_release_host_bridge_dev(struct device *dev)
524 {
525 devm_pci_release_host_bridge_dev(dev);
526 kfree(to_pci_host_bridge(dev));
527 }
528
529 struct pci_host_bridge *pci_alloc_host_bridge(size_t priv)
530 {
531 struct pci_host_bridge *bridge;
532
533 bridge = kzalloc(sizeof(*bridge) + priv, GFP_KERNEL);
534 if (!bridge)
535 return NULL;
536
537 INIT_LIST_HEAD(&bridge->windows);
538 bridge->dev.release = pci_release_host_bridge_dev;
539
540 return bridge;
541 }
542 EXPORT_SYMBOL(pci_alloc_host_bridge);
543
544 struct pci_host_bridge *devm_pci_alloc_host_bridge(struct device *dev,
545 size_t priv)
546 {
547 struct pci_host_bridge *bridge;
548
549 bridge = devm_kzalloc(dev, sizeof(*bridge) + priv, GFP_KERNEL);
550 if (!bridge)
551 return NULL;
552
553 INIT_LIST_HEAD(&bridge->windows);
554 bridge->dev.release = devm_pci_release_host_bridge_dev;
555
556 return bridge;
557 }
558 EXPORT_SYMBOL(devm_pci_alloc_host_bridge);
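
/*
 * Illustrative host controller usage (a sketch only; "my_pcie" and
 * "my_pcie_ops" are hypothetical names, not APIs defined here):
 *
 *	bridge = devm_pci_alloc_host_bridge(dev, sizeof(struct my_pcie));
 *	if (!bridge)
 *		return -ENOMEM;
 *
 *	list_splice_init(&res, &bridge->windows);
 *	bridge->dev.parent = dev;
 *	bridge->sysdata = pcie;
 *	bridge->busnr = 0;
 *	bridge->ops = &my_pcie_ops;
 *
 *	ret = pci_scan_root_bus_bridge(bridge);
 */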
559
560 void pci_free_host_bridge(struct pci_host_bridge *bridge)
561 {
562 pci_free_resource_list(&bridge->windows);
563
564 kfree(bridge);
565 }
566 EXPORT_SYMBOL(pci_free_host_bridge);
567
568 static const unsigned char pcix_bus_speed[] = {
569 PCI_SPEED_UNKNOWN, /* 0 */
570 PCI_SPEED_66MHz_PCIX, /* 1 */
571 PCI_SPEED_100MHz_PCIX, /* 2 */
572 PCI_SPEED_133MHz_PCIX, /* 3 */
573 PCI_SPEED_UNKNOWN, /* 4 */
574 PCI_SPEED_66MHz_PCIX_ECC, /* 5 */
575 PCI_SPEED_100MHz_PCIX_ECC, /* 6 */
576 PCI_SPEED_133MHz_PCIX_ECC, /* 7 */
577 PCI_SPEED_UNKNOWN, /* 8 */
578 PCI_SPEED_66MHz_PCIX_266, /* 9 */
579 PCI_SPEED_100MHz_PCIX_266, /* A */
580 PCI_SPEED_133MHz_PCIX_266, /* B */
581 PCI_SPEED_UNKNOWN, /* C */
582 PCI_SPEED_66MHz_PCIX_533, /* D */
583 PCI_SPEED_100MHz_PCIX_533, /* E */
584 PCI_SPEED_133MHz_PCIX_533 /* F */
585 };
586
587 const unsigned char pcie_link_speed[] = {
588 PCI_SPEED_UNKNOWN, /* 0 */
589 PCIE_SPEED_2_5GT, /* 1 */
590 PCIE_SPEED_5_0GT, /* 2 */
591 PCIE_SPEED_8_0GT, /* 3 */
592 PCIE_SPEED_16_0GT, /* 4 */
593 PCI_SPEED_UNKNOWN, /* 5 */
594 PCI_SPEED_UNKNOWN, /* 6 */
595 PCI_SPEED_UNKNOWN, /* 7 */
596 PCI_SPEED_UNKNOWN, /* 8 */
597 PCI_SPEED_UNKNOWN, /* 9 */
598 PCI_SPEED_UNKNOWN, /* A */
599 PCI_SPEED_UNKNOWN, /* B */
600 PCI_SPEED_UNKNOWN, /* C */
601 PCI_SPEED_UNKNOWN, /* D */
602 PCI_SPEED_UNKNOWN, /* E */
603 PCI_SPEED_UNKNOWN /* F */
604 };
605
606 void pcie_update_link_speed(struct pci_bus *bus, u16 linksta)
607 {
608 bus->cur_bus_speed = pcie_link_speed[linksta & PCI_EXP_LNKSTA_CLS];
609 }
610 EXPORT_SYMBOL_GPL(pcie_update_link_speed);
611
612 static unsigned char agp_speeds[] = {
613 AGP_UNKNOWN,
614 AGP_1X,
615 AGP_2X,
616 AGP_4X,
617 AGP_8X
618 };
619
620 static enum pci_bus_speed agp_speed(int agp3, int agpstat)
621 {
622 int index = 0;
623
624 if (agpstat & 4)
625 index = 3;
626 else if (agpstat & 2)
627 index = 2;
628 else if (agpstat & 1)
629 index = 1;
630 else
631 goto out;
632
633 if (agp3) {
634 index += 2;
635 if (index == 5)
636 index = 0;
637 }
638
639 out:
640 return agp_speeds[index];
641 }
642
643 static void pci_set_bus_speed(struct pci_bus *bus)
644 {
645 struct pci_dev *bridge = bus->self;
646 int pos;
647
648 pos = pci_find_capability(bridge, PCI_CAP_ID_AGP);
649 if (!pos)
650 pos = pci_find_capability(bridge, PCI_CAP_ID_AGP3);
651 if (pos) {
652 u32 agpstat, agpcmd;
653
654 pci_read_config_dword(bridge, pos + PCI_AGP_STATUS, &agpstat);
655 bus->max_bus_speed = agp_speed(agpstat & 8, agpstat & 7);
656
657 pci_read_config_dword(bridge, pos + PCI_AGP_COMMAND, &agpcmd);
658 bus->cur_bus_speed = agp_speed(agpstat & 8, agpcmd & 7);
659 }
660
661 pos = pci_find_capability(bridge, PCI_CAP_ID_PCIX);
662 if (pos) {
663 u16 status;
664 enum pci_bus_speed max;
665
666 pci_read_config_word(bridge, pos + PCI_X_BRIDGE_SSTATUS,
667 &status);
668
669 if (status & PCI_X_SSTATUS_533MHZ) {
670 max = PCI_SPEED_133MHz_PCIX_533;
671 } else if (status & PCI_X_SSTATUS_266MHZ) {
672 max = PCI_SPEED_133MHz_PCIX_266;
673 } else if (status & PCI_X_SSTATUS_133MHZ) {
674 if ((status & PCI_X_SSTATUS_VERS) == PCI_X_SSTATUS_V2)
675 max = PCI_SPEED_133MHz_PCIX_ECC;
676 else
677 max = PCI_SPEED_133MHz_PCIX;
678 } else {
679 max = PCI_SPEED_66MHz_PCIX;
680 }
681
682 bus->max_bus_speed = max;
683 bus->cur_bus_speed = pcix_bus_speed[
684 (status & PCI_X_SSTATUS_FREQ) >> 6];
685
686 return;
687 }
688
689 if (pci_is_pcie(bridge)) {
690 u32 linkcap;
691 u16 linksta;
692
693 pcie_capability_read_dword(bridge, PCI_EXP_LNKCAP, &linkcap);
694 bus->max_bus_speed = pcie_link_speed[linkcap & PCI_EXP_LNKCAP_SLS];
695
696 pcie_capability_read_word(bridge, PCI_EXP_LNKSTA, &linksta);
697 pcie_update_link_speed(bus, linksta);
698 }
699 }
700
701 static struct irq_domain *pci_host_bridge_msi_domain(struct pci_bus *bus)
702 {
703 struct irq_domain *d;
704
705 /*
706 * Any firmware interface that can resolve the msi_domain
707 * should be called from here.
708 */
709 d = pci_host_bridge_of_msi_domain(bus);
710 if (!d)
711 d = pci_host_bridge_acpi_msi_domain(bus);
712
713 #ifdef CONFIG_PCI_MSI_IRQ_DOMAIN
714 /*
715 * If no IRQ domain was found via the OF tree, try looking it up
716 * directly through the fwnode_handle.
717 */
718 if (!d) {
719 struct fwnode_handle *fwnode = pci_root_bus_fwnode(bus);
720
721 if (fwnode)
722 d = irq_find_matching_fwnode(fwnode,
723 DOMAIN_BUS_PCI_MSI);
724 }
725 #endif
726
727 return d;
728 }
729
730 static void pci_set_bus_msi_domain(struct pci_bus *bus)
731 {
732 struct irq_domain *d;
733 struct pci_bus *b;
734
735 /*
736 * The bus can be a root bus, a subordinate bus, or a virtual bus
737 * created by an SR-IOV device. Walk up to the first bridge device
738 * found or derive the domain from the host bridge.
739 */
740 for (b = bus, d = NULL; !d && !pci_is_root_bus(b); b = b->parent) {
741 if (b->self)
742 d = dev_get_msi_domain(&b->self->dev);
743 }
744
745 if (!d)
746 d = pci_host_bridge_msi_domain(b);
747
748 dev_set_msi_domain(&bus->dev, d);
749 }
750
751 static int pci_register_host_bridge(struct pci_host_bridge *bridge)
752 {
753 struct device *parent = bridge->dev.parent;
754 struct resource_entry *window, *n;
755 struct pci_bus *bus, *b;
756 resource_size_t offset;
757 LIST_HEAD(resources);
758 struct resource *res;
759 char addr[64], *fmt;
760 const char *name;
761 int err;
762
763 bus = pci_alloc_bus(NULL);
764 if (!bus)
765 return -ENOMEM;
766
767 bridge->bus = bus;
768
769 /* temporarily move resources off the list */
770 list_splice_init(&bridge->windows, &resources);
771 bus->sysdata = bridge->sysdata;
772 bus->msi = bridge->msi;
773 bus->ops = bridge->ops;
774 bus->number = bus->busn_res.start = bridge->busnr;
775 #ifdef CONFIG_PCI_DOMAINS_GENERIC
776 bus->domain_nr = pci_bus_find_domain_nr(bus, parent);
777 #endif
778
779 b = pci_find_bus(pci_domain_nr(bus), bridge->busnr);
780 if (b) {
781 /* If we already got to this bus through a different bridge, ignore it */
782 dev_dbg(&b->dev, "bus already known\n");
783 err = -EEXIST;
784 goto free;
785 }
786
787 dev_set_name(&bridge->dev, "pci%04x:%02x", pci_domain_nr(bus),
788 bridge->busnr);
789
790 err = pcibios_root_bridge_prepare(bridge);
791 if (err)
792 goto free;
793
err = device_register(&bridge->dev);
if (err) {
put_device(&bridge->dev);
goto free;
}
797
798 bus->bridge = get_device(&bridge->dev);
799 device_enable_async_suspend(bus->bridge);
800 pci_set_bus_of_node(bus);
801 pci_set_bus_msi_domain(bus);
802
803 if (!parent)
804 set_dev_node(bus->bridge, pcibus_to_node(bus));
805
806 bus->dev.class = &pcibus_class;
807 bus->dev.parent = bus->bridge;
808
809 dev_set_name(&bus->dev, "%04x:%02x", pci_domain_nr(bus), bus->number);
810 name = dev_name(&bus->dev);
811
812 err = device_register(&bus->dev);
813 if (err)
814 goto unregister;
815
816 pcibios_add_bus(bus);
817
818 /* Create legacy_io and legacy_mem files for this bus */
819 pci_create_legacy_files(bus);
820
821 if (parent)
822 dev_info(parent, "PCI host bridge to bus %s\n", name);
823 else
824 pr_info("PCI host bridge to bus %s\n", name);
825
826 /* Add initial resources to the bus */
827 resource_list_for_each_entry_safe(window, n, &resources) {
828 list_move_tail(&window->node, &bridge->windows);
829 offset = window->offset;
830 res = window->res;
831
832 if (res->flags & IORESOURCE_BUS)
833 pci_bus_insert_busn_res(bus, bus->number, res->end);
834 else
835 pci_bus_add_resource(bus, res, 0);
836
837 if (offset) {
838 if (resource_type(res) == IORESOURCE_IO)
839 fmt = " (bus address [%#06llx-%#06llx])";
840 else
841 fmt = " (bus address [%#010llx-%#010llx])";
842
843 snprintf(addr, sizeof(addr), fmt,
844 (unsigned long long)(res->start - offset),
845 (unsigned long long)(res->end - offset));
846 } else
847 addr[0] = '\0';
848
849 dev_info(&bus->dev, "root bus resource %pR%s\n", res, addr);
850 }
851
852 down_write(&pci_bus_sem);
853 list_add_tail(&bus->node, &pci_root_buses);
854 up_write(&pci_bus_sem);
855
856 return 0;
857
858 unregister:
859 put_device(&bridge->dev);
860 device_unregister(&bridge->dev);
861
862 free:
863 kfree(bus);
864 return err;
865 }
866
867 static struct pci_bus *pci_alloc_child_bus(struct pci_bus *parent,
868 struct pci_dev *bridge, int busnr)
869 {
870 struct pci_bus *child;
871 int i;
872 int ret;
873
/*
 * Allocate a new bus and inherit everything from the parent.
 */
877 child = pci_alloc_bus(parent);
878 if (!child)
879 return NULL;
880
881 child->parent = parent;
882 child->ops = parent->ops;
883 child->msi = parent->msi;
884 child->sysdata = parent->sysdata;
885 child->bus_flags = parent->bus_flags;
886
887 /* initialize some portions of the bus device, but don't register it
888 * now as the parent is not properly set up yet.
889 */
890 child->dev.class = &pcibus_class;
891 dev_set_name(&child->dev, "%04x:%02x", pci_domain_nr(child), busnr);
892
893 /*
894 * Set up the primary, secondary and subordinate
895 * bus numbers.
896 */
897 child->number = child->busn_res.start = busnr;
898 child->primary = parent->busn_res.start;
899 child->busn_res.end = 0xff;
900
901 if (!bridge) {
902 child->dev.parent = parent->bridge;
903 goto add_dev;
904 }
905
906 child->self = bridge;
907 child->bridge = get_device(&bridge->dev);
908 child->dev.parent = child->bridge;
909 pci_set_bus_of_node(child);
910 pci_set_bus_speed(child);
911
912 /* Set up default resource pointers and names.. */
913 for (i = 0; i < PCI_BRIDGE_RESOURCE_NUM; i++) {
914 child->resource[i] = &bridge->resource[PCI_BRIDGE_RESOURCES+i];
915 child->resource[i]->name = child->name;
916 }
917 bridge->subordinate = child;
918
919 add_dev:
920 pci_set_bus_msi_domain(child);
921 ret = device_register(&child->dev);
922 WARN_ON(ret < 0);
923
924 pcibios_add_bus(child);
925
926 if (child->ops->add_bus) {
927 ret = child->ops->add_bus(child);
928 if (WARN_ON(ret < 0))
929 dev_err(&child->dev, "failed to add bus: %d\n", ret);
930 }
931
932 /* Create legacy_io and legacy_mem files for this bus */
933 pci_create_legacy_files(child);
934
935 return child;
936 }
937
938 struct pci_bus *pci_add_new_bus(struct pci_bus *parent, struct pci_dev *dev,
939 int busnr)
940 {
941 struct pci_bus *child;
942
943 child = pci_alloc_child_bus(parent, dev, busnr);
944 if (child) {
945 down_write(&pci_bus_sem);
946 list_add_tail(&child->node, &parent->children);
947 up_write(&pci_bus_sem);
948 }
949 return child;
950 }
951 EXPORT_SYMBOL(pci_add_new_bus);
952
953 static void pci_enable_crs(struct pci_dev *pdev)
954 {
955 u16 root_cap = 0;
956
957 /* Enable CRS Software Visibility if supported */
958 pcie_capability_read_word(pdev, PCI_EXP_RTCAP, &root_cap);
959 if (root_cap & PCI_EXP_RTCAP_CRSVIS)
960 pcie_capability_set_word(pdev, PCI_EXP_RTCTL,
961 PCI_EXP_RTCTL_CRSSVE);
962 }
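
/*
 * A device may answer config requests with Configuration Request
 * Retry Status completions while it initializes after reset.  With
 * CRS Software Visibility enabled, a read of the Vendor ID then
 * returns the reserved value 0x0001 instead of being retried
 * invisibly by the Root Port; see pci_bus_wait_crs() below.
 */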
963
964 static unsigned int pci_scan_child_bus_extend(struct pci_bus *bus,
965 unsigned int available_buses);
966
967 /*
968 * pci_scan_bridge_extend() - Scan buses behind a bridge
969 * @bus: Parent bus the bridge is on
970 * @dev: Bridge itself
971 * @max: Starting subordinate number of buses behind this bridge
972 * @available_buses: Total number of buses available for this bridge and
973 * the devices below. After the minimal bus space has
974 * been allocated the remaining buses will be
975 * distributed equally between hotplug-capable bridges.
 * @pass: Either %0 (scan already configured bridges) or %1 (scan bridges
 * that need to be reconfigured).
978 *
979 * If it's a bridge, configure it and scan the bus behind it.
980 * For CardBus bridges, we don't scan behind as the devices will
981 * be handled by the bridge driver itself.
982 *
983 * We need to process bridges in two passes -- first we scan those
984 * already configured by the BIOS and after we are done with all of
985 * them, we proceed to assigning numbers to the remaining buses in
986 * order to avoid overlaps between old and new bus numbers.
987 */
988 static int pci_scan_bridge_extend(struct pci_bus *bus, struct pci_dev *dev,
989 int max, unsigned int available_buses,
990 int pass)
991 {
992 struct pci_bus *child;
993 int is_cardbus = (dev->hdr_type == PCI_HEADER_TYPE_CARDBUS);
994 u32 buses, i, j = 0;
995 u16 bctl;
996 u8 primary, secondary, subordinate;
997 int broken = 0;
998
999 /*
1000 * Make sure the bridge is powered on to be able to access config
1001 * space of devices below it.
1002 */
1003 pm_runtime_get_sync(&dev->dev);
1004
1005 pci_read_config_dword(dev, PCI_PRIMARY_BUS, &buses);
1006 primary = buses & 0xFF;
1007 secondary = (buses >> 8) & 0xFF;
1008 subordinate = (buses >> 16) & 0xFF;
1009
1010 dev_dbg(&dev->dev, "scanning [bus %02x-%02x] behind bridge, pass %d\n",
1011 secondary, subordinate, pass);
1012
1013 if (!primary && (primary != bus->number) && secondary && subordinate) {
1014 dev_warn(&dev->dev, "Primary bus is hard wired to 0\n");
1015 primary = bus->number;
1016 }
1017
1018 /* Check if setup is sensible at all */
1019 if (!pass &&
1020 (primary != bus->number || secondary <= bus->number ||
1021 secondary > subordinate)) {
1022 dev_info(&dev->dev, "bridge configuration invalid ([bus %02x-%02x]), reconfiguring\n",
1023 secondary, subordinate);
1024 broken = 1;
1025 }
1026
/*
 * Disable Master-Abort Mode during probing to avoid reporting
 * of bus errors on some architectures.
 */
1029 pci_read_config_word(dev, PCI_BRIDGE_CONTROL, &bctl);
1030 pci_write_config_word(dev, PCI_BRIDGE_CONTROL,
1031 bctl & ~PCI_BRIDGE_CTL_MASTER_ABORT);
1032
1033 pci_enable_crs(dev);
1034
1035 if ((secondary || subordinate) && !pcibios_assign_all_busses() &&
1036 !is_cardbus && !broken) {
1037 unsigned int cmax;
1038 /*
1039 * Bus already configured by firmware, process it in the first
1040 * pass and just note the configuration.
1041 */
1042 if (pass)
1043 goto out;
1044
1045 /*
1046 * The bus might already exist for two reasons: Either we are
1047 * rescanning the bus or the bus is reachable through more than
1048 * one bridge. The second case can happen with the i450NX
1049 * chipset.
1050 */
1051 child = pci_find_bus(pci_domain_nr(bus), secondary);
1052 if (!child) {
1053 child = pci_add_new_bus(bus, dev, secondary);
1054 if (!child)
1055 goto out;
1056 child->primary = primary;
1057 pci_bus_insert_busn_res(child, secondary, subordinate);
1058 child->bridge_ctl = bctl;
1059 }
1060
1061 cmax = pci_scan_child_bus(child);
1062 if (cmax > subordinate)
1063 dev_warn(&dev->dev, "bridge has subordinate %02x but max busn %02x\n",
1064 subordinate, cmax);
1065 /* subordinate should equal child->busn_res.end */
1066 if (subordinate > max)
1067 max = subordinate;
1068 } else {
1069 /*
1070 * We need to assign a number to this bus which we always
1071 * do in the second pass.
1072 */
1073 if (!pass) {
1074 if (pcibios_assign_all_busses() || broken || is_cardbus)
1075 /* Temporarily disable forwarding of the
1076 configuration cycles on all bridges in
1077 this bus segment to avoid possible
1078 conflicts in the second pass between two
1079 bridges programmed with overlapping
1080 bus ranges. */
1081 pci_write_config_dword(dev, PCI_PRIMARY_BUS,
1082 buses & ~0xffffff);
1083 goto out;
1084 }
1085
1086 /* Clear errors */
1087 pci_write_config_word(dev, PCI_STATUS, 0xffff);
1088
1089 /* Prevent assigning a bus number that already exists.
1090 * This can happen when a bridge is hot-plugged, so in
1091 * this case we only re-scan this bus. */
1092 child = pci_find_bus(pci_domain_nr(bus), max+1);
1093 if (!child) {
1094 child = pci_add_new_bus(bus, dev, max+1);
1095 if (!child)
1096 goto out;
1097 pci_bus_insert_busn_res(child, max+1,
1098 bus->busn_res.end);
1099 }
1100 max++;
1101 if (available_buses)
1102 available_buses--;
1103
1104 buses = (buses & 0xff000000)
1105 | ((unsigned int)(child->primary) << 0)
1106 | ((unsigned int)(child->busn_res.start) << 8)
1107 | ((unsigned int)(child->busn_res.end) << 16);
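
/*
 * PCI_PRIMARY_BUS is one dword: byte 0 is the primary bus number,
 * byte 1 the secondary, byte 2 the subordinate, and byte 3 the
 * secondary latency timer (preserved by the 0xff000000 mask above).
 */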
1108
1109 /*
1110 * yenta.c forces a secondary latency timer of 176.
1111 * Copy that behaviour here.
1112 */
1113 if (is_cardbus) {
1114 buses &= ~0xff000000;
1115 buses |= CARDBUS_LATENCY_TIMER << 24;
1116 }
1117
1118 /*
1119 * We need to blast all three values with a single write.
1120 */
1121 pci_write_config_dword(dev, PCI_PRIMARY_BUS, buses);
1122
1123 if (!is_cardbus) {
1124 child->bridge_ctl = bctl;
1125 max = pci_scan_child_bus_extend(child, available_buses);
1126 } else {
1127 /*
1128 * For CardBus bridges, we leave 4 bus numbers
1129 * as cards with a PCI-to-PCI bridge can be
1130 * inserted later.
1131 */
1132 for (i = 0; i < CARDBUS_RESERVE_BUSNR; i++) {
1133 struct pci_bus *parent = bus;
1134 if (pci_find_bus(pci_domain_nr(bus),
1135 max+i+1))
1136 break;
1137 while (parent->parent) {
1138 if ((!pcibios_assign_all_busses()) &&
1139 (parent->busn_res.end > max) &&
1140 (parent->busn_res.end <= max+i)) {
1141 j = 1;
1142 }
1143 parent = parent->parent;
1144 }
1145 if (j) {
1146 /*
1147 * Often, there are two cardbus bridges
1148 * -- try to leave one valid bus number
1149 * for each one.
1150 */
1151 i /= 2;
1152 break;
1153 }
1154 }
1155 max += i;
1156 }
1157 /*
1158 * Set the subordinate bus number to its real value.
1159 */
1160 pci_bus_update_busn_res_end(child, max);
1161 pci_write_config_byte(dev, PCI_SUBORDINATE_BUS, max);
1162 }
1163
1164 sprintf(child->name,
1165 (is_cardbus ? "PCI CardBus %04x:%02x" : "PCI Bus %04x:%02x"),
1166 pci_domain_nr(bus), child->number);
1167
1168 /* Has only triggered on CardBus, fixup is in yenta_socket */
1169 while (bus->parent) {
1170 if ((child->busn_res.end > bus->busn_res.end) ||
1171 (child->number > bus->busn_res.end) ||
1172 (child->number < bus->number) ||
1173 (child->busn_res.end < bus->number)) {
1174 dev_info(&child->dev, "%pR %s hidden behind%s bridge %s %pR\n",
1175 &child->busn_res,
1176 (bus->number > child->busn_res.end &&
1177 bus->busn_res.end < child->number) ?
1178 "wholly" : "partially",
1179 bus->self->transparent ? " transparent" : "",
1180 dev_name(&bus->dev),
1181 &bus->busn_res);
1182 }
1183 bus = bus->parent;
1184 }
1185
1186 out:
1187 pci_write_config_word(dev, PCI_BRIDGE_CONTROL, bctl);
1188
1189 pm_runtime_put(&dev->dev);
1190
1191 return max;
1192 }
1193
1194 /*
1195 * pci_scan_bridge() - Scan buses behind a bridge
1196 * @bus: Parent bus the bridge is on
1197 * @dev: Bridge itself
1198 * @max: Starting subordinate number of buses behind this bridge
 * @pass: Either %0 (scan already configured bridges) or %1 (scan bridges
 * that need to be reconfigured).
1201 *
1202 * If it's a bridge, configure it and scan the bus behind it.
1203 * For CardBus bridges, we don't scan behind as the devices will
1204 * be handled by the bridge driver itself.
1205 *
1206 * We need to process bridges in two passes -- first we scan those
1207 * already configured by the BIOS and after we are done with all of
1208 * them, we proceed to assigning numbers to the remaining buses in
1209 * order to avoid overlaps between old and new bus numbers.
1210 */
1211 int pci_scan_bridge(struct pci_bus *bus, struct pci_dev *dev, int max, int pass)
1212 {
1213 return pci_scan_bridge_extend(bus, dev, max, 0, pass);
1214 }
1215 EXPORT_SYMBOL(pci_scan_bridge);
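
/*
 * Illustrative caller (a sketch of what pci_scan_child_bus_extend()
 * does below):
 *
 *	for (pass = 0; pass < 2; pass++)
 *		for_each_pci_bridge(dev, bus)
 *			max = pci_scan_bridge(bus, dev, max, pass);
 *
 * Pass 0 trusts bridges the firmware already configured; pass 1
 * renumbers whatever is left, avoiding overlaps with pass-0 ranges.
 */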
1216
1217 /*
1218 * Read interrupt line and base address registers.
1219 * The architecture-dependent code can tweak these, of course.
1220 */
1221 static void pci_read_irq(struct pci_dev *dev)
1222 {
1223 unsigned char irq;
1224
1225 pci_read_config_byte(dev, PCI_INTERRUPT_PIN, &irq);
1226 dev->pin = irq;
1227 if (irq)
1228 pci_read_config_byte(dev, PCI_INTERRUPT_LINE, &irq);
1229 dev->irq = irq;
1230 }
1231
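/*
 * Typical topology, for illustration: Root Port --Link--> Switch
 * Upstream Port ... Switch Downstream Port --Link--> Endpoint.  The
 * Root Port and each Downstream Port have their Link on the
 * secondary (downstream) side, which is what has_secondary_link
 * records below.
 */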
1232 void set_pcie_port_type(struct pci_dev *pdev)
1233 {
1234 int pos;
1235 u16 reg16;
1236 int type;
1237 struct pci_dev *parent;
1238
1239 pos = pci_find_capability(pdev, PCI_CAP_ID_EXP);
1240 if (!pos)
1241 return;
1242
1243 pdev->pcie_cap = pos;
1244 pci_read_config_word(pdev, pos + PCI_EXP_FLAGS, &reg16);
1245 pdev->pcie_flags_reg = reg16;
1246 pci_read_config_word(pdev, pos + PCI_EXP_DEVCAP, &reg16);
1247 pdev->pcie_mpss = reg16 & PCI_EXP_DEVCAP_PAYLOAD;
1248
1249 /*
1250 * A Root Port or a PCI-to-PCIe bridge is always the upstream end
1251 * of a Link. No PCIe component has two Links. Two Links are
1252 * connected by a Switch that has a Port on each Link and internal
1253 * logic to connect the two Ports.
1254 */
1255 type = pci_pcie_type(pdev);
1256 if (type == PCI_EXP_TYPE_ROOT_PORT ||
1257 type == PCI_EXP_TYPE_PCIE_BRIDGE)
1258 pdev->has_secondary_link = 1;
1259 else if (type == PCI_EXP_TYPE_UPSTREAM ||
1260 type == PCI_EXP_TYPE_DOWNSTREAM) {
1261 parent = pci_upstream_bridge(pdev);
1262
1263 /*
1264 * Usually there's an upstream device (Root Port or Switch
1265 * Downstream Port), but we can't assume one exists.
1266 */
1267 if (parent && !parent->has_secondary_link)
1268 pdev->has_secondary_link = 1;
1269 }
1270 }
1271
1272 void set_pcie_hotplug_bridge(struct pci_dev *pdev)
1273 {
1274 u32 reg32;
1275
1276 pcie_capability_read_dword(pdev, PCI_EXP_SLTCAP, &reg32);
1277 if (reg32 & PCI_EXP_SLTCAP_HPC)
1278 pdev->is_hotplug_bridge = 1;
1279 }
1280
1281 static void set_pcie_thunderbolt(struct pci_dev *dev)
1282 {
1283 int vsec = 0;
1284 u32 header;
1285
1286 while ((vsec = pci_find_next_ext_capability(dev, vsec,
1287 PCI_EXT_CAP_ID_VNDR))) {
1288 pci_read_config_dword(dev, vsec + PCI_VNDR_HEADER, &header);
1289
1290 /* Is the device part of a Thunderbolt controller? */
1291 if (dev->vendor == PCI_VENDOR_ID_INTEL &&
1292 PCI_VNDR_HEADER_ID(header) == PCI_VSEC_ID_INTEL_TBT) {
1293 dev->is_thunderbolt = 1;
1294 return;
1295 }
1296 }
1297 }
1298
1299 static void set_pcie_untrusted(struct pci_dev *dev)
1300 {
1301 struct pci_dev *parent;
1302
/*
 * If the upstream bridge is untrusted, treat this device as
 * untrusted as well.
 */
1307 parent = pci_upstream_bridge(dev);
1308 if (parent && parent->untrusted)
1309 dev->untrusted = true;
1310 }
1311
1312 /**
1313 * pci_ext_cfg_is_aliased - is ext config space just an alias of std config?
1314 * @dev: PCI device
1315 *
1316 * PCI Express to PCI/PCI-X Bridge Specification, rev 1.0, 4.1.4 says that
1317 * when forwarding a type1 configuration request the bridge must check that
 * the extended register address field is zero.  The bridge is not permitted
 * to forward the transaction and must handle it as an Unsupported Request.
1320 * Some bridges do not follow this rule and simply drop the extended register
1321 * bits, resulting in the standard config space being aliased, every 256
1322 * bytes across the entire configuration space. Test for this condition by
1323 * comparing the first dword of each potential alias to the vendor/device ID.
1324 * Known offenders:
1325 * ASM1083/1085 PCIe-to-PCI Reversible Bridge (1b21:1080, rev 01 & 03)
1326 * AMD/ATI SBx00 PCI to PCI Bridge (1002:4384, rev 40)
1327 */
1328 static bool pci_ext_cfg_is_aliased(struct pci_dev *dev)
1329 {
1330 #ifdef CONFIG_PCI_QUIRKS
1331 int pos;
1332 u32 header, tmp;
1333
1334 pci_read_config_dword(dev, PCI_VENDOR_ID, &header);
1335
1336 for (pos = PCI_CFG_SPACE_SIZE;
1337 pos < PCI_CFG_SPACE_EXP_SIZE; pos += PCI_CFG_SPACE_SIZE) {
1338 if (pci_read_config_dword(dev, pos, &tmp) != PCIBIOS_SUCCESSFUL
1339 || header != tmp)
1340 return false;
1341 }
1342
1343 return true;
1344 #else
1345 return false;
1346 #endif
1347 }
1348
1349 /**
1350 * pci_cfg_space_size - get the configuration space size of the PCI device.
1351 * @dev: PCI device
1352 *
1353 * Regular PCI devices have 256 bytes, but PCI-X 2 and PCI Express devices
1354 * have 4096 bytes. Even if the device is capable, that doesn't mean we can
1355 * access it. Maybe we don't have a way to generate extended config space
1356 * accesses, or the device is behind a reverse Express bridge. So we try
1357 * reading the dword at 0x100 which must either be 0 or a valid extended
1358 * capability header.
1359 */
1360 static int pci_cfg_space_size_ext(struct pci_dev *dev)
1361 {
1362 u32 status;
1363 int pos = PCI_CFG_SPACE_SIZE;
1364
1365 if (pci_read_config_dword(dev, pos, &status) != PCIBIOS_SUCCESSFUL)
1366 return PCI_CFG_SPACE_SIZE;
1367 if (status == 0xffffffff || pci_ext_cfg_is_aliased(dev))
1368 return PCI_CFG_SPACE_SIZE;
1369
1370 return PCI_CFG_SPACE_EXP_SIZE;
1371 }
1372
1373 int pci_cfg_space_size(struct pci_dev *dev)
1374 {
1375 int pos;
1376 u32 status;
1377 u16 class;
1378
1379 class = dev->class >> 8;
1380 if (class == PCI_CLASS_BRIDGE_HOST)
1381 return pci_cfg_space_size_ext(dev);
1382
1383 if (pci_is_pcie(dev))
1384 return pci_cfg_space_size_ext(dev);
1385
1386 pos = pci_find_capability(dev, PCI_CAP_ID_PCIX);
1387 if (!pos)
1388 return PCI_CFG_SPACE_SIZE;
1389
1390 pci_read_config_dword(dev, pos + PCI_X_STATUS, &status);
1391 if (status & (PCI_X_STATUS_266MHZ | PCI_X_STATUS_533MHZ))
1392 return pci_cfg_space_size_ext(dev);
1393
1394 return PCI_CFG_SPACE_SIZE;
1395 }
1396
1397 #define LEGACY_IO_RESOURCE (IORESOURCE_IO | IORESOURCE_PCI_FIXED)
1398
1399 static void pci_msi_setup_pci_dev(struct pci_dev *dev)
1400 {
1401 /*
1402 * Disable the MSI hardware to avoid screaming interrupts
1403 * during boot. This is the power on reset default so
1404 * usually this should be a noop.
1405 */
1406 dev->msi_cap = pci_find_capability(dev, PCI_CAP_ID_MSI);
1407 if (dev->msi_cap)
1408 pci_msi_set_enable(dev, 0);
1409
1410 dev->msix_cap = pci_find_capability(dev, PCI_CAP_ID_MSIX);
1411 if (dev->msix_cap)
1412 pci_msix_clear_and_set_ctrl(dev, PCI_MSIX_FLAGS_ENABLE, 0);
1413 }
1414
1415 /**
1416 * pci_intx_mask_broken - test PCI_COMMAND_INTX_DISABLE writability
1417 * @dev: PCI device
1418 *
1419 * Test whether PCI_COMMAND_INTX_DISABLE is writable for @dev. Check this
1420 * at enumeration-time to avoid modifying PCI_COMMAND at run-time.
1421 */
1422 static int pci_intx_mask_broken(struct pci_dev *dev)
1423 {
1424 u16 orig, toggle, new;
1425
1426 pci_read_config_word(dev, PCI_COMMAND, &orig);
1427 toggle = orig ^ PCI_COMMAND_INTX_DISABLE;
1428 pci_write_config_word(dev, PCI_COMMAND, toggle);
1429 pci_read_config_word(dev, PCI_COMMAND, &new);
1430
1431 pci_write_config_word(dev, PCI_COMMAND, orig);
1432
1433 /*
1434 * PCI_COMMAND_INTX_DISABLE was reserved and read-only prior to PCI
1435 * r2.3, so strictly speaking, a device is not *broken* if it's not
1436 * writable. But we'll live with the misnomer for now.
1437 */
1438 if (new != toggle)
1439 return 1;
1440 return 0;
1441 }
1442
1443 /**
1444 * pci_setup_device - fill in class and map information of a device
1445 * @dev: the device structure to fill
1446 *
 * Initialize the device structure with information about the device's
 * vendor, class, memory and I/O-space addresses, IRQ lines, etc.
1449 * Called at initialisation of the PCI subsystem and by CardBus services.
1450 * Returns 0 on success and negative if unknown type of device (not normal,
1451 * bridge or CardBus).
1452 */
1453 int pci_setup_device(struct pci_dev *dev)
1454 {
1455 u32 class;
1456 u16 cmd;
1457 u8 hdr_type;
1458 int pos = 0;
1459 struct pci_bus_region region;
1460 struct resource *res;
1461
1462 if (pci_read_config_byte(dev, PCI_HEADER_TYPE, &hdr_type))
1463 return -EIO;
1464
1465 dev->sysdata = dev->bus->sysdata;
1466 dev->dev.parent = dev->bus->bridge;
1467 dev->dev.bus = &pci_bus_type;
1468 dev->hdr_type = hdr_type & 0x7f;
1469 dev->multifunction = !!(hdr_type & 0x80);
1470 dev->error_state = pci_channel_io_normal;
1471 set_pcie_port_type(dev);
1472
1473 pci_dev_assign_slot(dev);
1474 /* Assume 32-bit PCI; let 64-bit PCI cards (which are far rarer)
1475 set this higher, assuming the system even supports it. */
1476 dev->dma_mask = 0xffffffff;
1477
1478 dev_set_name(&dev->dev, "%04x:%02x:%02x.%d", pci_domain_nr(dev->bus),
1479 dev->bus->number, PCI_SLOT(dev->devfn),
1480 PCI_FUNC(dev->devfn));
1481
1482 pci_read_config_dword(dev, PCI_CLASS_REVISION, &class);
1483 dev->revision = class & 0xff;
1484 dev->class = class >> 8; /* upper 3 bytes */
1485
1486 dev_printk(KERN_DEBUG, &dev->dev, "[%04x:%04x] type %02x class %#08x\n",
1487 dev->vendor, dev->device, dev->hdr_type, dev->class);
1488
1489 /* need to have dev->class ready */
1490 dev->cfg_size = pci_cfg_space_size(dev);
1491
1492 /* need to have dev->cfg_size ready */
1493 set_pcie_thunderbolt(dev);
1494
1495 set_pcie_untrusted(dev);
1496
1497 /* "Unknown power state" */
1498 dev->current_state = PCI_UNKNOWN;
1499
1500 /* Early fixups, before probing the BARs */
1501 pci_fixup_device(pci_fixup_early, dev);
1502 /* device class may be changed after fixup */
1503 class = dev->class >> 8;
1504
1505 if (dev->non_compliant_bars) {
1506 pci_read_config_word(dev, PCI_COMMAND, &cmd);
1507 if (cmd & (PCI_COMMAND_IO | PCI_COMMAND_MEMORY)) {
1508 dev_info(&dev->dev, "device has non-compliant BARs; disabling IO/MEM decoding\n");
1509 cmd &= ~PCI_COMMAND_IO;
1510 cmd &= ~PCI_COMMAND_MEMORY;
1511 pci_write_config_word(dev, PCI_COMMAND, cmd);
1512 }
1513 }
1514
1515 dev->broken_intx_masking = pci_intx_mask_broken(dev);
1516
1517 switch (dev->hdr_type) { /* header type */
1518 case PCI_HEADER_TYPE_NORMAL: /* standard header */
1519 if (class == PCI_CLASS_BRIDGE_PCI)
1520 goto bad;
1521 pci_read_irq(dev);
1522 pci_read_bases(dev, 6, PCI_ROM_ADDRESS);
1523 pci_read_config_word(dev, PCI_SUBSYSTEM_VENDOR_ID, &dev->subsystem_vendor);
1524 pci_read_config_word(dev, PCI_SUBSYSTEM_ID, &dev->subsystem_device);
1525
1526 /*
1527 * Do the ugly legacy mode stuff here rather than broken chip
1528 * quirk code. Legacy mode ATA controllers have fixed
1529 * addresses. These are not always echoed in BAR0-3, and
1530 * BAR0-3 in a few cases contain junk!
1531 */
1532 if (class == PCI_CLASS_STORAGE_IDE) {
1533 u8 progif;
1534 pci_read_config_byte(dev, PCI_CLASS_PROG, &progif);
1535 if ((progif & 1) == 0) {
1536 region.start = 0x1F0;
1537 region.end = 0x1F7;
1538 res = &dev->resource[0];
1539 res->flags = LEGACY_IO_RESOURCE;
1540 pcibios_bus_to_resource(dev->bus, res, &region);
1541 dev_info(&dev->dev, "legacy IDE quirk: reg 0x10: %pR\n",
1542 res);
1543 region.start = 0x3F6;
1544 region.end = 0x3F6;
1545 res = &dev->resource[1];
1546 res->flags = LEGACY_IO_RESOURCE;
1547 pcibios_bus_to_resource(dev->bus, res, &region);
1548 dev_info(&dev->dev, "legacy IDE quirk: reg 0x14: %pR\n",
1549 res);
1550 }
1551 if ((progif & 4) == 0) {
1552 region.start = 0x170;
1553 region.end = 0x177;
1554 res = &dev->resource[2];
1555 res->flags = LEGACY_IO_RESOURCE;
1556 pcibios_bus_to_resource(dev->bus, res, &region);
1557 dev_info(&dev->dev, "legacy IDE quirk: reg 0x18: %pR\n",
1558 res);
1559 region.start = 0x376;
1560 region.end = 0x376;
1561 res = &dev->resource[3];
1562 res->flags = LEGACY_IO_RESOURCE;
1563 pcibios_bus_to_resource(dev->bus, res, &region);
1564 dev_info(&dev->dev, "legacy IDE quirk: reg 0x1c: %pR\n",
1565 res);
1566 }
1567 }
1568 break;
1569
1570 case PCI_HEADER_TYPE_BRIDGE: /* bridge header */
1571 if (class != PCI_CLASS_BRIDGE_PCI)
1572 goto bad;
/*
 * The PCI-to-PCI bridge spec requires that a subtractive
 * decode (i.e., transparent) bridge have a programming
 * interface code of 0x01.
 */
1576 pci_read_irq(dev);
1577 dev->transparent = ((dev->class & 0xff) == 1);
1578 pci_read_bases(dev, 2, PCI_ROM_ADDRESS1);
1579 set_pcie_hotplug_bridge(dev);
1580 pos = pci_find_capability(dev, PCI_CAP_ID_SSVID);
1581 if (pos) {
1582 pci_read_config_word(dev, pos + PCI_SSVID_VENDOR_ID, &dev->subsystem_vendor);
1583 pci_read_config_word(dev, pos + PCI_SSVID_DEVICE_ID, &dev->subsystem_device);
1584 }
1585 break;
1586
1587 case PCI_HEADER_TYPE_CARDBUS: /* CardBus bridge header */
1588 if (class != PCI_CLASS_BRIDGE_CARDBUS)
1589 goto bad;
1590 pci_read_irq(dev);
1591 pci_read_bases(dev, 1, 0);
1592 pci_read_config_word(dev, PCI_CB_SUBSYSTEM_VENDOR_ID, &dev->subsystem_vendor);
1593 pci_read_config_word(dev, PCI_CB_SUBSYSTEM_ID, &dev->subsystem_device);
1594 break;
1595
1596 default: /* unknown header */
1597 dev_err(&dev->dev, "unknown header type %02x, ignoring device\n",
1598 dev->hdr_type);
1599 return -EIO;
1600
1601 bad:
1602 dev_err(&dev->dev, "ignoring class %#08x (doesn't match header type %02x)\n",
1603 dev->class, dev->hdr_type);
1604 dev->class = PCI_CLASS_NOT_DEFINED << 8;
1605 }
1606
1607 /* We found a fine healthy device, go go go... */
1608 return 0;
1609 }
1610
1611 static void pci_configure_mps(struct pci_dev *dev)
1612 {
1613 struct pci_dev *bridge = pci_upstream_bridge(dev);
1614 int mps, mpss, p_mps, rc;
1615
1616 if (!pci_is_pcie(dev) || !bridge || !pci_is_pcie(bridge))
1617 return;
1618
1619 /* MPS and MRRS fields are of type 'RsvdP' for VFs, short-circuit out */
1620 if (dev->is_virtfn)
1621 return;
1622
1623 mps = pcie_get_mps(dev);
1624 p_mps = pcie_get_mps(bridge);
1625
1626 if (mps == p_mps)
1627 return;
1628
1629 if (pcie_bus_config == PCIE_BUS_TUNE_OFF) {
1630 dev_warn(&dev->dev, "Max Payload Size %d, but upstream %s set to %d; if necessary, use \"pci=pcie_bus_safe\" and report a bug\n",
1631 mps, pci_name(bridge), p_mps);
1632 return;
1633 }
1634
1635 /*
1636 * Fancier MPS configuration is done later by
1637 * pcie_bus_configure_settings()
1638 */
1639 if (pcie_bus_config != PCIE_BUS_DEFAULT)
1640 return;
1641
1642 mpss = 128 << dev->pcie_mpss;
1643 if (mpss < p_mps && pci_pcie_type(bridge) == PCI_EXP_TYPE_ROOT_PORT) {
1644 pcie_set_mps(bridge, mpss);
1645 pci_info(dev, "Upstream bridge's Max Payload Size set to %d (was %d, max %d)\n",
1646 mpss, p_mps, 128 << bridge->pcie_mpss);
1647 p_mps = pcie_get_mps(bridge);
1648 }
1649
1650 rc = pcie_set_mps(dev, p_mps);
1651 if (rc) {
1652 dev_warn(&dev->dev, "can't set Max Payload Size to %d; if necessary, use \"pci=pcie_bus_safe\" and report a bug\n",
1653 p_mps);
1654 return;
1655 }
1656
1657 dev_info(&dev->dev, "Max Payload Size set to %d (was %d, max %d)\n",
1658 p_mps, mps, mpss);
1659 }
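
/*
 * DEVCAP encodes the supported Max Payload Size as a power of two:
 * 128 << pcie_mpss bytes.  For example, pcie_mpss == 2 means the
 * device can handle TLPs of up to 512 bytes.
 */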
1660
1661 static struct hpp_type0 pci_default_type0 = {
1662 .revision = 1,
1663 .cache_line_size = 8,
1664 .latency_timer = 0x40,
1665 .enable_serr = 0,
1666 .enable_perr = 0,
1667 };
1668
1669 static void program_hpp_type0(struct pci_dev *dev, struct hpp_type0 *hpp)
1670 {
1671 u16 pci_cmd, pci_bctl;
1672
1673 if (!hpp)
1674 hpp = &pci_default_type0;
1675
1676 if (hpp->revision > 1) {
1677 dev_warn(&dev->dev,
1678 "PCI settings rev %d not supported; using defaults\n",
1679 hpp->revision);
1680 hpp = &pci_default_type0;
1681 }
1682
1683 pci_write_config_byte(dev, PCI_CACHE_LINE_SIZE, hpp->cache_line_size);
1684 pci_write_config_byte(dev, PCI_LATENCY_TIMER, hpp->latency_timer);
1685 pci_read_config_word(dev, PCI_COMMAND, &pci_cmd);
1686 if (hpp->enable_serr)
1687 pci_cmd |= PCI_COMMAND_SERR;
1688 if (hpp->enable_perr)
1689 pci_cmd |= PCI_COMMAND_PARITY;
1690 pci_write_config_word(dev, PCI_COMMAND, pci_cmd);
1691
1692 /* Program bridge control value */
1693 if ((dev->class >> 8) == PCI_CLASS_BRIDGE_PCI) {
1694 pci_write_config_byte(dev, PCI_SEC_LATENCY_TIMER,
1695 hpp->latency_timer);
1696 pci_read_config_word(dev, PCI_BRIDGE_CONTROL, &pci_bctl);
1697 if (hpp->enable_serr)
1698 pci_bctl |= PCI_BRIDGE_CTL_SERR;
1699 if (hpp->enable_perr)
1700 pci_bctl |= PCI_BRIDGE_CTL_PARITY;
1701 pci_write_config_word(dev, PCI_BRIDGE_CONTROL, pci_bctl);
1702 }
1703 }
1704
1705 static void program_hpp_type1(struct pci_dev *dev, struct hpp_type1 *hpp)
1706 {
1707 int pos;
1708
1709 if (!hpp)
1710 return;
1711
1712 pos = pci_find_capability(dev, PCI_CAP_ID_PCIX);
1713 if (!pos)
1714 return;
1715
1716 dev_warn(&dev->dev, "PCI-X settings not supported\n");
1717 }
1718
1719 static bool pcie_root_rcb_set(struct pci_dev *dev)
1720 {
1721 struct pci_dev *rp = pcie_find_root_port(dev);
1722 u16 lnkctl;
1723
1724 if (!rp)
1725 return false;
1726
1727 pcie_capability_read_word(rp, PCI_EXP_LNKCTL, &lnkctl);
1728 if (lnkctl & PCI_EXP_LNKCTL_RCB)
1729 return true;
1730
1731 return false;
1732 }
1733
1734 static void program_hpp_type2(struct pci_dev *dev, struct hpp_type2 *hpp)
1735 {
1736 int pos;
1737 u32 reg32;
1738
1739 if (!hpp)
1740 return;
1741
1742 if (!pci_is_pcie(dev))
1743 return;
1744
1745 if (hpp->revision > 1) {
1746 dev_warn(&dev->dev, "PCIe settings rev %d not supported\n",
1747 hpp->revision);
1748 return;
1749 }
1750
1751 /*
1752 * Don't allow _HPX to change MPS or MRRS settings. We manage
1753 * those to make sure they're consistent with the rest of the
1754 * platform.
1755 */
1756 hpp->pci_exp_devctl_and |= PCI_EXP_DEVCTL_PAYLOAD |
1757 PCI_EXP_DEVCTL_READRQ;
1758 hpp->pci_exp_devctl_or &= ~(PCI_EXP_DEVCTL_PAYLOAD |
1759 PCI_EXP_DEVCTL_READRQ);
1760
1761 /* Initialize Device Control Register */
1762 pcie_capability_clear_and_set_word(dev, PCI_EXP_DEVCTL,
1763 ~hpp->pci_exp_devctl_and, hpp->pci_exp_devctl_or);
1764
1765 /* Initialize Link Control Register */
1766 if (pcie_cap_has_lnkctl(dev)) {
1767
1768 /*
1769 * If the Root Port supports Read Completion Boundary of
1770 * 128, set RCB to 128. Otherwise, clear it.
1771 */
1772 hpp->pci_exp_lnkctl_and |= PCI_EXP_LNKCTL_RCB;
1773 hpp->pci_exp_lnkctl_or &= ~PCI_EXP_LNKCTL_RCB;
1774 if (pcie_root_rcb_set(dev))
1775 hpp->pci_exp_lnkctl_or |= PCI_EXP_LNKCTL_RCB;
1776
1777 pcie_capability_clear_and_set_word(dev, PCI_EXP_LNKCTL,
1778 ~hpp->pci_exp_lnkctl_and, hpp->pci_exp_lnkctl_or);
1779 }
1780
1781 /* Find Advanced Error Reporting Enhanced Capability */
1782 pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR);
1783 if (!pos)
1784 return;
1785
1786 /* Initialize Uncorrectable Error Mask Register */
1787 pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_MASK, &reg32);
1788 reg32 = (reg32 & hpp->unc_err_mask_and) | hpp->unc_err_mask_or;
1789 pci_write_config_dword(dev, pos + PCI_ERR_UNCOR_MASK, reg32);
1790
1791 /* Initialize Uncorrectable Error Severity Register */
1792 pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_SEVER, &reg32);
1793 reg32 = (reg32 & hpp->unc_err_sever_and) | hpp->unc_err_sever_or;
1794 pci_write_config_dword(dev, pos + PCI_ERR_UNCOR_SEVER, reg32);
1795
1796 /* Initialize Correctable Error Mask Register */
1797 pci_read_config_dword(dev, pos + PCI_ERR_COR_MASK, &reg32);
1798 reg32 = (reg32 & hpp->cor_err_mask_and) | hpp->cor_err_mask_or;
1799 pci_write_config_dword(dev, pos + PCI_ERR_COR_MASK, reg32);
1800
1801 /* Initialize Advanced Error Capabilities and Control Register */
1802 pci_read_config_dword(dev, pos + PCI_ERR_CAP, &reg32);
1803 reg32 = (reg32 & hpp->adv_err_cap_and) | hpp->adv_err_cap_or;
1804 /* Don't enable ECRC generation or checking if unsupported */
1805 if (!(reg32 & PCI_ERR_CAP_ECRC_GENC))
1806 reg32 &= ~PCI_ERR_CAP_ECRC_GENE;
1807 if (!(reg32 & PCI_ERR_CAP_ECRC_CHKC))
1808 reg32 &= ~PCI_ERR_CAP_ECRC_CHKE;
1809 pci_write_config_dword(dev, pos + PCI_ERR_CAP, reg32);
1810
1811 /*
1812 * FIXME: The following two registers are not supported yet.
1813 *
1814 * o Secondary Uncorrectable Error Severity Register
1815 * o Secondary Uncorrectable Error Mask Register
1816 */
1817 }
1818
1819 int pci_configure_extended_tags(struct pci_dev *dev, void *ign)
1820 {
1821 struct pci_host_bridge *host;
1822 u32 cap;
1823 u16 ctl;
1824 int ret;
1825
1826 if (!pci_is_pcie(dev))
1827 return 0;
1828
1829 ret = pcie_capability_read_dword(dev, PCI_EXP_DEVCAP, &cap);
1830 if (ret)
1831 return 0;
1832
1833 if (!(cap & PCI_EXP_DEVCAP_EXT_TAG))
1834 return 0;
1835
1836 ret = pcie_capability_read_word(dev, PCI_EXP_DEVCTL, &ctl);
1837 if (ret)
1838 return 0;
1839
1840 host = pci_find_host_bridge(dev->bus);
1841 if (!host)
1842 return 0;
1843
1844 /*
1845 * If some device in the hierarchy doesn't handle Extended Tags
1846 * correctly, make sure they're disabled.
1847 */
1848 if (host->no_ext_tags) {
1849 if (ctl & PCI_EXP_DEVCTL_EXT_TAG) {
1850 dev_info(&dev->dev, "disabling Extended Tags\n");
1851 pcie_capability_clear_word(dev, PCI_EXP_DEVCTL,
1852 PCI_EXP_DEVCTL_EXT_TAG);
1853 }
1854 return 0;
1855 }
1856
1857 if (!(ctl & PCI_EXP_DEVCTL_EXT_TAG)) {
1858 dev_info(&dev->dev, "enabling Extended Tags\n");
1859 pcie_capability_set_word(dev, PCI_EXP_DEVCTL,
1860 PCI_EXP_DEVCTL_EXT_TAG);
1861 }
1862 return 0;
1863 }
1864
1865 /**
1866 * pcie_relaxed_ordering_enabled - Probe for PCIe relaxed ordering enable
1867 * @dev: PCI device to query
1868 *
 * Returns true if the device has the Relaxed Ordering attribute enabled.
1870 */
1871 bool pcie_relaxed_ordering_enabled(struct pci_dev *dev)
1872 {
1873 u16 v;
1874
1875 pcie_capability_read_word(dev, PCI_EXP_DEVCTL, &v);
1876
1877 return !!(v & PCI_EXP_DEVCTL_RELAX_EN);
1878 }
1879 EXPORT_SYMBOL(pcie_relaxed_ordering_enabled);
1880
1881 static void pci_configure_relaxed_ordering(struct pci_dev *dev)
1882 {
1883 struct pci_dev *root;
1884
1885 /* PCI_EXP_DEVICE_RELAX_EN is RsvdP in VFs */
1886 if (dev->is_virtfn)
1887 return;
1888
1889 if (!pcie_relaxed_ordering_enabled(dev))
1890 return;
1891
1892 /*
1893 * For now, we only deal with Relaxed Ordering issues with Root
1894 * Ports. Peer-to-Peer DMA is another can of worms.
1895 */
1896 root = pci_find_pcie_root_port(dev);
1897 if (!root)
1898 return;
1899
1900 if (root->dev_flags & PCI_DEV_FLAGS_NO_RELAXED_ORDERING) {
1901 pcie_capability_clear_word(dev, PCI_EXP_DEVCTL,
1902 PCI_EXP_DEVCTL_RELAX_EN);
1903 dev_info(&dev->dev, "Disabling Relaxed Ordering because the Root Port doesn't support it\n");
1904 }
1905 }
1906
1907 static void pci_configure_ltr(struct pci_dev *dev)
1908 {
1909 #ifdef CONFIG_PCIEASPM
1910 u32 cap;
1911 struct pci_dev *bridge;
1912
1913 if (!pci_is_pcie(dev))
1914 return;
1915
1916 pcie_capability_read_dword(dev, PCI_EXP_DEVCAP2, &cap);
1917 if (!(cap & PCI_EXP_DEVCAP2_LTR))
1918 return;
1919
1920 /*
1921 * Software must not enable LTR in an Endpoint unless the Root
1922 * Complex and all intermediate Switches indicate support for LTR.
1923 * PCIe r3.1, sec 6.18.
1924 */
1925 if (pci_pcie_type(dev) == PCI_EXP_TYPE_ROOT_PORT)
1926 dev->ltr_path = 1;
1927 else {
1928 bridge = pci_upstream_bridge(dev);
1929 if (bridge && bridge->ltr_path)
1930 dev->ltr_path = 1;
1931 }
1932
1933 if (dev->ltr_path)
1934 pcie_capability_set_word(dev, PCI_EXP_DEVCTL2,
1935 PCI_EXP_DEVCTL2_LTR_EN);
1936 #endif
1937 }
1938
1939 static void pci_configure_device(struct pci_dev *dev)
1940 {
1941 struct hotplug_params hpp;
1942 int ret;
1943
1944 pci_configure_mps(dev);
1945 pci_configure_extended_tags(dev, NULL);
1946 pci_configure_relaxed_ordering(dev);
1947 pci_configure_ltr(dev);
1948
1949 memset(&hpp, 0, sizeof(hpp));
1950 ret = pci_get_hp_params(dev, &hpp);
1951 if (ret)
1952 return;
1953
1954 program_hpp_type2(dev, hpp.t2);
1955 program_hpp_type1(dev, hpp.t1);
1956 program_hpp_type0(dev, hpp.t0);
1957 }
1958
1959 static void pci_release_capabilities(struct pci_dev *dev)
1960 {
1961 pci_vpd_release(dev);
1962 pci_iov_release(dev);
1963 pci_free_cap_save_buffers(dev);
1964 }
1965
1966 /**
1967 * pci_release_dev - free a pci device structure when all users of it are finished.
1968 * @dev: device that's been disconnected
1969 *
1970 * Will be called only by the device core when all users of this pci device are
1971 * done.
1972 */
1973 static void pci_release_dev(struct device *dev)
1974 {
1975 struct pci_dev *pci_dev;
1976
1977 pci_dev = to_pci_dev(dev);
1978 pci_release_capabilities(pci_dev);
1979 pci_release_of_node(pci_dev);
1980 pcibios_release_device(pci_dev);
1981 pci_bus_put(pci_dev->bus);
1982 kfree(pci_dev->driver_override);
1983 kfree(pci_dev->dma_alias_mask);
1984 kfree(pci_dev);
1985 }
1986
1987 struct pci_dev *pci_alloc_dev(struct pci_bus *bus)
1988 {
1989 struct pci_dev *dev;
1990
1991 dev = kzalloc(sizeof(struct pci_dev), GFP_KERNEL);
1992 if (!dev)
1993 return NULL;
1994
1995 INIT_LIST_HEAD(&dev->bus_list);
1996 dev->dev.type = &pci_dev_type;
1997 dev->bus = pci_bus_get(bus);
1998
1999 return dev;
2000 }
2001 EXPORT_SYMBOL(pci_alloc_dev);
2002
2003 static bool pci_bus_crs_vendor_id(u32 l)
2004 {
2005 return (l & 0xffff) == 0x0001;
2006 }
2007
2008 static bool pci_bus_wait_crs(struct pci_bus *bus, int devfn, u32 *l,
2009 int timeout)
2010 {
2011 int delay = 1;
2012
2013 if (!pci_bus_crs_vendor_id(*l))
2014 return true; /* not a CRS completion */
2015
2016 if (!timeout)
2017 return false; /* CRS, but caller doesn't want to wait */
2018
2019 /*
2020 * We got the reserved Vendor ID that indicates a completion with
2021 * Configuration Request Retry Status (CRS). Retry until we get a
2022 * valid Vendor ID or we time out.
2023 */
2024 while (pci_bus_crs_vendor_id(*l)) {
2025 if (delay > timeout) {
2026 pr_warn("pci %04x:%02x:%02x.%d: not ready after %dms; giving up\n",
2027 pci_domain_nr(bus), bus->number,
2028 PCI_SLOT(devfn), PCI_FUNC(devfn), delay - 1);
2029
2030 return false;
2031 }
2032 if (delay >= 1000)
2033 pr_info("pci %04x:%02x:%02x.%d: not ready after %dms; waiting\n",
2034 pci_domain_nr(bus), bus->number,
2035 PCI_SLOT(devfn), PCI_FUNC(devfn), delay - 1);
2036
2037 msleep(delay);
2038 delay *= 2;
2039
2040 if (pci_bus_read_config_dword(bus, devfn, PCI_VENDOR_ID, l))
2041 return false;
2042 }
2043
2044 if (delay >= 1000)
2045 pr_info("pci %04x:%02x:%02x.%d: ready after %dms\n",
2046 pci_domain_nr(bus), bus->number,
2047 PCI_SLOT(devfn), PCI_FUNC(devfn), delay - 1);
2048
2049 return true;
2050 }
2051
2052 bool pci_bus_read_dev_vendor_id(struct pci_bus *bus, int devfn, u32 *l,
2053 int timeout)
2054 {
2055 if (pci_bus_read_config_dword(bus, devfn, PCI_VENDOR_ID, l))
2056 return false;
2057
2058 /* some broken boards return 0 or ~0 if a slot is empty: */
2059 if (*l == 0xffffffff || *l == 0x00000000 ||
2060 *l == 0x0000ffff || *l == 0xffff0000)
2061 return false;
2062
2063 if (pci_bus_crs_vendor_id(*l))
2064 return pci_bus_wait_crs(bus, devfn, l, timeout);
2065
2066 return true;
2067 }
2068 EXPORT_SYMBOL(pci_bus_read_dev_vendor_id);
2069
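/*
 * Illustrative sketch of a caller, not from this file: hotplug code can
 * use the export above to test whether a slot answers at all, waiting
 * out CRS retries for a bounded time (compare the 60 s budget used by
 * pci_scan_device() below).  The function name is hypothetical.
 */
static bool example_slot_populated(struct pci_bus *bus)
{
	u32 l;

	/* Wait at most 1000 ms for a device still retrying with CRS */
	return pci_bus_read_dev_vendor_id(bus, PCI_DEVFN(0, 0), &l, 1000);
}
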
2070 /*
2071 * Read the config data for a PCI device, sanity-check it
2072 * and fill in the dev structure...
2073 */
2074 static struct pci_dev *pci_scan_device(struct pci_bus *bus, int devfn)
2075 {
2076 struct pci_dev *dev;
2077 u32 l;
2078
2079 if (!pci_bus_read_dev_vendor_id(bus, devfn, &l, 60*1000))
2080 return NULL;
2081
2082 dev = pci_alloc_dev(bus);
2083 if (!dev)
2084 return NULL;
2085
2086 dev->devfn = devfn;
2087 dev->vendor = l & 0xffff;
2088 dev->device = (l >> 16) & 0xffff;
2089
2090 pci_set_of_node(dev);
2091
2092 if (pci_setup_device(dev)) {
2093 pci_bus_put(dev->bus);
2094 kfree(dev);
2095 return NULL;
2096 }
2097
2098 return dev;
2099 }
2100
2101 static void pci_init_capabilities(struct pci_dev *dev)
2102 {
2103 /* Enhanced Allocation */
2104 pci_ea_init(dev);
2105
2106 /* Setup MSI caps & disable MSI/MSI-X interrupts */
2107 pci_msi_setup_pci_dev(dev);
2108
2109 /* Buffers for saving PCIe and PCI-X capabilities */
2110 pci_allocate_cap_save_buffers(dev);
2111
2112 /* Power Management */
2113 pci_pm_init(dev);
2114
2115 /* Vital Product Data */
2116 pci_vpd_init(dev);
2117
2118 /* Alternative Routing-ID Forwarding */
2119 pci_configure_ari(dev);
2120
2121 /* Single Root I/O Virtualization */
2122 pci_iov_init(dev);
2123
2124 /* Address Translation Services */
2125 pci_ats_init(dev);
2126
2127 /* Enable ACS P2P upstream forwarding */
2128 pci_enable_acs(dev);
2129
2130 /* Precision Time Measurement */
2131 pci_ptm_init(dev);
2132
2133 /* Advanced Error Reporting */
2134 pci_aer_init(dev);
2135 }
2136
2137 /*
2138 * This is the equivalent of pci_host_bridge_msi_domain that acts on
2139 * devices. Firmware interfaces that can select the MSI domain on a
2140 * per-device basis should be called from here.
2141 */
2142 static struct irq_domain *pci_dev_msi_domain(struct pci_dev *dev)
2143 {
2144 struct irq_domain *d;
2145
2146 /*
2147 * If a domain has been set through the pcibios_add_device
2148 * callback, then this is the one (platform code knows best).
2149 */
2150 d = dev_get_msi_domain(&dev->dev);
2151 if (d)
2152 return d;
2153
2154 /*
2155 * Let's see if we have a firmware interface able to provide
2156 * the domain.
2157 */
2158 d = pci_msi_get_device_domain(dev);
2159 if (d)
2160 return d;
2161
2162 return NULL;
2163 }
2164
2165 static void pci_set_msi_domain(struct pci_dev *dev)
2166 {
2167 struct irq_domain *d;
2168
2169 /*
2170 * If the platform or firmware interfaces cannot supply a
2171 * device-specific MSI domain, then inherit the default domain
2172 * from the host bridge itself.
2173 */
2174 d = pci_dev_msi_domain(dev);
2175 if (!d)
2176 d = dev_get_msi_domain(&dev->bus->dev);
2177
2178 dev_set_msi_domain(&dev->dev, d);
2179 }
2180
2181 void pci_device_add(struct pci_dev *dev, struct pci_bus *bus)
2182 {
2183 int ret;
2184
2185 pci_configure_device(dev);
2186
2187 device_initialize(&dev->dev);
2188 dev->dev.release = pci_release_dev;
2189
2190 set_dev_node(&dev->dev, pcibus_to_node(bus));
2191 dev->dev.dma_mask = &dev->dma_mask;
2192 dev->dev.dma_parms = &dev->dma_parms;
2193 dev->dev.coherent_dma_mask = 0xffffffffull;
2194
2195 pci_set_dma_max_seg_size(dev, 65536);
2196 pci_set_dma_seg_boundary(dev, 0xffffffff);
2197
2198 /* Fix up broken headers */
2199 pci_fixup_device(pci_fixup_header, dev);
2200
2201 /* moved out from quirk header fixup code */
2202 pci_reassigndev_resource_alignment(dev);
2203
2204 /* Clear the state_saved flag. */
2205 dev->state_saved = false;
2206
2207 /* Initialize various capabilities */
2208 pci_init_capabilities(dev);
2209
2210 /*
2211 * Add the device to our list of discovered devices
2212 * and the bus list for fixup functions, etc.
2213 */
2214 down_write(&pci_bus_sem);
2215 list_add_tail(&dev->bus_list, &bus->devices);
2216 up_write(&pci_bus_sem);
2217
2218 ret = pcibios_add_device(dev);
2219 WARN_ON(ret < 0);
2220
2221 /* Setup MSI irq domain */
2222 pci_set_msi_domain(dev);
2223
2224 /* Notifier could use PCI capabilities */
2225 dev->match_driver = false;
2226 ret = device_add(&dev->dev);
2227 WARN_ON(ret < 0);
2228 }
2229
2230 struct pci_dev *pci_scan_single_device(struct pci_bus *bus, int devfn)
2231 {
2232 struct pci_dev *dev;
2233
2234 dev = pci_get_slot(bus, devfn);
2235 if (dev) {
2236 pci_dev_put(dev);
2237 return dev;
2238 }
2239
2240 dev = pci_scan_device(bus, devfn);
2241 if (!dev)
2242 return NULL;
2243
2244 pci_device_add(dev, bus);
2245
2246 return dev;
2247 }
2248 EXPORT_SYMBOL(pci_scan_single_device);
2249
2250 static unsigned next_fn(struct pci_bus *bus, struct pci_dev *dev, unsigned fn)
2251 {
2252 int pos;
2253 u16 cap = 0;
2254 unsigned next;
2255
2256 if (pci_ari_enabled(bus)) {
2257 if (!dev)
2258 return 0;
2259 pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ARI);
2260 if (!pos)
2261 return 0;
2262
2263 pci_read_config_word(dev, pos + PCI_ARI_CAP, &cap);
2264 next = PCI_ARI_CAP_NFN(cap);
2265 if (next <= fn)
2266 return 0; /* protect against malformed list */
2267
2268 return next;
2269 }
2270
2271 /* dev may be NULL for non-contiguous multifunction devices */
2272 if (!dev || dev->multifunction)
2273 return (fn + 1) % 8;
2274
2275 return 0;
2276 }
2277
2278 static int only_one_child(struct pci_bus *bus)
2279 {
2280 struct pci_dev *bridge = bus->self;
2281
2282 /*
2283 * Systems with unusual topologies set PCI_SCAN_ALL_PCIE_DEVS so
2284 * we scan for all possible devices, not just Device 0.
2285 */
2286 if (pci_has_flag(PCI_SCAN_ALL_PCIE_DEVS))
2287 return 0;
2288
2289 /*
2290 * A PCIe Downstream Port normally leads to a Link with only Device
2291 * 0 on it (PCIe spec r3.1, sec 7.3.1). As an optimization, scan
2292 * only for Device 0 in that situation.
2293 *
2294 * Checking has_secondary_link is a hack to identify Downstream
2295 * Ports because sometimes Switches are configured such that the
2296 * PCIe Port Type labels are backwards.
2297 */
2298 if (bridge && pci_is_pcie(bridge) && bridge->has_secondary_link)
2299 return 1;
2300
2301 return 0;
2302 }
2303
2304 /**
2305 * pci_scan_slot - scan a PCI slot on a bus for devices.
2306 * @bus: PCI bus to scan
2307 * @devfn: slot number to scan (must have function 0)
2308 *
2309 * Scan a PCI slot on the specified PCI bus for devices, adding
2310 * discovered devices to the @bus->devices list. New devices
2311 * will not have is_added set.
2312 *
2313 * Returns the number of new devices found.
2314 */
2315 int pci_scan_slot(struct pci_bus *bus, int devfn)
2316 {
2317 unsigned fn, nr = 0;
2318 struct pci_dev *dev;
2319
2320 if (only_one_child(bus) && (devfn > 0))
2321 return 0; /* Already scanned the entire slot */
2322
2323 dev = pci_scan_single_device(bus, devfn);
2324 if (!dev)
2325 return 0;
2326 if (!dev->is_added)
2327 nr++;
2328
2329 for (fn = next_fn(bus, dev, 0); fn > 0; fn = next_fn(bus, dev, fn)) {
2330 dev = pci_scan_single_device(bus, devfn + fn);
2331 if (dev) {
2332 if (!dev->is_added)
2333 nr++;
2334 dev->multifunction = 1;
2335 }
2336 }
2337
2338 /* On a PCIe link only one slot has a device, so set up ASPM once */
2339 if (bus->self && nr)
2340 pcie_aspm_init_link_state(bus->self);
2341
2342 return nr;
2343 }
2344 EXPORT_SYMBOL(pci_scan_slot);
2345
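/*
 * Illustrative sketch, loosely modeled on the SHPC/cPCI hotplug drivers:
 * after a board is inserted, scan its one slot, give the new devices
 * resources, then make them visible.  Not a verbatim copy of any driver.
 */
static int example_configure_slot(struct pci_bus *parent, int device)
{
	int num;

	num = pci_scan_slot(parent, PCI_DEVFN(device, 0));
	if (num == 0)
		return -ENODEV;

	if (parent->self)
		pci_assign_unassigned_bridge_resources(parent->self);
	pci_bus_add_devices(parent);

	return 0;
}
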
2346 static int pcie_find_smpss(struct pci_dev *dev, void *data)
2347 {
2348 u8 *smpss = data;
2349
2350 if (!pci_is_pcie(dev))
2351 return 0;
2352
2353 /*
2354 * We don't have a way to change MPS settings on devices that have
2355 * drivers attached. A hot-added device might support only the minimum
2356 * MPS setting (MPS=128). Therefore, if the fabric contains a bridge
2357 * where devices may be hot-added, we limit the fabric MPS to 128 so
2358 * hot-added devices will work correctly.
2359 *
2360 * However, if we hot-add a device to a slot directly below a Root
2361 * Port, it's impossible for there to be other existing devices below
2362 * the port. We don't limit the MPS in this case because we can
2363 * reconfigure MPS on both the Root Port and the hot-added device,
2364 * and there are no other devices involved.
2365 *
2366 * Note that this PCIE_BUS_SAFE path assumes no peer-to-peer DMA.
2367 */
2368 if (dev->is_hotplug_bridge &&
2369 pci_pcie_type(dev) != PCI_EXP_TYPE_ROOT_PORT)
2370 *smpss = 0;
2371
2372 if (*smpss > dev->pcie_mpss)
2373 *smpss = dev->pcie_mpss;
2374
2375 return 0;
2376 }
2377
2378 static void pcie_write_mps(struct pci_dev *dev, int mps)
2379 {
2380 int rc;
2381
2382 if (pcie_bus_config == PCIE_BUS_PERFORMANCE) {
2383 mps = 128 << dev->pcie_mpss;
2384
2385 if (pci_pcie_type(dev) != PCI_EXP_TYPE_ROOT_PORT &&
2386 dev->bus->self)
2387 /* For "Performance", the assumption is made that
2388 * downstream communication will never be larger than
2389 * the MRRS. So, the MPS only needs to be configured
2390 * for the upstream communication. This being the case,
2391 * walk from the top down and set the MPS of the child
2392 * to that of the parent bus.
2393 *
2394 * Configure the device MPS with the smaller of the
2395 * device MPSS or the bridge MPS (which is assumed to be
2396 * properly configured at this point to the largest
2397 * allowable MPS based on its parent bus).
2398 */
2399 mps = min(mps, pcie_get_mps(dev->bus->self));
2400 }
2401
2402 rc = pcie_set_mps(dev, mps);
2403 if (rc)
2404 dev_err(&dev->dev, "Failed attempting to set the MPS\n");
2405 }
2406
2407 static void pcie_write_mrrs(struct pci_dev *dev)
2408 {
2409 int rc, mrrs;
2410
2411 /* In the "safe" case, do not configure the MRRS. There appear to be
2412 * issues with setting MRRS to 0 on a number of devices.
2413 */
2414 if (pcie_bus_config != PCIE_BUS_PERFORMANCE)
2415 return;
2416
2417 /* For Max performance, the MRRS must be set to the largest supported
2418 * value. However, it cannot be configured larger than the MPS the
2419 * device or the bus can support. This should already be properly
2420 * configured by a prior call to pcie_write_mps.
2421 */
2422 mrrs = pcie_get_mps(dev);
2423
2424 /* MRRS is an R/W register. Invalid values can be written, but a
2425 * subsequent read will verify if the value is acceptable or not.
2426 * If the MRRS value provided is not acceptable (e.g., too large),
2427 * shrink the value until it is acceptable to the HW.
2428 */
2429 while (mrrs != pcie_get_readrq(dev) && mrrs >= 128) {
2430 rc = pcie_set_readrq(dev, mrrs);
2431 if (!rc)
2432 break;
2433
2434 dev_warn(&dev->dev, "Failed attempting to set the MRRS\n");
2435 mrrs /= 2;
2436 }
2437
2438 if (mrrs < 128)
2439 dev_err(&dev->dev, "MRRS was unable to be configured with a safe value. If problems are experienced, try running with pci=pcie_bus_safe\n");
2440 }
2441
2442 static int pcie_bus_configure_set(struct pci_dev *dev, void *data)
2443 {
2444 int mps, orig_mps;
2445
2446 if (!pci_is_pcie(dev))
2447 return 0;
2448
2449 if (pcie_bus_config == PCIE_BUS_TUNE_OFF ||
2450 pcie_bus_config == PCIE_BUS_DEFAULT)
2451 return 0;
2452
2453 mps = 128 << *(u8 *)data;
2454 orig_mps = pcie_get_mps(dev);
2455
2456 pcie_write_mps(dev, mps);
2457 pcie_write_mrrs(dev);
2458
2459 dev_info(&dev->dev, "Max Payload Size set to %4d/%4d (was %4d), Max Read Rq %4d\n",
2460 pcie_get_mps(dev), 128 << dev->pcie_mpss,
2461 orig_mps, pcie_get_readrq(dev));
2462
2463 return 0;
2464 }
2465
2466 /* pcie_bus_configure_settings requires that pci_walk_bus work in a top-down,
2467 * parents then children fashion. If this changes, then this code will not
2468 * work as designed.
2469 */
2470 void pcie_bus_configure_settings(struct pci_bus *bus)
2471 {
2472 u8 smpss = 0;
2473
2474 if (!bus->self)
2475 return;
2476
2477 if (!pci_is_pcie(bus->self))
2478 return;
2479
2480 /* FIXME - Peer to peer DMA is possible, though the endpoint would need
2481 * to be aware of the MPS of the destination. To work around this,
2482 * simply force the MPS of the entire system to the smallest possible value.
2483 */
2484 if (pcie_bus_config == PCIE_BUS_PEER2PEER)
2485 smpss = 0;
2486
2487 if (pcie_bus_config == PCIE_BUS_SAFE) {
2488 smpss = bus->self->pcie_mpss;
2489
2490 pcie_find_smpss(bus->self, &smpss);
2491 pci_walk_bus(bus, pcie_find_smpss, &smpss);
2492 }
2493
2494 pcie_bus_configure_set(bus->self, &smpss);
2495 pci_walk_bus(bus, pcie_bus_configure_set, &smpss);
2496 }
2497 EXPORT_SYMBOL_GPL(pcie_bus_configure_settings);
2498
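/*
 * Illustrative sketch of the usual caller, not from this file: host
 * controller drivers typically scan the root bus and then apply the
 * MPS/MRRS policy to each child bus before adding devices.  The function
 * name is hypothetical.
 */
static void example_host_finish_probe(struct pci_bus *bus)
{
	struct pci_bus *child;

	pci_scan_child_bus(bus);
	pci_assign_unassigned_bus_resources(bus);

	list_for_each_entry(child, &bus->children, node)
		pcie_bus_configure_settings(child);

	pci_bus_add_devices(bus);
}
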
2499 /*
2500 * Called after each bus is probed, but before its children are examined. This
2501 * is marked as __weak because multiple architectures define it.
2502 */
2503 void __weak pcibios_fixup_bus(struct pci_bus *bus)
2504 {
2505 /* nothing to do, expected to be removed in the future */
2506 }
2507
2508 /**
2509 * pci_scan_child_bus_extend() - Scan devices below a bus
2510 * @bus: Bus to scan for devices
2511 * @available_buses: Total number of buses available (%0 does not try to
2512 * extend beyond the minimum)
2513 *
2514 * Scans devices below @bus including subordinate buses. Returns new
2515 * subordinate number including all the found devices. Passing
2516 * @available_buses causes the remaining bus space to be distributed
2517 * equally between hotplug-capable bridges to allow future extension of the
2518 * hierarchy.
2519 */
2520 static unsigned int pci_scan_child_bus_extend(struct pci_bus *bus,
2521 unsigned int available_buses)
2522 {
2523 unsigned int used_buses, normal_bridges = 0, hotplug_bridges = 0;
2524 unsigned int start = bus->busn_res.start;
2525 unsigned int devfn, cmax, max = start;
2526 struct pci_dev *dev;
2527
2528 dev_dbg(&bus->dev, "scanning bus\n");
2529
2530 /* Go find them, Rover! */
2531 for (devfn = 0; devfn < 0x100; devfn += 8)
2532 pci_scan_slot(bus, devfn);
2533
2534 /* Reserve buses for SR-IOV capability. */
2535 used_buses = pci_iov_bus_range(bus);
2536 max += used_buses;
2537
2538 /*
2539 * After performing arch-dependent fixup of the bus, look behind
2540 * all PCI-to-PCI bridges on this bus.
2541 */
2542 if (!bus->is_added) {
2543 dev_dbg(&bus->dev, "fixups for bus\n");
2544 pcibios_fixup_bus(bus);
2545 bus->is_added = 1;
2546 }
2547
2548 /*
2549 * Calculate how many hotplug bridges and normal bridges there
2550 * are on this bus. We will distribute the additional available
2551 * buses between hotplug bridges.
2552 */
2553 for_each_pci_bridge(dev, bus) {
2554 if (dev->is_hotplug_bridge)
2555 hotplug_bridges++;
2556 else
2557 normal_bridges++;
2558 }
2559
2560 /*
2561 * Scan bridges that are already configured. We don't touch them
2562 * unless they are misconfigured (which will be done in the second
2563 * scan below).
2564 */
2565 for_each_pci_bridge(dev, bus) {
2566 cmax = max;
2567 max = pci_scan_bridge_extend(bus, dev, max, 0, 0);
2568
2569 /*
2570 * Reserve one bus for each bridge now to avoid extending
2571 * hotplug bridges too much during the second scan below.
2572 */
2573 used_buses++;
2574 if (cmax - max > 1)
2575 used_buses += cmax - max - 1;
2576 }
2577
2578 /* Scan bridges that need to be reconfigured */
2579 for_each_pci_bridge(dev, bus) {
2580 unsigned int buses = 0;
2581
2582 if (!hotplug_bridges && normal_bridges == 1) {
2583 /*
2584 * There is only one bridge on the bus (upstream
2585 * port) so it gets all available buses which it
2586 * can then distribute to the possible hotplug
2587 * bridges below.
2588 */
2589 buses = available_buses;
2590 } else if (dev->is_hotplug_bridge) {
2591 /*
2592 * Distribute the extra buses between hotplug
2593 * bridges if any.
2594 */
2595 buses = available_buses / hotplug_bridges;
2596 buses = min(buses, available_buses - used_buses + 1);
2597 }
2598
2599 cmax = max;
2600 max = pci_scan_bridge_extend(bus, dev, cmax, buses, 1);
2601 /* One bus is already accounted for, so don't add it again */
2602 if (max - cmax > 1)
2603 used_buses += max - cmax - 1;
2604 }
2605
2606 /*
2607 * Make sure a hotplug bridge has at least the minimum requested
2608 * number of buses but allow it to grow up to the maximum available
2609 * bus number if there is room.
2610 */
2611 if (bus->self && bus->self->is_hotplug_bridge) {
2612 used_buses = max_t(unsigned int, available_buses,
2613 pci_hotplug_bus_size - 1);
2614 if (max - start < used_buses) {
2615 max = start + used_buses;
2616
2617 /* Do not allocate more buses than we have room left */
2618 if (max > bus->busn_res.end)
2619 max = bus->busn_res.end;
2620
2621 dev_dbg(&bus->dev, "%pR extended by %#02x\n",
2622 &bus->busn_res, max - start);
2623 }
2624 }
2625
2626 /*
2627 * We've scanned the bus and so we know all about what's on
2628 * the other side of any bridges that may be on this bus plus
2629 * any devices.
2630 *
2631 * Return how far we've got finding sub-buses.
2632 */
2633 dev_dbg(&bus->dev, "bus scan returning with max=%02x\n", max);
2634 return max;
2635 }
2636
2637 /**
2638 * pci_scan_child_bus() - Scan devices below a bus
2639 * @bus: Bus to scan for devices
2640 *
2641 * Scans devices below @bus including subordinate buses. Returns new
2642 * subordinate number including all the found devices.
2643 */
2644 unsigned int pci_scan_child_bus(struct pci_bus *bus)
2645 {
2646 return pci_scan_child_bus_extend(bus, 0);
2647 }
2648 EXPORT_SYMBOL_GPL(pci_scan_child_bus);
2649
2650 /**
2651 * pcibios_root_bridge_prepare - Platform-specific host bridge setup.
2652 * @bridge: Host bridge to set up.
2653 *
2654 * Default empty implementation. Replace with an architecture-specific setup
2655 * routine, if necessary.
2656 */
2657 int __weak pcibios_root_bridge_prepare(struct pci_host_bridge *bridge)
2658 {
2659 return 0;
2660 }
2661
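#if 0	/* Illustrative sketch of an arch override; not built from here.
	 * An architecture supplies its own pcibios_root_bridge_prepare()
	 * to do firmware-specific setup before the bridge is registered.
	 * Modeled loosely on the ACPI-based implementations; it would live
	 * in arch code and need <linux/pci-ecam.h>.
	 */
int pcibios_root_bridge_prepare(struct pci_host_bridge *bridge)
{
	if (!acpi_disabled) {
		struct pci_config_window *cfg = bridge->bus->sysdata;

		/* Hand the firmware companion device to the driver core */
		ACPI_COMPANION_SET(&bridge->dev, to_acpi_device(cfg->parent));
	}

	return 0;
}
#endif
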
2662 void __weak pcibios_add_bus(struct pci_bus *bus)
2663 {
2664 }
2665
2666 void __weak pcibios_remove_bus(struct pci_bus *bus)
2667 {
2668 }
2669
2670 struct pci_bus *pci_create_root_bus(struct device *parent, int bus,
2671 struct pci_ops *ops, void *sysdata, struct list_head *resources)
2672 {
2673 int error;
2674 struct pci_host_bridge *bridge;
2675
2676 bridge = pci_alloc_host_bridge(0);
2677 if (!bridge)
2678 return NULL;
2679
2680 bridge->dev.parent = parent;
2681
2682 list_splice_init(resources, &bridge->windows);
2683 bridge->sysdata = sysdata;
2684 bridge->busnr = bus;
2685 bridge->ops = ops;
2686
2687 error = pci_register_host_bridge(bridge);
2688 if (error < 0)
2689 goto err_out;
2690
2691 return bridge->bus;
2692
2693 err_out:
2694 kfree(bridge);
2695 return NULL;
2696 }
2697 EXPORT_SYMBOL_GPL(pci_create_root_bus);
2698
2699 int pci_bus_insert_busn_res(struct pci_bus *b, int bus, int bus_max)
2700 {
2701 struct resource *res = &b->busn_res;
2702 struct resource *parent_res, *conflict;
2703
2704 res->start = bus;
2705 res->end = bus_max;
2706 res->flags = IORESOURCE_BUS;
2707
2708 if (!pci_is_root_bus(b))
2709 parent_res = &b->parent->busn_res;
2710 else {
2711 parent_res = get_pci_domain_busn_res(pci_domain_nr(b));
2712 res->flags |= IORESOURCE_PCI_FIXED;
2713 }
2714
2715 conflict = request_resource_conflict(parent_res, res);
2716
2717 if (conflict)
2718 dev_printk(KERN_DEBUG, &b->dev,
2719 "busn_res: cannot insert %pR under %s%pR (conflicts with %s %pR)\n",
2720 res, pci_is_root_bus(b) ? "domain " : "",
2721 parent_res, conflict->name, conflict);
2722
2723 return conflict == NULL;
2724 }
2725
2726 int pci_bus_update_busn_res_end(struct pci_bus *b, int bus_max)
2727 {
2728 struct resource *res = &b->busn_res;
2729 struct resource old_res = *res;
2730 resource_size_t size;
2731 int ret;
2732
2733 if (res->start > bus_max)
2734 return -EINVAL;
2735
2736 size = bus_max - res->start + 1;
2737 ret = adjust_resource(res, res->start, size);
2738 dev_printk(KERN_DEBUG, &b->dev,
2739 "busn_res: %pR end %s updated to %02x\n",
2740 &old_res, ret ? "cannot be" : "is", bus_max);
2741
2742 if (!ret && !res->parent)
2743 pci_bus_insert_busn_res(b, res->start, res->end);
2744
2745 return ret;
2746 }
2747
2748 void pci_bus_release_busn_res(struct pci_bus *b)
2749 {
2750 struct resource *res = &b->busn_res;
2751 int ret;
2752
2753 if (!res->flags || !res->parent)
2754 return;
2755
2756 ret = release_resource(res);
2757 dev_printk(KERN_DEBUG, &b->dev,
2758 "busn_res: %pR %s released\n",
2759 res, ret ? "cannot be" : "is");
2760 }
2761
2762 int pci_scan_root_bus_bridge(struct pci_host_bridge *bridge)
2763 {
2764 struct resource_entry *window;
2765 bool found = false;
2766 struct pci_bus *b;
2767 int max, bus, ret;
2768
2769 if (!bridge)
2770 return -EINVAL;
2771
2772 resource_list_for_each_entry(window, &bridge->windows)
2773 if (window->res->flags & IORESOURCE_BUS) {
2774 found = true;
2775 break;
2776 }
2777
2778 ret = pci_register_host_bridge(bridge);
2779 if (ret < 0)
2780 return ret;
2781
2782 b = bridge->bus;
2783 bus = bridge->busnr;
2784
2785 if (!found) {
2786 dev_info(&b->dev,
2787 "No busn resource found for root bus, will use [bus %02x-ff]\n",
2788 bus);
2789 pci_bus_insert_busn_res(b, bus, 255);
2790 }
2791
2792 max = pci_scan_child_bus(b);
2793
2794 if (!found)
2795 pci_bus_update_busn_res_end(b, max);
2796
2797 return 0;
2798 }
2799 EXPORT_SYMBOL(pci_scan_root_bus_bridge);
2800
2801 struct pci_bus *pci_scan_root_bus(struct device *parent, int bus,
2802 struct pci_ops *ops, void *sysdata, struct list_head *resources)
2803 {
2804 struct resource_entry *window;
2805 bool found = false;
2806 struct pci_bus *b;
2807 int max;
2808
2809 resource_list_for_each_entry(window, resources)
2810 if (window->res->flags & IORESOURCE_BUS) {
2811 found = true;
2812 break;
2813 }
2814
2815 b = pci_create_root_bus(parent, bus, ops, sysdata, resources);
2816 if (!b)
2817 return NULL;
2818
2819 if (!found) {
2820 dev_info(&b->dev,
2821 "No busn resource found for root bus, will use [bus %02x-ff]\n",
2822 bus);
2823 pci_bus_insert_busn_res(b, bus, 255);
2824 }
2825
2826 max = pci_scan_child_bus(b);
2827
2828 if (!found)
2829 pci_bus_update_busn_res_end(b, max);
2830
2831 return b;
2832 }
2833 EXPORT_SYMBOL(pci_scan_root_bus);
2834
2835 struct pci_bus *pci_scan_bus(int bus, struct pci_ops *ops,
2836 void *sysdata)
2837 {
2838 LIST_HEAD(resources);
2839 struct pci_bus *b;
2840
2841 pci_add_resource(&resources, &ioport_resource);
2842 pci_add_resource(&resources, &iomem_resource);
2843 pci_add_resource(&resources, &busn_resource);
2844 b = pci_create_root_bus(NULL, bus, ops, sysdata, &resources);
2845 if (b) {
2846 pci_scan_child_bus(b);
2847 } else {
2848 pci_free_resource_list(&resources);
2849 }
2850 return b;
2851 }
2852 EXPORT_SYMBOL(pci_scan_bus);
2853
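/*
 * Illustrative sketch of the legacy interface above, roughly how an arch
 * without firmware bus descriptions might use it.  example_pci_ops stands
 * in for the arch's config-space accessors and is not defined here.
 */
extern struct pci_ops example_pci_ops;

static void __init example_arch_pci_init(void)
{
	struct pci_bus *b = pci_scan_bus(0, &example_pci_ops, NULL);

	if (b) {
		pci_assign_unassigned_resources();
		pci_bus_add_devices(b);
	}
}
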
2854 /**
2855 * pci_rescan_bus_bridge_resize - scan a PCI bus for devices.
2856 * @bridge: PCI bridge for the bus to scan
2857 *
2858 * Scan a PCI bus and child buses for new devices, add them,
2859 * and enable them, resizing bridge MMIO/IO resources if necessary
2860 * and possible. The caller must ensure the child devices are already
2861 * removed for resizing to occur.
2862 *
2863 * Returns the max number of subordinate buses discovered.
2864 */
2865 unsigned int pci_rescan_bus_bridge_resize(struct pci_dev *bridge)
2866 {
2867 unsigned int max;
2868 struct pci_bus *bus = bridge->subordinate;
2869
2870 max = pci_scan_child_bus(bus);
2871
2872 pci_assign_unassigned_bridge_resources(bridge);
2873
2874 pci_bus_add_devices(bus);
2875
2876 return max;
2877 }
2878
2879 /**
2880 * pci_rescan_bus - scan a PCI bus for devices.
2881 * @bus: PCI bus to scan
2882 *
2883 * Scan a PCI bus and child buses for new devices, add them,
2884 * and enable them.
2885 *
2886 * Returns the max number of subordinate buses discovered.
2887 */
2888 unsigned int pci_rescan_bus(struct pci_bus *bus)
2889 {
2890 unsigned int max;
2891
2892 max = pci_scan_child_bus(bus);
2893 pci_assign_unassigned_bus_resources(bus);
2894 pci_bus_add_devices(bus);
2895
2896 return max;
2897 }
2898 EXPORT_SYMBOL_GPL(pci_rescan_bus);
2899
2900 /*
2901 * pci_rescan_bus(), pci_rescan_bus_bridge_resize() and PCI device removal
2902 * routines should always be executed under this mutex.
2903 */
2904 static DEFINE_MUTEX(pci_rescan_remove_lock);
2905
2906 void pci_lock_rescan_remove(void)
2907 {
2908 mutex_lock(&pci_rescan_remove_lock);
2909 }
2910 EXPORT_SYMBOL_GPL(pci_lock_rescan_remove);
2911
2912 void pci_unlock_rescan_remove(void)
2913 {
2914 mutex_unlock(&pci_rescan_remove_lock);
2915 }
2916 EXPORT_SYMBOL_GPL(pci_unlock_rescan_remove);
2917
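/*
 * Illustrative sketch, not from this file: every rescan or removal path
 * is expected to run under the mutex above, mirroring what the sysfs
 * "rescan" attribute handlers do.  The function name is hypothetical.
 */
static void example_locked_rescan(struct pci_bus *bus)
{
	pci_lock_rescan_remove();
	pci_rescan_bus(bus);
	pci_unlock_rescan_remove();
}
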
2918 static int __init pci_sort_bf_cmp(const struct device *d_a,
2919 const struct device *d_b)
2920 {
2921 const struct pci_dev *a = to_pci_dev(d_a);
2922 const struct pci_dev *b = to_pci_dev(d_b);
2923
2924 if (pci_domain_nr(a->bus) < pci_domain_nr(b->bus)) return -1;
2925 else if (pci_domain_nr(a->bus) > pci_domain_nr(b->bus)) return 1;
2926
2927 if (a->bus->number < b->bus->number) return -1;
2928 else if (a->bus->number > b->bus->number) return 1;
2929
2930 if (a->devfn < b->devfn) return -1;
2931 else if (a->devfn > b->devfn) return 1;
2932
2933 return 0;
2934 }
2935
2936 void __init pci_sort_breadthfirst(void)
2937 {
2938 bus_sort_breadthfirst(&pci_bus_type, &pci_sort_bf_cmp);
2939 }
2940
2941 int pci_hp_add_bridge(struct pci_dev *dev)
2942 {
2943 struct pci_bus *parent = dev->bus;
2944 int busnr, start = parent->busn_res.start;
2945 unsigned int available_buses = 0;
2946 int end = parent->busn_res.end;
2947
2948 for (busnr = start; busnr <= end; busnr++) {
2949 if (!pci_find_bus(pci_domain_nr(parent), busnr))
2950 break;
2951 }
2952 if (busnr-- > end) {
2953 dev_err(&dev->dev, "No bus number available for hot-added bridge\n");
2954 return -1;
2955 }
2956
2957 /* Scan bridges that are already configured */
2958 busnr = pci_scan_bridge(parent, dev, busnr, 0);
2959
2960 /*
2961 * Distribute the available bus numbers between hotplug-capable
2962 * bridges to make extending the chain later possible.
2963 */
2964 available_buses = end - busnr;
2965
2966 /* Scan bridges that need to be reconfigured */
2967 pci_scan_bridge_extend(parent, dev, busnr, available_buses, 1);
2968
2969 if (!dev->subordinate)
2970 return -1;
2971
2972 return 0;
2973 }
2974 EXPORT_SYMBOL_GPL(pci_hp_add_bridge);
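
/*
 * Illustrative sketch, loosely modeled on pciehp's enable path: after a
 * hot-add event, scan the slot, hand any new bridges their bus numbers
 * via pci_hp_add_bridge(), assign resources, then add the devices.  Not
 * a verbatim copy of any driver.
 */
static int example_enable_slot(struct pci_dev *bridge)
{
	struct pci_bus *parent = bridge->subordinate;
	struct pci_dev *dev;
	int num;

	num = pci_scan_slot(parent, PCI_DEVFN(0, 0));
	if (num == 0)
		return -ENODEV;

	for_each_pci_bridge(dev, parent)
		pci_hp_add_bridge(dev);

	pci_assign_unassigned_bridge_resources(bridge);
	pci_bus_add_devices(parent);

	return 0;
}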