]> git.proxmox.com Git - mirror_ubuntu-zesty-kernel.git/blob - drivers/pci/probe.c
PCI: Whitespace cleanup
[mirror_ubuntu-zesty-kernel.git] / drivers / pci / probe.c
1 /*
2 * probe.c - PCI detection and setup code
3 */
4
5 #include <linux/kernel.h>
6 #include <linux/delay.h>
7 #include <linux/init.h>
8 #include <linux/pci.h>
9 #include <linux/slab.h>
10 #include <linux/module.h>
11 #include <linux/cpumask.h>
12 #include <linux/pci-aspm.h>
13 #include <asm-generic/pci-bridge.h>
14 #include "pci.h"
15
16 #define CARDBUS_LATENCY_TIMER 176 /* secondary latency timer */
17 #define CARDBUS_RESERVE_BUSNR 3
18
/*
 * Default bus-number aperture for a PCI domain: the full range of
 * possible bus numbers, 0x00 through 0xff.
 */
static struct resource busn_resource = {
	.name	= "PCI busn",
	.start	= 0,
	.end	= 255,
	.flags	= IORESOURCE_BUS,
};
25
26 /* Ugh. Need to stop exporting this to modules. */
27 LIST_HEAD(pci_root_buses);
28 EXPORT_SYMBOL(pci_root_buses);
29
30 static LIST_HEAD(pci_domain_busn_res_list);
31
/*
 * Per-domain bus-number resource, kept on pci_domain_busn_res_list so
 * each PCI domain gets exactly one [bus 00-ff] range.
 */
struct pci_domain_busn_res {
	struct list_head list;	/* link on pci_domain_busn_res_list */
	struct resource res;	/* the IORESOURCE_BUS range itself */
	int domain_nr;		/* domain this range belongs to */
};
37
38 static struct resource *get_pci_domain_busn_res(int domain_nr)
39 {
40 struct pci_domain_busn_res *r;
41
42 list_for_each_entry(r, &pci_domain_busn_res_list, list)
43 if (r->domain_nr == domain_nr)
44 return &r->res;
45
46 r = kzalloc(sizeof(*r), GFP_KERNEL);
47 if (!r)
48 return NULL;
49
50 r->domain_nr = domain_nr;
51 r->res.start = 0;
52 r->res.end = 0xff;
53 r->res.flags = IORESOURCE_BUS | IORESOURCE_PCI_FIXED;
54
55 list_add_tail(&r->list, &pci_domain_busn_res_list);
56
57 return &r->res;
58 }
59
/* bus_find_device() callback that matches every device. */
static int find_anything(struct device *dev, void *data)
{
	return 1;
}
64
65 /*
66 * Some device drivers need know if pci is initiated.
67 * Basically, we think pci is not initiated when there
68 * is no device to be found on the pci_bus_type.
69 */
70 int no_pci_devices(void)
71 {
72 struct device *dev;
73 int no_devices;
74
75 dev = bus_find_device(&pci_bus_type, NULL, NULL, find_anything);
76 no_devices = (dev == NULL);
77 put_device(dev);
78 return no_devices;
79 }
80 EXPORT_SYMBOL(no_pci_devices);
81
82 /*
83 * PCI Bus Class
84 */
85 static void release_pcibus_dev(struct device *dev)
86 {
87 struct pci_bus *pci_bus = to_pci_bus(dev);
88
89 if (pci_bus->bridge)
90 put_device(pci_bus->bridge);
91 pci_bus_remove_resources(pci_bus);
92 pci_release_bus_of_node(pci_bus);
93 kfree(pci_bus);
94 }
95
/* sysfs class for PCI buses ("/sys/class/pci_bus"). */
static struct class pcibus_class = {
	.name		= "pci_bus",
	.dev_release	= &release_pcibus_dev,
	.dev_groups	= pcibus_groups,
};
101
102 static int __init pcibus_class_init(void)
103 {
104 return class_register(&pcibus_class);
105 }
106 postcore_initcall(pcibus_class_init);
107
108 static u64 pci_size(u64 base, u64 maxbase, u64 mask)
109 {
110 u64 size = mask & maxbase; /* Find the significant bits */
111 if (!size)
112 return 0;
113
114 /* Get the lowest of them to find the decode size, and
115 from that the extent. */
116 size = (size & ~(size-1)) - 1;
117
118 /* base == maxbase can be valid only if the BAR has
119 already been programmed with all 1s. */
120 if (base == maxbase && ((base | size) & mask) != mask)
121 return 0;
122
123 return size;
124 }
125
126 static inline unsigned long decode_bar(struct pci_dev *dev, u32 bar)
127 {
128 u32 mem_type;
129 unsigned long flags;
130
131 if ((bar & PCI_BASE_ADDRESS_SPACE) == PCI_BASE_ADDRESS_SPACE_IO) {
132 flags = bar & ~PCI_BASE_ADDRESS_IO_MASK;
133 flags |= IORESOURCE_IO;
134 return flags;
135 }
136
137 flags = bar & ~PCI_BASE_ADDRESS_MEM_MASK;
138 flags |= IORESOURCE_MEM;
139 if (flags & PCI_BASE_ADDRESS_MEM_PREFETCH)
140 flags |= IORESOURCE_PREFETCH;
141
142 mem_type = bar & PCI_BASE_ADDRESS_MEM_TYPE_MASK;
143 switch (mem_type) {
144 case PCI_BASE_ADDRESS_MEM_TYPE_32:
145 break;
146 case PCI_BASE_ADDRESS_MEM_TYPE_1M:
147 /* 1M mem BAR treated as 32-bit BAR */
148 break;
149 case PCI_BASE_ADDRESS_MEM_TYPE_64:
150 flags |= IORESOURCE_MEM_64;
151 break;
152 default:
153 /* mem unknown type treated as 32-bit BAR */
154 break;
155 }
156 return flags;
157 }
158
159 #define PCI_COMMAND_DECODE_ENABLE (PCI_COMMAND_MEMORY | PCI_COMMAND_IO)
160
/**
 * pci_read_base - read a PCI BAR
 * @dev: the PCI device
 * @type: type of the BAR
 * @res: resource buffer to be filled in
 * @pos: BAR position in the config space
 *
 * Sizes the BAR by writing all ones, reading back the significant bits
 * and restoring the original value, then converts the decoded bus range
 * into a CPU resource.  Decode is temporarily disabled (unless
 * dev->mmio_always_on) so the probe writes cannot cause stray cycles.
 *
 * Returns 1 if the BAR is 64-bit, or 0 if 32-bit.
 */
int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type,
		    struct resource *res, unsigned int pos)
{
	u32 l, sz, mask;
	u64 l64, sz64, mask64;
	u16 orig_cmd;
	struct pci_bus_region region, inverted_region;
	bool bar_too_big = false, bar_too_high = false, bar_invalid = false;

	/* ROM BARs have a reserved enable bit; mask it out when sizing */
	mask = type ? PCI_ROM_ADDRESS_MASK : ~0;

	/* No printks while decoding is disabled! */
	if (!dev->mmio_always_on) {
		pci_read_config_word(dev, PCI_COMMAND, &orig_cmd);
		if (orig_cmd & PCI_COMMAND_DECODE_ENABLE) {
			pci_write_config_word(dev, PCI_COMMAND,
				orig_cmd & ~PCI_COMMAND_DECODE_ENABLE);
		}
	}

	res->name = pci_name(dev);

	/* Classic BAR sizing: save, write all ones, read back, restore */
	pci_read_config_dword(dev, pos, &l);
	pci_write_config_dword(dev, pos, l | mask);
	pci_read_config_dword(dev, pos, &sz);
	pci_write_config_dword(dev, pos, l);

	/*
	 * All bits set in sz means the device isn't working properly.
	 * If the BAR isn't implemented, all bits must be 0.  If it's a
	 * memory BAR or a ROM, bit 0 must be clear; if it's an io BAR, bit
	 * 1 must be clear.
	 */
	if (!sz || sz == 0xffffffff)
		goto fail;

	/*
	 * I don't know how l can have all bits set.  Copied from old code.
	 * Maybe it fixes a bug on some ancient platform.
	 */
	if (l == 0xffffffff)
		l = 0;

	if (type == pci_bar_unknown) {
		res->flags = decode_bar(dev, l);
		res->flags |= IORESOURCE_SIZEALIGN;
		if (res->flags & IORESOURCE_IO) {
			l &= PCI_BASE_ADDRESS_IO_MASK;
			mask = PCI_BASE_ADDRESS_IO_MASK & (u32) IO_SPACE_LIMIT;
		} else {
			l &= PCI_BASE_ADDRESS_MEM_MASK;
			mask = (u32)PCI_BASE_ADDRESS_MEM_MASK;
		}
	} else {
		/* ROM BAR: preserve the enable bit in the flags */
		res->flags |= (l & IORESOURCE_ROM_ENABLE);
		l &= PCI_ROM_ADDRESS_MASK;
		mask = (u32)PCI_ROM_ADDRESS_MASK;
	}

	if (res->flags & IORESOURCE_MEM_64) {
		/* Size the upper dword of a 64-bit BAR the same way */
		l64 = l;
		sz64 = sz;
		mask64 = mask | (u64)~0 << 32;

		pci_read_config_dword(dev, pos + 4, &l);
		pci_write_config_dword(dev, pos + 4, ~0);
		pci_read_config_dword(dev, pos + 4, &sz);
		pci_write_config_dword(dev, pos + 4, l);

		l64 |= ((u64)l << 32);
		sz64 |= ((u64)sz << 32);

		sz64 = pci_size(l64, sz64, mask64);

		if (!sz64)
			goto fail;

		/* Can't represent a >4GB BAR with 32-bit addresses */
		if ((sizeof(dma_addr_t) < 8 || sizeof(resource_size_t) < 8) &&
		    sz64 > 0x100000000ULL) {
			res->flags |= IORESOURCE_UNSET | IORESOURCE_DISABLED;
			res->start = 0;
			res->end = 0;
			bar_too_big = true;
			goto out;
		}

		if ((sizeof(dma_addr_t) < 8) && l) {
			/* Above 32-bit boundary; try to reallocate */
			res->flags |= IORESOURCE_UNSET;
			res->start = 0;
			res->end = sz64;
			bar_too_high = true;
			goto out;
		} else {
			region.start = l64;
			region.end = l64 + sz64;
		}
	} else {
		sz = pci_size(l, sz, mask);

		if (!sz)
			goto fail;

		region.start = l;
		region.end = l + sz;
	}

	pcibios_bus_to_resource(dev->bus, res, &region);
	pcibios_resource_to_bus(dev->bus, &inverted_region, res);

	/*
	 * If "A" is a BAR value (a bus address), "bus_to_resource(A)" is
	 * the corresponding resource address (the physical address used by
	 * the CPU.  Converting that resource address back to a bus address
	 * should yield the original BAR value:
	 *
	 *     resource_to_bus(bus_to_resource(A)) == A
	 *
	 * If it doesn't, CPU accesses to "bus_to_resource(A)" will not
	 * be claimed by the device.
	 */
	if (inverted_region.start != region.start) {
		res->flags |= IORESOURCE_UNSET;
		res->start = 0;
		res->end = region.end - region.start;
		bar_invalid = true;
	}

	goto out;


fail:
	res->flags = 0;
out:
	/* Re-enable decoding before any printks (see note above) */
	if (!dev->mmio_always_on &&
	    (orig_cmd & PCI_COMMAND_DECODE_ENABLE))
		pci_write_config_word(dev, PCI_COMMAND, orig_cmd);

	if (bar_too_big)
		dev_err(&dev->dev, "reg 0x%x: can't handle BAR larger than 4GB (size %#010llx)\n",
			pos, (unsigned long long) sz64);
	if (bar_too_high)
		dev_info(&dev->dev, "reg 0x%x: can't handle BAR above 4G (bus address %#010llx)\n",
			 pos, (unsigned long long) l64);
	if (bar_invalid)
		dev_info(&dev->dev, "reg 0x%x: initial BAR value %#010llx invalid\n",
			 pos, (unsigned long long) region.start);
	if (res->flags)
		dev_printk(KERN_DEBUG, &dev->dev, "reg 0x%x: %pR\n", pos, res);

	return (res->flags & IORESOURCE_MEM_64) ? 1 : 0;
}
322
323 static void pci_read_bases(struct pci_dev *dev, unsigned int howmany, int rom)
324 {
325 unsigned int pos, reg;
326
327 for (pos = 0; pos < howmany; pos++) {
328 struct resource *res = &dev->resource[pos];
329 reg = PCI_BASE_ADDRESS_0 + (pos << 2);
330 pos += __pci_read_base(dev, pci_bar_unknown, res, reg);
331 }
332
333 if (rom) {
334 struct resource *res = &dev->resource[PCI_ROM_RESOURCE];
335 dev->rom_base_reg = rom;
336 res->flags = IORESOURCE_MEM | IORESOURCE_PREFETCH |
337 IORESOURCE_READONLY | IORESOURCE_CACHEABLE |
338 IORESOURCE_SIZEALIGN;
339 __pci_read_base(dev, pci_bar_mem32, res, rom);
340 }
341 }
342
343 static void pci_read_bridge_io(struct pci_bus *child)
344 {
345 struct pci_dev *dev = child->self;
346 u8 io_base_lo, io_limit_lo;
347 unsigned long io_mask, io_granularity, base, limit;
348 struct pci_bus_region region;
349 struct resource *res;
350
351 io_mask = PCI_IO_RANGE_MASK;
352 io_granularity = 0x1000;
353 if (dev->io_window_1k) {
354 /* Support 1K I/O space granularity */
355 io_mask = PCI_IO_1K_RANGE_MASK;
356 io_granularity = 0x400;
357 }
358
359 res = child->resource[0];
360 pci_read_config_byte(dev, PCI_IO_BASE, &io_base_lo);
361 pci_read_config_byte(dev, PCI_IO_LIMIT, &io_limit_lo);
362 base = (io_base_lo & io_mask) << 8;
363 limit = (io_limit_lo & io_mask) << 8;
364
365 if ((io_base_lo & PCI_IO_RANGE_TYPE_MASK) == PCI_IO_RANGE_TYPE_32) {
366 u16 io_base_hi, io_limit_hi;
367
368 pci_read_config_word(dev, PCI_IO_BASE_UPPER16, &io_base_hi);
369 pci_read_config_word(dev, PCI_IO_LIMIT_UPPER16, &io_limit_hi);
370 base |= ((unsigned long) io_base_hi << 16);
371 limit |= ((unsigned long) io_limit_hi << 16);
372 }
373
374 if (base <= limit) {
375 res->flags = (io_base_lo & PCI_IO_RANGE_TYPE_MASK) | IORESOURCE_IO;
376 region.start = base;
377 region.end = limit + io_granularity - 1;
378 pcibios_bus_to_resource(dev->bus, res, &region);
379 dev_printk(KERN_DEBUG, &dev->dev, " bridge window %pR\n", res);
380 }
381 }
382
383 static void pci_read_bridge_mmio(struct pci_bus *child)
384 {
385 struct pci_dev *dev = child->self;
386 u16 mem_base_lo, mem_limit_lo;
387 unsigned long base, limit;
388 struct pci_bus_region region;
389 struct resource *res;
390
391 res = child->resource[1];
392 pci_read_config_word(dev, PCI_MEMORY_BASE, &mem_base_lo);
393 pci_read_config_word(dev, PCI_MEMORY_LIMIT, &mem_limit_lo);
394 base = ((unsigned long) mem_base_lo & PCI_MEMORY_RANGE_MASK) << 16;
395 limit = ((unsigned long) mem_limit_lo & PCI_MEMORY_RANGE_MASK) << 16;
396 if (base <= limit) {
397 res->flags = (mem_base_lo & PCI_MEMORY_RANGE_TYPE_MASK) | IORESOURCE_MEM;
398 region.start = base;
399 region.end = limit + 0xfffff;
400 pcibios_bus_to_resource(dev->bus, res, &region);
401 dev_printk(KERN_DEBUG, &dev->dev, " bridge window %pR\n", res);
402 }
403 }
404
405 static void pci_read_bridge_mmio_pref(struct pci_bus *child)
406 {
407 struct pci_dev *dev = child->self;
408 u16 mem_base_lo, mem_limit_lo;
409 unsigned long base, limit;
410 struct pci_bus_region region;
411 struct resource *res;
412
413 res = child->resource[2];
414 pci_read_config_word(dev, PCI_PREF_MEMORY_BASE, &mem_base_lo);
415 pci_read_config_word(dev, PCI_PREF_MEMORY_LIMIT, &mem_limit_lo);
416 base = ((unsigned long) mem_base_lo & PCI_PREF_RANGE_MASK) << 16;
417 limit = ((unsigned long) mem_limit_lo & PCI_PREF_RANGE_MASK) << 16;
418
419 if ((mem_base_lo & PCI_PREF_RANGE_TYPE_MASK) == PCI_PREF_RANGE_TYPE_64) {
420 u32 mem_base_hi, mem_limit_hi;
421
422 pci_read_config_dword(dev, PCI_PREF_BASE_UPPER32, &mem_base_hi);
423 pci_read_config_dword(dev, PCI_PREF_LIMIT_UPPER32, &mem_limit_hi);
424
425 /*
426 * Some bridges set the base > limit by default, and some
427 * (broken) BIOSes do not initialize them. If we find
428 * this, just assume they are not being used.
429 */
430 if (mem_base_hi <= mem_limit_hi) {
431 #if BITS_PER_LONG == 64
432 base |= ((unsigned long) mem_base_hi) << 32;
433 limit |= ((unsigned long) mem_limit_hi) << 32;
434 #else
435 if (mem_base_hi || mem_limit_hi) {
436 dev_err(&dev->dev, "can't handle 64-bit "
437 "address space for bridge\n");
438 return;
439 }
440 #endif
441 }
442 }
443 if (base <= limit) {
444 res->flags = (mem_base_lo & PCI_PREF_RANGE_TYPE_MASK) |
445 IORESOURCE_MEM | IORESOURCE_PREFETCH;
446 if (res->flags & PCI_PREF_RANGE_TYPE_64)
447 res->flags |= IORESOURCE_MEM_64;
448 region.start = base;
449 region.end = limit + 0xfffff;
450 pcibios_bus_to_resource(dev->bus, res, &region);
451 dev_printk(KERN_DEBUG, &dev->dev, " bridge window %pR\n", res);
452 }
453 }
454
455 void pci_read_bridge_bases(struct pci_bus *child)
456 {
457 struct pci_dev *dev = child->self;
458 struct resource *res;
459 int i;
460
461 if (pci_is_root_bus(child)) /* It's a host bus, nothing to read */
462 return;
463
464 dev_info(&dev->dev, "PCI bridge to %pR%s\n",
465 &child->busn_res,
466 dev->transparent ? " (subtractive decode)" : "");
467
468 pci_bus_remove_resources(child);
469 for (i = 0; i < PCI_BRIDGE_RESOURCE_NUM; i++)
470 child->resource[i] = &dev->resource[PCI_BRIDGE_RESOURCES+i];
471
472 pci_read_bridge_io(child);
473 pci_read_bridge_mmio(child);
474 pci_read_bridge_mmio_pref(child);
475
476 if (dev->transparent) {
477 pci_bus_for_each_resource(child->parent, res, i) {
478 if (res && res->flags) {
479 pci_bus_add_resource(child, res,
480 PCI_SUBTRACTIVE_DECODE);
481 dev_printk(KERN_DEBUG, &dev->dev,
482 " bridge window %pR (subtractive decode)\n",
483 res);
484 }
485 }
486 }
487 }
488
489 static struct pci_bus *pci_alloc_bus(void)
490 {
491 struct pci_bus *b;
492
493 b = kzalloc(sizeof(*b), GFP_KERNEL);
494 if (!b)
495 return NULL;
496
497 INIT_LIST_HEAD(&b->node);
498 INIT_LIST_HEAD(&b->children);
499 INIT_LIST_HEAD(&b->devices);
500 INIT_LIST_HEAD(&b->slots);
501 INIT_LIST_HEAD(&b->resources);
502 b->max_bus_speed = PCI_SPEED_UNKNOWN;
503 b->cur_bus_speed = PCI_SPEED_UNKNOWN;
504 return b;
505 }
506
507 static void pci_release_host_bridge_dev(struct device *dev)
508 {
509 struct pci_host_bridge *bridge = to_pci_host_bridge(dev);
510
511 if (bridge->release_fn)
512 bridge->release_fn(bridge);
513
514 pci_free_resource_list(&bridge->windows);
515
516 kfree(bridge);
517 }
518
519 static struct pci_host_bridge *pci_alloc_host_bridge(struct pci_bus *b)
520 {
521 struct pci_host_bridge *bridge;
522
523 bridge = kzalloc(sizeof(*bridge), GFP_KERNEL);
524 if (!bridge)
525 return NULL;
526
527 INIT_LIST_HEAD(&bridge->windows);
528 bridge->bus = b;
529 return bridge;
530 }
531
/*
 * Translation table from the 4-bit PCI-X secondary bus frequency field
 * (PCI_X_SSTATUS_FREQ) to pci_bus_speed values.
 */
static const unsigned char pcix_bus_speed[] = {
	PCI_SPEED_UNKNOWN,		/* 0 */
	PCI_SPEED_66MHz_PCIX,		/* 1 */
	PCI_SPEED_100MHz_PCIX,		/* 2 */
	PCI_SPEED_133MHz_PCIX,		/* 3 */
	PCI_SPEED_UNKNOWN,		/* 4 */
	PCI_SPEED_66MHz_PCIX_ECC,	/* 5 */
	PCI_SPEED_100MHz_PCIX_ECC,	/* 6 */
	PCI_SPEED_133MHz_PCIX_ECC,	/* 7 */
	PCI_SPEED_UNKNOWN,		/* 8 */
	PCI_SPEED_66MHz_PCIX_266,	/* 9 */
	PCI_SPEED_100MHz_PCIX_266,	/* A */
	PCI_SPEED_133MHz_PCIX_266,	/* B */
	PCI_SPEED_UNKNOWN,		/* C */
	PCI_SPEED_66MHz_PCIX_533,	/* D */
	PCI_SPEED_100MHz_PCIX_533,	/* E */
	PCI_SPEED_133MHz_PCIX_533	/* F */
};
550
/*
 * Translation table from the PCIe Link Status "current link speed"
 * field (PCI_EXP_LNKSTA_CLS) to pci_bus_speed values.  Values 4-F are
 * reserved as of PCIe 3.0 (8 GT/s).
 */
const unsigned char pcie_link_speed[] = {
	PCI_SPEED_UNKNOWN,		/* 0 */
	PCIE_SPEED_2_5GT,		/* 1 */
	PCIE_SPEED_5_0GT,		/* 2 */
	PCIE_SPEED_8_0GT,		/* 3 */
	PCI_SPEED_UNKNOWN,		/* 4 */
	PCI_SPEED_UNKNOWN,		/* 5 */
	PCI_SPEED_UNKNOWN,		/* 6 */
	PCI_SPEED_UNKNOWN,		/* 7 */
	PCI_SPEED_UNKNOWN,		/* 8 */
	PCI_SPEED_UNKNOWN,		/* 9 */
	PCI_SPEED_UNKNOWN,		/* A */
	PCI_SPEED_UNKNOWN,		/* B */
	PCI_SPEED_UNKNOWN,		/* C */
	PCI_SPEED_UNKNOWN,		/* D */
	PCI_SPEED_UNKNOWN,		/* E */
	PCI_SPEED_UNKNOWN		/* F */
};
569
/*
 * Update @bus's current speed from a freshly read PCIe Link Status
 * register value (@linksta); PCI_EXP_LNKSTA_CLS holds the speed code.
 */
void pcie_update_link_speed(struct pci_bus *bus, u16 linksta)
{
	bus->cur_bus_speed = pcie_link_speed[linksta & PCI_EXP_LNKSTA_CLS];
}
EXPORT_SYMBOL_GPL(pcie_update_link_speed);
575
/* AGP rate table indexed by agp_speed()'s computed index. */
static unsigned char agp_speeds[] = {
	AGP_UNKNOWN,
	AGP_1X,
	AGP_2X,
	AGP_4X,
	AGP_8X
};
583
584 static enum pci_bus_speed agp_speed(int agp3, int agpstat)
585 {
586 int index = 0;
587
588 if (agpstat & 4)
589 index = 3;
590 else if (agpstat & 2)
591 index = 2;
592 else if (agpstat & 1)
593 index = 1;
594 else
595 goto out;
596
597 if (agp3) {
598 index += 2;
599 if (index == 5)
600 index = 0;
601 }
602
603 out:
604 return agp_speeds[index];
605 }
606
/*
 * Derive @bus's maximum and current speeds from its upstream bridge's
 * AGP, PCI-X or PCIe capability registers (checked in that order;
 * PCI-X returns early since its status covers both values).
 */
static void pci_set_bus_speed(struct pci_bus *bus)
{
	struct pci_dev *bridge = bus->self;
	int pos;

	pos = pci_find_capability(bridge, PCI_CAP_ID_AGP);
	if (!pos)
		pos = pci_find_capability(bridge, PCI_CAP_ID_AGP3);
	if (pos) {
		u32 agpstat, agpcmd;

		/* bit 3 = AGP3 mode flag, bits 2:0 = rate field */
		pci_read_config_dword(bridge, pos + PCI_AGP_STATUS, &agpstat);
		bus->max_bus_speed = agp_speed(agpstat & 8, agpstat & 7);

		pci_read_config_dword(bridge, pos + PCI_AGP_COMMAND, &agpcmd);
		bus->cur_bus_speed = agp_speed(agpstat & 8, agpcmd & 7);
	}

	pos = pci_find_capability(bridge, PCI_CAP_ID_PCIX);
	if (pos) {
		u16 status;
		enum pci_bus_speed max;

		pci_read_config_word(bridge, pos + PCI_X_BRIDGE_SSTATUS,
				     &status);

		/* Highest mode the secondary bus is capable of */
		if (status & PCI_X_SSTATUS_533MHZ) {
			max = PCI_SPEED_133MHz_PCIX_533;
		} else if (status & PCI_X_SSTATUS_266MHZ) {
			max = PCI_SPEED_133MHz_PCIX_266;
		} else if (status & PCI_X_SSTATUS_133MHZ) {
			/* PCI-X mode 2 implies ECC */
			if ((status & PCI_X_SSTATUS_VERS) == PCI_X_SSTATUS_V2)
				max = PCI_SPEED_133MHz_PCIX_ECC;
			else
				max = PCI_SPEED_133MHz_PCIX;
		} else {
			max = PCI_SPEED_66MHz_PCIX;
		}

		bus->max_bus_speed = max;
		/* Current frequency is the 4-bit field at bits 9:6 */
		bus->cur_bus_speed = pcix_bus_speed[
			(status & PCI_X_SSTATUS_FREQ) >> 6];

		return;
	}

	if (pci_is_pcie(bridge)) {
		u32 linkcap;
		u16 linksta;

		pcie_capability_read_dword(bridge, PCI_EXP_LNKCAP, &linkcap);
		bus->max_bus_speed = pcie_link_speed[linkcap & PCI_EXP_LNKCAP_SLS];

		pcie_capability_read_word(bridge, PCI_EXP_LNKSTA, &linksta);
		pcie_update_link_speed(bus, linksta);
	}
}
664
/*
 * Allocate and register a child bus numbered @busnr under @parent.
 * @bridge is the bridge device leading to the child, or NULL for a
 * bus with no bridge (e.g. some virtual buses).  Returns the new bus
 * or NULL on allocation failure.
 */
static struct pci_bus *pci_alloc_child_bus(struct pci_bus *parent,
					   struct pci_dev *bridge, int busnr)
{
	struct pci_bus *child;
	int i;
	int ret;

	/*
	 * Allocate a new bus, and inherit stuff from the parent..
	 */
	child = pci_alloc_bus();
	if (!child)
		return NULL;

	child->parent = parent;
	child->ops = parent->ops;
	child->msi = parent->msi;
	child->sysdata = parent->sysdata;
	child->bus_flags = parent->bus_flags;

	/* initialize some portions of the bus device, but don't register it
	 * now as the parent is not properly set up yet.
	 */
	child->dev.class = &pcibus_class;
	dev_set_name(&child->dev, "%04x:%02x", pci_domain_nr(child), busnr);

	/*
	 * Set up the primary, secondary and subordinate
	 * bus numbers.  The subordinate end is provisionally 0xff and
	 * is tightened later once scanning knows the real extent.
	 */
	child->number = child->busn_res.start = busnr;
	child->primary = parent->busn_res.start;
	child->busn_res.end = 0xff;

	if (!bridge) {
		/* No bridge device: parent the bus device directly */
		child->dev.parent = parent->bridge;
		goto add_dev;
	}

	/* Hold a reference on the bridge for the bus's lifetime
	 * (dropped in release_pcibus_dev()) */
	child->self = bridge;
	child->bridge = get_device(&bridge->dev);
	child->dev.parent = child->bridge;
	pci_set_bus_of_node(child);
	pci_set_bus_speed(child);

	/* Set up default resource pointers and names.. */
	for (i = 0; i < PCI_BRIDGE_RESOURCE_NUM; i++) {
		child->resource[i] = &bridge->resource[PCI_BRIDGE_RESOURCES+i];
		child->resource[i]->name = child->name;
	}
	bridge->subordinate = child;

add_dev:
	ret = device_register(&child->dev);
	WARN_ON(ret < 0);

	pcibios_add_bus(child);

	/* Create legacy_io and legacy_mem files for this bus */
	pci_create_legacy_files(child);

	return child;
}
728
729 struct pci_bus *pci_add_new_bus(struct pci_bus *parent, struct pci_dev *dev,
730 int busnr)
731 {
732 struct pci_bus *child;
733
734 child = pci_alloc_child_bus(parent, dev, busnr);
735 if (child) {
736 down_write(&pci_bus_sem);
737 list_add_tail(&child->node, &parent->children);
738 up_write(&pci_bus_sem);
739 }
740 return child;
741 }
742 EXPORT_SYMBOL(pci_add_new_bus);
743
/*
 * If it's a bridge, configure it and scan the bus behind it.
 * For CardBus bridges, we don't scan behind as the devices will
 * be handled by the bridge driver itself.
 *
 * We need to process bridges in two passes -- first we scan those
 * already configured by the BIOS and after we are done with all of
 * them, we proceed to assigning numbers to the remaining buses in
 * order to avoid overlaps between old and new bus numbers.
 *
 * @max is the highest bus number used so far; the (possibly larger)
 * new highest bus number is returned.
 */
int pci_scan_bridge(struct pci_bus *bus, struct pci_dev *dev, int max, int pass)
{
	struct pci_bus *child;
	int is_cardbus = (dev->hdr_type == PCI_HEADER_TYPE_CARDBUS);
	u32 buses, i, j = 0;
	u16 bctl;
	u8 primary, secondary, subordinate;
	int broken = 0;

	/* PCI_PRIMARY_BUS packs primary/secondary/subordinate in one dword */
	pci_read_config_dword(dev, PCI_PRIMARY_BUS, &buses);
	primary = buses & 0xFF;
	secondary = (buses >> 8) & 0xFF;
	subordinate = (buses >> 16) & 0xFF;

	dev_dbg(&dev->dev, "scanning [bus %02x-%02x] behind bridge, pass %d\n",
		secondary, subordinate, pass);

	/*
	 * Quirk for bridges whose primary-bus register reads 0 even
	 * though they sit on a non-zero bus; trust bus->number instead.
	 */
	if (!primary && (primary != bus->number) && secondary && subordinate) {
		dev_warn(&dev->dev, "Primary bus is hard wired to 0\n");
		primary = bus->number;
	}

	/* Check if setup is sensible at all */
	if (!pass &&
	    (primary != bus->number || secondary <= bus->number ||
	     secondary > subordinate || subordinate > bus->busn_res.end)) {
		dev_info(&dev->dev, "bridge configuration invalid ([bus %02x-%02x]), reconfiguring\n",
			 secondary, subordinate);
		broken = 1;
	}

	/* Disable MasterAbortMode during probing to avoid reporting
	   of bus errors (in some architectures) */
	pci_read_config_word(dev, PCI_BRIDGE_CONTROL, &bctl);
	pci_write_config_word(dev, PCI_BRIDGE_CONTROL,
			      bctl & ~PCI_BRIDGE_CTL_MASTER_ABORT);

	if ((secondary || subordinate) && !pcibios_assign_all_busses() &&
	    !is_cardbus && !broken) {
		unsigned int cmax;
		/*
		 * Bus already configured by firmware, process it in the first
		 * pass and just note the configuration.
		 */
		if (pass)
			goto out;

		/*
		 * The bus might already exist for two reasons: Either we are
		 * rescanning the bus or the bus is reachable through more than
		 * one bridge. The second case can happen with the i450NX
		 * chipset.
		 */
		child = pci_find_bus(pci_domain_nr(bus), secondary);
		if (!child) {
			child = pci_add_new_bus(bus, dev, secondary);
			if (!child)
				goto out;
			child->primary = primary;
			pci_bus_insert_busn_res(child, secondary, subordinate);
			child->bridge_ctl = bctl;
		}

		cmax = pci_scan_child_bus(child);
		if (cmax > subordinate)
			dev_warn(&dev->dev, "bridge has subordinate %02x but max busn %02x\n",
				 subordinate, cmax);
		/* subordinate should equal child->busn_res.end */
		if (subordinate > max)
			max = subordinate;
	} else {
		/*
		 * We need to assign a number to this bus which we always
		 * do in the second pass.
		 */
		if (!pass) {
			if (pcibios_assign_all_busses() || broken || is_cardbus)
				/* Temporarily disable forwarding of the
				   configuration cycles on all bridges in
				   this bus segment to avoid possible
				   conflicts in the second pass between two
				   bridges programmed with overlapping
				   bus ranges. */
				pci_write_config_dword(dev, PCI_PRIMARY_BUS,
						       buses & ~0xffffff);
			goto out;
		}

		/* No bus numbers left under this parent's range */
		if (max >= bus->busn_res.end) {
			dev_warn(&dev->dev, "can't allocate child bus %02x from %pR\n",
				 max, &bus->busn_res);
			goto out;
		}

		/* Clear errors */
		pci_write_config_word(dev, PCI_STATUS, 0xffff);

		/* The bus will already exist if we are rescanning */
		child = pci_find_bus(pci_domain_nr(bus), max+1);
		if (!child) {
			child = pci_add_new_bus(bus, dev, max+1);
			if (!child)
				goto out;
			pci_bus_insert_busn_res(child, max+1,
						bus->busn_res.end);
		}
		max++;
		/* Repack primary/secondary/subordinate; keep byte 3 */
		buses = (buses & 0xff000000)
		      | ((unsigned int)(child->primary)     <<  0)
		      | ((unsigned int)(child->busn_res.start)   <<  8)
		      | ((unsigned int)(child->busn_res.end) << 16);

		/*
		 * yenta.c forces a secondary latency timer of 176.
		 * Copy that behaviour here.
		 */
		if (is_cardbus) {
			buses &= ~0xff000000;
			buses |= CARDBUS_LATENCY_TIMER << 24;
		}

		/*
		 * We need to blast all three values with a single write.
		 */
		pci_write_config_dword(dev, PCI_PRIMARY_BUS, buses);

		if (!is_cardbus) {
			child->bridge_ctl = bctl;
			max = pci_scan_child_bus(child);
		} else {
			/*
			 * For CardBus bridges, we leave 4 bus numbers
			 * as cards with a PCI-to-PCI bridge can be
			 * inserted later.
			 */
			for (i = 0; i < CARDBUS_RESERVE_BUSNR; i++) {
				struct pci_bus *parent = bus;
				/* Stop if the next number is already taken */
				if (pci_find_bus(pci_domain_nr(bus),
						 max+i+1))
					break;
				/* Would reserving overrun an ancestor's range? */
				while (parent->parent) {
					if ((!pcibios_assign_all_busses()) &&
					    (parent->busn_res.end > max) &&
					    (parent->busn_res.end <= max+i)) {
						j = 1;
					}
					parent = parent->parent;
				}
				if (j) {
					/*
					 * Often, there are two cardbus bridges
					 * -- try to leave one valid bus number
					 * for each one.
					 */
					i /= 2;
					break;
				}
			}
			max += i;
		}
		/*
		 * Set the subordinate bus number to its real value.
		 */
		if (max > bus->busn_res.end) {
			dev_warn(&dev->dev, "max busn %02x is outside %pR\n",
				 max, &bus->busn_res);
			max = bus->busn_res.end;
		}
		pci_bus_update_busn_res_end(child, max);
		pci_write_config_byte(dev, PCI_SUBORDINATE_BUS, max);
	}

	sprintf(child->name,
		(is_cardbus ? "PCI CardBus %04x:%02x" : "PCI Bus %04x:%02x"),
		pci_domain_nr(bus), child->number);

	/* Has only triggered on CardBus, fixup is in yenta_socket */
	while (bus->parent) {
		if ((child->busn_res.end > bus->busn_res.end) ||
		    (child->number > bus->busn_res.end) ||
		    (child->number < bus->number) ||
		    (child->busn_res.end < bus->number)) {
			dev_info(&child->dev, "%pR %s "
				 "hidden behind%s bridge %s %pR\n",
				 &child->busn_res,
				 (bus->number > child->busn_res.end &&
				  bus->busn_res.end < child->number) ?
					"wholly" : "partially",
				 bus->self->transparent ? " transparent" : "",
				 dev_name(&bus->dev),
				 &bus->busn_res);
		}
		bus = bus->parent;
	}

out:
	/* Restore the bridge-control value saved above */
	pci_write_config_word(dev, PCI_BRIDGE_CONTROL, bctl);

	return max;
}
EXPORT_SYMBOL(pci_scan_bridge);
955
956 /*
957 * Read interrupt line and base address registers.
958 * The architecture-dependent code can tweak these, of course.
959 */
960 static void pci_read_irq(struct pci_dev *dev)
961 {
962 unsigned char irq;
963
964 pci_read_config_byte(dev, PCI_INTERRUPT_PIN, &irq);
965 dev->pin = irq;
966 if (irq)
967 pci_read_config_byte(dev, PCI_INTERRUPT_LINE, &irq);
968 dev->irq = irq;
969 }
970
971 void set_pcie_port_type(struct pci_dev *pdev)
972 {
973 int pos;
974 u16 reg16;
975
976 pos = pci_find_capability(pdev, PCI_CAP_ID_EXP);
977 if (!pos)
978 return;
979 pdev->pcie_cap = pos;
980 pci_read_config_word(pdev, pos + PCI_EXP_FLAGS, &reg16);
981 pdev->pcie_flags_reg = reg16;
982 pci_read_config_word(pdev, pos + PCI_EXP_DEVCAP, &reg16);
983 pdev->pcie_mpss = reg16 & PCI_EXP_DEVCAP_PAYLOAD;
984 }
985
986 void set_pcie_hotplug_bridge(struct pci_dev *pdev)
987 {
988 u32 reg32;
989
990 pcie_capability_read_dword(pdev, PCI_EXP_SLTCAP, &reg32);
991 if (reg32 & PCI_EXP_SLTCAP_HPC)
992 pdev->is_hotplug_bridge = 1;
993 }
994
995 /**
996 * pci_ext_cfg_is_aliased - is ext config space just an alias of std config?
997 * @dev: PCI device
998 *
999 * PCI Express to PCI/PCI-X Bridge Specification, rev 1.0, 4.1.4 says that
1000 * when forwarding a type1 configuration request the bridge must check that
1001 * the extended register address field is zero. The bridge is not permitted
1002 * to forward the transactions and must handle it as an Unsupported Request.
1003 * Some bridges do not follow this rule and simply drop the extended register
1004 * bits, resulting in the standard config space being aliased, every 256
1005 * bytes across the entire configuration space. Test for this condition by
1006 * comparing the first dword of each potential alias to the vendor/device ID.
1007 * Known offenders:
1008 * ASM1083/1085 PCIe-to-PCI Reversible Bridge (1b21:1080, rev 01 & 03)
1009 * AMD/ATI SBx00 PCI to PCI Bridge (1002:4384, rev 40)
1010 */
1011 static bool pci_ext_cfg_is_aliased(struct pci_dev *dev)
1012 {
1013 #ifdef CONFIG_PCI_QUIRKS
1014 int pos;
1015 u32 header, tmp;
1016
1017 pci_read_config_dword(dev, PCI_VENDOR_ID, &header);
1018
1019 for (pos = PCI_CFG_SPACE_SIZE;
1020 pos < PCI_CFG_SPACE_EXP_SIZE; pos += PCI_CFG_SPACE_SIZE) {
1021 if (pci_read_config_dword(dev, pos, &tmp) != PCIBIOS_SUCCESSFUL
1022 || header != tmp)
1023 return false;
1024 }
1025
1026 return true;
1027 #else
1028 return false;
1029 #endif
1030 }
1031
1032 /**
1033 * pci_cfg_space_size - get the configuration space size of the PCI device.
1034 * @dev: PCI device
1035 *
1036 * Regular PCI devices have 256 bytes, but PCI-X 2 and PCI Express devices
1037 * have 4096 bytes. Even if the device is capable, that doesn't mean we can
1038 * access it. Maybe we don't have a way to generate extended config space
1039 * accesses, or the device is behind a reverse Express bridge. So we try
1040 * reading the dword at 0x100 which must either be 0 or a valid extended
1041 * capability header.
1042 */
1043 static int pci_cfg_space_size_ext(struct pci_dev *dev)
1044 {
1045 u32 status;
1046 int pos = PCI_CFG_SPACE_SIZE;
1047
1048 if (pci_read_config_dword(dev, pos, &status) != PCIBIOS_SUCCESSFUL)
1049 goto fail;
1050 if (status == 0xffffffff || pci_ext_cfg_is_aliased(dev))
1051 goto fail;
1052
1053 return PCI_CFG_SPACE_EXP_SIZE;
1054
1055 fail:
1056 return PCI_CFG_SPACE_SIZE;
1057 }
1058
1059 int pci_cfg_space_size(struct pci_dev *dev)
1060 {
1061 int pos;
1062 u32 status;
1063 u16 class;
1064
1065 class = dev->class >> 8;
1066 if (class == PCI_CLASS_BRIDGE_HOST)
1067 return pci_cfg_space_size_ext(dev);
1068
1069 if (!pci_is_pcie(dev)) {
1070 pos = pci_find_capability(dev, PCI_CAP_ID_PCIX);
1071 if (!pos)
1072 goto fail;
1073
1074 pci_read_config_dword(dev, pos + PCI_X_STATUS, &status);
1075 if (!(status & (PCI_X_STATUS_266MHZ | PCI_X_STATUS_533MHZ)))
1076 goto fail;
1077 }
1078
1079 return pci_cfg_space_size_ext(dev);
1080
1081 fail:
1082 return PCI_CFG_SPACE_SIZE;
1083 }
1084
1085 #define LEGACY_IO_RESOURCE (IORESOURCE_IO | IORESOURCE_PCI_FIXED)
1086
1087 /**
1088 * pci_setup_device - fill in class and map information of a device
1089 * @dev: the device structure to fill
1090 *
1091 * Initialize the device structure with information about the device's
1092 * vendor,class,memory and IO-space addresses,IRQ lines etc.
1093 * Called at initialisation of the PCI subsystem and by CardBus services.
1094 * Returns 0 on success and negative if unknown type of device (not normal,
1095 * bridge or CardBus).
1096 */
1097 int pci_setup_device(struct pci_dev *dev)
1098 {
1099 u32 class;
1100 u8 hdr_type;
1101 struct pci_slot *slot;
1102 int pos = 0;
1103 struct pci_bus_region region;
1104 struct resource *res;
1105
1106 if (pci_read_config_byte(dev, PCI_HEADER_TYPE, &hdr_type))
1107 return -EIO;
1108
1109 dev->sysdata = dev->bus->sysdata;
1110 dev->dev.parent = dev->bus->bridge;
1111 dev->dev.bus = &pci_bus_type;
1112 dev->hdr_type = hdr_type & 0x7f;
1113 dev->multifunction = !!(hdr_type & 0x80);
1114 dev->error_state = pci_channel_io_normal;
1115 set_pcie_port_type(dev);
1116
1117 list_for_each_entry(slot, &dev->bus->slots, list)
1118 if (PCI_SLOT(dev->devfn) == slot->number)
1119 dev->slot = slot;
1120
1121 /* Assume 32-bit PCI; let 64-bit PCI cards (which are far rarer)
1122 set this higher, assuming the system even supports it. */
1123 dev->dma_mask = 0xffffffff;
1124
1125 dev_set_name(&dev->dev, "%04x:%02x:%02x.%d", pci_domain_nr(dev->bus),
1126 dev->bus->number, PCI_SLOT(dev->devfn),
1127 PCI_FUNC(dev->devfn));
1128
1129 pci_read_config_dword(dev, PCI_CLASS_REVISION, &class);
1130 dev->revision = class & 0xff;
1131 dev->class = class >> 8; /* upper 3 bytes */
1132
1133 dev_printk(KERN_DEBUG, &dev->dev, "[%04x:%04x] type %02x class %#08x\n",
1134 dev->vendor, dev->device, dev->hdr_type, dev->class);
1135
1136 /* need to have dev->class ready */
1137 dev->cfg_size = pci_cfg_space_size(dev);
1138
1139 /* "Unknown power state" */
1140 dev->current_state = PCI_UNKNOWN;
1141
1142 /* Early fixups, before probing the BARs */
1143 pci_fixup_device(pci_fixup_early, dev);
1144 /* device class may be changed after fixup */
1145 class = dev->class >> 8;
1146
1147 switch (dev->hdr_type) { /* header type */
1148 case PCI_HEADER_TYPE_NORMAL: /* standard header */
1149 if (class == PCI_CLASS_BRIDGE_PCI)
1150 goto bad;
1151 pci_read_irq(dev);
1152 pci_read_bases(dev, 6, PCI_ROM_ADDRESS);
1153 pci_read_config_word(dev, PCI_SUBSYSTEM_VENDOR_ID, &dev->subsystem_vendor);
1154 pci_read_config_word(dev, PCI_SUBSYSTEM_ID, &dev->subsystem_device);
1155
1156 /*
1157 * Do the ugly legacy mode stuff here rather than broken chip
1158 * quirk code. Legacy mode ATA controllers have fixed
1159 * addresses. These are not always echoed in BAR0-3, and
1160 * BAR0-3 in a few cases contain junk!
1161 */
1162 if (class == PCI_CLASS_STORAGE_IDE) {
1163 u8 progif;
1164 pci_read_config_byte(dev, PCI_CLASS_PROG, &progif);
1165 if ((progif & 1) == 0) {
1166 region.start = 0x1F0;
1167 region.end = 0x1F7;
1168 res = &dev->resource[0];
1169 res->flags = LEGACY_IO_RESOURCE;
1170 pcibios_bus_to_resource(dev->bus, res, &region);
1171 dev_info(&dev->dev, "legacy IDE quirk: reg 0x10: %pR\n",
1172 res);
1173 region.start = 0x3F6;
1174 region.end = 0x3F6;
1175 res = &dev->resource[1];
1176 res->flags = LEGACY_IO_RESOURCE;
1177 pcibios_bus_to_resource(dev->bus, res, &region);
1178 dev_info(&dev->dev, "legacy IDE quirk: reg 0x14: %pR\n",
1179 res);
1180 }
1181 if ((progif & 4) == 0) {
1182 region.start = 0x170;
1183 region.end = 0x177;
1184 res = &dev->resource[2];
1185 res->flags = LEGACY_IO_RESOURCE;
1186 pcibios_bus_to_resource(dev->bus, res, &region);
1187 dev_info(&dev->dev, "legacy IDE quirk: reg 0x18: %pR\n",
1188 res);
1189 region.start = 0x376;
1190 region.end = 0x376;
1191 res = &dev->resource[3];
1192 res->flags = LEGACY_IO_RESOURCE;
1193 pcibios_bus_to_resource(dev->bus, res, &region);
1194 dev_info(&dev->dev, "legacy IDE quirk: reg 0x1c: %pR\n",
1195 res);
1196 }
1197 }
1198 break;
1199
1200 case PCI_HEADER_TYPE_BRIDGE: /* bridge header */
1201 if (class != PCI_CLASS_BRIDGE_PCI)
1202 goto bad;
1203 /* The PCI-to-PCI bridge spec requires that subtractive
1204 decoding (i.e. transparent) bridge must have programming
1205 interface code of 0x01. */
1206 pci_read_irq(dev);
1207 dev->transparent = ((dev->class & 0xff) == 1);
1208 pci_read_bases(dev, 2, PCI_ROM_ADDRESS1);
1209 set_pcie_hotplug_bridge(dev);
1210 pos = pci_find_capability(dev, PCI_CAP_ID_SSVID);
1211 if (pos) {
1212 pci_read_config_word(dev, pos + PCI_SSVID_VENDOR_ID, &dev->subsystem_vendor);
1213 pci_read_config_word(dev, pos + PCI_SSVID_DEVICE_ID, &dev->subsystem_device);
1214 }
1215 break;
1216
1217 case PCI_HEADER_TYPE_CARDBUS: /* CardBus bridge header */
1218 if (class != PCI_CLASS_BRIDGE_CARDBUS)
1219 goto bad;
1220 pci_read_irq(dev);
1221 pci_read_bases(dev, 1, 0);
1222 pci_read_config_word(dev, PCI_CB_SUBSYSTEM_VENDOR_ID, &dev->subsystem_vendor);
1223 pci_read_config_word(dev, PCI_CB_SUBSYSTEM_ID, &dev->subsystem_device);
1224 break;
1225
1226 default: /* unknown header */
1227 dev_err(&dev->dev, "unknown header type %02x, "
1228 "ignoring device\n", dev->hdr_type);
1229 return -EIO;
1230
1231 bad:
1232 dev_err(&dev->dev, "ignoring class %#08x (doesn't match header "
1233 "type %02x)\n", dev->class, dev->hdr_type);
1234 dev->class = PCI_CLASS_NOT_DEFINED;
1235 }
1236
1237 /* We found a fine healthy device, go go go... */
1238 return 0;
1239 }
1240
/*
 * Free the capability-related state attached to @dev (VPD, SR-IOV, and
 * the saved-config buffers).  Only called from pci_release_dev(), when
 * the last reference to the device is dropped.
 */
static void pci_release_capabilities(struct pci_dev *dev)
{
	pci_vpd_release(dev);
	pci_iov_release(dev);
	pci_free_cap_save_buffers(dev);
}
1247
1248 /**
1249 * pci_release_dev - free a pci device structure when all users of it are finished.
1250 * @dev: device that's been disconnected
1251 *
1252 * Will be called only by the device core when all users of this pci device are
1253 * done.
1254 */
1255 static void pci_release_dev(struct device *dev)
1256 {
1257 struct pci_dev *pci_dev;
1258
1259 pci_dev = to_pci_dev(dev);
1260 pci_release_capabilities(pci_dev);
1261 pci_release_of_node(pci_dev);
1262 pcibios_release_device(pci_dev);
1263 pci_bus_put(pci_dev->bus);
1264 kfree(pci_dev->driver_override);
1265 kfree(pci_dev);
1266 }
1267
1268 struct pci_dev *pci_alloc_dev(struct pci_bus *bus)
1269 {
1270 struct pci_dev *dev;
1271
1272 dev = kzalloc(sizeof(struct pci_dev), GFP_KERNEL);
1273 if (!dev)
1274 return NULL;
1275
1276 INIT_LIST_HEAD(&dev->bus_list);
1277 dev->dev.type = &pci_dev_type;
1278 dev->bus = pci_bus_get(bus);
1279
1280 return dev;
1281 }
1282 EXPORT_SYMBOL(pci_alloc_dev);
1283
1284 bool pci_bus_read_dev_vendor_id(struct pci_bus *bus, int devfn, u32 *l,
1285 int crs_timeout)
1286 {
1287 int delay = 1;
1288
1289 if (pci_bus_read_config_dword(bus, devfn, PCI_VENDOR_ID, l))
1290 return false;
1291
1292 /* some broken boards return 0 or ~0 if a slot is empty: */
1293 if (*l == 0xffffffff || *l == 0x00000000 ||
1294 *l == 0x0000ffff || *l == 0xffff0000)
1295 return false;
1296
1297 /* Configuration request Retry Status */
1298 while (*l == 0xffff0001) {
1299 if (!crs_timeout)
1300 return false;
1301
1302 msleep(delay);
1303 delay *= 2;
1304 if (pci_bus_read_config_dword(bus, devfn, PCI_VENDOR_ID, l))
1305 return false;
1306 /* Card hasn't responded in 60 seconds? Must be stuck. */
1307 if (delay > crs_timeout) {
1308 printk(KERN_WARNING "pci %04x:%02x:%02x.%d: not "
1309 "responding\n", pci_domain_nr(bus),
1310 bus->number, PCI_SLOT(devfn),
1311 PCI_FUNC(devfn));
1312 return false;
1313 }
1314 }
1315
1316 return true;
1317 }
1318 EXPORT_SYMBOL(pci_bus_read_dev_vendor_id);
1319
1320 /*
1321 * Read the config data for a PCI device, sanity-check it
1322 * and fill in the dev structure...
1323 */
1324 static struct pci_dev *pci_scan_device(struct pci_bus *bus, int devfn)
1325 {
1326 struct pci_dev *dev;
1327 u32 l;
1328
1329 if (!pci_bus_read_dev_vendor_id(bus, devfn, &l, 60*1000))
1330 return NULL;
1331
1332 dev = pci_alloc_dev(bus);
1333 if (!dev)
1334 return NULL;
1335
1336 dev->devfn = devfn;
1337 dev->vendor = l & 0xffff;
1338 dev->device = (l >> 16) & 0xffff;
1339
1340 pci_set_of_node(dev);
1341
1342 if (pci_setup_device(dev)) {
1343 pci_bus_put(dev->bus);
1344 kfree(dev);
1345 return NULL;
1346 }
1347
1348 return dev;
1349 }
1350
/*
 * Discover and set up the capability-related state of @dev (MSI,
 * config-save buffers, power management, VPD, ARI, SR-IOV, ACS).
 * Called from pci_device_add() before the device is registered with
 * the driver core.
 */
static void pci_init_capabilities(struct pci_dev *dev)
{
	/* MSI/MSI-X list */
	pci_msi_init_pci_dev(dev);

	/* Buffers for saving PCIe and PCI-X capabilities */
	pci_allocate_cap_save_buffers(dev);

	/* Power Management */
	pci_pm_init(dev);

	/* Vital Product Data */
	pci_vpd_pci22_init(dev);

	/* Alternative Routing-ID Forwarding */
	pci_configure_ari(dev);

	/* Single Root I/O Virtualization */
	pci_iov_init(dev);

	/* Enable ACS P2P upstream forwarding */
	pci_enable_acs(dev);
}
1374
/**
 * pci_device_add - register a discovered PCI device with the device core
 * @dev: device set up by pci_setup_device()
 * @bus: bus to attach the device to
 *
 * Applies header fixups, initializes capabilities, links @dev into
 * @bus->devices under pci_bus_sem, and registers it with the driver
 * core.  match_driver is left false, so the device is added without
 * binding a driver yet.
 */
void pci_device_add(struct pci_dev *dev, struct pci_bus *bus)
{
	int ret;

	device_initialize(&dev->dev);
	dev->dev.release = pci_release_dev;

	set_dev_node(&dev->dev, pcibus_to_node(bus));
	dev->dev.dma_mask = &dev->dma_mask;
	dev->dev.dma_parms = &dev->dma_parms;
	dev->dev.coherent_dma_mask = 0xffffffffull;

	pci_set_dma_max_seg_size(dev, 65536);
	pci_set_dma_seg_boundary(dev, 0xffffffff);

	/* Fix up broken headers */
	pci_fixup_device(pci_fixup_header, dev);

	/* moved out from quirk header fixup code */
	pci_reassigndev_resource_alignment(dev);

	/* Clear the state_saved flag. */
	dev->state_saved = false;

	/* Initialize various capabilities */
	pci_init_capabilities(dev);

	/*
	 * Add the device to our list of discovered devices
	 * and the bus list for fixup functions, etc.
	 */
	down_write(&pci_bus_sem);
	list_add_tail(&dev->bus_list, &bus->devices);
	up_write(&pci_bus_sem);

	ret = pcibios_add_device(dev);
	WARN_ON(ret < 0);

	/* Notifier could use PCI capabilities */
	dev->match_driver = false;
	ret = device_add(&dev->dev);
	WARN_ON(ret < 0);
}
1418
/*
 * Scan a single devfn on @bus.  If the device is already known, return
 * the existing pci_dev; otherwise probe the slot and, when a device
 * answers, set it up and add it to the bus.  Returns NULL if the slot
 * is empty or setup fails.
 */
struct pci_dev *pci_scan_single_device(struct pci_bus *bus, int devfn)
{
	struct pci_dev *found, *dev;

	found = pci_get_slot(bus, devfn);
	if (found) {
		/* Already discovered: drop pci_get_slot()'s reference. */
		pci_dev_put(found);
		return found;
	}

	dev = pci_scan_device(bus, devfn);
	if (dev)
		pci_device_add(dev, bus);

	return dev;
}
EXPORT_SYMBOL(pci_scan_single_device);
1438
1439 static unsigned next_fn(struct pci_bus *bus, struct pci_dev *dev, unsigned fn)
1440 {
1441 int pos;
1442 u16 cap = 0;
1443 unsigned next_fn;
1444
1445 if (pci_ari_enabled(bus)) {
1446 if (!dev)
1447 return 0;
1448 pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ARI);
1449 if (!pos)
1450 return 0;
1451
1452 pci_read_config_word(dev, pos + PCI_ARI_CAP, &cap);
1453 next_fn = PCI_ARI_CAP_NFN(cap);
1454 if (next_fn <= fn)
1455 return 0; /* protect against malformed list */
1456
1457 return next_fn;
1458 }
1459
1460 /* dev may be NULL for non-contiguous multifunction devices */
1461 if (!dev || dev->multifunction)
1462 return (fn + 1) % 8;
1463
1464 return 0;
1465 }
1466
1467 static int only_one_child(struct pci_bus *bus)
1468 {
1469 struct pci_dev *parent = bus->self;
1470
1471 if (!parent || !pci_is_pcie(parent))
1472 return 0;
1473 if (pci_pcie_type(parent) == PCI_EXP_TYPE_ROOT_PORT)
1474 return 1;
1475 if (pci_pcie_type(parent) == PCI_EXP_TYPE_DOWNSTREAM &&
1476 !pci_has_flag(PCI_SCAN_ALL_PCIE_DEVS))
1477 return 1;
1478 return 0;
1479 }
1480
1481 /**
1482 * pci_scan_slot - scan a PCI slot on a bus for devices.
1483 * @bus: PCI bus to scan
1484 * @devfn: slot number to scan (must have zero function.)
1485 *
1486 * Scan a PCI slot on the specified PCI bus for devices, adding
1487 * discovered devices to the @bus->devices list. New devices
1488 * will not have is_added set.
1489 *
1490 * Returns the number of new devices found.
1491 */
1492 int pci_scan_slot(struct pci_bus *bus, int devfn)
1493 {
1494 unsigned fn, nr = 0;
1495 struct pci_dev *dev;
1496
1497 if (only_one_child(bus) && (devfn > 0))
1498 return 0; /* Already scanned the entire slot */
1499
1500 dev = pci_scan_single_device(bus, devfn);
1501 if (!dev)
1502 return 0;
1503 if (!dev->is_added)
1504 nr++;
1505
1506 for (fn = next_fn(bus, dev, 0); fn > 0; fn = next_fn(bus, dev, fn)) {
1507 dev = pci_scan_single_device(bus, devfn + fn);
1508 if (dev) {
1509 if (!dev->is_added)
1510 nr++;
1511 dev->multifunction = 1;
1512 }
1513 }
1514
1515 /* only one slot has pcie device */
1516 if (bus->self && nr)
1517 pcie_aspm_init_link_state(bus->self);
1518
1519 return nr;
1520 }
1521 EXPORT_SYMBOL(pci_scan_slot);
1522
/*
 * pci_walk_bus() callback: reduce *@data (the fabric-wide Max Payload
 * Size, encoded as 128 << value) to the smallest value supported by
 * every device visited.
 */
static int pcie_find_smpss(struct pci_dev *dev, void *data)
{
	u8 *smpss = data;

	if (!pci_is_pcie(dev))
		return 0;

	/*
	 * We don't have a way to change MPS settings on devices that have
	 * drivers attached.  A hot-added device might support only the minimum
	 * MPS setting (MPS=128).  Therefore, if the fabric contains a bridge
	 * where devices may be hot-added, we limit the fabric MPS to 128 so
	 * hot-added devices will work correctly.
	 *
	 * However, if we hot-add a device to a slot directly below a Root
	 * Port, it's impossible for there to be other existing devices below
	 * the port.  We don't limit the MPS in this case because we can
	 * reconfigure MPS on both the Root Port and the hot-added device,
	 * and there are no other devices involved.
	 *
	 * Note that this PCIE_BUS_SAFE path assumes no peer-to-peer DMA.
	 */
	if (dev->is_hotplug_bridge &&
	    pci_pcie_type(dev) != PCI_EXP_TYPE_ROOT_PORT)
		*smpss = 0;

	if (*smpss > dev->pcie_mpss)
		*smpss = dev->pcie_mpss;

	return 0;
}
1554
/*
 * Program the Max Payload Size of @dev.  @mps is in bytes.  In
 * "performance" mode the requested value is overridden: the device gets
 * the largest MPS it advertises, clamped to the upstream bridge's
 * current MPS (Root Ports have no upstream clamp).
 */
static void pcie_write_mps(struct pci_dev *dev, int mps)
{
	int rc;

	if (pcie_bus_config == PCIE_BUS_PERFORMANCE) {
		mps = 128 << dev->pcie_mpss;

		if (pci_pcie_type(dev) != PCI_EXP_TYPE_ROOT_PORT &&
		    dev->bus->self)
			/* For "Performance", the assumption is made that
			 * downstream communication will never be larger than
			 * the MRRS. So, the MPS only needs to be configured
			 * for the upstream communication. This being the case,
			 * walk from the top down and set the MPS of the child
			 * to that of the parent bus.
			 *
			 * Configure the device MPS with the smaller of the
			 * device MPSS or the bridge MPS (which is assumed to be
			 * properly configured at this point to the largest
			 * allowable MPS based on its parent bus).
			 */
			mps = min(mps, pcie_get_mps(dev->bus->self));
	}

	rc = pcie_set_mps(dev, mps);
	if (rc)
		dev_err(&dev->dev, "Failed attempting to set the MPS\n");
}
1583
1584 static void pcie_write_mrrs(struct pci_dev *dev)
1585 {
1586 int rc, mrrs;
1587
1588 /* In the "safe" case, do not configure the MRRS. There appear to be
1589 * issues with setting MRRS to 0 on a number of devices.
1590 */
1591 if (pcie_bus_config != PCIE_BUS_PERFORMANCE)
1592 return;
1593
1594 /* For Max performance, the MRRS must be set to the largest supported
1595 * value. However, it cannot be configured larger than the MPS the
1596 * device or the bus can support. This should already be properly
1597 * configured by a prior call to pcie_write_mps.
1598 */
1599 mrrs = pcie_get_mps(dev);
1600
1601 /* MRRS is a R/W register. Invalid values can be written, but a
1602 * subsequent read will verify if the value is acceptable or not.
1603 * If the MRRS value provided is not acceptable (e.g., too large),
1604 * shrink the value until it is acceptable to the HW.
1605 */
1606 while (mrrs != pcie_get_readrq(dev) && mrrs >= 128) {
1607 rc = pcie_set_readrq(dev, mrrs);
1608 if (!rc)
1609 break;
1610
1611 dev_warn(&dev->dev, "Failed attempting to set the MRRS\n");
1612 mrrs /= 2;
1613 }
1614
1615 if (mrrs < 128)
1616 dev_err(&dev->dev, "MRRS was unable to be configured with a "
1617 "safe value. If problems are experienced, try running "
1618 "with pci=pcie_bus_safe.\n");
1619 }
1620
1621 static void pcie_bus_detect_mps(struct pci_dev *dev)
1622 {
1623 struct pci_dev *bridge = dev->bus->self;
1624 int mps, p_mps;
1625
1626 if (!bridge)
1627 return;
1628
1629 mps = pcie_get_mps(dev);
1630 p_mps = pcie_get_mps(bridge);
1631
1632 if (mps != p_mps)
1633 dev_warn(&dev->dev, "Max Payload Size %d, but upstream %s set to %d; if necessary, use \"pci=pcie_bus_safe\" and report a bug\n",
1634 mps, pci_name(bridge), p_mps);
1635 }
1636
1637 static int pcie_bus_configure_set(struct pci_dev *dev, void *data)
1638 {
1639 int mps, orig_mps;
1640
1641 if (!pci_is_pcie(dev))
1642 return 0;
1643
1644 if (pcie_bus_config == PCIE_BUS_TUNE_OFF) {
1645 pcie_bus_detect_mps(dev);
1646 return 0;
1647 }
1648
1649 mps = 128 << *(u8 *)data;
1650 orig_mps = pcie_get_mps(dev);
1651
1652 pcie_write_mps(dev, mps);
1653 pcie_write_mrrs(dev);
1654
1655 dev_info(&dev->dev, "Max Payload Size set to %4d/%4d (was %4d), "
1656 "Max Read Rq %4d\n", pcie_get_mps(dev), 128 << dev->pcie_mpss,
1657 orig_mps, pcie_get_readrq(dev));
1658
1659 return 0;
1660 }
1661
/* pcie_bus_configure_settings requires that pci_walk_bus work in a top-down,
 * parents then children fashion. If this changes, then this code will not
 * work as designed.
 */
void pcie_bus_configure_settings(struct pci_bus *bus)
{
	u8 smpss = 0;

	/* Nothing to tune on a root bus or a non-PCIe bridge. */
	if (!bus->self)
		return;

	if (!pci_is_pcie(bus->self))
		return;

	/* FIXME - Peer to peer DMA is possible, though the endpoint would need
	 * to be aware of the MPS of the destination. To work around this,
	 * simply force the MPS of the entire system to the smallest possible.
	 */
	if (pcie_bus_config == PCIE_BUS_PEER2PEER)
		smpss = 0;

	if (pcie_bus_config == PCIE_BUS_SAFE) {
		/* "Safe": smallest MPS supported anywhere on this fabric. */
		smpss = bus->self->pcie_mpss;

		pcie_find_smpss(bus->self, &smpss);
		pci_walk_bus(bus, pcie_find_smpss, &smpss);
	}

	pcie_bus_configure_set(bus->self, &smpss);
	pci_walk_bus(bus, pcie_bus_configure_set, &smpss);
}
EXPORT_SYMBOL_GPL(pcie_bus_configure_settings);
1694
/*
 * Scan every slot of @bus, reserve bus numbers for SR-IOV, apply arch
 * fixups to a newly added bus, and recurse behind any bridges found.
 * Returns the highest subordinate bus number discovered.
 */
unsigned int pci_scan_child_bus(struct pci_bus *bus)
{
	unsigned int devfn, pass, max = bus->busn_res.start;
	struct pci_dev *dev;

	dev_dbg(&bus->dev, "scanning bus\n");

	/* Go find them, Rover! */
	for (devfn = 0; devfn < 0x100; devfn += 8)
		pci_scan_slot(bus, devfn);

	/* Reserve buses for SR-IOV capability. */
	max += pci_iov_bus_range(bus);

	/*
	 * After performing arch-dependent fixup of the bus, look behind
	 * all PCI-to-PCI bridges on this bus.
	 */
	if (!bus->is_added) {
		dev_dbg(&bus->dev, "fixups for bus\n");
		pcibios_fixup_bus(bus);
		bus->is_added = 1;
	}

	/* Bridges are handled in two passes; see pci_scan_bridge(). */
	for (pass = 0; pass < 2; pass++)
		list_for_each_entry(dev, &bus->devices, bus_list) {
			if (pci_is_bridge(dev))
				max = pci_scan_bridge(bus, dev, max, pass);
		}

	/*
	 * We've scanned the bus and so we know all about what's on
	 * the other side of any bridges that may be on this bus plus
	 * any devices.
	 *
	 * Return how far we've got finding sub-buses.
	 */
	dev_dbg(&bus->dev, "bus scan returning with max=%02x\n", max);
	return max;
}
EXPORT_SYMBOL_GPL(pci_scan_child_bus);
1736
1737 /**
1738 * pcibios_root_bridge_prepare - Platform-specific host bridge setup.
1739 * @bridge: Host bridge to set up.
1740 *
1741 * Default empty implementation. Replace with an architecture-specific setup
1742 * routine, if necessary.
1743 */
1744 int __weak pcibios_root_bridge_prepare(struct pci_host_bridge *bridge)
1745 {
1746 return 0;
1747 }
1748
/* Arch hook invoked after a bus device is registered; default no-op. */
void __weak pcibios_add_bus(struct pci_bus *bus)
{
}
1752
/* Arch hook invoked when a bus is removed; default no-op. */
void __weak pcibios_remove_bus(struct pci_bus *bus)
{
}
1756
/*
 * Create a root bus with number @bus, register its host bridge device,
 * and attach the apertures described by @resources (which are moved onto
 * the bridge's window list).  Returns the new bus, or NULL if the bus is
 * already known or any registration step fails.  The caller still has to
 * scan the bus and add devices.
 */
struct pci_bus *pci_create_root_bus(struct device *parent, int bus,
		struct pci_ops *ops, void *sysdata, struct list_head *resources)
{
	int error;
	struct pci_host_bridge *bridge;
	struct pci_bus *b, *b2;
	struct pci_host_bridge_window *window, *n;
	struct resource *res;
	resource_size_t offset;
	char bus_addr[64];
	char *fmt;

	b = pci_alloc_bus();
	if (!b)
		return NULL;

	b->sysdata = sysdata;
	b->ops = ops;
	b->number = b->busn_res.start = bus;
	b2 = pci_find_bus(pci_domain_nr(b), bus);
	if (b2) {
		/* If we already got to this bus through a different bridge, ignore it */
		dev_dbg(&b2->dev, "bus already known\n");
		goto err_out;
	}

	bridge = pci_alloc_host_bridge(b);
	if (!bridge)
		goto err_out;

	bridge->dev.parent = parent;
	bridge->dev.release = pci_release_host_bridge_dev;
	dev_set_name(&bridge->dev, "pci%04x:%02x", pci_domain_nr(b), bus);
	error = pcibios_root_bridge_prepare(bridge);
	if (error) {
		/* Not yet registered with the driver core: plain kfree. */
		kfree(bridge);
		goto err_out;
	}

	error = device_register(&bridge->dev);
	if (error) {
		/* device_register() failed: release via the refcount. */
		put_device(&bridge->dev);
		goto err_out;
	}
	b->bridge = get_device(&bridge->dev);
	device_enable_async_suspend(b->bridge);
	pci_set_bus_of_node(b);

	if (!parent)
		set_dev_node(b->bridge, pcibus_to_node(b));

	b->dev.class = &pcibus_class;
	b->dev.parent = b->bridge;
	dev_set_name(&b->dev, "%04x:%02x", pci_domain_nr(b), bus);
	error = device_register(&b->dev);
	if (error)
		goto class_dev_reg_err;

	pcibios_add_bus(b);

	/* Create legacy_io and legacy_mem files for this bus */
	pci_create_legacy_files(b);

	if (parent)
		dev_info(parent, "PCI host bridge to bus %s\n", dev_name(&b->dev));
	else
		printk(KERN_INFO "PCI host bridge to bus %s\n", dev_name(&b->dev));

	/* Add initial resources to the bus */
	list_for_each_entry_safe(window, n, resources, list) {
		list_move_tail(&window->list, &bridge->windows);
		res = window->res;
		offset = window->offset;
		if (res->flags & IORESOURCE_BUS)
			pci_bus_insert_busn_res(b, bus, res->end);
		else
			pci_bus_add_resource(b, res, 0);
		if (offset) {
			if (resource_type(res) == IORESOURCE_IO)
				fmt = " (bus address [%#06llx-%#06llx])";
			else
				fmt = " (bus address [%#010llx-%#010llx])";
			snprintf(bus_addr, sizeof(bus_addr), fmt,
				 (unsigned long long) (res->start - offset),
				 (unsigned long long) (res->end - offset));
		} else
			bus_addr[0] = '\0';
		dev_info(&b->dev, "root bus resource %pR%s\n", res, bus_addr);
	}

	down_write(&pci_bus_sem);
	list_add_tail(&b->node, &pci_root_buses);
	up_write(&pci_bus_sem);

	return b;

class_dev_reg_err:
	/* Drop the b->bridge reference, then tear the bridge device down. */
	put_device(&bridge->dev);
	device_unregister(&bridge->dev);
err_out:
	kfree(b);
	return NULL;
}
1860
/**
 * pci_bus_insert_busn_res - claim the bus-number range [bus, bus_max] for @b
 * @b: the PCI bus
 * @bus: first bus number of the range
 * @bus_max: last bus number of the range
 *
 * Inserts b->busn_res under the parent bridge's bus-number resource, or
 * under the per-domain resource for a root bus (marked PCI_FIXED).
 * Returns nonzero on success, 0 if the range conflicts with an
 * already-claimed range.
 */
int pci_bus_insert_busn_res(struct pci_bus *b, int bus, int bus_max)
{
	struct resource *res = &b->busn_res;
	struct resource *parent_res, *conflict;

	res->start = bus;
	res->end = bus_max;
	res->flags = IORESOURCE_BUS;

	if (!pci_is_root_bus(b))
		parent_res = &b->parent->busn_res;
	else {
		parent_res = get_pci_domain_busn_res(pci_domain_nr(b));
		res->flags |= IORESOURCE_PCI_FIXED;
	}

	conflict = request_resource_conflict(parent_res, res);

	if (conflict)
		dev_printk(KERN_DEBUG, &b->dev,
			   "busn_res: can not insert %pR under %s%pR (conflicts with %s %pR)\n",
			   res, pci_is_root_bus(b) ? "domain " : "",
			   parent_res, conflict->name, conflict);

	return conflict == NULL;
}
1887
/**
 * pci_bus_update_busn_res_end - move the end of @b's bus-number range
 * @b: the PCI bus
 * @bus_max: new last bus number (must not be below the current start)
 *
 * Resizes b->busn_res to end at @bus_max; if the resource had not been
 * inserted into the tree yet, insert it now.  Returns 0 on success or a
 * negative errno.
 */
int pci_bus_update_busn_res_end(struct pci_bus *b, int bus_max)
{
	struct resource *res = &b->busn_res;
	struct resource old_res = *res;
	resource_size_t size;
	int ret;

	if (res->start > bus_max)
		return -EINVAL;

	size = bus_max - res->start + 1;
	ret = adjust_resource(res, res->start, size);
	dev_printk(KERN_DEBUG, &b->dev,
		   "busn_res: %pR end %s updated to %02x\n",
		   &old_res, ret ? "can not be" : "is", bus_max);

	if (!ret && !res->parent)
		pci_bus_insert_busn_res(b, res->start, res->end);

	return ret;
}
1909
/*
 * Release @b's bus-number resource from the resource tree.  Safe to call
 * on a bus whose busn_res was never set up or inserted.
 */
void pci_bus_release_busn_res(struct pci_bus *b)
{
	struct resource *res = &b->busn_res;
	int ret;

	if (!res->flags || !res->parent)
		return;

	ret = release_resource(res);
	dev_printk(KERN_DEBUG, &b->dev,
		   "busn_res: %pR %s released\n",
		   res, ret ? "can not be" : "is");
}
1923
/**
 * pci_scan_root_bus - create a root bus, scan it, and add its devices
 * @parent: parent device of the host bridge (may be NULL)
 * @bus: bus number of the new root bus
 * @ops: config-space access methods
 * @sysdata: opaque arch data for the bus
 * @resources: bridge aperture windows; should contain an IORESOURCE_BUS
 *	entry describing the usable bus-number range
 *
 * If @resources has no bus-number window, [bus, 0xff] is assumed and then
 * trimmed down to the highest bus the scan actually found.  Returns the
 * new bus or NULL on failure.
 */
struct pci_bus *pci_scan_root_bus(struct device *parent, int bus,
		struct pci_ops *ops, void *sysdata, struct list_head *resources)
{
	struct pci_host_bridge_window *window;
	bool found = false;
	struct pci_bus *b;
	int max;

	/* Did the caller describe the usable bus-number range? */
	list_for_each_entry(window, resources, list)
		if (window->res->flags & IORESOURCE_BUS) {
			found = true;
			break;
		}

	b = pci_create_root_bus(parent, bus, ops, sysdata, resources);
	if (!b)
		return NULL;

	if (!found) {
		dev_info(&b->dev,
			 "No busn resource found for root bus, will use [bus %02x-ff]\n",
			 bus);
		pci_bus_insert_busn_res(b, bus, 255);
	}

	max = pci_scan_child_bus(b);

	if (!found)
		pci_bus_update_busn_res_end(b, max);

	pci_bus_add_devices(b);
	return b;
}
EXPORT_SYMBOL(pci_scan_root_bus);
1958
/*
 * Deprecated; use pci_scan_root_bus() instead.  Creates and scans a root
 * bus using the global I/O port, memory and bus-number resources.  Unlike
 * pci_scan_bus(), it does NOT call pci_bus_add_devices() -- the caller is
 * responsible for adding the discovered devices.
 */
struct pci_bus *pci_scan_bus_parented(struct device *parent,
		int bus, struct pci_ops *ops, void *sysdata)
{
	LIST_HEAD(resources);
	struct pci_bus *b;

	pci_add_resource(&resources, &ioport_resource);
	pci_add_resource(&resources, &iomem_resource);
	pci_add_resource(&resources, &busn_resource);
	b = pci_create_root_bus(parent, bus, ops, sysdata, &resources);
	if (b)
		pci_scan_child_bus(b);
	else
		/* On failure the windows were not consumed; free them. */
		pci_free_resource_list(&resources);
	return b;
}
EXPORT_SYMBOL(pci_scan_bus_parented);
1977
1978 struct pci_bus *pci_scan_bus(int bus, struct pci_ops *ops,
1979 void *sysdata)
1980 {
1981 LIST_HEAD(resources);
1982 struct pci_bus *b;
1983
1984 pci_add_resource(&resources, &ioport_resource);
1985 pci_add_resource(&resources, &iomem_resource);
1986 pci_add_resource(&resources, &busn_resource);
1987 b = pci_create_root_bus(NULL, bus, ops, sysdata, &resources);
1988 if (b) {
1989 pci_scan_child_bus(b);
1990 pci_bus_add_devices(b);
1991 } else {
1992 pci_free_resource_list(&resources);
1993 }
1994 return b;
1995 }
1996 EXPORT_SYMBOL(pci_scan_bus);
1997
1998 /**
1999 * pci_rescan_bus_bridge_resize - scan a PCI bus for devices.
2000 * @bridge: PCI bridge for the bus to scan
2001 *
2002 * Scan a PCI bus and child buses for new devices, add them,
2003 * and enable them, resizing bridge mmio/io resource if necessary
2004 * and possible. The caller must ensure the child devices are already
2005 * removed for resizing to occur.
2006 *
2007 * Returns the max number of subordinate bus discovered.
2008 */
2009 unsigned int pci_rescan_bus_bridge_resize(struct pci_dev *bridge)
2010 {
2011 unsigned int max;
2012 struct pci_bus *bus = bridge->subordinate;
2013
2014 max = pci_scan_child_bus(bus);
2015
2016 pci_assign_unassigned_bridge_resources(bridge);
2017
2018 pci_bus_add_devices(bus);
2019
2020 return max;
2021 }
2022
2023 /**
2024 * pci_rescan_bus - scan a PCI bus for devices.
2025 * @bus: PCI bus to scan
2026 *
2027 * Scan a PCI bus and child buses for new devices, adds them,
2028 * and enables them.
2029 *
2030 * Returns the max number of subordinate bus discovered.
2031 */
2032 unsigned int pci_rescan_bus(struct pci_bus *bus)
2033 {
2034 unsigned int max;
2035
2036 max = pci_scan_child_bus(bus);
2037 pci_assign_unassigned_bus_resources(bus);
2038 pci_bus_add_devices(bus);
2039
2040 return max;
2041 }
2042 EXPORT_SYMBOL_GPL(pci_rescan_bus);
2043
2044 /*
2045 * pci_rescan_bus(), pci_rescan_bus_bridge_resize() and PCI device removal
2046 * routines should always be executed under this mutex.
2047 */
2048 static DEFINE_MUTEX(pci_rescan_remove_lock);
2049
/* Acquire the mutex serializing PCI rescan and device removal. */
void pci_lock_rescan_remove(void)
{
	mutex_lock(&pci_rescan_remove_lock);
}
EXPORT_SYMBOL_GPL(pci_lock_rescan_remove);
2055
/* Release the mutex taken by pci_lock_rescan_remove(). */
void pci_unlock_rescan_remove(void)
{
	mutex_unlock(&pci_rescan_remove_lock);
}
EXPORT_SYMBOL_GPL(pci_unlock_rescan_remove);
2061
2062 static int __init pci_sort_bf_cmp(const struct device *d_a,
2063 const struct device *d_b)
2064 {
2065 const struct pci_dev *a = to_pci_dev(d_a);
2066 const struct pci_dev *b = to_pci_dev(d_b);
2067
2068 if (pci_domain_nr(a->bus) < pci_domain_nr(b->bus)) return -1;
2069 else if (pci_domain_nr(a->bus) > pci_domain_nr(b->bus)) return 1;
2070
2071 if (a->bus->number < b->bus->number) return -1;
2072 else if (a->bus->number > b->bus->number) return 1;
2073
2074 if (a->devfn < b->devfn) return -1;
2075 else if (a->devfn > b->devfn) return 1;
2076
2077 return 0;
2078 }
2079
/*
 * Re-sort the pci_bus_type device list into breadth-first order
 * (domain, bus, devfn) using pci_sort_bf_cmp().
 */
void __init pci_sort_breadthfirst(void)
{
	bus_sort_breadthfirst(&pci_bus_type, &pci_sort_bf_cmp);
}