Source: drivers/pci/probe.c from mirror_ubuntu-zesty-kernel.git (git.proxmox.com gitweb mirror).
Patch context: "PCI: Don't set BAR to zero if dma_addr_t is too small".
1 /*
2 * probe.c - PCI detection and setup code
3 */
4
5 #include <linux/kernel.h>
6 #include <linux/delay.h>
7 #include <linux/init.h>
8 #include <linux/pci.h>
9 #include <linux/slab.h>
10 #include <linux/module.h>
11 #include <linux/cpumask.h>
12 #include <linux/pci-aspm.h>
13 #include <asm-generic/pci-bridge.h>
14 #include "pci.h"
15
16 #define CARDBUS_LATENCY_TIMER 176 /* secondary latency timer */
17 #define CARDBUS_RESERVE_BUSNR 3
18
/* Default bus number resource: the whole 0-255 range for the root domain. */
static struct resource busn_resource = {
	.name	= "PCI busn",
	.start	= 0,
	.end	= 255,
	.flags	= IORESOURCE_BUS,
};
25
/* Ugh. Need to stop exporting this to modules. */
LIST_HEAD(pci_root_buses);
EXPORT_SYMBOL(pci_root_buses);

/* List of bus number resources, one entry per PCI domain seen so far. */
static LIST_HEAD(pci_domain_busn_res_list);

/* Bus number range owned by a single PCI domain. */
struct pci_domain_busn_res {
	struct list_head list;
	struct resource res;
	int domain_nr;
};
37
38 static struct resource *get_pci_domain_busn_res(int domain_nr)
39 {
40 struct pci_domain_busn_res *r;
41
42 list_for_each_entry(r, &pci_domain_busn_res_list, list)
43 if (r->domain_nr == domain_nr)
44 return &r->res;
45
46 r = kzalloc(sizeof(*r), GFP_KERNEL);
47 if (!r)
48 return NULL;
49
50 r->domain_nr = domain_nr;
51 r->res.start = 0;
52 r->res.end = 0xff;
53 r->res.flags = IORESOURCE_BUS | IORESOURCE_PCI_FIXED;
54
55 list_add_tail(&r->list, &pci_domain_busn_res_list);
56
57 return &r->res;
58 }
59
/* bus_find_device() match callback that accepts every device. */
static int find_anything(struct device *dev, void *data)
{
	return 1;
}
64
65 /*
66 * Some device drivers need know if pci is initiated.
67 * Basically, we think pci is not initiated when there
68 * is no device to be found on the pci_bus_type.
69 */
70 int no_pci_devices(void)
71 {
72 struct device *dev;
73 int no_devices;
74
75 dev = bus_find_device(&pci_bus_type, NULL, NULL, find_anything);
76 no_devices = (dev == NULL);
77 put_device(dev);
78 return no_devices;
79 }
80 EXPORT_SYMBOL(no_pci_devices);
81
/*
 * PCI Bus Class
 */
static void release_pcibus_dev(struct device *dev)
{
	struct pci_bus *pci_bus = to_pci_bus(dev);

	/* Drop the reference taken on the bridge when the bus was created */
	if (pci_bus->bridge)
		put_device(pci_bus->bridge);
	pci_bus_remove_resources(pci_bus);
	pci_release_bus_of_node(pci_bus);
	kfree(pci_bus);
}
95
/* Class for pci_bus devices; released via release_pcibus_dev() above. */
static struct class pcibus_class = {
	.name		= "pci_bus",
	.dev_release	= &release_pcibus_dev,
	.dev_groups	= pcibus_groups,
};

/* Register the pci_bus class early, before device probing starts. */
static int __init pcibus_class_init(void)
{
	return class_register(&pcibus_class);
}
postcore_initcall(pcibus_class_init);
107
108 static u64 pci_size(u64 base, u64 maxbase, u64 mask)
109 {
110 u64 size = mask & maxbase; /* Find the significant bits */
111 if (!size)
112 return 0;
113
114 /* Get the lowest of them to find the decode size, and
115 from that the extent. */
116 size = (size & ~(size-1)) - 1;
117
118 /* base == maxbase can be valid only if the BAR has
119 already been programmed with all 1s. */
120 if (base == maxbase && ((base | size) & mask) != mask)
121 return 0;
122
123 return size;
124 }
125
126 static inline unsigned long decode_bar(struct pci_dev *dev, u32 bar)
127 {
128 u32 mem_type;
129 unsigned long flags;
130
131 if ((bar & PCI_BASE_ADDRESS_SPACE) == PCI_BASE_ADDRESS_SPACE_IO) {
132 flags = bar & ~PCI_BASE_ADDRESS_IO_MASK;
133 flags |= IORESOURCE_IO;
134 return flags;
135 }
136
137 flags = bar & ~PCI_BASE_ADDRESS_MEM_MASK;
138 flags |= IORESOURCE_MEM;
139 if (flags & PCI_BASE_ADDRESS_MEM_PREFETCH)
140 flags |= IORESOURCE_PREFETCH;
141
142 mem_type = bar & PCI_BASE_ADDRESS_MEM_TYPE_MASK;
143 switch (mem_type) {
144 case PCI_BASE_ADDRESS_MEM_TYPE_32:
145 break;
146 case PCI_BASE_ADDRESS_MEM_TYPE_1M:
147 /* 1M mem BAR treated as 32-bit BAR */
148 break;
149 case PCI_BASE_ADDRESS_MEM_TYPE_64:
150 flags |= IORESOURCE_MEM_64;
151 break;
152 default:
153 /* mem unknown type treated as 32-bit BAR */
154 break;
155 }
156 return flags;
157 }
158
#define PCI_COMMAND_DECODE_ENABLE	(PCI_COMMAND_MEMORY | PCI_COMMAND_IO)

/**
 * pci_read_base - read a PCI BAR
 * @dev: the PCI device
 * @type: type of the BAR
 * @res: resource buffer to be filled in
 * @pos: BAR position in the config space
 *
 * Sizes the BAR by writing all 1s, reading the value back and restoring
 * the original contents, then converts the result into @res.
 * Returns 1 if the BAR is 64-bit, or 0 if 32-bit.
 */
int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type,
		    struct resource *res, unsigned int pos)
{
	u32 l, sz, mask;
	u64 l64, sz64, mask64;
	u16 orig_cmd;
	struct pci_bus_region region, inverted_region;
	bool bar_too_big = false, bar_too_high = false;

	mask = type ? PCI_ROM_ADDRESS_MASK : ~0;

	/* No printks while decoding is disabled! */
	if (!dev->mmio_always_on) {
		pci_read_config_word(dev, PCI_COMMAND, &orig_cmd);
		if (orig_cmd & PCI_COMMAND_DECODE_ENABLE) {
			pci_write_config_word(dev, PCI_COMMAND,
				orig_cmd & ~PCI_COMMAND_DECODE_ENABLE);
		}
	}

	res->name = pci_name(dev);

	/* Sizing sequence: save, write all 1s, read back, restore */
	pci_read_config_dword(dev, pos, &l);
	pci_write_config_dword(dev, pos, l | mask);
	pci_read_config_dword(dev, pos, &sz);
	pci_write_config_dword(dev, pos, l);

	/*
	 * All bits set in sz means the device isn't working properly.
	 * If the BAR isn't implemented, all bits must be 0.  If it's a
	 * memory BAR or a ROM, bit 0 must be clear; if it's an io BAR, bit
	 * 1 must be clear.
	 */
	if (!sz || sz == 0xffffffff)
		goto fail;

	/*
	 * I don't know how l can have all bits set.  Copied from old code.
	 * Maybe it fixes a bug on some ancient platform.
	 */
	if (l == 0xffffffff)
		l = 0;

	if (type == pci_bar_unknown) {
		res->flags = decode_bar(dev, l);
		res->flags |= IORESOURCE_SIZEALIGN;
		if (res->flags & IORESOURCE_IO) {
			l &= PCI_BASE_ADDRESS_IO_MASK;
			mask = PCI_BASE_ADDRESS_IO_MASK & (u32) IO_SPACE_LIMIT;
		} else {
			l &= PCI_BASE_ADDRESS_MEM_MASK;
			mask = (u32)PCI_BASE_ADDRESS_MEM_MASK;
		}
	} else {
		/* Expansion ROM BAR: low bit is the ROM enable */
		res->flags |= (l & IORESOURCE_ROM_ENABLE);
		l &= PCI_ROM_ADDRESS_MASK;
		mask = (u32)PCI_ROM_ADDRESS_MASK;
	}

	if (res->flags & IORESOURCE_MEM_64) {
		/* Repeat the sizing sequence on the upper dword */
		l64 = l;
		sz64 = sz;
		mask64 = mask | (u64)~0 << 32;

		pci_read_config_dword(dev, pos + 4, &l);
		pci_write_config_dword(dev, pos + 4, ~0);
		pci_read_config_dword(dev, pos + 4, &sz);
		pci_write_config_dword(dev, pos + 4, l);

		l64 |= ((u64)l << 32);
		sz64 |= ((u64)sz << 32);

		sz64 = pci_size(l64, sz64, mask64);

		if (!sz64)
			goto fail;

		if ((sizeof(dma_addr_t) < 8 || sizeof(resource_size_t) < 8) &&
		    sz64 > 0x100000000ULL) {
			/* BAR can never fit this kernel's address types */
			res->flags |= IORESOURCE_UNSET | IORESOURCE_DISABLED;
			res->start = 0;
			res->end = 0;
			bar_too_big = true;
			goto out;
		}

		if ((sizeof(dma_addr_t) < 8) && l) {
			/* Above 32-bit boundary; try to reallocate */
			res->flags |= IORESOURCE_UNSET;
			res->start = 0;
			res->end = sz64;
			bar_too_high = true;
			goto out;
		} else {
			region.start = l64;
			region.end = l64 + sz64;
		}
	} else {
		sz = pci_size(l, sz, mask);

		if (!sz)
			goto fail;

		region.start = l;
		region.end = l + sz;
	}

	pcibios_bus_to_resource(dev->bus, res, &region);
	pcibios_resource_to_bus(dev->bus, &inverted_region, res);

	/*
	 * If "A" is a BAR value (a bus address), "bus_to_resource(A)" is
	 * the corresponding resource address (the physical address used by
	 * the CPU.  Converting that resource address back to a bus address
	 * should yield the original BAR value:
	 *
	 *     resource_to_bus(bus_to_resource(A)) == A
	 *
	 * If it doesn't, CPU accesses to "bus_to_resource(A)" will not
	 * be claimed by the device.
	 */
	if (inverted_region.start != region.start) {
		dev_info(&dev->dev, "reg 0x%x: initial BAR value %pa invalid; forcing reassignment\n",
			 pos, &region.start);
		res->flags |= IORESOURCE_UNSET;
		res->end -= res->start;
		res->start = 0;
	}

	goto out;


fail:
	res->flags = 0;
out:
	/* Re-enable decoding if we disabled it at entry */
	if (!dev->mmio_always_on &&
	    (orig_cmd & PCI_COMMAND_DECODE_ENABLE))
		pci_write_config_word(dev, PCI_COMMAND, orig_cmd);

	/* Printks deferred to here, after decoding is back on */
	if (bar_too_big)
		dev_err(&dev->dev, "reg 0x%x: can't handle BAR larger than 4GB (size %#010llx)\n",
			pos, (unsigned long long) sz64);
	if (bar_too_high)
		dev_info(&dev->dev, "reg 0x%x: can't handle BAR above 4G (bus address %#010llx)\n",
			 pos, (unsigned long long) l64);
	if (res->flags)
		dev_printk(KERN_DEBUG, &dev->dev, "reg 0x%x: %pR\n", pos, res);

	return (res->flags & IORESOURCE_MEM_64) ? 1 : 0;
}
320
/*
 * Read up to @howmany BARs into dev->resource[], plus the expansion
 * ROM BAR at config offset @rom (0 means no ROM BAR).
 */
static void pci_read_bases(struct pci_dev *dev, unsigned int howmany, int rom)
{
	unsigned int pos, reg;

	for (pos = 0; pos < howmany; pos++) {
		struct resource *res = &dev->resource[pos];
		reg = PCI_BASE_ADDRESS_0 + (pos << 2);
		/*
		 * __pci_read_base() returns 1 for a 64-bit BAR, which
		 * occupies two BAR slots, so skip the upper half.
		 */
		pos += __pci_read_base(dev, pci_bar_unknown, res, reg);
	}

	if (rom) {
		struct resource *res = &dev->resource[PCI_ROM_RESOURCE];
		dev->rom_base_reg = rom;
		res->flags = IORESOURCE_MEM | IORESOURCE_PREFETCH |
				IORESOURCE_READONLY | IORESOURCE_CACHEABLE |
				IORESOURCE_SIZEALIGN;
		__pci_read_base(dev, pci_bar_mem32, res, rom);
	}
}
340
/*
 * Read the bridge's I/O window registers into child->resource[0].
 * Handles both the normal 4K granularity and the optional 1K
 * granularity, as well as 32-bit I/O windows via the UPPER16 regs.
 */
static void pci_read_bridge_io(struct pci_bus *child)
{
	struct pci_dev *dev = child->self;
	u8 io_base_lo, io_limit_lo;
	unsigned long io_mask, io_granularity, base, limit;
	struct pci_bus_region region;
	struct resource *res;

	io_mask = PCI_IO_RANGE_MASK;
	io_granularity = 0x1000;
	if (dev->io_window_1k) {
		/* Support 1K I/O space granularity */
		io_mask = PCI_IO_1K_RANGE_MASK;
		io_granularity = 0x400;
	}

	res = child->resource[0];
	pci_read_config_byte(dev, PCI_IO_BASE, &io_base_lo);
	pci_read_config_byte(dev, PCI_IO_LIMIT, &io_limit_lo);
	/* Registers hold bits 15:12 (or 15:10) of the window address */
	base = (io_base_lo & io_mask) << 8;
	limit = (io_limit_lo & io_mask) << 8;

	if ((io_base_lo & PCI_IO_RANGE_TYPE_MASK) == PCI_IO_RANGE_TYPE_32) {
		u16 io_base_hi, io_limit_hi;

		pci_read_config_word(dev, PCI_IO_BASE_UPPER16, &io_base_hi);
		pci_read_config_word(dev, PCI_IO_LIMIT_UPPER16, &io_limit_hi);
		base |= ((unsigned long) io_base_hi << 16);
		limit |= ((unsigned long) io_limit_hi << 16);
	}

	/* base > limit means the window is disabled */
	if (base <= limit) {
		res->flags = (io_base_lo & PCI_IO_RANGE_TYPE_MASK) | IORESOURCE_IO;
		region.start = base;
		region.end = limit + io_granularity - 1;
		pcibios_bus_to_resource(dev->bus, res, &region);
		dev_printk(KERN_DEBUG, &dev->dev, "  bridge window %pR\n", res);
	}
}
380
381 static void pci_read_bridge_mmio(struct pci_bus *child)
382 {
383 struct pci_dev *dev = child->self;
384 u16 mem_base_lo, mem_limit_lo;
385 unsigned long base, limit;
386 struct pci_bus_region region;
387 struct resource *res;
388
389 res = child->resource[1];
390 pci_read_config_word(dev, PCI_MEMORY_BASE, &mem_base_lo);
391 pci_read_config_word(dev, PCI_MEMORY_LIMIT, &mem_limit_lo);
392 base = ((unsigned long) mem_base_lo & PCI_MEMORY_RANGE_MASK) << 16;
393 limit = ((unsigned long) mem_limit_lo & PCI_MEMORY_RANGE_MASK) << 16;
394 if (base <= limit) {
395 res->flags = (mem_base_lo & PCI_MEMORY_RANGE_TYPE_MASK) | IORESOURCE_MEM;
396 region.start = base;
397 region.end = limit + 0xfffff;
398 pcibios_bus_to_resource(dev->bus, res, &region);
399 dev_printk(KERN_DEBUG, &dev->dev, " bridge window %pR\n", res);
400 }
401 }
402
/*
 * Read the bridge's prefetchable memory window registers into
 * child->resource[2].  The window may be 64-bit, in which case the
 * UPPER32 registers supply the high halves of base and limit.
 */
static void pci_read_bridge_mmio_pref(struct pci_bus *child)
{
	struct pci_dev *dev = child->self;
	u16 mem_base_lo, mem_limit_lo;
	unsigned long base, limit;
	struct pci_bus_region region;
	struct resource *res;

	res = child->resource[2];
	pci_read_config_word(dev, PCI_PREF_MEMORY_BASE, &mem_base_lo);
	pci_read_config_word(dev, PCI_PREF_MEMORY_LIMIT, &mem_limit_lo);
	/* Registers hold bits 31:20 of the window addresses */
	base = ((unsigned long) mem_base_lo & PCI_PREF_RANGE_MASK) << 16;
	limit = ((unsigned long) mem_limit_lo & PCI_PREF_RANGE_MASK) << 16;

	if ((mem_base_lo & PCI_PREF_RANGE_TYPE_MASK) == PCI_PREF_RANGE_TYPE_64) {
		u32 mem_base_hi, mem_limit_hi;

		pci_read_config_dword(dev, PCI_PREF_BASE_UPPER32, &mem_base_hi);
		pci_read_config_dword(dev, PCI_PREF_LIMIT_UPPER32, &mem_limit_hi);

		/*
		 * Some bridges set the base > limit by default, and some
		 * (broken) BIOSes do not initialize them.  If we find
		 * this, just assume they are not being used.
		 */
		if (mem_base_hi <= mem_limit_hi) {
#if BITS_PER_LONG == 64
			base |= ((unsigned long) mem_base_hi) << 32;
			limit |= ((unsigned long) mem_limit_hi) << 32;
#else
			/* 32-bit kernel cannot address a >4G window */
			if (mem_base_hi || mem_limit_hi) {
				dev_err(&dev->dev, "can't handle 64-bit "
					"address space for bridge\n");
				return;
			}
#endif
		}
	}
	/* base > limit means the window is disabled */
	if (base <= limit) {
		res->flags = (mem_base_lo & PCI_PREF_RANGE_TYPE_MASK) |
			IORESOURCE_MEM | IORESOURCE_PREFETCH;
		if (res->flags & PCI_PREF_RANGE_TYPE_64)
			res->flags |= IORESOURCE_MEM_64;
		region.start = base;
		region.end = limit + 0xfffff;	/* 1MB granularity */
		pcibios_bus_to_resource(dev->bus, res, &region);
		dev_printk(KERN_DEBUG, &dev->dev, "  bridge window %pR\n", res);
	}
}
452
/*
 * Populate a child bus's window resources from its bridge's config
 * space.  For transparent (subtractive-decode) bridges, the parent's
 * resources are additionally attached to the child.
 */
void pci_read_bridge_bases(struct pci_bus *child)
{
	struct pci_dev *dev = child->self;
	struct resource *res;
	int i;

	if (pci_is_root_bus(child))	/* It's a host bus, nothing to read */
		return;

	dev_info(&dev->dev, "PCI bridge to %pR%s\n",
		 &child->busn_res,
		 dev->transparent ? " (subtractive decode)" : "");

	/* Point the child's windows at the bridge's window resources */
	pci_bus_remove_resources(child);
	for (i = 0; i < PCI_BRIDGE_RESOURCE_NUM; i++)
		child->resource[i] = &dev->resource[PCI_BRIDGE_RESOURCES+i];

	pci_read_bridge_io(child);
	pci_read_bridge_mmio(child);
	pci_read_bridge_mmio_pref(child);

	if (dev->transparent) {
		pci_bus_for_each_resource(child->parent, res, i) {
			if (res) {
				pci_bus_add_resource(child, res,
						     PCI_SUBTRACTIVE_DECODE);
				dev_printk(KERN_DEBUG, &dev->dev,
					   "  bridge window %pR (subtractive decode)\n",
					   res);
			}
		}
	}
}
486
487 static struct pci_bus *pci_alloc_bus(void)
488 {
489 struct pci_bus *b;
490
491 b = kzalloc(sizeof(*b), GFP_KERNEL);
492 if (!b)
493 return NULL;
494
495 INIT_LIST_HEAD(&b->node);
496 INIT_LIST_HEAD(&b->children);
497 INIT_LIST_HEAD(&b->devices);
498 INIT_LIST_HEAD(&b->slots);
499 INIT_LIST_HEAD(&b->resources);
500 b->max_bus_speed = PCI_SPEED_UNKNOWN;
501 b->cur_bus_speed = PCI_SPEED_UNKNOWN;
502 return b;
503 }
504
/* Device release callback for host bridges; frees windows and struct. */
static void pci_release_host_bridge_dev(struct device *dev)
{
	struct pci_host_bridge *bridge = to_pci_host_bridge(dev);

	/* Give the creator a chance to release its own state first */
	if (bridge->release_fn)
		bridge->release_fn(bridge);

	pci_free_resource_list(&bridge->windows);

	kfree(bridge);
}
516
/* Allocate a host bridge tied to root bus @b; NULL on OOM. */
static struct pci_host_bridge *pci_alloc_host_bridge(struct pci_bus *b)
{
	struct pci_host_bridge *bridge;

	bridge = kzalloc(sizeof(*bridge), GFP_KERNEL);
	if (!bridge)
		return NULL;

	INIT_LIST_HEAD(&bridge->windows);
	bridge->bus = b;
	return bridge;
}
529
/* PCI-X bus speeds, indexed by the PCI_X_SSTATUS_FREQ register field. */
static const unsigned char pcix_bus_speed[] = {
	PCI_SPEED_UNKNOWN,		/* 0 */
	PCI_SPEED_66MHz_PCIX,		/* 1 */
	PCI_SPEED_100MHz_PCIX,		/* 2 */
	PCI_SPEED_133MHz_PCIX,		/* 3 */
	PCI_SPEED_UNKNOWN,		/* 4 */
	PCI_SPEED_66MHz_PCIX_ECC,	/* 5 */
	PCI_SPEED_100MHz_PCIX_ECC,	/* 6 */
	PCI_SPEED_133MHz_PCIX_ECC,	/* 7 */
	PCI_SPEED_UNKNOWN,		/* 8 */
	PCI_SPEED_66MHz_PCIX_266,	/* 9 */
	PCI_SPEED_100MHz_PCIX_266,	/* A */
	PCI_SPEED_133MHz_PCIX_266,	/* B */
	PCI_SPEED_UNKNOWN,		/* C */
	PCI_SPEED_66MHz_PCIX_533,	/* D */
	PCI_SPEED_100MHz_PCIX_533,	/* E */
	PCI_SPEED_133MHz_PCIX_533	/* F */
};

/* PCIe link speeds, indexed by the LNKCAP SLS / LNKSTA CLS field. */
const unsigned char pcie_link_speed[] = {
	PCI_SPEED_UNKNOWN,		/* 0 */
	PCIE_SPEED_2_5GT,		/* 1 */
	PCIE_SPEED_5_0GT,		/* 2 */
	PCIE_SPEED_8_0GT,		/* 3 */
	PCI_SPEED_UNKNOWN,		/* 4 */
	PCI_SPEED_UNKNOWN,		/* 5 */
	PCI_SPEED_UNKNOWN,		/* 6 */
	PCI_SPEED_UNKNOWN,		/* 7 */
	PCI_SPEED_UNKNOWN,		/* 8 */
	PCI_SPEED_UNKNOWN,		/* 9 */
	PCI_SPEED_UNKNOWN,		/* A */
	PCI_SPEED_UNKNOWN,		/* B */
	PCI_SPEED_UNKNOWN,		/* C */
	PCI_SPEED_UNKNOWN,		/* D */
	PCI_SPEED_UNKNOWN,		/* E */
	PCI_SPEED_UNKNOWN		/* F */
};
567
/* Update the bus's current speed from a PCIe Link Status value. */
void pcie_update_link_speed(struct pci_bus *bus, u16 linksta)
{
	bus->cur_bus_speed = pcie_link_speed[linksta & PCI_EXP_LNKSTA_CLS];
}
EXPORT_SYMBOL_GPL(pcie_update_link_speed);
573
/* AGP data-rate table used by agp_speed(); index 0 means unknown. */
static unsigned char agp_speeds[] = {
	AGP_UNKNOWN,
	AGP_1X,
	AGP_2X,
	AGP_4X,
	AGP_8X
};
581
582 static enum pci_bus_speed agp_speed(int agp3, int agpstat)
583 {
584 int index = 0;
585
586 if (agpstat & 4)
587 index = 3;
588 else if (agpstat & 2)
589 index = 2;
590 else if (agpstat & 1)
591 index = 1;
592 else
593 goto out;
594
595 if (agp3) {
596 index += 2;
597 if (index == 5)
598 index = 0;
599 }
600
601 out:
602 return agp_speeds[index];
603 }
604
605
/*
 * Determine max and current bus speed for @bus from its upstream
 * bridge, trying AGP, then PCI-X, then PCIe capabilities in turn.
 */
static void pci_set_bus_speed(struct pci_bus *bus)
{
	struct pci_dev *bridge = bus->self;
	int pos;

	pos = pci_find_capability(bridge, PCI_CAP_ID_AGP);
	if (!pos)
		pos = pci_find_capability(bridge, PCI_CAP_ID_AGP3);
	if (pos) {
		u32 agpstat, agpcmd;

		/* Status bit 3 selects AGP3 mode; bits 2:0 are the rate */
		pci_read_config_dword(bridge, pos + PCI_AGP_STATUS, &agpstat);
		bus->max_bus_speed = agp_speed(agpstat & 8, agpstat & 7);

		pci_read_config_dword(bridge, pos + PCI_AGP_COMMAND, &agpcmd);
		bus->cur_bus_speed = agp_speed(agpstat & 8, agpcmd & 7);
	}

	pos = pci_find_capability(bridge, PCI_CAP_ID_PCIX);
	if (pos) {
		u16 status;
		enum pci_bus_speed max;

		pci_read_config_word(bridge, pos + PCI_X_BRIDGE_SSTATUS,
				     &status);

		/* Max speed from the highest capability bit that is set */
		if (status & PCI_X_SSTATUS_533MHZ) {
			max = PCI_SPEED_133MHz_PCIX_533;
		} else if (status & PCI_X_SSTATUS_266MHZ) {
			max = PCI_SPEED_133MHz_PCIX_266;
		} else if (status & PCI_X_SSTATUS_133MHZ) {
			if ((status & PCI_X_SSTATUS_VERS) == PCI_X_SSTATUS_V2) {
				max = PCI_SPEED_133MHz_PCIX_ECC;
			} else {
				max = PCI_SPEED_133MHz_PCIX;
			}
		} else {
			max = PCI_SPEED_66MHz_PCIX;
		}

		bus->max_bus_speed = max;
		/* Current speed comes from the secondary bus FREQ field */
		bus->cur_bus_speed = pcix_bus_speed[
			(status & PCI_X_SSTATUS_FREQ) >> 6];

		return;
	}

	if (pci_is_pcie(bridge)) {
		u32 linkcap;
		u16 linksta;

		pcie_capability_read_dword(bridge, PCI_EXP_LNKCAP, &linkcap);
		bus->max_bus_speed = pcie_link_speed[linkcap & PCI_EXP_LNKCAP_SLS];

		pcie_capability_read_word(bridge, PCI_EXP_LNKSTA, &linksta);
		pcie_update_link_speed(bus, linksta);
	}
}
664
665
/*
 * Allocate a child bus under @parent with number @busnr, inheriting
 * ops/msi/sysdata from the parent.  @bridge may be NULL for a bus with
 * no upstream bridge device.  Returns NULL on allocation failure.
 */
static struct pci_bus *pci_alloc_child_bus(struct pci_bus *parent,
					   struct pci_dev *bridge, int busnr)
{
	struct pci_bus *child;
	int i;
	int ret;

	/*
	 * Allocate a new bus, and inherit stuff from the parent..
	 */
	child = pci_alloc_bus();
	if (!child)
		return NULL;

	child->parent = parent;
	child->ops = parent->ops;
	child->msi = parent->msi;
	child->sysdata = parent->sysdata;
	child->bus_flags = parent->bus_flags;

	/* initialize some portions of the bus device, but don't register it
	 * now as the parent is not properly set up yet.
	 */
	child->dev.class = &pcibus_class;
	dev_set_name(&child->dev, "%04x:%02x", pci_domain_nr(child), busnr);

	/*
	 * Set up the primary, secondary and subordinate
	 * bus numbers.
	 */
	child->number = child->busn_res.start = busnr;
	child->primary = parent->busn_res.start;
	/* Subordinate defaults to the maximum until the scan shrinks it */
	child->busn_res.end = 0xff;

	if (!bridge) {
		child->dev.parent = parent->bridge;
		goto add_dev;
	}

	/* Take a reference on the bridge for the lifetime of the bus */
	child->self = bridge;
	child->bridge = get_device(&bridge->dev);
	child->dev.parent = child->bridge;
	pci_set_bus_of_node(child);
	pci_set_bus_speed(child);

	/* Set up default resource pointers and names.. */
	for (i = 0; i < PCI_BRIDGE_RESOURCE_NUM; i++) {
		child->resource[i] = &bridge->resource[PCI_BRIDGE_RESOURCES+i];
		child->resource[i]->name = child->name;
	}
	bridge->subordinate = child;

add_dev:
	ret = device_register(&child->dev);
	WARN_ON(ret < 0);

	pcibios_add_bus(child);

	/* Create legacy_io and legacy_mem files for this bus */
	pci_create_legacy_files(child);

	return child;
}
729
730 struct pci_bus *__ref pci_add_new_bus(struct pci_bus *parent, struct pci_dev *dev, int busnr)
731 {
732 struct pci_bus *child;
733
734 child = pci_alloc_child_bus(parent, dev, busnr);
735 if (child) {
736 down_write(&pci_bus_sem);
737 list_add_tail(&child->node, &parent->children);
738 up_write(&pci_bus_sem);
739 }
740 return child;
741 }
742
/*
 * If it's a bridge, configure it and scan the bus behind it.
 * For CardBus bridges, we don't scan behind as the devices will
 * be handled by the bridge driver itself.
 *
 * We need to process bridges in two passes -- first we scan those
 * already configured by the BIOS and after we are done with all of
 * them, we proceed to assigning numbers to the remaining buses in
 * order to avoid overlaps between old and new bus numbers.
 *
 * Returns the highest bus number used so far (at least @max).
 */
int pci_scan_bridge(struct pci_bus *bus, struct pci_dev *dev, int max, int pass)
{
	struct pci_bus *child;
	int is_cardbus = (dev->hdr_type == PCI_HEADER_TYPE_CARDBUS);
	u32 buses, i, j = 0;
	u16 bctl;
	u8 primary, secondary, subordinate;
	int broken = 0;

	/* PCI_PRIMARY_BUS packs primary/secondary/subordinate in one dword */
	pci_read_config_dword(dev, PCI_PRIMARY_BUS, &buses);
	primary = buses & 0xFF;
	secondary = (buses >> 8) & 0xFF;
	subordinate = (buses >> 16) & 0xFF;

	dev_dbg(&dev->dev, "scanning [bus %02x-%02x] behind bridge, pass %d\n",
		secondary, subordinate, pass);

	if (!primary && (primary != bus->number) && secondary && subordinate) {
		dev_warn(&dev->dev, "Primary bus is hard wired to 0\n");
		primary = bus->number;
	}

	/* Check if setup is sensible at all */
	if (!pass &&
	    (primary != bus->number || secondary <= bus->number ||
	     secondary > subordinate || subordinate > bus->busn_res.end)) {
		dev_info(&dev->dev, "bridge configuration invalid ([bus %02x-%02x]), reconfiguring\n",
			 secondary, subordinate);
		broken = 1;
	}

	/* Disable MasterAbortMode during probing to avoid reporting
	   of bus errors (in some architectures) */
	pci_read_config_word(dev, PCI_BRIDGE_CONTROL, &bctl);
	pci_write_config_word(dev, PCI_BRIDGE_CONTROL,
			      bctl & ~PCI_BRIDGE_CTL_MASTER_ABORT);

	if ((secondary || subordinate) && !pcibios_assign_all_busses() &&
	    !is_cardbus && !broken) {
		unsigned int cmax;
		/*
		 * Bus already configured by firmware, process it in the first
		 * pass and just note the configuration.
		 */
		if (pass)
			goto out;

		/*
		 * The bus might already exist for two reasons: Either we are
		 * rescanning the bus or the bus is reachable through more than
		 * one bridge. The second case can happen with the i450NX
		 * chipset.
		 */
		child = pci_find_bus(pci_domain_nr(bus), secondary);
		if (!child) {
			child = pci_add_new_bus(bus, dev, secondary);
			if (!child)
				goto out;
			child->primary = primary;
			pci_bus_insert_busn_res(child, secondary, subordinate);
			child->bridge_ctl = bctl;
		}

		cmax = pci_scan_child_bus(child);
		if (cmax > subordinate)
			dev_warn(&dev->dev, "bridge has subordinate %02x but max busn %02x\n",
				 subordinate, cmax);
		/* subordinate should equal child->busn_res.end */
		if (subordinate > max)
			max = subordinate;
	} else {
		/*
		 * We need to assign a number to this bus which we always
		 * do in the second pass.
		 */
		if (!pass) {
			if (pcibios_assign_all_busses() || broken || is_cardbus)
				/* Temporarily disable forwarding of the
				   configuration cycles on all bridges in
				   this bus segment to avoid possible
				   conflicts in the second pass between two
				   bridges programmed with overlapping
				   bus ranges. */
				pci_write_config_dword(dev, PCI_PRIMARY_BUS,
						       buses & ~0xffffff);
			goto out;
		}

		if (max >= bus->busn_res.end) {
			dev_warn(&dev->dev, "can't allocate child bus %02x from %pR\n",
				 max, &bus->busn_res);
			goto out;
		}

		/* Clear errors */
		pci_write_config_word(dev, PCI_STATUS, 0xffff);

		/* The bus will already exist if we are rescanning */
		child = pci_find_bus(pci_domain_nr(bus), max+1);
		if (!child) {
			child = pci_add_new_bus(bus, dev, max+1);
			if (!child)
				goto out;
			/* Claim the rest of the range; shrunk again below */
			pci_bus_insert_busn_res(child, max+1,
						bus->busn_res.end);
		}
		max++;
		buses = (buses & 0xff000000)
		      | ((unsigned int)(child->primary)     <<  0)
		      | ((unsigned int)(child->busn_res.start)   <<  8)
		      | ((unsigned int)(child->busn_res.end) << 16);

		/*
		 * yenta.c forces a secondary latency timer of 176.
		 * Copy that behaviour here.
		 */
		if (is_cardbus) {
			buses &= ~0xff000000;
			buses |= CARDBUS_LATENCY_TIMER << 24;
		}

		/*
		 * We need to blast all three values with a single write.
		 */
		pci_write_config_dword(dev, PCI_PRIMARY_BUS, buses);

		if (!is_cardbus) {
			child->bridge_ctl = bctl;
			max = pci_scan_child_bus(child);
		} else {
			/*
			 * For CardBus bridges, we leave 4 bus numbers
			 * as cards with a PCI-to-PCI bridge can be
			 * inserted later.
			 */
			for (i=0; i<CARDBUS_RESERVE_BUSNR; i++) {
				struct pci_bus *parent = bus;
				if (pci_find_bus(pci_domain_nr(bus),
						 max+i+1))
					break;
				while (parent->parent) {
					if ((!pcibios_assign_all_busses()) &&
					    (parent->busn_res.end > max) &&
					    (parent->busn_res.end <= max+i)) {
						j = 1;
					}
					parent = parent->parent;
				}
				if (j) {
					/*
					 * Often, there are two cardbus bridges
					 * -- try to leave one valid bus number
					 * for each one.
					 */
					i /= 2;
					break;
				}
			}
			max += i;
		}
		/*
		 * Set the subordinate bus number to its real value.
		 */
		if (max > bus->busn_res.end) {
			dev_warn(&dev->dev, "max busn %02x is outside %pR\n",
				 max, &bus->busn_res);
			max = bus->busn_res.end;
		}
		pci_bus_update_busn_res_end(child, max);
		pci_write_config_byte(dev, PCI_SUBORDINATE_BUS, max);
	}

	sprintf(child->name,
		(is_cardbus ? "PCI CardBus %04x:%02x" : "PCI Bus %04x:%02x"),
		pci_domain_nr(bus), child->number);

	/* Has only triggered on CardBus, fixup is in yenta_socket */
	while (bus->parent) {
		if ((child->busn_res.end > bus->busn_res.end) ||
		    (child->number > bus->busn_res.end) ||
		    (child->number < bus->number) ||
		    (child->busn_res.end < bus->number)) {
			dev_info(&child->dev, "%pR %s "
				"hidden behind%s bridge %s %pR\n",
				&child->busn_res,
				(bus->number > child->busn_res.end &&
				 bus->busn_res.end < child->number) ?
					"wholly" : "partially",
				bus->self->transparent ? " transparent" : "",
				dev_name(&bus->dev),
				&bus->busn_res);
		}
		bus = bus->parent;
	}

out:
	/* Restore the bridge control word saved at entry */
	pci_write_config_word(dev, PCI_BRIDGE_CONTROL, bctl);

	return max;
}
953
/*
 * Read interrupt line and base address registers.
 * The architecture-dependent code can tweak these, of course.
 */
static void pci_read_irq(struct pci_dev *dev)
{
	unsigned char irq;

	pci_read_config_byte(dev, PCI_INTERRUPT_PIN, &irq);
	dev->pin = irq;
	/* Only devices that use an INTx pin have a meaningful line value */
	if (irq)
		pci_read_config_byte(dev, PCI_INTERRUPT_LINE, &irq);
	dev->irq = irq;
}
968
/*
 * Cache the PCIe capability offset, flags register and max payload
 * size on @pdev.  No-op for non-PCIe devices.
 */
void set_pcie_port_type(struct pci_dev *pdev)
{
	int pos;
	u16 reg16;

	pos = pci_find_capability(pdev, PCI_CAP_ID_EXP);
	if (!pos)
		return;
	pdev->pcie_cap = pos;
	pci_read_config_word(pdev, pos + PCI_EXP_FLAGS, &reg16);
	pdev->pcie_flags_reg = reg16;
	/* Payload capability lives in the low bits of DEVCAP */
	pci_read_config_word(pdev, pos + PCI_EXP_DEVCAP, &reg16);
	pdev->pcie_mpss = reg16 & PCI_EXP_DEVCAP_PAYLOAD;
}
983
/* Mark @pdev as a hotplug bridge if its slot is hotplug-capable. */
void set_pcie_hotplug_bridge(struct pci_dev *pdev)
{
	u32 reg32;

	pcie_capability_read_dword(pdev, PCI_EXP_SLTCAP, &reg32);
	if (reg32 & PCI_EXP_SLTCAP_HPC)
		pdev->is_hotplug_bridge = 1;
}
992
993
/**
 * pci_cfg_space_size - get the configuration space size of the PCI device.
 * @dev: PCI device
 *
 * Regular PCI devices have 256 bytes, but PCI-X 2 and PCI Express devices
 * have 4096 bytes.  Even if the device is capable, that doesn't mean we can
 * access it.  Maybe we don't have a way to generate extended config space
 * accesses, or the device is behind a reverse Express bridge.  So we try
 * reading the dword at 0x100 which must either be 0 or a valid extended
 * capability header.
 */
static int pci_cfg_space_size_ext(struct pci_dev *dev)
{
	u32 status;
	int pos = PCI_CFG_SPACE_SIZE;

	/* A failed or all-ones read means extended space is unreachable */
	if (pci_read_config_dword(dev, pos, &status) != PCIBIOS_SUCCESSFUL)
		goto fail;
	if (status == 0xffffffff)
		goto fail;

	return PCI_CFG_SPACE_EXP_SIZE;

 fail:
	return PCI_CFG_SPACE_SIZE;
}
1020
/*
 * Return the config space size for @dev: 4096 bytes for host bridges,
 * PCIe devices and mode-2 PCI-X devices, else the legacy 256 bytes.
 */
int pci_cfg_space_size(struct pci_dev *dev)
{
	int pos;
	u32 status;
	u16 class;

	class = dev->class >> 8;
	if (class == PCI_CLASS_BRIDGE_HOST)
		return pci_cfg_space_size_ext(dev);

	if (!pci_is_pcie(dev)) {
		pos = pci_find_capability(dev, PCI_CAP_ID_PCIX);
		if (!pos)
			goto fail;

		/* Only PCI-X mode 2 devices have extended config space */
		pci_read_config_dword(dev, pos + PCI_X_STATUS, &status);
		if (!(status & (PCI_X_STATUS_266MHZ | PCI_X_STATUS_533MHZ)))
			goto fail;
	}

	return pci_cfg_space_size_ext(dev);

 fail:
	return PCI_CFG_SPACE_SIZE;
}
1046
1047 #define LEGACY_IO_RESOURCE (IORESOURCE_IO | IORESOURCE_PCI_FIXED)
1048
1049 /**
1050 * pci_setup_device - fill in class and map information of a device
1051 * @dev: the device structure to fill
1052 *
1053 * Initialize the device structure with information about the device's
1054 * vendor,class,memory and IO-space addresses,IRQ lines etc.
1055 * Called at initialisation of the PCI subsystem and by CardBus services.
1056 * Returns 0 on success and negative if unknown type of device (not normal,
1057 * bridge or CardBus).
1058 */
int pci_setup_device(struct pci_dev *dev)
{
	u32 class;
	u8 hdr_type;
	struct pci_slot *slot;
	int pos = 0;
	struct pci_bus_region region;
	struct resource *res;

	/* The header type selects which config-space layout follows. */
	if (pci_read_config_byte(dev, PCI_HEADER_TYPE, &hdr_type))
		return -EIO;

	dev->sysdata = dev->bus->sysdata;
	dev->dev.parent = dev->bus->bridge;
	dev->dev.bus = &pci_bus_type;
	dev->hdr_type = hdr_type & 0x7f;		/* bits 6:0 = layout */
	dev->multifunction = !!(hdr_type & 0x80);	/* bit 7 = multifunction */
	dev->error_state = pci_channel_io_normal;
	set_pcie_port_type(dev);

	/* Attach the device to its physical slot, if one is registered. */
	list_for_each_entry(slot, &dev->bus->slots, list)
		if (PCI_SLOT(dev->devfn) == slot->number)
			dev->slot = slot;

	/* Assume 32-bit PCI; let 64-bit PCI cards (which are far rarer)
	   set this higher, assuming the system even supports it. */
	dev->dma_mask = 0xffffffff;

	dev_set_name(&dev->dev, "%04x:%02x:%02x.%d", pci_domain_nr(dev->bus),
		     dev->bus->number, PCI_SLOT(dev->devfn),
		     PCI_FUNC(dev->devfn));

	pci_read_config_dword(dev, PCI_CLASS_REVISION, &class);
	dev->revision = class & 0xff;
	dev->class = class >> 8; /* upper 3 bytes */

	dev_printk(KERN_DEBUG, &dev->dev, "[%04x:%04x] type %02x class %#08x\n",
		   dev->vendor, dev->device, dev->hdr_type, dev->class);

	/* need to have dev->class ready */
	dev->cfg_size = pci_cfg_space_size(dev);

	/* "Unknown power state" */
	dev->current_state = PCI_UNKNOWN;

	/* Early fixups, before probing the BARs */
	pci_fixup_device(pci_fixup_early, dev);
	/* device class may be changed after fixup */
	class = dev->class >> 8;

	switch (dev->hdr_type) {		    /* header type */
	case PCI_HEADER_TYPE_NORMAL:		    /* standard header */
		if (class == PCI_CLASS_BRIDGE_PCI)
			goto bad;
		pci_read_irq(dev);
		pci_read_bases(dev, 6, PCI_ROM_ADDRESS);
		pci_read_config_word(dev, PCI_SUBSYSTEM_VENDOR_ID, &dev->subsystem_vendor);
		pci_read_config_word(dev, PCI_SUBSYSTEM_ID, &dev->subsystem_device);

		/*
		 * Do the ugly legacy mode stuff here rather than broken chip
		 * quirk code. Legacy mode ATA controllers have fixed
		 * addresses. These are not always echoed in BAR0-3, and
		 * BAR0-3 in a few cases contain junk!
		 */
		if (class == PCI_CLASS_STORAGE_IDE) {
			u8 progif;
			pci_read_config_byte(dev, PCI_CLASS_PROG, &progif);
			if ((progif & 1) == 0) {
				/* Primary channel in legacy mode */
				region.start = 0x1F0;
				region.end = 0x1F7;
				res = &dev->resource[0];
				res->flags = LEGACY_IO_RESOURCE;
				pcibios_bus_to_resource(dev->bus, res, &region);
				dev_info(&dev->dev, "legacy IDE quirk: reg 0x10: %pR\n",
					 res);
				region.start = 0x3F6;
				region.end = 0x3F6;
				res = &dev->resource[1];
				res->flags = LEGACY_IO_RESOURCE;
				pcibios_bus_to_resource(dev->bus, res, &region);
				dev_info(&dev->dev, "legacy IDE quirk: reg 0x14: %pR\n",
					 res);
			}
			if ((progif & 4) == 0) {
				/* Secondary channel in legacy mode */
				region.start = 0x170;
				region.end = 0x177;
				res = &dev->resource[2];
				res->flags = LEGACY_IO_RESOURCE;
				pcibios_bus_to_resource(dev->bus, res, &region);
				dev_info(&dev->dev, "legacy IDE quirk: reg 0x18: %pR\n",
					 res);
				region.start = 0x376;
				region.end = 0x376;
				res = &dev->resource[3];
				res->flags = LEGACY_IO_RESOURCE;
				pcibios_bus_to_resource(dev->bus, res, &region);
				dev_info(&dev->dev, "legacy IDE quirk: reg 0x1c: %pR\n",
					 res);
			}
		}
		break;

	case PCI_HEADER_TYPE_BRIDGE:		    /* bridge header */
		if (class != PCI_CLASS_BRIDGE_PCI)
			goto bad;
		/* The PCI-to-PCI bridge spec requires that subtractive
		   decoding (i.e. transparent) bridge must have programming
		   interface code of 0x01. */
		pci_read_irq(dev);
		dev->transparent = ((dev->class & 0xff) == 1);
		pci_read_bases(dev, 2, PCI_ROM_ADDRESS1);
		set_pcie_hotplug_bridge(dev);
		pos = pci_find_capability(dev, PCI_CAP_ID_SSVID);
		if (pos) {
			pci_read_config_word(dev, pos + PCI_SSVID_VENDOR_ID, &dev->subsystem_vendor);
			pci_read_config_word(dev, pos + PCI_SSVID_DEVICE_ID, &dev->subsystem_device);
		}
		break;

	case PCI_HEADER_TYPE_CARDBUS:		    /* CardBus bridge header */
		if (class != PCI_CLASS_BRIDGE_CARDBUS)
			goto bad;
		pci_read_irq(dev);
		pci_read_bases(dev, 1, 0);
		pci_read_config_word(dev, PCI_CB_SUBSYSTEM_VENDOR_ID, &dev->subsystem_vendor);
		pci_read_config_word(dev, PCI_CB_SUBSYSTEM_ID, &dev->subsystem_device);
		break;

	default:				    /* unknown header */
		dev_err(&dev->dev, "unknown header type %02x, "
			"ignoring device\n", dev->hdr_type);
		return -EIO;

	bad:
		dev_err(&dev->dev, "ignoring class %#08x (doesn't match header "
			"type %02x)\n", dev->class, dev->hdr_type);
		dev->class = PCI_CLASS_NOT_DEFINED;
	}

	/* We found a fine healthy device, go go go... */
	return 0;
}
1202
/*
 * Release the capability-related state attached to @dev (VPD, SR-IOV,
 * saved-capability buffers).  Called from pci_release_dev() once the
 * last reference to the device has been dropped.
 */
static void pci_release_capabilities(struct pci_dev *dev)
{
	pci_vpd_release(dev);
	pci_iov_release(dev);
	pci_free_cap_save_buffers(dev);
}
1209
/**
 * pci_release_dev - free a pci device structure when all users of it are finished.
 * @dev: device that's been disconnected
 *
 * Will be called only by the device core when all users of this pci device are
 * done.
 */
static void pci_release_dev(struct device *dev)
{
	struct pci_dev *pci_dev;

	pci_dev = to_pci_dev(dev);
	pci_release_capabilities(pci_dev);
	pci_release_of_node(pci_dev);
	pcibios_release_device(pci_dev);
	/* Drop the bus reference taken in pci_alloc_dev(). */
	pci_bus_put(pci_dev->bus);
	/* kfree() must come last: everything above still uses pci_dev. */
	kfree(pci_dev);
}
1228
1229 struct pci_dev *pci_alloc_dev(struct pci_bus *bus)
1230 {
1231 struct pci_dev *dev;
1232
1233 dev = kzalloc(sizeof(struct pci_dev), GFP_KERNEL);
1234 if (!dev)
1235 return NULL;
1236
1237 INIT_LIST_HEAD(&dev->bus_list);
1238 dev->dev.type = &pci_dev_type;
1239 dev->bus = pci_bus_get(bus);
1240
1241 return dev;
1242 }
1243 EXPORT_SYMBOL(pci_alloc_dev);
1244
1245 bool pci_bus_read_dev_vendor_id(struct pci_bus *bus, int devfn, u32 *l,
1246 int crs_timeout)
1247 {
1248 int delay = 1;
1249
1250 if (pci_bus_read_config_dword(bus, devfn, PCI_VENDOR_ID, l))
1251 return false;
1252
1253 /* some broken boards return 0 or ~0 if a slot is empty: */
1254 if (*l == 0xffffffff || *l == 0x00000000 ||
1255 *l == 0x0000ffff || *l == 0xffff0000)
1256 return false;
1257
1258 /* Configuration request Retry Status */
1259 while (*l == 0xffff0001) {
1260 if (!crs_timeout)
1261 return false;
1262
1263 msleep(delay);
1264 delay *= 2;
1265 if (pci_bus_read_config_dword(bus, devfn, PCI_VENDOR_ID, l))
1266 return false;
1267 /* Card hasn't responded in 60 seconds? Must be stuck. */
1268 if (delay > crs_timeout) {
1269 printk(KERN_WARNING "pci %04x:%02x:%02x.%d: not "
1270 "responding\n", pci_domain_nr(bus),
1271 bus->number, PCI_SLOT(devfn),
1272 PCI_FUNC(devfn));
1273 return false;
1274 }
1275 }
1276
1277 return true;
1278 }
1279 EXPORT_SYMBOL(pci_bus_read_dev_vendor_id);
1280
1281 /*
1282 * Read the config data for a PCI device, sanity-check it
1283 * and fill in the dev structure...
1284 */
1285 static struct pci_dev *pci_scan_device(struct pci_bus *bus, int devfn)
1286 {
1287 struct pci_dev *dev;
1288 u32 l;
1289
1290 if (!pci_bus_read_dev_vendor_id(bus, devfn, &l, 60*1000))
1291 return NULL;
1292
1293 dev = pci_alloc_dev(bus);
1294 if (!dev)
1295 return NULL;
1296
1297 dev->devfn = devfn;
1298 dev->vendor = l & 0xffff;
1299 dev->device = (l >> 16) & 0xffff;
1300
1301 pci_set_of_node(dev);
1302
1303 if (pci_setup_device(dev)) {
1304 pci_bus_put(dev->bus);
1305 kfree(dev);
1306 return NULL;
1307 }
1308
1309 return dev;
1310 }
1311
/*
 * Discover and initialize the standard capabilities of @dev.  Each step
 * is a no-op when the corresponding capability is absent.  Called from
 * pci_device_add() before the device is registered with the driver core.
 */
static void pci_init_capabilities(struct pci_dev *dev)
{
	/* MSI/MSI-X list */
	pci_msi_init_pci_dev(dev);

	/* Buffers for saving PCIe and PCI-X capabilities */
	pci_allocate_cap_save_buffers(dev);

	/* Power Management */
	pci_pm_init(dev);

	/* Vital Product Data */
	pci_vpd_pci22_init(dev);

	/* Alternative Routing-ID Forwarding */
	pci_configure_ari(dev);

	/* Single Root I/O Virtualization */
	pci_iov_init(dev);

	/* Enable ACS P2P upstream forwarding */
	pci_enable_acs(dev);
}
1335
/*
 * Finish initializing @dev and make it visible: set DMA defaults, run
 * header fixups and capability setup, link the device onto @bus's device
 * list, then register it with the driver core.  Driver matching is left
 * disabled here (match_driver = false); presumably it is enabled later
 * by the bus-add path -- confirm against callers.
 */
void pci_device_add(struct pci_dev *dev, struct pci_bus *bus)
{
	int ret;

	device_initialize(&dev->dev);
	dev->dev.release = pci_release_dev;

	set_dev_node(&dev->dev, pcibus_to_node(bus));
	dev->dev.dma_mask = &dev->dma_mask;
	dev->dev.dma_parms = &dev->dma_parms;
	/* Default coherent mask: 32-bit, matching dev->dma_mask above. */
	dev->dev.coherent_dma_mask = 0xffffffffull;

	pci_set_dma_max_seg_size(dev, 65536);
	pci_set_dma_seg_boundary(dev, 0xffffffff);

	/* Fix up broken headers */
	pci_fixup_device(pci_fixup_header, dev);

	/* moved out from quirk header fixup code */
	pci_reassigndev_resource_alignment(dev);

	/* Clear the state_saved flag. */
	dev->state_saved = false;

	/* Initialize various capabilities */
	pci_init_capabilities(dev);

	/*
	 * Add the device to our list of discovered devices
	 * and the bus list for fixup functions, etc.
	 */
	down_write(&pci_bus_sem);
	list_add_tail(&dev->bus_list, &bus->devices);
	up_write(&pci_bus_sem);

	ret = pcibios_add_device(dev);
	WARN_ON(ret < 0);

	/* Notifier could use PCI capabilities */
	dev->match_driver = false;
	ret = device_add(&dev->dev);
	WARN_ON(ret < 0);
}
1379
1380 struct pci_dev *__ref pci_scan_single_device(struct pci_bus *bus, int devfn)
1381 {
1382 struct pci_dev *dev;
1383
1384 dev = pci_get_slot(bus, devfn);
1385 if (dev) {
1386 pci_dev_put(dev);
1387 return dev;
1388 }
1389
1390 dev = pci_scan_device(bus, devfn);
1391 if (!dev)
1392 return NULL;
1393
1394 pci_device_add(dev, bus);
1395
1396 return dev;
1397 }
1398 EXPORT_SYMBOL(pci_scan_single_device);
1399
1400 static unsigned next_fn(struct pci_bus *bus, struct pci_dev *dev, unsigned fn)
1401 {
1402 int pos;
1403 u16 cap = 0;
1404 unsigned next_fn;
1405
1406 if (pci_ari_enabled(bus)) {
1407 if (!dev)
1408 return 0;
1409 pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ARI);
1410 if (!pos)
1411 return 0;
1412
1413 pci_read_config_word(dev, pos + PCI_ARI_CAP, &cap);
1414 next_fn = PCI_ARI_CAP_NFN(cap);
1415 if (next_fn <= fn)
1416 return 0; /* protect against malformed list */
1417
1418 return next_fn;
1419 }
1420
1421 /* dev may be NULL for non-contiguous multifunction devices */
1422 if (!dev || dev->multifunction)
1423 return (fn + 1) % 8;
1424
1425 return 0;
1426 }
1427
1428 static int only_one_child(struct pci_bus *bus)
1429 {
1430 struct pci_dev *parent = bus->self;
1431
1432 if (!parent || !pci_is_pcie(parent))
1433 return 0;
1434 if (pci_pcie_type(parent) == PCI_EXP_TYPE_ROOT_PORT)
1435 return 1;
1436 if (pci_pcie_type(parent) == PCI_EXP_TYPE_DOWNSTREAM &&
1437 !pci_has_flag(PCI_SCAN_ALL_PCIE_DEVS))
1438 return 1;
1439 return 0;
1440 }
1441
/**
 * pci_scan_slot - scan a PCI slot on a bus for devices.
 * @bus: PCI bus to scan
 * @devfn: slot number to scan (must have zero function.)
 *
 * Scan a PCI slot on the specified PCI bus for devices, adding
 * discovered devices to the @bus->devices list.  New devices
 * will not have is_added set.
 *
 * Returns the number of new devices found.
 */
int pci_scan_slot(struct pci_bus *bus, int devfn)
{
	unsigned fn, nr = 0;
	struct pci_dev *dev;

	if (only_one_child(bus) && (devfn > 0))
		return 0; /* Already scanned the entire slot */

	/* No response at function 0: nothing further to probe here. */
	dev = pci_scan_single_device(bus, devfn);
	if (!dev)
		return 0;
	if (!dev->is_added)
		nr++;

	/* Walk the remaining functions; next_fn() is ARI-aware. */
	for (fn = next_fn(bus, dev, 0); fn > 0; fn = next_fn(bus, dev, fn)) {
		dev = pci_scan_single_device(bus, devfn + fn);
		if (dev) {
			if (!dev->is_added)
				nr++;
			dev->multifunction = 1;
		}
	}

	/* only one slot has pcie device */
	if (bus->self && nr)
		pcie_aspm_init_link_state(bus->self);

	return nr;
}
1482
/*
 * pci_walk_bus() callback: clamp *@data (the fabric-wide "smallest MPS
 * supported" value, in units of log2(payload/128)) against @dev.
 */
static int pcie_find_smpss(struct pci_dev *dev, void *data)
{
	u8 *smpss = data;

	if (!pci_is_pcie(dev))
		return 0;

	/*
	 * We don't have a way to change MPS settings on devices that have
	 * drivers attached.  A hot-added device might support only the minimum
	 * MPS setting (MPS=128).  Therefore, if the fabric contains a bridge
	 * where devices may be hot-added, we limit the fabric MPS to 128 so
	 * hot-added devices will work correctly.
	 *
	 * However, if we hot-add a device to a slot directly below a Root
	 * Port, it's impossible for there to be other existing devices below
	 * the port.  We don't limit the MPS in this case because we can
	 * reconfigure MPS on both the Root Port and the hot-added device,
	 * and there are no other devices involved.
	 *
	 * Note that this PCIE_BUS_SAFE path assumes no peer-to-peer DMA.
	 */
	if (dev->is_hotplug_bridge &&
	    pci_pcie_type(dev) != PCI_EXP_TYPE_ROOT_PORT)
		*smpss = 0;

	/* Then take the minimum with this device's own capability. */
	if (*smpss > dev->pcie_mpss)
		*smpss = dev->pcie_mpss;

	return 0;
}
1514
/*
 * Program the Max Payload Size of @dev to @mps bytes.  In
 * PCIE_BUS_PERFORMANCE mode the caller's @mps is ignored and recomputed
 * from the device's own capability and its upstream bridge (see below).
 */
static void pcie_write_mps(struct pci_dev *dev, int mps)
{
	int rc;

	if (pcie_bus_config == PCIE_BUS_PERFORMANCE) {
		mps = 128 << dev->pcie_mpss;

		if (pci_pcie_type(dev) != PCI_EXP_TYPE_ROOT_PORT &&
		    dev->bus->self)
			/* For "Performance", the assumption is made that
			 * downstream communication will never be larger than
			 * the MRRS.  So, the MPS only needs to be configured
			 * for the upstream communication.  This being the case,
			 * walk from the top down and set the MPS of the child
			 * to that of the parent bus.
			 *
			 * Configure the device MPS with the smaller of the
			 * device MPSS or the bridge MPS (which is assumed to be
			 * properly configured at this point to the largest
			 * allowable MPS based on its parent bus).
			 */
			mps = min(mps, pcie_get_mps(dev->bus->self));
	}

	rc = pcie_set_mps(dev, mps);
	if (rc)
		dev_err(&dev->dev, "Failed attempting to set the MPS\n");
}
1543
1544 static void pcie_write_mrrs(struct pci_dev *dev)
1545 {
1546 int rc, mrrs;
1547
1548 /* In the "safe" case, do not configure the MRRS. There appear to be
1549 * issues with setting MRRS to 0 on a number of devices.
1550 */
1551 if (pcie_bus_config != PCIE_BUS_PERFORMANCE)
1552 return;
1553
1554 /* For Max performance, the MRRS must be set to the largest supported
1555 * value. However, it cannot be configured larger than the MPS the
1556 * device or the bus can support. This should already be properly
1557 * configured by a prior call to pcie_write_mps.
1558 */
1559 mrrs = pcie_get_mps(dev);
1560
1561 /* MRRS is a R/W register. Invalid values can be written, but a
1562 * subsequent read will verify if the value is acceptable or not.
1563 * If the MRRS value provided is not acceptable (e.g., too large),
1564 * shrink the value until it is acceptable to the HW.
1565 */
1566 while (mrrs != pcie_get_readrq(dev) && mrrs >= 128) {
1567 rc = pcie_set_readrq(dev, mrrs);
1568 if (!rc)
1569 break;
1570
1571 dev_warn(&dev->dev, "Failed attempting to set the MRRS\n");
1572 mrrs /= 2;
1573 }
1574
1575 if (mrrs < 128)
1576 dev_err(&dev->dev, "MRRS was unable to be configured with a "
1577 "safe value. If problems are experienced, try running "
1578 "with pci=pcie_bus_safe.\n");
1579 }
1580
1581 static void pcie_bus_detect_mps(struct pci_dev *dev)
1582 {
1583 struct pci_dev *bridge = dev->bus->self;
1584 int mps, p_mps;
1585
1586 if (!bridge)
1587 return;
1588
1589 mps = pcie_get_mps(dev);
1590 p_mps = pcie_get_mps(bridge);
1591
1592 if (mps != p_mps)
1593 dev_warn(&dev->dev, "Max Payload Size %d, but upstream %s set to %d; if necessary, use \"pci=pcie_bus_safe\" and report a bug\n",
1594 mps, pci_name(bridge), p_mps);
1595 }
1596
/*
 * pci_walk_bus() callback: apply the fabric MPS policy to one device.
 * @data points to the fabric-wide smpss value (log2 of payload/128)
 * computed by pcie_bus_configure_settings().
 */
static int pcie_bus_configure_set(struct pci_dev *dev, void *data)
{
	int mps, orig_mps;

	if (!pci_is_pcie(dev))
		return 0;

	/* In "tune off" mode only report mismatches; change nothing. */
	if (pcie_bus_config == PCIE_BUS_TUNE_OFF) {
		pcie_bus_detect_mps(dev);
		return 0;
	}

	mps = 128 << *(u8 *)data;
	orig_mps = pcie_get_mps(dev);

	pcie_write_mps(dev, mps);
	pcie_write_mrrs(dev);

	dev_info(&dev->dev, "Max Payload Size set to %4d/%4d (was %4d), "
		 "Max Read Rq %4d\n", pcie_get_mps(dev), 128 << dev->pcie_mpss,
		 orig_mps, pcie_get_readrq(dev));

	return 0;
}
1621
1622 /* pcie_bus_configure_settings requires that pci_walk_bus work in a top-down,
1623 * parents then children fashion. If this changes, then this code will not
1624 * work as designed.
1625 */
1626 void pcie_bus_configure_settings(struct pci_bus *bus)
1627 {
1628 u8 smpss;
1629
1630 if (!bus->self)
1631 return;
1632
1633 if (!pci_is_pcie(bus->self))
1634 return;
1635
1636 /* FIXME - Peer to peer DMA is possible, though the endpoint would need
1637 * to be aware of the MPS of the destination. To work around this,
1638 * simply force the MPS of the entire system to the smallest possible.
1639 */
1640 if (pcie_bus_config == PCIE_BUS_PEER2PEER)
1641 smpss = 0;
1642
1643 if (pcie_bus_config == PCIE_BUS_SAFE) {
1644 smpss = bus->self->pcie_mpss;
1645
1646 pcie_find_smpss(bus->self, &smpss);
1647 pci_walk_bus(bus, pcie_find_smpss, &smpss);
1648 }
1649
1650 pcie_bus_configure_set(bus->self, &smpss);
1651 pci_walk_bus(bus, pcie_bus_configure_set, &smpss);
1652 }
1653 EXPORT_SYMBOL_GPL(pcie_bus_configure_settings);
1654
/*
 * Scan every slot on @bus, run architecture fixups, then recurse behind
 * any bridges found.  Returns the highest bus number reached below this
 * bus.  Bridges are handled in two passes; presumably pass 0 re-uses
 * already-configured bridges and pass 1 assigns the rest -- see
 * pci_scan_bridge() for the exact semantics.
 */
unsigned int pci_scan_child_bus(struct pci_bus *bus)
{
	unsigned int devfn, pass, max = bus->busn_res.start;
	struct pci_dev *dev;

	dev_dbg(&bus->dev, "scanning bus\n");

	/* Go find them, Rover! */
	for (devfn = 0; devfn < 0x100; devfn += 8)
		pci_scan_slot(bus, devfn);

	/* Reserve buses for SR-IOV capability. */
	max += pci_iov_bus_range(bus);

	/*
	 * After performing arch-dependent fixup of the bus, look behind
	 * all PCI-to-PCI bridges on this bus.
	 */
	if (!bus->is_added) {
		dev_dbg(&bus->dev, "fixups for bus\n");
		pcibios_fixup_bus(bus);
		bus->is_added = 1;
	}

	for (pass=0; pass < 2; pass++)
		list_for_each_entry(dev, &bus->devices, bus_list) {
			if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE ||
			    dev->hdr_type == PCI_HEADER_TYPE_CARDBUS)
				max = pci_scan_bridge(bus, dev, max, pass);
		}

	/*
	 * We've scanned the bus and so we know all about what's on
	 * the other side of any bridges that may be on this bus plus
	 * any devices.
	 *
	 * Return how far we've got finding sub-buses.
	 */
	dev_dbg(&bus->dev, "bus scan returning with max=%02x\n", max);
	return max;
}
1696
/**
 * pcibios_root_bridge_prepare - Platform-specific host bridge setup.
 * @bridge: Host bridge to set up.
 *
 * Default empty implementation.  Replace with an architecture-specific setup
 * routine, if necessary.
 */
int __weak pcibios_root_bridge_prepare(struct pci_host_bridge *bridge)
{
	/* Weak default: nothing to prepare, report success. */
	return 0;
}
1708
/* Weak default hook: architectures may override to act on bus creation. */
void __weak pcibios_add_bus(struct pci_bus *bus)
{
}
1712
/* Weak default hook: architectures may override to act on bus removal. */
void __weak pcibios_remove_bus(struct pci_bus *bus)
{
}
1716
/*
 * Create and register a root PCI bus @bus in the current domain, backed
 * by a freshly allocated host bridge whose windows are taken over from
 * @resources.  Returns the new bus, or NULL on failure (including when
 * the bus number is already known).  The caller typically follows up
 * with pci_scan_child_bus().
 */
struct pci_bus *pci_create_root_bus(struct device *parent, int bus,
		struct pci_ops *ops, void *sysdata, struct list_head *resources)
{
	int error;
	struct pci_host_bridge *bridge;
	struct pci_bus *b, *b2;
	struct pci_host_bridge_window *window, *n;
	struct resource *res;
	resource_size_t offset;
	char bus_addr[64];
	char *fmt;

	b = pci_alloc_bus();
	if (!b)
		return NULL;

	b->sysdata = sysdata;
	b->ops = ops;
	b->number = b->busn_res.start = bus;
	b2 = pci_find_bus(pci_domain_nr(b), bus);
	if (b2) {
		/* If we already got to this bus through a different bridge, ignore it */
		dev_dbg(&b2->dev, "bus already known\n");
		goto err_out;
	}

	bridge = pci_alloc_host_bridge(b);
	if (!bridge)
		goto err_out;

	bridge->dev.parent = parent;
	bridge->dev.release = pci_release_host_bridge_dev;
	dev_set_name(&bridge->dev, "pci%04x:%02x", pci_domain_nr(b), bus);
	error = pcibios_root_bridge_prepare(bridge);
	if (error) {
		/* Not yet registered: plain kfree is the correct teardown. */
		kfree(bridge);
		goto err_out;
	}

	error = device_register(&bridge->dev);
	if (error) {
		/* device_register failed: drop the initial reference. */
		put_device(&bridge->dev);
		goto err_out;
	}
	/* Hold an extra reference on the bridge for b->bridge. */
	b->bridge = get_device(&bridge->dev);
	device_enable_async_suspend(b->bridge);
	pci_set_bus_of_node(b);

	if (!parent)
		set_dev_node(b->bridge, pcibus_to_node(b));

	b->dev.class = &pcibus_class;
	b->dev.parent = b->bridge;
	dev_set_name(&b->dev, "%04x:%02x", pci_domain_nr(b), bus);
	error = device_register(&b->dev);
	if (error)
		goto class_dev_reg_err;

	pcibios_add_bus(b);

	/* Create legacy_io and legacy_mem files for this bus */
	pci_create_legacy_files(b);

	if (parent)
		dev_info(parent, "PCI host bridge to bus %s\n", dev_name(&b->dev));
	else
		printk(KERN_INFO "PCI host bridge to bus %s\n", dev_name(&b->dev));

	/* Add initial resources to the bus */
	list_for_each_entry_safe(window, n, resources, list) {
		list_move_tail(&window->list, &bridge->windows);
		res = window->res;
		offset = window->offset;
		if (res->flags & IORESOURCE_BUS)
			pci_bus_insert_busn_res(b, bus, res->end);
		else
			pci_bus_add_resource(b, res, 0);
		if (offset) {
			/* Window is CPU-translated: also log the bus-side range. */
			if (resource_type(res) == IORESOURCE_IO)
				fmt = " (bus address [%#06llx-%#06llx])";
			else
				fmt = " (bus address [%#010llx-%#010llx])";
			snprintf(bus_addr, sizeof(bus_addr), fmt,
				 (unsigned long long) (res->start - offset),
				 (unsigned long long) (res->end - offset));
		} else
			bus_addr[0] = '\0';
		dev_info(&b->dev, "root bus resource %pR%s\n", res, bus_addr);
	}

	down_write(&pci_bus_sem);
	list_add_tail(&b->node, &pci_root_buses);
	up_write(&pci_bus_sem);

	return b;

class_dev_reg_err:
	/* Drop the b->bridge reference, then unregister the bridge device. */
	put_device(&bridge->dev);
	device_unregister(&bridge->dev);
err_out:
	kfree(b);
	return NULL;
}
1820
1821 int pci_bus_insert_busn_res(struct pci_bus *b, int bus, int bus_max)
1822 {
1823 struct resource *res = &b->busn_res;
1824 struct resource *parent_res, *conflict;
1825
1826 res->start = bus;
1827 res->end = bus_max;
1828 res->flags = IORESOURCE_BUS;
1829
1830 if (!pci_is_root_bus(b))
1831 parent_res = &b->parent->busn_res;
1832 else {
1833 parent_res = get_pci_domain_busn_res(pci_domain_nr(b));
1834 res->flags |= IORESOURCE_PCI_FIXED;
1835 }
1836
1837 conflict = request_resource_conflict(parent_res, res);
1838
1839 if (conflict)
1840 dev_printk(KERN_DEBUG, &b->dev,
1841 "busn_res: can not insert %pR under %s%pR (conflicts with %s %pR)\n",
1842 res, pci_is_root_bus(b) ? "domain " : "",
1843 parent_res, conflict->name, conflict);
1844
1845 return conflict == NULL;
1846 }
1847
/*
 * Shrink or grow @b's bus-number resource so it ends at @bus_max.
 * Fails with -EINVAL if @bus_max lies below the current start.  If the
 * resource was never inserted into the tree, insert it now with the
 * adjusted range.
 */
int pci_bus_update_busn_res_end(struct pci_bus *b, int bus_max)
{
	struct resource *res = &b->busn_res;
	struct resource old_res = *res;	/* snapshot for the log message */
	resource_size_t size;
	int ret;

	if (res->start > bus_max)
		return -EINVAL;

	size = bus_max - res->start + 1;
	ret = adjust_resource(res, res->start, size);
	dev_printk(KERN_DEBUG, &b->dev,
		   "busn_res: %pR end %s updated to %02x\n",
		   &old_res, ret ? "can not be" : "is", bus_max);

	/* Not in the resource tree yet: insert with the new range. */
	if (!ret && !res->parent)
		pci_bus_insert_busn_res(b, res->start, res->end);

	return ret;
}
1869
1870 void pci_bus_release_busn_res(struct pci_bus *b)
1871 {
1872 struct resource *res = &b->busn_res;
1873 int ret;
1874
1875 if (!res->flags || !res->parent)
1876 return;
1877
1878 ret = release_resource(res);
1879 dev_printk(KERN_DEBUG, &b->dev,
1880 "busn_res: %pR %s released\n",
1881 res, ret ? "can not be" : "is");
1882 }
1883
1884 struct pci_bus *pci_scan_root_bus(struct device *parent, int bus,
1885 struct pci_ops *ops, void *sysdata, struct list_head *resources)
1886 {
1887 struct pci_host_bridge_window *window;
1888 bool found = false;
1889 struct pci_bus *b;
1890 int max;
1891
1892 list_for_each_entry(window, resources, list)
1893 if (window->res->flags & IORESOURCE_BUS) {
1894 found = true;
1895 break;
1896 }
1897
1898 b = pci_create_root_bus(parent, bus, ops, sysdata, resources);
1899 if (!b)
1900 return NULL;
1901
1902 if (!found) {
1903 dev_info(&b->dev,
1904 "No busn resource found for root bus, will use [bus %02x-ff]\n",
1905 bus);
1906 pci_bus_insert_busn_res(b, bus, 255);
1907 }
1908
1909 max = pci_scan_child_bus(b);
1910
1911 if (!found)
1912 pci_bus_update_busn_res_end(b, max);
1913
1914 pci_bus_add_devices(b);
1915 return b;
1916 }
1917 EXPORT_SYMBOL(pci_scan_root_bus);
1918
/* Deprecated; use pci_scan_root_bus() instead */
struct pci_bus *pci_scan_bus_parented(struct device *parent,
		int bus, struct pci_ops *ops, void *sysdata)
{
	LIST_HEAD(resources);
	struct pci_bus *b;

	/* Default windows: the full I/O, memory and bus-number spaces. */
	pci_add_resource(&resources, &ioport_resource);
	pci_add_resource(&resources, &iomem_resource);
	pci_add_resource(&resources, &busn_resource);
	b = pci_create_root_bus(parent, bus, ops, sysdata, &resources);
	if (b)
		pci_scan_child_bus(b);
	else
		/* Bus creation failed, so the windows were never consumed. */
		pci_free_resource_list(&resources);
	/* NOTE(review): unlike pci_scan_bus(), devices are not added here;
	   presumably callers do that themselves -- confirm at call sites. */
	return b;
}
EXPORT_SYMBOL(pci_scan_bus_parented);
1937
1938 struct pci_bus *pci_scan_bus(int bus, struct pci_ops *ops,
1939 void *sysdata)
1940 {
1941 LIST_HEAD(resources);
1942 struct pci_bus *b;
1943
1944 pci_add_resource(&resources, &ioport_resource);
1945 pci_add_resource(&resources, &iomem_resource);
1946 pci_add_resource(&resources, &busn_resource);
1947 b = pci_create_root_bus(NULL, bus, ops, sysdata, &resources);
1948 if (b) {
1949 pci_scan_child_bus(b);
1950 pci_bus_add_devices(b);
1951 } else {
1952 pci_free_resource_list(&resources);
1953 }
1954 return b;
1955 }
1956 EXPORT_SYMBOL(pci_scan_bus);
1957
/**
 * pci_rescan_bus_bridge_resize - scan a PCI bus for devices.
 * @bridge: PCI bridge for the bus to scan
 *
 * Scan a PCI bus and child buses for new devices, add them,
 * and enable them, resizing bridge mmio/io resource if necessary
 * and possible.  The caller must ensure the child devices are already
 * removed for resizing to occur.
 *
 * Returns the max number of subordinate bus discovered.
 */
unsigned int __ref pci_rescan_bus_bridge_resize(struct pci_dev *bridge)
{
	unsigned int max;
	struct pci_bus *bus = bridge->subordinate;

	max = pci_scan_child_bus(bus);

	/* Resources first, so the devices added below can be enabled. */
	pci_assign_unassigned_bridge_resources(bridge);

	pci_bus_add_devices(bus);

	return max;
}
1982
1983 /**
1984 * pci_rescan_bus - scan a PCI bus for devices.
1985 * @bus: PCI bus to scan
1986 *
1987 * Scan a PCI bus and child buses for new devices, adds them,
1988 * and enables them.
1989 *
1990 * Returns the max number of subordinate bus discovered.
1991 */
1992 unsigned int __ref pci_rescan_bus(struct pci_bus *bus)
1993 {
1994 unsigned int max;
1995
1996 max = pci_scan_child_bus(bus);
1997 pci_assign_unassigned_bus_resources(bus);
1998 pci_bus_add_devices(bus);
1999
2000 return max;
2001 }
2002 EXPORT_SYMBOL_GPL(pci_rescan_bus);
2003
/* Exports for bus-scan entry points defined earlier in this file. */
EXPORT_SYMBOL(pci_add_new_bus);
EXPORT_SYMBOL(pci_scan_slot);
EXPORT_SYMBOL(pci_scan_bridge);
EXPORT_SYMBOL_GPL(pci_scan_child_bus);

/*
 * pci_rescan_bus(), pci_rescan_bus_bridge_resize() and PCI device removal
 * routines should always be executed under this mutex.  Taken/released via
 * pci_lock_rescan_remove()/pci_unlock_rescan_remove() below.
 */
static DEFINE_MUTEX(pci_rescan_remove_lock);
2014
/* Acquire the global PCI rescan/remove mutex; pair with
 * pci_unlock_rescan_remove(). */
void pci_lock_rescan_remove(void)
{
	mutex_lock(&pci_rescan_remove_lock);
}
EXPORT_SYMBOL_GPL(pci_lock_rescan_remove);
2020
/* Release the global PCI rescan/remove mutex taken by
 * pci_lock_rescan_remove(). */
void pci_unlock_rescan_remove(void)
{
	mutex_unlock(&pci_rescan_remove_lock);
}
EXPORT_SYMBOL_GPL(pci_unlock_rescan_remove);
2026
2027 static int __init pci_sort_bf_cmp(const struct device *d_a, const struct device *d_b)
2028 {
2029 const struct pci_dev *a = to_pci_dev(d_a);
2030 const struct pci_dev *b = to_pci_dev(d_b);
2031
2032 if (pci_domain_nr(a->bus) < pci_domain_nr(b->bus)) return -1;
2033 else if (pci_domain_nr(a->bus) > pci_domain_nr(b->bus)) return 1;
2034
2035 if (a->bus->number < b->bus->number) return -1;
2036 else if (a->bus->number > b->bus->number) return 1;
2037
2038 if (a->devfn < b->devfn) return -1;
2039 else if (a->devfn > b->devfn) return 1;
2040
2041 return 0;
2042 }
2043
/* Re-sort all PCI devices into breadth-first (domain, bus, devfn) order. */
void __init pci_sort_breadthfirst(void)
{
	bus_sort_breadthfirst(&pci_bus_type, &pci_sort_bf_cmp);
}