1 /*
2 * probe.c - PCI detection and setup code
3 */
4
5 #include <linux/kernel.h>
6 #include <linux/delay.h>
7 #include <linux/init.h>
8 #include <linux/pci.h>
9 #include <linux/slab.h>
10 #include <linux/module.h>
11 #include <linux/cpumask.h>
12 #include <linux/pci-aspm.h>
13 #include "pci.h"
14
15 #define CARDBUS_LATENCY_TIMER 176 /* secondary latency timer */
16 #define CARDBUS_RESERVE_BUSNR 3
17
18 /* Ugh. Need to stop exporting this to modules. */
19 LIST_HEAD(pci_root_buses);
20 EXPORT_SYMBOL(pci_root_buses);
21
22
23 static int find_anything(struct device *dev, void *data)
24 {
25 return 1;
26 }
27
28 /*
29 * Some device drivers need to know if PCI is initialized.
30 * Basically, we consider PCI uninitialized when there is
31 * no device to be found on the pci_bus_type.
32 */
33 int no_pci_devices(void)
34 {
35 struct device *dev;
36 int no_devices;
37
38 dev = bus_find_device(&pci_bus_type, NULL, NULL, find_anything);
39 no_devices = (dev == NULL);
40 put_device(dev);
41 return no_devices;
42 }
43 EXPORT_SYMBOL(no_pci_devices);
44
45 /*
46 * PCI Bus Class Devices
47 */
48 static ssize_t pci_bus_show_cpuaffinity(struct device *dev,
49 int type,
50 struct device_attribute *attr,
51 char *buf)
52 {
53 int ret;
54 const struct cpumask *cpumask;
55
56 cpumask = cpumask_of_pcibus(to_pci_bus(dev));
57 ret = type?
58 cpulist_scnprintf(buf, PAGE_SIZE-2, cpumask) :
59 cpumask_scnprintf(buf, PAGE_SIZE-2, cpumask);
60 buf[ret++] = '\n';
61 buf[ret] = '\0';
62 return ret;
63 }
64
65 static inline ssize_t pci_bus_show_cpumaskaffinity(struct device *dev,
66 struct device_attribute *attr,
67 char *buf)
68 {
69 return pci_bus_show_cpuaffinity(dev, 0, attr, buf);
70 }
71
72 static inline ssize_t pci_bus_show_cpulistaffinity(struct device *dev,
73 struct device_attribute *attr,
74 char *buf)
75 {
76 return pci_bus_show_cpuaffinity(dev, 1, attr, buf);
77 }
78
79 DEVICE_ATTR(cpuaffinity, S_IRUGO, pci_bus_show_cpumaskaffinity, NULL);
80 DEVICE_ATTR(cpulistaffinity, S_IRUGO, pci_bus_show_cpulistaffinity, NULL);
81
82 /*
83 * PCI Bus Class
84 */
85 static void release_pcibus_dev(struct device *dev)
86 {
87 struct pci_bus *pci_bus = to_pci_bus(dev);
88
89 if (pci_bus->bridge)
90 put_device(pci_bus->bridge);
91 pci_bus_remove_resources(pci_bus);
92 kfree(pci_bus);
93 }
94
95 static struct class pcibus_class = {
96 .name = "pci_bus",
97 .dev_release = &release_pcibus_dev,
98 };
99
100 static int __init pcibus_class_init(void)
101 {
102 return class_register(&pcibus_class);
103 }
104 postcore_initcall(pcibus_class_init);
105
106 /*
107 * Translate the low bits of the PCI base
108 * to the resource type
109 */
110 static inline unsigned int pci_calc_resource_flags(unsigned int flags)
111 {
112 if (flags & PCI_BASE_ADDRESS_SPACE_IO)
113 return IORESOURCE_IO;
114
115 if (flags & PCI_BASE_ADDRESS_MEM_PREFETCH)
116 return IORESOURCE_MEM | IORESOURCE_PREFETCH;
117
118 return IORESOURCE_MEM;
119 }
120
121 static u64 pci_size(u64 base, u64 maxbase, u64 mask)
122 {
123 u64 size = mask & maxbase; /* Find the significant bits */
124 if (!size)
125 return 0;
126
127 /* Get the lowest of them to find the decode size, and
128 from that the extent. */
129 size = (size & ~(size-1)) - 1;
130
131 /* base == maxbase can be valid only if the BAR has
132 already been programmed with all 1s. */
133 if (base == maxbase && ((base | size) & mask) != mask)
134 return 0;
135
136 return size;
137 }
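/*
 * Worked example: if a 32-bit memory BAR reads back 0xfff00000 after the
 * all-ones write, the lowest set bit is 0x00100000, so pci_size() returns
 * 0x000fffff and the region decodes 1 MB.
 */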
138
139 static inline enum pci_bar_type decode_bar(struct resource *res, u32 bar)
140 {
141 if ((bar & PCI_BASE_ADDRESS_SPACE) == PCI_BASE_ADDRESS_SPACE_IO) {
142 res->flags = bar & ~PCI_BASE_ADDRESS_IO_MASK;
143 return pci_bar_io;
144 }
145
146 res->flags = bar & ~PCI_BASE_ADDRESS_MEM_MASK;
147
148 if (res->flags & PCI_BASE_ADDRESS_MEM_TYPE_64)
149 return pci_bar_mem64;
150 return pci_bar_mem32;
151 }
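/*
 * The low BAR bits decoded above are defined by the PCI spec: bit 0 set
 * means an I/O BAR; for memory BARs, bits 2:1 give the type (10b = 64-bit)
 * and bit 3 marks it prefetchable.  decode_bar() keeps those bits in
 * res->flags so pci_calc_resource_flags() can translate them later.
 */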
152
153 /**
154  * __pci_read_base - read a PCI BAR
155 * @dev: the PCI device
156 * @type: type of the BAR
157 * @res: resource buffer to be filled in
158 * @pos: BAR position in the config space
159 *
160 * Returns 1 if the BAR is 64-bit, or 0 if 32-bit.
161 */
162 int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type,
163 struct resource *res, unsigned int pos)
164 {
165 u32 l, sz, mask;
166
167 mask = type ? PCI_ROM_ADDRESS_MASK : ~0;
168
169 res->name = pci_name(dev);
170
171 pci_read_config_dword(dev, pos, &l);
172 pci_write_config_dword(dev, pos, l | mask);
173 pci_read_config_dword(dev, pos, &sz);
174 pci_write_config_dword(dev, pos, l);
175
176 /*
177 * All bits set in sz means the device isn't working properly.
178 * If the BAR isn't implemented, all bits must be 0. If it's a
179 * memory BAR or a ROM, bit 0 must be clear; if it's an io BAR, bit
180 * 1 must be clear.
181 */
182 if (!sz || sz == 0xffffffff)
183 goto fail;
184
185 /*
186 * I don't know how l can have all bits set. Copied from old code.
187 * Maybe it fixes a bug on some ancient platform.
188 */
189 if (l == 0xffffffff)
190 l = 0;
191
192 if (type == pci_bar_unknown) {
193 type = decode_bar(res, l);
194 res->flags |= pci_calc_resource_flags(l) | IORESOURCE_SIZEALIGN;
195 if (type == pci_bar_io) {
196 l &= PCI_BASE_ADDRESS_IO_MASK;
197 mask = PCI_BASE_ADDRESS_IO_MASK & IO_SPACE_LIMIT;
198 } else {
199 l &= PCI_BASE_ADDRESS_MEM_MASK;
200 mask = (u32)PCI_BASE_ADDRESS_MEM_MASK;
201 }
202 } else {
203 res->flags |= (l & IORESOURCE_ROM_ENABLE);
204 l &= PCI_ROM_ADDRESS_MASK;
205 mask = (u32)PCI_ROM_ADDRESS_MASK;
206 }
207
208 if (type == pci_bar_mem64) {
209 u64 l64 = l;
210 u64 sz64 = sz;
211 u64 mask64 = mask | (u64)~0 << 32;
212
213 pci_read_config_dword(dev, pos + 4, &l);
214 pci_write_config_dword(dev, pos + 4, ~0);
215 pci_read_config_dword(dev, pos + 4, &sz);
216 pci_write_config_dword(dev, pos + 4, l);
217
218 l64 |= ((u64)l << 32);
219 sz64 |= ((u64)sz << 32);
220
221 sz64 = pci_size(l64, sz64, mask64);
222
223 if (!sz64)
224 goto fail;
225
226 if ((sizeof(resource_size_t) < 8) && (sz64 > 0x100000000ULL)) {
227 dev_err(&dev->dev, "reg %x: can't handle 64-bit BAR\n",
228 pos);
229 goto fail;
230 }
231
232 res->flags |= IORESOURCE_MEM_64;
233 if ((sizeof(resource_size_t) < 8) && l) {
234 /* Address above 32-bit boundary; disable the BAR */
235 pci_write_config_dword(dev, pos, 0);
236 pci_write_config_dword(dev, pos + 4, 0);
237 res->start = 0;
238 res->end = sz64;
239 } else {
240 res->start = l64;
241 res->end = l64 + sz64;
242 dev_printk(KERN_DEBUG, &dev->dev, "reg %x: %pR\n",
243 pos, res);
244 }
245 } else {
246 sz = pci_size(l, sz, mask);
247
248 if (!sz)
249 goto fail;
250
251 res->start = l;
252 res->end = l + sz;
253
254 dev_printk(KERN_DEBUG, &dev->dev, "reg %x: %pR\n", pos, res);
255 }
256
257 out:
258 return (type == pci_bar_mem64) ? 1 : 0;
259 fail:
260 res->flags = 0;
261 goto out;
262 }
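/*
 * The 0/1 return value feeds pci_read_bases() below: a 64-bit BAR consumes
 * two consecutive dwords, so the caller advances its BAR index past the
 * extra slot occupied by the upper half.
 */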
263
264 static void pci_read_bases(struct pci_dev *dev, unsigned int howmany, int rom)
265 {
266 unsigned int pos, reg;
267
268 for (pos = 0; pos < howmany; pos++) {
269 struct resource *res = &dev->resource[pos];
270 reg = PCI_BASE_ADDRESS_0 + (pos << 2);
271 pos += __pci_read_base(dev, pci_bar_unknown, res, reg);
272 }
273
274 if (rom) {
275 struct resource *res = &dev->resource[PCI_ROM_RESOURCE];
276 dev->rom_base_reg = rom;
277 res->flags = IORESOURCE_MEM | IORESOURCE_PREFETCH |
278 IORESOURCE_READONLY | IORESOURCE_CACHEABLE |
279 IORESOURCE_SIZEALIGN;
280 __pci_read_base(dev, pci_bar_mem32, res, rom);
281 }
282 }
283
284 static void __devinit pci_read_bridge_io(struct pci_bus *child)
285 {
286 struct pci_dev *dev = child->self;
287 u8 io_base_lo, io_limit_lo;
288 unsigned long base, limit;
289 struct resource *res;
290
291 res = child->resource[0];
292 pci_read_config_byte(dev, PCI_IO_BASE, &io_base_lo);
293 pci_read_config_byte(dev, PCI_IO_LIMIT, &io_limit_lo);
294 base = (io_base_lo & PCI_IO_RANGE_MASK) << 8;
295 limit = (io_limit_lo & PCI_IO_RANGE_MASK) << 8;
296
297 if ((io_base_lo & PCI_IO_RANGE_TYPE_MASK) == PCI_IO_RANGE_TYPE_32) {
298 u16 io_base_hi, io_limit_hi;
299 pci_read_config_word(dev, PCI_IO_BASE_UPPER16, &io_base_hi);
300 pci_read_config_word(dev, PCI_IO_LIMIT_UPPER16, &io_limit_hi);
301 base |= (io_base_hi << 16);
302 limit |= (io_limit_hi << 16);
303 }
304
305 if (base && base <= limit) {
306 res->flags = (io_base_lo & PCI_IO_RANGE_TYPE_MASK) | IORESOURCE_IO;
307 if (!res->start)
308 res->start = base;
309 if (!res->end)
310 res->end = limit + 0xfff;
311 dev_printk(KERN_DEBUG, &dev->dev, " bridge window %pR\n", res);
312 } else {
313 dev_printk(KERN_DEBUG, &dev->dev,
314 " bridge window [io %#06lx-%#06lx] (disabled)\n",
315 base, limit);
316 }
317 }
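/*
 * Example: the bridge I/O base/limit registers hold address bits 15:12 in
 * their upper nibble, giving 4K granularity.  A base of 0x21 and a limit of
 * 0x31 (the low nibbles encode the I/O addressing capability) decode to the
 * window [io 0x2000-0x3fff] once the limit is rounded up by 0xfff above.
 */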
318
319 static void __devinit pci_read_bridge_mmio(struct pci_bus *child)
320 {
321 struct pci_dev *dev = child->self;
322 u16 mem_base_lo, mem_limit_lo;
323 unsigned long base, limit;
324 struct resource *res;
325
326 res = child->resource[1];
327 pci_read_config_word(dev, PCI_MEMORY_BASE, &mem_base_lo);
328 pci_read_config_word(dev, PCI_MEMORY_LIMIT, &mem_limit_lo);
329 base = (mem_base_lo & PCI_MEMORY_RANGE_MASK) << 16;
330 limit = (mem_limit_lo & PCI_MEMORY_RANGE_MASK) << 16;
331 if (base && base <= limit) {
332 res->flags = (mem_base_lo & PCI_MEMORY_RANGE_TYPE_MASK) | IORESOURCE_MEM;
333 res->start = base;
334 res->end = limit + 0xfffff;
335 dev_printk(KERN_DEBUG, &dev->dev, " bridge window %pR\n", res);
336 } else {
337 dev_printk(KERN_DEBUG, &dev->dev,
338 " bridge window [mem %#010lx-%#010lx] (disabled)\n",
339 base, limit + 0xfffff);
340 }
341 }
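/*
 * Example: the memory base/limit registers hold address bits 31:20, so the
 * window granularity is 1 MB.  A base of 0xc000 and a limit of 0xc1f0
 * decode to [mem 0xc0000000-0xc1ffffff], a 32 MB window.
 */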
342
343 static void __devinit pci_read_bridge_mmio_pref(struct pci_bus *child)
344 {
345 struct pci_dev *dev = child->self;
346 u16 mem_base_lo, mem_limit_lo;
347 unsigned long base, limit;
348 struct resource *res;
349
350 res = child->resource[2];
351 pci_read_config_word(dev, PCI_PREF_MEMORY_BASE, &mem_base_lo);
352 pci_read_config_word(dev, PCI_PREF_MEMORY_LIMIT, &mem_limit_lo);
353 base = (mem_base_lo & PCI_PREF_RANGE_MASK) << 16;
354 limit = (mem_limit_lo & PCI_PREF_RANGE_MASK) << 16;
355
356 if ((mem_base_lo & PCI_PREF_RANGE_TYPE_MASK) == PCI_PREF_RANGE_TYPE_64) {
357 u32 mem_base_hi, mem_limit_hi;
358 pci_read_config_dword(dev, PCI_PREF_BASE_UPPER32, &mem_base_hi);
359 pci_read_config_dword(dev, PCI_PREF_LIMIT_UPPER32, &mem_limit_hi);
360
361 /*
362 * Some bridges set the base > limit by default, and some
363 * (broken) BIOSes do not initialize them. If we find
364 * this, just assume they are not being used.
365 */
366 if (mem_base_hi <= mem_limit_hi) {
367 #if BITS_PER_LONG == 64
368 base |= ((long) mem_base_hi) << 32;
369 limit |= ((long) mem_limit_hi) << 32;
370 #else
371 if (mem_base_hi || mem_limit_hi) {
372 dev_err(&dev->dev, "can't handle 64-bit "
373 "address space for bridge\n");
374 return;
375 }
376 #endif
377 }
378 }
379 if (base && base <= limit) {
380 res->flags = (mem_base_lo & PCI_PREF_RANGE_TYPE_MASK) |
381 IORESOURCE_MEM | IORESOURCE_PREFETCH;
382 if (res->flags & PCI_PREF_RANGE_TYPE_64)
383 res->flags |= IORESOURCE_MEM_64;
384 res->start = base;
385 res->end = limit + 0xfffff;
386 dev_printk(KERN_DEBUG, &dev->dev, " bridge window %pR\n", res);
387 } else {
388 dev_printk(KERN_DEBUG, &dev->dev,
389 " bridge window [mem %#010lx-%#010lx pref] (disabled)\n",
390 base, limit + 0xfffff);
391 }
392 }
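/*
 * The low nibble of the prefetchable base/limit registers reports whether
 * the bridge implements the 64-bit upper registers (0x1 selects
 * PCI_PREF_RANGE_TYPE_64); only then are PCI_PREF_BASE_UPPER32 and
 * PCI_PREF_LIMIT_UPPER32 consulted above.
 */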
393
394 void __devinit pci_read_bridge_bases(struct pci_bus *child)
395 {
396 struct pci_dev *dev = child->self;
397 struct resource *res;
398 int i;
399
400 if (pci_is_root_bus(child)) /* It's a host bus, nothing to read */
401 return;
402
403 dev_info(&dev->dev, "PCI bridge to [bus %02x-%02x]%s\n",
404 child->secondary, child->subordinate,
405 dev->transparent ? " (subtractive decode)" : "");
406
407 pci_bus_remove_resources(child);
408 for (i = 0; i < PCI_BRIDGE_RESOURCE_NUM; i++)
409 child->resource[i] = &dev->resource[PCI_BRIDGE_RESOURCES+i];
410
411 pci_read_bridge_io(child);
412 pci_read_bridge_mmio(child);
413 pci_read_bridge_mmio_pref(child);
414
415 if (dev->transparent) {
416 pci_bus_for_each_resource(child->parent, res, i) {
417 if (res) {
418 pci_bus_add_resource(child, res,
419 PCI_SUBTRACTIVE_DECODE);
420 dev_printk(KERN_DEBUG, &dev->dev,
421 " bridge window %pR (subtractive decode)\n",
422 res);
423 }
424 }
425 }
426 }
427
428 static struct pci_bus * pci_alloc_bus(void)
429 {
430 struct pci_bus *b;
431
432 b = kzalloc(sizeof(*b), GFP_KERNEL);
433 if (b) {
434 INIT_LIST_HEAD(&b->node);
435 INIT_LIST_HEAD(&b->children);
436 INIT_LIST_HEAD(&b->devices);
437 INIT_LIST_HEAD(&b->slots);
438 INIT_LIST_HEAD(&b->resources);
439 b->max_bus_speed = PCI_SPEED_UNKNOWN;
440 b->cur_bus_speed = PCI_SPEED_UNKNOWN;
441 }
442 return b;
443 }
444
445 static unsigned char pcix_bus_speed[] = {
446 PCI_SPEED_UNKNOWN, /* 0 */
447 PCI_SPEED_66MHz_PCIX, /* 1 */
448 PCI_SPEED_100MHz_PCIX, /* 2 */
449 PCI_SPEED_133MHz_PCIX, /* 3 */
450 PCI_SPEED_UNKNOWN, /* 4 */
451 PCI_SPEED_66MHz_PCIX_ECC, /* 5 */
452 PCI_SPEED_100MHz_PCIX_ECC, /* 6 */
453 PCI_SPEED_133MHz_PCIX_ECC, /* 7 */
454 PCI_SPEED_UNKNOWN, /* 8 */
455 PCI_SPEED_66MHz_PCIX_266, /* 9 */
456 PCI_SPEED_100MHz_PCIX_266, /* A */
457 PCI_SPEED_133MHz_PCIX_266, /* B */
458 PCI_SPEED_UNKNOWN, /* C */
459 PCI_SPEED_66MHz_PCIX_533, /* D */
460 PCI_SPEED_100MHz_PCIX_533, /* E */
461 PCI_SPEED_133MHz_PCIX_533 /* F */
462 };
463
464 static unsigned char pcie_link_speed[] = {
465 PCI_SPEED_UNKNOWN, /* 0 */
466 PCIE_SPEED_2_5GT, /* 1 */
467 PCIE_SPEED_5_0GT, /* 2 */
468 PCIE_SPEED_8_0GT, /* 3 */
469 PCI_SPEED_UNKNOWN, /* 4 */
470 PCI_SPEED_UNKNOWN, /* 5 */
471 PCI_SPEED_UNKNOWN, /* 6 */
472 PCI_SPEED_UNKNOWN, /* 7 */
473 PCI_SPEED_UNKNOWN, /* 8 */
474 PCI_SPEED_UNKNOWN, /* 9 */
475 PCI_SPEED_UNKNOWN, /* A */
476 PCI_SPEED_UNKNOWN, /* B */
477 PCI_SPEED_UNKNOWN, /* C */
478 PCI_SPEED_UNKNOWN, /* D */
479 PCI_SPEED_UNKNOWN, /* E */
480 PCI_SPEED_UNKNOWN /* F */
481 };
482
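/*
 * pcie_link_speed[] above is indexed by the 4-bit link speed field of the
 * PCIe Link Capabilities/Status registers: an encoding of 1 means 2.5 GT/s,
 * 2 means 5.0 GT/s and 3 means 8.0 GT/s.
 */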
483 void pcie_update_link_speed(struct pci_bus *bus, u16 linksta)
484 {
485 bus->cur_bus_speed = pcie_link_speed[linksta & 0xf];
486 }
487 EXPORT_SYMBOL_GPL(pcie_update_link_speed);
488
489 static unsigned char agp_speeds[] = {
490 AGP_UNKNOWN,
491 AGP_1X,
492 AGP_2X,
493 AGP_4X,
494 AGP_8X
495 };
496
497 static enum pci_bus_speed agp_speed(int agp3, int agpstat)
498 {
499 int index = 0;
500
501 if (agpstat & 4)
502 index = 3;
503 else if (agpstat & 2)
504 index = 2;
505 else if (agpstat & 1)
506 index = 1;
507 else
508 goto out;
509
510 if (agp3) {
511 index += 2;
512 if (index == 5)
513 index = 0;
514 }
515
516 out:
517 return agp_speeds[index];
518 }
519
520
521 static void pci_set_bus_speed(struct pci_bus *bus)
522 {
523 struct pci_dev *bridge = bus->self;
524 int pos;
525
526 pos = pci_find_capability(bridge, PCI_CAP_ID_AGP);
527 if (!pos)
528 pos = pci_find_capability(bridge, PCI_CAP_ID_AGP3);
529 if (pos) {
530 u32 agpstat, agpcmd;
531
532 pci_read_config_dword(bridge, pos + PCI_AGP_STATUS, &agpstat);
533 bus->max_bus_speed = agp_speed(agpstat & 8, agpstat & 7);
534
535 pci_read_config_dword(bridge, pos + PCI_AGP_COMMAND, &agpcmd);
536 bus->cur_bus_speed = agp_speed(agpstat & 8, agpcmd & 7);
537 }
538
539 pos = pci_find_capability(bridge, PCI_CAP_ID_PCIX);
540 if (pos) {
541 u16 status;
542 enum pci_bus_speed max;
543 pci_read_config_word(bridge, pos + 2, &status);
544
545 if (status & 0x8000) {
546 max = PCI_SPEED_133MHz_PCIX_533;
547 } else if (status & 0x4000) {
548 max = PCI_SPEED_133MHz_PCIX_266;
549 } else if (status & 0x0002) {
550 if (((status >> 12) & 0x3) == 2) {
551 max = PCI_SPEED_133MHz_PCIX_ECC;
552 } else {
553 max = PCI_SPEED_133MHz_PCIX;
554 }
555 } else {
556 max = PCI_SPEED_66MHz_PCIX;
557 }
558
559 bus->max_bus_speed = max;
560 bus->cur_bus_speed = pcix_bus_speed[(status >> 6) & 0xf];
561
562 return;
563 }
564
565 pos = pci_find_capability(bridge, PCI_CAP_ID_EXP);
566 if (pos) {
567 u32 linkcap;
568 u16 linksta;
569
570 pci_read_config_dword(bridge, pos + PCI_EXP_LNKCAP, &linkcap);
571 bus->max_bus_speed = pcie_link_speed[linkcap & 0xf];
572
573 pci_read_config_word(bridge, pos + PCI_EXP_LNKSTA, &linksta);
574 pcie_update_link_speed(bus, linksta);
575 }
576 }
577
578
579 static struct pci_bus *pci_alloc_child_bus(struct pci_bus *parent,
580 struct pci_dev *bridge, int busnr)
581 {
582 struct pci_bus *child;
583 int i;
584
585 /*
586 * Allocate a new bus, and inherit stuff from the parent..
587 */
588 child = pci_alloc_bus();
589 if (!child)
590 return NULL;
591
592 child->parent = parent;
593 child->ops = parent->ops;
594 child->sysdata = parent->sysdata;
595 child->bus_flags = parent->bus_flags;
596
597 /* initialize some portions of the bus device, but don't register it
598 * now as the parent is not properly set up yet. This device will get
599 * registered later in pci_bus_add_devices()
600 */
601 child->dev.class = &pcibus_class;
602 dev_set_name(&child->dev, "%04x:%02x", pci_domain_nr(child), busnr);
603
604 /*
605 * Set up the primary, secondary and subordinate
606 * bus numbers.
607 */
608 child->number = child->secondary = busnr;
609 child->primary = parent->secondary;
610 child->subordinate = 0xff;
611
612 if (!bridge)
613 return child;
614
615 child->self = bridge;
616 child->bridge = get_device(&bridge->dev);
617
618 pci_set_bus_speed(child);
619
620 /* Set up default resource pointers and names.. */
621 for (i = 0; i < PCI_BRIDGE_RESOURCE_NUM; i++) {
622 child->resource[i] = &bridge->resource[PCI_BRIDGE_RESOURCES+i];
623 child->resource[i]->name = child->name;
624 }
625 bridge->subordinate = child;
626
627 return child;
628 }
629
630 struct pci_bus *__ref pci_add_new_bus(struct pci_bus *parent, struct pci_dev *dev, int busnr)
631 {
632 struct pci_bus *child;
633
634 child = pci_alloc_child_bus(parent, dev, busnr);
635 if (child) {
636 down_write(&pci_bus_sem);
637 list_add_tail(&child->node, &parent->children);
638 up_write(&pci_bus_sem);
639 }
640 return child;
641 }
642
643 static void pci_fixup_parent_subordinate_busnr(struct pci_bus *child, int max)
644 {
645 struct pci_bus *parent = child->parent;
646
647 /* Attempts to fix that up are really dangerous unless
648 we're going to re-assign all bus numbers. */
649 if (!pcibios_assign_all_busses())
650 return;
651
652 while (parent->parent && parent->subordinate < max) {
653 parent->subordinate = max;
654 pci_write_config_byte(parent->self, PCI_SUBORDINATE_BUS, max);
655 parent = parent->parent;
656 }
657 }
658
659 /*
660 * If it's a bridge, configure it and scan the bus behind it.
661 * For CardBus bridges, we don't scan behind as the devices will
662 * be handled by the bridge driver itself.
663 *
664 * We need to process bridges in two passes -- first we scan those
665 * already configured by the BIOS and after we are done with all of
666 * them, we proceed to assigning numbers to the remaining buses in
667 * order to avoid overlaps between old and new bus numbers.
668 */
669 int __devinit pci_scan_bridge(struct pci_bus *bus, struct pci_dev *dev, int max, int pass)
670 {
671 struct pci_bus *child;
672 int is_cardbus = (dev->hdr_type == PCI_HEADER_TYPE_CARDBUS);
673 u32 buses, i, j = 0;
674 u16 bctl;
675 u8 primary, secondary, subordinate;
676 int broken = 0;
677
678 pci_read_config_dword(dev, PCI_PRIMARY_BUS, &buses);
679 primary = buses & 0xFF;
680 secondary = (buses >> 8) & 0xFF;
681 subordinate = (buses >> 16) & 0xFF;
682
683 dev_dbg(&dev->dev, "scanning [bus %02x-%02x] behind bridge, pass %d\n",
684 secondary, subordinate, pass);
685
686 /* Check if setup is sensible at all */
687 if (!pass &&
688 (primary != bus->number || secondary <= bus->number)) {
689 dev_dbg(&dev->dev, "bus configuration invalid, reconfiguring\n");
690 broken = 1;
691 }
692
693 /* Disable MasterAbortMode during probing to avoid reporting
694 of bus errors (in some architectures) */
695 pci_read_config_word(dev, PCI_BRIDGE_CONTROL, &bctl);
696 pci_write_config_word(dev, PCI_BRIDGE_CONTROL,
697 bctl & ~PCI_BRIDGE_CTL_MASTER_ABORT);
698
699 if ((secondary || subordinate) && !pcibios_assign_all_busses() &&
700 !is_cardbus && !broken) {
701 unsigned int cmax;
702 /*
703 * Bus already configured by firmware, process it in the first
704 * pass and just note the configuration.
705 */
706 if (pass)
707 goto out;
708
709 /*
710 * If we already got to this bus through a different bridge,
711 * don't re-add it. This can happen with the i450NX chipset.
712 *
713 * However, we continue to descend down the hierarchy and
714 * scan remaining child buses.
715 */
716 child = pci_find_bus(pci_domain_nr(bus), secondary);
717 if (!child) {
718 child = pci_add_new_bus(bus, dev, secondary);
719 if (!child)
720 goto out;
721 child->primary = primary;
722 child->subordinate = subordinate;
723 child->bridge_ctl = bctl;
724 }
725
726 cmax = pci_scan_child_bus(child);
727 if (cmax > max)
728 max = cmax;
729 if (child->subordinate > max)
730 max = child->subordinate;
731 } else {
732 /*
733 * We need to assign a number to this bus which we always
734 * do in the second pass.
735 */
736 if (!pass) {
737 if (pcibios_assign_all_busses() || broken)
738 /* Temporarily disable forwarding of the
739 configuration cycles on all bridges in
740 this bus segment to avoid possible
741 conflicts in the second pass between two
742 bridges programmed with overlapping
743 bus ranges. */
744 pci_write_config_dword(dev, PCI_PRIMARY_BUS,
745 buses & ~0xffffff);
746 goto out;
747 }
748
749 /* Clear errors */
750 pci_write_config_word(dev, PCI_STATUS, 0xffff);
751
752 /* Prevent assigning a bus number that already exists.
753 * This can happen when a bridge is hot-plugged */
754 if (pci_find_bus(pci_domain_nr(bus), max+1))
755 goto out;
756 child = pci_add_new_bus(bus, dev, ++max);
757 buses = (buses & 0xff000000)
758 | ((unsigned int)(child->primary) << 0)
759 | ((unsigned int)(child->secondary) << 8)
760 | ((unsigned int)(child->subordinate) << 16);
761
762 /*
763 * yenta.c forces a secondary latency timer of 176.
764 * Copy that behaviour here.
765 */
766 if (is_cardbus) {
767 buses &= ~0xff000000;
768 buses |= CARDBUS_LATENCY_TIMER << 24;
769 }
770
771 /*
772 * We need to blast all three values with a single write.
773 */
774 pci_write_config_dword(dev, PCI_PRIMARY_BUS, buses);
775
776 if (!is_cardbus) {
777 child->bridge_ctl = bctl;
778 /*
779 * Adjust subordinate busnr in parent buses.
780 * We do this before scanning for children because
781 * some devices may not be detected if the bios
782 * was lazy.
783 */
784 pci_fixup_parent_subordinate_busnr(child, max);
785 /* Now we can scan all subordinate buses... */
786 max = pci_scan_child_bus(child);
787 /*
788 * now fix it up again since we have found
789 * the real value of max.
790 */
791 pci_fixup_parent_subordinate_busnr(child, max);
792 } else {
793 /*
794 * For CardBus bridges, we leave 4 bus numbers
795 * as cards with a PCI-to-PCI bridge can be
796 * inserted later.
797 */
798 for (i=0; i<CARDBUS_RESERVE_BUSNR; i++) {
799 struct pci_bus *parent = bus;
800 if (pci_find_bus(pci_domain_nr(bus),
801 max+i+1))
802 break;
803 while (parent->parent) {
804 if ((!pcibios_assign_all_busses()) &&
805 (parent->subordinate > max) &&
806 (parent->subordinate <= max+i)) {
807 j = 1;
808 }
809 parent = parent->parent;
810 }
811 if (j) {
812 /*
813 * Often, there are two cardbus bridges
814 * -- try to leave one valid bus number
815 * for each one.
816 */
817 i /= 2;
818 break;
819 }
820 }
821 max += i;
822 pci_fixup_parent_subordinate_busnr(child, max);
823 }
824 /*
825 * Set the subordinate bus number to its real value.
826 */
827 child->subordinate = max;
828 pci_write_config_byte(dev, PCI_SUBORDINATE_BUS, max);
829 }
830
831 sprintf(child->name,
832 (is_cardbus ? "PCI CardBus %04x:%02x" : "PCI Bus %04x:%02x"),
833 pci_domain_nr(bus), child->number);
834
835 /* Has only triggered on CardBus, fixup is in yenta_socket */
836 while (bus->parent) {
837 if ((child->subordinate > bus->subordinate) ||
838 (child->number > bus->subordinate) ||
839 (child->number < bus->number) ||
840 (child->subordinate < bus->number)) {
841 dev_info(&child->dev, "[bus %02x-%02x] %s "
842 "hidden behind%s bridge %s [bus %02x-%02x]\n",
843 child->number, child->subordinate,
844 (bus->number > child->subordinate &&
845 bus->subordinate < child->number) ?
846 "wholly" : "partially",
847 bus->self->transparent ? " transparent" : "",
848 dev_name(&bus->dev),
849 bus->number, bus->subordinate);
850 }
851 bus = bus->parent;
852 }
853
854 out:
855 pci_write_config_word(dev, PCI_BRIDGE_CONTROL, bctl);
856
857 return max;
858 }
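/*
 * pci_scan_child_bus() calls this twice for every bridge, with pass 0 and
 * then pass 1, so bridges already configured by firmware are recorded
 * before any new bus numbers are handed out in the second pass.
 */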
859
860 /*
861 * Read interrupt line and base address registers.
862 * The architecture-dependent code can tweak these, of course.
863 */
864 static void pci_read_irq(struct pci_dev *dev)
865 {
866 unsigned char irq;
867
868 pci_read_config_byte(dev, PCI_INTERRUPT_PIN, &irq);
869 dev->pin = irq;
870 if (irq)
871 pci_read_config_byte(dev, PCI_INTERRUPT_LINE, &irq);
872 dev->irq = irq;
873 }
874
875 void set_pcie_port_type(struct pci_dev *pdev)
876 {
877 int pos;
878 u16 reg16;
879
880 pos = pci_find_capability(pdev, PCI_CAP_ID_EXP);
881 if (!pos)
882 return;
883 pdev->is_pcie = 1;
884 pdev->pcie_cap = pos;
885 pci_read_config_word(pdev, pos + PCI_EXP_FLAGS, &reg16);
886 pdev->pcie_type = (reg16 & PCI_EXP_FLAGS_TYPE) >> 4;
887 }
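/*
 * pcie_type ends up holding the Device/Port Type field, which occupies
 * bits 7:4 of the PCI Express Capabilities register, hence the shift by 4.
 */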
888
889 void set_pcie_hotplug_bridge(struct pci_dev *pdev)
890 {
891 int pos;
892 u16 reg16;
893 u32 reg32;
894
895 pos = pci_pcie_cap(pdev);
896 if (!pos)
897 return;
898 pci_read_config_word(pdev, pos + PCI_EXP_FLAGS, &reg16);
899 if (!(reg16 & PCI_EXP_FLAGS_SLOT))
900 return;
901 pci_read_config_dword(pdev, pos + PCI_EXP_SLTCAP, &reg32);
902 if (reg32 & PCI_EXP_SLTCAP_HPC)
903 pdev->is_hotplug_bridge = 1;
904 }
905
906 #define LEGACY_IO_RESOURCE (IORESOURCE_IO | IORESOURCE_PCI_FIXED)
907
908 /**
909 * pci_setup_device - fill in class and map information of a device
910 * @dev: the device structure to fill
911 *
912 * Initialize the device structure with information about the device's
913  * vendor, class, memory and I/O-space addresses, IRQ lines, etc.
914  * Called at initialisation of the PCI subsystem and by CardBus services.
915  * Returns 0 on success, or a negative value if the device is of an unknown
916  * type (not normal, bridge or CardBus).
917 */
918 int pci_setup_device(struct pci_dev *dev)
919 {
920 u32 class;
921 u8 hdr_type;
922 struct pci_slot *slot;
923 int pos = 0;
924
925 if (pci_read_config_byte(dev, PCI_HEADER_TYPE, &hdr_type))
926 return -EIO;
927
928 dev->sysdata = dev->bus->sysdata;
929 dev->dev.parent = dev->bus->bridge;
930 dev->dev.bus = &pci_bus_type;
931 dev->hdr_type = hdr_type & 0x7f;
932 dev->multifunction = !!(hdr_type & 0x80);
933 dev->error_state = pci_channel_io_normal;
934 set_pcie_port_type(dev);
935
936 list_for_each_entry(slot, &dev->bus->slots, list)
937 if (PCI_SLOT(dev->devfn) == slot->number)
938 dev->slot = slot;
939
940 /* Assume 32-bit PCI; let 64-bit PCI cards (which are far rarer)
941 set this higher, assuming the system even supports it. */
942 dev->dma_mask = 0xffffffff;
943
944 dev_set_name(&dev->dev, "%04x:%02x:%02x.%d", pci_domain_nr(dev->bus),
945 dev->bus->number, PCI_SLOT(dev->devfn),
946 PCI_FUNC(dev->devfn));
947
948 pci_read_config_dword(dev, PCI_CLASS_REVISION, &class);
949 dev->revision = class & 0xff;
950 class >>= 8; /* upper 3 bytes */
951 dev->class = class;
952 class >>= 8;
953
954 dev_dbg(&dev->dev, "found [%04x:%04x] class %06x header type %02x\n",
955 dev->vendor, dev->device, class, dev->hdr_type);
956
957 /* need to have dev->class ready */
958 dev->cfg_size = pci_cfg_space_size(dev);
959
960 /* "Unknown power state" */
961 dev->current_state = PCI_UNKNOWN;
962
963 /* Early fixups, before probing the BARs */
964 pci_fixup_device(pci_fixup_early, dev);
965 /* device class may be changed after fixup */
966 class = dev->class >> 8;
967
968 switch (dev->hdr_type) { /* header type */
969 case PCI_HEADER_TYPE_NORMAL: /* standard header */
970 if (class == PCI_CLASS_BRIDGE_PCI)
971 goto bad;
972 pci_read_irq(dev);
973 pci_read_bases(dev, 6, PCI_ROM_ADDRESS);
974 pci_read_config_word(dev, PCI_SUBSYSTEM_VENDOR_ID, &dev->subsystem_vendor);
975 pci_read_config_word(dev, PCI_SUBSYSTEM_ID, &dev->subsystem_device);
976
977 /*
978 * Do the ugly legacy mode stuff here rather than broken chip
979 * quirk code. Legacy mode ATA controllers have fixed
980 * addresses. These are not always echoed in BAR0-3, and
981 * BAR0-3 in a few cases contain junk!
982 */
983 if (class == PCI_CLASS_STORAGE_IDE) {
984 u8 progif;
985 pci_read_config_byte(dev, PCI_CLASS_PROG, &progif);
986 if ((progif & 1) == 0) {
987 dev->resource[0].start = 0x1F0;
988 dev->resource[0].end = 0x1F7;
989 dev->resource[0].flags = LEGACY_IO_RESOURCE;
990 dev->resource[1].start = 0x3F6;
991 dev->resource[1].end = 0x3F6;
992 dev->resource[1].flags = LEGACY_IO_RESOURCE;
993 }
994 if ((progif & 4) == 0) {
995 dev->resource[2].start = 0x170;
996 dev->resource[2].end = 0x177;
997 dev->resource[2].flags = LEGACY_IO_RESOURCE;
998 dev->resource[3].start = 0x376;
999 dev->resource[3].end = 0x376;
1000 dev->resource[3].flags = LEGACY_IO_RESOURCE;
1001 }
1002 }
1003 break;
1004
1005 case PCI_HEADER_TYPE_BRIDGE: /* bridge header */
1006 if (class != PCI_CLASS_BRIDGE_PCI)
1007 goto bad;
1008 /* The PCI-to-PCI bridge spec requires that a subtractive
1009 decoding (i.e. transparent) bridge have a programming
1010 interface code of 0x01. */
1011 pci_read_irq(dev);
1012 dev->transparent = ((dev->class & 0xff) == 1);
1013 pci_read_bases(dev, 2, PCI_ROM_ADDRESS1);
1014 set_pcie_hotplug_bridge(dev);
1015 pos = pci_find_capability(dev, PCI_CAP_ID_SSVID);
1016 if (pos) {
1017 pci_read_config_word(dev, pos + PCI_SSVID_VENDOR_ID, &dev->subsystem_vendor);
1018 pci_read_config_word(dev, pos + PCI_SSVID_DEVICE_ID, &dev->subsystem_device);
1019 }
1020 break;
1021
1022 case PCI_HEADER_TYPE_CARDBUS: /* CardBus bridge header */
1023 if (class != PCI_CLASS_BRIDGE_CARDBUS)
1024 goto bad;
1025 pci_read_irq(dev);
1026 pci_read_bases(dev, 1, 0);
1027 pci_read_config_word(dev, PCI_CB_SUBSYSTEM_VENDOR_ID, &dev->subsystem_vendor);
1028 pci_read_config_word(dev, PCI_CB_SUBSYSTEM_ID, &dev->subsystem_device);
1029 break;
1030
1031 default: /* unknown header */
1032 dev_err(&dev->dev, "unknown header type %02x, "
1033 "ignoring device\n", dev->hdr_type);
1034 return -EIO;
1035
1036 bad:
1037 dev_err(&dev->dev, "ignoring class %02x (doesn't match header "
1038 "type %02x)\n", class, dev->hdr_type);
1039 dev->class = PCI_CLASS_NOT_DEFINED;
1040 }
1041
1042 /* We found a fine healthy device, go go go... */
1043 return 0;
1044 }
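/*
 * Example of the class decoding above: a PCI_CLASS_REVISION dword of
 * 0x06040001 gives dev->revision = 0x01, dev->class = 0x060400, and a
 * two-byte class of 0x0604 (PCI_CLASS_BRIDGE_PCI), which is what the
 * bridge header case checks against.
 */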
1045
1046 static void pci_release_capabilities(struct pci_dev *dev)
1047 {
1048 pci_vpd_release(dev);
1049 pci_iov_release(dev);
1050 }
1051
1052 /**
1053 * pci_release_dev - free a pci device structure when all users of it are finished.
1054 * @dev: device that's been disconnected
1055 *
1056 * Will be called only by the device core when all users of this pci device are
1057 * done.
1058 */
1059 static void pci_release_dev(struct device *dev)
1060 {
1061 struct pci_dev *pci_dev;
1062
1063 pci_dev = to_pci_dev(dev);
1064 pci_release_capabilities(pci_dev);
1065 kfree(pci_dev);
1066 }
1067
1068 /**
1069 * pci_cfg_space_size - get the configuration space size of the PCI device.
1070 * @dev: PCI device
1071 *
1072 * Regular PCI devices have 256 bytes, but PCI-X 2 and PCI Express devices
1073 * have 4096 bytes. Even if the device is capable, that doesn't mean we can
1074 * access it. Maybe we don't have a way to generate extended config space
1075 * accesses, or the device is behind a reverse Express bridge. So we try
1076 * reading the dword at 0x100 which must either be 0 or a valid extended
1077 * capability header.
1078 */
1079 int pci_cfg_space_size_ext(struct pci_dev *dev)
1080 {
1081 u32 status;
1082 int pos = PCI_CFG_SPACE_SIZE;
1083
1084 if (pci_read_config_dword(dev, pos, &status) != PCIBIOS_SUCCESSFUL)
1085 goto fail;
1086 if (status == 0xffffffff)
1087 goto fail;
1088
1089 return PCI_CFG_SPACE_EXP_SIZE;
1090
1091 fail:
1092 return PCI_CFG_SPACE_SIZE;
1093 }
1094
1095 int pci_cfg_space_size(struct pci_dev *dev)
1096 {
1097 int pos;
1098 u32 status;
1099 u16 class;
1100
1101 class = dev->class >> 8;
1102 if (class == PCI_CLASS_BRIDGE_HOST)
1103 return pci_cfg_space_size_ext(dev);
1104
1105 pos = pci_pcie_cap(dev);
1106 if (!pos) {
1107 pos = pci_find_capability(dev, PCI_CAP_ID_PCIX);
1108 if (!pos)
1109 goto fail;
1110
1111 pci_read_config_dword(dev, pos + PCI_X_STATUS, &status);
1112 if (!(status & (PCI_X_STATUS_266MHZ | PCI_X_STATUS_533MHZ)))
1113 goto fail;
1114 }
1115
1116 return pci_cfg_space_size_ext(dev);
1117
1118 fail:
1119 return PCI_CFG_SPACE_SIZE;
1120 }
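/*
 * PCI_CFG_SPACE_SIZE is 256 bytes and PCI_CFG_SPACE_EXP_SIZE is 4096
 * bytes, so the helpers above simply pick between the legacy and the
 * extended configuration space sizes based on the probe of offset 0x100.
 */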
1121
1122 static void pci_release_bus_bridge_dev(struct device *dev)
1123 {
1124 kfree(dev);
1125 }
1126
1127 struct pci_dev *alloc_pci_dev(void)
1128 {
1129 struct pci_dev *dev;
1130
1131 dev = kzalloc(sizeof(struct pci_dev), GFP_KERNEL);
1132 if (!dev)
1133 return NULL;
1134
1135 INIT_LIST_HEAD(&dev->bus_list);
1136
1137 return dev;
1138 }
1139 EXPORT_SYMBOL(alloc_pci_dev);
1140
1141 /*
1142 * Read the config data for a PCI device, sanity-check it
1143 * and fill in the dev structure...
1144 */
1145 static struct pci_dev *pci_scan_device(struct pci_bus *bus, int devfn)
1146 {
1147 struct pci_dev *dev;
1148 u32 l;
1149 int delay = 1;
1150
1151 if (pci_bus_read_config_dword(bus, devfn, PCI_VENDOR_ID, &l))
1152 return NULL;
1153
1154 /* some broken boards return 0 or ~0 if a slot is empty: */
1155 if (l == 0xffffffff || l == 0x00000000 ||
1156 l == 0x0000ffff || l == 0xffff0000)
1157 return NULL;
1158
1159 /* Configuration request Retry Status */
1160 while (l == 0xffff0001) {
1161 msleep(delay);
1162 delay *= 2;
1163 if (pci_bus_read_config_dword(bus, devfn, PCI_VENDOR_ID, &l))
1164 return NULL;
1165 /* Card hasn't responded in 60 seconds? Must be stuck. */
1166 if (delay > 60 * 1000) {
1167 printk(KERN_WARNING "pci %04x:%02x:%02x.%d: not "
1168 "responding\n", pci_domain_nr(bus),
1169 bus->number, PCI_SLOT(devfn),
1170 PCI_FUNC(devfn));
1171 return NULL;
1172 }
1173 }
1174
1175 dev = alloc_pci_dev();
1176 if (!dev)
1177 return NULL;
1178
1179 dev->bus = bus;
1180 dev->devfn = devfn;
1181 dev->vendor = l & 0xffff;
1182 dev->device = (l >> 16) & 0xffff;
1183
1184 if (pci_setup_device(dev)) {
1185 kfree(dev);
1186 return NULL;
1187 }
1188
1189 return dev;
1190 }
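/*
 * The 0xffff0001 value checked above is Configuration Request Retry
 * Status: a vendor ID of 0x0001 with all-ones in the device ID, meaning
 * the device needs more time.  The retry delay doubles from 1 ms up to a
 * 60 second cap before the device is declared unresponsive.
 */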
1191
1192 static void pci_init_capabilities(struct pci_dev *dev)
1193 {
1194 /* MSI/MSI-X list */
1195 pci_msi_init_pci_dev(dev);
1196
1197 /* Buffers for saving PCIe and PCI-X capabilities */
1198 pci_allocate_cap_save_buffers(dev);
1199
1200 /* Power Management */
1201 pci_pm_init(dev);
1202 platform_pci_wakeup_init(dev);
1203
1204 /* Vital Product Data */
1205 pci_vpd_pci22_init(dev);
1206
1207 /* Alternative Routing-ID Forwarding */
1208 pci_enable_ari(dev);
1209
1210 /* Single Root I/O Virtualization */
1211 pci_iov_init(dev);
1212
1213 /* Enable ACS P2P upstream forwarding */
1214 pci_enable_acs(dev);
1215 }
1216
1217 void pci_device_add(struct pci_dev *dev, struct pci_bus *bus)
1218 {
1219 device_initialize(&dev->dev);
1220 dev->dev.release = pci_release_dev;
1221 pci_dev_get(dev);
1222
1223 dev->dev.dma_mask = &dev->dma_mask;
1224 dev->dev.dma_parms = &dev->dma_parms;
1225 dev->dev.coherent_dma_mask = 0xffffffffull;
1226
1227 pci_set_dma_max_seg_size(dev, 65536);
1228 pci_set_dma_seg_boundary(dev, 0xffffffff);
1229
1230 /* Fix up broken headers */
1231 pci_fixup_device(pci_fixup_header, dev);
1232
1233 /* Clear the state_saved flag. */
1234 dev->state_saved = false;
1235
1236 /* Initialize various capabilities */
1237 pci_init_capabilities(dev);
1238
1239 /*
1240 * Add the device to our list of discovered devices
1241 * and the bus list for fixup functions, etc.
1242 */
1243 down_write(&pci_bus_sem);
1244 list_add_tail(&dev->bus_list, &bus->devices);
1245 up_write(&pci_bus_sem);
1246 }
1247
1248 struct pci_dev *__ref pci_scan_single_device(struct pci_bus *bus, int devfn)
1249 {
1250 struct pci_dev *dev;
1251
1252 dev = pci_get_slot(bus, devfn);
1253 if (dev) {
1254 pci_dev_put(dev);
1255 return dev;
1256 }
1257
1258 dev = pci_scan_device(bus, devfn);
1259 if (!dev)
1260 return NULL;
1261
1262 pci_device_add(dev, bus);
1263
1264 return dev;
1265 }
1266 EXPORT_SYMBOL(pci_scan_single_device);
1267
1268 static unsigned next_ari_fn(struct pci_dev *dev, unsigned fn)
1269 {
1270 u16 cap;
1271 unsigned pos, next_fn;
1272
1273 if (!dev)
1274 return 0;
1275
1276 pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ARI);
1277 if (!pos)
1278 return 0;
1279 pci_read_config_word(dev, pos + 4, &cap);
1280 next_fn = cap >> 8;
1281 if (next_fn <= fn)
1282 return 0;
1283 return next_fn;
1284 }
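/*
 * The word at offset 4 of the ARI extended capability is the ARI
 * Capability register; its upper byte is the Next Function Number.
 * pci_scan_slot() follows that chain and stops when the value no longer
 * increases, which includes the terminating 0.
 */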
1285
1286 static unsigned next_trad_fn(struct pci_dev *dev, unsigned fn)
1287 {
1288 return (fn + 1) % 8;
1289 }
1290
1291 static unsigned no_next_fn(struct pci_dev *dev, unsigned fn)
1292 {
1293 return 0;
1294 }
1295
1296 static int only_one_child(struct pci_bus *bus)
1297 {
1298 struct pci_dev *parent = bus->self;
1299 if (!parent || !pci_is_pcie(parent))
1300 return 0;
1301 if (parent->pcie_type == PCI_EXP_TYPE_ROOT_PORT ||
1302 parent->pcie_type == PCI_EXP_TYPE_DOWNSTREAM)
1303 return 1;
1304 return 0;
1305 }
1306
1307 /**
1308 * pci_scan_slot - scan a PCI slot on a bus for devices.
1309 * @bus: PCI bus to scan
1310  * @devfn: slot number to scan (function number must be zero)
1311 *
1312 * Scan a PCI slot on the specified PCI bus for devices, adding
1313 * discovered devices to the @bus->devices list. New devices
1314 * will not have is_added set.
1315 *
1316 * Returns the number of new devices found.
1317 */
1318 int pci_scan_slot(struct pci_bus *bus, int devfn)
1319 {
1320 unsigned fn, nr = 0;
1321 struct pci_dev *dev;
1322 unsigned (*next_fn)(struct pci_dev *, unsigned) = no_next_fn;
1323
1324 if (only_one_child(bus) && (devfn > 0))
1325 return 0; /* Already scanned the entire slot */
1326
1327 dev = pci_scan_single_device(bus, devfn);
1328 if (!dev)
1329 return 0;
1330 if (!dev->is_added)
1331 nr++;
1332
1333 if (pci_ari_enabled(bus))
1334 next_fn = next_ari_fn;
1335 else if (dev->multifunction)
1336 next_fn = next_trad_fn;
1337
1338 for (fn = next_fn(dev, 0); fn > 0; fn = next_fn(dev, fn)) {
1339 dev = pci_scan_single_device(bus, devfn + fn);
1340 if (dev) {
1341 if (!dev->is_added)
1342 nr++;
1343 dev->multifunction = 1;
1344 }
1345 }
1346
1347 /* only one slot on a PCIe link has a device, so this runs at most once per bus */
1348 if (bus->self && nr)
1349 pcie_aspm_init_link_state(bus->self);
1350
1351 return nr;
1352 }
1353
1354 unsigned int __devinit pci_scan_child_bus(struct pci_bus *bus)
1355 {
1356 unsigned int devfn, pass, max = bus->secondary;
1357 struct pci_dev *dev;
1358
1359 dev_dbg(&bus->dev, "scanning bus\n");
1360
1361 /* Go find them, Rover! */
1362 for (devfn = 0; devfn < 0x100; devfn += 8)
1363 pci_scan_slot(bus, devfn);
1364
1365 /* Reserve buses for SR-IOV capability. */
1366 max += pci_iov_bus_range(bus);
1367
1368 /*
1369 * After performing arch-dependent fixup of the bus, look behind
1370 * all PCI-to-PCI bridges on this bus.
1371 */
1372 if (!bus->is_added) {
1373 dev_dbg(&bus->dev, "fixups for bus\n");
1374 pcibios_fixup_bus(bus);
1375 if (pci_is_root_bus(bus))
1376 bus->is_added = 1;
1377 }
1378
1379 for (pass=0; pass < 2; pass++)
1380 list_for_each_entry(dev, &bus->devices, bus_list) {
1381 if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE ||
1382 dev->hdr_type == PCI_HEADER_TYPE_CARDBUS)
1383 max = pci_scan_bridge(bus, dev, max, pass);
1384 }
1385
1386 /*
1387 * We've scanned the bus and so we know all about what's on
1388 * the other side of any bridges that may be on this bus plus
1389 * any devices.
1390 *
1391 * Return how far we've got finding sub-buses.
1392 */
1393 dev_dbg(&bus->dev, "bus scan returning with max=%02x\n", max);
1394 return max;
1395 }
1396
1397 struct pci_bus * pci_create_bus(struct device *parent,
1398 int bus, struct pci_ops *ops, void *sysdata)
1399 {
1400 int error;
1401 struct pci_bus *b, *b2;
1402 struct device *dev;
1403
1404 b = pci_alloc_bus();
1405 if (!b)
1406 return NULL;
1407
1408 dev = kzalloc(sizeof(*dev), GFP_KERNEL);
1409 if (!dev){
1410 kfree(b);
1411 return NULL;
1412 }
1413
1414 b->sysdata = sysdata;
1415 b->ops = ops;
1416
1417 b2 = pci_find_bus(pci_domain_nr(b), bus);
1418 if (b2) {
1419 /* If we already got to this bus through a different bridge, ignore it */
1420 dev_dbg(&b2->dev, "bus already known\n");
1421 goto err_out;
1422 }
1423
1424 down_write(&pci_bus_sem);
1425 list_add_tail(&b->node, &pci_root_buses);
1426 up_write(&pci_bus_sem);
1427
1428 dev->parent = parent;
1429 dev->release = pci_release_bus_bridge_dev;
1430 dev_set_name(dev, "pci%04x:%02x", pci_domain_nr(b), bus);
1431 error = device_register(dev);
1432 if (error)
1433 goto dev_reg_err;
1434 b->bridge = get_device(dev);
1435 device_enable_async_suspend(b->bridge);
1436
1437 if (!parent)
1438 set_dev_node(b->bridge, pcibus_to_node(b));
1439
1440 b->dev.class = &pcibus_class;
1441 b->dev.parent = b->bridge;
1442 dev_set_name(&b->dev, "%04x:%02x", pci_domain_nr(b), bus);
1443 error = device_register(&b->dev);
1444 if (error)
1445 goto class_dev_reg_err;
1446 error = device_create_file(&b->dev, &dev_attr_cpuaffinity);
1447 if (error)
1448 goto dev_create_file_err;
1449
1450 /* Create legacy_io and legacy_mem files for this bus */
1451 pci_create_legacy_files(b);
1452
1453 b->number = b->secondary = bus;
1454 b->resource[0] = &ioport_resource;
1455 b->resource[1] = &iomem_resource;
1456
1457 return b;
1458
1459 dev_create_file_err:
1460 device_unregister(&b->dev);
1461 class_dev_reg_err:
1462 device_unregister(dev);
1463 dev_reg_err:
1464 down_write(&pci_bus_sem);
1465 list_del(&b->node);
1466 up_write(&pci_bus_sem);
1467 err_out:
1468 kfree(dev);
1469 kfree(b);
1470 return NULL;
1471 }
1472
1473 struct pci_bus * __devinit pci_scan_bus_parented(struct device *parent,
1474 int bus, struct pci_ops *ops, void *sysdata)
1475 {
1476 struct pci_bus *b;
1477
1478 b = pci_create_bus(parent, bus, ops, sysdata);
1479 if (b)
1480 b->subordinate = pci_scan_child_bus(b);
1481 return b;
1482 }
1483 EXPORT_SYMBOL(pci_scan_bus_parented);
1484
1485 #ifdef CONFIG_HOTPLUG
1486 /**
1487 * pci_rescan_bus - scan a PCI bus for devices.
1488 * @bus: PCI bus to scan
1489 *
1490  * Scan a PCI bus and its child buses for new devices, add them,
1491  * and enable them.
1492  *
1493  * Returns the highest subordinate bus number discovered.
1494 */
1495 unsigned int __ref pci_rescan_bus(struct pci_bus *bus)
1496 {
1497 unsigned int max;
1498 struct pci_dev *dev;
1499
1500 max = pci_scan_child_bus(bus);
1501
1502 down_read(&pci_bus_sem);
1503 list_for_each_entry(dev, &bus->devices, bus_list)
1504 if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE ||
1505 dev->hdr_type == PCI_HEADER_TYPE_CARDBUS)
1506 if (dev->subordinate)
1507 pci_bus_size_bridges(dev->subordinate);
1508 up_read(&pci_bus_sem);
1509
1510 pci_bus_assign_resources(bus);
1511 pci_enable_bridges(bus);
1512 pci_bus_add_devices(bus);
1513
1514 return max;
1515 }
1516 EXPORT_SYMBOL_GPL(pci_rescan_bus);
1517
1518 EXPORT_SYMBOL(pci_add_new_bus);
1519 EXPORT_SYMBOL(pci_scan_slot);
1520 EXPORT_SYMBOL(pci_scan_bridge);
1521 EXPORT_SYMBOL_GPL(pci_scan_child_bus);
1522 #endif
1523
1524 static int __init pci_sort_bf_cmp(const struct device *d_a, const struct device *d_b)
1525 {
1526 const struct pci_dev *a = to_pci_dev(d_a);
1527 const struct pci_dev *b = to_pci_dev(d_b);
1528
1529 if (pci_domain_nr(a->bus) < pci_domain_nr(b->bus)) return -1;
1530 else if (pci_domain_nr(a->bus) > pci_domain_nr(b->bus)) return 1;
1531
1532 if (a->bus->number < b->bus->number) return -1;
1533 else if (a->bus->number > b->bus->number) return 1;
1534
1535 if (a->devfn < b->devfn) return -1;
1536 else if (a->devfn > b->devfn) return 1;
1537
1538 return 0;
1539 }
1540
1541 void __init pci_sort_breadthfirst(void)
1542 {
1543 bus_sort_breadthfirst(&pci_bus_type, &pci_sort_bf_cmp);
1544 }