2 * probe.c - PCI detection and setup code
5 #include <linux/kernel.h>
6 #include <linux/delay.h>
7 #include <linux/init.h>
9 #include <linux/slab.h>
10 #include <linux/module.h>
11 #include <linux/cpumask.h>
12 #include <linux/pci-aspm.h>
13 #include <asm-generic/pci-bridge.h>
16 #define CARDBUS_LATENCY_TIMER 176 /* secondary latency timer */
17 #define CARDBUS_RESERVE_BUSNR 3
19 static struct resource busn_resource
= {
23 .flags
= IORESOURCE_BUS
,
26 /* Ugh. Need to stop exporting this to modules. */
27 LIST_HEAD(pci_root_buses
);
28 EXPORT_SYMBOL(pci_root_buses
);
30 static LIST_HEAD(pci_domain_busn_res_list
);
32 struct pci_domain_busn_res
{
33 struct list_head list
;
38 static struct resource
*get_pci_domain_busn_res(int domain_nr
)
40 struct pci_domain_busn_res
*r
;
42 list_for_each_entry(r
, &pci_domain_busn_res_list
, list
)
43 if (r
->domain_nr
== domain_nr
)
46 r
= kzalloc(sizeof(*r
), GFP_KERNEL
);
50 r
->domain_nr
= domain_nr
;
53 r
->res
.flags
= IORESOURCE_BUS
| IORESOURCE_PCI_FIXED
;
55 list_add_tail(&r
->list
, &pci_domain_busn_res_list
);
60 static int find_anything(struct device
*dev
, void *data
)
66 * Some device drivers need to know if PCI is initialized.
67 * Basically, we consider PCI not initialized when there
68 * is no device to be found on the pci_bus_type bus.
70 int no_pci_devices(void)
75 dev
= bus_find_device(&pci_bus_type
, NULL
, NULL
, find_anything
);
76 no_devices
= (dev
== NULL
);
80 EXPORT_SYMBOL(no_pci_devices
);
85 static void release_pcibus_dev(struct device
*dev
)
87 struct pci_bus
*pci_bus
= to_pci_bus(dev
);
90 put_device(pci_bus
->bridge
);
91 pci_bus_remove_resources(pci_bus
);
92 pci_release_bus_of_node(pci_bus
);
96 static struct class pcibus_class
= {
98 .dev_release
= &release_pcibus_dev
,
99 .dev_groups
= pcibus_groups
,
102 static int __init
pcibus_class_init(void)
104 return class_register(&pcibus_class
);
106 postcore_initcall(pcibus_class_init
);
108 static u64
pci_size(u64 base
, u64 maxbase
, u64 mask
)
110 u64 size
= mask
& maxbase
; /* Find the significant bits */
114 /* Get the lowest of them to find the decode size, and
115 from that the extent. */
116 size
= (size
& ~(size
-1)) - 1;
118 /* base == maxbase can be valid only if the BAR has
119 already been programmed with all 1s. */
120 if (base
== maxbase
&& ((base
| size
) & mask
) != mask
)
126 static inline unsigned long decode_bar(struct pci_dev
*dev
, u32 bar
)
131 if ((bar
& PCI_BASE_ADDRESS_SPACE
) == PCI_BASE_ADDRESS_SPACE_IO
) {
132 flags
= bar
& ~PCI_BASE_ADDRESS_IO_MASK
;
133 flags
|= IORESOURCE_IO
;
137 flags
= bar
& ~PCI_BASE_ADDRESS_MEM_MASK
;
138 flags
|= IORESOURCE_MEM
;
139 if (flags
& PCI_BASE_ADDRESS_MEM_PREFETCH
)
140 flags
|= IORESOURCE_PREFETCH
;
142 mem_type
= bar
& PCI_BASE_ADDRESS_MEM_TYPE_MASK
;
144 case PCI_BASE_ADDRESS_MEM_TYPE_32
:
146 case PCI_BASE_ADDRESS_MEM_TYPE_1M
:
147 /* 1M mem BAR treated as 32-bit BAR */
149 case PCI_BASE_ADDRESS_MEM_TYPE_64
:
150 flags
|= IORESOURCE_MEM_64
;
153 /* mem unknown type treated as 32-bit BAR */
159 #define PCI_COMMAND_DECODE_ENABLE (PCI_COMMAND_MEMORY | PCI_COMMAND_IO)
162 * pci_read_base - read a PCI BAR
163 * @dev: the PCI device
164 * @type: type of the BAR
165 * @res: resource buffer to be filled in
166 * @pos: BAR position in the config space
168 * Returns 1 if the BAR is 64-bit, or 0 if 32-bit.
170 int __pci_read_base(struct pci_dev
*dev
, enum pci_bar_type type
,
171 struct resource
*res
, unsigned int pos
)
174 u64 l64
, sz64
, mask64
;
176 struct pci_bus_region region
, inverted_region
;
177 bool bar_too_big
= false, bar_too_high
= false;
179 mask
= type
? PCI_ROM_ADDRESS_MASK
: ~0;
181 /* No printks while decoding is disabled! */
182 if (!dev
->mmio_always_on
) {
183 pci_read_config_word(dev
, PCI_COMMAND
, &orig_cmd
);
184 if (orig_cmd
& PCI_COMMAND_DECODE_ENABLE
) {
185 pci_write_config_word(dev
, PCI_COMMAND
,
186 orig_cmd
& ~PCI_COMMAND_DECODE_ENABLE
);
190 res
->name
= pci_name(dev
);
192 pci_read_config_dword(dev
, pos
, &l
);
193 pci_write_config_dword(dev
, pos
, l
| mask
);
194 pci_read_config_dword(dev
, pos
, &sz
);
195 pci_write_config_dword(dev
, pos
, l
);
198 * All bits set in sz means the device isn't working properly.
199 * If the BAR isn't implemented, all bits must be 0. If it's a
200 * memory BAR or a ROM, bit 0 must be clear; if it's an io BAR, bit
203 if (!sz
|| sz
== 0xffffffff)
207 * I don't know how l can have all bits set. Copied from old code.
208 * Maybe it fixes a bug on some ancient platform.
213 if (type
== pci_bar_unknown
) {
214 res
->flags
= decode_bar(dev
, l
);
215 res
->flags
|= IORESOURCE_SIZEALIGN
;
216 if (res
->flags
& IORESOURCE_IO
) {
217 l
&= PCI_BASE_ADDRESS_IO_MASK
;
218 mask
= PCI_BASE_ADDRESS_IO_MASK
& (u32
) IO_SPACE_LIMIT
;
220 l
&= PCI_BASE_ADDRESS_MEM_MASK
;
221 mask
= (u32
)PCI_BASE_ADDRESS_MEM_MASK
;
224 res
->flags
|= (l
& IORESOURCE_ROM_ENABLE
);
225 l
&= PCI_ROM_ADDRESS_MASK
;
226 mask
= (u32
)PCI_ROM_ADDRESS_MASK
;
229 if (res
->flags
& IORESOURCE_MEM_64
) {
232 mask64
= mask
| (u64
)~0 << 32;
234 pci_read_config_dword(dev
, pos
+ 4, &l
);
235 pci_write_config_dword(dev
, pos
+ 4, ~0);
236 pci_read_config_dword(dev
, pos
+ 4, &sz
);
237 pci_write_config_dword(dev
, pos
+ 4, l
);
239 l64
|= ((u64
)l
<< 32);
240 sz64
|= ((u64
)sz
<< 32);
242 sz64
= pci_size(l64
, sz64
, mask64
);
247 if ((sizeof(dma_addr_t
) < 8 || sizeof(resource_size_t
) < 8) &&
248 sz64
> 0x100000000ULL
) {
249 res
->flags
|= IORESOURCE_UNSET
| IORESOURCE_DISABLED
;
256 if ((sizeof(dma_addr_t
) < 8) && l
) {
257 /* Above 32-bit boundary; try to reallocate */
258 res
->flags
|= IORESOURCE_UNSET
;
265 region
.end
= l64
+ sz64
;
268 sz
= pci_size(l
, sz
, mask
);
277 pcibios_bus_to_resource(dev
->bus
, res
, ®ion
);
278 pcibios_resource_to_bus(dev
->bus
, &inverted_region
, res
);
281 * If "A" is a BAR value (a bus address), "bus_to_resource(A)" is
282 * the corresponding resource address (the physical address used by
283 * the CPU. Converting that resource address back to a bus address
284 * should yield the original BAR value:
286 * resource_to_bus(bus_to_resource(A)) == A
288 * If it doesn't, CPU accesses to "bus_to_resource(A)" will not
289 * be claimed by the device.
291 if (inverted_region
.start
!= region
.start
) {
292 dev_info(&dev
->dev
, "reg 0x%x: initial BAR value %pa invalid; forcing reassignment\n",
294 res
->flags
|= IORESOURCE_UNSET
;
295 res
->end
-= res
->start
;
305 if (!dev
->mmio_always_on
&&
306 (orig_cmd
& PCI_COMMAND_DECODE_ENABLE
))
307 pci_write_config_word(dev
, PCI_COMMAND
, orig_cmd
);
310 dev_err(&dev
->dev
, "reg 0x%x: can't handle BAR larger than 4GB (size %#010llx)\n",
311 pos
, (unsigned long long) sz64
);
313 dev_info(&dev
->dev
, "reg 0x%x: can't handle BAR above 4G (bus address %#010llx)\n",
314 pos
, (unsigned long long) l64
);
316 dev_printk(KERN_DEBUG
, &dev
->dev
, "reg 0x%x: %pR\n", pos
, res
);
318 return (res
->flags
& IORESOURCE_MEM_64
) ? 1 : 0;
321 static void pci_read_bases(struct pci_dev
*dev
, unsigned int howmany
, int rom
)
323 unsigned int pos
, reg
;
325 for (pos
= 0; pos
< howmany
; pos
++) {
326 struct resource
*res
= &dev
->resource
[pos
];
327 reg
= PCI_BASE_ADDRESS_0
+ (pos
<< 2);
328 pos
+= __pci_read_base(dev
, pci_bar_unknown
, res
, reg
);
332 struct resource
*res
= &dev
->resource
[PCI_ROM_RESOURCE
];
333 dev
->rom_base_reg
= rom
;
334 res
->flags
= IORESOURCE_MEM
| IORESOURCE_PREFETCH
|
335 IORESOURCE_READONLY
| IORESOURCE_CACHEABLE
|
336 IORESOURCE_SIZEALIGN
;
337 __pci_read_base(dev
, pci_bar_mem32
, res
, rom
);
341 static void pci_read_bridge_io(struct pci_bus
*child
)
343 struct pci_dev
*dev
= child
->self
;
344 u8 io_base_lo
, io_limit_lo
;
345 unsigned long io_mask
, io_granularity
, base
, limit
;
346 struct pci_bus_region region
;
347 struct resource
*res
;
349 io_mask
= PCI_IO_RANGE_MASK
;
350 io_granularity
= 0x1000;
351 if (dev
->io_window_1k
) {
352 /* Support 1K I/O space granularity */
353 io_mask
= PCI_IO_1K_RANGE_MASK
;
354 io_granularity
= 0x400;
357 res
= child
->resource
[0];
358 pci_read_config_byte(dev
, PCI_IO_BASE
, &io_base_lo
);
359 pci_read_config_byte(dev
, PCI_IO_LIMIT
, &io_limit_lo
);
360 base
= (io_base_lo
& io_mask
) << 8;
361 limit
= (io_limit_lo
& io_mask
) << 8;
363 if ((io_base_lo
& PCI_IO_RANGE_TYPE_MASK
) == PCI_IO_RANGE_TYPE_32
) {
364 u16 io_base_hi
, io_limit_hi
;
366 pci_read_config_word(dev
, PCI_IO_BASE_UPPER16
, &io_base_hi
);
367 pci_read_config_word(dev
, PCI_IO_LIMIT_UPPER16
, &io_limit_hi
);
368 base
|= ((unsigned long) io_base_hi
<< 16);
369 limit
|= ((unsigned long) io_limit_hi
<< 16);
373 res
->flags
= (io_base_lo
& PCI_IO_RANGE_TYPE_MASK
) | IORESOURCE_IO
;
375 region
.end
= limit
+ io_granularity
- 1;
376 pcibios_bus_to_resource(dev
->bus
, res
, ®ion
);
377 dev_printk(KERN_DEBUG
, &dev
->dev
, " bridge window %pR\n", res
);
381 static void pci_read_bridge_mmio(struct pci_bus
*child
)
383 struct pci_dev
*dev
= child
->self
;
384 u16 mem_base_lo
, mem_limit_lo
;
385 unsigned long base
, limit
;
386 struct pci_bus_region region
;
387 struct resource
*res
;
389 res
= child
->resource
[1];
390 pci_read_config_word(dev
, PCI_MEMORY_BASE
, &mem_base_lo
);
391 pci_read_config_word(dev
, PCI_MEMORY_LIMIT
, &mem_limit_lo
);
392 base
= ((unsigned long) mem_base_lo
& PCI_MEMORY_RANGE_MASK
) << 16;
393 limit
= ((unsigned long) mem_limit_lo
& PCI_MEMORY_RANGE_MASK
) << 16;
395 res
->flags
= (mem_base_lo
& PCI_MEMORY_RANGE_TYPE_MASK
) | IORESOURCE_MEM
;
397 region
.end
= limit
+ 0xfffff;
398 pcibios_bus_to_resource(dev
->bus
, res
, ®ion
);
399 dev_printk(KERN_DEBUG
, &dev
->dev
, " bridge window %pR\n", res
);
403 static void pci_read_bridge_mmio_pref(struct pci_bus
*child
)
405 struct pci_dev
*dev
= child
->self
;
406 u16 mem_base_lo
, mem_limit_lo
;
407 unsigned long base
, limit
;
408 struct pci_bus_region region
;
409 struct resource
*res
;
411 res
= child
->resource
[2];
412 pci_read_config_word(dev
, PCI_PREF_MEMORY_BASE
, &mem_base_lo
);
413 pci_read_config_word(dev
, PCI_PREF_MEMORY_LIMIT
, &mem_limit_lo
);
414 base
= ((unsigned long) mem_base_lo
& PCI_PREF_RANGE_MASK
) << 16;
415 limit
= ((unsigned long) mem_limit_lo
& PCI_PREF_RANGE_MASK
) << 16;
417 if ((mem_base_lo
& PCI_PREF_RANGE_TYPE_MASK
) == PCI_PREF_RANGE_TYPE_64
) {
418 u32 mem_base_hi
, mem_limit_hi
;
420 pci_read_config_dword(dev
, PCI_PREF_BASE_UPPER32
, &mem_base_hi
);
421 pci_read_config_dword(dev
, PCI_PREF_LIMIT_UPPER32
, &mem_limit_hi
);
424 * Some bridges set the base > limit by default, and some
425 * (broken) BIOSes do not initialize them. If we find
426 * this, just assume they are not being used.
428 if (mem_base_hi
<= mem_limit_hi
) {
429 #if BITS_PER_LONG == 64
430 base
|= ((unsigned long) mem_base_hi
) << 32;
431 limit
|= ((unsigned long) mem_limit_hi
) << 32;
433 if (mem_base_hi
|| mem_limit_hi
) {
434 dev_err(&dev
->dev
, "can't handle 64-bit "
435 "address space for bridge\n");
442 res
->flags
= (mem_base_lo
& PCI_PREF_RANGE_TYPE_MASK
) |
443 IORESOURCE_MEM
| IORESOURCE_PREFETCH
;
444 if (res
->flags
& PCI_PREF_RANGE_TYPE_64
)
445 res
->flags
|= IORESOURCE_MEM_64
;
447 region
.end
= limit
+ 0xfffff;
448 pcibios_bus_to_resource(dev
->bus
, res
, ®ion
);
449 dev_printk(KERN_DEBUG
, &dev
->dev
, " bridge window %pR\n", res
);
453 void pci_read_bridge_bases(struct pci_bus
*child
)
455 struct pci_dev
*dev
= child
->self
;
456 struct resource
*res
;
459 if (pci_is_root_bus(child
)) /* It's a host bus, nothing to read */
462 dev_info(&dev
->dev
, "PCI bridge to %pR%s\n",
464 dev
->transparent
? " (subtractive decode)" : "");
466 pci_bus_remove_resources(child
);
467 for (i
= 0; i
< PCI_BRIDGE_RESOURCE_NUM
; i
++)
468 child
->resource
[i
] = &dev
->resource
[PCI_BRIDGE_RESOURCES
+i
];
470 pci_read_bridge_io(child
);
471 pci_read_bridge_mmio(child
);
472 pci_read_bridge_mmio_pref(child
);
474 if (dev
->transparent
) {
475 pci_bus_for_each_resource(child
->parent
, res
, i
) {
477 pci_bus_add_resource(child
, res
,
478 PCI_SUBTRACTIVE_DECODE
);
479 dev_printk(KERN_DEBUG
, &dev
->dev
,
480 " bridge window %pR (subtractive decode)\n",
487 static struct pci_bus
*pci_alloc_bus(void)
491 b
= kzalloc(sizeof(*b
), GFP_KERNEL
);
495 INIT_LIST_HEAD(&b
->node
);
496 INIT_LIST_HEAD(&b
->children
);
497 INIT_LIST_HEAD(&b
->devices
);
498 INIT_LIST_HEAD(&b
->slots
);
499 INIT_LIST_HEAD(&b
->resources
);
500 b
->max_bus_speed
= PCI_SPEED_UNKNOWN
;
501 b
->cur_bus_speed
= PCI_SPEED_UNKNOWN
;
505 static void pci_release_host_bridge_dev(struct device
*dev
)
507 struct pci_host_bridge
*bridge
= to_pci_host_bridge(dev
);
509 if (bridge
->release_fn
)
510 bridge
->release_fn(bridge
);
512 pci_free_resource_list(&bridge
->windows
);
517 static struct pci_host_bridge
*pci_alloc_host_bridge(struct pci_bus
*b
)
519 struct pci_host_bridge
*bridge
;
521 bridge
= kzalloc(sizeof(*bridge
), GFP_KERNEL
);
525 INIT_LIST_HEAD(&bridge
->windows
);
530 static const unsigned char pcix_bus_speed
[] = {
531 PCI_SPEED_UNKNOWN
, /* 0 */
532 PCI_SPEED_66MHz_PCIX
, /* 1 */
533 PCI_SPEED_100MHz_PCIX
, /* 2 */
534 PCI_SPEED_133MHz_PCIX
, /* 3 */
535 PCI_SPEED_UNKNOWN
, /* 4 */
536 PCI_SPEED_66MHz_PCIX_ECC
, /* 5 */
537 PCI_SPEED_100MHz_PCIX_ECC
, /* 6 */
538 PCI_SPEED_133MHz_PCIX_ECC
, /* 7 */
539 PCI_SPEED_UNKNOWN
, /* 8 */
540 PCI_SPEED_66MHz_PCIX_266
, /* 9 */
541 PCI_SPEED_100MHz_PCIX_266
, /* A */
542 PCI_SPEED_133MHz_PCIX_266
, /* B */
543 PCI_SPEED_UNKNOWN
, /* C */
544 PCI_SPEED_66MHz_PCIX_533
, /* D */
545 PCI_SPEED_100MHz_PCIX_533
, /* E */
546 PCI_SPEED_133MHz_PCIX_533
/* F */
549 const unsigned char pcie_link_speed
[] = {
550 PCI_SPEED_UNKNOWN
, /* 0 */
551 PCIE_SPEED_2_5GT
, /* 1 */
552 PCIE_SPEED_5_0GT
, /* 2 */
553 PCIE_SPEED_8_0GT
, /* 3 */
554 PCI_SPEED_UNKNOWN
, /* 4 */
555 PCI_SPEED_UNKNOWN
, /* 5 */
556 PCI_SPEED_UNKNOWN
, /* 6 */
557 PCI_SPEED_UNKNOWN
, /* 7 */
558 PCI_SPEED_UNKNOWN
, /* 8 */
559 PCI_SPEED_UNKNOWN
, /* 9 */
560 PCI_SPEED_UNKNOWN
, /* A */
561 PCI_SPEED_UNKNOWN
, /* B */
562 PCI_SPEED_UNKNOWN
, /* C */
563 PCI_SPEED_UNKNOWN
, /* D */
564 PCI_SPEED_UNKNOWN
, /* E */
565 PCI_SPEED_UNKNOWN
/* F */
568 void pcie_update_link_speed(struct pci_bus
*bus
, u16 linksta
)
570 bus
->cur_bus_speed
= pcie_link_speed
[linksta
& PCI_EXP_LNKSTA_CLS
];
572 EXPORT_SYMBOL_GPL(pcie_update_link_speed
);
574 static unsigned char agp_speeds
[] = {
582 static enum pci_bus_speed
agp_speed(int agp3
, int agpstat
)
588 else if (agpstat
& 2)
590 else if (agpstat
& 1)
602 return agp_speeds
[index
];
606 static void pci_set_bus_speed(struct pci_bus
*bus
)
608 struct pci_dev
*bridge
= bus
->self
;
611 pos
= pci_find_capability(bridge
, PCI_CAP_ID_AGP
);
613 pos
= pci_find_capability(bridge
, PCI_CAP_ID_AGP3
);
617 pci_read_config_dword(bridge
, pos
+ PCI_AGP_STATUS
, &agpstat
);
618 bus
->max_bus_speed
= agp_speed(agpstat
& 8, agpstat
& 7);
620 pci_read_config_dword(bridge
, pos
+ PCI_AGP_COMMAND
, &agpcmd
);
621 bus
->cur_bus_speed
= agp_speed(agpstat
& 8, agpcmd
& 7);
624 pos
= pci_find_capability(bridge
, PCI_CAP_ID_PCIX
);
627 enum pci_bus_speed max
;
629 pci_read_config_word(bridge
, pos
+ PCI_X_BRIDGE_SSTATUS
,
632 if (status
& PCI_X_SSTATUS_533MHZ
) {
633 max
= PCI_SPEED_133MHz_PCIX_533
;
634 } else if (status
& PCI_X_SSTATUS_266MHZ
) {
635 max
= PCI_SPEED_133MHz_PCIX_266
;
636 } else if (status
& PCI_X_SSTATUS_133MHZ
) {
637 if ((status
& PCI_X_SSTATUS_VERS
) == PCI_X_SSTATUS_V2
) {
638 max
= PCI_SPEED_133MHz_PCIX_ECC
;
640 max
= PCI_SPEED_133MHz_PCIX
;
643 max
= PCI_SPEED_66MHz_PCIX
;
646 bus
->max_bus_speed
= max
;
647 bus
->cur_bus_speed
= pcix_bus_speed
[
648 (status
& PCI_X_SSTATUS_FREQ
) >> 6];
653 if (pci_is_pcie(bridge
)) {
657 pcie_capability_read_dword(bridge
, PCI_EXP_LNKCAP
, &linkcap
);
658 bus
->max_bus_speed
= pcie_link_speed
[linkcap
& PCI_EXP_LNKCAP_SLS
];
660 pcie_capability_read_word(bridge
, PCI_EXP_LNKSTA
, &linksta
);
661 pcie_update_link_speed(bus
, linksta
);
666 static struct pci_bus
*pci_alloc_child_bus(struct pci_bus
*parent
,
667 struct pci_dev
*bridge
, int busnr
)
669 struct pci_bus
*child
;
674 * Allocate a new bus, and inherit stuff from the parent..
676 child
= pci_alloc_bus();
680 child
->parent
= parent
;
681 child
->ops
= parent
->ops
;
682 child
->msi
= parent
->msi
;
683 child
->sysdata
= parent
->sysdata
;
684 child
->bus_flags
= parent
->bus_flags
;
686 /* initialize some portions of the bus device, but don't register it
687 * now as the parent is not properly set up yet.
689 child
->dev
.class = &pcibus_class
;
690 dev_set_name(&child
->dev
, "%04x:%02x", pci_domain_nr(child
), busnr
);
693 * Set up the primary, secondary and subordinate
696 child
->number
= child
->busn_res
.start
= busnr
;
697 child
->primary
= parent
->busn_res
.start
;
698 child
->busn_res
.end
= 0xff;
701 child
->dev
.parent
= parent
->bridge
;
705 child
->self
= bridge
;
706 child
->bridge
= get_device(&bridge
->dev
);
707 child
->dev
.parent
= child
->bridge
;
708 pci_set_bus_of_node(child
);
709 pci_set_bus_speed(child
);
711 /* Set up default resource pointers and names.. */
712 for (i
= 0; i
< PCI_BRIDGE_RESOURCE_NUM
; i
++) {
713 child
->resource
[i
] = &bridge
->resource
[PCI_BRIDGE_RESOURCES
+i
];
714 child
->resource
[i
]->name
= child
->name
;
716 bridge
->subordinate
= child
;
719 ret
= device_register(&child
->dev
);
722 pcibios_add_bus(child
);
724 /* Create legacy_io and legacy_mem files for this bus */
725 pci_create_legacy_files(child
);
730 struct pci_bus
*__ref
pci_add_new_bus(struct pci_bus
*parent
, struct pci_dev
*dev
, int busnr
)
732 struct pci_bus
*child
;
734 child
= pci_alloc_child_bus(parent
, dev
, busnr
);
736 down_write(&pci_bus_sem
);
737 list_add_tail(&child
->node
, &parent
->children
);
738 up_write(&pci_bus_sem
);
744 * If it's a bridge, configure it and scan the bus behind it.
745 * For CardBus bridges, we don't scan behind as the devices will
746 * be handled by the bridge driver itself.
748 * We need to process bridges in two passes -- first we scan those
749 * already configured by the BIOS and after we are done with all of
750 * them, we proceed to assigning numbers to the remaining buses in
751 * order to avoid overlaps between old and new bus numbers.
753 int pci_scan_bridge(struct pci_bus
*bus
, struct pci_dev
*dev
, int max
, int pass
)
755 struct pci_bus
*child
;
756 int is_cardbus
= (dev
->hdr_type
== PCI_HEADER_TYPE_CARDBUS
);
759 u8 primary
, secondary
, subordinate
;
762 pci_read_config_dword(dev
, PCI_PRIMARY_BUS
, &buses
);
763 primary
= buses
& 0xFF;
764 secondary
= (buses
>> 8) & 0xFF;
765 subordinate
= (buses
>> 16) & 0xFF;
767 dev_dbg(&dev
->dev
, "scanning [bus %02x-%02x] behind bridge, pass %d\n",
768 secondary
, subordinate
, pass
);
770 if (!primary
&& (primary
!= bus
->number
) && secondary
&& subordinate
) {
771 dev_warn(&dev
->dev
, "Primary bus is hard wired to 0\n");
772 primary
= bus
->number
;
775 /* Check if setup is sensible at all */
777 (primary
!= bus
->number
|| secondary
<= bus
->number
||
778 secondary
> subordinate
|| subordinate
> bus
->busn_res
.end
)) {
779 dev_info(&dev
->dev
, "bridge configuration invalid ([bus %02x-%02x]), reconfiguring\n",
780 secondary
, subordinate
);
784 /* Disable MasterAbortMode during probing to avoid reporting
785 of bus errors (in some architectures) */
786 pci_read_config_word(dev
, PCI_BRIDGE_CONTROL
, &bctl
);
787 pci_write_config_word(dev
, PCI_BRIDGE_CONTROL
,
788 bctl
& ~PCI_BRIDGE_CTL_MASTER_ABORT
);
790 if ((secondary
|| subordinate
) && !pcibios_assign_all_busses() &&
791 !is_cardbus
&& !broken
) {
794 * Bus already configured by firmware, process it in the first
795 * pass and just note the configuration.
801 * The bus might already exist for two reasons: Either we are
802 * rescanning the bus or the bus is reachable through more than
803 * one bridge. The second case can happen with the i450NX
806 child
= pci_find_bus(pci_domain_nr(bus
), secondary
);
808 child
= pci_add_new_bus(bus
, dev
, secondary
);
811 child
->primary
= primary
;
812 pci_bus_insert_busn_res(child
, secondary
, subordinate
);
813 child
->bridge_ctl
= bctl
;
816 cmax
= pci_scan_child_bus(child
);
817 if (cmax
> subordinate
)
818 dev_warn(&dev
->dev
, "bridge has subordinate %02x but max busn %02x\n",
820 /* subordinate should equal child->busn_res.end */
821 if (subordinate
> max
)
825 * We need to assign a number to this bus which we always
826 * do in the second pass.
829 if (pcibios_assign_all_busses() || broken
|| is_cardbus
)
830 /* Temporarily disable forwarding of the
831 configuration cycles on all bridges in
832 this bus segment to avoid possible
833 conflicts in the second pass between two
834 bridges programmed with overlapping
836 pci_write_config_dword(dev
, PCI_PRIMARY_BUS
,
841 if (max
>= bus
->busn_res
.end
) {
842 dev_warn(&dev
->dev
, "can't allocate child bus %02x from %pR\n",
843 max
, &bus
->busn_res
);
848 pci_write_config_word(dev
, PCI_STATUS
, 0xffff);
850 /* The bus will already exist if we are rescanning */
851 child
= pci_find_bus(pci_domain_nr(bus
), max
+1);
853 child
= pci_add_new_bus(bus
, dev
, max
+1);
856 pci_bus_insert_busn_res(child
, max
+1,
860 buses
= (buses
& 0xff000000)
861 | ((unsigned int)(child
->primary
) << 0)
862 | ((unsigned int)(child
->busn_res
.start
) << 8)
863 | ((unsigned int)(child
->busn_res
.end
) << 16);
866 * yenta.c forces a secondary latency timer of 176.
867 * Copy that behaviour here.
870 buses
&= ~0xff000000;
871 buses
|= CARDBUS_LATENCY_TIMER
<< 24;
875 * We need to blast all three values with a single write.
877 pci_write_config_dword(dev
, PCI_PRIMARY_BUS
, buses
);
880 child
->bridge_ctl
= bctl
;
881 max
= pci_scan_child_bus(child
);
884 * For CardBus bridges, we leave 4 bus numbers
885 * as cards with a PCI-to-PCI bridge can be
888 for (i
=0; i
<CARDBUS_RESERVE_BUSNR
; i
++) {
889 struct pci_bus
*parent
= bus
;
890 if (pci_find_bus(pci_domain_nr(bus
),
893 while (parent
->parent
) {
894 if ((!pcibios_assign_all_busses()) &&
895 (parent
->busn_res
.end
> max
) &&
896 (parent
->busn_res
.end
<= max
+i
)) {
899 parent
= parent
->parent
;
903 * Often, there are two cardbus bridges
904 * -- try to leave one valid bus number
914 * Set the subordinate bus number to its real value.
916 if (max
> bus
->busn_res
.end
) {
917 dev_warn(&dev
->dev
, "max busn %02x is outside %pR\n",
918 max
, &bus
->busn_res
);
919 max
= bus
->busn_res
.end
;
921 pci_bus_update_busn_res_end(child
, max
);
922 pci_write_config_byte(dev
, PCI_SUBORDINATE_BUS
, max
);
926 (is_cardbus
? "PCI CardBus %04x:%02x" : "PCI Bus %04x:%02x"),
927 pci_domain_nr(bus
), child
->number
);
929 /* Has only triggered on CardBus, fixup is in yenta_socket */
930 while (bus
->parent
) {
931 if ((child
->busn_res
.end
> bus
->busn_res
.end
) ||
932 (child
->number
> bus
->busn_res
.end
) ||
933 (child
->number
< bus
->number
) ||
934 (child
->busn_res
.end
< bus
->number
)) {
935 dev_info(&child
->dev
, "%pR %s "
936 "hidden behind%s bridge %s %pR\n",
938 (bus
->number
> child
->busn_res
.end
&&
939 bus
->busn_res
.end
< child
->number
) ?
940 "wholly" : "partially",
941 bus
->self
->transparent
? " transparent" : "",
949 pci_write_config_word(dev
, PCI_BRIDGE_CONTROL
, bctl
);
955 * Read interrupt line and base address registers.
956 * The architecture-dependent code can tweak these, of course.
958 static void pci_read_irq(struct pci_dev
*dev
)
962 pci_read_config_byte(dev
, PCI_INTERRUPT_PIN
, &irq
);
965 pci_read_config_byte(dev
, PCI_INTERRUPT_LINE
, &irq
);
969 void set_pcie_port_type(struct pci_dev
*pdev
)
974 pos
= pci_find_capability(pdev
, PCI_CAP_ID_EXP
);
977 pdev
->pcie_cap
= pos
;
978 pci_read_config_word(pdev
, pos
+ PCI_EXP_FLAGS
, ®16
);
979 pdev
->pcie_flags_reg
= reg16
;
980 pci_read_config_word(pdev
, pos
+ PCI_EXP_DEVCAP
, ®16
);
981 pdev
->pcie_mpss
= reg16
& PCI_EXP_DEVCAP_PAYLOAD
;
984 void set_pcie_hotplug_bridge(struct pci_dev
*pdev
)
988 pcie_capability_read_dword(pdev
, PCI_EXP_SLTCAP
, ®32
);
989 if (reg32
& PCI_EXP_SLTCAP_HPC
)
990 pdev
->is_hotplug_bridge
= 1;
995 * pci_cfg_space_size - get the configuration space size of the PCI device.
998 * Regular PCI devices have 256 bytes, but PCI-X 2 and PCI Express devices
999 * have 4096 bytes. Even if the device is capable, that doesn't mean we can
1000 * access it. Maybe we don't have a way to generate extended config space
1001 * accesses, or the device is behind a reverse Express bridge. So we try
1002 * reading the dword at 0x100 which must either be 0 or a valid extended
1003 * capability header.
1005 static int pci_cfg_space_size_ext(struct pci_dev
*dev
)
1008 int pos
= PCI_CFG_SPACE_SIZE
;
1010 if (pci_read_config_dword(dev
, pos
, &status
) != PCIBIOS_SUCCESSFUL
)
1012 if (status
== 0xffffffff)
1015 return PCI_CFG_SPACE_EXP_SIZE
;
1018 return PCI_CFG_SPACE_SIZE
;
1021 int pci_cfg_space_size(struct pci_dev
*dev
)
1027 class = dev
->class >> 8;
1028 if (class == PCI_CLASS_BRIDGE_HOST
)
1029 return pci_cfg_space_size_ext(dev
);
1031 if (!pci_is_pcie(dev
)) {
1032 pos
= pci_find_capability(dev
, PCI_CAP_ID_PCIX
);
1036 pci_read_config_dword(dev
, pos
+ PCI_X_STATUS
, &status
);
1037 if (!(status
& (PCI_X_STATUS_266MHZ
| PCI_X_STATUS_533MHZ
)))
1041 return pci_cfg_space_size_ext(dev
);
1044 return PCI_CFG_SPACE_SIZE
;
1047 #define LEGACY_IO_RESOURCE (IORESOURCE_IO | IORESOURCE_PCI_FIXED)
1050 * pci_setup_device - fill in class and map information of a device
1051 * @dev: the device structure to fill
1053 * Initialize the device structure with information about the device's
1054 * vendor,class,memory and IO-space addresses,IRQ lines etc.
1055 * Called at initialisation of the PCI subsystem and by CardBus services.
1056 * Returns 0 on success and negative if unknown type of device (not normal,
1057 * bridge or CardBus).
1059 int pci_setup_device(struct pci_dev
*dev
)
1063 struct pci_slot
*slot
;
1065 struct pci_bus_region region
;
1066 struct resource
*res
;
1068 if (pci_read_config_byte(dev
, PCI_HEADER_TYPE
, &hdr_type
))
1071 dev
->sysdata
= dev
->bus
->sysdata
;
1072 dev
->dev
.parent
= dev
->bus
->bridge
;
1073 dev
->dev
.bus
= &pci_bus_type
;
1074 dev
->hdr_type
= hdr_type
& 0x7f;
1075 dev
->multifunction
= !!(hdr_type
& 0x80);
1076 dev
->error_state
= pci_channel_io_normal
;
1077 set_pcie_port_type(dev
);
1079 list_for_each_entry(slot
, &dev
->bus
->slots
, list
)
1080 if (PCI_SLOT(dev
->devfn
) == slot
->number
)
1083 /* Assume 32-bit PCI; let 64-bit PCI cards (which are far rarer)
1084 set this higher, assuming the system even supports it. */
1085 dev
->dma_mask
= 0xffffffff;
1087 dev_set_name(&dev
->dev
, "%04x:%02x:%02x.%d", pci_domain_nr(dev
->bus
),
1088 dev
->bus
->number
, PCI_SLOT(dev
->devfn
),
1089 PCI_FUNC(dev
->devfn
));
1091 pci_read_config_dword(dev
, PCI_CLASS_REVISION
, &class);
1092 dev
->revision
= class & 0xff;
1093 dev
->class = class >> 8; /* upper 3 bytes */
1095 dev_printk(KERN_DEBUG
, &dev
->dev
, "[%04x:%04x] type %02x class %#08x\n",
1096 dev
->vendor
, dev
->device
, dev
->hdr_type
, dev
->class);
1098 /* need to have dev->class ready */
1099 dev
->cfg_size
= pci_cfg_space_size(dev
);
1101 /* "Unknown power state" */
1102 dev
->current_state
= PCI_UNKNOWN
;
1104 /* Early fixups, before probing the BARs */
1105 pci_fixup_device(pci_fixup_early
, dev
);
1106 /* device class may be changed after fixup */
1107 class = dev
->class >> 8;
1109 switch (dev
->hdr_type
) { /* header type */
1110 case PCI_HEADER_TYPE_NORMAL
: /* standard header */
1111 if (class == PCI_CLASS_BRIDGE_PCI
)
1114 pci_read_bases(dev
, 6, PCI_ROM_ADDRESS
);
1115 pci_read_config_word(dev
, PCI_SUBSYSTEM_VENDOR_ID
, &dev
->subsystem_vendor
);
1116 pci_read_config_word(dev
, PCI_SUBSYSTEM_ID
, &dev
->subsystem_device
);
1119 * Do the ugly legacy mode stuff here rather than broken chip
1120 * quirk code. Legacy mode ATA controllers have fixed
1121 * addresses. These are not always echoed in BAR0-3, and
1122 * BAR0-3 in a few cases contain junk!
1124 if (class == PCI_CLASS_STORAGE_IDE
) {
1126 pci_read_config_byte(dev
, PCI_CLASS_PROG
, &progif
);
1127 if ((progif
& 1) == 0) {
1128 region
.start
= 0x1F0;
1130 res
= &dev
->resource
[0];
1131 res
->flags
= LEGACY_IO_RESOURCE
;
1132 pcibios_bus_to_resource(dev
->bus
, res
, ®ion
);
1133 dev_info(&dev
->dev
, "legacy IDE quirk: reg 0x10: %pR\n",
1135 region
.start
= 0x3F6;
1137 res
= &dev
->resource
[1];
1138 res
->flags
= LEGACY_IO_RESOURCE
;
1139 pcibios_bus_to_resource(dev
->bus
, res
, ®ion
);
1140 dev_info(&dev
->dev
, "legacy IDE quirk: reg 0x14: %pR\n",
1143 if ((progif
& 4) == 0) {
1144 region
.start
= 0x170;
1146 res
= &dev
->resource
[2];
1147 res
->flags
= LEGACY_IO_RESOURCE
;
1148 pcibios_bus_to_resource(dev
->bus
, res
, ®ion
);
1149 dev_info(&dev
->dev
, "legacy IDE quirk: reg 0x18: %pR\n",
1151 region
.start
= 0x376;
1153 res
= &dev
->resource
[3];
1154 res
->flags
= LEGACY_IO_RESOURCE
;
1155 pcibios_bus_to_resource(dev
->bus
, res
, ®ion
);
1156 dev_info(&dev
->dev
, "legacy IDE quirk: reg 0x1c: %pR\n",
1162 case PCI_HEADER_TYPE_BRIDGE
: /* bridge header */
1163 if (class != PCI_CLASS_BRIDGE_PCI
)
1165 /* The PCI-to-PCI bridge spec requires that subtractive
1166 decoding (i.e. transparent) bridge must have programming
1167 interface code of 0x01. */
1169 dev
->transparent
= ((dev
->class & 0xff) == 1);
1170 pci_read_bases(dev
, 2, PCI_ROM_ADDRESS1
);
1171 set_pcie_hotplug_bridge(dev
);
1172 pos
= pci_find_capability(dev
, PCI_CAP_ID_SSVID
);
1174 pci_read_config_word(dev
, pos
+ PCI_SSVID_VENDOR_ID
, &dev
->subsystem_vendor
);
1175 pci_read_config_word(dev
, pos
+ PCI_SSVID_DEVICE_ID
, &dev
->subsystem_device
);
1179 case PCI_HEADER_TYPE_CARDBUS
: /* CardBus bridge header */
1180 if (class != PCI_CLASS_BRIDGE_CARDBUS
)
1183 pci_read_bases(dev
, 1, 0);
1184 pci_read_config_word(dev
, PCI_CB_SUBSYSTEM_VENDOR_ID
, &dev
->subsystem_vendor
);
1185 pci_read_config_word(dev
, PCI_CB_SUBSYSTEM_ID
, &dev
->subsystem_device
);
1188 default: /* unknown header */
1189 dev_err(&dev
->dev
, "unknown header type %02x, "
1190 "ignoring device\n", dev
->hdr_type
);
1194 dev_err(&dev
->dev
, "ignoring class %#08x (doesn't match header "
1195 "type %02x)\n", dev
->class, dev
->hdr_type
);
1196 dev
->class = PCI_CLASS_NOT_DEFINED
;
1199 /* We found a fine healthy device, go go go... */
/*
 * Release the per-device capability state: VPD data, SR-IOV state and
 * the buffers used to save PCIe/PCI-X capability registers.
 */
static void pci_release_capabilities(struct pci_dev *dev)
{
	pci_vpd_release(dev);
	pci_iov_release(dev);
	pci_free_cap_save_buffers(dev);
}
1211 * pci_release_dev - free a pci device structure when all users of it are finished.
1212 * @dev: device that's been disconnected
1214 * Will be called only by the device core when all users of this pci device are
1217 static void pci_release_dev(struct device
*dev
)
1219 struct pci_dev
*pci_dev
;
1221 pci_dev
= to_pci_dev(dev
);
1222 pci_release_capabilities(pci_dev
);
1223 pci_release_of_node(pci_dev
);
1224 pcibios_release_device(pci_dev
);
1225 pci_bus_put(pci_dev
->bus
);
1229 struct pci_dev
*pci_alloc_dev(struct pci_bus
*bus
)
1231 struct pci_dev
*dev
;
1233 dev
= kzalloc(sizeof(struct pci_dev
), GFP_KERNEL
);
1237 INIT_LIST_HEAD(&dev
->bus_list
);
1238 dev
->dev
.type
= &pci_dev_type
;
1239 dev
->bus
= pci_bus_get(bus
);
1243 EXPORT_SYMBOL(pci_alloc_dev
);
1245 bool pci_bus_read_dev_vendor_id(struct pci_bus
*bus
, int devfn
, u32
*l
,
1250 if (pci_bus_read_config_dword(bus
, devfn
, PCI_VENDOR_ID
, l
))
1253 /* some broken boards return 0 or ~0 if a slot is empty: */
1254 if (*l
== 0xffffffff || *l
== 0x00000000 ||
1255 *l
== 0x0000ffff || *l
== 0xffff0000)
1258 /* Configuration request Retry Status */
1259 while (*l
== 0xffff0001) {
1265 if (pci_bus_read_config_dword(bus
, devfn
, PCI_VENDOR_ID
, l
))
1267 /* Card hasn't responded in 60 seconds? Must be stuck. */
1268 if (delay
> crs_timeout
) {
1269 printk(KERN_WARNING
"pci %04x:%02x:%02x.%d: not "
1270 "responding\n", pci_domain_nr(bus
),
1271 bus
->number
, PCI_SLOT(devfn
),
1279 EXPORT_SYMBOL(pci_bus_read_dev_vendor_id
);
1282 * Read the config data for a PCI device, sanity-check it
1283 * and fill in the dev structure...
1285 static struct pci_dev
*pci_scan_device(struct pci_bus
*bus
, int devfn
)
1287 struct pci_dev
*dev
;
1290 if (!pci_bus_read_dev_vendor_id(bus
, devfn
, &l
, 60*1000))
1293 dev
= pci_alloc_dev(bus
);
1298 dev
->vendor
= l
& 0xffff;
1299 dev
->device
= (l
>> 16) & 0xffff;
1301 pci_set_of_node(dev
);
1303 if (pci_setup_device(dev
)) {
1304 pci_bus_put(dev
->bus
);
/* Initialize the capability-dependent subsystems for a freshly scanned @dev. */
static void pci_init_capabilities(struct pci_dev *dev)
{
	/* MSI/MSI-X list */
	pci_msi_init_pci_dev(dev);

	/* Buffers for saving PCIe and PCI-X capabilities */
	pci_allocate_cap_save_buffers(dev);

	/* Power Management */
	pci_pm_init(dev);

	/* Vital Product Data */
	pci_vpd_pci22_init(dev);

	/* Alternative Routing-ID Forwarding */
	pci_configure_ari(dev);

	/* Single Root I/O Virtualization */
	pci_iov_init(dev);

	/* Enable ACS P2P upstream forwarding */
	pci_enable_acs(dev);
}
1336 void pci_device_add(struct pci_dev
*dev
, struct pci_bus
*bus
)
1340 device_initialize(&dev
->dev
);
1341 dev
->dev
.release
= pci_release_dev
;
1343 set_dev_node(&dev
->dev
, pcibus_to_node(bus
));
1344 dev
->dev
.dma_mask
= &dev
->dma_mask
;
1345 dev
->dev
.dma_parms
= &dev
->dma_parms
;
1346 dev
->dev
.coherent_dma_mask
= 0xffffffffull
;
1348 pci_set_dma_max_seg_size(dev
, 65536);
1349 pci_set_dma_seg_boundary(dev
, 0xffffffff);
1351 /* Fix up broken headers */
1352 pci_fixup_device(pci_fixup_header
, dev
);
1354 /* moved out from quirk header fixup code */
1355 pci_reassigndev_resource_alignment(dev
);
1357 /* Clear the state_saved flag. */
1358 dev
->state_saved
= false;
1360 /* Initialize various capabilities */
1361 pci_init_capabilities(dev
);
1364 * Add the device to our list of discovered devices
1365 * and the bus list for fixup functions, etc.
1367 down_write(&pci_bus_sem
);
1368 list_add_tail(&dev
->bus_list
, &bus
->devices
);
1369 up_write(&pci_bus_sem
);
1371 ret
= pcibios_add_device(dev
);
1374 /* Notifier could use PCI capabilities */
1375 dev
->match_driver
= false;
1376 ret
= device_add(&dev
->dev
);
1380 struct pci_dev
*__ref
pci_scan_single_device(struct pci_bus
*bus
, int devfn
)
1382 struct pci_dev
*dev
;
1384 dev
= pci_get_slot(bus
, devfn
);
1390 dev
= pci_scan_device(bus
, devfn
);
1394 pci_device_add(dev
, bus
);
1398 EXPORT_SYMBOL(pci_scan_single_device
);
1400 static unsigned next_fn(struct pci_bus
*bus
, struct pci_dev
*dev
, unsigned fn
)
1406 if (pci_ari_enabled(bus
)) {
1409 pos
= pci_find_ext_capability(dev
, PCI_EXT_CAP_ID_ARI
);
1413 pci_read_config_word(dev
, pos
+ PCI_ARI_CAP
, &cap
);
1414 next_fn
= PCI_ARI_CAP_NFN(cap
);
1416 return 0; /* protect against malformed list */
1421 /* dev may be NULL for non-contiguous multifunction devices */
1422 if (!dev
|| dev
->multifunction
)
1423 return (fn
+ 1) % 8;
1428 static int only_one_child(struct pci_bus
*bus
)
1430 struct pci_dev
*parent
= bus
->self
;
1432 if (!parent
|| !pci_is_pcie(parent
))
1434 if (pci_pcie_type(parent
) == PCI_EXP_TYPE_ROOT_PORT
)
1436 if (pci_pcie_type(parent
) == PCI_EXP_TYPE_DOWNSTREAM
&&
1437 !pci_has_flag(PCI_SCAN_ALL_PCIE_DEVS
))
1443 * pci_scan_slot - scan a PCI slot on a bus for devices.
1444 * @bus: PCI bus to scan
1445 * @devfn: slot number to scan (must have zero function.)
1447 * Scan a PCI slot on the specified PCI bus for devices, adding
1448 * discovered devices to the @bus->devices list. New devices
1449 * will not have is_added set.
1451 * Returns the number of new devices found.
1453 int pci_scan_slot(struct pci_bus
*bus
, int devfn
)
1455 unsigned fn
, nr
= 0;
1456 struct pci_dev
*dev
;
1458 if (only_one_child(bus
) && (devfn
> 0))
1459 return 0; /* Already scanned the entire slot */
1461 dev
= pci_scan_single_device(bus
, devfn
);
1467 for (fn
= next_fn(bus
, dev
, 0); fn
> 0; fn
= next_fn(bus
, dev
, fn
)) {
1468 dev
= pci_scan_single_device(bus
, devfn
+ fn
);
1472 dev
->multifunction
= 1;
1476 /* only one slot has pcie device */
1477 if (bus
->self
&& nr
)
1478 pcie_aspm_init_link_state(bus
->self
);
1483 static int pcie_find_smpss(struct pci_dev
*dev
, void *data
)
1487 if (!pci_is_pcie(dev
))
1491 * We don't have a way to change MPS settings on devices that have
1492 * drivers attached. A hot-added device might support only the minimum
1493 * MPS setting (MPS=128). Therefore, if the fabric contains a bridge
1494 * where devices may be hot-added, we limit the fabric MPS to 128 so
1495 * hot-added devices will work correctly.
1497 * However, if we hot-add a device to a slot directly below a Root
1498 * Port, it's impossible for there to be other existing devices below
1499 * the port. We don't limit the MPS in this case because we can
1500 * reconfigure MPS on both the Root Port and the hot-added device,
1501 * and there are no other devices involved.
1503 * Note that this PCIE_BUS_SAFE path assumes no peer-to-peer DMA.
1505 if (dev
->is_hotplug_bridge
&&
1506 pci_pcie_type(dev
) != PCI_EXP_TYPE_ROOT_PORT
)
1509 if (*smpss
> dev
->pcie_mpss
)
1510 *smpss
= dev
->pcie_mpss
;
1515 static void pcie_write_mps(struct pci_dev
*dev
, int mps
)
1519 if (pcie_bus_config
== PCIE_BUS_PERFORMANCE
) {
1520 mps
= 128 << dev
->pcie_mpss
;
1522 if (pci_pcie_type(dev
) != PCI_EXP_TYPE_ROOT_PORT
&&
1524 /* For "Performance", the assumption is made that
1525 * downstream communication will never be larger than
1526 * the MRRS. So, the MPS only needs to be configured
1527 * for the upstream communication. This being the case,
1528 * walk from the top down and set the MPS of the child
1529 * to that of the parent bus.
1531 * Configure the device MPS with the smaller of the
1532 * device MPSS or the bridge MPS (which is assumed to be
1533 * properly configured at this point to the largest
1534 * allowable MPS based on its parent bus).
1536 mps
= min(mps
, pcie_get_mps(dev
->bus
->self
));
1539 rc
= pcie_set_mps(dev
, mps
);
1541 dev_err(&dev
->dev
, "Failed attempting to set the MPS\n");
1544 static void pcie_write_mrrs(struct pci_dev
*dev
)
1548 /* In the "safe" case, do not configure the MRRS. There appear to be
1549 * issues with setting MRRS to 0 on a number of devices.
1551 if (pcie_bus_config
!= PCIE_BUS_PERFORMANCE
)
1554 /* For Max performance, the MRRS must be set to the largest supported
1555 * value. However, it cannot be configured larger than the MPS the
1556 * device or the bus can support. This should already be properly
1557 * configured by a prior call to pcie_write_mps.
1559 mrrs
= pcie_get_mps(dev
);
1561 /* MRRS is a R/W register. Invalid values can be written, but a
1562 * subsequent read will verify if the value is acceptable or not.
1563 * If the MRRS value provided is not acceptable (e.g., too large),
1564 * shrink the value until it is acceptable to the HW.
1566 while (mrrs
!= pcie_get_readrq(dev
) && mrrs
>= 128) {
1567 rc
= pcie_set_readrq(dev
, mrrs
);
1571 dev_warn(&dev
->dev
, "Failed attempting to set the MRRS\n");
1576 dev_err(&dev
->dev
, "MRRS was unable to be configured with a "
1577 "safe value. If problems are experienced, try running "
1578 "with pci=pcie_bus_safe.\n");
1581 static void pcie_bus_detect_mps(struct pci_dev
*dev
)
1583 struct pci_dev
*bridge
= dev
->bus
->self
;
1589 mps
= pcie_get_mps(dev
);
1590 p_mps
= pcie_get_mps(bridge
);
1593 dev_warn(&dev
->dev
, "Max Payload Size %d, but upstream %s set to %d; if necessary, use \"pci=pcie_bus_safe\" and report a bug\n",
1594 mps
, pci_name(bridge
), p_mps
);
1597 static int pcie_bus_configure_set(struct pci_dev
*dev
, void *data
)
1601 if (!pci_is_pcie(dev
))
1604 if (pcie_bus_config
== PCIE_BUS_TUNE_OFF
) {
1605 pcie_bus_detect_mps(dev
);
1609 mps
= 128 << *(u8
*)data
;
1610 orig_mps
= pcie_get_mps(dev
);
1612 pcie_write_mps(dev
, mps
);
1613 pcie_write_mrrs(dev
);
1615 dev_info(&dev
->dev
, "Max Payload Size set to %4d/%4d (was %4d), "
1616 "Max Read Rq %4d\n", pcie_get_mps(dev
), 128 << dev
->pcie_mpss
,
1617 orig_mps
, pcie_get_readrq(dev
));
1622 /* pcie_bus_configure_settings requires that pci_walk_bus work in a top-down,
1623 * parents then children fashion. If this changes, then this code will not
1626 void pcie_bus_configure_settings(struct pci_bus
*bus
)
1633 if (!pci_is_pcie(bus
->self
))
1636 /* FIXME - Peer to peer DMA is possible, though the endpoint would need
1637 * to be aware of the MPS of the destination. To work around this,
1638 * simply force the MPS of the entire system to the smallest possible.
1640 if (pcie_bus_config
== PCIE_BUS_PEER2PEER
)
1643 if (pcie_bus_config
== PCIE_BUS_SAFE
) {
1644 smpss
= bus
->self
->pcie_mpss
;
1646 pcie_find_smpss(bus
->self
, &smpss
);
1647 pci_walk_bus(bus
, pcie_find_smpss
, &smpss
);
1650 pcie_bus_configure_set(bus
->self
, &smpss
);
1651 pci_walk_bus(bus
, pcie_bus_configure_set
, &smpss
);
1653 EXPORT_SYMBOL_GPL(pcie_bus_configure_settings
);
1655 unsigned int pci_scan_child_bus(struct pci_bus
*bus
)
1657 unsigned int devfn
, pass
, max
= bus
->busn_res
.start
;
1658 struct pci_dev
*dev
;
1660 dev_dbg(&bus
->dev
, "scanning bus\n");
1662 /* Go find them, Rover! */
1663 for (devfn
= 0; devfn
< 0x100; devfn
+= 8)
1664 pci_scan_slot(bus
, devfn
);
1666 /* Reserve buses for SR-IOV capability. */
1667 max
+= pci_iov_bus_range(bus
);
1670 * After performing arch-dependent fixup of the bus, look behind
1671 * all PCI-to-PCI bridges on this bus.
1673 if (!bus
->is_added
) {
1674 dev_dbg(&bus
->dev
, "fixups for bus\n");
1675 pcibios_fixup_bus(bus
);
1679 for (pass
=0; pass
< 2; pass
++)
1680 list_for_each_entry(dev
, &bus
->devices
, bus_list
) {
1681 if (dev
->hdr_type
== PCI_HEADER_TYPE_BRIDGE
||
1682 dev
->hdr_type
== PCI_HEADER_TYPE_CARDBUS
)
1683 max
= pci_scan_bridge(bus
, dev
, max
, pass
);
1687 * We've scanned the bus and so we know all about what's on
1688 * the other side of any bridges that may be on this bus plus
1691 * Return how far we've got finding sub-buses.
1693 dev_dbg(&bus
->dev
, "bus scan returning with max=%02x\n", max
);
1698 * pcibios_root_bridge_prepare - Platform-specific host bridge setup.
1699 * @bridge: Host bridge to set up.
1701 * Default empty implementation. Replace with an architecture-specific setup
1702 * routine, if necessary.
1704 int __weak
pcibios_root_bridge_prepare(struct pci_host_bridge
*bridge
)
1709 void __weak
pcibios_add_bus(struct pci_bus
*bus
)
1713 void __weak
pcibios_remove_bus(struct pci_bus
*bus
)
1717 struct pci_bus
*pci_create_root_bus(struct device
*parent
, int bus
,
1718 struct pci_ops
*ops
, void *sysdata
, struct list_head
*resources
)
1721 struct pci_host_bridge
*bridge
;
1722 struct pci_bus
*b
, *b2
;
1723 struct pci_host_bridge_window
*window
, *n
;
1724 struct resource
*res
;
1725 resource_size_t offset
;
1729 b
= pci_alloc_bus();
1733 b
->sysdata
= sysdata
;
1735 b
->number
= b
->busn_res
.start
= bus
;
1736 b2
= pci_find_bus(pci_domain_nr(b
), bus
);
1738 /* If we already got to this bus through a different bridge, ignore it */
1739 dev_dbg(&b2
->dev
, "bus already known\n");
1743 bridge
= pci_alloc_host_bridge(b
);
1747 bridge
->dev
.parent
= parent
;
1748 bridge
->dev
.release
= pci_release_host_bridge_dev
;
1749 dev_set_name(&bridge
->dev
, "pci%04x:%02x", pci_domain_nr(b
), bus
);
1750 error
= pcibios_root_bridge_prepare(bridge
);
1756 error
= device_register(&bridge
->dev
);
1758 put_device(&bridge
->dev
);
1761 b
->bridge
= get_device(&bridge
->dev
);
1762 device_enable_async_suspend(b
->bridge
);
1763 pci_set_bus_of_node(b
);
1766 set_dev_node(b
->bridge
, pcibus_to_node(b
));
1768 b
->dev
.class = &pcibus_class
;
1769 b
->dev
.parent
= b
->bridge
;
1770 dev_set_name(&b
->dev
, "%04x:%02x", pci_domain_nr(b
), bus
);
1771 error
= device_register(&b
->dev
);
1773 goto class_dev_reg_err
;
1777 /* Create legacy_io and legacy_mem files for this bus */
1778 pci_create_legacy_files(b
);
1781 dev_info(parent
, "PCI host bridge to bus %s\n", dev_name(&b
->dev
));
1783 printk(KERN_INFO
"PCI host bridge to bus %s\n", dev_name(&b
->dev
));
1785 /* Add initial resources to the bus */
1786 list_for_each_entry_safe(window
, n
, resources
, list
) {
1787 list_move_tail(&window
->list
, &bridge
->windows
);
1789 offset
= window
->offset
;
1790 if (res
->flags
& IORESOURCE_BUS
)
1791 pci_bus_insert_busn_res(b
, bus
, res
->end
);
1793 pci_bus_add_resource(b
, res
, 0);
1795 if (resource_type(res
) == IORESOURCE_IO
)
1796 fmt
= " (bus address [%#06llx-%#06llx])";
1798 fmt
= " (bus address [%#010llx-%#010llx])";
1799 snprintf(bus_addr
, sizeof(bus_addr
), fmt
,
1800 (unsigned long long) (res
->start
- offset
),
1801 (unsigned long long) (res
->end
- offset
));
1804 dev_info(&b
->dev
, "root bus resource %pR%s\n", res
, bus_addr
);
1807 down_write(&pci_bus_sem
);
1808 list_add_tail(&b
->node
, &pci_root_buses
);
1809 up_write(&pci_bus_sem
);
1814 put_device(&bridge
->dev
);
1815 device_unregister(&bridge
->dev
);
1821 int pci_bus_insert_busn_res(struct pci_bus
*b
, int bus
, int bus_max
)
1823 struct resource
*res
= &b
->busn_res
;
1824 struct resource
*parent_res
, *conflict
;
1828 res
->flags
= IORESOURCE_BUS
;
1830 if (!pci_is_root_bus(b
))
1831 parent_res
= &b
->parent
->busn_res
;
1833 parent_res
= get_pci_domain_busn_res(pci_domain_nr(b
));
1834 res
->flags
|= IORESOURCE_PCI_FIXED
;
1837 conflict
= request_resource_conflict(parent_res
, res
);
1840 dev_printk(KERN_DEBUG
, &b
->dev
,
1841 "busn_res: can not insert %pR under %s%pR (conflicts with %s %pR)\n",
1842 res
, pci_is_root_bus(b
) ? "domain " : "",
1843 parent_res
, conflict
->name
, conflict
);
1845 return conflict
== NULL
;
1848 int pci_bus_update_busn_res_end(struct pci_bus
*b
, int bus_max
)
1850 struct resource
*res
= &b
->busn_res
;
1851 struct resource old_res
= *res
;
1852 resource_size_t size
;
1855 if (res
->start
> bus_max
)
1858 size
= bus_max
- res
->start
+ 1;
1859 ret
= adjust_resource(res
, res
->start
, size
);
1860 dev_printk(KERN_DEBUG
, &b
->dev
,
1861 "busn_res: %pR end %s updated to %02x\n",
1862 &old_res
, ret
? "can not be" : "is", bus_max
);
1864 if (!ret
&& !res
->parent
)
1865 pci_bus_insert_busn_res(b
, res
->start
, res
->end
);
1870 void pci_bus_release_busn_res(struct pci_bus
*b
)
1872 struct resource
*res
= &b
->busn_res
;
1875 if (!res
->flags
|| !res
->parent
)
1878 ret
= release_resource(res
);
1879 dev_printk(KERN_DEBUG
, &b
->dev
,
1880 "busn_res: %pR %s released\n",
1881 res
, ret
? "can not be" : "is");
1884 struct pci_bus
*pci_scan_root_bus(struct device
*parent
, int bus
,
1885 struct pci_ops
*ops
, void *sysdata
, struct list_head
*resources
)
1887 struct pci_host_bridge_window
*window
;
1892 list_for_each_entry(window
, resources
, list
)
1893 if (window
->res
->flags
& IORESOURCE_BUS
) {
1898 b
= pci_create_root_bus(parent
, bus
, ops
, sysdata
, resources
);
1904 "No busn resource found for root bus, will use [bus %02x-ff]\n",
1906 pci_bus_insert_busn_res(b
, bus
, 255);
1909 max
= pci_scan_child_bus(b
);
1912 pci_bus_update_busn_res_end(b
, max
);
1914 pci_bus_add_devices(b
);
1917 EXPORT_SYMBOL(pci_scan_root_bus
);
1919 /* Deprecated; use pci_scan_root_bus() instead */
1920 struct pci_bus
*pci_scan_bus_parented(struct device
*parent
,
1921 int bus
, struct pci_ops
*ops
, void *sysdata
)
1923 LIST_HEAD(resources
);
1926 pci_add_resource(&resources
, &ioport_resource
);
1927 pci_add_resource(&resources
, &iomem_resource
);
1928 pci_add_resource(&resources
, &busn_resource
);
1929 b
= pci_create_root_bus(parent
, bus
, ops
, sysdata
, &resources
);
1931 pci_scan_child_bus(b
);
1933 pci_free_resource_list(&resources
);
1936 EXPORT_SYMBOL(pci_scan_bus_parented
);
1938 struct pci_bus
*pci_scan_bus(int bus
, struct pci_ops
*ops
,
1941 LIST_HEAD(resources
);
1944 pci_add_resource(&resources
, &ioport_resource
);
1945 pci_add_resource(&resources
, &iomem_resource
);
1946 pci_add_resource(&resources
, &busn_resource
);
1947 b
= pci_create_root_bus(NULL
, bus
, ops
, sysdata
, &resources
);
1949 pci_scan_child_bus(b
);
1950 pci_bus_add_devices(b
);
1952 pci_free_resource_list(&resources
);
1956 EXPORT_SYMBOL(pci_scan_bus
);
1959 * pci_rescan_bus_bridge_resize - scan a PCI bus for devices.
1960 * @bridge: PCI bridge for the bus to scan
1962 * Scan a PCI bus and child buses for new devices, add them,
1963 * and enable them, resizing bridge mmio/io resource if necessary
1964 * and possible. The caller must ensure the child devices are already
1965 * removed for resizing to occur.
1967 * Returns the max number of subordinate bus discovered.
1969 unsigned int __ref
pci_rescan_bus_bridge_resize(struct pci_dev
*bridge
)
1972 struct pci_bus
*bus
= bridge
->subordinate
;
1974 max
= pci_scan_child_bus(bus
);
1976 pci_assign_unassigned_bridge_resources(bridge
);
1978 pci_bus_add_devices(bus
);
1984 * pci_rescan_bus - scan a PCI bus for devices.
1985 * @bus: PCI bus to scan
1987 * Scan a PCI bus and child buses for new devices, adds them,
1990 * Returns the max number of subordinate bus discovered.
1992 unsigned int __ref
pci_rescan_bus(struct pci_bus
*bus
)
1996 max
= pci_scan_child_bus(bus
);
1997 pci_assign_unassigned_bus_resources(bus
);
1998 pci_bus_add_devices(bus
);
2002 EXPORT_SYMBOL_GPL(pci_rescan_bus
);
/* Symbols exported for legacy in-tree users of the probing interfaces. */
EXPORT_SYMBOL(pci_add_new_bus);
EXPORT_SYMBOL(pci_scan_slot);
EXPORT_SYMBOL(pci_scan_bridge);
EXPORT_SYMBOL_GPL(pci_scan_child_bus);
2010 * pci_rescan_bus(), pci_rescan_bus_bridge_resize() and PCI device removal
2011 * routines should always be executed under this mutex.
2013 static DEFINE_MUTEX(pci_rescan_remove_lock
);
2015 void pci_lock_rescan_remove(void)
2017 mutex_lock(&pci_rescan_remove_lock
);
2019 EXPORT_SYMBOL_GPL(pci_lock_rescan_remove
);
2021 void pci_unlock_rescan_remove(void)
2023 mutex_unlock(&pci_rescan_remove_lock
);
2025 EXPORT_SYMBOL_GPL(pci_unlock_rescan_remove
);
2027 static int __init
pci_sort_bf_cmp(const struct device
*d_a
, const struct device
*d_b
)
2029 const struct pci_dev
*a
= to_pci_dev(d_a
);
2030 const struct pci_dev
*b
= to_pci_dev(d_b
);
2032 if (pci_domain_nr(a
->bus
) < pci_domain_nr(b
->bus
)) return -1;
2033 else if (pci_domain_nr(a
->bus
) > pci_domain_nr(b
->bus
)) return 1;
2035 if (a
->bus
->number
< b
->bus
->number
) return -1;
2036 else if (a
->bus
->number
> b
->bus
->number
) return 1;
2038 if (a
->devfn
< b
->devfn
) return -1;
2039 else if (a
->devfn
> b
->devfn
) return 1;
2044 void __init
pci_sort_breadthfirst(void)
2046 bus_sort_breadthfirst(&pci_bus_type
, &pci_sort_bf_cmp
);