2 * probe.c - PCI detection and setup code
5 #include <linux/kernel.h>
6 #include <linux/delay.h>
7 #include <linux/init.h>
9 #include <linux/of_device.h>
10 #include <linux/of_pci.h>
11 #include <linux/pci_hotplug.h>
12 #include <linux/slab.h>
13 #include <linux/module.h>
14 #include <linux/cpumask.h>
15 #include <linux/pci-aspm.h>
16 #include <linux/aer.h>
17 #include <linux/acpi.h>
18 #include <linux/irqdomain.h>
19 #include <linux/pm_runtime.h>
22 #define CARDBUS_LATENCY_TIMER 176 /* secondary latency timer */
23 #define CARDBUS_RESERVE_BUSNR 3
25 static struct resource busn_resource
= {
29 .flags
= IORESOURCE_BUS
,
32 /* Ugh. Need to stop exporting this to modules. */
33 LIST_HEAD(pci_root_buses
);
34 EXPORT_SYMBOL(pci_root_buses
);
36 static LIST_HEAD(pci_domain_busn_res_list
);
38 struct pci_domain_busn_res
{
39 struct list_head list
;
44 static struct resource
*get_pci_domain_busn_res(int domain_nr
)
46 struct pci_domain_busn_res
*r
;
48 list_for_each_entry(r
, &pci_domain_busn_res_list
, list
)
49 if (r
->domain_nr
== domain_nr
)
52 r
= kzalloc(sizeof(*r
), GFP_KERNEL
);
56 r
->domain_nr
= domain_nr
;
59 r
->res
.flags
= IORESOURCE_BUS
| IORESOURCE_PCI_FIXED
;
61 list_add_tail(&r
->list
, &pci_domain_busn_res_list
);
/* Match callback for bus_find_device(): accepts any device. */
static int find_anything(struct device *dev, void *data)
{
	return 1;
}
72 * Some device drivers need know if pci is initiated.
73 * Basically, we think pci is not initiated when there
74 * is no device to be found on the pci_bus_type.
76 int no_pci_devices(void)
81 dev
= bus_find_device(&pci_bus_type
, NULL
, NULL
, find_anything
);
82 no_devices
= (dev
== NULL
);
86 EXPORT_SYMBOL(no_pci_devices
);
91 static void release_pcibus_dev(struct device
*dev
)
93 struct pci_bus
*pci_bus
= to_pci_bus(dev
);
95 put_device(pci_bus
->bridge
);
96 pci_bus_remove_resources(pci_bus
);
97 pci_release_bus_of_node(pci_bus
);
101 static struct class pcibus_class
= {
103 .dev_release
= &release_pcibus_dev
,
104 .dev_groups
= pcibus_groups
,
107 static int __init
pcibus_class_init(void)
109 return class_register(&pcibus_class
);
111 postcore_initcall(pcibus_class_init
);
113 static u64
pci_size(u64 base
, u64 maxbase
, u64 mask
)
115 u64 size
= mask
& maxbase
; /* Find the significant bits */
119 /* Get the lowest of them to find the decode size, and
120 from that the extent. */
121 size
= (size
& ~(size
-1)) - 1;
123 /* base == maxbase can be valid only if the BAR has
124 already been programmed with all 1s. */
125 if (base
== maxbase
&& ((base
| size
) & mask
) != mask
)
131 static inline unsigned long decode_bar(struct pci_dev
*dev
, u32 bar
)
136 if ((bar
& PCI_BASE_ADDRESS_SPACE
) == PCI_BASE_ADDRESS_SPACE_IO
) {
137 flags
= bar
& ~PCI_BASE_ADDRESS_IO_MASK
;
138 flags
|= IORESOURCE_IO
;
142 flags
= bar
& ~PCI_BASE_ADDRESS_MEM_MASK
;
143 flags
|= IORESOURCE_MEM
;
144 if (flags
& PCI_BASE_ADDRESS_MEM_PREFETCH
)
145 flags
|= IORESOURCE_PREFETCH
;
147 mem_type
= bar
& PCI_BASE_ADDRESS_MEM_TYPE_MASK
;
149 case PCI_BASE_ADDRESS_MEM_TYPE_32
:
151 case PCI_BASE_ADDRESS_MEM_TYPE_1M
:
152 /* 1M mem BAR treated as 32-bit BAR */
154 case PCI_BASE_ADDRESS_MEM_TYPE_64
:
155 flags
|= IORESOURCE_MEM_64
;
158 /* mem unknown type treated as 32-bit BAR */
164 #define PCI_COMMAND_DECODE_ENABLE (PCI_COMMAND_MEMORY | PCI_COMMAND_IO)
167 * pci_read_base - read a PCI BAR
168 * @dev: the PCI device
169 * @type: type of the BAR
170 * @res: resource buffer to be filled in
171 * @pos: BAR position in the config space
173 * Returns 1 if the BAR is 64-bit, or 0 if 32-bit.
175 int __pci_read_base(struct pci_dev
*dev
, enum pci_bar_type type
,
176 struct resource
*res
, unsigned int pos
)
178 u32 l
= 0, sz
= 0, mask
;
179 u64 l64
, sz64
, mask64
;
181 struct pci_bus_region region
, inverted_region
;
183 mask
= type
? PCI_ROM_ADDRESS_MASK
: ~0;
185 /* No printks while decoding is disabled! */
186 if (!dev
->mmio_always_on
) {
187 pci_read_config_word(dev
, PCI_COMMAND
, &orig_cmd
);
188 if (orig_cmd
& PCI_COMMAND_DECODE_ENABLE
) {
189 pci_write_config_word(dev
, PCI_COMMAND
,
190 orig_cmd
& ~PCI_COMMAND_DECODE_ENABLE
);
194 res
->name
= pci_name(dev
);
196 pci_read_config_dword(dev
, pos
, &l
);
197 pci_write_config_dword(dev
, pos
, l
| mask
);
198 pci_read_config_dword(dev
, pos
, &sz
);
199 pci_write_config_dword(dev
, pos
, l
);
202 * All bits set in sz means the device isn't working properly.
203 * If the BAR isn't implemented, all bits must be 0. If it's a
204 * memory BAR or a ROM, bit 0 must be clear; if it's an io BAR, bit
207 if (sz
== 0xffffffff)
211 * I don't know how l can have all bits set. Copied from old code.
212 * Maybe it fixes a bug on some ancient platform.
217 if (type
== pci_bar_unknown
) {
218 res
->flags
= decode_bar(dev
, l
);
219 res
->flags
|= IORESOURCE_SIZEALIGN
;
220 if (res
->flags
& IORESOURCE_IO
) {
221 l64
= l
& PCI_BASE_ADDRESS_IO_MASK
;
222 sz64
= sz
& PCI_BASE_ADDRESS_IO_MASK
;
223 mask64
= PCI_BASE_ADDRESS_IO_MASK
& (u32
)IO_SPACE_LIMIT
;
225 l64
= l
& PCI_BASE_ADDRESS_MEM_MASK
;
226 sz64
= sz
& PCI_BASE_ADDRESS_MEM_MASK
;
227 mask64
= (u32
)PCI_BASE_ADDRESS_MEM_MASK
;
230 if (l
& PCI_ROM_ADDRESS_ENABLE
)
231 res
->flags
|= IORESOURCE_ROM_ENABLE
;
232 l64
= l
& PCI_ROM_ADDRESS_MASK
;
233 sz64
= sz
& PCI_ROM_ADDRESS_MASK
;
234 mask64
= PCI_ROM_ADDRESS_MASK
;
237 if (res
->flags
& IORESOURCE_MEM_64
) {
238 pci_read_config_dword(dev
, pos
+ 4, &l
);
239 pci_write_config_dword(dev
, pos
+ 4, ~0);
240 pci_read_config_dword(dev
, pos
+ 4, &sz
);
241 pci_write_config_dword(dev
, pos
+ 4, l
);
243 l64
|= ((u64
)l
<< 32);
244 sz64
|= ((u64
)sz
<< 32);
245 mask64
|= ((u64
)~0 << 32);
248 if (!dev
->mmio_always_on
&& (orig_cmd
& PCI_COMMAND_DECODE_ENABLE
))
249 pci_write_config_word(dev
, PCI_COMMAND
, orig_cmd
);
254 sz64
= pci_size(l64
, sz64
, mask64
);
256 dev_info(&dev
->dev
, FW_BUG
"reg 0x%x: invalid BAR (can't size)\n",
261 if (res
->flags
& IORESOURCE_MEM_64
) {
262 if ((sizeof(pci_bus_addr_t
) < 8 || sizeof(resource_size_t
) < 8)
263 && sz64
> 0x100000000ULL
) {
264 res
->flags
|= IORESOURCE_UNSET
| IORESOURCE_DISABLED
;
267 dev_err(&dev
->dev
, "reg 0x%x: can't handle BAR larger than 4GB (size %#010llx)\n",
268 pos
, (unsigned long long)sz64
);
272 if ((sizeof(pci_bus_addr_t
) < 8) && l
) {
273 /* Above 32-bit boundary; try to reallocate */
274 res
->flags
|= IORESOURCE_UNSET
;
277 dev_info(&dev
->dev
, "reg 0x%x: can't handle BAR above 4GB (bus address %#010llx)\n",
278 pos
, (unsigned long long)l64
);
284 region
.end
= l64
+ sz64
;
286 pcibios_bus_to_resource(dev
->bus
, res
, ®ion
);
287 pcibios_resource_to_bus(dev
->bus
, &inverted_region
, res
);
290 * If "A" is a BAR value (a bus address), "bus_to_resource(A)" is
291 * the corresponding resource address (the physical address used by
292 * the CPU. Converting that resource address back to a bus address
293 * should yield the original BAR value:
295 * resource_to_bus(bus_to_resource(A)) == A
297 * If it doesn't, CPU accesses to "bus_to_resource(A)" will not
298 * be claimed by the device.
300 if (inverted_region
.start
!= region
.start
) {
301 res
->flags
|= IORESOURCE_UNSET
;
303 res
->end
= region
.end
- region
.start
;
304 dev_info(&dev
->dev
, "reg 0x%x: initial BAR value %#010llx invalid\n",
305 pos
, (unsigned long long)region
.start
);
315 dev_printk(KERN_DEBUG
, &dev
->dev
, "reg 0x%x: %pR\n", pos
, res
);
317 return (res
->flags
& IORESOURCE_MEM_64
) ? 1 : 0;
320 static void pci_read_bases(struct pci_dev
*dev
, unsigned int howmany
, int rom
)
322 unsigned int pos
, reg
;
324 if (dev
->non_compliant_bars
)
327 for (pos
= 0; pos
< howmany
; pos
++) {
328 struct resource
*res
= &dev
->resource
[pos
];
329 reg
= PCI_BASE_ADDRESS_0
+ (pos
<< 2);
330 pos
+= __pci_read_base(dev
, pci_bar_unknown
, res
, reg
);
334 struct resource
*res
= &dev
->resource
[PCI_ROM_RESOURCE
];
335 dev
->rom_base_reg
= rom
;
336 res
->flags
= IORESOURCE_MEM
| IORESOURCE_PREFETCH
|
337 IORESOURCE_READONLY
| IORESOURCE_SIZEALIGN
;
338 __pci_read_base(dev
, pci_bar_mem32
, res
, rom
);
342 static void pci_read_bridge_io(struct pci_bus
*child
)
344 struct pci_dev
*dev
= child
->self
;
345 u8 io_base_lo
, io_limit_lo
;
346 unsigned long io_mask
, io_granularity
, base
, limit
;
347 struct pci_bus_region region
;
348 struct resource
*res
;
350 io_mask
= PCI_IO_RANGE_MASK
;
351 io_granularity
= 0x1000;
352 if (dev
->io_window_1k
) {
353 /* Support 1K I/O space granularity */
354 io_mask
= PCI_IO_1K_RANGE_MASK
;
355 io_granularity
= 0x400;
358 res
= child
->resource
[0];
359 pci_read_config_byte(dev
, PCI_IO_BASE
, &io_base_lo
);
360 pci_read_config_byte(dev
, PCI_IO_LIMIT
, &io_limit_lo
);
361 base
= (io_base_lo
& io_mask
) << 8;
362 limit
= (io_limit_lo
& io_mask
) << 8;
364 if ((io_base_lo
& PCI_IO_RANGE_TYPE_MASK
) == PCI_IO_RANGE_TYPE_32
) {
365 u16 io_base_hi
, io_limit_hi
;
367 pci_read_config_word(dev
, PCI_IO_BASE_UPPER16
, &io_base_hi
);
368 pci_read_config_word(dev
, PCI_IO_LIMIT_UPPER16
, &io_limit_hi
);
369 base
|= ((unsigned long) io_base_hi
<< 16);
370 limit
|= ((unsigned long) io_limit_hi
<< 16);
374 res
->flags
= (io_base_lo
& PCI_IO_RANGE_TYPE_MASK
) | IORESOURCE_IO
;
376 region
.end
= limit
+ io_granularity
- 1;
377 pcibios_bus_to_resource(dev
->bus
, res
, ®ion
);
378 dev_printk(KERN_DEBUG
, &dev
->dev
, " bridge window %pR\n", res
);
382 static void pci_read_bridge_mmio(struct pci_bus
*child
)
384 struct pci_dev
*dev
= child
->self
;
385 u16 mem_base_lo
, mem_limit_lo
;
386 unsigned long base
, limit
;
387 struct pci_bus_region region
;
388 struct resource
*res
;
390 res
= child
->resource
[1];
391 pci_read_config_word(dev
, PCI_MEMORY_BASE
, &mem_base_lo
);
392 pci_read_config_word(dev
, PCI_MEMORY_LIMIT
, &mem_limit_lo
);
393 base
= ((unsigned long) mem_base_lo
& PCI_MEMORY_RANGE_MASK
) << 16;
394 limit
= ((unsigned long) mem_limit_lo
& PCI_MEMORY_RANGE_MASK
) << 16;
396 res
->flags
= (mem_base_lo
& PCI_MEMORY_RANGE_TYPE_MASK
) | IORESOURCE_MEM
;
398 region
.end
= limit
+ 0xfffff;
399 pcibios_bus_to_resource(dev
->bus
, res
, ®ion
);
400 dev_printk(KERN_DEBUG
, &dev
->dev
, " bridge window %pR\n", res
);
404 static void pci_read_bridge_mmio_pref(struct pci_bus
*child
)
406 struct pci_dev
*dev
= child
->self
;
407 u16 mem_base_lo
, mem_limit_lo
;
409 pci_bus_addr_t base
, limit
;
410 struct pci_bus_region region
;
411 struct resource
*res
;
413 res
= child
->resource
[2];
414 pci_read_config_word(dev
, PCI_PREF_MEMORY_BASE
, &mem_base_lo
);
415 pci_read_config_word(dev
, PCI_PREF_MEMORY_LIMIT
, &mem_limit_lo
);
416 base64
= (mem_base_lo
& PCI_PREF_RANGE_MASK
) << 16;
417 limit64
= (mem_limit_lo
& PCI_PREF_RANGE_MASK
) << 16;
419 if ((mem_base_lo
& PCI_PREF_RANGE_TYPE_MASK
) == PCI_PREF_RANGE_TYPE_64
) {
420 u32 mem_base_hi
, mem_limit_hi
;
422 pci_read_config_dword(dev
, PCI_PREF_BASE_UPPER32
, &mem_base_hi
);
423 pci_read_config_dword(dev
, PCI_PREF_LIMIT_UPPER32
, &mem_limit_hi
);
426 * Some bridges set the base > limit by default, and some
427 * (broken) BIOSes do not initialize them. If we find
428 * this, just assume they are not being used.
430 if (mem_base_hi
<= mem_limit_hi
) {
431 base64
|= (u64
) mem_base_hi
<< 32;
432 limit64
|= (u64
) mem_limit_hi
<< 32;
436 base
= (pci_bus_addr_t
) base64
;
437 limit
= (pci_bus_addr_t
) limit64
;
439 if (base
!= base64
) {
440 dev_err(&dev
->dev
, "can't handle bridge window above 4GB (bus address %#010llx)\n",
441 (unsigned long long) base64
);
446 res
->flags
= (mem_base_lo
& PCI_PREF_RANGE_TYPE_MASK
) |
447 IORESOURCE_MEM
| IORESOURCE_PREFETCH
;
448 if (res
->flags
& PCI_PREF_RANGE_TYPE_64
)
449 res
->flags
|= IORESOURCE_MEM_64
;
451 region
.end
= limit
+ 0xfffff;
452 pcibios_bus_to_resource(dev
->bus
, res
, ®ion
);
453 dev_printk(KERN_DEBUG
, &dev
->dev
, " bridge window %pR\n", res
);
457 void pci_read_bridge_bases(struct pci_bus
*child
)
459 struct pci_dev
*dev
= child
->self
;
460 struct resource
*res
;
463 if (pci_is_root_bus(child
)) /* It's a host bus, nothing to read */
466 dev_info(&dev
->dev
, "PCI bridge to %pR%s\n",
468 dev
->transparent
? " (subtractive decode)" : "");
470 pci_bus_remove_resources(child
);
471 for (i
= 0; i
< PCI_BRIDGE_RESOURCE_NUM
; i
++)
472 child
->resource
[i
] = &dev
->resource
[PCI_BRIDGE_RESOURCES
+i
];
474 pci_read_bridge_io(child
);
475 pci_read_bridge_mmio(child
);
476 pci_read_bridge_mmio_pref(child
);
478 if (dev
->transparent
) {
479 pci_bus_for_each_resource(child
->parent
, res
, i
) {
480 if (res
&& res
->flags
) {
481 pci_bus_add_resource(child
, res
,
482 PCI_SUBTRACTIVE_DECODE
);
483 dev_printk(KERN_DEBUG
, &dev
->dev
,
484 " bridge window %pR (subtractive decode)\n",
491 static struct pci_bus
*pci_alloc_bus(struct pci_bus
*parent
)
495 b
= kzalloc(sizeof(*b
), GFP_KERNEL
);
499 INIT_LIST_HEAD(&b
->node
);
500 INIT_LIST_HEAD(&b
->children
);
501 INIT_LIST_HEAD(&b
->devices
);
502 INIT_LIST_HEAD(&b
->slots
);
503 INIT_LIST_HEAD(&b
->resources
);
504 b
->max_bus_speed
= PCI_SPEED_UNKNOWN
;
505 b
->cur_bus_speed
= PCI_SPEED_UNKNOWN
;
506 #ifdef CONFIG_PCI_DOMAINS_GENERIC
508 b
->domain_nr
= parent
->domain_nr
;
513 static void devm_pci_release_host_bridge_dev(struct device
*dev
)
515 struct pci_host_bridge
*bridge
= to_pci_host_bridge(dev
);
517 if (bridge
->release_fn
)
518 bridge
->release_fn(bridge
);
520 pci_free_resource_list(&bridge
->windows
);
/* Release callback for kzalloc'd host bridges: also frees the bridge. */
static void pci_release_host_bridge_dev(struct device *dev)
{
	devm_pci_release_host_bridge_dev(dev);
	kfree(to_pci_host_bridge(dev));
}
529 struct pci_host_bridge
*pci_alloc_host_bridge(size_t priv
)
531 struct pci_host_bridge
*bridge
;
533 bridge
= kzalloc(sizeof(*bridge
) + priv
, GFP_KERNEL
);
537 INIT_LIST_HEAD(&bridge
->windows
);
538 bridge
->dev
.release
= pci_release_host_bridge_dev
;
542 EXPORT_SYMBOL(pci_alloc_host_bridge
);
544 struct pci_host_bridge
*devm_pci_alloc_host_bridge(struct device
*dev
,
547 struct pci_host_bridge
*bridge
;
549 bridge
= devm_kzalloc(dev
, sizeof(*bridge
) + priv
, GFP_KERNEL
);
553 INIT_LIST_HEAD(&bridge
->windows
);
554 bridge
->dev
.release
= devm_pci_release_host_bridge_dev
;
558 EXPORT_SYMBOL(devm_pci_alloc_host_bridge
);
560 void pci_free_host_bridge(struct pci_host_bridge
*bridge
)
562 pci_free_resource_list(&bridge
->windows
);
566 EXPORT_SYMBOL(pci_free_host_bridge
);
568 static const unsigned char pcix_bus_speed
[] = {
569 PCI_SPEED_UNKNOWN
, /* 0 */
570 PCI_SPEED_66MHz_PCIX
, /* 1 */
571 PCI_SPEED_100MHz_PCIX
, /* 2 */
572 PCI_SPEED_133MHz_PCIX
, /* 3 */
573 PCI_SPEED_UNKNOWN
, /* 4 */
574 PCI_SPEED_66MHz_PCIX_ECC
, /* 5 */
575 PCI_SPEED_100MHz_PCIX_ECC
, /* 6 */
576 PCI_SPEED_133MHz_PCIX_ECC
, /* 7 */
577 PCI_SPEED_UNKNOWN
, /* 8 */
578 PCI_SPEED_66MHz_PCIX_266
, /* 9 */
579 PCI_SPEED_100MHz_PCIX_266
, /* A */
580 PCI_SPEED_133MHz_PCIX_266
, /* B */
581 PCI_SPEED_UNKNOWN
, /* C */
582 PCI_SPEED_66MHz_PCIX_533
, /* D */
583 PCI_SPEED_100MHz_PCIX_533
, /* E */
584 PCI_SPEED_133MHz_PCIX_533
/* F */
587 const unsigned char pcie_link_speed
[] = {
588 PCI_SPEED_UNKNOWN
, /* 0 */
589 PCIE_SPEED_2_5GT
, /* 1 */
590 PCIE_SPEED_5_0GT
, /* 2 */
591 PCIE_SPEED_8_0GT
, /* 3 */
592 PCIE_SPEED_16_0GT
, /* 4 */
593 PCI_SPEED_UNKNOWN
, /* 5 */
594 PCI_SPEED_UNKNOWN
, /* 6 */
595 PCI_SPEED_UNKNOWN
, /* 7 */
596 PCI_SPEED_UNKNOWN
, /* 8 */
597 PCI_SPEED_UNKNOWN
, /* 9 */
598 PCI_SPEED_UNKNOWN
, /* A */
599 PCI_SPEED_UNKNOWN
, /* B */
600 PCI_SPEED_UNKNOWN
, /* C */
601 PCI_SPEED_UNKNOWN
, /* D */
602 PCI_SPEED_UNKNOWN
, /* E */
603 PCI_SPEED_UNKNOWN
/* F */
606 void pcie_update_link_speed(struct pci_bus
*bus
, u16 linksta
)
608 bus
->cur_bus_speed
= pcie_link_speed
[linksta
& PCI_EXP_LNKSTA_CLS
];
610 EXPORT_SYMBOL_GPL(pcie_update_link_speed
);
612 static unsigned char agp_speeds
[] = {
620 static enum pci_bus_speed
agp_speed(int agp3
, int agpstat
)
626 else if (agpstat
& 2)
628 else if (agpstat
& 1)
640 return agp_speeds
[index
];
643 static void pci_set_bus_speed(struct pci_bus
*bus
)
645 struct pci_dev
*bridge
= bus
->self
;
648 pos
= pci_find_capability(bridge
, PCI_CAP_ID_AGP
);
650 pos
= pci_find_capability(bridge
, PCI_CAP_ID_AGP3
);
654 pci_read_config_dword(bridge
, pos
+ PCI_AGP_STATUS
, &agpstat
);
655 bus
->max_bus_speed
= agp_speed(agpstat
& 8, agpstat
& 7);
657 pci_read_config_dword(bridge
, pos
+ PCI_AGP_COMMAND
, &agpcmd
);
658 bus
->cur_bus_speed
= agp_speed(agpstat
& 8, agpcmd
& 7);
661 pos
= pci_find_capability(bridge
, PCI_CAP_ID_PCIX
);
664 enum pci_bus_speed max
;
666 pci_read_config_word(bridge
, pos
+ PCI_X_BRIDGE_SSTATUS
,
669 if (status
& PCI_X_SSTATUS_533MHZ
) {
670 max
= PCI_SPEED_133MHz_PCIX_533
;
671 } else if (status
& PCI_X_SSTATUS_266MHZ
) {
672 max
= PCI_SPEED_133MHz_PCIX_266
;
673 } else if (status
& PCI_X_SSTATUS_133MHZ
) {
674 if ((status
& PCI_X_SSTATUS_VERS
) == PCI_X_SSTATUS_V2
)
675 max
= PCI_SPEED_133MHz_PCIX_ECC
;
677 max
= PCI_SPEED_133MHz_PCIX
;
679 max
= PCI_SPEED_66MHz_PCIX
;
682 bus
->max_bus_speed
= max
;
683 bus
->cur_bus_speed
= pcix_bus_speed
[
684 (status
& PCI_X_SSTATUS_FREQ
) >> 6];
689 if (pci_is_pcie(bridge
)) {
693 pcie_capability_read_dword(bridge
, PCI_EXP_LNKCAP
, &linkcap
);
694 bus
->max_bus_speed
= pcie_link_speed
[linkcap
& PCI_EXP_LNKCAP_SLS
];
696 pcie_capability_read_word(bridge
, PCI_EXP_LNKSTA
, &linksta
);
697 pcie_update_link_speed(bus
, linksta
);
/*
 * Resolve the MSI irq_domain for a root @bus via OF, ACPI, or as a
 * last resort the fwnode handle.  May return NULL.
 */
static struct irq_domain *pci_host_bridge_msi_domain(struct pci_bus *bus)
{
	struct irq_domain *d;

	/*
	 * Any firmware interface that can resolve the msi_domain
	 * should be called from here.
	 */
	d = pci_host_bridge_of_msi_domain(bus);
	if (!d)
		d = pci_host_bridge_acpi_msi_domain(bus);

#ifdef CONFIG_PCI_MSI_IRQ_DOMAIN
	/*
	 * If no IRQ domain was found via the OF tree, try looking it up
	 * directly through the fwnode_handle.
	 */
	if (!d) {
		struct fwnode_handle *fwnode = pci_root_bus_fwnode(bus);

		if (fwnode)
			d = irq_find_matching_fwnode(fwnode,
						     DOMAIN_BUS_PCI_MSI);
	}
#endif

	return d;
}
730 static void pci_set_bus_msi_domain(struct pci_bus
*bus
)
732 struct irq_domain
*d
;
736 * The bus can be a root bus, a subordinate bus, or a virtual bus
737 * created by an SR-IOV device. Walk up to the first bridge device
738 * found or derive the domain from the host bridge.
740 for (b
= bus
, d
= NULL
; !d
&& !pci_is_root_bus(b
); b
= b
->parent
) {
742 d
= dev_get_msi_domain(&b
->self
->dev
);
746 d
= pci_host_bridge_msi_domain(b
);
748 dev_set_msi_domain(&bus
->dev
, d
);
751 static int pci_register_host_bridge(struct pci_host_bridge
*bridge
)
753 struct device
*parent
= bridge
->dev
.parent
;
754 struct resource_entry
*window
, *n
;
755 struct pci_bus
*bus
, *b
;
756 resource_size_t offset
;
757 LIST_HEAD(resources
);
758 struct resource
*res
;
763 bus
= pci_alloc_bus(NULL
);
769 /* temporarily move resources off the list */
770 list_splice_init(&bridge
->windows
, &resources
);
771 bus
->sysdata
= bridge
->sysdata
;
772 bus
->msi
= bridge
->msi
;
773 bus
->ops
= bridge
->ops
;
774 bus
->number
= bus
->busn_res
.start
= bridge
->busnr
;
775 #ifdef CONFIG_PCI_DOMAINS_GENERIC
776 bus
->domain_nr
= pci_bus_find_domain_nr(bus
, parent
);
779 b
= pci_find_bus(pci_domain_nr(bus
), bridge
->busnr
);
781 /* If we already got to this bus through a different bridge, ignore it */
782 dev_dbg(&b
->dev
, "bus already known\n");
787 dev_set_name(&bridge
->dev
, "pci%04x:%02x", pci_domain_nr(bus
),
790 err
= pcibios_root_bridge_prepare(bridge
);
794 err
= device_register(&bridge
->dev
);
796 put_device(&bridge
->dev
);
798 bus
->bridge
= get_device(&bridge
->dev
);
799 device_enable_async_suspend(bus
->bridge
);
800 pci_set_bus_of_node(bus
);
801 pci_set_bus_msi_domain(bus
);
804 set_dev_node(bus
->bridge
, pcibus_to_node(bus
));
806 bus
->dev
.class = &pcibus_class
;
807 bus
->dev
.parent
= bus
->bridge
;
809 dev_set_name(&bus
->dev
, "%04x:%02x", pci_domain_nr(bus
), bus
->number
);
810 name
= dev_name(&bus
->dev
);
812 err
= device_register(&bus
->dev
);
816 pcibios_add_bus(bus
);
818 /* Create legacy_io and legacy_mem files for this bus */
819 pci_create_legacy_files(bus
);
822 dev_info(parent
, "PCI host bridge to bus %s\n", name
);
824 pr_info("PCI host bridge to bus %s\n", name
);
826 /* Add initial resources to the bus */
827 resource_list_for_each_entry_safe(window
, n
, &resources
) {
828 list_move_tail(&window
->node
, &bridge
->windows
);
829 offset
= window
->offset
;
832 if (res
->flags
& IORESOURCE_BUS
)
833 pci_bus_insert_busn_res(bus
, bus
->number
, res
->end
);
835 pci_bus_add_resource(bus
, res
, 0);
838 if (resource_type(res
) == IORESOURCE_IO
)
839 fmt
= " (bus address [%#06llx-%#06llx])";
841 fmt
= " (bus address [%#010llx-%#010llx])";
843 snprintf(addr
, sizeof(addr
), fmt
,
844 (unsigned long long)(res
->start
- offset
),
845 (unsigned long long)(res
->end
- offset
));
849 dev_info(&bus
->dev
, "root bus resource %pR%s\n", res
, addr
);
852 down_write(&pci_bus_sem
);
853 list_add_tail(&bus
->node
, &pci_root_buses
);
854 up_write(&pci_bus_sem
);
859 put_device(&bridge
->dev
);
860 device_unregister(&bridge
->dev
);
867 static struct pci_bus
*pci_alloc_child_bus(struct pci_bus
*parent
,
868 struct pci_dev
*bridge
, int busnr
)
870 struct pci_bus
*child
;
875 * Allocate a new bus, and inherit stuff from the parent..
877 child
= pci_alloc_bus(parent
);
881 child
->parent
= parent
;
882 child
->ops
= parent
->ops
;
883 child
->msi
= parent
->msi
;
884 child
->sysdata
= parent
->sysdata
;
885 child
->bus_flags
= parent
->bus_flags
;
887 /* initialize some portions of the bus device, but don't register it
888 * now as the parent is not properly set up yet.
890 child
->dev
.class = &pcibus_class
;
891 dev_set_name(&child
->dev
, "%04x:%02x", pci_domain_nr(child
), busnr
);
894 * Set up the primary, secondary and subordinate
897 child
->number
= child
->busn_res
.start
= busnr
;
898 child
->primary
= parent
->busn_res
.start
;
899 child
->busn_res
.end
= 0xff;
902 child
->dev
.parent
= parent
->bridge
;
906 child
->self
= bridge
;
907 child
->bridge
= get_device(&bridge
->dev
);
908 child
->dev
.parent
= child
->bridge
;
909 pci_set_bus_of_node(child
);
910 pci_set_bus_speed(child
);
912 /* Set up default resource pointers and names.. */
913 for (i
= 0; i
< PCI_BRIDGE_RESOURCE_NUM
; i
++) {
914 child
->resource
[i
] = &bridge
->resource
[PCI_BRIDGE_RESOURCES
+i
];
915 child
->resource
[i
]->name
= child
->name
;
917 bridge
->subordinate
= child
;
920 pci_set_bus_msi_domain(child
);
921 ret
= device_register(&child
->dev
);
924 pcibios_add_bus(child
);
926 if (child
->ops
->add_bus
) {
927 ret
= child
->ops
->add_bus(child
);
928 if (WARN_ON(ret
< 0))
929 dev_err(&child
->dev
, "failed to add bus: %d\n", ret
);
932 /* Create legacy_io and legacy_mem files for this bus */
933 pci_create_legacy_files(child
);
938 struct pci_bus
*pci_add_new_bus(struct pci_bus
*parent
, struct pci_dev
*dev
,
941 struct pci_bus
*child
;
943 child
= pci_alloc_child_bus(parent
, dev
, busnr
);
945 down_write(&pci_bus_sem
);
946 list_add_tail(&child
->node
, &parent
->children
);
947 up_write(&pci_bus_sem
);
951 EXPORT_SYMBOL(pci_add_new_bus
);
953 static void pci_enable_crs(struct pci_dev
*pdev
)
957 /* Enable CRS Software Visibility if supported */
958 pcie_capability_read_word(pdev
, PCI_EXP_RTCAP
, &root_cap
);
959 if (root_cap
& PCI_EXP_RTCAP_CRSVIS
)
960 pcie_capability_set_word(pdev
, PCI_EXP_RTCTL
,
961 PCI_EXP_RTCTL_CRSSVE
);
static unsigned int pci_scan_child_bus_extend(struct pci_bus *bus,
					      unsigned int available_buses);
968 * pci_scan_bridge_extend() - Scan buses behind a bridge
969 * @bus: Parent bus the bridge is on
970 * @dev: Bridge itself
971 * @max: Starting subordinate number of buses behind this bridge
972 * @available_buses: Total number of buses available for this bridge and
973 * the devices below. After the minimal bus space has
974 * been allocated the remaining buses will be
975 * distributed equally between hotplug-capable bridges.
976 * @pass: Either %0 (scan already configured bridges) or %1 (scan bridges
977 * that need to be reconfigured.
979 * If it's a bridge, configure it and scan the bus behind it.
980 * For CardBus bridges, we don't scan behind as the devices will
981 * be handled by the bridge driver itself.
983 * We need to process bridges in two passes -- first we scan those
984 * already configured by the BIOS and after we are done with all of
985 * them, we proceed to assigning numbers to the remaining buses in
986 * order to avoid overlaps between old and new bus numbers.
988 static int pci_scan_bridge_extend(struct pci_bus
*bus
, struct pci_dev
*dev
,
989 int max
, unsigned int available_buses
,
992 struct pci_bus
*child
;
993 int is_cardbus
= (dev
->hdr_type
== PCI_HEADER_TYPE_CARDBUS
);
996 u8 primary
, secondary
, subordinate
;
1000 * Make sure the bridge is powered on to be able to access config
1001 * space of devices below it.
1003 pm_runtime_get_sync(&dev
->dev
);
1005 pci_read_config_dword(dev
, PCI_PRIMARY_BUS
, &buses
);
1006 primary
= buses
& 0xFF;
1007 secondary
= (buses
>> 8) & 0xFF;
1008 subordinate
= (buses
>> 16) & 0xFF;
1010 dev_dbg(&dev
->dev
, "scanning [bus %02x-%02x] behind bridge, pass %d\n",
1011 secondary
, subordinate
, pass
);
1013 if (!primary
&& (primary
!= bus
->number
) && secondary
&& subordinate
) {
1014 dev_warn(&dev
->dev
, "Primary bus is hard wired to 0\n");
1015 primary
= bus
->number
;
1018 /* Check if setup is sensible at all */
1020 (primary
!= bus
->number
|| secondary
<= bus
->number
||
1021 secondary
> subordinate
)) {
1022 dev_info(&dev
->dev
, "bridge configuration invalid ([bus %02x-%02x]), reconfiguring\n",
1023 secondary
, subordinate
);
1027 /* Disable MasterAbortMode during probing to avoid reporting
1028 of bus errors (in some architectures) */
1029 pci_read_config_word(dev
, PCI_BRIDGE_CONTROL
, &bctl
);
1030 pci_write_config_word(dev
, PCI_BRIDGE_CONTROL
,
1031 bctl
& ~PCI_BRIDGE_CTL_MASTER_ABORT
);
1033 pci_enable_crs(dev
);
1035 if ((secondary
|| subordinate
) && !pcibios_assign_all_busses() &&
1036 !is_cardbus
&& !broken
) {
1039 * Bus already configured by firmware, process it in the first
1040 * pass and just note the configuration.
1046 * The bus might already exist for two reasons: Either we are
1047 * rescanning the bus or the bus is reachable through more than
1048 * one bridge. The second case can happen with the i450NX
1051 child
= pci_find_bus(pci_domain_nr(bus
), secondary
);
1053 child
= pci_add_new_bus(bus
, dev
, secondary
);
1056 child
->primary
= primary
;
1057 pci_bus_insert_busn_res(child
, secondary
, subordinate
);
1058 child
->bridge_ctl
= bctl
;
1061 cmax
= pci_scan_child_bus(child
);
1062 if (cmax
> subordinate
)
1063 dev_warn(&dev
->dev
, "bridge has subordinate %02x but max busn %02x\n",
1065 /* subordinate should equal child->busn_res.end */
1066 if (subordinate
> max
)
1070 * We need to assign a number to this bus which we always
1071 * do in the second pass.
1074 if (pcibios_assign_all_busses() || broken
|| is_cardbus
)
1075 /* Temporarily disable forwarding of the
1076 configuration cycles on all bridges in
1077 this bus segment to avoid possible
1078 conflicts in the second pass between two
1079 bridges programmed with overlapping
1081 pci_write_config_dword(dev
, PCI_PRIMARY_BUS
,
1087 pci_write_config_word(dev
, PCI_STATUS
, 0xffff);
1089 /* Prevent assigning a bus number that already exists.
1090 * This can happen when a bridge is hot-plugged, so in
1091 * this case we only re-scan this bus. */
1092 child
= pci_find_bus(pci_domain_nr(bus
), max
+1);
1094 child
= pci_add_new_bus(bus
, dev
, max
+1);
1097 pci_bus_insert_busn_res(child
, max
+1,
1101 if (available_buses
)
1104 buses
= (buses
& 0xff000000)
1105 | ((unsigned int)(child
->primary
) << 0)
1106 | ((unsigned int)(child
->busn_res
.start
) << 8)
1107 | ((unsigned int)(child
->busn_res
.end
) << 16);
1110 * yenta.c forces a secondary latency timer of 176.
1111 * Copy that behaviour here.
1114 buses
&= ~0xff000000;
1115 buses
|= CARDBUS_LATENCY_TIMER
<< 24;
1119 * We need to blast all three values with a single write.
1121 pci_write_config_dword(dev
, PCI_PRIMARY_BUS
, buses
);
1124 child
->bridge_ctl
= bctl
;
1125 max
= pci_scan_child_bus_extend(child
, available_buses
);
1128 * For CardBus bridges, we leave 4 bus numbers
1129 * as cards with a PCI-to-PCI bridge can be
1132 for (i
= 0; i
< CARDBUS_RESERVE_BUSNR
; i
++) {
1133 struct pci_bus
*parent
= bus
;
1134 if (pci_find_bus(pci_domain_nr(bus
),
1137 while (parent
->parent
) {
1138 if ((!pcibios_assign_all_busses()) &&
1139 (parent
->busn_res
.end
> max
) &&
1140 (parent
->busn_res
.end
<= max
+i
)) {
1143 parent
= parent
->parent
;
1147 * Often, there are two cardbus bridges
1148 * -- try to leave one valid bus number
1158 * Set the subordinate bus number to its real value.
1160 pci_bus_update_busn_res_end(child
, max
);
1161 pci_write_config_byte(dev
, PCI_SUBORDINATE_BUS
, max
);
1164 sprintf(child
->name
,
1165 (is_cardbus
? "PCI CardBus %04x:%02x" : "PCI Bus %04x:%02x"),
1166 pci_domain_nr(bus
), child
->number
);
1168 /* Has only triggered on CardBus, fixup is in yenta_socket */
1169 while (bus
->parent
) {
1170 if ((child
->busn_res
.end
> bus
->busn_res
.end
) ||
1171 (child
->number
> bus
->busn_res
.end
) ||
1172 (child
->number
< bus
->number
) ||
1173 (child
->busn_res
.end
< bus
->number
)) {
1174 dev_info(&child
->dev
, "%pR %s hidden behind%s bridge %s %pR\n",
1176 (bus
->number
> child
->busn_res
.end
&&
1177 bus
->busn_res
.end
< child
->number
) ?
1178 "wholly" : "partially",
1179 bus
->self
->transparent
? " transparent" : "",
1180 dev_name(&bus
->dev
),
1187 pci_write_config_word(dev
, PCI_BRIDGE_CONTROL
, bctl
);
1189 pm_runtime_put(&dev
->dev
);
/*
 * pci_scan_bridge() - Scan buses behind a bridge
 * @bus: Parent bus the bridge is on
 * @dev: Bridge itself
 * @max: Starting subordinate number of buses behind this bridge
 * @pass: Either %0 (scan already configured bridges) or %1 (scan bridges
 *        that need to be reconfigured.
 *
 * If it's a bridge, configure it and scan the bus behind it.
 * For CardBus bridges, we don't scan behind as the devices will
 * be handled by the bridge driver itself.
 *
 * We need to process bridges in two passes -- first we scan those
 * already configured by the BIOS and after we are done with all of
 * them, we proceed to assigning numbers to the remaining buses in
 * order to avoid overlaps between old and new bus numbers.
 */
int pci_scan_bridge(struct pci_bus *bus, struct pci_dev *dev, int max, int pass)
{
	return pci_scan_bridge_extend(bus, dev, max, 0, pass);
}
EXPORT_SYMBOL(pci_scan_bridge);
1218 * Read interrupt line and base address registers.
1219 * The architecture-dependent code can tweak these, of course.
1221 static void pci_read_irq(struct pci_dev
*dev
)
1225 pci_read_config_byte(dev
, PCI_INTERRUPT_PIN
, &irq
);
1228 pci_read_config_byte(dev
, PCI_INTERRUPT_LINE
, &irq
);
1232 void set_pcie_port_type(struct pci_dev
*pdev
)
1237 struct pci_dev
*parent
;
1239 pos
= pci_find_capability(pdev
, PCI_CAP_ID_EXP
);
1243 pdev
->pcie_cap
= pos
;
1244 pci_read_config_word(pdev
, pos
+ PCI_EXP_FLAGS
, ®16
);
1245 pdev
->pcie_flags_reg
= reg16
;
1246 pci_read_config_word(pdev
, pos
+ PCI_EXP_DEVCAP
, ®16
);
1247 pdev
->pcie_mpss
= reg16
& PCI_EXP_DEVCAP_PAYLOAD
;
1250 * A Root Port or a PCI-to-PCIe bridge is always the upstream end
1251 * of a Link. No PCIe component has two Links. Two Links are
1252 * connected by a Switch that has a Port on each Link and internal
1253 * logic to connect the two Ports.
1255 type
= pci_pcie_type(pdev
);
1256 if (type
== PCI_EXP_TYPE_ROOT_PORT
||
1257 type
== PCI_EXP_TYPE_PCIE_BRIDGE
)
1258 pdev
->has_secondary_link
= 1;
1259 else if (type
== PCI_EXP_TYPE_UPSTREAM
||
1260 type
== PCI_EXP_TYPE_DOWNSTREAM
) {
1261 parent
= pci_upstream_bridge(pdev
);
1264 * Usually there's an upstream device (Root Port or Switch
1265 * Downstream Port), but we can't assume one exists.
1267 if (parent
&& !parent
->has_secondary_link
)
1268 pdev
->has_secondary_link
= 1;
1272 void set_pcie_hotplug_bridge(struct pci_dev
*pdev
)
1276 pcie_capability_read_dword(pdev
, PCI_EXP_SLTCAP
, ®32
);
1277 if (reg32
& PCI_EXP_SLTCAP_HPC
)
1278 pdev
->is_hotplug_bridge
= 1;
1281 static void set_pcie_thunderbolt(struct pci_dev
*dev
)
1286 while ((vsec
= pci_find_next_ext_capability(dev
, vsec
,
1287 PCI_EXT_CAP_ID_VNDR
))) {
1288 pci_read_config_dword(dev
, vsec
+ PCI_VNDR_HEADER
, &header
);
1290 /* Is the device part of a Thunderbolt controller? */
1291 if (dev
->vendor
== PCI_VENDOR_ID_INTEL
&&
1292 PCI_VNDR_HEADER_ID(header
) == PCI_VSEC_ID_INTEL_TBT
) {
1293 dev
->is_thunderbolt
= 1;
1299 static void set_pcie_untrusted(struct pci_dev
*dev
)
1301 struct pci_dev
*parent
;
1304 * If the upstream bridge is untrusted we treat this device
1305 * untrusted as well.
1307 parent
= pci_upstream_bridge(dev
);
1308 if (parent
&& parent
->untrusted
)
1309 dev
->untrusted
= true;
1313 * pci_ext_cfg_is_aliased - is ext config space just an alias of std config?
1316 * PCI Express to PCI/PCI-X Bridge Specification, rev 1.0, 4.1.4 says that
1317 * when forwarding a type1 configuration request the bridge must check that
1318 * the extended register address field is zero. The bridge is not permitted
1319 * to forward the transactions and must handle it as an Unsupported Request.
1320 * Some bridges do not follow this rule and simply drop the extended register
1321 * bits, resulting in the standard config space being aliased, every 256
1322 * bytes across the entire configuration space. Test for this condition by
1323 * comparing the first dword of each potential alias to the vendor/device ID.
1325 * ASM1083/1085 PCIe-to-PCI Reversible Bridge (1b21:1080, rev 01 & 03)
1326 * AMD/ATI SBx00 PCI to PCI Bridge (1002:4384, rev 40)
1328 static bool pci_ext_cfg_is_aliased(struct pci_dev
*dev
)
1330 #ifdef CONFIG_PCI_QUIRKS
1334 pci_read_config_dword(dev
, PCI_VENDOR_ID
, &header
);
1336 for (pos
= PCI_CFG_SPACE_SIZE
;
1337 pos
< PCI_CFG_SPACE_EXP_SIZE
; pos
+= PCI_CFG_SPACE_SIZE
) {
1338 if (pci_read_config_dword(dev
, pos
, &tmp
) != PCIBIOS_SUCCESSFUL
1350 * pci_cfg_space_size - get the configuration space size of the PCI device.
1353 * Regular PCI devices have 256 bytes, but PCI-X 2 and PCI Express devices
1354 * have 4096 bytes. Even if the device is capable, that doesn't mean we can
1355 * access it. Maybe we don't have a way to generate extended config space
1356 * accesses, or the device is behind a reverse Express bridge. So we try
1357 * reading the dword at 0x100 which must either be 0 or a valid extended
1358 * capability header.
1360 static int pci_cfg_space_size_ext(struct pci_dev
*dev
)
1363 int pos
= PCI_CFG_SPACE_SIZE
;
1365 if (pci_read_config_dword(dev
, pos
, &status
) != PCIBIOS_SUCCESSFUL
)
1366 return PCI_CFG_SPACE_SIZE
;
1367 if (status
== 0xffffffff || pci_ext_cfg_is_aliased(dev
))
1368 return PCI_CFG_SPACE_SIZE
;
1370 return PCI_CFG_SPACE_EXP_SIZE
;
1373 int pci_cfg_space_size(struct pci_dev
*dev
)
1379 class = dev
->class >> 8;
1380 if (class == PCI_CLASS_BRIDGE_HOST
)
1381 return pci_cfg_space_size_ext(dev
);
1383 if (pci_is_pcie(dev
))
1384 return pci_cfg_space_size_ext(dev
);
1386 pos
= pci_find_capability(dev
, PCI_CAP_ID_PCIX
);
1388 return PCI_CFG_SPACE_SIZE
;
1390 pci_read_config_dword(dev
, pos
+ PCI_X_STATUS
, &status
);
1391 if (status
& (PCI_X_STATUS_266MHZ
| PCI_X_STATUS_533MHZ
))
1392 return pci_cfg_space_size_ext(dev
);
1394 return PCI_CFG_SPACE_SIZE
;
1397 #define LEGACY_IO_RESOURCE (IORESOURCE_IO | IORESOURCE_PCI_FIXED)
1399 static void pci_msi_setup_pci_dev(struct pci_dev
*dev
)
1402 * Disable the MSI hardware to avoid screaming interrupts
1403 * during boot. This is the power on reset default so
1404 * usually this should be a noop.
1406 dev
->msi_cap
= pci_find_capability(dev
, PCI_CAP_ID_MSI
);
1408 pci_msi_set_enable(dev
, 0);
1410 dev
->msix_cap
= pci_find_capability(dev
, PCI_CAP_ID_MSIX
);
1412 pci_msix_clear_and_set_ctrl(dev
, PCI_MSIX_FLAGS_ENABLE
, 0);
1416 * pci_intx_mask_broken - test PCI_COMMAND_INTX_DISABLE writability
1419 * Test whether PCI_COMMAND_INTX_DISABLE is writable for @dev. Check this
1420 * at enumeration-time to avoid modifying PCI_COMMAND at run-time.
1422 static int pci_intx_mask_broken(struct pci_dev
*dev
)
1424 u16 orig
, toggle
, new;
1426 pci_read_config_word(dev
, PCI_COMMAND
, &orig
);
1427 toggle
= orig
^ PCI_COMMAND_INTX_DISABLE
;
1428 pci_write_config_word(dev
, PCI_COMMAND
, toggle
);
1429 pci_read_config_word(dev
, PCI_COMMAND
, &new);
1431 pci_write_config_word(dev
, PCI_COMMAND
, orig
);
1434 * PCI_COMMAND_INTX_DISABLE was reserved and read-only prior to PCI
1435 * r2.3, so strictly speaking, a device is not *broken* if it's not
1436 * writable. But we'll live with the misnomer for now.
1444 * pci_setup_device - fill in class and map information of a device
1445 * @dev: the device structure to fill
1447 * Initialize the device structure with information about the device's
1448 * vendor,class,memory and IO-space addresses,IRQ lines etc.
1449 * Called at initialisation of the PCI subsystem and by CardBus services.
1450 * Returns 0 on success and negative if unknown type of device (not normal,
1451 * bridge or CardBus).
1453 int pci_setup_device(struct pci_dev
*dev
)
1459 struct pci_bus_region region
;
1460 struct resource
*res
;
1462 if (pci_read_config_byte(dev
, PCI_HEADER_TYPE
, &hdr_type
))
1465 dev
->sysdata
= dev
->bus
->sysdata
;
1466 dev
->dev
.parent
= dev
->bus
->bridge
;
1467 dev
->dev
.bus
= &pci_bus_type
;
1468 dev
->hdr_type
= hdr_type
& 0x7f;
1469 dev
->multifunction
= !!(hdr_type
& 0x80);
1470 dev
->error_state
= pci_channel_io_normal
;
1471 set_pcie_port_type(dev
);
1473 pci_dev_assign_slot(dev
);
1474 /* Assume 32-bit PCI; let 64-bit PCI cards (which are far rarer)
1475 set this higher, assuming the system even supports it. */
1476 dev
->dma_mask
= 0xffffffff;
1478 dev_set_name(&dev
->dev
, "%04x:%02x:%02x.%d", pci_domain_nr(dev
->bus
),
1479 dev
->bus
->number
, PCI_SLOT(dev
->devfn
),
1480 PCI_FUNC(dev
->devfn
));
1482 pci_read_config_dword(dev
, PCI_CLASS_REVISION
, &class);
1483 dev
->revision
= class & 0xff;
1484 dev
->class = class >> 8; /* upper 3 bytes */
1486 dev_printk(KERN_DEBUG
, &dev
->dev
, "[%04x:%04x] type %02x class %#08x\n",
1487 dev
->vendor
, dev
->device
, dev
->hdr_type
, dev
->class);
1489 /* need to have dev->class ready */
1490 dev
->cfg_size
= pci_cfg_space_size(dev
);
1492 /* need to have dev->cfg_size ready */
1493 set_pcie_thunderbolt(dev
);
1495 set_pcie_untrusted(dev
);
1497 /* "Unknown power state" */
1498 dev
->current_state
= PCI_UNKNOWN
;
1500 /* Early fixups, before probing the BARs */
1501 pci_fixup_device(pci_fixup_early
, dev
);
1502 /* device class may be changed after fixup */
1503 class = dev
->class >> 8;
1505 if (dev
->non_compliant_bars
) {
1506 pci_read_config_word(dev
, PCI_COMMAND
, &cmd
);
1507 if (cmd
& (PCI_COMMAND_IO
| PCI_COMMAND_MEMORY
)) {
1508 dev_info(&dev
->dev
, "device has non-compliant BARs; disabling IO/MEM decoding\n");
1509 cmd
&= ~PCI_COMMAND_IO
;
1510 cmd
&= ~PCI_COMMAND_MEMORY
;
1511 pci_write_config_word(dev
, PCI_COMMAND
, cmd
);
1515 dev
->broken_intx_masking
= pci_intx_mask_broken(dev
);
1517 switch (dev
->hdr_type
) { /* header type */
1518 case PCI_HEADER_TYPE_NORMAL
: /* standard header */
1519 if (class == PCI_CLASS_BRIDGE_PCI
)
1522 pci_read_bases(dev
, 6, PCI_ROM_ADDRESS
);
1523 pci_read_config_word(dev
, PCI_SUBSYSTEM_VENDOR_ID
, &dev
->subsystem_vendor
);
1524 pci_read_config_word(dev
, PCI_SUBSYSTEM_ID
, &dev
->subsystem_device
);
1527 * Do the ugly legacy mode stuff here rather than broken chip
1528 * quirk code. Legacy mode ATA controllers have fixed
1529 * addresses. These are not always echoed in BAR0-3, and
1530 * BAR0-3 in a few cases contain junk!
1532 if (class == PCI_CLASS_STORAGE_IDE
) {
1534 pci_read_config_byte(dev
, PCI_CLASS_PROG
, &progif
);
1535 if ((progif
& 1) == 0) {
1536 region
.start
= 0x1F0;
1538 res
= &dev
->resource
[0];
1539 res
->flags
= LEGACY_IO_RESOURCE
;
1540 pcibios_bus_to_resource(dev
->bus
, res
, ®ion
);
1541 dev_info(&dev
->dev
, "legacy IDE quirk: reg 0x10: %pR\n",
1543 region
.start
= 0x3F6;
1545 res
= &dev
->resource
[1];
1546 res
->flags
= LEGACY_IO_RESOURCE
;
1547 pcibios_bus_to_resource(dev
->bus
, res
, ®ion
);
1548 dev_info(&dev
->dev
, "legacy IDE quirk: reg 0x14: %pR\n",
1551 if ((progif
& 4) == 0) {
1552 region
.start
= 0x170;
1554 res
= &dev
->resource
[2];
1555 res
->flags
= LEGACY_IO_RESOURCE
;
1556 pcibios_bus_to_resource(dev
->bus
, res
, ®ion
);
1557 dev_info(&dev
->dev
, "legacy IDE quirk: reg 0x18: %pR\n",
1559 region
.start
= 0x376;
1561 res
= &dev
->resource
[3];
1562 res
->flags
= LEGACY_IO_RESOURCE
;
1563 pcibios_bus_to_resource(dev
->bus
, res
, ®ion
);
1564 dev_info(&dev
->dev
, "legacy IDE quirk: reg 0x1c: %pR\n",
1570 case PCI_HEADER_TYPE_BRIDGE
: /* bridge header */
1571 if (class != PCI_CLASS_BRIDGE_PCI
)
1573 /* The PCI-to-PCI bridge spec requires that subtractive
1574 decoding (i.e. transparent) bridge must have programming
1575 interface code of 0x01. */
1577 dev
->transparent
= ((dev
->class & 0xff) == 1);
1578 pci_read_bases(dev
, 2, PCI_ROM_ADDRESS1
);
1579 set_pcie_hotplug_bridge(dev
);
1580 pos
= pci_find_capability(dev
, PCI_CAP_ID_SSVID
);
1582 pci_read_config_word(dev
, pos
+ PCI_SSVID_VENDOR_ID
, &dev
->subsystem_vendor
);
1583 pci_read_config_word(dev
, pos
+ PCI_SSVID_DEVICE_ID
, &dev
->subsystem_device
);
1587 case PCI_HEADER_TYPE_CARDBUS
: /* CardBus bridge header */
1588 if (class != PCI_CLASS_BRIDGE_CARDBUS
)
1591 pci_read_bases(dev
, 1, 0);
1592 pci_read_config_word(dev
, PCI_CB_SUBSYSTEM_VENDOR_ID
, &dev
->subsystem_vendor
);
1593 pci_read_config_word(dev
, PCI_CB_SUBSYSTEM_ID
, &dev
->subsystem_device
);
1596 default: /* unknown header */
1597 dev_err(&dev
->dev
, "unknown header type %02x, ignoring device\n",
1602 dev_err(&dev
->dev
, "ignoring class %#08x (doesn't match header type %02x)\n",
1603 dev
->class, dev
->hdr_type
);
1604 dev
->class = PCI_CLASS_NOT_DEFINED
<< 8;
1607 /* We found a fine healthy device, go go go... */
1611 static void pci_configure_mps(struct pci_dev
*dev
)
1613 struct pci_dev
*bridge
= pci_upstream_bridge(dev
);
1614 int mps
, mpss
, p_mps
, rc
;
1616 if (!pci_is_pcie(dev
) || !bridge
|| !pci_is_pcie(bridge
))
1619 /* MPS and MRRS fields are of type 'RsvdP' for VFs, short-circuit out */
1623 mps
= pcie_get_mps(dev
);
1624 p_mps
= pcie_get_mps(bridge
);
1629 if (pcie_bus_config
== PCIE_BUS_TUNE_OFF
) {
1630 dev_warn(&dev
->dev
, "Max Payload Size %d, but upstream %s set to %d; if necessary, use \"pci=pcie_bus_safe\" and report a bug\n",
1631 mps
, pci_name(bridge
), p_mps
);
1636 * Fancier MPS configuration is done later by
1637 * pcie_bus_configure_settings()
1639 if (pcie_bus_config
!= PCIE_BUS_DEFAULT
)
1642 mpss
= 128 << dev
->pcie_mpss
;
1643 if (mpss
< p_mps
&& pci_pcie_type(bridge
) == PCI_EXP_TYPE_ROOT_PORT
) {
1644 pcie_set_mps(bridge
, mpss
);
1645 pci_info(dev
, "Upstream bridge's Max Payload Size set to %d (was %d, max %d)\n",
1646 mpss
, p_mps
, 128 << bridge
->pcie_mpss
);
1647 p_mps
= pcie_get_mps(bridge
);
1650 rc
= pcie_set_mps(dev
, p_mps
);
1652 dev_warn(&dev
->dev
, "can't set Max Payload Size to %d; if necessary, use \"pci=pcie_bus_safe\" and report a bug\n",
1657 dev_info(&dev
->dev
, "Max Payload Size set to %d (was %d, max %d)\n",
1661 static struct hpp_type0 pci_default_type0
= {
1663 .cache_line_size
= 8,
1664 .latency_timer
= 0x40,
1669 static void program_hpp_type0(struct pci_dev
*dev
, struct hpp_type0
*hpp
)
1671 u16 pci_cmd
, pci_bctl
;
1674 hpp
= &pci_default_type0
;
1676 if (hpp
->revision
> 1) {
1678 "PCI settings rev %d not supported; using defaults\n",
1680 hpp
= &pci_default_type0
;
1683 pci_write_config_byte(dev
, PCI_CACHE_LINE_SIZE
, hpp
->cache_line_size
);
1684 pci_write_config_byte(dev
, PCI_LATENCY_TIMER
, hpp
->latency_timer
);
1685 pci_read_config_word(dev
, PCI_COMMAND
, &pci_cmd
);
1686 if (hpp
->enable_serr
)
1687 pci_cmd
|= PCI_COMMAND_SERR
;
1688 if (hpp
->enable_perr
)
1689 pci_cmd
|= PCI_COMMAND_PARITY
;
1690 pci_write_config_word(dev
, PCI_COMMAND
, pci_cmd
);
1692 /* Program bridge control value */
1693 if ((dev
->class >> 8) == PCI_CLASS_BRIDGE_PCI
) {
1694 pci_write_config_byte(dev
, PCI_SEC_LATENCY_TIMER
,
1695 hpp
->latency_timer
);
1696 pci_read_config_word(dev
, PCI_BRIDGE_CONTROL
, &pci_bctl
);
1697 if (hpp
->enable_serr
)
1698 pci_bctl
|= PCI_BRIDGE_CTL_SERR
;
1699 if (hpp
->enable_perr
)
1700 pci_bctl
|= PCI_BRIDGE_CTL_PARITY
;
1701 pci_write_config_word(dev
, PCI_BRIDGE_CONTROL
, pci_bctl
);
1705 static void program_hpp_type1(struct pci_dev
*dev
, struct hpp_type1
*hpp
)
1712 pos
= pci_find_capability(dev
, PCI_CAP_ID_PCIX
);
1716 dev_warn(&dev
->dev
, "PCI-X settings not supported\n");
1719 static bool pcie_root_rcb_set(struct pci_dev
*dev
)
1721 struct pci_dev
*rp
= pcie_find_root_port(dev
);
1727 pcie_capability_read_word(rp
, PCI_EXP_LNKCTL
, &lnkctl
);
1728 if (lnkctl
& PCI_EXP_LNKCTL_RCB
)
1734 static void program_hpp_type2(struct pci_dev
*dev
, struct hpp_type2
*hpp
)
1742 if (!pci_is_pcie(dev
))
1745 if (hpp
->revision
> 1) {
1746 dev_warn(&dev
->dev
, "PCIe settings rev %d not supported\n",
1752 * Don't allow _HPX to change MPS or MRRS settings. We manage
1753 * those to make sure they're consistent with the rest of the
1756 hpp
->pci_exp_devctl_and
|= PCI_EXP_DEVCTL_PAYLOAD
|
1757 PCI_EXP_DEVCTL_READRQ
;
1758 hpp
->pci_exp_devctl_or
&= ~(PCI_EXP_DEVCTL_PAYLOAD
|
1759 PCI_EXP_DEVCTL_READRQ
);
1761 /* Initialize Device Control Register */
1762 pcie_capability_clear_and_set_word(dev
, PCI_EXP_DEVCTL
,
1763 ~hpp
->pci_exp_devctl_and
, hpp
->pci_exp_devctl_or
);
1765 /* Initialize Link Control Register */
1766 if (pcie_cap_has_lnkctl(dev
)) {
1769 * If the Root Port supports Read Completion Boundary of
1770 * 128, set RCB to 128. Otherwise, clear it.
1772 hpp
->pci_exp_lnkctl_and
|= PCI_EXP_LNKCTL_RCB
;
1773 hpp
->pci_exp_lnkctl_or
&= ~PCI_EXP_LNKCTL_RCB
;
1774 if (pcie_root_rcb_set(dev
))
1775 hpp
->pci_exp_lnkctl_or
|= PCI_EXP_LNKCTL_RCB
;
1777 pcie_capability_clear_and_set_word(dev
, PCI_EXP_LNKCTL
,
1778 ~hpp
->pci_exp_lnkctl_and
, hpp
->pci_exp_lnkctl_or
);
1781 /* Find Advanced Error Reporting Enhanced Capability */
1782 pos
= pci_find_ext_capability(dev
, PCI_EXT_CAP_ID_ERR
);
1786 /* Initialize Uncorrectable Error Mask Register */
1787 pci_read_config_dword(dev
, pos
+ PCI_ERR_UNCOR_MASK
, ®32
);
1788 reg32
= (reg32
& hpp
->unc_err_mask_and
) | hpp
->unc_err_mask_or
;
1789 pci_write_config_dword(dev
, pos
+ PCI_ERR_UNCOR_MASK
, reg32
);
1791 /* Initialize Uncorrectable Error Severity Register */
1792 pci_read_config_dword(dev
, pos
+ PCI_ERR_UNCOR_SEVER
, ®32
);
1793 reg32
= (reg32
& hpp
->unc_err_sever_and
) | hpp
->unc_err_sever_or
;
1794 pci_write_config_dword(dev
, pos
+ PCI_ERR_UNCOR_SEVER
, reg32
);
1796 /* Initialize Correctable Error Mask Register */
1797 pci_read_config_dword(dev
, pos
+ PCI_ERR_COR_MASK
, ®32
);
1798 reg32
= (reg32
& hpp
->cor_err_mask_and
) | hpp
->cor_err_mask_or
;
1799 pci_write_config_dword(dev
, pos
+ PCI_ERR_COR_MASK
, reg32
);
1801 /* Initialize Advanced Error Capabilities and Control Register */
1802 pci_read_config_dword(dev
, pos
+ PCI_ERR_CAP
, ®32
);
1803 reg32
= (reg32
& hpp
->adv_err_cap_and
) | hpp
->adv_err_cap_or
;
1804 /* Don't enable ECRC generation or checking if unsupported */
1805 if (!(reg32
& PCI_ERR_CAP_ECRC_GENC
))
1806 reg32
&= ~PCI_ERR_CAP_ECRC_GENE
;
1807 if (!(reg32
& PCI_ERR_CAP_ECRC_CHKC
))
1808 reg32
&= ~PCI_ERR_CAP_ECRC_CHKE
;
1809 pci_write_config_dword(dev
, pos
+ PCI_ERR_CAP
, reg32
);
1812 * FIXME: The following two registers are not supported yet.
1814 * o Secondary Uncorrectable Error Severity Register
1815 * o Secondary Uncorrectable Error Mask Register
1819 int pci_configure_extended_tags(struct pci_dev
*dev
, void *ign
)
1821 struct pci_host_bridge
*host
;
1826 if (!pci_is_pcie(dev
))
1829 ret
= pcie_capability_read_dword(dev
, PCI_EXP_DEVCAP
, &cap
);
1833 if (!(cap
& PCI_EXP_DEVCAP_EXT_TAG
))
1836 ret
= pcie_capability_read_word(dev
, PCI_EXP_DEVCTL
, &ctl
);
1840 host
= pci_find_host_bridge(dev
->bus
);
1845 * If some device in the hierarchy doesn't handle Extended Tags
1846 * correctly, make sure they're disabled.
1848 if (host
->no_ext_tags
) {
1849 if (ctl
& PCI_EXP_DEVCTL_EXT_TAG
) {
1850 dev_info(&dev
->dev
, "disabling Extended Tags\n");
1851 pcie_capability_clear_word(dev
, PCI_EXP_DEVCTL
,
1852 PCI_EXP_DEVCTL_EXT_TAG
);
1857 if (!(ctl
& PCI_EXP_DEVCTL_EXT_TAG
)) {
1858 dev_info(&dev
->dev
, "enabling Extended Tags\n");
1859 pcie_capability_set_word(dev
, PCI_EXP_DEVCTL
,
1860 PCI_EXP_DEVCTL_EXT_TAG
);
1866 * pcie_relaxed_ordering_enabled - Probe for PCIe relaxed ordering enable
1867 * @dev: PCI device to query
1869 * Returns true if the device has enabled relaxed ordering attribute.
1871 bool pcie_relaxed_ordering_enabled(struct pci_dev
*dev
)
1875 pcie_capability_read_word(dev
, PCI_EXP_DEVCTL
, &v
);
1877 return !!(v
& PCI_EXP_DEVCTL_RELAX_EN
);
1879 EXPORT_SYMBOL(pcie_relaxed_ordering_enabled
);
1881 static void pci_configure_relaxed_ordering(struct pci_dev
*dev
)
1883 struct pci_dev
*root
;
1885 /* PCI_EXP_DEVICE_RELAX_EN is RsvdP in VFs */
1889 if (!pcie_relaxed_ordering_enabled(dev
))
1893 * For now, we only deal with Relaxed Ordering issues with Root
1894 * Ports. Peer-to-Peer DMA is another can of worms.
1896 root
= pci_find_pcie_root_port(dev
);
1900 if (root
->dev_flags
& PCI_DEV_FLAGS_NO_RELAXED_ORDERING
) {
1901 pcie_capability_clear_word(dev
, PCI_EXP_DEVCTL
,
1902 PCI_EXP_DEVCTL_RELAX_EN
);
1903 dev_info(&dev
->dev
, "Disable Relaxed Ordering because the Root Port didn't support it\n");
/* Enable Latency Tolerance Reporting if the whole upstream path supports it. */
static void pci_configure_ltr(struct pci_dev *dev)
{
#ifdef CONFIG_PCIEASPM
	u32 cap;
	struct pci_dev *bridge;

	if (!pci_is_pcie(dev))
		return;

	pcie_capability_read_dword(dev, PCI_EXP_DEVCAP2, &cap);
	if (!(cap & PCI_EXP_DEVCAP2_LTR))
		return;

	/*
	 * Software must not enable LTR in an Endpoint unless the Root
	 * Complex and all intermediate Switches indicate support for LTR.
	 * PCIe r3.1, sec 6.18.
	 */
	if (pci_pcie_type(dev) == PCI_EXP_TYPE_ROOT_PORT)
		dev->ltr_path = 1;
	else {
		bridge = pci_upstream_bridge(dev);
		if (bridge && bridge->ltr_path)
			dev->ltr_path = 1;
	}

	if (dev->ltr_path)
		pcie_capability_set_word(dev, PCI_EXP_DEVCTL2,
					 PCI_EXP_DEVCTL2_LTR_EN);
#endif
}
1939 static void pci_configure_device(struct pci_dev
*dev
)
1941 struct hotplug_params hpp
;
1944 pci_configure_mps(dev
);
1945 pci_configure_extended_tags(dev
, NULL
);
1946 pci_configure_relaxed_ordering(dev
);
1947 pci_configure_ltr(dev
);
1949 memset(&hpp
, 0, sizeof(hpp
));
1950 ret
= pci_get_hp_params(dev
, &hpp
);
1954 program_hpp_type2(dev
, hpp
.t2
);
1955 program_hpp_type1(dev
, hpp
.t1
);
1956 program_hpp_type0(dev
, hpp
.t0
);
/* Tear down per-capability state allocated during enumeration. */
static void pci_release_capabilities(struct pci_dev *dev)
{
	pci_vpd_release(dev);
	pci_iov_release(dev);
	pci_free_cap_save_buffers(dev);
}
1967 * pci_release_dev - free a pci device structure when all users of it are finished.
1968 * @dev: device that's been disconnected
1970 * Will be called only by the device core when all users of this pci device are
1973 static void pci_release_dev(struct device
*dev
)
1975 struct pci_dev
*pci_dev
;
1977 pci_dev
= to_pci_dev(dev
);
1978 pci_release_capabilities(pci_dev
);
1979 pci_release_of_node(pci_dev
);
1980 pcibios_release_device(pci_dev
);
1981 pci_bus_put(pci_dev
->bus
);
1982 kfree(pci_dev
->driver_override
);
1983 kfree(pci_dev
->dma_alias_mask
);
1987 struct pci_dev
*pci_alloc_dev(struct pci_bus
*bus
)
1989 struct pci_dev
*dev
;
1991 dev
= kzalloc(sizeof(struct pci_dev
), GFP_KERNEL
);
1995 INIT_LIST_HEAD(&dev
->bus_list
);
1996 dev
->dev
.type
= &pci_dev_type
;
1997 dev
->bus
= pci_bus_get(bus
);
2001 EXPORT_SYMBOL(pci_alloc_dev
);
2003 static bool pci_bus_crs_vendor_id(u32 l
)
2005 return (l
& 0xffff) == 0x0001;
2008 static bool pci_bus_wait_crs(struct pci_bus
*bus
, int devfn
, u32
*l
,
2013 if (!pci_bus_crs_vendor_id(*l
))
2014 return true; /* not a CRS completion */
2017 return false; /* CRS, but caller doesn't want to wait */
2020 * We got the reserved Vendor ID that indicates a completion with
2021 * Configuration Request Retry Status (CRS). Retry until we get a
2022 * valid Vendor ID or we time out.
2024 while (pci_bus_crs_vendor_id(*l
)) {
2025 if (delay
> timeout
) {
2026 pr_warn("pci %04x:%02x:%02x.%d: not ready after %dms; giving up\n",
2027 pci_domain_nr(bus
), bus
->number
,
2028 PCI_SLOT(devfn
), PCI_FUNC(devfn
), delay
- 1);
2033 pr_info("pci %04x:%02x:%02x.%d: not ready after %dms; waiting\n",
2034 pci_domain_nr(bus
), bus
->number
,
2035 PCI_SLOT(devfn
), PCI_FUNC(devfn
), delay
- 1);
2040 if (pci_bus_read_config_dword(bus
, devfn
, PCI_VENDOR_ID
, l
))
2045 pr_info("pci %04x:%02x:%02x.%d: ready after %dms\n",
2046 pci_domain_nr(bus
), bus
->number
,
2047 PCI_SLOT(devfn
), PCI_FUNC(devfn
), delay
- 1);
2052 bool pci_bus_read_dev_vendor_id(struct pci_bus
*bus
, int devfn
, u32
*l
,
2055 if (pci_bus_read_config_dword(bus
, devfn
, PCI_VENDOR_ID
, l
))
2058 /* some broken boards return 0 or ~0 if a slot is empty: */
2059 if (*l
== 0xffffffff || *l
== 0x00000000 ||
2060 *l
== 0x0000ffff || *l
== 0xffff0000)
2063 if (pci_bus_crs_vendor_id(*l
))
2064 return pci_bus_wait_crs(bus
, devfn
, l
, timeout
);
2068 EXPORT_SYMBOL(pci_bus_read_dev_vendor_id
);
2071 * Read the config data for a PCI device, sanity-check it
2072 * and fill in the dev structure...
2074 static struct pci_dev
*pci_scan_device(struct pci_bus
*bus
, int devfn
)
2076 struct pci_dev
*dev
;
2079 if (!pci_bus_read_dev_vendor_id(bus
, devfn
, &l
, 60*1000))
2082 dev
= pci_alloc_dev(bus
);
2087 dev
->vendor
= l
& 0xffff;
2088 dev
->device
= (l
>> 16) & 0xffff;
2090 pci_set_of_node(dev
);
2092 if (pci_setup_device(dev
)) {
2093 pci_bus_put(dev
->bus
);
/* Initialize every optional capability this device may expose. */
static void pci_init_capabilities(struct pci_dev *dev)
{
	/* Enhanced Allocation */
	pci_ea_init(dev);

	/* Setup MSI caps & disable MSI/MSI-X interrupts */
	pci_msi_setup_pci_dev(dev);

	/* Buffers for saving PCIe and PCI-X capabilities */
	pci_allocate_cap_save_buffers(dev);

	/* Power Management */
	pci_pm_init(dev);

	/* Vital Product Data */
	pci_vpd_init(dev);

	/* Alternative Routing-ID Forwarding */
	pci_configure_ari(dev);

	/* Single Root I/O Virtualization */
	pci_iov_init(dev);

	/* Address Translation Services */
	pci_ats_init(dev);

	/* Enable ACS P2P upstream forwarding */
	pci_enable_acs(dev);

	/* Precision Time Measurement */
	pci_ptm_init(dev);

	/* Advanced Error Reporting */
	pci_aer_init(dev);
}
2138 * This is the equivalent of pci_host_bridge_msi_domain that acts on
2139 * devices. Firmware interfaces that can select the MSI domain on a
2140 * per-device basis should be called from here.
2142 static struct irq_domain
*pci_dev_msi_domain(struct pci_dev
*dev
)
2144 struct irq_domain
*d
;
2147 * If a domain has been set through the pcibios_add_device
2148 * callback, then this is the one (platform code knows best).
2150 d
= dev_get_msi_domain(&dev
->dev
);
2155 * Let's see if we have a firmware interface able to provide
2158 d
= pci_msi_get_device_domain(dev
);
2165 static void pci_set_msi_domain(struct pci_dev
*dev
)
2167 struct irq_domain
*d
;
2170 * If the platform or firmware interfaces cannot supply a
2171 * device-specific MSI domain, then inherit the default domain
2172 * from the host bridge itself.
2174 d
= pci_dev_msi_domain(dev
);
2176 d
= dev_get_msi_domain(&dev
->bus
->dev
);
2178 dev_set_msi_domain(&dev
->dev
, d
);
2181 void pci_device_add(struct pci_dev
*dev
, struct pci_bus
*bus
)
2185 pci_configure_device(dev
);
2187 device_initialize(&dev
->dev
);
2188 dev
->dev
.release
= pci_release_dev
;
2190 set_dev_node(&dev
->dev
, pcibus_to_node(bus
));
2191 dev
->dev
.dma_mask
= &dev
->dma_mask
;
2192 dev
->dev
.dma_parms
= &dev
->dma_parms
;
2193 dev
->dev
.coherent_dma_mask
= 0xffffffffull
;
2195 pci_set_dma_max_seg_size(dev
, 65536);
2196 pci_set_dma_seg_boundary(dev
, 0xffffffff);
2198 /* Fix up broken headers */
2199 pci_fixup_device(pci_fixup_header
, dev
);
2201 /* moved out from quirk header fixup code */
2202 pci_reassigndev_resource_alignment(dev
);
2204 /* Clear the state_saved flag. */
2205 dev
->state_saved
= false;
2207 /* Initialize various capabilities */
2208 pci_init_capabilities(dev
);
2211 * Add the device to our list of discovered devices
2212 * and the bus list for fixup functions, etc.
2214 down_write(&pci_bus_sem
);
2215 list_add_tail(&dev
->bus_list
, &bus
->devices
);
2216 up_write(&pci_bus_sem
);
2218 ret
= pcibios_add_device(dev
);
2221 /* Setup MSI irq domain */
2222 pci_set_msi_domain(dev
);
2224 /* Notifier could use PCI capabilities */
2225 dev
->match_driver
= false;
2226 ret
= device_add(&dev
->dev
);
2230 struct pci_dev
*pci_scan_single_device(struct pci_bus
*bus
, int devfn
)
2232 struct pci_dev
*dev
;
2234 dev
= pci_get_slot(bus
, devfn
);
2240 dev
= pci_scan_device(bus
, devfn
);
2244 pci_device_add(dev
, bus
);
2248 EXPORT_SYMBOL(pci_scan_single_device
);
2250 static unsigned next_fn(struct pci_bus
*bus
, struct pci_dev
*dev
, unsigned fn
)
2256 if (pci_ari_enabled(bus
)) {
2259 pos
= pci_find_ext_capability(dev
, PCI_EXT_CAP_ID_ARI
);
2263 pci_read_config_word(dev
, pos
+ PCI_ARI_CAP
, &cap
);
2264 next_fn
= PCI_ARI_CAP_NFN(cap
);
2266 return 0; /* protect against malformed list */
2271 /* dev may be NULL for non-contiguous multifunction devices */
2272 if (!dev
|| dev
->multifunction
)
2273 return (fn
+ 1) % 8;
2278 static int only_one_child(struct pci_bus
*bus
)
2280 struct pci_dev
*bridge
= bus
->self
;
2283 * Systems with unusual topologies set PCI_SCAN_ALL_PCIE_DEVS so
2284 * we scan for all possible devices, not just Device 0.
2286 if (pci_has_flag(PCI_SCAN_ALL_PCIE_DEVS
))
2290 * A PCIe Downstream Port normally leads to a Link with only Device
2291 * 0 on it (PCIe spec r3.1, sec 7.3.1). As an optimization, scan
2292 * only for Device 0 in that situation.
2294 * Checking has_secondary_link is a hack to identify Downstream
2295 * Ports because sometimes Switches are configured such that the
2296 * PCIe Port Type labels are backwards.
2298 if (bridge
&& pci_is_pcie(bridge
) && bridge
->has_secondary_link
)
2305 * pci_scan_slot - scan a PCI slot on a bus for devices.
2306 * @bus: PCI bus to scan
2307 * @devfn: slot number to scan (must have zero function.)
2309 * Scan a PCI slot on the specified PCI bus for devices, adding
2310 * discovered devices to the @bus->devices list. New devices
2311 * will not have is_added set.
2313 * Returns the number of new devices found.
2315 int pci_scan_slot(struct pci_bus
*bus
, int devfn
)
2317 unsigned fn
, nr
= 0;
2318 struct pci_dev
*dev
;
2320 if (only_one_child(bus
) && (devfn
> 0))
2321 return 0; /* Already scanned the entire slot */
2323 dev
= pci_scan_single_device(bus
, devfn
);
2329 for (fn
= next_fn(bus
, dev
, 0); fn
> 0; fn
= next_fn(bus
, dev
, fn
)) {
2330 dev
= pci_scan_single_device(bus
, devfn
+ fn
);
2334 dev
->multifunction
= 1;
2338 /* only one slot has pcie device */
2339 if (bus
->self
&& nr
)
2340 pcie_aspm_init_link_state(bus
->self
);
2344 EXPORT_SYMBOL(pci_scan_slot
);
2346 static int pcie_find_smpss(struct pci_dev
*dev
, void *data
)
2350 if (!pci_is_pcie(dev
))
2354 * We don't have a way to change MPS settings on devices that have
2355 * drivers attached. A hot-added device might support only the minimum
2356 * MPS setting (MPS=128). Therefore, if the fabric contains a bridge
2357 * where devices may be hot-added, we limit the fabric MPS to 128 so
2358 * hot-added devices will work correctly.
2360 * However, if we hot-add a device to a slot directly below a Root
2361 * Port, it's impossible for there to be other existing devices below
2362 * the port. We don't limit the MPS in this case because we can
2363 * reconfigure MPS on both the Root Port and the hot-added device,
2364 * and there are no other devices involved.
2366 * Note that this PCIE_BUS_SAFE path assumes no peer-to-peer DMA.
2368 if (dev
->is_hotplug_bridge
&&
2369 pci_pcie_type(dev
) != PCI_EXP_TYPE_ROOT_PORT
)
2372 if (*smpss
> dev
->pcie_mpss
)
2373 *smpss
= dev
->pcie_mpss
;
2378 static void pcie_write_mps(struct pci_dev
*dev
, int mps
)
2382 if (pcie_bus_config
== PCIE_BUS_PERFORMANCE
) {
2383 mps
= 128 << dev
->pcie_mpss
;
2385 if (pci_pcie_type(dev
) != PCI_EXP_TYPE_ROOT_PORT
&&
2387 /* For "Performance", the assumption is made that
2388 * downstream communication will never be larger than
2389 * the MRRS. So, the MPS only needs to be configured
2390 * for the upstream communication. This being the case,
2391 * walk from the top down and set the MPS of the child
2392 * to that of the parent bus.
2394 * Configure the device MPS with the smaller of the
2395 * device MPSS or the bridge MPS (which is assumed to be
2396 * properly configured at this point to the largest
2397 * allowable MPS based on its parent bus).
2399 mps
= min(mps
, pcie_get_mps(dev
->bus
->self
));
2402 rc
= pcie_set_mps(dev
, mps
);
2404 dev_err(&dev
->dev
, "Failed attempting to set the MPS\n");
2407 static void pcie_write_mrrs(struct pci_dev
*dev
)
2411 /* In the "safe" case, do not configure the MRRS. There appear to be
2412 * issues with setting MRRS to 0 on a number of devices.
2414 if (pcie_bus_config
!= PCIE_BUS_PERFORMANCE
)
2417 /* For Max performance, the MRRS must be set to the largest supported
2418 * value. However, it cannot be configured larger than the MPS the
2419 * device or the bus can support. This should already be properly
2420 * configured by a prior call to pcie_write_mps.
2422 mrrs
= pcie_get_mps(dev
);
2424 /* MRRS is a R/W register. Invalid values can be written, but a
2425 * subsequent read will verify if the value is acceptable or not.
2426 * If the MRRS value provided is not acceptable (e.g., too large),
2427 * shrink the value until it is acceptable to the HW.
2429 while (mrrs
!= pcie_get_readrq(dev
) && mrrs
>= 128) {
2430 rc
= pcie_set_readrq(dev
, mrrs
);
2434 dev_warn(&dev
->dev
, "Failed attempting to set the MRRS\n");
2439 dev_err(&dev
->dev
, "MRRS was unable to be configured with a safe value. If problems are experienced, try running with pci=pcie_bus_safe\n");
2442 static int pcie_bus_configure_set(struct pci_dev
*dev
, void *data
)
2446 if (!pci_is_pcie(dev
))
2449 if (pcie_bus_config
== PCIE_BUS_TUNE_OFF
||
2450 pcie_bus_config
== PCIE_BUS_DEFAULT
)
2453 mps
= 128 << *(u8
*)data
;
2454 orig_mps
= pcie_get_mps(dev
);
2456 pcie_write_mps(dev
, mps
);
2457 pcie_write_mrrs(dev
);
2459 dev_info(&dev
->dev
, "Max Payload Size set to %4d/%4d (was %4d), Max Read Rq %4d\n",
2460 pcie_get_mps(dev
), 128 << dev
->pcie_mpss
,
2461 orig_mps
, pcie_get_readrq(dev
));
2466 /* pcie_bus_configure_settings requires that pci_walk_bus work in a top-down,
2467 * parents then children fashion. If this changes, then this code will not
2470 void pcie_bus_configure_settings(struct pci_bus
*bus
)
2477 if (!pci_is_pcie(bus
->self
))
2480 /* FIXME - Peer to peer DMA is possible, though the endpoint would need
2481 * to be aware of the MPS of the destination. To work around this,
2482 * simply force the MPS of the entire system to the smallest possible.
2484 if (pcie_bus_config
== PCIE_BUS_PEER2PEER
)
2487 if (pcie_bus_config
== PCIE_BUS_SAFE
) {
2488 smpss
= bus
->self
->pcie_mpss
;
2490 pcie_find_smpss(bus
->self
, &smpss
);
2491 pci_walk_bus(bus
, pcie_find_smpss
, &smpss
);
2494 pcie_bus_configure_set(bus
->self
, &smpss
);
2495 pci_walk_bus(bus
, pcie_bus_configure_set
, &smpss
);
2497 EXPORT_SYMBOL_GPL(pcie_bus_configure_settings
);
2500 * Called after each bus is probed, but before its children are examined. This
2501 * is marked as __weak because multiple architectures define it.
2503 void __weak
pcibios_fixup_bus(struct pci_bus
*bus
)
2505 /* nothing to do, expected to be removed in the future */
2509 * pci_scan_child_bus_extend() - Scan devices below a bus
2510 * @bus: Bus to scan for devices
2511 * @available_buses: Total number of buses available (%0 does not try to
2512 * extend beyond the minimal)
2514 * Scans devices below @bus including subordinate buses. Returns new
2515 * subordinate number including all the found devices. Passing
2516 * @available_buses causes the remaining bus space to be distributed
2517 * equally between hotplug-capable bridges to allow future extension of the
2520 static unsigned int pci_scan_child_bus_extend(struct pci_bus
*bus
,
2521 unsigned int available_buses
)
2523 unsigned int used_buses
, normal_bridges
= 0, hotplug_bridges
= 0;
2524 unsigned int start
= bus
->busn_res
.start
;
2525 unsigned int devfn
, cmax
, max
= start
;
2526 struct pci_dev
*dev
;
2528 dev_dbg(&bus
->dev
, "scanning bus\n");
2530 /* Go find them, Rover! */
2531 for (devfn
= 0; devfn
< 0x100; devfn
+= 8)
2532 pci_scan_slot(bus
, devfn
);
2534 /* Reserve buses for SR-IOV capability. */
2535 used_buses
= pci_iov_bus_range(bus
);
2539 * After performing arch-dependent fixup of the bus, look behind
2540 * all PCI-to-PCI bridges on this bus.
2542 if (!bus
->is_added
) {
2543 dev_dbg(&bus
->dev
, "fixups for bus\n");
2544 pcibios_fixup_bus(bus
);
2549 * Calculate how many hotplug bridges and normal bridges there
2550 * are on this bus. We will distribute the additional available
2551 * buses between hotplug bridges.
2553 for_each_pci_bridge(dev
, bus
) {
2554 if (dev
->is_hotplug_bridge
)
2561 * Scan bridges that are already configured. We don't touch them
2562 * unless they are misconfigured (which will be done in the second
2565 for_each_pci_bridge(dev
, bus
) {
2567 max
= pci_scan_bridge_extend(bus
, dev
, max
, 0, 0);
2570 * Reserve one bus for each bridge now to avoid extending
2571 * hotplug bridges too much during the second scan below.
2575 used_buses
+= cmax
- max
- 1;
2578 /* Scan bridges that need to be reconfigured */
2579 for_each_pci_bridge(dev
, bus
) {
2580 unsigned int buses
= 0;
2582 if (!hotplug_bridges
&& normal_bridges
== 1) {
2584 * There is only one bridge on the bus (upstream
2585 * port) so it gets all available buses which it
2586 * can then distribute to the possible hotplug
2589 buses
= available_buses
;
2590 } else if (dev
->is_hotplug_bridge
) {
2592 * Distribute the extra buses between hotplug
2595 buses
= available_buses
/ hotplug_bridges
;
2596 buses
= min(buses
, available_buses
- used_buses
+ 1);
2600 max
= pci_scan_bridge_extend(bus
, dev
, cmax
, buses
, 1);
2601 /* One bus is already accounted so don't add it again */
2603 used_buses
+= max
- cmax
- 1;
2607 * Make sure a hotplug bridge has at least the minimum requested
2608 * number of buses but allow it to grow up to the maximum available
2609 * bus number of there is room.
2611 if (bus
->self
&& bus
->self
->is_hotplug_bridge
) {
2612 used_buses
= max_t(unsigned int, available_buses
,
2613 pci_hotplug_bus_size
- 1);
2614 if (max
- start
< used_buses
) {
2615 max
= start
+ used_buses
;
2617 /* Do not allocate more buses than we have room left */
2618 if (max
> bus
->busn_res
.end
)
2619 max
= bus
->busn_res
.end
;
2621 dev_dbg(&bus
->dev
, "%pR extended by %#02x\n",
2622 &bus
->busn_res
, max
- start
);
2627 * We've scanned the bus and so we know all about what's on
2628 * the other side of any bridges that may be on this bus plus
2631 * Return how far we've got finding sub-buses.
2633 dev_dbg(&bus
->dev
, "bus scan returning with max=%02x\n", max
);
/**
 * pci_scan_child_bus() - Scan devices below a bus
 * @bus: Bus to scan for devices
 *
 * Scans devices below @bus including subordinate buses. Returns new
 * subordinate number including all the found devices.
 */
unsigned int pci_scan_child_bus(struct pci_bus *bus)
{
	/* No extra bus space to distribute: scan the minimal range only. */
	return pci_scan_child_bus_extend(bus, 0);
}
EXPORT_SYMBOL_GPL(pci_scan_child_bus);
2651 * pcibios_root_bridge_prepare - Platform-specific host bridge setup.
2652 * @bridge: Host bridge to set up.
2654 * Default empty implementation. Replace with an architecture-specific setup
2655 * routine, if necessary.
2657 int __weak
pcibios_root_bridge_prepare(struct pci_host_bridge
*bridge
)
2662 void __weak
pcibios_add_bus(struct pci_bus
*bus
)
2666 void __weak
pcibios_remove_bus(struct pci_bus
*bus
)
2670 struct pci_bus
*pci_create_root_bus(struct device
*parent
, int bus
,
2671 struct pci_ops
*ops
, void *sysdata
, struct list_head
*resources
)
2674 struct pci_host_bridge
*bridge
;
2676 bridge
= pci_alloc_host_bridge(0);
2680 bridge
->dev
.parent
= parent
;
2682 list_splice_init(resources
, &bridge
->windows
);
2683 bridge
->sysdata
= sysdata
;
2684 bridge
->busnr
= bus
;
2687 error
= pci_register_host_bridge(bridge
);
2697 EXPORT_SYMBOL_GPL(pci_create_root_bus
);
2699 int pci_bus_insert_busn_res(struct pci_bus
*b
, int bus
, int bus_max
)
2701 struct resource
*res
= &b
->busn_res
;
2702 struct resource
*parent_res
, *conflict
;
2706 res
->flags
= IORESOURCE_BUS
;
2708 if (!pci_is_root_bus(b
))
2709 parent_res
= &b
->parent
->busn_res
;
2711 parent_res
= get_pci_domain_busn_res(pci_domain_nr(b
));
2712 res
->flags
|= IORESOURCE_PCI_FIXED
;
2715 conflict
= request_resource_conflict(parent_res
, res
);
2718 dev_printk(KERN_DEBUG
, &b
->dev
,
2719 "busn_res: can not insert %pR under %s%pR (conflicts with %s %pR)\n",
2720 res
, pci_is_root_bus(b
) ? "domain " : "",
2721 parent_res
, conflict
->name
, conflict
);
2723 return conflict
== NULL
;
2726 int pci_bus_update_busn_res_end(struct pci_bus
*b
, int bus_max
)
2728 struct resource
*res
= &b
->busn_res
;
2729 struct resource old_res
= *res
;
2730 resource_size_t size
;
2733 if (res
->start
> bus_max
)
2736 size
= bus_max
- res
->start
+ 1;
2737 ret
= adjust_resource(res
, res
->start
, size
);
2738 dev_printk(KERN_DEBUG
, &b
->dev
,
2739 "busn_res: %pR end %s updated to %02x\n",
2740 &old_res
, ret
? "can not be" : "is", bus_max
);
2742 if (!ret
&& !res
->parent
)
2743 pci_bus_insert_busn_res(b
, res
->start
, res
->end
);
2748 void pci_bus_release_busn_res(struct pci_bus
*b
)
2750 struct resource
*res
= &b
->busn_res
;
2753 if (!res
->flags
|| !res
->parent
)
2756 ret
= release_resource(res
);
2757 dev_printk(KERN_DEBUG
, &b
->dev
,
2758 "busn_res: %pR %s released\n",
2759 res
, ret
? "can not be" : "is");
2762 int pci_scan_root_bus_bridge(struct pci_host_bridge
*bridge
)
2764 struct resource_entry
*window
;
2772 resource_list_for_each_entry(window
, &bridge
->windows
)
2773 if (window
->res
->flags
& IORESOURCE_BUS
) {
2778 ret
= pci_register_host_bridge(bridge
);
2783 bus
= bridge
->busnr
;
2787 "No busn resource found for root bus, will use [bus %02x-ff]\n",
2789 pci_bus_insert_busn_res(b
, bus
, 255);
2792 max
= pci_scan_child_bus(b
);
2795 pci_bus_update_busn_res_end(b
, max
);
2799 EXPORT_SYMBOL(pci_scan_root_bus_bridge
);
2801 struct pci_bus
*pci_scan_root_bus(struct device
*parent
, int bus
,
2802 struct pci_ops
*ops
, void *sysdata
, struct list_head
*resources
)
2804 struct resource_entry
*window
;
2809 resource_list_for_each_entry(window
, resources
)
2810 if (window
->res
->flags
& IORESOURCE_BUS
) {
2815 b
= pci_create_root_bus(parent
, bus
, ops
, sysdata
, resources
);
2821 "No busn resource found for root bus, will use [bus %02x-ff]\n",
2823 pci_bus_insert_busn_res(b
, bus
, 255);
2826 max
= pci_scan_child_bus(b
);
2829 pci_bus_update_busn_res_end(b
, max
);
2833 EXPORT_SYMBOL(pci_scan_root_bus
);
2835 struct pci_bus
*pci_scan_bus(int bus
, struct pci_ops
*ops
,
2838 LIST_HEAD(resources
);
2841 pci_add_resource(&resources
, &ioport_resource
);
2842 pci_add_resource(&resources
, &iomem_resource
);
2843 pci_add_resource(&resources
, &busn_resource
);
2844 b
= pci_create_root_bus(NULL
, bus
, ops
, sysdata
, &resources
);
2846 pci_scan_child_bus(b
);
2848 pci_free_resource_list(&resources
);
2852 EXPORT_SYMBOL(pci_scan_bus
);
2855 * pci_rescan_bus_bridge_resize - scan a PCI bus for devices.
2856 * @bridge: PCI bridge for the bus to scan
2858 * Scan a PCI bus and child buses for new devices, add them,
2859 * and enable them, resizing bridge mmio/io resource if necessary
2860 * and possible. The caller must ensure the child devices are already
2861 * removed for resizing to occur.
2863 * Returns the max number of subordinate bus discovered.
2865 unsigned int pci_rescan_bus_bridge_resize(struct pci_dev
*bridge
)
2868 struct pci_bus
*bus
= bridge
->subordinate
;
2870 max
= pci_scan_child_bus(bus
);
2872 pci_assign_unassigned_bridge_resources(bridge
);
2874 pci_bus_add_devices(bus
);
/**
 * pci_rescan_bus - Scan a PCI bus for devices
 * @bus: PCI bus to scan
 *
 * Scan a PCI bus and child buses for new devices, add them,
 * and enable them.
 *
 * Returns the max number of subordinate bus discovered.
 */
unsigned int pci_rescan_bus(struct pci_bus *bus)
{
	unsigned int max;

	max = pci_scan_child_bus(bus);
	pci_assign_unassigned_bus_resources(bus);
	pci_bus_add_devices(bus);

	return max;
}
EXPORT_SYMBOL_GPL(pci_rescan_bus);
/*
 * pci_rescan_bus(), pci_rescan_bus_bridge_resize() and PCI device removal
 * routines should always be executed under this mutex.
 */
static DEFINE_MUTEX(pci_rescan_remove_lock);
2906 void pci_lock_rescan_remove(void)
2908 mutex_lock(&pci_rescan_remove_lock
);
2910 EXPORT_SYMBOL_GPL(pci_lock_rescan_remove
);
2912 void pci_unlock_rescan_remove(void)
2914 mutex_unlock(&pci_rescan_remove_lock
);
2916 EXPORT_SYMBOL_GPL(pci_unlock_rescan_remove
);
2918 static int __init
pci_sort_bf_cmp(const struct device
*d_a
,
2919 const struct device
*d_b
)
2921 const struct pci_dev
*a
= to_pci_dev(d_a
);
2922 const struct pci_dev
*b
= to_pci_dev(d_b
);
2924 if (pci_domain_nr(a
->bus
) < pci_domain_nr(b
->bus
)) return -1;
2925 else if (pci_domain_nr(a
->bus
) > pci_domain_nr(b
->bus
)) return 1;
2927 if (a
->bus
->number
< b
->bus
->number
) return -1;
2928 else if (a
->bus
->number
> b
->bus
->number
) return 1;
2930 if (a
->devfn
< b
->devfn
) return -1;
2931 else if (a
->devfn
> b
->devfn
) return 1;
2936 void __init
pci_sort_breadthfirst(void)
2938 bus_sort_breadthfirst(&pci_bus_type
, &pci_sort_bf_cmp
);
2941 int pci_hp_add_bridge(struct pci_dev
*dev
)
2943 struct pci_bus
*parent
= dev
->bus
;
2944 int busnr
, start
= parent
->busn_res
.start
;
2945 unsigned int available_buses
= 0;
2946 int end
= parent
->busn_res
.end
;
2948 for (busnr
= start
; busnr
<= end
; busnr
++) {
2949 if (!pci_find_bus(pci_domain_nr(parent
), busnr
))
2952 if (busnr
-- > end
) {
2953 dev_err(&dev
->dev
, "No bus number available for hot-added bridge\n");
2957 /* Scan bridges that are already configured */
2958 busnr
= pci_scan_bridge(parent
, dev
, busnr
, 0);
2961 * Distribute the available bus numbers between hotplug-capable
2962 * bridges to make extending the chain later possible.
2964 available_buses
= end
- busnr
;
2966 /* Scan bridges that need to be reconfigured */
2967 pci_scan_bridge_extend(parent
, dev
, busnr
, available_buses
, 1);
2969 if (!dev
->subordinate
)
2974 EXPORT_SYMBOL_GPL(pci_hp_add_bridge
);