]> git.proxmox.com Git - mirror_ubuntu-artful-kernel.git/blame - drivers/pci/probe.c
PCI/MSI: Add hooks to populate the msi_domain field
[mirror_ubuntu-artful-kernel.git] / drivers / pci / probe.c
CommitLineData
1da177e4
LT
1/*
2 * probe.c - PCI detection and setup code
3 */
4
5#include <linux/kernel.h>
6#include <linux/delay.h>
7#include <linux/init.h>
8#include <linux/pci.h>
de335bb4 9#include <linux/of_pci.h>
589fcc23 10#include <linux/pci_hotplug.h>
1da177e4
LT
11#include <linux/slab.h>
12#include <linux/module.h>
13#include <linux/cpumask.h>
7d715a6c 14#include <linux/pci-aspm.h>
284f5f9d 15#include <asm-generic/pci-bridge.h>
bc56b9e0 16#include "pci.h"
1da177e4
LT
17
18#define CARDBUS_LATENCY_TIMER 176 /* secondary latency timer */
19#define CARDBUS_RESERVE_BUSNR 3
1da177e4 20
0b950f0f 21static struct resource busn_resource = {
67cdc827
YL
22 .name = "PCI busn",
23 .start = 0,
24 .end = 255,
25 .flags = IORESOURCE_BUS,
26};
27
1da177e4
LT
28/* Ugh. Need to stop exporting this to modules. */
29LIST_HEAD(pci_root_buses);
30EXPORT_SYMBOL(pci_root_buses);
31
5cc62c20
YL
32static LIST_HEAD(pci_domain_busn_res_list);
33
34struct pci_domain_busn_res {
35 struct list_head list;
36 struct resource res;
37 int domain_nr;
38};
39
40static struct resource *get_pci_domain_busn_res(int domain_nr)
41{
42 struct pci_domain_busn_res *r;
43
44 list_for_each_entry(r, &pci_domain_busn_res_list, list)
45 if (r->domain_nr == domain_nr)
46 return &r->res;
47
48 r = kzalloc(sizeof(*r), GFP_KERNEL);
49 if (!r)
50 return NULL;
51
52 r->domain_nr = domain_nr;
53 r->res.start = 0;
54 r->res.end = 0xff;
55 r->res.flags = IORESOURCE_BUS | IORESOURCE_PCI_FIXED;
56
57 list_add_tail(&r->list, &pci_domain_busn_res_list);
58
59 return &r->res;
60}
61
70308923
GKH
62static int find_anything(struct device *dev, void *data)
63{
64 return 1;
65}
1da177e4 66
ed4aaadb
ZY
67/*
68 * Some device drivers need know if pci is initiated.
69 * Basically, we think pci is not initiated when there
70308923 70 * is no device to be found on the pci_bus_type.
ed4aaadb
ZY
71 */
72int no_pci_devices(void)
73{
70308923
GKH
74 struct device *dev;
75 int no_devices;
ed4aaadb 76
70308923
GKH
77 dev = bus_find_device(&pci_bus_type, NULL, NULL, find_anything);
78 no_devices = (dev == NULL);
79 put_device(dev);
80 return no_devices;
81}
ed4aaadb
ZY
82EXPORT_SYMBOL(no_pci_devices);
83
1da177e4
LT
84/*
85 * PCI Bus Class
86 */
fd7d1ced 87static void release_pcibus_dev(struct device *dev)
1da177e4 88{
fd7d1ced 89 struct pci_bus *pci_bus = to_pci_bus(dev);
1da177e4 90
ff0387c3 91 put_device(pci_bus->bridge);
2fe2abf8 92 pci_bus_remove_resources(pci_bus);
98d9f30c 93 pci_release_bus_of_node(pci_bus);
1da177e4
LT
94 kfree(pci_bus);
95}
96
97static struct class pcibus_class = {
98 .name = "pci_bus",
fd7d1ced 99 .dev_release = &release_pcibus_dev,
56039e65 100 .dev_groups = pcibus_groups,
1da177e4
LT
101};
102
103static int __init pcibus_class_init(void)
104{
105 return class_register(&pcibus_class);
106}
107postcore_initcall(pcibus_class_init);
108
6ac665c6 109static u64 pci_size(u64 base, u64 maxbase, u64 mask)
1da177e4 110{
6ac665c6 111 u64 size = mask & maxbase; /* Find the significant bits */
1da177e4
LT
112 if (!size)
113 return 0;
114
115 /* Get the lowest of them to find the decode size, and
116 from that the extent. */
117 size = (size & ~(size-1)) - 1;
118
119 /* base == maxbase can be valid only if the BAR has
120 already been programmed with all 1s. */
121 if (base == maxbase && ((base | size) & mask) != mask)
122 return 0;
123
124 return size;
125}
126
28c6821a 127static inline unsigned long decode_bar(struct pci_dev *dev, u32 bar)
6ac665c6 128{
8d6a6a47 129 u32 mem_type;
28c6821a 130 unsigned long flags;
8d6a6a47 131
6ac665c6 132 if ((bar & PCI_BASE_ADDRESS_SPACE) == PCI_BASE_ADDRESS_SPACE_IO) {
28c6821a
BH
133 flags = bar & ~PCI_BASE_ADDRESS_IO_MASK;
134 flags |= IORESOURCE_IO;
135 return flags;
6ac665c6 136 }
07eddf3d 137
28c6821a
BH
138 flags = bar & ~PCI_BASE_ADDRESS_MEM_MASK;
139 flags |= IORESOURCE_MEM;
140 if (flags & PCI_BASE_ADDRESS_MEM_PREFETCH)
141 flags |= IORESOURCE_PREFETCH;
07eddf3d 142
8d6a6a47
BH
143 mem_type = bar & PCI_BASE_ADDRESS_MEM_TYPE_MASK;
144 switch (mem_type) {
145 case PCI_BASE_ADDRESS_MEM_TYPE_32:
146 break;
147 case PCI_BASE_ADDRESS_MEM_TYPE_1M:
0ff9514b 148 /* 1M mem BAR treated as 32-bit BAR */
8d6a6a47
BH
149 break;
150 case PCI_BASE_ADDRESS_MEM_TYPE_64:
28c6821a
BH
151 flags |= IORESOURCE_MEM_64;
152 break;
8d6a6a47 153 default:
0ff9514b 154 /* mem unknown type treated as 32-bit BAR */
8d6a6a47
BH
155 break;
156 }
28c6821a 157 return flags;
07eddf3d
YL
158}
159
808e34e2
ZK
160#define PCI_COMMAND_DECODE_ENABLE (PCI_COMMAND_MEMORY | PCI_COMMAND_IO)
161
0b400c7e
YZ
162/**
163 * pci_read_base - read a PCI BAR
164 * @dev: the PCI device
165 * @type: type of the BAR
166 * @res: resource buffer to be filled in
167 * @pos: BAR position in the config space
168 *
169 * Returns 1 if the BAR is 64-bit, or 0 if 32-bit.
6ac665c6 170 */
0b400c7e 171int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type,
3c78bc61 172 struct resource *res, unsigned int pos)
07eddf3d 173{
6ac665c6 174 u32 l, sz, mask;
23b13bc7 175 u64 l64, sz64, mask64;
253d2e54 176 u16 orig_cmd;
cf4d1cf5 177 struct pci_bus_region region, inverted_region;
6ac665c6 178
1ed67439 179 mask = type ? PCI_ROM_ADDRESS_MASK : ~0;
6ac665c6 180
0ff9514b 181 /* No printks while decoding is disabled! */
253d2e54
JP
182 if (!dev->mmio_always_on) {
183 pci_read_config_word(dev, PCI_COMMAND, &orig_cmd);
808e34e2
ZK
184 if (orig_cmd & PCI_COMMAND_DECODE_ENABLE) {
185 pci_write_config_word(dev, PCI_COMMAND,
186 orig_cmd & ~PCI_COMMAND_DECODE_ENABLE);
187 }
253d2e54
JP
188 }
189
6ac665c6
MW
190 res->name = pci_name(dev);
191
192 pci_read_config_dword(dev, pos, &l);
1ed67439 193 pci_write_config_dword(dev, pos, l | mask);
6ac665c6
MW
194 pci_read_config_dword(dev, pos, &sz);
195 pci_write_config_dword(dev, pos, l);
196
197 /*
198 * All bits set in sz means the device isn't working properly.
45aa23b4
BH
199 * If the BAR isn't implemented, all bits must be 0. If it's a
200 * memory BAR or a ROM, bit 0 must be clear; if it's an io BAR, bit
201 * 1 must be clear.
6ac665c6 202 */
f795d86a
MS
203 if (sz == 0xffffffff)
204 sz = 0;
6ac665c6
MW
205
206 /*
207 * I don't know how l can have all bits set. Copied from old code.
208 * Maybe it fixes a bug on some ancient platform.
209 */
210 if (l == 0xffffffff)
211 l = 0;
212
213 if (type == pci_bar_unknown) {
28c6821a
BH
214 res->flags = decode_bar(dev, l);
215 res->flags |= IORESOURCE_SIZEALIGN;
216 if (res->flags & IORESOURCE_IO) {
f795d86a
MS
217 l64 = l & PCI_BASE_ADDRESS_IO_MASK;
218 sz64 = sz & PCI_BASE_ADDRESS_IO_MASK;
219 mask64 = PCI_BASE_ADDRESS_IO_MASK & (u32)IO_SPACE_LIMIT;
6ac665c6 220 } else {
f795d86a
MS
221 l64 = l & PCI_BASE_ADDRESS_MEM_MASK;
222 sz64 = sz & PCI_BASE_ADDRESS_MEM_MASK;
223 mask64 = (u32)PCI_BASE_ADDRESS_MEM_MASK;
6ac665c6
MW
224 }
225 } else {
226 res->flags |= (l & IORESOURCE_ROM_ENABLE);
f795d86a
MS
227 l64 = l & PCI_ROM_ADDRESS_MASK;
228 sz64 = sz & PCI_ROM_ADDRESS_MASK;
229 mask64 = (u32)PCI_ROM_ADDRESS_MASK;
6ac665c6
MW
230 }
231
28c6821a 232 if (res->flags & IORESOURCE_MEM_64) {
6ac665c6
MW
233 pci_read_config_dword(dev, pos + 4, &l);
234 pci_write_config_dword(dev, pos + 4, ~0);
235 pci_read_config_dword(dev, pos + 4, &sz);
236 pci_write_config_dword(dev, pos + 4, l);
237
238 l64 |= ((u64)l << 32);
239 sz64 |= ((u64)sz << 32);
f795d86a
MS
240 mask64 |= ((u64)~0 << 32);
241 }
6ac665c6 242
f795d86a
MS
243 if (!dev->mmio_always_on && (orig_cmd & PCI_COMMAND_DECODE_ENABLE))
244 pci_write_config_word(dev, PCI_COMMAND, orig_cmd);
6ac665c6 245
f795d86a
MS
246 if (!sz64)
247 goto fail;
6ac665c6 248
f795d86a 249 sz64 = pci_size(l64, sz64, mask64);
7e79c5f8
MS
250 if (!sz64) {
251 dev_info(&dev->dev, FW_BUG "reg 0x%x: invalid BAR (can't size)\n",
252 pos);
f795d86a 253 goto fail;
7e79c5f8 254 }
f795d86a
MS
255
256 if (res->flags & IORESOURCE_MEM_64) {
3a9ad0b4
YL
257 if ((sizeof(pci_bus_addr_t) < 8 || sizeof(resource_size_t) < 8)
258 && sz64 > 0x100000000ULL) {
23b13bc7
BH
259 res->flags |= IORESOURCE_UNSET | IORESOURCE_DISABLED;
260 res->start = 0;
261 res->end = 0;
f795d86a
MS
262 dev_err(&dev->dev, "reg 0x%x: can't handle BAR larger than 4GB (size %#010llx)\n",
263 pos, (unsigned long long)sz64);
23b13bc7 264 goto out;
c7dabef8
BH
265 }
266
3a9ad0b4 267 if ((sizeof(pci_bus_addr_t) < 8) && l) {
31e9dd25 268 /* Above 32-bit boundary; try to reallocate */
c83bd900 269 res->flags |= IORESOURCE_UNSET;
72dc5601
BH
270 res->start = 0;
271 res->end = sz64;
f795d86a
MS
272 dev_info(&dev->dev, "reg 0x%x: can't handle BAR above 4GB (bus address %#010llx)\n",
273 pos, (unsigned long long)l64);
72dc5601 274 goto out;
6ac665c6 275 }
6ac665c6
MW
276 }
277
f795d86a
MS
278 region.start = l64;
279 region.end = l64 + sz64;
280
fc279850
YL
281 pcibios_bus_to_resource(dev->bus, res, &region);
282 pcibios_resource_to_bus(dev->bus, &inverted_region, res);
cf4d1cf5
KH
283
284 /*
285 * If "A" is a BAR value (a bus address), "bus_to_resource(A)" is
286 * the corresponding resource address (the physical address used by
287 * the CPU. Converting that resource address back to a bus address
288 * should yield the original BAR value:
289 *
290 * resource_to_bus(bus_to_resource(A)) == A
291 *
292 * If it doesn't, CPU accesses to "bus_to_resource(A)" will not
293 * be claimed by the device.
294 */
295 if (inverted_region.start != region.start) {
cf4d1cf5 296 res->flags |= IORESOURCE_UNSET;
cf4d1cf5 297 res->start = 0;
26370fc6 298 res->end = region.end - region.start;
f795d86a
MS
299 dev_info(&dev->dev, "reg 0x%x: initial BAR value %#010llx invalid\n",
300 pos, (unsigned long long)region.start);
cf4d1cf5 301 }
96ddef25 302
0ff9514b
BH
303 goto out;
304
305
306fail:
307 res->flags = 0;
308out:
31e9dd25 309 if (res->flags)
33963e30 310 dev_printk(KERN_DEBUG, &dev->dev, "reg 0x%x: %pR\n", pos, res);
0ff9514b 311
28c6821a 312 return (res->flags & IORESOURCE_MEM_64) ? 1 : 0;
07eddf3d
YL
313}
314
1da177e4
LT
315static void pci_read_bases(struct pci_dev *dev, unsigned int howmany, int rom)
316{
6ac665c6 317 unsigned int pos, reg;
07eddf3d 318
6ac665c6
MW
319 for (pos = 0; pos < howmany; pos++) {
320 struct resource *res = &dev->resource[pos];
1da177e4 321 reg = PCI_BASE_ADDRESS_0 + (pos << 2);
6ac665c6 322 pos += __pci_read_base(dev, pci_bar_unknown, res, reg);
1da177e4 323 }
6ac665c6 324
1da177e4 325 if (rom) {
6ac665c6 326 struct resource *res = &dev->resource[PCI_ROM_RESOURCE];
1da177e4 327 dev->rom_base_reg = rom;
6ac665c6
MW
328 res->flags = IORESOURCE_MEM | IORESOURCE_PREFETCH |
329 IORESOURCE_READONLY | IORESOURCE_CACHEABLE |
330 IORESOURCE_SIZEALIGN;
331 __pci_read_base(dev, pci_bar_mem32, res, rom);
1da177e4
LT
332 }
333}
334
15856ad5 335static void pci_read_bridge_io(struct pci_bus *child)
1da177e4
LT
336{
337 struct pci_dev *dev = child->self;
338 u8 io_base_lo, io_limit_lo;
2b28ae19 339 unsigned long io_mask, io_granularity, base, limit;
5bfa14ed 340 struct pci_bus_region region;
2b28ae19
BH
341 struct resource *res;
342
343 io_mask = PCI_IO_RANGE_MASK;
344 io_granularity = 0x1000;
345 if (dev->io_window_1k) {
346 /* Support 1K I/O space granularity */
347 io_mask = PCI_IO_1K_RANGE_MASK;
348 io_granularity = 0x400;
349 }
1da177e4 350
1da177e4
LT
351 res = child->resource[0];
352 pci_read_config_byte(dev, PCI_IO_BASE, &io_base_lo);
353 pci_read_config_byte(dev, PCI_IO_LIMIT, &io_limit_lo);
2b28ae19
BH
354 base = (io_base_lo & io_mask) << 8;
355 limit = (io_limit_lo & io_mask) << 8;
1da177e4
LT
356
357 if ((io_base_lo & PCI_IO_RANGE_TYPE_MASK) == PCI_IO_RANGE_TYPE_32) {
358 u16 io_base_hi, io_limit_hi;
8f38eaca 359
1da177e4
LT
360 pci_read_config_word(dev, PCI_IO_BASE_UPPER16, &io_base_hi);
361 pci_read_config_word(dev, PCI_IO_LIMIT_UPPER16, &io_limit_hi);
8f38eaca
BH
362 base |= ((unsigned long) io_base_hi << 16);
363 limit |= ((unsigned long) io_limit_hi << 16);
1da177e4
LT
364 }
365
5dde383e 366 if (base <= limit) {
1da177e4 367 res->flags = (io_base_lo & PCI_IO_RANGE_TYPE_MASK) | IORESOURCE_IO;
5bfa14ed 368 region.start = base;
2b28ae19 369 region.end = limit + io_granularity - 1;
fc279850 370 pcibios_bus_to_resource(dev->bus, res, &region);
c7dabef8 371 dev_printk(KERN_DEBUG, &dev->dev, " bridge window %pR\n", res);
1da177e4 372 }
fa27b2d1
BH
373}
374
15856ad5 375static void pci_read_bridge_mmio(struct pci_bus *child)
fa27b2d1
BH
376{
377 struct pci_dev *dev = child->self;
378 u16 mem_base_lo, mem_limit_lo;
379 unsigned long base, limit;
5bfa14ed 380 struct pci_bus_region region;
fa27b2d1 381 struct resource *res;
1da177e4
LT
382
383 res = child->resource[1];
384 pci_read_config_word(dev, PCI_MEMORY_BASE, &mem_base_lo);
385 pci_read_config_word(dev, PCI_MEMORY_LIMIT, &mem_limit_lo);
8f38eaca
BH
386 base = ((unsigned long) mem_base_lo & PCI_MEMORY_RANGE_MASK) << 16;
387 limit = ((unsigned long) mem_limit_lo & PCI_MEMORY_RANGE_MASK) << 16;
5dde383e 388 if (base <= limit) {
1da177e4 389 res->flags = (mem_base_lo & PCI_MEMORY_RANGE_TYPE_MASK) | IORESOURCE_MEM;
5bfa14ed
BH
390 region.start = base;
391 region.end = limit + 0xfffff;
fc279850 392 pcibios_bus_to_resource(dev->bus, res, &region);
c7dabef8 393 dev_printk(KERN_DEBUG, &dev->dev, " bridge window %pR\n", res);
1da177e4 394 }
fa27b2d1
BH
395}
396
15856ad5 397static void pci_read_bridge_mmio_pref(struct pci_bus *child)
fa27b2d1
BH
398{
399 struct pci_dev *dev = child->self;
400 u16 mem_base_lo, mem_limit_lo;
7fc986d8 401 u64 base64, limit64;
3a9ad0b4 402 pci_bus_addr_t base, limit;
5bfa14ed 403 struct pci_bus_region region;
fa27b2d1 404 struct resource *res;
1da177e4
LT
405
406 res = child->resource[2];
407 pci_read_config_word(dev, PCI_PREF_MEMORY_BASE, &mem_base_lo);
408 pci_read_config_word(dev, PCI_PREF_MEMORY_LIMIT, &mem_limit_lo);
7fc986d8
YL
409 base64 = (mem_base_lo & PCI_PREF_RANGE_MASK) << 16;
410 limit64 = (mem_limit_lo & PCI_PREF_RANGE_MASK) << 16;
1da177e4
LT
411
412 if ((mem_base_lo & PCI_PREF_RANGE_TYPE_MASK) == PCI_PREF_RANGE_TYPE_64) {
413 u32 mem_base_hi, mem_limit_hi;
8f38eaca 414
1da177e4
LT
415 pci_read_config_dword(dev, PCI_PREF_BASE_UPPER32, &mem_base_hi);
416 pci_read_config_dword(dev, PCI_PREF_LIMIT_UPPER32, &mem_limit_hi);
417
418 /*
419 * Some bridges set the base > limit by default, and some
420 * (broken) BIOSes do not initialize them. If we find
421 * this, just assume they are not being used.
422 */
423 if (mem_base_hi <= mem_limit_hi) {
7fc986d8
YL
424 base64 |= (u64) mem_base_hi << 32;
425 limit64 |= (u64) mem_limit_hi << 32;
1da177e4
LT
426 }
427 }
7fc986d8 428
3a9ad0b4
YL
429 base = (pci_bus_addr_t) base64;
430 limit = (pci_bus_addr_t) limit64;
7fc986d8
YL
431
432 if (base != base64) {
433 dev_err(&dev->dev, "can't handle bridge window above 4GB (bus address %#010llx)\n",
434 (unsigned long long) base64);
435 return;
436 }
437
5dde383e 438 if (base <= limit) {
1f82de10
YL
439 res->flags = (mem_base_lo & PCI_PREF_RANGE_TYPE_MASK) |
440 IORESOURCE_MEM | IORESOURCE_PREFETCH;
441 if (res->flags & PCI_PREF_RANGE_TYPE_64)
442 res->flags |= IORESOURCE_MEM_64;
5bfa14ed
BH
443 region.start = base;
444 region.end = limit + 0xfffff;
fc279850 445 pcibios_bus_to_resource(dev->bus, res, &region);
c7dabef8 446 dev_printk(KERN_DEBUG, &dev->dev, " bridge window %pR\n", res);
1da177e4
LT
447 }
448}
449
15856ad5 450void pci_read_bridge_bases(struct pci_bus *child)
fa27b2d1
BH
451{
452 struct pci_dev *dev = child->self;
2fe2abf8 453 struct resource *res;
fa27b2d1
BH
454 int i;
455
456 if (pci_is_root_bus(child)) /* It's a host bus, nothing to read */
457 return;
458
b918c62e
YL
459 dev_info(&dev->dev, "PCI bridge to %pR%s\n",
460 &child->busn_res,
fa27b2d1
BH
461 dev->transparent ? " (subtractive decode)" : "");
462
2fe2abf8
BH
463 pci_bus_remove_resources(child);
464 for (i = 0; i < PCI_BRIDGE_RESOURCE_NUM; i++)
465 child->resource[i] = &dev->resource[PCI_BRIDGE_RESOURCES+i];
466
fa27b2d1
BH
467 pci_read_bridge_io(child);
468 pci_read_bridge_mmio(child);
469 pci_read_bridge_mmio_pref(child);
2adf7516
BH
470
471 if (dev->transparent) {
2fe2abf8 472 pci_bus_for_each_resource(child->parent, res, i) {
d739a099 473 if (res && res->flags) {
2fe2abf8
BH
474 pci_bus_add_resource(child, res,
475 PCI_SUBTRACTIVE_DECODE);
2adf7516
BH
476 dev_printk(KERN_DEBUG, &dev->dev,
477 " bridge window %pR (subtractive decode)\n",
2fe2abf8
BH
478 res);
479 }
2adf7516
BH
480 }
481 }
fa27b2d1
BH
482}
483
670ba0c8 484static struct pci_bus *pci_alloc_bus(struct pci_bus *parent)
1da177e4
LT
485{
486 struct pci_bus *b;
487
f5afe806 488 b = kzalloc(sizeof(*b), GFP_KERNEL);
05013486
BH
489 if (!b)
490 return NULL;
491
492 INIT_LIST_HEAD(&b->node);
493 INIT_LIST_HEAD(&b->children);
494 INIT_LIST_HEAD(&b->devices);
495 INIT_LIST_HEAD(&b->slots);
496 INIT_LIST_HEAD(&b->resources);
497 b->max_bus_speed = PCI_SPEED_UNKNOWN;
498 b->cur_bus_speed = PCI_SPEED_UNKNOWN;
670ba0c8
CM
499#ifdef CONFIG_PCI_DOMAINS_GENERIC
500 if (parent)
501 b->domain_nr = parent->domain_nr;
502#endif
1da177e4
LT
503 return b;
504}
505
70efde2a
JL
506static void pci_release_host_bridge_dev(struct device *dev)
507{
508 struct pci_host_bridge *bridge = to_pci_host_bridge(dev);
509
510 if (bridge->release_fn)
511 bridge->release_fn(bridge);
512
513 pci_free_resource_list(&bridge->windows);
514
515 kfree(bridge);
516}
517
7b543663
YL
518static struct pci_host_bridge *pci_alloc_host_bridge(struct pci_bus *b)
519{
520 struct pci_host_bridge *bridge;
521
522 bridge = kzalloc(sizeof(*bridge), GFP_KERNEL);
05013486
BH
523 if (!bridge)
524 return NULL;
7b543663 525
05013486
BH
526 INIT_LIST_HEAD(&bridge->windows);
527 bridge->bus = b;
7b543663
YL
528 return bridge;
529}
530
0b950f0f 531static const unsigned char pcix_bus_speed[] = {
9be60ca0
MW
532 PCI_SPEED_UNKNOWN, /* 0 */
533 PCI_SPEED_66MHz_PCIX, /* 1 */
534 PCI_SPEED_100MHz_PCIX, /* 2 */
535 PCI_SPEED_133MHz_PCIX, /* 3 */
536 PCI_SPEED_UNKNOWN, /* 4 */
537 PCI_SPEED_66MHz_PCIX_ECC, /* 5 */
538 PCI_SPEED_100MHz_PCIX_ECC, /* 6 */
539 PCI_SPEED_133MHz_PCIX_ECC, /* 7 */
540 PCI_SPEED_UNKNOWN, /* 8 */
541 PCI_SPEED_66MHz_PCIX_266, /* 9 */
542 PCI_SPEED_100MHz_PCIX_266, /* A */
543 PCI_SPEED_133MHz_PCIX_266, /* B */
544 PCI_SPEED_UNKNOWN, /* C */
545 PCI_SPEED_66MHz_PCIX_533, /* D */
546 PCI_SPEED_100MHz_PCIX_533, /* E */
547 PCI_SPEED_133MHz_PCIX_533 /* F */
548};
549
343e51ae 550const unsigned char pcie_link_speed[] = {
3749c51a
MW
551 PCI_SPEED_UNKNOWN, /* 0 */
552 PCIE_SPEED_2_5GT, /* 1 */
553 PCIE_SPEED_5_0GT, /* 2 */
9dfd97fe 554 PCIE_SPEED_8_0GT, /* 3 */
3749c51a
MW
555 PCI_SPEED_UNKNOWN, /* 4 */
556 PCI_SPEED_UNKNOWN, /* 5 */
557 PCI_SPEED_UNKNOWN, /* 6 */
558 PCI_SPEED_UNKNOWN, /* 7 */
559 PCI_SPEED_UNKNOWN, /* 8 */
560 PCI_SPEED_UNKNOWN, /* 9 */
561 PCI_SPEED_UNKNOWN, /* A */
562 PCI_SPEED_UNKNOWN, /* B */
563 PCI_SPEED_UNKNOWN, /* C */
564 PCI_SPEED_UNKNOWN, /* D */
565 PCI_SPEED_UNKNOWN, /* E */
566 PCI_SPEED_UNKNOWN /* F */
567};
568
569void pcie_update_link_speed(struct pci_bus *bus, u16 linksta)
570{
231afea1 571 bus->cur_bus_speed = pcie_link_speed[linksta & PCI_EXP_LNKSTA_CLS];
3749c51a
MW
572}
573EXPORT_SYMBOL_GPL(pcie_update_link_speed);
574
45b4cdd5
MW
575static unsigned char agp_speeds[] = {
576 AGP_UNKNOWN,
577 AGP_1X,
578 AGP_2X,
579 AGP_4X,
580 AGP_8X
581};
582
583static enum pci_bus_speed agp_speed(int agp3, int agpstat)
584{
585 int index = 0;
586
587 if (agpstat & 4)
588 index = 3;
589 else if (agpstat & 2)
590 index = 2;
591 else if (agpstat & 1)
592 index = 1;
593 else
594 goto out;
f7625980 595
45b4cdd5
MW
596 if (agp3) {
597 index += 2;
598 if (index == 5)
599 index = 0;
600 }
601
602 out:
603 return agp_speeds[index];
604}
605
9be60ca0
MW
606static void pci_set_bus_speed(struct pci_bus *bus)
607{
608 struct pci_dev *bridge = bus->self;
609 int pos;
610
45b4cdd5
MW
611 pos = pci_find_capability(bridge, PCI_CAP_ID_AGP);
612 if (!pos)
613 pos = pci_find_capability(bridge, PCI_CAP_ID_AGP3);
614 if (pos) {
615 u32 agpstat, agpcmd;
616
617 pci_read_config_dword(bridge, pos + PCI_AGP_STATUS, &agpstat);
618 bus->max_bus_speed = agp_speed(agpstat & 8, agpstat & 7);
619
620 pci_read_config_dword(bridge, pos + PCI_AGP_COMMAND, &agpcmd);
621 bus->cur_bus_speed = agp_speed(agpstat & 8, agpcmd & 7);
622 }
623
9be60ca0
MW
624 pos = pci_find_capability(bridge, PCI_CAP_ID_PCIX);
625 if (pos) {
626 u16 status;
627 enum pci_bus_speed max;
9be60ca0 628
7793eeab
BH
629 pci_read_config_word(bridge, pos + PCI_X_BRIDGE_SSTATUS,
630 &status);
631
632 if (status & PCI_X_SSTATUS_533MHZ) {
9be60ca0 633 max = PCI_SPEED_133MHz_PCIX_533;
7793eeab 634 } else if (status & PCI_X_SSTATUS_266MHZ) {
9be60ca0 635 max = PCI_SPEED_133MHz_PCIX_266;
7793eeab 636 } else if (status & PCI_X_SSTATUS_133MHZ) {
3c78bc61 637 if ((status & PCI_X_SSTATUS_VERS) == PCI_X_SSTATUS_V2)
9be60ca0 638 max = PCI_SPEED_133MHz_PCIX_ECC;
3c78bc61 639 else
9be60ca0 640 max = PCI_SPEED_133MHz_PCIX;
9be60ca0
MW
641 } else {
642 max = PCI_SPEED_66MHz_PCIX;
643 }
644
645 bus->max_bus_speed = max;
7793eeab
BH
646 bus->cur_bus_speed = pcix_bus_speed[
647 (status & PCI_X_SSTATUS_FREQ) >> 6];
9be60ca0
MW
648
649 return;
650 }
651
fdfe1511 652 if (pci_is_pcie(bridge)) {
9be60ca0
MW
653 u32 linkcap;
654 u16 linksta;
655
59875ae4 656 pcie_capability_read_dword(bridge, PCI_EXP_LNKCAP, &linkcap);
231afea1 657 bus->max_bus_speed = pcie_link_speed[linkcap & PCI_EXP_LNKCAP_SLS];
9be60ca0 658
59875ae4 659 pcie_capability_read_word(bridge, PCI_EXP_LNKSTA, &linksta);
9be60ca0
MW
660 pcie_update_link_speed(bus, linksta);
661 }
662}
663
44aa0c65
MZ
664static struct irq_domain *pci_host_bridge_msi_domain(struct pci_bus *bus)
665{
666 /*
667 * Any firmware interface that can resolve the msi_domain
668 * should be called from here.
669 */
670
671 return NULL;
672}
673
674static void pci_set_bus_msi_domain(struct pci_bus *bus)
675{
676 struct irq_domain *d;
677
678 /*
679 * Either bus is the root, and we must obtain it from the
680 * firmware, or we inherit it from the bridge device.
681 */
682 if (pci_is_root_bus(bus))
683 d = pci_host_bridge_msi_domain(bus);
684 else
685 d = dev_get_msi_domain(&bus->self->dev);
686
687 dev_set_msi_domain(&bus->dev, d);
688}
689
cbd4e055
AB
690static struct pci_bus *pci_alloc_child_bus(struct pci_bus *parent,
691 struct pci_dev *bridge, int busnr)
1da177e4
LT
692{
693 struct pci_bus *child;
694 int i;
4f535093 695 int ret;
1da177e4
LT
696
697 /*
698 * Allocate a new bus, and inherit stuff from the parent..
699 */
670ba0c8 700 child = pci_alloc_bus(parent);
1da177e4
LT
701 if (!child)
702 return NULL;
703
1da177e4
LT
704 child->parent = parent;
705 child->ops = parent->ops;
0cbdcfcf 706 child->msi = parent->msi;
1da177e4 707 child->sysdata = parent->sysdata;
6e325a62 708 child->bus_flags = parent->bus_flags;
1da177e4 709
fd7d1ced 710 /* initialize some portions of the bus device, but don't register it
4f535093 711 * now as the parent is not properly set up yet.
fd7d1ced
GKH
712 */
713 child->dev.class = &pcibus_class;
1a927133 714 dev_set_name(&child->dev, "%04x:%02x", pci_domain_nr(child), busnr);
1da177e4
LT
715
716 /*
717 * Set up the primary, secondary and subordinate
718 * bus numbers.
719 */
b918c62e
YL
720 child->number = child->busn_res.start = busnr;
721 child->primary = parent->busn_res.start;
722 child->busn_res.end = 0xff;
1da177e4 723
4f535093
YL
724 if (!bridge) {
725 child->dev.parent = parent->bridge;
726 goto add_dev;
727 }
3789fa8a
YZ
728
729 child->self = bridge;
730 child->bridge = get_device(&bridge->dev);
4f535093 731 child->dev.parent = child->bridge;
98d9f30c 732 pci_set_bus_of_node(child);
9be60ca0
MW
733 pci_set_bus_speed(child);
734
1da177e4 735 /* Set up default resource pointers and names.. */
fde09c6d 736 for (i = 0; i < PCI_BRIDGE_RESOURCE_NUM; i++) {
1da177e4
LT
737 child->resource[i] = &bridge->resource[PCI_BRIDGE_RESOURCES+i];
738 child->resource[i]->name = child->name;
739 }
740 bridge->subordinate = child;
741
4f535093 742add_dev:
44aa0c65 743 pci_set_bus_msi_domain(child);
4f535093
YL
744 ret = device_register(&child->dev);
745 WARN_ON(ret < 0);
746
10a95747
JL
747 pcibios_add_bus(child);
748
4f535093
YL
749 /* Create legacy_io and legacy_mem files for this bus */
750 pci_create_legacy_files(child);
751
1da177e4
LT
752 return child;
753}
754
3c78bc61
RD
755struct pci_bus *pci_add_new_bus(struct pci_bus *parent, struct pci_dev *dev,
756 int busnr)
1da177e4
LT
757{
758 struct pci_bus *child;
759
760 child = pci_alloc_child_bus(parent, dev, busnr);
e4ea9bb7 761 if (child) {
d71374da 762 down_write(&pci_bus_sem);
1da177e4 763 list_add_tail(&child->node, &parent->children);
d71374da 764 up_write(&pci_bus_sem);
e4ea9bb7 765 }
1da177e4
LT
766 return child;
767}
b7fe9434 768EXPORT_SYMBOL(pci_add_new_bus);
1da177e4 769
f3dbd802
RJ
770static void pci_enable_crs(struct pci_dev *pdev)
771{
772 u16 root_cap = 0;
773
774 /* Enable CRS Software Visibility if supported */
775 pcie_capability_read_word(pdev, PCI_EXP_RTCAP, &root_cap);
776 if (root_cap & PCI_EXP_RTCAP_CRSVIS)
777 pcie_capability_set_word(pdev, PCI_EXP_RTCTL,
778 PCI_EXP_RTCTL_CRSSVE);
779}
780
1da177e4
LT
781/*
782 * If it's a bridge, configure it and scan the bus behind it.
783 * For CardBus bridges, we don't scan behind as the devices will
784 * be handled by the bridge driver itself.
785 *
786 * We need to process bridges in two passes -- first we scan those
787 * already configured by the BIOS and after we are done with all of
788 * them, we proceed to assigning numbers to the remaining buses in
789 * order to avoid overlaps between old and new bus numbers.
790 */
15856ad5 791int pci_scan_bridge(struct pci_bus *bus, struct pci_dev *dev, int max, int pass)
1da177e4
LT
792{
793 struct pci_bus *child;
794 int is_cardbus = (dev->hdr_type == PCI_HEADER_TYPE_CARDBUS);
49887941 795 u32 buses, i, j = 0;
1da177e4 796 u16 bctl;
99ddd552 797 u8 primary, secondary, subordinate;
a1c19894 798 int broken = 0;
1da177e4
LT
799
800 pci_read_config_dword(dev, PCI_PRIMARY_BUS, &buses);
99ddd552
BH
801 primary = buses & 0xFF;
802 secondary = (buses >> 8) & 0xFF;
803 subordinate = (buses >> 16) & 0xFF;
1da177e4 804
99ddd552
BH
805 dev_dbg(&dev->dev, "scanning [bus %02x-%02x] behind bridge, pass %d\n",
806 secondary, subordinate, pass);
1da177e4 807
71f6bd4a
YL
808 if (!primary && (primary != bus->number) && secondary && subordinate) {
809 dev_warn(&dev->dev, "Primary bus is hard wired to 0\n");
810 primary = bus->number;
811 }
812
a1c19894
BH
813 /* Check if setup is sensible at all */
814 if (!pass &&
1965f66e 815 (primary != bus->number || secondary <= bus->number ||
12d87069 816 secondary > subordinate)) {
1965f66e
YL
817 dev_info(&dev->dev, "bridge configuration invalid ([bus %02x-%02x]), reconfiguring\n",
818 secondary, subordinate);
a1c19894
BH
819 broken = 1;
820 }
821
1da177e4 822 /* Disable MasterAbortMode during probing to avoid reporting
f7625980 823 of bus errors (in some architectures) */
1da177e4
LT
824 pci_read_config_word(dev, PCI_BRIDGE_CONTROL, &bctl);
825 pci_write_config_word(dev, PCI_BRIDGE_CONTROL,
826 bctl & ~PCI_BRIDGE_CTL_MASTER_ABORT);
827
f3dbd802
RJ
828 pci_enable_crs(dev);
829
99ddd552
BH
830 if ((secondary || subordinate) && !pcibios_assign_all_busses() &&
831 !is_cardbus && !broken) {
832 unsigned int cmax;
1da177e4
LT
833 /*
834 * Bus already configured by firmware, process it in the first
835 * pass and just note the configuration.
836 */
837 if (pass)
bbe8f9a3 838 goto out;
1da177e4
LT
839
840 /*
2ed85823
AN
841 * The bus might already exist for two reasons: Either we are
842 * rescanning the bus or the bus is reachable through more than
843 * one bridge. The second case can happen with the i450NX
844 * chipset.
1da177e4 845 */
99ddd552 846 child = pci_find_bus(pci_domain_nr(bus), secondary);
74710ded 847 if (!child) {
99ddd552 848 child = pci_add_new_bus(bus, dev, secondary);
74710ded
AC
849 if (!child)
850 goto out;
99ddd552 851 child->primary = primary;
bc76b731 852 pci_bus_insert_busn_res(child, secondary, subordinate);
74710ded 853 child->bridge_ctl = bctl;
1da177e4
LT
854 }
855
1da177e4 856 cmax = pci_scan_child_bus(child);
c95b0bd6
AN
857 if (cmax > subordinate)
858 dev_warn(&dev->dev, "bridge has subordinate %02x but max busn %02x\n",
859 subordinate, cmax);
860 /* subordinate should equal child->busn_res.end */
861 if (subordinate > max)
862 max = subordinate;
1da177e4
LT
863 } else {
864 /*
865 * We need to assign a number to this bus which we always
866 * do in the second pass.
867 */
12f44f46 868 if (!pass) {
619c8c31 869 if (pcibios_assign_all_busses() || broken || is_cardbus)
12f44f46
IK
870 /* Temporarily disable forwarding of the
871 configuration cycles on all bridges in
872 this bus segment to avoid possible
873 conflicts in the second pass between two
874 bridges programmed with overlapping
875 bus ranges. */
876 pci_write_config_dword(dev, PCI_PRIMARY_BUS,
877 buses & ~0xffffff);
bbe8f9a3 878 goto out;
12f44f46 879 }
1da177e4
LT
880
881 /* Clear errors */
882 pci_write_config_word(dev, PCI_STATUS, 0xffff);
883
7a0b33d4
BH
884 /* Prevent assigning a bus number that already exists.
885 * This can happen when a bridge is hot-plugged, so in
886 * this case we only re-scan this bus. */
b1a98b69
TC
887 child = pci_find_bus(pci_domain_nr(bus), max+1);
888 if (!child) {
9a4d7d87 889 child = pci_add_new_bus(bus, dev, max+1);
b1a98b69
TC
890 if (!child)
891 goto out;
12d87069 892 pci_bus_insert_busn_res(child, max+1, 0xff);
b1a98b69 893 }
9a4d7d87 894 max++;
1da177e4
LT
895 buses = (buses & 0xff000000)
896 | ((unsigned int)(child->primary) << 0)
b918c62e
YL
897 | ((unsigned int)(child->busn_res.start) << 8)
898 | ((unsigned int)(child->busn_res.end) << 16);
1da177e4
LT
899
900 /*
901 * yenta.c forces a secondary latency timer of 176.
902 * Copy that behaviour here.
903 */
904 if (is_cardbus) {
905 buses &= ~0xff000000;
906 buses |= CARDBUS_LATENCY_TIMER << 24;
907 }
7c867c88 908
1da177e4
LT
909 /*
910 * We need to blast all three values with a single write.
911 */
912 pci_write_config_dword(dev, PCI_PRIMARY_BUS, buses);
913
914 if (!is_cardbus) {
11949255 915 child->bridge_ctl = bctl;
1da177e4
LT
916 max = pci_scan_child_bus(child);
917 } else {
918 /*
919 * For CardBus bridges, we leave 4 bus numbers
920 * as cards with a PCI-to-PCI bridge can be
921 * inserted later.
922 */
3c78bc61 923 for (i = 0; i < CARDBUS_RESERVE_BUSNR; i++) {
49887941 924 struct pci_bus *parent = bus;
cc57450f
RS
925 if (pci_find_bus(pci_domain_nr(bus),
926 max+i+1))
927 break;
49887941
DB
928 while (parent->parent) {
929 if ((!pcibios_assign_all_busses()) &&
b918c62e
YL
930 (parent->busn_res.end > max) &&
931 (parent->busn_res.end <= max+i)) {
49887941
DB
932 j = 1;
933 }
934 parent = parent->parent;
935 }
936 if (j) {
937 /*
938 * Often, there are two cardbus bridges
939 * -- try to leave one valid bus number
940 * for each one.
941 */
942 i /= 2;
943 break;
944 }
945 }
cc57450f 946 max += i;
1da177e4
LT
947 }
948 /*
949 * Set the subordinate bus number to its real value.
950 */
bc76b731 951 pci_bus_update_busn_res_end(child, max);
1da177e4
LT
952 pci_write_config_byte(dev, PCI_SUBORDINATE_BUS, max);
953 }
954
cb3576fa
GH
955 sprintf(child->name,
956 (is_cardbus ? "PCI CardBus %04x:%02x" : "PCI Bus %04x:%02x"),
957 pci_domain_nr(bus), child->number);
1da177e4 958
d55bef51 959 /* Has only triggered on CardBus, fixup is in yenta_socket */
49887941 960 while (bus->parent) {
b918c62e
YL
961 if ((child->busn_res.end > bus->busn_res.end) ||
962 (child->number > bus->busn_res.end) ||
49887941 963 (child->number < bus->number) ||
b918c62e 964 (child->busn_res.end < bus->number)) {
227f0647 965 dev_info(&child->dev, "%pR %s hidden behind%s bridge %s %pR\n",
b918c62e
YL
966 &child->busn_res,
967 (bus->number > child->busn_res.end &&
968 bus->busn_res.end < child->number) ?
a6f29a98
JP
969 "wholly" : "partially",
970 bus->self->transparent ? " transparent" : "",
865df576 971 dev_name(&bus->dev),
b918c62e 972 &bus->busn_res);
49887941
DB
973 }
974 bus = bus->parent;
975 }
976
bbe8f9a3
RB
977out:
978 pci_write_config_word(dev, PCI_BRIDGE_CONTROL, bctl);
979
1da177e4
LT
980 return max;
981}
b7fe9434 982EXPORT_SYMBOL(pci_scan_bridge);
1da177e4
LT
983
984/*
985 * Read interrupt line and base address registers.
986 * The architecture-dependent code can tweak these, of course.
987 */
988static void pci_read_irq(struct pci_dev *dev)
989{
990 unsigned char irq;
991
992 pci_read_config_byte(dev, PCI_INTERRUPT_PIN, &irq);
ffeff788 993 dev->pin = irq;
1da177e4
LT
994 if (irq)
995 pci_read_config_byte(dev, PCI_INTERRUPT_LINE, &irq);
996 dev->irq = irq;
997}
998
bb209c82 999void set_pcie_port_type(struct pci_dev *pdev)
480b93b7
YZ
1000{
1001 int pos;
1002 u16 reg16;
d0751b98
YW
1003 int type;
1004 struct pci_dev *parent;
480b93b7
YZ
1005
1006 pos = pci_find_capability(pdev, PCI_CAP_ID_EXP);
1007 if (!pos)
1008 return;
0efea000 1009 pdev->pcie_cap = pos;
480b93b7 1010 pci_read_config_word(pdev, pos + PCI_EXP_FLAGS, &reg16);
786e2288 1011 pdev->pcie_flags_reg = reg16;
b03e7495
JM
1012 pci_read_config_word(pdev, pos + PCI_EXP_DEVCAP, &reg16);
1013 pdev->pcie_mpss = reg16 & PCI_EXP_DEVCAP_PAYLOAD;
d0751b98
YW
1014
1015 /*
1016 * A Root Port is always the upstream end of a Link. No PCIe
1017 * component has two Links. Two Links are connected by a Switch
1018 * that has a Port on each Link and internal logic to connect the
1019 * two Ports.
1020 */
1021 type = pci_pcie_type(pdev);
1022 if (type == PCI_EXP_TYPE_ROOT_PORT)
1023 pdev->has_secondary_link = 1;
1024 else if (type == PCI_EXP_TYPE_UPSTREAM ||
1025 type == PCI_EXP_TYPE_DOWNSTREAM) {
1026 parent = pci_upstream_bridge(pdev);
1027 if (!parent->has_secondary_link)
1028 pdev->has_secondary_link = 1;
1029 }
480b93b7
YZ
1030}
1031
bb209c82 1032void set_pcie_hotplug_bridge(struct pci_dev *pdev)
28760489 1033{
28760489
EB
1034 u32 reg32;
1035
59875ae4 1036 pcie_capability_read_dword(pdev, PCI_EXP_SLTCAP, &reg32);
28760489
EB
1037 if (reg32 & PCI_EXP_SLTCAP_HPC)
1038 pdev->is_hotplug_bridge = 1;
1039}
1040
78916b00
AW
1041/**
1042 * pci_ext_cfg_is_aliased - is ext config space just an alias of std config?
1043 * @dev: PCI device
1044 *
1045 * PCI Express to PCI/PCI-X Bridge Specification, rev 1.0, 4.1.4 says that
1046 * when forwarding a type1 configuration request the bridge must check that
1047 * the extended register address field is zero. The bridge is not permitted
1048 * to forward the transactions and must handle it as an Unsupported Request.
1049 * Some bridges do not follow this rule and simply drop the extended register
1050 * bits, resulting in the standard config space being aliased, every 256
1051 * bytes across the entire configuration space. Test for this condition by
1052 * comparing the first dword of each potential alias to the vendor/device ID.
1053 * Known offenders:
1054 * ASM1083/1085 PCIe-to-PCI Reversible Bridge (1b21:1080, rev 01 & 03)
1055 * AMD/ATI SBx00 PCI to PCI Bridge (1002:4384, rev 40)
1056 */
1057static bool pci_ext_cfg_is_aliased(struct pci_dev *dev)
1058{
1059#ifdef CONFIG_PCI_QUIRKS
1060 int pos;
1061 u32 header, tmp;
1062
1063 pci_read_config_dword(dev, PCI_VENDOR_ID, &header);
1064
1065 for (pos = PCI_CFG_SPACE_SIZE;
1066 pos < PCI_CFG_SPACE_EXP_SIZE; pos += PCI_CFG_SPACE_SIZE) {
1067 if (pci_read_config_dword(dev, pos, &tmp) != PCIBIOS_SUCCESSFUL
1068 || header != tmp)
1069 return false;
1070 }
1071
1072 return true;
1073#else
1074 return false;
1075#endif
1076}
1077
0b950f0f
SH
1078/**
1079 * pci_cfg_space_size - get the configuration space size of the PCI device.
1080 * @dev: PCI device
1081 *
1082 * Regular PCI devices have 256 bytes, but PCI-X 2 and PCI Express devices
1083 * have 4096 bytes. Even if the device is capable, that doesn't mean we can
1084 * access it. Maybe we don't have a way to generate extended config space
1085 * accesses, or the device is behind a reverse Express bridge. So we try
1086 * reading the dword at 0x100 which must either be 0 or a valid extended
1087 * capability header.
1088 */
1089static int pci_cfg_space_size_ext(struct pci_dev *dev)
1090{
1091 u32 status;
1092 int pos = PCI_CFG_SPACE_SIZE;
1093
1094 if (pci_read_config_dword(dev, pos, &status) != PCIBIOS_SUCCESSFUL)
1095 goto fail;
78916b00 1096 if (status == 0xffffffff || pci_ext_cfg_is_aliased(dev))
0b950f0f
SH
1097 goto fail;
1098
1099 return PCI_CFG_SPACE_EXP_SIZE;
1100
1101 fail:
1102 return PCI_CFG_SPACE_SIZE;
1103}
1104
1105int pci_cfg_space_size(struct pci_dev *dev)
1106{
1107 int pos;
1108 u32 status;
1109 u16 class;
1110
1111 class = dev->class >> 8;
1112 if (class == PCI_CLASS_BRIDGE_HOST)
1113 return pci_cfg_space_size_ext(dev);
1114
1115 if (!pci_is_pcie(dev)) {
1116 pos = pci_find_capability(dev, PCI_CAP_ID_PCIX);
1117 if (!pos)
1118 goto fail;
1119
1120 pci_read_config_dword(dev, pos + PCI_X_STATUS, &status);
1121 if (!(status & (PCI_X_STATUS_266MHZ | PCI_X_STATUS_533MHZ)))
1122 goto fail;
1123 }
1124
1125 return pci_cfg_space_size_ext(dev);
1126
1127 fail:
1128 return PCI_CFG_SPACE_SIZE;
1129}
1130
01abc2aa 1131#define LEGACY_IO_RESOURCE (IORESOURCE_IO | IORESOURCE_PCI_FIXED)
76e6a1d6 1132
1851617c
MT
1133static void pci_msi_setup_pci_dev(struct pci_dev *dev)
1134{
1135 /*
1136 * Disable the MSI hardware to avoid screaming interrupts
1137 * during boot. This is the power on reset default so
1138 * usually this should be a noop.
1139 */
1140 dev->msi_cap = pci_find_capability(dev, PCI_CAP_ID_MSI);
1141 if (dev->msi_cap)
1142 pci_msi_set_enable(dev, 0);
1143
1144 dev->msix_cap = pci_find_capability(dev, PCI_CAP_ID_MSIX);
1145 if (dev->msix_cap)
1146 pci_msix_clear_and_set_ctrl(dev, PCI_MSIX_FLAGS_ENABLE, 0);
1147}
1148
1da177e4
LT
1149/**
1150 * pci_setup_device - fill in class and map information of a device
1151 * @dev: the device structure to fill
1152 *
f7625980 1153 * Initialize the device structure with information about the device's
1da177e4
LT
1154 * vendor,class,memory and IO-space addresses,IRQ lines etc.
1155 * Called at initialisation of the PCI subsystem and by CardBus services.
480b93b7
YZ
1156 * Returns 0 on success and negative if unknown type of device (not normal,
1157 * bridge or CardBus).
1da177e4 1158 */
480b93b7 1159int pci_setup_device(struct pci_dev *dev)
1da177e4
LT
1160{
1161 u32 class;
480b93b7
YZ
1162 u8 hdr_type;
1163 struct pci_slot *slot;
bc577d2b 1164 int pos = 0;
5bfa14ed
BH
1165 struct pci_bus_region region;
1166 struct resource *res;
480b93b7
YZ
1167
1168 if (pci_read_config_byte(dev, PCI_HEADER_TYPE, &hdr_type))
1169 return -EIO;
1170
1171 dev->sysdata = dev->bus->sysdata;
1172 dev->dev.parent = dev->bus->bridge;
1173 dev->dev.bus = &pci_bus_type;
1174 dev->hdr_type = hdr_type & 0x7f;
1175 dev->multifunction = !!(hdr_type & 0x80);
480b93b7
YZ
1176 dev->error_state = pci_channel_io_normal;
1177 set_pcie_port_type(dev);
1178
1179 list_for_each_entry(slot, &dev->bus->slots, list)
1180 if (PCI_SLOT(dev->devfn) == slot->number)
1181 dev->slot = slot;
1182
1183 /* Assume 32-bit PCI; let 64-bit PCI cards (which are far rarer)
1184 set this higher, assuming the system even supports it. */
1185 dev->dma_mask = 0xffffffff;
1da177e4 1186
eebfcfb5
GKH
1187 dev_set_name(&dev->dev, "%04x:%02x:%02x.%d", pci_domain_nr(dev->bus),
1188 dev->bus->number, PCI_SLOT(dev->devfn),
1189 PCI_FUNC(dev->devfn));
1da177e4
LT
1190
1191 pci_read_config_dword(dev, PCI_CLASS_REVISION, &class);
b8a3a521 1192 dev->revision = class & 0xff;
2dd8ba92 1193 dev->class = class >> 8; /* upper 3 bytes */
1da177e4 1194
2dd8ba92
YL
1195 dev_printk(KERN_DEBUG, &dev->dev, "[%04x:%04x] type %02x class %#08x\n",
1196 dev->vendor, dev->device, dev->hdr_type, dev->class);
1da177e4 1197
853346e4
YZ
1198 /* need to have dev->class ready */
1199 dev->cfg_size = pci_cfg_space_size(dev);
1200
1da177e4 1201 /* "Unknown power state" */
3fe9d19f 1202 dev->current_state = PCI_UNKNOWN;
1da177e4 1203
1851617c
MT
1204 pci_msi_setup_pci_dev(dev);
1205
1da177e4
LT
1206 /* Early fixups, before probing the BARs */
1207 pci_fixup_device(pci_fixup_early, dev);
f79b1b14
YZ
1208 /* device class may be changed after fixup */
1209 class = dev->class >> 8;
1da177e4
LT
1210
1211 switch (dev->hdr_type) { /* header type */
1212 case PCI_HEADER_TYPE_NORMAL: /* standard header */
1213 if (class == PCI_CLASS_BRIDGE_PCI)
1214 goto bad;
1215 pci_read_irq(dev);
1216 pci_read_bases(dev, 6, PCI_ROM_ADDRESS);
1217 pci_read_config_word(dev, PCI_SUBSYSTEM_VENDOR_ID, &dev->subsystem_vendor);
1218 pci_read_config_word(dev, PCI_SUBSYSTEM_ID, &dev->subsystem_device);
368c73d4
AC
1219
1220 /*
075eb9e3
BH
1221 * Do the ugly legacy mode stuff here rather than broken chip
1222 * quirk code. Legacy mode ATA controllers have fixed
1223 * addresses. These are not always echoed in BAR0-3, and
1224 * BAR0-3 in a few cases contain junk!
368c73d4
AC
1225 */
1226 if (class == PCI_CLASS_STORAGE_IDE) {
1227 u8 progif;
1228 pci_read_config_byte(dev, PCI_CLASS_PROG, &progif);
1229 if ((progif & 1) == 0) {
5bfa14ed
BH
1230 region.start = 0x1F0;
1231 region.end = 0x1F7;
1232 res = &dev->resource[0];
1233 res->flags = LEGACY_IO_RESOURCE;
fc279850 1234 pcibios_bus_to_resource(dev->bus, res, &region);
075eb9e3
BH
1235 dev_info(&dev->dev, "legacy IDE quirk: reg 0x10: %pR\n",
1236 res);
5bfa14ed
BH
1237 region.start = 0x3F6;
1238 region.end = 0x3F6;
1239 res = &dev->resource[1];
1240 res->flags = LEGACY_IO_RESOURCE;
fc279850 1241 pcibios_bus_to_resource(dev->bus, res, &region);
075eb9e3
BH
1242 dev_info(&dev->dev, "legacy IDE quirk: reg 0x14: %pR\n",
1243 res);
368c73d4
AC
1244 }
1245 if ((progif & 4) == 0) {
5bfa14ed
BH
1246 region.start = 0x170;
1247 region.end = 0x177;
1248 res = &dev->resource[2];
1249 res->flags = LEGACY_IO_RESOURCE;
fc279850 1250 pcibios_bus_to_resource(dev->bus, res, &region);
075eb9e3
BH
1251 dev_info(&dev->dev, "legacy IDE quirk: reg 0x18: %pR\n",
1252 res);
5bfa14ed
BH
1253 region.start = 0x376;
1254 region.end = 0x376;
1255 res = &dev->resource[3];
1256 res->flags = LEGACY_IO_RESOURCE;
fc279850 1257 pcibios_bus_to_resource(dev->bus, res, &region);
075eb9e3
BH
1258 dev_info(&dev->dev, "legacy IDE quirk: reg 0x1c: %pR\n",
1259 res);
368c73d4
AC
1260 }
1261 }
1da177e4
LT
1262 break;
1263
1264 case PCI_HEADER_TYPE_BRIDGE: /* bridge header */
1265 if (class != PCI_CLASS_BRIDGE_PCI)
1266 goto bad;
1267 /* The PCI-to-PCI bridge spec requires that subtractive
1268 decoding (i.e. transparent) bridge must have programming
f7625980 1269 interface code of 0x01. */
3efd273b 1270 pci_read_irq(dev);
1da177e4
LT
1271 dev->transparent = ((dev->class & 0xff) == 1);
1272 pci_read_bases(dev, 2, PCI_ROM_ADDRESS1);
28760489 1273 set_pcie_hotplug_bridge(dev);
bc577d2b
GB
1274 pos = pci_find_capability(dev, PCI_CAP_ID_SSVID);
1275 if (pos) {
1276 pci_read_config_word(dev, pos + PCI_SSVID_VENDOR_ID, &dev->subsystem_vendor);
1277 pci_read_config_word(dev, pos + PCI_SSVID_DEVICE_ID, &dev->subsystem_device);
1278 }
1da177e4
LT
1279 break;
1280
1281 case PCI_HEADER_TYPE_CARDBUS: /* CardBus bridge header */
1282 if (class != PCI_CLASS_BRIDGE_CARDBUS)
1283 goto bad;
1284 pci_read_irq(dev);
1285 pci_read_bases(dev, 1, 0);
1286 pci_read_config_word(dev, PCI_CB_SUBSYSTEM_VENDOR_ID, &dev->subsystem_vendor);
1287 pci_read_config_word(dev, PCI_CB_SUBSYSTEM_ID, &dev->subsystem_device);
1288 break;
1289
1290 default: /* unknown header */
227f0647
RD
1291 dev_err(&dev->dev, "unknown header type %02x, ignoring device\n",
1292 dev->hdr_type);
480b93b7 1293 return -EIO;
1da177e4
LT
1294
1295 bad:
227f0647
RD
1296 dev_err(&dev->dev, "ignoring class %#08x (doesn't match header type %02x)\n",
1297 dev->class, dev->hdr_type);
1da177e4
LT
1298 dev->class = PCI_CLASS_NOT_DEFINED;
1299 }
1300
1301 /* We found a fine healthy device, go go go... */
1302 return 0;
1303}
1304
589fcc23
BH
1305static struct hpp_type0 pci_default_type0 = {
1306 .revision = 1,
1307 .cache_line_size = 8,
1308 .latency_timer = 0x40,
1309 .enable_serr = 0,
1310 .enable_perr = 0,
1311};
1312
1313static void program_hpp_type0(struct pci_dev *dev, struct hpp_type0 *hpp)
1314{
1315 u16 pci_cmd, pci_bctl;
1316
c6285fc5 1317 if (!hpp)
589fcc23 1318 hpp = &pci_default_type0;
589fcc23
BH
1319
1320 if (hpp->revision > 1) {
1321 dev_warn(&dev->dev,
1322 "PCI settings rev %d not supported; using defaults\n",
1323 hpp->revision);
1324 hpp = &pci_default_type0;
1325 }
1326
1327 pci_write_config_byte(dev, PCI_CACHE_LINE_SIZE, hpp->cache_line_size);
1328 pci_write_config_byte(dev, PCI_LATENCY_TIMER, hpp->latency_timer);
1329 pci_read_config_word(dev, PCI_COMMAND, &pci_cmd);
1330 if (hpp->enable_serr)
1331 pci_cmd |= PCI_COMMAND_SERR;
589fcc23
BH
1332 if (hpp->enable_perr)
1333 pci_cmd |= PCI_COMMAND_PARITY;
589fcc23
BH
1334 pci_write_config_word(dev, PCI_COMMAND, pci_cmd);
1335
1336 /* Program bridge control value */
1337 if ((dev->class >> 8) == PCI_CLASS_BRIDGE_PCI) {
1338 pci_write_config_byte(dev, PCI_SEC_LATENCY_TIMER,
1339 hpp->latency_timer);
1340 pci_read_config_word(dev, PCI_BRIDGE_CONTROL, &pci_bctl);
1341 if (hpp->enable_serr)
1342 pci_bctl |= PCI_BRIDGE_CTL_SERR;
589fcc23
BH
1343 if (hpp->enable_perr)
1344 pci_bctl |= PCI_BRIDGE_CTL_PARITY;
589fcc23
BH
1345 pci_write_config_word(dev, PCI_BRIDGE_CONTROL, pci_bctl);
1346 }
1347}
1348
1349static void program_hpp_type1(struct pci_dev *dev, struct hpp_type1 *hpp)
1350{
1351 if (hpp)
1352 dev_warn(&dev->dev, "PCI-X settings not supported\n");
1353}
1354
1355static void program_hpp_type2(struct pci_dev *dev, struct hpp_type2 *hpp)
1356{
1357 int pos;
1358 u32 reg32;
1359
1360 if (!hpp)
1361 return;
1362
1363 if (hpp->revision > 1) {
1364 dev_warn(&dev->dev, "PCIe settings rev %d not supported\n",
1365 hpp->revision);
1366 return;
1367 }
1368
302328c0
BH
1369 /*
1370 * Don't allow _HPX to change MPS or MRRS settings. We manage
1371 * those to make sure they're consistent with the rest of the
1372 * platform.
1373 */
1374 hpp->pci_exp_devctl_and |= PCI_EXP_DEVCTL_PAYLOAD |
1375 PCI_EXP_DEVCTL_READRQ;
1376 hpp->pci_exp_devctl_or &= ~(PCI_EXP_DEVCTL_PAYLOAD |
1377 PCI_EXP_DEVCTL_READRQ);
1378
589fcc23
BH
1379 /* Initialize Device Control Register */
1380 pcie_capability_clear_and_set_word(dev, PCI_EXP_DEVCTL,
1381 ~hpp->pci_exp_devctl_and, hpp->pci_exp_devctl_or);
1382
1383 /* Initialize Link Control Register */
7a1562d4 1384 if (pcie_cap_has_lnkctl(dev))
589fcc23
BH
1385 pcie_capability_clear_and_set_word(dev, PCI_EXP_LNKCTL,
1386 ~hpp->pci_exp_lnkctl_and, hpp->pci_exp_lnkctl_or);
1387
1388 /* Find Advanced Error Reporting Enhanced Capability */
1389 pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR);
1390 if (!pos)
1391 return;
1392
1393 /* Initialize Uncorrectable Error Mask Register */
1394 pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_MASK, &reg32);
1395 reg32 = (reg32 & hpp->unc_err_mask_and) | hpp->unc_err_mask_or;
1396 pci_write_config_dword(dev, pos + PCI_ERR_UNCOR_MASK, reg32);
1397
1398 /* Initialize Uncorrectable Error Severity Register */
1399 pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_SEVER, &reg32);
1400 reg32 = (reg32 & hpp->unc_err_sever_and) | hpp->unc_err_sever_or;
1401 pci_write_config_dword(dev, pos + PCI_ERR_UNCOR_SEVER, reg32);
1402
1403 /* Initialize Correctable Error Mask Register */
1404 pci_read_config_dword(dev, pos + PCI_ERR_COR_MASK, &reg32);
1405 reg32 = (reg32 & hpp->cor_err_mask_and) | hpp->cor_err_mask_or;
1406 pci_write_config_dword(dev, pos + PCI_ERR_COR_MASK, reg32);
1407
1408 /* Initialize Advanced Error Capabilities and Control Register */
1409 pci_read_config_dword(dev, pos + PCI_ERR_CAP, &reg32);
1410 reg32 = (reg32 & hpp->adv_err_cap_and) | hpp->adv_err_cap_or;
1411 pci_write_config_dword(dev, pos + PCI_ERR_CAP, reg32);
1412
1413 /*
1414 * FIXME: The following two registers are not supported yet.
1415 *
1416 * o Secondary Uncorrectable Error Severity Register
1417 * o Secondary Uncorrectable Error Mask Register
1418 */
1419}
1420
6cd33649
BH
1421static void pci_configure_device(struct pci_dev *dev)
1422{
1423 struct hotplug_params hpp;
1424 int ret;
1425
6cd33649
BH
1426 memset(&hpp, 0, sizeof(hpp));
1427 ret = pci_get_hp_params(dev, &hpp);
1428 if (ret)
1429 return;
1430
1431 program_hpp_type2(dev, hpp.t2);
1432 program_hpp_type1(dev, hpp.t1);
1433 program_hpp_type0(dev, hpp.t0);
1434}
1435
201de56e
ZY
1436static void pci_release_capabilities(struct pci_dev *dev)
1437{
1438 pci_vpd_release(dev);
d1b054da 1439 pci_iov_release(dev);
f796841e 1440 pci_free_cap_save_buffers(dev);
201de56e
ZY
1441}
1442
1da177e4
LT
1443/**
1444 * pci_release_dev - free a pci device structure when all users of it are finished.
1445 * @dev: device that's been disconnected
1446 *
1447 * Will be called only by the device core when all users of this pci device are
1448 * done.
1449 */
1450static void pci_release_dev(struct device *dev)
1451{
04480094 1452 struct pci_dev *pci_dev;
1da177e4 1453
04480094 1454 pci_dev = to_pci_dev(dev);
201de56e 1455 pci_release_capabilities(pci_dev);
98d9f30c 1456 pci_release_of_node(pci_dev);
6ae32c53 1457 pcibios_release_device(pci_dev);
8b1fce04 1458 pci_bus_put(pci_dev->bus);
782a985d 1459 kfree(pci_dev->driver_override);
1da177e4
LT
1460 kfree(pci_dev);
1461}
1462
3c6e6ae7 1463struct pci_dev *pci_alloc_dev(struct pci_bus *bus)
65891215
ME
1464{
1465 struct pci_dev *dev;
1466
1467 dev = kzalloc(sizeof(struct pci_dev), GFP_KERNEL);
1468 if (!dev)
1469 return NULL;
1470
65891215 1471 INIT_LIST_HEAD(&dev->bus_list);
88e7b167 1472 dev->dev.type = &pci_dev_type;
3c6e6ae7 1473 dev->bus = pci_bus_get(bus);
65891215
ME
1474
1475 return dev;
1476}
3c6e6ae7
GZ
1477EXPORT_SYMBOL(pci_alloc_dev);
1478
efdc87da 1479bool pci_bus_read_dev_vendor_id(struct pci_bus *bus, int devfn, u32 *l,
3c78bc61 1480 int crs_timeout)
1da177e4 1481{
1da177e4
LT
1482 int delay = 1;
1483
efdc87da
YL
1484 if (pci_bus_read_config_dword(bus, devfn, PCI_VENDOR_ID, l))
1485 return false;
1da177e4
LT
1486
1487 /* some broken boards return 0 or ~0 if a slot is empty: */
efdc87da
YL
1488 if (*l == 0xffffffff || *l == 0x00000000 ||
1489 *l == 0x0000ffff || *l == 0xffff0000)
1490 return false;
1da177e4 1491
89665a6a
RJ
1492 /*
1493 * Configuration Request Retry Status. Some root ports return the
1494 * actual device ID instead of the synthetic ID (0xFFFF) required
1495 * by the PCIe spec. Ignore the device ID and only check for
1496 * (vendor id == 1).
1497 */
1498 while ((*l & 0xffff) == 0x0001) {
efdc87da
YL
1499 if (!crs_timeout)
1500 return false;
1501
1da177e4
LT
1502 msleep(delay);
1503 delay *= 2;
efdc87da
YL
1504 if (pci_bus_read_config_dword(bus, devfn, PCI_VENDOR_ID, l))
1505 return false;
1da177e4 1506 /* Card hasn't responded in 60 seconds? Must be stuck. */
efdc87da 1507 if (delay > crs_timeout) {
227f0647
RD
1508 printk(KERN_WARNING "pci %04x:%02x:%02x.%d: not responding\n",
1509 pci_domain_nr(bus), bus->number, PCI_SLOT(devfn),
1510 PCI_FUNC(devfn));
efdc87da 1511 return false;
1da177e4
LT
1512 }
1513 }
1514
efdc87da
YL
1515 return true;
1516}
1517EXPORT_SYMBOL(pci_bus_read_dev_vendor_id);
1518
1519/*
1520 * Read the config data for a PCI device, sanity-check it
1521 * and fill in the dev structure...
1522 */
1523static struct pci_dev *pci_scan_device(struct pci_bus *bus, int devfn)
1524{
1525 struct pci_dev *dev;
1526 u32 l;
1527
1528 if (!pci_bus_read_dev_vendor_id(bus, devfn, &l, 60*1000))
1529 return NULL;
1530
8b1fce04 1531 dev = pci_alloc_dev(bus);
1da177e4
LT
1532 if (!dev)
1533 return NULL;
1534
1da177e4 1535 dev->devfn = devfn;
1da177e4
LT
1536 dev->vendor = l & 0xffff;
1537 dev->device = (l >> 16) & 0xffff;
cef354db 1538
98d9f30c
BH
1539 pci_set_of_node(dev);
1540
480b93b7 1541 if (pci_setup_device(dev)) {
8b1fce04 1542 pci_bus_put(dev->bus);
1da177e4
LT
1543 kfree(dev);
1544 return NULL;
1545 }
1da177e4
LT
1546
1547 return dev;
1548}
1549
201de56e
ZY
1550static void pci_init_capabilities(struct pci_dev *dev)
1551{
1552 /* MSI/MSI-X list */
1553 pci_msi_init_pci_dev(dev);
1554
63f4898a
RW
1555 /* Buffers for saving PCIe and PCI-X capabilities */
1556 pci_allocate_cap_save_buffers(dev);
1557
201de56e
ZY
1558 /* Power Management */
1559 pci_pm_init(dev);
1560
1561 /* Vital Product Data */
1562 pci_vpd_pci22_init(dev);
58c3a727
YZ
1563
1564 /* Alternative Routing-ID Forwarding */
31ab2476 1565 pci_configure_ari(dev);
d1b054da
YZ
1566
1567 /* Single Root I/O Virtualization */
1568 pci_iov_init(dev);
ae21ee65
AK
1569
1570 /* Enable ACS P2P upstream forwarding */
5d990b62 1571 pci_enable_acs(dev);
201de56e
ZY
1572}
1573
44aa0c65
MZ
1574static void pci_set_msi_domain(struct pci_dev *dev)
1575{
1576 /*
1577 * If no domain has been set through the pcibios_add_device
1578 * callback, inherit the default from the bus device.
1579 */
1580 if (!dev_get_msi_domain(&dev->dev))
1581 dev_set_msi_domain(&dev->dev,
1582 dev_get_msi_domain(&dev->bus->dev));
1583}
1584
96bde06a 1585void pci_device_add(struct pci_dev *dev, struct pci_bus *bus)
1da177e4 1586{
4f535093
YL
1587 int ret;
1588
6cd33649
BH
1589 pci_configure_device(dev);
1590
cdb9b9f7
PM
1591 device_initialize(&dev->dev);
1592 dev->dev.release = pci_release_dev;
1da177e4 1593
7629d19a 1594 set_dev_node(&dev->dev, pcibus_to_node(bus));
cdb9b9f7 1595 dev->dev.dma_mask = &dev->dma_mask;
4d57cdfa 1596 dev->dev.dma_parms = &dev->dma_parms;
cdb9b9f7 1597 dev->dev.coherent_dma_mask = 0xffffffffull;
de335bb4 1598 of_pci_dma_configure(dev);
1da177e4 1599
4d57cdfa 1600 pci_set_dma_max_seg_size(dev, 65536);
59fc67de 1601 pci_set_dma_seg_boundary(dev, 0xffffffff);
4d57cdfa 1602
1da177e4
LT
1603 /* Fix up broken headers */
1604 pci_fixup_device(pci_fixup_header, dev);
1605
2069ecfb
YL
1606 /* moved out from quirk header fixup code */
1607 pci_reassigndev_resource_alignment(dev);
1608
4b77b0a2
RW
1609 /* Clear the state_saved flag. */
1610 dev->state_saved = false;
1611
201de56e
ZY
1612 /* Initialize various capabilities */
1613 pci_init_capabilities(dev);
eb9d0fe4 1614
1da177e4
LT
1615 /*
1616 * Add the device to our list of discovered devices
1617 * and the bus list for fixup functions, etc.
1618 */
d71374da 1619 down_write(&pci_bus_sem);
1da177e4 1620 list_add_tail(&dev->bus_list, &bus->devices);
d71374da 1621 up_write(&pci_bus_sem);
4f535093 1622
4f535093
YL
1623 ret = pcibios_add_device(dev);
1624 WARN_ON(ret < 0);
1625
44aa0c65
MZ
1626 /* Setup MSI irq domain */
1627 pci_set_msi_domain(dev);
1628
4f535093
YL
1629 /* Notifier could use PCI capabilities */
1630 dev->match_driver = false;
1631 ret = device_add(&dev->dev);
1632 WARN_ON(ret < 0);
cdb9b9f7
PM
1633}
1634
10874f5a 1635struct pci_dev *pci_scan_single_device(struct pci_bus *bus, int devfn)
cdb9b9f7
PM
1636{
1637 struct pci_dev *dev;
1638
90bdb311
TP
1639 dev = pci_get_slot(bus, devfn);
1640 if (dev) {
1641 pci_dev_put(dev);
1642 return dev;
1643 }
1644
cdb9b9f7
PM
1645 dev = pci_scan_device(bus, devfn);
1646 if (!dev)
1647 return NULL;
1648
1649 pci_device_add(dev, bus);
1da177e4
LT
1650
1651 return dev;
1652}
b73e9687 1653EXPORT_SYMBOL(pci_scan_single_device);
1da177e4 1654
b1bd58e4 1655static unsigned next_fn(struct pci_bus *bus, struct pci_dev *dev, unsigned fn)
f07852d6 1656{
b1bd58e4
YW
1657 int pos;
1658 u16 cap = 0;
1659 unsigned next_fn;
4fb88c1a 1660
b1bd58e4
YW
1661 if (pci_ari_enabled(bus)) {
1662 if (!dev)
1663 return 0;
1664 pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ARI);
1665 if (!pos)
1666 return 0;
4fb88c1a 1667
b1bd58e4
YW
1668 pci_read_config_word(dev, pos + PCI_ARI_CAP, &cap);
1669 next_fn = PCI_ARI_CAP_NFN(cap);
1670 if (next_fn <= fn)
1671 return 0; /* protect against malformed list */
f07852d6 1672
b1bd58e4
YW
1673 return next_fn;
1674 }
1675
1676 /* dev may be NULL for non-contiguous multifunction devices */
1677 if (!dev || dev->multifunction)
1678 return (fn + 1) % 8;
f07852d6 1679
f07852d6
MW
1680 return 0;
1681}
1682
1683static int only_one_child(struct pci_bus *bus)
1684{
1685 struct pci_dev *parent = bus->self;
284f5f9d 1686
f07852d6
MW
1687 if (!parent || !pci_is_pcie(parent))
1688 return 0;
62f87c0e 1689 if (pci_pcie_type(parent) == PCI_EXP_TYPE_ROOT_PORT)
284f5f9d 1690 return 1;
777e61ea 1691 if (parent->has_secondary_link &&
284f5f9d 1692 !pci_has_flag(PCI_SCAN_ALL_PCIE_DEVS))
f07852d6
MW
1693 return 1;
1694 return 0;
1695}
1696
1da177e4
LT
1697/**
1698 * pci_scan_slot - scan a PCI slot on a bus for devices.
1699 * @bus: PCI bus to scan
1700 * @devfn: slot number to scan (must have zero function.)
1701 *
1702 * Scan a PCI slot on the specified PCI bus for devices, adding
1703 * discovered devices to the @bus->devices list. New devices
8a1bc901 1704 * will not have is_added set.
1b69dfc6
TP
1705 *
1706 * Returns the number of new devices found.
1da177e4 1707 */
96bde06a 1708int pci_scan_slot(struct pci_bus *bus, int devfn)
1da177e4 1709{
f07852d6 1710 unsigned fn, nr = 0;
1b69dfc6 1711 struct pci_dev *dev;
f07852d6
MW
1712
1713 if (only_one_child(bus) && (devfn > 0))
1714 return 0; /* Already scanned the entire slot */
1da177e4 1715
1b69dfc6 1716 dev = pci_scan_single_device(bus, devfn);
4fb88c1a
MW
1717 if (!dev)
1718 return 0;
1719 if (!dev->is_added)
1b69dfc6
TP
1720 nr++;
1721
b1bd58e4 1722 for (fn = next_fn(bus, dev, 0); fn > 0; fn = next_fn(bus, dev, fn)) {
f07852d6
MW
1723 dev = pci_scan_single_device(bus, devfn + fn);
1724 if (dev) {
1725 if (!dev->is_added)
1726 nr++;
1727 dev->multifunction = 1;
1da177e4
LT
1728 }
1729 }
7d715a6c 1730
149e1637
SL
1731 /* On PCIe, only one slot has a device, so ASPM init runs once per bus */
1732 if (bus->self && nr)
7d715a6c
SL
1733 pcie_aspm_init_link_state(bus->self);
1734
1da177e4
LT
1735 return nr;
1736}
b7fe9434 1737EXPORT_SYMBOL(pci_scan_slot);
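A rough sketch (not part of this file) of how a hotplug driver might consume pci_scan_slot(): scan function 0 of the slot that just came up and add whatever was found, under the rescan/remove lock defined later in this file. The "bus" and "slot" variables are assumed to come from the hotplug controller.

	/* Hypothetical hotplug path: bus/slot supplied by the controller. */
	int nr;

	pci_lock_rescan_remove();
	nr = pci_scan_slot(bus, PCI_DEVFN(slot, 0));
	if (nr)
		pci_bus_add_devices(bus);
	pci_unlock_rescan_remove();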
1da177e4 1738
b03e7495
JM
1739static int pcie_find_smpss(struct pci_dev *dev, void *data)
1740{
1741 u8 *smpss = data;
1742
1743 if (!pci_is_pcie(dev))
1744 return 0;
1745
d4aa68f6
YW
1746 /*
1747 * We don't have a way to change MPS settings on devices that have
1748 * drivers attached. A hot-added device might support only the minimum
1749 * MPS setting (MPS=128). Therefore, if the fabric contains a bridge
1750 * where devices may be hot-added, we limit the fabric MPS to 128 so
1751 * hot-added devices will work correctly.
1752 *
1753 * However, if we hot-add a device to a slot directly below a Root
1754 * Port, it's impossible for there to be other existing devices below
1755 * the port. We don't limit the MPS in this case because we can
1756 * reconfigure MPS on both the Root Port and the hot-added device,
1757 * and there are no other devices involved.
1758 *
1759 * Note that this PCIE_BUS_SAFE path assumes no peer-to-peer DMA.
b03e7495 1760 */
d4aa68f6
YW
1761 if (dev->is_hotplug_bridge &&
1762 pci_pcie_type(dev) != PCI_EXP_TYPE_ROOT_PORT)
b03e7495
JM
1763 *smpss = 0;
1764
1765 if (*smpss > dev->pcie_mpss)
1766 *smpss = dev->pcie_mpss;
1767
1768 return 0;
1769}
1770
1771static void pcie_write_mps(struct pci_dev *dev, int mps)
1772{
62f392ea 1773 int rc;
b03e7495
JM
1774
1775 if (pcie_bus_config == PCIE_BUS_PERFORMANCE) {
62f392ea 1776 mps = 128 << dev->pcie_mpss;
b03e7495 1777
62f87c0e
YW
1778 if (pci_pcie_type(dev) != PCI_EXP_TYPE_ROOT_PORT &&
1779 dev->bus->self)
62f392ea 1780 /* For "Performance", the assumption is made that
b03e7495
JM
1781 * downstream communication will never be larger than
1782 * the MRRS. So, the MPS only needs to be configured
1783 * for the upstream communication. This being the case,
1784 * walk from the top down and set the MPS of the child
1785 * to that of the parent bus.
62f392ea
JM
1786 *
1787 * Configure the device MPS with the smaller of the
1788 * device MPSS or the bridge MPS (which is assumed to be
1789 * properly configured at this point to the largest
1790 * allowable MPS based on its parent bus).
b03e7495 1791 */
62f392ea 1792 mps = min(mps, pcie_get_mps(dev->bus->self));
b03e7495
JM
1793 }
1794
1795 rc = pcie_set_mps(dev, mps);
1796 if (rc)
1797 dev_err(&dev->dev, "Failed attempting to set the MPS\n");
1798}
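A short worked example of the PCIE_BUS_PERFORMANCE arithmetic above, with made-up numbers:

	/*
	 * Hypothetical numbers: an endpoint advertising pcie_mpss == 2
	 * (i.e. 128 << 2 = 512 byte maximum payload) below a bridge whose
	 * MPS is already programmed to 256 bytes.
	 *
	 *   mps = 128 << dev->pcie_mpss;                   -> 512
	 *   mps = min(mps, pcie_get_mps(dev->bus->self));  -> 256
	 *
	 * pcie_set_mps() therefore leaves the endpoint at 256 bytes,
	 * matching its upstream bus.
	 */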
1799
62f392ea 1800static void pcie_write_mrrs(struct pci_dev *dev)
b03e7495 1801{
62f392ea 1802 int rc, mrrs;
b03e7495 1803
ed2888e9
JM
1804 /* In the "safe" case, do not configure the MRRS. There appear to be
1805 * issues with setting MRRS to 0 on a number of devices.
1806 */
ed2888e9
JM
1807 if (pcie_bus_config != PCIE_BUS_PERFORMANCE)
1808 return;
1809
ed2888e9
JM
1810 /* For Max performance, the MRRS must be set to the largest supported
1811 * value. However, it cannot be configured larger than the MPS the
62f392ea
JM
1812 * device or the bus can support. This should already be properly
1813 * configured by a prior call to pcie_write_mps.
ed2888e9 1814 */
62f392ea 1815 mrrs = pcie_get_mps(dev);
b03e7495
JM
1816
1817 /* MRRS is an R/W register. Invalid values can be written, but a
ed2888e9 1818 * subsequent read will verify if the value is acceptable or not.
b03e7495
JM
1819 * If the MRRS value provided is not acceptable (e.g., too large),
1820 * shrink the value until it is acceptable to the HW.
f7625980 1821 */
b03e7495
JM
1822 while (mrrs != pcie_get_readrq(dev) && mrrs >= 128) {
1823 rc = pcie_set_readrq(dev, mrrs);
62f392ea
JM
1824 if (!rc)
1825 break;
b03e7495 1826
62f392ea 1827 dev_warn(&dev->dev, "Failed attempting to set the MRRS\n");
b03e7495
JM
1828 mrrs /= 2;
1829 }
62f392ea
JM
1830
1831 if (mrrs < 128)
227f0647 1832 dev_err(&dev->dev, "MRRS was unable to be configured with a safe value. If problems are experienced, try running with pci=pcie_bus_safe\n");
b03e7495
JM
1833}
1834
5895af79
YW
1835static void pcie_bus_detect_mps(struct pci_dev *dev)
1836{
1837 struct pci_dev *bridge = dev->bus->self;
1838 int mps, p_mps;
1839
1840 if (!bridge)
1841 return;
1842
1843 mps = pcie_get_mps(dev);
1844 p_mps = pcie_get_mps(bridge);
1845
1846 if (mps != p_mps)
1847 dev_warn(&dev->dev, "Max Payload Size %d, but upstream %s set to %d; if necessary, use \"pci=pcie_bus_safe\" and report a bug\n",
1848 mps, pci_name(bridge), p_mps);
1849}
1850
b03e7495
JM
1851static int pcie_bus_configure_set(struct pci_dev *dev, void *data)
1852{
a513a99a 1853 int mps, orig_mps;
b03e7495
JM
1854
1855 if (!pci_is_pcie(dev))
1856 return 0;
1857
5895af79
YW
1858 if (pcie_bus_config == PCIE_BUS_TUNE_OFF) {
1859 pcie_bus_detect_mps(dev);
1860 return 0;
1861 }
1862
a513a99a
JM
1863 mps = 128 << *(u8 *)data;
1864 orig_mps = pcie_get_mps(dev);
b03e7495
JM
1865
1866 pcie_write_mps(dev, mps);
62f392ea 1867 pcie_write_mrrs(dev);
b03e7495 1868
227f0647
RD
1869 dev_info(&dev->dev, "Max Payload Size set to %4d/%4d (was %4d), Max Read Rq %4d\n",
1870 pcie_get_mps(dev), 128 << dev->pcie_mpss,
a513a99a 1871 orig_mps, pcie_get_readrq(dev));
b03e7495
JM
1872
1873 return 0;
1874}
1875
a513a99a 1876/* pcie_bus_configure_settings requires that pci_walk_bus work in a top-down,
b03e7495
JM
1877 * parents then children fashion. If this changes, then this code will not
1878 * work as designed.
1879 */
a58674ff 1880void pcie_bus_configure_settings(struct pci_bus *bus)
b03e7495 1881{
1e358f94 1882 u8 smpss = 0;
b03e7495 1883
a58674ff 1884 if (!bus->self)
b03e7495
JM
1885 return;
1886
b03e7495 1887 if (!pci_is_pcie(bus->self))
5f39e670
JM
1888 return;
1889
1890 /* FIXME - Peer to peer DMA is possible, though the endpoint would need
3315472c 1891 * to be aware of the MPS of the destination. To work around this,
5f39e670
JM
1892 * simply force the MPS of the entire system to the smallest possible.
1893 */
1894 if (pcie_bus_config == PCIE_BUS_PEER2PEER)
1895 smpss = 0;
1896
b03e7495 1897 if (pcie_bus_config == PCIE_BUS_SAFE) {
a58674ff 1898 smpss = bus->self->pcie_mpss;
5f39e670 1899
b03e7495
JM
1900 pcie_find_smpss(bus->self, &smpss);
1901 pci_walk_bus(bus, pcie_find_smpss, &smpss);
1902 }
1903
1904 pcie_bus_configure_set(bus->self, &smpss);
1905 pci_walk_bus(bus, pcie_bus_configure_set, &smpss);
1906}
debc3b77 1907EXPORT_SYMBOL_GPL(pcie_bus_configure_settings);
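For context, a sketch of the usual caller (based on the common host-driver pattern, not code from this file): after pci_scan_root_bus() the driver walks the child buses to configure MPS/MRRS before adding the devices.

	/* Sketch: "bus" is the root bus returned by pci_scan_root_bus(). */
	struct pci_bus *child;

	list_for_each_entry(child, &bus->children, node)
		pcie_bus_configure_settings(child);
	pci_bus_add_devices(bus);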
b03e7495 1908
15856ad5 1909unsigned int pci_scan_child_bus(struct pci_bus *bus)
1da177e4 1910{
b918c62e 1911 unsigned int devfn, pass, max = bus->busn_res.start;
1da177e4
LT
1912 struct pci_dev *dev;
1913
0207c356 1914 dev_dbg(&bus->dev, "scanning bus\n");
1da177e4
LT
1915
1916 /* Go find them, Rover! */
1917 for (devfn = 0; devfn < 0x100; devfn += 8)
1918 pci_scan_slot(bus, devfn);
1919
a28724b0
YZ
1920 /* Reserve buses for SR-IOV capability. */
1921 max += pci_iov_bus_range(bus);
1922
1da177e4
LT
1923 /*
1924 * After performing arch-dependent fixup of the bus, look behind
1925 * all PCI-to-PCI bridges on this bus.
1926 */
74710ded 1927 if (!bus->is_added) {
0207c356 1928 dev_dbg(&bus->dev, "fixups for bus\n");
74710ded 1929 pcibios_fixup_bus(bus);
981cf9ea 1930 bus->is_added = 1;
74710ded
AC
1931 }
1932
3c78bc61 1933 for (pass = 0; pass < 2; pass++)
1da177e4 1934 list_for_each_entry(dev, &bus->devices, bus_list) {
6788a51f 1935 if (pci_is_bridge(dev))
1da177e4
LT
1936 max = pci_scan_bridge(bus, dev, max, pass);
1937 }
1938
1939 /*
1940 * We've scanned the bus and so we know all about what's on
1941 * the other side of any bridges that may be on this bus plus
1942 * any devices.
1943 *
1944 * Return how far we've got finding sub-buses.
1945 */
0207c356 1946 dev_dbg(&bus->dev, "bus scan returning with max=%02x\n", max);
1da177e4
LT
1947 return max;
1948}
b7fe9434 1949EXPORT_SYMBOL_GPL(pci_scan_child_bus);
1da177e4 1950
6c0cc950
RW
1951/**
1952 * pcibios_root_bridge_prepare - Platform-specific host bridge setup.
1953 * @bridge: Host bridge to set up.
1954 *
1955 * Default empty implementation. Replace with an architecture-specific setup
1956 * routine, if necessary.
1957 */
1958int __weak pcibios_root_bridge_prepare(struct pci_host_bridge *bridge)
1959{
1960 return 0;
1961}
1962
10a95747
JL
1963void __weak pcibios_add_bus(struct pci_bus *bus)
1964{
1965}
1966
1967void __weak pcibios_remove_bus(struct pci_bus *bus)
1968{
1969}
1970
166c6370
BH
1971struct pci_bus *pci_create_root_bus(struct device *parent, int bus,
1972 struct pci_ops *ops, void *sysdata, struct list_head *resources)
1da177e4 1973{
0efd5aab 1974 int error;
5a21d70d 1975 struct pci_host_bridge *bridge;
0207c356 1976 struct pci_bus *b, *b2;
14d76b68 1977 struct resource_entry *window, *n;
a9d9f527 1978 struct resource *res;
0efd5aab
BH
1979 resource_size_t offset;
1980 char bus_addr[64];
1981 char *fmt;
1da177e4 1982
670ba0c8 1983 b = pci_alloc_bus(NULL);
1da177e4 1984 if (!b)
7b543663 1985 return NULL;
1da177e4
LT
1986
1987 b->sysdata = sysdata;
1988 b->ops = ops;
4f535093 1989 b->number = b->busn_res.start = bus;
670ba0c8 1990 pci_bus_assign_domain_nr(b, parent);
0207c356
BH
1991 b2 = pci_find_bus(pci_domain_nr(b), bus);
1992 if (b2) {
1da177e4 1993 /* If we already got to this bus through a different bridge, ignore it */
0207c356 1994 dev_dbg(&b2->dev, "bus already known\n");
1da177e4
LT
1995 goto err_out;
1996 }
d71374da 1997
7b543663
YL
1998 bridge = pci_alloc_host_bridge(b);
1999 if (!bridge)
2000 goto err_out;
2001
2002 bridge->dev.parent = parent;
70efde2a 2003 bridge->dev.release = pci_release_host_bridge_dev;
7b543663 2004 dev_set_name(&bridge->dev, "pci%04x:%02x", pci_domain_nr(b), bus);
6c0cc950 2005 error = pcibios_root_bridge_prepare(bridge);
343df771
JL
2006 if (error) {
2007 kfree(bridge);
2008 goto err_out;
2009 }
6c0cc950 2010
7b543663 2011 error = device_register(&bridge->dev);
343df771
JL
2012 if (error) {
2013 put_device(&bridge->dev);
2014 goto err_out;
2015 }
7b543663 2016 b->bridge = get_device(&bridge->dev);
a1e4d72c 2017 device_enable_async_suspend(b->bridge);
98d9f30c 2018 pci_set_bus_of_node(b);
44aa0c65 2019 pci_set_bus_msi_domain(b);
1da177e4 2020
0d358f22
YL
2021 if (!parent)
2022 set_dev_node(b->bridge, pcibus_to_node(b));
2023
fd7d1ced
GKH
2024 b->dev.class = &pcibus_class;
2025 b->dev.parent = b->bridge;
1a927133 2026 dev_set_name(&b->dev, "%04x:%02x", pci_domain_nr(b), bus);
fd7d1ced 2027 error = device_register(&b->dev);
1da177e4
LT
2028 if (error)
2029 goto class_dev_reg_err;
1da177e4 2030
10a95747
JL
2031 pcibios_add_bus(b);
2032
1da177e4
LT
2033 /* Create legacy_io and legacy_mem files for this bus */
2034 pci_create_legacy_files(b);
2035
a9d9f527
BH
2036 if (parent)
2037 dev_info(parent, "PCI host bridge to bus %s\n", dev_name(&b->dev));
2038 else
2039 printk(KERN_INFO "PCI host bridge to bus %s\n", dev_name(&b->dev));
2040
0efd5aab 2041 /* Add initial resources to the bus */
14d76b68
JL
2042 resource_list_for_each_entry_safe(window, n, resources) {
2043 list_move_tail(&window->node, &bridge->windows);
0efd5aab
BH
2044 res = window->res;
2045 offset = window->offset;
f848ffb1
YL
2046 if (res->flags & IORESOURCE_BUS)
2047 pci_bus_insert_busn_res(b, bus, res->end);
2048 else
2049 pci_bus_add_resource(b, res, 0);
0efd5aab
BH
2050 if (offset) {
2051 if (resource_type(res) == IORESOURCE_IO)
2052 fmt = " (bus address [%#06llx-%#06llx])";
2053 else
2054 fmt = " (bus address [%#010llx-%#010llx])";
2055 snprintf(bus_addr, sizeof(bus_addr), fmt,
2056 (unsigned long long) (res->start - offset),
2057 (unsigned long long) (res->end - offset));
2058 } else
2059 bus_addr[0] = '\0';
2060 dev_info(&b->dev, "root bus resource %pR%s\n", res, bus_addr);
a9d9f527
BH
2061 }
2062
a5390aa6
BH
2063 down_write(&pci_bus_sem);
2064 list_add_tail(&b->node, &pci_root_buses);
2065 up_write(&pci_bus_sem);
2066
1da177e4
LT
2067 return b;
2068
1da177e4 2069class_dev_reg_err:
7b543663
YL
2070 put_device(&bridge->dev);
2071 device_unregister(&bridge->dev);
1da177e4 2072err_out:
1da177e4
LT
2073 kfree(b);
2074 return NULL;
2075}
e6b29dea 2076EXPORT_SYMBOL_GPL(pci_create_root_bus);
cdb9b9f7 2077
98a35831
YL
2078int pci_bus_insert_busn_res(struct pci_bus *b, int bus, int bus_max)
2079{
2080 struct resource *res = &b->busn_res;
2081 struct resource *parent_res, *conflict;
2082
2083 res->start = bus;
2084 res->end = bus_max;
2085 res->flags = IORESOURCE_BUS;
2086
2087 if (!pci_is_root_bus(b))
2088 parent_res = &b->parent->busn_res;
2089 else {
2090 parent_res = get_pci_domain_busn_res(pci_domain_nr(b));
2091 res->flags |= IORESOURCE_PCI_FIXED;
2092 }
2093
ced04d15 2094 conflict = request_resource_conflict(parent_res, res);
98a35831
YL
2095
2096 if (conflict)
2097 dev_printk(KERN_DEBUG, &b->dev,
2098 "busn_res: can not insert %pR under %s%pR (conflicts with %s %pR)\n",
2099 res, pci_is_root_bus(b) ? "domain " : "",
2100 parent_res, conflict->name, conflict);
98a35831
YL
2101
2102 return conflict == NULL;
2103}
2104
2105int pci_bus_update_busn_res_end(struct pci_bus *b, int bus_max)
2106{
2107 struct resource *res = &b->busn_res;
2108 struct resource old_res = *res;
2109 resource_size_t size;
2110 int ret;
2111
2112 if (res->start > bus_max)
2113 return -EINVAL;
2114
2115 size = bus_max - res->start + 1;
2116 ret = adjust_resource(res, res->start, size);
2117 dev_printk(KERN_DEBUG, &b->dev,
2118 "busn_res: %pR end %s updated to %02x\n",
2119 &old_res, ret ? "can not be" : "is", bus_max);
2120
2121 if (!ret && !res->parent)
2122 pci_bus_insert_busn_res(b, res->start, res->end);
2123
2124 return ret;
2125}
2126
2127void pci_bus_release_busn_res(struct pci_bus *b)
2128{
2129 struct resource *res = &b->busn_res;
2130 int ret;
2131
2132 if (!res->flags || !res->parent)
2133 return;
2134
2135 ret = release_resource(res);
2136 dev_printk(KERN_DEBUG, &b->dev,
2137 "busn_res: %pR %s released\n",
2138 res, ret ? "can not be" : "is");
2139}
2140
15856ad5 2141struct pci_bus *pci_scan_root_bus(struct device *parent, int bus,
a2ebb827
BH
2142 struct pci_ops *ops, void *sysdata, struct list_head *resources)
2143{
14d76b68 2144 struct resource_entry *window;
4d99f524 2145 bool found = false;
a2ebb827 2146 struct pci_bus *b;
4d99f524
YL
2147 int max;
2148
14d76b68 2149 resource_list_for_each_entry(window, resources)
4d99f524
YL
2150 if (window->res->flags & IORESOURCE_BUS) {
2151 found = true;
2152 break;
2153 }
a2ebb827
BH
2154
2155 b = pci_create_root_bus(parent, bus, ops, sysdata, resources);
2156 if (!b)
2157 return NULL;
2158
4d99f524
YL
2159 if (!found) {
2160 dev_info(&b->dev,
2161 "No busn resource found for root bus, will use [bus %02x-ff]\n",
2162 bus);
2163 pci_bus_insert_busn_res(b, bus, 255);
2164 }
2165
2166 max = pci_scan_child_bus(b);
2167
2168 if (!found)
2169 pci_bus_update_busn_res_end(b, max);
2170
a2ebb827
BH
2171 return b;
2172}
2173EXPORT_SYMBOL(pci_scan_root_bus);
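A hedged sketch of a caller that passes an explicit bus-number window so the "[bus xx-ff]" fallback above is never taken; my_pci_ops, sysdata and parent are placeholders for whatever the host driver provides.

	static struct resource busn = {
		.name  = "PCI busn",
		.start = 0,
		.end   = 0x3f,
		.flags = IORESOURCE_BUS,
	};
	LIST_HEAD(resources);
	struct pci_bus *bus;

	pci_add_resource(&resources, &busn);
	pci_add_resource(&resources, &iomem_resource);
	bus = pci_scan_root_bus(parent, 0, &my_pci_ops, sysdata, &resources);
	if (!bus)
		pci_free_resource_list(&resources);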
2174
15856ad5 2175struct pci_bus *pci_scan_bus(int bus, struct pci_ops *ops,
de4b2f76
BH
2176 void *sysdata)
2177{
2178 LIST_HEAD(resources);
2179 struct pci_bus *b;
2180
2181 pci_add_resource(&resources, &ioport_resource);
2182 pci_add_resource(&resources, &iomem_resource);
857c3b66 2183 pci_add_resource(&resources, &busn_resource);
de4b2f76
BH
2184 b = pci_create_root_bus(NULL, bus, ops, sysdata, &resources);
2185 if (b) {
857c3b66 2186 pci_scan_child_bus(b);
de4b2f76
BH
2187 } else {
2188 pci_free_resource_list(&resources);
2189 }
2190 return b;
2191}
2192EXPORT_SYMBOL(pci_scan_bus);
2193
2f320521
YL
2194/**
2195 * pci_rescan_bus_bridge_resize - scan a PCI bus for devices.
2196 * @bridge: PCI bridge for the bus to scan
2197 *
2198 * Scan a PCI bus and child buses for new devices, add them,
2199 * and enable them, resizing bridge mmio/io resource if necessary
2200 * and possible. The caller must ensure the child devices are already
2201 * removed for resizing to occur.
2202 *
2203 * Returns the max number of subordinate bus discovered.
2204 */
10874f5a 2205unsigned int pci_rescan_bus_bridge_resize(struct pci_dev *bridge)
2f320521
YL
2206{
2207 unsigned int max;
2208 struct pci_bus *bus = bridge->subordinate;
2209
2210 max = pci_scan_child_bus(bus);
2211
2212 pci_assign_unassigned_bridge_resources(bridge);
2213
2214 pci_bus_add_devices(bus);
2215
2216 return max;
2217}
2218
a5213a31
YL
2219/**
2220 * pci_rescan_bus - scan a PCI bus for devices.
2221 * @bus: PCI bus to scan
2222 *
2223 * Scan a PCI bus and child buses for new devices, add them,
2224 * and enable them.
2225 *
2226 * Returns the max number of subordinate bus discovered.
2227 */
10874f5a 2228unsigned int pci_rescan_bus(struct pci_bus *bus)
a5213a31
YL
2229{
2230 unsigned int max;
2231
2232 max = pci_scan_child_bus(bus);
2233 pci_assign_unassigned_bus_resources(bus);
2234 pci_bus_add_devices(bus);
2235
2236 return max;
2237}
2238EXPORT_SYMBOL_GPL(pci_rescan_bus);
2239
9d16947b
RW
2240/*
2241 * pci_rescan_bus(), pci_rescan_bus_bridge_resize() and PCI device removal
2242 * routines should always be executed under this mutex.
2243 */
2244static DEFINE_MUTEX(pci_rescan_remove_lock);
2245
2246void pci_lock_rescan_remove(void)
2247{
2248 mutex_lock(&pci_rescan_remove_lock);
2249}
2250EXPORT_SYMBOL_GPL(pci_lock_rescan_remove);
2251
2252void pci_unlock_rescan_remove(void)
2253{
2254 mutex_unlock(&pci_rescan_remove_lock);
2255}
2256EXPORT_SYMBOL_GPL(pci_unlock_rescan_remove);
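A minimal usage sketch of the rule above ("bus" is whatever hierarchy the caller wants to rescan):

	pci_lock_rescan_remove();
	pci_rescan_bus(bus);
	pci_unlock_rescan_remove();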
2257
3c78bc61
RD
2258static int __init pci_sort_bf_cmp(const struct device *d_a,
2259 const struct device *d_b)
6b4b78fe 2260{
99178b03
GKH
2261 const struct pci_dev *a = to_pci_dev(d_a);
2262 const struct pci_dev *b = to_pci_dev(d_b);
2263
6b4b78fe
MD
2264 if (pci_domain_nr(a->bus) < pci_domain_nr(b->bus)) return -1;
2265 else if (pci_domain_nr(a->bus) > pci_domain_nr(b->bus)) return 1;
2266
2267 if (a->bus->number < b->bus->number) return -1;
2268 else if (a->bus->number > b->bus->number) return 1;
2269
2270 if (a->devfn < b->devfn) return -1;
2271 else if (a->devfn > b->devfn) return 1;
2272
2273 return 0;
2274}
2275
5ff580c1 2276void __init pci_sort_breadthfirst(void)
6b4b78fe 2277{
99178b03 2278 bus_sort_breadthfirst(&pci_bus_type, &pci_sort_bf_cmp);
6b4b78fe 2279}