/*
 * probe.c - PCI detection and setup code
 */

#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/of_device.h>
#include <linux/of_pci.h>
#include <linux/pci_hotplug.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/cpumask.h>
#include <linux/pci-aspm.h>
#include <linux/aer.h>
#include <linux/acpi.h>
#include <linux/irqdomain.h>
#include <linux/pm_runtime.h>
#include "pci.h"

#define CARDBUS_LATENCY_TIMER	176	/* secondary latency timer */
#define CARDBUS_RESERVE_BUSNR	3

static struct resource busn_resource = {
	.name	= "PCI busn",
	.start	= 0,
	.end	= 255,
	.flags	= IORESOURCE_BUS,
};

/* Ugh. Need to stop exporting this to modules. */
LIST_HEAD(pci_root_buses);
EXPORT_SYMBOL(pci_root_buses);

static LIST_HEAD(pci_domain_busn_res_list);

struct pci_domain_busn_res {
	struct list_head list;
	struct resource res;
	int domain_nr;
};

static struct resource *get_pci_domain_busn_res(int domain_nr)
{
	struct pci_domain_busn_res *r;

	list_for_each_entry(r, &pci_domain_busn_res_list, list)
		if (r->domain_nr == domain_nr)
			return &r->res;

	r = kzalloc(sizeof(*r), GFP_KERNEL);
	if (!r)
		return NULL;

	r->domain_nr = domain_nr;
	r->res.start = 0;
	r->res.end = 0xff;
	r->res.flags = IORESOURCE_BUS | IORESOURCE_PCI_FIXED;

	list_add_tail(&r->list, &pci_domain_busn_res_list);

	return &r->res;
}

static int find_anything(struct device *dev, void *data)
{
	return 1;
}

/*
 * Some device drivers need to know if PCI is initialized.
 * Basically, we think PCI is not initialized when there
 * is no device to be found on the pci_bus_type.
 */
int no_pci_devices(void)
{
	struct device *dev;
	int no_devices;

	dev = bus_find_device(&pci_bus_type, NULL, NULL, find_anything);
	no_devices = (dev == NULL);
	put_device(dev);
	return no_devices;
}
EXPORT_SYMBOL(no_pci_devices);

/*
 * PCI Bus Class
 */
static void release_pcibus_dev(struct device *dev)
{
	struct pci_bus *pci_bus = to_pci_bus(dev);

	put_device(pci_bus->bridge);
	pci_bus_remove_resources(pci_bus);
	pci_release_bus_of_node(pci_bus);
	kfree(pci_bus);
}

static struct class pcibus_class = {
	.name		= "pci_bus",
	.dev_release	= &release_pcibus_dev,
	.dev_groups	= pcibus_groups,
};

static int __init pcibus_class_init(void)
{
	return class_register(&pcibus_class);
}
postcore_initcall(pcibus_class_init);

static u64 pci_size(u64 base, u64 maxbase, u64 mask)
{
	u64 size = mask & maxbase;	/* Find the significant bits */
	if (!size)
		return 0;

	/* Get the lowest of them to find the decode size, and
	   from that the extent. */
	size = (size & ~(size-1)) - 1;

	/* base == maxbase can be valid only if the BAR has
	   already been programmed with all 1s. */
	if (base == maxbase && ((base | size) & mask) != mask)
		return 0;

	return size;
}

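/*
 * Illustrative example (not part of the original source): for a 32-bit
 * memory BAR whose base is 0xE0000000 and which reads back 0xFFF00000
 * after all 1s were written, the lowest set bit of the masked read-back
 * is 0x00100000, so pci_size() returns 0x000FFFFF and the BAR decodes a
 * 1MB window.
 */
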
static inline unsigned long decode_bar(struct pci_dev *dev, u32 bar)
{
	u32 mem_type;
	unsigned long flags;

	if ((bar & PCI_BASE_ADDRESS_SPACE) == PCI_BASE_ADDRESS_SPACE_IO) {
		flags = bar & ~PCI_BASE_ADDRESS_IO_MASK;
		flags |= IORESOURCE_IO;
		return flags;
	}

	flags = bar & ~PCI_BASE_ADDRESS_MEM_MASK;
	flags |= IORESOURCE_MEM;
	if (flags & PCI_BASE_ADDRESS_MEM_PREFETCH)
		flags |= IORESOURCE_PREFETCH;

	mem_type = bar & PCI_BASE_ADDRESS_MEM_TYPE_MASK;
	switch (mem_type) {
	case PCI_BASE_ADDRESS_MEM_TYPE_32:
		break;
	case PCI_BASE_ADDRESS_MEM_TYPE_1M:
		/* 1M mem BAR treated as 32-bit BAR */
		break;
	case PCI_BASE_ADDRESS_MEM_TYPE_64:
		flags |= IORESOURCE_MEM_64;
		break;
	default:
		/* mem unknown type treated as 32-bit BAR */
		break;
	}
	return flags;
}

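/*
 * Illustrative example (not part of the original source): a BAR value of
 * 0xE000000C has bit 0 clear (memory space), bits 2:1 == 10b (64-bit type)
 * and bit 3 set (prefetchable), so decode_bar() returns IORESOURCE_MEM |
 * IORESOURCE_PREFETCH | IORESOURCE_MEM_64 plus the raw low bits.
 */
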
#define PCI_COMMAND_DECODE_ENABLE	(PCI_COMMAND_MEMORY | PCI_COMMAND_IO)

/**
 * pci_read_base - read a PCI BAR
 * @dev: the PCI device
 * @type: type of the BAR
 * @res: resource buffer to be filled in
 * @pos: BAR position in the config space
 *
 * Returns 1 if the BAR is 64-bit, or 0 if 32-bit.
 */
int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type,
		    struct resource *res, unsigned int pos)
{
	u32 l = 0, sz = 0, mask;
	u64 l64, sz64, mask64;
	u16 orig_cmd;
	struct pci_bus_region region, inverted_region;

	mask = type ? PCI_ROM_ADDRESS_MASK : ~0;

	/* No printks while decoding is disabled! */
	if (!dev->mmio_always_on) {
		pci_read_config_word(dev, PCI_COMMAND, &orig_cmd);
		if (orig_cmd & PCI_COMMAND_DECODE_ENABLE) {
			pci_write_config_word(dev, PCI_COMMAND,
				orig_cmd & ~PCI_COMMAND_DECODE_ENABLE);
		}
	}

	res->name = pci_name(dev);

	pci_read_config_dword(dev, pos, &l);
	pci_write_config_dword(dev, pos, l | mask);
	pci_read_config_dword(dev, pos, &sz);
	pci_write_config_dword(dev, pos, l);

	/*
	 * All bits set in sz means the device isn't working properly.
	 * If the BAR isn't implemented, all bits must be 0.  If it's a
	 * memory BAR or a ROM, bit 0 must be clear; if it's an io BAR, bit
	 * 1 must be clear.
	 */
	if (sz == 0xffffffff)
		sz = 0;

	/*
	 * I don't know how l can have all bits set.  Copied from old code.
	 * Maybe it fixes a bug on some ancient platform.
	 */
	if (l == 0xffffffff)
		l = 0;

	if (type == pci_bar_unknown) {
		res->flags = decode_bar(dev, l);
		res->flags |= IORESOURCE_SIZEALIGN;
		if (res->flags & IORESOURCE_IO) {
			l64 = l & PCI_BASE_ADDRESS_IO_MASK;
			sz64 = sz & PCI_BASE_ADDRESS_IO_MASK;
			mask64 = PCI_BASE_ADDRESS_IO_MASK & (u32)IO_SPACE_LIMIT;
		} else {
			l64 = l & PCI_BASE_ADDRESS_MEM_MASK;
			sz64 = sz & PCI_BASE_ADDRESS_MEM_MASK;
			mask64 = (u32)PCI_BASE_ADDRESS_MEM_MASK;
		}
	} else {
		if (l & PCI_ROM_ADDRESS_ENABLE)
			res->flags |= IORESOURCE_ROM_ENABLE;
		l64 = l & PCI_ROM_ADDRESS_MASK;
		sz64 = sz & PCI_ROM_ADDRESS_MASK;
		mask64 = PCI_ROM_ADDRESS_MASK;
	}

	if (res->flags & IORESOURCE_MEM_64) {
		pci_read_config_dword(dev, pos + 4, &l);
		pci_write_config_dword(dev, pos + 4, ~0);
		pci_read_config_dword(dev, pos + 4, &sz);
		pci_write_config_dword(dev, pos + 4, l);

		l64 |= ((u64)l << 32);
		sz64 |= ((u64)sz << 32);
		mask64 |= ((u64)~0 << 32);
	}

	if (!dev->mmio_always_on && (orig_cmd & PCI_COMMAND_DECODE_ENABLE))
		pci_write_config_word(dev, PCI_COMMAND, orig_cmd);

	if (!sz64)
		goto fail;

	sz64 = pci_size(l64, sz64, mask64);
	if (!sz64) {
		dev_info(&dev->dev, FW_BUG "reg 0x%x: invalid BAR (can't size)\n",
			 pos);
		goto fail;
	}

	if (res->flags & IORESOURCE_MEM_64) {
		if ((sizeof(pci_bus_addr_t) < 8 || sizeof(resource_size_t) < 8)
		    && sz64 > 0x100000000ULL) {
			res->flags |= IORESOURCE_UNSET | IORESOURCE_DISABLED;
			res->start = 0;
			res->end = 0;
			dev_err(&dev->dev, "reg 0x%x: can't handle BAR larger than 4GB (size %#010llx)\n",
				pos, (unsigned long long)sz64);
			goto out;
		}

		if ((sizeof(pci_bus_addr_t) < 8) && l) {
			/* Above 32-bit boundary; try to reallocate */
			res->flags |= IORESOURCE_UNSET;
			res->start = 0;
			res->end = sz64;
			dev_info(&dev->dev, "reg 0x%x: can't handle BAR above 4GB (bus address %#010llx)\n",
				 pos, (unsigned long long)l64);
			goto out;
		}
	}

	region.start = l64;
	region.end = l64 + sz64;

	pcibios_bus_to_resource(dev->bus, res, &region);
	pcibios_resource_to_bus(dev->bus, &inverted_region, res);

	/*
	 * If "A" is a BAR value (a bus address), "bus_to_resource(A)" is
	 * the corresponding resource address (the physical address used by
	 * the CPU).  Converting that resource address back to a bus address
	 * should yield the original BAR value:
	 *
	 *     resource_to_bus(bus_to_resource(A)) == A
	 *
	 * If it doesn't, CPU accesses to "bus_to_resource(A)" will not
	 * be claimed by the device.
	 */
	if (inverted_region.start != region.start) {
		res->flags |= IORESOURCE_UNSET;
		res->start = 0;
		res->end = region.end - region.start;
		dev_info(&dev->dev, "reg 0x%x: initial BAR value %#010llx invalid\n",
			 pos, (unsigned long long)region.start);
	}

	goto out;

fail:
	res->flags = 0;
out:
	if (res->flags)
		dev_printk(KERN_DEBUG, &dev->dev, "reg 0x%x: %pR\n", pos, res);

	return (res->flags & IORESOURCE_MEM_64) ? 1 : 0;
}

static void pci_read_bases(struct pci_dev *dev, unsigned int howmany, int rom)
{
	unsigned int pos, reg;

	if (dev->non_compliant_bars)
		return;

	for (pos = 0; pos < howmany; pos++) {
		struct resource *res = &dev->resource[pos];
		reg = PCI_BASE_ADDRESS_0 + (pos << 2);
		pos += __pci_read_base(dev, pci_bar_unknown, res, reg);
	}

	if (rom) {
		struct resource *res = &dev->resource[PCI_ROM_RESOURCE];
		dev->rom_base_reg = rom;
		res->flags = IORESOURCE_MEM | IORESOURCE_PREFETCH |
				IORESOURCE_READONLY | IORESOURCE_SIZEALIGN;
		__pci_read_base(dev, pci_bar_mem32, res, rom);
	}
}

static void pci_read_bridge_io(struct pci_bus *child)
{
	struct pci_dev *dev = child->self;
	u8 io_base_lo, io_limit_lo;
	unsigned long io_mask, io_granularity, base, limit;
	struct pci_bus_region region;
	struct resource *res;

	io_mask = PCI_IO_RANGE_MASK;
	io_granularity = 0x1000;
	if (dev->io_window_1k) {
		/* Support 1K I/O space granularity */
		io_mask = PCI_IO_1K_RANGE_MASK;
		io_granularity = 0x400;
	}

	res = child->resource[0];
	pci_read_config_byte(dev, PCI_IO_BASE, &io_base_lo);
	pci_read_config_byte(dev, PCI_IO_LIMIT, &io_limit_lo);
	base = (io_base_lo & io_mask) << 8;
	limit = (io_limit_lo & io_mask) << 8;

	if ((io_base_lo & PCI_IO_RANGE_TYPE_MASK) == PCI_IO_RANGE_TYPE_32) {
		u16 io_base_hi, io_limit_hi;

		pci_read_config_word(dev, PCI_IO_BASE_UPPER16, &io_base_hi);
		pci_read_config_word(dev, PCI_IO_LIMIT_UPPER16, &io_limit_hi);
		base |= ((unsigned long) io_base_hi << 16);
		limit |= ((unsigned long) io_limit_hi << 16);
	}

	if (base <= limit) {
		res->flags = (io_base_lo & PCI_IO_RANGE_TYPE_MASK) | IORESOURCE_IO;
		region.start = base;
		region.end = limit + io_granularity - 1;
		pcibios_bus_to_resource(dev->bus, res, &region);
		dev_printk(KERN_DEBUG, &dev->dev, "  bridge window %pR\n", res);
	}
}

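/*
 * Illustrative example (not part of the original source): with the default
 * 4K granularity, an I/O base register of 0x21 and an I/O limit of 0x31
 * decode to the bus range 0x2000-0x3fff (bits 7:4 hold bits 15:12 of the
 * address, bits 3:0 encode the addressing capability).
 */
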
static void pci_read_bridge_mmio(struct pci_bus *child)
{
	struct pci_dev *dev = child->self;
	u16 mem_base_lo, mem_limit_lo;
	unsigned long base, limit;
	struct pci_bus_region region;
	struct resource *res;

	res = child->resource[1];
	pci_read_config_word(dev, PCI_MEMORY_BASE, &mem_base_lo);
	pci_read_config_word(dev, PCI_MEMORY_LIMIT, &mem_limit_lo);
	base = ((unsigned long) mem_base_lo & PCI_MEMORY_RANGE_MASK) << 16;
	limit = ((unsigned long) mem_limit_lo & PCI_MEMORY_RANGE_MASK) << 16;
	if (base <= limit) {
		res->flags = (mem_base_lo & PCI_MEMORY_RANGE_TYPE_MASK) | IORESOURCE_MEM;
		region.start = base;
		region.end = limit + 0xfffff;
		pcibios_bus_to_resource(dev->bus, res, &region);
		dev_printk(KERN_DEBUG, &dev->dev, "  bridge window %pR\n", res);
	}
}

static void pci_read_bridge_mmio_pref(struct pci_bus *child)
{
	struct pci_dev *dev = child->self;
	u16 mem_base_lo, mem_limit_lo;
	u64 base64, limit64;
	pci_bus_addr_t base, limit;
	struct pci_bus_region region;
	struct resource *res;

	res = child->resource[2];
	pci_read_config_word(dev, PCI_PREF_MEMORY_BASE, &mem_base_lo);
	pci_read_config_word(dev, PCI_PREF_MEMORY_LIMIT, &mem_limit_lo);
	base64 = (mem_base_lo & PCI_PREF_RANGE_MASK) << 16;
	limit64 = (mem_limit_lo & PCI_PREF_RANGE_MASK) << 16;

	if ((mem_base_lo & PCI_PREF_RANGE_TYPE_MASK) == PCI_PREF_RANGE_TYPE_64) {
		u32 mem_base_hi, mem_limit_hi;

		pci_read_config_dword(dev, PCI_PREF_BASE_UPPER32, &mem_base_hi);
		pci_read_config_dword(dev, PCI_PREF_LIMIT_UPPER32, &mem_limit_hi);

		/*
		 * Some bridges set the base > limit by default, and some
		 * (broken) BIOSes do not initialize them.  If we find
		 * this, just assume they are not being used.
		 */
		if (mem_base_hi <= mem_limit_hi) {
			base64 |= (u64) mem_base_hi << 32;
			limit64 |= (u64) mem_limit_hi << 32;
		}
	}

	base = (pci_bus_addr_t) base64;
	limit = (pci_bus_addr_t) limit64;

	if (base != base64) {
		dev_err(&dev->dev, "can't handle bridge window above 4GB (bus address %#010llx)\n",
			(unsigned long long) base64);
		return;
	}

	if (base <= limit) {
		res->flags = (mem_base_lo & PCI_PREF_RANGE_TYPE_MASK) |
			IORESOURCE_MEM | IORESOURCE_PREFETCH;
		if (res->flags & PCI_PREF_RANGE_TYPE_64)
			res->flags |= IORESOURCE_MEM_64;
		region.start = base;
		region.end = limit + 0xfffff;
		pcibios_bus_to_resource(dev->bus, res, &region);
		dev_printk(KERN_DEBUG, &dev->dev, "  bridge window %pR\n", res);
	}
}

void pci_read_bridge_bases(struct pci_bus *child)
{
	struct pci_dev *dev = child->self;
	struct resource *res;
	int i;

	if (pci_is_root_bus(child))	/* It's a host bus, nothing to read */
		return;

	dev_info(&dev->dev, "PCI bridge to %pR%s\n",
		 &child->busn_res,
		 dev->transparent ? " (subtractive decode)" : "");

	pci_bus_remove_resources(child);
	for (i = 0; i < PCI_BRIDGE_RESOURCE_NUM; i++)
		child->resource[i] = &dev->resource[PCI_BRIDGE_RESOURCES+i];

	pci_read_bridge_io(child);
	pci_read_bridge_mmio(child);
	pci_read_bridge_mmio_pref(child);

	if (dev->transparent) {
		pci_bus_for_each_resource(child->parent, res, i) {
			if (res && res->flags) {
				pci_bus_add_resource(child, res,
						     PCI_SUBTRACTIVE_DECODE);
				dev_printk(KERN_DEBUG, &dev->dev,
					   "  bridge window %pR (subtractive decode)\n",
					   res);
			}
		}
	}
}

static struct pci_bus *pci_alloc_bus(struct pci_bus *parent)
{
	struct pci_bus *b;

	b = kzalloc(sizeof(*b), GFP_KERNEL);
	if (!b)
		return NULL;

	INIT_LIST_HEAD(&b->node);
	INIT_LIST_HEAD(&b->children);
	INIT_LIST_HEAD(&b->devices);
	INIT_LIST_HEAD(&b->slots);
	INIT_LIST_HEAD(&b->resources);
	b->max_bus_speed = PCI_SPEED_UNKNOWN;
	b->cur_bus_speed = PCI_SPEED_UNKNOWN;
#ifdef CONFIG_PCI_DOMAINS_GENERIC
	if (parent)
		b->domain_nr = parent->domain_nr;
#endif
	return b;
}

static void devm_pci_release_host_bridge_dev(struct device *dev)
{
	struct pci_host_bridge *bridge = to_pci_host_bridge(dev);

	if (bridge->release_fn)
		bridge->release_fn(bridge);

	pci_free_resource_list(&bridge->windows);
}

static void pci_release_host_bridge_dev(struct device *dev)
{
	devm_pci_release_host_bridge_dev(dev);
	kfree(to_pci_host_bridge(dev));
}

struct pci_host_bridge *pci_alloc_host_bridge(size_t priv)
{
	struct pci_host_bridge *bridge;

	bridge = kzalloc(sizeof(*bridge) + priv, GFP_KERNEL);
	if (!bridge)
		return NULL;

	INIT_LIST_HEAD(&bridge->windows);
	bridge->dev.release = pci_release_host_bridge_dev;

	return bridge;
}
EXPORT_SYMBOL(pci_alloc_host_bridge);

struct pci_host_bridge *devm_pci_alloc_host_bridge(struct device *dev,
						   size_t priv)
{
	struct pci_host_bridge *bridge;

	bridge = devm_kzalloc(dev, sizeof(*bridge) + priv, GFP_KERNEL);
	if (!bridge)
		return NULL;

	INIT_LIST_HEAD(&bridge->windows);
	bridge->dev.release = devm_pci_release_host_bridge_dev;

	return bridge;
}
EXPORT_SYMBOL(devm_pci_alloc_host_bridge);

void pci_free_host_bridge(struct pci_host_bridge *bridge)
{
	pci_free_resource_list(&bridge->windows);

	kfree(bridge);
}
EXPORT_SYMBOL(pci_free_host_bridge);

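/*
 * Rough usage sketch (an assumption about a typical caller, not code from
 * this file): a host controller driver allocates the bridge with private
 * data, fills in its windows and ops, and then registers it, e.g.:
 *
 *	bridge = devm_pci_alloc_host_bridge(dev, sizeof(*port));
 *	if (!bridge)
 *		return -ENOMEM;
 *	list_splice_init(&resources, &bridge->windows);
 *	bridge->sysdata = port;
 *	bridge->ops = &my_pcie_ops;
 *
 * "port", "resources" and "my_pcie_ops" above are hypothetical names used
 * only for illustration.
 */
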
static const unsigned char pcix_bus_speed[] = {
	PCI_SPEED_UNKNOWN,		/* 0 */
	PCI_SPEED_66MHz_PCIX,		/* 1 */
	PCI_SPEED_100MHz_PCIX,		/* 2 */
	PCI_SPEED_133MHz_PCIX,		/* 3 */
	PCI_SPEED_UNKNOWN,		/* 4 */
	PCI_SPEED_66MHz_PCIX_ECC,	/* 5 */
	PCI_SPEED_100MHz_PCIX_ECC,	/* 6 */
	PCI_SPEED_133MHz_PCIX_ECC,	/* 7 */
	PCI_SPEED_UNKNOWN,		/* 8 */
	PCI_SPEED_66MHz_PCIX_266,	/* 9 */
	PCI_SPEED_100MHz_PCIX_266,	/* A */
	PCI_SPEED_133MHz_PCIX_266,	/* B */
	PCI_SPEED_UNKNOWN,		/* C */
	PCI_SPEED_66MHz_PCIX_533,	/* D */
	PCI_SPEED_100MHz_PCIX_533,	/* E */
	PCI_SPEED_133MHz_PCIX_533	/* F */
};

const unsigned char pcie_link_speed[] = {
	PCI_SPEED_UNKNOWN,		/* 0 */
	PCIE_SPEED_2_5GT,		/* 1 */
	PCIE_SPEED_5_0GT,		/* 2 */
	PCIE_SPEED_8_0GT,		/* 3 */
	PCIE_SPEED_16_0GT,		/* 4 */
	PCI_SPEED_UNKNOWN,		/* 5 */
	PCI_SPEED_UNKNOWN,		/* 6 */
	PCI_SPEED_UNKNOWN,		/* 7 */
	PCI_SPEED_UNKNOWN,		/* 8 */
	PCI_SPEED_UNKNOWN,		/* 9 */
	PCI_SPEED_UNKNOWN,		/* A */
	PCI_SPEED_UNKNOWN,		/* B */
	PCI_SPEED_UNKNOWN,		/* C */
	PCI_SPEED_UNKNOWN,		/* D */
	PCI_SPEED_UNKNOWN,		/* E */
	PCI_SPEED_UNKNOWN		/* F */
};

void pcie_update_link_speed(struct pci_bus *bus, u16 linksta)
{
	bus->cur_bus_speed = pcie_link_speed[linksta & PCI_EXP_LNKSTA_CLS];
}
EXPORT_SYMBOL_GPL(pcie_update_link_speed);

static unsigned char agp_speeds[] = {
	AGP_UNKNOWN,
	AGP_1X,
	AGP_2X,
	AGP_4X,
	AGP_8X
};

static enum pci_bus_speed agp_speed(int agp3, int agpstat)
{
	int index = 0;

	if (agpstat & 4)
		index = 3;
	else if (agpstat & 2)
		index = 2;
	else if (agpstat & 1)
		index = 1;
	else
		goto out;

	if (agp3) {
		index += 2;
		if (index == 5)
			index = 0;
	}

 out:
	return agp_speeds[index];
}

static void pci_set_bus_speed(struct pci_bus *bus)
{
	struct pci_dev *bridge = bus->self;
	int pos;

	pos = pci_find_capability(bridge, PCI_CAP_ID_AGP);
	if (!pos)
		pos = pci_find_capability(bridge, PCI_CAP_ID_AGP3);
	if (pos) {
		u32 agpstat, agpcmd;

		pci_read_config_dword(bridge, pos + PCI_AGP_STATUS, &agpstat);
		bus->max_bus_speed = agp_speed(agpstat & 8, agpstat & 7);

		pci_read_config_dword(bridge, pos + PCI_AGP_COMMAND, &agpcmd);
		bus->cur_bus_speed = agp_speed(agpstat & 8, agpcmd & 7);
	}

	pos = pci_find_capability(bridge, PCI_CAP_ID_PCIX);
	if (pos) {
		u16 status;
		enum pci_bus_speed max;

		pci_read_config_word(bridge, pos + PCI_X_BRIDGE_SSTATUS,
				     &status);

		if (status & PCI_X_SSTATUS_533MHZ) {
			max = PCI_SPEED_133MHz_PCIX_533;
		} else if (status & PCI_X_SSTATUS_266MHZ) {
			max = PCI_SPEED_133MHz_PCIX_266;
		} else if (status & PCI_X_SSTATUS_133MHZ) {
			if ((status & PCI_X_SSTATUS_VERS) == PCI_X_SSTATUS_V2)
				max = PCI_SPEED_133MHz_PCIX_ECC;
			else
				max = PCI_SPEED_133MHz_PCIX;
		} else {
			max = PCI_SPEED_66MHz_PCIX;
		}

		bus->max_bus_speed = max;
		bus->cur_bus_speed = pcix_bus_speed[
			(status & PCI_X_SSTATUS_FREQ) >> 6];

		return;
	}

	if (pci_is_pcie(bridge)) {
		u32 linkcap;
		u16 linksta;

		pcie_capability_read_dword(bridge, PCI_EXP_LNKCAP, &linkcap);
		bus->max_bus_speed = pcie_link_speed[linkcap & PCI_EXP_LNKCAP_SLS];

		pcie_capability_read_word(bridge, PCI_EXP_LNKSTA, &linksta);
		pcie_update_link_speed(bus, linksta);
	}
}

static struct irq_domain *pci_host_bridge_msi_domain(struct pci_bus *bus)
{
	struct irq_domain *d;

	/*
	 * Any firmware interface that can resolve the msi_domain
	 * should be called from here.
	 */
	d = pci_host_bridge_of_msi_domain(bus);
	if (!d)
		d = pci_host_bridge_acpi_msi_domain(bus);

#ifdef CONFIG_PCI_MSI_IRQ_DOMAIN
	/*
	 * If no IRQ domain was found via the OF tree, try looking it up
	 * directly through the fwnode_handle.
	 */
	if (!d) {
		struct fwnode_handle *fwnode = pci_root_bus_fwnode(bus);

		if (fwnode)
			d = irq_find_matching_fwnode(fwnode,
						     DOMAIN_BUS_PCI_MSI);
	}
#endif

	return d;
}

static void pci_set_bus_msi_domain(struct pci_bus *bus)
{
	struct irq_domain *d;
	struct pci_bus *b;

	/*
	 * The bus can be a root bus, a subordinate bus, or a virtual bus
	 * created by an SR-IOV device.  Walk up to the first bridge device
	 * found or derive the domain from the host bridge.
	 */
	for (b = bus, d = NULL; !d && !pci_is_root_bus(b); b = b->parent) {
		if (b->self)
			d = dev_get_msi_domain(&b->self->dev);
	}

	if (!d)
		d = pci_host_bridge_msi_domain(b);

	dev_set_msi_domain(&bus->dev, d);
}

static int pci_register_host_bridge(struct pci_host_bridge *bridge)
{
	struct device *parent = bridge->dev.parent;
	struct resource_entry *window, *n;
	struct pci_bus *bus, *b;
	resource_size_t offset;
	LIST_HEAD(resources);
	struct resource *res;
	char addr[64], *fmt;
	const char *name;
	int err;

	bus = pci_alloc_bus(NULL);
	if (!bus)
		return -ENOMEM;

	bridge->bus = bus;

	/* temporarily move resources off the list */
	list_splice_init(&bridge->windows, &resources);
	bus->sysdata = bridge->sysdata;
	bus->msi = bridge->msi;
	bus->ops = bridge->ops;
	bus->number = bus->busn_res.start = bridge->busnr;
#ifdef CONFIG_PCI_DOMAINS_GENERIC
	bus->domain_nr = pci_bus_find_domain_nr(bus, parent);
#endif

	b = pci_find_bus(pci_domain_nr(bus), bridge->busnr);
	if (b) {
		/* If we already got to this bus through a different bridge, ignore it */
		dev_dbg(&b->dev, "bus already known\n");
		err = -EEXIST;
		goto free;
	}

	dev_set_name(&bridge->dev, "pci%04x:%02x", pci_domain_nr(bus),
		     bridge->busnr);

	err = pcibios_root_bridge_prepare(bridge);
	if (err)
		goto free;

	err = device_register(&bridge->dev);
	if (err)
		put_device(&bridge->dev);

	bus->bridge = get_device(&bridge->dev);
	device_enable_async_suspend(bus->bridge);
	pci_set_bus_of_node(bus);
	pci_set_bus_msi_domain(bus);

	if (!parent)
		set_dev_node(bus->bridge, pcibus_to_node(bus));

	bus->dev.class = &pcibus_class;
	bus->dev.parent = bus->bridge;

	dev_set_name(&bus->dev, "%04x:%02x", pci_domain_nr(bus), bus->number);
	name = dev_name(&bus->dev);

	err = device_register(&bus->dev);
	if (err)
		goto unregister;

	pcibios_add_bus(bus);

	/* Create legacy_io and legacy_mem files for this bus */
	pci_create_legacy_files(bus);

	if (parent)
		dev_info(parent, "PCI host bridge to bus %s\n", name);
	else
		pr_info("PCI host bridge to bus %s\n", name);

	/* Add initial resources to the bus */
	resource_list_for_each_entry_safe(window, n, &resources) {
		list_move_tail(&window->node, &bridge->windows);
		offset = window->offset;
		res = window->res;

		if (res->flags & IORESOURCE_BUS)
			pci_bus_insert_busn_res(bus, bus->number, res->end);
		else
			pci_bus_add_resource(bus, res, 0);

		if (offset) {
			if (resource_type(res) == IORESOURCE_IO)
				fmt = " (bus address [%#06llx-%#06llx])";
			else
				fmt = " (bus address [%#010llx-%#010llx])";

			snprintf(addr, sizeof(addr), fmt,
				 (unsigned long long)(res->start - offset),
				 (unsigned long long)(res->end - offset));
		} else
			addr[0] = '\0';

		dev_info(&bus->dev, "root bus resource %pR%s\n", res, addr);
	}

	down_write(&pci_bus_sem);
	list_add_tail(&bus->node, &pci_root_buses);
	up_write(&pci_bus_sem);

	return 0;

unregister:
	put_device(&bridge->dev);
	device_unregister(&bridge->dev);

free:
	kfree(bus);
	return err;
}

static struct pci_bus *pci_alloc_child_bus(struct pci_bus *parent,
					   struct pci_dev *bridge, int busnr)
{
	struct pci_bus *child;
	int i;
	int ret;

	/*
	 * Allocate a new bus, and inherit stuff from the parent..
	 */
	child = pci_alloc_bus(parent);
	if (!child)
		return NULL;

	child->parent = parent;
	child->ops = parent->ops;
	child->msi = parent->msi;
	child->sysdata = parent->sysdata;
	child->bus_flags = parent->bus_flags;

	/* initialize some portions of the bus device, but don't register it
	 * now as the parent is not properly set up yet.
	 */
	child->dev.class = &pcibus_class;
	dev_set_name(&child->dev, "%04x:%02x", pci_domain_nr(child), busnr);

	/*
	 * Set up the primary, secondary and subordinate
	 * bus numbers.
	 */
	child->number = child->busn_res.start = busnr;
	child->primary = parent->busn_res.start;
	child->busn_res.end = 0xff;

	if (!bridge) {
		child->dev.parent = parent->bridge;
		goto add_dev;
	}

	child->self = bridge;
	child->bridge = get_device(&bridge->dev);
	child->dev.parent = child->bridge;
	pci_set_bus_of_node(child);
	pci_set_bus_speed(child);

	/* Set up default resource pointers and names.. */
	for (i = 0; i < PCI_BRIDGE_RESOURCE_NUM; i++) {
		child->resource[i] = &bridge->resource[PCI_BRIDGE_RESOURCES+i];
		child->resource[i]->name = child->name;
	}
	bridge->subordinate = child;

add_dev:
	pci_set_bus_msi_domain(child);
	ret = device_register(&child->dev);
	WARN_ON(ret < 0);

	pcibios_add_bus(child);

	if (child->ops->add_bus) {
		ret = child->ops->add_bus(child);
		if (WARN_ON(ret < 0))
			dev_err(&child->dev, "failed to add bus: %d\n", ret);
	}

	/* Create legacy_io and legacy_mem files for this bus */
	pci_create_legacy_files(child);

	return child;
}

struct pci_bus *pci_add_new_bus(struct pci_bus *parent, struct pci_dev *dev,
				int busnr)
{
	struct pci_bus *child;

	child = pci_alloc_child_bus(parent, dev, busnr);
	if (child) {
		down_write(&pci_bus_sem);
		list_add_tail(&child->node, &parent->children);
		up_write(&pci_bus_sem);
	}
	return child;
}
EXPORT_SYMBOL(pci_add_new_bus);

static void pci_enable_crs(struct pci_dev *pdev)
{
	u16 root_cap = 0;

	/* Enable CRS Software Visibility if supported */
	pcie_capability_read_word(pdev, PCI_EXP_RTCAP, &root_cap);
	if (root_cap & PCI_EXP_RTCAP_CRSVIS)
		pcie_capability_set_word(pdev, PCI_EXP_RTCTL,
					 PCI_EXP_RTCTL_CRSSVE);
}

static unsigned int pci_scan_child_bus_extend(struct pci_bus *bus,
					      unsigned int available_buses);

/*
 * pci_scan_bridge_extend() - Scan buses behind a bridge
 * @bus: Parent bus the bridge is on
 * @dev: Bridge itself
 * @max: Starting subordinate number of buses behind this bridge
 * @available_buses: Total number of buses available for this bridge and
 *		     the devices below.  After the minimal bus space has
 *		     been allocated the remaining buses will be
 *		     distributed equally between hotplug-capable bridges.
 * @pass: Either %0 (scan already configured bridges) or %1 (scan bridges
 *	  that need to be reconfigured).
 *
 * If it's a bridge, configure it and scan the bus behind it.
 * For CardBus bridges, we don't scan behind as the devices will
 * be handled by the bridge driver itself.
 *
 * We need to process bridges in two passes -- first we scan those
 * already configured by the BIOS and after we are done with all of
 * them, we proceed to assigning numbers to the remaining buses in
 * order to avoid overlaps between old and new bus numbers.
 */
static int pci_scan_bridge_extend(struct pci_bus *bus, struct pci_dev *dev,
				  int max, unsigned int available_buses,
				  int pass)
{
	struct pci_bus *child;
	int is_cardbus = (dev->hdr_type == PCI_HEADER_TYPE_CARDBUS);
	u32 buses, i, j = 0;
	u16 bctl;
	u8 primary, secondary, subordinate;
	int broken = 0;

	/*
	 * Make sure the bridge is powered on to be able to access config
	 * space of devices below it.
	 */
	pm_runtime_get_sync(&dev->dev);

	pci_read_config_dword(dev, PCI_PRIMARY_BUS, &buses);
	primary = buses & 0xFF;
	secondary = (buses >> 8) & 0xFF;
	subordinate = (buses >> 16) & 0xFF;

	dev_dbg(&dev->dev, "scanning [bus %02x-%02x] behind bridge, pass %d\n",
		secondary, subordinate, pass);

	if (!primary && (primary != bus->number) && secondary && subordinate) {
		dev_warn(&dev->dev, "Primary bus is hard wired to 0\n");
		primary = bus->number;
	}

	/* Check if setup is sensible at all */
	if (!pass &&
	    (primary != bus->number || secondary <= bus->number ||
	     secondary > subordinate)) {
		dev_info(&dev->dev, "bridge configuration invalid ([bus %02x-%02x]), reconfiguring\n",
			 secondary, subordinate);
		broken = 1;
	}

	/* Disable MasterAbortMode during probing to avoid reporting
	   of bus errors (in some architectures) */
	pci_read_config_word(dev, PCI_BRIDGE_CONTROL, &bctl);
	pci_write_config_word(dev, PCI_BRIDGE_CONTROL,
			      bctl & ~PCI_BRIDGE_CTL_MASTER_ABORT);

	pci_enable_crs(dev);

	if ((secondary || subordinate) && !pcibios_assign_all_busses() &&
	    !is_cardbus && !broken) {
		unsigned int cmax;
		/*
		 * Bus already configured by firmware, process it in the first
		 * pass and just note the configuration.
		 */
		if (pass)
			goto out;

		/*
		 * The bus might already exist for two reasons: Either we are
		 * rescanning the bus or the bus is reachable through more than
		 * one bridge. The second case can happen with the i450NX
		 * chipset.
		 */
		child = pci_find_bus(pci_domain_nr(bus), secondary);
		if (!child) {
			child = pci_add_new_bus(bus, dev, secondary);
			if (!child)
				goto out;
			child->primary = primary;
			pci_bus_insert_busn_res(child, secondary, subordinate);
			child->bridge_ctl = bctl;
		}

		cmax = pci_scan_child_bus(child);
		if (cmax > subordinate)
			dev_warn(&dev->dev, "bridge has subordinate %02x but max busn %02x\n",
				 subordinate, cmax);
		/* subordinate should equal child->busn_res.end */
		if (subordinate > max)
			max = subordinate;
	} else {
		/*
		 * We need to assign a number to this bus which we always
		 * do in the second pass.
		 */
		if (!pass) {
			if (pcibios_assign_all_busses() || broken || is_cardbus)
				/* Temporarily disable forwarding of the
				   configuration cycles on all bridges in
				   this bus segment to avoid possible
				   conflicts in the second pass between two
				   bridges programmed with overlapping
				   bus ranges. */
				pci_write_config_dword(dev, PCI_PRIMARY_BUS,
						       buses & ~0xffffff);
			goto out;
		}

		/* Clear errors */
		pci_write_config_word(dev, PCI_STATUS, 0xffff);

		/* Prevent assigning a bus number that already exists.
		 * This can happen when a bridge is hot-plugged, so in
		 * this case we only re-scan this bus. */
		child = pci_find_bus(pci_domain_nr(bus), max+1);
		if (!child) {
			child = pci_add_new_bus(bus, dev, max+1);
			if (!child)
				goto out;
			pci_bus_insert_busn_res(child, max+1,
						bus->busn_res.end);
		}
		max++;
		if (available_buses)
			available_buses--;

		buses = (buses & 0xff000000)
		      | ((unsigned int)(child->primary)        <<  0)
		      | ((unsigned int)(child->busn_res.start) <<  8)
		      | ((unsigned int)(child->busn_res.end)   << 16);

		/*
		 * yenta.c forces a secondary latency timer of 176.
		 * Copy that behaviour here.
		 */
		if (is_cardbus) {
			buses &= ~0xff000000;
			buses |= CARDBUS_LATENCY_TIMER << 24;
		}

		/*
		 * We need to blast all three values with a single write.
		 */
		pci_write_config_dword(dev, PCI_PRIMARY_BUS, buses);

		if (!is_cardbus) {
			child->bridge_ctl = bctl;
			max = pci_scan_child_bus_extend(child, available_buses);
		} else {
			/*
			 * For CardBus bridges, we leave 4 bus numbers
			 * as cards with a PCI-to-PCI bridge can be
			 * inserted later.
			 */
			for (i = 0; i < CARDBUS_RESERVE_BUSNR; i++) {
				struct pci_bus *parent = bus;
				if (pci_find_bus(pci_domain_nr(bus),
						 max+i+1))
					break;
				while (parent->parent) {
					if ((!pcibios_assign_all_busses()) &&
					    (parent->busn_res.end > max) &&
					    (parent->busn_res.end <= max+i)) {
						j = 1;
					}
					parent = parent->parent;
				}
				if (j) {
					/*
					 * Often, there are two cardbus bridges
					 * -- try to leave one valid bus number
					 * for each one.
					 */
					i /= 2;
					break;
				}
			}
			max += i;
		}
		/*
		 * Set the subordinate bus number to its real value.
		 */
		pci_bus_update_busn_res_end(child, max);
		pci_write_config_byte(dev, PCI_SUBORDINATE_BUS, max);
	}

	sprintf(child->name,
		(is_cardbus ? "PCI CardBus %04x:%02x" : "PCI Bus %04x:%02x"),
		pci_domain_nr(bus), child->number);

	/* Has only triggered on CardBus, fixup is in yenta_socket */
	while (bus->parent) {
		if ((child->busn_res.end > bus->busn_res.end) ||
		    (child->number > bus->busn_res.end) ||
		    (child->number < bus->number) ||
		    (child->busn_res.end < bus->number)) {
			dev_info(&child->dev, "%pR %s hidden behind%s bridge %s %pR\n",
				 &child->busn_res,
				 (bus->number > child->busn_res.end &&
				  bus->busn_res.end < child->number) ?
					"wholly" : "partially",
				 bus->self->transparent ? " transparent" : "",
				 dev_name(&bus->dev),
				 &bus->busn_res);
		}
		bus = bus->parent;
	}

out:
	pci_write_config_word(dev, PCI_BRIDGE_CONTROL, bctl);

	pm_runtime_put(&dev->dev);

	return max;
}

/*
 * pci_scan_bridge() - Scan buses behind a bridge
 * @bus: Parent bus the bridge is on
 * @dev: Bridge itself
 * @max: Starting subordinate number of buses behind this bridge
 * @pass: Either %0 (scan already configured bridges) or %1 (scan bridges
 *	  that need to be reconfigured).
 *
 * If it's a bridge, configure it and scan the bus behind it.
 * For CardBus bridges, we don't scan behind as the devices will
 * be handled by the bridge driver itself.
 *
 * We need to process bridges in two passes -- first we scan those
 * already configured by the BIOS and after we are done with all of
 * them, we proceed to assigning numbers to the remaining buses in
 * order to avoid overlaps between old and new bus numbers.
 */
int pci_scan_bridge(struct pci_bus *bus, struct pci_dev *dev, int max, int pass)
{
	return pci_scan_bridge_extend(bus, dev, max, 0, pass);
}
EXPORT_SYMBOL(pci_scan_bridge);

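/*
 * Rough sketch of how a caller typically drives the two passes (an
 * assumption for illustration, not code from this file):
 *
 *	int pass, max = bus->busn_res.start;
 *	struct pci_dev *dev;
 *
 *	for (pass = 0; pass < 2; pass++)
 *		list_for_each_entry(dev, &bus->devices, bus_list)
 *			if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE ||
 *			    dev->hdr_type == PCI_HEADER_TYPE_CARDBUS)
 *				max = pci_scan_bridge(bus, dev, max, pass);
 *
 * Pass 0 only records bridges the firmware already configured; pass 1
 * assigns numbers to the remaining ones.
 */
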
/*
 * Read interrupt line and base address registers.
 * The architecture-dependent code can tweak these, of course.
 */
static void pci_read_irq(struct pci_dev *dev)
{
	unsigned char irq;

	pci_read_config_byte(dev, PCI_INTERRUPT_PIN, &irq);
	dev->pin = irq;
	if (irq)
		pci_read_config_byte(dev, PCI_INTERRUPT_LINE, &irq);
	dev->irq = irq;
}

void set_pcie_port_type(struct pci_dev *pdev)
{
	int pos;
	u16 reg16;
	int type;
	struct pci_dev *parent;

	pos = pci_find_capability(pdev, PCI_CAP_ID_EXP);
	if (!pos)
		return;

	pdev->pcie_cap = pos;
	pci_read_config_word(pdev, pos + PCI_EXP_FLAGS, &reg16);
	pdev->pcie_flags_reg = reg16;
	pci_read_config_word(pdev, pos + PCI_EXP_DEVCAP, &reg16);
	pdev->pcie_mpss = reg16 & PCI_EXP_DEVCAP_PAYLOAD;

	/*
	 * A Root Port or a PCI-to-PCIe bridge is always the upstream end
	 * of a Link.  No PCIe component has two Links.  Two Links are
	 * connected by a Switch that has a Port on each Link and internal
	 * logic to connect the two Ports.
	 */
	type = pci_pcie_type(pdev);
	if (type == PCI_EXP_TYPE_ROOT_PORT ||
	    type == PCI_EXP_TYPE_PCIE_BRIDGE)
		pdev->has_secondary_link = 1;
	else if (type == PCI_EXP_TYPE_UPSTREAM ||
		 type == PCI_EXP_TYPE_DOWNSTREAM) {
		parent = pci_upstream_bridge(pdev);

		/*
		 * Usually there's an upstream device (Root Port or Switch
		 * Downstream Port), but we can't assume one exists.
		 */
		if (parent && !parent->has_secondary_link)
			pdev->has_secondary_link = 1;
	}
}

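/*
 * Illustrative example (not part of the original source): a Root Port and a
 * Switch Downstream Port each sit on the upstream end of the Link below
 * them, so they get has_secondary_link set; a Switch Upstream Port shares
 * its Link with the Downstream Port above it, which already owns that Link,
 * so it is left clear.
 */
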
void set_pcie_hotplug_bridge(struct pci_dev *pdev)
{
	u32 reg32;

	pcie_capability_read_dword(pdev, PCI_EXP_SLTCAP, &reg32);
	if (reg32 & PCI_EXP_SLTCAP_HPC)
		pdev->is_hotplug_bridge = 1;
}

static void set_pcie_thunderbolt(struct pci_dev *dev)
{
	int vsec = 0;
	u32 header;

	while ((vsec = pci_find_next_ext_capability(dev, vsec,
						    PCI_EXT_CAP_ID_VNDR))) {
		pci_read_config_dword(dev, vsec + PCI_VNDR_HEADER, &header);

		/* Is the device part of a Thunderbolt controller? */
		if (dev->vendor == PCI_VENDOR_ID_INTEL &&
		    PCI_VNDR_HEADER_ID(header) == PCI_VSEC_ID_INTEL_TBT) {
			dev->is_thunderbolt = 1;
			return;
		}
	}
}

static void set_pcie_untrusted(struct pci_dev *dev)
{
	struct pci_dev *parent;

	/*
	 * If the upstream bridge is untrusted we treat this device
	 * as untrusted as well.
	 */
	parent = pci_upstream_bridge(dev);
	if (parent && parent->untrusted)
		dev->untrusted = true;
}

/**
 * pci_ext_cfg_is_aliased - is ext config space just an alias of std config?
 * @dev: PCI device
 *
 * PCI Express to PCI/PCI-X Bridge Specification, rev 1.0, 4.1.4 says that
 * when forwarding a type1 configuration request the bridge must check that
 * the extended register address field is zero.  The bridge is not permitted
 * to forward the transactions and must handle it as an Unsupported Request.
 * Some bridges do not follow this rule and simply drop the extended register
 * bits, resulting in the standard config space being aliased, every 256
 * bytes across the entire configuration space.  Test for this condition by
 * comparing the first dword of each potential alias to the vendor/device ID.
 * Known offenders:
 *   ASM1083/1085 PCIe-to-PCI Reversible Bridge (1b21:1080, rev 01 & 03)
 *   AMD/ATI SBx00 PCI to PCI Bridge (1002:4384, rev 40)
 */
static bool pci_ext_cfg_is_aliased(struct pci_dev *dev)
{
#ifdef CONFIG_PCI_QUIRKS
	int pos;
	u32 header, tmp;

	pci_read_config_dword(dev, PCI_VENDOR_ID, &header);

	for (pos = PCI_CFG_SPACE_SIZE;
	     pos < PCI_CFG_SPACE_EXP_SIZE; pos += PCI_CFG_SPACE_SIZE) {
		if (pci_read_config_dword(dev, pos, &tmp) != PCIBIOS_SUCCESSFUL
		    || header != tmp)
			return false;
	}

	return true;
#else
	return false;
#endif
}

/**
 * pci_cfg_space_size - get the configuration space size of the PCI device.
 * @dev: PCI device
 *
 * Regular PCI devices have 256 bytes, but PCI-X 2 and PCI Express devices
 * have 4096 bytes.  Even if the device is capable, that doesn't mean we can
 * access it.  Maybe we don't have a way to generate extended config space
 * accesses, or the device is behind a reverse Express bridge.  So we try
 * reading the dword at 0x100 which must either be 0 or a valid extended
 * capability header.
 */
static int pci_cfg_space_size_ext(struct pci_dev *dev)
{
	u32 status;
	int pos = PCI_CFG_SPACE_SIZE;

	if (pci_read_config_dword(dev, pos, &status) != PCIBIOS_SUCCESSFUL)
		return PCI_CFG_SPACE_SIZE;
	if (status == 0xffffffff || pci_ext_cfg_is_aliased(dev))
		return PCI_CFG_SPACE_SIZE;

	return PCI_CFG_SPACE_EXP_SIZE;
}

int pci_cfg_space_size(struct pci_dev *dev)
{
	int pos;
	u32 status;
	u16 class;

	class = dev->class >> 8;
	if (class == PCI_CLASS_BRIDGE_HOST)
		return pci_cfg_space_size_ext(dev);

	if (pci_is_pcie(dev))
		return pci_cfg_space_size_ext(dev);

	pos = pci_find_capability(dev, PCI_CAP_ID_PCIX);
	if (!pos)
		return PCI_CFG_SPACE_SIZE;

	pci_read_config_dword(dev, pos + PCI_X_STATUS, &status);
	if (status & (PCI_X_STATUS_266MHZ | PCI_X_STATUS_533MHZ))
		return pci_cfg_space_size_ext(dev);

	return PCI_CFG_SPACE_SIZE;
}

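/*
 * Illustrative example (not part of the original source): a conventional
 * PCI device with no PCI-X capability gets PCI_CFG_SPACE_SIZE (256 bytes),
 * while a PCI Express endpoint whose dword at offset 0x100 reads back
 * something other than 0xffffffff (and is not aliased) gets
 * PCI_CFG_SPACE_EXP_SIZE (4096 bytes).
 */
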
#define LEGACY_IO_RESOURCE	(IORESOURCE_IO | IORESOURCE_PCI_FIXED)

static void pci_msi_setup_pci_dev(struct pci_dev *dev)
{
	/*
	 * Disable the MSI hardware to avoid screaming interrupts
	 * during boot.  This is the power on reset default so
	 * usually this should be a noop.
	 */
	dev->msi_cap = pci_find_capability(dev, PCI_CAP_ID_MSI);
	if (dev->msi_cap)
		pci_msi_set_enable(dev, 0);

	dev->msix_cap = pci_find_capability(dev, PCI_CAP_ID_MSIX);
	if (dev->msix_cap)
		pci_msix_clear_and_set_ctrl(dev, PCI_MSIX_FLAGS_ENABLE, 0);
}

/**
 * pci_intx_mask_broken - test PCI_COMMAND_INTX_DISABLE writability
 * @dev: PCI device
 *
 * Test whether PCI_COMMAND_INTX_DISABLE is writable for @dev.  Check this
 * at enumeration-time to avoid modifying PCI_COMMAND at run-time.
 */
static int pci_intx_mask_broken(struct pci_dev *dev)
{
	u16 orig, toggle, new;

	pci_read_config_word(dev, PCI_COMMAND, &orig);
	toggle = orig ^ PCI_COMMAND_INTX_DISABLE;
	pci_write_config_word(dev, PCI_COMMAND, toggle);
	pci_read_config_word(dev, PCI_COMMAND, &new);

	pci_write_config_word(dev, PCI_COMMAND, orig);

	/*
	 * PCI_COMMAND_INTX_DISABLE was reserved and read-only prior to PCI
	 * r2.3, so strictly speaking, a device is not *broken* if it's not
	 * writable.  But we'll live with the misnomer for now.
	 */
	if (new != toggle)
		return 1;
	return 0;
}

/**
 * pci_setup_device - fill in class and map information of a device
 * @dev: the device structure to fill
 *
 * Initialize the device structure with information about the device's
 * vendor, class, memory and IO-space addresses, IRQ lines etc.
 * Called at initialisation of the PCI subsystem and by CardBus services.
 * Returns 0 on success and negative if unknown type of device (not normal,
 * bridge or CardBus).
 */
int pci_setup_device(struct pci_dev *dev)
{
	u32 class;
	u16 cmd;
	u8 hdr_type;
	int pos = 0;
	struct pci_bus_region region;
	struct resource *res;

	if (pci_read_config_byte(dev, PCI_HEADER_TYPE, &hdr_type))
		return -EIO;

	dev->sysdata = dev->bus->sysdata;
	dev->dev.parent = dev->bus->bridge;
	dev->dev.bus = &pci_bus_type;
	dev->hdr_type = hdr_type & 0x7f;
	dev->multifunction = !!(hdr_type & 0x80);
	dev->error_state = pci_channel_io_normal;
	set_pcie_port_type(dev);

	pci_dev_assign_slot(dev);
	/* Assume 32-bit PCI; let 64-bit PCI cards (which are far rarer)
	   set this higher, assuming the system even supports it.  */
	dev->dma_mask = 0xffffffff;

	dev_set_name(&dev->dev, "%04x:%02x:%02x.%d", pci_domain_nr(dev->bus),
		     dev->bus->number, PCI_SLOT(dev->devfn),
		     PCI_FUNC(dev->devfn));

	pci_read_config_dword(dev, PCI_CLASS_REVISION, &class);
	dev->revision = class & 0xff;
	dev->class = class >> 8;		    /* upper 3 bytes */

	dev_printk(KERN_DEBUG, &dev->dev, "[%04x:%04x] type %02x class %#08x\n",
		   dev->vendor, dev->device, dev->hdr_type, dev->class);

	/* need to have dev->class ready */
	dev->cfg_size = pci_cfg_space_size(dev);

	/* need to have dev->cfg_size ready */
	set_pcie_thunderbolt(dev);

	set_pcie_untrusted(dev);

	/* "Unknown power state" */
	dev->current_state = PCI_UNKNOWN;

	/* Early fixups, before probing the BARs */
	pci_fixup_device(pci_fixup_early, dev);
	/* device class may be changed after fixup */
	class = dev->class >> 8;

	if (dev->non_compliant_bars) {
		pci_read_config_word(dev, PCI_COMMAND, &cmd);
		if (cmd & (PCI_COMMAND_IO | PCI_COMMAND_MEMORY)) {
			dev_info(&dev->dev, "device has non-compliant BARs; disabling IO/MEM decoding\n");
			cmd &= ~PCI_COMMAND_IO;
			cmd &= ~PCI_COMMAND_MEMORY;
			pci_write_config_word(dev, PCI_COMMAND, cmd);
		}
	}

	dev->broken_intx_masking = pci_intx_mask_broken(dev);

	switch (dev->hdr_type) {		    /* header type */
	case PCI_HEADER_TYPE_NORMAL:		    /* standard header */
		if (class == PCI_CLASS_BRIDGE_PCI)
			goto bad;
		pci_read_irq(dev);
		pci_read_bases(dev, 6, PCI_ROM_ADDRESS);
		pci_read_config_word(dev, PCI_SUBSYSTEM_VENDOR_ID, &dev->subsystem_vendor);
		pci_read_config_word(dev, PCI_SUBSYSTEM_ID, &dev->subsystem_device);

		/*
		 * Do the ugly legacy mode stuff here rather than broken chip
		 * quirk code.  Legacy mode ATA controllers have fixed
		 * addresses.  These are not always echoed in BAR0-3, and
		 * BAR0-3 in a few cases contain junk!
		 */
		if (class == PCI_CLASS_STORAGE_IDE) {
			u8 progif;
			pci_read_config_byte(dev, PCI_CLASS_PROG, &progif);
			if ((progif & 1) == 0) {
				region.start = 0x1F0;
				region.end = 0x1F7;
				res = &dev->resource[0];
				res->flags = LEGACY_IO_RESOURCE;
				pcibios_bus_to_resource(dev->bus, res, &region);
				dev_info(&dev->dev, "legacy IDE quirk: reg 0x10: %pR\n",
					 res);
				region.start = 0x3F6;
				region.end = 0x3F6;
				res = &dev->resource[1];
				res->flags = LEGACY_IO_RESOURCE;
				pcibios_bus_to_resource(dev->bus, res, &region);
				dev_info(&dev->dev, "legacy IDE quirk: reg 0x14: %pR\n",
					 res);
			}
			if ((progif & 4) == 0) {
				region.start = 0x170;
				region.end = 0x177;
				res = &dev->resource[2];
				res->flags = LEGACY_IO_RESOURCE;
				pcibios_bus_to_resource(dev->bus, res, &region);
				dev_info(&dev->dev, "legacy IDE quirk: reg 0x18: %pR\n",
					 res);
				region.start = 0x376;
				region.end = 0x376;
				res = &dev->resource[3];
				res->flags = LEGACY_IO_RESOURCE;
				pcibios_bus_to_resource(dev->bus, res, &region);
				dev_info(&dev->dev, "legacy IDE quirk: reg 0x1c: %pR\n",
					 res);
			}
		}
		break;

	case PCI_HEADER_TYPE_BRIDGE:		    /* bridge header */
		if (class != PCI_CLASS_BRIDGE_PCI)
			goto bad;
		/* The PCI-to-PCI bridge spec requires that subtractive
		   decoding (i.e. transparent) bridge must have programming
		   interface code of 0x01. */
		pci_read_irq(dev);
		dev->transparent = ((dev->class & 0xff) == 1);
		pci_read_bases(dev, 2, PCI_ROM_ADDRESS1);
		set_pcie_hotplug_bridge(dev);
		pos = pci_find_capability(dev, PCI_CAP_ID_SSVID);
		if (pos) {
			pci_read_config_word(dev, pos + PCI_SSVID_VENDOR_ID, &dev->subsystem_vendor);
			pci_read_config_word(dev, pos + PCI_SSVID_DEVICE_ID, &dev->subsystem_device);
		}
		break;

	case PCI_HEADER_TYPE_CARDBUS:		    /* CardBus bridge header */
		if (class != PCI_CLASS_BRIDGE_CARDBUS)
			goto bad;
		pci_read_irq(dev);
		pci_read_bases(dev, 1, 0);
		pci_read_config_word(dev, PCI_CB_SUBSYSTEM_VENDOR_ID, &dev->subsystem_vendor);
		pci_read_config_word(dev, PCI_CB_SUBSYSTEM_ID, &dev->subsystem_device);
		break;

	default:				    /* unknown header */
		dev_err(&dev->dev, "unknown header type %02x, ignoring device\n",
			dev->hdr_type);
		return -EIO;

	bad:
		dev_err(&dev->dev, "ignoring class %#08x (doesn't match header type %02x)\n",
			dev->class, dev->hdr_type);
		dev->class = PCI_CLASS_NOT_DEFINED << 8;
	}

	/* We found a fine healthy device, go go go... */
	return 0;
}

static void pci_configure_mps(struct pci_dev *dev)
{
	struct pci_dev *bridge = pci_upstream_bridge(dev);
	int mps, p_mps, rc;

	if (!pci_is_pcie(dev) || !bridge || !pci_is_pcie(bridge))
		return;

	mps = pcie_get_mps(dev);
	p_mps = pcie_get_mps(bridge);

	if (mps == p_mps)
		return;

	if (pcie_bus_config == PCIE_BUS_TUNE_OFF) {
		dev_warn(&dev->dev, "Max Payload Size %d, but upstream %s set to %d; if necessary, use \"pci=pcie_bus_safe\" and report a bug\n",
			 mps, pci_name(bridge), p_mps);
		return;
	}

	/*
	 * Fancier MPS configuration is done later by
	 * pcie_bus_configure_settings()
	 */
	if (pcie_bus_config != PCIE_BUS_DEFAULT)
		return;

	rc = pcie_set_mps(dev, p_mps);
	if (rc) {
		dev_warn(&dev->dev, "can't set Max Payload Size to %d; if necessary, use \"pci=pcie_bus_safe\" and report a bug\n",
			 p_mps);
		return;
	}

	dev_info(&dev->dev, "Max Payload Size set to %d (was %d, max %d)\n",
		 p_mps, mps, 128 << dev->pcie_mpss);
}

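/*
 * Illustrative example (not part of the original source): with the default
 * pcie_bus_config, a hot-added endpoint that comes up with MPS 128 below a
 * Root Port running at MPS 256 is simply raised to 256 here, and the
 * "Max Payload Size set to 256 (was 128, max ...)" message is logged.
 */
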
589fcc23
BH
1649static struct hpp_type0 pci_default_type0 = {
1650 .revision = 1,
1651 .cache_line_size = 8,
1652 .latency_timer = 0x40,
1653 .enable_serr = 0,
1654 .enable_perr = 0,
1655};
1656
1657static void program_hpp_type0(struct pci_dev *dev, struct hpp_type0 *hpp)
1658{
1659 u16 pci_cmd, pci_bctl;
1660
c6285fc5 1661 if (!hpp)
589fcc23 1662 hpp = &pci_default_type0;
589fcc23
BH
1663
1664 if (hpp->revision > 1) {
1665 dev_warn(&dev->dev,
1666 "PCI settings rev %d not supported; using defaults\n",
1667 hpp->revision);
1668 hpp = &pci_default_type0;
1669 }
1670
1671 pci_write_config_byte(dev, PCI_CACHE_LINE_SIZE, hpp->cache_line_size);
1672 pci_write_config_byte(dev, PCI_LATENCY_TIMER, hpp->latency_timer);
1673 pci_read_config_word(dev, PCI_COMMAND, &pci_cmd);
1674 if (hpp->enable_serr)
1675 pci_cmd |= PCI_COMMAND_SERR;
589fcc23
BH
1676 if (hpp->enable_perr)
1677 pci_cmd |= PCI_COMMAND_PARITY;
589fcc23
BH
1678 pci_write_config_word(dev, PCI_COMMAND, pci_cmd);
1679
1680 /* Program bridge control value */
1681 if ((dev->class >> 8) == PCI_CLASS_BRIDGE_PCI) {
1682 pci_write_config_byte(dev, PCI_SEC_LATENCY_TIMER,
1683 hpp->latency_timer);
1684 pci_read_config_word(dev, PCI_BRIDGE_CONTROL, &pci_bctl);
1685 if (hpp->enable_serr)
1686 pci_bctl |= PCI_BRIDGE_CTL_SERR;
589fcc23
BH
1687 if (hpp->enable_perr)
1688 pci_bctl |= PCI_BRIDGE_CTL_PARITY;
589fcc23
BH
1689 pci_write_config_word(dev, PCI_BRIDGE_CONTROL, pci_bctl);
1690 }
1691}
1692
1693static void program_hpp_type1(struct pci_dev *dev, struct hpp_type1 *hpp)
1694{
977509f7
BH
1695 int pos;
1696
1697 if (!hpp)
1698 return;
1699
1700 pos = pci_find_capability(dev, PCI_CAP_ID_PCIX);
1701 if (!pos)
1702 return;
1703
1704 dev_warn(&dev->dev, "PCI-X settings not supported\n");
589fcc23
BH
1705}
1706
e42010d8
JT
1707static bool pcie_root_rcb_set(struct pci_dev *dev)
1708{
1709 struct pci_dev *rp = pcie_find_root_port(dev);
1710 u16 lnkctl;
1711
1712 if (!rp)
1713 return false;
1714
1715 pcie_capability_read_word(rp, PCI_EXP_LNKCTL, &lnkctl);
1716 if (lnkctl & PCI_EXP_LNKCTL_RCB)
1717 return true;
1718
1719 return false;
1720}
1721
589fcc23
BH
1722static void program_hpp_type2(struct pci_dev *dev, struct hpp_type2 *hpp)
1723{
1724 int pos;
1725 u32 reg32;
1726
1727 if (!hpp)
1728 return;
1729
977509f7
BH
1730 if (!pci_is_pcie(dev))
1731 return;
1732
589fcc23
BH
1733 if (hpp->revision > 1) {
1734 dev_warn(&dev->dev, "PCIe settings rev %d not supported\n",
1735 hpp->revision);
1736 return;
1737 }
1738
302328c0
BH
1739 /*
1740 * Don't allow _HPX to change MPS or MRRS settings. We manage
1741 * those to make sure they're consistent with the rest of the
1742 * platform.
1743 */
1744 hpp->pci_exp_devctl_and |= PCI_EXP_DEVCTL_PAYLOAD |
1745 PCI_EXP_DEVCTL_READRQ;
1746 hpp->pci_exp_devctl_or &= ~(PCI_EXP_DEVCTL_PAYLOAD |
1747 PCI_EXP_DEVCTL_READRQ);
1748
589fcc23
BH
1749 /* Initialize Device Control Register */
1750 pcie_capability_clear_and_set_word(dev, PCI_EXP_DEVCTL,
1751 ~hpp->pci_exp_devctl_and, hpp->pci_exp_devctl_or);
1752
1753 /* Initialize Link Control Register */
e42010d8
JT
1754 if (pcie_cap_has_lnkctl(dev)) {
1755
1756 /*
1757 * If the Root Port supports Read Completion Boundary of
1758 * 128, set RCB to 128. Otherwise, clear it.
1759 */
1760 hpp->pci_exp_lnkctl_and |= PCI_EXP_LNKCTL_RCB;
1761 hpp->pci_exp_lnkctl_or &= ~PCI_EXP_LNKCTL_RCB;
1762 if (pcie_root_rcb_set(dev))
1763 hpp->pci_exp_lnkctl_or |= PCI_EXP_LNKCTL_RCB;
1764
589fcc23
BH
1765 pcie_capability_clear_and_set_word(dev, PCI_EXP_LNKCTL,
1766 ~hpp->pci_exp_lnkctl_and, hpp->pci_exp_lnkctl_or);
e42010d8 1767 }
589fcc23
BH
1768
1769 /* Find Advanced Error Reporting Enhanced Capability */
1770 pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR);
1771 if (!pos)
1772 return;
1773
1774 /* Initialize Uncorrectable Error Mask Register */
1775 pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_MASK, &reg32);
1776 reg32 = (reg32 & hpp->unc_err_mask_and) | hpp->unc_err_mask_or;
1777 pci_write_config_dword(dev, pos + PCI_ERR_UNCOR_MASK, reg32);
1778
1779 /* Initialize Uncorrectable Error Severity Register */
1780 pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_SEVER, &reg32);
1781 reg32 = (reg32 & hpp->unc_err_sever_and) | hpp->unc_err_sever_or;
1782 pci_write_config_dword(dev, pos + PCI_ERR_UNCOR_SEVER, reg32);
1783
1784 /* Initialize Correctable Error Mask Register */
1785 pci_read_config_dword(dev, pos + PCI_ERR_COR_MASK, &reg32);
1786 reg32 = (reg32 & hpp->cor_err_mask_and) | hpp->cor_err_mask_or;
1787 pci_write_config_dword(dev, pos + PCI_ERR_COR_MASK, reg32);
1788
1789 /* Initialize Advanced Error Capabilities and Control Register */
1790 pci_read_config_dword(dev, pos + PCI_ERR_CAP, &reg32);
1791 reg32 = (reg32 & hpp->adv_err_cap_and) | hpp->adv_err_cap_or;
675734ba
BH
1792 /* Don't enable ECRC generation or checking if unsupported */
1793 if (!(reg32 & PCI_ERR_CAP_ECRC_GENC))
1794 reg32 &= ~PCI_ERR_CAP_ECRC_GENE;
1795 if (!(reg32 & PCI_ERR_CAP_ECRC_CHKC))
1796 reg32 &= ~PCI_ERR_CAP_ECRC_CHKE;
589fcc23
BH
1797 pci_write_config_dword(dev, pos + PCI_ERR_CAP, reg32);
1798
1799 /*
1800 * FIXME: The following two registers are not supported yet.
1801 *
1802 * o Secondary Uncorrectable Error Severity Register
1803 * o Secondary Uncorrectable Error Mask Register
1804 */
1805}
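
Editorial note: the AER registers above are all updated with the same read-modify-write idiom: read the register, AND in the record's preserve mask, OR in the record's force-on bits, write it back. A standalone sketch of that pattern on an arbitrary 32-bit value; apply_and_or() is an illustrative name only.

#include <linux/types.h>

/* Illustrative helper mirroring the reg32 = (reg32 & and) | or updates above. */
static u32 apply_and_or(u32 reg, u32 and_mask, u32 or_mask)
{
        return (reg & and_mask) | or_mask;
}

/*
 * Example: apply_and_or(0x3f, 0xffffffdf, 0x01) clears bit 5 and forces
 * bit 0 on, giving 0x1f.
 */
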
1806
62ce94a7 1807int pci_configure_extended_tags(struct pci_dev *dev, void *ign)
60db3a4d 1808{
62ce94a7
SK
1809 struct pci_host_bridge *host;
1810 u32 cap;
1811 u16 ctl;
60db3a4d
SK
1812 int ret;
1813
1814 if (!pci_is_pcie(dev))
62ce94a7 1815 return 0;
60db3a4d 1816
62ce94a7 1817 ret = pcie_capability_read_dword(dev, PCI_EXP_DEVCAP, &cap);
60db3a4d 1818 if (ret)
62ce94a7
SK
1819 return 0;
1820
1821 if (!(cap & PCI_EXP_DEVCAP_EXT_TAG))
1822 return 0;
60db3a4d 1823
62ce94a7
SK
1824 ret = pcie_capability_read_word(dev, PCI_EXP_DEVCTL, &ctl);
1825 if (ret)
1826 return 0;
1827
1828 host = pci_find_host_bridge(dev->bus);
1829 if (!host)
1830 return 0;
60db3a4d 1831
62ce94a7
SK
1832 /*
1833 * If some device in the hierarchy doesn't handle Extended Tags
1834 * correctly, make sure they're disabled.
1835 */
1836 if (host->no_ext_tags) {
1837 if (ctl & PCI_EXP_DEVCTL_EXT_TAG) {
1838 dev_info(&dev->dev, "disabling Extended Tags\n");
1839 pcie_capability_clear_word(dev, PCI_EXP_DEVCTL,
1840 PCI_EXP_DEVCTL_EXT_TAG);
1841 }
1842 return 0;
1843 }
1844
1845 if (!(ctl & PCI_EXP_DEVCTL_EXT_TAG)) {
1846 dev_info(&dev->dev, "enabling Extended Tags\n");
60db3a4d
SK
1847 pcie_capability_set_word(dev, PCI_EXP_DEVCTL,
1848 PCI_EXP_DEVCTL_EXT_TAG);
62ce94a7
SK
1849 }
1850 return 0;
60db3a4d
SK
1851}
1852
a99b646a 1853/**
1854 * pcie_relaxed_ordering_enabled - Probe for PCIe relaxed ordering enable
1855 * @dev: PCI device to query
1856 *
1857 * Returns true if the device has enabled relaxed ordering attribute.
1858 */
1859bool pcie_relaxed_ordering_enabled(struct pci_dev *dev)
1860{
1861 u16 v;
1862
1863 pcie_capability_read_word(dev, PCI_EXP_DEVCTL, &v);
1864
1865 return !!(v & PCI_EXP_DEVCTL_RELAX_EN);
1866}
1867EXPORT_SYMBOL(pcie_relaxed_ordering_enabled);
1868
1869static void pci_configure_relaxed_ordering(struct pci_dev *dev)
1870{
1871 struct pci_dev *root;
1872
1873 /* PCI_EXP_DEVCTL_RELAX_EN is RsvdP in VFs */
1874 if (dev->is_virtfn)
1875 return;
1876
1877 if (!pcie_relaxed_ordering_enabled(dev))
1878 return;
1879
1880 /*
1881 * For now, we only deal with Relaxed Ordering issues with Root
1882 * Ports. Peer-to-Peer DMA is another can of worms.
1883 */
1884 root = pci_find_pcie_root_port(dev);
1885 if (!root)
1886 return;
1887
1888 if (root->dev_flags & PCI_DEV_FLAGS_NO_RELAXED_ORDERING) {
1889 pcie_capability_clear_word(dev, PCI_EXP_DEVCTL,
1890 PCI_EXP_DEVCTL_RELAX_EN);
1891 dev_info(&dev->dev, "Disable Relaxed Ordering because the Root Port didn't support it\n");
1892 }
1893}
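
Editorial note: because pci_configure_relaxed_ordering() may have cleared the enable bit at enumeration time, drivers that program DMA engines for Relaxed Ordering typically consult pcie_relaxed_ordering_enabled() instead of assuming it is on. A hypothetical driver-side sketch; struct my_dev_priv and my_driver_setup_dma() are made-up names.

#include <linux/pci.h>

/* Hypothetical per-device state for an imaginary driver. */
struct my_dev_priv {
        bool use_relaxed_ordering;
};

static void my_driver_setup_dma(struct pci_dev *pdev, struct my_dev_priv *priv)
{
        /*
         * Honour whatever enumeration decided: if the enable bit was cleared
         * (e.g. the Root Port has PCI_DEV_FLAGS_NO_RELAXED_ORDERING), issue
         * strictly ordered writes from the device instead.
         */
        priv->use_relaxed_ordering = pcie_relaxed_ordering_enabled(pdev);
}
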
1894
2b78239e
BH
1895static void pci_configure_ltr(struct pci_dev *dev)
1896{
1897#ifdef CONFIG_PCIEASPM
1898 u32 cap;
1899 struct pci_dev *bridge;
1900
1901 if (!pci_is_pcie(dev))
1902 return;
1903
1904 pcie_capability_read_dword(dev, PCI_EXP_DEVCAP2, &cap);
1905 if (!(cap & PCI_EXP_DEVCAP2_LTR))
1906 return;
1907
1908 /*
1909 * Software must not enable LTR in an Endpoint unless the Root
1910 * Complex and all intermediate Switches indicate support for LTR.
1911 * PCIe r3.1, sec 6.18.
1912 */
1913 if (pci_pcie_type(dev) == PCI_EXP_TYPE_ROOT_PORT)
1914 dev->ltr_path = 1;
1915 else {
1916 bridge = pci_upstream_bridge(dev);
1917 if (bridge && bridge->ltr_path)
1918 dev->ltr_path = 1;
1919 }
1920
1921 if (dev->ltr_path)
1922 pcie_capability_set_word(dev, PCI_EXP_DEVCTL2,
1923 PCI_EXP_DEVCTL2_LTR_EN);
1924#endif
1925}
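
Editorial note: as a concrete reading of the rule above, take the path Root Port -> Switch Upstream Port -> Switch Downstream Port -> Endpoint. ltr_path is set hop by hop during enumeration, so the Endpoint only gets PCI_EXP_DEVCTL2_LTR_EN if the Root Port and both Switch Ports were marked ltr_path first; a single bridge without LTR support keeps LTR disabled for everything below it.
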
1926
6cd33649
BH
1927static void pci_configure_device(struct pci_dev *dev)
1928{
1929 struct hotplug_params hpp;
1930 int ret;
1931
9dae3a97 1932 pci_configure_mps(dev);
62ce94a7 1933 pci_configure_extended_tags(dev, NULL);
a99b646a 1934 pci_configure_relaxed_ordering(dev);
2b78239e 1935 pci_configure_ltr(dev);
9dae3a97 1936
6cd33649
BH
1937 memset(&hpp, 0, sizeof(hpp));
1938 ret = pci_get_hp_params(dev, &hpp);
1939 if (ret)
1940 return;
1941
1942 program_hpp_type2(dev, hpp.t2);
1943 program_hpp_type1(dev, hpp.t1);
1944 program_hpp_type0(dev, hpp.t0);
1945}
1946
201de56e
ZY
1947static void pci_release_capabilities(struct pci_dev *dev)
1948{
1949 pci_vpd_release(dev);
d1b054da 1950 pci_iov_release(dev);
f796841e 1951 pci_free_cap_save_buffers(dev);
201de56e
ZY
1952}
1953
1da177e4
LT
1954/**
1955 * pci_release_dev - free a pci device structure when all users of it are finished.
1956 * @dev: device that's been disconnected
1957 *
1958 * Will be called only by the device core when all users of this pci device are
1959 * done.
1960 */
1961static void pci_release_dev(struct device *dev)
1962{
04480094 1963 struct pci_dev *pci_dev;
1da177e4 1964
04480094 1965 pci_dev = to_pci_dev(dev);
201de56e 1966 pci_release_capabilities(pci_dev);
98d9f30c 1967 pci_release_of_node(pci_dev);
6ae32c53 1968 pcibios_release_device(pci_dev);
8b1fce04 1969 pci_bus_put(pci_dev->bus);
782a985d 1970 kfree(pci_dev->driver_override);
338c3149 1971 kfree(pci_dev->dma_alias_mask);
1da177e4
LT
1972 kfree(pci_dev);
1973}
1974
3c6e6ae7 1975struct pci_dev *pci_alloc_dev(struct pci_bus *bus)
65891215
ME
1976{
1977 struct pci_dev *dev;
1978
1979 dev = kzalloc(sizeof(struct pci_dev), GFP_KERNEL);
1980 if (!dev)
1981 return NULL;
1982
65891215 1983 INIT_LIST_HEAD(&dev->bus_list);
88e7b167 1984 dev->dev.type = &pci_dev_type;
3c6e6ae7 1985 dev->bus = pci_bus_get(bus);
65891215
ME
1986
1987 return dev;
1988}
3c6e6ae7
GZ
1989EXPORT_SYMBOL(pci_alloc_dev);
1990
62bc6a6f
SK
1991static bool pci_bus_crs_vendor_id(u32 l)
1992{
1993 return (l & 0xffff) == 0x0001;
1994}
1995
6a802ef0
SK
1996static bool pci_bus_wait_crs(struct pci_bus *bus, int devfn, u32 *l,
1997 int timeout)
1da177e4 1998{
1da177e4
LT
1999 int delay = 1;
2000
6a802ef0
SK
2001 if (!pci_bus_crs_vendor_id(*l))
2002 return true; /* not a CRS completion */
1da177e4 2003
6a802ef0
SK
2004 if (!timeout)
2005 return false; /* CRS, but caller doesn't want to wait */
1da177e4 2006
89665a6a 2007 /*
6a802ef0
SK
2008 * We got the reserved Vendor ID that indicates a completion with
2009 * Configuration Request Retry Status (CRS). Retry until we get a
2010 * valid Vendor ID or we time out.
89665a6a 2011 */
62bc6a6f 2012 while (pci_bus_crs_vendor_id(*l)) {
6a802ef0 2013 if (delay > timeout) {
e78e661f
SK
2014 pr_warn("pci %04x:%02x:%02x.%d: not ready after %dms; giving up\n",
2015 pci_domain_nr(bus), bus->number,
2016 PCI_SLOT(devfn), PCI_FUNC(devfn), delay - 1);
2017
efdc87da 2018 return false;
1da177e4 2019 }
e78e661f
SK
2020 if (delay >= 1000)
2021 pr_info("pci %04x:%02x:%02x.%d: not ready after %dms; waiting\n",
2022 pci_domain_nr(bus), bus->number,
2023 PCI_SLOT(devfn), PCI_FUNC(devfn), delay - 1);
efdc87da 2024
1da177e4
LT
2025 msleep(delay);
2026 delay *= 2;
9f982756 2027
efdc87da
YL
2028 if (pci_bus_read_config_dword(bus, devfn, PCI_VENDOR_ID, l))
2029 return false;
1da177e4
LT
2030 }
2031
e78e661f
SK
2032 if (delay >= 1000)
2033 pr_info("pci %04x:%02x:%02x.%d: ready after %dms\n",
2034 pci_domain_nr(bus), bus->number,
2035 PCI_SLOT(devfn), PCI_FUNC(devfn), delay - 1);
2036
efdc87da
YL
2037 return true;
2038}
6a802ef0
SK
2039
2040bool pci_bus_read_dev_vendor_id(struct pci_bus *bus, int devfn, u32 *l,
2041 int timeout)
2042{
2043 if (pci_bus_read_config_dword(bus, devfn, PCI_VENDOR_ID, l))
2044 return false;
2045
2046 /* some broken boards return 0 or ~0 if a slot is empty: */
2047 if (*l == 0xffffffff || *l == 0x00000000 ||
2048 *l == 0x0000ffff || *l == 0xffff0000)
2049 return false;
2050
2051 if (pci_bus_crs_vendor_id(*l))
2052 return pci_bus_wait_crs(bus, devfn, l, timeout);
2053
efdc87da
YL
2054 return true;
2055}
2056EXPORT_SYMBOL(pci_bus_read_dev_vendor_id);
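
Editorial note: the exported helper above can also be used to probe a single slot by hand. The sketch below passes the same 60*1000 ms budget that pci_scan_device() uses further down, so a device still returning CRS completions gets up to a minute to come ready; slot_is_populated() is an illustrative name, not a kernel function.

#include <linux/pci.h>
#include <linux/printk.h>

/* Illustrative probe: does function 0 at this devfn answer config reads? */
static bool slot_is_populated(struct pci_bus *bus, int devfn)
{
        u32 id;

        if (!pci_bus_read_dev_vendor_id(bus, devfn, &id, 60 * 1000))
                return false;

        pr_info("found %04x:%04x at %02x:%02x.%d\n",
                id & 0xffff, id >> 16, bus->number,
                PCI_SLOT(devfn), PCI_FUNC(devfn));
        return true;
}
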
2057
2058/*
2059 * Read the config data for a PCI device, sanity-check it
2060 * and fill in the dev structure...
2061 */
2062static struct pci_dev *pci_scan_device(struct pci_bus *bus, int devfn)
2063{
2064 struct pci_dev *dev;
2065 u32 l;
2066
2067 if (!pci_bus_read_dev_vendor_id(bus, devfn, &l, 60*1000))
2068 return NULL;
2069
8b1fce04 2070 dev = pci_alloc_dev(bus);
1da177e4
LT
2071 if (!dev)
2072 return NULL;
2073
1da177e4 2074 dev->devfn = devfn;
1da177e4
LT
2075 dev->vendor = l & 0xffff;
2076 dev->device = (l >> 16) & 0xffff;
cef354db 2077
98d9f30c
BH
2078 pci_set_of_node(dev);
2079
480b93b7 2080 if (pci_setup_device(dev)) {
8b1fce04 2081 pci_bus_put(dev->bus);
1da177e4
LT
2082 kfree(dev);
2083 return NULL;
2084 }
1da177e4
LT
2085
2086 return dev;
2087}
2088
201de56e
ZY
2089static void pci_init_capabilities(struct pci_dev *dev)
2090{
938174e5
SS
2091 /* Enhanced Allocation */
2092 pci_ea_init(dev);
2093
e80e7edc
GP
2094 /* Setup MSI caps & disable MSI/MSI-X interrupts */
2095 pci_msi_setup_pci_dev(dev);
201de56e 2096
63f4898a
RW
2097 /* Buffers for saving PCIe and PCI-X capabilities */
2098 pci_allocate_cap_save_buffers(dev);
2099
201de56e
ZY
2100 /* Power Management */
2101 pci_pm_init(dev);
2102
2103 /* Vital Product Data */
f1cd93f9 2104 pci_vpd_init(dev);
58c3a727
YZ
2105
2106 /* Alternative Routing-ID Forwarding */
31ab2476 2107 pci_configure_ari(dev);
d1b054da
YZ
2108
2109 /* Single Root I/O Virtualization */
2110 pci_iov_init(dev);
ae21ee65 2111
edc90fee
BH
2112 /* Address Translation Services */
2113 pci_ats_init(dev);
2114
ae21ee65 2115 /* Enable ACS P2P upstream forwarding */
5d990b62 2116 pci_enable_acs(dev);
b07461a8 2117
9bb04a0c
JY
2118 /* Precision Time Measurement */
2119 pci_ptm_init(dev);
4dc2db09 2120
66b80809
KB
2121 /* Advanced Error Reporting */
2122 pci_aer_init(dev);
201de56e
ZY
2123}
2124
098259eb
MZ
2125/*
2126 * This is the equivalent of pci_host_bridge_msi_domain that acts on
2127 * devices. Firmware interfaces that can select the MSI domain on a
2128 * per-device basis should be called from here.
2129 */
2130static struct irq_domain *pci_dev_msi_domain(struct pci_dev *dev)
2131{
2132 struct irq_domain *d;
2133
2134 /*
2135 * If a domain has been set through the pcibios_add_device
2136 * callback, then this is the one (platform code knows best).
2137 */
2138 d = dev_get_msi_domain(&dev->dev);
2139 if (d)
2140 return d;
2141
54fa97ee
MZ
2142 /*
2143 * Let's see if we have a firmware interface able to provide
2144 * the domain.
2145 */
2146 d = pci_msi_get_device_domain(dev);
2147 if (d)
2148 return d;
2149
098259eb
MZ
2150 return NULL;
2151}
2152
44aa0c65
MZ
2153static void pci_set_msi_domain(struct pci_dev *dev)
2154{
098259eb
MZ
2155 struct irq_domain *d;
2156
44aa0c65 2157 /*
098259eb
MZ
2158 * If the platform or firmware interfaces cannot supply a
2159 * device-specific MSI domain, then inherit the default domain
2160 * from the host bridge itself.
44aa0c65 2161 */
098259eb
MZ
2162 d = pci_dev_msi_domain(dev);
2163 if (!d)
2164 d = dev_get_msi_domain(&dev->bus->dev);
2165
2166 dev_set_msi_domain(&dev->dev, d);
44aa0c65
MZ
2167}
2168
96bde06a 2169void pci_device_add(struct pci_dev *dev, struct pci_bus *bus)
1da177e4 2170{
4f535093
YL
2171 int ret;
2172
6cd33649
BH
2173 pci_configure_device(dev);
2174
cdb9b9f7
PM
2175 device_initialize(&dev->dev);
2176 dev->dev.release = pci_release_dev;
1da177e4 2177
7629d19a 2178 set_dev_node(&dev->dev, pcibus_to_node(bus));
cdb9b9f7 2179 dev->dev.dma_mask = &dev->dma_mask;
4d57cdfa 2180 dev->dev.dma_parms = &dev->dma_parms;
cdb9b9f7 2181 dev->dev.coherent_dma_mask = 0xffffffffull;
1da177e4 2182
4d57cdfa 2183 pci_set_dma_max_seg_size(dev, 65536);
59fc67de 2184 pci_set_dma_seg_boundary(dev, 0xffffffff);
4d57cdfa 2185
1da177e4
LT
2186 /* Fix up broken headers */
2187 pci_fixup_device(pci_fixup_header, dev);
2188
2069ecfb
YL
2189 /* moved out from quirk header fixup code */
2190 pci_reassigndev_resource_alignment(dev);
2191
4b77b0a2
RW
2192 /* Clear the state_saved flag. */
2193 dev->state_saved = false;
2194
201de56e
ZY
2195 /* Initialize various capabilities */
2196 pci_init_capabilities(dev);
eb9d0fe4 2197
1da177e4
LT
2198 /*
2199 * Add the device to our list of discovered devices
2200 * and the bus list for fixup functions, etc.
2201 */
d71374da 2202 down_write(&pci_bus_sem);
1da177e4 2203 list_add_tail(&dev->bus_list, &bus->devices);
d71374da 2204 up_write(&pci_bus_sem);
4f535093 2205
4f535093
YL
2206 ret = pcibios_add_device(dev);
2207 WARN_ON(ret < 0);
2208
44aa0c65
MZ
2209 /* Setup MSI irq domain */
2210 pci_set_msi_domain(dev);
2211
4f535093
YL
2212 /* Notifier could use PCI capabilities */
2213 dev->match_driver = false;
2214 ret = device_add(&dev->dev);
2215 WARN_ON(ret < 0);
cdb9b9f7
PM
2216}
2217
10874f5a 2218struct pci_dev *pci_scan_single_device(struct pci_bus *bus, int devfn)
cdb9b9f7
PM
2219{
2220 struct pci_dev *dev;
2221
90bdb311
TP
2222 dev = pci_get_slot(bus, devfn);
2223 if (dev) {
2224 pci_dev_put(dev);
2225 return dev;
2226 }
2227
cdb9b9f7
PM
2228 dev = pci_scan_device(bus, devfn);
2229 if (!dev)
2230 return NULL;
2231
2232 pci_device_add(dev, bus);
1da177e4
LT
2233
2234 return dev;
2235}
b73e9687 2236EXPORT_SYMBOL(pci_scan_single_device);
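
Editorial note: hotplug code is the usual caller of pci_scan_single_device() outside the slot-scan loop below: after a presence-detect event it scans the one function, then makes it visible with pci_bus_add_device(). A minimal sketch under the rescan/remove lock defined later in this file; resource assignment is elided and my_hot_add_function() is a made-up name.

#include <linux/pci.h>

/* Hypothetical hot-add path for one function on an already-scanned bus. */
static int my_hot_add_function(struct pci_bus *bus, int devfn)
{
        struct pci_dev *dev;

        pci_lock_rescan_remove();
        dev = pci_scan_single_device(bus, devfn); /* scan + pci_device_add() */
        if (dev)
                pci_bus_add_device(dev);          /* bind a driver, create sysfs */
        pci_unlock_rescan_remove();

        return dev ? 0 : -ENODEV;
}
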
1da177e4 2237
b1bd58e4 2238static unsigned next_fn(struct pci_bus *bus, struct pci_dev *dev, unsigned fn)
f07852d6 2239{
b1bd58e4
YW
2240 int pos;
2241 u16 cap = 0;
2242 unsigned next_fn;
4fb88c1a 2243
b1bd58e4
YW
2244 if (pci_ari_enabled(bus)) {
2245 if (!dev)
2246 return 0;
2247 pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ARI);
2248 if (!pos)
2249 return 0;
4fb88c1a 2250
b1bd58e4
YW
2251 pci_read_config_word(dev, pos + PCI_ARI_CAP, &cap);
2252 next_fn = PCI_ARI_CAP_NFN(cap);
2253 if (next_fn <= fn)
2254 return 0; /* protect against malformed list */
f07852d6 2255
b1bd58e4
YW
2256 return next_fn;
2257 }
2258
2259 /* dev may be NULL for non-contiguous multifunction devices */
2260 if (!dev || dev->multifunction)
2261 return (fn + 1) % 8;
f07852d6 2262
f07852d6
MW
2263 return 0;
2264}
2265
2266static int only_one_child(struct pci_bus *bus)
2267{
04043fd8 2268 struct pci_dev *bridge = bus->self;
284f5f9d 2269
04043fd8
BH
2270 /*
2271 * Systems with unusual topologies set PCI_SCAN_ALL_PCIE_DEVS so
2272 * we scan for all possible devices, not just Device 0.
2273 */
2274 if (pci_has_flag(PCI_SCAN_ALL_PCIE_DEVS))
f07852d6 2275 return 0;
5bbe029f
BH
2276
2277 /*
04043fd8
BH
2278 * A PCIe Downstream Port normally leads to a Link with only Device
2279 * 0 on it (PCIe spec r3.1, sec 7.3.1). As an optimization, scan
2280 * only for Device 0 in that situation.
2281 *
2282 * Checking has_secondary_link is a hack to identify Downstream
2283 * Ports because sometimes Switches are configured such that the
2284 * PCIe Port Type labels are backwards.
5bbe029f 2285 */
04043fd8 2286 if (bridge && pci_is_pcie(bridge) && bridge->has_secondary_link)
f07852d6 2287 return 1;
04043fd8 2288
f07852d6
MW
2289 return 0;
2290}
2291
1da177e4
LT
2292/**
2293 * pci_scan_slot - scan a PCI slot on a bus for devices.
2294 * @bus: PCI bus to scan
2295 * @devfn: slot number to scan (must have function number 0)
2296 *
2297 * Scan a PCI slot on the specified PCI bus for devices, adding
2298 * discovered devices to the @bus->devices list. New devices
8a1bc901 2299 * will not have is_added set.
1b69dfc6
TP
2300 *
2301 * Returns the number of new devices found.
1da177e4 2302 */
96bde06a 2303int pci_scan_slot(struct pci_bus *bus, int devfn)
1da177e4 2304{
f07852d6 2305 unsigned fn, nr = 0;
1b69dfc6 2306 struct pci_dev *dev;
f07852d6
MW
2307
2308 if (only_one_child(bus) && (devfn > 0))
2309 return 0; /* Already scanned the entire slot */
1da177e4 2310
1b69dfc6 2311 dev = pci_scan_single_device(bus, devfn);
4fb88c1a
MW
2312 if (!dev)
2313 return 0;
2314 if (!dev->is_added)
1b69dfc6
TP
2315 nr++;
2316
b1bd58e4 2317 for (fn = next_fn(bus, dev, 0); fn > 0; fn = next_fn(bus, dev, fn)) {
f07852d6
MW
2318 dev = pci_scan_single_device(bus, devfn + fn);
2319 if (dev) {
2320 if (!dev->is_added)
2321 nr++;
2322 dev->multifunction = 1;
1da177e4
LT
2323 }
2324 }
7d715a6c 2325
149e1637
SL
2326 /* Only one slot below a PCIe bridge has devices; initialize ASPM once */
2327 if (bus->self && nr)
7d715a6c
SL
2328 pcie_aspm_init_link_state(bus->self);
2329
1da177e4
LT
2330 return nr;
2331}
b7fe9434 2332EXPORT_SYMBOL(pci_scan_slot);
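
Editorial note: @devfn encodes both the slot and the function: bits 7:3 are the device number and bits 2:0 the function number, which is why the bus scan below steps devfn by 8. A small worked check using the standard PCI_DEVFN()/PCI_SLOT()/PCI_FUNC() macros; the wrapper function exists only for illustration.

#include <linux/bug.h>
#include <linux/pci.h>

static void devfn_encoding_example(void)
{
        /* Device 3, function 2 -> devfn 3 * 8 + 2 = 0x1a. */
        unsigned int devfn = PCI_DEVFN(3, 2);

        WARN_ON(devfn != 0x1a);
        WARN_ON(PCI_SLOT(devfn) != 3);
        WARN_ON(PCI_FUNC(devfn) != 2);
}
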
1da177e4 2333
b03e7495
JM
2334static int pcie_find_smpss(struct pci_dev *dev, void *data)
2335{
2336 u8 *smpss = data;
2337
2338 if (!pci_is_pcie(dev))
2339 return 0;
2340
d4aa68f6
YW
2341 /*
2342 * We don't have a way to change MPS settings on devices that have
2343 * drivers attached. A hot-added device might support only the minimum
2344 * MPS setting (MPS=128). Therefore, if the fabric contains a bridge
2345 * where devices may be hot-added, we limit the fabric MPS to 128 so
2346 * hot-added devices will work correctly.
2347 *
2348 * However, if we hot-add a device to a slot directly below a Root
2349 * Port, it's impossible for there to be other existing devices below
2350 * the port. We don't limit the MPS in this case because we can
2351 * reconfigure MPS on both the Root Port and the hot-added device,
2352 * and there are no other devices involved.
2353 *
2354 * Note that this PCIE_BUS_SAFE path assumes no peer-to-peer DMA.
b03e7495 2355 */
d4aa68f6
YW
2356 if (dev->is_hotplug_bridge &&
2357 pci_pcie_type(dev) != PCI_EXP_TYPE_ROOT_PORT)
b03e7495
JM
2358 *smpss = 0;
2359
2360 if (*smpss > dev->pcie_mpss)
2361 *smpss = dev->pcie_mpss;
2362
2363 return 0;
2364}
2365
2366static void pcie_write_mps(struct pci_dev *dev, int mps)
2367{
62f392ea 2368 int rc;
b03e7495
JM
2369
2370 if (pcie_bus_config == PCIE_BUS_PERFORMANCE) {
62f392ea 2371 mps = 128 << dev->pcie_mpss;
b03e7495 2372
62f87c0e
YW
2373 if (pci_pcie_type(dev) != PCI_EXP_TYPE_ROOT_PORT &&
2374 dev->bus->self)
62f392ea 2375 /* For "Performance", the assumption is made that
b03e7495
JM
2376 * downstream communication will never be larger than
2377 * the MRRS. So, the MPS only needs to be configured
2378 * for the upstream communication. This being the case,
2379 * walk from the top down and set the MPS of the child
2380 * to that of the parent bus.
62f392ea
JM
2381 *
2382 * Configure the device MPS with the smaller of the
2383 * device MPSS or the bridge MPS (which is assumed to be
2384 * properly configured at this point to the largest
2385 * allowable MPS based on its parent bus).
b03e7495 2386 */
62f392ea 2387 mps = min(mps, pcie_get_mps(dev->bus->self));
b03e7495
JM
2388 }
2389
2390 rc = pcie_set_mps(dev, mps);
2391 if (rc)
2392 dev_err(&dev->dev, "Failed attempting to set the MPS\n");
2393}
2394
62f392ea 2395static void pcie_write_mrrs(struct pci_dev *dev)
b03e7495 2396{
62f392ea 2397 int rc, mrrs;
b03e7495 2398
ed2888e9
JM
2399 /* In the "safe" case, do not configure the MRRS. There appear to be
2400 * issues with setting MRRS to 0 on a number of devices.
2401 */
ed2888e9
JM
2402 if (pcie_bus_config != PCIE_BUS_PERFORMANCE)
2403 return;
2404
ed2888e9
JM
2405 /* For Max performance, the MRRS must be set to the largest supported
2406 * value. However, it cannot be configured larger than the MPS the
62f392ea
JM
2407 * device or the bus can support. This should already be properly
2408 * configured by a prior call to pcie_write_mps.
ed2888e9 2409 */
62f392ea 2410 mrrs = pcie_get_mps(dev);
b03e7495
JM
2411
2412 /* MRRS is an R/W register. Invalid values can be written, but a
ed2888e9 2413 * subsequent read will verify if the value is acceptable or not.
b03e7495
JM
2414 * If the MRRS value provided is not acceptable (e.g., too large),
2415 * shrink the value until it is acceptable to the HW.
f7625980 2416 */
b03e7495
JM
2417 while (mrrs != pcie_get_readrq(dev) && mrrs >= 128) {
2418 rc = pcie_set_readrq(dev, mrrs);
62f392ea
JM
2419 if (!rc)
2420 break;
b03e7495 2421
62f392ea 2422 dev_warn(&dev->dev, "Failed to set the MRRS\n");
b03e7495
JM
2423 mrrs /= 2;
2424 }
62f392ea
JM
2425
2426 if (mrrs < 128)
227f0647 2427 dev_err(&dev->dev, "MRRS could not be configured to a safe value. If problems are experienced, try running with pci=pcie_bus_safe\n");
b03e7495
JM
2428}
2429
2430static int pcie_bus_configure_set(struct pci_dev *dev, void *data)
2431{
a513a99a 2432 int mps, orig_mps;
b03e7495
JM
2433
2434 if (!pci_is_pcie(dev))
2435 return 0;
2436
27d868b5
KB
2437 if (pcie_bus_config == PCIE_BUS_TUNE_OFF ||
2438 pcie_bus_config == PCIE_BUS_DEFAULT)
5895af79 2439 return 0;
5895af79 2440
a513a99a
JM
2441 mps = 128 << *(u8 *)data;
2442 orig_mps = pcie_get_mps(dev);
b03e7495
JM
2443
2444 pcie_write_mps(dev, mps);
62f392ea 2445 pcie_write_mrrs(dev);
b03e7495 2446
227f0647
RD
2447 dev_info(&dev->dev, "Max Payload Size set to %4d/%4d (was %4d), Max Read Rq %4d\n",
2448 pcie_get_mps(dev), 128 << dev->pcie_mpss,
a513a99a 2449 orig_mps, pcie_get_readrq(dev));
b03e7495
JM
2450
2451 return 0;
2452}
2453
a513a99a 2454/* pcie_bus_configure_settings requires that pci_walk_bus work in a top-down,
b03e7495
JM
2455 * parents then children fashion. If this changes, then this code will not
2456 * work as designed.
2457 */
a58674ff 2458void pcie_bus_configure_settings(struct pci_bus *bus)
b03e7495 2459{
1e358f94 2460 u8 smpss = 0;
b03e7495 2461
a58674ff 2462 if (!bus->self)
b03e7495
JM
2463 return;
2464
b03e7495 2465 if (!pci_is_pcie(bus->self))
5f39e670
JM
2466 return;
2467
2468 /* FIXME - Peer to peer DMA is possible, though the endpoint would need
3315472c 2469 * to be aware of the MPS of the destination. To work around this,
5f39e670
JM
2470 * simply force the MPS of the entire system to the smallest possible.
2471 */
2472 if (pcie_bus_config == PCIE_BUS_PEER2PEER)
2473 smpss = 0;
2474
b03e7495 2475 if (pcie_bus_config == PCIE_BUS_SAFE) {
a58674ff 2476 smpss = bus->self->pcie_mpss;
5f39e670 2477
b03e7495
JM
2478 pcie_find_smpss(bus->self, &smpss);
2479 pci_walk_bus(bus, pcie_find_smpss, &smpss);
2480 }
2481
2482 pcie_bus_configure_set(bus->self, &smpss);
2483 pci_walk_bus(bus, pcie_bus_configure_set, &smpss);
2484}
debc3b77 2485EXPORT_SYMBOL_GPL(pcie_bus_configure_settings);
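
Editorial note: architecture and host controller code normally calls this once per child bus after the initial scan, so the walk happens top-down as the comment above requires. A representative sketch; the enclosing function is hypothetical.

#include <linux/pci.h>

/* Hypothetical post-scan hook in a host controller driver. */
static void my_host_configure_mps(struct pci_bus *root_bus)
{
        struct pci_bus *child;

        list_for_each_entry(child, &root_bus->children, node)
                pcie_bus_configure_settings(child);
}
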
b03e7495 2486
bccf90d6
PD
2487/*
2488 * Called after each bus is probed, but before its children are examined. This
2489 * is marked as __weak because multiple architectures define it.
2490 */
2491void __weak pcibios_fixup_bus(struct pci_bus *bus)
2492{
2493 /* nothing to do, expected to be removed in the future */
2494}
2495
1c02ea81
MW
2496/**
2497 * pci_scan_child_bus_extend() - Scan devices below a bus
2498 * @bus: Bus to scan for devices
2499 * @available_buses: Total number of buses available (%0 does not try to
2500 * extend beyond the minimum)
2501 *
2502 * Scans devices below @bus including subordinate buses. Returns new
2503 * subordinate number including all the found devices. Passing
2504 * @available_buses causes the remaining bus space to be distributed
2505 * equally between hotplug-capable bridges to allow future extension of the
2506 * hierarchy.
2507 */
2508static unsigned int pci_scan_child_bus_extend(struct pci_bus *bus,
2509 unsigned int available_buses)
1da177e4 2510{
1c02ea81
MW
2511 unsigned int used_buses, normal_bridges = 0, hotplug_bridges = 0;
2512 unsigned int start = bus->busn_res.start;
2513 unsigned int devfn, cmax, max = start;
1da177e4
LT
2514 struct pci_dev *dev;
2515
0207c356 2516 dev_dbg(&bus->dev, "scanning bus\n");
1da177e4
LT
2517
2518 /* Go find them, Rover! */
2519 for (devfn = 0; devfn < 0x100; devfn += 8)
2520 pci_scan_slot(bus, devfn);
2521
a28724b0 2522 /* Reserve buses for SR-IOV capability. */
1c02ea81
MW
2523 used_buses = pci_iov_bus_range(bus);
2524 max += used_buses;
a28724b0 2525
1da177e4
LT
2526 /*
2527 * After performing arch-dependent fixup of the bus, look behind
2528 * all PCI-to-PCI bridges on this bus.
2529 */
74710ded 2530 if (!bus->is_added) {
0207c356 2531 dev_dbg(&bus->dev, "fixups for bus\n");
74710ded 2532 pcibios_fixup_bus(bus);
981cf9ea 2533 bus->is_added = 1;
74710ded
AC
2534 }
2535
1c02ea81
MW
2536 /*
2537 * Calculate how many hotplug bridges and normal bridges there
2538 * are on this bus. We will distribute the additional available
2539 * buses between hotplug bridges.
2540 */
2541 for_each_pci_bridge(dev, bus) {
2542 if (dev->is_hotplug_bridge)
2543 hotplug_bridges++;
2544 else
2545 normal_bridges++;
2546 }
2547
4147c2fd
MW
2548 /*
2549 * Scan bridges that are already configured. We don't touch them
2550 * unless they are misconfigured (which will be done in the second
2551 * scan below).
2552 */
1c02ea81
MW
2553 for_each_pci_bridge(dev, bus) {
2554 cmax = max;
2555 max = pci_scan_bridge_extend(bus, dev, max, 0, 0);
b729a311
MW
2556
2557 /*
2558 * Reserve one bus for each bridge now to avoid extending
2559 * hotplug bridges too much during the second scan below.
2560 */
2561 used_buses++;
2562 if (cmax - max > 1)
2563 used_buses += cmax - max - 1;
1c02ea81 2564 }
4147c2fd
MW
2565
2566 /* Scan bridges that need to be reconfigured */
1c02ea81
MW
2567 for_each_pci_bridge(dev, bus) {
2568 unsigned int buses = 0;
2569
2570 if (!hotplug_bridges && normal_bridges == 1) {
2571 /*
2572 * There is only one bridge on the bus (upstream
2573 * port) so it gets all available buses which it
2574 * can then distribute to the possible hotplug
2575 * bridges below.
2576 */
2577 buses = available_buses;
2578 } else if (dev->is_hotplug_bridge) {
2579 /*
2580 * Distribute the extra buses between hotplug
2581 * bridges if any.
2582 */
2583 buses = available_buses / hotplug_bridges;
b729a311 2584 buses = min(buses, available_buses - used_buses + 1);
1c02ea81
MW
2585 }
2586
2587 cmax = max;
2588 max = pci_scan_bridge_extend(bus, dev, cmax, buses, 1);
b729a311
MW
2589 /* One bus is already accounted so don't add it again */
2590 if (max - cmax > 1)
2591 used_buses += max - cmax - 1;
1c02ea81 2592 }
1da177e4 2593
e16b4660
KB
2594 /*
2595 * Make sure a hotplug bridge has at least the minimum requested
1c02ea81
MW
2596 * number of buses but allow it to grow up to the maximum available
2597 * bus number if there is room.
e16b4660 2598 */
1c02ea81
MW
2599 if (bus->self && bus->self->is_hotplug_bridge) {
2600 used_buses = max_t(unsigned int, available_buses,
2601 pci_hotplug_bus_size - 1);
2602 if (max - start < used_buses) {
2603 max = start + used_buses;
2604
2605 /* Do not allocate more buses than we have room left */
2606 if (max > bus->busn_res.end)
2607 max = bus->busn_res.end;
2608
2609 dev_dbg(&bus->dev, "%pR extended by %#02x\n",
2610 &bus->busn_res, max - start);
2611 }
e16b4660
KB
2612 }
2613
1da177e4
LT
2614 /*
2615 * We've scanned the bus and so we know all about what's on
2616 * the other side of any bridges that may be on this bus plus
2617 * any devices.
2618 *
2619 * Return how far we've got finding sub-buses.
2620 */
0207c356 2621 dev_dbg(&bus->dev, "bus scan returning with max=%02x\n", max);
1da177e4
LT
2622 return max;
2623}
1c02ea81
MW
2624
2625/**
2626 * pci_scan_child_bus() - Scan devices below a bus
2627 * @bus: Bus to scan for devices
2628 *
2629 * Scans devices below @bus including subordinate buses. Returns new
2630 * subordinate number including all the found devices.
2631 */
2632unsigned int pci_scan_child_bus(struct pci_bus *bus)
2633{
2634 return pci_scan_child_bus_extend(bus, 0);
2635}
b7fe9434 2636EXPORT_SYMBOL_GPL(pci_scan_child_bus);
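
Editorial note: as a rough worked example of the distribution in pci_scan_child_bus_extend() above, suppose a scanned bus ends up with one normal bridge, two hotplug bridges and 10 spare bus numbers. The second pass offers each hotplug bridge about available_buses / hotplug_bridges = 10 / 2 = 5 extra buses (further capped by what is still genuinely unused), while the normal bridge keeps only the range it already consumed.
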
1da177e4 2637
6c0cc950
RW
2638/**
2639 * pcibios_root_bridge_prepare - Platform-specific host bridge setup.
2640 * @bridge: Host bridge to set up.
2641 *
2642 * Default empty implementation. Replace with an architecture-specific setup
2643 * routine, if necessary.
2644 */
2645int __weak pcibios_root_bridge_prepare(struct pci_host_bridge *bridge)
2646{
2647 return 0;
2648}
2649
10a95747
JL
2650void __weak pcibios_add_bus(struct pci_bus *bus)
2651{
2652}
2653
2654void __weak pcibios_remove_bus(struct pci_bus *bus)
2655{
2656}
2657
9ee8a1c4
LP
2658struct pci_bus *pci_create_root_bus(struct device *parent, int bus,
2659 struct pci_ops *ops, void *sysdata, struct list_head *resources)
1da177e4 2660{
0efd5aab 2661 int error;
5a21d70d 2662 struct pci_host_bridge *bridge;
1da177e4 2663
59094065 2664 bridge = pci_alloc_host_bridge(0);
7b543663 2665 if (!bridge)
37d6a0a6 2666 return NULL;
7b543663
YL
2667
2668 bridge->dev.parent = parent;
a9d9f527 2669
37d6a0a6
AB
2670 list_splice_init(resources, &bridge->windows);
2671 bridge->sysdata = sysdata;
2672 bridge->busnr = bus;
2673 bridge->ops = ops;
a9d9f527 2674
37d6a0a6
AB
2675 error = pci_register_host_bridge(bridge);
2676 if (error < 0)
2677 goto err_out;
a5390aa6 2678
37d6a0a6 2679 return bridge->bus;
1da177e4 2680
1da177e4 2681err_out:
37d6a0a6 2682 kfree(bridge);
1da177e4
LT
2683 return NULL;
2684}
e6b29dea 2685EXPORT_SYMBOL_GPL(pci_create_root_bus);
cdb9b9f7 2686
98a35831
YL
2687int pci_bus_insert_busn_res(struct pci_bus *b, int bus, int bus_max)
2688{
2689 struct resource *res = &b->busn_res;
2690 struct resource *parent_res, *conflict;
2691
2692 res->start = bus;
2693 res->end = bus_max;
2694 res->flags = IORESOURCE_BUS;
2695
2696 if (!pci_is_root_bus(b))
2697 parent_res = &b->parent->busn_res;
2698 else {
2699 parent_res = get_pci_domain_busn_res(pci_domain_nr(b));
2700 res->flags |= IORESOURCE_PCI_FIXED;
2701 }
2702
ced04d15 2703 conflict = request_resource_conflict(parent_res, res);
98a35831
YL
2704
2705 if (conflict)
2706 dev_printk(KERN_DEBUG, &b->dev,
2707 "busn_res: can not insert %pR under %s%pR (conflicts with %s %pR)\n",
2708 res, pci_is_root_bus(b) ? "domain " : "",
2709 parent_res, conflict->name, conflict);
98a35831
YL
2710
2711 return conflict == NULL;
2712}
2713
2714int pci_bus_update_busn_res_end(struct pci_bus *b, int bus_max)
2715{
2716 struct resource *res = &b->busn_res;
2717 struct resource old_res = *res;
2718 resource_size_t size;
2719 int ret;
2720
2721 if (res->start > bus_max)
2722 return -EINVAL;
2723
2724 size = bus_max - res->start + 1;
2725 ret = adjust_resource(res, res->start, size);
2726 dev_printk(KERN_DEBUG, &b->dev,
2727 "busn_res: %pR end %s updated to %02x\n",
2728 &old_res, ret ? "can not be" : "is", bus_max);
2729
2730 if (!ret && !res->parent)
2731 pci_bus_insert_busn_res(b, res->start, res->end);
2732
2733 return ret;
2734}
2735
2736void pci_bus_release_busn_res(struct pci_bus *b)
2737{
2738 struct resource *res = &b->busn_res;
2739 int ret;
2740
2741 if (!res->flags || !res->parent)
2742 return;
2743
2744 ret = release_resource(res);
2745 dev_printk(KERN_DEBUG, &b->dev,
2746 "busn_res: %pR %s released\n",
2747 res, ret ? "can not be" : "is");
2748}
2749
1228c4b6 2750int pci_scan_root_bus_bridge(struct pci_host_bridge *bridge)
a2ebb827 2751{
14d76b68 2752 struct resource_entry *window;
4d99f524 2753 bool found = false;
a2ebb827 2754 struct pci_bus *b;
1228c4b6 2755 int max, bus, ret;
4d99f524 2756
1228c4b6
LP
2757 if (!bridge)
2758 return -EINVAL;
2759
2760 resource_list_for_each_entry(window, &bridge->windows)
4d99f524
YL
2761 if (window->res->flags & IORESOURCE_BUS) {
2762 found = true;
2763 break;
2764 }
a2ebb827 2765
1228c4b6
LP
2766 ret = pci_register_host_bridge(bridge);
2767 if (ret < 0)
2768 return ret;
2769
2770 b = bridge->bus;
2771 bus = bridge->busnr;
a2ebb827 2772
4d99f524
YL
2773 if (!found) {
2774 dev_info(&b->dev,
2775 "No busn resource found for root bus, will use [bus %02x-ff]\n",
2776 bus);
2777 pci_bus_insert_busn_res(b, bus, 255);
2778 }
2779
2780 max = pci_scan_child_bus(b);
2781
2782 if (!found)
2783 pci_bus_update_busn_res_end(b, max);
2784
1228c4b6 2785 return 0;
a2ebb827 2786}
1228c4b6 2787EXPORT_SYMBOL(pci_scan_root_bus_bridge);
d2a7926d
LP
2788
2789struct pci_bus *pci_scan_root_bus(struct device *parent, int bus,
2790 struct pci_ops *ops, void *sysdata, struct list_head *resources)
2791{
14d76b68 2792 struct resource_entry *window;
4d99f524 2793 bool found = false;
a2ebb827 2794 struct pci_bus *b;
4d99f524
YL
2795 int max;
2796
14d76b68 2797 resource_list_for_each_entry(window, resources)
4d99f524
YL
2798 if (window->res->flags & IORESOURCE_BUS) {
2799 found = true;
2800 break;
2801 }
a2ebb827 2802
9ee8a1c4 2803 b = pci_create_root_bus(parent, bus, ops, sysdata, resources);
a2ebb827
BH
2804 if (!b)
2805 return NULL;
2806
4d99f524
YL
2807 if (!found) {
2808 dev_info(&b->dev,
2809 "No busn resource found for root bus, will use [bus %02x-ff]\n",
2810 bus);
2811 pci_bus_insert_busn_res(b, bus, 255);
2812 }
2813
2814 max = pci_scan_child_bus(b);
2815
2816 if (!found)
2817 pci_bus_update_busn_res_end(b, max);
2818
a2ebb827 2819 return b;
d2a7926d 2820}
a2ebb827
BH
2821EXPORT_SYMBOL(pci_scan_root_bus);
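
Editorial note: legacy host bridge drivers that still use this interface follow a common shape: build a resource list describing the bridge apertures, scan, then add the discovered devices. A condensed sketch under assumed names (my_pci_ops and my_probe_root_bus are not real kernel symbols); real drivers pass their own windows rather than the global ioport/iomem resources.

#include <linux/pci.h>

/* Hypothetical bring-up of bus 0 with config accessors in my_pci_ops. */
static struct pci_bus *my_probe_root_bus(struct device *parent, void *sysdata,
                                         struct pci_ops *my_pci_ops)
{
        LIST_HEAD(resources);
        struct pci_bus *bus;

        pci_add_resource(&resources, &ioport_resource);
        pci_add_resource(&resources, &iomem_resource);

        bus = pci_scan_root_bus(parent, 0, my_pci_ops, sysdata, &resources);
        if (!bus) {
                pci_free_resource_list(&resources);
                return NULL;
        }

        pci_bus_add_devices(bus);       /* make the scanned devices visible */
        return bus;
}
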
2822
15856ad5 2823struct pci_bus *pci_scan_bus(int bus, struct pci_ops *ops,
de4b2f76
BH
2824 void *sysdata)
2825{
2826 LIST_HEAD(resources);
2827 struct pci_bus *b;
2828
2829 pci_add_resource(&resources, &ioport_resource);
2830 pci_add_resource(&resources, &iomem_resource);
857c3b66 2831 pci_add_resource(&resources, &busn_resource);
de4b2f76
BH
2832 b = pci_create_root_bus(NULL, bus, ops, sysdata, &resources);
2833 if (b) {
857c3b66 2834 pci_scan_child_bus(b);
de4b2f76
BH
2835 } else {
2836 pci_free_resource_list(&resources);
2837 }
2838 return b;
2839}
2840EXPORT_SYMBOL(pci_scan_bus);
2841
2f320521
YL
2842/**
2843 * pci_rescan_bus_bridge_resize - scan a PCI bus for devices.
2844 * @bridge: PCI bridge for the bus to scan
2845 *
2846 * Scan a PCI bus and child buses for new devices, add them,
2847 * and enable them, resizing bridge mmio/io resource if necessary
2848 * and possible. The caller must ensure the child devices are already
2849 * removed for resizing to occur.
2850 *
2851 * Returns the max number of subordinate bus discovered.
2852 */
10874f5a 2853unsigned int pci_rescan_bus_bridge_resize(struct pci_dev *bridge)
2f320521
YL
2854{
2855 unsigned int max;
2856 struct pci_bus *bus = bridge->subordinate;
2857
2858 max = pci_scan_child_bus(bus);
2859
2860 pci_assign_unassigned_bridge_resources(bridge);
2861
2862 pci_bus_add_devices(bus);
2863
2864 return max;
2865}
2866
a5213a31
YL
2867/**
2868 * pci_rescan_bus - scan a PCI bus for devices.
2869 * @bus: PCI bus to scan
2870 *
2871 * Scan a PCI bus and child buses for new devices, add them,
2872 * and enable them.
2873 *
2874 * Returns the maximum subordinate bus number discovered.
2875 */
10874f5a 2876unsigned int pci_rescan_bus(struct pci_bus *bus)
a5213a31
YL
2877{
2878 unsigned int max;
2879
2880 max = pci_scan_child_bus(bus);
2881 pci_assign_unassigned_bus_resources(bus);
2882 pci_bus_add_devices(bus);
2883
2884 return max;
2885}
2886EXPORT_SYMBOL_GPL(pci_rescan_bus);
2887
9d16947b
RW
2888/*
2889 * pci_rescan_bus(), pci_rescan_bus_bridge_resize() and PCI device removal
2890 * routines should always be executed under this mutex.
2891 */
2892static DEFINE_MUTEX(pci_rescan_remove_lock);
2893
2894void pci_lock_rescan_remove(void)
2895{
2896 mutex_lock(&pci_rescan_remove_lock);
2897}
2898EXPORT_SYMBOL_GPL(pci_lock_rescan_remove);
2899
2900void pci_unlock_rescan_remove(void)
2901{
2902 mutex_unlock(&pci_rescan_remove_lock);
2903}
2904EXPORT_SYMBOL_GPL(pci_unlock_rescan_remove);
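
Editorial note: the lock pair above is what makes rescanning safe against concurrent removal; a typical caller iterates the root buses with the lock held, much like the sysfs rescan path. A minimal sketch (my_force_rescan() is an illustrative name):

#include <linux/pci.h>

/* Hypothetical hook that rescans every root bus for newly appeared devices. */
static void my_force_rescan(void)
{
        struct pci_bus *bus = NULL;

        pci_lock_rescan_remove();
        while ((bus = pci_find_next_bus(bus)) != NULL)
                pci_rescan_bus(bus);
        pci_unlock_rescan_remove();
}
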
2905
3c78bc61
RD
2906static int __init pci_sort_bf_cmp(const struct device *d_a,
2907 const struct device *d_b)
6b4b78fe 2908{
99178b03
GKH
2909 const struct pci_dev *a = to_pci_dev(d_a);
2910 const struct pci_dev *b = to_pci_dev(d_b);
2911
6b4b78fe
MD
2912 if (pci_domain_nr(a->bus) < pci_domain_nr(b->bus)) return -1;
2913 else if (pci_domain_nr(a->bus) > pci_domain_nr(b->bus)) return 1;
2914
2915 if (a->bus->number < b->bus->number) return -1;
2916 else if (a->bus->number > b->bus->number) return 1;
2917
2918 if (a->devfn < b->devfn) return -1;
2919 else if (a->devfn > b->devfn) return 1;
2920
2921 return 0;
2922}
2923
5ff580c1 2924void __init pci_sort_breadthfirst(void)
6b4b78fe 2925{
99178b03 2926 bus_sort_breadthfirst(&pci_bus_type, &pci_sort_bf_cmp);
6b4b78fe 2927}
95e3ba97
MW
2928
2929int pci_hp_add_bridge(struct pci_dev *dev)
2930{
2931 struct pci_bus *parent = dev->bus;
4147c2fd 2932 int busnr, start = parent->busn_res.start;
1c02ea81 2933 unsigned int available_buses = 0;
95e3ba97
MW
2934 int end = parent->busn_res.end;
2935
2936 for (busnr = start; busnr <= end; busnr++) {
2937 if (!pci_find_bus(pci_domain_nr(parent), busnr))
2938 break;
2939 }
2940 if (busnr-- > end) {
2941 dev_err(&dev->dev, "No bus number available for hot-added bridge\n");
2942 return -1;
2943 }
4147c2fd
MW
2944
2945 /* Scan bridges that are already configured */
2946 busnr = pci_scan_bridge(parent, dev, busnr, 0);
2947
1c02ea81
MW
2948 /*
2949 * Distribute the available bus numbers between hotplug-capable
2950 * bridges to make extending the chain later possible.
2951 */
2952 available_buses = end - busnr;
2953
4147c2fd 2954 /* Scan bridges that need to be reconfigured */
1c02ea81 2955 pci_scan_bridge_extend(parent, dev, busnr, available_buses, 1);
4147c2fd 2956
95e3ba97
MW
2957 if (!dev->subordinate)
2958 return -1;
2959
2960 return 0;
2961}
2962EXPORT_SYMBOL_GPL(pci_hp_add_bridge);