]> git.proxmox.com Git - mirror_ubuntu-bionic-kernel.git/blame - drivers/pci/probe.c
PCI: Fix devm_pci_alloc_host_bridge() memory leak
[mirror_ubuntu-bionic-kernel.git] / drivers / pci / probe.c
CommitLineData
1da177e4
LT
1/*
2 * probe.c - PCI detection and setup code
3 */
4
5#include <linux/kernel.h>
6#include <linux/delay.h>
7#include <linux/init.h>
8#include <linux/pci.h>
50230713 9#include <linux/of_device.h>
de335bb4 10#include <linux/of_pci.h>
589fcc23 11#include <linux/pci_hotplug.h>
1da177e4
LT
12#include <linux/slab.h>
13#include <linux/module.h>
14#include <linux/cpumask.h>
7d715a6c 15#include <linux/pci-aspm.h>
b07461a8 16#include <linux/aer.h>
29dbe1f0 17#include <linux/acpi.h>
788858eb 18#include <linux/irqdomain.h>
d963f651 19#include <linux/pm_runtime.h>
bc56b9e0 20#include "pci.h"
1da177e4
LT
21
22#define CARDBUS_LATENCY_TIMER 176 /* secondary latency timer */
23#define CARDBUS_RESERVE_BUSNR 3
1da177e4 24
0b950f0f 25static struct resource busn_resource = {
67cdc827
YL
26 .name = "PCI busn",
27 .start = 0,
28 .end = 255,
29 .flags = IORESOURCE_BUS,
30};
31
1da177e4
LT
32/* Ugh. Need to stop exporting this to modules. */
33LIST_HEAD(pci_root_buses);
34EXPORT_SYMBOL(pci_root_buses);
35
5cc62c20
YL
36static LIST_HEAD(pci_domain_busn_res_list);
37
38struct pci_domain_busn_res {
39 struct list_head list;
40 struct resource res;
41 int domain_nr;
42};
43
44static struct resource *get_pci_domain_busn_res(int domain_nr)
45{
46 struct pci_domain_busn_res *r;
47
48 list_for_each_entry(r, &pci_domain_busn_res_list, list)
49 if (r->domain_nr == domain_nr)
50 return &r->res;
51
52 r = kzalloc(sizeof(*r), GFP_KERNEL);
53 if (!r)
54 return NULL;
55
56 r->domain_nr = domain_nr;
57 r->res.start = 0;
58 r->res.end = 0xff;
59 r->res.flags = IORESOURCE_BUS | IORESOURCE_PCI_FIXED;
60
61 list_add_tail(&r->list, &pci_domain_busn_res_list);
62
63 return &r->res;
64}
65
70308923
GKH
/* bus_find_device() match callback that accepts every device. */
static int find_anything(struct device *dev, void *data)
{
	return 1;
}
1da177e4 70
ed4aaadb
ZY
71/*
72 * Some device drivers need know if pci is initiated.
73 * Basically, we think pci is not initiated when there
70308923 74 * is no device to be found on the pci_bus_type.
ed4aaadb
ZY
75 */
76int no_pci_devices(void)
77{
70308923
GKH
78 struct device *dev;
79 int no_devices;
ed4aaadb 80
70308923
GKH
81 dev = bus_find_device(&pci_bus_type, NULL, NULL, find_anything);
82 no_devices = (dev == NULL);
83 put_device(dev);
84 return no_devices;
85}
ed4aaadb
ZY
86EXPORT_SYMBOL(no_pci_devices);
87
1da177e4
LT
88/*
89 * PCI Bus Class
90 */
fd7d1ced 91static void release_pcibus_dev(struct device *dev)
1da177e4 92{
fd7d1ced 93 struct pci_bus *pci_bus = to_pci_bus(dev);
1da177e4 94
ff0387c3 95 put_device(pci_bus->bridge);
2fe2abf8 96 pci_bus_remove_resources(pci_bus);
98d9f30c 97 pci_release_bus_of_node(pci_bus);
1da177e4
LT
98 kfree(pci_bus);
99}
100
101static struct class pcibus_class = {
102 .name = "pci_bus",
fd7d1ced 103 .dev_release = &release_pcibus_dev,
56039e65 104 .dev_groups = pcibus_groups,
1da177e4
LT
105};
106
107static int __init pcibus_class_init(void)
108{
109 return class_register(&pcibus_class);
110}
111postcore_initcall(pcibus_class_init);
112
6ac665c6 113static u64 pci_size(u64 base, u64 maxbase, u64 mask)
1da177e4 114{
6ac665c6 115 u64 size = mask & maxbase; /* Find the significant bits */
1da177e4
LT
116 if (!size)
117 return 0;
118
119 /* Get the lowest of them to find the decode size, and
120 from that the extent. */
121 size = (size & ~(size-1)) - 1;
122
123 /* base == maxbase can be valid only if the BAR has
124 already been programmed with all 1s. */
125 if (base == maxbase && ((base | size) & mask) != mask)
126 return 0;
127
128 return size;
129}
130
28c6821a 131static inline unsigned long decode_bar(struct pci_dev *dev, u32 bar)
6ac665c6 132{
8d6a6a47 133 u32 mem_type;
28c6821a 134 unsigned long flags;
8d6a6a47 135
6ac665c6 136 if ((bar & PCI_BASE_ADDRESS_SPACE) == PCI_BASE_ADDRESS_SPACE_IO) {
28c6821a
BH
137 flags = bar & ~PCI_BASE_ADDRESS_IO_MASK;
138 flags |= IORESOURCE_IO;
139 return flags;
6ac665c6 140 }
07eddf3d 141
28c6821a
BH
142 flags = bar & ~PCI_BASE_ADDRESS_MEM_MASK;
143 flags |= IORESOURCE_MEM;
144 if (flags & PCI_BASE_ADDRESS_MEM_PREFETCH)
145 flags |= IORESOURCE_PREFETCH;
07eddf3d 146
8d6a6a47
BH
147 mem_type = bar & PCI_BASE_ADDRESS_MEM_TYPE_MASK;
148 switch (mem_type) {
149 case PCI_BASE_ADDRESS_MEM_TYPE_32:
150 break;
151 case PCI_BASE_ADDRESS_MEM_TYPE_1M:
0ff9514b 152 /* 1M mem BAR treated as 32-bit BAR */
8d6a6a47
BH
153 break;
154 case PCI_BASE_ADDRESS_MEM_TYPE_64:
28c6821a
BH
155 flags |= IORESOURCE_MEM_64;
156 break;
8d6a6a47 157 default:
0ff9514b 158 /* mem unknown type treated as 32-bit BAR */
8d6a6a47
BH
159 break;
160 }
28c6821a 161 return flags;
07eddf3d
YL
162}
163
808e34e2
ZK
164#define PCI_COMMAND_DECODE_ENABLE (PCI_COMMAND_MEMORY | PCI_COMMAND_IO)
165
0b400c7e
YZ
166/**
167 * pci_read_base - read a PCI BAR
168 * @dev: the PCI device
169 * @type: type of the BAR
170 * @res: resource buffer to be filled in
171 * @pos: BAR position in the config space
172 *
173 * Returns 1 if the BAR is 64-bit, or 0 if 32-bit.
6ac665c6 174 */
0b400c7e 175int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type,
3c78bc61 176 struct resource *res, unsigned int pos)
07eddf3d 177{
dc5205ef 178 u32 l = 0, sz = 0, mask;
23b13bc7 179 u64 l64, sz64, mask64;
253d2e54 180 u16 orig_cmd;
cf4d1cf5 181 struct pci_bus_region region, inverted_region;
6ac665c6 182
1ed67439 183 mask = type ? PCI_ROM_ADDRESS_MASK : ~0;
6ac665c6 184
0ff9514b 185 /* No printks while decoding is disabled! */
253d2e54
JP
186 if (!dev->mmio_always_on) {
187 pci_read_config_word(dev, PCI_COMMAND, &orig_cmd);
808e34e2
ZK
188 if (orig_cmd & PCI_COMMAND_DECODE_ENABLE) {
189 pci_write_config_word(dev, PCI_COMMAND,
190 orig_cmd & ~PCI_COMMAND_DECODE_ENABLE);
191 }
253d2e54
JP
192 }
193
6ac665c6
MW
194 res->name = pci_name(dev);
195
196 pci_read_config_dword(dev, pos, &l);
1ed67439 197 pci_write_config_dword(dev, pos, l | mask);
6ac665c6
MW
198 pci_read_config_dword(dev, pos, &sz);
199 pci_write_config_dword(dev, pos, l);
200
201 /*
202 * All bits set in sz means the device isn't working properly.
45aa23b4
BH
203 * If the BAR isn't implemented, all bits must be 0. If it's a
204 * memory BAR or a ROM, bit 0 must be clear; if it's an io BAR, bit
205 * 1 must be clear.
6ac665c6 206 */
f795d86a
MS
207 if (sz == 0xffffffff)
208 sz = 0;
6ac665c6
MW
209
210 /*
211 * I don't know how l can have all bits set. Copied from old code.
212 * Maybe it fixes a bug on some ancient platform.
213 */
214 if (l == 0xffffffff)
215 l = 0;
216
217 if (type == pci_bar_unknown) {
28c6821a
BH
218 res->flags = decode_bar(dev, l);
219 res->flags |= IORESOURCE_SIZEALIGN;
220 if (res->flags & IORESOURCE_IO) {
f795d86a
MS
221 l64 = l & PCI_BASE_ADDRESS_IO_MASK;
222 sz64 = sz & PCI_BASE_ADDRESS_IO_MASK;
223 mask64 = PCI_BASE_ADDRESS_IO_MASK & (u32)IO_SPACE_LIMIT;
6ac665c6 224 } else {
f795d86a
MS
225 l64 = l & PCI_BASE_ADDRESS_MEM_MASK;
226 sz64 = sz & PCI_BASE_ADDRESS_MEM_MASK;
227 mask64 = (u32)PCI_BASE_ADDRESS_MEM_MASK;
6ac665c6
MW
228 }
229 } else {
7a6d312b
BH
230 if (l & PCI_ROM_ADDRESS_ENABLE)
231 res->flags |= IORESOURCE_ROM_ENABLE;
f795d86a
MS
232 l64 = l & PCI_ROM_ADDRESS_MASK;
233 sz64 = sz & PCI_ROM_ADDRESS_MASK;
76dc5268 234 mask64 = PCI_ROM_ADDRESS_MASK;
6ac665c6
MW
235 }
236
28c6821a 237 if (res->flags & IORESOURCE_MEM_64) {
6ac665c6
MW
238 pci_read_config_dword(dev, pos + 4, &l);
239 pci_write_config_dword(dev, pos + 4, ~0);
240 pci_read_config_dword(dev, pos + 4, &sz);
241 pci_write_config_dword(dev, pos + 4, l);
242
243 l64 |= ((u64)l << 32);
244 sz64 |= ((u64)sz << 32);
f795d86a
MS
245 mask64 |= ((u64)~0 << 32);
246 }
6ac665c6 247
f795d86a
MS
248 if (!dev->mmio_always_on && (orig_cmd & PCI_COMMAND_DECODE_ENABLE))
249 pci_write_config_word(dev, PCI_COMMAND, orig_cmd);
6ac665c6 250
f795d86a
MS
251 if (!sz64)
252 goto fail;
6ac665c6 253
f795d86a 254 sz64 = pci_size(l64, sz64, mask64);
7e79c5f8
MS
255 if (!sz64) {
256 dev_info(&dev->dev, FW_BUG "reg 0x%x: invalid BAR (can't size)\n",
257 pos);
f795d86a 258 goto fail;
7e79c5f8 259 }
f795d86a
MS
260
261 if (res->flags & IORESOURCE_MEM_64) {
3a9ad0b4
YL
262 if ((sizeof(pci_bus_addr_t) < 8 || sizeof(resource_size_t) < 8)
263 && sz64 > 0x100000000ULL) {
23b13bc7
BH
264 res->flags |= IORESOURCE_UNSET | IORESOURCE_DISABLED;
265 res->start = 0;
266 res->end = 0;
f795d86a
MS
267 dev_err(&dev->dev, "reg 0x%x: can't handle BAR larger than 4GB (size %#010llx)\n",
268 pos, (unsigned long long)sz64);
23b13bc7 269 goto out;
c7dabef8
BH
270 }
271
3a9ad0b4 272 if ((sizeof(pci_bus_addr_t) < 8) && l) {
31e9dd25 273 /* Above 32-bit boundary; try to reallocate */
c83bd900 274 res->flags |= IORESOURCE_UNSET;
72dc5601
BH
275 res->start = 0;
276 res->end = sz64;
f795d86a
MS
277 dev_info(&dev->dev, "reg 0x%x: can't handle BAR above 4GB (bus address %#010llx)\n",
278 pos, (unsigned long long)l64);
72dc5601 279 goto out;
6ac665c6 280 }
6ac665c6
MW
281 }
282
f795d86a
MS
283 region.start = l64;
284 region.end = l64 + sz64;
285
fc279850
YL
286 pcibios_bus_to_resource(dev->bus, res, &region);
287 pcibios_resource_to_bus(dev->bus, &inverted_region, res);
cf4d1cf5
KH
288
289 /*
290 * If "A" is a BAR value (a bus address), "bus_to_resource(A)" is
291 * the corresponding resource address (the physical address used by
292 * the CPU. Converting that resource address back to a bus address
293 * should yield the original BAR value:
294 *
295 * resource_to_bus(bus_to_resource(A)) == A
296 *
297 * If it doesn't, CPU accesses to "bus_to_resource(A)" will not
298 * be claimed by the device.
299 */
300 if (inverted_region.start != region.start) {
cf4d1cf5 301 res->flags |= IORESOURCE_UNSET;
cf4d1cf5 302 res->start = 0;
26370fc6 303 res->end = region.end - region.start;
f795d86a
MS
304 dev_info(&dev->dev, "reg 0x%x: initial BAR value %#010llx invalid\n",
305 pos, (unsigned long long)region.start);
cf4d1cf5 306 }
96ddef25 307
0ff9514b
BH
308 goto out;
309
310
311fail:
312 res->flags = 0;
313out:
31e9dd25 314 if (res->flags)
33963e30 315 dev_printk(KERN_DEBUG, &dev->dev, "reg 0x%x: %pR\n", pos, res);
0ff9514b 316
28c6821a 317 return (res->flags & IORESOURCE_MEM_64) ? 1 : 0;
07eddf3d
YL
318}
319
1da177e4
LT
320static void pci_read_bases(struct pci_dev *dev, unsigned int howmany, int rom)
321{
6ac665c6 322 unsigned int pos, reg;
07eddf3d 323
ad67b437
PB
324 if (dev->non_compliant_bars)
325 return;
326
6ac665c6
MW
327 for (pos = 0; pos < howmany; pos++) {
328 struct resource *res = &dev->resource[pos];
1da177e4 329 reg = PCI_BASE_ADDRESS_0 + (pos << 2);
6ac665c6 330 pos += __pci_read_base(dev, pci_bar_unknown, res, reg);
1da177e4 331 }
6ac665c6 332
1da177e4 333 if (rom) {
6ac665c6 334 struct resource *res = &dev->resource[PCI_ROM_RESOURCE];
1da177e4 335 dev->rom_base_reg = rom;
6ac665c6 336 res->flags = IORESOURCE_MEM | IORESOURCE_PREFETCH |
92b19ff5 337 IORESOURCE_READONLY | IORESOURCE_SIZEALIGN;
6ac665c6 338 __pci_read_base(dev, pci_bar_mem32, res, rom);
1da177e4
LT
339 }
340}
341
15856ad5 342static void pci_read_bridge_io(struct pci_bus *child)
1da177e4
LT
343{
344 struct pci_dev *dev = child->self;
345 u8 io_base_lo, io_limit_lo;
2b28ae19 346 unsigned long io_mask, io_granularity, base, limit;
5bfa14ed 347 struct pci_bus_region region;
2b28ae19
BH
348 struct resource *res;
349
350 io_mask = PCI_IO_RANGE_MASK;
351 io_granularity = 0x1000;
352 if (dev->io_window_1k) {
353 /* Support 1K I/O space granularity */
354 io_mask = PCI_IO_1K_RANGE_MASK;
355 io_granularity = 0x400;
356 }
1da177e4 357
1da177e4
LT
358 res = child->resource[0];
359 pci_read_config_byte(dev, PCI_IO_BASE, &io_base_lo);
360 pci_read_config_byte(dev, PCI_IO_LIMIT, &io_limit_lo);
2b28ae19
BH
361 base = (io_base_lo & io_mask) << 8;
362 limit = (io_limit_lo & io_mask) << 8;
1da177e4
LT
363
364 if ((io_base_lo & PCI_IO_RANGE_TYPE_MASK) == PCI_IO_RANGE_TYPE_32) {
365 u16 io_base_hi, io_limit_hi;
8f38eaca 366
1da177e4
LT
367 pci_read_config_word(dev, PCI_IO_BASE_UPPER16, &io_base_hi);
368 pci_read_config_word(dev, PCI_IO_LIMIT_UPPER16, &io_limit_hi);
8f38eaca
BH
369 base |= ((unsigned long) io_base_hi << 16);
370 limit |= ((unsigned long) io_limit_hi << 16);
1da177e4
LT
371 }
372
5dde383e 373 if (base <= limit) {
1da177e4 374 res->flags = (io_base_lo & PCI_IO_RANGE_TYPE_MASK) | IORESOURCE_IO;
5bfa14ed 375 region.start = base;
2b28ae19 376 region.end = limit + io_granularity - 1;
fc279850 377 pcibios_bus_to_resource(dev->bus, res, &region);
c7dabef8 378 dev_printk(KERN_DEBUG, &dev->dev, " bridge window %pR\n", res);
1da177e4 379 }
fa27b2d1
BH
380}
381
15856ad5 382static void pci_read_bridge_mmio(struct pci_bus *child)
fa27b2d1
BH
383{
384 struct pci_dev *dev = child->self;
385 u16 mem_base_lo, mem_limit_lo;
386 unsigned long base, limit;
5bfa14ed 387 struct pci_bus_region region;
fa27b2d1 388 struct resource *res;
1da177e4
LT
389
390 res = child->resource[1];
391 pci_read_config_word(dev, PCI_MEMORY_BASE, &mem_base_lo);
392 pci_read_config_word(dev, PCI_MEMORY_LIMIT, &mem_limit_lo);
8f38eaca
BH
393 base = ((unsigned long) mem_base_lo & PCI_MEMORY_RANGE_MASK) << 16;
394 limit = ((unsigned long) mem_limit_lo & PCI_MEMORY_RANGE_MASK) << 16;
5dde383e 395 if (base <= limit) {
1da177e4 396 res->flags = (mem_base_lo & PCI_MEMORY_RANGE_TYPE_MASK) | IORESOURCE_MEM;
5bfa14ed
BH
397 region.start = base;
398 region.end = limit + 0xfffff;
fc279850 399 pcibios_bus_to_resource(dev->bus, res, &region);
c7dabef8 400 dev_printk(KERN_DEBUG, &dev->dev, " bridge window %pR\n", res);
1da177e4 401 }
fa27b2d1
BH
402}
403
15856ad5 404static void pci_read_bridge_mmio_pref(struct pci_bus *child)
fa27b2d1
BH
405{
406 struct pci_dev *dev = child->self;
407 u16 mem_base_lo, mem_limit_lo;
7fc986d8 408 u64 base64, limit64;
3a9ad0b4 409 pci_bus_addr_t base, limit;
5bfa14ed 410 struct pci_bus_region region;
fa27b2d1 411 struct resource *res;
1da177e4
LT
412
413 res = child->resource[2];
414 pci_read_config_word(dev, PCI_PREF_MEMORY_BASE, &mem_base_lo);
415 pci_read_config_word(dev, PCI_PREF_MEMORY_LIMIT, &mem_limit_lo);
7fc986d8
YL
416 base64 = (mem_base_lo & PCI_PREF_RANGE_MASK) << 16;
417 limit64 = (mem_limit_lo & PCI_PREF_RANGE_MASK) << 16;
1da177e4
LT
418
419 if ((mem_base_lo & PCI_PREF_RANGE_TYPE_MASK) == PCI_PREF_RANGE_TYPE_64) {
420 u32 mem_base_hi, mem_limit_hi;
8f38eaca 421
1da177e4
LT
422 pci_read_config_dword(dev, PCI_PREF_BASE_UPPER32, &mem_base_hi);
423 pci_read_config_dword(dev, PCI_PREF_LIMIT_UPPER32, &mem_limit_hi);
424
425 /*
426 * Some bridges set the base > limit by default, and some
427 * (broken) BIOSes do not initialize them. If we find
428 * this, just assume they are not being used.
429 */
430 if (mem_base_hi <= mem_limit_hi) {
7fc986d8
YL
431 base64 |= (u64) mem_base_hi << 32;
432 limit64 |= (u64) mem_limit_hi << 32;
1da177e4
LT
433 }
434 }
7fc986d8 435
3a9ad0b4
YL
436 base = (pci_bus_addr_t) base64;
437 limit = (pci_bus_addr_t) limit64;
7fc986d8
YL
438
439 if (base != base64) {
440 dev_err(&dev->dev, "can't handle bridge window above 4GB (bus address %#010llx)\n",
441 (unsigned long long) base64);
442 return;
443 }
444
5dde383e 445 if (base <= limit) {
1f82de10
YL
446 res->flags = (mem_base_lo & PCI_PREF_RANGE_TYPE_MASK) |
447 IORESOURCE_MEM | IORESOURCE_PREFETCH;
448 if (res->flags & PCI_PREF_RANGE_TYPE_64)
449 res->flags |= IORESOURCE_MEM_64;
5bfa14ed
BH
450 region.start = base;
451 region.end = limit + 0xfffff;
fc279850 452 pcibios_bus_to_resource(dev->bus, res, &region);
c7dabef8 453 dev_printk(KERN_DEBUG, &dev->dev, " bridge window %pR\n", res);
1da177e4
LT
454 }
455}
456
15856ad5 457void pci_read_bridge_bases(struct pci_bus *child)
fa27b2d1
BH
458{
459 struct pci_dev *dev = child->self;
2fe2abf8 460 struct resource *res;
fa27b2d1
BH
461 int i;
462
463 if (pci_is_root_bus(child)) /* It's a host bus, nothing to read */
464 return;
465
b918c62e
YL
466 dev_info(&dev->dev, "PCI bridge to %pR%s\n",
467 &child->busn_res,
fa27b2d1
BH
468 dev->transparent ? " (subtractive decode)" : "");
469
2fe2abf8
BH
470 pci_bus_remove_resources(child);
471 for (i = 0; i < PCI_BRIDGE_RESOURCE_NUM; i++)
472 child->resource[i] = &dev->resource[PCI_BRIDGE_RESOURCES+i];
473
fa27b2d1
BH
474 pci_read_bridge_io(child);
475 pci_read_bridge_mmio(child);
476 pci_read_bridge_mmio_pref(child);
2adf7516
BH
477
478 if (dev->transparent) {
2fe2abf8 479 pci_bus_for_each_resource(child->parent, res, i) {
d739a099 480 if (res && res->flags) {
2fe2abf8
BH
481 pci_bus_add_resource(child, res,
482 PCI_SUBTRACTIVE_DECODE);
2adf7516
BH
483 dev_printk(KERN_DEBUG, &dev->dev,
484 " bridge window %pR (subtractive decode)\n",
2fe2abf8
BH
485 res);
486 }
2adf7516
BH
487 }
488 }
fa27b2d1
BH
489}
490
670ba0c8 491static struct pci_bus *pci_alloc_bus(struct pci_bus *parent)
1da177e4
LT
492{
493 struct pci_bus *b;
494
f5afe806 495 b = kzalloc(sizeof(*b), GFP_KERNEL);
05013486
BH
496 if (!b)
497 return NULL;
498
499 INIT_LIST_HEAD(&b->node);
500 INIT_LIST_HEAD(&b->children);
501 INIT_LIST_HEAD(&b->devices);
502 INIT_LIST_HEAD(&b->slots);
503 INIT_LIST_HEAD(&b->resources);
504 b->max_bus_speed = PCI_SPEED_UNKNOWN;
505 b->cur_bus_speed = PCI_SPEED_UNKNOWN;
670ba0c8
CM
506#ifdef CONFIG_PCI_DOMAINS_GENERIC
507 if (parent)
508 b->domain_nr = parent->domain_nr;
509#endif
1da177e4
LT
510 return b;
511}
512
5c3f18cc 513static void devm_pci_release_host_bridge_dev(struct device *dev)
70efde2a
JL
514{
515 struct pci_host_bridge *bridge = to_pci_host_bridge(dev);
516
517 if (bridge->release_fn)
518 bridge->release_fn(bridge);
52b30c56
JK
519
520 pci_free_resource_list(&bridge->windows);
5c3f18cc 521}
70efde2a 522
5c3f18cc
LP
/* Release callback for kmalloc'ed host bridges: also frees the struct. */
static void pci_release_host_bridge_dev(struct device *dev)
{
	devm_pci_release_host_bridge_dev(dev);
	kfree(to_pci_host_bridge(dev));
}
528
a52d1443 529struct pci_host_bridge *pci_alloc_host_bridge(size_t priv)
7b543663
YL
530{
531 struct pci_host_bridge *bridge;
532
59094065 533 bridge = kzalloc(sizeof(*bridge) + priv, GFP_KERNEL);
05013486
BH
534 if (!bridge)
535 return NULL;
7b543663 536
05013486 537 INIT_LIST_HEAD(&bridge->windows);
a1c0050a 538 bridge->dev.release = pci_release_host_bridge_dev;
37d6a0a6 539
7b543663
YL
540 return bridge;
541}
a52d1443 542EXPORT_SYMBOL(pci_alloc_host_bridge);
7b543663 543
5c3f18cc
LP
544struct pci_host_bridge *devm_pci_alloc_host_bridge(struct device *dev,
545 size_t priv)
546{
547 struct pci_host_bridge *bridge;
548
549 bridge = devm_kzalloc(dev, sizeof(*bridge) + priv, GFP_KERNEL);
550 if (!bridge)
551 return NULL;
552
553 INIT_LIST_HEAD(&bridge->windows);
554 bridge->dev.release = devm_pci_release_host_bridge_dev;
555
556 return bridge;
557}
558EXPORT_SYMBOL(devm_pci_alloc_host_bridge);
559
dff79b91
LP
560void pci_free_host_bridge(struct pci_host_bridge *bridge)
561{
562 pci_free_resource_list(&bridge->windows);
563
564 kfree(bridge);
565}
566EXPORT_SYMBOL(pci_free_host_bridge);
567
0b950f0f 568static const unsigned char pcix_bus_speed[] = {
9be60ca0
MW
569 PCI_SPEED_UNKNOWN, /* 0 */
570 PCI_SPEED_66MHz_PCIX, /* 1 */
571 PCI_SPEED_100MHz_PCIX, /* 2 */
572 PCI_SPEED_133MHz_PCIX, /* 3 */
573 PCI_SPEED_UNKNOWN, /* 4 */
574 PCI_SPEED_66MHz_PCIX_ECC, /* 5 */
575 PCI_SPEED_100MHz_PCIX_ECC, /* 6 */
576 PCI_SPEED_133MHz_PCIX_ECC, /* 7 */
577 PCI_SPEED_UNKNOWN, /* 8 */
578 PCI_SPEED_66MHz_PCIX_266, /* 9 */
579 PCI_SPEED_100MHz_PCIX_266, /* A */
580 PCI_SPEED_133MHz_PCIX_266, /* B */
581 PCI_SPEED_UNKNOWN, /* C */
582 PCI_SPEED_66MHz_PCIX_533, /* D */
583 PCI_SPEED_100MHz_PCIX_533, /* E */
584 PCI_SPEED_133MHz_PCIX_533 /* F */
585};
586
343e51ae 587const unsigned char pcie_link_speed[] = {
3749c51a
MW
588 PCI_SPEED_UNKNOWN, /* 0 */
589 PCIE_SPEED_2_5GT, /* 1 */
590 PCIE_SPEED_5_0GT, /* 2 */
9dfd97fe 591 PCIE_SPEED_8_0GT, /* 3 */
ac924662 592 PCIE_SPEED_16_0GT, /* 4 */
3749c51a
MW
593 PCI_SPEED_UNKNOWN, /* 5 */
594 PCI_SPEED_UNKNOWN, /* 6 */
595 PCI_SPEED_UNKNOWN, /* 7 */
596 PCI_SPEED_UNKNOWN, /* 8 */
597 PCI_SPEED_UNKNOWN, /* 9 */
598 PCI_SPEED_UNKNOWN, /* A */
599 PCI_SPEED_UNKNOWN, /* B */
600 PCI_SPEED_UNKNOWN, /* C */
601 PCI_SPEED_UNKNOWN, /* D */
602 PCI_SPEED_UNKNOWN, /* E */
603 PCI_SPEED_UNKNOWN /* F */
604};
605
606void pcie_update_link_speed(struct pci_bus *bus, u16 linksta)
607{
231afea1 608 bus->cur_bus_speed = pcie_link_speed[linksta & PCI_EXP_LNKSTA_CLS];
3749c51a
MW
609}
610EXPORT_SYMBOL_GPL(pcie_update_link_speed);
611
45b4cdd5
MW
612static unsigned char agp_speeds[] = {
613 AGP_UNKNOWN,
614 AGP_1X,
615 AGP_2X,
616 AGP_4X,
617 AGP_8X
618};
619
620static enum pci_bus_speed agp_speed(int agp3, int agpstat)
621{
622 int index = 0;
623
624 if (agpstat & 4)
625 index = 3;
626 else if (agpstat & 2)
627 index = 2;
628 else if (agpstat & 1)
629 index = 1;
630 else
631 goto out;
f7625980 632
45b4cdd5
MW
633 if (agp3) {
634 index += 2;
635 if (index == 5)
636 index = 0;
637 }
638
639 out:
640 return agp_speeds[index];
641}
642
9be60ca0
MW
643static void pci_set_bus_speed(struct pci_bus *bus)
644{
645 struct pci_dev *bridge = bus->self;
646 int pos;
647
45b4cdd5
MW
648 pos = pci_find_capability(bridge, PCI_CAP_ID_AGP);
649 if (!pos)
650 pos = pci_find_capability(bridge, PCI_CAP_ID_AGP3);
651 if (pos) {
652 u32 agpstat, agpcmd;
653
654 pci_read_config_dword(bridge, pos + PCI_AGP_STATUS, &agpstat);
655 bus->max_bus_speed = agp_speed(agpstat & 8, agpstat & 7);
656
657 pci_read_config_dword(bridge, pos + PCI_AGP_COMMAND, &agpcmd);
658 bus->cur_bus_speed = agp_speed(agpstat & 8, agpcmd & 7);
659 }
660
9be60ca0
MW
661 pos = pci_find_capability(bridge, PCI_CAP_ID_PCIX);
662 if (pos) {
663 u16 status;
664 enum pci_bus_speed max;
9be60ca0 665
7793eeab
BH
666 pci_read_config_word(bridge, pos + PCI_X_BRIDGE_SSTATUS,
667 &status);
668
669 if (status & PCI_X_SSTATUS_533MHZ) {
9be60ca0 670 max = PCI_SPEED_133MHz_PCIX_533;
7793eeab 671 } else if (status & PCI_X_SSTATUS_266MHZ) {
9be60ca0 672 max = PCI_SPEED_133MHz_PCIX_266;
7793eeab 673 } else if (status & PCI_X_SSTATUS_133MHZ) {
3c78bc61 674 if ((status & PCI_X_SSTATUS_VERS) == PCI_X_SSTATUS_V2)
9be60ca0 675 max = PCI_SPEED_133MHz_PCIX_ECC;
3c78bc61 676 else
9be60ca0 677 max = PCI_SPEED_133MHz_PCIX;
9be60ca0
MW
678 } else {
679 max = PCI_SPEED_66MHz_PCIX;
680 }
681
682 bus->max_bus_speed = max;
7793eeab
BH
683 bus->cur_bus_speed = pcix_bus_speed[
684 (status & PCI_X_SSTATUS_FREQ) >> 6];
9be60ca0
MW
685
686 return;
687 }
688
fdfe1511 689 if (pci_is_pcie(bridge)) {
9be60ca0
MW
690 u32 linkcap;
691 u16 linksta;
692
59875ae4 693 pcie_capability_read_dword(bridge, PCI_EXP_LNKCAP, &linkcap);
231afea1 694 bus->max_bus_speed = pcie_link_speed[linkcap & PCI_EXP_LNKCAP_SLS];
9be60ca0 695
59875ae4 696 pcie_capability_read_word(bridge, PCI_EXP_LNKSTA, &linksta);
9be60ca0
MW
697 pcie_update_link_speed(bus, linksta);
698 }
699}
700
44aa0c65
MZ
/* Resolve the MSI irq_domain for a root bus via OF, ACPI, or fwnode lookup. */
static struct irq_domain *pci_host_bridge_msi_domain(struct pci_bus *bus)
{
	struct irq_domain *d;

	/*
	 * Any firmware interface that can resolve the msi_domain
	 * should be called from here.
	 */
	d = pci_host_bridge_of_msi_domain(bus);
	if (!d)
		d = pci_host_bridge_acpi_msi_domain(bus);

#ifdef CONFIG_PCI_MSI_IRQ_DOMAIN
	/*
	 * If no IRQ domain was found via the OF tree, try looking it up
	 * directly through the fwnode_handle.
	 */
	if (!d) {
		struct fwnode_handle *fwnode = pci_root_bus_fwnode(bus);

		if (fwnode)
			d = irq_find_matching_fwnode(fwnode,
						     DOMAIN_BUS_PCI_MSI);
	}
#endif

	return d;
}
729
730static void pci_set_bus_msi_domain(struct pci_bus *bus)
731{
732 struct irq_domain *d;
38ea72bd 733 struct pci_bus *b;
44aa0c65
MZ
734
735 /*
38ea72bd
AW
736 * The bus can be a root bus, a subordinate bus, or a virtual bus
737 * created by an SR-IOV device. Walk up to the first bridge device
738 * found or derive the domain from the host bridge.
44aa0c65 739 */
38ea72bd
AW
740 for (b = bus, d = NULL; !d && !pci_is_root_bus(b); b = b->parent) {
741 if (b->self)
742 d = dev_get_msi_domain(&b->self->dev);
743 }
744
745 if (!d)
746 d = pci_host_bridge_msi_domain(b);
44aa0c65
MZ
747
748 dev_set_msi_domain(&bus->dev, d);
749}
750
cea9bc0b 751static int pci_register_host_bridge(struct pci_host_bridge *bridge)
37d6a0a6
AB
752{
753 struct device *parent = bridge->dev.parent;
754 struct resource_entry *window, *n;
755 struct pci_bus *bus, *b;
756 resource_size_t offset;
757 LIST_HEAD(resources);
758 struct resource *res;
759 char addr[64], *fmt;
760 const char *name;
761 int err;
762
763 bus = pci_alloc_bus(NULL);
764 if (!bus)
765 return -ENOMEM;
766
767 bridge->bus = bus;
768
769 /* temporarily move resources off the list */
770 list_splice_init(&bridge->windows, &resources);
771 bus->sysdata = bridge->sysdata;
772 bus->msi = bridge->msi;
773 bus->ops = bridge->ops;
774 bus->number = bus->busn_res.start = bridge->busnr;
775#ifdef CONFIG_PCI_DOMAINS_GENERIC
776 bus->domain_nr = pci_bus_find_domain_nr(bus, parent);
777#endif
778
779 b = pci_find_bus(pci_domain_nr(bus), bridge->busnr);
780 if (b) {
781 /* If we already got to this bus through a different bridge, ignore it */
782 dev_dbg(&b->dev, "bus already known\n");
783 err = -EEXIST;
784 goto free;
785 }
786
787 dev_set_name(&bridge->dev, "pci%04x:%02x", pci_domain_nr(bus),
788 bridge->busnr);
789
790 err = pcibios_root_bridge_prepare(bridge);
791 if (err)
792 goto free;
793
794 err = device_register(&bridge->dev);
795 if (err)
796 put_device(&bridge->dev);
797
798 bus->bridge = get_device(&bridge->dev);
799 device_enable_async_suspend(bus->bridge);
800 pci_set_bus_of_node(bus);
801 pci_set_bus_msi_domain(bus);
802
803 if (!parent)
804 set_dev_node(bus->bridge, pcibus_to_node(bus));
805
806 bus->dev.class = &pcibus_class;
807 bus->dev.parent = bus->bridge;
808
809 dev_set_name(&bus->dev, "%04x:%02x", pci_domain_nr(bus), bus->number);
810 name = dev_name(&bus->dev);
811
812 err = device_register(&bus->dev);
813 if (err)
814 goto unregister;
815
816 pcibios_add_bus(bus);
817
818 /* Create legacy_io and legacy_mem files for this bus */
819 pci_create_legacy_files(bus);
820
821 if (parent)
822 dev_info(parent, "PCI host bridge to bus %s\n", name);
823 else
824 pr_info("PCI host bridge to bus %s\n", name);
825
826 /* Add initial resources to the bus */
827 resource_list_for_each_entry_safe(window, n, &resources) {
828 list_move_tail(&window->node, &bridge->windows);
829 offset = window->offset;
830 res = window->res;
831
832 if (res->flags & IORESOURCE_BUS)
833 pci_bus_insert_busn_res(bus, bus->number, res->end);
834 else
835 pci_bus_add_resource(bus, res, 0);
836
837 if (offset) {
838 if (resource_type(res) == IORESOURCE_IO)
839 fmt = " (bus address [%#06llx-%#06llx])";
840 else
841 fmt = " (bus address [%#010llx-%#010llx])";
842
843 snprintf(addr, sizeof(addr), fmt,
844 (unsigned long long)(res->start - offset),
845 (unsigned long long)(res->end - offset));
846 } else
847 addr[0] = '\0';
848
849 dev_info(&bus->dev, "root bus resource %pR%s\n", res, addr);
850 }
851
852 down_write(&pci_bus_sem);
853 list_add_tail(&bus->node, &pci_root_buses);
854 up_write(&pci_bus_sem);
855
856 return 0;
857
858unregister:
859 put_device(&bridge->dev);
860 device_unregister(&bridge->dev);
861
862free:
863 kfree(bus);
864 return err;
865}
866
cbd4e055
AB
867static struct pci_bus *pci_alloc_child_bus(struct pci_bus *parent,
868 struct pci_dev *bridge, int busnr)
1da177e4
LT
869{
870 struct pci_bus *child;
871 int i;
4f535093 872 int ret;
1da177e4
LT
873
874 /*
875 * Allocate a new bus, and inherit stuff from the parent..
876 */
670ba0c8 877 child = pci_alloc_bus(parent);
1da177e4
LT
878 if (!child)
879 return NULL;
880
1da177e4
LT
881 child->parent = parent;
882 child->ops = parent->ops;
0cbdcfcf 883 child->msi = parent->msi;
1da177e4 884 child->sysdata = parent->sysdata;
6e325a62 885 child->bus_flags = parent->bus_flags;
1da177e4 886
fd7d1ced 887 /* initialize some portions of the bus device, but don't register it
4f535093 888 * now as the parent is not properly set up yet.
fd7d1ced
GKH
889 */
890 child->dev.class = &pcibus_class;
1a927133 891 dev_set_name(&child->dev, "%04x:%02x", pci_domain_nr(child), busnr);
1da177e4
LT
892
893 /*
894 * Set up the primary, secondary and subordinate
895 * bus numbers.
896 */
b918c62e
YL
897 child->number = child->busn_res.start = busnr;
898 child->primary = parent->busn_res.start;
899 child->busn_res.end = 0xff;
1da177e4 900
4f535093
YL
901 if (!bridge) {
902 child->dev.parent = parent->bridge;
903 goto add_dev;
904 }
3789fa8a
YZ
905
906 child->self = bridge;
907 child->bridge = get_device(&bridge->dev);
4f535093 908 child->dev.parent = child->bridge;
98d9f30c 909 pci_set_bus_of_node(child);
9be60ca0
MW
910 pci_set_bus_speed(child);
911
1da177e4 912 /* Set up default resource pointers and names.. */
fde09c6d 913 for (i = 0; i < PCI_BRIDGE_RESOURCE_NUM; i++) {
1da177e4
LT
914 child->resource[i] = &bridge->resource[PCI_BRIDGE_RESOURCES+i];
915 child->resource[i]->name = child->name;
916 }
917 bridge->subordinate = child;
918
4f535093 919add_dev:
44aa0c65 920 pci_set_bus_msi_domain(child);
4f535093
YL
921 ret = device_register(&child->dev);
922 WARN_ON(ret < 0);
923
10a95747
JL
924 pcibios_add_bus(child);
925
057bd2e0
TR
926 if (child->ops->add_bus) {
927 ret = child->ops->add_bus(child);
928 if (WARN_ON(ret < 0))
929 dev_err(&child->dev, "failed to add bus: %d\n", ret);
930 }
931
4f535093
YL
932 /* Create legacy_io and legacy_mem files for this bus */
933 pci_create_legacy_files(child);
934
1da177e4
LT
935 return child;
936}
937
3c78bc61
RD
938struct pci_bus *pci_add_new_bus(struct pci_bus *parent, struct pci_dev *dev,
939 int busnr)
1da177e4
LT
940{
941 struct pci_bus *child;
942
943 child = pci_alloc_child_bus(parent, dev, busnr);
e4ea9bb7 944 if (child) {
d71374da 945 down_write(&pci_bus_sem);
1da177e4 946 list_add_tail(&child->node, &parent->children);
d71374da 947 up_write(&pci_bus_sem);
e4ea9bb7 948 }
1da177e4
LT
949 return child;
950}
b7fe9434 951EXPORT_SYMBOL(pci_add_new_bus);
1da177e4 952
f3dbd802
RJ
953static void pci_enable_crs(struct pci_dev *pdev)
954{
955 u16 root_cap = 0;
956
957 /* Enable CRS Software Visibility if supported */
958 pcie_capability_read_word(pdev, PCI_EXP_RTCAP, &root_cap);
959 if (root_cap & PCI_EXP_RTCAP_CRSVIS)
960 pcie_capability_set_word(pdev, PCI_EXP_RTCTL,
961 PCI_EXP_RTCTL_CRSSVE);
962}
963
1c02ea81
MW
964static unsigned int pci_scan_child_bus_extend(struct pci_bus *bus,
965 unsigned int available_buses);
966
1da177e4 967/*
1c02ea81
MW
968 * pci_scan_bridge_extend() - Scan buses behind a bridge
969 * @bus: Parent bus the bridge is on
970 * @dev: Bridge itself
971 * @max: Starting subordinate number of buses behind this bridge
972 * @available_buses: Total number of buses available for this bridge and
973 * the devices below. After the minimal bus space has
974 * been allocated the remaining buses will be
975 * distributed equally between hotplug-capable bridges.
976 * @pass: Either %0 (scan already configured bridges) or %1 (scan bridges
977 * that need to be reconfigured.
978 *
1da177e4
LT
979 * If it's a bridge, configure it and scan the bus behind it.
980 * For CardBus bridges, we don't scan behind as the devices will
981 * be handled by the bridge driver itself.
982 *
983 * We need to process bridges in two passes -- first we scan those
984 * already configured by the BIOS and after we are done with all of
985 * them, we proceed to assigning numbers to the remaining buses in
986 * order to avoid overlaps between old and new bus numbers.
987 */
1c02ea81
MW
988static int pci_scan_bridge_extend(struct pci_bus *bus, struct pci_dev *dev,
989 int max, unsigned int available_buses,
990 int pass)
1da177e4
LT
991{
992 struct pci_bus *child;
993 int is_cardbus = (dev->hdr_type == PCI_HEADER_TYPE_CARDBUS);
49887941 994 u32 buses, i, j = 0;
1da177e4 995 u16 bctl;
99ddd552 996 u8 primary, secondary, subordinate;
a1c19894 997 int broken = 0;
1da177e4 998
d963f651
MW
999 /*
1000 * Make sure the bridge is powered on to be able to access config
1001 * space of devices below it.
1002 */
1003 pm_runtime_get_sync(&dev->dev);
1004
1da177e4 1005 pci_read_config_dword(dev, PCI_PRIMARY_BUS, &buses);
99ddd552
BH
1006 primary = buses & 0xFF;
1007 secondary = (buses >> 8) & 0xFF;
1008 subordinate = (buses >> 16) & 0xFF;
1da177e4 1009
99ddd552
BH
1010 dev_dbg(&dev->dev, "scanning [bus %02x-%02x] behind bridge, pass %d\n",
1011 secondary, subordinate, pass);
1da177e4 1012
71f6bd4a
YL
1013 if (!primary && (primary != bus->number) && secondary && subordinate) {
1014 dev_warn(&dev->dev, "Primary bus is hard wired to 0\n");
1015 primary = bus->number;
1016 }
1017
a1c19894
BH
1018 /* Check if setup is sensible at all */
1019 if (!pass &&
1965f66e 1020 (primary != bus->number || secondary <= bus->number ||
12d87069 1021 secondary > subordinate)) {
1965f66e
YL
1022 dev_info(&dev->dev, "bridge configuration invalid ([bus %02x-%02x]), reconfiguring\n",
1023 secondary, subordinate);
a1c19894
BH
1024 broken = 1;
1025 }
1026
1da177e4 1027 /* Disable MasterAbortMode during probing to avoid reporting
f7625980 1028 of bus errors (in some architectures) */
1da177e4
LT
1029 pci_read_config_word(dev, PCI_BRIDGE_CONTROL, &bctl);
1030 pci_write_config_word(dev, PCI_BRIDGE_CONTROL,
1031 bctl & ~PCI_BRIDGE_CTL_MASTER_ABORT);
1032
f3dbd802
RJ
1033 pci_enable_crs(dev);
1034
99ddd552
BH
1035 if ((secondary || subordinate) && !pcibios_assign_all_busses() &&
1036 !is_cardbus && !broken) {
1037 unsigned int cmax;
1da177e4
LT
1038 /*
1039 * Bus already configured by firmware, process it in the first
1040 * pass and just note the configuration.
1041 */
1042 if (pass)
bbe8f9a3 1043 goto out;
1da177e4
LT
1044
1045 /*
2ed85823
AN
1046 * The bus might already exist for two reasons: Either we are
1047 * rescanning the bus or the bus is reachable through more than
1048 * one bridge. The second case can happen with the i450NX
1049 * chipset.
1da177e4 1050 */
99ddd552 1051 child = pci_find_bus(pci_domain_nr(bus), secondary);
74710ded 1052 if (!child) {
99ddd552 1053 child = pci_add_new_bus(bus, dev, secondary);
74710ded
AC
1054 if (!child)
1055 goto out;
99ddd552 1056 child->primary = primary;
bc76b731 1057 pci_bus_insert_busn_res(child, secondary, subordinate);
74710ded 1058 child->bridge_ctl = bctl;
1da177e4
LT
1059 }
1060
1da177e4 1061 cmax = pci_scan_child_bus(child);
c95b0bd6
AN
1062 if (cmax > subordinate)
1063 dev_warn(&dev->dev, "bridge has subordinate %02x but max busn %02x\n",
1064 subordinate, cmax);
1065 /* subordinate should equal child->busn_res.end */
1066 if (subordinate > max)
1067 max = subordinate;
1da177e4
LT
1068 } else {
1069 /*
1070 * We need to assign a number to this bus which we always
1071 * do in the second pass.
1072 */
12f44f46 1073 if (!pass) {
619c8c31 1074 if (pcibios_assign_all_busses() || broken || is_cardbus)
12f44f46
IK
1075 /* Temporarily disable forwarding of the
1076 configuration cycles on all bridges in
1077 this bus segment to avoid possible
1078 conflicts in the second pass between two
1079 bridges programmed with overlapping
1080 bus ranges. */
1081 pci_write_config_dword(dev, PCI_PRIMARY_BUS,
1082 buses & ~0xffffff);
bbe8f9a3 1083 goto out;
12f44f46 1084 }
1da177e4
LT
1085
1086 /* Clear errors */
1087 pci_write_config_word(dev, PCI_STATUS, 0xffff);
1088
7a0b33d4
BH
1089 /* Prevent assigning a bus number that already exists.
1090 * This can happen when a bridge is hot-plugged, so in
1091 * this case we only re-scan this bus. */
b1a98b69
TC
1092 child = pci_find_bus(pci_domain_nr(bus), max+1);
1093 if (!child) {
9a4d7d87 1094 child = pci_add_new_bus(bus, dev, max+1);
b1a98b69
TC
1095 if (!child)
1096 goto out;
a20c7f36
MW
1097 pci_bus_insert_busn_res(child, max+1,
1098 bus->busn_res.end);
b1a98b69 1099 }
9a4d7d87 1100 max++;
1c02ea81
MW
1101 if (available_buses)
1102 available_buses--;
1103
1da177e4
LT
1104 buses = (buses & 0xff000000)
1105 | ((unsigned int)(child->primary) << 0)
b918c62e
YL
1106 | ((unsigned int)(child->busn_res.start) << 8)
1107 | ((unsigned int)(child->busn_res.end) << 16);
1da177e4
LT
1108
1109 /*
1110 * yenta.c forces a secondary latency timer of 176.
1111 * Copy that behaviour here.
1112 */
1113 if (is_cardbus) {
1114 buses &= ~0xff000000;
1115 buses |= CARDBUS_LATENCY_TIMER << 24;
1116 }
7c867c88 1117
1da177e4
LT
1118 /*
1119 * We need to blast all three values with a single write.
1120 */
1121 pci_write_config_dword(dev, PCI_PRIMARY_BUS, buses);
1122
1123 if (!is_cardbus) {
11949255 1124 child->bridge_ctl = bctl;
1c02ea81 1125 max = pci_scan_child_bus_extend(child, available_buses);
1da177e4
LT
1126 } else {
1127 /*
1128 * For CardBus bridges, we leave 4 bus numbers
1129 * as cards with a PCI-to-PCI bridge can be
1130 * inserted later.
1131 */
3c78bc61 1132 for (i = 0; i < CARDBUS_RESERVE_BUSNR; i++) {
49887941 1133 struct pci_bus *parent = bus;
cc57450f
RS
1134 if (pci_find_bus(pci_domain_nr(bus),
1135 max+i+1))
1136 break;
49887941
DB
1137 while (parent->parent) {
1138 if ((!pcibios_assign_all_busses()) &&
b918c62e
YL
1139 (parent->busn_res.end > max) &&
1140 (parent->busn_res.end <= max+i)) {
49887941
DB
1141 j = 1;
1142 }
1143 parent = parent->parent;
1144 }
1145 if (j) {
1146 /*
1147 * Often, there are two cardbus bridges
1148 * -- try to leave one valid bus number
1149 * for each one.
1150 */
1151 i /= 2;
1152 break;
1153 }
1154 }
cc57450f 1155 max += i;
1da177e4
LT
1156 }
1157 /*
1158 * Set the subordinate bus number to its real value.
1159 */
bc76b731 1160 pci_bus_update_busn_res_end(child, max);
1da177e4
LT
1161 pci_write_config_byte(dev, PCI_SUBORDINATE_BUS, max);
1162 }
1163
cb3576fa
GH
1164 sprintf(child->name,
1165 (is_cardbus ? "PCI CardBus %04x:%02x" : "PCI Bus %04x:%02x"),
1166 pci_domain_nr(bus), child->number);
1da177e4 1167
d55bef51 1168 /* Has only triggered on CardBus, fixup is in yenta_socket */
49887941 1169 while (bus->parent) {
b918c62e
YL
1170 if ((child->busn_res.end > bus->busn_res.end) ||
1171 (child->number > bus->busn_res.end) ||
49887941 1172 (child->number < bus->number) ||
b918c62e 1173 (child->busn_res.end < bus->number)) {
227f0647 1174 dev_info(&child->dev, "%pR %s hidden behind%s bridge %s %pR\n",
b918c62e
YL
1175 &child->busn_res,
1176 (bus->number > child->busn_res.end &&
1177 bus->busn_res.end < child->number) ?
a6f29a98
JP
1178 "wholly" : "partially",
1179 bus->self->transparent ? " transparent" : "",
865df576 1180 dev_name(&bus->dev),
b918c62e 1181 &bus->busn_res);
49887941
DB
1182 }
1183 bus = bus->parent;
1184 }
1185
bbe8f9a3
RB
1186out:
1187 pci_write_config_word(dev, PCI_BRIDGE_CONTROL, bctl);
1188
d963f651
MW
1189 pm_runtime_put(&dev->dev);
1190
1da177e4
LT
1191 return max;
1192}
1c02ea81
MW
1193
/*
 * pci_scan_bridge() - Scan buses behind a bridge
 * @bus: Parent bus the bridge is on
 * @dev: Bridge itself
 * @max: Starting subordinate number of buses behind this bridge
 * @pass: Either %0 (scan already configured bridges) or %1 (scan bridges
 *        that need to be reconfigured.
 *
 * If it's a bridge, configure it and scan the bus behind it.
 * For CardBus bridges, we don't scan behind as the devices will
 * be handled by the bridge driver itself.
 *
 * We need to process bridges in two passes -- first we scan those
 * already configured by the BIOS and after we are done with all of
 * them, we proceed to assigning numbers to the remaining buses in
 * order to avoid overlaps between old and new bus numbers.
 */
int pci_scan_bridge(struct pci_bus *bus, struct pci_dev *dev, int max, int pass)
{
	/* Thin wrapper: no extra buses are reserved for distribution */
	return pci_scan_bridge_extend(bus, dev, max, 0, pass);
}
EXPORT_SYMBOL(pci_scan_bridge);
1da177e4
LT
1216
1217/*
1218 * Read interrupt line and base address registers.
1219 * The architecture-dependent code can tweak these, of course.
1220 */
1221static void pci_read_irq(struct pci_dev *dev)
1222{
1223 unsigned char irq;
1224
1225 pci_read_config_byte(dev, PCI_INTERRUPT_PIN, &irq);
ffeff788 1226 dev->pin = irq;
1da177e4
LT
1227 if (irq)
1228 pci_read_config_byte(dev, PCI_INTERRUPT_LINE, &irq);
1229 dev->irq = irq;
1230}
1231
bb209c82 1232void set_pcie_port_type(struct pci_dev *pdev)
480b93b7
YZ
1233{
1234 int pos;
1235 u16 reg16;
d0751b98
YW
1236 int type;
1237 struct pci_dev *parent;
480b93b7
YZ
1238
1239 pos = pci_find_capability(pdev, PCI_CAP_ID_EXP);
1240 if (!pos)
1241 return;
51ebfc92 1242
0efea000 1243 pdev->pcie_cap = pos;
480b93b7 1244 pci_read_config_word(pdev, pos + PCI_EXP_FLAGS, &reg16);
786e2288 1245 pdev->pcie_flags_reg = reg16;
b03e7495
JM
1246 pci_read_config_word(pdev, pos + PCI_EXP_DEVCAP, &reg16);
1247 pdev->pcie_mpss = reg16 & PCI_EXP_DEVCAP_PAYLOAD;
d0751b98
YW
1248
1249 /*
51ebfc92
BH
1250 * A Root Port or a PCI-to-PCIe bridge is always the upstream end
1251 * of a Link. No PCIe component has two Links. Two Links are
1252 * connected by a Switch that has a Port on each Link and internal
1253 * logic to connect the two Ports.
d0751b98
YW
1254 */
1255 type = pci_pcie_type(pdev);
51ebfc92
BH
1256 if (type == PCI_EXP_TYPE_ROOT_PORT ||
1257 type == PCI_EXP_TYPE_PCIE_BRIDGE)
d0751b98
YW
1258 pdev->has_secondary_link = 1;
1259 else if (type == PCI_EXP_TYPE_UPSTREAM ||
1260 type == PCI_EXP_TYPE_DOWNSTREAM) {
1261 parent = pci_upstream_bridge(pdev);
b35b1df5
YW
1262
1263 /*
1264 * Usually there's an upstream device (Root Port or Switch
1265 * Downstream Port), but we can't assume one exists.
1266 */
1267 if (parent && !parent->has_secondary_link)
d0751b98
YW
1268 pdev->has_secondary_link = 1;
1269 }
480b93b7
YZ
1270}
1271
bb209c82 1272void set_pcie_hotplug_bridge(struct pci_dev *pdev)
28760489 1273{
28760489
EB
1274 u32 reg32;
1275
59875ae4 1276 pcie_capability_read_dword(pdev, PCI_EXP_SLTCAP, &reg32);
28760489
EB
1277 if (reg32 & PCI_EXP_SLTCAP_HPC)
1278 pdev->is_hotplug_bridge = 1;
1279}
1280
8531e283
LW
1281static void set_pcie_thunderbolt(struct pci_dev *dev)
1282{
1283 int vsec = 0;
1284 u32 header;
1285
1286 while ((vsec = pci_find_next_ext_capability(dev, vsec,
1287 PCI_EXT_CAP_ID_VNDR))) {
1288 pci_read_config_dword(dev, vsec + PCI_VNDR_HEADER, &header);
1289
1290 /* Is the device part of a Thunderbolt controller? */
1291 if (dev->vendor == PCI_VENDOR_ID_INTEL &&
1292 PCI_VNDR_HEADER_ID(header) == PCI_VSEC_ID_INTEL_TBT) {
1293 dev->is_thunderbolt = 1;
1294 return;
1295 }
1296 }
1297}
1298
78916b00
AW
1299/**
1300 * pci_ext_cfg_is_aliased - is ext config space just an alias of std config?
1301 * @dev: PCI device
1302 *
1303 * PCI Express to PCI/PCI-X Bridge Specification, rev 1.0, 4.1.4 says that
1304 * when forwarding a type1 configuration request the bridge must check that
1305 * the extended register address field is zero. The bridge is not permitted
1306 * to forward the transactions and must handle it as an Unsupported Request.
1307 * Some bridges do not follow this rule and simply drop the extended register
1308 * bits, resulting in the standard config space being aliased, every 256
1309 * bytes across the entire configuration space. Test for this condition by
1310 * comparing the first dword of each potential alias to the vendor/device ID.
1311 * Known offenders:
1312 * ASM1083/1085 PCIe-to-PCI Reversible Bridge (1b21:1080, rev 01 & 03)
1313 * AMD/ATI SBx00 PCI to PCI Bridge (1002:4384, rev 40)
1314 */
1315static bool pci_ext_cfg_is_aliased(struct pci_dev *dev)
1316{
1317#ifdef CONFIG_PCI_QUIRKS
1318 int pos;
1319 u32 header, tmp;
1320
1321 pci_read_config_dword(dev, PCI_VENDOR_ID, &header);
1322
1323 for (pos = PCI_CFG_SPACE_SIZE;
1324 pos < PCI_CFG_SPACE_EXP_SIZE; pos += PCI_CFG_SPACE_SIZE) {
1325 if (pci_read_config_dword(dev, pos, &tmp) != PCIBIOS_SUCCESSFUL
1326 || header != tmp)
1327 return false;
1328 }
1329
1330 return true;
1331#else
1332 return false;
1333#endif
1334}
1335
0b950f0f
SH
1336/**
1337 * pci_cfg_space_size - get the configuration space size of the PCI device.
1338 * @dev: PCI device
1339 *
1340 * Regular PCI devices have 256 bytes, but PCI-X 2 and PCI Express devices
1341 * have 4096 bytes. Even if the device is capable, that doesn't mean we can
1342 * access it. Maybe we don't have a way to generate extended config space
1343 * accesses, or the device is behind a reverse Express bridge. So we try
1344 * reading the dword at 0x100 which must either be 0 or a valid extended
1345 * capability header.
1346 */
1347static int pci_cfg_space_size_ext(struct pci_dev *dev)
1348{
1349 u32 status;
1350 int pos = PCI_CFG_SPACE_SIZE;
1351
1352 if (pci_read_config_dword(dev, pos, &status) != PCIBIOS_SUCCESSFUL)
8e5a395a 1353 return PCI_CFG_SPACE_SIZE;
78916b00 1354 if (status == 0xffffffff || pci_ext_cfg_is_aliased(dev))
8e5a395a 1355 return PCI_CFG_SPACE_SIZE;
0b950f0f
SH
1356
1357 return PCI_CFG_SPACE_EXP_SIZE;
0b950f0f
SH
1358}
1359
1360int pci_cfg_space_size(struct pci_dev *dev)
1361{
1362 int pos;
1363 u32 status;
1364 u16 class;
1365
1366 class = dev->class >> 8;
1367 if (class == PCI_CLASS_BRIDGE_HOST)
1368 return pci_cfg_space_size_ext(dev);
1369
8e5a395a
BH
1370 if (pci_is_pcie(dev))
1371 return pci_cfg_space_size_ext(dev);
0b950f0f 1372
8e5a395a
BH
1373 pos = pci_find_capability(dev, PCI_CAP_ID_PCIX);
1374 if (!pos)
1375 return PCI_CFG_SPACE_SIZE;
0b950f0f 1376
8e5a395a
BH
1377 pci_read_config_dword(dev, pos + PCI_X_STATUS, &status);
1378 if (status & (PCI_X_STATUS_266MHZ | PCI_X_STATUS_533MHZ))
1379 return pci_cfg_space_size_ext(dev);
0b950f0f 1380
0b950f0f
SH
1381 return PCI_CFG_SPACE_SIZE;
1382}
1383
01abc2aa 1384#define LEGACY_IO_RESOURCE (IORESOURCE_IO | IORESOURCE_PCI_FIXED)
76e6a1d6 1385
e80e7edc 1386static void pci_msi_setup_pci_dev(struct pci_dev *dev)
1851617c
MT
1387{
1388 /*
1389 * Disable the MSI hardware to avoid screaming interrupts
1390 * during boot. This is the power on reset default so
1391 * usually this should be a noop.
1392 */
1393 dev->msi_cap = pci_find_capability(dev, PCI_CAP_ID_MSI);
1394 if (dev->msi_cap)
1395 pci_msi_set_enable(dev, 0);
1396
1397 dev->msix_cap = pci_find_capability(dev, PCI_CAP_ID_MSIX);
1398 if (dev->msix_cap)
1399 pci_msix_clear_and_set_ctrl(dev, PCI_MSIX_FLAGS_ENABLE, 0);
1400}
1401
99b3c58f
PG
1402/**
1403 * pci_intx_mask_broken - test PCI_COMMAND_INTX_DISABLE writability
1404 * @dev: PCI device
1405 *
1406 * Test whether PCI_COMMAND_INTX_DISABLE is writable for @dev. Check this
1407 * at enumeration-time to avoid modifying PCI_COMMAND at run-time.
1408 */
1409static int pci_intx_mask_broken(struct pci_dev *dev)
1410{
1411 u16 orig, toggle, new;
1412
1413 pci_read_config_word(dev, PCI_COMMAND, &orig);
1414 toggle = orig ^ PCI_COMMAND_INTX_DISABLE;
1415 pci_write_config_word(dev, PCI_COMMAND, toggle);
1416 pci_read_config_word(dev, PCI_COMMAND, &new);
1417
1418 pci_write_config_word(dev, PCI_COMMAND, orig);
1419
1420 /*
1421 * PCI_COMMAND_INTX_DISABLE was reserved and read-only prior to PCI
1422 * r2.3, so strictly speaking, a device is not *broken* if it's not
1423 * writable. But we'll live with the misnomer for now.
1424 */
1425 if (new != toggle)
1426 return 1;
1427 return 0;
1428}
1429
1da177e4
LT
1430/**
1431 * pci_setup_device - fill in class and map information of a device
1432 * @dev: the device structure to fill
1433 *
f7625980 1434 * Initialize the device structure with information about the device's
1da177e4
LT
1435 * vendor,class,memory and IO-space addresses,IRQ lines etc.
1436 * Called at initialisation of the PCI subsystem and by CardBus services.
480b93b7
YZ
1437 * Returns 0 on success and negative if unknown type of device (not normal,
1438 * bridge or CardBus).
1da177e4 1439 */
480b93b7 1440int pci_setup_device(struct pci_dev *dev)
1da177e4
LT
1441{
1442 u32 class;
b84106b4 1443 u16 cmd;
480b93b7 1444 u8 hdr_type;
bc577d2b 1445 int pos = 0;
5bfa14ed
BH
1446 struct pci_bus_region region;
1447 struct resource *res;
480b93b7
YZ
1448
1449 if (pci_read_config_byte(dev, PCI_HEADER_TYPE, &hdr_type))
1450 return -EIO;
1451
1452 dev->sysdata = dev->bus->sysdata;
1453 dev->dev.parent = dev->bus->bridge;
1454 dev->dev.bus = &pci_bus_type;
1455 dev->hdr_type = hdr_type & 0x7f;
1456 dev->multifunction = !!(hdr_type & 0x80);
480b93b7
YZ
1457 dev->error_state = pci_channel_io_normal;
1458 set_pcie_port_type(dev);
1459
017ffe64 1460 pci_dev_assign_slot(dev);
480b93b7
YZ
1461 /* Assume 32-bit PCI; let 64-bit PCI cards (which are far rarer)
1462 set this higher, assuming the system even supports it. */
1463 dev->dma_mask = 0xffffffff;
1da177e4 1464
eebfcfb5
GKH
1465 dev_set_name(&dev->dev, "%04x:%02x:%02x.%d", pci_domain_nr(dev->bus),
1466 dev->bus->number, PCI_SLOT(dev->devfn),
1467 PCI_FUNC(dev->devfn));
1da177e4
LT
1468
1469 pci_read_config_dword(dev, PCI_CLASS_REVISION, &class);
b8a3a521 1470 dev->revision = class & 0xff;
2dd8ba92 1471 dev->class = class >> 8; /* upper 3 bytes */
1da177e4 1472
2dd8ba92
YL
1473 dev_printk(KERN_DEBUG, &dev->dev, "[%04x:%04x] type %02x class %#08x\n",
1474 dev->vendor, dev->device, dev->hdr_type, dev->class);
1da177e4 1475
853346e4
YZ
1476 /* need to have dev->class ready */
1477 dev->cfg_size = pci_cfg_space_size(dev);
1478
8531e283
LW
1479 /* need to have dev->cfg_size ready */
1480 set_pcie_thunderbolt(dev);
1481
1da177e4 1482 /* "Unknown power state" */
3fe9d19f 1483 dev->current_state = PCI_UNKNOWN;
1da177e4
LT
1484
1485 /* Early fixups, before probing the BARs */
1486 pci_fixup_device(pci_fixup_early, dev);
f79b1b14
YZ
1487 /* device class may be changed after fixup */
1488 class = dev->class >> 8;
1da177e4 1489
b84106b4
BH
1490 if (dev->non_compliant_bars) {
1491 pci_read_config_word(dev, PCI_COMMAND, &cmd);
1492 if (cmd & (PCI_COMMAND_IO | PCI_COMMAND_MEMORY)) {
1493 dev_info(&dev->dev, "device has non-compliant BARs; disabling IO/MEM decoding\n");
1494 cmd &= ~PCI_COMMAND_IO;
1495 cmd &= ~PCI_COMMAND_MEMORY;
1496 pci_write_config_word(dev, PCI_COMMAND, cmd);
1497 }
1498 }
1499
99b3c58f
PG
1500 dev->broken_intx_masking = pci_intx_mask_broken(dev);
1501
1da177e4
LT
1502 switch (dev->hdr_type) { /* header type */
1503 case PCI_HEADER_TYPE_NORMAL: /* standard header */
1504 if (class == PCI_CLASS_BRIDGE_PCI)
1505 goto bad;
1506 pci_read_irq(dev);
1507 pci_read_bases(dev, 6, PCI_ROM_ADDRESS);
1508 pci_read_config_word(dev, PCI_SUBSYSTEM_VENDOR_ID, &dev->subsystem_vendor);
1509 pci_read_config_word(dev, PCI_SUBSYSTEM_ID, &dev->subsystem_device);
368c73d4
AC
1510
1511 /*
075eb9e3
BH
1512 * Do the ugly legacy mode stuff here rather than broken chip
1513 * quirk code. Legacy mode ATA controllers have fixed
1514 * addresses. These are not always echoed in BAR0-3, and
1515 * BAR0-3 in a few cases contain junk!
368c73d4
AC
1516 */
1517 if (class == PCI_CLASS_STORAGE_IDE) {
1518 u8 progif;
1519 pci_read_config_byte(dev, PCI_CLASS_PROG, &progif);
1520 if ((progif & 1) == 0) {
5bfa14ed
BH
1521 region.start = 0x1F0;
1522 region.end = 0x1F7;
1523 res = &dev->resource[0];
1524 res->flags = LEGACY_IO_RESOURCE;
fc279850 1525 pcibios_bus_to_resource(dev->bus, res, &region);
075eb9e3
BH
1526 dev_info(&dev->dev, "legacy IDE quirk: reg 0x10: %pR\n",
1527 res);
5bfa14ed
BH
1528 region.start = 0x3F6;
1529 region.end = 0x3F6;
1530 res = &dev->resource[1];
1531 res->flags = LEGACY_IO_RESOURCE;
fc279850 1532 pcibios_bus_to_resource(dev->bus, res, &region);
075eb9e3
BH
1533 dev_info(&dev->dev, "legacy IDE quirk: reg 0x14: %pR\n",
1534 res);
368c73d4
AC
1535 }
1536 if ((progif & 4) == 0) {
5bfa14ed
BH
1537 region.start = 0x170;
1538 region.end = 0x177;
1539 res = &dev->resource[2];
1540 res->flags = LEGACY_IO_RESOURCE;
fc279850 1541 pcibios_bus_to_resource(dev->bus, res, &region);
075eb9e3
BH
1542 dev_info(&dev->dev, "legacy IDE quirk: reg 0x18: %pR\n",
1543 res);
5bfa14ed
BH
1544 region.start = 0x376;
1545 region.end = 0x376;
1546 res = &dev->resource[3];
1547 res->flags = LEGACY_IO_RESOURCE;
fc279850 1548 pcibios_bus_to_resource(dev->bus, res, &region);
075eb9e3
BH
1549 dev_info(&dev->dev, "legacy IDE quirk: reg 0x1c: %pR\n",
1550 res);
368c73d4
AC
1551 }
1552 }
1da177e4
LT
1553 break;
1554
1555 case PCI_HEADER_TYPE_BRIDGE: /* bridge header */
1556 if (class != PCI_CLASS_BRIDGE_PCI)
1557 goto bad;
1558 /* The PCI-to-PCI bridge spec requires that subtractive
1559 decoding (i.e. transparent) bridge must have programming
f7625980 1560 interface code of 0x01. */
3efd273b 1561 pci_read_irq(dev);
1da177e4
LT
1562 dev->transparent = ((dev->class & 0xff) == 1);
1563 pci_read_bases(dev, 2, PCI_ROM_ADDRESS1);
28760489 1564 set_pcie_hotplug_bridge(dev);
bc577d2b
GB
1565 pos = pci_find_capability(dev, PCI_CAP_ID_SSVID);
1566 if (pos) {
1567 pci_read_config_word(dev, pos + PCI_SSVID_VENDOR_ID, &dev->subsystem_vendor);
1568 pci_read_config_word(dev, pos + PCI_SSVID_DEVICE_ID, &dev->subsystem_device);
1569 }
1da177e4
LT
1570 break;
1571
1572 case PCI_HEADER_TYPE_CARDBUS: /* CardBus bridge header */
1573 if (class != PCI_CLASS_BRIDGE_CARDBUS)
1574 goto bad;
1575 pci_read_irq(dev);
1576 pci_read_bases(dev, 1, 0);
1577 pci_read_config_word(dev, PCI_CB_SUBSYSTEM_VENDOR_ID, &dev->subsystem_vendor);
1578 pci_read_config_word(dev, PCI_CB_SUBSYSTEM_ID, &dev->subsystem_device);
1579 break;
1580
1581 default: /* unknown header */
227f0647
RD
1582 dev_err(&dev->dev, "unknown header type %02x, ignoring device\n",
1583 dev->hdr_type);
480b93b7 1584 return -EIO;
1da177e4
LT
1585
1586 bad:
227f0647
RD
1587 dev_err(&dev->dev, "ignoring class %#08x (doesn't match header type %02x)\n",
1588 dev->class, dev->hdr_type);
2b4aed1d 1589 dev->class = PCI_CLASS_NOT_DEFINED << 8;
1da177e4
LT
1590 }
1591
1592 /* We found a fine healthy device, go go go... */
1593 return 0;
1594}
1595
9dae3a97
BH
1596static void pci_configure_mps(struct pci_dev *dev)
1597{
1598 struct pci_dev *bridge = pci_upstream_bridge(dev);
27d868b5 1599 int mps, p_mps, rc;
9dae3a97
BH
1600
1601 if (!pci_is_pcie(dev) || !bridge || !pci_is_pcie(bridge))
1602 return;
1603
1604 mps = pcie_get_mps(dev);
1605 p_mps = pcie_get_mps(bridge);
1606
1607 if (mps == p_mps)
1608 return;
1609
1610 if (pcie_bus_config == PCIE_BUS_TUNE_OFF) {
1611 dev_warn(&dev->dev, "Max Payload Size %d, but upstream %s set to %d; if necessary, use \"pci=pcie_bus_safe\" and report a bug\n",
1612 mps, pci_name(bridge), p_mps);
1613 return;
1614 }
27d868b5
KB
1615
1616 /*
1617 * Fancier MPS configuration is done later by
1618 * pcie_bus_configure_settings()
1619 */
1620 if (pcie_bus_config != PCIE_BUS_DEFAULT)
1621 return;
1622
1623 rc = pcie_set_mps(dev, p_mps);
1624 if (rc) {
1625 dev_warn(&dev->dev, "can't set Max Payload Size to %d; if necessary, use \"pci=pcie_bus_safe\" and report a bug\n",
1626 p_mps);
1627 return;
1628 }
1629
1630 dev_info(&dev->dev, "Max Payload Size set to %d (was %d, max %d)\n",
1631 p_mps, mps, 128 << dev->pcie_mpss);
9dae3a97
BH
1632}
1633
589fcc23
BH
1634static struct hpp_type0 pci_default_type0 = {
1635 .revision = 1,
1636 .cache_line_size = 8,
1637 .latency_timer = 0x40,
1638 .enable_serr = 0,
1639 .enable_perr = 0,
1640};
1641
1642static void program_hpp_type0(struct pci_dev *dev, struct hpp_type0 *hpp)
1643{
1644 u16 pci_cmd, pci_bctl;
1645
c6285fc5 1646 if (!hpp)
589fcc23 1647 hpp = &pci_default_type0;
589fcc23
BH
1648
1649 if (hpp->revision > 1) {
1650 dev_warn(&dev->dev,
1651 "PCI settings rev %d not supported; using defaults\n",
1652 hpp->revision);
1653 hpp = &pci_default_type0;
1654 }
1655
1656 pci_write_config_byte(dev, PCI_CACHE_LINE_SIZE, hpp->cache_line_size);
1657 pci_write_config_byte(dev, PCI_LATENCY_TIMER, hpp->latency_timer);
1658 pci_read_config_word(dev, PCI_COMMAND, &pci_cmd);
1659 if (hpp->enable_serr)
1660 pci_cmd |= PCI_COMMAND_SERR;
589fcc23
BH
1661 if (hpp->enable_perr)
1662 pci_cmd |= PCI_COMMAND_PARITY;
589fcc23
BH
1663 pci_write_config_word(dev, PCI_COMMAND, pci_cmd);
1664
1665 /* Program bridge control value */
1666 if ((dev->class >> 8) == PCI_CLASS_BRIDGE_PCI) {
1667 pci_write_config_byte(dev, PCI_SEC_LATENCY_TIMER,
1668 hpp->latency_timer);
1669 pci_read_config_word(dev, PCI_BRIDGE_CONTROL, &pci_bctl);
1670 if (hpp->enable_serr)
1671 pci_bctl |= PCI_BRIDGE_CTL_SERR;
589fcc23
BH
1672 if (hpp->enable_perr)
1673 pci_bctl |= PCI_BRIDGE_CTL_PARITY;
589fcc23
BH
1674 pci_write_config_word(dev, PCI_BRIDGE_CONTROL, pci_bctl);
1675 }
1676}
1677
1678static void program_hpp_type1(struct pci_dev *dev, struct hpp_type1 *hpp)
1679{
977509f7
BH
1680 int pos;
1681
1682 if (!hpp)
1683 return;
1684
1685 pos = pci_find_capability(dev, PCI_CAP_ID_PCIX);
1686 if (!pos)
1687 return;
1688
1689 dev_warn(&dev->dev, "PCI-X settings not supported\n");
589fcc23
BH
1690}
1691
e42010d8
JT
1692static bool pcie_root_rcb_set(struct pci_dev *dev)
1693{
1694 struct pci_dev *rp = pcie_find_root_port(dev);
1695 u16 lnkctl;
1696
1697 if (!rp)
1698 return false;
1699
1700 pcie_capability_read_word(rp, PCI_EXP_LNKCTL, &lnkctl);
1701 if (lnkctl & PCI_EXP_LNKCTL_RCB)
1702 return true;
1703
1704 return false;
1705}
1706
589fcc23
BH
1707static void program_hpp_type2(struct pci_dev *dev, struct hpp_type2 *hpp)
1708{
1709 int pos;
1710 u32 reg32;
1711
1712 if (!hpp)
1713 return;
1714
977509f7
BH
1715 if (!pci_is_pcie(dev))
1716 return;
1717
589fcc23
BH
1718 if (hpp->revision > 1) {
1719 dev_warn(&dev->dev, "PCIe settings rev %d not supported\n",
1720 hpp->revision);
1721 return;
1722 }
1723
302328c0
BH
1724 /*
1725 * Don't allow _HPX to change MPS or MRRS settings. We manage
1726 * those to make sure they're consistent with the rest of the
1727 * platform.
1728 */
1729 hpp->pci_exp_devctl_and |= PCI_EXP_DEVCTL_PAYLOAD |
1730 PCI_EXP_DEVCTL_READRQ;
1731 hpp->pci_exp_devctl_or &= ~(PCI_EXP_DEVCTL_PAYLOAD |
1732 PCI_EXP_DEVCTL_READRQ);
1733
589fcc23
BH
1734 /* Initialize Device Control Register */
1735 pcie_capability_clear_and_set_word(dev, PCI_EXP_DEVCTL,
1736 ~hpp->pci_exp_devctl_and, hpp->pci_exp_devctl_or);
1737
1738 /* Initialize Link Control Register */
e42010d8
JT
1739 if (pcie_cap_has_lnkctl(dev)) {
1740
1741 /*
1742 * If the Root Port supports Read Completion Boundary of
1743 * 128, set RCB to 128. Otherwise, clear it.
1744 */
1745 hpp->pci_exp_lnkctl_and |= PCI_EXP_LNKCTL_RCB;
1746 hpp->pci_exp_lnkctl_or &= ~PCI_EXP_LNKCTL_RCB;
1747 if (pcie_root_rcb_set(dev))
1748 hpp->pci_exp_lnkctl_or |= PCI_EXP_LNKCTL_RCB;
1749
589fcc23
BH
1750 pcie_capability_clear_and_set_word(dev, PCI_EXP_LNKCTL,
1751 ~hpp->pci_exp_lnkctl_and, hpp->pci_exp_lnkctl_or);
e42010d8 1752 }
589fcc23
BH
1753
1754 /* Find Advanced Error Reporting Enhanced Capability */
1755 pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR);
1756 if (!pos)
1757 return;
1758
1759 /* Initialize Uncorrectable Error Mask Register */
1760 pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_MASK, &reg32);
1761 reg32 = (reg32 & hpp->unc_err_mask_and) | hpp->unc_err_mask_or;
1762 pci_write_config_dword(dev, pos + PCI_ERR_UNCOR_MASK, reg32);
1763
1764 /* Initialize Uncorrectable Error Severity Register */
1765 pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_SEVER, &reg32);
1766 reg32 = (reg32 & hpp->unc_err_sever_and) | hpp->unc_err_sever_or;
1767 pci_write_config_dword(dev, pos + PCI_ERR_UNCOR_SEVER, reg32);
1768
1769 /* Initialize Correctable Error Mask Register */
1770 pci_read_config_dword(dev, pos + PCI_ERR_COR_MASK, &reg32);
1771 reg32 = (reg32 & hpp->cor_err_mask_and) | hpp->cor_err_mask_or;
1772 pci_write_config_dword(dev, pos + PCI_ERR_COR_MASK, reg32);
1773
1774 /* Initialize Advanced Error Capabilities and Control Register */
1775 pci_read_config_dword(dev, pos + PCI_ERR_CAP, &reg32);
1776 reg32 = (reg32 & hpp->adv_err_cap_and) | hpp->adv_err_cap_or;
675734ba
BH
1777 /* Don't enable ECRC generation or checking if unsupported */
1778 if (!(reg32 & PCI_ERR_CAP_ECRC_GENC))
1779 reg32 &= ~PCI_ERR_CAP_ECRC_GENE;
1780 if (!(reg32 & PCI_ERR_CAP_ECRC_CHKC))
1781 reg32 &= ~PCI_ERR_CAP_ECRC_CHKE;
589fcc23
BH
1782 pci_write_config_dword(dev, pos + PCI_ERR_CAP, reg32);
1783
1784 /*
1785 * FIXME: The following two registers are not supported yet.
1786 *
1787 * o Secondary Uncorrectable Error Severity Register
1788 * o Secondary Uncorrectable Error Mask Register
1789 */
1790}
1791
62ce94a7 1792int pci_configure_extended_tags(struct pci_dev *dev, void *ign)
60db3a4d 1793{
62ce94a7
SK
1794 struct pci_host_bridge *host;
1795 u32 cap;
1796 u16 ctl;
60db3a4d
SK
1797 int ret;
1798
1799 if (!pci_is_pcie(dev))
62ce94a7 1800 return 0;
60db3a4d 1801
62ce94a7 1802 ret = pcie_capability_read_dword(dev, PCI_EXP_DEVCAP, &cap);
60db3a4d 1803 if (ret)
62ce94a7
SK
1804 return 0;
1805
1806 if (!(cap & PCI_EXP_DEVCAP_EXT_TAG))
1807 return 0;
60db3a4d 1808
62ce94a7
SK
1809 ret = pcie_capability_read_word(dev, PCI_EXP_DEVCTL, &ctl);
1810 if (ret)
1811 return 0;
1812
1813 host = pci_find_host_bridge(dev->bus);
1814 if (!host)
1815 return 0;
60db3a4d 1816
62ce94a7
SK
1817 /*
1818 * If some device in the hierarchy doesn't handle Extended Tags
1819 * correctly, make sure they're disabled.
1820 */
1821 if (host->no_ext_tags) {
1822 if (ctl & PCI_EXP_DEVCTL_EXT_TAG) {
1823 dev_info(&dev->dev, "disabling Extended Tags\n");
1824 pcie_capability_clear_word(dev, PCI_EXP_DEVCTL,
1825 PCI_EXP_DEVCTL_EXT_TAG);
1826 }
1827 return 0;
1828 }
1829
1830 if (!(ctl & PCI_EXP_DEVCTL_EXT_TAG)) {
1831 dev_info(&dev->dev, "enabling Extended Tags\n");
60db3a4d
SK
1832 pcie_capability_set_word(dev, PCI_EXP_DEVCTL,
1833 PCI_EXP_DEVCTL_EXT_TAG);
62ce94a7
SK
1834 }
1835 return 0;
60db3a4d
SK
1836}
1837
a99b646a 1838/**
1839 * pcie_relaxed_ordering_enabled - Probe for PCIe relaxed ordering enable
1840 * @dev: PCI device to query
1841 *
1842 * Returns true if the device has enabled relaxed ordering attribute.
1843 */
1844bool pcie_relaxed_ordering_enabled(struct pci_dev *dev)
1845{
1846 u16 v;
1847
1848 pcie_capability_read_word(dev, PCI_EXP_DEVCTL, &v);
1849
1850 return !!(v & PCI_EXP_DEVCTL_RELAX_EN);
1851}
1852EXPORT_SYMBOL(pcie_relaxed_ordering_enabled);
1853
1854static void pci_configure_relaxed_ordering(struct pci_dev *dev)
1855{
1856 struct pci_dev *root;
1857
1858 /* PCI_EXP_DEVICE_RELAX_EN is RsvdP in VFs */
1859 if (dev->is_virtfn)
1860 return;
1861
1862 if (!pcie_relaxed_ordering_enabled(dev))
1863 return;
1864
1865 /*
1866 * For now, we only deal with Relaxed Ordering issues with Root
1867 * Ports. Peer-to-Peer DMA is another can of worms.
1868 */
1869 root = pci_find_pcie_root_port(dev);
1870 if (!root)
1871 return;
1872
1873 if (root->dev_flags & PCI_DEV_FLAGS_NO_RELAXED_ORDERING) {
1874 pcie_capability_clear_word(dev, PCI_EXP_DEVCTL,
1875 PCI_EXP_DEVCTL_RELAX_EN);
1876 dev_info(&dev->dev, "Disable Relaxed Ordering because the Root Port didn't support it\n");
1877 }
1878}
1879
2b78239e
BH
/*
 * Enable Latency Tolerance Reporting when every device on the path from
 * the Root Port down supports it (PCIe r3.1, sec 6.18).
 */
static void pci_configure_ltr(struct pci_dev *dev)
{
#ifdef CONFIG_PCIEASPM
	struct pci_dev *bridge;
	u32 cap;

	if (!pci_is_pcie(dev))
		return;

	pcie_capability_read_dword(dev, PCI_EXP_DEVCAP2, &cap);
	if (!(cap & PCI_EXP_DEVCAP2_LTR))
		return;

	/*
	 * Software must not enable LTR in an Endpoint unless the Root
	 * Complex and all intermediate Switches indicate support for LTR.
	 * PCIe r3.1, sec 6.18.
	 */
	if (pci_pcie_type(dev) == PCI_EXP_TYPE_ROOT_PORT) {
		dev->ltr_path = 1;
	} else {
		bridge = pci_upstream_bridge(dev);
		if (bridge && bridge->ltr_path)
			dev->ltr_path = 1;
	}

	if (dev->ltr_path)
		pcie_capability_set_word(dev, PCI_EXP_DEVCTL2,
					 PCI_EXP_DEVCTL2_LTR_EN);
#endif
}
1911
6cd33649
BH
1912static void pci_configure_device(struct pci_dev *dev)
1913{
1914 struct hotplug_params hpp;
1915 int ret;
1916
9dae3a97 1917 pci_configure_mps(dev);
62ce94a7 1918 pci_configure_extended_tags(dev, NULL);
a99b646a 1919 pci_configure_relaxed_ordering(dev);
2b78239e 1920 pci_configure_ltr(dev);
9dae3a97 1921
6cd33649
BH
1922 memset(&hpp, 0, sizeof(hpp));
1923 ret = pci_get_hp_params(dev, &hpp);
1924 if (ret)
1925 return;
1926
1927 program_hpp_type2(dev, hpp.t2);
1928 program_hpp_type1(dev, hpp.t1);
1929 program_hpp_type0(dev, hpp.t0);
1930}
1931
201de56e
ZY
/* Release capability-related state (VPD, SR-IOV, saved cap buffers) */
static void pci_release_capabilities(struct pci_dev *dev)
{
	pci_vpd_release(dev);
	pci_iov_release(dev);
	pci_free_cap_save_buffers(dev);
}
1938
1da177e4
LT
1939/**
1940 * pci_release_dev - free a pci device structure when all users of it are finished.
1941 * @dev: device that's been disconnected
1942 *
1943 * Will be called only by the device core when all users of this pci device are
1944 * done.
1945 */
1946static void pci_release_dev(struct device *dev)
1947{
04480094 1948 struct pci_dev *pci_dev;
1da177e4 1949
04480094 1950 pci_dev = to_pci_dev(dev);
201de56e 1951 pci_release_capabilities(pci_dev);
98d9f30c 1952 pci_release_of_node(pci_dev);
6ae32c53 1953 pcibios_release_device(pci_dev);
8b1fce04 1954 pci_bus_put(pci_dev->bus);
782a985d 1955 kfree(pci_dev->driver_override);
338c3149 1956 kfree(pci_dev->dma_alias_mask);
1da177e4
LT
1957 kfree(pci_dev);
1958}
1959
3c6e6ae7 1960struct pci_dev *pci_alloc_dev(struct pci_bus *bus)
65891215
ME
1961{
1962 struct pci_dev *dev;
1963
1964 dev = kzalloc(sizeof(struct pci_dev), GFP_KERNEL);
1965 if (!dev)
1966 return NULL;
1967
65891215 1968 INIT_LIST_HEAD(&dev->bus_list);
88e7b167 1969 dev->dev.type = &pci_dev_type;
3c6e6ae7 1970 dev->bus = pci_bus_get(bus);
65891215
ME
1971
1972 return dev;
1973}
3c6e6ae7
GZ
1974EXPORT_SYMBOL(pci_alloc_dev);
1975
/*
 * A config read that completed with Configuration Request Retry Status
 * returns the reserved Vendor ID 0x0001 in the low 16 bits.
 */
static bool pci_bus_crs_vendor_id(u32 l)
{
	return (l & 0xffff) == 0x0001;
}
1980
6a802ef0
SK
1981static bool pci_bus_wait_crs(struct pci_bus *bus, int devfn, u32 *l,
1982 int timeout)
1da177e4 1983{
1da177e4
LT
1984 int delay = 1;
1985
6a802ef0
SK
1986 if (!pci_bus_crs_vendor_id(*l))
1987 return true; /* not a CRS completion */
1da177e4 1988
6a802ef0
SK
1989 if (!timeout)
1990 return false; /* CRS, but caller doesn't want to wait */
1da177e4 1991
89665a6a 1992 /*
6a802ef0
SK
1993 * We got the reserved Vendor ID that indicates a completion with
1994 * Configuration Request Retry Status (CRS). Retry until we get a
1995 * valid Vendor ID or we time out.
89665a6a 1996 */
62bc6a6f 1997 while (pci_bus_crs_vendor_id(*l)) {
6a802ef0 1998 if (delay > timeout) {
e78e661f
SK
1999 pr_warn("pci %04x:%02x:%02x.%d: not ready after %dms; giving up\n",
2000 pci_domain_nr(bus), bus->number,
2001 PCI_SLOT(devfn), PCI_FUNC(devfn), delay - 1);
2002
efdc87da 2003 return false;
1da177e4 2004 }
e78e661f
SK
2005 if (delay >= 1000)
2006 pr_info("pci %04x:%02x:%02x.%d: not ready after %dms; waiting\n",
2007 pci_domain_nr(bus), bus->number,
2008 PCI_SLOT(devfn), PCI_FUNC(devfn), delay - 1);
efdc87da 2009
1da177e4
LT
2010 msleep(delay);
2011 delay *= 2;
9f982756 2012
efdc87da
YL
2013 if (pci_bus_read_config_dword(bus, devfn, PCI_VENDOR_ID, l))
2014 return false;
1da177e4
LT
2015 }
2016
e78e661f
SK
2017 if (delay >= 1000)
2018 pr_info("pci %04x:%02x:%02x.%d: ready after %dms\n",
2019 pci_domain_nr(bus), bus->number,
2020 PCI_SLOT(devfn), PCI_FUNC(devfn), delay - 1);
2021
efdc87da
YL
2022 return true;
2023}
6a802ef0
SK
2024
2025bool pci_bus_read_dev_vendor_id(struct pci_bus *bus, int devfn, u32 *l,
2026 int timeout)
2027{
2028 if (pci_bus_read_config_dword(bus, devfn, PCI_VENDOR_ID, l))
2029 return false;
2030
2031 /* some broken boards return 0 or ~0 if a slot is empty: */
2032 if (*l == 0xffffffff || *l == 0x00000000 ||
2033 *l == 0x0000ffff || *l == 0xffff0000)
2034 return false;
2035
2036 if (pci_bus_crs_vendor_id(*l))
2037 return pci_bus_wait_crs(bus, devfn, l, timeout);
2038
efdc87da
YL
2039 return true;
2040}
2041EXPORT_SYMBOL(pci_bus_read_dev_vendor_id);
2042
2043/*
2044 * Read the config data for a PCI device, sanity-check it
2045 * and fill in the dev structure...
2046 */
2047static struct pci_dev *pci_scan_device(struct pci_bus *bus, int devfn)
2048{
2049 struct pci_dev *dev;
2050 u32 l;
2051
2052 if (!pci_bus_read_dev_vendor_id(bus, devfn, &l, 60*1000))
2053 return NULL;
2054
8b1fce04 2055 dev = pci_alloc_dev(bus);
1da177e4
LT
2056 if (!dev)
2057 return NULL;
2058
1da177e4 2059 dev->devfn = devfn;
1da177e4
LT
2060 dev->vendor = l & 0xffff;
2061 dev->device = (l >> 16) & 0xffff;
cef354db 2062
98d9f30c
BH
2063 pci_set_of_node(dev);
2064
480b93b7 2065 if (pci_setup_device(dev)) {
8b1fce04 2066 pci_bus_put(dev->bus);
1da177e4
LT
2067 kfree(dev);
2068 return NULL;
2069 }
1da177e4
LT
2070
2071 return dev;
2072}
2073
/* Discover and initialize each capability of a new device, in fixed order */
static void pci_init_capabilities(struct pci_dev *dev)
{
	/* Enhanced Allocation */
	pci_ea_init(dev);

	/* Setup MSI caps & disable MSI/MSI-X interrupts */
	pci_msi_setup_pci_dev(dev);

	/* Buffers for saving PCIe and PCI-X capabilities */
	pci_allocate_cap_save_buffers(dev);

	/* Power Management */
	pci_pm_init(dev);

	/* Vital Product Data */
	pci_vpd_init(dev);

	/* Alternative Routing-ID Forwarding */
	pci_configure_ari(dev);

	/* Single Root I/O Virtualization */
	pci_iov_init(dev);

	/* Address Translation Services */
	pci_ats_init(dev);

	/* Enable ACS P2P upstream forwarding */
	pci_enable_acs(dev);

	/* Precision Time Measurement */
	pci_ptm_init(dev);

	/* Advanced Error Reporting */
	pci_aer_init(dev);
}
2109
098259eb
MZ
2110/*
2111 * This is the equivalent of pci_host_bridge_msi_domain that acts on
2112 * devices. Firmware interfaces that can select the MSI domain on a
2113 * per-device basis should be called from here.
2114 */
2115static struct irq_domain *pci_dev_msi_domain(struct pci_dev *dev)
2116{
2117 struct irq_domain *d;
2118
2119 /*
2120 * If a domain has been set through the pcibios_add_device
2121 * callback, then this is the one (platform code knows best).
2122 */
2123 d = dev_get_msi_domain(&dev->dev);
2124 if (d)
2125 return d;
2126
54fa97ee
MZ
2127 /*
2128 * Let's see if we have a firmware interface able to provide
2129 * the domain.
2130 */
2131 d = pci_msi_get_device_domain(dev);
2132 if (d)
2133 return d;
2134
098259eb
MZ
2135 return NULL;
2136}
2137
44aa0c65
MZ
2138static void pci_set_msi_domain(struct pci_dev *dev)
2139{
098259eb
MZ
2140 struct irq_domain *d;
2141
44aa0c65 2142 /*
098259eb
MZ
2143 * If the platform or firmware interfaces cannot supply a
2144 * device-specific MSI domain, then inherit the default domain
2145 * from the host bridge itself.
44aa0c65 2146 */
098259eb
MZ
2147 d = pci_dev_msi_domain(dev);
2148 if (!d)
2149 d = dev_get_msi_domain(&dev->bus->dev);
2150
2151 dev_set_msi_domain(&dev->dev, d);
44aa0c65
MZ
2152}
2153
96bde06a 2154void pci_device_add(struct pci_dev *dev, struct pci_bus *bus)
1da177e4 2155{
4f535093
YL
2156 int ret;
2157
6cd33649
BH
2158 pci_configure_device(dev);
2159
cdb9b9f7
PM
2160 device_initialize(&dev->dev);
2161 dev->dev.release = pci_release_dev;
1da177e4 2162
7629d19a 2163 set_dev_node(&dev->dev, pcibus_to_node(bus));
cdb9b9f7 2164 dev->dev.dma_mask = &dev->dma_mask;
4d57cdfa 2165 dev->dev.dma_parms = &dev->dma_parms;
cdb9b9f7 2166 dev->dev.coherent_dma_mask = 0xffffffffull;
1da177e4 2167
4d57cdfa 2168 pci_set_dma_max_seg_size(dev, 65536);
59fc67de 2169 pci_set_dma_seg_boundary(dev, 0xffffffff);
4d57cdfa 2170
1da177e4
LT
2171 /* Fix up broken headers */
2172 pci_fixup_device(pci_fixup_header, dev);
2173
2069ecfb
YL
2174 /* moved out from quirk header fixup code */
2175 pci_reassigndev_resource_alignment(dev);
2176
4b77b0a2
RW
2177 /* Clear the state_saved flag. */
2178 dev->state_saved = false;
2179
201de56e
ZY
2180 /* Initialize various capabilities */
2181 pci_init_capabilities(dev);
eb9d0fe4 2182
1da177e4
LT
2183 /*
2184 * Add the device to our list of discovered devices
2185 * and the bus list for fixup functions, etc.
2186 */
d71374da 2187 down_write(&pci_bus_sem);
1da177e4 2188 list_add_tail(&dev->bus_list, &bus->devices);
d71374da 2189 up_write(&pci_bus_sem);
4f535093 2190
4f535093
YL
2191 ret = pcibios_add_device(dev);
2192 WARN_ON(ret < 0);
2193
44aa0c65
MZ
2194 /* Setup MSI irq domain */
2195 pci_set_msi_domain(dev);
2196
4f535093
YL
2197 /* Notifier could use PCI capabilities */
2198 dev->match_driver = false;
2199 ret = device_add(&dev->dev);
2200 WARN_ON(ret < 0);
cdb9b9f7
PM
2201}
2202
10874f5a 2203struct pci_dev *pci_scan_single_device(struct pci_bus *bus, int devfn)
cdb9b9f7
PM
2204{
2205 struct pci_dev *dev;
2206
90bdb311
TP
2207 dev = pci_get_slot(bus, devfn);
2208 if (dev) {
2209 pci_dev_put(dev);
2210 return dev;
2211 }
2212
cdb9b9f7
PM
2213 dev = pci_scan_device(bus, devfn);
2214 if (!dev)
2215 return NULL;
2216
2217 pci_device_add(dev, bus);
1da177e4
LT
2218
2219 return dev;
2220}
b73e9687 2221EXPORT_SYMBOL(pci_scan_single_device);
1da177e4 2222
b1bd58e4 2223static unsigned next_fn(struct pci_bus *bus, struct pci_dev *dev, unsigned fn)
f07852d6 2224{
b1bd58e4
YW
2225 int pos;
2226 u16 cap = 0;
2227 unsigned next_fn;
4fb88c1a 2228
b1bd58e4
YW
2229 if (pci_ari_enabled(bus)) {
2230 if (!dev)
2231 return 0;
2232 pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ARI);
2233 if (!pos)
2234 return 0;
4fb88c1a 2235
b1bd58e4
YW
2236 pci_read_config_word(dev, pos + PCI_ARI_CAP, &cap);
2237 next_fn = PCI_ARI_CAP_NFN(cap);
2238 if (next_fn <= fn)
2239 return 0; /* protect against malformed list */
f07852d6 2240
b1bd58e4
YW
2241 return next_fn;
2242 }
2243
2244 /* dev may be NULL for non-contiguous multifunction devices */
2245 if (!dev || dev->multifunction)
2246 return (fn + 1) % 8;
f07852d6 2247
f07852d6
MW
2248 return 0;
2249}
2250
2251static int only_one_child(struct pci_bus *bus)
2252{
04043fd8 2253 struct pci_dev *bridge = bus->self;
284f5f9d 2254
04043fd8
BH
2255 /*
2256 * Systems with unusual topologies set PCI_SCAN_ALL_PCIE_DEVS so
2257 * we scan for all possible devices, not just Device 0.
2258 */
2259 if (pci_has_flag(PCI_SCAN_ALL_PCIE_DEVS))
f07852d6 2260 return 0;
5bbe029f
BH
2261
2262 /*
04043fd8
BH
2263 * A PCIe Downstream Port normally leads to a Link with only Device
2264 * 0 on it (PCIe spec r3.1, sec 7.3.1). As an optimization, scan
2265 * only for Device 0 in that situation.
2266 *
2267 * Checking has_secondary_link is a hack to identify Downstream
2268 * Ports because sometimes Switches are configured such that the
2269 * PCIe Port Type labels are backwards.
5bbe029f 2270 */
04043fd8 2271 if (bridge && pci_is_pcie(bridge) && bridge->has_secondary_link)
f07852d6 2272 return 1;
04043fd8 2273
f07852d6
MW
2274 return 0;
2275}
2276
1da177e4
LT
2277/**
2278 * pci_scan_slot - scan a PCI slot on a bus for devices.
2279 * @bus: PCI bus to scan
2280 * @devfn: slot number to scan (must have zero function.)
2281 *
2282 * Scan a PCI slot on the specified PCI bus for devices, adding
2283 * discovered devices to the @bus->devices list. New devices
8a1bc901 2284 * will not have is_added set.
1b69dfc6
TP
2285 *
2286 * Returns the number of new devices found.
1da177e4 2287 */
96bde06a 2288int pci_scan_slot(struct pci_bus *bus, int devfn)
1da177e4 2289{
f07852d6 2290 unsigned fn, nr = 0;
1b69dfc6 2291 struct pci_dev *dev;
f07852d6
MW
2292
2293 if (only_one_child(bus) && (devfn > 0))
2294 return 0; /* Already scanned the entire slot */
1da177e4 2295
1b69dfc6 2296 dev = pci_scan_single_device(bus, devfn);
4fb88c1a
MW
2297 if (!dev)
2298 return 0;
2299 if (!dev->is_added)
1b69dfc6
TP
2300 nr++;
2301
b1bd58e4 2302 for (fn = next_fn(bus, dev, 0); fn > 0; fn = next_fn(bus, dev, fn)) {
f07852d6
MW
2303 dev = pci_scan_single_device(bus, devfn + fn);
2304 if (dev) {
2305 if (!dev->is_added)
2306 nr++;
2307 dev->multifunction = 1;
1da177e4
LT
2308 }
2309 }
7d715a6c 2310
149e1637
SL
2311 /* only one slot has pcie device */
2312 if (bus->self && nr)
7d715a6c
SL
2313 pcie_aspm_init_link_state(bus->self);
2314
1da177e4
LT
2315 return nr;
2316}
b7fe9434 2317EXPORT_SYMBOL(pci_scan_slot);
1da177e4 2318
b03e7495
JM
2319static int pcie_find_smpss(struct pci_dev *dev, void *data)
2320{
2321 u8 *smpss = data;
2322
2323 if (!pci_is_pcie(dev))
2324 return 0;
2325
d4aa68f6
YW
2326 /*
2327 * We don't have a way to change MPS settings on devices that have
2328 * drivers attached. A hot-added device might support only the minimum
2329 * MPS setting (MPS=128). Therefore, if the fabric contains a bridge
2330 * where devices may be hot-added, we limit the fabric MPS to 128 so
2331 * hot-added devices will work correctly.
2332 *
2333 * However, if we hot-add a device to a slot directly below a Root
2334 * Port, it's impossible for there to be other existing devices below
2335 * the port. We don't limit the MPS in this case because we can
2336 * reconfigure MPS on both the Root Port and the hot-added device,
2337 * and there are no other devices involved.
2338 *
2339 * Note that this PCIE_BUS_SAFE path assumes no peer-to-peer DMA.
b03e7495 2340 */
d4aa68f6
YW
2341 if (dev->is_hotplug_bridge &&
2342 pci_pcie_type(dev) != PCI_EXP_TYPE_ROOT_PORT)
b03e7495
JM
2343 *smpss = 0;
2344
2345 if (*smpss > dev->pcie_mpss)
2346 *smpss = dev->pcie_mpss;
2347
2348 return 0;
2349}
2350
2351static void pcie_write_mps(struct pci_dev *dev, int mps)
2352{
62f392ea 2353 int rc;
b03e7495
JM
2354
2355 if (pcie_bus_config == PCIE_BUS_PERFORMANCE) {
62f392ea 2356 mps = 128 << dev->pcie_mpss;
b03e7495 2357
62f87c0e
YW
2358 if (pci_pcie_type(dev) != PCI_EXP_TYPE_ROOT_PORT &&
2359 dev->bus->self)
62f392ea 2360 /* For "Performance", the assumption is made that
b03e7495
JM
2361 * downstream communication will never be larger than
2362 * the MRRS. So, the MPS only needs to be configured
2363 * for the upstream communication. This being the case,
2364 * walk from the top down and set the MPS of the child
2365 * to that of the parent bus.
62f392ea
JM
2366 *
2367 * Configure the device MPS with the smaller of the
2368 * device MPSS or the bridge MPS (which is assumed to be
2369 * properly configured at this point to the largest
2370 * allowable MPS based on its parent bus).
b03e7495 2371 */
62f392ea 2372 mps = min(mps, pcie_get_mps(dev->bus->self));
b03e7495
JM
2373 }
2374
2375 rc = pcie_set_mps(dev, mps);
2376 if (rc)
2377 dev_err(&dev->dev, "Failed attempting to set the MPS\n");
2378}
2379
62f392ea 2380static void pcie_write_mrrs(struct pci_dev *dev)
b03e7495 2381{
62f392ea 2382 int rc, mrrs;
b03e7495 2383
ed2888e9
JM
2384 /* In the "safe" case, do not configure the MRRS. There appear to be
2385 * issues with setting MRRS to 0 on a number of devices.
2386 */
ed2888e9
JM
2387 if (pcie_bus_config != PCIE_BUS_PERFORMANCE)
2388 return;
2389
ed2888e9
JM
2390 /* For Max performance, the MRRS must be set to the largest supported
2391 * value. However, it cannot be configured larger than the MPS the
62f392ea
JM
2392 * device or the bus can support. This should already be properly
2393 * configured by a prior call to pcie_write_mps.
ed2888e9 2394 */
62f392ea 2395 mrrs = pcie_get_mps(dev);
b03e7495
JM
2396
2397 /* MRRS is a R/W register. Invalid values can be written, but a
ed2888e9 2398 * subsequent read will verify if the value is acceptable or not.
b03e7495
JM
2399 * If the MRRS value provided is not acceptable (e.g., too large),
2400 * shrink the value until it is acceptable to the HW.
f7625980 2401 */
b03e7495
JM
2402 while (mrrs != pcie_get_readrq(dev) && mrrs >= 128) {
2403 rc = pcie_set_readrq(dev, mrrs);
62f392ea
JM
2404 if (!rc)
2405 break;
b03e7495 2406
62f392ea 2407 dev_warn(&dev->dev, "Failed attempting to set the MRRS\n");
b03e7495
JM
2408 mrrs /= 2;
2409 }
62f392ea
JM
2410
2411 if (mrrs < 128)
227f0647 2412 dev_err(&dev->dev, "MRRS was unable to be configured with a safe value. If problems are experienced, try running with pci=pcie_bus_safe\n");
b03e7495
JM
2413}
2414
2415static int pcie_bus_configure_set(struct pci_dev *dev, void *data)
2416{
a513a99a 2417 int mps, orig_mps;
b03e7495
JM
2418
2419 if (!pci_is_pcie(dev))
2420 return 0;
2421
27d868b5
KB
2422 if (pcie_bus_config == PCIE_BUS_TUNE_OFF ||
2423 pcie_bus_config == PCIE_BUS_DEFAULT)
5895af79 2424 return 0;
5895af79 2425
a513a99a
JM
2426 mps = 128 << *(u8 *)data;
2427 orig_mps = pcie_get_mps(dev);
b03e7495
JM
2428
2429 pcie_write_mps(dev, mps);
62f392ea 2430 pcie_write_mrrs(dev);
b03e7495 2431
227f0647
RD
2432 dev_info(&dev->dev, "Max Payload Size set to %4d/%4d (was %4d), Max Read Rq %4d\n",
2433 pcie_get_mps(dev), 128 << dev->pcie_mpss,
a513a99a 2434 orig_mps, pcie_get_readrq(dev));
b03e7495
JM
2435
2436 return 0;
2437}
2438
a513a99a 2439/* pcie_bus_configure_settings requires that pci_walk_bus work in a top-down,
b03e7495
JM
2440 * parents then children fashion. If this changes, then this code will not
2441 * work as designed.
2442 */
a58674ff 2443void pcie_bus_configure_settings(struct pci_bus *bus)
b03e7495 2444{
1e358f94 2445 u8 smpss = 0;
b03e7495 2446
a58674ff 2447 if (!bus->self)
b03e7495
JM
2448 return;
2449
b03e7495 2450 if (!pci_is_pcie(bus->self))
5f39e670
JM
2451 return;
2452
2453 /* FIXME - Peer to peer DMA is possible, though the endpoint would need
3315472c 2454 * to be aware of the MPS of the destination. To work around this,
5f39e670
JM
2455 * simply force the MPS of the entire system to the smallest possible.
2456 */
2457 if (pcie_bus_config == PCIE_BUS_PEER2PEER)
2458 smpss = 0;
2459
b03e7495 2460 if (pcie_bus_config == PCIE_BUS_SAFE) {
a58674ff 2461 smpss = bus->self->pcie_mpss;
5f39e670 2462
b03e7495
JM
2463 pcie_find_smpss(bus->self, &smpss);
2464 pci_walk_bus(bus, pcie_find_smpss, &smpss);
2465 }
2466
2467 pcie_bus_configure_set(bus->self, &smpss);
2468 pci_walk_bus(bus, pcie_bus_configure_set, &smpss);
2469}
debc3b77 2470EXPORT_SYMBOL_GPL(pcie_bus_configure_settings);
b03e7495 2471
bccf90d6
PD
2472/*
2473 * Called after each bus is probed, but before its children are examined. This
2474 * is marked as __weak because multiple architectures define it.
2475 */
2476void __weak pcibios_fixup_bus(struct pci_bus *bus)
2477{
2478 /* nothing to do, expected to be removed in the future */
2479}
2480
1c02ea81
MW
2481/**
2482 * pci_scan_child_bus_extend() - Scan devices below a bus
2483 * @bus: Bus to scan for devices
2484 * @available_buses: Total number of buses available (%0 does not try to
2485 * extend beyond the minimal)
2486 *
2487 * Scans devices below @bus including subordinate buses. Returns new
2488 * subordinate number including all the found devices. Passing
2489 * @available_buses causes the remaining bus space to be distributed
2490 * equally between hotplug-capable bridges to allow future extension of the
2491 * hierarchy.
2492 */
2493static unsigned int pci_scan_child_bus_extend(struct pci_bus *bus,
2494 unsigned int available_buses)
1da177e4 2495{
1c02ea81
MW
2496 unsigned int used_buses, normal_bridges = 0, hotplug_bridges = 0;
2497 unsigned int start = bus->busn_res.start;
2498 unsigned int devfn, cmax, max = start;
1da177e4
LT
2499 struct pci_dev *dev;
2500
0207c356 2501 dev_dbg(&bus->dev, "scanning bus\n");
1da177e4
LT
2502
2503 /* Go find them, Rover! */
2504 for (devfn = 0; devfn < 0x100; devfn += 8)
2505 pci_scan_slot(bus, devfn);
2506
a28724b0 2507 /* Reserve buses for SR-IOV capability. */
1c02ea81
MW
2508 used_buses = pci_iov_bus_range(bus);
2509 max += used_buses;
a28724b0 2510
1da177e4
LT
2511 /*
2512 * After performing arch-dependent fixup of the bus, look behind
2513 * all PCI-to-PCI bridges on this bus.
2514 */
74710ded 2515 if (!bus->is_added) {
0207c356 2516 dev_dbg(&bus->dev, "fixups for bus\n");
74710ded 2517 pcibios_fixup_bus(bus);
981cf9ea 2518 bus->is_added = 1;
74710ded
AC
2519 }
2520
1c02ea81
MW
2521 /*
2522 * Calculate how many hotplug bridges and normal bridges there
2523 * are on this bus. We will distribute the additional available
2524 * buses between hotplug bridges.
2525 */
2526 for_each_pci_bridge(dev, bus) {
2527 if (dev->is_hotplug_bridge)
2528 hotplug_bridges++;
2529 else
2530 normal_bridges++;
2531 }
2532
4147c2fd
MW
2533 /*
2534 * Scan bridges that are already configured. We don't touch them
2535 * unless they are misconfigured (which will be done in the second
2536 * scan below).
2537 */
1c02ea81
MW
2538 for_each_pci_bridge(dev, bus) {
2539 cmax = max;
2540 max = pci_scan_bridge_extend(bus, dev, max, 0, 0);
b729a311
MW
2541
2542 /*
2543 * Reserve one bus for each bridge now to avoid extending
2544 * hotplug bridges too much during the second scan below.
2545 */
2546 used_buses++;
2547 if (cmax - max > 1)
2548 used_buses += cmax - max - 1;
1c02ea81 2549 }
4147c2fd
MW
2550
2551 /* Scan bridges that need to be reconfigured */
1c02ea81
MW
2552 for_each_pci_bridge(dev, bus) {
2553 unsigned int buses = 0;
2554
2555 if (!hotplug_bridges && normal_bridges == 1) {
2556 /*
2557 * There is only one bridge on the bus (upstream
2558 * port) so it gets all available buses which it
2559 * can then distribute to the possible hotplug
2560 * bridges below.
2561 */
2562 buses = available_buses;
2563 } else if (dev->is_hotplug_bridge) {
2564 /*
2565 * Distribute the extra buses between hotplug
2566 * bridges if any.
2567 */
2568 buses = available_buses / hotplug_bridges;
b729a311 2569 buses = min(buses, available_buses - used_buses + 1);
1c02ea81
MW
2570 }
2571
2572 cmax = max;
2573 max = pci_scan_bridge_extend(bus, dev, cmax, buses, 1);
b729a311
MW
2574 /* One bus is already accounted so don't add it again */
2575 if (max - cmax > 1)
2576 used_buses += max - cmax - 1;
1c02ea81 2577 }
1da177e4 2578
e16b4660
KB
2579 /*
2580 * Make sure a hotplug bridge has at least the minimum requested
1c02ea81
MW
2581 * number of buses but allow it to grow up to the maximum available
2582 * bus number of there is room.
e16b4660 2583 */
1c02ea81
MW
2584 if (bus->self && bus->self->is_hotplug_bridge) {
2585 used_buses = max_t(unsigned int, available_buses,
2586 pci_hotplug_bus_size - 1);
2587 if (max - start < used_buses) {
2588 max = start + used_buses;
2589
2590 /* Do not allocate more buses than we have room left */
2591 if (max > bus->busn_res.end)
2592 max = bus->busn_res.end;
2593
2594 dev_dbg(&bus->dev, "%pR extended by %#02x\n",
2595 &bus->busn_res, max - start);
2596 }
e16b4660
KB
2597 }
2598
1da177e4
LT
2599 /*
2600 * We've scanned the bus and so we know all about what's on
2601 * the other side of any bridges that may be on this bus plus
2602 * any devices.
2603 *
2604 * Return how far we've got finding sub-buses.
2605 */
0207c356 2606 dev_dbg(&bus->dev, "bus scan returning with max=%02x\n", max);
1da177e4
LT
2607 return max;
2608}
/**
 * pci_scan_child_bus() - Scan devices below a bus
 * @bus: Bus to scan for devices
 *
 * Scans devices below @bus including subordinate buses.  Returns new
 * subordinate number including all the found devices.
 */
unsigned int pci_scan_child_bus(struct pci_bus *bus)
{
	/* 0: do not try to extend beyond the minimal bus range */
	return pci_scan_child_bus_extend(bus, 0);
}
EXPORT_SYMBOL_GPL(pci_scan_child_bus);
1da177e4 2622
6c0cc950
RW
2623/**
2624 * pcibios_root_bridge_prepare - Platform-specific host bridge setup.
2625 * @bridge: Host bridge to set up.
2626 *
2627 * Default empty implementation. Replace with an architecture-specific setup
2628 * routine, if necessary.
2629 */
2630int __weak pcibios_root_bridge_prepare(struct pci_host_bridge *bridge)
2631{
2632 return 0;
2633}
2634
10a95747
JL
2635void __weak pcibios_add_bus(struct pci_bus *bus)
2636{
2637}
2638
2639void __weak pcibios_remove_bus(struct pci_bus *bus)
2640{
2641}
2642
9ee8a1c4
LP
2643struct pci_bus *pci_create_root_bus(struct device *parent, int bus,
2644 struct pci_ops *ops, void *sysdata, struct list_head *resources)
1da177e4 2645{
0efd5aab 2646 int error;
5a21d70d 2647 struct pci_host_bridge *bridge;
1da177e4 2648
59094065 2649 bridge = pci_alloc_host_bridge(0);
7b543663 2650 if (!bridge)
37d6a0a6 2651 return NULL;
7b543663
YL
2652
2653 bridge->dev.parent = parent;
a9d9f527 2654
37d6a0a6
AB
2655 list_splice_init(resources, &bridge->windows);
2656 bridge->sysdata = sysdata;
2657 bridge->busnr = bus;
2658 bridge->ops = ops;
a9d9f527 2659
37d6a0a6
AB
2660 error = pci_register_host_bridge(bridge);
2661 if (error < 0)
2662 goto err_out;
a5390aa6 2663
37d6a0a6 2664 return bridge->bus;
1da177e4 2665
1da177e4 2666err_out:
37d6a0a6 2667 kfree(bridge);
1da177e4
LT
2668 return NULL;
2669}
e6b29dea 2670EXPORT_SYMBOL_GPL(pci_create_root_bus);
cdb9b9f7 2671
98a35831
YL
2672int pci_bus_insert_busn_res(struct pci_bus *b, int bus, int bus_max)
2673{
2674 struct resource *res = &b->busn_res;
2675 struct resource *parent_res, *conflict;
2676
2677 res->start = bus;
2678 res->end = bus_max;
2679 res->flags = IORESOURCE_BUS;
2680
2681 if (!pci_is_root_bus(b))
2682 parent_res = &b->parent->busn_res;
2683 else {
2684 parent_res = get_pci_domain_busn_res(pci_domain_nr(b));
2685 res->flags |= IORESOURCE_PCI_FIXED;
2686 }
2687
ced04d15 2688 conflict = request_resource_conflict(parent_res, res);
98a35831
YL
2689
2690 if (conflict)
2691 dev_printk(KERN_DEBUG, &b->dev,
2692 "busn_res: can not insert %pR under %s%pR (conflicts with %s %pR)\n",
2693 res, pci_is_root_bus(b) ? "domain " : "",
2694 parent_res, conflict->name, conflict);
98a35831
YL
2695
2696 return conflict == NULL;
2697}
2698
2699int pci_bus_update_busn_res_end(struct pci_bus *b, int bus_max)
2700{
2701 struct resource *res = &b->busn_res;
2702 struct resource old_res = *res;
2703 resource_size_t size;
2704 int ret;
2705
2706 if (res->start > bus_max)
2707 return -EINVAL;
2708
2709 size = bus_max - res->start + 1;
2710 ret = adjust_resource(res, res->start, size);
2711 dev_printk(KERN_DEBUG, &b->dev,
2712 "busn_res: %pR end %s updated to %02x\n",
2713 &old_res, ret ? "can not be" : "is", bus_max);
2714
2715 if (!ret && !res->parent)
2716 pci_bus_insert_busn_res(b, res->start, res->end);
2717
2718 return ret;
2719}
2720
2721void pci_bus_release_busn_res(struct pci_bus *b)
2722{
2723 struct resource *res = &b->busn_res;
2724 int ret;
2725
2726 if (!res->flags || !res->parent)
2727 return;
2728
2729 ret = release_resource(res);
2730 dev_printk(KERN_DEBUG, &b->dev,
2731 "busn_res: %pR %s released\n",
2732 res, ret ? "can not be" : "is");
2733}
2734
1228c4b6 2735int pci_scan_root_bus_bridge(struct pci_host_bridge *bridge)
a2ebb827 2736{
14d76b68 2737 struct resource_entry *window;
4d99f524 2738 bool found = false;
a2ebb827 2739 struct pci_bus *b;
1228c4b6 2740 int max, bus, ret;
4d99f524 2741
1228c4b6
LP
2742 if (!bridge)
2743 return -EINVAL;
2744
2745 resource_list_for_each_entry(window, &bridge->windows)
4d99f524
YL
2746 if (window->res->flags & IORESOURCE_BUS) {
2747 found = true;
2748 break;
2749 }
a2ebb827 2750
1228c4b6
LP
2751 ret = pci_register_host_bridge(bridge);
2752 if (ret < 0)
2753 return ret;
2754
2755 b = bridge->bus;
2756 bus = bridge->busnr;
a2ebb827 2757
4d99f524
YL
2758 if (!found) {
2759 dev_info(&b->dev,
2760 "No busn resource found for root bus, will use [bus %02x-ff]\n",
2761 bus);
2762 pci_bus_insert_busn_res(b, bus, 255);
2763 }
2764
2765 max = pci_scan_child_bus(b);
2766
2767 if (!found)
2768 pci_bus_update_busn_res_end(b, max);
2769
1228c4b6 2770 return 0;
a2ebb827 2771}
1228c4b6 2772EXPORT_SYMBOL(pci_scan_root_bus_bridge);
d2a7926d
LP
2773
2774struct pci_bus *pci_scan_root_bus(struct device *parent, int bus,
2775 struct pci_ops *ops, void *sysdata, struct list_head *resources)
2776{
14d76b68 2777 struct resource_entry *window;
4d99f524 2778 bool found = false;
a2ebb827 2779 struct pci_bus *b;
4d99f524
YL
2780 int max;
2781
14d76b68 2782 resource_list_for_each_entry(window, resources)
4d99f524
YL
2783 if (window->res->flags & IORESOURCE_BUS) {
2784 found = true;
2785 break;
2786 }
a2ebb827 2787
9ee8a1c4 2788 b = pci_create_root_bus(parent, bus, ops, sysdata, resources);
a2ebb827
BH
2789 if (!b)
2790 return NULL;
2791
4d99f524
YL
2792 if (!found) {
2793 dev_info(&b->dev,
2794 "No busn resource found for root bus, will use [bus %02x-ff]\n",
2795 bus);
2796 pci_bus_insert_busn_res(b, bus, 255);
2797 }
2798
2799 max = pci_scan_child_bus(b);
2800
2801 if (!found)
2802 pci_bus_update_busn_res_end(b, max);
2803
a2ebb827 2804 return b;
d2a7926d 2805}
a2ebb827
BH
2806EXPORT_SYMBOL(pci_scan_root_bus);
2807
15856ad5 2808struct pci_bus *pci_scan_bus(int bus, struct pci_ops *ops,
de4b2f76
BH
2809 void *sysdata)
2810{
2811 LIST_HEAD(resources);
2812 struct pci_bus *b;
2813
2814 pci_add_resource(&resources, &ioport_resource);
2815 pci_add_resource(&resources, &iomem_resource);
857c3b66 2816 pci_add_resource(&resources, &busn_resource);
de4b2f76
BH
2817 b = pci_create_root_bus(NULL, bus, ops, sysdata, &resources);
2818 if (b) {
857c3b66 2819 pci_scan_child_bus(b);
de4b2f76
BH
2820 } else {
2821 pci_free_resource_list(&resources);
2822 }
2823 return b;
2824}
2825EXPORT_SYMBOL(pci_scan_bus);
2826
2f320521
YL
2827/**
2828 * pci_rescan_bus_bridge_resize - scan a PCI bus for devices.
2829 * @bridge: PCI bridge for the bus to scan
2830 *
2831 * Scan a PCI bus and child buses for new devices, add them,
2832 * and enable them, resizing bridge mmio/io resource if necessary
2833 * and possible. The caller must ensure the child devices are already
2834 * removed for resizing to occur.
2835 *
2836 * Returns the max number of subordinate bus discovered.
2837 */
10874f5a 2838unsigned int pci_rescan_bus_bridge_resize(struct pci_dev *bridge)
2f320521
YL
2839{
2840 unsigned int max;
2841 struct pci_bus *bus = bridge->subordinate;
2842
2843 max = pci_scan_child_bus(bus);
2844
2845 pci_assign_unassigned_bridge_resources(bridge);
2846
2847 pci_bus_add_devices(bus);
2848
2849 return max;
2850}
2851
/**
 * pci_rescan_bus - scan a PCI bus for devices.
 * @bus: PCI bus to scan
 *
 * Scan a PCI bus and child buses for new devices, adds them,
 * and enables them.
 *
 * Returns the max number of subordinate bus discovered.
 */
unsigned int pci_rescan_bus(struct pci_bus *bus)
{
	unsigned int max = pci_scan_child_bus(bus);

	pci_assign_unassigned_bus_resources(bus);
	pci_bus_add_devices(bus);

	return max;
}
EXPORT_SYMBOL_GPL(pci_rescan_bus);
2872
9d16947b
RW
2873/*
2874 * pci_rescan_bus(), pci_rescan_bus_bridge_resize() and PCI device removal
2875 * routines should always be executed under this mutex.
2876 */
2877static DEFINE_MUTEX(pci_rescan_remove_lock);
2878
2879void pci_lock_rescan_remove(void)
2880{
2881 mutex_lock(&pci_rescan_remove_lock);
2882}
2883EXPORT_SYMBOL_GPL(pci_lock_rescan_remove);
2884
2885void pci_unlock_rescan_remove(void)
2886{
2887 mutex_unlock(&pci_rescan_remove_lock);
2888}
2889EXPORT_SYMBOL_GPL(pci_unlock_rescan_remove);
2890
3c78bc61
RD
2891static int __init pci_sort_bf_cmp(const struct device *d_a,
2892 const struct device *d_b)
6b4b78fe 2893{
99178b03
GKH
2894 const struct pci_dev *a = to_pci_dev(d_a);
2895 const struct pci_dev *b = to_pci_dev(d_b);
2896
6b4b78fe
MD
2897 if (pci_domain_nr(a->bus) < pci_domain_nr(b->bus)) return -1;
2898 else if (pci_domain_nr(a->bus) > pci_domain_nr(b->bus)) return 1;
2899
2900 if (a->bus->number < b->bus->number) return -1;
2901 else if (a->bus->number > b->bus->number) return 1;
2902
2903 if (a->devfn < b->devfn) return -1;
2904 else if (a->devfn > b->devfn) return 1;
2905
2906 return 0;
2907}
2908
5ff580c1 2909void __init pci_sort_breadthfirst(void)
6b4b78fe 2910{
99178b03 2911 bus_sort_breadthfirst(&pci_bus_type, &pci_sort_bf_cmp);
6b4b78fe 2912}
95e3ba97
MW
2913
2914int pci_hp_add_bridge(struct pci_dev *dev)
2915{
2916 struct pci_bus *parent = dev->bus;
4147c2fd 2917 int busnr, start = parent->busn_res.start;
1c02ea81 2918 unsigned int available_buses = 0;
95e3ba97
MW
2919 int end = parent->busn_res.end;
2920
2921 for (busnr = start; busnr <= end; busnr++) {
2922 if (!pci_find_bus(pci_domain_nr(parent), busnr))
2923 break;
2924 }
2925 if (busnr-- > end) {
2926 dev_err(&dev->dev, "No bus number available for hot-added bridge\n");
2927 return -1;
2928 }
4147c2fd
MW
2929
2930 /* Scan bridges that are already configured */
2931 busnr = pci_scan_bridge(parent, dev, busnr, 0);
2932
1c02ea81
MW
2933 /*
2934 * Distribute the available bus numbers between hotplug-capable
2935 * bridges to make extending the chain later possible.
2936 */
2937 available_buses = end - busnr;
2938
4147c2fd 2939 /* Scan bridges that need to be reconfigured */
1c02ea81 2940 pci_scan_bridge_extend(parent, dev, busnr, available_buses, 1);
4147c2fd 2941
95e3ba97
MW
2942 if (!dev->subordinate)
2943 return -1;
2944
2945 return 0;
2946}
2947EXPORT_SYMBOL_GPL(pci_hp_add_bridge);