arch/x86/pci/common.c
/*
 * Low-Level PCI Support for PC
 *
 * (c) 1999--2000 Martin Mares <mj@ucw.cz>
 */

#include <linux/sched.h>
#include <linux/pci.h>
#include <linux/ioport.h>
#include <linux/init.h>
#include <linux/dmi.h>

#include <asm/acpi.h>
#include <asm/segment.h>
#include <asm/io.h>
#include <asm/smp.h>
#include <asm/pci_x86.h>

unsigned int pci_probe = PCI_PROBE_BIOS | PCI_PROBE_CONF1 | PCI_PROBE_CONF2 |
				PCI_PROBE_MMCONF;

unsigned int pci_early_dump_regs;
static int pci_bf_sort;
int pci_routeirq;
int noioapicquirk;
#ifdef CONFIG_X86_REROUTE_FOR_BROKEN_BOOT_IRQS
int noioapicreroute = 0;
#else
int noioapicreroute = 1;
#endif
int pcibios_last_bus = -1;
unsigned long pirq_table_addr;
struct pci_bus *pci_root_bus;
struct pci_raw_ops *raw_pci_ops;
struct pci_raw_ops *raw_pci_ext_ops;

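/*
 * Raw config space accessors.  Accesses to the legacy 256-byte config
 * space of domain 0 go through raw_pci_ops (BIOS, type 1 or type 2
 * mechanism); extended registers and non-zero domains go through
 * raw_pci_ext_ops, typically MMCONFIG when it is available.
 */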
int raw_pci_read(unsigned int domain, unsigned int bus, unsigned int devfn,
						int reg, int len, u32 *val)
{
	if (domain == 0 && reg < 256 && raw_pci_ops)
		return raw_pci_ops->read(domain, bus, devfn, reg, len, val);
	if (raw_pci_ext_ops)
		return raw_pci_ext_ops->read(domain, bus, devfn, reg, len, val);
	return -EINVAL;
}

int raw_pci_write(unsigned int domain, unsigned int bus, unsigned int devfn,
						int reg, int len, u32 val)
{
	if (domain == 0 && reg < 256 && raw_pci_ops)
		return raw_pci_ops->write(domain, bus, devfn, reg, len, val);
	if (raw_pci_ext_ops)
		return raw_pci_ext_ops->write(domain, bus, devfn, reg, len, val);
	return -EINVAL;
}

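/*
 * struct pci_ops wrappers: translate the (bus, devfn) based interface
 * used by the PCI core into the raw domain/bus/devfn accessors above.
 */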
static int pci_read(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 *value)
{
	return raw_pci_read(pci_domain_nr(bus), bus->number,
				devfn, where, size, value);
}

static int pci_write(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 value)
{
	return raw_pci_write(pci_domain_nr(bus), bus->number,
				devfn, where, size, value);
}

struct pci_ops pci_root_ops = {
	.read = pci_read,
	.write = pci_write,
};

/*
 * legacy, numa, and acpi all want to call pcibios_scan_root
 * from their initcalls. This flag prevents that.
 */
int pcibios_scanned;

/*
 * This interrupt-safe spinlock protects all accesses to PCI
 * configuration space.
 */
DEFINE_SPINLOCK(pci_config_lock);

static int __devinit can_skip_ioresource_align(const struct dmi_system_id *d)
{
	pci_probe |= PCI_CAN_SKIP_ISA_ALIGN;
	printk(KERN_INFO "PCI: %s detected, can skip ISA alignment\n", d->ident);
	return 0;
}

static const struct dmi_system_id can_skip_pciprobe_dmi_table[] __devinitconst = {
/*
 * Systems where PCI IO resource ISA alignment can be skipped
 * when the ISA enable bit in the bridge control is not set
 */
	{
		.callback = can_skip_ioresource_align,
		.ident = "IBM System x3800",
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "IBM"),
			DMI_MATCH(DMI_PRODUCT_NAME, "x3800"),
		},
	},
	{
		.callback = can_skip_ioresource_align,
		.ident = "IBM System x3850",
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "IBM"),
			DMI_MATCH(DMI_PRODUCT_NAME, "x3850"),
		},
	},
	{
		.callback = can_skip_ioresource_align,
		.ident = "IBM System x3950",
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "IBM"),
			DMI_MATCH(DMI_PRODUCT_NAME, "x3950"),
		},
	},
	{}
};

void __init dmi_check_skip_isa_align(void)
{
	dmi_check_system(can_skip_pciprobe_dmi_table);
}

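/*
 * With "pci=norom", clear out any ROM resource that is neither already
 * claimed (has a parent) nor assigned by the BIOS, so the core will not
 * try to assign address space to the expansion ROM.
 */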
static void __devinit pcibios_fixup_device_resources(struct pci_dev *dev)
{
	struct resource *rom_r = &dev->resource[PCI_ROM_RESOURCE];

	if (pci_probe & PCI_NOASSIGN_ROMS) {
		if (rom_r->parent)
			return;
		if (rom_r->start) {
			/* we deal with BIOS assigned ROM later */
			return;
		}
		rom_r->start = rom_r->end = rom_r->flags = 0;
	}
}

/*
 * Called after each bus is probed, but before its children
 * are examined.
 */

void __devinit pcibios_fixup_bus(struct pci_bus *b)
{
	struct pci_dev *dev;

	/* root bus? */
	if (!b->parent)
		x86_pci_root_bus_res_quirks(b);
	pci_read_bridge_bases(b);
	list_for_each_entry(dev, &b->devices, bus_list)
		pcibios_fixup_device_resources(dev);
}

/*
 * Only use DMI information to set this if nothing was passed
 * on the kernel command line (which was parsed earlier).
 */

static int __devinit set_bf_sort(const struct dmi_system_id *d)
{
	if (pci_bf_sort == pci_bf_sort_default) {
		pci_bf_sort = pci_dmi_bf;
		printk(KERN_INFO "PCI: %s detected, enabling pci=bfsort.\n", d->ident);
	}
	return 0;
}

/*
 * Enable renumbering of PCI bus# ranges to reach all PCI busses (Cardbus)
 */
#ifdef __i386__
static int __devinit assign_all_busses(const struct dmi_system_id *d)
{
	pci_probe |= PCI_ASSIGN_ALL_BUSSES;
	printk(KERN_INFO "%s detected: enabling PCI bus# renumbering"
			" (pci=assign-busses)\n", d->ident);
	return 0;
}
#endif

static const struct dmi_system_id __devinitconst pciprobe_dmi_table[] = {
#ifdef __i386__
/*
 * Laptops which need pci=assign-busses to see Cardbus cards
 */
	{
		.callback = assign_all_busses,
		.ident = "Samsung X20 Laptop",
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "Samsung Electronics"),
			DMI_MATCH(DMI_PRODUCT_NAME, "SX20S"),
		},
	},
#endif /* __i386__ */
	{
		.callback = set_bf_sort,
		.ident = "Dell PowerEdge 1950",
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "Dell"),
			DMI_MATCH(DMI_PRODUCT_NAME, "PowerEdge 1950"),
		},
	},
	{
		.callback = set_bf_sort,
		.ident = "Dell PowerEdge 1955",
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "Dell"),
			DMI_MATCH(DMI_PRODUCT_NAME, "PowerEdge 1955"),
		},
	},
	{
		.callback = set_bf_sort,
		.ident = "Dell PowerEdge 2900",
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "Dell"),
			DMI_MATCH(DMI_PRODUCT_NAME, "PowerEdge 2900"),
		},
	},
	{
		.callback = set_bf_sort,
		.ident = "Dell PowerEdge 2950",
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "Dell"),
			DMI_MATCH(DMI_PRODUCT_NAME, "PowerEdge 2950"),
		},
	},
	{
		.callback = set_bf_sort,
		.ident = "Dell PowerEdge R900",
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "Dell"),
			DMI_MATCH(DMI_PRODUCT_NAME, "PowerEdge R900"),
		},
	},
	{
		.callback = set_bf_sort,
		.ident = "HP ProLiant BL20p G3",
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "HP"),
			DMI_MATCH(DMI_PRODUCT_NAME, "ProLiant BL20p G3"),
		},
	},
	{
		.callback = set_bf_sort,
		.ident = "HP ProLiant BL20p G4",
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "HP"),
			DMI_MATCH(DMI_PRODUCT_NAME, "ProLiant BL20p G4"),
		},
	},
	{
		.callback = set_bf_sort,
		.ident = "HP ProLiant BL30p G1",
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "HP"),
			DMI_MATCH(DMI_PRODUCT_NAME, "ProLiant BL30p G1"),
		},
	},
	{
		.callback = set_bf_sort,
		.ident = "HP ProLiant BL25p G1",
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "HP"),
			DMI_MATCH(DMI_PRODUCT_NAME, "ProLiant BL25p G1"),
		},
	},
	{
		.callback = set_bf_sort,
		.ident = "HP ProLiant BL35p G1",
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "HP"),
			DMI_MATCH(DMI_PRODUCT_NAME, "ProLiant BL35p G1"),
		},
	},
	{
		.callback = set_bf_sort,
		.ident = "HP ProLiant BL45p G1",
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "HP"),
			DMI_MATCH(DMI_PRODUCT_NAME, "ProLiant BL45p G1"),
		},
	},
	{
		.callback = set_bf_sort,
		.ident = "HP ProLiant BL45p G2",
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "HP"),
			DMI_MATCH(DMI_PRODUCT_NAME, "ProLiant BL45p G2"),
		},
	},
	{
		.callback = set_bf_sort,
		.ident = "HP ProLiant BL460c G1",
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "HP"),
			DMI_MATCH(DMI_PRODUCT_NAME, "ProLiant BL460c G1"),
		},
	},
	{
		.callback = set_bf_sort,
		.ident = "HP ProLiant BL465c G1",
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "HP"),
			DMI_MATCH(DMI_PRODUCT_NAME, "ProLiant BL465c G1"),
		},
	},
	{
		.callback = set_bf_sort,
		.ident = "HP ProLiant BL480c G1",
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "HP"),
			DMI_MATCH(DMI_PRODUCT_NAME, "ProLiant BL480c G1"),
		},
	},
	{
		.callback = set_bf_sort,
		.ident = "HP ProLiant BL685c G1",
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "HP"),
			DMI_MATCH(DMI_PRODUCT_NAME, "ProLiant BL685c G1"),
		},
	},
	{
		.callback = set_bf_sort,
		.ident = "HP ProLiant DL360",
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "HP"),
			DMI_MATCH(DMI_PRODUCT_NAME, "ProLiant DL360"),
		},
	},
	{
		.callback = set_bf_sort,
		.ident = "HP ProLiant DL380",
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "HP"),
			DMI_MATCH(DMI_PRODUCT_NAME, "ProLiant DL380"),
		},
	},
#ifdef __i386__
	{
		.callback = assign_all_busses,
		.ident = "Compaq EVO N800c",
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "Compaq"),
			DMI_MATCH(DMI_PRODUCT_NAME, "EVO N800c"),
		},
	},
#endif
	{
		.callback = set_bf_sort,
		.ident = "HP ProLiant DL385 G2",
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "HP"),
			DMI_MATCH(DMI_PRODUCT_NAME, "ProLiant DL385 G2"),
		},
	},
	{
		.callback = set_bf_sort,
		.ident = "HP ProLiant DL585 G2",
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "HP"),
			DMI_MATCH(DMI_PRODUCT_NAME, "ProLiant DL585 G2"),
		},
	},
	{}
};

void __init dmi_check_pciprobe(void)
{
	dmi_check_system(pciprobe_dmi_table);
}

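/*
 * Scan a root bus unless it has already been scanned: if the bus is
 * already known, return it; otherwise allocate its per-root-bus sysdata
 * (including the NUMA node) and probe it.
 */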
struct pci_bus * __devinit pcibios_scan_root(int busnum)
{
	struct pci_bus *bus = NULL;
	struct pci_sysdata *sd;

	while ((bus = pci_find_next_bus(bus)) != NULL) {
		if (bus->number == busnum) {
			/* Already scanned */
			return bus;
		}
	}

	/* Allocate per-root-bus (not per bus) arch-specific data.
	 * TODO: leak; this memory is never freed.
	 * It's arguable whether it's worth the trouble to care.
	 */
	sd = kzalloc(sizeof(*sd), GFP_KERNEL);
	if (!sd) {
		printk(KERN_ERR "PCI: OOM, not probing PCI bus %02x\n", busnum);
		return NULL;
	}

	sd->node = get_mp_bus_to_node(busnum);

	printk(KERN_DEBUG "PCI: Probing PCI hardware (bus %02x)\n", busnum);
	bus = pci_scan_bus_parented(NULL, busnum, &pci_root_ops, sd);
	if (!bus)
		kfree(sd);

	return bus;
}

int __init pcibios_init(void)
{
	struct cpuinfo_x86 *c = &boot_cpu_data;

	if (!raw_pci_ops) {
		printk(KERN_WARNING "PCI: System does not support PCI\n");
		return 0;
	}

	/*
	 * Assume PCI cacheline size of 32 bytes for all x86s except K7/K8
	 * and P4. It's also good for 386/486s (which actually have 16)
	 * as quite a few PCI devices do not support smaller values.
	 */
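	/* pci_dfl_cache_line_size is in 32-bit dwords, hence the ">> 2" below */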
	pci_dfl_cache_line_size = 32 >> 2;
	if (c->x86 >= 6 && c->x86_vendor == X86_VENDOR_AMD)
		pci_dfl_cache_line_size = 64 >> 2;	/* K7 & K8 */
	else if (c->x86 > 6 && c->x86_vendor == X86_VENDOR_INTEL)
		pci_dfl_cache_line_size = 128 >> 2;	/* P4 */

	pcibios_resource_survey();

	if (pci_bf_sort >= pci_force_bf)
		pci_sort_breadthfirst();
	return 0;
}

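/*
 * Handle the "pci=" kernel parameter.  Each recognized option is
 * consumed (NULL is returned); anything unrecognized is handed back
 * to the caller unchanged.
 */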
char * __devinit pcibios_setup(char *str)
{
	if (!strcmp(str, "off")) {
		pci_probe = 0;
		return NULL;
	} else if (!strcmp(str, "bfsort")) {
		pci_bf_sort = pci_force_bf;
		return NULL;
	} else if (!strcmp(str, "nobfsort")) {
		pci_bf_sort = pci_force_nobf;
		return NULL;
	}
#ifdef CONFIG_PCI_BIOS
	else if (!strcmp(str, "bios")) {
		pci_probe = PCI_PROBE_BIOS;
		return NULL;
	} else if (!strcmp(str, "nobios")) {
		pci_probe &= ~PCI_PROBE_BIOS;
		return NULL;
	} else if (!strcmp(str, "biosirq")) {
		pci_probe |= PCI_BIOS_IRQ_SCAN;
		return NULL;
	} else if (!strncmp(str, "pirqaddr=", 9)) {
		pirq_table_addr = simple_strtoul(str+9, NULL, 0);
		return NULL;
	}
#endif
#ifdef CONFIG_PCI_DIRECT
	else if (!strcmp(str, "conf1")) {
		pci_probe = PCI_PROBE_CONF1 | PCI_NO_CHECKS;
		return NULL;
	}
	else if (!strcmp(str, "conf2")) {
		pci_probe = PCI_PROBE_CONF2 | PCI_NO_CHECKS;
		return NULL;
	}
#endif
#ifdef CONFIG_PCI_MMCONFIG
	else if (!strcmp(str, "nommconf")) {
		pci_probe &= ~PCI_PROBE_MMCONF;
		return NULL;
	}
	else if (!strcmp(str, "check_enable_amd_mmconf")) {
		pci_probe |= PCI_CHECK_ENABLE_AMD_MMCONF;
		return NULL;
	}
#endif
	else if (!strcmp(str, "noacpi")) {
		acpi_noirq_set();
		return NULL;
	}
	else if (!strcmp(str, "noearly")) {
		pci_probe |= PCI_PROBE_NOEARLY;
		return NULL;
	}
#ifndef CONFIG_X86_VISWS
	else if (!strcmp(str, "usepirqmask")) {
		pci_probe |= PCI_USE_PIRQ_MASK;
		return NULL;
	} else if (!strncmp(str, "irqmask=", 8)) {
		pcibios_irq_mask = simple_strtol(str+8, NULL, 0);
		return NULL;
	} else if (!strncmp(str, "lastbus=", 8)) {
		pcibios_last_bus = simple_strtol(str+8, NULL, 0);
		return NULL;
	}
#endif
	else if (!strcmp(str, "rom")) {
		pci_probe |= PCI_ASSIGN_ROMS;
		return NULL;
	} else if (!strcmp(str, "norom")) {
		pci_probe |= PCI_NOASSIGN_ROMS;
		return NULL;
	} else if (!strcmp(str, "assign-busses")) {
		pci_probe |= PCI_ASSIGN_ALL_BUSSES;
		return NULL;
	} else if (!strcmp(str, "use_crs")) {
		pci_probe |= PCI_USE__CRS;
		return NULL;
	} else if (!strcmp(str, "earlydump")) {
		pci_early_dump_regs = 1;
		return NULL;
	} else if (!strcmp(str, "routeirq")) {
		pci_routeirq = 1;
		return NULL;
	} else if (!strcmp(str, "skip_isa_align")) {
		pci_probe |= PCI_CAN_SKIP_ISA_ALIGN;
		return NULL;
	} else if (!strcmp(str, "noioapicquirk")) {
		noioapicquirk = 1;
		return NULL;
	} else if (!strcmp(str, "ioapicreroute")) {
		if (noioapicreroute != -1)
			noioapicreroute = 0;
		return NULL;
	} else if (!strcmp(str, "noioapicreroute")) {
		if (noioapicreroute != -1)
			noioapicreroute = 1;
		return NULL;
	}
	return str;
}

unsigned int pcibios_assign_all_busses(void)
{
	return (pci_probe & PCI_ASSIGN_ALL_BUSSES) ? 1 : 0;
}

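/*
 * Enable the device's resources and, unless the device is operating in
 * MSI mode, route its legacy interrupt line.
 */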
int pcibios_enable_device(struct pci_dev *dev, int mask)
{
	int err;

	if ((err = pci_enable_resources(dev, mask)) < 0)
		return err;

	if (!pci_dev_msi_enabled(dev))
		return pcibios_enable_irq(dev);
	return 0;
}

void pcibios_disable_device (struct pci_dev *dev)
{
	if (!pci_dev_msi_enabled(dev) && pcibios_disable_irq)
		pcibios_disable_irq(dev);
}

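/*
 * Report whether extended (reg >= 256) configuration space can be
 * reached, i.e. whether an extended accessor such as MMCONFIG has
 * been set up.
 */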
int pci_ext_cfg_avail(struct pci_dev *dev)
{
	if (raw_pci_ext_ops)
		return 1;
	else
		return 0;
}

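/*
 * Scan a bus with the given ops and associate it with a NUMA node
 * (-1 means no particular node) via the per-root-bus sysdata.
 */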
struct pci_bus * __devinit pci_scan_bus_on_node(int busno, struct pci_ops *ops, int node)
{
	struct pci_bus *bus = NULL;
	struct pci_sysdata *sd;

	/*
	 * Allocate per-root-bus (not per bus) arch-specific data.
	 * TODO: leak; this memory is never freed.
	 * It's arguable whether it's worth the trouble to care.
	 */
	sd = kzalloc(sizeof(*sd), GFP_KERNEL);
	if (!sd) {
		printk(KERN_ERR "PCI: OOM, skipping PCI bus %02x\n", busno);
		return NULL;
	}
	sd->node = node;
	bus = pci_scan_bus(busno, ops, sd);
	if (!bus)
		kfree(sd);

	return bus;
}

struct pci_bus * __devinit pci_scan_bus_with_sysdata(int busno)
{
	return pci_scan_bus_on_node(busno, &pci_root_ops, -1);
}

/*
 * NUMA info for PCI busses
 *
 * Early arch code is responsible for filling in reasonable values here.
 * A node id of "-1" means "use current node". In other words, if a bus
 * has a -1 node id, it's not tightly coupled to any particular chunk
 * of memory (as is the case on some Nehalem systems).
 */
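/*
 * mp_bus_to_node[] is written by early arch code through
 * set_mp_bus_to_node() and consulted by the scan paths above through
 * get_mp_bus_to_node() when building a root bus's sysdata.
 */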
#ifdef CONFIG_NUMA

#define BUS_NR 256

#ifdef CONFIG_X86_64

static int mp_bus_to_node[BUS_NR] = {
	[0 ... BUS_NR - 1] = -1
};

void set_mp_bus_to_node(int busnum, int node)
{
	if (busnum >= 0 && busnum < BUS_NR)
		mp_bus_to_node[busnum] = node;
}

int get_mp_bus_to_node(int busnum)
{
	int node = -1;

	if (busnum < 0 || busnum > (BUS_NR - 1))
		return node;

	node = mp_bus_to_node[busnum];

	/*
	 * let numa_node_id() decide it later in dma_alloc_pages
	 * if there is no RAM on that node
	 */
	if (node != -1 && !node_online(node))
		node = -1;

	return node;
}

#else /* CONFIG_X86_32 */

static int mp_bus_to_node[BUS_NR] = {
	[0 ... BUS_NR - 1] = -1
};

void set_mp_bus_to_node(int busnum, int node)
{
	if (busnum >= 0 && busnum < BUS_NR)
		mp_bus_to_node[busnum] = (unsigned char) node;
}

int get_mp_bus_to_node(int busnum)
{
	int node;

	if (busnum < 0 || busnum > (BUS_NR - 1))
		return 0;
	node = mp_bus_to_node[busnum];
	return node;
}

#endif /* CONFIG_X86_32 */

#endif /* CONFIG_NUMA */