]> git.proxmox.com Git - mirror_ubuntu-bionic-kernel.git/blob - arch/arm/kernel/bios32.c
Merge branches 'acpi-scan', 'acpi-utils' and 'acpi-pm'
[mirror_ubuntu-bionic-kernel.git] / arch / arm / kernel / bios32.c
1 /*
2 * linux/arch/arm/kernel/bios32.c
3 *
4 * PCI bios-type initialisation for PCI machines
5 *
6 * Bits taken from various places.
7 */
8 #include <linux/export.h>
9 #include <linux/kernel.h>
10 #include <linux/pci.h>
11 #include <linux/slab.h>
12 #include <linux/init.h>
13 #include <linux/io.h>
14
15 #include <asm/mach-types.h>
16 #include <asm/mach/map.h>
17 #include <asm/mach/pci.h>
18
/* Verbose PCI debugging; set by pcibios_setup() when "pci=debug" is given. */
static int debug_pci;
20
#ifdef CONFIG_PCI_MSI
/*
 * Return the MSI controller for @dev, taken from the per-root-bus
 * pci_sys_data that the ARM PCI code installs as bus sysdata.
 */
struct msi_controller *pcibios_msi_controller(struct pci_dev *dev)
{
        struct pci_sys_data *sys = dev->bus->sysdata;

        return sys->msi_ctrl;
}
#endif
29
30 /*
31 * We can't use pci_get_device() here since we are
32 * called from interrupt context.
33 */
34 static void pcibios_bus_report_status(struct pci_bus *bus, u_int status_mask, int warn)
35 {
36 struct pci_dev *dev;
37
38 list_for_each_entry(dev, &bus->devices, bus_list) {
39 u16 status;
40
41 /*
42 * ignore host bridge - we handle
43 * that separately
44 */
45 if (dev->bus->number == 0 && dev->devfn == 0)
46 continue;
47
48 pci_read_config_word(dev, PCI_STATUS, &status);
49 if (status == 0xffff)
50 continue;
51
52 if ((status & status_mask) == 0)
53 continue;
54
55 /* clear the status errors */
56 pci_write_config_word(dev, PCI_STATUS, status & status_mask);
57
58 if (warn)
59 printk("(%s: %04X) ", pci_name(dev), status);
60 }
61
62 list_for_each_entry(dev, &bus->devices, bus_list)
63 if (dev->subordinate)
64 pcibios_bus_report_status(dev->subordinate, status_mask, warn);
65 }
66
67 void pcibios_report_status(u_int status_mask, int warn)
68 {
69 struct pci_bus *bus;
70
71 list_for_each_entry(bus, &pci_root_buses, node)
72 pcibios_bus_report_status(bus, status_mask, warn);
73 }
74
/*
 * We don't use this to fix the device, but initialisation of it.
 * It's not the correct use for this, but it works.
 * Note that the arbiter/ISA bridge appears to be buggy, specifically in
 * the following area:
 * 1. park on CPU
 * 2. ISA bridge ping-pong
 * 3. ISA bridge master handling of target RETRY
 *
 * Bug 3 is responsible for the sound DMA grinding to a halt. We now
 * live with bug 2.
 *
 * NOTE: the config-space offsets written below (0x40, 0x42, 0x44, 0x48,
 * 0x80, 0x81, 0x83) are device-specific registers of the Winbond 83C553;
 * their meanings come from the comments here, not from pci.h.
 */
static void pci_fixup_83c553(struct pci_dev *dev)
{
        /*
         * Set memory region to start at address 0, and enable IO
         */
        pci_write_config_dword(dev, PCI_BASE_ADDRESS_0, PCI_BASE_ADDRESS_SPACE_MEMORY);
        pci_write_config_word(dev, PCI_COMMAND, PCI_COMMAND_IO);

        /* keep the cached resource in step with the BAR we just rewrote */
        dev->resource[0].end -= dev->resource[0].start;
        dev->resource[0].start = 0;

        /*
         * All memory requests from ISA to be channelled to PCI
         */
        pci_write_config_byte(dev, 0x48, 0xff);

        /*
         * Enable ping-pong on bus master to ISA bridge transactions.
         * This improves the sound DMA substantially. The fixed
         * priority arbiter also helps (see below).
         */
        pci_write_config_byte(dev, 0x42, 0x01);

        /*
         * Enable PCI retry
         */
        pci_write_config_byte(dev, 0x40, 0x22);

        /*
         * We used to set the arbiter to "park on last master" (bit
         * 1 set), but unfortunately the CyberPro does not park the
         * bus. We must therefore park on CPU. Unfortunately, this
         * may trigger yet another bug in the 553.
         */
        pci_write_config_byte(dev, 0x83, 0x02);

        /*
         * Make the ISA DMA request lowest priority, and disable
         * rotating priorities completely.
         */
        pci_write_config_byte(dev, 0x80, 0x11);
        pci_write_config_byte(dev, 0x81, 0x00);

        /*
         * Route INTA input to IRQ 11, and set IRQ11 to be level
         * sensitive.
         */
        pci_write_config_word(dev, 0x44, 0xb000);
        outb(0x08, 0x4d1);      /* 0x4d1 is the legacy ELCR2 port; bit 3 = IRQ11 level */
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_WINBOND, PCI_DEVICE_ID_WINBOND_83C553, pci_fixup_83c553);
138
139 static void pci_fixup_unassign(struct pci_dev *dev)
140 {
141 dev->resource[0].end -= dev->resource[0].start;
142 dev->resource[0].start = 0;
143 }
144 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_WINBOND2, PCI_DEVICE_ID_WINBOND2_89C940F, pci_fixup_unassign);
145
146 /*
147 * Prevent the PCI layer from seeing the resources allocated to this device
148 * if it is the host bridge by marking it as such. These resources are of
149 * no consequence to the PCI layer (they are handled elsewhere).
150 */
151 static void pci_fixup_dec21285(struct pci_dev *dev)
152 {
153 int i;
154
155 if (dev->devfn == 0) {
156 dev->class &= 0xff;
157 dev->class |= PCI_CLASS_BRIDGE_HOST << 8;
158 for (i = 0; i < PCI_NUM_RESOURCES; i++) {
159 dev->resource[i].start = 0;
160 dev->resource[i].end = 0;
161 dev->resource[i].flags = 0;
162 }
163 }
164 }
165 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_DEC_21285, pci_fixup_dec21285);
166
167 /*
168 * PCI IDE controllers use non-standard I/O port decoding, respect it.
169 */
170 static void pci_fixup_ide_bases(struct pci_dev *dev)
171 {
172 struct resource *r;
173 int i;
174
175 if ((dev->class >> 8) != PCI_CLASS_STORAGE_IDE)
176 return;
177
178 for (i = 0; i < PCI_NUM_RESOURCES; i++) {
179 r = dev->resource + i;
180 if ((r->start & ~0x80) == 0x374) {
181 r->start |= 2;
182 r->end = r->start;
183 }
184 }
185 }
186 DECLARE_PCI_FIXUP_HEADER(PCI_ANY_ID, PCI_ANY_ID, pci_fixup_ide_bases);
187
/*
 * Put the DEC21142 to sleep
 */
static void pci_fixup_dec21142(struct pci_dev *dev)
{
        /*
         * Write device-specific config register 0x40 with the top bit set.
         * NOTE(review): the register semantics (sleep/snooze mode) come
         * from the 21142 datasheet, not from anything visible here —
         * confirm against the tulip driver's CFDD handling.
         */
        pci_write_config_dword(dev, 0x40, 0x80000000);
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_DEC, PCI_DEVICE_ID_DEC_21142, pci_fixup_dec21142);
196
/*
 * The CY82C693 needs some rather major fixups to ensure that it does
 * the right thing. Idea from the Alpha people, with a few additions.
 *
 * We ensure that the IDE base registers are set to 1f0/3f4 for the
 * primary bus, and 170/374 for the secondary bus. Also, hide them
 * from the PCI subsystem view as well so we won't try to perform
 * our own auto-configuration on them.
 *
 * In addition, we ensure that the PCI IDE interrupts are routed to
 * IRQ 14 and IRQ 15 respectively.
 *
 * The above gets us to a point where the IDE on this device is
 * functional. However, The CY82C693U _does not work_ in bus
 * master mode without locking the PCI bus solid.
 */
static void pci_fixup_cy82c693(struct pci_dev *dev)
{
        if ((dev->class >> 8) == PCI_CLASS_STORAGE_IDE) {
                u32 base0, base1;

                if (dev->class & 0x80) {        /* primary */
                        base0 = 0x1f0;
                        base1 = 0x3f4;
                } else {                        /* secondary */
                        base0 = 0x170;
                        base1 = 0x374;
                }

                /* hard-wire the BARs to the legacy IDE I/O ports */
                pci_write_config_dword(dev, PCI_BASE_ADDRESS_0,
                                       base0 | PCI_BASE_ADDRESS_SPACE_IO);
                pci_write_config_dword(dev, PCI_BASE_ADDRESS_1,
                                       base1 | PCI_BASE_ADDRESS_SPACE_IO);

                /*
                 * Zero the cached resources so the PCI core does not try
                 * to reassign the ports we just fixed.
                 */
                dev->resource[0].start = 0;
                dev->resource[0].end   = 0;
                dev->resource[0].flags = 0;

                dev->resource[1].start = 0;
                dev->resource[1].end   = 0;
                dev->resource[1].flags = 0;
        } else if (PCI_FUNC(dev->devfn) == 0) {
                /*
                 * Setup IDE IRQ routing.
                 * (0x44..0x4d are CY82C693-specific config registers;
                 * values per the comments below.)
                 */
                pci_write_config_byte(dev, 0x4b, 14);
                pci_write_config_byte(dev, 0x4c, 15);

                /*
                 * Disable FREQACK handshake, enable USB.
                 */
                pci_write_config_byte(dev, 0x4d, 0x41);

                /*
                 * Enable PCI retry, and PCI post-write buffer.
                 */
                pci_write_config_byte(dev, 0x44, 0x17);

                /*
                 * Enable ISA master and DMA post write buffering.
                 */
                pci_write_config_byte(dev, 0x45, 0x03);
        }
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_CONTAQ, PCI_DEVICE_ID_CONTAQ_82C693, pci_fixup_cy82c693);
262
263 static void pci_fixup_it8152(struct pci_dev *dev)
264 {
265 int i;
266 /* fixup for ITE 8152 devices */
267 /* FIXME: add defines for class 0x68000 and 0x80103 */
268 if ((dev->class >> 8) == PCI_CLASS_BRIDGE_HOST ||
269 dev->class == 0x68000 ||
270 dev->class == 0x80103) {
271 for (i = 0; i < PCI_NUM_RESOURCES; i++) {
272 dev->resource[i].start = 0;
273 dev->resource[i].end = 0;
274 dev->resource[i].flags = 0;
275 }
276 }
277 }
278 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ITE, PCI_DEVICE_ID_ITE_8152, pci_fixup_it8152);
279
280 /*
281 * If the bus contains any of these devices, then we must not turn on
282 * parity checking of any kind. Currently this is CyberPro 20x0 only.
283 */
284 static inline int pdev_bad_for_parity(struct pci_dev *dev)
285 {
286 return ((dev->vendor == PCI_VENDOR_ID_INTERG &&
287 (dev->device == PCI_DEVICE_ID_INTERG_2000 ||
288 dev->device == PCI_DEVICE_ID_INTERG_2010)) ||
289 (dev->vendor == PCI_VENDOR_ID_ITE &&
290 dev->device == PCI_DEVICE_ID_ITE_8152));
291
292 }
293
/*
 * pcibios_fixup_bus - Called after each bus is probed,
 * but before its children are examined.
 *
 * Two passes over the devices on @bus: the first works out which
 * PCI_COMMAND features every device can support; the second writes the
 * common feature set into each device's command register.
 */
void pcibios_fixup_bus(struct pci_bus *bus)
{
        struct pci_dev *dev;
        /* start optimistic; bits are removed as devices disqualify them */
        u16 features = PCI_COMMAND_SERR | PCI_COMMAND_PARITY | PCI_COMMAND_FAST_BACK;

        /*
         * Walk the devices on this bus, working out what we can
         * and can't support.
         */
        list_for_each_entry(dev, &bus->devices, bus_list) {
                u16 status;

                pci_read_config_word(dev, PCI_STATUS, &status);

                /*
                 * If any device on this bus does not support fast back
                 * to back transfers, then the bus as a whole is not able
                 * to support them. Having fast back to back transfers
                 * on saves us one PCI cycle per transaction.
                 */
                if (!(status & PCI_STATUS_FAST_BACK))
                        features &= ~PCI_COMMAND_FAST_BACK;

                /* known parity-broken devices poison the whole bus */
                if (pdev_bad_for_parity(dev))
                        features &= ~(PCI_COMMAND_SERR | PCI_COMMAND_PARITY);

                /* normalise bridge control registers as we go */
                switch (dev->class >> 8) {
                case PCI_CLASS_BRIDGE_PCI:
                        pci_read_config_word(dev, PCI_BRIDGE_CONTROL, &status);
                        status |= PCI_BRIDGE_CTL_PARITY|PCI_BRIDGE_CTL_MASTER_ABORT;
                        status &= ~(PCI_BRIDGE_CTL_BUS_RESET|PCI_BRIDGE_CTL_FAST_BACK);
                        pci_write_config_word(dev, PCI_BRIDGE_CONTROL, status);
                        break;

                case PCI_CLASS_BRIDGE_CARDBUS:
                        pci_read_config_word(dev, PCI_CB_BRIDGE_CONTROL, &status);
                        status |= PCI_CB_BRIDGE_CTL_PARITY|PCI_CB_BRIDGE_CTL_MASTER_ABORT;
                        pci_write_config_word(dev, PCI_CB_BRIDGE_CONTROL, status);
                        break;
                }
        }

        /*
         * Now walk the devices again, this time setting them up.
         */
        list_for_each_entry(dev, &bus->devices, bus_list) {
                u16 cmd;

                pci_read_config_word(dev, PCI_COMMAND, &cmd);
                cmd |= features;
                pci_write_config_word(dev, PCI_COMMAND, cmd);

                /* PCI_CACHE_LINE_SIZE is in units of 32-bit words */
                pci_write_config_byte(dev, PCI_CACHE_LINE_SIZE,
                                      L1_CACHE_BYTES >> 2);
        }

        /*
         * Propagate the flags to the PCI bridge.
         */
        if (bus->self && bus->self->hdr_type == PCI_HEADER_TYPE_BRIDGE) {
                if (features & PCI_COMMAND_FAST_BACK)
                        bus->bridge_ctl |= PCI_BRIDGE_CTL_FAST_BACK;
                if (features & PCI_COMMAND_PARITY)
                        bus->bridge_ctl |= PCI_BRIDGE_CTL_PARITY;
        }

        /*
         * Report what we did for this bus
         */
        printk(KERN_INFO "PCI: bus%d: Fast back to back transfers %sabled\n",
                bus->number, (features & PCI_COMMAND_FAST_BACK) ? "en" : "dis");
}
EXPORT_SYMBOL(pcibios_fixup_bus);
371
372 /*
373 * Swizzle the device pin each time we cross a bridge. If a platform does
374 * not provide a swizzle function, we perform the standard PCI swizzling.
375 *
376 * The default swizzling walks up the bus tree one level at a time, applying
377 * the standard swizzle function at each step, stopping when it finds the PCI
378 * root bus. This will return the slot number of the bridge device on the
379 * root bus and the interrupt pin on that device which should correspond
380 * with the downstream device interrupt.
381 *
382 * Platforms may override this, in which case the slot and pin returned
383 * depend entirely on the platform code. However, please note that the
384 * PCI standard swizzle is implemented on plug-in cards and Cardbus based
385 * PCI extenders, so it can not be ignored.
386 */
387 static u8 pcibios_swizzle(struct pci_dev *dev, u8 *pin)
388 {
389 struct pci_sys_data *sys = dev->sysdata;
390 int slot, oldpin = *pin;
391
392 if (sys->swizzle)
393 slot = sys->swizzle(dev, pin);
394 else
395 slot = pci_common_swizzle(dev, pin);
396
397 if (debug_pci)
398 printk("PCI: %s swizzling pin %d => pin %d slot %d\n",
399 pci_name(dev), oldpin, *pin, slot);
400
401 return slot;
402 }
403
404 /*
405 * Map a slot/pin to an IRQ.
406 */
407 static int pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
408 {
409 struct pci_sys_data *sys = dev->sysdata;
410 int irq = -1;
411
412 if (sys->map_irq)
413 irq = sys->map_irq(dev, slot, pin);
414
415 if (debug_pci)
416 printk("PCI: %s mapping slot %d pin %d => irq %d\n",
417 pci_name(dev), slot, pin, irq);
418
419 return irq;
420 }
421
422 static int pcibios_init_resources(int busnr, struct pci_sys_data *sys)
423 {
424 int ret;
425 struct pci_host_bridge_window *window;
426
427 if (list_empty(&sys->resources)) {
428 pci_add_resource_offset(&sys->resources,
429 &iomem_resource, sys->mem_offset);
430 }
431
432 list_for_each_entry(window, &sys->resources, list) {
433 if (resource_type(window->res) == IORESOURCE_IO)
434 return 0;
435 }
436
437 sys->io_res.start = (busnr * SZ_64K) ? : pcibios_min_io;
438 sys->io_res.end = (busnr + 1) * SZ_64K - 1;
439 sys->io_res.flags = IORESOURCE_IO;
440 sys->io_res.name = sys->io_res_name;
441 sprintf(sys->io_res_name, "PCI%d I/O", busnr);
442
443 ret = request_resource(&ioport_resource, &sys->io_res);
444 if (ret) {
445 pr_err("PCI: unable to allocate I/O port region (%d)\n", ret);
446 return ret;
447 }
448 pci_add_resource_offset(&sys->resources, &sys->io_res,
449 sys->io_offset);
450
451 return 0;
452 }
453
/*
 * Allocate and initialise one pci_sys_data per controller described by
 * @hw, run the platform setup/scan hooks, and collect each successfully
 * scanned system on @head. Bus numbers are handed out sequentially:
 * each controller starts just past the range the previous one claimed.
 */
static void pcibios_init_hw(struct device *parent, struct hw_pci *hw,
                            struct list_head *head)
{
        struct pci_sys_data *sys = NULL;
        int ret;
        int nr, busnr;

        for (nr = busnr = 0; nr < hw->nr_controllers; nr++) {
                sys = kzalloc(sizeof(struct pci_sys_data), GFP_KERNEL);
                if (!sys)
                        panic("PCI: unable to allocate sys data!");

#ifdef CONFIG_PCI_DOMAINS
                sys->domain = hw->domain;
#endif
#ifdef CONFIG_PCI_MSI
                sys->msi_ctrl = hw->msi_ctrl;
#endif
                /* copy the per-controller hooks from the hw description */
                sys->busnr = busnr;
                sys->swizzle = hw->swizzle;
                sys->map_irq = hw->map_irq;
                sys->align_resource = hw->align_resource;
                INIT_LIST_HEAD(&sys->resources);

                if (hw->private_data)
                        sys->private_data = hw->private_data[nr];

                /* setup() > 0 means the controller is present and ready */
                ret = hw->setup(nr, sys);

                if (ret > 0) {
                        ret = pcibios_init_resources(nr, sys);
                        if (ret) {
                                kfree(sys);
                                break;
                        }

                        /* platform may scan itself, else use the generic scan */
                        if (hw->scan)
                                sys->bus = hw->scan(nr, sys);
                        else
                                sys->bus = pci_scan_root_bus(parent, sys->busnr,
                                                hw->ops, sys, &sys->resources);

                        if (!sys->bus)
                                panic("PCI: unable to scan bus!");

                        /* next controller numbers its busses after this one */
                        busnr = sys->bus->busn_res.end + 1;

                        list_add(&sys->node, head);
                } else {
                        kfree(sys);
                        /* ret == 0: controller absent, keep going;
                         * ret < 0: hard error, stop probing */
                        if (ret < 0)
                                break;
                }
        }
}
509
/*
 * Top-level ARM PCI initialisation for the controllers described by @hw:
 * run the platform pre/post hooks, probe every controller, fix up IRQs,
 * then (unless the firmware owns the configuration) size and assign
 * resources before registering devices with the driver core.
 */
void pci_common_init_dev(struct device *parent, struct hw_pci *hw)
{
        struct pci_sys_data *sys;
        LIST_HEAD(head);

        /* we always re-assign all resources ourselves on ARM */
        pci_add_flags(PCI_REASSIGN_ALL_RSRC);
        if (hw->preinit)
                hw->preinit();
        pcibios_init_hw(parent, hw, &head);
        if (hw->postinit)
                hw->postinit();

        pci_fixup_irqs(pcibios_swizzle, pcibios_map_irq);

        list_for_each_entry(sys, &head, node) {
                struct pci_bus *bus = sys->bus;

                /* PCI_PROBE_ONLY: firmware set everything up, don't touch */
                if (!pci_has_flag(PCI_PROBE_ONLY)) {
                        /*
                         * Size the bridge windows.
                         */
                        pci_bus_size_bridges(bus);

                        /*
                         * Assign resources.
                         */
                        pci_bus_assign_resources(bus);
                }

                /*
                 * Tell drivers about devices found.
                 */
                pci_bus_add_devices(bus);
        }

        list_for_each_entry(sys, &head, node) {
                struct pci_bus *bus = sys->bus;

                /* Configure PCI Express settings */
                if (bus && !pci_has_flag(PCI_PROBE_ONLY)) {
                        struct pci_bus *child;

                        list_for_each_entry(child, &bus->children, node)
                                pcie_bus_configure_settings(child);
                }
        }
}
557
#ifndef CONFIG_PCI_HOST_ITE8152
/*
 * Generic no-op: ARM needs no extra work when bus mastering is enabled.
 * NOTE(review): presumably the ITE8152 host code supplies its own
 * pcibios_set_master() when CONFIG_PCI_HOST_ITE8152 is set — verify.
 */
void pcibios_set_master(struct pci_dev *dev)
{
        /* No special bus mastering setup handling */
}
#endif
564
565 char * __init pcibios_setup(char *str)
566 {
567 if (!strcmp(str, "debug")) {
568 debug_pci = 1;
569 return NULL;
570 } else if (!strcmp(str, "firmware")) {
571 pci_add_flags(PCI_PROBE_ONLY);
572 return NULL;
573 }
574 return str;
575 }
576
577 /*
578 * From arch/i386/kernel/pci-i386.c:
579 *
580 * We need to avoid collisions with `mirrored' VGA ports
581 * and other strange ISA hardware, so we always want the
582 * addresses to be allocated in the 0x000-0x0ff region
583 * modulo 0x400.
584 *
585 * Why? Because some silly external IO cards only decode
586 * the low 10 bits of the IO address. The 0x00-0xff region
587 * is reserved for motherboard devices that decode all 16
588 * bits, so it's ok to allocate at, say, 0x2800-0x28ff,
589 * but we want to try to avoid allocating at 0x2900-0x2bff
590 * which might be mirrored at 0x0100-0x03ff..
591 */
592 resource_size_t pcibios_align_resource(void *data, const struct resource *res,
593 resource_size_t size, resource_size_t align)
594 {
595 struct pci_dev *dev = data;
596 struct pci_sys_data *sys = dev->sysdata;
597 resource_size_t start = res->start;
598
599 if (res->flags & IORESOURCE_IO && start & 0x300)
600 start = (start + 0x3ff) & ~0x3ff;
601
602 start = (start + align - 1) & ~(align - 1);
603
604 if (sys->align_resource)
605 return sys->align_resource(dev, res, start, size, align);
606
607 return start;
608 }
609
610 /**
611 * pcibios_enable_device - Enable I/O and memory.
612 * @dev: PCI device to be enabled
613 */
614 int pcibios_enable_device(struct pci_dev *dev, int mask)
615 {
616 if (pci_has_flag(PCI_PROBE_ONLY))
617 return 0;
618
619 return pci_enable_resources(dev, mask);
620 }
621
622 int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
623 enum pci_mmap_state mmap_state, int write_combine)
624 {
625 struct pci_sys_data *root = dev->sysdata;
626 unsigned long phys;
627
628 if (mmap_state == pci_mmap_io) {
629 return -EINVAL;
630 } else {
631 phys = vma->vm_pgoff + (root->mem_offset >> PAGE_SHIFT);
632 }
633
634 /*
635 * Mark this as IO
636 */
637 vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
638
639 if (remap_pfn_range(vma, vma->vm_start, phys,
640 vma->vm_end - vma->vm_start,
641 vma->vm_page_prot))
642 return -EAGAIN;
643
644 return 0;
645 }
646
647 void __init pci_map_io_early(unsigned long pfn)
648 {
649 struct map_desc pci_io_desc = {
650 .virtual = PCI_IO_VIRT_BASE,
651 .type = MT_DEVICE,
652 .length = SZ_64K,
653 };
654
655 pci_io_desc.pfn = pfn;
656 iotable_init(&pci_io_desc, 1);
657 }