/*
 * Port for PPC64 David Engebretsen, IBM Corp.
 * Contains common pci routines for ppc64 platform, pSeries and iSeries brands.
 *
 * Copyright (C) 2003 Anton Blanchard <anton@au.ibm.com>, IBM
 *   Rework, based on alpha PCI code.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#undef DEBUG

#include <linux/config.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/mm.h>
#include <linux/list.h>

#include <asm/processor.h>
#include <asm/io.h>
#include <asm/prom.h>
#include <asm/pci-bridge.h>
#include <asm/byteorder.h>
#include <asm/irq.h>
#include <asm/machdep.h>
#include <asm/udbg.h>

#include "pci.h"

#ifdef DEBUG
#define DBG(fmt...) udbg_printf(fmt)
#else
#define DBG(fmt...)
#endif

unsigned long pci_probe_only = 1;
unsigned long pci_assign_all_buses = 0;

/*
 * Legal IO pages under MAX_ISA_PORT.  This is to ensure we don't touch
 * devices we don't have access to.
 */
unsigned long io_page_mask;

EXPORT_SYMBOL(io_page_mask);

unsigned int pcibios_assign_all_busses(void)
{
	return pci_assign_all_buses;
}

/* pci_io_base -- the base address from which io bars are offsets.
 * This is the lowest I/O base address (so bar values are always positive),
 * and it *must* be the start of ISA space if an ISA bus exists because
 * ISA drivers use hard coded offsets.  If no ISA bus exists a dummy
 * page is mapped and isa_io_limit prevents access to it.
 */
unsigned long isa_io_base;	/* NULL if no ISA bus */
EXPORT_SYMBOL(isa_io_base);
unsigned long pci_io_base;
EXPORT_SYMBOL(pci_io_base);

void iSeries_pcibios_init(void);

LIST_HEAD(hose_list);

struct dma_mapping_ops pci_dma_ops;
EXPORT_SYMBOL(pci_dma_ops);

int global_phb_number;		/* Global phb counter */

/* Cached ISA bridge dev. */
struct pci_dev *ppc64_isabridge_dev = NULL;

static void fixup_broken_pcnet32(struct pci_dev *dev)
{
	if ((dev->class >> 8) == PCI_CLASS_NETWORK_ETHERNET) {
		dev->vendor = PCI_VENDOR_ID_AMD;
		pci_write_config_word(dev, PCI_VENDOR_ID, PCI_VENDOR_ID_AMD);
		pci_name_device(dev);
	}
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_TRIDENT, PCI_ANY_ID, fixup_broken_pcnet32);

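/*
 * Translate a resource from the CPU (virtually mapped) view to the
 * bus view by stripping the hose's IO or memory offset.
 */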
void pcibios_resource_to_bus(struct pci_dev *dev, struct pci_bus_region *region,
			     struct resource *res)
{
	unsigned long offset = 0;
	struct pci_controller *hose = pci_bus_to_host(dev->bus);

	if (!hose)
		return;

	if (res->flags & IORESOURCE_IO)
		offset = (unsigned long)hose->io_base_virt - pci_io_base;

	if (res->flags & IORESOURCE_MEM)
		offset = hose->pci_mem_offset;

	region->start = res->start - offset;
	region->end = res->end - offset;
}

#ifdef CONFIG_HOTPLUG
EXPORT_SYMBOL(pcibios_resource_to_bus);
#endif

/*
 * We need to avoid collisions with `mirrored' VGA ports
 * and other strange ISA hardware, so we always want the
 * addresses to be allocated in the 0x000-0x0ff region
 * modulo 0x400.
 *
 * Why? Because some silly external IO cards only decode
 * the low 10 bits of the IO address.  The 0x00-0xff region
 * is reserved for motherboard devices that decode all 16
 * bits, so it's ok to allocate at, say, 0x2800-0x28ff,
 * but we want to try to avoid allocating at 0x2900-0x2bff
 * which might be mirrored at 0x0100-0x03ff.
 */
void pcibios_align_resource(void *data, struct resource *res,
			    unsigned long size, unsigned long align)
{
	struct pci_dev *dev = data;
	struct pci_controller *hose = pci_bus_to_host(dev->bus);
	unsigned long start = res->start;
	unsigned long alignto;

	if (res->flags & IORESOURCE_IO) {
		unsigned long offset = (unsigned long)hose->io_base_virt -
			pci_io_base;
		/* Make sure we start at our min on all hoses */
		if (start - offset < PCIBIOS_MIN_IO)
			start = PCIBIOS_MIN_IO + offset;

		/*
		 * Put everything into 0x00-0xff region modulo 0x400
		 */
		if (start & 0x300)
			start = (start + 0x3ff) & ~0x3ff;

	} else if (res->flags & IORESOURCE_MEM) {
		/* Make sure we start at our min on all hoses */
		if (start - hose->pci_mem_offset < PCIBIOS_MIN_MEM)
			start = PCIBIOS_MIN_MEM + hose->pci_mem_offset;

		/* Align to multiple of size of minimum base. */
		alignto = max(0x1000UL, align);
		start = ALIGN(start, alignto);
	}

	res->start = start;
}

static DEFINE_SPINLOCK(hose_spinlock);

/*
 * Initialize the common fields of a pci_controller (phb).
 */
void __devinit pci_setup_pci_controller(struct pci_controller *hose)
{
	memset(hose, 0, sizeof(struct pci_controller));

	spin_lock(&hose_spinlock);
	hose->global_number = global_phb_number++;
	list_add_tail(&hose->list_node, &hose_list);
	spin_unlock(&hose_spinlock);
}

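/*
 * Claim the firmware-assigned resources of every device on a bus,
 * then recurse into each child bus.
 */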
static void __init pcibios_claim_one_bus(struct pci_bus *b)
{
	struct pci_dev *dev;
	struct pci_bus *child_bus;

	list_for_each_entry(dev, &b->devices, bus_list) {
		int i;

		for (i = 0; i < PCI_NUM_RESOURCES; i++) {
			struct resource *r = &dev->resource[i];

			if (r->parent || !r->start || !r->flags)
				continue;
			pci_claim_resource(dev, i);
		}
	}

	list_for_each_entry(child_bus, &b->children, node)
		pcibios_claim_one_bus(child_bus);
}

#ifndef CONFIG_PPC_ISERIES
static void __init pcibios_claim_of_setup(void)
{
	struct pci_bus *b;

	list_for_each_entry(b, &pci_root_buses, node)
		pcibios_claim_one_bus(b);
}
#endif

static int __init pcibios_init(void)
{
	struct pci_controller *hose, *tmp;
	struct pci_bus *bus;

	/* For now, override phys_mem_access_prot.  If we need it,
	 * later, we may move that initialization to each ppc_md
	 */
	ppc_md.phys_mem_access_prot = pci_phys_mem_access_prot;

#ifdef CONFIG_PPC_ISERIES
	iSeries_pcibios_init();
#endif

	printk("PCI: Probing PCI hardware\n");

	/* Scan all of the recorded PCI controllers.  */
	list_for_each_entry_safe(hose, tmp, &hose_list, list_node) {
		hose->last_busno = 0xff;
		bus = pci_scan_bus(hose->first_busno, hose->ops,
				   hose->arch_data);
		hose->bus = bus;
		hose->last_busno = bus->subordinate;
	}

#ifndef CONFIG_PPC_ISERIES
	if (pci_probe_only)
		pcibios_claim_of_setup();
	else
		/* FIXME: `else' will be removed when
		   pci_assign_unassigned_resources() is able to work
		   correctly with [partially] allocated PCI tree. */
		pci_assign_unassigned_resources();
#endif /* !CONFIG_PPC_ISERIES */

	/* Call machine dependent final fixup */
	if (ppc_md.pcibios_fixup)
		ppc_md.pcibios_fixup();

	/* Cache the location of the ISA bridge (if we have one) */
	ppc64_isabridge_dev = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, NULL);
	if (ppc64_isabridge_dev != NULL)
		printk("ISA bridge at %s\n", pci_name(ppc64_isabridge_dev));

	printk("PCI: Probing PCI hardware done\n");

	return 0;
}

subsys_initcall(pcibios_init);

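/* No arch-specific "pci=" options here; hand the string back unhandled. */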
char __init *pcibios_setup(char *str)
{
	return str;
}

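/*
 * Enable IO and/or memory decoding for a device, restricted to the
 * resources selected by @mask, and only write PCI_COMMAND when the
 * value actually changes.
 */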
int pcibios_enable_device(struct pci_dev *dev, int mask)
{
	u16 cmd, oldcmd;
	int i;

	pci_read_config_word(dev, PCI_COMMAND, &cmd);
	oldcmd = cmd;

	for (i = 0; i < PCI_NUM_RESOURCES; i++) {
		struct resource *res = &dev->resource[i];

		/* Only set up the requested stuff */
		if (!(mask & (1 << i)))
			continue;

		if (res->flags & IORESOURCE_IO)
			cmd |= PCI_COMMAND_IO;
		if (res->flags & IORESOURCE_MEM)
			cmd |= PCI_COMMAND_MEMORY;
	}

	if (cmd != oldcmd) {
		printk(KERN_DEBUG "PCI: Enabling device: (%s), cmd %x\n",
		       pci_name(dev), cmd);
		/* Enable the appropriate bits in the PCI command register. */
		pci_write_config_word(dev, PCI_COMMAND, cmd);
	}
	return 0;
}

/*
 * Return the domain number for this bus.
 */
int pci_domain_nr(struct pci_bus *bus)
{
#ifdef CONFIG_PPC_ISERIES
	return 0;
#else
	struct pci_controller *hose = pci_bus_to_host(bus);

	return hose->global_number;
#endif
}

EXPORT_SYMBOL(pci_domain_nr);

/* Decide whether to display the domain number in /proc */
int pci_proc_domain(struct pci_bus *bus)
{
#ifdef CONFIG_PPC_ISERIES
	return 0;
#else
	struct pci_controller *hose = pci_bus_to_host(bus);

	return hose->buid;
#endif
}

/*
 * Platform support for /proc/bus/pci/X/Y mmap()s,
 * modelled on the sparc64 implementation by Dave Miller.
 *  -- paulus.
 */

/*
 * Adjust vm_pgoff of VMA such that it is the physical page offset
 * corresponding to the 32-bit pci bus offset for DEV requested by the user.
 *
 * Basically, the user finds the base address of the device they wish
 * to mmap.  They read the 32-bit value from the config space base register,
 * add whatever PAGE_SIZE multiple offset they wish, and feed this into the
 * offset parameter of mmap on /proc/bus/pci/XXX for that device.
 *
 * Returns negative error code on failure, zero on success.
 */
static struct resource *__pci_mmap_make_offset(struct pci_dev *dev,
					       unsigned long *offset,
					       enum pci_mmap_state mmap_state)
{
	struct pci_controller *hose = pci_bus_to_host(dev->bus);
	unsigned long io_offset = 0;
	int i, res_bit;

	if (hose == NULL)
		return NULL;		/* should never happen */

	/* If memory, add on the PCI bridge address offset */
	if (mmap_state == pci_mmap_mem) {
		*offset += hose->pci_mem_offset;
		res_bit = IORESOURCE_MEM;
	} else {
		io_offset = (unsigned long)hose->io_base_virt;
		*offset += io_offset;
		res_bit = IORESOURCE_IO;
	}

	/*
	 * Check that the offset requested corresponds to one of the
	 * resources of the device.
	 */
	for (i = 0; i <= PCI_ROM_RESOURCE; i++) {
		struct resource *rp = &dev->resource[i];
		int flags = rp->flags;

		/* treat ROM as memory (should be already) */
		if (i == PCI_ROM_RESOURCE)
			flags |= IORESOURCE_MEM;

		/* Active and same type? */
		if ((flags & res_bit) == 0)
			continue;

		/* In the range of this resource? */
		if (*offset < (rp->start & PAGE_MASK) || *offset > rp->end)
			continue;

		/* found it! construct the final physical address */
		if (mmap_state == pci_mmap_io)
			*offset += hose->io_base_phys - io_offset;
		return rp;
	}

	return NULL;
}

/*
 * Set vm_page_prot of VMA, as appropriate for this architecture, for a pci
 * device mapping.
 */
static pgprot_t __pci_mmap_set_pgprot(struct pci_dev *dev, struct resource *rp,
				      pgprot_t protection,
				      enum pci_mmap_state mmap_state,
				      int write_combine)
{
	unsigned long prot = pgprot_val(protection);

	/* Write combine is always 0 on non-memory space mappings.  On
	 * memory space, if the user didn't pass 1, we check for a
	 * "prefetchable" resource.  This is a bit hackish, but we use
	 * this to work around the inability of /sysfs to provide a write
	 * combine bit.
	 */
	if (mmap_state != pci_mmap_mem)
		write_combine = 0;
	else if (write_combine == 0) {
		if (rp->flags & IORESOURCE_PREFETCH)
			write_combine = 1;
	}

	/* XXX would be nice to have a way to ask for write-through */
	prot |= _PAGE_NO_CACHE;
	if (write_combine)
		prot &= ~_PAGE_GUARDED;
	else
		prot |= _PAGE_GUARDED;

	printk("PCI map for %s:%lx, prot: %lx\n", pci_name(dev), rp->start,
	       prot);

	return __pgprot(prot);
}

/*
 * This one is used by /dev/mem and fbdev, which have no clue about the
 * PCI device; it tries to find the matching PCI device first and then
 * applies the same protection policy as the routine above.
 */
pgprot_t pci_phys_mem_access_prot(struct file *file,
				  unsigned long offset,
				  unsigned long size,
				  pgprot_t protection)
{
	struct pci_dev *pdev = NULL;
	struct resource *found = NULL;
	unsigned long prot = pgprot_val(protection);
	int i;

	if (page_is_ram(offset >> PAGE_SHIFT))
		return __pgprot(prot);

	prot |= _PAGE_NO_CACHE | _PAGE_GUARDED;

	for_each_pci_dev(pdev) {
		for (i = 0; i <= PCI_ROM_RESOURCE; i++) {
			struct resource *rp = &pdev->resource[i];
			int flags = rp->flags;

			/* Active and same type? */
			if ((flags & IORESOURCE_MEM) == 0)
				continue;
			/* In the range of this resource? */
			if (offset < (rp->start & PAGE_MASK) ||
			    offset > rp->end)
				continue;
			found = rp;
			break;
		}
		if (found)
			break;
	}
	if (found) {
		if (found->flags & IORESOURCE_PREFETCH)
			prot &= ~_PAGE_GUARDED;
		pci_dev_put(pdev);
	}

	DBG("non-PCI map for %lx, prot: %lx\n", offset, prot);

	return __pgprot(prot);
}


/*
 * Perform the actual remap of the pages for a PCI device mapping, as
 * appropriate for this architecture.  The region in the process to map
 * is described by vm_start and vm_end members of VMA, the base physical
 * address is found in vm_pgoff.
 * The pci device structure is provided so that architectures may make mapping
 * decisions on a per-device or per-bus basis.
 *
 * Returns a negative error code on failure, zero on success.
 */
int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
			enum pci_mmap_state mmap_state,
			int write_combine)
{
	unsigned long offset = vma->vm_pgoff << PAGE_SHIFT;
	struct resource *rp;
	int ret;

	rp = __pci_mmap_make_offset(dev, &offset, mmap_state);
	if (rp == NULL)
		return -EINVAL;

	vma->vm_pgoff = offset >> PAGE_SHIFT;
	vma->vm_flags |= VM_SHM | VM_LOCKED | VM_IO;
	vma->vm_page_prot = __pci_mmap_set_pgprot(dev, rp,
						  vma->vm_page_prot,
						  mmap_state, write_combine);

	ret = remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
			      vma->vm_end - vma->vm_start, vma->vm_page_prot);

	return ret;
}

#ifdef CONFIG_PPC_MULTIPLATFORM
static ssize_t pci_show_devspec(struct device *dev, char *buf)
{
	struct pci_dev *pdev;
	struct device_node *np;

	pdev = to_pci_dev(dev);
	np = pci_device_to_OF_node(pdev);
	if (np == NULL || np->full_name == NULL)
		return 0;
	return sprintf(buf, "%s", np->full_name);
}
static DEVICE_ATTR(devspec, S_IRUGO, pci_show_devspec, NULL);
#endif /* CONFIG_PPC_MULTIPLATFORM */

void pcibios_add_platform_entries(struct pci_dev *pdev)
{
#ifdef CONFIG_PPC_MULTIPLATFORM
	device_create_file(&pdev->dev, &dev_attr_devspec);
#endif /* CONFIG_PPC_MULTIPLATFORM */
}

#ifdef CONFIG_PPC_MULTIPLATFORM

#define ISA_SPACE_MASK 0x1
#define ISA_SPACE_IO 0x1

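/*
 * Map the PHB IO space that backs the ISA bus, sized from the ISA
 * node's "ranges" property; fall back to mapping 64k when the
 * property is missing or malformed.
 */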
static void __devinit pci_process_ISA_OF_ranges(struct device_node *isa_node,
						unsigned long phb_io_base_phys,
						void __iomem *phb_io_base_virt)
{
	struct isa_range *range;
	unsigned long pci_addr;
	unsigned int isa_addr;
	unsigned int size;
	int rlen = 0;

	range = (struct isa_range *) get_property(isa_node, "ranges", &rlen);
	if (range == NULL || (rlen < sizeof(struct isa_range))) {
		printk(KERN_ERR "no ISA ranges or unexpected isa range size, "
		       "mapping 64k\n");
		__ioremap_explicit(phb_io_base_phys,
				   (unsigned long)phb_io_base_virt,
				   0x10000, _PAGE_NO_CACHE);
		return;
	}

	/* From "ISA Binding to 1275"
	 * The ranges property is laid out as an array of elements,
	 * each of which comprises:
	 *   cells 0 - 1:	an ISA address
	 *   cells 2 - 4:	a PCI address
	 *			(size depending on dev->n_addr_cells)
	 *   cell 5:		the size of the range
	 */
	if ((range->isa_addr.a_hi & ISA_SPACE_MASK) == ISA_SPACE_IO) {
		isa_addr = range->isa_addr.a_lo;
		pci_addr = (unsigned long) range->pci_addr.a_mid << 32 |
			range->pci_addr.a_lo;

		/* Assume these are both zero */
		if ((pci_addr != 0) || (isa_addr != 0)) {
			printk(KERN_ERR "unexpected isa to pci mapping: %s\n",
			       __FUNCTION__);
			return;
		}

		size = PAGE_ALIGN(range->size);

		__ioremap_explicit(phb_io_base_phys,
				   (unsigned long) phb_io_base_virt,
				   size, _PAGE_NO_CACHE);
	}
}

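/*
 * Parse a PHB node's "ranges" property and fill in the hose's IO and
 * memory resources and offsets from it.
 */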
void __devinit pci_process_bridge_OF_ranges(struct pci_controller *hose,
					    struct device_node *dev)
{
	unsigned int *ranges;
	unsigned long size;
	int rlen = 0;
	int memno = 0;
	struct resource *res;
	int np, na = prom_n_addr_cells(dev);
	unsigned long pci_addr, cpu_phys_addr;

	np = na + 5;

	/* From "PCI Binding to 1275"
	 * The ranges property is laid out as an array of elements,
	 * each of which comprises:
	 *   cells 0 - 2:	a PCI address
	 *   cells 3 or 3+4:	a CPU physical address
	 *			(size depending on dev->n_addr_cells)
	 *   cells 4+5 or 5+6:	the size of the range
	 */
	rlen = 0;
	hose->io_base_phys = 0;
	ranges = (unsigned int *) get_property(dev, "ranges", &rlen);
	while ((rlen -= np * sizeof(unsigned int)) >= 0) {
		res = NULL;
		pci_addr = (unsigned long)ranges[1] << 32 | ranges[2];

		cpu_phys_addr = ranges[3];
		if (na == 2)
			cpu_phys_addr = cpu_phys_addr << 32 | ranges[4];

		size = (unsigned long)ranges[na+3] << 32 | ranges[na+4];
		if (size == 0) {
			/* still advance past this entry before continuing */
			ranges += np;
			continue;
		}
		switch ((ranges[0] >> 24) & 0x3) {
		case 1:		/* I/O space */
			hose->io_base_phys = cpu_phys_addr;
			hose->pci_io_size = size;

			res = &hose->io_resource;
			res->flags = IORESOURCE_IO;
			res->start = pci_addr;
			DBG("phb%d: IO 0x%lx -> 0x%lx\n", hose->global_number,
			    res->start, res->start + size - 1);
			break;
		case 2:		/* memory space */
			memno = 0;
			while (memno < 3 && hose->mem_resources[memno].flags)
				++memno;

			if (memno == 0)
				hose->pci_mem_offset = cpu_phys_addr - pci_addr;
			if (memno < 3) {
				res = &hose->mem_resources[memno];
				res->flags = IORESOURCE_MEM;
				res->start = cpu_phys_addr;
				DBG("phb%d: MEM 0x%lx -> 0x%lx\n", hose->global_number,
				    res->start, res->start + size - 1);
			}
			break;
		}
		if (res != NULL) {
			res->name = dev->full_name;
			res->end = res->start + size - 1;
			res->parent = NULL;
			res->sibling = NULL;
			res->child = NULL;
		}
		ranges += np;
	}
}

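/*
 * Set up the IO window of a statically-probed PHB: reserve virtual
 * space for it, map any ISA ranges when this is the primary bus, and
 * rebase the hose's IO resource to the virtual mapping.
 */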
void __init pci_setup_phb_io(struct pci_controller *hose, int primary)
{
	unsigned long size = hose->pci_io_size;
	unsigned long io_virt_offset;
	struct resource *res;
	struct device_node *isa_dn;

	hose->io_base_virt = reserve_phb_iospace(size);
	DBG("phb%d io_base_phys 0x%lx io_base_virt 0x%lx\n",
	    hose->global_number, hose->io_base_phys,
	    (unsigned long) hose->io_base_virt);

	if (primary) {
		pci_io_base = (unsigned long)hose->io_base_virt;
		isa_dn = of_find_node_by_type(NULL, "isa");
		if (isa_dn) {
			isa_io_base = pci_io_base;
			pci_process_ISA_OF_ranges(isa_dn, hose->io_base_phys,
						  hose->io_base_virt);
			of_node_put(isa_dn);
			/* Allow all IO */
			io_page_mask = -1;
		}
	}

	io_virt_offset = (unsigned long)hose->io_base_virt - pci_io_base;
	res = &hose->io_resource;
	res->start += io_virt_offset;
	res->end += io_virt_offset;
}

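/*
 * Like pci_setup_phb_io(), but for PHBs discovered dynamically at
 * runtime: ioremap the IO space directly rather than reserving it
 * from the static PHB IO area.
 */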
void __devinit pci_setup_phb_io_dynamic(struct pci_controller *hose,
					int primary)
{
	unsigned long size = hose->pci_io_size;
	unsigned long io_virt_offset;
	struct resource *res;

	hose->io_base_virt = __ioremap(hose->io_base_phys, size,
				       _PAGE_NO_CACHE);
	DBG("phb%d io_base_phys 0x%lx io_base_virt 0x%lx\n",
	    hose->global_number, hose->io_base_phys,
	    (unsigned long) hose->io_base_virt);

	if (primary)
		pci_io_base = (unsigned long)hose->io_base_virt;

	io_virt_offset = (unsigned long)hose->io_base_virt - pci_io_base;
	res = &hose->io_resource;
	res->start += io_virt_offset;
	res->end += io_virt_offset;
}


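/*
 * Compute the physical address, virtual address and size of a bus's
 * IO window: the bridge window for a subordinate bus, or the hose's
 * whole IO resource for a root bus.  Returns non-zero if the range
 * is degenerate.
 */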
static int get_bus_io_range(struct pci_bus *bus, unsigned long *start_phys,
			    unsigned long *start_virt, unsigned long *size)
{
	struct pci_controller *hose = pci_bus_to_host(bus);
	struct pci_bus_region region;
	struct resource *res;

	if (bus->self) {
		res = bus->resource[0];
		pcibios_resource_to_bus(bus->self, &region, res);
		*start_phys = hose->io_base_phys + region.start;
		*start_virt = (unsigned long) hose->io_base_virt +
				region.start;
		if (region.end > region.start)
			*size = region.end - region.start + 1;
		else {
			printk("%s(): unexpected region 0x%lx->0x%lx\n",
			       __FUNCTION__, region.start, region.end);
			return 1;
		}

	} else {
		/* Root Bus */
		res = &hose->io_resource;
		*start_phys = hose->io_base_phys;
		*start_virt = (unsigned long) hose->io_base_virt;
		if (res->end > res->start)
			*size = res->end - res->start + 1;
		else {
			printk("%s(): unexpected region 0x%lx->0x%lx\n",
			       __FUNCTION__, res->start, res->end);
			return 1;
		}
	}

	return 0;
}

int unmap_bus_range(struct pci_bus *bus)
{
	unsigned long start_phys;
	unsigned long start_virt;
	unsigned long size;

	if (!bus) {
		printk(KERN_ERR "%s() expected bus\n", __FUNCTION__);
		return 1;
	}

	if (get_bus_io_range(bus, &start_phys, &start_virt, &size))
		return 1;
	if (iounmap_explicit((void __iomem *) start_virt, size))
		return 1;

	return 0;
}
EXPORT_SYMBOL(unmap_bus_range);

int remap_bus_range(struct pci_bus *bus)
{
	unsigned long start_phys;
	unsigned long start_virt;
	unsigned long size;

	if (!bus) {
		printk(KERN_ERR "%s() expected bus\n", __FUNCTION__);
		return 1;
	}

	if (get_bus_io_range(bus, &start_phys, &start_virt, &size))
		return 1;
	printk("mapping IO %lx -> %lx, size: %lx\n", start_phys, start_virt,
	       size);
	if (__ioremap_explicit(start_phys, start_virt, size, _PAGE_NO_CACHE))
		return 1;

	return 0;
}
EXPORT_SYMBOL(remap_bus_range);

void phbs_remap_io(void)
{
	struct pci_controller *hose, *tmp;

	list_for_each_entry_safe(hose, tmp, &hose_list, list_node)
		remap_bus_range(hose->bus);
}

/*
 * ppc64 can have multifunction devices that do not respond to function 0.
 * In this case we must scan all functions.
 */
int pcibios_scan_all_fns(struct pci_bus *bus, int devfn)
{
	struct device_node *busdn, *dn;

	if (bus->self)
		busdn = pci_device_to_OF_node(bus->self);
	else
		busdn = bus->sysdata;	/* must be a phb */

	if (busdn == NULL)
		return 0;

	/*
	 * Check to see if any of the 8 functions of this slot are in
	 * the device tree.  If so, we need to scan all of them.
	 */
	for (dn = busdn->child; dn; dn = dn->sibling)
		if ((dn->devfn >> 3) == (devfn >> 3))
			return 1;

	return 0;
}


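/*
 * Rebase a device's IO and memory resources from bus to CPU addresses,
 * and mark any of its pages in the ISA port range as accessible in
 * io_page_mask.
 */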
void __devinit pcibios_fixup_device_resources(struct pci_dev *dev,
					      struct pci_bus *bus)
{
	/* Update device resources. */
	struct pci_controller *hose = pci_bus_to_host(bus);
	int i;

	for (i = 0; i < PCI_NUM_RESOURCES; i++) {
		if (dev->resource[i].flags & IORESOURCE_IO) {
			unsigned long offset = (unsigned long)hose->io_base_virt
				- pci_io_base;
			unsigned long start, end, mask;

			start = dev->resource[i].start += offset;
			end = dev->resource[i].end += offset;

			/* Need to allow IO access to pages that are in the
			   ISA range */
			if (start < MAX_ISA_PORT) {
				if (end > MAX_ISA_PORT)
					end = MAX_ISA_PORT;

				start >>= PAGE_SHIFT;
				end >>= PAGE_SHIFT;

				/* get the range of pages for the map */
				mask = ((1 << (end+1))-1) ^ ((1 << start)-1);
				io_page_mask |= mask;
			}
		} else if (dev->resource[i].flags & IORESOURCE_MEM) {
			dev->resource[i].start += hose->pci_mem_offset;
			dev->resource[i].end += hose->pci_mem_offset;
		}
	}
}
EXPORT_SYMBOL(pcibios_fixup_device_resources);

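/*
 * Fix up a newly-scanned bus: attach the hose's resources to a root
 * bus, rebase resources behind bridges when pci_probe_only is set,
 * and run the platform iommu hooks.
 */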
void __devinit pcibios_fixup_bus(struct pci_bus *bus)
{
	struct pci_controller *hose = pci_bus_to_host(bus);
	struct pci_dev *dev = bus->self;
	struct resource *res;
	int i;

	if (!dev) {
		/* Root bus. */

		hose->bus = bus;
		bus->resource[0] = res = &hose->io_resource;

		if (res->flags && request_resource(&ioport_resource, res))
			printk(KERN_ERR "Failed to request IO on "
					"PCI domain %d\n", pci_domain_nr(bus));

		for (i = 0; i < 3; ++i) {
			res = &hose->mem_resources[i];
			bus->resource[i+1] = res;
			if (res->flags && request_resource(&iomem_resource, res))
				printk(KERN_ERR "Failed to request MEM on "
						"PCI domain %d\n",
						pci_domain_nr(bus));
		}
	} else if (pci_probe_only &&
		   (dev->class >> 8) == PCI_CLASS_BRIDGE_PCI) {
		/* This is a subordinate bridge */

		pci_read_bridge_bases(bus);
		pcibios_fixup_device_resources(dev, bus);
	}

	ppc_md.iommu_bus_setup(bus);

	list_for_each_entry(dev, &bus->devices, bus_list)
		ppc_md.iommu_dev_setup(dev);

	if (!pci_probe_only)
		return;

	list_for_each_entry(dev, &bus->devices, bus_list) {
		if ((dev->class >> 8) != PCI_CLASS_BRIDGE_PCI)
			pcibios_fixup_device_resources(dev, bus);
	}
}
EXPORT_SYMBOL(pcibios_fixup_bus);

/*
 * Read the interrupt pin to determine whether an interrupt is used by
 * the card.  If so, get the interrupt line from Open Firmware and set
 * it in the pci_dev and in PCI config space.
 */
int pci_read_irq_line(struct pci_dev *pci_dev)
{
	u8 intpin;
	struct device_node *node;

	pci_read_config_byte(pci_dev, PCI_INTERRUPT_PIN, &intpin);
	if (intpin == 0)
		return 0;

	node = pci_device_to_OF_node(pci_dev);
	if (node == NULL)
		return -1;

	if (node->n_intrs == 0)
		return -1;

	pci_dev->irq = node->intrs[0].line;

	pci_write_config_byte(pci_dev, PCI_INTERRUPT_LINE, pci_dev->irq);

	return 0;
}
EXPORT_SYMBOL(pci_read_irq_line);

#endif /* CONFIG_PPC_MULTIPLATFORM */