arch/ia64/pci/pci.c (mirror_ubuntu-artful-kernel.git, Ubuntu-4.13.0-45.50)
/*
 * pci.c - Low-Level PCI Access in IA-64
 *
 * Derived from bios32.c of i386 tree.
 *
 * (c) Copyright 2002, 2005 Hewlett-Packard Development Company, L.P.
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 *	Bjorn Helgaas <bjorn.helgaas@hp.com>
 * Copyright (C) 2004 Silicon Graphics, Inc.
 *
 * Note: Above list of copyright holders is incomplete...
 */

#include <linux/acpi.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/pci-acpi.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/bootmem.h>
#include <linux/export.h>

#include <asm/machvec.h>
#include <asm/page.h>
#include <asm/io.h>
#include <asm/sal.h>
#include <asm/smp.h>
#include <asm/irq.h>
#include <asm/hw_irq.h>

/*
 * Low-level SAL-based PCI configuration access functions.  Note that SAL
 * calls are already serialized (via sal_lock), so we don't need another
 * synchronization mechanism here.
 */

#define PCI_SAL_ADDRESS(seg, bus, devfn, reg)		\
	(((u64) seg << 24) | (bus << 16) | (devfn << 8) | (reg))

/* SAL 3.2 adds support for extended config space. */

#define PCI_SAL_EXT_ADDRESS(seg, bus, devfn, reg)	\
	(((u64) seg << 28) | (bus << 20) | (devfn << 12) | (reg))

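/*
 * raw_pci_read - read PCI config space through a SAL call.  The compact
 * address format is used when both the segment and the register offset
 * fit in 8 bits; otherwise the extended format requires SAL 3.2 or later.
 */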
int raw_pci_read(unsigned int seg, unsigned int bus, unsigned int devfn,
		 int reg, int len, u32 *value)
{
	u64 addr, data = 0;
	int mode, result;

	if (!value || (seg > 65535) || (bus > 255) || (devfn > 255) || (reg > 4095))
		return -EINVAL;

	if ((seg | reg) <= 255) {
		addr = PCI_SAL_ADDRESS(seg, bus, devfn, reg);
		mode = 0;
	} else if (sal_revision >= SAL_VERSION_CODE(3,2)) {
		addr = PCI_SAL_EXT_ADDRESS(seg, bus, devfn, reg);
		mode = 1;
	} else {
		return -EINVAL;
	}

	result = ia64_sal_pci_config_read(addr, mode, len, &data);
	if (result != 0)
		return -EINVAL;

	*value = (u32) data;
	return 0;
}

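/*
 * raw_pci_write - write PCI config space through SAL, using the same
 * compact/extended address selection as raw_pci_read().
 */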
int raw_pci_write(unsigned int seg, unsigned int bus, unsigned int devfn,
		  int reg, int len, u32 value)
{
	u64 addr;
	int mode, result;

	if ((seg > 65535) || (bus > 255) || (devfn > 255) || (reg > 4095))
		return -EINVAL;

	if ((seg | reg) <= 255) {
		addr = PCI_SAL_ADDRESS(seg, bus, devfn, reg);
		mode = 0;
	} else if (sal_revision >= SAL_VERSION_CODE(3,2)) {
		addr = PCI_SAL_EXT_ADDRESS(seg, bus, devfn, reg);
		mode = 1;
	} else {
		return -EINVAL;
	}
	result = ia64_sal_pci_config_write(addr, mode, len, value);
	if (result != 0)
		return -EINVAL;
	return 0;
}

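/*
 * pci_ops accessors: translate a struct pci_bus into its PCI domain
 * (segment) and bus number, then defer to the raw SAL accessors above.
 */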
static int pci_read(struct pci_bus *bus, unsigned int devfn, int where,
		    int size, u32 *value)
{
	return raw_pci_read(pci_domain_nr(bus), bus->number,
			    devfn, where, size, value);
}

static int pci_write(struct pci_bus *bus, unsigned int devfn, int where,
		     int size, u32 value)
{
	return raw_pci_write(pci_domain_nr(bus), bus->number,
			     devfn, where, size, value);
}

struct pci_ops pci_root_ops = {
	.read = pci_read,
	.write = pci_write,
};

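/*
 * Per-host-bridge bookkeeping: the common ACPI root info, the ia64
 * pci_controller, and the I/O port space resources created for this root.
 */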
struct pci_root_info {
	struct acpi_pci_root_info common;
	struct pci_controller controller;
	struct list_head io_resources;
};

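/*
 * new_space - find or allocate an io_space[] slot for the given MMIO base
 * and sparseness.  Returns the slot index (0 is the legacy port space),
 * or ~0 if all MAX_IO_SPACES slots are already in use.
 */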
static unsigned int new_space(u64 phys_base, int sparse)
{
	u64 mmio_base;
	int i;

	if (phys_base == 0)
		return 0;	/* legacy I/O port space */

	mmio_base = (u64) ioremap(phys_base, 0);
	for (i = 0; i < num_io_spaces; i++)
		if (io_space[i].mmio_base == mmio_base &&
		    io_space[i].sparse == sparse)
			return i;

	if (num_io_spaces == MAX_IO_SPACES) {
		pr_err("PCI: Too many IO port spaces "
			"(MAX_IO_SPACES=%lu)\n", MAX_IO_SPACES);
		return ~0;
	}

	i = num_io_spaces++;
	io_space[i].mmio_base = mmio_base;
	io_space[i].sparse = sparse;

	return i;
}

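/*
 * add_io_space - register an I/O port window reported by the host bridge.
 * The MMIO range that backs the ports is inserted into iomem_resource,
 * and the resource entry is rewritten in terms of the newly assigned
 * port space base so child devices see plain port numbers.
 */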
static int add_io_space(struct device *dev, struct pci_root_info *info,
			struct resource_entry *entry)
{
	struct resource_entry *iospace;
	struct resource *resource, *res = entry->res;
	char *name;
	unsigned long base, min, max, base_port;
	unsigned int sparse = 0, space_nr, len;

	len = strlen(info->common.name) + 32;
	iospace = resource_list_create_entry(NULL, len);
	if (!iospace) {
		dev_err(dev, "PCI: No memory for %s I/O port space\n",
			info->common.name);
		return -ENOMEM;
	}

	if (res->flags & IORESOURCE_IO_SPARSE)
		sparse = 1;
	space_nr = new_space(entry->offset, sparse);
	if (space_nr == ~0)
		goto free_resource;

	name = (char *)(iospace + 1);
	min = res->start - entry->offset;
	max = res->end - entry->offset;
	base = __pa(io_space[space_nr].mmio_base);
	base_port = IO_SPACE_BASE(space_nr);
	snprintf(name, len, "%s I/O Ports %08lx-%08lx", info->common.name,
		 base_port + min, base_port + max);

	/*
	 * The SDM guarantees the legacy 0-64K space is sparse, but if the
	 * mapping is done by the processor (not the bridge), ACPI may not
	 * mark it as sparse.
	 */
	if (space_nr == 0)
		sparse = 1;

	resource = iospace->res;
	resource->name = name;
	resource->flags = IORESOURCE_MEM;
	resource->start = base + (sparse ? IO_SPACE_SPARSE_ENCODING(min) : min);
	resource->end = base + (sparse ? IO_SPACE_SPARSE_ENCODING(max) : max);
	if (insert_resource(&iomem_resource, resource)) {
		dev_err(dev,
			"can't allocate host bridge io space resource %pR\n",
			resource);
		goto free_resource;
	}

	entry->offset = base_port;
	res->start = min + base_port;
	res->end = max + base_port;
	resource_list_add_tail(iospace, &info->io_resources);

	return 0;

free_resource:
	resource_list_free_entry(iospace);
	return -ENOSPC;
}

/*
 * An IO port or MMIO resource assigned to a PCI host bridge may be
 * consumed by the host bridge itself or available to its child
 * bus/devices.  The ACPI specification defines a bit (Producer/Consumer)
 * to tell whether the resource is consumed by the host bridge itself,
 * but firmware hasn't used that bit consistently, so we can't rely on it.
 *
 * On x86 and IA64 platforms, all IO port and MMIO resources are assumed
 * to be available to child bus/devices except one special case:
 *     IO port [0xCF8-0xCFF] is consumed by the host bridge itself
 *     to access PCI configuration space.
 *
 * So explicitly filter out PCI CFG IO ports [0xCF8-0xCFF].
 */
static bool resource_is_pcicfg_ioport(struct resource *res)
{
	return (res->flags & IORESOURCE_IO) &&
		res->start == 0xCF8 && res->end == 0xCFF;
}

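/*
 * pci_acpi_root_prepare_resources - post-process the windows probed from
 * the host bridge's _CRS: tiny MMIO windows (an HP firmware workaround for
 * a Windows bug) are claimed directly instead of being exposed to child
 * devices, the 0xCF8-0xCFF config ports are dropped, and I/O port windows
 * are converted to port space resources via add_io_space().
 */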
static int pci_acpi_root_prepare_resources(struct acpi_pci_root_info *ci)
{
	struct device *dev = &ci->bridge->dev;
	struct pci_root_info *info;
	struct resource *res;
	struct resource_entry *entry, *tmp;
	int status;

	status = acpi_pci_probe_root_resources(ci);
	if (status > 0) {
		info = container_of(ci, struct pci_root_info, common);
		resource_list_for_each_entry_safe(entry, tmp, &ci->resources) {
			res = entry->res;
			if (res->flags & IORESOURCE_MEM) {
				/*
				 * HP's firmware has a hack to work around a
				 * Windows bug. Ignore these tiny memory ranges.
				 */
				if (resource_size(res) <= 16) {
					resource_list_del(entry);
					insert_resource(&iomem_resource,
							entry->res);
					resource_list_add_tail(entry,
							&info->io_resources);
				}
			} else if (res->flags & IORESOURCE_IO) {
				if (resource_is_pcicfg_ioport(entry->res))
					resource_list_destroy_entry(entry);
				else if (add_io_space(dev, info, entry))
					resource_list_destroy_entry(entry);
			}
		}
	}

	return status;
}

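/*
 * pci_acpi_root_release_info - undo prepare_resources: release every
 * resource tracked in io_resources and free the per-root info.
 */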
static void pci_acpi_root_release_info(struct acpi_pci_root_info *ci)
{
	struct pci_root_info *info;
	struct resource_entry *entry, *tmp;

	info = container_of(ci, struct pci_root_info, common);
	resource_list_for_each_entry_safe(entry, tmp, &info->io_resources) {
		release_resource(entry->res);
		resource_list_destroy_entry(entry);
	}
	kfree(info);
}

static struct acpi_pci_root_ops pci_acpi_root_ops = {
	.pci_ops = &pci_root_ops,
	.release_info = pci_acpi_root_release_info,
	.prepare_resources = pci_acpi_root_prepare_resources,
};

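/*
 * pci_acpi_scan_root - create and scan the root bus for an ACPI PCI host
 * bridge.  Allocates the per-root info, records the segment, ACPI
 * companion and NUMA node, then hands off to acpi_pci_root_create().
 */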
struct pci_bus *pci_acpi_scan_root(struct acpi_pci_root *root)
{
	struct acpi_device *device = root->device;
	struct pci_root_info *info;

	info = kzalloc(sizeof(*info), GFP_KERNEL);
	if (!info) {
		dev_err(&device->dev,
			"pci_bus %04x:%02x: ignored (out of memory)\n",
			root->segment, (int)root->secondary.start);
		return NULL;
	}

	info->controller.segment = root->segment;
	info->controller.companion = device;
	info->controller.node = acpi_get_node(device->handle);
	INIT_LIST_HEAD(&info->io_resources);
	return acpi_pci_root_create(root, &pci_acpi_root_ops,
				    &info->common, &info->controller);
}

int pcibios_root_bridge_prepare(struct pci_host_bridge *bridge)
{
	/*
	 * We pass NULL as parent to pci_create_root_bus(), so if it is not
	 * NULL here, pci_create_root_bus() has been called by someone else
	 * and sysdata is likely to be different from what we expect.  Let it
	 * go in that case.
	 */
	if (!bridge->dev.parent) {
		struct pci_controller *controller = bridge->bus->sysdata;
		ACPI_COMPANION_SET(&bridge->dev, controller->companion);
	}
	return 0;
}

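/*
 * pcibios_fixup_device_resources - claim the device BAR resources that
 * already have an assignment, so the kernel's resource tree matches what
 * firmware set up.
 */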
void pcibios_fixup_device_resources(struct pci_dev *dev)
{
	int idx;

	if (!dev->bus)
		return;

	for (idx = 0; idx < PCI_BRIDGE_RESOURCES; idx++) {
		struct resource *r = &dev->resource[idx];

		if (!r->flags || r->parent || !r->start)
			continue;

		pci_claim_resource(dev, idx);
	}
}
EXPORT_SYMBOL_GPL(pcibios_fixup_device_resources);

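/*
 * pcibios_fixup_bridge_resources - same idea as above, but for the bridge
 * window resources of a PCI-to-PCI bridge.
 */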
static void pcibios_fixup_bridge_resources(struct pci_dev *dev)
{
	int idx;

	if (!dev->bus)
		return;

	for (idx = PCI_BRIDGE_RESOURCES; idx < PCI_NUM_RESOURCES; idx++) {
		struct resource *r = &dev->resource[idx];

		if (!r->flags || r->parent || !r->start)
			continue;

		pci_claim_bridge_resource(dev, idx);
	}
}

/*
 * Called after each bus is probed, but before its children are examined.
 */
void pcibios_fixup_bus(struct pci_bus *b)
{
	struct pci_dev *dev;

	if (b->self) {
		pci_read_bridge_bases(b);
		pcibios_fixup_bridge_resources(b->self);
	}
	list_for_each_entry(dev, &b->devices, bus_list)
		pcibios_fixup_device_resources(dev);
	platform_pci_fixup_bus(b);
}

void pcibios_add_bus(struct pci_bus *bus)
{
	acpi_pci_add_bus(bus);
}

void pcibios_remove_bus(struct pci_bus *bus)
{
	acpi_pci_remove_bus(bus);
}

void pcibios_set_master(struct pci_dev *dev)
{
	/* No special bus mastering setup handling */
}

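/*
 * pcibios_enable_device - enable the device's resources and, unless MSI
 * is in use, route its legacy interrupt through ACPI.
 * pcibios_disable_device below is the matching teardown.
 */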
int
pcibios_enable_device(struct pci_dev *dev, int mask)
{
	int ret;

	ret = pci_enable_resources(dev, mask);
	if (ret < 0)
		return ret;

	if (!dev->msi_enabled)
		return acpi_pci_irq_enable(dev);
	return 0;
}

void
pcibios_disable_device(struct pci_dev *dev)
{
	BUG_ON(atomic_read(&dev->enable_cnt));
	if (!dev->msi_enabled)
		acpi_pci_irq_disable(dev);
}

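/*
 * pcibios_align_resource - the arch imposes no alignment constraints
 * beyond those applied by the PCI core, so keep the proposed start.
 */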
resource_size_t
pcibios_align_resource(void *data, const struct resource *res,
		       resource_size_t size, resource_size_t align)
{
	return res->start;
}

/**
 * ia64_pci_get_legacy_mem - generic legacy mem routine
 * @bus: bus to get legacy memory base address for
 *
 * Find the base of legacy memory for @bus.  This is typically the first
 * megabyte of bus address space for @bus or is simply 0 on platforms whose
 * chipsets support legacy I/O and memory routing.  Returns the base address
 * or an error pointer if an error occurred.
 *
 * This is the ia64 generic version of this routine.  Other platforms
 * are free to override it with a machine vector.
 */
char *ia64_pci_get_legacy_mem(struct pci_bus *bus)
{
	return (char *)__IA64_UNCACHED_OFFSET;
}

/**
 * pci_mmap_legacy_page_range - map legacy memory space to userland
 * @bus: bus whose legacy space we're mapping
 * @vma: vma passed in by mmap
 * @mmap_state: mapping type requested; only pci_mmap_mem is supported
 *
 * Map legacy memory space for this device back to userspace using a machine
 * vector to get the base address.
 */
int
pci_mmap_legacy_page_range(struct pci_bus *bus, struct vm_area_struct *vma,
			   enum pci_mmap_state mmap_state)
{
	unsigned long size = vma->vm_end - vma->vm_start;
	pgprot_t prot;
	char *addr;

	/* We only support mmap'ing of legacy memory space */
	if (mmap_state != pci_mmap_mem)
		return -ENOSYS;

	/*
	 * Avoid attribute aliasing.  See Documentation/ia64/aliasing.txt
	 * for more details.
	 */
	if (!valid_mmap_phys_addr_range(vma->vm_pgoff, size))
		return -EINVAL;
	prot = phys_mem_access_prot(NULL, vma->vm_pgoff, size,
				    vma->vm_page_prot);

	addr = pci_get_legacy_mem(bus);
	if (IS_ERR(addr))
		return PTR_ERR(addr);

	vma->vm_pgoff += (unsigned long)addr >> PAGE_SHIFT;
	vma->vm_page_prot = prot;

	if (remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
			    size, vma->vm_page_prot))
		return -EAGAIN;

	return 0;
}

/**
 * ia64_pci_legacy_read - read from legacy I/O space
 * @bus: bus to read
 * @port: legacy port value
 * @val: caller allocated storage for returned value
 * @size: number of bytes to read
 *
 * Simply reads @size bytes from @port and puts the result in @val.
 *
 * Again, this (and the write routine) are generic versions that can be
 * overridden by the platform.  This is necessary on platforms that don't
 * support legacy I/O routing or that hard fail on legacy I/O timeouts.
 */
int ia64_pci_legacy_read(struct pci_bus *bus, u16 port, u32 *val, u8 size)
{
	int ret = size;

	switch (size) {
	case 1:
		*val = inb(port);
		break;
	case 2:
		*val = inw(port);
		break;
	case 4:
		*val = inl(port);
		break;
	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}

/**
 * ia64_pci_legacy_write - perform a legacy I/O write
 * @bus: bus pointer
 * @port: port to write
 * @val: value to write
 * @size: number of bytes to write from @val
 *
 * Simply writes @size bytes of @val to @port.
 */
int ia64_pci_legacy_write(struct pci_bus *bus, u16 port, u32 val, u8 size)
{
	int ret = size;

	switch (size) {
	case 1:
		outb(val, port);
		break;
	case 2:
		outw(val, port);
		break;
	case 4:
		outl(val, port);
		break;
	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}

/**
 * set_pci_dfl_cacheline_size - determine default cacheline size for PCI devices
 *
 * We want to use the line-size of the outer-most cache.  We assume
 * that this line-size is the same for all CPUs.
 *
 * Code mostly taken from arch/ia64/kernel/palinfo.c:cache_info().
 */
static void __init set_pci_dfl_cacheline_size(void)
{
	unsigned long levels, unique_caches;
	long status;
	pal_cache_config_info_t cci;

	status = ia64_pal_cache_summary(&levels, &unique_caches);
	if (status != 0) {
		pr_err("%s: ia64_pal_cache_summary() failed "
			"(status=%ld)\n", __func__, status);
		return;
	}

	status = ia64_pal_cache_config_info(levels - 1,
				/* cache_type (data_or_unified)= */ 2, &cci);
	if (status != 0) {
		pr_err("%s: ia64_pal_cache_config_info() failed "
			"(status=%ld)\n", __func__, status);
		return;
	}
	pci_dfl_cache_line_size = (1 << cci.pcci_line_size) / 4;
}

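/*
 * ia64_dma_get_required_mask - compute a DMA mask just wide enough to
 * cover all installed memory, based on max_pfn rounded to a power of two.
 */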
u64 ia64_dma_get_required_mask(struct device *dev)
{
	u32 low_totalram = ((max_pfn - 1) << PAGE_SHIFT);
	u32 high_totalram = ((max_pfn - 1) >> (32 - PAGE_SHIFT));
	u64 mask;

	if (!high_totalram) {
		/* convert to mask just covering totalram */
		low_totalram = (1 << (fls(low_totalram) - 1));
		low_totalram += low_totalram - 1;
		mask = low_totalram;
	} else {
		high_totalram = (1 << (fls(high_totalram) - 1));
		high_totalram += high_totalram - 1;
		mask = (((u64)high_totalram) << 32) + 0xffffffff;
	}
	return mask;
}
EXPORT_SYMBOL_GPL(ia64_dma_get_required_mask);

u64 dma_get_required_mask(struct device *dev)
{
	return platform_dma_get_required_mask(dev);
}
EXPORT_SYMBOL_GPL(dma_get_required_mask);

static int __init pcibios_init(void)
{
	set_pci_dfl_cacheline_size();
	return 0;
}

subsys_initcall(pcibios_init);