arch/x86/kernel/pci-dma.c
#include <linux/dma-mapping.h>
#include <linux/dmar.h>
#include <linux/bootmem.h>
#include <linux/pci.h>

#include <asm/proto.h>
#include <asm/dma.h>
#include <asm/gart.h>
#include <asm/calgary.h>

int forbid_dac __read_mostly;
EXPORT_SYMBOL(forbid_dac);

const struct dma_mapping_ops *dma_ops;
EXPORT_SYMBOL(dma_ops);

static int iommu_sac_force __read_mostly;

#ifdef CONFIG_IOMMU_DEBUG
int panic_on_overflow __read_mostly = 1;
int force_iommu __read_mostly = 1;
#else
int panic_on_overflow __read_mostly = 0;
int force_iommu __read_mostly = 0;
#endif

int iommu_merge __read_mostly = 0;

int no_iommu __read_mostly;
/* Set this to 1 if there is a HW IOMMU in the system */
int iommu_detected __read_mostly = 0;

/* This tells the BIO block layer to assume merging. Default to off
   because we cannot guarantee merging later. */
int iommu_bio_merge __read_mostly = 0;
EXPORT_SYMBOL(iommu_bio_merge);

dma_addr_t bad_dma_address __read_mostly = 0;
EXPORT_SYMBOL(bad_dma_address);

/* Dummy device used for NULL arguments (normally ISA). A smaller DMA mask
   would probably be better, but this is bug-to-bug compatible with older
   i386. */
struct device fallback_dev = {
        .bus_id = "fallback device",
        .coherent_dma_mask = DMA_32BIT_MASK,
        .dma_mask = &fallback_dev.coherent_dma_mask,
};

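/*
 * Set the DMA mask for a device after checking that the platform/IOMMU
 * backend can actually support it.
 */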
int dma_set_mask(struct device *dev, u64 mask)
{
        if (!dev->dma_mask || !dma_supported(dev, mask))
                return -EIO;

        *dev->dma_mask = mask;

        return 0;
}
EXPORT_SYMBOL(dma_set_mask);

#ifdef CONFIG_X86_64
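/*
 * Bootmem reserved below 4GB early in boot and released again from
 * pci_iommu_alloc(), so that the IOMMU setup code can still find memory
 * in that range.  The default 128MB can be overridden with "dma32_size=".
 */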
static __initdata void *dma32_bootmem_ptr;
static unsigned long dma32_bootmem_size __initdata = (128ULL<<20);

static int __init parse_dma32_size_opt(char *p)
{
        if (!p)
                return -EINVAL;
        dma32_bootmem_size = memparse(p, &p);
        return 0;
}
early_param("dma32_size", parse_dma32_size_opt);

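/*
 * Reserve the low-memory chunk from bootmem.  Nothing to do when all of
 * memory already sits below the 4GB boundary.
 */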
void __init dma32_reserve_bootmem(void)
{
        unsigned long size, align;
        if (end_pfn <= MAX_DMA32_PFN)
                return;

        align = 64ULL<<20;
        size = round_up(dma32_bootmem_size, align);
        dma32_bootmem_ptr = __alloc_bootmem_nopanic(size, align,
                                __pa(MAX_DMA_ADDRESS));
        if (dma32_bootmem_ptr)
                dma32_bootmem_size = size;
        else
                dma32_bootmem_size = 0;
}
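
/*
 * Give the reservation back to bootmem so that pci_iommu_alloc() and the
 * IOMMU code it calls can allocate from the area below 4GB.
 */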
static void __init dma32_free_bootmem(void)
{
        int node;

        if (end_pfn <= MAX_DMA32_PFN)
                return;

        if (!dma32_bootmem_ptr)
                return;

        for_each_online_node(node)
                free_bootmem_node(NODE_DATA(node), __pa(dma32_bootmem_ptr),
                                  dma32_bootmem_size);

        dma32_bootmem_ptr = NULL;
        dma32_bootmem_size = 0;
}

void __init pci_iommu_alloc(void)
{
        /* free the range so the IOMMU can get some range below 4G */
        dma32_free_bootmem();
        /*
         * The order of these functions is important for
         * fall-back/fail-over reasons
         */
#ifdef CONFIG_GART_IOMMU
        gart_iommu_hole_init();
#endif

#ifdef CONFIG_CALGARY_IOMMU
        detect_calgary();
#endif

        detect_intel_iommu();

#ifdef CONFIG_SWIOTLB
        pci_swiotlb_init();
#endif
}
#endif

/*
 * See <Documentation/x86_64/boot-options.txt> for the iommu kernel parameter
 * documentation.
 */
static __init int iommu_setup(char *p)
{
        iommu_merge = 1;

        if (!p)
                return -EINVAL;

        while (*p) {
                if (!strncmp(p, "off", 3))
                        no_iommu = 1;
                /* gart_parse_options has more force support */
                if (!strncmp(p, "force", 5))
                        force_iommu = 1;
                if (!strncmp(p, "noforce", 7)) {
                        iommu_merge = 0;
                        force_iommu = 0;
                }

                if (!strncmp(p, "biomerge", 8)) {
                        iommu_bio_merge = 4096;
                        iommu_merge = 1;
                        force_iommu = 1;
                }
                if (!strncmp(p, "panic", 5))
                        panic_on_overflow = 1;
                if (!strncmp(p, "nopanic", 7))
                        panic_on_overflow = 0;
                if (!strncmp(p, "merge", 5)) {
                        iommu_merge = 1;
                        force_iommu = 1;
                }
                if (!strncmp(p, "nomerge", 7))
                        iommu_merge = 0;
                if (!strncmp(p, "forcesac", 8))
                        iommu_sac_force = 1;
                if (!strncmp(p, "allowdac", 8))
                        forbid_dac = 0;
                if (!strncmp(p, "nodac", 5))
                        forbid_dac = -1;
                if (!strncmp(p, "usedac", 6)) {
                        forbid_dac = -1;
                        return 1;
                }
#ifdef CONFIG_SWIOTLB
                if (!strncmp(p, "soft", 4))
                        swiotlb = 1;
#endif

#ifdef CONFIG_GART_IOMMU
                gart_parse_options(p);
#endif

#ifdef CONFIG_CALGARY_IOMMU
                if (!strncmp(p, "calgary", 7))
                        use_calgary = 1;
#endif /* CONFIG_CALGARY_IOMMU */

                p += strcspn(p, ",");
                if (*p == ',')
                        ++p;
        }
        return 0;
}
early_param("iommu", iommu_setup);

#ifdef CONFIG_X86_32
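/*
 * Declare a device-private region of coherent memory: remap bus_addr and
 * manage it with a per-device allocation bitmap so that dma_alloc_coherent()
 * can hand out pages from it.
 */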
int dma_declare_coherent_memory(struct device *dev, dma_addr_t bus_addr,
                                dma_addr_t device_addr, size_t size, int flags)
{
        void __iomem *mem_base = NULL;
        int pages = size >> PAGE_SHIFT;
        int bitmap_size = BITS_TO_LONGS(pages) * sizeof(long);

        if ((flags & (DMA_MEMORY_MAP | DMA_MEMORY_IO)) == 0)
                goto out;
        if (!size)
                goto out;
        if (dev->dma_mem)
                goto out;

        /* FIXME: this routine just ignores DMA_MEMORY_INCLUDES_CHILDREN */

        mem_base = ioremap(bus_addr, size);
        if (!mem_base)
                goto out;

        dev->dma_mem = kzalloc(sizeof(struct dma_coherent_mem), GFP_KERNEL);
        if (!dev->dma_mem)
                goto out;
        dev->dma_mem->bitmap = kzalloc(bitmap_size, GFP_KERNEL);
        if (!dev->dma_mem->bitmap)
                goto free1_out;

        dev->dma_mem->virt_base = mem_base;
        dev->dma_mem->device_base = device_addr;
        dev->dma_mem->size = pages;
        dev->dma_mem->flags = flags;

        if (flags & DMA_MEMORY_MAP)
                return DMA_MEMORY_MAP;

        return DMA_MEMORY_IO;

 free1_out:
        kfree(dev->dma_mem);
 out:
        if (mem_base)
                iounmap(mem_base);
        return 0;
}
EXPORT_SYMBOL(dma_declare_coherent_memory);

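/* Tear down a region set up by dma_declare_coherent_memory(). */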
void dma_release_declared_memory(struct device *dev)
{
        struct dma_coherent_mem *mem = dev->dma_mem;

        if (!mem)
                return;
        dev->dma_mem = NULL;
        iounmap(mem->virt_base);
        kfree(mem->bitmap);
        kfree(mem);
}
EXPORT_SYMBOL(dma_release_declared_memory);

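/*
 * Mark part of a declared region as already in use and return its kernel
 * virtual address.
 */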
void *dma_mark_declared_memory_occupied(struct device *dev,
                                        dma_addr_t device_addr, size_t size)
{
        struct dma_coherent_mem *mem = dev->dma_mem;
        int pos, err;
        int pages = (size + (device_addr & ~PAGE_MASK) + PAGE_SIZE - 1);

        pages >>= PAGE_SHIFT;

        if (!mem)
                return ERR_PTR(-EINVAL);

        pos = (device_addr - mem->device_base) >> PAGE_SHIFT;
        err = bitmap_allocate_region(mem->bitmap, pos, get_order(pages));
        if (err != 0)
                return ERR_PTR(err);
        return mem->virt_base + (pos << PAGE_SHIFT);
}
EXPORT_SYMBOL(dma_mark_declared_memory_occupied);

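/*
 * Try to satisfy a coherent allocation from the device's declared memory
 * region, if it has one.  Returns non-zero when such a region exists, so
 * that the caller does not fall back to the normal page allocator.
 */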
static int dma_alloc_from_coherent_mem(struct device *dev, ssize_t size,
                                       dma_addr_t *dma_handle, void **ret)
{
        struct dma_coherent_mem *mem = dev ? dev->dma_mem : NULL;
        int order = get_order(size);

        if (mem) {
                int page = bitmap_find_free_region(mem->bitmap, mem->size,
                                                   order);
                if (page >= 0) {
                        *dma_handle = mem->device_base + (page << PAGE_SHIFT);
                        *ret = mem->virt_base + (page << PAGE_SHIFT);
                        memset(*ret, 0, size);
                }
                if (mem->flags & DMA_MEMORY_EXCLUSIVE)
                        *ret = NULL;
        }
        return (mem != NULL);
}

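/*
 * Return an allocation to the device's declared memory region.  Returns
 * non-zero when the address belonged to that region and was freed here.
 */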
static int dma_release_coherent(struct device *dev, int order, void *vaddr)
{
        struct dma_coherent_mem *mem = dev ? dev->dma_mem : NULL;

        if (mem && vaddr >= mem->virt_base && vaddr <
                   (mem->virt_base + (mem->size << PAGE_SHIFT))) {
                int page = (vaddr - mem->virt_base) >> PAGE_SHIFT;

                bitmap_release_region(mem->bitmap, page, order);
                return 1;
        }
        return 0;
}
#else
#define dma_alloc_from_coherent_mem(dev, size, handle, ret) (0)
#define dma_release_coherent(dev, order, vaddr) (0)
#endif /* CONFIG_X86_32 */

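/*
 * Check whether a device can reach memory described by @mask, taking the
 * DAC policy and the active dma_ops backend into account.
 */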
int dma_supported(struct device *dev, u64 mask)
{
#ifdef CONFIG_PCI
        if (mask > 0xffffffff && forbid_dac > 0) {
                printk(KERN_INFO "PCI: Disallowing DAC for device %s\n",
                                 dev->bus_id);
                return 0;
        }
#endif

        if (dma_ops->dma_supported)
                return dma_ops->dma_supported(dev, mask);

        /* Copied from i386. Doesn't make much sense, because it will
           only work for pci_alloc_coherent.
           The caller just has to use GFP_DMA in this case. */
        if (mask < DMA_24BIT_MASK)
                return 0;

        /* Tell the device to use SAC when IOMMU force is on. This
           allows the driver to use cheaper accesses in some cases.

           Problem with this is that if we overflow the IOMMU area and
           return DAC as fallback address the device may not handle it
           correctly.

           As a special case some controllers have a 39bit address
           mode that is as efficient as 32bit (aic79xx). Don't force
           SAC for these. Assume all masks <= 40 bits are of this
           type. Normally this doesn't make any difference, but gives
           more gentle handling of IOMMU overflow. */
        if (iommu_sac_force && (mask >= DMA_40BIT_MASK)) {
                printk(KERN_INFO "%s: Force SAC with mask %Lx\n",
                                 dev->bus_id, mask);
                return 0;
        }

        return 1;
}
EXPORT_SYMBOL(dma_supported);

/* Allocate DMA memory on node near device */
noinline struct page *
dma_alloc_pages(struct device *dev, gfp_t gfp, unsigned order)
{
        int node;

        node = dev_to_node(dev);

        return alloc_pages_node(node, gfp, order);
}

/*
 * Allocate memory for a coherent mapping.
 */
void *
dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
                   gfp_t gfp)
{
        void *memory = NULL;
        struct page *page;
        unsigned long dma_mask = 0;
        dma_addr_t bus;
        int noretry = 0;

        /* ignore region specifiers */
        gfp &= ~(__GFP_DMA | __GFP_HIGHMEM | __GFP_DMA32);

        if (dma_alloc_from_coherent_mem(dev, size, dma_handle, &memory))
                return memory;

        if (!dev) {
                dev = &fallback_dev;
                gfp |= GFP_DMA;
        }
        dma_mask = dev->coherent_dma_mask;
        if (dma_mask == 0)
                dma_mask = (gfp & GFP_DMA) ? DMA_24BIT_MASK : DMA_32BIT_MASK;

        /* Device not DMA able */
        if (dev->dma_mask == NULL)
                return NULL;

        /* Don't invoke OOM killer or retry in lower 16MB DMA zone */
        if (gfp & __GFP_DMA)
                noretry = 1;

#ifdef CONFIG_X86_64
        /* Why <=? Even when the mask is smaller than 4GB it is often
           larger than 16MB and in this case we have a chance of
           finding fitting memory in the next higher zone first. If
           not retry with true GFP_DMA. -AK */
        if (dma_mask <= DMA_32BIT_MASK && !(gfp & GFP_DMA)) {
                gfp |= GFP_DMA32;
                if (dma_mask < DMA_32BIT_MASK)
                        noretry = 1;
        }
#endif

 again:
        page = dma_alloc_pages(dev,
                noretry ? gfp | __GFP_NORETRY : gfp, get_order(size));
        if (page == NULL)
                return NULL;

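        /*
         * "high" means the allocation ends above the device's DMA mask and
         * cannot be used directly; "mmu" means it has to be remapped through
         * an IOMMU instead.
         */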
        {
                int high, mmu;
                bus = page_to_phys(page);
                memory = page_address(page);
                high = (bus + size) >= dma_mask;
                mmu = high;
                if (force_iommu && !(gfp & GFP_DMA))
                        mmu = 1;
                else if (high) {
                        free_pages((unsigned long)memory,
                                   get_order(size));

                        /* Don't use the 16MB ZONE_DMA unless absolutely
                           needed. It's better to use remapping first. */
                        if (dma_mask < DMA_32BIT_MASK && !(gfp & GFP_DMA)) {
                                gfp = (gfp & ~GFP_DMA32) | GFP_DMA;
                                goto again;
                        }

                        /* Let low level make its own zone decisions */
                        gfp &= ~(GFP_DMA32|GFP_DMA);

                        if (dma_ops->alloc_coherent)
                                return dma_ops->alloc_coherent(dev, size,
                                                dma_handle, gfp);
                        return NULL;
                }

                memset(memory, 0, size);
                if (!mmu) {
                        *dma_handle = bus;
                        return memory;
                }
        }

        if (dma_ops->alloc_coherent) {
                free_pages((unsigned long)memory, get_order(size));
                gfp &= ~(GFP_DMA|GFP_DMA32);
                return dma_ops->alloc_coherent(dev, size, dma_handle, gfp);
        }

        if (dma_ops->map_simple) {
                *dma_handle = dma_ops->map_simple(dev, virt_to_phys(memory),
                                                  size,
                                                  PCI_DMA_BIDIRECTIONAL);
                if (*dma_handle != bad_dma_address)
                        return memory;
        }

        if (panic_on_overflow)
                panic("dma_alloc_coherent: IOMMU overflow by %lu bytes\n",
                      (unsigned long)size);
        free_pages((unsigned long)memory, get_order(size));
        return NULL;
}
EXPORT_SYMBOL(dma_alloc_coherent);

/*
 * Unmap coherent memory.
 * The caller must ensure that the device has finished accessing the mapping.
 */
void dma_free_coherent(struct device *dev, size_t size,
                        void *vaddr, dma_addr_t bus)
{
        int order = get_order(size);
        WARN_ON(irqs_disabled());       /* for portability */
        if (dma_release_coherent(dev, order, vaddr))
                return;
        if (dma_ops->unmap_single)
                dma_ops->unmap_single(dev, bus, size, 0);
        free_pages((unsigned long)vaddr, order);
}
EXPORT_SYMBOL(dma_free_coherent);

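/*
 * Late initialization: bring up whichever hardware IOMMU was detected
 * earlier, falling back to the nommu implementation otherwise.
 */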
static int __init pci_iommu_init(void)
{
#ifdef CONFIG_CALGARY_IOMMU
        calgary_iommu_init();
#endif

        intel_iommu_init();

#ifdef CONFIG_GART_IOMMU
        gart_iommu_init();
#endif

        no_iommu_init();
        return 0;
}

void pci_iommu_shutdown(void)
{
        gart_iommu_shutdown();
}
/* Must execute after PCI subsystem */
fs_initcall(pci_iommu_init);

#ifdef CONFIG_PCI
/* Many VIA bridges seem to corrupt data for DAC. Disable it here */

static __devinit void via_no_dac(struct pci_dev *dev)
{
        if ((dev->class >> 8) == PCI_CLASS_BRIDGE_PCI && forbid_dac == 0) {
                printk(KERN_INFO "PCI: VIA PCI bridge detected. "
                                 "Disabling DAC.\n");
                forbid_dac = 1;
        }
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_ANY_ID, via_no_dac);
#endif