/* arch/x86/kernel/pci-dma.c */
#include <linux/dma-mapping.h>
#include <linux/dmar.h>
#include <linux/bootmem.h>
#include <linux/pci.h>

#include <asm/proto.h>
#include <asm/dma.h>
#include <asm/iommu.h>
#include <asm/calgary.h>
#include <asm/amd_iommu.h>

static int forbid_dac __read_mostly;

const struct dma_mapping_ops *dma_ops;
EXPORT_SYMBOL(dma_ops);

static int iommu_sac_force __read_mostly;

#ifdef CONFIG_IOMMU_DEBUG
int panic_on_overflow __read_mostly = 1;
int force_iommu __read_mostly = 1;
#else
int panic_on_overflow __read_mostly = 0;
int force_iommu __read_mostly = 0;
#endif

int iommu_merge __read_mostly = 0;

int no_iommu __read_mostly;
/* Set this to 1 if there is a HW IOMMU in the system */
int iommu_detected __read_mostly = 0;

/* This tells the BIO block layer to assume merging. Default to off
   because we cannot guarantee merging later. */
int iommu_bio_merge __read_mostly = 0;
EXPORT_SYMBOL(iommu_bio_merge);

dma_addr_t bad_dma_address __read_mostly = 0;
EXPORT_SYMBOL(bad_dma_address);

/* Dummy device used for NULL arguments (normally ISA). Better would
   be probably a smaller DMA mask, but this is bug-to-bug compatible
   to older i386. */
struct device fallback_dev = {
        .bus_id = "fallback device",
        .coherent_dma_mask = DMA_32BIT_MASK,
        .dma_mask = &fallback_dev.coherent_dma_mask,
};

int dma_set_mask(struct device *dev, u64 mask)
{
        if (!dev->dma_mask || !dma_supported(dev, mask))
                return -EIO;

        *dev->dma_mask = mask;

        return 0;
}
EXPORT_SYMBOL(dma_set_mask);

#ifdef CONFIG_X86_64
static __initdata void *dma32_bootmem_ptr;
static unsigned long dma32_bootmem_size __initdata = (128ULL<<20);

static int __init parse_dma32_size_opt(char *p)
{
        if (!p)
                return -EINVAL;
        dma32_bootmem_size = memparse(p, &p);
        return 0;
}
early_param("dma32_size", parse_dma32_size_opt);

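/*
 * Reserve a chunk of memory below 4GB at boot so that, on machines with
 * more than 4GB of RAM, the IOMMU setup code later has a known-good
 * sub-4GB range to work with.  The reservation is released again from
 * pci_iommu_alloc() via dma32_free_bootmem() once the IOMMU/swiotlb
 * code gets its chance to allocate from that range.
 */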
void __init dma32_reserve_bootmem(void)
{
        unsigned long size, align;
        if (max_pfn <= MAX_DMA32_PFN)
                return;

        /*
         * check aperture_64.c allocate_aperture() for reason about
         * using 512M as goal
         */
        align = 64ULL<<20;
        size = round_up(dma32_bootmem_size, align);
        dma32_bootmem_ptr = __alloc_bootmem_nopanic(size, align,
                                                    512ULL<<20);
        if (dma32_bootmem_ptr)
                dma32_bootmem_size = size;
        else
                dma32_bootmem_size = 0;
}
static void __init dma32_free_bootmem(void)
{

        if (max_pfn <= MAX_DMA32_PFN)
                return;

        if (!dma32_bootmem_ptr)
                return;

        free_bootmem(__pa(dma32_bootmem_ptr), dma32_bootmem_size);

        dma32_bootmem_ptr = NULL;
        dma32_bootmem_size = 0;
}

void __init pci_iommu_alloc(void)
{
        /* free the range so iommu could get some range less than 4G */
        dma32_free_bootmem();
        /*
         * The order of these functions is important for
         * fall-back/fail-over reasons
         */
        gart_iommu_hole_init();

        detect_calgary();

        detect_intel_iommu();

        amd_iommu_detect();

        pci_swiotlb_init();
}
#endif

/*
 * See <Documentation/x86_64/boot-options.txt> for the iommu kernel parameter
 * documentation.
 */
static __init int iommu_setup(char *p)
{
        iommu_merge = 1;

        if (!p)
                return -EINVAL;

        while (*p) {
                if (!strncmp(p, "off", 3))
                        no_iommu = 1;
                /* gart_parse_options has more force support */
                if (!strncmp(p, "force", 5))
                        force_iommu = 1;
                if (!strncmp(p, "noforce", 7)) {
                        iommu_merge = 0;
                        force_iommu = 0;
                }

                if (!strncmp(p, "biomerge", 8)) {
                        iommu_bio_merge = 4096;
                        iommu_merge = 1;
                        force_iommu = 1;
                }
                if (!strncmp(p, "panic", 5))
                        panic_on_overflow = 1;
                if (!strncmp(p, "nopanic", 7))
                        panic_on_overflow = 0;
                if (!strncmp(p, "merge", 5)) {
                        iommu_merge = 1;
                        force_iommu = 1;
                }
                if (!strncmp(p, "nomerge", 7))
                        iommu_merge = 0;
                if (!strncmp(p, "forcesac", 8))
                        iommu_sac_force = 1;
                if (!strncmp(p, "allowdac", 8))
                        forbid_dac = 0;
                if (!strncmp(p, "nodac", 5))
                        forbid_dac = -1;
                if (!strncmp(p, "usedac", 6)) {
                        forbid_dac = -1;
                        return 1;
                }
#ifdef CONFIG_SWIOTLB
                if (!strncmp(p, "soft", 4))
                        swiotlb = 1;
#endif

                gart_parse_options(p);

#ifdef CONFIG_CALGARY_IOMMU
                if (!strncmp(p, "calgary", 7))
                        use_calgary = 1;
#endif /* CONFIG_CALGARY_IOMMU */

                p += strcspn(p, ",");
                if (*p == ',')
                        ++p;
        }
        return 0;
}
early_param("iommu", iommu_setup);

#ifdef CONFIG_X86_32
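/*
 * Declare a region of "coherent" memory for a device: ioremap() the given
 * bus address range and hand out pieces of it from dma_alloc_coherent()
 * through the per-device dma_mem bitmap allocator below.  Returns
 * DMA_MEMORY_MAP or DMA_MEMORY_IO on success, 0 on failure.
 */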
int dma_declare_coherent_memory(struct device *dev, dma_addr_t bus_addr,
                                dma_addr_t device_addr, size_t size, int flags)
{
        void __iomem *mem_base = NULL;
        int pages = size >> PAGE_SHIFT;
        int bitmap_size = BITS_TO_LONGS(pages) * sizeof(long);

        if ((flags & (DMA_MEMORY_MAP | DMA_MEMORY_IO)) == 0)
                goto out;
        if (!size)
                goto out;
        if (dev->dma_mem)
                goto out;

        /* FIXME: this routine just ignores DMA_MEMORY_INCLUDES_CHILDREN */

        mem_base = ioremap(bus_addr, size);
        if (!mem_base)
                goto out;

        dev->dma_mem = kzalloc(sizeof(struct dma_coherent_mem), GFP_KERNEL);
        if (!dev->dma_mem)
                goto out;
        dev->dma_mem->bitmap = kzalloc(bitmap_size, GFP_KERNEL);
        if (!dev->dma_mem->bitmap)
                goto free1_out;

        dev->dma_mem->virt_base = mem_base;
        dev->dma_mem->device_base = device_addr;
        dev->dma_mem->size = pages;
        dev->dma_mem->flags = flags;

        if (flags & DMA_MEMORY_MAP)
                return DMA_MEMORY_MAP;

        return DMA_MEMORY_IO;

 free1_out:
        kfree(dev->dma_mem);
 out:
        if (mem_base)
                iounmap(mem_base);
        return 0;
}
EXPORT_SYMBOL(dma_declare_coherent_memory);

void dma_release_declared_memory(struct device *dev)
{
        struct dma_coherent_mem *mem = dev->dma_mem;

        if (!mem)
                return;
        dev->dma_mem = NULL;
        iounmap(mem->virt_base);
        kfree(mem->bitmap);
        kfree(mem);
}
EXPORT_SYMBOL(dma_release_declared_memory);

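/*
 * Mark a sub-range of a device's declared coherent memory as in use and
 * return its kernel virtual address, so a driver can claim a specific
 * device address instead of going through dma_alloc_coherent().
 */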
void *dma_mark_declared_memory_occupied(struct device *dev,
                                        dma_addr_t device_addr, size_t size)
{
        struct dma_coherent_mem *mem = dev->dma_mem;
        int pos, err;
        int pages = (size + (device_addr & ~PAGE_MASK) + PAGE_SIZE - 1);

        pages >>= PAGE_SHIFT;

        if (!mem)
                return ERR_PTR(-EINVAL);

        pos = (device_addr - mem->device_base) >> PAGE_SHIFT;
        err = bitmap_allocate_region(mem->bitmap, pos, get_order(pages));
        if (err != 0)
                return ERR_PTR(err);
        return mem->virt_base + (pos << PAGE_SHIFT);
}
EXPORT_SYMBOL(dma_mark_declared_memory_occupied);

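/*
 * Try to satisfy an allocation from the device's declared coherent memory
 * region first.  Returns nonzero if the device has such a region (in which
 * case *ret is the allocation, or NULL if the region is exclusive or full),
 * zero if the caller should fall back to the normal allocator.
 */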
static int dma_alloc_from_coherent_mem(struct device *dev, ssize_t size,
                                       dma_addr_t *dma_handle, void **ret)
{
        struct dma_coherent_mem *mem = dev ? dev->dma_mem : NULL;
        int order = get_order(size);

        if (mem) {
                int page = bitmap_find_free_region(mem->bitmap, mem->size,
                                                   order);
                if (page >= 0) {
                        *dma_handle = mem->device_base + (page << PAGE_SHIFT);
                        *ret = mem->virt_base + (page << PAGE_SHIFT);
                        memset(*ret, 0, size);
                }
                if (mem->flags & DMA_MEMORY_EXCLUSIVE)
                        *ret = NULL;
        }
        return (mem != NULL);
}

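/*
 * Give an allocation back to the device's declared coherent memory region.
 * Returns 1 if vaddr belonged to that region (and has been released),
 * 0 if the caller should free it through the normal path instead.
 */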
static int dma_release_coherent(struct device *dev, int order, void *vaddr)
{
        struct dma_coherent_mem *mem = dev ? dev->dma_mem : NULL;

        if (mem && vaddr >= mem->virt_base && vaddr <
                   (mem->virt_base + (mem->size << PAGE_SHIFT))) {
                int page = (vaddr - mem->virt_base) >> PAGE_SHIFT;

                bitmap_release_region(mem->bitmap, page, order);
                return 1;
        }
        return 0;
}
#else
#define dma_alloc_from_coherent_mem(dev, size, handle, ret) (0)
#define dma_release_coherent(dev, order, vaddr) (0)
#endif /* CONFIG_X86_32 */

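/*
 * Check whether a device can reliably use the given DMA mask on this
 * system, taking the DAC/SAC policy and the active dma_ops into account.
 * Returns 1 if the mask is usable, 0 if not.
 */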
int dma_supported(struct device *dev, u64 mask)
{
#ifdef CONFIG_PCI
        if (mask > 0xffffffff && forbid_dac > 0) {
                printk(KERN_INFO "PCI: Disallowing DAC for device %s\n",
                                 dev->bus_id);
                return 0;
        }
#endif

        if (dma_ops->dma_supported)
                return dma_ops->dma_supported(dev, mask);

        /* Copied from i386. Doesn't make much sense, because it will
           only work for pci_alloc_coherent.
           The caller just has to use GFP_DMA in this case. */
        if (mask < DMA_24BIT_MASK)
                return 0;

        /* Tell the device to use SAC when IOMMU force is on.  This
           allows the driver to use cheaper accesses in some cases.

           Problem with this is that if we overflow the IOMMU area and
           return DAC as fallback address the device may not handle it
           correctly.

           As a special case some controllers have a 39bit address
           mode that is as efficient as 32bit (aic79xx). Don't force
           SAC for these.  Assume all masks <= 40 bits are of this
           type. Normally this doesn't make any difference, but gives
           more gentle handling of IOMMU overflow. */
        if (iommu_sac_force && (mask >= DMA_40BIT_MASK)) {
                printk(KERN_INFO "%s: Force SAC with mask %Lx\n",
                                 dev->bus_id, mask);
                return 0;
        }

        return 1;
}
EXPORT_SYMBOL(dma_supported);

/* Allocate DMA memory on node near device */
static noinline struct page *
dma_alloc_pages(struct device *dev, gfp_t gfp, unsigned order)
{
        int node;

        node = dev_to_node(dev);

        return alloc_pages_node(node, gfp, order);
}

/*
 * Allocate memory for a coherent mapping.
 */
void *
dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
                   gfp_t gfp)
{
        void *memory = NULL;
        struct page *page;
        unsigned long dma_mask = 0;
        dma_addr_t bus;
        int noretry = 0;

        /* ignore region specifiers */
        gfp &= ~(__GFP_DMA | __GFP_HIGHMEM | __GFP_DMA32);

        if (dma_alloc_from_coherent_mem(dev, size, dma_handle, &memory))
                return memory;

        if (!dev) {
                dev = &fallback_dev;
                gfp |= GFP_DMA;
        }
        dma_mask = dev->coherent_dma_mask;
        if (dma_mask == 0)
                dma_mask = (gfp & GFP_DMA) ? DMA_24BIT_MASK : DMA_32BIT_MASK;

        /* Device not DMA able */
        if (dev->dma_mask == NULL)
                return NULL;

        /* Don't invoke OOM killer or retry in lower 16MB DMA zone */
        if (gfp & __GFP_DMA)
                noretry = 1;

#ifdef CONFIG_X86_64
        /* Why <=? Even when the mask is smaller than 4GB it is often
           larger than 16MB and in this case we have a chance of
           finding fitting memory in the next higher zone first. If
           not retry with true GFP_DMA. -AK */
        if (dma_mask <= DMA_32BIT_MASK && !(gfp & GFP_DMA)) {
                gfp |= GFP_DMA32;
                if (dma_mask < DMA_32BIT_MASK)
                        noretry = 1;
        }
#endif

 again:
        page = dma_alloc_pages(dev,
                        noretry ? gfp | __GFP_NORETRY : gfp, get_order(size));
        if (page == NULL)
                return NULL;

        {
                int high, mmu;
                bus = page_to_phys(page);
                memory = page_address(page);
                high = (bus + size) >= dma_mask;
                mmu = high;
                if (force_iommu && !(gfp & GFP_DMA))
                        mmu = 1;
                else if (high) {
                        free_pages((unsigned long)memory,
                                   get_order(size));

                        /* Don't use the 16MB ZONE_DMA unless absolutely
                           needed. It's better to use remapping first. */
                        if (dma_mask < DMA_32BIT_MASK && !(gfp & GFP_DMA)) {
                                gfp = (gfp & ~GFP_DMA32) | GFP_DMA;
                                goto again;
                        }

                        /* Let low level make its own zone decisions */
                        gfp &= ~(GFP_DMA32|GFP_DMA);

                        if (dma_ops->alloc_coherent)
                                return dma_ops->alloc_coherent(dev, size,
                                                               dma_handle, gfp);
                        return NULL;
                }

                memset(memory, 0, size);
                if (!mmu) {
                        *dma_handle = bus;
                        return memory;
                }
        }

        if (dma_ops->alloc_coherent) {
                free_pages((unsigned long)memory, get_order(size));
                gfp &= ~(GFP_DMA|GFP_DMA32);
                return dma_ops->alloc_coherent(dev, size, dma_handle, gfp);
        }

        if (dma_ops->map_simple) {
                *dma_handle = dma_ops->map_simple(dev, virt_to_phys(memory),
                                                  size,
                                                  PCI_DMA_BIDIRECTIONAL);
                if (*dma_handle != bad_dma_address)
                        return memory;
        }

        if (panic_on_overflow)
                panic("dma_alloc_coherent: IOMMU overflow by %lu bytes\n",
                      (unsigned long)size);
        free_pages((unsigned long)memory, get_order(size));
        return NULL;
}
EXPORT_SYMBOL(dma_alloc_coherent);

/*
 * Unmap coherent memory.
 * The caller must ensure that the device has finished accessing the mapping.
 */
void dma_free_coherent(struct device *dev, size_t size,
                       void *vaddr, dma_addr_t bus)
{
        int order = get_order(size);
        WARN_ON(irqs_disabled());       /* for portability */
        if (dma_release_coherent(dev, order, vaddr))
                return;
        if (dma_ops->unmap_single)
                dma_ops->unmap_single(dev, bus, size, 0);
        free_pages((unsigned long)vaddr, order);
}
EXPORT_SYMBOL(dma_free_coherent);

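/*
 * Initialize whichever hardware IOMMU was detected by pci_iommu_alloc().
 * Each *_init() backs off when its hardware is absent; no_iommu_init()
 * runs last and installs the nommu fallback dma_ops if nothing else did.
 */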
static int __init pci_iommu_init(void)
{
        calgary_iommu_init();

        intel_iommu_init();

        amd_iommu_init();

        gart_iommu_init();

        no_iommu_init();
        return 0;
}

void pci_iommu_shutdown(void)
{
        gart_iommu_shutdown();
}
/* Must execute after PCI subsystem */
fs_initcall(pci_iommu_init);

#ifdef CONFIG_PCI
/* Many VIA bridges seem to corrupt data for DAC. Disable it here */

static __devinit void via_no_dac(struct pci_dev *dev)
{
        if ((dev->class >> 8) == PCI_CLASS_BRIDGE_PCI && forbid_dac == 0) {
                printk(KERN_INFO "PCI: VIA PCI bridge detected. "
                                 "Disabling DAC.\n");
                forbid_dac = 1;
        }
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_ANY_ID, via_no_dac);
#endif