#include <linux/dma-mapping.h>
#include <linux/dmar.h>
#include <linux/bootmem.h>
#include <linux/pci.h>

#include <asm/proto.h>
#include <asm/dma.h>
#include <asm/iommu.h>
#include <asm/calgary.h>
#include <asm/amd_iommu.h>

static int forbid_dac __read_mostly;

struct dma_mapping_ops *dma_ops;
EXPORT_SYMBOL(dma_ops);

static int iommu_sac_force __read_mostly;

#ifdef CONFIG_IOMMU_DEBUG
int panic_on_overflow __read_mostly = 1;
int force_iommu __read_mostly = 1;
#else
int panic_on_overflow __read_mostly = 0;
int force_iommu __read_mostly = 0;
#endif

int iommu_merge __read_mostly = 0;

int no_iommu __read_mostly;
/* Set this to 1 if there is a HW IOMMU in the system */
int iommu_detected __read_mostly = 0;

/* This tells the BIO block layer to assume merging. Default to off
   because we cannot guarantee merging later. */
int iommu_bio_merge __read_mostly = 0;
EXPORT_SYMBOL(iommu_bio_merge);

dma_addr_t bad_dma_address __read_mostly = 0;
EXPORT_SYMBOL(bad_dma_address);

/* Dummy device used for NULL arguments (normally ISA). Better would
   be probably a smaller DMA mask, but this is bug-to-bug compatible
   to older i386. */
struct device fallback_dev = {
	.bus_id = "fallback device",
	.coherent_dma_mask = DMA_32BIT_MASK,
	.dma_mask = &fallback_dev.coherent_dma_mask,
};

int dma_set_mask(struct device *dev, u64 mask)
{
	if (!dev->dma_mask || !dma_supported(dev, mask))
		return -EIO;

	*dev->dma_mask = mask;

	return 0;
}
EXPORT_SYMBOL(dma_set_mask);
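
/*
 * Example usage (hypothetical driver code, not part of this file): a
 * driver typically negotiates its mask in probe() and retries with a
 * narrower mask if the wide one is rejected:
 *
 *	if (dma_set_mask(&pdev->dev, DMA_64BIT_MASK) &&
 *	    dma_set_mask(&pdev->dev, DMA_32BIT_MASK))
 *		return -EIO;
 *
 * dma_set_mask() consults dma_supported() first, so a rejected wide
 * mask can safely be followed by a narrower retry.
 */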
60 | ||
116890d5 GC |
61 | #ifdef CONFIG_X86_64 |
62 | static __initdata void *dma32_bootmem_ptr; | |
63 | static unsigned long dma32_bootmem_size __initdata = (128ULL<<20); | |
64 | ||
65 | static int __init parse_dma32_size_opt(char *p) | |
66 | { | |
67 | if (!p) | |
68 | return -EINVAL; | |
69 | dma32_bootmem_size = memparse(p, &p); | |
70 | return 0; | |
71 | } | |
72 | early_param("dma32_size", parse_dma32_size_opt); | |
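
/*
 * Example (kernel command line, value illustrative): booting with
 * "dma32_size=256M" reserves 256MB instead of the default 128MB;
 * memparse() understands the usual K/M/G suffixes.
 */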
73 | ||
74 | void __init dma32_reserve_bootmem(void) | |
75 | { | |
76 | unsigned long size, align; | |
c987d12f | 77 | if (max_pfn <= MAX_DMA32_PFN) |
116890d5 GC |
78 | return; |
79 | ||
7677b2ef YL |
80 | /* |
81 | * check aperture_64.c allocate_aperture() for reason about | |
82 | * using 512M as goal | |
83 | */ | |
116890d5 GC |
84 | align = 64ULL<<20; |
85 | size = round_up(dma32_bootmem_size, align); | |
86 | dma32_bootmem_ptr = __alloc_bootmem_nopanic(size, align, | |
7677b2ef | 87 | 512ULL<<20); |
116890d5 GC |
88 | if (dma32_bootmem_ptr) |
89 | dma32_bootmem_size = size; | |
90 | else | |
91 | dma32_bootmem_size = 0; | |
92 | } | |
93 | static void __init dma32_free_bootmem(void) | |
94 | { | |
116890d5 | 95 | |
c987d12f | 96 | if (max_pfn <= MAX_DMA32_PFN) |
116890d5 GC |
97 | return; |
98 | ||
99 | if (!dma32_bootmem_ptr) | |
100 | return; | |
101 | ||
330fce23 | 102 | free_bootmem(__pa(dma32_bootmem_ptr), dma32_bootmem_size); |
116890d5 GC |
103 | |
104 | dma32_bootmem_ptr = NULL; | |
105 | dma32_bootmem_size = 0; | |
106 | } | |
107 | ||
108 | void __init pci_iommu_alloc(void) | |
109 | { | |
110 | /* free the range so iommu could get some range less than 4G */ | |
111 | dma32_free_bootmem(); | |
112 | /* | |
113 | * The order of these functions is important for | |
114 | * fall-back/fail-over reasons | |
115 | */ | |
116890d5 | 116 | gart_iommu_hole_init(); |
116890d5 | 117 | |
116890d5 | 118 | detect_calgary(); |
116890d5 GC |
119 | |
120 | detect_intel_iommu(); | |
121 | ||
a69ca340 JR |
122 | amd_iommu_detect(); |
123 | ||
116890d5 | 124 | pci_swiotlb_init(); |
116890d5 GC |
125 | } |
126 | #endif | |
127 | ||
fae9a0d8 GC |
128 | /* |
129 | * See <Documentation/x86_64/boot-options.txt> for the iommu kernel parameter | |
130 | * documentation. | |
131 | */ | |
132 | static __init int iommu_setup(char *p) | |
133 | { | |
134 | iommu_merge = 1; | |
135 | ||
136 | if (!p) | |
137 | return -EINVAL; | |
138 | ||
139 | while (*p) { | |
140 | if (!strncmp(p, "off", 3)) | |
141 | no_iommu = 1; | |
142 | /* gart_parse_options has more force support */ | |
143 | if (!strncmp(p, "force", 5)) | |
144 | force_iommu = 1; | |
145 | if (!strncmp(p, "noforce", 7)) { | |
146 | iommu_merge = 0; | |
147 | force_iommu = 0; | |
148 | } | |
149 | ||
150 | if (!strncmp(p, "biomerge", 8)) { | |
151 | iommu_bio_merge = 4096; | |
152 | iommu_merge = 1; | |
153 | force_iommu = 1; | |
154 | } | |
155 | if (!strncmp(p, "panic", 5)) | |
156 | panic_on_overflow = 1; | |
157 | if (!strncmp(p, "nopanic", 7)) | |
158 | panic_on_overflow = 0; | |
159 | if (!strncmp(p, "merge", 5)) { | |
160 | iommu_merge = 1; | |
161 | force_iommu = 1; | |
162 | } | |
163 | if (!strncmp(p, "nomerge", 7)) | |
164 | iommu_merge = 0; | |
165 | if (!strncmp(p, "forcesac", 8)) | |
166 | iommu_sac_force = 1; | |
167 | if (!strncmp(p, "allowdac", 8)) | |
168 | forbid_dac = 0; | |
169 | if (!strncmp(p, "nodac", 5)) | |
170 | forbid_dac = -1; | |
171 | if (!strncmp(p, "usedac", 6)) { | |
172 | forbid_dac = -1; | |
173 | return 1; | |
174 | } | |
175 | #ifdef CONFIG_SWIOTLB | |
176 | if (!strncmp(p, "soft", 4)) | |
177 | swiotlb = 1; | |
178 | #endif | |
179 | ||
fae9a0d8 | 180 | gart_parse_options(p); |
fae9a0d8 GC |
181 | |
182 | #ifdef CONFIG_CALGARY_IOMMU | |
183 | if (!strncmp(p, "calgary", 7)) | |
184 | use_calgary = 1; | |
185 | #endif /* CONFIG_CALGARY_IOMMU */ | |
186 | ||
187 | p += strcspn(p, ","); | |
188 | if (*p == ',') | |
189 | ++p; | |
190 | } | |
191 | return 0; | |
192 | } | |
193 | early_param("iommu", iommu_setup); | |
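
/*
 * Example (kernel command line, options illustrative): "iommu=force,merge"
 * enables both forced IOMMU usage and scatter-gather merging, while
 * "iommu=soft" selects the swiotlb bounce-buffer path. Options are comma
 * separated and scanned in order, so a later option can override an
 * earlier one (e.g. "iommu=force,noforce" ends up with noforce).
 */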
194 | ||
8e8edc64 GC |
195 | #ifdef CONFIG_X86_32 |
196 | int dma_declare_coherent_memory(struct device *dev, dma_addr_t bus_addr, | |
197 | dma_addr_t device_addr, size_t size, int flags) | |
198 | { | |
199 | void __iomem *mem_base = NULL; | |
200 | int pages = size >> PAGE_SHIFT; | |
201 | int bitmap_size = BITS_TO_LONGS(pages) * sizeof(long); | |
202 | ||
203 | if ((flags & (DMA_MEMORY_MAP | DMA_MEMORY_IO)) == 0) | |
204 | goto out; | |
205 | if (!size) | |
206 | goto out; | |
207 | if (dev->dma_mem) | |
208 | goto out; | |
209 | ||
210 | /* FIXME: this routine just ignores DMA_MEMORY_INCLUDES_CHILDREN */ | |
211 | ||
212 | mem_base = ioremap(bus_addr, size); | |
213 | if (!mem_base) | |
214 | goto out; | |
215 | ||
216 | dev->dma_mem = kzalloc(sizeof(struct dma_coherent_mem), GFP_KERNEL); | |
217 | if (!dev->dma_mem) | |
218 | goto out; | |
219 | dev->dma_mem->bitmap = kzalloc(bitmap_size, GFP_KERNEL); | |
220 | if (!dev->dma_mem->bitmap) | |
221 | goto free1_out; | |
222 | ||
223 | dev->dma_mem->virt_base = mem_base; | |
224 | dev->dma_mem->device_base = device_addr; | |
225 | dev->dma_mem->size = pages; | |
226 | dev->dma_mem->flags = flags; | |
227 | ||
228 | if (flags & DMA_MEMORY_MAP) | |
229 | return DMA_MEMORY_MAP; | |
230 | ||
231 | return DMA_MEMORY_IO; | |
232 | ||
233 | free1_out: | |
234 | kfree(dev->dma_mem); | |
235 | out: | |
236 | if (mem_base) | |
237 | iounmap(mem_base); | |
238 | return 0; | |
239 | } | |
240 | EXPORT_SYMBOL(dma_declare_coherent_memory); | |
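
/*
 * Example (hypothetical platform code, addresses illustrative; in this
 * file the API is only built for CONFIG_X86_32): a board with 64KB of
 * device-local SRAM at bus address 0x88000000 could expose it for
 * coherent allocations like this:
 *
 *	if (dma_declare_coherent_memory(dev, 0x88000000, 0x88000000,
 *					0x10000, DMA_MEMORY_MAP) == 0)
 *		dev_warn(dev, "no coherent memory region declared\n");
 *
 * Subsequent dma_alloc_coherent() calls on this device are then
 * satisfied from that region first.
 */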
241 | ||
242 | void dma_release_declared_memory(struct device *dev) | |
243 | { | |
244 | struct dma_coherent_mem *mem = dev->dma_mem; | |
245 | ||
246 | if (!mem) | |
247 | return; | |
248 | dev->dma_mem = NULL; | |
249 | iounmap(mem->virt_base); | |
250 | kfree(mem->bitmap); | |
251 | kfree(mem); | |
252 | } | |
253 | EXPORT_SYMBOL(dma_release_declared_memory); | |
254 | ||
255 | void *dma_mark_declared_memory_occupied(struct device *dev, | |
256 | dma_addr_t device_addr, size_t size) | |
257 | { | |
258 | struct dma_coherent_mem *mem = dev->dma_mem; | |
259 | int pos, err; | |
260 | int pages = (size + (device_addr & ~PAGE_MASK) + PAGE_SIZE - 1); | |
261 | ||
262 | pages >>= PAGE_SHIFT; | |
263 | ||
264 | if (!mem) | |
265 | return ERR_PTR(-EINVAL); | |
266 | ||
267 | pos = (device_addr - mem->device_base) >> PAGE_SHIFT; | |
268 | err = bitmap_allocate_region(mem->bitmap, pos, get_order(pages)); | |
269 | if (err != 0) | |
270 | return ERR_PTR(err); | |
271 | return mem->virt_base + (pos << PAGE_SHIFT); | |
272 | } | |
273 | EXPORT_SYMBOL(dma_mark_declared_memory_occupied); | |
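
/*
 * Example (hypothetical, continuing the declaration sketched above): if
 * firmware owns the first page of the declared region, mark it occupied
 * so the bitmap allocator skips it:
 *
 *	void *p = dma_mark_declared_memory_occupied(dev, 0x88000000,
 *						    PAGE_SIZE);
 *	if (IS_ERR(p))
 *		dev_warn(dev, "could not reserve firmware page\n");
 */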

static int dma_alloc_from_coherent_mem(struct device *dev, ssize_t size,
				       dma_addr_t *dma_handle, void **ret)
{
	struct dma_coherent_mem *mem = dev ? dev->dma_mem : NULL;
	int order = get_order(size);

	if (mem) {
		int page = bitmap_find_free_region(mem->bitmap, mem->size,
						   order);
		if (page >= 0) {
			*dma_handle = mem->device_base + (page << PAGE_SHIFT);
			*ret = mem->virt_base + (page << PAGE_SHIFT);
			memset(*ret, 0, size);
		}
		if (mem->flags & DMA_MEMORY_EXCLUSIVE)
			*ret = NULL;
	}
	return (mem != NULL);
}

static int dma_release_coherent(struct device *dev, int order, void *vaddr)
{
	struct dma_coherent_mem *mem = dev ? dev->dma_mem : NULL;

	if (mem && vaddr >= mem->virt_base && vaddr <
		   (mem->virt_base + (mem->size << PAGE_SHIFT))) {
		int page = (vaddr - mem->virt_base) >> PAGE_SHIFT;

		bitmap_release_region(mem->bitmap, page, order);
		return 1;
	}
	return 0;
}
#else
#define dma_alloc_from_coherent_mem(dev, size, handle, ret) (0)
#define dma_release_coherent(dev, order, vaddr) (0)
#endif /* CONFIG_X86_32 */

int dma_supported(struct device *dev, u64 mask)
{
	struct dma_mapping_ops *ops = get_dma_ops(dev);

#ifdef CONFIG_PCI
	if (mask > 0xffffffff && forbid_dac > 0) {
		dev_info(dev, "PCI: Disallowing DAC for device\n");
		return 0;
	}
#endif

	if (ops->dma_supported)
		return ops->dma_supported(dev, mask);

	/* Copied from i386. Doesn't make much sense, because it will
	   only work for pci_alloc_coherent.
	   The caller just has to use GFP_DMA in this case. */
	if (mask < DMA_24BIT_MASK)
		return 0;

	/* Tell the device to use SAC when IOMMU force is on.  This
	   allows the driver to use cheaper accesses in some cases.

	   Problem with this is that if we overflow the IOMMU area and
	   return DAC as fallback address the device may not handle it
	   correctly.

	   As a special case some controllers have a 39bit address
	   mode that is as efficient as 32bit (aic79xx). Don't force
	   SAC for these.  Assume all masks <= 40 bits are of this
	   type. Normally this doesn't make any difference, but gives
	   more gentle handling of IOMMU overflow. */
	if (iommu_sac_force && (mask >= DMA_40BIT_MASK)) {
		dev_info(dev, "Force SAC with mask %Lx\n", mask);
		return 0;
	}

	return 1;
}
EXPORT_SYMBOL(dma_supported);
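
/*
 * Example (hypothetical driver code): dma_supported() is normally called
 * via dma_set_mask(), but a driver may probe a mask without committing
 * to it:
 *
 *	if (dma_supported(&pdev->dev, DMA_40BIT_MASK))
 *		use_40bit_addressing = 1;
 */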
353 | ||
098cb7f2 | 354 | /* Allocate DMA memory on node near device */ |
311f8349 | 355 | static noinline struct page * |
098cb7f2 GC |
356 | dma_alloc_pages(struct device *dev, gfp_t gfp, unsigned order) |
357 | { | |
358 | int node; | |
359 | ||
360 | node = dev_to_node(dev); | |
361 | ||
362 | return alloc_pages_node(node, gfp, order); | |
363 | } | |
364 | ||
365 | /* | |
366 | * Allocate memory for a coherent mapping. | |
367 | */ | |
368 | void * | |
369 | dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle, | |
370 | gfp_t gfp) | |
371 | { | |
8d8bb39b | 372 | struct dma_mapping_ops *ops = get_dma_ops(dev); |
098cb7f2 GC |
373 | void *memory = NULL; |
374 | struct page *page; | |
375 | unsigned long dma_mask = 0; | |
376 | dma_addr_t bus; | |
b7f09ae5 | 377 | int noretry = 0; |
098cb7f2 GC |
378 | |
379 | /* ignore region specifiers */ | |
380 | gfp &= ~(__GFP_DMA | __GFP_HIGHMEM | __GFP_DMA32); | |
381 | ||
382 | if (dma_alloc_from_coherent_mem(dev, size, dma_handle, &memory)) | |
383 | return memory; | |
384 | ||
4a367f3a | 385 | if (!dev) { |
098cb7f2 | 386 | dev = &fallback_dev; |
4a367f3a TI |
387 | gfp |= GFP_DMA; |
388 | } | |
098cb7f2 GC |
389 | dma_mask = dev->coherent_dma_mask; |
390 | if (dma_mask == 0) | |
4a367f3a | 391 | dma_mask = (gfp & GFP_DMA) ? DMA_24BIT_MASK : DMA_32BIT_MASK; |
098cb7f2 GC |
392 | |
393 | /* Device not DMA able */ | |
394 | if (dev->dma_mask == NULL) | |
395 | return NULL; | |
396 | ||
b7f09ae5 MS |
397 | /* Don't invoke OOM killer or retry in lower 16MB DMA zone */ |
398 | if (gfp & __GFP_DMA) | |
399 | noretry = 1; | |
098cb7f2 GC |
400 | |
401 | #ifdef CONFIG_X86_64 | |
402 | /* Why <=? Even when the mask is smaller than 4GB it is often | |
403 | larger than 16MB and in this case we have a chance of | |
404 | finding fitting memory in the next higher zone first. If | |
405 | not retry with true GFP_DMA. -AK */ | |
b7f09ae5 | 406 | if (dma_mask <= DMA_32BIT_MASK && !(gfp & GFP_DMA)) { |
098cb7f2 | 407 | gfp |= GFP_DMA32; |
b7f09ae5 MS |
408 | if (dma_mask < DMA_32BIT_MASK) |
409 | noretry = 1; | |
410 | } | |
098cb7f2 GC |
411 | #endif |
412 | ||
413 | again: | |
db9f600b | 414 | page = dma_alloc_pages(dev, |
b7f09ae5 | 415 | noretry ? gfp | __GFP_NORETRY : gfp, get_order(size)); |
098cb7f2 GC |
416 | if (page == NULL) |
417 | return NULL; | |
418 | ||
419 | { | |
420 | int high, mmu; | |
421 | bus = page_to_phys(page); | |
422 | memory = page_address(page); | |
423 | high = (bus + size) >= dma_mask; | |
424 | mmu = high; | |
425 | if (force_iommu && !(gfp & GFP_DMA)) | |
426 | mmu = 1; | |
427 | else if (high) { | |
428 | free_pages((unsigned long)memory, | |
429 | get_order(size)); | |
430 | ||
431 | /* Don't use the 16MB ZONE_DMA unless absolutely | |
432 | needed. It's better to use remapping first. */ | |
433 | if (dma_mask < DMA_32BIT_MASK && !(gfp & GFP_DMA)) { | |
434 | gfp = (gfp & ~GFP_DMA32) | GFP_DMA; | |
435 | goto again; | |
436 | } | |
437 | ||
438 | /* Let low level make its own zone decisions */ | |
439 | gfp &= ~(GFP_DMA32|GFP_DMA); | |
440 | ||
8d8bb39b FT |
441 | if (ops->alloc_coherent) |
442 | return ops->alloc_coherent(dev, size, | |
098cb7f2 GC |
443 | dma_handle, gfp); |
444 | return NULL; | |
445 | } | |
446 | ||
447 | memset(memory, 0, size); | |
448 | if (!mmu) { | |
449 | *dma_handle = bus; | |
450 | return memory; | |
451 | } | |
452 | } | |
453 | ||
8d8bb39b | 454 | if (ops->alloc_coherent) { |
098cb7f2 GC |
455 | free_pages((unsigned long)memory, get_order(size)); |
456 | gfp &= ~(GFP_DMA|GFP_DMA32); | |
8d8bb39b | 457 | return ops->alloc_coherent(dev, size, dma_handle, gfp); |
098cb7f2 GC |
458 | } |
459 | ||
8d8bb39b FT |
460 | if (ops->map_simple) { |
461 | *dma_handle = ops->map_simple(dev, virt_to_phys(memory), | |
098cb7f2 GC |
462 | size, |
463 | PCI_DMA_BIDIRECTIONAL); | |
464 | if (*dma_handle != bad_dma_address) | |
465 | return memory; | |
466 | } | |
467 | ||
468 | if (panic_on_overflow) | |
469 | panic("dma_alloc_coherent: IOMMU overflow by %lu bytes\n", | |
470 | (unsigned long)size); | |
471 | free_pages((unsigned long)memory, get_order(size)); | |
472 | return NULL; | |
473 | } | |
474 | EXPORT_SYMBOL(dma_alloc_coherent); | |
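
/*
 * Example (hypothetical driver code): allocate a page-sized coherent
 * buffer, hand the bus address to the hardware, and pair the allocation
 * with dma_free_coherent() below:
 *
 *	dma_addr_t bus;
 *	void *buf = dma_alloc_coherent(&pdev->dev, PAGE_SIZE, &bus,
 *				       GFP_KERNEL);
 *	if (!buf)
 *		return -ENOMEM;
 *	...
 *	dma_free_coherent(&pdev->dev, PAGE_SIZE, buf, bus);
 */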
475 | ||
476 | /* | |
477 | * Unmap coherent memory. | |
478 | * The caller must ensure that the device has finished accessing the mapping. | |
479 | */ | |
480 | void dma_free_coherent(struct device *dev, size_t size, | |
481 | void *vaddr, dma_addr_t bus) | |
482 | { | |
8d8bb39b FT |
483 | struct dma_mapping_ops *ops = get_dma_ops(dev); |
484 | ||
098cb7f2 GC |
485 | int order = get_order(size); |
486 | WARN_ON(irqs_disabled()); /* for portability */ | |
487 | if (dma_release_coherent(dev, order, vaddr)) | |
488 | return; | |
8d8bb39b FT |
489 | if (ops->unmap_single) |
490 | ops->unmap_single(dev, bus, size, 0); | |
098cb7f2 GC |
491 | free_pages((unsigned long)vaddr, order); |
492 | } | |
493 | EXPORT_SYMBOL(dma_free_coherent); | |
8e0c3797 | 494 | |
cb5867a5 GC |
495 | static int __init pci_iommu_init(void) |
496 | { | |
cb5867a5 | 497 | calgary_iommu_init(); |
cb5867a5 GC |
498 | |
499 | intel_iommu_init(); | |
500 | ||
a69ca340 JR |
501 | amd_iommu_init(); |
502 | ||
cb5867a5 | 503 | gart_iommu_init(); |
459121c9 | 504 | |
cb5867a5 GC |
505 | no_iommu_init(); |
506 | return 0; | |
507 | } | |
508 | ||
509 | void pci_iommu_shutdown(void) | |
510 | { | |
511 | gart_iommu_shutdown(); | |
512 | } | |
513 | /* Must execute after PCI subsystem */ | |
514 | fs_initcall(pci_iommu_init); | |
bca5c096 GC |
515 | |
516 | #ifdef CONFIG_PCI | |
517 | /* Many VIA bridges seem to corrupt data for DAC. Disable it here */ | |
518 | ||
519 | static __devinit void via_no_dac(struct pci_dev *dev) | |
520 | { | |
521 | if ((dev->class >> 8) == PCI_CLASS_BRIDGE_PCI && forbid_dac == 0) { | |
522 | printk(KERN_INFO "PCI: VIA PCI bridge detected." | |
523 | "Disabling DAC.\n"); | |
524 | forbid_dac = 1; | |
525 | } | |
526 | } | |
527 | DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_ANY_ID, via_no_dac); | |
528 | #endif |