/*
 * Copyright (C) 2001 Mike Corrigan & Dave Engebretsen, IBM Corporation
 *
 * Rewrite, cleanup, new allocation schemes, virtual merging:
 * Copyright (C) 2004 Olof Johansson, IBM Corporation
 *               and  Ben. Herrenschmidt, IBM Corporation
 *
 * Dynamic DMA mapping support, bus-independent parts.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#include <linux/config.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/dma-mapping.h>
#include <linux/bitops.h>
#include <asm/io.h>
#include <asm/prom.h>
#include <asm/iommu.h>
#include <asm/pci-bridge.h>
#include <asm/machdep.h>

#define DBG(...)

#ifdef CONFIG_IOMMU_VMERGE
static int novmerge = 0;
#else
static int novmerge = 1;
#endif

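/*
 * "Virtual merging" lets iommu_map_sg() coalesce scatterlist entries
 * whose IOMMU allocations land in adjacent bus pages into a single DMA
 * segment.  The CONFIG_IOMMU_VMERGE default above can be overridden at
 * boot time with iommu=vmerge or iommu=novmerge.
 */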
static int __init setup_iommu(char *str)
{
        if (!strcmp(str, "novmerge"))
                novmerge = 1;
        else if (!strcmp(str, "vmerge"))
                novmerge = 0;
        return 1;
}

__setup("iommu=", setup_iommu);

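/*
 * Allocate a run of npages free entries from the table's allocation
 * bitmap.  The search is hint-driven and splits the table in two:
 * small allocations (15 pages or less) scan below it_halfpoint
 * starting from it_hint, while large ones scan from it_largehint in
 * the region above it, so the two classes don't fragment each other.
 * Returns the bitmap index of the first entry, or DMA_ERROR_CODE on
 * failure.
 */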
static unsigned long iommu_range_alloc(struct iommu_table *tbl,
                                       unsigned long npages,
                                       unsigned long *handle,
                                       unsigned long mask,
                                       unsigned int align_order)
{
        unsigned long n, end, i, start;
        unsigned long limit;
        int largealloc = npages > 15;
        int pass = 0;
        unsigned long align_mask;

        align_mask = 0xffffffffffffffffl >> (64 - align_order);
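        /*
         * align_mask has the low align_order bits set: e.g.
         * align_order == 2 gives 0x3, so the "(n + align_mask) &
         * ~align_mask" step below rounds the candidate entry up to a
         * 4-page boundary.  align_order == 0 is expected to yield a
         * mask of 0, i.e. no alignment constraint.
         */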

        /* This allocator was derived from x86_64's bit string search */

        /* Sanity check */
        if (unlikely(npages == 0)) {
                if (printk_ratelimit())
                        WARN_ON(1);
                return DMA_ERROR_CODE;
        }

        if (handle && *handle)
                start = *handle;
        else
                start = largealloc ? tbl->it_largehint : tbl->it_hint;

        /* Use only the lower part of the table (below it_halfpoint)
         * for small allocs (15 pages or less) */
        limit = largealloc ? tbl->it_size : tbl->it_halfpoint;

        if (largealloc && start < tbl->it_halfpoint)
                start = tbl->it_halfpoint;

        /* The case below can happen if we have a small segment appended
         * to a large, or when the previous alloc was at the very end of
         * the available space. If so, go back to the initial start.
         */
        if (start >= limit)
                start = largealloc ? tbl->it_largehint : tbl->it_hint;

 again:

        if (limit + tbl->it_offset > mask) {
                limit = mask - tbl->it_offset + 1;
                /* If we're constrained on address range, first try
                 * at the masked hint to avoid O(n) search complexity,
                 * but on second pass, start at 0.
                 */
                if ((start & mask) >= limit || pass > 0)
                        start = 0;
                else
                        start &= mask;
        }

        n = find_next_zero_bit(tbl->it_map, limit, start);

        /* Align allocation */
        n = (n + align_mask) & ~align_mask;

        end = n + npages;

        if (unlikely(end >= limit)) {
                if (likely(pass < 2)) {
                        /* First failure, just rescan the half of the table.
                         * Second failure, rescan the other half of the table.
                         */
                        start = (largealloc ^ pass) ? tbl->it_halfpoint : 0;
                        limit = pass ? tbl->it_size : limit;
                        pass++;
                        goto again;
                } else {
                        /* Third failure, give up */
                        return DMA_ERROR_CODE;
                }
        }

        for (i = n; i < end; i++)
                if (test_bit(i, tbl->it_map)) {
                        start = i+1;
                        goto again;
                }

        for (i = n; i < end; i++)
                __set_bit(i, tbl->it_map);

        /* Bump the hint to a new block for small allocs. */
        if (largealloc) {
                /* Don't bump to new block to avoid fragmentation */
                tbl->it_largehint = end;
        } else {
                /* Overflow will be taken care of at the next allocation */
                tbl->it_hint = (end + tbl->it_blocksize - 1) &
                               ~(tbl->it_blocksize - 1);
        }

        /* Update handle for SG allocations */
        if (handle)
                *handle = end;

        return n;
}

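/*
 * A bitmap index maps to a bus address as (index + it_offset) <<
 * PAGE_SHIFT: with 4K pages and a table at it_offset 0, entry 0
 * covers bus addresses 0x0-0xfff, entry 1 covers 0x1000-0x1fff, and
 * so on.  iommu_alloc() below performs this conversion and programs
 * the hardware TCEs for the allocated range.
 */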
static dma_addr_t iommu_alloc(struct iommu_table *tbl, void *page,
                unsigned int npages, enum dma_data_direction direction,
                unsigned long mask, unsigned int align_order)
{
        unsigned long entry, flags;
        dma_addr_t ret = DMA_ERROR_CODE;

        spin_lock_irqsave(&(tbl->it_lock), flags);

        entry = iommu_range_alloc(tbl, npages, NULL, mask, align_order);

        if (unlikely(entry == DMA_ERROR_CODE)) {
                spin_unlock_irqrestore(&(tbl->it_lock), flags);
                return DMA_ERROR_CODE;
        }

        entry += tbl->it_offset;        /* Offset into real TCE table */
        ret = entry << PAGE_SHIFT;      /* Set the return dma address */

        /* Put the TCEs in the HW table */
        ppc_md.tce_build(tbl, entry, npages, (unsigned long)page & PAGE_MASK,
                         direction);

        /* Flush/invalidate TLB caches if necessary */
        if (ppc_md.tce_flush)
                ppc_md.tce_flush(tbl);

        spin_unlock_irqrestore(&(tbl->it_lock), flags);

        /* Make sure updates are seen by hardware */
        mb();

        return ret;
}

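/*
 * __iommu_free() expects tbl->it_lock to already be held; it is
 * called directly by iommu_map_sg()'s failure path and by
 * iommu_unmap_sg(), both of which already hold the lock.
 * iommu_free() below is the locking wrapper for everyone else.
 */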
static void __iommu_free(struct iommu_table *tbl, dma_addr_t dma_addr,
                         unsigned int npages)
{
        unsigned long entry, free_entry;
        unsigned long i;

        entry = dma_addr >> PAGE_SHIFT;
        free_entry = entry - tbl->it_offset;

        if (((free_entry + npages) > tbl->it_size) ||
            (entry < tbl->it_offset)) {
                if (printk_ratelimit()) {
                        printk(KERN_INFO "iommu_free: invalid entry\n");
                        printk(KERN_INFO "\tentry    = 0x%lx\n", entry);
                        printk(KERN_INFO "\tdma_addr = 0x%lx\n", (u64)dma_addr);
                        printk(KERN_INFO "\tTable    = 0x%lx\n", (u64)tbl);
                        printk(KERN_INFO "\tbus#     = 0x%lx\n", (u64)tbl->it_busno);
                        printk(KERN_INFO "\tsize     = 0x%lx\n", (u64)tbl->it_size);
                        printk(KERN_INFO "\tstartOff = 0x%lx\n", (u64)tbl->it_offset);
                        printk(KERN_INFO "\tindex    = 0x%lx\n", (u64)tbl->it_index);
                        WARN_ON(1);
                }
                return;
        }

        ppc_md.tce_free(tbl, entry, npages);

        for (i = 0; i < npages; i++)
                __clear_bit(free_entry+i, tbl->it_map);
}

static void iommu_free(struct iommu_table *tbl, dma_addr_t dma_addr,
                unsigned int npages)
{
        unsigned long flags;

        spin_lock_irqsave(&(tbl->it_lock), flags);

        __iommu_free(tbl, dma_addr, npages);

        /* Make sure TLB cache is flushed if the HW needs it. We do
         * not do an mb() here on purpose, it is not needed on any of
         * the current platforms.
         */
        if (ppc_md.tce_flush)
                ppc_md.tce_flush(tbl);

        spin_unlock_irqrestore(&(tbl->it_lock), flags);
}

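/*
 * Map a scatterlist.  When virtual merging is enabled, consecutive
 * entries whose IOMMU allocations come back bus-contiguous are
 * coalesced: e.g. two 4K pages scattered in physical memory but
 * mapped to adjacent TCE slots are handed back to the caller as one
 * 8K DMA segment, reducing the number of segments the device has to
 * process.
 */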
int iommu_map_sg(struct device *dev, struct iommu_table *tbl,
                struct scatterlist *sglist, int nelems,
                unsigned long mask, enum dma_data_direction direction)
{
        dma_addr_t dma_next = 0, dma_addr;
        unsigned long flags;
        struct scatterlist *s, *outs, *segstart;
        int outcount, incount;
        unsigned long handle;

        BUG_ON(direction == DMA_NONE);

        if ((nelems == 0) || !tbl)
                return 0;

        outs = s = segstart = &sglist[0];
        outcount = 1;
        incount = nelems;
        handle = 0;

        /* Init first segment length for backout at failure */
        outs->dma_length = 0;

        DBG("mapping %d elements:\n", nelems);

        spin_lock_irqsave(&(tbl->it_lock), flags);

        for (s = outs; nelems; nelems--, s++) {
                unsigned long vaddr, npages, entry, slen;

                slen = s->length;
                /* Sanity check */
                if (slen == 0) {
                        dma_next = 0;
                        continue;
                }
                /* Allocate iommu entries for that segment */
                vaddr = (unsigned long)page_address(s->page) + s->offset;
                npages = PAGE_ALIGN(vaddr + slen) - (vaddr & PAGE_MASK);
                npages >>= PAGE_SHIFT;
                entry = iommu_range_alloc(tbl, npages, &handle, mask >> PAGE_SHIFT, 0);

                DBG("  - vaddr: %lx, size: %lx\n", vaddr, slen);

                /* Handle failure */
                if (unlikely(entry == DMA_ERROR_CODE)) {
                        if (printk_ratelimit())
                                printk(KERN_INFO "iommu_alloc failed, tbl %p vaddr %lx"
                                       " npages %lx\n", tbl, vaddr, npages);
                        goto failure;
                }

                /* Convert entry to a dma_addr_t */
                entry += tbl->it_offset;
                dma_addr = entry << PAGE_SHIFT;
                dma_addr |= s->offset;

                DBG("  - %lx pages, entry: %lx, dma_addr: %lx\n",
                    npages, entry, dma_addr);

                /* Insert into HW table */
                ppc_md.tce_build(tbl, entry, npages, vaddr & PAGE_MASK, direction);

                /* If we are in an open segment, try merging */
                if (segstart != s) {
                        DBG("  - trying merge...\n");
                        /* We cannot merge if:
                         * - allocated dma_addr isn't contiguous to previous allocation
                         */
                        if (novmerge || (dma_addr != dma_next)) {
                                /* Can't merge: create a new segment */
                                segstart = s;
                                outcount++; outs++;
                                DBG("    can't merge, new segment.\n");
                        } else {
                                outs->dma_length += s->length;
                                DBG("    merged, new len: %lx\n", outs->dma_length);
                        }
                }

                if (segstart == s) {
                        /* This is a new segment, fill entries */
                        DBG("  - filling new segment.\n");
                        outs->dma_address = dma_addr;
                        outs->dma_length = slen;
                }

                /* Calculate next page pointer for contiguous check */
                dma_next = dma_addr + slen;

                DBG("  - dma next is: %lx\n", dma_next);
        }

        /* Flush/invalidate TLB caches if necessary */
        if (ppc_md.tce_flush)
                ppc_md.tce_flush(tbl);

        spin_unlock_irqrestore(&(tbl->it_lock), flags);

        DBG("mapped %d elements:\n", outcount);

        /* For the sake of iommu_unmap_sg, we clear out the length in the
         * next entry of the sglist if we didn't fill the list completely
         */
        if (outcount < incount) {
                outs++;
                outs->dma_address = DMA_ERROR_CODE;
                outs->dma_length = 0;
        }

        /* Make sure updates are seen by hardware */
        mb();

        return outcount;

 failure:
        for (s = &sglist[0]; s <= outs; s++) {
                if (s->dma_length != 0) {
                        unsigned long vaddr, npages;

                        vaddr = s->dma_address & PAGE_MASK;
                        npages = (PAGE_ALIGN(s->dma_address + s->dma_length) - vaddr)
                                >> PAGE_SHIFT;
                        __iommu_free(tbl, vaddr, npages);
                        s->dma_address = DMA_ERROR_CODE;
                        s->dma_length = 0;
                }
        }
        spin_unlock_irqrestore(&(tbl->it_lock), flags);
        return 0;
}

void iommu_unmap_sg(struct iommu_table *tbl, struct scatterlist *sglist,
                int nelems, enum dma_data_direction direction)
{
        unsigned long flags;

        BUG_ON(direction == DMA_NONE);

        if (!tbl)
                return;

        spin_lock_irqsave(&(tbl->it_lock), flags);

        while (nelems--) {
                unsigned int npages;
                dma_addr_t dma_handle = sglist->dma_address;

                if (sglist->dma_length == 0)
                        break;
                npages = (PAGE_ALIGN(dma_handle + sglist->dma_length)
                          - (dma_handle & PAGE_MASK)) >> PAGE_SHIFT;
                __iommu_free(tbl, dma_handle, npages);
                sglist++;
        }

        /* Flush/invalidate TLBs if necessary. As for iommu_free(), we
         * do not do an mb() here, the affected platforms do not need it
         * when freeing.
         */
        if (ppc_md.tce_flush)
                ppc_md.tce_flush(tbl);

        spin_unlock_irqrestore(&(tbl->it_lock), flags);
}

/*
 * Build an iommu_table structure.  This contains a bitmap which
 * is used to manage allocation of the tce space.
 */
struct iommu_table *iommu_init_table(struct iommu_table *tbl)
{
        unsigned long sz;
        static int welcomed = 0;

        /* Set aside 1/4 of the table for large allocations. */
        tbl->it_halfpoint = tbl->it_size * 3 / 4;

        /* number of bytes needed for the bitmap */
        sz = (tbl->it_size + 7) >> 3;

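        /*
         * Worked example, assuming 4K IOMMU pages: a 256MB DMA window
         * has it_size = 65536 TCE entries, so the bitmap needs
         * (65536 + 7) >> 3 = 8192 bytes, i.e. an order-1 (two page)
         * allocation below.
         */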
        tbl->it_map = (unsigned long *)__get_free_pages(GFP_ATOMIC, get_order(sz));
        if (!tbl->it_map)
                panic("iommu_init_table: Can't allocate %ld bytes\n", sz);

        memset(tbl->it_map, 0, sz);

        tbl->it_hint = 0;
        tbl->it_largehint = tbl->it_halfpoint;
        spin_lock_init(&tbl->it_lock);

        /* Clear the hardware table in case firmware left allocations in it */
        ppc_md.tce_free(tbl, tbl->it_offset, tbl->it_size);

        if (!welcomed) {
                printk(KERN_INFO "IOMMU table initialized, virtual merging %s\n",
                       novmerge ? "disabled" : "enabled");
                welcomed = 1;
        }

        return tbl;
}

void iommu_free_table(struct device_node *dn)
{
        struct pci_dn *pdn = dn->data;
        struct iommu_table *tbl = pdn->iommu_table;
        unsigned long bitmap_sz, i;
        unsigned int order;

        if (!tbl || !tbl->it_map) {
                printk(KERN_ERR "%s: expected TCE map for %s\n", __FUNCTION__,
                       dn->full_name);
                return;
        }

        /* verify that table contains no entries */
        /* it_size is in entries, and we're examining 64 at a time */
        for (i = 0; i < (tbl->it_size/64); i++) {
                if (tbl->it_map[i] != 0) {
                        printk(KERN_WARNING "%s: Unexpected TCEs for %s\n",
                               __FUNCTION__, dn->full_name);
                        break;
                }
        }

        /* calculate bitmap size in bytes */
        bitmap_sz = (tbl->it_size + 7) / 8;

        /* free bitmap */
        order = get_order(bitmap_sz);
        free_pages((unsigned long) tbl->it_map, order);

        /* free table */
        kfree(tbl);
}

/* Creates TCEs for a user provided buffer. The user buffer must be
 * contiguous real kernel storage (not vmalloc). The address of the buffer
 * passed here is the kernel (virtual) address of the buffer. The buffer
 * need not be page aligned, the dma_addr_t returned will point to the same
 * byte within the page as vaddr.
 */
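/*
 * Hypothetical usage sketch (the driver-side names here are
 * illustrative, not part of this file): map a kmalloc'ed buffer,
 * check for failure, and unmap when the DMA has completed:
 *
 *      dma_addr_t bus = iommu_map_single(tbl, buf, len, dma_mask,
 *                                        DMA_TO_DEVICE);
 *      if (bus == DMA_ERROR_CODE)
 *              goto map_failed;
 *      ...hand "bus" to the device, wait for DMA to finish...
 *      iommu_unmap_single(tbl, bus, len, DMA_TO_DEVICE);
 */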
dma_addr_t iommu_map_single(struct iommu_table *tbl, void *vaddr,
                size_t size, unsigned long mask,
                enum dma_data_direction direction)
{
        dma_addr_t dma_handle = DMA_ERROR_CODE;
        unsigned long uaddr;
        unsigned int npages;

        BUG_ON(direction == DMA_NONE);

        uaddr = (unsigned long)vaddr;
        npages = PAGE_ALIGN(uaddr + size) - (uaddr & PAGE_MASK);
        npages >>= PAGE_SHIFT;

        if (tbl) {
                dma_handle = iommu_alloc(tbl, vaddr, npages, direction,
                                         mask >> PAGE_SHIFT, 0);
                if (dma_handle == DMA_ERROR_CODE) {
                        if (printk_ratelimit()) {
                                printk(KERN_INFO "iommu_alloc failed, "
                                       "tbl %p vaddr %p npages %d\n",
                                       tbl, vaddr, npages);
                        }
                } else
                        dma_handle |= (uaddr & ~PAGE_MASK);
        }

        return dma_handle;
}

void iommu_unmap_single(struct iommu_table *tbl, dma_addr_t dma_handle,
                size_t size, enum dma_data_direction direction)
{
        BUG_ON(direction == DMA_NONE);

        if (tbl)
                iommu_free(tbl, dma_handle, (PAGE_ALIGN(dma_handle + size) -
                                        (dma_handle & PAGE_MASK)) >> PAGE_SHIFT);
}

/* Allocates a contiguous real buffer and creates mappings over it.
 * Returns the virtual address of the buffer and sets dma_handle
 * to the dma address (mapping) of the first page.
 */
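/*
 * Illustrative pairing (caller-side, assumed rather than taken from
 * this file): allocate a zeroed descriptor ring the device can see,
 * then release the mapping and the pages together:
 *
 *      void *ring = iommu_alloc_coherent(tbl, ring_bytes, &ring_bus,
 *                                        dma_mask, GFP_KERNEL);
 *      ...
 *      iommu_free_coherent(tbl, ring_bytes, ring, ring_bus);
 */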
void *iommu_alloc_coherent(struct iommu_table *tbl, size_t size,
                dma_addr_t *dma_handle, unsigned long mask, gfp_t flag)
{
        void *ret = NULL;
        dma_addr_t mapping;
        unsigned int npages, order;

        size = PAGE_ALIGN(size);
        npages = size >> PAGE_SHIFT;
        order = get_order(size);

        /*
         * Client asked for way too much space.  This is checked later
         * anyway.  It is easier to debug here for the drivers than in
         * the tce tables.
         */
        if (order >= IOMAP_MAX_ORDER) {
                printk(KERN_WARNING "iommu_alloc_coherent size too large: 0x%lx\n",
                       size);
                return NULL;
        }

        if (!tbl)
                return NULL;

        /* Alloc enough pages (and possibly more) */
        ret = (void *)__get_free_pages(flag, order);
        if (!ret)
                return NULL;
        memset(ret, 0, size);

        /* Set up tces to cover the allocated range */
        mapping = iommu_alloc(tbl, ret, npages, DMA_BIDIRECTIONAL,
                              mask >> PAGE_SHIFT, order);
        if (mapping == DMA_ERROR_CODE) {
                free_pages((unsigned long)ret, order);
                ret = NULL;
        } else
                *dma_handle = mapping;
        return ret;
}

void iommu_free_coherent(struct iommu_table *tbl, size_t size,
                         void *vaddr, dma_addr_t dma_handle)
{
        unsigned int npages;

        if (tbl) {
                size = PAGE_ALIGN(size);
                npages = size >> PAGE_SHIFT;
                iommu_free(tbl, dma_handle, npages);
                free_pages((unsigned long)vaddr, get_order(size));
        }
}