/*
 * omap iommu: simple virtual address space management
 *
 * Copyright (C) 2008-2009 Nokia Corporation
 *
 * Written by Hiroshi DOYU <Hiroshi.DOYU@nokia.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/err.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/device.h>
#include <linux/scatterlist.h>

#include <asm/cacheflush.h>
#include <asm/mach/map.h>

#include <plat/iommu.h>
#include <plat/iovmm.h>

#include "iopgtable.h"

/*
 * A device driver needs to create address mappings between:
 *
 * - iommu/device address
 * - physical address
 * - mpu virtual address
 *
 * There are 4 possible patterns for them:
 *
 *    |iova/			mapping		iommu_		page
 *    | da	pa	va	(d)-(p)-(v)	function	type
 *  ---------------------------------------------------------------------------
 *  1 | c	c	c	 1 - 1 - 1	_kmap() / _kunmap()	s
 *  2 | c	c,a	c	 1 - 1 - 1	_kmalloc()/ _kfree()	s
 *  3 | c	d	c	 1 - n - 1	_vmap() / _vunmap()	s
 *  4 | c	d,a	c	 1 - n - 1	_vmalloc()/ _vfree()	n*
 *
 *
 *	'iova':	device iommu virtual address
 *	'da':	alias of 'iova'
 *	'pa':	physical address
 *	'va':	mpu virtual address
 *
 *	'c':	contiguous memory area
 *	'd':	discontiguous memory area
 *	'a':	anonymous memory allocation
 *	'()':	optional feature
 *
 *	'n':	a normal page (4KB) size is used.
 *	's':	multiple iommu superpage sizes (16MB, 1MB, 64KB, 4KB) are used.
 *
 *	'*':	not yet, but feasible.
 */
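
/*
 * Illustrative sketch (not part of the original file): pattern 2 versus
 * pattern 4 from the table above might look like this in a driver.
 * 'obj' is assumed to come from iommu_get() elsewhere; a zero 'da' with
 * IOVMF_DA_FIXED left unset lets the allocator pick the device address:
 *
 *	u32 da;
 *
 *	da = iommu_kmalloc(obj, 0, SZ_64K, 0);	// 1-1-1: phys contiguous
 *	...
 *	iommu_kfree(obj, da);
 *
 *	da = iommu_vmalloc(obj, 0, SZ_1M, 0);	// 1-n-1: vmalloc backed
 *	...
 *	iommu_vfree(obj, da);
 */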

static struct kmem_cache *iovm_area_cachep;

/* return total bytes of sg buffers */
static size_t sgtable_len(const struct sg_table *sgt)
{
	unsigned int i, total = 0;
	struct scatterlist *sg;

	if (!sgt)
		return 0;

	for_each_sg(sgt->sgl, sg, sgt->nents, i) {
		size_t bytes;

		bytes = sg->length;

		if (!iopgsz_ok(bytes)) {
			pr_err("%s: sg[%d] not iommu pagesize(%x)\n",
			       __func__, i, bytes);
			return 0;
		}

		total += bytes;
	}

	return total;
}
#define sgtable_ok(x)	(!!sgtable_len(x))

static unsigned max_alignment(u32 addr)
{
	int i;
	unsigned pagesize[] = { SZ_16M, SZ_1M, SZ_64K, SZ_4K, };
	for (i = 0; i < ARRAY_SIZE(pagesize) && addr & (pagesize[i] - 1); i++)
		;
	return (i < ARRAY_SIZE(pagesize)) ? pagesize[i] : 0;
}
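
/*
 * Worked example (illustrative, not part of the original file):
 * max_alignment(0x40300000) fails the 16MB test (low 24 bits non-zero)
 * but passes the 1MB test, so it returns SZ_1M; max_alignment(0x40310000)
 * first passes at 64KB and returns SZ_64K; an address like 0x40300004 is
 * not even 4KB aligned, so the loop runs off the table and 0 comes back.
 */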

/*
 * calculate the optimal number of sg elements from total bytes based on
 * iommu superpages
 */
static unsigned sgtable_nents(size_t bytes, u32 da, u32 pa)
{
	unsigned nr_entries = 0, ent_sz;

	if (!IS_ALIGNED(bytes, PAGE_SIZE)) {
		pr_err("%s: wrong size %08x\n", __func__, bytes);
		return 0;
	}

	while (bytes) {
		ent_sz = max_alignment(da | pa);
		ent_sz = min_t(unsigned, ent_sz, iopgsz_max(bytes));
		nr_entries++;
		da += ent_sz;
		pa += ent_sz;
		bytes -= ent_sz;
	}

	return nr_entries;
}
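
/*
 * Worked example (illustrative, not part of the original file): for
 * bytes = 0x111000 (1MB + 64KB + 4KB) with da = pa = 0x100000, the loop
 * picks a 1MB entry first (da|pa is 1MB aligned and iopgsz_max() allows
 * it), then a 64KB entry at 0x200000, then a 4KB entry at 0x210000, so
 * sgtable_nents() returns 3 instead of the 273 entries a plain 4KB
 * split would need.
 */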

/* allocate and initialize sg_table header (a kind of 'superblock') */
static struct sg_table *sgtable_alloc(const size_t bytes, u32 flags,
				      u32 da, u32 pa)
{
	unsigned int nr_entries;
	int err;
	struct sg_table *sgt;

	if (!bytes)
		return ERR_PTR(-EINVAL);

	if (!IS_ALIGNED(bytes, PAGE_SIZE))
		return ERR_PTR(-EINVAL);

	if (flags & IOVMF_LINEAR) {
		nr_entries = sgtable_nents(bytes, da, pa);
		if (!nr_entries)
			return ERR_PTR(-EINVAL);
	} else
		nr_entries = bytes / PAGE_SIZE;

	sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
	if (!sgt)
		return ERR_PTR(-ENOMEM);

	err = sg_alloc_table(sgt, nr_entries, GFP_KERNEL);
	if (err) {
		kfree(sgt);
		return ERR_PTR(err);
	}

	pr_debug("%s: sgt:%p(%d entries)\n", __func__, sgt, nr_entries);

	return sgt;
}
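
/*
 * Note (added for clarity): only IOVMF_LINEAR regions can exploit
 * superpages, since only a physically contiguous buffer can satisfy the
 * "da|pa aligned to the page size" rule in sgtable_nents(); a
 * discontiguous (vmalloc) buffer always gets one 4KB entry per page.
 */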

/* free sg_table header (a kind of superblock) */
static void sgtable_free(struct sg_table *sgt)
{
	if (!sgt)
		return;

	sg_free_table(sgt);
	kfree(sgt);

	pr_debug("%s: sgt:%p\n", __func__, sgt);
}

/* map 'sglist' to a contiguous mpu virtual area and return 'va' */
static void *vmap_sg(const struct sg_table *sgt)
{
	u32 va;
	size_t total;
	unsigned int i;
	struct scatterlist *sg;
	struct vm_struct *new;
	const struct mem_type *mtype;

	mtype = get_mem_type(MT_DEVICE);
	if (!mtype)
		return ERR_PTR(-EINVAL);

	total = sgtable_len(sgt);
	if (!total)
		return ERR_PTR(-EINVAL);

	new = __get_vm_area(total, VM_IOREMAP, VMALLOC_START, VMALLOC_END);
	if (!new)
		return ERR_PTR(-ENOMEM);
	va = (u32)new->addr;

	for_each_sg(sgt->sgl, sg, sgt->nents, i) {
		size_t bytes;
		u32 pa;
		int err;

		pa = sg_phys(sg);
		bytes = sg->length;

		BUG_ON(bytes != PAGE_SIZE);

		err = ioremap_page(va, pa, mtype);
		if (err)
			goto err_out;

		va += bytes;
	}

	flush_cache_vmap((unsigned long)new->addr,
				(unsigned long)(new->addr + total));
	return new->addr;

err_out:
	WARN_ON(1); /* FIXME: cleanup some mpu mappings */
	vunmap(new->addr);
	return ERR_PTR(-EAGAIN);
}

static inline void vunmap_sg(const void *va)
{
	vunmap(va);
}

static struct iovm_struct *__find_iovm_area(struct iommu *obj, const u32 da)
{
	struct iovm_struct *tmp;

	list_for_each_entry(tmp, &obj->mmap, list) {
		if ((da >= tmp->da_start) && (da < tmp->da_end)) {
			size_t len;

			len = tmp->da_end - tmp->da_start;

			dev_dbg(obj->dev, "%s: %08x-%08x-%08x(%x) %08x\n",
				__func__, tmp->da_start, da, tmp->da_end, len,
				tmp->flags);

			return tmp;
		}
	}

	return NULL;
}

/**
 * find_iovm_area  -  find iovma which includes @da
 * @obj:	objective iommu
 * @da:	iommu device virtual address
 *
 * Find the existing iovma that contains @da
 */
struct iovm_struct *find_iovm_area(struct iommu *obj, u32 da)
{
	struct iovm_struct *area;

	mutex_lock(&obj->mmap_lock);
	area = __find_iovm_area(obj, da);
	mutex_unlock(&obj->mmap_lock);

	return area;
}
EXPORT_SYMBOL_GPL(find_iovm_area);

/*
 * Finds a hole (free area) that fits the requested address and length
 * in the iovma list, and returns the newly allocated iovma.
 */
static struct iovm_struct *alloc_iovm_area(struct iommu *obj, u32 da,
					   size_t bytes, u32 flags)
{
	struct iovm_struct *new, *tmp;
	u32 start, prev_end, alignment;

	if (!obj || !bytes)
		return ERR_PTR(-EINVAL);

	start = da;
	alignment = PAGE_SIZE;

	if (~flags & IOVMF_DA_FIXED) {
		/* Don't map address 0 */
		start = obj->da_start ? obj->da_start : alignment;

		if (flags & IOVMF_LINEAR)
			alignment = iopgsz_max(bytes);
		start = roundup(start, alignment);
	} else if (start < obj->da_start || start > obj->da_end ||
					obj->da_end - start < bytes) {
		return ERR_PTR(-EINVAL);
	}

	tmp = NULL;
	if (list_empty(&obj->mmap))
		goto found;

	prev_end = 0;
	list_for_each_entry(tmp, &obj->mmap, list) {

		if (prev_end > start)
			break;

		if (tmp->da_start > start && (tmp->da_start - start) >= bytes)
			goto found;

		if (tmp->da_end >= start && ~flags & IOVMF_DA_FIXED)
			start = roundup(tmp->da_end + 1, alignment);

		prev_end = tmp->da_end;
	}

	if ((start >= prev_end) && (obj->da_end - start >= bytes))
		goto found;

	dev_dbg(obj->dev, "%s: no space to fit %08x(%x) flags: %08x\n",
		__func__, da, bytes, flags);

	return ERR_PTR(-EINVAL);

found:
	new = kmem_cache_zalloc(iovm_area_cachep, GFP_KERNEL);
	if (!new)
		return ERR_PTR(-ENOMEM);

	new->iommu = obj;
	new->da_start = start;
	new->da_end = start + bytes;
	new->flags = flags;

	/*
	 * keep ascending order of iovmas
	 */
	if (tmp)
		list_add_tail(&new->list, &tmp->list);
	else
		list_add(&new->list, &obj->mmap);

	dev_dbg(obj->dev, "%s: found %08x-%08x-%08x(%x) %08x\n",
		__func__, new->da_start, start, new->da_end, bytes, flags);

	return new;
}
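
/*
 * Worked example (illustrative, not part of the original file): with
 * existing iovmas [0x1000,0x3000) and [0x8000,0x9000), da_start = 0 and
 * IOVMF_DA_FIXED unset, a 0x2000-byte request starts at PAGE_SIZE, gets
 * pushed past the first area to 0x4000, and is placed there because the
 * hole before 0x8000 is large enough: first-fit in ascending order.
 */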

static void free_iovm_area(struct iommu *obj, struct iovm_struct *area)
{
	size_t bytes;

	BUG_ON(!obj || !area);

	bytes = area->da_end - area->da_start;

	dev_dbg(obj->dev, "%s: %08x-%08x(%x) %08x\n",
		__func__, area->da_start, area->da_end, bytes, area->flags);

	list_del(&area->list);
	kmem_cache_free(iovm_area_cachep, area);
}

/**
 * da_to_va - convert (d) to (v)
 * @obj:	objective iommu
 * @da:	iommu device virtual address
 *
 * Returns the mpu virtual addr which corresponds to a given
 * device virtual addr
 */
void *da_to_va(struct iommu *obj, u32 da)
{
	void *va = NULL;
	struct iovm_struct *area;

	mutex_lock(&obj->mmap_lock);

	area = __find_iovm_area(obj, da);
	if (!area) {
		dev_dbg(obj->dev, "%s: no da area(%08x)\n", __func__, da);
		goto out;
	}
	va = area->va;
out:
	mutex_unlock(&obj->mmap_lock);

	return va;
}
EXPORT_SYMBOL_GPL(da_to_va);

static void sgtable_fill_vmalloc(struct sg_table *sgt, void *_va)
{
	unsigned int i;
	struct scatterlist *sg;
	void *va = _va;

	for_each_sg(sgt->sgl, sg, sgt->nents, i) {
		struct page *pg;
		const size_t bytes = PAGE_SIZE;

		/*
		 * iommu 'superpage' isn't supported with 'iommu_vmalloc()'
		 */
		pg = vmalloc_to_page(va);
		BUG_ON(!pg);
		sg_set_page(sg, pg, bytes, 0);

		va += bytes;
	}
}

static inline void sgtable_drain_vmalloc(struct sg_table *sgt)
{
	/*
	 * This is not strictly necessary; it exists only for readability
	 * and for symmetry with sgtable_fill_vmalloc().
	 */
	BUG_ON(!sgt);
}

static void sgtable_fill_kmalloc(struct sg_table *sgt, u32 pa, u32 da,
								size_t len)
{
	unsigned int i;
	struct scatterlist *sg;

	for_each_sg(sgt->sgl, sg, sgt->nents, i) {
		unsigned bytes;

		bytes = max_alignment(da | pa);
		bytes = min_t(unsigned, bytes, iopgsz_max(len));

		BUG_ON(!iopgsz_ok(bytes));

		sg_set_buf(sg, phys_to_virt(pa), bytes);
		/*
		 * 'pa' is contiguous (linear).
		 */
		pa += bytes;
		da += bytes;
		len -= bytes;
	}
	BUG_ON(len);
}

static inline void sgtable_drain_kmalloc(struct sg_table *sgt)
{
	/*
	 * This is not strictly necessary; it exists only for readability
	 * and for symmetry with sgtable_fill_kmalloc().
	 */
	BUG_ON(!sgt);
}

/* create 'da' <-> 'pa' mapping from 'sgt' */
static int map_iovm_area(struct iommu *obj, struct iovm_struct *new,
			 const struct sg_table *sgt, u32 flags)
{
	int err;
	unsigned int i, j;
	struct scatterlist *sg;
	u32 da = new->da_start;

	if (!obj || !sgt)
		return -EINVAL;

	BUG_ON(!sgtable_ok(sgt));

	for_each_sg(sgt->sgl, sg, sgt->nents, i) {
		u32 pa;
		int pgsz;
		size_t bytes;
		struct iotlb_entry e;

		pa = sg_phys(sg);
		bytes = sg->length;

		flags &= ~IOVMF_PGSZ_MASK;
		pgsz = bytes_to_iopgsz(bytes);
		if (pgsz < 0) {
			err = -EINVAL;
			goto err_out;
		}
		flags |= pgsz;

		pr_debug("%s: [%d] %08x %08x(%x)\n", __func__,
			 i, da, pa, bytes);

		iotlb_init_entry(&e, da, pa, flags);
		err = iopgtable_store_entry(obj, &e);
		if (err)
			goto err_out;

		da += bytes;
	}
	return 0;

err_out:
	da = new->da_start;

	/* roll back only the entries mapped so far */
	for_each_sg(sgt->sgl, sg, i, j) {
		size_t bytes;

		bytes = iopgtable_clear_entry(obj, da);

		BUG_ON(!iopgsz_ok(bytes));

		da += bytes;
	}
	return err;
}

/* release 'da' <-> 'pa' mapping */
static void unmap_iovm_area(struct iommu *obj, struct iovm_struct *area)
{
	u32 start;
	size_t total = area->da_end - area->da_start;

	BUG_ON((!total) || !IS_ALIGNED(total, PAGE_SIZE));

	start = area->da_start;
	while (total > 0) {
		size_t bytes;

		bytes = iopgtable_clear_entry(obj, start);
		if (bytes == 0)
			bytes = PAGE_SIZE;
		else
			dev_dbg(obj->dev, "%s: unmap %08x(%x) %08x\n",
				__func__, start, bytes, area->flags);

		BUG_ON(!IS_ALIGNED(bytes, PAGE_SIZE));

		total -= bytes;
		start += bytes;
	}
	BUG_ON(total);
}

/* template function for all unmapping */
static struct sg_table *unmap_vm_area(struct iommu *obj, const u32 da,
				      void (*fn)(const void *), u32 flags)
{
	struct sg_table *sgt = NULL;
	struct iovm_struct *area;

	if (!IS_ALIGNED(da, PAGE_SIZE)) {
		dev_err(obj->dev, "%s: alignment err(%08x)\n", __func__, da);
		return NULL;
	}

	mutex_lock(&obj->mmap_lock);

	area = __find_iovm_area(obj, da);
	if (!area) {
		dev_dbg(obj->dev, "%s: no da area(%08x)\n", __func__, da);
		goto out;
	}

	if ((area->flags & flags) != flags) {
		dev_err(obj->dev, "%s: wrong flags(%08x)\n", __func__,
			area->flags);
		goto out;
	}
	sgt = (struct sg_table *)area->sgt;

	unmap_iovm_area(obj, area);

	fn(area->va);

	dev_dbg(obj->dev, "%s: %08x-%08x-%08x(%x) %08x\n", __func__,
		area->da_start, da, area->da_end,
		area->da_end - area->da_start, area->flags);

	free_iovm_area(obj, area);
out:
	mutex_unlock(&obj->mmap_lock);

	return sgt;
}
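
/*
 * Note (added for clarity): 'fn' is whichever MPU-side release routine
 * matches how the area was created (vunmap_sg() for iommu_vmap(),
 * vfree() for iommu_vmalloc(), iounmap() for iommu_kmap(), kfree() for
 * iommu_kmalloc()), so this one template serves all four unmap paths.
 */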

static u32 map_iommu_region(struct iommu *obj, u32 da,
	      const struct sg_table *sgt, void *va, size_t bytes, u32 flags)
{
	int err = -ENOMEM;
	struct iovm_struct *new;

	mutex_lock(&obj->mmap_lock);

	new = alloc_iovm_area(obj, da, bytes, flags);
	if (IS_ERR(new)) {
		err = PTR_ERR(new);
		goto err_alloc_iovma;
	}
	new->va = va;
	new->sgt = sgt;

	if (map_iovm_area(obj, new, sgt, new->flags))
		goto err_map;

	mutex_unlock(&obj->mmap_lock);

	dev_dbg(obj->dev, "%s: da:%08x(%x) flags:%08x va:%p\n",
		__func__, new->da_start, bytes, new->flags, va);

	return new->da_start;

err_map:
	free_iovm_area(obj, new);
err_alloc_iovma:
	mutex_unlock(&obj->mmap_lock);
	return err;
}

static inline u32 __iommu_vmap(struct iommu *obj, u32 da,
		 const struct sg_table *sgt, void *va, size_t bytes, u32 flags)
{
	return map_iommu_region(obj, da, sgt, va, bytes, flags);
}

/**
 * iommu_vmap  -  (d)-(p)-(v) address mapper
 * @obj:	objective iommu
 * @da:	contiguous iommu virtual memory
 * @sgt:	address of scatter gather table
 * @flags:	iovma and page property
 *
 * Creates 1-n-1 mapping with given @sgt and returns @da.
 * All @sgt elements must be io page size aligned.
 */
u32 iommu_vmap(struct iommu *obj, u32 da, const struct sg_table *sgt,
	       u32 flags)
{
	size_t bytes;
	void *va = NULL;

	if (!obj || !obj->dev || !sgt)
		return -EINVAL;

	bytes = sgtable_len(sgt);
	if (!bytes)
		return -EINVAL;
	bytes = PAGE_ALIGN(bytes);

	if (flags & IOVMF_MMIO) {
		va = vmap_sg(sgt);
		if (IS_ERR(va))
			return PTR_ERR(va);
	}

	flags |= IOVMF_DISCONT;
	flags |= IOVMF_MMIO;

	da = __iommu_vmap(obj, da, sgt, va, bytes, flags);
	if (IS_ERR_VALUE(da))
		vunmap_sg(va);

	return da;
}
EXPORT_SYMBOL_GPL(iommu_vmap);
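
/*
 * Illustrative sketch (not part of the original file): a driver that
 * already owns a page-sized sg_table 'sgt' could wire it into the iommu
 * like this; 'obj' is assumed to come from iommu_get():
 *
 *	u32 da = iommu_vmap(obj, 0, sgt, IOVMF_MMIO);
 *	if (IS_ERR_VALUE(da))
 *		return (int)da;
 *	...
 *	sgt = iommu_vunmap(obj, da);	// same table handed back to free
 */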

/**
 * iommu_vunmap  -  release virtual mapping obtained by 'iommu_vmap()'
 * @obj:	objective iommu
 * @da:	iommu device virtual address
 *
 * Free the iommu virtually contiguous memory area starting at
 * @da, which was returned by 'iommu_vmap()'.
 */
struct sg_table *iommu_vunmap(struct iommu *obj, u32 da)
{
	struct sg_table *sgt;
	/*
	 * 'sgt' is allocated before 'iommu_vmap()' is called.
	 * Just returns 'sgt' to the caller to free
	 */
	sgt = unmap_vm_area(obj, da, vunmap_sg, IOVMF_DISCONT | IOVMF_MMIO);
	if (!sgt)
		dev_dbg(obj->dev, "%s: No sgt\n", __func__);
	return sgt;
}
EXPORT_SYMBOL_GPL(iommu_vunmap);

/**
 * iommu_vmalloc  -  (d)-(p)-(v) address allocator and mapper
 * @obj:	objective iommu
 * @da:	contiguous iommu virtual memory
 * @bytes:	allocation size
 * @flags:	iovma and page property
 *
 * Allocates @bytes linearly, creates a 1-n-1 mapping and returns
 * @da, which might be adjusted if 'IOVMF_DA_FIXED' is not set.
 */
u32 iommu_vmalloc(struct iommu *obj, u32 da, size_t bytes, u32 flags)
{
	void *va;
	struct sg_table *sgt;

	if (!obj || !obj->dev || !bytes)
		return -EINVAL;

	bytes = PAGE_ALIGN(bytes);

	va = vmalloc(bytes);
	if (!va)
		return -ENOMEM;

	flags |= IOVMF_DISCONT;
	flags |= IOVMF_ALLOC;

	sgt = sgtable_alloc(bytes, flags, da, 0);
	if (IS_ERR(sgt)) {
		da = PTR_ERR(sgt);
		goto err_sgt_alloc;
	}
	sgtable_fill_vmalloc(sgt, va);

	da = __iommu_vmap(obj, da, sgt, va, bytes, flags);
	if (IS_ERR_VALUE(da))
		goto err_iommu_vmap;

	return da;

err_iommu_vmap:
	sgtable_drain_vmalloc(sgt);
	sgtable_free(sgt);
err_sgt_alloc:
	vfree(va);
	return da;
}
EXPORT_SYMBOL_GPL(iommu_vmalloc);
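
/*
 * Illustrative sketch (not part of the original file): allocating a 1MB
 * iommu-visible work buffer whose device address is chosen by the
 * allocator (IOVMF_DA_FIXED left unset), then releasing it:
 *
 *	u32 da = iommu_vmalloc(obj, 0, SZ_1M, 0);
 *	if (IS_ERR_VALUE(da))
 *		return (int)da;
 *	...
 *	iommu_vfree(obj, da);
 */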

/**
 * iommu_vfree  -  release memory allocated by 'iommu_vmalloc()'
 * @obj:	objective iommu
 * @da:	iommu device virtual address
 *
 * Frees the iommu virtually contiguous memory area starting at
 * @da, as obtained from 'iommu_vmalloc()'.
 */
void iommu_vfree(struct iommu *obj, const u32 da)
{
	struct sg_table *sgt;

	sgt = unmap_vm_area(obj, da, vfree, IOVMF_DISCONT | IOVMF_ALLOC);
	if (!sgt)
		dev_dbg(obj->dev, "%s: No sgt\n", __func__);
	sgtable_free(sgt);
}
EXPORT_SYMBOL_GPL(iommu_vfree);

static u32 __iommu_kmap(struct iommu *obj, u32 da, u32 pa, void *va,
			size_t bytes, u32 flags)
{
	struct sg_table *sgt;

	sgt = sgtable_alloc(bytes, flags, da, pa);
	if (IS_ERR(sgt))
		return PTR_ERR(sgt);

	sgtable_fill_kmalloc(sgt, pa, da, bytes);

	da = map_iommu_region(obj, da, sgt, va, bytes, flags);
	if (IS_ERR_VALUE(da)) {
		sgtable_drain_kmalloc(sgt);
		sgtable_free(sgt);
	}

	return da;
}

/**
 * iommu_kmap  -  (d)-(p)-(v) address mapper
 * @obj:	objective iommu
 * @da:	contiguous iommu virtual memory
 * @pa:	contiguous physical memory
 * @bytes:	mapping size
 * @flags:	iovma and page property
 *
 * Creates a 1-1-1 mapping and returns @da, which can be
 * adjusted if 'IOVMF_DA_FIXED' is not set.
 */
u32 iommu_kmap(struct iommu *obj, u32 da, u32 pa, size_t bytes,
	       u32 flags)
{
	void *va;

	if (!obj || !obj->dev || !bytes)
		return -EINVAL;

	bytes = PAGE_ALIGN(bytes);

	va = ioremap(pa, bytes);
	if (!va)
		return -ENOMEM;

	flags |= IOVMF_LINEAR;
	flags |= IOVMF_MMIO;

	da = __iommu_kmap(obj, da, pa, va, bytes, flags);
	if (IS_ERR_VALUE(da))
		iounmap(va);

	return da;
}
EXPORT_SYMBOL_GPL(iommu_kmap);
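
/*
 * Illustrative sketch (not part of the original file): exposing a
 * physically contiguous region (0x88000000 here is a made-up address)
 * to the device at a fixed device address:
 *
 *	u32 da = iommu_kmap(obj, 0x20000000, 0x88000000, SZ_1M,
 *			    IOVMF_DA_FIXED);
 *	if (IS_ERR_VALUE(da))
 *		return (int)da;
 *	...
 *	iommu_kunmap(obj, da);
 */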

/**
 * iommu_kunmap  -  release virtual mapping obtained by 'iommu_kmap()'
 * @obj:	objective iommu
 * @da:	iommu device virtual address
 *
 * Frees the iommu virtually contiguous memory area starting at
 * @da, which was passed to and was returned by 'iommu_kmap()'.
 */
void iommu_kunmap(struct iommu *obj, u32 da)
{
	struct sg_table *sgt;
	typedef void (*func_t)(const void *);

	sgt = unmap_vm_area(obj, da, (func_t)iounmap,
			    IOVMF_LINEAR | IOVMF_MMIO);
	if (!sgt)
		dev_dbg(obj->dev, "%s: No sgt\n", __func__);
	sgtable_free(sgt);
}
EXPORT_SYMBOL_GPL(iommu_kunmap);

/**
 * iommu_kmalloc  -  (d)-(p)-(v) address allocator and mapper
 * @obj:	objective iommu
 * @da:	contiguous iommu virtual memory
 * @bytes:	bytes for allocation
 * @flags:	iovma and page property
 *
 * Allocates @bytes linearly, creates a 1-1-1 mapping and returns
 * @da, which might be adjusted if 'IOVMF_DA_FIXED' is not set.
 */
u32 iommu_kmalloc(struct iommu *obj, u32 da, size_t bytes, u32 flags)
{
	void *va;
	u32 pa;

	if (!obj || !obj->dev || !bytes)
		return -EINVAL;

	bytes = PAGE_ALIGN(bytes);

	va = kmalloc(bytes, GFP_KERNEL | GFP_DMA);
	if (!va)
		return -ENOMEM;
	pa = virt_to_phys(va);

	flags |= IOVMF_LINEAR;
	flags |= IOVMF_ALLOC;

	da = __iommu_kmap(obj, da, pa, va, bytes, flags);
	if (IS_ERR_VALUE(da))
		kfree(va);

	return da;
}
EXPORT_SYMBOL_GPL(iommu_kmalloc);
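
/*
 * Note (added for clarity): because kmalloc() memory is physically
 * contiguous, the IOVMF_LINEAR path above can use iommu superpages; a
 * 64KB allocation that happens to be 64KB aligned maps as one 64KB
 * entry instead of sixteen 4KB ones, cutting device-side TLB pressure.
 */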

/**
 * iommu_kfree  -  release memory allocated by 'iommu_kmalloc()'
 * @obj:	objective iommu
 * @da:	iommu device virtual address
 *
 * Frees the iommu virtually contiguous memory area starting at
 * @da, which was passed to and was returned by 'iommu_kmalloc()'.
 */
void iommu_kfree(struct iommu *obj, u32 da)
{
	struct sg_table *sgt;

	sgt = unmap_vm_area(obj, da, kfree, IOVMF_LINEAR | IOVMF_ALLOC);
	if (!sgt)
		dev_dbg(obj->dev, "%s: No sgt\n", __func__);
	sgtable_free(sgt);
}
EXPORT_SYMBOL_GPL(iommu_kfree);


static int __init iovmm_init(void)
{
	const unsigned long flags = SLAB_HWCACHE_ALIGN;
	struct kmem_cache *p;

	p = kmem_cache_create("iovm_area_cache", sizeof(struct iovm_struct), 0,
			      flags, NULL);
	if (!p)
		return -ENOMEM;
	iovm_area_cachep = p;

	return 0;
}
module_init(iovmm_init);

static void __exit iovmm_exit(void)
{
	kmem_cache_destroy(iovm_area_cachep);
}
module_exit(iovmm_exit);

MODULE_DESCRIPTION("omap iommu: simple virtual address space management");
MODULE_AUTHOR("Hiroshi DOYU <Hiroshi.DOYU@nokia.com>");
MODULE_LICENSE("GPL v2");