/*
 * omap iommu: tlb and pagetable primitives
 *
 * Copyright (C) 2008-2010 Nokia Corporation
 *
 * Written by Hiroshi DOYU <Hiroshi.DOYU@nokia.com>,
 *		Paul Mundt and Toshihiro Kobayashi
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/err.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/clk.h>
#include <linux/platform_device.h>
#include <linux/iommu.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>

#include <asm/cacheflush.h>

#include <plat/iommu.h>

#include <plat/iopgtable.h>

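/*
 * for_each_iotlb_cr - iterate over the first @n TLB entries of @obj,
 * reading each CAM/RAM register pair into @cr via __iotlb_read_cr().
 *
 * Typical use, as in flush_iotlb_page() below:
 *
 *	for_each_iotlb_cr(obj, obj->nr_tlb_entries, i, cr) {
 *		if (!iotlb_cr_valid(&cr))
 *			continue;
 *		...
 *	}
 */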
#define for_each_iotlb_cr(obj, n, __i, cr)				\
	for (__i = 0;							\
	     (__i < (n)) && (cr = __iotlb_read_cr((obj), __i), true);	\
	     __i++)

/* bitmap of the page sizes currently supported */
#define OMAP_IOMMU_PGSIZES	(SZ_4K | SZ_64K | SZ_1M | SZ_16M)

/**
 * struct omap_iommu_domain - omap iommu domain
 * @pgtable:	the page table
 * @iommu_dev:	an omap iommu device attached to this domain. only a single
 *		iommu device can be attached for now.
 * @dev:	Device using this domain.
 * @lock:	domain lock, should be taken when attaching/detaching
 */
struct omap_iommu_domain {
	u32 *pgtable;
	struct omap_iommu *iommu_dev;
	struct device *dev;
	spinlock_t lock;
};

/* accommodate the difference between omap1 and omap2/3 */
static const struct iommu_functions *arch_iommu;

static struct platform_driver omap_iommu_driver;
static struct kmem_cache *iopte_cachep;

/**
 * omap_install_iommu_arch - Install architecture specific iommu functions
 * @ops:	a pointer to architecture specific iommu functions
 *
 * There are several kinds of iommu algorithm (tlb, pagetable) among the
 * omap series. This interface installs such an iommu algorithm.
 **/
int omap_install_iommu_arch(const struct iommu_functions *ops)
{
	if (arch_iommu)
		return -EBUSY;

	arch_iommu = ops;
	return 0;
}
EXPORT_SYMBOL_GPL(omap_install_iommu_arch);

/**
 * omap_uninstall_iommu_arch - Uninstall architecture specific iommu functions
 * @ops:	a pointer to architecture specific iommu functions
 *
 * This interface uninstalls the iommu algorithm installed previously.
 **/
void omap_uninstall_iommu_arch(const struct iommu_functions *ops)
{
	if (arch_iommu != ops)
		pr_err("%s: not your arch\n", __func__);

	arch_iommu = NULL;
}
EXPORT_SYMBOL_GPL(omap_uninstall_iommu_arch);

/**
 * omap_iommu_save_ctx - Save registers for pm off-mode support
 * @dev:	client device
 **/
void omap_iommu_save_ctx(struct device *dev)
{
	struct omap_iommu *obj = dev_to_omap_iommu(dev);

	arch_iommu->save_ctx(obj);
}
EXPORT_SYMBOL_GPL(omap_iommu_save_ctx);

/**
 * omap_iommu_restore_ctx - Restore registers for pm off-mode support
 * @dev:	client device
 **/
void omap_iommu_restore_ctx(struct device *dev)
{
	struct omap_iommu *obj = dev_to_omap_iommu(dev);

	arch_iommu->restore_ctx(obj);
}
EXPORT_SYMBOL_GPL(omap_iommu_restore_ctx);

/**
 * omap_iommu_arch_version - Return running iommu arch version
 **/
u32 omap_iommu_arch_version(void)
{
	return arch_iommu->version;
}
EXPORT_SYMBOL_GPL(omap_iommu_arch_version);

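/*
 * iommu_enable()/iommu_disable() wrap the arch-specific hooks with
 * clk_enable()/clk_disable(): the iommu functional clock only needs to
 * run while the MMU registers are actually being accessed.
 */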
static int iommu_enable(struct omap_iommu *obj)
{
	int err;

	if (!obj)
		return -EINVAL;

	if (!arch_iommu)
		return -ENODEV;

	clk_enable(obj->clk);

	err = arch_iommu->enable(obj);

	clk_disable(obj->clk);
	return err;
}

static void iommu_disable(struct omap_iommu *obj)
{
	if (!obj)
		return;

	clk_enable(obj->clk);

	arch_iommu->disable(obj);

	clk_disable(obj->clk);
}

/*
 * TLB operations
 */
void omap_iotlb_cr_to_e(struct cr_regs *cr, struct iotlb_entry *e)
{
	BUG_ON(!cr || !e);

	arch_iommu->cr_to_e(cr, e);
}
EXPORT_SYMBOL_GPL(omap_iotlb_cr_to_e);

static inline int iotlb_cr_valid(struct cr_regs *cr)
{
	if (!cr)
		return -EINVAL;

	return arch_iommu->cr_valid(cr);
}

static inline struct cr_regs *iotlb_alloc_cr(struct omap_iommu *obj,
					     struct iotlb_entry *e)
{
	if (!e)
		return NULL;

	return arch_iommu->alloc_cr(obj, e);
}

static u32 iotlb_cr_to_virt(struct cr_regs *cr)
{
	return arch_iommu->cr_to_virt(cr);
}

static u32 get_iopte_attr(struct iotlb_entry *e)
{
	return arch_iommu->get_pte_attr(e);
}

static u32 iommu_report_fault(struct omap_iommu *obj, u32 *da)
{
	return arch_iommu->fault_isr(obj, da);
}

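/*
 * The MMU_LOCK register holds two TLB indices: 'base', below which
 * entries are preserved (locked) and not picked as replacement
 * victims, and 'vict', the entry that the next TLB load overwrites.
 */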
static void iotlb_lock_get(struct omap_iommu *obj, struct iotlb_lock *l)
{
	u32 val;

	val = iommu_read_reg(obj, MMU_LOCK);

	l->base = MMU_LOCK_BASE(val);
	l->vict = MMU_LOCK_VICT(val);
}

static void iotlb_lock_set(struct omap_iommu *obj, struct iotlb_lock *l)
{
	u32 val;

	val = (l->base << MMU_LOCK_BASE_SHIFT);
	val |= (l->vict << MMU_LOCK_VICT_SHIFT);

	iommu_write_reg(obj, val, MMU_LOCK);
}

static void iotlb_read_cr(struct omap_iommu *obj, struct cr_regs *cr)
{
	arch_iommu->tlb_read_cr(obj, cr);
}

static void iotlb_load_cr(struct omap_iommu *obj, struct cr_regs *cr)
{
	arch_iommu->tlb_load_cr(obj, cr);

	iommu_write_reg(obj, 1, MMU_FLUSH_ENTRY);
	iommu_write_reg(obj, 1, MMU_LD_TLB);
}

/**
 * iotlb_dump_cr - Dump an iommu tlb entry into buf
 * @obj:	target iommu
 * @cr:		contents of cam and ram register
 * @buf:	output buffer
 **/
static inline ssize_t iotlb_dump_cr(struct omap_iommu *obj, struct cr_regs *cr,
				    char *buf)
{
	BUG_ON(!cr || !buf);

	return arch_iommu->dump_cr(obj, cr, buf);
}

/* only used in iotlb iteration for-loop */
static struct cr_regs __iotlb_read_cr(struct omap_iommu *obj, int n)
{
	struct cr_regs cr;
	struct iotlb_lock l;

	iotlb_lock_get(obj, &l);
	l.vict = n;
	iotlb_lock_set(obj, &l);
	iotlb_read_cr(obj, &cr);

	return cr;
}

/**
 * load_iotlb_entry - Set an iommu tlb entry
 * @obj:	target iommu
 * @e:		an iommu tlb entry info
 **/
#ifdef PREFETCH_IOTLB
static int load_iotlb_entry(struct omap_iommu *obj, struct iotlb_entry *e)
{
	int err = 0;
	struct iotlb_lock l;
	struct cr_regs *cr;

	if (!obj || !obj->nr_tlb_entries || !e)
		return -EINVAL;

	clk_enable(obj->clk);

	iotlb_lock_get(obj, &l);
	if (l.base == obj->nr_tlb_entries) {
		dev_warn(obj->dev, "%s: preserve entries full\n", __func__);
		err = -EBUSY;
		goto out;
	}
	if (!e->prsvd) {
		int i;
		struct cr_regs tmp;

		for_each_iotlb_cr(obj, obj->nr_tlb_entries, i, tmp)
			if (!iotlb_cr_valid(&tmp))
				break;

		if (i == obj->nr_tlb_entries) {
			dev_dbg(obj->dev, "%s: full: no entry\n", __func__);
			err = -EBUSY;
			goto out;
		}

		iotlb_lock_get(obj, &l);
	} else {
		l.vict = l.base;
		iotlb_lock_set(obj, &l);
	}

	cr = iotlb_alloc_cr(obj, e);
	if (IS_ERR(cr)) {
		clk_disable(obj->clk);
		return PTR_ERR(cr);
	}

	iotlb_load_cr(obj, cr);
	kfree(cr);

	if (e->prsvd)
		l.base++;
	/* increment victim for next tlb load */
	if (++l.vict == obj->nr_tlb_entries)
		l.vict = l.base;
	iotlb_lock_set(obj, &l);
out:
	clk_disable(obj->clk);
	return err;
}

#else /* !PREFETCH_IOTLB */

static int load_iotlb_entry(struct omap_iommu *obj, struct iotlb_entry *e)
{
	return 0;
}

#endif /* !PREFETCH_IOTLB */

static int prefetch_iotlb_entry(struct omap_iommu *obj, struct iotlb_entry *e)
{
	return load_iotlb_entry(obj, e);
}

/**
 * flush_iotlb_page - Clear an iommu tlb entry
 * @obj:	target iommu
 * @da:		iommu device virtual address
 *
 * Clear an iommu tlb entry which includes 'da' address.
 **/
static void flush_iotlb_page(struct omap_iommu *obj, u32 da)
{
	int i;
	struct cr_regs cr;

	clk_enable(obj->clk);

	for_each_iotlb_cr(obj, obj->nr_tlb_entries, i, cr) {
		u32 start;
		size_t bytes;

		if (!iotlb_cr_valid(&cr))
			continue;

		start = iotlb_cr_to_virt(&cr);
		bytes = iopgsz_to_bytes(cr.cam & 3);

		if ((start <= da) && (da < start + bytes)) {
			dev_dbg(obj->dev, "%s: %08x<=%08x(%x)\n",
				__func__, start, da, bytes);
			iotlb_load_cr(obj, &cr);
			iommu_write_reg(obj, 1, MMU_FLUSH_ENTRY);
		}
	}
	clk_disable(obj->clk);

	if (i == obj->nr_tlb_entries)
		dev_dbg(obj->dev, "%s: no page for %08x\n", __func__, da);
}

/**
 * flush_iotlb_all - Clear all iommu tlb entries
 * @obj:	target iommu
 **/
static void flush_iotlb_all(struct omap_iommu *obj)
{
	struct iotlb_lock l;

	clk_enable(obj->clk);

	l.base = 0;
	l.vict = 0;
	iotlb_lock_set(obj, &l);

	iommu_write_reg(obj, 1, MMU_GFLUSH);

	clk_disable(obj->clk);
}

#if defined(CONFIG_OMAP_IOMMU_DEBUG) || defined(CONFIG_OMAP_IOMMU_DEBUG_MODULE)

ssize_t omap_iommu_dump_ctx(struct omap_iommu *obj, char *buf, ssize_t bytes)
{
	if (!obj || !buf)
		return -EINVAL;

	clk_enable(obj->clk);

	bytes = arch_iommu->dump_ctx(obj, buf, bytes);

	clk_disable(obj->clk);

	return bytes;
}
EXPORT_SYMBOL_GPL(omap_iommu_dump_ctx);

static int
__dump_tlb_entries(struct omap_iommu *obj, struct cr_regs *crs, int num)
{
	int i;
	struct iotlb_lock saved;
	struct cr_regs tmp;
	struct cr_regs *p = crs;

	clk_enable(obj->clk);
	iotlb_lock_get(obj, &saved);

	for_each_iotlb_cr(obj, num, i, tmp) {
		if (!iotlb_cr_valid(&tmp))
			continue;
		*p++ = tmp;
	}

	iotlb_lock_set(obj, &saved);
	clk_disable(obj->clk);

	return p - crs;
}

/**
 * omap_dump_tlb_entries - dump cr arrays to given buffer
 * @obj:	target iommu
 * @buf:	output buffer
 **/
size_t omap_dump_tlb_entries(struct omap_iommu *obj, char *buf, ssize_t bytes)
{
	int i, num;
	struct cr_regs *cr;
	char *p = buf;

	num = bytes / sizeof(*cr);
	num = min(obj->nr_tlb_entries, num);

	cr = kcalloc(num, sizeof(*cr), GFP_KERNEL);
	if (!cr)
		return 0;

	num = __dump_tlb_entries(obj, cr, num);
	for (i = 0; i < num; i++)
		p += iotlb_dump_cr(obj, cr + i, p);
	kfree(cr);

	return p - buf;
}
EXPORT_SYMBOL_GPL(omap_dump_tlb_entries);

int omap_foreach_iommu_device(void *data, int (*fn)(struct device *, void *))
{
	return driver_for_each_device(&omap_iommu_driver.driver,
				      NULL, data, fn);
}
EXPORT_SYMBOL_GPL(omap_foreach_iommu_device);

#endif /* CONFIG_OMAP_IOMMU_DEBUG || CONFIG_OMAP_IOMMU_DEBUG_MODULE */

/*
 * H/W pagetable operations
 */
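/*
 * "mcr p15, 0, <reg>, c7, c10, 1" below is the ARM "clean D-cache line
 * by MVA" operation: it writes the updated page-table words back to
 * memory so that the IOMMU's hardware table walker sees them.
 */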
static void flush_iopgd_range(u32 *first, u32 *last)
{
	/* FIXME: L2 cache should be taken care of if it exists */
	do {
		asm("mcr p15, 0, %0, c7, c10, 1 @ flush_pgd"
		    : : "r" (first));
		first += L1_CACHE_BYTES / sizeof(*first);
	} while (first <= last);
}

static void flush_iopte_range(u32 *first, u32 *last)
{
	/* FIXME: L2 cache should be taken care of if it exists */
	do {
		asm("mcr p15, 0, %0, c7, c10, 1 @ flush_pte"
		    : : "r" (first));
		first += L1_CACHE_BYTES / sizeof(*first);
	} while (first <= last);
}

static void iopte_free(u32 *iopte)
{
	/* Note: freed iopte's must be clean ready for re-use */
	kmem_cache_free(iopte_cachep, iopte);
}

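/*
 * iopte_alloc() drops obj->page_table_lock around the (possibly
 * sleeping) slab allocation and re-checks *iopgd afterwards: if another
 * thread installed an L2 table in the meantime, the freshly allocated
 * one is simply freed again.
 */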
static u32 *iopte_alloc(struct omap_iommu *obj, u32 *iopgd, u32 da)
{
	u32 *iopte;

	/* a table already exists */
	if (*iopgd)
		goto pte_ready;

	/*
	 * do the allocation outside the page table lock
	 */
	spin_unlock(&obj->page_table_lock);
	iopte = kmem_cache_zalloc(iopte_cachep, GFP_KERNEL);
	spin_lock(&obj->page_table_lock);

	if (!*iopgd) {
		if (!iopte)
			return ERR_PTR(-ENOMEM);

		*iopgd = virt_to_phys(iopte) | IOPGD_TABLE;
		flush_iopgd_range(iopgd, iopgd);

		dev_vdbg(obj->dev, "%s: a new pte:%p\n", __func__, iopte);
	} else {
		/* We raced, free the redundant table */
		iopte_free(iopte);
	}

pte_ready:
	iopte = iopte_offset(iopgd, da);

	dev_vdbg(obj->dev,
		 "%s: da:%08x pgd:%p *pgd:%08x pte:%p *pte:%08x\n",
		 __func__, da, iopgd, *iopgd, iopte, *iopte);

	return iopte;
}

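/*
 * The four mappers below follow the ARM short-descriptor format used
 * by the OMAP IOMMU: a 16MB "super" section and a 64KB "large" page
 * are each written as 16 identical consecutive descriptors, while a
 * 1MB section and a 4KB small page take a single descriptor.
 */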
static int iopgd_alloc_section(struct omap_iommu *obj, u32 da, u32 pa, u32 prot)
{
	u32 *iopgd = iopgd_offset(obj, da);

	if ((da | pa) & ~IOSECTION_MASK) {
		dev_err(obj->dev, "%s: %08x:%08x should be aligned on %08lx\n",
			__func__, da, pa, IOSECTION_SIZE);
		return -EINVAL;
	}

	*iopgd = (pa & IOSECTION_MASK) | prot | IOPGD_SECTION;
	flush_iopgd_range(iopgd, iopgd);
	return 0;
}

static int iopgd_alloc_super(struct omap_iommu *obj, u32 da, u32 pa, u32 prot)
{
	u32 *iopgd = iopgd_offset(obj, da);
	int i;

	if ((da | pa) & ~IOSUPER_MASK) {
		dev_err(obj->dev, "%s: %08x:%08x should be aligned on %08lx\n",
			__func__, da, pa, IOSUPER_SIZE);
		return -EINVAL;
	}

	for (i = 0; i < 16; i++)
		*(iopgd + i) = (pa & IOSUPER_MASK) | prot | IOPGD_SUPER;
	flush_iopgd_range(iopgd, iopgd + 15);
	return 0;
}

static int iopte_alloc_page(struct omap_iommu *obj, u32 da, u32 pa, u32 prot)
{
	u32 *iopgd = iopgd_offset(obj, da);
	u32 *iopte = iopte_alloc(obj, iopgd, da);

	if (IS_ERR(iopte))
		return PTR_ERR(iopte);

	*iopte = (pa & IOPAGE_MASK) | prot | IOPTE_SMALL;
	flush_iopte_range(iopte, iopte);

	dev_vdbg(obj->dev, "%s: da:%08x pa:%08x pte:%p *pte:%08x\n",
		 __func__, da, pa, iopte, *iopte);

	return 0;
}

static int iopte_alloc_large(struct omap_iommu *obj, u32 da, u32 pa, u32 prot)
{
	u32 *iopgd = iopgd_offset(obj, da);
	u32 *iopte = iopte_alloc(obj, iopgd, da);
	int i;

	if ((da | pa) & ~IOLARGE_MASK) {
		dev_err(obj->dev, "%s: %08x:%08x should be aligned on %08lx\n",
			__func__, da, pa, IOLARGE_SIZE);
		return -EINVAL;
	}

	if (IS_ERR(iopte))
		return PTR_ERR(iopte);

	for (i = 0; i < 16; i++)
		*(iopte + i) = (pa & IOLARGE_MASK) | prot | IOPTE_LARGE;
	flush_iopte_range(iopte, iopte + 15);
	return 0;
}

static int
iopgtable_store_entry_core(struct omap_iommu *obj, struct iotlb_entry *e)
{
	int (*fn)(struct omap_iommu *, u32, u32, u32);
	u32 prot;
	int err;

	if (!obj || !e)
		return -EINVAL;

	switch (e->pgsz) {
	case MMU_CAM_PGSZ_16M:
		fn = iopgd_alloc_super;
		break;
	case MMU_CAM_PGSZ_1M:
		fn = iopgd_alloc_section;
		break;
	case MMU_CAM_PGSZ_64K:
		fn = iopte_alloc_large;
		break;
	case MMU_CAM_PGSZ_4K:
		fn = iopte_alloc_page;
		break;
	default:
		fn = NULL;
		BUG();
		break;
	}

	prot = get_iopte_attr(e);

	spin_lock(&obj->page_table_lock);
	err = fn(obj, e->da, e->pa, prot);
	spin_unlock(&obj->page_table_lock);

	return err;
}

/**
 * omap_iopgtable_store_entry - Make an iommu pte entry
 * @obj:	target iommu
 * @e:		an iommu tlb entry info
 **/
int omap_iopgtable_store_entry(struct omap_iommu *obj, struct iotlb_entry *e)
{
	int err;

	flush_iotlb_page(obj, e->da);
	err = iopgtable_store_entry_core(obj, e);
	if (!err)
		prefetch_iotlb_entry(obj, e);
	return err;
}
EXPORT_SYMBOL_GPL(omap_iopgtable_store_entry);

/**
 * iopgtable_lookup_entry - Lookup an iommu pte entry
 * @obj:	target iommu
 * @da:		iommu device virtual address
 * @ppgd:	iommu pgd entry pointer to be returned
 * @ppte:	iommu pte entry pointer to be returned
 **/
static void
iopgtable_lookup_entry(struct omap_iommu *obj, u32 da, u32 **ppgd, u32 **ppte)
{
	u32 *iopgd, *iopte = NULL;

	iopgd = iopgd_offset(obj, da);
	if (!*iopgd)
		goto out;

	if (iopgd_is_table(*iopgd))
		iopte = iopte_offset(iopgd, da);
out:
	*ppgd = iopgd;
	*ppte = iopte;
}

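/*
 * iopgtable_clear_entry_core() returns the number of bytes unmapped at
 * @da. For large pages and supersections it first rewinds to the
 * first of the 16 replicated descriptors, and it frees an L2 table
 * once its last entry has been cleared.
 */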
static size_t iopgtable_clear_entry_core(struct omap_iommu *obj, u32 da)
{
	size_t bytes;
	u32 *iopgd = iopgd_offset(obj, da);
	int nent = 1;

	if (!*iopgd)
		return 0;

	if (iopgd_is_table(*iopgd)) {
		int i;
		u32 *iopte = iopte_offset(iopgd, da);

		bytes = IOPTE_SIZE;
		if (*iopte & IOPTE_LARGE) {
			nent *= 16;
			/* rewind to the 1st entry */
			iopte = iopte_offset(iopgd, (da & IOLARGE_MASK));
		}
		bytes *= nent;
		memset(iopte, 0, nent * sizeof(*iopte));
		flush_iopte_range(iopte, iopte + nent - 1);

		/*
		 * do table walk to check if this table is necessary or not
		 */
		iopte = iopte_offset(iopgd, 0);
		for (i = 0; i < PTRS_PER_IOPTE; i++)
			if (iopte[i])
				goto out;

		iopte_free(iopte);
		nent = 1; /* for the next L1 entry */
	} else {
		bytes = IOPGD_SIZE;
		if ((*iopgd & IOPGD_SUPER) == IOPGD_SUPER) {
			nent *= 16;
			/* rewind to the 1st entry */
			iopgd = iopgd_offset(obj, (da & IOSUPER_MASK));
		}
		bytes *= nent;
	}
	memset(iopgd, 0, nent * sizeof(*iopgd));
	flush_iopgd_range(iopgd, iopgd + nent - 1);
out:
	return bytes;
}

/**
 * iopgtable_clear_entry - Remove an iommu pte entry
 * @obj:	target iommu
 * @da:		iommu device virtual address
 **/
static size_t iopgtable_clear_entry(struct omap_iommu *obj, u32 da)
{
	size_t bytes;

	spin_lock(&obj->page_table_lock);

	bytes = iopgtable_clear_entry_core(obj, da);
	flush_iotlb_page(obj, da);

	spin_unlock(&obj->page_table_lock);

	return bytes;
}

static void iopgtable_clear_entry_all(struct omap_iommu *obj)
{
	int i;

	spin_lock(&obj->page_table_lock);

	for (i = 0; i < PTRS_PER_IOPGD; i++) {
		u32 da;
		u32 *iopgd;

		da = i << IOPGD_SHIFT;
		iopgd = iopgd_offset(obj, da);

		if (!*iopgd)
			continue;

		if (iopgd_is_table(*iopgd))
			iopte_free(iopte_offset(iopgd, 0));

		*iopgd = 0;
		flush_iopgd_range(iopgd, iopgd);
	}

	flush_iotlb_all(obj);

	spin_unlock(&obj->page_table_lock);
}

/*
 * Device IOMMU generic operations
 */
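/*
 * Fault path: read the fault status and address, then give the
 * domain's fault callback (via report_iommu_fault()) a chance to fix
 * things up, e.g. by dynamically loading a TLB/PTE entry; if nobody
 * handles the fault, disable the IOMMU and dump the offending entries.
 */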
static irqreturn_t iommu_fault_handler(int irq, void *data)
{
	u32 da, errs;
	u32 *iopgd, *iopte;
	struct omap_iommu *obj = data;
	struct iommu_domain *domain = obj->domain;

	if (!obj->refcount)
		return IRQ_NONE;

	clk_enable(obj->clk);
	errs = iommu_report_fault(obj, &da);
	clk_disable(obj->clk);
	if (errs == 0)
		return IRQ_HANDLED;

	/* Fault callback or TLB/PTE Dynamic loading */
	if (!report_iommu_fault(domain, obj->dev, da, 0))
		return IRQ_HANDLED;

	iommu_disable(obj);

	iopgd = iopgd_offset(obj, da);

	if (!iopgd_is_table(*iopgd)) {
		dev_err(obj->dev, "%s: errs:0x%08x da:0x%08x pgd:0x%p "
			"*pgd:0x%08x\n", obj->name, errs, da, iopgd, *iopgd);
		return IRQ_NONE;
	}

	iopte = iopte_offset(iopgd, da);

	dev_err(obj->dev, "%s: errs:0x%08x da:0x%08x pgd:0x%p *pgd:0x%08x "
		"pte:0x%p *pte:0x%08x\n", obj->name, errs, da, iopgd, *iopgd,
		iopte, *iopte);

	return IRQ_NONE;
}

static int device_match_by_alias(struct device *dev, void *data)
{
	struct omap_iommu *obj = to_iommu(dev);
	const char *name = data;

	pr_debug("%s: %s %s\n", __func__, obj->name, name);

	return strcmp(obj->name, name) == 0;
}

/**
 * omap_iommu_attach() - attach iommu device to an iommu domain
 * @name:	name of target omap iommu device
 * @iopgd:	page table
 **/
static struct omap_iommu *omap_iommu_attach(const char *name, u32 *iopgd)
{
	int err = -ENOMEM;
	struct device *dev;
	struct omap_iommu *obj;

	dev = driver_find_device(&omap_iommu_driver.driver, NULL,
				 (void *)name,
				 device_match_by_alias);
	if (!dev)
		return ERR_PTR(-ENODEV);

	obj = to_iommu(dev);

	spin_lock(&obj->iommu_lock);

	/* an iommu device can only be attached once */
	if (++obj->refcount > 1) {
		dev_err(dev, "%s: already attached!\n", obj->name);
		err = -EBUSY;
		goto err_enable;
	}

	obj->iopgd = iopgd;
	err = iommu_enable(obj);
	if (err)
		goto err_enable;
	flush_iotlb_all(obj);

	if (!try_module_get(obj->owner))
		goto err_module;

	spin_unlock(&obj->iommu_lock);

	dev_dbg(obj->dev, "%s: %s\n", __func__, obj->name);
	return obj;

err_module:
	if (obj->refcount == 1)
		iommu_disable(obj);
err_enable:
	obj->refcount--;
	spin_unlock(&obj->iommu_lock);
	return ERR_PTR(err);
}

/**
 * omap_iommu_detach - release iommu device
 * @obj:	target iommu
 **/
static void omap_iommu_detach(struct omap_iommu *obj)
{
	if (!obj || IS_ERR(obj))
		return;

	spin_lock(&obj->iommu_lock);

	if (--obj->refcount == 0)
		iommu_disable(obj);

	module_put(obj->owner);

	obj->iopgd = NULL;

	spin_unlock(&obj->iommu_lock);

	dev_dbg(obj->dev, "%s: %s\n", __func__, obj->name);
}

/*
 * OMAP Device MMU (IOMMU) detection
 */
static int __devinit omap_iommu_probe(struct platform_device *pdev)
{
	int err = -ENODEV;
	int irq;
	struct omap_iommu *obj;
	struct resource *res;
	struct iommu_platform_data *pdata = pdev->dev.platform_data;

	if (pdev->num_resources != 2)
		return -EINVAL;

	obj = kzalloc(sizeof(*obj) + MMU_REG_SIZE, GFP_KERNEL);
	if (!obj)
		return -ENOMEM;

	obj->clk = clk_get(&pdev->dev, pdata->clk_name);
	if (IS_ERR(obj->clk))
		goto err_clk;

	obj->nr_tlb_entries = pdata->nr_tlb_entries;
	obj->name = pdata->name;
	obj->dev = &pdev->dev;
	obj->ctx = (void *)obj + sizeof(*obj);
	obj->da_start = pdata->da_start;
	obj->da_end = pdata->da_end;

	spin_lock_init(&obj->iommu_lock);
	mutex_init(&obj->mmap_lock);
	spin_lock_init(&obj->page_table_lock);
	INIT_LIST_HEAD(&obj->mmap);

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res) {
		err = -ENODEV;
		goto err_mem;
	}

	res = request_mem_region(res->start, resource_size(res),
				 dev_name(&pdev->dev));
	if (!res) {
		err = -EIO;
		goto err_mem;
	}

	obj->regbase = ioremap(res->start, resource_size(res));
	if (!obj->regbase) {
		err = -ENOMEM;
		goto err_ioremap;
	}

	irq = platform_get_irq(pdev, 0);
	if (irq < 0) {
		err = -ENODEV;
		goto err_irq;
	}
	err = request_irq(irq, iommu_fault_handler, IRQF_SHARED,
			  dev_name(&pdev->dev), obj);
	if (err < 0)
		goto err_irq;
	platform_set_drvdata(pdev, obj);

	dev_info(&pdev->dev, "%s registered\n", obj->name);
	return 0;

err_irq:
	iounmap(obj->regbase);
err_ioremap:
	release_mem_region(res->start, resource_size(res));
err_mem:
	clk_put(obj->clk);
err_clk:
	kfree(obj);
	return err;
}

static int __devexit omap_iommu_remove(struct platform_device *pdev)
{
	int irq;
	struct resource *res;
	struct omap_iommu *obj = platform_get_drvdata(pdev);

	platform_set_drvdata(pdev, NULL);

	iopgtable_clear_entry_all(obj);

	irq = platform_get_irq(pdev, 0);
	free_irq(irq, obj);
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	release_mem_region(res->start, resource_size(res));
	iounmap(obj->regbase);

	clk_put(obj->clk);
	dev_info(&pdev->dev, "%s removed\n", obj->name);
	kfree(obj);
	return 0;
}

static struct platform_driver omap_iommu_driver = {
	.probe	= omap_iommu_probe,
	.remove	= __devexit_p(omap_iommu_remove),
	.driver	= {
		.name	= "omap-iommu",
	},
};

static void iopte_cachep_ctor(void *iopte)
{
	clean_dcache_area(iopte, IOPTE_TABLE_SIZE);
}

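/*
 * omap_iommu_map()/omap_iommu_unmap() each handle exactly one OMAP page
 * (4KB/64KB/1MB/16MB, see OMAP_IOMMU_PGSIZES); the generic IOMMU core
 * splits larger requests into such chunks based on ops->pgsize_bitmap.
 */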
static int omap_iommu_map(struct iommu_domain *domain, unsigned long da,
			  phys_addr_t pa, size_t bytes, int prot)
{
	struct omap_iommu_domain *omap_domain = domain->priv;
	struct omap_iommu *oiommu = omap_domain->iommu_dev;
	struct device *dev = oiommu->dev;
	struct iotlb_entry e;
	int omap_pgsz;
	u32 flags;
	int ret;

	/* we only support mapping a single iommu page for now */
	omap_pgsz = bytes_to_iopgsz(bytes);
	if (omap_pgsz < 0) {
		dev_err(dev, "invalid size to map: %zu\n", bytes);
		return -EINVAL;
	}

	dev_dbg(dev, "mapping da 0x%lx to pa 0x%x size 0x%zx\n", da, pa, bytes);

	flags = omap_pgsz | prot;

	iotlb_init_entry(&e, da, pa, flags);

	ret = omap_iopgtable_store_entry(oiommu, &e);
	if (ret)
		dev_err(dev, "omap_iopgtable_store_entry failed: %d\n", ret);

	return ret;
}

static size_t omap_iommu_unmap(struct iommu_domain *domain, unsigned long da,
			       size_t size)
{
	struct omap_iommu_domain *omap_domain = domain->priv;
	struct omap_iommu *oiommu = omap_domain->iommu_dev;
	struct device *dev = oiommu->dev;

	dev_dbg(dev, "unmapping da 0x%lx size %zu\n", da, size);

	return iopgtable_clear_entry(oiommu, da);
}

static int
omap_iommu_attach_dev(struct iommu_domain *domain, struct device *dev)
{
	struct omap_iommu_domain *omap_domain = domain->priv;
	struct omap_iommu *oiommu;
	struct omap_iommu_arch_data *arch_data = dev->archdata.iommu;
	int ret = 0;

	spin_lock(&omap_domain->lock);

	/* only a single device is supported per domain for now */
	if (omap_domain->iommu_dev) {
		dev_err(dev, "iommu domain is already attached\n");
		ret = -EBUSY;
		goto out;
	}

	/* get a handle to and enable the omap iommu */
	oiommu = omap_iommu_attach(arch_data->name, omap_domain->pgtable);
	if (IS_ERR(oiommu)) {
		ret = PTR_ERR(oiommu);
		dev_err(dev, "can't get omap iommu: %d\n", ret);
		goto out;
	}

	omap_domain->iommu_dev = arch_data->iommu_dev = oiommu;
	omap_domain->dev = dev;
	oiommu->domain = domain;

out:
	spin_unlock(&omap_domain->lock);
	return ret;
}

static void _omap_iommu_detach_dev(struct omap_iommu_domain *omap_domain,
				   struct device *dev)
{
	struct omap_iommu *oiommu = dev_to_omap_iommu(dev);
	struct omap_iommu_arch_data *arch_data = dev->archdata.iommu;

	/* only a single device is supported per domain for now */
	if (omap_domain->iommu_dev != oiommu) {
		dev_err(dev, "invalid iommu device\n");
		return;
	}

	iopgtable_clear_entry_all(oiommu);

	omap_iommu_detach(oiommu);

	omap_domain->iommu_dev = arch_data->iommu_dev = NULL;
	omap_domain->dev = NULL;
}

static void omap_iommu_detach_dev(struct iommu_domain *domain,
				  struct device *dev)
{
	struct omap_iommu_domain *omap_domain = domain->priv;

	spin_lock(&omap_domain->lock);
	_omap_iommu_detach_dev(omap_domain, dev);
	spin_unlock(&omap_domain->lock);
}

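/*
 * The first-level table (IOPGD_TABLE_SIZE bytes) must be naturally
 * aligned so it can be programmed as the MMU's table base; the BUG_ON
 * below guards the assumption that kzalloc() returns a suitably
 * aligned block for this power-of-two size.
 */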
static int omap_iommu_domain_init(struct iommu_domain *domain)
{
	struct omap_iommu_domain *omap_domain;

	omap_domain = kzalloc(sizeof(*omap_domain), GFP_KERNEL);
	if (!omap_domain) {
		pr_err("kzalloc failed\n");
		goto out;
	}

	omap_domain->pgtable = kzalloc(IOPGD_TABLE_SIZE, GFP_KERNEL);
	if (!omap_domain->pgtable) {
		pr_err("kzalloc failed\n");
		goto fail_nomem;
	}

	/*
	 * should never fail, but please keep this around to ensure
	 * we keep the hardware happy
	 */
	BUG_ON(!IS_ALIGNED((long)omap_domain->pgtable, IOPGD_TABLE_SIZE));

	clean_dcache_area(omap_domain->pgtable, IOPGD_TABLE_SIZE);
	spin_lock_init(&omap_domain->lock);

	domain->priv = omap_domain;

	return 0;

fail_nomem:
	kfree(omap_domain);
out:
	return -ENOMEM;
}

static void omap_iommu_domain_destroy(struct iommu_domain *domain)
{
	struct omap_iommu_domain *omap_domain = domain->priv;

	domain->priv = NULL;

	/*
	 * Is an iommu device still attached?
	 * (currently, only one device can be attached)
	 */
	if (omap_domain->iommu_dev)
		_omap_iommu_detach_dev(omap_domain, omap_domain->dev);

	kfree(omap_domain->pgtable);
	kfree(omap_domain);
}

static phys_addr_t omap_iommu_iova_to_phys(struct iommu_domain *domain,
					   unsigned long da)
{
	struct omap_iommu_domain *omap_domain = domain->priv;
	struct omap_iommu *oiommu = omap_domain->iommu_dev;
	struct device *dev = oiommu->dev;
	u32 *pgd, *pte;
	phys_addr_t ret = 0;

	iopgtable_lookup_entry(oiommu, da, &pgd, &pte);

	if (pte) {
		if (iopte_is_small(*pte))
			ret = omap_iommu_translate(*pte, da, IOPTE_MASK);
		else if (iopte_is_large(*pte))
			ret = omap_iommu_translate(*pte, da, IOLARGE_MASK);
		else
			dev_err(dev, "bogus pte 0x%x, da 0x%lx", *pte, da);
	} else {
		if (iopgd_is_section(*pgd))
			ret = omap_iommu_translate(*pgd, da, IOSECTION_MASK);
		else if (iopgd_is_super(*pgd))
			ret = omap_iommu_translate(*pgd, da, IOSUPER_MASK);
		else
			dev_err(dev, "bogus pgd 0x%x, da 0x%lx", *pgd, da);
	}

	return ret;
}

static int omap_iommu_domain_has_cap(struct iommu_domain *domain,
				     unsigned long cap)
{
	return 0;
}

static struct iommu_ops omap_iommu_ops = {
	.domain_init	= omap_iommu_domain_init,
	.domain_destroy	= omap_iommu_domain_destroy,
	.attach_dev	= omap_iommu_attach_dev,
	.detach_dev	= omap_iommu_detach_dev,
	.map		= omap_iommu_map,
	.unmap		= omap_iommu_unmap,
	.iova_to_phys	= omap_iommu_iova_to_phys,
	.domain_has_cap	= omap_iommu_domain_has_cap,
	.pgsize_bitmap	= OMAP_IOMMU_PGSIZES,
};

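/*
 * An ARM second-level page table holds 256 u32 entries, i.e. it is 1KB
 * in size and must be 1KB aligned: hence align = 1 << 10 for the iopte
 * slab cache below.
 */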
static int __init omap_iommu_init(void)
{
	struct kmem_cache *p;
	const unsigned long flags = SLAB_HWCACHE_ALIGN;
	size_t align = 1 << 10; /* L2 pagetable alignment */

	p = kmem_cache_create("iopte_cache", IOPTE_TABLE_SIZE, align, flags,
			      iopte_cachep_ctor);
	if (!p)
		return -ENOMEM;
	iopte_cachep = p;

	bus_set_iommu(&platform_bus_type, &omap_iommu_ops);

	return platform_driver_register(&omap_iommu_driver);
}
/* must be ready before omap3isp is probed */
subsys_initcall(omap_iommu_init);

static void __exit omap_iommu_exit(void)
{
	kmem_cache_destroy(iopte_cachep);

	platform_driver_unregister(&omap_iommu_driver);
}
module_exit(omap_iommu_exit);

MODULE_DESCRIPTION("omap iommu: tlb and pagetable primitives");
MODULE_ALIAS("platform:omap-iommu");
MODULE_AUTHOR("Hiroshi DOYU, Paul Mundt and Toshihiro Kobayashi");
MODULE_LICENSE("GPL v2");