/*
 * omap iommu: tlb and pagetable primitives
 *
 * Copyright (C) 2008-2010 Nokia Corporation
 *
 * Written by Hiroshi DOYU <Hiroshi.DOYU@nokia.com>,
 *	Paul Mundt and Toshihiro Kobayashi
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/err.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/platform_device.h>
#include <linux/iommu.h>
#include <linux/omap-iommu.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>
#include <linux/io.h>
#include <linux/pm_runtime.h>
#include <linux/of.h>
#include <linux/of_iommu.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/regmap.h>
#include <linux/mfd/syscon.h>

#include <asm/cacheflush.h>

#include <linux/platform_data/iommu-omap.h>

#include "omap-iopgtable.h"
#include "omap-iommu.h"

#define to_iommu(dev)	\
	((struct omap_iommu *)platform_get_drvdata(to_platform_device(dev)))

/* bitmap of the page sizes currently supported */
#define OMAP_IOMMU_PGSIZES	(SZ_4K | SZ_64K | SZ_1M | SZ_16M)

/**
 * struct omap_iommu_domain - omap iommu domain
 * @pgtable:	the page table
 * @iommu_dev:	an omap iommu device attached to this domain. only a single
 *		iommu device can be attached for now.
 * @dev:	Device using this domain.
 * @lock:	domain lock, should be taken when attaching/detaching
 */
struct omap_iommu_domain {
	u32 *pgtable;
	struct omap_iommu *iommu_dev;
	struct device *dev;
	spinlock_t lock;
	struct iommu_domain domain;
};

#define MMU_LOCK_BASE_SHIFT	10
#define MMU_LOCK_BASE_MASK	(0x1f << MMU_LOCK_BASE_SHIFT)
#define MMU_LOCK_BASE(x)	\
	((x & MMU_LOCK_BASE_MASK) >> MMU_LOCK_BASE_SHIFT)

#define MMU_LOCK_VICT_SHIFT	4
#define MMU_LOCK_VICT_MASK	(0x1f << MMU_LOCK_VICT_SHIFT)
#define MMU_LOCK_VICT(x)	\
	((x & MMU_LOCK_VICT_MASK) >> MMU_LOCK_VICT_SHIFT)

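/*
 * A worked example (illustrative, not part of the original file): packing
 * base = 3 and vict = 5 the way iotlb_lock_set() below does gives
 * val = (3 << 10) | (5 << 4) = 0x0c50, and the accessors invert it:
 * MMU_LOCK_BASE(0x0c50) == 3 (entries 0-2 are preserved) and
 * MMU_LOCK_VICT(0x0c50) == 5 (entry 5 is the next victim).
 */
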
static struct platform_driver omap_iommu_driver;
static struct kmem_cache *iopte_cachep;

/**
 * to_omap_domain - Get struct omap_iommu_domain from generic iommu_domain
 * @dom: generic iommu domain handle
 **/
static struct omap_iommu_domain *to_omap_domain(struct iommu_domain *dom)
{
	return container_of(dom, struct omap_iommu_domain, domain);
}

/**
 * omap_iommu_save_ctx - Save registers for pm off-mode support
 * @dev: client device
 **/
void omap_iommu_save_ctx(struct device *dev)
{
	struct omap_iommu *obj = dev_to_omap_iommu(dev);
	u32 *p = obj->ctx;
	int i;

	for (i = 0; i < (MMU_REG_SIZE / sizeof(u32)); i++) {
		p[i] = iommu_read_reg(obj, i * sizeof(u32));
		dev_dbg(obj->dev, "%s\t[%02d] %08x\n", __func__, i, p[i]);
	}
}
EXPORT_SYMBOL_GPL(omap_iommu_save_ctx);

/**
 * omap_iommu_restore_ctx - Restore registers for pm off-mode support
 * @dev: client device
 **/
void omap_iommu_restore_ctx(struct device *dev)
{
	struct omap_iommu *obj = dev_to_omap_iommu(dev);
	u32 *p = obj->ctx;
	int i;

	for (i = 0; i < (MMU_REG_SIZE / sizeof(u32)); i++) {
		iommu_write_reg(obj, p[i], i * sizeof(u32));
		dev_dbg(obj->dev, "%s\t[%02d] %08x\n", __func__, i, p[i]);
	}
}
EXPORT_SYMBOL_GPL(omap_iommu_restore_ctx);
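
/*
 * A minimal usage sketch (illustrative only, not part of this driver):
 * a client such as a remote-processor driver would bracket an off-mode
 * transition of its MMU with the two exported helpers above. The callback
 * names here are hypothetical.
 *
 *	static int my_rproc_suspend(struct device *dev)
 *	{
 *		omap_iommu_save_ctx(dev);
 *		return 0;
 *	}
 *
 *	static int my_rproc_resume(struct device *dev)
 *	{
 *		omap_iommu_restore_ctx(dev);
 *		return 0;
 *	}
 */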

static void dra7_cfg_dspsys_mmu(struct omap_iommu *obj, bool enable)
{
	u32 val, mask;

	if (!obj->syscfg)
		return;

	mask = (1 << (obj->id * DSP_SYS_MMU_CONFIG_EN_SHIFT));
	val = enable ? mask : 0;
	regmap_update_bits(obj->syscfg, DSP_SYS_MMU_CONFIG, mask, val);
}

static void __iommu_set_twl(struct omap_iommu *obj, bool on)
{
	u32 l = iommu_read_reg(obj, MMU_CNTL);

	if (on)
		iommu_write_reg(obj, MMU_IRQ_TWL_MASK, MMU_IRQENABLE);
	else
		iommu_write_reg(obj, MMU_IRQ_TLB_MISS_MASK, MMU_IRQENABLE);

	l &= ~MMU_CNTL_MASK;
	if (on)
		l |= (MMU_CNTL_MMU_EN | MMU_CNTL_TWL_EN);
	else
		l |= (MMU_CNTL_MMU_EN);

	iommu_write_reg(obj, l, MMU_CNTL);
}

static int omap2_iommu_enable(struct omap_iommu *obj)
{
	u32 l, pa;

	if (!obj->iopgd || !IS_ALIGNED((u32)obj->iopgd, SZ_16K))
		return -EINVAL;

	pa = virt_to_phys(obj->iopgd);
	if (!IS_ALIGNED(pa, SZ_16K))
		return -EINVAL;

	l = iommu_read_reg(obj, MMU_REVISION);
	dev_info(obj->dev, "%s: version %d.%d\n", obj->name,
		 (l >> 4) & 0xf, l & 0xf);

	iommu_write_reg(obj, pa, MMU_TTB);

	dra7_cfg_dspsys_mmu(obj, true);

	if (obj->has_bus_err_back)
		iommu_write_reg(obj, MMU_GP_REG_BUS_ERR_BACK_EN, MMU_GP_REG);

	__iommu_set_twl(obj, true);

	return 0;
}

static void omap2_iommu_disable(struct omap_iommu *obj)
{
	u32 l = iommu_read_reg(obj, MMU_CNTL);

	l &= ~MMU_CNTL_MASK;
	iommu_write_reg(obj, l, MMU_CNTL);
	dra7_cfg_dspsys_mmu(obj, false);

	dev_dbg(obj->dev, "%s is shutting down\n", obj->name);
}

static int iommu_enable(struct omap_iommu *obj)
{
	int err;
	struct platform_device *pdev = to_platform_device(obj->dev);
	struct iommu_platform_data *pdata = dev_get_platdata(&pdev->dev);

	if (pdata && pdata->deassert_reset) {
		err = pdata->deassert_reset(pdev, pdata->reset_name);
		if (err) {
			dev_err(obj->dev, "deassert_reset failed: %d\n", err);
			return err;
		}
	}

	pm_runtime_get_sync(obj->dev);

	err = omap2_iommu_enable(obj);

	return err;
}

static void iommu_disable(struct omap_iommu *obj)
{
	struct platform_device *pdev = to_platform_device(obj->dev);
	struct iommu_platform_data *pdata = dev_get_platdata(&pdev->dev);

	omap2_iommu_disable(obj);

	pm_runtime_put_sync(obj->dev);

	if (pdata && pdata->assert_reset)
		pdata->assert_reset(pdev, pdata->reset_name);
}

/*
 * TLB operations
 */
static u32 iotlb_cr_to_virt(struct cr_regs *cr)
{
	u32 page_size = cr->cam & MMU_CAM_PGSZ_MASK;
	u32 mask = get_cam_va_mask(cr->cam & page_size);

	return cr->cam & mask;
}

static u32 get_iopte_attr(struct iotlb_entry *e)
{
	u32 attr;

	attr = e->mixed << 5;
	attr |= e->endian;
	attr |= e->elsz >> 3;
	attr <<= (((e->pgsz == MMU_CAM_PGSZ_4K) ||
		   (e->pgsz == MMU_CAM_PGSZ_64K)) ? 0 : 6);
	return attr;
}
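
/*
 * A worked example (illustrative, assuming the MMU_RAM_* encodings from
 * omap-iommu.h): with the little-endian, 8-bit, non-mixed defaults that
 * iotlb_init_entry() uses, every field above is zero, so attr is 0. A
 * 16-bit element size (MMU_RAM_ELSZ_16, i.e. 1 << 7) on a 1MB section
 * entry would instead yield attr = ((1 << 7) >> 3) << 6 = 0x400.
 */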

static u32 iommu_report_fault(struct omap_iommu *obj, u32 *da)
{
	u32 status, fault_addr;

	status = iommu_read_reg(obj, MMU_IRQSTATUS);
	status &= MMU_IRQ_MASK;
	if (!status) {
		*da = 0;
		return 0;
	}

	fault_addr = iommu_read_reg(obj, MMU_FAULT_AD);
	*da = fault_addr;

	iommu_write_reg(obj, status, MMU_IRQSTATUS);

	return status;
}

void iotlb_lock_get(struct omap_iommu *obj, struct iotlb_lock *l)
{
	u32 val;

	val = iommu_read_reg(obj, MMU_LOCK);

	l->base = MMU_LOCK_BASE(val);
	l->vict = MMU_LOCK_VICT(val);
}

void iotlb_lock_set(struct omap_iommu *obj, struct iotlb_lock *l)
{
	u32 val;

	val = (l->base << MMU_LOCK_BASE_SHIFT);
	val |= (l->vict << MMU_LOCK_VICT_SHIFT);

	iommu_write_reg(obj, val, MMU_LOCK);
}

static void iotlb_read_cr(struct omap_iommu *obj, struct cr_regs *cr)
{
	cr->cam = iommu_read_reg(obj, MMU_READ_CAM);
	cr->ram = iommu_read_reg(obj, MMU_READ_RAM);
}

static void iotlb_load_cr(struct omap_iommu *obj, struct cr_regs *cr)
{
	iommu_write_reg(obj, cr->cam | MMU_CAM_V, MMU_CAM);
	iommu_write_reg(obj, cr->ram, MMU_RAM);

	iommu_write_reg(obj, 1, MMU_FLUSH_ENTRY);
	iommu_write_reg(obj, 1, MMU_LD_TLB);
}

/* only used in iotlb iteration for-loop */
struct cr_regs __iotlb_read_cr(struct omap_iommu *obj, int n)
{
	struct cr_regs cr;
	struct iotlb_lock l;

	iotlb_lock_get(obj, &l);
	l.vict = n;
	iotlb_lock_set(obj, &l);
	iotlb_read_cr(obj, &cr);

	return cr;
}
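
/*
 * A minimal sketch of that for-loop (illustrative; for_each_iotlb_cr()
 * and iotlb_cr_valid() come from omap-iommu.h): counting the currently
 * valid TLB entries of an enabled IOMMU.
 *
 *	struct cr_regs cr;
 *	int i, used = 0;
 *
 *	for_each_iotlb_cr(obj, obj->nr_tlb_entries, i, cr)
 *		if (iotlb_cr_valid(&cr))
 *			used++;
 */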

#ifdef PREFETCH_IOTLB
static struct cr_regs *iotlb_alloc_cr(struct omap_iommu *obj,
				      struct iotlb_entry *e)
{
	struct cr_regs *cr;

	if (!e)
		return NULL;

	if (e->da & ~(get_cam_va_mask(e->pgsz))) {
		dev_err(obj->dev, "%s:\twrong alignment: %08x\n", __func__,
			e->da);
		return ERR_PTR(-EINVAL);
	}

	cr = kmalloc(sizeof(*cr), GFP_KERNEL);
	if (!cr)
		return ERR_PTR(-ENOMEM);

	cr->cam = (e->da & MMU_CAM_VATAG_MASK) | e->prsvd | e->pgsz | e->valid;
	cr->ram = e->pa | e->endian | e->elsz | e->mixed;

	return cr;
}

/**
 * load_iotlb_entry - Set an iommu tlb entry
 * @obj: target iommu
 * @e: an iommu tlb entry info
 **/
static int load_iotlb_entry(struct omap_iommu *obj, struct iotlb_entry *e)
{
	int err = 0;
	struct iotlb_lock l;
	struct cr_regs *cr;

	if (!obj || !obj->nr_tlb_entries || !e)
		return -EINVAL;

	pm_runtime_get_sync(obj->dev);

	iotlb_lock_get(obj, &l);
	if (l.base == obj->nr_tlb_entries) {
		dev_warn(obj->dev, "%s: preserve entries full\n", __func__);
		err = -EBUSY;
		goto out;
	}
	if (!e->prsvd) {
		int i;
		struct cr_regs tmp;

		for_each_iotlb_cr(obj, obj->nr_tlb_entries, i, tmp)
			if (!iotlb_cr_valid(&tmp))
				break;

		if (i == obj->nr_tlb_entries) {
			dev_dbg(obj->dev, "%s: full: no entry\n", __func__);
			err = -EBUSY;
			goto out;
		}

		iotlb_lock_get(obj, &l);
	} else {
		l.vict = l.base;
		iotlb_lock_set(obj, &l);
	}

	cr = iotlb_alloc_cr(obj, e);
	if (IS_ERR(cr)) {
		pm_runtime_put_sync(obj->dev);
		return PTR_ERR(cr);
	}

	iotlb_load_cr(obj, cr);
	kfree(cr);

	if (e->prsvd)
		l.base++;
	/* increment victim for next tlb load */
	if (++l.vict == obj->nr_tlb_entries)
		l.vict = l.base;
	iotlb_lock_set(obj, &l);
out:
	pm_runtime_put_sync(obj->dev);
	return err;
}

#else /* !PREFETCH_IOTLB */

static int load_iotlb_entry(struct omap_iommu *obj, struct iotlb_entry *e)
{
	return 0;
}

#endif /* !PREFETCH_IOTLB */

static int prefetch_iotlb_entry(struct omap_iommu *obj, struct iotlb_entry *e)
{
	return load_iotlb_entry(obj, e);
}

/**
 * flush_iotlb_page - Clear an iommu tlb entry
 * @obj: target iommu
 * @da: iommu device virtual address
 *
 * Clear an iommu tlb entry which includes 'da' address.
 **/
static void flush_iotlb_page(struct omap_iommu *obj, u32 da)
{
	int i;
	struct cr_regs cr;

	pm_runtime_get_sync(obj->dev);

	for_each_iotlb_cr(obj, obj->nr_tlb_entries, i, cr) {
		u32 start;
		size_t bytes;

		if (!iotlb_cr_valid(&cr))
			continue;

		start = iotlb_cr_to_virt(&cr);
		bytes = iopgsz_to_bytes(cr.cam & 3);

		if ((start <= da) && (da < start + bytes)) {
			dev_dbg(obj->dev, "%s: %08x<=%08x(%zx)\n",
				__func__, start, da, bytes);
			iotlb_load_cr(obj, &cr);
			iommu_write_reg(obj, 1, MMU_FLUSH_ENTRY);
			break;
		}
	}
	pm_runtime_put_sync(obj->dev);

	if (i == obj->nr_tlb_entries)
		dev_dbg(obj->dev, "%s: no page for %08x\n", __func__, da);
}

/**
 * flush_iotlb_all - Clear all iommu tlb entries
 * @obj: target iommu
 **/
static void flush_iotlb_all(struct omap_iommu *obj)
{
	struct iotlb_lock l;

	pm_runtime_get_sync(obj->dev);

	l.base = 0;
	l.vict = 0;
	iotlb_lock_set(obj, &l);

	iommu_write_reg(obj, 1, MMU_GFLUSH);

	pm_runtime_put_sync(obj->dev);
}

/*
 * H/W pagetable operations
 */
static void flush_iopgd_range(u32 *first, u32 *last)
{
	/* FIXME: L2 cache should be taken care of if it exists */
	do {
		asm("mcr p15, 0, %0, c7, c10, 1 @ flush_pgd"
		    : : "r" (first));
		first += L1_CACHE_BYTES / sizeof(*first);
	} while (first <= last);
}

static void flush_iopte_range(u32 *first, u32 *last)
{
	/* FIXME: L2 cache should be taken care of if it exists */
	do {
		asm("mcr p15, 0, %0, c7, c10, 1 @ flush_pte"
		    : : "r" (first));
		first += L1_CACHE_BYTES / sizeof(*first);
	} while (first <= last);
}

static void iopte_free(u32 *iopte)
{
	/* Note: freed iopte's must be clean ready for re-use */
	if (iopte)
		kmem_cache_free(iopte_cachep, iopte);
}

static u32 *iopte_alloc(struct omap_iommu *obj, u32 *iopgd, u32 da)
{
	u32 *iopte;

	/* a table already exists */
	if (*iopgd)
		goto pte_ready;

	/*
	 * do the allocation outside the page table lock
	 */
	spin_unlock(&obj->page_table_lock);
	iopte = kmem_cache_zalloc(iopte_cachep, GFP_KERNEL);
	spin_lock(&obj->page_table_lock);

	if (!*iopgd) {
		if (!iopte)
			return ERR_PTR(-ENOMEM);

		*iopgd = virt_to_phys(iopte) | IOPGD_TABLE;
		flush_iopgd_range(iopgd, iopgd);

		dev_vdbg(obj->dev, "%s: a new pte:%p\n", __func__, iopte);
	} else {
		/* We raced, free the redundant table */
		iopte_free(iopte);
	}

pte_ready:
	iopte = iopte_offset(iopgd, da);

	dev_vdbg(obj->dev,
		 "%s: da:%08x pgd:%p *pgd:%08x pte:%p *pte:%08x\n",
		 __func__, da, iopgd, *iopgd, iopte, *iopte);

	return iopte;
}

static int iopgd_alloc_section(struct omap_iommu *obj, u32 da, u32 pa, u32 prot)
{
	u32 *iopgd = iopgd_offset(obj, da);

	if ((da | pa) & ~IOSECTION_MASK) {
		dev_err(obj->dev, "%s: %08x:%08x should be aligned on %08lx\n",
			__func__, da, pa, IOSECTION_SIZE);
		return -EINVAL;
	}

	*iopgd = (pa & IOSECTION_MASK) | prot | IOPGD_SECTION;
	flush_iopgd_range(iopgd, iopgd);
	return 0;
}

static int iopgd_alloc_super(struct omap_iommu *obj, u32 da, u32 pa, u32 prot)
{
	u32 *iopgd = iopgd_offset(obj, da);
	int i;

	if ((da | pa) & ~IOSUPER_MASK) {
		dev_err(obj->dev, "%s: %08x:%08x should be aligned on %08lx\n",
			__func__, da, pa, IOSUPER_SIZE);
		return -EINVAL;
	}

	for (i = 0; i < 16; i++)
		*(iopgd + i) = (pa & IOSUPER_MASK) | prot | IOPGD_SUPER;
	flush_iopgd_range(iopgd, iopgd + 15);
	return 0;
}

static int iopte_alloc_page(struct omap_iommu *obj, u32 da, u32 pa, u32 prot)
{
	u32 *iopgd = iopgd_offset(obj, da);
	u32 *iopte = iopte_alloc(obj, iopgd, da);

	if (IS_ERR(iopte))
		return PTR_ERR(iopte);

	*iopte = (pa & IOPAGE_MASK) | prot | IOPTE_SMALL;
	flush_iopte_range(iopte, iopte);

	dev_vdbg(obj->dev, "%s: da:%08x pa:%08x pte:%p *pte:%08x\n",
		 __func__, da, pa, iopte, *iopte);

	return 0;
}

static int iopte_alloc_large(struct omap_iommu *obj, u32 da, u32 pa, u32 prot)
{
	u32 *iopgd = iopgd_offset(obj, da);
	u32 *iopte;
	int i;

	/* validate alignment before allocating a PTE table */
	if ((da | pa) & ~IOLARGE_MASK) {
		dev_err(obj->dev, "%s: %08x:%08x should be aligned on %08lx\n",
			__func__, da, pa, IOLARGE_SIZE);
		return -EINVAL;
	}

	iopte = iopte_alloc(obj, iopgd, da);
	if (IS_ERR(iopte))
		return PTR_ERR(iopte);

	for (i = 0; i < 16; i++)
		*(iopte + i) = (pa & IOLARGE_MASK) | prot | IOPTE_LARGE;
	flush_iopte_range(iopte, iopte + 15);
	return 0;
}

static int
iopgtable_store_entry_core(struct omap_iommu *obj, struct iotlb_entry *e)
{
	int (*fn)(struct omap_iommu *, u32, u32, u32);
	u32 prot;
	int err;

	if (!obj || !e)
		return -EINVAL;

	switch (e->pgsz) {
	case MMU_CAM_PGSZ_16M:
		fn = iopgd_alloc_super;
		break;
	case MMU_CAM_PGSZ_1M:
		fn = iopgd_alloc_section;
		break;
	case MMU_CAM_PGSZ_64K:
		fn = iopte_alloc_large;
		break;
	case MMU_CAM_PGSZ_4K:
		fn = iopte_alloc_page;
		break;
	default:
		fn = NULL;
		break;
	}

	if (WARN_ON(!fn))
		return -EINVAL;

	prot = get_iopte_attr(e);

	spin_lock(&obj->page_table_lock);
	err = fn(obj, e->da, e->pa, prot);
	spin_unlock(&obj->page_table_lock);

	return err;
}

/**
 * omap_iopgtable_store_entry - Make an iommu pte entry
 * @obj: target iommu
 * @e: an iommu tlb entry info
 **/
static int
omap_iopgtable_store_entry(struct omap_iommu *obj, struct iotlb_entry *e)
{
	int err;

	flush_iotlb_page(obj, e->da);
	err = iopgtable_store_entry_core(obj, e);
	if (!err)
		prefetch_iotlb_entry(obj, e);
	return err;
}

/**
 * iopgtable_lookup_entry - Lookup an iommu pte entry
 * @obj: target iommu
 * @da: iommu device virtual address
 * @ppgd: iommu pgd entry pointer to be returned
 * @ppte: iommu pte entry pointer to be returned
 **/
static void
iopgtable_lookup_entry(struct omap_iommu *obj, u32 da, u32 **ppgd, u32 **ppte)
{
	u32 *iopgd, *iopte = NULL;

	iopgd = iopgd_offset(obj, da);
	if (!*iopgd)
		goto out;

	if (iopgd_is_table(*iopgd))
		iopte = iopte_offset(iopgd, da);
out:
	*ppgd = iopgd;
	*ppte = iopte;
}

static size_t iopgtable_clear_entry_core(struct omap_iommu *obj, u32 da)
{
	size_t bytes;
	u32 *iopgd = iopgd_offset(obj, da);
	int nent = 1;

	if (!*iopgd)
		return 0;

	if (iopgd_is_table(*iopgd)) {
		int i;
		u32 *iopte = iopte_offset(iopgd, da);

		bytes = IOPTE_SIZE;
		if (*iopte & IOPTE_LARGE) {
			nent *= 16;
			/* rewind to the 1st entry */
			iopte = iopte_offset(iopgd, (da & IOLARGE_MASK));
		}
		bytes *= nent;
		memset(iopte, 0, nent * sizeof(*iopte));
		flush_iopte_range(iopte, iopte + nent - 1);

		/*
		 * walk the table to check whether it is still in use
		 */
		iopte = iopte_offset(iopgd, 0);
		for (i = 0; i < PTRS_PER_IOPTE; i++)
			if (iopte[i])
				goto out;

		iopte_free(iopte);
		nent = 1; /* for the next L1 entry */
	} else {
		bytes = IOPGD_SIZE;
		if ((*iopgd & IOPGD_SUPER) == IOPGD_SUPER) {
			nent *= 16;
			/* rewind to the 1st entry */
			iopgd = iopgd_offset(obj, (da & IOSUPER_MASK));
		}
		bytes *= nent;
	}
	memset(iopgd, 0, nent * sizeof(*iopgd));
	flush_iopgd_range(iopgd, iopgd + nent - 1);
out:
	return bytes;
}

/**
 * iopgtable_clear_entry - Remove an iommu pte entry
 * @obj: target iommu
 * @da: iommu device virtual address
 **/
static size_t iopgtable_clear_entry(struct omap_iommu *obj, u32 da)
{
	size_t bytes;

	spin_lock(&obj->page_table_lock);

	bytes = iopgtable_clear_entry_core(obj, da);
	flush_iotlb_page(obj, da);

	spin_unlock(&obj->page_table_lock);

	return bytes;
}

static void iopgtable_clear_entry_all(struct omap_iommu *obj)
{
	int i;

	spin_lock(&obj->page_table_lock);

	for (i = 0; i < PTRS_PER_IOPGD; i++) {
		u32 da;
		u32 *iopgd;

		da = i << IOPGD_SHIFT;
		iopgd = iopgd_offset(obj, da);

		if (!*iopgd)
			continue;

		if (iopgd_is_table(*iopgd))
			iopte_free(iopte_offset(iopgd, 0));

		*iopgd = 0;
		flush_iopgd_range(iopgd, iopgd);
	}

	flush_iotlb_all(obj);

	spin_unlock(&obj->page_table_lock);
}

/*
 * Device IOMMU generic operations
 */
static irqreturn_t iommu_fault_handler(int irq, void *data)
{
	u32 da, errs;
	u32 *iopgd, *iopte;
	struct omap_iommu *obj = data;
	struct iommu_domain *domain = obj->domain;
	struct omap_iommu_domain *omap_domain = to_omap_domain(domain);

	if (!omap_domain->iommu_dev)
		return IRQ_NONE;

	errs = iommu_report_fault(obj, &da);
	if (errs == 0)
		return IRQ_HANDLED;

	/* Fault callback or TLB/PTE Dynamic loading */
	if (!report_iommu_fault(domain, obj->dev, da, 0))
		return IRQ_HANDLED;

	iommu_disable(obj);

	iopgd = iopgd_offset(obj, da);

	if (!iopgd_is_table(*iopgd)) {
		dev_err(obj->dev, "%s: errs:0x%08x da:0x%08x pgd:0x%p *pgd:0x%08x\n",
			obj->name, errs, da, iopgd, *iopgd);
		return IRQ_NONE;
	}

	iopte = iopte_offset(iopgd, da);

	dev_err(obj->dev, "%s: errs:0x%08x da:0x%08x pgd:0x%p *pgd:0x%08x pte:0x%p *pte:0x%08x\n",
		obj->name, errs, da, iopgd, *iopgd, iopte, *iopte);

	return IRQ_NONE;
}

static int device_match_by_alias(struct device *dev, void *data)
{
	struct omap_iommu *obj = to_iommu(dev);
	const char *name = data;

	pr_debug("%s: %s %s\n", __func__, obj->name, name);

	return strcmp(obj->name, name) == 0;
}

/**
 * omap_iommu_attach() - attach iommu device to an iommu domain
 * @name:	name of target omap iommu device
 * @iopgd:	page table
 **/
static struct omap_iommu *omap_iommu_attach(const char *name, u32 *iopgd)
{
	int err;
	struct device *dev;
	struct omap_iommu *obj;

	dev = driver_find_device(&omap_iommu_driver.driver, NULL, (void *)name,
				 device_match_by_alias);
	if (!dev)
		return ERR_PTR(-ENODEV);

	obj = to_iommu(dev);

	spin_lock(&obj->iommu_lock);

	obj->iopgd = iopgd;
	err = iommu_enable(obj);
	if (err)
		goto err_enable;
	flush_iotlb_all(obj);

	spin_unlock(&obj->iommu_lock);

	dev_dbg(obj->dev, "%s: %s\n", __func__, obj->name);
	return obj;

err_enable:
	spin_unlock(&obj->iommu_lock);
	return ERR_PTR(err);
}

/**
 * omap_iommu_detach - release iommu device
 * @obj:	target iommu
 **/
static void omap_iommu_detach(struct omap_iommu *obj)
{
	if (!obj || IS_ERR(obj))
		return;

	spin_lock(&obj->iommu_lock);

	iommu_disable(obj);
	obj->iopgd = NULL;

	spin_unlock(&obj->iommu_lock);

	dev_dbg(obj->dev, "%s: %s\n", __func__, obj->name);
}

static int omap_iommu_dra7_get_dsp_system_cfg(struct platform_device *pdev,
					      struct omap_iommu *obj)
{
	struct device_node *np = pdev->dev.of_node;
	int ret;

	if (!of_device_is_compatible(np, "ti,dra7-dsp-iommu"))
		return 0;

	if (!of_property_read_bool(np, "ti,syscon-mmuconfig")) {
		dev_err(&pdev->dev, "ti,syscon-mmuconfig property is missing\n");
		return -EINVAL;
	}

	obj->syscfg =
		syscon_regmap_lookup_by_phandle(np, "ti,syscon-mmuconfig");
	if (IS_ERR(obj->syscfg)) {
		/* can fail with -EPROBE_DEFER */
		ret = PTR_ERR(obj->syscfg);
		return ret;
	}

	if (of_property_read_u32_index(np, "ti,syscon-mmuconfig", 1,
				       &obj->id)) {
		dev_err(&pdev->dev, "couldn't get the IOMMU instance id within subsystem\n");
		return -EINVAL;
	}

	if (obj->id != 0 && obj->id != 1) {
		dev_err(&pdev->dev, "invalid IOMMU instance id\n");
		return -EINVAL;
	}

	return 0;
}

/*
 * OMAP Device MMU (IOMMU) detection
 */
static int omap_iommu_probe(struct platform_device *pdev)
{
	int err = -ENODEV;
	int irq;
	struct omap_iommu *obj;
	struct resource *res;
	struct iommu_platform_data *pdata = dev_get_platdata(&pdev->dev);
	struct device_node *of = pdev->dev.of_node;

	obj = devm_kzalloc(&pdev->dev, sizeof(*obj) + MMU_REG_SIZE, GFP_KERNEL);
	if (!obj)
		return -ENOMEM;

	if (of) {
		obj->name = dev_name(&pdev->dev);
		obj->nr_tlb_entries = 32;
		err = of_property_read_u32(of, "ti,#tlb-entries",
					   &obj->nr_tlb_entries);
		if (err && err != -EINVAL)
			return err;
		if (obj->nr_tlb_entries != 32 && obj->nr_tlb_entries != 8)
			return -EINVAL;
		if (of_find_property(of, "ti,iommu-bus-err-back", NULL))
			obj->has_bus_err_back = MMU_GP_REG_BUS_ERR_BACK_EN;
	} else {
		obj->nr_tlb_entries = pdata->nr_tlb_entries;
		obj->name = pdata->name;
	}

	obj->dev = &pdev->dev;
	obj->ctx = (void *)obj + sizeof(*obj);

	spin_lock_init(&obj->iommu_lock);
	spin_lock_init(&obj->page_table_lock);

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	obj->regbase = devm_ioremap_resource(obj->dev, res);
	if (IS_ERR(obj->regbase))
		return PTR_ERR(obj->regbase);

	err = omap_iommu_dra7_get_dsp_system_cfg(pdev, obj);
	if (err)
		return err;

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return -ENODEV;

	err = devm_request_irq(obj->dev, irq, iommu_fault_handler, IRQF_SHARED,
			       dev_name(obj->dev), obj);
	if (err < 0)
		return err;
	platform_set_drvdata(pdev, obj);

	pm_runtime_irq_safe(obj->dev);
	pm_runtime_enable(obj->dev);

	omap_iommu_debugfs_add(obj);

	dev_info(&pdev->dev, "%s registered\n", obj->name);
	return 0;
}

static int omap_iommu_remove(struct platform_device *pdev)
{
	struct omap_iommu *obj = platform_get_drvdata(pdev);

	omap_iommu_debugfs_remove(obj);

	pm_runtime_disable(obj->dev);

	dev_info(&pdev->dev, "%s removed\n", obj->name);
	return 0;
}

static const struct of_device_id omap_iommu_of_match[] = {
	{ .compatible = "ti,omap2-iommu" },
	{ .compatible = "ti,omap4-iommu" },
	{ .compatible = "ti,dra7-iommu" },
	{ .compatible = "ti,dra7-dsp-iommu" },
	{},
};

static struct platform_driver omap_iommu_driver = {
	.probe	= omap_iommu_probe,
	.remove	= omap_iommu_remove,
	.driver	= {
		.name	= "omap-iommu",
		.of_match_table = of_match_ptr(omap_iommu_of_match),
	},
};

static void iopte_cachep_ctor(void *iopte)
{
	clean_dcache_area(iopte, IOPTE_TABLE_SIZE);
}

static u32 iotlb_init_entry(struct iotlb_entry *e, u32 da, u32 pa, int pgsz)
{
	memset(e, 0, sizeof(*e));

	e->da		= da;
	e->pa		= pa;
	e->valid	= MMU_CAM_V;
	e->pgsz		= pgsz;
	e->endian	= MMU_RAM_ENDIAN_LITTLE;
	e->elsz		= MMU_RAM_ELSZ_8;
	e->mixed	= 0;

	return iopgsz_to_bytes(e->pgsz);
}

static int omap_iommu_map(struct iommu_domain *domain, unsigned long da,
			  phys_addr_t pa, size_t bytes, int prot)
{
	struct omap_iommu_domain *omap_domain = to_omap_domain(domain);
	struct omap_iommu *oiommu = omap_domain->iommu_dev;
	struct device *dev = oiommu->dev;
	struct iotlb_entry e;
	int omap_pgsz;
	int ret;

	omap_pgsz = bytes_to_iopgsz(bytes);
	if (omap_pgsz < 0) {
		dev_err(dev, "invalid size to map: %zu\n", bytes);
		return -EINVAL;
	}

	dev_dbg(dev, "mapping da 0x%lx to pa %pa size 0x%zx\n", da, &pa, bytes);

	iotlb_init_entry(&e, da, pa, omap_pgsz);

	ret = omap_iopgtable_store_entry(oiommu, &e);
	if (ret)
		dev_err(dev, "omap_iopgtable_store_entry failed: %d\n", ret);

	return ret;
}

static size_t omap_iommu_unmap(struct iommu_domain *domain, unsigned long da,
			       size_t size)
{
	struct omap_iommu_domain *omap_domain = to_omap_domain(domain);
	struct omap_iommu *oiommu = omap_domain->iommu_dev;
	struct device *dev = oiommu->dev;

	dev_dbg(dev, "unmapping da 0x%lx size %zu\n", da, size);

	return iopgtable_clear_entry(oiommu, da);
}

static int
omap_iommu_attach_dev(struct iommu_domain *domain, struct device *dev)
{
	struct omap_iommu_domain *omap_domain = to_omap_domain(domain);
	struct omap_iommu *oiommu;
	struct omap_iommu_arch_data *arch_data = dev->archdata.iommu;
	int ret = 0;

	if (!arch_data || !arch_data->name) {
		dev_err(dev, "device doesn't have an associated iommu\n");
		return -EINVAL;
	}

	spin_lock(&omap_domain->lock);

	/* only a single device is supported per domain for now */
	if (omap_domain->iommu_dev) {
		dev_err(dev, "iommu domain is already attached\n");
		ret = -EBUSY;
		goto out;
	}

	/* get a handle to and enable the omap iommu */
	oiommu = omap_iommu_attach(arch_data->name, omap_domain->pgtable);
	if (IS_ERR(oiommu)) {
		ret = PTR_ERR(oiommu);
		dev_err(dev, "can't get omap iommu: %d\n", ret);
		goto out;
	}

	omap_domain->iommu_dev = arch_data->iommu_dev = oiommu;
	omap_domain->dev = dev;
	oiommu->domain = domain;

out:
	spin_unlock(&omap_domain->lock);
	return ret;
}

static void _omap_iommu_detach_dev(struct omap_iommu_domain *omap_domain,
				   struct device *dev)
{
	struct omap_iommu *oiommu = dev_to_omap_iommu(dev);
	struct omap_iommu_arch_data *arch_data = dev->archdata.iommu;

	/* only a single device is supported per domain for now */
	if (omap_domain->iommu_dev != oiommu) {
		dev_err(dev, "invalid iommu device\n");
		return;
	}

	iopgtable_clear_entry_all(oiommu);

	omap_iommu_detach(oiommu);

	omap_domain->iommu_dev = arch_data->iommu_dev = NULL;
	omap_domain->dev = NULL;
	oiommu->domain = NULL;
}

static void omap_iommu_detach_dev(struct iommu_domain *domain,
				  struct device *dev)
{
	struct omap_iommu_domain *omap_domain = to_omap_domain(domain);

	spin_lock(&omap_domain->lock);
	_omap_iommu_detach_dev(omap_domain, dev);
	spin_unlock(&omap_domain->lock);
}

static struct iommu_domain *omap_iommu_domain_alloc(unsigned type)
{
	struct omap_iommu_domain *omap_domain;

	if (type != IOMMU_DOMAIN_UNMANAGED)
		return NULL;

	omap_domain = kzalloc(sizeof(*omap_domain), GFP_KERNEL);
	if (!omap_domain)
		goto out;

	omap_domain->pgtable = kzalloc(IOPGD_TABLE_SIZE, GFP_KERNEL);
	if (!omap_domain->pgtable)
		goto fail_nomem;

	/*
	 * should never fail, but please keep this around to ensure
	 * we keep the hardware happy
	 */
	if (WARN_ON(!IS_ALIGNED((long)omap_domain->pgtable, IOPGD_TABLE_SIZE)))
		goto fail_align;

	clean_dcache_area(omap_domain->pgtable, IOPGD_TABLE_SIZE);
	spin_lock_init(&omap_domain->lock);

	omap_domain->domain.geometry.aperture_start = 0;
	omap_domain->domain.geometry.aperture_end   = (1ULL << 32) - 1;
	omap_domain->domain.geometry.force_aperture = true;

	return &omap_domain->domain;

fail_align:
	kfree(omap_domain->pgtable);
fail_nomem:
	kfree(omap_domain);
out:
	return NULL;
}

static void omap_iommu_domain_free(struct iommu_domain *domain)
{
	struct omap_iommu_domain *omap_domain = to_omap_domain(domain);

	/*
	 * An iommu device may still be attached
	 * (currently, only one device can be attached)
	 */
	if (omap_domain->iommu_dev)
		_omap_iommu_detach_dev(omap_domain, omap_domain->dev);

	kfree(omap_domain->pgtable);
	kfree(omap_domain);
}

static phys_addr_t omap_iommu_iova_to_phys(struct iommu_domain *domain,
					   dma_addr_t da)
{
	struct omap_iommu_domain *omap_domain = to_omap_domain(domain);
	struct omap_iommu *oiommu = omap_domain->iommu_dev;
	struct device *dev = oiommu->dev;
	u32 *pgd, *pte;
	phys_addr_t ret = 0;

	iopgtable_lookup_entry(oiommu, da, &pgd, &pte);

	if (pte) {
		if (iopte_is_small(*pte))
			ret = omap_iommu_translate(*pte, da, IOPTE_MASK);
		else if (iopte_is_large(*pte))
			ret = omap_iommu_translate(*pte, da, IOLARGE_MASK);
		else
			dev_err(dev, "bogus pte 0x%x, da 0x%llx", *pte,
				(unsigned long long)da);
	} else {
		if (iopgd_is_section(*pgd))
			ret = omap_iommu_translate(*pgd, da, IOSECTION_MASK);
		else if (iopgd_is_super(*pgd))
			ret = omap_iommu_translate(*pgd, da, IOSUPER_MASK);
		else
			dev_err(dev, "bogus pgd 0x%x, da 0x%llx", *pgd,
				(unsigned long long)da);
	}

	return ret;
}

static int omap_iommu_add_device(struct device *dev)
{
	struct omap_iommu_arch_data *arch_data;
	struct device_node *np;
	struct platform_device *pdev;

	/*
	 * Allocate the archdata iommu structure for DT-based devices.
	 *
	 * TODO: Simplify this when removing non-DT support completely from the
	 * IOMMU users.
	 */
	if (!dev->of_node)
		return 0;

	np = of_parse_phandle(dev->of_node, "iommus", 0);
	if (!np)
		return 0;

	pdev = of_find_device_by_node(np);
	if (WARN_ON(!pdev)) {
		of_node_put(np);
		return -EINVAL;
	}

	arch_data = kzalloc(sizeof(*arch_data), GFP_KERNEL);
	if (!arch_data) {
		of_node_put(np);
		return -ENOMEM;
	}

	arch_data->name = kstrdup(dev_name(&pdev->dev), GFP_KERNEL);
	dev->archdata.iommu = arch_data;

	of_node_put(np);

	return 0;
}

static void omap_iommu_remove_device(struct device *dev)
{
	struct omap_iommu_arch_data *arch_data = dev->archdata.iommu;

	if (!dev->of_node || !arch_data)
		return;

	kfree(arch_data->name);
	kfree(arch_data);
}

static const struct iommu_ops omap_iommu_ops = {
	.domain_alloc	= omap_iommu_domain_alloc,
	.domain_free	= omap_iommu_domain_free,
	.attach_dev	= omap_iommu_attach_dev,
	.detach_dev	= omap_iommu_detach_dev,
	.map		= omap_iommu_map,
	.unmap		= omap_iommu_unmap,
	.map_sg		= default_iommu_map_sg,
	.iova_to_phys	= omap_iommu_iova_to_phys,
	.add_device	= omap_iommu_add_device,
	.remove_device	= omap_iommu_remove_device,
	.pgsize_bitmap	= OMAP_IOMMU_PGSIZES,
};
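
/*
 * A minimal consumer sketch (illustrative only; error handling trimmed):
 * clients go through the generic IOMMU core, which dispatches to the
 * callbacks above. "dev", "da" and "pa" are hypothetical; dev would
 * reference this IOMMU via an "iommus" DT phandle.
 *
 *	struct iommu_domain *dom = iommu_domain_alloc(&platform_bus_type);
 *
 *	if (dom && !iommu_attach_device(dom, dev)) {
 *		iommu_map(dom, da, pa, SZ_1M, IOMMU_READ | IOMMU_WRITE);
 *		...
 *		iommu_unmap(dom, da, SZ_1M);
 *		iommu_detach_device(dom, dev);
 *	}
 *	if (dom)
 *		iommu_domain_free(dom);
 */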

static int __init omap_iommu_init(void)
{
	struct kmem_cache *p;
	const unsigned long flags = SLAB_HWCACHE_ALIGN;
	size_t align = 1 << 10; /* L2 pagetable alignment */
	struct device_node *np;

	np = of_find_matching_node(NULL, omap_iommu_of_match);
	if (!np)
		return 0;

	of_node_put(np);

	p = kmem_cache_create("iopte_cache", IOPTE_TABLE_SIZE, align, flags,
			      iopte_cachep_ctor);
	if (!p)
		return -ENOMEM;
	iopte_cachep = p;

	bus_set_iommu(&platform_bus_type, &omap_iommu_ops);

	omap_iommu_debugfs_init();

	return platform_driver_register(&omap_iommu_driver);
}
/* must be ready before omap3isp is probed */
subsys_initcall(omap_iommu_init);