/* linux/drivers/iommu/exynos_iommu.c
 *
 * Copyright (c) 2011 Samsung Electronics Co., Ltd.
 *		http://www.samsung.com
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#ifdef CONFIG_EXYNOS_IOMMU_DEBUG
#define DEBUG
#endif
#include <linux/io.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/pm_runtime.h>
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/iommu.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/memblock.h>
#include <linux/export.h>

#include <asm/cacheflush.h>
#include <asm/pgtable.h>
typedef u32 sysmmu_iova_t;
typedef u32 sysmmu_pte_t;
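
/*
 * The System MMU uses a two-level page table, as the sizes and macros
 * below imply: a 16KB first-level table of 4096 entries, each covering
 * 1MB of IO virtual address space, and 1KB second-level tables of 256
 * entries, each mapping a 4KB small page (groups of 16 consecutive
 * entries form a 64KB large page).
 */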
/* We do not consider super section mapping (16MB) */
#define SECT_ORDER 20
#define LPAGE_ORDER 16
#define SPAGE_ORDER 12
#define SECT_SIZE (1 << SECT_ORDER)
#define LPAGE_SIZE (1 << LPAGE_ORDER)
#define SPAGE_SIZE (1 << SPAGE_ORDER)

#define SECT_MASK (~(SECT_SIZE - 1))
#define LPAGE_MASK (~(LPAGE_SIZE - 1))
#define SPAGE_MASK (~(SPAGE_SIZE - 1))
#define lv1ent_fault(sent) (((*(sent) & 3) == 0) || ((*(sent) & 3) == 3))
#define lv1ent_page(sent) ((*(sent) & 3) == 1)
#define lv1ent_section(sent) ((*(sent) & 3) == 2)

#define lv2ent_fault(pent) ((*(pent) & 3) == 0)
#define lv2ent_small(pent) ((*(pent) & 2) == 2)
#define lv2ent_large(pent) ((*(pent) & 3) == 1)
static u32 sysmmu_page_offset(sysmmu_iova_t iova, u32 size)
{
	return iova & (size - 1);
}
#define section_phys(sent) (*(sent) & SECT_MASK)
#define section_offs(iova) sysmmu_page_offset((iova), SECT_SIZE)
#define lpage_phys(pent) (*(pent) & LPAGE_MASK)
#define lpage_offs(iova) sysmmu_page_offset((iova), LPAGE_SIZE)
#define spage_phys(pent) (*(pent) & SPAGE_MASK)
#define spage_offs(iova) sysmmu_page_offset((iova), SPAGE_SIZE)

#define NUM_LV1ENTRIES 4096
#define NUM_LV2ENTRIES (SECT_SIZE / SPAGE_SIZE)
static u32 lv1ent_offset(sysmmu_iova_t iova)
{
	return iova >> SECT_ORDER;
}

static u32 lv2ent_offset(sysmmu_iova_t iova)
{
	return (iova >> SPAGE_ORDER) & (NUM_LV2ENTRIES - 1);
}
#define LV2TABLE_SIZE (NUM_LV2ENTRIES * sizeof(sysmmu_pte_t))

#define SPAGES_PER_LPAGE (LPAGE_SIZE / SPAGE_SIZE)

#define lv2table_base(sent) (*(sent) & 0xFFFFFC00)

#define mk_lv1ent_sect(pa) ((pa) | 2)
#define mk_lv1ent_page(pa) ((pa) | 1)
#define mk_lv2ent_lpage(pa) ((pa) | 1)
#define mk_lv2ent_spage(pa) ((pa) | 2)
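
/*
 * Entry encoding, per the macros above: the low two bits of a first-level
 * entry select section (2), second-level table pointer (1) or fault (0/3);
 * in a second-level entry they select large page (1), small page (2) or
 * fault (0). lv2table_base() masks off the low 10 bits, so second-level
 * tables must be 1KB-aligned.
 */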
#define CTRL_ENABLE	0x5
#define CTRL_BLOCK	0x7
#define CTRL_DISABLE	0x0

#define REG_MMU_CTRL		0x000
#define REG_MMU_CFG		0x004
#define REG_MMU_STATUS		0x008
#define REG_MMU_FLUSH		0x00C
#define REG_MMU_FLUSH_ENTRY	0x010
#define REG_PT_BASE_ADDR	0x014
#define REG_INT_STATUS		0x018
#define REG_INT_CLEAR		0x01C

#define REG_PAGE_FAULT_ADDR	0x024
#define REG_AW_FAULT_ADDR	0x028
#define REG_AR_FAULT_ADDR	0x02C
#define REG_DEFAULT_SLAVE_ADDR	0x030

#define REG_MMU_VERSION		0x034

#define REG_PB0_SADDR		0x04C
#define REG_PB0_EADDR		0x050
#define REG_PB1_SADDR		0x054
#define REG_PB1_EADDR		0x058

#define has_sysmmu(dev)		(dev->archdata.iommu != NULL)
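
/*
 * Second-level tables are only 1KB (LV2TABLE_SIZE), so they are carved
 * out of a dedicated kmem cache rather than whole pages; the cache is
 * created in exynos_iommu_init() with LV2TABLE_SIZE alignment, which
 * alloc_lv2entry() asserts below.
 */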
static struct kmem_cache *lv2table_kmem_cache;
static sysmmu_pte_t *section_entry(sysmmu_pte_t *pgtable, sysmmu_iova_t iova)
{
	return pgtable + lv1ent_offset(iova);
}

static sysmmu_pte_t *page_entry(sysmmu_pte_t *sent, sysmmu_iova_t iova)
{
	return (sysmmu_pte_t *)phys_to_virt(
				lv2table_base(sent)) + lv2ent_offset(iova);
}
enum exynos_sysmmu_inttype {
	SYSMMU_PAGEFAULT,
	SYSMMU_AR_MULTIHIT,
	SYSMMU_AW_MULTIHIT,
	SYSMMU_BUSERROR,
	SYSMMU_AR_SECURITY,
	SYSMMU_AR_ACCESS,
	SYSMMU_AW_SECURITY,
	SYSMMU_AW_PROTECTION, /* 7 */
	SYSMMU_FAULT_UNKNOWN,
	SYSMMU_FAULTS_NUM
};

static unsigned short fault_reg_offset[SYSMMU_FAULTS_NUM] = {
	REG_PAGE_FAULT_ADDR,
	REG_AR_FAULT_ADDR,
	REG_AW_FAULT_ADDR,
	REG_DEFAULT_SLAVE_ADDR,
	REG_AR_FAULT_ADDR,
	REG_AR_FAULT_ADDR,
	REG_AW_FAULT_ADDR,
	REG_AW_FAULT_ADDR
};
static char *sysmmu_fault_name[SYSMMU_FAULTS_NUM] = {
	"PAGE FAULT",
	"AR MULTI-HIT FAULT",
	"AW MULTI-HIT FAULT",
	"BUS ERROR",
	"AR SECURITY PROTECTION FAULT",
	"AR ACCESS PROTECTION FAULT",
	"AW SECURITY PROTECTION FAULT",
	"AW ACCESS PROTECTION FAULT",
	"UNKNOWN FAULT"
};
/* attached to dev.archdata.iommu of the master device */
struct exynos_iommu_owner {
	struct list_head client; /* entry of exynos_iommu_domain.clients */
	struct device *dev;
	struct device *sysmmu;
	struct iommu_domain *domain;
	void *vmm_data;		/* IO virtual memory manager's data */
	spinlock_t lock;	/* Lock to preserve consistency of System MMU */
};
struct exynos_iommu_domain {
	struct list_head clients; /* list of sysmmu_drvdata.node */
	sysmmu_pte_t *pgtable;	/* lv1 page table, 16KB */
	short *lv2entcnt;	/* free lv2 entry counter for each section */
	spinlock_t lock;	/* lock for this structure */
	spinlock_t pgtablelock;	/* lock for modifying page table @ pgtable */
};
struct sysmmu_drvdata {
	struct device *sysmmu;	/* System MMU's device descriptor */
	struct device *master;	/* Owner of system MMU */
	void __iomem *sfrbase;
	struct clk *clk;
	struct clk *clk_master;
	int activations;
	spinlock_t lock;
	struct iommu_domain *domain;
	phys_addr_t pgtable;
};
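
/*
 * data->activations is a reference count of enable requests against one
 * System MMU instance: the hardware is only touched on the 0 -> 1 and
 * 1 -> 0 transitions reported by the two helpers below.
 */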
static bool set_sysmmu_active(struct sysmmu_drvdata *data)
{
	/* return true if the System MMU was not active previously
	   and it needs to be initialized */
	return ++data->activations == 1;
}

static bool set_sysmmu_inactive(struct sysmmu_drvdata *data)
{
	/* return true if the System MMU needs to be disabled */
	BUG_ON(data->activations < 1);
	return --data->activations == 0;
}

static bool is_sysmmu_active(struct sysmmu_drvdata *data)
{
	return data->activations > 0;
}
static void sysmmu_unblock(void __iomem *sfrbase)
{
	__raw_writel(CTRL_ENABLE, sfrbase + REG_MMU_CTRL);
}
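
/*
 * Writing CTRL_BLOCK stalls address translation until the MMU is enabled
 * again; bit 0 of REG_MMU_STATUS reads back 1 once the MMU has actually
 * entered the blocked state. sysmmu_block() polls for that with a simple
 * bounded retry loop and undoes the request if blocking never completes.
 */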
static bool sysmmu_block(void __iomem *sfrbase)
{
	int i = 120;

	__raw_writel(CTRL_BLOCK, sfrbase + REG_MMU_CTRL);
	while ((i > 0) && !(__raw_readl(sfrbase + REG_MMU_STATUS) & 1))
		--i;

	if (!(__raw_readl(sfrbase + REG_MMU_STATUS) & 1)) {
		sysmmu_unblock(sfrbase);
		return false;
	}

	return true;
}
static void __sysmmu_tlb_invalidate(void __iomem *sfrbase)
{
	__raw_writel(0x1, sfrbase + REG_MMU_FLUSH);
}
static void __sysmmu_tlb_invalidate_entry(void __iomem *sfrbase,
				sysmmu_iova_t iova, unsigned int num_inv)
{
	unsigned int i;

	for (i = 0; i < num_inv; i++) {
		__raw_writel((iova & SPAGE_MASK) | 1,
				sfrbase + REG_MMU_FLUSH_ENTRY);
		iova += SPAGE_SIZE;
	}
}
static void __sysmmu_set_ptbase(void __iomem *sfrbase,
				phys_addr_t pgd)
{
	__raw_writel(pgd, sfrbase + REG_PT_BASE_ADDR);

	__sysmmu_tlb_invalidate(sfrbase);
}
static void show_fault_information(const char *name,
		enum exynos_sysmmu_inttype itype,
		phys_addr_t pgtable_base, sysmmu_iova_t fault_addr)
{
	sysmmu_pte_t *ent;

	if ((itype >= SYSMMU_FAULTS_NUM) || (itype < SYSMMU_PAGEFAULT))
		itype = SYSMMU_FAULT_UNKNOWN;

	pr_err("%s occurred at %#x by %s(Page table base: %pa)\n",
		sysmmu_fault_name[itype], fault_addr, name, &pgtable_base);

	ent = section_entry(phys_to_virt(pgtable_base), fault_addr);
	pr_err("\tLv1 entry: %#x\n", *ent);

	if (lv1ent_page(ent)) {
		ent = page_entry(ent, fault_addr);
		pr_err("\t Lv2 entry: %#x\n", *ent);
	}
}
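
/*
 * Fault interrupt handling: decode the fault type from the interrupt
 * status register, read the faulting IOVA from the per-type fault address
 * register, report it via report_iommu_fault(), then clear the interrupt
 * and unblock the MMU. An unrecovered fault is fatal (BUG_ON), since the
 * master would otherwise keep faulting on the same access.
 */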
static irqreturn_t exynos_sysmmu_irq(int irq, void *dev_id)
{
	/* SYSMMU is in blocked state when an interrupt occurs */
	struct sysmmu_drvdata *data = dev_id;
	enum exynos_sysmmu_inttype itype;
	sysmmu_iova_t addr = -1;
	int ret = -ENOSYS;

	WARN_ON(!is_sysmmu_active(data));

	spin_lock(&data->lock);

	if (!IS_ERR(data->clk_master))
		clk_enable(data->clk_master);

	itype = (enum exynos_sysmmu_inttype)
		__ffs(__raw_readl(data->sfrbase + REG_INT_STATUS));
	if (WARN_ON(!((itype >= 0) && (itype < SYSMMU_FAULT_UNKNOWN))))
		itype = SYSMMU_FAULT_UNKNOWN;
	else
		addr = __raw_readl(data->sfrbase + fault_reg_offset[itype]);

	if (itype == SYSMMU_FAULT_UNKNOWN) {
		pr_err("%s: Fault was not caused by System MMU '%s'!\n",
			__func__, dev_name(data->sysmmu));
		pr_err("%s: Please check if IRQ is correctly configured.\n",
			__func__);
	} else {
		unsigned int base =
				__raw_readl(data->sfrbase + REG_PT_BASE_ADDR);
		show_fault_information(dev_name(data->sysmmu),
					itype, base, addr);
		if (data->domain)
			ret = report_iommu_fault(data->domain,
					data->master, addr, itype);
	}

	/* fault is not recovered by fault handler */
	BUG_ON(ret != 0);

	__raw_writel(1 << itype, data->sfrbase + REG_INT_CLEAR);

	sysmmu_unblock(data->sfrbase);

	if (!IS_ERR(data->clk_master))
		clk_disable(data->clk_master);

	spin_unlock(&data->lock);

	return IRQ_HANDLED;
}
static void __sysmmu_disable_nocount(struct sysmmu_drvdata *data)
{
	if (!IS_ERR(data->clk_master))
		clk_enable(data->clk_master);

	__raw_writel(CTRL_DISABLE, data->sfrbase + REG_MMU_CTRL);
	__raw_writel(0, data->sfrbase + REG_MMU_CFG);

	clk_disable(data->clk);
	if (!IS_ERR(data->clk_master))
		clk_disable(data->clk_master);
}
static bool __sysmmu_disable(struct sysmmu_drvdata *data)
{
	bool disabled;
	unsigned long flags;

	spin_lock_irqsave(&data->lock, flags);

	disabled = set_sysmmu_inactive(data);

	if (disabled) {
		data->pgtable = 0;
		data->domain = NULL;

		__sysmmu_disable_nocount(data);

		dev_dbg(data->sysmmu, "Disabled\n");
	} else {
		dev_dbg(data->sysmmu, "%d times left to disable\n",
					data->activations);
	}

	spin_unlock_irqrestore(&data->lock, flags);

	return disabled;
}
static void __sysmmu_init_config(struct sysmmu_drvdata *data)
{
	unsigned int cfg = 0;

	__raw_writel(cfg, data->sfrbase + REG_MMU_CFG);
}
static void __sysmmu_enable_nocount(struct sysmmu_drvdata *data)
{
	if (!IS_ERR(data->clk_master))
		clk_enable(data->clk_master);
	clk_enable(data->clk);

	__raw_writel(CTRL_BLOCK, data->sfrbase + REG_MMU_CTRL);

	__sysmmu_init_config(data);

	__sysmmu_set_ptbase(data->sfrbase, data->pgtable);

	__raw_writel(CTRL_ENABLE, data->sfrbase + REG_MMU_CTRL);

	if (!IS_ERR(data->clk_master))
		clk_disable(data->clk_master);
}
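
/*
 * __sysmmu_enable()/__sysmmu_disable() wrap the *_nocount variants with
 * the activation reference count: re-enabling with the same page table
 * returns 1 ("already enabled"), while a different page table is refused
 * with -EBUSY and the count is rolled back.
 */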
static int __sysmmu_enable(struct sysmmu_drvdata *data,
			phys_addr_t pgtable, struct iommu_domain *domain)
{
	int ret = 0;
	unsigned long flags;

	spin_lock_irqsave(&data->lock, flags);
	if (set_sysmmu_active(data)) {
		data->pgtable = pgtable;
		data->domain = domain;

		__sysmmu_enable_nocount(data);

		dev_dbg(data->sysmmu, "Enabled\n");
	} else {
		ret = (pgtable == data->pgtable) ? 1 : -EBUSY;

		dev_dbg(data->sysmmu, "already enabled\n");
	}

	if (WARN_ON(ret < 0))
		set_sysmmu_inactive(data); /* decrement count */

	spin_unlock_irqrestore(&data->lock, flags);

	return ret;
}
/* __exynos_sysmmu_enable: Enables System MMU
 *
 * returns -error if an error occurred and System MMU is not enabled,
 * 0 if the System MMU has just been enabled and 1 if System MMU was already
 * enabled before.
 */
static int __exynos_sysmmu_enable(struct device *dev, phys_addr_t pgtable,
				  struct iommu_domain *domain)
{
	int ret = 0;
	unsigned long flags;
	struct exynos_iommu_owner *owner = dev->archdata.iommu;
	struct sysmmu_drvdata *data;

	BUG_ON(!has_sysmmu(dev));

	spin_lock_irqsave(&owner->lock, flags);

	data = dev_get_drvdata(owner->sysmmu);

	ret = __sysmmu_enable(data, pgtable, domain);
	if (ret >= 0)
		data->master = dev;

	spin_unlock_irqrestore(&owner->lock, flags);

	return ret;
}
int exynos_sysmmu_enable(struct device *dev, phys_addr_t pgtable)
{
	BUG_ON(!memblock_is_memory(pgtable));

	return __exynos_sysmmu_enable(dev, pgtable, NULL);
}
static bool exynos_sysmmu_disable(struct device *dev)
{
	unsigned long flags;
	bool disabled = true;
	struct exynos_iommu_owner *owner = dev->archdata.iommu;
	struct sysmmu_drvdata *data;

	BUG_ON(!has_sysmmu(dev));

	spin_lock_irqsave(&owner->lock, flags);

	data = dev_get_drvdata(owner->sysmmu);

	disabled = __sysmmu_disable(data);
	if (disabled)
		data->master = NULL;

	spin_unlock_irqrestore(&owner->lock, flags);

	return disabled;
}
static void sysmmu_tlb_invalidate_entry(struct device *dev, sysmmu_iova_t iova,
					size_t size)
{
	struct exynos_iommu_owner *owner = dev->archdata.iommu;
	unsigned long flags;
	struct sysmmu_drvdata *data;

	data = dev_get_drvdata(owner->sysmmu);

	spin_lock_irqsave(&data->lock, flags);
	if (is_sysmmu_active(data)) {
		unsigned int maj;
		unsigned int num_inv = 1;

		if (!IS_ERR(data->clk_master))
			clk_enable(data->clk_master);

		maj = __raw_readl(data->sfrbase + REG_MMU_VERSION);
		/*
		 * L2TLB invalidation required
		 * 4KB page: 1 invalidation
		 * 64KB page: 16 invalidations
		 * 1MB page: 64 invalidations
		 * because the TLB is 8-way set-associative
		 * with 64 sets.
		 * A 1MB page can be cached in any of the sets;
		 * a 64KB page in one of 16 consecutive sets.
		 */
		if ((maj >> 28) == 2) /* major version number */
			num_inv = min_t(unsigned int, size / PAGE_SIZE, 64);

		if (sysmmu_block(data->sfrbase)) {
			__sysmmu_tlb_invalidate_entry(
				data->sfrbase, iova, num_inv);
			sysmmu_unblock(data->sfrbase);
		}
		if (!IS_ERR(data->clk_master))
			clk_disable(data->clk_master);
	} else {
		dev_dbg(dev, "disabled. Skipping TLB invalidation @ %#x\n",
			iova);
	}
	spin_unlock_irqrestore(&data->lock, flags);
}
void exynos_sysmmu_tlb_invalidate(struct device *dev)
{
	struct exynos_iommu_owner *owner = dev->archdata.iommu;
	unsigned long flags;
	struct sysmmu_drvdata *data;

	data = dev_get_drvdata(owner->sysmmu);

	spin_lock_irqsave(&data->lock, flags);
	if (is_sysmmu_active(data)) {
		if (!IS_ERR(data->clk_master))
			clk_enable(data->clk_master);
		if (sysmmu_block(data->sfrbase)) {
			__sysmmu_tlb_invalidate(data->sfrbase);
			sysmmu_unblock(data->sfrbase);
		}
		if (!IS_ERR(data->clk_master))
			clk_disable(data->clk_master);
	} else {
		dev_dbg(dev, "disabled. Skipping TLB invalidation\n");
	}
	spin_unlock_irqrestore(&data->lock, flags);
}
static int __init exynos_sysmmu_probe(struct platform_device *pdev)
{
	int irq, ret;
	struct device *dev = &pdev->dev;
	struct sysmmu_drvdata *data;
	struct resource *res;

	data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	data->sfrbase = devm_ioremap_resource(dev, res);
	if (IS_ERR(data->sfrbase))
		return PTR_ERR(data->sfrbase);

	irq = platform_get_irq(pdev, 0);
	if (irq <= 0) {
		dev_err(dev, "Unable to find IRQ resource\n");
		return irq;
	}

	ret = devm_request_irq(dev, irq, exynos_sysmmu_irq, 0,
				dev_name(dev), data);
	if (ret) {
		dev_err(dev, "Unable to register handler of irq %d\n", irq);
		return ret;
	}

	data->clk = devm_clk_get(dev, "sysmmu");
	if (IS_ERR(data->clk)) {
		dev_err(dev, "Failed to get clock!\n");
		return PTR_ERR(data->clk);
	} else {
		ret = clk_prepare(data->clk);
		if (ret) {
			dev_err(dev, "Failed to prepare clk\n");
			return ret;
		}
	}

	data->clk_master = devm_clk_get(dev, "master");
	if (!IS_ERR(data->clk_master)) {
		ret = clk_prepare(data->clk_master);
		if (ret) {
			clk_unprepare(data->clk);
			dev_err(dev, "Failed to prepare master's clk\n");
			return ret;
		}
	}

	data->sysmmu = dev;
	spin_lock_init(&data->lock);

	platform_set_drvdata(pdev, data);

	pm_runtime_enable(dev);

	return 0;
}
static const struct of_device_id sysmmu_of_match[] __initconst = {
	{ .compatible	= "samsung,exynos-sysmmu", },
	{ },
};

static struct platform_driver exynos_sysmmu_driver __refdata = {
	.probe	= exynos_sysmmu_probe,
	.driver	= {
		.owner		= THIS_MODULE,
		.name		= "exynos-sysmmu",
		.of_match_table	= sysmmu_of_match,
	}
};
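
/*
 * The System MMU's table walker reads page tables from memory and is not
 * coherent with the CPU caches, so every page table update is cleaned
 * through both cache levels (dmac_flush_range for the inner cache,
 * outer_flush_range for the outer cache, e.g. an L2C-310) before the
 * hardware can observe it.
 */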
static inline void pgtable_flush(void *vastart, void *vaend)
{
	dmac_flush_range(vastart, vaend);
	outer_flush_range(virt_to_phys(vastart),
				virt_to_phys(vaend));
}
static int exynos_iommu_domain_init(struct iommu_domain *domain)
{
	struct exynos_iommu_domain *priv;

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	priv->pgtable = (sysmmu_pte_t *)__get_free_pages(
						GFP_KERNEL | __GFP_ZERO, 2);
	if (!priv->pgtable)
		goto err_pgtable;

	priv->lv2entcnt = (short *)__get_free_pages(
						GFP_KERNEL | __GFP_ZERO, 1);
	if (!priv->lv2entcnt)
		goto err_counter;

	pgtable_flush(priv->pgtable, priv->pgtable + NUM_LV1ENTRIES);

	spin_lock_init(&priv->lock);
	spin_lock_init(&priv->pgtablelock);
	INIT_LIST_HEAD(&priv->clients);

	domain->geometry.aperture_start = 0;
	domain->geometry.aperture_end   = ~0UL;
	domain->geometry.force_aperture = true;

	domain->priv = priv;
	return 0;

err_counter:
	free_pages((unsigned long)priv->pgtable, 2);
err_pgtable:
	kfree(priv);
	return -ENOMEM;
}
static void exynos_iommu_domain_destroy(struct iommu_domain *domain)
{
	struct exynos_iommu_domain *priv = domain->priv;
	struct exynos_iommu_owner *owner;
	unsigned long flags;
	int i;

	WARN_ON(!list_empty(&priv->clients));

	spin_lock_irqsave(&priv->lock, flags);

	list_for_each_entry(owner, &priv->clients, client) {
		while (!exynos_sysmmu_disable(owner->dev))
			; /* until System MMU is actually disabled */
	}

	while (!list_empty(&priv->clients))
		list_del_init(priv->clients.next);

	spin_unlock_irqrestore(&priv->lock, flags);

	for (i = 0; i < NUM_LV1ENTRIES; i++)
		if (lv1ent_page(priv->pgtable + i))
			kmem_cache_free(lv2table_kmem_cache,
				phys_to_virt(lv2table_base(priv->pgtable + i)));

	free_pages((unsigned long)priv->pgtable, 2);
	free_pages((unsigned long)priv->lv2entcnt, 1);
	kfree(domain->priv);
	domain->priv = NULL;
}
static int exynos_iommu_attach_device(struct iommu_domain *domain,
				      struct device *dev)
{
	struct exynos_iommu_owner *owner = dev->archdata.iommu;
	struct exynos_iommu_domain *priv = domain->priv;
	phys_addr_t pagetable = virt_to_phys(priv->pgtable);
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&priv->lock, flags);

	ret = __exynos_sysmmu_enable(dev, pagetable, domain);
	if (ret == 0) {
		list_add_tail(&owner->client, &priv->clients);
		owner->domain = domain;
	}

	spin_unlock_irqrestore(&priv->lock, flags);

	if (ret < 0) {
		dev_err(dev, "%s: Failed to attach IOMMU with pgtable %pa\n",
					__func__, &pagetable);
		return ret;
	}

	dev_dbg(dev, "%s: Attached IOMMU with pgtable %pa %s\n",
		__func__, &pagetable, (ret == 0) ? "" : ", again");

	return ret;
}
static void exynos_iommu_detach_device(struct iommu_domain *domain,
				       struct device *dev)
{
	struct exynos_iommu_owner *owner;
	struct exynos_iommu_domain *priv = domain->priv;
	phys_addr_t pagetable = virt_to_phys(priv->pgtable);
	unsigned long flags;

	spin_lock_irqsave(&priv->lock, flags);

	list_for_each_entry(owner, &priv->clients, client) {
		if (owner == dev->archdata.iommu) {
			if (exynos_sysmmu_disable(dev)) {
				list_del_init(&owner->client);
				owner->domain = NULL;
			}
			break;
		}
	}

	spin_unlock_irqrestore(&priv->lock, flags);

	if (owner == dev->archdata.iommu)
		dev_dbg(dev, "%s: Detached IOMMU with pgtable %pa\n",
					__func__, &pagetable);
	else
		dev_err(dev, "%s: No IOMMU is attached\n", __func__);
}
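
/*
 * Second-level tables are allocated lazily, on the first mapping that
 * needs one. GFP_ATOMIC is required because the caller holds
 * priv->pgtablelock with interrupts disabled.
 */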
static sysmmu_pte_t *alloc_lv2entry(sysmmu_pte_t *sent, sysmmu_iova_t iova,
					short *pgcounter)
{
	if (lv1ent_section(sent)) {
		WARN(1, "Trying mapping on %#08x mapped with 1MiB page", iova);
		return ERR_PTR(-EADDRINUSE);
	}

	if (lv1ent_fault(sent)) {
		sysmmu_pte_t *pent;

		pent = kmem_cache_zalloc(lv2table_kmem_cache, GFP_ATOMIC);
		BUG_ON((unsigned int)pent & (LV2TABLE_SIZE - 1));
		if (!pent)
			return ERR_PTR(-ENOMEM);

		*sent = mk_lv1ent_page(virt_to_phys(pent));
		*pgcounter = NUM_LV2ENTRIES;
		pgtable_flush(pent, pent + NUM_LV2ENTRIES);
		pgtable_flush(sent, sent + 1);
	}

	return page_entry(sent, iova);
}
static int lv1set_section(sysmmu_pte_t *sent, sysmmu_iova_t iova,
			  phys_addr_t paddr, short *pgcnt)
{
	if (lv1ent_section(sent)) {
		WARN(1, "Trying mapping on 1MiB@%#08x that is mapped",
			iova);
		return -EADDRINUSE;
	}

	if (lv1ent_page(sent)) {
		if (*pgcnt != NUM_LV2ENTRIES) {
			WARN(1, "Trying mapping on 1MiB@%#08x that is mapped",
				iova);
			return -EADDRINUSE;
		}

		kmem_cache_free(lv2table_kmem_cache, page_entry(sent, 0));
		*pgcnt = 0;
	}

	*sent = mk_lv1ent_sect(paddr);

	pgtable_flush(sent, sent + 1);

	return 0;
}
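
/*
 * A 64KB large page occupies SPAGES_PER_LPAGE (16) consecutive lv2
 * entries, all carrying the same base address; if any of the 16 slots is
 * already in use, the entries written so far are rolled back with
 * memset() before returning -EADDRINUSE.
 */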
static int lv2set_page(sysmmu_pte_t *pent, phys_addr_t paddr, size_t size,
			short *pgcnt)
{
	if (size == SPAGE_SIZE) {
		if (WARN_ON(!lv2ent_fault(pent)))
			return -EADDRINUSE;

		*pent = mk_lv2ent_spage(paddr);
		pgtable_flush(pent, pent + 1);
		*pgcnt -= 1;
	} else { /* size == LPAGE_SIZE */
		int i;

		for (i = 0; i < SPAGES_PER_LPAGE; i++, pent++) {
			if (WARN_ON(!lv2ent_fault(pent))) {
				if (i > 0)
					memset(pent - i, 0, sizeof(*pent) * i);
				return -EADDRINUSE;
			}

			*pent = mk_lv2ent_lpage(paddr);
		}
		pgtable_flush(pent - SPAGES_PER_LPAGE, pent);
		*pgcnt -= SPAGES_PER_LPAGE;
	}

	return 0;
}
static int exynos_iommu_map(struct iommu_domain *domain, unsigned long l_iova,
			    phys_addr_t paddr, size_t size, int prot)
{
	struct exynos_iommu_domain *priv = domain->priv;
	sysmmu_pte_t *entry;
	sysmmu_iova_t iova = (sysmmu_iova_t)l_iova;
	unsigned long flags;
	int ret = -ENOMEM;

	BUG_ON(priv->pgtable == NULL);

	spin_lock_irqsave(&priv->pgtablelock, flags);

	entry = section_entry(priv->pgtable, iova);

	if (size == SECT_SIZE) {
		ret = lv1set_section(entry, iova, paddr,
					&priv->lv2entcnt[lv1ent_offset(iova)]);
	} else {
		sysmmu_pte_t *pent;

		pent = alloc_lv2entry(entry, iova,
					&priv->lv2entcnt[lv1ent_offset(iova)]);

		if (IS_ERR(pent))
			ret = PTR_ERR(pent);
		else
			ret = lv2set_page(pent, paddr, size,
					&priv->lv2entcnt[lv1ent_offset(iova)]);
	}

	if (ret)
		pr_err("%s: Failed(%d) to map %#zx bytes @ %#x\n",
			__func__, ret, size, iova);

	spin_unlock_irqrestore(&priv->pgtablelock, flags);

	return ret;
}
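
/*
 * Unmapping works on exactly one entry at whatever size is mapped there:
 * the function returns the size actually unmapped (section, large or
 * small page), updates the free-entry counter for the section, and then
 * invalidates the TLB of every attached master for that IOVA.
 */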
static size_t exynos_iommu_unmap(struct iommu_domain *domain,
				 unsigned long l_iova, size_t size)
{
	struct exynos_iommu_domain *priv = domain->priv;
	struct exynos_iommu_owner *owner;
	sysmmu_iova_t iova = (sysmmu_iova_t)l_iova;
	sysmmu_pte_t *ent;
	size_t err_pgsize;
	unsigned long flags;

	BUG_ON(priv->pgtable == NULL);

	spin_lock_irqsave(&priv->pgtablelock, flags);

	ent = section_entry(priv->pgtable, iova);

	if (lv1ent_section(ent)) {
		if (WARN_ON(size < SECT_SIZE)) {
			err_pgsize = SECT_SIZE;
			goto err;
		}

		*ent = 0;
		pgtable_flush(ent, ent + 1);
		size = SECT_SIZE;
		goto done;
	}

	if (unlikely(lv1ent_fault(ent))) {
		if (size > SECT_SIZE)
			size = SECT_SIZE;
		goto done;
	}

	/* lv1ent_page(ent) == true here */

	ent = page_entry(ent, iova);

	if (unlikely(lv2ent_fault(ent))) {
		size = SPAGE_SIZE;
		goto done;
	}

	if (lv2ent_small(ent)) {
		*ent = 0;
		size = SPAGE_SIZE;
		pgtable_flush(ent, ent + 1);
		priv->lv2entcnt[lv1ent_offset(iova)] += 1;
		goto done;
	}

	/* lv2ent_large(ent) == true here */
	if (WARN_ON(size < LPAGE_SIZE)) {
		err_pgsize = LPAGE_SIZE;
		goto err;
	}

	memset(ent, 0, sizeof(*ent) * SPAGES_PER_LPAGE);
	pgtable_flush(ent, ent + SPAGES_PER_LPAGE);

	size = LPAGE_SIZE;
	priv->lv2entcnt[lv1ent_offset(iova)] += SPAGES_PER_LPAGE;
done:
	spin_unlock_irqrestore(&priv->pgtablelock, flags);

	spin_lock_irqsave(&priv->lock, flags);
	list_for_each_entry(owner, &priv->clients, client)
		sysmmu_tlb_invalidate_entry(owner->dev, iova, size);
	spin_unlock_irqrestore(&priv->lock, flags);

	return size;
err:
	spin_unlock_irqrestore(&priv->pgtablelock, flags);

	pr_err("%s: Failed: size(%#zx) @ %#x is smaller than page size %#zx\n",
		__func__, size, iova, err_pgsize);

	return 0;
}
static phys_addr_t exynos_iommu_iova_to_phys(struct iommu_domain *domain,
					     dma_addr_t iova)
{
	struct exynos_iommu_domain *priv = domain->priv;
	sysmmu_pte_t *entry;
	unsigned long flags;
	phys_addr_t phys = 0;

	spin_lock_irqsave(&priv->pgtablelock, flags);

	entry = section_entry(priv->pgtable, iova);

	if (lv1ent_section(entry)) {
		phys = section_phys(entry) + section_offs(iova);
	} else if (lv1ent_page(entry)) {
		entry = page_entry(entry, iova);

		if (lv2ent_large(entry))
			phys = lpage_phys(entry) + lpage_offs(iova);
		else if (lv2ent_small(entry))
			phys = spage_phys(entry) + spage_offs(iova);
	}

	spin_unlock_irqrestore(&priv->pgtablelock, flags);

	return phys;
}
static int exynos_iommu_add_device(struct device *dev)
{
	struct iommu_group *group;
	int ret;

	group = iommu_group_get(dev);

	if (!group) {
		group = iommu_group_alloc();
		if (IS_ERR(group)) {
			dev_err(dev, "Failed to allocate IOMMU group\n");
			return PTR_ERR(group);
		}
	}

	ret = iommu_group_add_device(group, dev);
	iommu_group_put(group);

	return ret;
}
static void exynos_iommu_remove_device(struct device *dev)
{
	iommu_group_remove_device(dev);
}
static struct iommu_ops exynos_iommu_ops = {
	.domain_init = exynos_iommu_domain_init,
	.domain_destroy = exynos_iommu_domain_destroy,
	.attach_dev = exynos_iommu_attach_device,
	.detach_dev = exynos_iommu_detach_device,
	.map = exynos_iommu_map,
	.unmap = exynos_iommu_unmap,
	.iova_to_phys = exynos_iommu_iova_to_phys,
	.add_device = exynos_iommu_add_device,
	.remove_device = exynos_iommu_remove_device,
	.pgsize_bitmap = SECT_SIZE | LPAGE_SIZE | SPAGE_SIZE,
};
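
/*
 * Initialization order matters: the lv2 table cache must exist before any
 * domain can be created, and the platform driver must be registered
 * before bus_set_iommu() starts invoking add_device callbacks for
 * platform devices; failures unwind in reverse order.
 */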
static int __init exynos_iommu_init(void)
{
	int ret;

	lv2table_kmem_cache = kmem_cache_create("exynos-iommu-lv2table",
				LV2TABLE_SIZE, LV2TABLE_SIZE, 0, NULL);
	if (!lv2table_kmem_cache) {
		pr_err("%s: Failed to create kmem cache\n", __func__);
		return -ENOMEM;
	}

	ret = platform_driver_register(&exynos_sysmmu_driver);
	if (ret) {
		pr_err("%s: Failed to register driver\n", __func__);
		goto err_reg_driver;
	}

	ret = bus_set_iommu(&platform_bus_type, &exynos_iommu_ops);
	if (ret) {
		pr_err("%s: Failed to register exynos-iommu driver.\n",
								__func__);
		goto err_set_iommu;
	}

	return 0;
err_set_iommu:
	platform_driver_unregister(&exynos_sysmmu_driver);
err_reg_driver:
	kmem_cache_destroy(lv2table_kmem_cache);
	return ret;
}
subsys_initcall(exynos_iommu_init);