/*
 * Copyright (c) 2011,2016 Samsung Electronics Co., Ltd.
 *		http://www.samsung.com
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#ifdef CONFIG_EXYNOS_IOMMU_DEBUG
#define DEBUG
#endif

#include <linux/clk.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/iommu.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/of.h>
#include <linux/of_iommu.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <linux/dma-iommu.h>
typedef u32 sysmmu_iova_t;
typedef u32 sysmmu_pte_t;
/* We do not consider super section mapping (16MB) */
#define SECT_ORDER 20
#define LPAGE_ORDER 16
#define SPAGE_ORDER 12

#define SECT_SIZE (1 << SECT_ORDER)
#define LPAGE_SIZE (1 << LPAGE_ORDER)
#define SPAGE_SIZE (1 << SPAGE_ORDER)

#define SECT_MASK (~(SECT_SIZE - 1))
#define LPAGE_MASK (~(LPAGE_SIZE - 1))
#define SPAGE_MASK (~(SPAGE_SIZE - 1))
#define lv1ent_fault(sent) ((*(sent) == ZERO_LV2LINK) || \
			   ((*(sent) & 3) == 0) || ((*(sent) & 3) == 3))
#define lv1ent_zero(sent) (*(sent) == ZERO_LV2LINK)
#define lv1ent_page_zero(sent) ((*(sent) & 3) == 1)
#define lv1ent_page(sent) ((*(sent) != ZERO_LV2LINK) && \
			  ((*(sent) & 3) == 1))
#define lv1ent_section(sent) ((*(sent) & 3) == 2)

#define lv2ent_fault(pent) ((*(pent) & 3) == 0)
#define lv2ent_small(pent) ((*(pent) & 2) == 2)
#define lv2ent_large(pent) ((*(pent) & 3) == 1)
/*
 * v1.x - v3.x SYSMMU supports 32bit physical and 32bit virtual address spaces.
 * v5.0 introduced support for a 36bit physical address space by shifting
 * all page entry values by 4 bits.
 * All SYSMMU controllers in the system support address spaces of the same
 * size, so PG_ENT_SHIFT can be initialized on the first SYSMMU probe to the
 * proper value (either 0 or 4).
 */
static short PG_ENT_SHIFT = -1;
#define SYSMMU_PG_ENT_SHIFT 0
#define SYSMMU_V5_PG_ENT_SHIFT 4
static const sysmmu_pte_t *LV1_PROT;
static const sysmmu_pte_t SYSMMU_LV1_PROT[] = {
	((0 << 15) | (0 << 10)), /* no access */
	((1 << 15) | (1 << 10)), /* IOMMU_READ only */
	((0 << 15) | (1 << 10)), /* IOMMU_WRITE not supported, use read/write */
	((0 << 15) | (1 << 10)), /* IOMMU_READ | IOMMU_WRITE */
};
static const sysmmu_pte_t SYSMMU_V5_LV1_PROT[] = {
	(0 << 4), /* no access */
	(1 << 4), /* IOMMU_READ only */
	(2 << 4), /* IOMMU_WRITE only */
	(3 << 4), /* IOMMU_READ | IOMMU_WRITE */
};

static const sysmmu_pte_t *LV2_PROT;
static const sysmmu_pte_t SYSMMU_LV2_PROT[] = {
	((0 << 9) | (0 << 4)), /* no access */
	((1 << 9) | (1 << 4)), /* IOMMU_READ only */
	((0 << 9) | (1 << 4)), /* IOMMU_WRITE not supported, use read/write */
	((0 << 9) | (1 << 4)), /* IOMMU_READ | IOMMU_WRITE */
};
static const sysmmu_pte_t SYSMMU_V5_LV2_PROT[] = {
	(0 << 2), /* no access */
	(1 << 2), /* IOMMU_READ only */
	(2 << 2), /* IOMMU_WRITE only */
	(3 << 2), /* IOMMU_READ | IOMMU_WRITE */
};

#define SYSMMU_SUPPORTED_PROT_BITS (IOMMU_READ | IOMMU_WRITE)
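/*
 * The IOMMU_READ/IOMMU_WRITE bits of the 'prot' argument are used directly
 * as an index into the tables above, e.g. (IOMMU_READ | IOMMU_WRITE) == 3
 * selects the read/write entry. SYSMMU_SUPPORTED_PROT_BITS masks 'prot'
 * down to that index range before the lookup (see exynos_iommu_map()).
 */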
#define sect_to_phys(ent) (((phys_addr_t) ent) << PG_ENT_SHIFT)
#define section_phys(sent) (sect_to_phys(*(sent)) & SECT_MASK)
#define section_offs(iova) (iova & (SECT_SIZE - 1))
#define lpage_phys(pent) (sect_to_phys(*(pent)) & LPAGE_MASK)
#define lpage_offs(iova) (iova & (LPAGE_SIZE - 1))
#define spage_phys(pent) (sect_to_phys(*(pent)) & SPAGE_MASK)
#define spage_offs(iova) (iova & (SPAGE_SIZE - 1))

#define NUM_LV1ENTRIES 4096
#define NUM_LV2ENTRIES (SECT_SIZE / SPAGE_SIZE)
static u32 lv1ent_offset(sysmmu_iova_t iova)
{
	return iova >> SECT_ORDER;
}

static u32 lv2ent_offset(sysmmu_iova_t iova)
{
	return (iova >> SPAGE_ORDER) & (NUM_LV2ENTRIES - 1);
}

#define LV1TABLE_SIZE (NUM_LV1ENTRIES * sizeof(sysmmu_pte_t))
#define LV2TABLE_SIZE (NUM_LV2ENTRIES * sizeof(sysmmu_pte_t))
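/*
 * Worked example (illustrative): for iova == 0x12345678,
 * lv1ent_offset() == 0x123 (iova >> 20), lv2ent_offset() == 0x45
 * ((iova >> 12) & 0xFF) and the in-page offset is 0x678. The lv1 table
 * thus has 4096 section entries covering 4 GiB of IO virtual space, and
 * each lv2 table has 256 4 KiB entries covering one 1 MiB section.
 */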
#define SPAGES_PER_LPAGE (LPAGE_SIZE / SPAGE_SIZE)
#define lv2table_base(sent) (sect_to_phys(*(sent) & 0xFFFFFFC0))

#define mk_lv1ent_sect(pa, prot) ((pa >> PG_ENT_SHIFT) | LV1_PROT[prot] | 2)
#define mk_lv1ent_page(pa) ((pa >> PG_ENT_SHIFT) | 1)
#define mk_lv2ent_lpage(pa, prot) ((pa >> PG_ENT_SHIFT) | LV2_PROT[prot] | 1)
#define mk_lv2ent_spage(pa, prot) ((pa >> PG_ENT_SHIFT) | LV2_PROT[prot] | 2)
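/*
 * Worked example (illustrative): with PG_ENT_SHIFT == 4 (SYSMMU v5) the
 * 36-bit physical section address 0x1_2340_0000 is stored as
 * mk_lv1ent_sect(0x123400000, prot) == (0x123400000 >> 4) | LV1_PROT[prot] | 2
 * == 0x12340000 | LV1_PROT[prot] | 2, so the whole 36-bit address fits in a
 * 32-bit entry. On v1.x - v3.x the shift is 0 and entries hold the physical
 * address bits unchanged.
 */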
#define CTRL_ENABLE	0x5
#define CTRL_BLOCK	0x7
#define CTRL_DISABLE	0x0

#define CFG_LRU		0x1
#define CFG_EAP		(1 << 2)
#define CFG_QOS(n)	((n & 0xF) << 7)
#define CFG_ACGEN	(1 << 24) /* System MMU 3.3 only */
#define CFG_SYSSEL	(1 << 22) /* System MMU 3.2 only */
#define CFG_FLPDCACHE	(1 << 20) /* System MMU 3.2+ only */
/* common registers */
#define REG_MMU_CTRL		0x000
#define REG_MMU_CFG		0x004
#define REG_MMU_STATUS		0x008
#define REG_MMU_VERSION		0x034

#define MMU_MAJ_VER(val)	((val) >> 7)
#define MMU_MIN_VER(val)	((val) & 0x7F)
#define MMU_RAW_VER(reg)	(((reg) >> 21) & ((1 << 11) - 1)) /* 11 bits */

#define MAKE_MMU_VER(maj, min)	((((maj) & 0xF) << 7) | ((min) & 0x7F))
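/*
 * Worked example (illustrative): REG_MMU_VERSION keeps the version in its
 * top 11 bits, so a register value of 0x30600000 gives
 * MMU_RAW_VER(0x30600000) == 0x183 == MAKE_MMU_VER(3, 3), which
 * MMU_MAJ_VER()/MMU_MIN_VER() decode as hardware version 3.3.
 */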
/* v1.x - v3.x registers */
#define REG_MMU_FLUSH		0x00C
#define REG_MMU_FLUSH_ENTRY	0x010
#define REG_PT_BASE_ADDR	0x014
#define REG_INT_STATUS		0x018
#define REG_INT_CLEAR		0x01C

#define REG_PAGE_FAULT_ADDR	0x024
#define REG_AW_FAULT_ADDR	0x028
#define REG_AR_FAULT_ADDR	0x02C
#define REG_DEFAULT_SLAVE_ADDR	0x030

/* v5.x registers */
#define REG_V5_PT_BASE_PFN	0x00C
#define REG_V5_MMU_FLUSH_ALL	0x010
#define REG_V5_MMU_FLUSH_ENTRY	0x014
#define REG_V5_MMU_FLUSH_RANGE	0x018
#define REG_V5_MMU_FLUSH_START	0x020
#define REG_V5_MMU_FLUSH_END	0x024
#define REG_V5_INT_STATUS	0x060
#define REG_V5_INT_CLEAR	0x064
#define REG_V5_FAULT_AR_VA	0x070
#define REG_V5_FAULT_AW_VA	0x080

#define has_sysmmu(dev)		(dev->archdata.iommu != NULL)
static struct device *dma_dev;
static struct kmem_cache *lv2table_kmem_cache;
static sysmmu_pte_t *zero_lv2_table;
#define ZERO_LV2LINK mk_lv1ent_page(virt_to_phys(zero_lv2_table))
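/*
 * ZERO_LV2LINK points every unused lv1 slot at a shared, all-fault lv2
 * table instead of leaving the slot as a plain fault entry. This keeps the
 * System MMU v3.3 first-level PD cache from latching 1 MiB fault entries
 * for regions that may be mapped later (see the workaround notes in
 * exynos_iommu_domain_alloc() and alloc_lv2entry()).
 */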
static sysmmu_pte_t *section_entry(sysmmu_pte_t *pgtable, sysmmu_iova_t iova)
{
	return pgtable + lv1ent_offset(iova);
}

static sysmmu_pte_t *page_entry(sysmmu_pte_t *sent, sysmmu_iova_t iova)
{
	return (sysmmu_pte_t *)phys_to_virt(lv2table_base(sent)) +
		lv2ent_offset(iova);
}
/*
 * IOMMU fault information register
 */
struct sysmmu_fault_info {
	unsigned int bit;	/* bit number in STATUS register */
	unsigned short addr_reg; /* register to read VA fault address */
	const char *name;	/* human readable fault name */
	unsigned int type;	/* fault type for report_iommu_fault */
};
static const struct sysmmu_fault_info sysmmu_faults[] = {
	{ 0, REG_PAGE_FAULT_ADDR, "PAGE", IOMMU_FAULT_READ },
	{ 1, REG_AR_FAULT_ADDR, "AR MULTI-HIT", IOMMU_FAULT_READ },
	{ 2, REG_AW_FAULT_ADDR, "AW MULTI-HIT", IOMMU_FAULT_WRITE },
	{ 3, REG_DEFAULT_SLAVE_ADDR, "BUS ERROR", IOMMU_FAULT_READ },
	{ 4, REG_AR_FAULT_ADDR, "AR SECURITY PROTECTION", IOMMU_FAULT_READ },
	{ 5, REG_AR_FAULT_ADDR, "AR ACCESS PROTECTION", IOMMU_FAULT_READ },
	{ 6, REG_AW_FAULT_ADDR, "AW SECURITY PROTECTION", IOMMU_FAULT_WRITE },
	{ 7, REG_AW_FAULT_ADDR, "AW ACCESS PROTECTION", IOMMU_FAULT_WRITE },
};

static const struct sysmmu_fault_info sysmmu_v5_faults[] = {
	{ 0, REG_V5_FAULT_AR_VA, "AR PTW", IOMMU_FAULT_READ },
	{ 1, REG_V5_FAULT_AR_VA, "AR PAGE", IOMMU_FAULT_READ },
	{ 2, REG_V5_FAULT_AR_VA, "AR MULTI-HIT", IOMMU_FAULT_READ },
	{ 3, REG_V5_FAULT_AR_VA, "AR ACCESS PROTECTION", IOMMU_FAULT_READ },
	{ 4, REG_V5_FAULT_AR_VA, "AR SECURITY PROTECTION", IOMMU_FAULT_READ },
	{ 16, REG_V5_FAULT_AW_VA, "AW PTW", IOMMU_FAULT_WRITE },
	{ 17, REG_V5_FAULT_AW_VA, "AW PAGE", IOMMU_FAULT_WRITE },
	{ 18, REG_V5_FAULT_AW_VA, "AW MULTI-HIT", IOMMU_FAULT_WRITE },
	{ 19, REG_V5_FAULT_AW_VA, "AW ACCESS PROTECTION", IOMMU_FAULT_WRITE },
	{ 20, REG_V5_FAULT_AW_VA, "AW SECURITY PROTECTION", IOMMU_FAULT_WRITE },
};
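/*
 * Note that the v5 interrupt status register reports read-path faults in
 * bits 0-4 and write-path faults in bits 16-20, which is why the table
 * above jumps from bit 4 to bit 16.
 */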
/*
 * This structure is attached to dev.archdata.iommu of the master device
 * on device add. It contains a list of SYSMMU controllers defined by the
 * device tree, which are bound to the given master device. It is usually
 * referenced via the 'owner' pointer.
 */
struct exynos_iommu_owner {
	struct list_head controllers;	/* list of sysmmu_drvdata.owner_node */
	struct iommu_domain *domain;	/* domain this device is attached to */
	struct mutex rpm_lock;		/* for runtime pm of all sysmmus */
};
/*
 * This structure is an Exynos-specific generalization of struct iommu_domain.
 * It contains a list of the SYSMMU controllers from all master devices which
 * have been attached to this domain, and the page tables of the IO address
 * space defined by it. It is usually referenced via the 'domain' pointer.
 */
struct exynos_iommu_domain {
	struct list_head clients; /* list of sysmmu_drvdata.domain_node */
	sysmmu_pte_t *pgtable;	/* lv1 page table, 16KB */
	short *lv2entcnt;	/* free lv2 entry counter for each section */
	spinlock_t lock;	/* lock for modifying list of clients */
	spinlock_t pgtablelock;	/* lock for modifying page table @ pgtable */
	struct iommu_domain domain; /* generic domain data structure */
};
/*
 * This structure holds all data of a single SYSMMU controller, including
 * hw resources like registers and clocks, pointers and list nodes to connect
 * it to all other structures, internal state and parameters read from the
 * device tree. It is usually referenced via the 'data' pointer.
 */
struct sysmmu_drvdata {
	struct device *sysmmu;		/* SYSMMU controller device */
	struct device *master;		/* master device (owner) */
	void __iomem *sfrbase;		/* our registers */
	struct clk *clk;		/* SYSMMU's clock */
	struct clk *aclk;		/* SYSMMU's aclk clock */
	struct clk *pclk;		/* SYSMMU's pclk clock */
	struct clk *clk_master;		/* master's device clock */
	spinlock_t lock;		/* lock for modifying state */
	bool active;			/* current status */
	struct exynos_iommu_domain *domain; /* domain we belong to */
	struct list_head domain_node;	/* node for domain clients list */
	struct list_head owner_node;	/* node for owner controllers list */
	phys_addr_t pgtable;		/* assigned page table structure */
	unsigned int version;		/* our version */

	struct iommu_device iommu;	/* IOMMU core handle */
};
static struct exynos_iommu_domain *to_exynos_domain(struct iommu_domain *dom)
{
	return container_of(dom, struct exynos_iommu_domain, domain);
}
static void sysmmu_unblock(struct sysmmu_drvdata *data)
{
	writel(CTRL_ENABLE, data->sfrbase + REG_MMU_CTRL);
}
static bool sysmmu_block(struct sysmmu_drvdata *data)
{
	int i = 120;

	writel(CTRL_BLOCK, data->sfrbase + REG_MMU_CTRL);
	while ((i > 0) && !(readl(data->sfrbase + REG_MMU_STATUS) & 1))
		--i;

	if (!(readl(data->sfrbase + REG_MMU_STATUS) & 1)) {
		sysmmu_unblock(data);
		return false;
	}

	return true;
}
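/*
 * Blocking stalls the SYSMMU so that the TLB/FLPD maintenance below can be
 * performed safely; bit 0 of REG_MMU_STATUS reads back 1 once the block has
 * taken effect, and the bounded poll above only limits how long we wait for
 * that to happen.
 */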
static void __sysmmu_tlb_invalidate(struct sysmmu_drvdata *data)
{
	if (MMU_MAJ_VER(data->version) < 5)
		writel(0x1, data->sfrbase + REG_MMU_FLUSH);
	else
		writel(0x1, data->sfrbase + REG_V5_MMU_FLUSH_ALL);
}
static void __sysmmu_tlb_invalidate_entry(struct sysmmu_drvdata *data,
					  sysmmu_iova_t iova, unsigned int num_inv)
{
	unsigned int i;

	if (MMU_MAJ_VER(data->version) < 5) {
		for (i = 0; i < num_inv; i++) {
			writel((iova & SPAGE_MASK) | 1,
			       data->sfrbase + REG_MMU_FLUSH_ENTRY);
			iova += SPAGE_SIZE;
		}
	} else {
		if (num_inv == 1) {
			writel((iova & SPAGE_MASK) | 1,
			       data->sfrbase + REG_V5_MMU_FLUSH_ENTRY);
		} else {
			writel((iova & SPAGE_MASK),
			       data->sfrbase + REG_V5_MMU_FLUSH_START);
			writel((iova & SPAGE_MASK) + (num_inv - 1) * SPAGE_SIZE,
			       data->sfrbase + REG_V5_MMU_FLUSH_END);
			writel(1, data->sfrbase + REG_V5_MMU_FLUSH_RANGE);
		}
	}
}
static void __sysmmu_set_ptbase(struct sysmmu_drvdata *data, phys_addr_t pgd)
{
	if (MMU_MAJ_VER(data->version) < 5)
		writel(pgd, data->sfrbase + REG_PT_BASE_ADDR);
	else
		writel(pgd >> PAGE_SHIFT,
		       data->sfrbase + REG_V5_PT_BASE_PFN);

	__sysmmu_tlb_invalidate(data);
}
static void __sysmmu_enable_clocks(struct sysmmu_drvdata *data)
{
	BUG_ON(clk_prepare_enable(data->clk_master));
	BUG_ON(clk_prepare_enable(data->clk));
	BUG_ON(clk_prepare_enable(data->pclk));
	BUG_ON(clk_prepare_enable(data->aclk));
}
static void __sysmmu_disable_clocks(struct sysmmu_drvdata *data)
{
	clk_disable_unprepare(data->aclk);
	clk_disable_unprepare(data->pclk);
	clk_disable_unprepare(data->clk);
	clk_disable_unprepare(data->clk_master);
}
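/*
 * Optional clocks that were not found during probe are left as NULL; the
 * common clock framework treats NULL clocks as no-ops, so the enable/disable
 * helpers above need no per-clock checks.
 */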
static void __sysmmu_get_version(struct sysmmu_drvdata *data)
{
	u32 ver;

	__sysmmu_enable_clocks(data);

	ver = readl(data->sfrbase + REG_MMU_VERSION);

	/* controllers on some SoCs don't report proper version */
	if (ver == 0x80000001u)
		data->version = MAKE_MMU_VER(1, 0);
	else
		data->version = MMU_RAW_VER(ver);

	dev_dbg(data->sysmmu, "hardware version: %d.%d\n",
		MMU_MAJ_VER(data->version), MMU_MIN_VER(data->version));

	__sysmmu_disable_clocks(data);
}
static void show_fault_information(struct sysmmu_drvdata *data,
				   const struct sysmmu_fault_info *finfo,
				   sysmmu_iova_t fault_addr)
{
	sysmmu_pte_t *ent;

	dev_err(data->sysmmu, "%s: %s FAULT occurred at %#x\n",
		dev_name(data->master), finfo->name, fault_addr);
	dev_dbg(data->sysmmu, "Page table base: %pa\n", &data->pgtable);
	ent = section_entry(phys_to_virt(data->pgtable), fault_addr);
	dev_dbg(data->sysmmu, "\tLv1 entry: %#x\n", *ent);
	if (lv1ent_page(ent)) {
		ent = page_entry(ent, fault_addr);
		dev_dbg(data->sysmmu, "\t Lv2 entry: %#x\n", *ent);
	}
}
static irqreturn_t exynos_sysmmu_irq(int irq, void *dev_id)
{
	/* SYSMMU is in blocked state when the interrupt occurred. */
	struct sysmmu_drvdata *data = dev_id;
	const struct sysmmu_fault_info *finfo;
	unsigned int i, n, itype;
	sysmmu_iova_t fault_addr = -1;
	unsigned short reg_status, reg_clear;
	int ret = -ENOSYS;

	WARN_ON(!data->active);

	if (MMU_MAJ_VER(data->version) < 5) {
		reg_status = REG_INT_STATUS;
		reg_clear = REG_INT_CLEAR;
		finfo = sysmmu_faults;
		n = ARRAY_SIZE(sysmmu_faults);
	} else {
		reg_status = REG_V5_INT_STATUS;
		reg_clear = REG_V5_INT_CLEAR;
		finfo = sysmmu_v5_faults;
		n = ARRAY_SIZE(sysmmu_v5_faults);
	}

	spin_lock(&data->lock);

	clk_enable(data->clk_master);

	itype = __ffs(readl(data->sfrbase + reg_status));
	for (i = 0; i < n; i++, finfo++)
		if (finfo->bit == itype)
			break;
	/* unknown/unsupported fault */
	BUG_ON(i == n);

	/* print debug message */
	fault_addr = readl(data->sfrbase + finfo->addr_reg);
	show_fault_information(data, finfo, fault_addr);

	if (data->domain)
		ret = report_iommu_fault(&data->domain->domain,
					 data->master, fault_addr, finfo->type);
	/* fault is not recovered by fault handler */
	BUG_ON(ret != 0);

	writel(1 << itype, data->sfrbase + reg_clear);

	sysmmu_unblock(data);

	clk_disable(data->clk_master);

	spin_unlock(&data->lock);

	return IRQ_HANDLED;
}
static void __sysmmu_disable(struct sysmmu_drvdata *data)
{
	unsigned long flags;

	clk_enable(data->clk_master);

	spin_lock_irqsave(&data->lock, flags);
	writel(CTRL_DISABLE, data->sfrbase + REG_MMU_CTRL);
	writel(0, data->sfrbase + REG_MMU_CFG);
	data->active = false;
	spin_unlock_irqrestore(&data->lock, flags);

	__sysmmu_disable_clocks(data);
}
static void __sysmmu_init_config(struct sysmmu_drvdata *data)
{
	unsigned int cfg;

	if (data->version <= MAKE_MMU_VER(3, 1))
		cfg = CFG_LRU | CFG_QOS(15);
	else if (data->version <= MAKE_MMU_VER(3, 2))
		cfg = CFG_LRU | CFG_QOS(15) | CFG_FLPDCACHE | CFG_SYSSEL;
	else
		cfg = CFG_QOS(15) | CFG_FLPDCACHE | CFG_ACGEN;

	cfg |= CFG_EAP; /* enable access protection bits check */

	writel(cfg, data->sfrbase + REG_MMU_CFG);
}
static void __sysmmu_enable(struct sysmmu_drvdata *data)
{
	unsigned long flags;

	__sysmmu_enable_clocks(data);

	spin_lock_irqsave(&data->lock, flags);
	writel(CTRL_BLOCK, data->sfrbase + REG_MMU_CTRL);
	__sysmmu_init_config(data);
	__sysmmu_set_ptbase(data, data->pgtable);
	writel(CTRL_ENABLE, data->sfrbase + REG_MMU_CTRL);
	data->active = true;
	spin_unlock_irqrestore(&data->lock, flags);

	/*
	 * The SYSMMU driver keeps the master's clock enabled only for the
	 * short time it spends accessing the registers. For address
	 * translation during DMA transactions it relies on the client
	 * driver to keep the clock enabled.
	 */
	clk_disable(data->clk_master);
}
static void sysmmu_tlb_invalidate_flpdcache(struct sysmmu_drvdata *data,
					    sysmmu_iova_t iova)
{
	unsigned long flags;

	spin_lock_irqsave(&data->lock, flags);
	if (data->active && data->version >= MAKE_MMU_VER(3, 3)) {
		clk_enable(data->clk_master);
		if (sysmmu_block(data)) {
			if (data->version >= MAKE_MMU_VER(5, 0))
				__sysmmu_tlb_invalidate(data);
			else
				__sysmmu_tlb_invalidate_entry(data, iova, 1);
			sysmmu_unblock(data);
		}
		clk_disable(data->clk_master);
	}
	spin_unlock_irqrestore(&data->lock, flags);
}
static void sysmmu_tlb_invalidate_entry(struct sysmmu_drvdata *data,
					sysmmu_iova_t iova, size_t size)
{
	unsigned long flags;

	spin_lock_irqsave(&data->lock, flags);
	if (data->active) {
		unsigned int num_inv = 1;

		clk_enable(data->clk_master);

		/*
		 * L2TLB invalidation required:
		 *   4KB page: 1 invalidation
		 *  64KB page: 16 invalidations
		 *   1MB page: 64 invalidations
		 * because it is a set-associative TLB
		 * with 8 ways and 64 sets.
		 * A 1MB page can be cached in any of the sets.
		 * A 64KB page can be in one of 16 consecutive sets.
		 */
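		/*
		 * Worked example (illustrative): unmapping one 64 KiB large
		 * page on a v2 SYSMMU gives size / PAGE_SIZE == 16, so 16
		 * consecutive 4 KiB entries are invalidated below; a 1 MiB
		 * section would request 256 but is clamped to 64, the number
		 * of sets.
		 */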
		if (MMU_MAJ_VER(data->version) == 2)
			num_inv = min_t(unsigned int, size / PAGE_SIZE, 64);

		if (sysmmu_block(data)) {
			__sysmmu_tlb_invalidate_entry(data, iova, num_inv);
			sysmmu_unblock(data);
		}
		clk_disable(data->clk_master);
	}
	spin_unlock_irqrestore(&data->lock, flags);
}
static const struct iommu_ops exynos_iommu_ops;
static int __init exynos_sysmmu_probe(struct platform_device *pdev)
{
	int irq, ret;
	struct device *dev = &pdev->dev;
	struct sysmmu_drvdata *data;
	struct resource *res;

	data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	data->sfrbase = devm_ioremap_resource(dev, res);
	if (IS_ERR(data->sfrbase))
		return PTR_ERR(data->sfrbase);

	irq = platform_get_irq(pdev, 0);
	if (irq <= 0) {
		dev_err(dev, "Unable to find IRQ resource\n");
		return irq;
	}

	ret = devm_request_irq(dev, irq, exynos_sysmmu_irq, 0,
			       dev_name(dev), data);
	if (ret) {
		dev_err(dev, "Unable to register handler of irq %d\n", irq);
		return ret;
	}

	data->clk = devm_clk_get(dev, "sysmmu");
	if (PTR_ERR(data->clk) == -ENOENT)
		data->clk = NULL;
	else if (IS_ERR(data->clk))
		return PTR_ERR(data->clk);

	data->aclk = devm_clk_get(dev, "aclk");
	if (PTR_ERR(data->aclk) == -ENOENT)
		data->aclk = NULL;
	else if (IS_ERR(data->aclk))
		return PTR_ERR(data->aclk);

	data->pclk = devm_clk_get(dev, "pclk");
	if (PTR_ERR(data->pclk) == -ENOENT)
		data->pclk = NULL;
	else if (IS_ERR(data->pclk))
		return PTR_ERR(data->pclk);

	if (!data->clk && (!data->aclk || !data->pclk)) {
		dev_err(dev, "Failed to get device clock(s)!\n");
		return -ENOSYS;
	}
	data->clk_master = devm_clk_get(dev, "master");
	if (PTR_ERR(data->clk_master) == -ENOENT)
		data->clk_master = NULL;
	else if (IS_ERR(data->clk_master))
		return PTR_ERR(data->clk_master);

	data->sysmmu = dev;
	spin_lock_init(&data->lock);

	ret = iommu_device_sysfs_add(&data->iommu, &pdev->dev, NULL,
				     dev_name(data->sysmmu));
	if (ret)
		return ret;

	iommu_device_set_ops(&data->iommu, &exynos_iommu_ops);
	iommu_device_set_fwnode(&data->iommu, &dev->of_node->fwnode);

	ret = iommu_device_register(&data->iommu);
	if (ret)
		return ret;

	platform_set_drvdata(pdev, data);

	__sysmmu_get_version(data);
	if (PG_ENT_SHIFT < 0) {
		if (MMU_MAJ_VER(data->version) < 5) {
			PG_ENT_SHIFT = SYSMMU_PG_ENT_SHIFT;
			LV1_PROT = SYSMMU_LV1_PROT;
			LV2_PROT = SYSMMU_LV2_PROT;
		} else {
			PG_ENT_SHIFT = SYSMMU_V5_PG_ENT_SHIFT;
			LV1_PROT = SYSMMU_V5_LV1_PROT;
			LV2_PROT = SYSMMU_V5_LV2_PROT;
		}
	}

	/*
	 * Use the first registered sysmmu device for performing
	 * dma mapping operations on iommu page tables (cpu cache flush).
	 */
	if (!dma_dev)
		dma_dev = &pdev->dev;

	pm_runtime_enable(dev);

	return 0;
}
static int __maybe_unused exynos_sysmmu_suspend(struct device *dev)
{
	struct sysmmu_drvdata *data = dev_get_drvdata(dev);
	struct device *master = data->master;

	if (master) {
		struct exynos_iommu_owner *owner = master->archdata.iommu;

		mutex_lock(&owner->rpm_lock);
		if (data->domain) {
			dev_dbg(data->sysmmu, "saving state\n");
			__sysmmu_disable(data);
		}
		mutex_unlock(&owner->rpm_lock);
	}
	return 0;
}
static int __maybe_unused exynos_sysmmu_resume(struct device *dev)
{
	struct sysmmu_drvdata *data = dev_get_drvdata(dev);
	struct device *master = data->master;

	if (master) {
		struct exynos_iommu_owner *owner = master->archdata.iommu;

		mutex_lock(&owner->rpm_lock);
		if (data->domain) {
			dev_dbg(data->sysmmu, "restoring state\n");
			__sysmmu_enable(data);
		}
		mutex_unlock(&owner->rpm_lock);
	}
	return 0;
}
static const struct dev_pm_ops sysmmu_pm_ops = {
	SET_RUNTIME_PM_OPS(exynos_sysmmu_suspend, exynos_sysmmu_resume, NULL)
	SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
				pm_runtime_force_resume)
};
static const struct of_device_id sysmmu_of_match[] __initconst = {
	{ .compatible	= "samsung,exynos-sysmmu", },
	{ },
};
static struct platform_driver exynos_sysmmu_driver __refdata = {
	.probe	= exynos_sysmmu_probe,
	.driver	= {
		.name		= "exynos-sysmmu",
		.of_match_table	= sysmmu_of_match,
		.pm		= &sysmmu_pm_ops,
		.suppress_bind_attrs = true,
	}
};
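/*
 * Page table entries are read by the SYSMMU hardware directly from memory,
 * so every CPU-side update below is wrapped in dma_sync_single_for_cpu()/
 * dma_sync_single_for_device() against dma_dev (the first probed SYSMMU)
 * to push the change out of the CPU caches before the table walker can
 * observe it.
 */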
static inline void update_pte(sysmmu_pte_t *ent, sysmmu_pte_t val)
{
	dma_sync_single_for_cpu(dma_dev, virt_to_phys(ent), sizeof(*ent),
				DMA_TO_DEVICE);
	*ent = cpu_to_le32(val);
	dma_sync_single_for_device(dma_dev, virt_to_phys(ent), sizeof(*ent),
				   DMA_TO_DEVICE);
}
static struct iommu_domain *exynos_iommu_domain_alloc(unsigned type)
{
	struct exynos_iommu_domain *domain;
	dma_addr_t handle;
	int i;

	/* Check if correct PTE offsets are initialized */
	BUG_ON(PG_ENT_SHIFT < 0 || !dma_dev);

	domain = kzalloc(sizeof(*domain), GFP_KERNEL);
	if (!domain)
		return NULL;

	if (type == IOMMU_DOMAIN_DMA) {
		if (iommu_get_dma_cookie(&domain->domain) != 0)
			goto err_pgtable;
	} else if (type != IOMMU_DOMAIN_UNMANAGED) {
		goto err_pgtable;
	}

	domain->pgtable = (sysmmu_pte_t *)__get_free_pages(GFP_KERNEL, 2);
	if (!domain->pgtable)
		goto err_dma_cookie;

	domain->lv2entcnt = (short *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, 1);
	if (!domain->lv2entcnt)
		goto err_counter;

	/* Workaround for System MMU v3.3 to prevent caching 1MiB mapping */
	for (i = 0; i < NUM_LV1ENTRIES; i++)
		domain->pgtable[i] = ZERO_LV2LINK;

	handle = dma_map_single(dma_dev, domain->pgtable, LV1TABLE_SIZE,
				DMA_TO_DEVICE);
	/* For mapping page table entries we rely on dma == phys */
	BUG_ON(handle != virt_to_phys(domain->pgtable));
	if (dma_mapping_error(dma_dev, handle))
		goto err_lv2ent;

	spin_lock_init(&domain->lock);
	spin_lock_init(&domain->pgtablelock);
	INIT_LIST_HEAD(&domain->clients);

	domain->domain.geometry.aperture_start = 0;
	domain->domain.geometry.aperture_end   = ~0UL;
	domain->domain.geometry.force_aperture = true;

	return &domain->domain;

err_lv2ent:
	free_pages((unsigned long)domain->lv2entcnt, 1);
err_counter:
	free_pages((unsigned long)domain->pgtable, 2);
err_dma_cookie:
	if (type == IOMMU_DOMAIN_DMA)
		iommu_put_dma_cookie(&domain->domain);
err_pgtable:
	kfree(domain);
	return NULL;
}
static void exynos_iommu_domain_free(struct iommu_domain *iommu_domain)
{
	struct exynos_iommu_domain *domain = to_exynos_domain(iommu_domain);
	struct sysmmu_drvdata *data, *next;
	unsigned long flags;
	int i;

	WARN_ON(!list_empty(&domain->clients));

	spin_lock_irqsave(&domain->lock, flags);

	list_for_each_entry_safe(data, next, &domain->clients, domain_node) {
		spin_lock(&data->lock);
		__sysmmu_disable(data);
		data->pgtable = 0;
		data->domain = NULL;
		list_del_init(&data->domain_node);
		spin_unlock(&data->lock);
	}

	spin_unlock_irqrestore(&domain->lock, flags);

	if (iommu_domain->type == IOMMU_DOMAIN_DMA)
		iommu_put_dma_cookie(iommu_domain);

	dma_unmap_single(dma_dev, virt_to_phys(domain->pgtable), LV1TABLE_SIZE,
			 DMA_TO_DEVICE);

	for (i = 0; i < NUM_LV1ENTRIES; i++)
		if (lv1ent_page(domain->pgtable + i)) {
			phys_addr_t base = lv2table_base(domain->pgtable + i);

			dma_unmap_single(dma_dev, base, LV2TABLE_SIZE,
					 DMA_TO_DEVICE);
			kmem_cache_free(lv2table_kmem_cache,
					phys_to_virt(base));
		}

	free_pages((unsigned long)domain->pgtable, 2);
	free_pages((unsigned long)domain->lv2entcnt, 1);
	kfree(domain);
}
static void exynos_iommu_detach_device(struct iommu_domain *iommu_domain,
				       struct device *dev)
{
	struct exynos_iommu_owner *owner = dev->archdata.iommu;
	struct exynos_iommu_domain *domain = to_exynos_domain(iommu_domain);
	phys_addr_t pagetable = virt_to_phys(domain->pgtable);
	struct sysmmu_drvdata *data, *next;
	unsigned long flags;

	if (!has_sysmmu(dev) || owner->domain != iommu_domain)
		return;

	mutex_lock(&owner->rpm_lock);

	list_for_each_entry(data, &owner->controllers, owner_node) {
		pm_runtime_get_noresume(data->sysmmu);
		if (pm_runtime_active(data->sysmmu))
			__sysmmu_disable(data);
		pm_runtime_put(data->sysmmu);
	}

	spin_lock_irqsave(&domain->lock, flags);
	list_for_each_entry_safe(data, next, &domain->clients, domain_node) {
		spin_lock(&data->lock);
		data->pgtable = 0;
		data->domain = NULL;
		list_del_init(&data->domain_node);
		spin_unlock(&data->lock);
	}
	owner->domain = NULL;
	spin_unlock_irqrestore(&domain->lock, flags);

	mutex_unlock(&owner->rpm_lock);

	dev_dbg(dev, "%s: Detached IOMMU with pgtable %pa\n", __func__,
		&pagetable);
}
static int exynos_iommu_attach_device(struct iommu_domain *iommu_domain,
				      struct device *dev)
{
	struct exynos_iommu_owner *owner = dev->archdata.iommu;
	struct exynos_iommu_domain *domain = to_exynos_domain(iommu_domain);
	struct sysmmu_drvdata *data;
	phys_addr_t pagetable = virt_to_phys(domain->pgtable);
	unsigned long flags;

	if (!has_sysmmu(dev))
		return -ENODEV;

	if (owner->domain)
		exynos_iommu_detach_device(owner->domain, dev);

	mutex_lock(&owner->rpm_lock);

	spin_lock_irqsave(&domain->lock, flags);
	list_for_each_entry(data, &owner->controllers, owner_node) {
		spin_lock(&data->lock);
		data->pgtable = pagetable;
		data->domain = domain;
		list_add_tail(&data->domain_node, &domain->clients);
		spin_unlock(&data->lock);
	}
	owner->domain = iommu_domain;
	spin_unlock_irqrestore(&domain->lock, flags);

	list_for_each_entry(data, &owner->controllers, owner_node) {
		pm_runtime_get_noresume(data->sysmmu);
		if (pm_runtime_active(data->sysmmu))
			__sysmmu_enable(data);
		pm_runtime_put(data->sysmmu);
	}

	mutex_unlock(&owner->rpm_lock);

	dev_dbg(dev, "%s: Attached IOMMU with pgtable %pa\n", __func__,
		&pagetable);

	return 0;
}
static sysmmu_pte_t *alloc_lv2entry(struct exynos_iommu_domain *domain,
		sysmmu_pte_t *sent, sysmmu_iova_t iova, short *pgcounter)
{
	if (lv1ent_section(sent)) {
		WARN(1, "Trying mapping on %#08x mapped with 1MiB page", iova);
		return ERR_PTR(-EADDRINUSE);
	}

	if (lv1ent_fault(sent)) {
		dma_addr_t handle;
		sysmmu_pte_t *pent;
		bool need_flush_flpd_cache = lv1ent_zero(sent);

		pent = kmem_cache_zalloc(lv2table_kmem_cache, GFP_ATOMIC);
		BUG_ON((uintptr_t)pent & (LV2TABLE_SIZE - 1));
		if (!pent)
			return ERR_PTR(-ENOMEM);

		update_pte(sent, mk_lv1ent_page(virt_to_phys(pent)));
		kmemleak_ignore(pent);
		*pgcounter = NUM_LV2ENTRIES;
		handle = dma_map_single(dma_dev, pent, LV2TABLE_SIZE,
					DMA_TO_DEVICE);
		if (dma_mapping_error(dma_dev, handle)) {
			kmem_cache_free(lv2table_kmem_cache, pent);
			return ERR_PTR(-EADDRINUSE);
		}

		/*
		 * If the pre-fetched SLPD is the faulty SLPD in zero_l2_table,
		 * the FLPD cache may cache the address of zero_l2_table. This
		 * function replaces the zero_l2_table with a new L2 page table
		 * to write valid mappings.
		 * Accessing the valid area may cause a page fault since the
		 * FLPD cache may still cache zero_l2_table for the valid area
		 * instead of the new L2 page table that has the mapping
		 * information of the valid area.
		 * Thus any replacement of zero_l2_table with another valid L2
		 * page table must involve an FLPD cache invalidation for
		 * System MMU v3.3.
		 * The FLPD cache invalidation is performed with a TLB
		 * invalidation by VPN without blocking. It is safe to
		 * invalidate the TLB without blocking because the target
		 * address of the TLB invalidation is not currently mapped.
		 */
		if (need_flush_flpd_cache) {
			struct sysmmu_drvdata *data;

			spin_lock(&domain->lock);
			list_for_each_entry(data, &domain->clients, domain_node)
				sysmmu_tlb_invalidate_flpdcache(data, iova);
			spin_unlock(&domain->lock);
		}
	}

	return page_entry(sent, iova);
}
static int lv1set_section(struct exynos_iommu_domain *domain,
			  sysmmu_pte_t *sent, sysmmu_iova_t iova,
			  phys_addr_t paddr, int prot, short *pgcnt)
{
	if (lv1ent_section(sent)) {
		WARN(1, "Trying mapping on 1MiB@%#08x that is mapped",
			iova);
		return -EADDRINUSE;
	}

	if (lv1ent_page(sent)) {
		if (*pgcnt != NUM_LV2ENTRIES) {
			WARN(1, "Trying mapping on 1MiB@%#08x that is mapped",
				iova);
			return -EADDRINUSE;
		}

		kmem_cache_free(lv2table_kmem_cache, page_entry(sent, 0));
		*pgcnt = 0;
	}

	update_pte(sent, mk_lv1ent_sect(paddr, prot));

	spin_lock(&domain->lock);
	if (lv1ent_page_zero(sent)) {
		struct sysmmu_drvdata *data;
		/*
		 * Flushing the FLPD cache in System MMU v3.3, which may have
		 * cached an FLPD entry by speculative prefetch of an SLPD
		 * that has no mapping.
		 */
		list_for_each_entry(data, &domain->clients, domain_node)
			sysmmu_tlb_invalidate_flpdcache(data, iova);
	}
	spin_unlock(&domain->lock);

	return 0;
}
static int lv2set_page(sysmmu_pte_t *pent, phys_addr_t paddr, size_t size,
		       int prot, short *pgcnt)
{
	if (size == SPAGE_SIZE) {
		if (WARN_ON(!lv2ent_fault(pent)))
			return -EADDRINUSE;

		update_pte(pent, mk_lv2ent_spage(paddr, prot));
		*pgcnt -= 1;
	} else { /* size == LPAGE_SIZE */
		int i;
		dma_addr_t pent_base = virt_to_phys(pent);

		dma_sync_single_for_cpu(dma_dev, pent_base,
					sizeof(*pent) * SPAGES_PER_LPAGE,
					DMA_TO_DEVICE);
		for (i = 0; i < SPAGES_PER_LPAGE; i++, pent++) {
			if (WARN_ON(!lv2ent_fault(pent))) {
				if (i > 0)
					memset(pent - i, 0, sizeof(*pent) * i);
				return -EADDRINUSE;
			}

			*pent = mk_lv2ent_lpage(paddr, prot);
		}
		dma_sync_single_for_device(dma_dev, pent_base,
					   sizeof(*pent) * SPAGES_PER_LPAGE,
					   DMA_TO_DEVICE);
		*pgcnt -= SPAGES_PER_LPAGE;
	}

	return 0;
}
/*
 * *CAUTION* to the I/O virtual memory managers that support exynos-iommu:
 *
 * System MMU v3.x has advanced logic to improve address translation
 * performance by caching more page table entries from a page table walk.
 * However, the logic has a bug: while caching faulty page table entries,
 * System MMU reports a page fault if the cached fault entry is hit, even
 * though the fault entry is updated to a valid entry after the entry is
 * cached. To prevent caching faulty page table entries which may be updated
 * to valid entries later, the virtual memory manager should apply the
 * workaround described below.
 *
 * Any two consecutive I/O virtual address regions must have a hole of 128KiB
 * at maximum to prevent misbehavior of System MMU 3.x (workaround for h/w bug).
 *
 * Precisely, any start address of an I/O virtual region must be aligned with
 * the following sizes for System MMU v3.1 and v3.2:
 * System MMU v3.1: 128KiB
 * System MMU v3.2: 256KiB
 *
 * Because System MMU v3.3 caches page table entries more aggressively, it
 * needs the following:
 * - Any two consecutive I/O virtual regions must have a hole of size larger
 *   than or equal to 128KiB.
 * - Start address of an I/O virtual region must be aligned by 128KiB.
 */
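/*
 * Illustrative example (not enforced by this driver): an IOVA allocator
 * honouring the v3.3 rules above would place a new region after an existing
 * one ending at 0x20010000 no lower than
 * ALIGN(0x20010000 + SZ_128K, SZ_128K) == 0x20040000, i.e. it keeps a hole
 * of at least 128 KiB and starts the new region on a 128 KiB boundary.
 */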
static int exynos_iommu_map(struct iommu_domain *iommu_domain,
			    unsigned long l_iova, phys_addr_t paddr, size_t size,
			    int prot)
{
	struct exynos_iommu_domain *domain = to_exynos_domain(iommu_domain);
	sysmmu_pte_t *entry;
	sysmmu_iova_t iova = (sysmmu_iova_t)l_iova;
	unsigned long flags;
	int ret = -ENOMEM;

	BUG_ON(domain->pgtable == NULL);
	prot &= SYSMMU_SUPPORTED_PROT_BITS;

	spin_lock_irqsave(&domain->pgtablelock, flags);

	entry = section_entry(domain->pgtable, iova);

	if (size == SECT_SIZE) {
		ret = lv1set_section(domain, entry, iova, paddr, prot,
				     &domain->lv2entcnt[lv1ent_offset(iova)]);
	} else {
		sysmmu_pte_t *pent;

		pent = alloc_lv2entry(domain, entry, iova,
				      &domain->lv2entcnt[lv1ent_offset(iova)]);

		if (IS_ERR(pent))
			ret = PTR_ERR(pent);
		else
			ret = lv2set_page(pent, paddr, size, prot,
					  &domain->lv2entcnt[lv1ent_offset(iova)]);
	}

	if (ret)
		pr_err("%s: Failed(%d) to map %#zx bytes @ %#x\n",
			__func__, ret, size, iova);

	spin_unlock_irqrestore(&domain->pgtablelock, flags);

	return ret;
}
static void exynos_iommu_tlb_invalidate_entry(struct exynos_iommu_domain *domain,
					      sysmmu_iova_t iova, size_t size)
{
	struct sysmmu_drvdata *data;
	unsigned long flags;

	spin_lock_irqsave(&domain->lock, flags);

	list_for_each_entry(data, &domain->clients, domain_node)
		sysmmu_tlb_invalidate_entry(data, iova, size);

	spin_unlock_irqrestore(&domain->lock, flags);
}
static size_t exynos_iommu_unmap(struct iommu_domain *iommu_domain,
				 unsigned long l_iova, size_t size)
{
	struct exynos_iommu_domain *domain = to_exynos_domain(iommu_domain);
	sysmmu_iova_t iova = (sysmmu_iova_t)l_iova;
	sysmmu_pte_t *ent;
	size_t err_pgsize;
	unsigned long flags;

	BUG_ON(domain->pgtable == NULL);

	spin_lock_irqsave(&domain->pgtablelock, flags);

	ent = section_entry(domain->pgtable, iova);

	if (lv1ent_section(ent)) {
		if (WARN_ON(size < SECT_SIZE)) {
			err_pgsize = SECT_SIZE;
			goto err;
		}

		/* workaround for h/w bug in System MMU v3.3 */
		update_pte(ent, ZERO_LV2LINK);
		size = SECT_SIZE;
		goto done;
	}

	if (unlikely(lv1ent_fault(ent))) {
		if (size > SECT_SIZE)
			size = SECT_SIZE;
		goto done;
	}

	/* lv1ent_page(sent) == true here */

	ent = page_entry(ent, iova);

	if (unlikely(lv2ent_fault(ent))) {
		size = SPAGE_SIZE;
		goto done;
	}

	if (lv2ent_small(ent)) {
		update_pte(ent, 0);
		size = SPAGE_SIZE;
		domain->lv2entcnt[lv1ent_offset(iova)] += 1;
		goto done;
	}

	/* lv2ent_large(ent) == true here */
	if (WARN_ON(size < LPAGE_SIZE)) {
		err_pgsize = LPAGE_SIZE;
		goto err;
	}

	dma_sync_single_for_cpu(dma_dev, virt_to_phys(ent),
				sizeof(*ent) * SPAGES_PER_LPAGE,
				DMA_TO_DEVICE);
	memset(ent, 0, sizeof(*ent) * SPAGES_PER_LPAGE);
	dma_sync_single_for_device(dma_dev, virt_to_phys(ent),
				   sizeof(*ent) * SPAGES_PER_LPAGE,
				   DMA_TO_DEVICE);
	size = LPAGE_SIZE;
	domain->lv2entcnt[lv1ent_offset(iova)] += SPAGES_PER_LPAGE;
done:
	spin_unlock_irqrestore(&domain->pgtablelock, flags);

	exynos_iommu_tlb_invalidate_entry(domain, iova, size);

	return size;
err:
	spin_unlock_irqrestore(&domain->pgtablelock, flags);

	pr_err("%s: Failed: size(%#zx) @ %#x is smaller than page size %#zx\n",
		__func__, size, iova, err_pgsize);

	return 0;
}
static phys_addr_t exynos_iommu_iova_to_phys(struct iommu_domain *iommu_domain,
					     dma_addr_t iova)
{
	struct exynos_iommu_domain *domain = to_exynos_domain(iommu_domain);
	sysmmu_pte_t *entry;
	unsigned long flags;
	phys_addr_t phys = 0;

	spin_lock_irqsave(&domain->pgtablelock, flags);

	entry = section_entry(domain->pgtable, iova);

	if (lv1ent_section(entry)) {
		phys = section_phys(entry) + section_offs(iova);
	} else if (lv1ent_page(entry)) {
		entry = page_entry(entry, iova);

		if (lv2ent_large(entry))
			phys = lpage_phys(entry) + lpage_offs(iova);
		else if (lv2ent_small(entry))
			phys = spage_phys(entry) + spage_offs(iova);
	}

	spin_unlock_irqrestore(&domain->pgtablelock, flags);

	return phys;
}
static struct iommu_group *get_device_iommu_group(struct device *dev)
{
	struct iommu_group *group;

	group = iommu_group_get(dev);
	if (!group)
		group = iommu_group_alloc();

	return group;
}
static int exynos_iommu_add_device(struct device *dev)
{
	struct iommu_group *group;

	if (!has_sysmmu(dev))
		return -ENODEV;

	group = iommu_group_get_for_dev(dev);

	if (IS_ERR(group))
		return PTR_ERR(group);

	iommu_group_put(group);

	return 0;
}
static void exynos_iommu_remove_device(struct device *dev)
{
	struct exynos_iommu_owner *owner = dev->archdata.iommu;

	if (!has_sysmmu(dev))
		return;

	if (owner->domain) {
		struct iommu_group *group = iommu_group_get(dev);

		if (group) {
			WARN_ON(owner->domain !=
				iommu_group_default_domain(group));
			exynos_iommu_detach_device(owner->domain, dev);
			iommu_group_put(group);
		}
	}
	iommu_group_remove_device(dev);
}
static int exynos_iommu_of_xlate(struct device *dev,
				 struct of_phandle_args *spec)
{
	struct exynos_iommu_owner *owner = dev->archdata.iommu;
	struct platform_device *sysmmu = of_find_device_by_node(spec->np);
	struct sysmmu_drvdata *data, *entry;

	if (!sysmmu)
		return -ENODEV;

	data = platform_get_drvdata(sysmmu);
	if (!data)
		return -ENODEV;

	if (!owner) {
		owner = kzalloc(sizeof(*owner), GFP_KERNEL);
		if (!owner)
			return -ENOMEM;

		INIT_LIST_HEAD(&owner->controllers);
		mutex_init(&owner->rpm_lock);
		dev->archdata.iommu = owner;
	}

	list_for_each_entry(entry, &owner->controllers, owner_node)
		if (entry == data)
			return 0;

	list_add_tail(&data->owner_node, &owner->controllers);
	data->master = dev;

	/*
	 * SYSMMU will be runtime activated via device link (dependency) to its
	 * master device, so there are no direct calls to pm_runtime_get/put
	 * in this driver.
	 */
	device_link_add(dev, data->sysmmu, DL_FLAG_PM_RUNTIME);

	return 0;
}
static const struct iommu_ops exynos_iommu_ops = {
	.domain_alloc = exynos_iommu_domain_alloc,
	.domain_free = exynos_iommu_domain_free,
	.attach_dev = exynos_iommu_attach_device,
	.detach_dev = exynos_iommu_detach_device,
	.map = exynos_iommu_map,
	.unmap = exynos_iommu_unmap,
	.map_sg = default_iommu_map_sg,
	.iova_to_phys = exynos_iommu_iova_to_phys,
	.device_group = get_device_iommu_group,
	.add_device = exynos_iommu_add_device,
	.remove_device = exynos_iommu_remove_device,
	.pgsize_bitmap = SECT_SIZE | LPAGE_SIZE | SPAGE_SIZE,
	.of_xlate = exynos_iommu_of_xlate,
};
static int __init exynos_iommu_init(void)
{
	int ret;

	lv2table_kmem_cache = kmem_cache_create("exynos-iommu-lv2table",
				LV2TABLE_SIZE, LV2TABLE_SIZE, 0, NULL);
	if (!lv2table_kmem_cache) {
		pr_err("%s: Failed to create kmem cache\n", __func__);
		return -ENOMEM;
	}

	ret = platform_driver_register(&exynos_sysmmu_driver);
	if (ret) {
		pr_err("%s: Failed to register driver\n", __func__);
		goto err_reg_driver;
	}

	zero_lv2_table = kmem_cache_zalloc(lv2table_kmem_cache, GFP_KERNEL);
	if (zero_lv2_table == NULL) {
		pr_err("%s: Failed to allocate zero level2 page table\n",
			__func__);
		ret = -ENOMEM;
		goto err_zero_lv2;
	}

	ret = bus_set_iommu(&platform_bus_type, &exynos_iommu_ops);
	if (ret) {
		pr_err("%s: Failed to register exynos-iommu driver.\n",
								__func__);
		goto err_set_iommu;
	}

	return 0;
err_set_iommu:
	kmem_cache_free(lv2table_kmem_cache, zero_lv2_table);
err_zero_lv2:
	platform_driver_unregister(&exynos_sysmmu_driver);
err_reg_driver:
	kmem_cache_destroy(lv2table_kmem_cache);
	return ret;
}
core_initcall(exynos_iommu_init);

IOMMU_OF_DECLARE(exynos_iommu_of, "samsung,exynos-sysmmu", NULL);