drivers/iommu/exynos-iommu.c
/*
 * Copyright (c) 2011,2016 Samsung Electronics Co., Ltd.
 *		http://www.samsung.com
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#ifdef CONFIG_EXYNOS_IOMMU_DEBUG
#define DEBUG
#endif

#include <linux/clk.h>
#include <linux/dma-iommu.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iommu.h>
#include <linux/list.h>
#include <linux/of.h>
#include <linux/of_iommu.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/slab.h>

typedef u32 sysmmu_iova_t;
typedef u32 sysmmu_pte_t;

/* We do not consider super section mapping (16MB) */
#define SECT_ORDER 20
#define LPAGE_ORDER 16
#define SPAGE_ORDER 12

#define SECT_SIZE (1 << SECT_ORDER)
#define LPAGE_SIZE (1 << LPAGE_ORDER)
#define SPAGE_SIZE (1 << SPAGE_ORDER)

#define SECT_MASK (~(SECT_SIZE - 1))
#define LPAGE_MASK (~(LPAGE_SIZE - 1))
#define SPAGE_MASK (~(SPAGE_SIZE - 1))

#define lv1ent_fault(sent) ((*(sent) == ZERO_LV2LINK) || \
			   ((*(sent) & 3) == 0) || ((*(sent) & 3) == 3))
#define lv1ent_zero(sent) (*(sent) == ZERO_LV2LINK)
#define lv1ent_page_zero(sent) ((*(sent) & 3) == 1)
#define lv1ent_page(sent) ((*(sent) != ZERO_LV2LINK) && \
			  ((*(sent) & 3) == 1))
#define lv1ent_section(sent) ((*(sent) & 3) == 2)

#define lv2ent_fault(pent) ((*(pent) & 3) == 0)
#define lv2ent_small(pent) ((*(pent) & 2) == 2)
#define lv2ent_large(pent) ((*(pent) & 3) == 1)

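/*
 * Worked example (illustrative, not part of the original source): with the
 * orders above, IOVA 0x12345678 decomposes as
 *	lv1 index:	0x12345678 >> SECT_ORDER	   = 0x123
 *	section offset:	0x12345678 & (SECT_SIZE - 1)	   = 0x45678
 *	lv2 index:	(0x12345678 >> SPAGE_ORDER) & 0xff = 0x45
 *	page offset:	0x12345678 & (SPAGE_SIZE - 1)	   = 0x678
 */
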
/*
 * v1.x - v3.x SYSMMUs support 32-bit physical and 32-bit virtual address
 * spaces. v5.0 introduced support for a 36-bit physical address space by
 * shifting all page entry values by 4 bits.
 * All SYSMMU controllers in the system support address spaces of the same
 * size, so PG_ENT_SHIFT can be initialized to the proper value (0 or 4) on
 * the first SYSMMU probe.
 */
static short PG_ENT_SHIFT = -1;
#define SYSMMU_PG_ENT_SHIFT 0
#define SYSMMU_V5_PG_ENT_SHIFT 4

#define sect_to_phys(ent) (((phys_addr_t) ent) << PG_ENT_SHIFT)
#define section_phys(sent) (sect_to_phys(*(sent)) & SECT_MASK)
#define section_offs(iova) (iova & (SECT_SIZE - 1))
#define lpage_phys(pent) (sect_to_phys(*(pent)) & LPAGE_MASK)
#define lpage_offs(iova) (iova & (LPAGE_SIZE - 1))
#define spage_phys(pent) (sect_to_phys(*(pent)) & SPAGE_MASK)
#define spage_offs(iova) (iova & (SPAGE_SIZE - 1))

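/*
 * Illustration (an assumption drawn from the comment above): on v5 hardware
 * PG_ENT_SHIFT is 4, so the 36-bit physical section address 0x123400000 is
 * stored in a 32-bit lv1 entry as (0x123400000 >> 4) | 2 = 0x12340002;
 * sect_to_phys() shifts it back left by 4 before SECT_MASK is applied.
 */
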
#define NUM_LV1ENTRIES 4096
#define NUM_LV2ENTRIES (SECT_SIZE / SPAGE_SIZE)

static u32 lv1ent_offset(sysmmu_iova_t iova)
{
	return iova >> SECT_ORDER;
}

static u32 lv2ent_offset(sysmmu_iova_t iova)
{
	return (iova >> SPAGE_ORDER) & (NUM_LV2ENTRIES - 1);
}

#define LV1TABLE_SIZE (NUM_LV1ENTRIES * sizeof(sysmmu_pte_t))
#define LV2TABLE_SIZE (NUM_LV2ENTRIES * sizeof(sysmmu_pte_t))

#define SPAGES_PER_LPAGE (LPAGE_SIZE / SPAGE_SIZE)
#define lv2table_base(sent) (sect_to_phys(*(sent) & 0xFFFFFFC0))

#define mk_lv1ent_sect(pa) ((pa >> PG_ENT_SHIFT) | 2)
#define mk_lv1ent_page(pa) ((pa >> PG_ENT_SHIFT) | 1)
#define mk_lv2ent_lpage(pa) ((pa >> PG_ENT_SHIFT) | 1)
#define mk_lv2ent_spage(pa) ((pa >> PG_ENT_SHIFT) | 2)

#define CTRL_ENABLE	0x5
#define CTRL_BLOCK	0x7
#define CTRL_DISABLE	0x0

#define CFG_LRU		0x1
#define CFG_QOS(n)	((n & 0xF) << 7)
#define CFG_ACGEN	(1 << 24) /* System MMU 3.3 only */
#define CFG_SYSSEL	(1 << 22) /* System MMU 3.2 only */
#define CFG_FLPDCACHE	(1 << 20) /* System MMU 3.2+ only */

/* common registers */
#define REG_MMU_CTRL		0x000
#define REG_MMU_CFG		0x004
#define REG_MMU_STATUS		0x008
#define REG_MMU_VERSION		0x034

#define MMU_MAJ_VER(val)	((val) >> 7)
#define MMU_MIN_VER(val)	((val) & 0x7F)
#define MMU_RAW_VER(reg)	(((reg) >> 21) & ((1 << 11) - 1)) /* 11 bits */

#define MAKE_MMU_VER(maj, min)	((((maj) & 0xF) << 7) | ((min) & 0x7F))

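/*
 * Example (illustrative): a v3.3 controller encodes its version as
 * MAKE_MMU_VER(3, 3) = (3 << 7) | 3 = 0x183 in bits 31:21 of
 * REG_MMU_VERSION, so the register reads 0x30600000; MMU_RAW_VER()
 * recovers 0x183, which MMU_MAJ_VER()/MMU_MIN_VER() split into 3 and 3.
 */
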
/* v1.x - v3.x registers */
#define REG_MMU_FLUSH		0x00C
#define REG_MMU_FLUSH_ENTRY	0x010
#define REG_PT_BASE_ADDR	0x014
#define REG_INT_STATUS		0x018
#define REG_INT_CLEAR		0x01C

#define REG_PAGE_FAULT_ADDR	0x024
#define REG_AW_FAULT_ADDR	0x028
#define REG_AR_FAULT_ADDR	0x02C
#define REG_DEFAULT_SLAVE_ADDR	0x030

/* v5.x registers */
#define REG_V5_PT_BASE_PFN	0x00C
#define REG_V5_MMU_FLUSH_ALL	0x010
#define REG_V5_MMU_FLUSH_ENTRY	0x014
#define REG_V5_INT_STATUS	0x060
#define REG_V5_INT_CLEAR	0x064
#define REG_V5_FAULT_AR_VA	0x070
#define REG_V5_FAULT_AW_VA	0x080

#define has_sysmmu(dev)		(dev->archdata.iommu != NULL)

static struct device *dma_dev;
static struct kmem_cache *lv2table_kmem_cache;
static sysmmu_pte_t *zero_lv2_table;
#define ZERO_LV2LINK mk_lv1ent_page(virt_to_phys(zero_lv2_table))

static sysmmu_pte_t *section_entry(sysmmu_pte_t *pgtable, sysmmu_iova_t iova)
{
	return pgtable + lv1ent_offset(iova);
}

static sysmmu_pte_t *page_entry(sysmmu_pte_t *sent, sysmmu_iova_t iova)
{
	return (sysmmu_pte_t *)phys_to_virt(
				lv2table_base(sent)) + lv2ent_offset(iova);
}

/*
 * IOMMU fault information register
 */
struct sysmmu_fault_info {
	unsigned int bit;	/* bit number in STATUS register */
	unsigned short addr_reg; /* register to read VA fault address */
	const char *name;	/* human readable fault name */
	unsigned int type;	/* fault type for report_iommu_fault */
};

static const struct sysmmu_fault_info sysmmu_faults[] = {
	{ 0, REG_PAGE_FAULT_ADDR, "PAGE", IOMMU_FAULT_READ },
	{ 1, REG_AR_FAULT_ADDR, "AR MULTI-HIT", IOMMU_FAULT_READ },
	{ 2, REG_AW_FAULT_ADDR, "AW MULTI-HIT", IOMMU_FAULT_WRITE },
	{ 3, REG_DEFAULT_SLAVE_ADDR, "BUS ERROR", IOMMU_FAULT_READ },
	{ 4, REG_AR_FAULT_ADDR, "AR SECURITY PROTECTION", IOMMU_FAULT_READ },
	{ 5, REG_AR_FAULT_ADDR, "AR ACCESS PROTECTION", IOMMU_FAULT_READ },
	{ 6, REG_AW_FAULT_ADDR, "AW SECURITY PROTECTION", IOMMU_FAULT_WRITE },
	{ 7, REG_AW_FAULT_ADDR, "AW ACCESS PROTECTION", IOMMU_FAULT_WRITE },
};

static const struct sysmmu_fault_info sysmmu_v5_faults[] = {
	{ 0, REG_V5_FAULT_AR_VA, "AR PTW", IOMMU_FAULT_READ },
	{ 1, REG_V5_FAULT_AR_VA, "AR PAGE", IOMMU_FAULT_READ },
	{ 2, REG_V5_FAULT_AR_VA, "AR MULTI-HIT", IOMMU_FAULT_READ },
	{ 3, REG_V5_FAULT_AR_VA, "AR ACCESS PROTECTION", IOMMU_FAULT_READ },
	{ 4, REG_V5_FAULT_AR_VA, "AR SECURITY PROTECTION", IOMMU_FAULT_READ },
	{ 16, REG_V5_FAULT_AW_VA, "AW PTW", IOMMU_FAULT_WRITE },
	{ 17, REG_V5_FAULT_AW_VA, "AW PAGE", IOMMU_FAULT_WRITE },
	{ 18, REG_V5_FAULT_AW_VA, "AW MULTI-HIT", IOMMU_FAULT_WRITE },
	{ 19, REG_V5_FAULT_AW_VA, "AW ACCESS PROTECTION", IOMMU_FAULT_WRITE },
	{ 20, REG_V5_FAULT_AW_VA, "AW SECURITY PROTECTION", IOMMU_FAULT_WRITE },
};

/*
 * This structure is attached to dev->archdata.iommu of the master device
 * on device add. It contains a list of the SYSMMU controllers defined by
 * the device tree, which are bound to the given master device. It is
 * usually referenced by the 'owner' pointer.
 */
struct exynos_iommu_owner {
	struct list_head controllers;	/* list of sysmmu_drvdata.owner_node */
	struct iommu_domain *domain;	/* domain this device is attached to */
};

/*
 * This structure is an Exynos-specific generalization of struct
 * iommu_domain. It contains the list of SYSMMU controllers from all master
 * devices that have been attached to this domain, and the page tables of
 * the IO address space defined by it. It is usually referenced by the
 * 'domain' pointer.
 */
struct exynos_iommu_domain {
	struct list_head clients; /* list of sysmmu_drvdata.domain_node */
	sysmmu_pte_t *pgtable;	/* lv1 page table, 16KB */
	short *lv2entcnt;	/* free lv2 entry counter for each section */
	spinlock_t lock;	/* lock for modifying list of clients */
	spinlock_t pgtablelock;	/* lock for modifying page table @ pgtable */
	struct iommu_domain domain; /* generic domain data structure */
};

/*
 * This structure holds all data of a single SYSMMU controller. This
 * includes hw resources like registers and clocks, pointers and list nodes
 * to connect it to all other structures, internal state and parameters
 * read from the device tree. It is usually referenced by the 'data'
 * pointer.
 */
struct sysmmu_drvdata {
	struct device *sysmmu;		/* SYSMMU controller device */
	struct device *master;		/* master device (owner) */
	void __iomem *sfrbase;		/* our registers */
	struct clk *clk;		/* SYSMMU's clock */
	struct clk *aclk;		/* SYSMMU's aclk clock */
	struct clk *pclk;		/* SYSMMU's pclk clock */
	struct clk *clk_master;		/* master's device clock */
	int activations;		/* number of calls to sysmmu_enable */
	spinlock_t lock;		/* lock for modifying state */
	struct exynos_iommu_domain *domain; /* domain we belong to */
	struct list_head domain_node;	/* node for domain clients list */
	struct list_head owner_node;	/* node for owner controllers list */
	phys_addr_t pgtable;		/* assigned page table structure */
	unsigned int version;		/* our version */
};

static struct exynos_iommu_domain *to_exynos_domain(struct iommu_domain *dom)
{
	return container_of(dom, struct exynos_iommu_domain, domain);
}

static bool set_sysmmu_active(struct sysmmu_drvdata *data)
{
	/*
	 * Return true if the System MMU was not active previously and it
	 * needs to be initialized.
	 */
	return ++data->activations == 1;
}

static bool set_sysmmu_inactive(struct sysmmu_drvdata *data)
{
	/* return true if the System MMU needs to be disabled */
	BUG_ON(data->activations < 1);
	return --data->activations == 0;
}

static bool is_sysmmu_active(struct sysmmu_drvdata *data)
{
	return data->activations > 0;
}

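/*
 * Example (illustrative): if __sysmmu_enable() is called twice for the same
 * page table, activations reaches 2; the first __sysmmu_disable() only
 * drops the count to 1 and leaves the hardware running, and the second
 * call actually disables it.
 */
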
static void sysmmu_unblock(struct sysmmu_drvdata *data)
{
	writel(CTRL_ENABLE, data->sfrbase + REG_MMU_CTRL);
}

static bool sysmmu_block(struct sysmmu_drvdata *data)
{
	int i = 120;

	writel(CTRL_BLOCK, data->sfrbase + REG_MMU_CTRL);
	while ((i > 0) && !(readl(data->sfrbase + REG_MMU_STATUS) & 1))
		--i;

	if (!(readl(data->sfrbase + REG_MMU_STATUS) & 1)) {
		sysmmu_unblock(data);
		return false;
	}

	return true;
}

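/*
 * Usage sketch (mirrors sysmmu_tlb_invalidate_entry() below): TLB
 * maintenance is bracketed by block/unblock so the MMU does not translate
 * while entries are being invalidated:
 *
 *	if (sysmmu_block(data)) {
 *		__sysmmu_tlb_invalidate_entry(data, iova, num_inv);
 *		sysmmu_unblock(data);
 *	}
 */
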
static void __sysmmu_tlb_invalidate(struct sysmmu_drvdata *data)
{
	if (MMU_MAJ_VER(data->version) < 5)
		writel(0x1, data->sfrbase + REG_MMU_FLUSH);
	else
		writel(0x1, data->sfrbase + REG_V5_MMU_FLUSH_ALL);
}

static void __sysmmu_tlb_invalidate_entry(struct sysmmu_drvdata *data,
				sysmmu_iova_t iova, unsigned int num_inv)
{
	unsigned int i;

	for (i = 0; i < num_inv; i++) {
		if (MMU_MAJ_VER(data->version) < 5)
			writel((iova & SPAGE_MASK) | 1,
				data->sfrbase + REG_MMU_FLUSH_ENTRY);
		else
			writel((iova & SPAGE_MASK) | 1,
				data->sfrbase + REG_V5_MMU_FLUSH_ENTRY);
		iova += SPAGE_SIZE;
	}
}

static void __sysmmu_set_ptbase(struct sysmmu_drvdata *data, phys_addr_t pgd)
{
	if (MMU_MAJ_VER(data->version) < 5)
		writel(pgd, data->sfrbase + REG_PT_BASE_ADDR);
	else
		writel(pgd >> PAGE_SHIFT,
			data->sfrbase + REG_V5_PT_BASE_PFN);

	__sysmmu_tlb_invalidate(data);
}

static void __sysmmu_get_version(struct sysmmu_drvdata *data)
{
	u32 ver;

	clk_enable(data->clk_master);
	clk_enable(data->clk);
	clk_enable(data->pclk);
	clk_enable(data->aclk);

	ver = readl(data->sfrbase + REG_MMU_VERSION);

	/* controllers on some SoCs don't report proper version */
	if (ver == 0x80000001u)
		data->version = MAKE_MMU_VER(1, 0);
	else
		data->version = MMU_RAW_VER(ver);

	dev_dbg(data->sysmmu, "hardware version: %d.%d\n",
		MMU_MAJ_VER(data->version), MMU_MIN_VER(data->version));

	clk_disable(data->aclk);
	clk_disable(data->pclk);
	clk_disable(data->clk);
	clk_disable(data->clk_master);
}

static void show_fault_information(struct sysmmu_drvdata *data,
				   const struct sysmmu_fault_info *finfo,
				   sysmmu_iova_t fault_addr)
{
	sysmmu_pte_t *ent;

	dev_err(data->sysmmu, "%s FAULT occurred at %#x (page table base: %pa)\n",
		finfo->name, fault_addr, &data->pgtable);
	ent = section_entry(phys_to_virt(data->pgtable), fault_addr);
	dev_err(data->sysmmu, "\tLv1 entry: %#x\n", *ent);
	if (lv1ent_page(ent)) {
		ent = page_entry(ent, fault_addr);
		dev_err(data->sysmmu, "\t Lv2 entry: %#x\n", *ent);
	}
}

static irqreturn_t exynos_sysmmu_irq(int irq, void *dev_id)
{
	/* SYSMMU is in blocked state when the interrupt occurs. */
	struct sysmmu_drvdata *data = dev_id;
	const struct sysmmu_fault_info *finfo;
	unsigned int i, n, itype;
	sysmmu_iova_t fault_addr = -1;
	unsigned short reg_status, reg_clear;
	int ret = -ENOSYS;

	WARN_ON(!is_sysmmu_active(data));

	if (MMU_MAJ_VER(data->version) < 5) {
		reg_status = REG_INT_STATUS;
		reg_clear = REG_INT_CLEAR;
		finfo = sysmmu_faults;
		n = ARRAY_SIZE(sysmmu_faults);
	} else {
		reg_status = REG_V5_INT_STATUS;
		reg_clear = REG_V5_INT_CLEAR;
		finfo = sysmmu_v5_faults;
		n = ARRAY_SIZE(sysmmu_v5_faults);
	}

	spin_lock(&data->lock);

	clk_enable(data->clk_master);

	itype = __ffs(readl(data->sfrbase + reg_status));
	for (i = 0; i < n; i++, finfo++)
		if (finfo->bit == itype)
			break;
	/* unknown/unsupported fault */
	BUG_ON(i == n);

	/* print debug message */
	fault_addr = readl(data->sfrbase + finfo->addr_reg);
	show_fault_information(data, finfo, fault_addr);

	if (data->domain)
		ret = report_iommu_fault(&data->domain->domain,
					data->master, fault_addr, finfo->type);
	/* fault is not recovered by fault handler */
	BUG_ON(ret != 0);

	writel(1 << itype, data->sfrbase + reg_clear);

	sysmmu_unblock(data);

	clk_disable(data->clk_master);

	spin_unlock(&data->lock);

	return IRQ_HANDLED;
}

static void __sysmmu_disable_nocount(struct sysmmu_drvdata *data)
{
	clk_enable(data->clk_master);

	writel(CTRL_DISABLE, data->sfrbase + REG_MMU_CTRL);
	writel(0, data->sfrbase + REG_MMU_CFG);

	clk_disable(data->aclk);
	clk_disable(data->pclk);
	clk_disable(data->clk);
	clk_disable(data->clk_master);
}

static bool __sysmmu_disable(struct sysmmu_drvdata *data)
{
	bool disabled;
	unsigned long flags;

	spin_lock_irqsave(&data->lock, flags);

	disabled = set_sysmmu_inactive(data);

	if (disabled) {
		data->pgtable = 0;
		data->domain = NULL;

		__sysmmu_disable_nocount(data);

		dev_dbg(data->sysmmu, "Disabled\n");
	} else {
		dev_dbg(data->sysmmu, "%d times left to disable\n",
					data->activations);
	}

	spin_unlock_irqrestore(&data->lock, flags);

	return disabled;
}

static void __sysmmu_init_config(struct sysmmu_drvdata *data)
{
	unsigned int cfg;

	if (data->version <= MAKE_MMU_VER(3, 1))
		cfg = CFG_LRU | CFG_QOS(15);
	else if (data->version <= MAKE_MMU_VER(3, 2))
		cfg = CFG_LRU | CFG_QOS(15) | CFG_FLPDCACHE | CFG_SYSSEL;
	else
		cfg = CFG_QOS(15) | CFG_FLPDCACHE | CFG_ACGEN;

	writel(cfg, data->sfrbase + REG_MMU_CFG);
}

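/*
 * Example (illustrative): a v3.3 controller takes the last branch above, so
 * REG_MMU_CFG is programmed with CFG_QOS(15) | CFG_FLPDCACHE | CFG_ACGEN =
 * 0x780 | 0x100000 | 0x1000000 = 0x1100780.
 */
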
static void __sysmmu_enable_nocount(struct sysmmu_drvdata *data)
{
	clk_enable(data->clk_master);
	clk_enable(data->clk);
	clk_enable(data->pclk);
	clk_enable(data->aclk);

	writel(CTRL_BLOCK, data->sfrbase + REG_MMU_CTRL);

	__sysmmu_init_config(data);

	__sysmmu_set_ptbase(data, data->pgtable);

	writel(CTRL_ENABLE, data->sfrbase + REG_MMU_CTRL);

	clk_disable(data->clk_master);
}

static int __sysmmu_enable(struct sysmmu_drvdata *data, phys_addr_t pgtable,
			   struct exynos_iommu_domain *domain)
{
	int ret = 0;
	unsigned long flags;

	spin_lock_irqsave(&data->lock, flags);
	if (set_sysmmu_active(data)) {
		data->pgtable = pgtable;
		data->domain = domain;

		__sysmmu_enable_nocount(data);

		dev_dbg(data->sysmmu, "Enabled\n");
	} else {
		ret = (pgtable == data->pgtable) ? 1 : -EBUSY;

		dev_dbg(data->sysmmu, "already enabled\n");
	}

	if (WARN_ON(ret < 0))
		set_sysmmu_inactive(data); /* decrement count */

	spin_unlock_irqrestore(&data->lock, flags);

	return ret;
}

static void sysmmu_tlb_invalidate_flpdcache(struct sysmmu_drvdata *data,
					    sysmmu_iova_t iova)
{
	unsigned long flags;

	spin_lock_irqsave(&data->lock, flags);
	if (is_sysmmu_active(data) && data->version >= MAKE_MMU_VER(3, 3)) {
		clk_enable(data->clk_master);
		__sysmmu_tlb_invalidate_entry(data, iova, 1);
		clk_disable(data->clk_master);
	}
	spin_unlock_irqrestore(&data->lock, flags);
}

static void sysmmu_tlb_invalidate_entry(struct sysmmu_drvdata *data,
					sysmmu_iova_t iova, size_t size)
{
	unsigned long flags;

	spin_lock_irqsave(&data->lock, flags);
	if (is_sysmmu_active(data)) {
		unsigned int num_inv = 1;

		clk_enable(data->clk_master);

		/*
		 * L2TLB invalidation required:
		 * 4KB page: 1 invalidation
		 * 64KB page: 16 invalidations
		 * 1MB page: 64 invalidations
		 * because the TLB is 8-way set-associative with 64 sets.
		 * A 1MB page can be cached in any of the sets; a 64KB page
		 * can be cached in one of 16 consecutive sets.
		 */
		if (MMU_MAJ_VER(data->version) == 2)
			num_inv = min_t(unsigned int, size / PAGE_SIZE, 64);
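		/*
		 * Example (illustrative): unmapping one 64KB large page on
		 * v2 yields num_inv = 65536 / 4096 = 16 single-page
		 * invalidations, matching the table above.
		 */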

		if (sysmmu_block(data)) {
			__sysmmu_tlb_invalidate_entry(data, iova, num_inv);
			sysmmu_unblock(data);
		}
		clk_disable(data->clk_master);
	} else {
		dev_dbg(data->master,
			"disabled. Skipping TLB invalidation @ %#x\n", iova);
	}
	spin_unlock_irqrestore(&data->lock, flags);
}

static int __init exynos_sysmmu_probe(struct platform_device *pdev)
{
	int irq, ret;
	struct device *dev = &pdev->dev;
	struct sysmmu_drvdata *data;
	struct resource *res;

	data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	data->sfrbase = devm_ioremap_resource(dev, res);
	if (IS_ERR(data->sfrbase))
		return PTR_ERR(data->sfrbase);

	irq = platform_get_irq(pdev, 0);
	if (irq <= 0) {
		dev_err(dev, "Unable to find IRQ resource\n");
		return irq;
	}

	ret = devm_request_irq(dev, irq, exynos_sysmmu_irq, 0,
				dev_name(dev), data);
	if (ret) {
		dev_err(dev, "Unable to register handler of irq %d\n", irq);
		return ret;
	}

	data->clk = devm_clk_get(dev, "sysmmu");
	if (PTR_ERR(data->clk) == -ENOENT)
		data->clk = NULL;
	else if (IS_ERR(data->clk))
		return PTR_ERR(data->clk);
	ret = clk_prepare(data->clk);
	if (ret)
		return ret;

	data->aclk = devm_clk_get(dev, "aclk");
	if (PTR_ERR(data->aclk) == -ENOENT)
		data->aclk = NULL;
	else if (IS_ERR(data->aclk))
		return PTR_ERR(data->aclk);
	ret = clk_prepare(data->aclk);
	if (ret)
		return ret;

	data->pclk = devm_clk_get(dev, "pclk");
	if (PTR_ERR(data->pclk) == -ENOENT)
		data->pclk = NULL;
	else if (IS_ERR(data->pclk))
		return PTR_ERR(data->pclk);
	ret = clk_prepare(data->pclk);
	if (ret)
		return ret;

	if (!data->clk && (!data->aclk || !data->pclk)) {
		dev_err(dev, "Failed to get device clock(s)!\n");
		return -ENOSYS;
	}

	data->clk_master = devm_clk_get(dev, "master");
	if (PTR_ERR(data->clk_master) == -ENOENT)
		data->clk_master = NULL;
	else if (IS_ERR(data->clk_master))
		return PTR_ERR(data->clk_master);
	ret = clk_prepare(data->clk_master);
	if (ret)
		return ret;

	data->sysmmu = dev;
	spin_lock_init(&data->lock);

	platform_set_drvdata(pdev, data);

	__sysmmu_get_version(data);
	if (PG_ENT_SHIFT < 0) {
		if (MMU_MAJ_VER(data->version) < 5)
			PG_ENT_SHIFT = SYSMMU_PG_ENT_SHIFT;
		else
			PG_ENT_SHIFT = SYSMMU_V5_PG_ENT_SHIFT;
	}

	pm_runtime_enable(dev);

	return 0;
}

#ifdef CONFIG_PM_SLEEP
static int exynos_sysmmu_suspend(struct device *dev)
{
	struct sysmmu_drvdata *data = dev_get_drvdata(dev);

	dev_dbg(dev, "suspend\n");
	if (is_sysmmu_active(data)) {
		__sysmmu_disable_nocount(data);
		pm_runtime_put(dev);
	}
	return 0;
}

static int exynos_sysmmu_resume(struct device *dev)
{
	struct sysmmu_drvdata *data = dev_get_drvdata(dev);

	dev_dbg(dev, "resume\n");
	if (is_sysmmu_active(data)) {
		pm_runtime_get_sync(dev);
		__sysmmu_enable_nocount(data);
	}
	return 0;
}
#endif

static const struct dev_pm_ops sysmmu_pm_ops = {
	SET_LATE_SYSTEM_SLEEP_PM_OPS(exynos_sysmmu_suspend, exynos_sysmmu_resume)
};

static const struct of_device_id sysmmu_of_match[] __initconst = {
	{ .compatible = "samsung,exynos-sysmmu", },
	{ },
};

static struct platform_driver exynos_sysmmu_driver __refdata = {
	.probe	= exynos_sysmmu_probe,
	.driver	= {
		.name		= "exynos-sysmmu",
		.of_match_table	= sysmmu_of_match,
		.pm		= &sysmmu_pm_ops,
		.suppress_bind_attrs = true,
	}
};

static inline void update_pte(sysmmu_pte_t *ent, sysmmu_pte_t val)
{
	dma_sync_single_for_cpu(dma_dev, virt_to_phys(ent), sizeof(*ent),
				DMA_TO_DEVICE);
	*ent = val;
	dma_sync_single_for_device(dma_dev, virt_to_phys(ent), sizeof(*ent),
				   DMA_TO_DEVICE);
}

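/*
 * Note (sketch of the rationale): the page tables live in cacheable memory
 * while the SYSMMU reads them via DMA, so every PTE update is bracketed by
 * dma_sync_single_for_cpu()/dma_sync_single_for_device() to keep the CPU
 * cache and the device view coherent. dma_dev is the first probed SYSMMU
 * device (see exynos_iommu_of_setup() at the end of this file).
 */
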
static struct iommu_domain *exynos_iommu_domain_alloc(unsigned type)
{
	struct exynos_iommu_domain *domain;
	dma_addr_t handle;
	int i;

	/* Check if correct PTE offsets are initialized */
	BUG_ON(PG_ENT_SHIFT < 0 || !dma_dev);

	domain = kzalloc(sizeof(*domain), GFP_KERNEL);
	if (!domain)
		return NULL;

	if (type == IOMMU_DOMAIN_DMA) {
		if (iommu_get_dma_cookie(&domain->domain) != 0)
			goto err_pgtable;
	} else if (type != IOMMU_DOMAIN_UNMANAGED) {
		goto err_pgtable;
	}

	domain->pgtable = (sysmmu_pte_t *)__get_free_pages(GFP_KERNEL, 2);
	if (!domain->pgtable)
		goto err_dma_cookie;

	domain->lv2entcnt = (short *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, 1);
	if (!domain->lv2entcnt)
		goto err_counter;

	/* Workaround for System MMU v3.3 to prevent caching 1MiB mapping */
	for (i = 0; i < NUM_LV1ENTRIES; i += 8) {
		domain->pgtable[i + 0] = ZERO_LV2LINK;
		domain->pgtable[i + 1] = ZERO_LV2LINK;
		domain->pgtable[i + 2] = ZERO_LV2LINK;
		domain->pgtable[i + 3] = ZERO_LV2LINK;
		domain->pgtable[i + 4] = ZERO_LV2LINK;
		domain->pgtable[i + 5] = ZERO_LV2LINK;
		domain->pgtable[i + 6] = ZERO_LV2LINK;
		domain->pgtable[i + 7] = ZERO_LV2LINK;
	}

	handle = dma_map_single(dma_dev, domain->pgtable, LV1TABLE_SIZE,
				DMA_TO_DEVICE);
	/* For mapping page table entries we rely on dma == phys */
	BUG_ON(handle != virt_to_phys(domain->pgtable));

	spin_lock_init(&domain->lock);
	spin_lock_init(&domain->pgtablelock);
	INIT_LIST_HEAD(&domain->clients);

	domain->domain.geometry.aperture_start = 0;
	domain->domain.geometry.aperture_end   = ~0UL;
	domain->domain.geometry.force_aperture = true;

	return &domain->domain;

err_counter:
	free_pages((unsigned long)domain->pgtable, 2);
err_dma_cookie:
	if (type == IOMMU_DOMAIN_DMA)
		iommu_put_dma_cookie(&domain->domain);
err_pgtable:
	kfree(domain);
	return NULL;
}

static void exynos_iommu_domain_free(struct iommu_domain *iommu_domain)
{
	struct exynos_iommu_domain *domain = to_exynos_domain(iommu_domain);
	struct sysmmu_drvdata *data, *next;
	unsigned long flags;
	int i;

	WARN_ON(!list_empty(&domain->clients));

	spin_lock_irqsave(&domain->lock, flags);

	list_for_each_entry_safe(data, next, &domain->clients, domain_node) {
		if (__sysmmu_disable(data))
			data->master = NULL;
		list_del_init(&data->domain_node);
	}

	spin_unlock_irqrestore(&domain->lock, flags);

	if (iommu_domain->type == IOMMU_DOMAIN_DMA)
		iommu_put_dma_cookie(iommu_domain);

	dma_unmap_single(dma_dev, virt_to_phys(domain->pgtable), LV1TABLE_SIZE,
			 DMA_TO_DEVICE);

	for (i = 0; i < NUM_LV1ENTRIES; i++)
		if (lv1ent_page(domain->pgtable + i)) {
			phys_addr_t base = lv2table_base(domain->pgtable + i);

			dma_unmap_single(dma_dev, base, LV2TABLE_SIZE,
					 DMA_TO_DEVICE);
			kmem_cache_free(lv2table_kmem_cache,
					phys_to_virt(base));
		}

	free_pages((unsigned long)domain->pgtable, 2);
	free_pages((unsigned long)domain->lv2entcnt, 1);
	kfree(domain);
}

static void exynos_iommu_detach_device(struct iommu_domain *iommu_domain,
				       struct device *dev)
{
	struct exynos_iommu_owner *owner = dev->archdata.iommu;
	struct exynos_iommu_domain *domain = to_exynos_domain(iommu_domain);
	phys_addr_t pagetable = virt_to_phys(domain->pgtable);
	struct sysmmu_drvdata *data, *next;
	unsigned long flags;
	bool found = false;

	if (!has_sysmmu(dev) || owner->domain != iommu_domain)
		return;

	spin_lock_irqsave(&domain->lock, flags);
	list_for_each_entry_safe(data, next, &domain->clients, domain_node) {
		if (data->master == dev) {
			if (__sysmmu_disable(data)) {
				data->master = NULL;
				list_del_init(&data->domain_node);
			}
			pm_runtime_put(data->sysmmu);
			found = true;
		}
	}
	spin_unlock_irqrestore(&domain->lock, flags);

	owner->domain = NULL;

	if (found)
		dev_dbg(dev, "%s: Detached IOMMU with pgtable %pa\n",
					__func__, &pagetable);
	else
		dev_err(dev, "%s: No IOMMU is attached\n", __func__);
}

static int exynos_iommu_attach_device(struct iommu_domain *iommu_domain,
				      struct device *dev)
{
	struct exynos_iommu_owner *owner = dev->archdata.iommu;
	struct exynos_iommu_domain *domain = to_exynos_domain(iommu_domain);
	struct sysmmu_drvdata *data;
	phys_addr_t pagetable = virt_to_phys(domain->pgtable);
	unsigned long flags;
	int ret = -ENODEV;

	if (!has_sysmmu(dev))
		return -ENODEV;

	if (owner->domain)
		exynos_iommu_detach_device(owner->domain, dev);

	list_for_each_entry(data, &owner->controllers, owner_node) {
		pm_runtime_get_sync(data->sysmmu);
		ret = __sysmmu_enable(data, pagetable, domain);
		if (ret >= 0) {
			data->master = dev;

			spin_lock_irqsave(&domain->lock, flags);
			list_add_tail(&data->domain_node, &domain->clients);
			spin_unlock_irqrestore(&domain->lock, flags);
		}
	}

	if (ret < 0) {
		dev_err(dev, "%s: Failed to attach IOMMU with pgtable %pa\n",
			__func__, &pagetable);
		return ret;
	}

	owner->domain = iommu_domain;
	dev_dbg(dev, "%s: Attached IOMMU with pgtable %pa %s\n",
		__func__, &pagetable, (ret == 0) ? "" : ", again");

	return ret;
}

static sysmmu_pte_t *alloc_lv2entry(struct exynos_iommu_domain *domain,
		sysmmu_pte_t *sent, sysmmu_iova_t iova, short *pgcounter)
{
	if (lv1ent_section(sent)) {
		WARN(1, "Trying mapping on %#08x mapped with 1MiB page", iova);
		return ERR_PTR(-EADDRINUSE);
	}

	if (lv1ent_fault(sent)) {
		sysmmu_pte_t *pent;
		bool need_flush_flpd_cache = lv1ent_zero(sent);

		pent = kmem_cache_zalloc(lv2table_kmem_cache, GFP_ATOMIC);
		BUG_ON((uintptr_t)pent & (LV2TABLE_SIZE - 1));
		if (!pent)
			return ERR_PTR(-ENOMEM);

		update_pte(sent, mk_lv1ent_page(virt_to_phys(pent)));
		kmemleak_ignore(pent);
		*pgcounter = NUM_LV2ENTRIES;
		dma_map_single(dma_dev, pent, LV2TABLE_SIZE, DMA_TO_DEVICE);

		/*
		 * If a pre-fetched SLPD is the faulty SLPD in zero_l2_table,
		 * the FLPD cache may hold the address of zero_l2_table. This
		 * function replaces zero_l2_table with a new L2 page table
		 * in order to write valid mappings.
		 * Accessing the valid area may cause a page fault since the
		 * FLPD cache may still hold zero_l2_table for that area
		 * instead of the new L2 page table that contains its mapping
		 * information.
		 * Thus any replacement of zero_l2_table with another valid
		 * L2 page table must involve an FLPD cache invalidation on
		 * System MMU v3.3.
		 * FLPD cache invalidation is performed with TLB invalidation
		 * by VPN without blocking. It is safe to invalidate the TLB
		 * without blocking because the target address of the
		 * invalidation is not currently mapped.
		 */
		if (need_flush_flpd_cache) {
			struct sysmmu_drvdata *data;

			spin_lock(&domain->lock);
			list_for_each_entry(data, &domain->clients, domain_node)
				sysmmu_tlb_invalidate_flpdcache(data, iova);
			spin_unlock(&domain->lock);
		}
	}

	return page_entry(sent, iova);
}

static int lv1set_section(struct exynos_iommu_domain *domain,
			  sysmmu_pte_t *sent, sysmmu_iova_t iova,
			  phys_addr_t paddr, short *pgcnt)
{
	if (lv1ent_section(sent)) {
		WARN(1, "Trying mapping on 1MiB@%#08x that is mapped",
			iova);
		return -EADDRINUSE;
	}

	if (lv1ent_page(sent)) {
		if (*pgcnt != NUM_LV2ENTRIES) {
			WARN(1, "Trying mapping on 1MiB@%#08x that is mapped",
				iova);
			return -EADDRINUSE;
		}

		kmem_cache_free(lv2table_kmem_cache, page_entry(sent, 0));
		*pgcnt = 0;
	}

	update_pte(sent, mk_lv1ent_sect(paddr));

	spin_lock(&domain->lock);
	if (lv1ent_page_zero(sent)) {
		struct sysmmu_drvdata *data;
		/*
		 * Flushing the FLPD cache in System MMU v3.3, which may hold
		 * an FLPD entry due to speculative prefetch of an SLPD that
		 * has no mapping.
		 */
		list_for_each_entry(data, &domain->clients, domain_node)
			sysmmu_tlb_invalidate_flpdcache(data, iova);
	}
	spin_unlock(&domain->lock);

	return 0;
}

static int lv2set_page(sysmmu_pte_t *pent, phys_addr_t paddr, size_t size,
		       short *pgcnt)
{
	if (size == SPAGE_SIZE) {
		if (WARN_ON(!lv2ent_fault(pent)))
			return -EADDRINUSE;

		update_pte(pent, mk_lv2ent_spage(paddr));
		*pgcnt -= 1;
	} else { /* size == LPAGE_SIZE */
		int i;
		dma_addr_t pent_base = virt_to_phys(pent);

		dma_sync_single_for_cpu(dma_dev, pent_base,
					sizeof(*pent) * SPAGES_PER_LPAGE,
					DMA_TO_DEVICE);
		for (i = 0; i < SPAGES_PER_LPAGE; i++, pent++) {
			if (WARN_ON(!lv2ent_fault(pent))) {
				if (i > 0)
					memset(pent - i, 0, sizeof(*pent) * i);
				return -EADDRINUSE;
			}

			*pent = mk_lv2ent_lpage(paddr);
		}
		dma_sync_single_for_device(dma_dev, pent_base,
					   sizeof(*pent) * SPAGES_PER_LPAGE,
					   DMA_TO_DEVICE);
		*pgcnt -= SPAGES_PER_LPAGE;
	}

	return 0;
}

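/*
 * Example (illustrative): mapping one 64KiB large page writes
 * SPAGES_PER_LPAGE (16) identical lv2 entries above, so a later
 * exynos_iommu_iova_to_phys() lookup succeeds no matter which of the
 * sixteen 4KiB slots the address falls into.
 */
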
/*
 * *CAUTION* to the I/O virtual memory managers that support exynos-iommu:
 *
 * System MMU v3.x has advanced logic to improve address translation
 * performance by caching more page table entries during a page table walk.
 * However, the logic has a bug: while faulty page table entries are being
 * cached, System MMU reports a page fault if a cached fault entry is hit,
 * even though that entry has been updated to a valid entry after it was
 * cached. To prevent caching faulty page table entries which may be updated
 * to valid entries later, the virtual memory manager should apply the
 * workaround described below.
 *
 * Any two consecutive I/O virtual address regions must have a hole of 128KiB
 * at maximum to prevent misbehavior of System MMU 3.x (workaround for the
 * h/w bug).
 *
 * Precisely, the start address of any I/O virtual region must be aligned to
 * the following sizes for System MMU v3.1 and v3.2:
 *	System MMU v3.1: 128KiB
 *	System MMU v3.2: 256KiB
 *
 * Because System MMU v3.3 caches page table entries more aggressively, it
 * needs more workarounds:
 * - Any two consecutive I/O virtual regions must have a hole of size larger
 *   than or equal to 128KiB.
 * - The start address of an I/O virtual region must be aligned to 128KiB.
 */
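/*
 * Example (illustrative, for the v3.3 rules above): an IOVA allocator could
 * place a 1MiB region at 0x20000000 and the next region at 0x20120000; both
 * start addresses are 128KiB-aligned, and the 0x20000-byte gap between
 * 0x20100000 and 0x20120000 provides the required 128KiB hole.
 */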
static int exynos_iommu_map(struct iommu_domain *iommu_domain,
			    unsigned long l_iova, phys_addr_t paddr, size_t size,
			    int prot)
{
	struct exynos_iommu_domain *domain = to_exynos_domain(iommu_domain);
	sysmmu_pte_t *entry;
	sysmmu_iova_t iova = (sysmmu_iova_t)l_iova;
	unsigned long flags;
	int ret = -ENOMEM;

	BUG_ON(domain->pgtable == NULL);

	spin_lock_irqsave(&domain->pgtablelock, flags);

	entry = section_entry(domain->pgtable, iova);

	if (size == SECT_SIZE) {
		ret = lv1set_section(domain, entry, iova, paddr,
				     &domain->lv2entcnt[lv1ent_offset(iova)]);
	} else {
		sysmmu_pte_t *pent;

		pent = alloc_lv2entry(domain, entry, iova,
				      &domain->lv2entcnt[lv1ent_offset(iova)]);

		if (IS_ERR(pent))
			ret = PTR_ERR(pent);
		else
			ret = lv2set_page(pent, paddr, size,
				       &domain->lv2entcnt[lv1ent_offset(iova)]);
	}

	if (ret)
		pr_err("%s: Failed(%d) to map %#zx bytes @ %#x\n",
			__func__, ret, size, iova);

	spin_unlock_irqrestore(&domain->pgtablelock, flags);

	return ret;
}

static void exynos_iommu_tlb_invalidate_entry(struct exynos_iommu_domain *domain,
					      sysmmu_iova_t iova, size_t size)
{
	struct sysmmu_drvdata *data;
	unsigned long flags;

	spin_lock_irqsave(&domain->lock, flags);

	list_for_each_entry(data, &domain->clients, domain_node)
		sysmmu_tlb_invalidate_entry(data, iova, size);

	spin_unlock_irqrestore(&domain->lock, flags);
}

static size_t exynos_iommu_unmap(struct iommu_domain *iommu_domain,
				 unsigned long l_iova, size_t size)
{
	struct exynos_iommu_domain *domain = to_exynos_domain(iommu_domain);
	sysmmu_iova_t iova = (sysmmu_iova_t)l_iova;
	sysmmu_pte_t *ent;
	size_t err_pgsize;
	unsigned long flags;

	BUG_ON(domain->pgtable == NULL);

	spin_lock_irqsave(&domain->pgtablelock, flags);

	ent = section_entry(domain->pgtable, iova);

	if (lv1ent_section(ent)) {
		if (WARN_ON(size < SECT_SIZE)) {
			err_pgsize = SECT_SIZE;
			goto err;
		}

		/* workaround for h/w bug in System MMU v3.3 */
		update_pte(ent, ZERO_LV2LINK);
		size = SECT_SIZE;
		goto done;
	}

	if (unlikely(lv1ent_fault(ent))) {
		if (size > SECT_SIZE)
			size = SECT_SIZE;
		goto done;
	}

	/* lv1ent_page(sent) == true here */

	ent = page_entry(ent, iova);

	if (unlikely(lv2ent_fault(ent))) {
		size = SPAGE_SIZE;
		goto done;
	}

	if (lv2ent_small(ent)) {
		update_pte(ent, 0);
		size = SPAGE_SIZE;
		domain->lv2entcnt[lv1ent_offset(iova)] += 1;
		goto done;
	}

	/* lv2ent_large(ent) == true here */
	if (WARN_ON(size < LPAGE_SIZE)) {
		err_pgsize = LPAGE_SIZE;
		goto err;
	}

	dma_sync_single_for_cpu(dma_dev, virt_to_phys(ent),
				sizeof(*ent) * SPAGES_PER_LPAGE,
				DMA_TO_DEVICE);
	memset(ent, 0, sizeof(*ent) * SPAGES_PER_LPAGE);
	dma_sync_single_for_device(dma_dev, virt_to_phys(ent),
				   sizeof(*ent) * SPAGES_PER_LPAGE,
				   DMA_TO_DEVICE);
	size = LPAGE_SIZE;
	domain->lv2entcnt[lv1ent_offset(iova)] += SPAGES_PER_LPAGE;
done:
	spin_unlock_irqrestore(&domain->pgtablelock, flags);

	exynos_iommu_tlb_invalidate_entry(domain, iova, size);

	return size;
err:
	spin_unlock_irqrestore(&domain->pgtablelock, flags);

	pr_err("%s: Failed: size(%#zx) @ %#x is smaller than page size %#zx\n",
		__func__, size, iova, err_pgsize);

	return 0;
}

static phys_addr_t exynos_iommu_iova_to_phys(struct iommu_domain *iommu_domain,
					     dma_addr_t iova)
{
	struct exynos_iommu_domain *domain = to_exynos_domain(iommu_domain);
	sysmmu_pte_t *entry;
	unsigned long flags;
	phys_addr_t phys = 0;

	spin_lock_irqsave(&domain->pgtablelock, flags);

	entry = section_entry(domain->pgtable, iova);

	if (lv1ent_section(entry)) {
		phys = section_phys(entry) + section_offs(iova);
	} else if (lv1ent_page(entry)) {
		entry = page_entry(entry, iova);

		if (lv2ent_large(entry))
			phys = lpage_phys(entry) + lpage_offs(iova);
		else if (lv2ent_small(entry))
			phys = spage_phys(entry) + spage_offs(iova);
	}

	spin_unlock_irqrestore(&domain->pgtablelock, flags);

	return phys;
}

static struct iommu_group *get_device_iommu_group(struct device *dev)
{
	struct iommu_group *group;

	group = iommu_group_get(dev);
	if (!group)
		group = iommu_group_alloc();

	return group;
}

static int exynos_iommu_add_device(struct device *dev)
{
	struct iommu_group *group;

	if (!has_sysmmu(dev))
		return -ENODEV;

	group = iommu_group_get_for_dev(dev);

	if (IS_ERR(group))
		return PTR_ERR(group);

	iommu_group_put(group);

	return 0;
}

static void exynos_iommu_remove_device(struct device *dev)
{
	if (!has_sysmmu(dev))
		return;

	iommu_group_remove_device(dev);
}

static int exynos_iommu_of_xlate(struct device *dev,
				 struct of_phandle_args *spec)
{
	struct exynos_iommu_owner *owner = dev->archdata.iommu;
	struct platform_device *sysmmu = of_find_device_by_node(spec->np);
	struct sysmmu_drvdata *data;

	if (!sysmmu)
		return -ENODEV;

	data = platform_get_drvdata(sysmmu);
	if (!data)
		return -ENODEV;

	if (!owner) {
		owner = kzalloc(sizeof(*owner), GFP_KERNEL);
		if (!owner)
			return -ENOMEM;

		INIT_LIST_HEAD(&owner->controllers);
		dev->archdata.iommu = owner;
	}

	list_add_tail(&data->owner_node, &owner->controllers);
	return 0;
}

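/*
 * Device-tree sketch (illustrative; the node names and addresses below are
 * made up, only the compatible string comes from this driver). Each phandle
 * in a master's "iommus" property triggers one exynos_iommu_of_xlate()
 * call, which links that SYSMMU's drvdata into the master's
 * owner->controllers list:
 *
 *	sysmmu_fimd: sysmmu@14640000 {
 *		compatible = "samsung,exynos-sysmmu";
 *		...
 *	};
 *
 *	fimd@14400000 {
 *		...
 *		iommus = <&sysmmu_fimd>;
 *	};
 */
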
static struct iommu_ops exynos_iommu_ops = {
	.domain_alloc = exynos_iommu_domain_alloc,
	.domain_free = exynos_iommu_domain_free,
	.attach_dev = exynos_iommu_attach_device,
	.detach_dev = exynos_iommu_detach_device,
	.map = exynos_iommu_map,
	.unmap = exynos_iommu_unmap,
	.map_sg = default_iommu_map_sg,
	.iova_to_phys = exynos_iommu_iova_to_phys,
	.device_group = get_device_iommu_group,
	.add_device = exynos_iommu_add_device,
	.remove_device = exynos_iommu_remove_device,
	.pgsize_bitmap = SECT_SIZE | LPAGE_SIZE | SPAGE_SIZE,
	.of_xlate = exynos_iommu_of_xlate,
};

static bool init_done;

static int __init exynos_iommu_init(void)
{
	int ret;

	lv2table_kmem_cache = kmem_cache_create("exynos-iommu-lv2table",
				LV2TABLE_SIZE, LV2TABLE_SIZE, 0, NULL);
	if (!lv2table_kmem_cache) {
		pr_err("%s: Failed to create kmem cache\n", __func__);
		return -ENOMEM;
	}

	ret = platform_driver_register(&exynos_sysmmu_driver);
	if (ret) {
		pr_err("%s: Failed to register driver\n", __func__);
		goto err_reg_driver;
	}

	zero_lv2_table = kmem_cache_zalloc(lv2table_kmem_cache, GFP_KERNEL);
	if (zero_lv2_table == NULL) {
		pr_err("%s: Failed to allocate zero level2 page table\n",
			__func__);
		ret = -ENOMEM;
		goto err_zero_lv2;
	}

	ret = bus_set_iommu(&platform_bus_type, &exynos_iommu_ops);
	if (ret) {
		pr_err("%s: Failed to register exynos-iommu driver.\n",
								__func__);
		goto err_set_iommu;
	}

	init_done = true;

	return 0;
err_set_iommu:
	kmem_cache_free(lv2table_kmem_cache, zero_lv2_table);
err_zero_lv2:
	platform_driver_unregister(&exynos_sysmmu_driver);
err_reg_driver:
	kmem_cache_destroy(lv2table_kmem_cache);
	return ret;
}

static int __init exynos_iommu_of_setup(struct device_node *np)
{
	struct platform_device *pdev;

	if (!init_done)
		exynos_iommu_init();

	pdev = of_platform_device_create(np, NULL, platform_bus_type.dev_root);
	if (IS_ERR(pdev))
		return PTR_ERR(pdev);

	/*
	 * use the first registered sysmmu device for performing
	 * dma mapping operations on iommu page tables (cpu cache flush)
	 */
	if (!dma_dev)
		dma_dev = &pdev->dev;

	of_iommu_set_ops(np, &exynos_iommu_ops);
	return 0;
}

IOMMU_OF_DECLARE(exynos_iommu_of, "samsung,exynos-sysmmu",
		 exynos_iommu_of_setup);