// SPDX-License-Identifier: GPL-2.0-only
/*
 * CPU-agnostic ARM page table allocator.
 *
 * ARMv7 Short-descriptor format, supporting
 *  - Basic memory attributes
 *  - Simplified access permissions (AP[2:1] model)
 *  - Backwards-compatible TEX remap
 *  - Large pages/supersections (if indicated by the caller)
 *
 * Not supporting:
 *  - Legacy access permissions (AP[2:0] model)
 *
 * Almost certainly never supporting:
 *  - PXN
 *  - Domains
 *
 * Copyright (C) 2014-2015 ARM Limited
 * Copyright (c) 2014-2015 MediaTek Inc.
 */

#define pr_fmt(fmt)	"arm-v7s io-pgtable: " fmt

#include <linux/atomic.h>
#include <linux/dma-mapping.h>
#include <linux/gfp.h>
#include <linux/io-pgtable.h>
#include <linux/iommu.h>
#include <linux/kernel.h>
#include <linux/kmemleak.h>
#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/types.h>

#include <asm/barrier.h>

/* Struct accessors */
#define io_pgtable_to_data(x)						\
	container_of((x), struct arm_v7s_io_pgtable, iop)

#define io_pgtable_ops_to_data(x)					\
	io_pgtable_to_data(io_pgtable_ops_to_pgtable(x))

/*
 * We have 32 bits total; 12 bits resolved at level 1, 8 bits at level 2,
 * and 12 bits in a page.
 * MediaTek extend 2 bits to reach 34bits, 14 bits at lvl1 and 8 bits at lvl2.
 */
#define ARM_V7S_ADDR_BITS		32
#define _ARM_V7S_LVL_BITS(lvl, cfg)	((lvl) == 1 ? ((cfg)->ias - 20) : 8)
#define ARM_V7S_LVL_SHIFT(lvl)		((lvl) == 1 ? 20 : 12)
#define ARM_V7S_TABLE_SHIFT		10

#define ARM_V7S_PTES_PER_LVL(lvl, cfg)	(1 << _ARM_V7S_LVL_BITS(lvl, cfg))
#define ARM_V7S_TABLE_SIZE(lvl, cfg)					\
	(ARM_V7S_PTES_PER_LVL(lvl, cfg) * sizeof(arm_v7s_iopte))

#define ARM_V7S_BLOCK_SIZE(lvl)		(1UL << ARM_V7S_LVL_SHIFT(lvl))
#define ARM_V7S_LVL_MASK(lvl)		((u32)(~0U << ARM_V7S_LVL_SHIFT(lvl)))
#define ARM_V7S_TABLE_MASK		((u32)(~0U << ARM_V7S_TABLE_SHIFT))
#define _ARM_V7S_IDX_MASK(lvl, cfg)	(ARM_V7S_PTES_PER_LVL(lvl, cfg) - 1)
#define ARM_V7S_LVL_IDX(addr, lvl, cfg)	({				\
	int _l = lvl;							\
	((addr) >> ARM_V7S_LVL_SHIFT(_l)) & _ARM_V7S_IDX_MASK(_l, cfg); \
})

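/*
 * Worked example (added commentary, not from the original source): with a
 * 32-bit IAS, iova 0x12345678 decomposes as
 *	level 1 index: (0x12345678 >> 20) & 0xfff = 0x123
 *	level 2 index: (0x12345678 >> 12) & 0xff  = 0x45
 *	page offset:    0x12345678 & 0xfff        = 0x678
 */
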
/*
 * Large page/supersection entries are effectively a block of 16 page/section
 * entries, along the lines of the LPAE contiguous hint, but all with the
 * same output address. For want of a better common name we'll call them
 * "contiguous" versions of their respective page/section entries here, but
 * noting the distinction (WRT to TLB maintenance) that they represent *one*
 * entry repeated 16 times, not 16 separate entries (as in the LPAE case).
 */
#define ARM_V7S_CONT_PAGES		16

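/*
 * Illustration (added commentary): a 64K large page is the same level 2 PTE
 * written into 16 consecutive 4K slots, and a 16M supersection the same
 * level 1 PTE written into 16 consecutive 1M slots; for TLB maintenance
 * purposes each still behaves as a single entry covering the whole range.
 */
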
/* PTE type bits: these are all mixed up with XN/PXN bits in most cases */
#define ARM_V7S_PTE_TYPE_TABLE		0x1
#define ARM_V7S_PTE_TYPE_PAGE		0x2
#define ARM_V7S_PTE_TYPE_CONT_PAGE	0x1

#define ARM_V7S_PTE_IS_VALID(pte)	(((pte) & 0x3) != 0)
#define ARM_V7S_PTE_IS_TABLE(pte, lvl) \
	((lvl) == 1 && (((pte) & 0x3) == ARM_V7S_PTE_TYPE_TABLE))

/* Page table bits */
#define ARM_V7S_ATTR_XN(lvl)		BIT(4 * (2 - (lvl)))
#define ARM_V7S_ATTR_B			BIT(2)
#define ARM_V7S_ATTR_C			BIT(3)
#define ARM_V7S_ATTR_NS_TABLE		BIT(3)
#define ARM_V7S_ATTR_NS_SECTION		BIT(19)

#define ARM_V7S_CONT_SECTION		BIT(18)
#define ARM_V7S_CONT_PAGE_XN_SHIFT	15

/*
 * The attribute bits are consistently ordered*, but occupy bits [17:10] of
 * a level 1 PTE vs. bits [11:4] at level 2. Thus we define the individual
 * fields relative to that 8-bit block, plus a total shift relative to the PTE.
 */
#define ARM_V7S_ATTR_SHIFT(lvl)		(16 - (lvl) * 6)

#define ARM_V7S_ATTR_MASK		0xff
#define ARM_V7S_ATTR_AP0		BIT(0)
#define ARM_V7S_ATTR_AP1		BIT(1)
#define ARM_V7S_ATTR_AP2		BIT(5)
#define ARM_V7S_ATTR_S			BIT(6)
#define ARM_V7S_ATTR_NG			BIT(7)
#define ARM_V7S_TEX_SHIFT		2
#define ARM_V7S_TEX_MASK		0x7
#define ARM_V7S_ATTR_TEX(val)		(((val) & ARM_V7S_TEX_MASK) << ARM_V7S_TEX_SHIFT)

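/*
 * Shift arithmetic check (added commentary): ARM_V7S_ATTR_SHIFT(1) =
 * 16 - 6 = 10, placing the 8-bit attribute block at bits [17:10] of a
 * level 1 PTE; ARM_V7S_ATTR_SHIFT(2) = 16 - 12 = 4, placing it at bits
 * [11:4] of a level 2 PTE, exactly as the comment above describes.
 */
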
/* MediaTek extend the bits below for PA 32bit/33bit/34bit */
#define ARM_V7S_ATTR_MTK_PA_BIT32	BIT(9)
#define ARM_V7S_ATTR_MTK_PA_BIT33	BIT(4)
#define ARM_V7S_ATTR_MTK_PA_BIT34	BIT(5)

/* *well, except for TEX on level 2 large pages, of course :( */
#define ARM_V7S_CONT_PAGE_TEX_SHIFT	6
#define ARM_V7S_CONT_PAGE_TEX_MASK	(ARM_V7S_TEX_MASK << ARM_V7S_CONT_PAGE_TEX_SHIFT)

/* Simplified access permissions */
#define ARM_V7S_PTE_AF			ARM_V7S_ATTR_AP0
#define ARM_V7S_PTE_AP_UNPRIV		ARM_V7S_ATTR_AP1
#define ARM_V7S_PTE_AP_RDONLY		ARM_V7S_ATTR_AP2

/* Register bits */
#define ARM_V7S_RGN_NC			0
#define ARM_V7S_RGN_WBWA		1
#define ARM_V7S_RGN_WT			2
#define ARM_V7S_RGN_WB			3

#define ARM_V7S_PRRR_TYPE_DEVICE	1
#define ARM_V7S_PRRR_TYPE_NORMAL	2
#define ARM_V7S_PRRR_TR(n, type)	(((type) & 0x3) << ((n) * 2))
#define ARM_V7S_PRRR_DS0		BIT(16)
#define ARM_V7S_PRRR_DS1		BIT(17)
#define ARM_V7S_PRRR_NS0		BIT(18)
#define ARM_V7S_PRRR_NS1		BIT(19)
#define ARM_V7S_PRRR_NOS(n)		BIT((n) + 24)

#define ARM_V7S_NMRR_IR(n, attr)	(((attr) & 0x3) << ((n) * 2))
#define ARM_V7S_NMRR_OR(n, attr)	(((attr) & 0x3) << ((n) * 2 + 16))

#define ARM_V7S_TTBR_S			BIT(1)
#define ARM_V7S_TTBR_NOS		BIT(5)
#define ARM_V7S_TTBR_ORGN_ATTR(attr)	(((attr) & 0x3) << 3)
#define ARM_V7S_TTBR_IRGN_ATTR(attr)					\
	((((attr) & 0x1) << 6) | (((attr) & 0x2) >> 1))

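/*
 * Example (added commentary): the IRGN field is split across non-adjacent
 * TTBR bits, so ARM_V7S_TTBR_IRGN_ATTR(ARM_V7S_RGN_WBWA) =
 * ((1 & 0x1) << 6) | ((1 & 0x2) >> 1) = BIT(6); IRGN[0] lands in TTBR[6]
 * and IRGN[1] in TTBR[0].
 */
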
#ifdef CONFIG_ZONE_DMA32
#define ARM_V7S_TABLE_GFP_DMA GFP_DMA32
#define ARM_V7S_TABLE_SLAB_FLAGS SLAB_CACHE_DMA32
#else
#define ARM_V7S_TABLE_GFP_DMA GFP_DMA
#define ARM_V7S_TABLE_SLAB_FLAGS SLAB_CACHE_DMA
#endif

typedef u32 arm_v7s_iopte;

static bool selftest_running;

struct arm_v7s_io_pgtable {
	struct io_pgtable	iop;

	arm_v7s_iopte		*pgd;
	struct kmem_cache	*l2_tables;
	spinlock_t		split_lock;
};

static bool arm_v7s_pte_is_cont(arm_v7s_iopte pte, int lvl);

static dma_addr_t __arm_v7s_dma_addr(void *pages)
{
	return (dma_addr_t)virt_to_phys(pages);
}

static bool arm_v7s_is_mtk_enabled(struct io_pgtable_cfg *cfg)
{
	return IS_ENABLED(CONFIG_PHYS_ADDR_T_64BIT) &&
		(cfg->quirks & IO_PGTABLE_QUIRK_ARM_MTK_EXT);
}

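/*
 * Added commentary: the MediaTek extension stashes physical address bits
 * [34:32] in otherwise-unused attribute bits of the 32-bit PTE, so the two
 * helpers below must pack and unpack those bits symmetrically.
 */
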
static arm_v7s_iopte paddr_to_iopte(phys_addr_t paddr, int lvl,
				    struct io_pgtable_cfg *cfg)
{
	arm_v7s_iopte pte = paddr & ARM_V7S_LVL_MASK(lvl);

	if (!arm_v7s_is_mtk_enabled(cfg))
		return pte;

	if (paddr & BIT_ULL(32))
		pte |= ARM_V7S_ATTR_MTK_PA_BIT32;
	if (paddr & BIT_ULL(33))
		pte |= ARM_V7S_ATTR_MTK_PA_BIT33;
	if (paddr & BIT_ULL(34))
		pte |= ARM_V7S_ATTR_MTK_PA_BIT34;
	return pte;
}

static phys_addr_t iopte_to_paddr(arm_v7s_iopte pte, int lvl,
				  struct io_pgtable_cfg *cfg)
{
	arm_v7s_iopte mask;
	phys_addr_t paddr;

	if (ARM_V7S_PTE_IS_TABLE(pte, lvl))
		mask = ARM_V7S_TABLE_MASK;
	else if (arm_v7s_pte_is_cont(pte, lvl))
		mask = ARM_V7S_LVL_MASK(lvl) * ARM_V7S_CONT_PAGES;
	else
		mask = ARM_V7S_LVL_MASK(lvl);

	paddr = pte & mask;
	if (!arm_v7s_is_mtk_enabled(cfg))
		return paddr;

	if (pte & ARM_V7S_ATTR_MTK_PA_BIT32)
		paddr |= BIT_ULL(32);
	if (pte & ARM_V7S_ATTR_MTK_PA_BIT33)
		paddr |= BIT_ULL(33);
	if (pte & ARM_V7S_ATTR_MTK_PA_BIT34)
		paddr |= BIT_ULL(34);
	return paddr;
}

static arm_v7s_iopte *iopte_deref(arm_v7s_iopte pte, int lvl,
				  struct arm_v7s_io_pgtable *data)
{
	return phys_to_virt(iopte_to_paddr(pte, lvl, &data->iop.cfg));
}

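/*
 * Added commentary: level 1 tables come from the page allocator, while the
 * much smaller level 2 tables are carved out of a dedicated kmem_cache.
 * Either way the table must be physically addressable by a 32-bit
 * descriptor, hence the DMA zone GFP/slab flags defined earlier.
 */
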
static void *__arm_v7s_alloc_table(int lvl, gfp_t gfp,
				   struct arm_v7s_io_pgtable *data)
{
	struct io_pgtable_cfg *cfg = &data->iop.cfg;
	struct device *dev = cfg->iommu_dev;
	phys_addr_t phys;
	dma_addr_t dma;
	size_t size = ARM_V7S_TABLE_SIZE(lvl, cfg);
	void *table = NULL;

	if (lvl == 1)
		table = (void *)__get_free_pages(
			__GFP_ZERO | ARM_V7S_TABLE_GFP_DMA, get_order(size));
	else if (lvl == 2)
		table = kmem_cache_zalloc(data->l2_tables, gfp);

	if (!table)
		return NULL;

	phys = virt_to_phys(table);
	if (phys != (arm_v7s_iopte)phys) {
		/* Doesn't fit in PTE */
		dev_err(dev, "Page table does not fit in PTE: %pa", &phys);
		goto out_free;
	}
	if (!cfg->coherent_walk) {
		dma = dma_map_single(dev, table, size, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, dma))
			goto out_free;
		/*
		 * We depend on the IOMMU being able to work with any physical
		 * address directly, so if the DMA layer suggests otherwise by
		 * translating or truncating them, that bodes very badly...
		 */
		if (dma != phys)
			goto out_unmap;
	}
	if (lvl == 2)
		kmemleak_ignore(table);
	return table;

out_unmap:
	dev_err(dev, "Cannot accommodate DMA translation for IOMMU page tables\n");
	dma_unmap_single(dev, dma, size, DMA_TO_DEVICE);
out_free:
	if (lvl == 1)
		free_pages((unsigned long)table, get_order(size));
	else
		kmem_cache_free(data->l2_tables, table);
	return NULL;
}

static void __arm_v7s_free_table(void *table, int lvl,
				 struct arm_v7s_io_pgtable *data)
{
	struct io_pgtable_cfg *cfg = &data->iop.cfg;
	struct device *dev = cfg->iommu_dev;
	size_t size = ARM_V7S_TABLE_SIZE(lvl, cfg);

	if (!cfg->coherent_walk)
		dma_unmap_single(dev, __arm_v7s_dma_addr(table), size,
				 DMA_TO_DEVICE);
	if (lvl == 1)
		free_pages((unsigned long)table, get_order(size));
	else
		kmem_cache_free(data->l2_tables, table);
}

static void __arm_v7s_pte_sync(arm_v7s_iopte *ptep, int num_entries,
			       struct io_pgtable_cfg *cfg)
{
	if (cfg->coherent_walk)
		return;

	dma_sync_single_for_device(cfg->iommu_dev, __arm_v7s_dma_addr(ptep),
				   num_entries * sizeof(*ptep), DMA_TO_DEVICE);
}

static void __arm_v7s_set_pte(arm_v7s_iopte *ptep, arm_v7s_iopte pte,
			      int num_entries, struct io_pgtable_cfg *cfg)
{
	int i;

	for (i = 0; i < num_entries; i++)
		ptep[i] = pte;

	__arm_v7s_pte_sync(ptep, num_entries, cfg);
}

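/*
 * Added commentary: arm_v7s_prot_to_pte() below first assembles the
 * attribute bits at their level-agnostic positions within the 8-bit block
 * described above, then shifts the whole block into place for the given
 * level; XN, B/C and the type bits live outside that block and are OR'd
 * in afterwards.
 */
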
static arm_v7s_iopte arm_v7s_prot_to_pte(int prot, int lvl,
					 struct io_pgtable_cfg *cfg)
{
	bool ap = !(cfg->quirks & IO_PGTABLE_QUIRK_NO_PERMS);
	arm_v7s_iopte pte = ARM_V7S_ATTR_NG | ARM_V7S_ATTR_S;

	if (!(prot & IOMMU_MMIO))
		pte |= ARM_V7S_ATTR_TEX(1);
	if (ap) {
		pte |= ARM_V7S_PTE_AF;
		if (!(prot & IOMMU_PRIV))
			pte |= ARM_V7S_PTE_AP_UNPRIV;
		if (!(prot & IOMMU_WRITE))
			pte |= ARM_V7S_PTE_AP_RDONLY;
	}
	pte <<= ARM_V7S_ATTR_SHIFT(lvl);

	if ((prot & IOMMU_NOEXEC) && ap)
		pte |= ARM_V7S_ATTR_XN(lvl);
	if (prot & IOMMU_MMIO)
		pte |= ARM_V7S_ATTR_B;
	else if (prot & IOMMU_CACHE)
		pte |= ARM_V7S_ATTR_B | ARM_V7S_ATTR_C;

	pte |= ARM_V7S_PTE_TYPE_PAGE;
	if (lvl == 1 && (cfg->quirks & IO_PGTABLE_QUIRK_ARM_NS))
		pte |= ARM_V7S_ATTR_NS_SECTION;

	return pte;
}

static int arm_v7s_pte_to_prot(arm_v7s_iopte pte, int lvl)
{
	int prot = IOMMU_READ;
	arm_v7s_iopte attr = pte >> ARM_V7S_ATTR_SHIFT(lvl);

	if (!(attr & ARM_V7S_PTE_AP_RDONLY))
		prot |= IOMMU_WRITE;
	if (!(attr & ARM_V7S_PTE_AP_UNPRIV))
		prot |= IOMMU_PRIV;
	if ((attr & (ARM_V7S_TEX_MASK << ARM_V7S_TEX_SHIFT)) == 0)
		prot |= IOMMU_MMIO;
	else if (pte & ARM_V7S_ATTR_C)
		prot |= IOMMU_CACHE;
	if (pte & ARM_V7S_ATTR_XN(lvl))
		prot |= IOMMU_NOEXEC;

	return prot;
}

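/*
 * Added commentary: in the level 2 cases below, the XOR clears the XN and
 * TEX bits at their small-page positions (and flips the type field), while
 * the OR re-inserts them at their large-page positions;
 * arm_v7s_cont_to_pte() is the exact inverse of arm_v7s_pte_to_cont().
 */
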
static arm_v7s_iopte arm_v7s_pte_to_cont(arm_v7s_iopte pte, int lvl)
{
	if (lvl == 1) {
		pte |= ARM_V7S_CONT_SECTION;
	} else if (lvl == 2) {
		arm_v7s_iopte xn = pte & ARM_V7S_ATTR_XN(lvl);
		arm_v7s_iopte tex = pte & ARM_V7S_CONT_PAGE_TEX_MASK;

		pte ^= xn | tex | ARM_V7S_PTE_TYPE_PAGE;
		pte |= (xn << ARM_V7S_CONT_PAGE_XN_SHIFT) |
		       (tex << ARM_V7S_CONT_PAGE_TEX_SHIFT) |
		       ARM_V7S_PTE_TYPE_CONT_PAGE;
	}
	return pte;
}

static arm_v7s_iopte arm_v7s_cont_to_pte(arm_v7s_iopte pte, int lvl)
{
	if (lvl == 1) {
		pte &= ~ARM_V7S_CONT_SECTION;
	} else if (lvl == 2) {
		arm_v7s_iopte xn = pte & BIT(ARM_V7S_CONT_PAGE_XN_SHIFT);
		arm_v7s_iopte tex = pte & (ARM_V7S_CONT_PAGE_TEX_MASK <<
					   ARM_V7S_CONT_PAGE_TEX_SHIFT);

		pte ^= xn | tex | ARM_V7S_PTE_TYPE_CONT_PAGE;
		pte |= (xn >> ARM_V7S_CONT_PAGE_XN_SHIFT) |
		       (tex >> ARM_V7S_CONT_PAGE_TEX_SHIFT) |
		       ARM_V7S_PTE_TYPE_PAGE;
	}
	return pte;
}

static bool arm_v7s_pte_is_cont(arm_v7s_iopte pte, int lvl)
{
	if (lvl == 1 && !ARM_V7S_PTE_IS_TABLE(pte, lvl))
		return pte & ARM_V7S_CONT_SECTION;
	else if (lvl == 2)
		return !(pte & ARM_V7S_PTE_TYPE_PAGE);
	return false;
}

static size_t __arm_v7s_unmap(struct arm_v7s_io_pgtable *,
			      struct iommu_iotlb_gather *, unsigned long,
			      size_t, int, arm_v7s_iopte *);

static int arm_v7s_init_pte(struct arm_v7s_io_pgtable *data,
			    unsigned long iova, phys_addr_t paddr, int prot,
			    int lvl, int num_entries, arm_v7s_iopte *ptep)
{
	struct io_pgtable_cfg *cfg = &data->iop.cfg;
	arm_v7s_iopte pte;
	int i;

	for (i = 0; i < num_entries; i++)
		if (ARM_V7S_PTE_IS_TABLE(ptep[i], lvl)) {
			/*
			 * We need to unmap and free the old table before
			 * overwriting it with a block entry.
			 */
			arm_v7s_iopte *tblp;
			size_t sz = ARM_V7S_BLOCK_SIZE(lvl);

			tblp = ptep - ARM_V7S_LVL_IDX(iova, lvl, cfg);
			if (WARN_ON(__arm_v7s_unmap(data, NULL, iova + i * sz,
						    sz, lvl, tblp) != sz))
				return -EINVAL;
		} else if (ptep[i]) {
			/* We require an unmap first */
			WARN_ON(!selftest_running);
			return -EEXIST;
		}

	pte = arm_v7s_prot_to_pte(prot, lvl, cfg);
	if (num_entries > 1)
		pte = arm_v7s_pte_to_cont(pte, lvl);

	pte |= paddr_to_iopte(paddr, lvl, cfg);

	__arm_v7s_set_pte(ptep, pte, num_entries, cfg);
	return 0;
}

static arm_v7s_iopte arm_v7s_install_table(arm_v7s_iopte *table,
					   arm_v7s_iopte *ptep,
					   arm_v7s_iopte curr,
					   struct io_pgtable_cfg *cfg)
{
	arm_v7s_iopte old, new;

	new = virt_to_phys(table) | ARM_V7S_PTE_TYPE_TABLE;
	if (cfg->quirks & IO_PGTABLE_QUIRK_ARM_NS)
		new |= ARM_V7S_ATTR_NS_TABLE;

	/*
	 * Ensure the table itself is visible before its PTE can be.
	 * Whilst we could get away with cmpxchg64_release below, this
	 * doesn't have any ordering semantics when !CONFIG_SMP.
	 */
	dma_wmb();

	old = cmpxchg_relaxed(ptep, curr, new);
	__arm_v7s_pte_sync(ptep, 1, cfg);

	return old;
}

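/*
 * Added commentary: arm_v7s_install_table() returns the PTE value it
 * displaced, so in __arm_v7s_map() below a nonzero return means another
 * caller won the cmpxchg race and our freshly allocated table must be
 * freed in favour of theirs.
 */
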
static int __arm_v7s_map(struct arm_v7s_io_pgtable *data, unsigned long iova,
			 phys_addr_t paddr, size_t size, int prot,
			 int lvl, arm_v7s_iopte *ptep, gfp_t gfp)
{
	struct io_pgtable_cfg *cfg = &data->iop.cfg;
	arm_v7s_iopte pte, *cptep;
	int num_entries = size >> ARM_V7S_LVL_SHIFT(lvl);

	/* Find our entry at the current level */
	ptep += ARM_V7S_LVL_IDX(iova, lvl, cfg);

	/* If we can install a leaf entry at this level, then do so */
	if (num_entries)
		return arm_v7s_init_pte(data, iova, paddr, prot,
					lvl, num_entries, ptep);

	/* We can't allocate tables at the final level */
	if (WARN_ON(lvl == 2))
		return -EINVAL;

	/* Grab a pointer to the next level */
	pte = READ_ONCE(*ptep);
	if (!pte) {
		cptep = __arm_v7s_alloc_table(lvl + 1, gfp, data);
		if (!cptep)
			return -ENOMEM;

		pte = arm_v7s_install_table(cptep, ptep, 0, cfg);
		if (pte)
			__arm_v7s_free_table(cptep, lvl + 1, data);
	} else {
		/* We've no easy way of knowing if it's synced yet, so... */
		__arm_v7s_pte_sync(ptep, 1, cfg);
	}

	if (ARM_V7S_PTE_IS_TABLE(pte, lvl)) {
		cptep = iopte_deref(pte, lvl, data);
	} else if (pte) {
		/* We require an unmap first */
		WARN_ON(!selftest_running);
		return -EEXIST;
	}

	/* Rinse, repeat */
	return __arm_v7s_map(data, iova, paddr, size, prot, lvl + 1, cptep, gfp);
}

static int arm_v7s_map_pages(struct io_pgtable_ops *ops, unsigned long iova,
			     phys_addr_t paddr, size_t pgsize, size_t pgcount,
			     int prot, gfp_t gfp, size_t *mapped)
{
	struct arm_v7s_io_pgtable *data = io_pgtable_ops_to_data(ops);
	int ret = -EINVAL;

	if (WARN_ON(iova >= (1ULL << data->iop.cfg.ias) ||
		    paddr >= (1ULL << data->iop.cfg.oas)))
		return -ERANGE;

	/* If no access, then nothing to do */
	if (!(prot & (IOMMU_READ | IOMMU_WRITE)))
		return 0;

	while (pgcount--) {
		ret = __arm_v7s_map(data, iova, paddr, pgsize, prot, 1, data->pgd,
				    gfp);
		if (ret)
			break;

		iova += pgsize;
		paddr += pgsize;
		if (mapped)
			*mapped += pgsize;
	}
	/*
	 * Synchronise all PTE updates for the new mapping before there's
	 * a chance for anything to kick off a table walk for the new iova.
	 */
	wmb();

	return ret;
}

static int arm_v7s_map(struct io_pgtable_ops *ops, unsigned long iova,
		       phys_addr_t paddr, size_t size, int prot, gfp_t gfp)
{
	return arm_v7s_map_pages(ops, iova, paddr, size, 1, prot, gfp, NULL);
}

static void arm_v7s_free_pgtable(struct io_pgtable *iop)
{
	struct arm_v7s_io_pgtable *data = io_pgtable_to_data(iop);
	int i;

	for (i = 0; i < ARM_V7S_PTES_PER_LVL(1, &data->iop.cfg); i++) {
		arm_v7s_iopte pte = data->pgd[i];

		if (ARM_V7S_PTE_IS_TABLE(pte, 1))
			__arm_v7s_free_table(iopte_deref(pte, 1, data),
					     2, data);
	}
	__arm_v7s_free_table(data->pgd, 1, data);
	kmem_cache_destroy(data->l2_tables);
	kfree(data);
}

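/*
 * Added commentary: splitting rewinds ptep to the first of the 16 aligned
 * slots, rewrites each slot as an individual (non-contiguous) entry with a
 * suitably stepped output address, and then flushes the whole former range
 * with a single io_pgtable_tlb_flush_walk() call.
 */
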
static arm_v7s_iopte arm_v7s_split_cont(struct arm_v7s_io_pgtable *data,
					unsigned long iova, int idx, int lvl,
					arm_v7s_iopte *ptep)
{
	struct io_pgtable *iop = &data->iop;
	arm_v7s_iopte pte;
	size_t size = ARM_V7S_BLOCK_SIZE(lvl);
	int i;

	/* Check that we didn't lose a race to get the lock */
	pte = *ptep;
	if (!arm_v7s_pte_is_cont(pte, lvl))
		return pte;

	ptep -= idx & (ARM_V7S_CONT_PAGES - 1);
	pte = arm_v7s_cont_to_pte(pte, lvl);
	for (i = 0; i < ARM_V7S_CONT_PAGES; i++)
		ptep[i] = pte + i * size;

	__arm_v7s_pte_sync(ptep, ARM_V7S_CONT_PAGES, &iop->cfg);

	size *= ARM_V7S_CONT_PAGES;
	io_pgtable_tlb_flush_walk(iop, iova, size, size);
	return pte;
}

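/*
 * Added commentary: to unmap part of a section, a new level 2 table is
 * populated to cover everything *except* the range being unmapped, then
 * atomically swapped in over the old section entry; if that cmpxchg is
 * lost, someone else already installed a table, so we recurse into theirs.
 */
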
static size_t arm_v7s_split_blk_unmap(struct arm_v7s_io_pgtable *data,
				      struct iommu_iotlb_gather *gather,
				      unsigned long iova, size_t size,
				      arm_v7s_iopte blk_pte,
				      arm_v7s_iopte *ptep)
{
	struct io_pgtable_cfg *cfg = &data->iop.cfg;
	arm_v7s_iopte pte, *tablep;
	int i, unmap_idx, num_entries, num_ptes;

	tablep = __arm_v7s_alloc_table(2, GFP_ATOMIC, data);
	if (!tablep)
		return 0; /* Bytes unmapped */

	num_ptes = ARM_V7S_PTES_PER_LVL(2, cfg);
	num_entries = size >> ARM_V7S_LVL_SHIFT(2);
	unmap_idx = ARM_V7S_LVL_IDX(iova, 2, cfg);

	pte = arm_v7s_prot_to_pte(arm_v7s_pte_to_prot(blk_pte, 1), 2, cfg);
	if (num_entries > 1)
		pte = arm_v7s_pte_to_cont(pte, 2);

	for (i = 0; i < num_ptes; i += num_entries, pte += size) {
		/* Unmap! */
		if (i == unmap_idx)
			continue;

		__arm_v7s_set_pte(&tablep[i], pte, num_entries, cfg);
	}

	pte = arm_v7s_install_table(tablep, ptep, blk_pte, cfg);
	if (pte != blk_pte) {
		__arm_v7s_free_table(tablep, 2, data);

		if (!ARM_V7S_PTE_IS_TABLE(pte, 1))
			return 0;

		tablep = iopte_deref(pte, 1, data);
		return __arm_v7s_unmap(data, gather, iova, size, 2, tablep);
	}

	io_pgtable_tlb_add_page(&data->iop, gather, iova, size);
	return size;
}

static size_t __arm_v7s_unmap(struct arm_v7s_io_pgtable *data,
			      struct iommu_iotlb_gather *gather,
			      unsigned long iova, size_t size, int lvl,
			      arm_v7s_iopte *ptep)
{
	arm_v7s_iopte pte[ARM_V7S_CONT_PAGES];
	struct io_pgtable *iop = &data->iop;
	int idx, i = 0, num_entries = size >> ARM_V7S_LVL_SHIFT(lvl);

	/* Something went horribly wrong and we ran out of page table */
	if (WARN_ON(lvl > 2))
		return 0;

	idx = ARM_V7S_LVL_IDX(iova, lvl, &iop->cfg);
	ptep += idx;
	do {
		pte[i] = READ_ONCE(ptep[i]);
		if (WARN_ON(!ARM_V7S_PTE_IS_VALID(pte[i])))
			return 0;
	} while (++i < num_entries);

	/*
	 * If we've hit a contiguous 'large page' entry at this level, it
	 * needs splitting first, unless we're unmapping the whole lot.
	 *
	 * For splitting, we can't rewrite 16 PTEs atomically, and since we
	 * can't necessarily assume TEX remap we don't have a software bit to
	 * mark live entries being split. In practice (i.e. DMA API code), we
	 * will never be splitting large pages anyway, so just wrap this edge
	 * case in a lock for the sake of correctness and be done with it.
	 */
	if (num_entries <= 1 && arm_v7s_pte_is_cont(pte[0], lvl)) {
		unsigned long flags;

		spin_lock_irqsave(&data->split_lock, flags);
		pte[0] = arm_v7s_split_cont(data, iova, idx, lvl, ptep);
		spin_unlock_irqrestore(&data->split_lock, flags);
	}

	/* If the size matches this level, we're in the right place */
	if (num_entries) {
		size_t blk_size = ARM_V7S_BLOCK_SIZE(lvl);

		__arm_v7s_set_pte(ptep, 0, num_entries, &iop->cfg);

		for (i = 0; i < num_entries; i++) {
			if (ARM_V7S_PTE_IS_TABLE(pte[i], lvl)) {
				/* Also flush any partial walks */
				io_pgtable_tlb_flush_walk(iop, iova, blk_size,
						ARM_V7S_BLOCK_SIZE(lvl + 1));
				ptep = iopte_deref(pte[i], lvl, data);
				__arm_v7s_free_table(ptep, lvl + 1, data);
			} else if (!iommu_iotlb_gather_queued(gather)) {
				io_pgtable_tlb_add_page(iop, gather, iova, blk_size);
			}
			iova += blk_size;
		}
		return size;
	} else if (lvl == 1 && !ARM_V7S_PTE_IS_TABLE(pte[0], lvl)) {
		/*
		 * Insert a table at the next level to map the old region,
		 * minus the part we want to unmap
		 */
		return arm_v7s_split_blk_unmap(data, gather, iova, size, pte[0],
					       ptep);
	}

	/* Keep on walkin' */
	ptep = iopte_deref(pte[0], lvl, data);
	return __arm_v7s_unmap(data, gather, iova, size, lvl + 1, ptep);
}

static size_t arm_v7s_unmap_pages(struct io_pgtable_ops *ops, unsigned long iova,
				  size_t pgsize, size_t pgcount,
				  struct iommu_iotlb_gather *gather)
{
	struct arm_v7s_io_pgtable *data = io_pgtable_ops_to_data(ops);
	size_t unmapped = 0, ret;

	if (WARN_ON(iova >= (1ULL << data->iop.cfg.ias)))
		return 0;

	while (pgcount--) {
		ret = __arm_v7s_unmap(data, gather, iova, pgsize, 1, data->pgd);
		if (!ret)
			break;

		unmapped += pgsize;
		iova += pgsize;
	}

	return unmapped;
}

static size_t arm_v7s_unmap(struct io_pgtable_ops *ops, unsigned long iova,
			    size_t size, struct iommu_iotlb_gather *gather)
{
	return arm_v7s_unmap_pages(ops, iova, size, 1, gather);
}

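/*
 * Added commentary: the lookup below descends at most two levels; the
 * ++lvl means the level 1 entry is read first, and the loop stops as soon
 * as the PTE is no longer a table pointer. The iopte_deref() of the final
 * leaf PTE is computed but never dereferenced.
 */
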
static phys_addr_t arm_v7s_iova_to_phys(struct io_pgtable_ops *ops,
					unsigned long iova)
{
	struct arm_v7s_io_pgtable *data = io_pgtable_ops_to_data(ops);
	arm_v7s_iopte *ptep = data->pgd, pte;
	int lvl = 0;
	u32 mask;

	do {
		ptep += ARM_V7S_LVL_IDX(iova, ++lvl, &data->iop.cfg);
		pte = READ_ONCE(*ptep);
		ptep = iopte_deref(pte, lvl, data);
	} while (ARM_V7S_PTE_IS_TABLE(pte, lvl));

	if (!ARM_V7S_PTE_IS_VALID(pte))
		return 0;

	mask = ARM_V7S_LVL_MASK(lvl);
	if (arm_v7s_pte_is_cont(pte, lvl))
		mask *= ARM_V7S_CONT_PAGES;
	return iopte_to_paddr(pte, lvl, &data->iop.cfg) | (iova & ~mask);
}

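/*
 * Added commentary: with the MediaTek extension the input address space
 * may extend to 34 bits (a larger level 1 table) and the output to 35 bits
 * (via the PA bits stashed in the PTE); everyone else is capped at 32/32.
 */
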
static struct io_pgtable *arm_v7s_alloc_pgtable(struct io_pgtable_cfg *cfg,
						void *cookie)
{
	struct arm_v7s_io_pgtable *data;

	if (cfg->ias > (arm_v7s_is_mtk_enabled(cfg) ? 34 : ARM_V7S_ADDR_BITS))
		return NULL;

	if (cfg->oas > (arm_v7s_is_mtk_enabled(cfg) ? 35 : ARM_V7S_ADDR_BITS))
		return NULL;

	if (cfg->quirks & ~(IO_PGTABLE_QUIRK_ARM_NS |
			    IO_PGTABLE_QUIRK_NO_PERMS |
			    IO_PGTABLE_QUIRK_ARM_MTK_EXT))
		return NULL;

	/* If ARM_MTK_4GB is enabled, the NO_PERMS is also expected. */
	if (cfg->quirks & IO_PGTABLE_QUIRK_ARM_MTK_EXT &&
	    !(cfg->quirks & IO_PGTABLE_QUIRK_NO_PERMS))
		return NULL;

	data = kmalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		return NULL;

	spin_lock_init(&data->split_lock);
	data->l2_tables = kmem_cache_create("io-pgtable_armv7s_l2",
					    ARM_V7S_TABLE_SIZE(2, cfg),
					    ARM_V7S_TABLE_SIZE(2, cfg),
					    ARM_V7S_TABLE_SLAB_FLAGS, NULL);
	if (!data->l2_tables)
		goto out_free_data;

	data->iop.ops = (struct io_pgtable_ops) {
		.map		= arm_v7s_map,
		.map_pages	= arm_v7s_map_pages,
		.unmap		= arm_v7s_unmap,
		.unmap_pages	= arm_v7s_unmap_pages,
		.iova_to_phys	= arm_v7s_iova_to_phys,
	};

	/* We have to do this early for __arm_v7s_alloc_table to work... */
	data->iop.cfg = *cfg;

	/*
	 * Unless the IOMMU driver indicates supersection support by
	 * having SZ_16M set in the initial bitmap, they won't be used.
	 */
	cfg->pgsize_bitmap &= SZ_4K | SZ_64K | SZ_1M | SZ_16M;

	/* TCR: T0SZ=0, EAE=0 (if applicable) */
	cfg->arm_v7s_cfg.tcr = 0;

	/*
	 * TEX remap: the indices used map to the closest equivalent types
	 * under the non-TEX-remap interpretation of those attribute bits,
	 * excepting various implementation-defined aspects of shareability.
	 */
	cfg->arm_v7s_cfg.prrr = ARM_V7S_PRRR_TR(1, ARM_V7S_PRRR_TYPE_DEVICE) |
				ARM_V7S_PRRR_TR(4, ARM_V7S_PRRR_TYPE_NORMAL) |
				ARM_V7S_PRRR_TR(7, ARM_V7S_PRRR_TYPE_NORMAL) |
				ARM_V7S_PRRR_DS0 | ARM_V7S_PRRR_DS1 |
				ARM_V7S_PRRR_NS1 | ARM_V7S_PRRR_NOS(7);
	cfg->arm_v7s_cfg.nmrr = ARM_V7S_NMRR_IR(7, ARM_V7S_RGN_WBWA) |
				ARM_V7S_NMRR_OR(7, ARM_V7S_RGN_WBWA);

	/* Looking good; allocate a pgd */
	data->pgd = __arm_v7s_alloc_table(1, GFP_KERNEL, data);
	if (!data->pgd)
		goto out_free_data;

	/* Ensure the empty pgd is visible before any actual TTBR write */
	wmb();

	/* TTBR */
	cfg->arm_v7s_cfg.ttbr = virt_to_phys(data->pgd) | ARM_V7S_TTBR_S |
				(cfg->coherent_walk ? (ARM_V7S_TTBR_NOS |
				 ARM_V7S_TTBR_IRGN_ATTR(ARM_V7S_RGN_WBWA) |
				 ARM_V7S_TTBR_ORGN_ATTR(ARM_V7S_RGN_WBWA)) :
				(ARM_V7S_TTBR_IRGN_ATTR(ARM_V7S_RGN_NC) |
				 ARM_V7S_TTBR_ORGN_ATTR(ARM_V7S_RGN_NC)));
	return &data->iop;

out_free_data:
	kmem_cache_destroy(data->l2_tables);
	kfree(data);
	return NULL;
}

struct io_pgtable_init_fns io_pgtable_arm_v7s_init_fns = {
	.alloc	= arm_v7s_alloc_pgtable,
	.free	= arm_v7s_free_pgtable,
};

#ifdef CONFIG_IOMMU_IO_PGTABLE_ARMV7S_SELFTEST

static struct io_pgtable_cfg *cfg_cookie __initdata;

static void __init dummy_tlb_flush_all(void *cookie)
{
	WARN_ON(cookie != cfg_cookie);
}

static void __init dummy_tlb_flush(unsigned long iova, size_t size,
				   size_t granule, void *cookie)
{
	WARN_ON(cookie != cfg_cookie);
	WARN_ON(!(size & cfg_cookie->pgsize_bitmap));
}

static void __init dummy_tlb_add_page(struct iommu_iotlb_gather *gather,
				      unsigned long iova, size_t granule,
				      void *cookie)
{
	dummy_tlb_flush(iova, granule, granule, cookie);
}

static const struct iommu_flush_ops dummy_tlb_ops __initconst = {
	.tlb_flush_all	= dummy_tlb_flush_all,
	.tlb_flush_walk	= dummy_tlb_flush,
	.tlb_add_page	= dummy_tlb_add_page,
};

#define __FAIL(ops)	({				\
		WARN(1, "selftest: test failed\n");	\
		selftest_running = false;		\
		-EFAULT;				\
})

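/*
 * Added commentary: the statement expression above evaluates to -EFAULT,
 * so "return __FAIL(ops)" both emits the warning and propagates an error
 * code from the selftest.
 */
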
static int __init arm_v7s_do_selftests(void)
{
	struct io_pgtable_ops *ops;
	struct io_pgtable_cfg cfg = {
		.tlb = &dummy_tlb_ops,
		.oas = 32,
		.ias = 32,
		.coherent_walk = true,
		.quirks = IO_PGTABLE_QUIRK_ARM_NS,
		.pgsize_bitmap = SZ_4K | SZ_64K | SZ_1M | SZ_16M,
	};
	unsigned int iova, size, iova_start;
	unsigned int i, loopnr = 0;

	selftest_running = true;

	cfg_cookie = &cfg;

	ops = alloc_io_pgtable_ops(ARM_V7S, &cfg, &cfg);
	if (!ops) {
		pr_err("selftest: failed to allocate io pgtable ops\n");
		return -EINVAL;
	}

	/*
	 * Initial sanity checks.
	 * Empty page tables shouldn't provide any translations.
	 */
	if (ops->iova_to_phys(ops, 42))
		return __FAIL(ops);

	if (ops->iova_to_phys(ops, SZ_1G + 42))
		return __FAIL(ops);

	if (ops->iova_to_phys(ops, SZ_2G + 42))
		return __FAIL(ops);

	/*
	 * Distinct mappings of different granule sizes.
	 */
	iova = 0;
	for_each_set_bit(i, &cfg.pgsize_bitmap, BITS_PER_LONG) {
		size = 1UL << i;
		if (ops->map(ops, iova, iova, size, IOMMU_READ |
						    IOMMU_WRITE |
						    IOMMU_NOEXEC |
						    IOMMU_CACHE, GFP_KERNEL))
			return __FAIL(ops);

		/* Overlapping mappings */
		if (!ops->map(ops, iova, iova + size, size,
			      IOMMU_READ | IOMMU_NOEXEC, GFP_KERNEL))
			return __FAIL(ops);

		if (ops->iova_to_phys(ops, iova + 42) != (iova + 42))
			return __FAIL(ops);

		iova += SZ_16M;
		loopnr++;
	}

	/* Partial unmap */
	i = 1;
	size = 1UL << __ffs(cfg.pgsize_bitmap);
	while (i < loopnr) {
		iova_start = i * SZ_16M;
		if (ops->unmap(ops, iova_start + size, size, NULL) != size)
			return __FAIL(ops);

		/* Remap of partial unmap */
		if (ops->map(ops, iova_start + size, size, size, IOMMU_READ, GFP_KERNEL))
			return __FAIL(ops);

		if (ops->iova_to_phys(ops, iova_start + size + 42)
		    != (size + 42))
			return __FAIL(ops);
		i++;
	}

	/* Full unmap */
	iova = 0;
	for_each_set_bit(i, &cfg.pgsize_bitmap, BITS_PER_LONG) {
		size = 1UL << i;

		if (ops->unmap(ops, iova, size, NULL) != size)
			return __FAIL(ops);

		if (ops->iova_to_phys(ops, iova + 42))
			return __FAIL(ops);

		/* Remap full block */
		if (ops->map(ops, iova, iova, size, IOMMU_WRITE, GFP_KERNEL))
			return __FAIL(ops);

		if (ops->iova_to_phys(ops, iova + 42) != (iova + 42))
			return __FAIL(ops);

		iova += SZ_16M;
	}

	free_io_pgtable_ops(ops);

	selftest_running = false;

	pr_info("self test ok\n");
	return 0;
}
subsys_initcall(arm_v7s_do_selftests);
#endif