/*
 * CPU-agnostic ARM page table allocator.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 *
 * Copyright (C) 2014 ARM Limited
 *
 * Author: Will Deacon <will.deacon@arm.com>
 */

#define pr_fmt(fmt)	"arm-lpae io-pgtable: " fmt

#include <linux/iommu.h>
#include <linux/kernel.h>
#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/types.h>

#include "io-pgtable.h"

#define ARM_LPAE_MAX_ADDR_BITS		48
#define ARM_LPAE_S2_MAX_CONCAT_PAGES	16
#define ARM_LPAE_MAX_LEVELS		4

/* Struct accessors */
#define io_pgtable_to_data(x)						\
	container_of((x), struct arm_lpae_io_pgtable, iop)

#define io_pgtable_ops_to_pgtable(x)					\
	container_of((x), struct io_pgtable, ops)

#define io_pgtable_ops_to_data(x)					\
	io_pgtable_to_data(io_pgtable_ops_to_pgtable(x))

/*
 * For consistency with the architecture, we always consider
 * ARM_LPAE_MAX_LEVELS levels, with the walk starting at level n >= 0.
 */
#define ARM_LPAE_START_LVL(d)	(ARM_LPAE_MAX_LEVELS - (d)->levels)

/*
 * Calculate the right shift amount to get to the portion describing level l
 * in a virtual address mapped by the pagetable in d.
 */
#define ARM_LPAE_LVL_SHIFT(l,d)						\
	((((d)->levels - ((l) - ARM_LPAE_START_LVL(d) + 1))		\
	  * (d)->bits_per_level) + (d)->pg_shift)

#define ARM_LPAE_PAGES_PER_PGD(d)					\
	DIV_ROUND_UP((d)->pgd_size, 1UL << (d)->pg_shift)

/*
 * Calculate the index at level l used to map virtual address a using the
 * pagetable in d.
 */
#define ARM_LPAE_PGD_IDX(l,d)						\
	((l) == ARM_LPAE_START_LVL(d) ? ilog2(ARM_LPAE_PAGES_PER_PGD(d)) : 0)

#define ARM_LPAE_LVL_IDX(a,l,d)						\
	(((u64)(a) >> ARM_LPAE_LVL_SHIFT(l,d)) &			\
	 ((1 << ((d)->bits_per_level + ARM_LPAE_PGD_IDX(l,d))) - 1))

/* Calculate the block/page mapping size at level l for pagetable in d. */
#define ARM_LPAE_BLOCK_SIZE(l,d)					\
	(1 << (ilog2(sizeof(arm_lpae_iopte)) +				\
		((ARM_LPAE_MAX_LEVELS - (l)) * (d)->bits_per_level)))
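
/*
 * Worked example (for illustration only, assuming a 4KB granule and a
 * 48-bit IAS): pg_shift = 12 and bits_per_level = 9, so all four levels
 * are used. ARM_LPAE_LVL_SHIFT() then evaluates to 39/30/21/12 for levels
 * 0-3, i.e. each level indexes a successive 9-bit field of the IOVA, and
 * ARM_LPAE_BLOCK_SIZE() gives the 512GB/1GB/2MB/4KB region covered by a
 * single entry at each of those levels.
 */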

#define ARM_LPAE_PTE_TYPE_SHIFT		0
#define ARM_LPAE_PTE_TYPE_MASK		0x3

#define ARM_LPAE_PTE_TYPE_BLOCK		1
#define ARM_LPAE_PTE_TYPE_TABLE		3
#define ARM_LPAE_PTE_TYPE_PAGE		3

#define ARM_LPAE_PTE_NSTABLE		(((arm_lpae_iopte)1) << 63)
#define ARM_LPAE_PTE_XN			(((arm_lpae_iopte)3) << 53)
#define ARM_LPAE_PTE_AF			(((arm_lpae_iopte)1) << 10)
#define ARM_LPAE_PTE_SH_NS		(((arm_lpae_iopte)0) << 8)
#define ARM_LPAE_PTE_SH_OS		(((arm_lpae_iopte)2) << 8)
#define ARM_LPAE_PTE_SH_IS		(((arm_lpae_iopte)3) << 8)
#define ARM_LPAE_PTE_NS			(((arm_lpae_iopte)1) << 5)
#define ARM_LPAE_PTE_VALID		(((arm_lpae_iopte)1) << 0)

#define ARM_LPAE_PTE_ATTR_LO_MASK	(((arm_lpae_iopte)0x3ff) << 2)
/* Ignore the contiguous bit for block splitting */
#define ARM_LPAE_PTE_ATTR_HI_MASK	(((arm_lpae_iopte)6) << 52)
#define ARM_LPAE_PTE_ATTR_MASK		(ARM_LPAE_PTE_ATTR_LO_MASK |	\
					 ARM_LPAE_PTE_ATTR_HI_MASK)
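
/* Stage-1 PTE fields */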
#define ARM_LPAE_PTE_AP_UNPRIV		(((arm_lpae_iopte)1) << 6)
#define ARM_LPAE_PTE_AP_RDONLY		(((arm_lpae_iopte)2) << 6)
#define ARM_LPAE_PTE_ATTRINDX_SHIFT	2
#define ARM_LPAE_PTE_nG			(((arm_lpae_iopte)1) << 11)
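
/* Stage-2 PTE fields */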
#define ARM_LPAE_PTE_HAP_FAULT		(((arm_lpae_iopte)0) << 6)
#define ARM_LPAE_PTE_HAP_READ		(((arm_lpae_iopte)1) << 6)
#define ARM_LPAE_PTE_HAP_WRITE		(((arm_lpae_iopte)2) << 6)
#define ARM_LPAE_PTE_MEMATTR_OIWB	(((arm_lpae_iopte)0xf) << 2)
#define ARM_LPAE_PTE_MEMATTR_NC		(((arm_lpae_iopte)0x5) << 2)
#define ARM_LPAE_PTE_MEMATTR_DEV	(((arm_lpae_iopte)0x1) << 2)
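
/* Register bits (TCR/VTCR and MAIR) */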
#define ARM_32_LPAE_TCR_EAE		(1 << 31)
#define ARM_64_LPAE_S2_TCR_RES1		(1 << 31)

#define ARM_LPAE_TCR_TG0_4K		(0 << 14)
#define ARM_LPAE_TCR_TG0_64K		(1 << 14)
#define ARM_LPAE_TCR_TG0_16K		(2 << 14)

#define ARM_LPAE_TCR_SH0_SHIFT		12
#define ARM_LPAE_TCR_SH0_MASK		0x3
#define ARM_LPAE_TCR_SH_NS		0
#define ARM_LPAE_TCR_SH_OS		2
#define ARM_LPAE_TCR_SH_IS		3

#define ARM_LPAE_TCR_ORGN0_SHIFT	10
#define ARM_LPAE_TCR_IRGN0_SHIFT	8
#define ARM_LPAE_TCR_RGN_MASK		0x3
#define ARM_LPAE_TCR_RGN_NC		0
#define ARM_LPAE_TCR_RGN_WBWA		1
#define ARM_LPAE_TCR_RGN_WT		2
#define ARM_LPAE_TCR_RGN_WB		3

#define ARM_LPAE_TCR_SL0_SHIFT		6
#define ARM_LPAE_TCR_SL0_MASK		0x3

#define ARM_LPAE_TCR_T0SZ_SHIFT		0
#define ARM_LPAE_TCR_SZ_MASK		0xf

#define ARM_LPAE_TCR_PS_SHIFT		16
#define ARM_LPAE_TCR_PS_MASK		0x7

#define ARM_LPAE_TCR_IPS_SHIFT		32
#define ARM_LPAE_TCR_IPS_MASK		0x7

#define ARM_LPAE_TCR_PS_32_BIT		0x0ULL
#define ARM_LPAE_TCR_PS_36_BIT		0x1ULL
#define ARM_LPAE_TCR_PS_40_BIT		0x2ULL
#define ARM_LPAE_TCR_PS_42_BIT		0x3ULL
#define ARM_LPAE_TCR_PS_44_BIT		0x4ULL
#define ARM_LPAE_TCR_PS_48_BIT		0x5ULL

#define ARM_LPAE_MAIR_ATTR_SHIFT(n)	((n) << 3)
#define ARM_LPAE_MAIR_ATTR_MASK		0xff
#define ARM_LPAE_MAIR_ATTR_DEVICE	0x04
#define ARM_LPAE_MAIR_ATTR_NC		0x44
#define ARM_LPAE_MAIR_ATTR_WBRWA	0xff
#define ARM_LPAE_MAIR_ATTR_IDX_NC	0
#define ARM_LPAE_MAIR_ATTR_IDX_CACHE	1
#define ARM_LPAE_MAIR_ATTR_IDX_DEV	2

/* IOPTE accessors */
#define iopte_deref(pte,d)					\
	(__va((pte) & ((1ULL << ARM_LPAE_MAX_ADDR_BITS) - 1)	\
	& ~((1ULL << (d)->pg_shift) - 1)))

#define iopte_type(pte,l)					\
	(((pte) >> ARM_LPAE_PTE_TYPE_SHIFT) & ARM_LPAE_PTE_TYPE_MASK)

#define iopte_prot(pte)	((pte) & ARM_LPAE_PTE_ATTR_MASK)

#define iopte_leaf(pte,l)					\
	(l == (ARM_LPAE_MAX_LEVELS - 1) ?			\
		(iopte_type(pte,l) == ARM_LPAE_PTE_TYPE_PAGE) :	\
		(iopte_type(pte,l) == ARM_LPAE_PTE_TYPE_BLOCK))

#define iopte_to_pfn(pte,d)					\
	(((pte) & ((1ULL << ARM_LPAE_MAX_ADDR_BITS) - 1)) >> (d)->pg_shift)

#define pfn_to_iopte(pfn,d)					\
	(((pfn) << (d)->pg_shift) & ((1ULL << ARM_LPAE_MAX_ADDR_BITS) - 1))
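
/*
 * For illustration: an entry carries its output address in bits
 * [47:pg_shift], so iopte_to_pfn()/pfn_to_iopte() convert between that
 * field and a page frame number, while iopte_deref() turns a table entry
 * into the kernel virtual address of the next-level table it points to.
 */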

struct arm_lpae_io_pgtable {
	struct io_pgtable	iop;

	int			levels;
	size_t			pgd_size;
	unsigned long		pg_shift;
	unsigned long		bits_per_level;

	void			*pgd;
};

typedef u64 arm_lpae_iopte;

static bool selftest_running = false;

static int arm_lpae_init_pte(struct arm_lpae_io_pgtable *data,
			     unsigned long iova, phys_addr_t paddr,
			     arm_lpae_iopte prot, int lvl,
			     arm_lpae_iopte *ptep)
{
	arm_lpae_iopte pte = prot;

	/* We require an unmap first */
	if (iopte_leaf(*ptep, lvl)) {
		WARN_ON(!selftest_running);
		return -EEXIST;
	}

	if (data->iop.cfg.quirks & IO_PGTABLE_QUIRK_ARM_NS)
		pte |= ARM_LPAE_PTE_NS;

	if (lvl == ARM_LPAE_MAX_LEVELS - 1)
		pte |= ARM_LPAE_PTE_TYPE_PAGE;
	else
		pte |= ARM_LPAE_PTE_TYPE_BLOCK;

	pte |= ARM_LPAE_PTE_AF | ARM_LPAE_PTE_SH_IS;
	pte |= pfn_to_iopte(paddr >> data->pg_shift, data);

	*ptep = pte;
	data->iop.cfg.tlb->flush_pgtable(ptep, sizeof(*ptep), data->iop.cookie);
	return 0;
}

static int __arm_lpae_map(struct arm_lpae_io_pgtable *data, unsigned long iova,
			  phys_addr_t paddr, size_t size, arm_lpae_iopte prot,
			  int lvl, arm_lpae_iopte *ptep)
{
	arm_lpae_iopte *cptep, pte;
	void *cookie = data->iop.cookie;
	size_t block_size = ARM_LPAE_BLOCK_SIZE(lvl, data);

	/* Find our entry at the current level */
	ptep += ARM_LPAE_LVL_IDX(iova, lvl, data);

	/* If we can install a leaf entry at this level, then do so */
	if (size == block_size && (size & data->iop.cfg.pgsize_bitmap))
		return arm_lpae_init_pte(data, iova, paddr, prot, lvl, ptep);

	/* We can't allocate tables at the final level */
	if (WARN_ON(lvl >= ARM_LPAE_MAX_LEVELS - 1))
		return -EINVAL;

	/* Grab a pointer to the next level */
	pte = *ptep;
	if (!pte) {
		cptep = alloc_pages_exact(1UL << data->pg_shift,
					  GFP_ATOMIC | __GFP_ZERO);
		if (!cptep)
			return -ENOMEM;

		data->iop.cfg.tlb->flush_pgtable(cptep, 1UL << data->pg_shift,
						 cookie);
		pte = __pa(cptep) | ARM_LPAE_PTE_TYPE_TABLE;
		if (data->iop.cfg.quirks & IO_PGTABLE_QUIRK_ARM_NS)
			pte |= ARM_LPAE_PTE_NSTABLE;
		*ptep = pte;
		data->iop.cfg.tlb->flush_pgtable(ptep, sizeof(*ptep), cookie);
	} else {
		cptep = iopte_deref(pte, data);
	}

	/* Rinse, repeat */
	return __arm_lpae_map(data, iova, paddr, size, prot, lvl + 1, cptep);
}

static arm_lpae_iopte arm_lpae_prot_to_pte(struct arm_lpae_io_pgtable *data,
					   int prot)
{
	arm_lpae_iopte pte;

	if (data->iop.fmt == ARM_64_LPAE_S1 ||
	    data->iop.fmt == ARM_32_LPAE_S1) {
		pte = ARM_LPAE_PTE_AP_UNPRIV | ARM_LPAE_PTE_nG;

		if (!(prot & IOMMU_WRITE) && (prot & IOMMU_READ))
			pte |= ARM_LPAE_PTE_AP_RDONLY;

		if (prot & IOMMU_CACHE)
			pte |= (ARM_LPAE_MAIR_ATTR_IDX_CACHE
				<< ARM_LPAE_PTE_ATTRINDX_SHIFT);
	} else {
		pte = ARM_LPAE_PTE_HAP_FAULT;
		if (prot & IOMMU_READ)
			pte |= ARM_LPAE_PTE_HAP_READ;
		if (prot & IOMMU_WRITE)
			pte |= ARM_LPAE_PTE_HAP_WRITE;
		if (prot & IOMMU_CACHE)
			pte |= ARM_LPAE_PTE_MEMATTR_OIWB;
		else
			pte |= ARM_LPAE_PTE_MEMATTR_NC;
	}

	if (prot & IOMMU_NOEXEC)
		pte |= ARM_LPAE_PTE_XN;

	return pte;
}

static int arm_lpae_map(struct io_pgtable_ops *ops, unsigned long iova,
			phys_addr_t paddr, size_t size, int iommu_prot)
{
	struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
	arm_lpae_iopte *ptep = data->pgd;
	int lvl = ARM_LPAE_START_LVL(data);
	arm_lpae_iopte prot;

	/* If no access, then nothing to do */
	if (!(iommu_prot & (IOMMU_READ | IOMMU_WRITE)))
		return 0;

	prot = arm_lpae_prot_to_pte(data, iommu_prot);
	return __arm_lpae_map(data, iova, paddr, size, prot, lvl, ptep);
}

static void __arm_lpae_free_pgtable(struct arm_lpae_io_pgtable *data, int lvl,
				    arm_lpae_iopte *ptep)
{
	arm_lpae_iopte *start, *end;
	unsigned long table_size;

	/* Only leaf entries at the last level */
	if (lvl == ARM_LPAE_MAX_LEVELS - 1)
		return;

	if (lvl == ARM_LPAE_START_LVL(data))
		table_size = data->pgd_size;
	else
		table_size = 1UL << data->pg_shift;

	start = ptep;
	end = (void *)ptep + table_size;

	while (ptep != end) {
		arm_lpae_iopte pte = *ptep++;

		if (!pte || iopte_leaf(pte, lvl))
			continue;

		__arm_lpae_free_pgtable(data, lvl + 1, iopte_deref(pte, data));
	}

	free_pages_exact(start, table_size);
}

static void arm_lpae_free_pgtable(struct io_pgtable *iop)
{
	struct arm_lpae_io_pgtable *data = io_pgtable_to_data(iop);

	__arm_lpae_free_pgtable(data, ARM_LPAE_START_LVL(data), data->pgd);
	kfree(data);
}

static int arm_lpae_split_blk_unmap(struct arm_lpae_io_pgtable *data,
				    unsigned long iova, size_t size,
				    arm_lpae_iopte prot, int lvl,
				    arm_lpae_iopte *ptep, size_t blk_size)
{
	unsigned long blk_start, blk_end;
	phys_addr_t blk_paddr;
	arm_lpae_iopte table = 0;
	void *cookie = data->iop.cookie;
	const struct iommu_gather_ops *tlb = data->iop.cfg.tlb;

	blk_start = iova & ~(blk_size - 1);
	blk_end = blk_start + blk_size;
	blk_paddr = iopte_to_pfn(*ptep, data) << data->pg_shift;

	for (; blk_start < blk_end; blk_start += size, blk_paddr += size) {
		arm_lpae_iopte *tablep;

		/* Unmap! */
		if (blk_start == iova)
			continue;

		/* __arm_lpae_map expects a pointer to the start of the table */
		tablep = &table - ARM_LPAE_LVL_IDX(blk_start, lvl, data);
		if (__arm_lpae_map(data, blk_start, blk_paddr, size, prot, lvl,
				   tablep) < 0) {
			if (table) {
				/* Free the table we allocated */
				tablep = iopte_deref(table, data);
				__arm_lpae_free_pgtable(data, lvl + 1, tablep);
			}
			return 0; /* Bytes unmapped */
		}
	}

	*ptep = table;
	tlb->flush_pgtable(ptep, sizeof(*ptep), cookie);
	iova &= ~(blk_size - 1);
	tlb->tlb_add_flush(iova, blk_size, true, cookie);
	return size;
}

static int __arm_lpae_unmap(struct arm_lpae_io_pgtable *data,
			    unsigned long iova, size_t size, int lvl,
			    arm_lpae_iopte *ptep)
{
	arm_lpae_iopte pte;
	const struct iommu_gather_ops *tlb = data->iop.cfg.tlb;
	void *cookie = data->iop.cookie;
	size_t blk_size = ARM_LPAE_BLOCK_SIZE(lvl, data);

	ptep += ARM_LPAE_LVL_IDX(iova, lvl, data);
	pte = *ptep;

	/* Something went horribly wrong and we ran out of page table */
	if (WARN_ON(!pte || (lvl == ARM_LPAE_MAX_LEVELS)))
		return 0;

	/* If the size matches this level, we're in the right place */
	if (size == blk_size) {
		*ptep = 0;
		tlb->flush_pgtable(ptep, sizeof(*ptep), cookie);

		if (!iopte_leaf(pte, lvl)) {
			/* Also flush any partial walks */
			tlb->tlb_add_flush(iova, size, false, cookie);
			tlb->tlb_sync(data->iop.cookie);
			ptep = iopte_deref(pte, data);
			__arm_lpae_free_pgtable(data, lvl + 1, ptep);
		} else {
			tlb->tlb_add_flush(iova, size, true, cookie);
		}

		return size;
	} else if (iopte_leaf(pte, lvl)) {
		/*
		 * Insert a table at the next level to map the old region,
		 * minus the part we want to unmap
		 */
		return arm_lpae_split_blk_unmap(data, iova, size,
						iopte_prot(pte), lvl, ptep,
						blk_size);
	}

	/* Keep on walkin' */
	ptep = iopte_deref(pte, data);
	return __arm_lpae_unmap(data, iova, size, lvl + 1, ptep);
}

static int arm_lpae_unmap(struct io_pgtable_ops *ops, unsigned long iova,
			  size_t size)
{
	size_t unmapped;
	struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
	struct io_pgtable *iop = &data->iop;
	arm_lpae_iopte *ptep = data->pgd;
	int lvl = ARM_LPAE_START_LVL(data);

	unmapped = __arm_lpae_unmap(data, iova, size, lvl, ptep);
	if (unmapped)
		iop->cfg.tlb->tlb_sync(iop->cookie);

	return unmapped;
}

static phys_addr_t arm_lpae_iova_to_phys(struct io_pgtable_ops *ops,
					 unsigned long iova)
{
	struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
	arm_lpae_iopte pte, *ptep = data->pgd;
	int lvl = ARM_LPAE_START_LVL(data);

	do {
		/* Valid IOPTE pointer? */
		if (!ptep)
			return 0;

		/* Grab the IOPTE we're interested in */
		pte = *(ptep + ARM_LPAE_LVL_IDX(iova, lvl, data));

		/* Valid entry? */
		if (!pte)
			return 0;

		/* Leaf entry? */
		if (iopte_leaf(pte, lvl))
			goto found_translation;

		/* Take it to the next level */
		ptep = iopte_deref(pte, data);
	} while (++lvl < ARM_LPAE_MAX_LEVELS);

	/* Ran out of page tables to walk */
	return 0;

found_translation:
	iova &= ((1 << data->pg_shift) - 1);
	return ((phys_addr_t)iopte_to_pfn(pte, data) << data->pg_shift) | iova;
}

static void arm_lpae_restrict_pgsizes(struct io_pgtable_cfg *cfg)
{
	unsigned long granule;

	/*
	 * We need to restrict the supported page sizes to match the
	 * translation regime for a particular granule. Aim to match
	 * the CPU page size if possible, otherwise prefer smaller sizes.
	 * While we're at it, restrict the block sizes to match the
	 * chosen granule.
	 */
	if (cfg->pgsize_bitmap & PAGE_SIZE)
		granule = PAGE_SIZE;
	else if (cfg->pgsize_bitmap & ~PAGE_MASK)
		granule = 1UL << __fls(cfg->pgsize_bitmap & ~PAGE_MASK);
	else if (cfg->pgsize_bitmap & PAGE_MASK)
		granule = 1UL << __ffs(cfg->pgsize_bitmap & PAGE_MASK);
	else
		granule = 0;

	switch (granule) {
	case SZ_4K:
		cfg->pgsize_bitmap &= (SZ_4K | SZ_2M | SZ_1G);
		break;
	case SZ_16K:
		cfg->pgsize_bitmap &= (SZ_16K | SZ_32M);
		break;
	case SZ_64K:
		cfg->pgsize_bitmap &= (SZ_64K | SZ_512M);
		break;
	default:
		cfg->pgsize_bitmap = 0;
	}
}
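
/*
 * For example (illustrative only): with a 4KB CPU page size and a caller
 * advertising SZ_4K | SZ_64K | SZ_2M, the granule resolves to 4KB and the
 * bitmap is restricted to SZ_4K | SZ_2M, dropping the 64KB size that a 4KB
 * translation granule cannot express.
 */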

static struct arm_lpae_io_pgtable *
arm_lpae_alloc_pgtable(struct io_pgtable_cfg *cfg)
{
	unsigned long va_bits, pgd_bits;
	struct arm_lpae_io_pgtable *data;

	arm_lpae_restrict_pgsizes(cfg);

	if (!(cfg->pgsize_bitmap & (SZ_4K | SZ_16K | SZ_64K)))
		return NULL;

	if (cfg->ias > ARM_LPAE_MAX_ADDR_BITS)
		return NULL;

	if (cfg->oas > ARM_LPAE_MAX_ADDR_BITS)
		return NULL;

	data = kmalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		return NULL;

	data->pg_shift = __ffs(cfg->pgsize_bitmap);
	data->bits_per_level = data->pg_shift - ilog2(sizeof(arm_lpae_iopte));

	va_bits = cfg->ias - data->pg_shift;
	data->levels = DIV_ROUND_UP(va_bits, data->bits_per_level);

	/* Calculate the actual size of our pgd (without concatenation) */
	pgd_bits = va_bits - (data->bits_per_level * (data->levels - 1));
	data->pgd_size = 1UL << (pgd_bits + ilog2(sizeof(arm_lpae_iopte)));

	data->iop.ops = (struct io_pgtable_ops) {
		.map		= arm_lpae_map,
		.unmap		= arm_lpae_unmap,
		.iova_to_phys	= arm_lpae_iova_to_phys,
	};

	return data;
}
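
/*
 * Worked example of the geometry above (illustrative, assuming a 4KB
 * granule and a 48-bit IAS): pg_shift = 12, bits_per_level = 9 and
 * va_bits = 36, giving four levels; pgd_bits = 36 - 27 = 9, so the
 * (unconcatenated) pgd occupies one 4KB page of 512 eight-byte entries.
 */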

static struct io_pgtable *
arm_64_lpae_alloc_pgtable_s1(struct io_pgtable_cfg *cfg, void *cookie)
{
	u64 reg;
	struct arm_lpae_io_pgtable *data = arm_lpae_alloc_pgtable(cfg);

	if (!data)
		return NULL;

	/* TCR */
	reg = (ARM_LPAE_TCR_SH_IS << ARM_LPAE_TCR_SH0_SHIFT) |
	      (ARM_LPAE_TCR_RGN_WBWA << ARM_LPAE_TCR_IRGN0_SHIFT) |
	      (ARM_LPAE_TCR_RGN_WBWA << ARM_LPAE_TCR_ORGN0_SHIFT);

	switch (1 << data->pg_shift) {
	case SZ_4K:
		reg |= ARM_LPAE_TCR_TG0_4K;
		break;
	case SZ_16K:
		reg |= ARM_LPAE_TCR_TG0_16K;
		break;
	case SZ_64K:
		reg |= ARM_LPAE_TCR_TG0_64K;
		break;
	}

	switch (cfg->oas) {
	case 32:
		reg |= (ARM_LPAE_TCR_PS_32_BIT << ARM_LPAE_TCR_IPS_SHIFT);
		break;
	case 36:
		reg |= (ARM_LPAE_TCR_PS_36_BIT << ARM_LPAE_TCR_IPS_SHIFT);
		break;
	case 40:
		reg |= (ARM_LPAE_TCR_PS_40_BIT << ARM_LPAE_TCR_IPS_SHIFT);
		break;
	case 42:
		reg |= (ARM_LPAE_TCR_PS_42_BIT << ARM_LPAE_TCR_IPS_SHIFT);
		break;
	case 44:
		reg |= (ARM_LPAE_TCR_PS_44_BIT << ARM_LPAE_TCR_IPS_SHIFT);
		break;
	case 48:
		reg |= (ARM_LPAE_TCR_PS_48_BIT << ARM_LPAE_TCR_IPS_SHIFT);
		break;
	default:
		goto out_free_data;
	}

	reg |= (64ULL - cfg->ias) << ARM_LPAE_TCR_T0SZ_SHIFT;
	cfg->arm_lpae_s1_cfg.tcr = reg;

	/* MAIRs */
	reg = (ARM_LPAE_MAIR_ATTR_NC
	       << ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_NC)) |
	      (ARM_LPAE_MAIR_ATTR_WBRWA
	       << ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_CACHE)) |
	      (ARM_LPAE_MAIR_ATTR_DEVICE
	       << ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_DEV));

	cfg->arm_lpae_s1_cfg.mair[0] = reg;
	cfg->arm_lpae_s1_cfg.mair[1] = 0;

	/* Looking good; allocate a pgd */
	data->pgd = alloc_pages_exact(data->pgd_size, GFP_KERNEL | __GFP_ZERO);
	if (!data->pgd)
		goto out_free_data;

	cfg->tlb->flush_pgtable(data->pgd, data->pgd_size, cookie);

	/* TTBRs */
	cfg->arm_lpae_s1_cfg.ttbr[0] = virt_to_phys(data->pgd);
	cfg->arm_lpae_s1_cfg.ttbr[1] = 0;
	return &data->iop;

out_free_data:
	kfree(data);
	return NULL;
}

static struct io_pgtable *
arm_64_lpae_alloc_pgtable_s2(struct io_pgtable_cfg *cfg, void *cookie)
{
	u64 reg, sl;
	struct arm_lpae_io_pgtable *data = arm_lpae_alloc_pgtable(cfg);

	if (!data)
		return NULL;

	/*
	 * Concatenate PGDs at level 1 if possible in order to reduce
	 * the depth of the stage-2 walk.
	 */
	if (data->levels == ARM_LPAE_MAX_LEVELS) {
		unsigned long pgd_pages;

		pgd_pages = data->pgd_size >> ilog2(sizeof(arm_lpae_iopte));
		if (pgd_pages <= ARM_LPAE_S2_MAX_CONCAT_PAGES) {
			data->pgd_size = pgd_pages << data->pg_shift;
			data->levels--;
		}
	}

	/* VTCR */
	reg = ARM_64_LPAE_S2_TCR_RES1 |
	     (ARM_LPAE_TCR_SH_IS << ARM_LPAE_TCR_SH0_SHIFT) |
	     (ARM_LPAE_TCR_RGN_WBWA << ARM_LPAE_TCR_IRGN0_SHIFT) |
	     (ARM_LPAE_TCR_RGN_WBWA << ARM_LPAE_TCR_ORGN0_SHIFT);

	sl = ARM_LPAE_START_LVL(data);

	switch (1 << data->pg_shift) {
	case SZ_4K:
		reg |= ARM_LPAE_TCR_TG0_4K;
		sl++; /* SL0 format is different for 4K granule size */
		break;
	case SZ_16K:
		reg |= ARM_LPAE_TCR_TG0_16K;
		break;
	case SZ_64K:
		reg |= ARM_LPAE_TCR_TG0_64K;
		break;
	}

	switch (cfg->oas) {
	case 32:
		reg |= (ARM_LPAE_TCR_PS_32_BIT << ARM_LPAE_TCR_PS_SHIFT);
		break;
	case 36:
		reg |= (ARM_LPAE_TCR_PS_36_BIT << ARM_LPAE_TCR_PS_SHIFT);
		break;
	case 40:
		reg |= (ARM_LPAE_TCR_PS_40_BIT << ARM_LPAE_TCR_PS_SHIFT);
		break;
	case 42:
		reg |= (ARM_LPAE_TCR_PS_42_BIT << ARM_LPAE_TCR_PS_SHIFT);
		break;
	case 44:
		reg |= (ARM_LPAE_TCR_PS_44_BIT << ARM_LPAE_TCR_PS_SHIFT);
		break;
	case 48:
		reg |= (ARM_LPAE_TCR_PS_48_BIT << ARM_LPAE_TCR_PS_SHIFT);
		break;
	default:
		goto out_free_data;
	}

	reg |= (64ULL - cfg->ias) << ARM_LPAE_TCR_T0SZ_SHIFT;
	reg |= (~sl & ARM_LPAE_TCR_SL0_MASK) << ARM_LPAE_TCR_SL0_SHIFT;
	cfg->arm_lpae_s2_cfg.vtcr = reg;

	/* Allocate pgd pages */
	data->pgd = alloc_pages_exact(data->pgd_size, GFP_KERNEL | __GFP_ZERO);
	if (!data->pgd)
		goto out_free_data;

	cfg->tlb->flush_pgtable(data->pgd, data->pgd_size, cookie);

	/* VTTBR */
	cfg->arm_lpae_s2_cfg.vttbr = virt_to_phys(data->pgd);
	return &data->iop;

out_free_data:
	kfree(data);
	return NULL;
}

static struct io_pgtable *
arm_32_lpae_alloc_pgtable_s1(struct io_pgtable_cfg *cfg, void *cookie)
{
	struct io_pgtable *iop;

	if (cfg->ias > 32 || cfg->oas > 40)
		return NULL;

	cfg->pgsize_bitmap &= (SZ_4K | SZ_2M | SZ_1G);
	iop = arm_64_lpae_alloc_pgtable_s1(cfg, cookie);
	if (iop) {
		cfg->arm_lpae_s1_cfg.tcr |= ARM_32_LPAE_TCR_EAE;
		cfg->arm_lpae_s1_cfg.tcr &= 0xffffffff;
	}

	return iop;
}

static struct io_pgtable *
arm_32_lpae_alloc_pgtable_s2(struct io_pgtable_cfg *cfg, void *cookie)
{
	struct io_pgtable *iop;

	if (cfg->ias > 40 || cfg->oas > 40)
		return NULL;

	cfg->pgsize_bitmap &= (SZ_4K | SZ_2M | SZ_1G);
	iop = arm_64_lpae_alloc_pgtable_s2(cfg, cookie);
	if (iop)
		cfg->arm_lpae_s2_cfg.vtcr &= 0xffffffff;

	return iop;
}

struct io_pgtable_init_fns io_pgtable_arm_64_lpae_s1_init_fns = {
	.alloc	= arm_64_lpae_alloc_pgtable_s1,
	.free	= arm_lpae_free_pgtable,
};

struct io_pgtable_init_fns io_pgtable_arm_64_lpae_s2_init_fns = {
	.alloc	= arm_64_lpae_alloc_pgtable_s2,
	.free	= arm_lpae_free_pgtable,
};

struct io_pgtable_init_fns io_pgtable_arm_32_lpae_s1_init_fns = {
	.alloc	= arm_32_lpae_alloc_pgtable_s1,
	.free	= arm_lpae_free_pgtable,
};

struct io_pgtable_init_fns io_pgtable_arm_32_lpae_s2_init_fns = {
	.alloc	= arm_32_lpae_alloc_pgtable_s2,
	.free	= arm_lpae_free_pgtable,
};
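
/*
 * Typical usage (a minimal sketch that mirrors the selftest below rather
 * than any particular IOMMU driver): fill in a struct io_pgtable_cfg with
 * the supported pgsize_bitmap, ias/oas and iommu_gather_ops callbacks,
 * call alloc_io_pgtable_ops(ARM_64_LPAE_S1, &cfg, cookie), then drive the
 * returned ops:
 *
 *	ops->map(ops, iova, paddr, SZ_4K, IOMMU_READ | IOMMU_WRITE);
 *	phys = ops->iova_to_phys(ops, iova);
 *	ops->unmap(ops, iova, SZ_4K);
 *	free_io_pgtable_ops(ops);
 *
 * The init_fns tables above are what hook these constructors into that
 * generic allocation path.
 */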

#ifdef CONFIG_IOMMU_IO_PGTABLE_LPAE_SELFTEST

static struct io_pgtable_cfg *cfg_cookie;

static void dummy_tlb_flush_all(void *cookie)
{
	WARN_ON(cookie != cfg_cookie);
}

static void dummy_tlb_add_flush(unsigned long iova, size_t size, bool leaf,
				void *cookie)
{
	WARN_ON(cookie != cfg_cookie);
	WARN_ON(!(size & cfg_cookie->pgsize_bitmap));
}

static void dummy_tlb_sync(void *cookie)
{
	WARN_ON(cookie != cfg_cookie);
}

static void dummy_flush_pgtable(void *ptr, size_t size, void *cookie)
{
	WARN_ON(cookie != cfg_cookie);
}

static struct iommu_gather_ops dummy_tlb_ops __initdata = {
	.tlb_flush_all	= dummy_tlb_flush_all,
	.tlb_add_flush	= dummy_tlb_add_flush,
	.tlb_sync	= dummy_tlb_sync,
	.flush_pgtable	= dummy_flush_pgtable,
};

static void __init arm_lpae_dump_ops(struct io_pgtable_ops *ops)
{
	struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
	struct io_pgtable_cfg *cfg = &data->iop.cfg;

	pr_err("cfg: pgsize_bitmap 0x%lx, ias %u-bit\n",
		cfg->pgsize_bitmap, cfg->ias);
	pr_err("data: %d levels, 0x%zx pgd_size, %lu pg_shift, %lu bits_per_level, pgd @ %p\n",
		data->levels, data->pgd_size, data->pg_shift,
		data->bits_per_level, data->pgd);
}

#define __FAIL(ops, i)	({						\
		WARN(1, "selftest: test failed for fmt idx %d\n", (i));	\
		arm_lpae_dump_ops(ops);					\
		selftest_running = false;				\
		-EFAULT;						\
})

static int __init arm_lpae_run_tests(struct io_pgtable_cfg *cfg)
{
	static const enum io_pgtable_fmt fmts[] = {
		ARM_64_LPAE_S1,
		ARM_64_LPAE_S2,
	};

	int i, j;
	unsigned long iova;
	size_t size;
	struct io_pgtable_ops *ops;

	selftest_running = true;

	for (i = 0; i < ARRAY_SIZE(fmts); ++i) {
		cfg_cookie = cfg;
		ops = alloc_io_pgtable_ops(fmts[i], cfg, cfg);
		if (!ops) {
			pr_err("selftest: failed to allocate io pgtable ops\n");
			return -ENOMEM;
		}

		/*
		 * Initial sanity checks.
		 * Empty page tables shouldn't provide any translations.
		 */
		if (ops->iova_to_phys(ops, 42))
			return __FAIL(ops, i);

		if (ops->iova_to_phys(ops, SZ_1G + 42))
			return __FAIL(ops, i);

		if (ops->iova_to_phys(ops, SZ_2G + 42))
			return __FAIL(ops, i);

		/*
		 * Distinct mappings of different granule sizes.
		 */
		iova = 0;
		j = find_first_bit(&cfg->pgsize_bitmap, BITS_PER_LONG);
		while (j != BITS_PER_LONG) {
			size = 1UL << j;

			if (ops->map(ops, iova, iova, size, IOMMU_READ |
							    IOMMU_WRITE |
							    IOMMU_NOEXEC |
							    IOMMU_CACHE))
				return __FAIL(ops, i);

			/* Overlapping mappings */
			if (!ops->map(ops, iova, iova + size, size,
				      IOMMU_READ | IOMMU_NOEXEC))
				return __FAIL(ops, i);

			if (ops->iova_to_phys(ops, iova + 42) != (iova + 42))
				return __FAIL(ops, i);

			iova += SZ_1G;
			j++;
			j = find_next_bit(&cfg->pgsize_bitmap, BITS_PER_LONG, j);
		}

		/* Partial unmap */
		size = 1UL << __ffs(cfg->pgsize_bitmap);
		if (ops->unmap(ops, SZ_1G + size, size) != size)
			return __FAIL(ops, i);

		/* Remap of partial unmap */
		if (ops->map(ops, SZ_1G + size, size, size, IOMMU_READ))
			return __FAIL(ops, i);

		if (ops->iova_to_phys(ops, SZ_1G + size + 42) != (size + 42))
			return __FAIL(ops, i);

		/* Full unmap */
		iova = 0;
		j = find_first_bit(&cfg->pgsize_bitmap, BITS_PER_LONG);
		while (j != BITS_PER_LONG) {
			size = 1UL << j;

			if (ops->unmap(ops, iova, size) != size)
				return __FAIL(ops, i);

			if (ops->iova_to_phys(ops, iova + 42))
				return __FAIL(ops, i);

			/* Remap full block */
			if (ops->map(ops, iova, iova, size, IOMMU_WRITE))
				return __FAIL(ops, i);

			if (ops->iova_to_phys(ops, iova + 42) != (iova + 42))
				return __FAIL(ops, i);

			iova += SZ_1G;
			j++;
			j = find_next_bit(&cfg->pgsize_bitmap, BITS_PER_LONG, j);
		}

		free_io_pgtable_ops(ops);
	}

	selftest_running = false;
	return 0;
}

static int __init arm_lpae_do_selftests(void)
{
	static const unsigned long pgsize[] = {
		SZ_4K | SZ_2M | SZ_1G,
		SZ_16K | SZ_32M,
		SZ_64K | SZ_512M,
	};

	static const unsigned int ias[] = {
		32, 36, 40, 42, 44, 48,
	};

	int i, j, pass = 0, fail = 0;
	struct io_pgtable_cfg cfg = {
		.tlb	= &dummy_tlb_ops,
		.oas	= 48,
	};

	for (i = 0; i < ARRAY_SIZE(pgsize); ++i) {
		for (j = 0; j < ARRAY_SIZE(ias); ++j) {
			cfg.pgsize_bitmap = pgsize[i];
			cfg.ias = ias[j];
			pr_info("selftest: pgsize_bitmap 0x%08lx, IAS %u\n",
				pgsize[i], ias[j]);
			if (arm_lpae_run_tests(&cfg))
				fail++;
			else
				pass++;
		}
	}

	pr_info("selftest: completed with %d PASS %d FAIL\n", pass, fail);
	return fail ? -EFAULT : 0;
}
subsys_initcall(arm_lpae_do_selftests);

#endif /* CONFIG_IOMMU_IO_PGTABLE_LPAE_SELFTEST */