/*
 * CPU-agnostic ARM page table allocator.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 *
 * Copyright (C) 2014 ARM Limited
 *
 * Author: Will Deacon <will.deacon@arm.com>
 */

#define pr_fmt(fmt)	"arm-lpae io-pgtable: " fmt

#include <linux/atomic.h>
#include <linux/iommu.h>
#include <linux/kernel.h>
#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/dma-mapping.h>

#include <asm/barrier.h>

#include "io-pgtable.h"

#define ARM_LPAE_MAX_ADDR_BITS		48
#define ARM_LPAE_S2_MAX_CONCAT_PAGES	16
#define ARM_LPAE_MAX_LEVELS		4

/* Struct accessors */
#define io_pgtable_to_data(x)						\
	container_of((x), struct arm_lpae_io_pgtable, iop)

#define io_pgtable_ops_to_data(x)					\
	io_pgtable_to_data(io_pgtable_ops_to_pgtable(x))

/*
 * For consistency with the architecture, we always consider
 * ARM_LPAE_MAX_LEVELS levels, with the walk starting at level n >= 0
 */
#define ARM_LPAE_START_LVL(d)		(ARM_LPAE_MAX_LEVELS - (d)->levels)
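
/*
 * Illustrative values (not from the original source): with a 4K granule
 * and a 48-bit IAS, levels = 4 and the walk starts at level 0; with a
 * 64K granule and a 42-bit IAS, levels = 2 and the walk starts at
 * level 2, matching the AArch64 VMSA translation regimes.
 */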

/*
 * Calculate the right shift amount to get to the portion describing level l
 * in a virtual address mapped by the pagetable in d.
 */
#define ARM_LPAE_LVL_SHIFT(l,d)						\
	((((d)->levels - ((l) - ARM_LPAE_START_LVL(d) + 1))		\
	  * (d)->bits_per_level) + (d)->pg_shift)
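
/*
 * Worked example (illustrative, assuming a 4K granule): pg_shift = 12
 * and bits_per_level = 12 - 3 = 9, so the shift is 12 at level 3, 21 at
 * level 2 and 30 at level 1, i.e. the familiar 4K/2M/1G boundaries.
 */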

#define ARM_LPAE_GRANULE(d)		(1UL << (d)->pg_shift)

#define ARM_LPAE_PAGES_PER_PGD(d)					\
	DIV_ROUND_UP((d)->pgd_size, ARM_LPAE_GRANULE(d))

/*
 * Calculate the index at level l used to map virtual address a using the
 * pagetable in d.
 */
#define ARM_LPAE_PGD_IDX(l,d)						\
	((l) == ARM_LPAE_START_LVL(d) ? ilog2(ARM_LPAE_PAGES_PER_PGD(d)) : 0)

#define ARM_LPAE_LVL_IDX(a,l,d)						\
	(((u64)(a) >> ARM_LPAE_LVL_SHIFT(l,d)) &			\
	 ((1 << ((d)->bits_per_level + ARM_LPAE_PGD_IDX(l,d))) - 1))
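
/*
 * Worked example (illustrative): with a 4K granule, IOVA 0x40201000
 * (1*2^30 + 1*2^21 + 1*2^12) yields index 1 at each of levels 1, 2 and
 * 3. A start-level table widened by PGD concatenation simply gains
 * ilog2(ARM_LPAE_PAGES_PER_PGD(d)) extra index bits.
 */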

/* Calculate the block/page mapping size at level l for pagetable in d. */
#define ARM_LPAE_BLOCK_SIZE(l,d)					\
	(1ULL << (ilog2(sizeof(arm_lpae_iopte)) +			\
		((ARM_LPAE_MAX_LEVELS - (l)) * (d)->bits_per_level)))
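
/*
 * Illustrative sizes for a 4K granule: 1ULL << (3 + (4 - l) * 9), so
 * level 3 maps 4K pages, level 2 maps 2M blocks and level 1 maps 1G
 * blocks.
 */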

/* Page table bits */
#define ARM_LPAE_PTE_TYPE_SHIFT		0
#define ARM_LPAE_PTE_TYPE_MASK		0x3

#define ARM_LPAE_PTE_TYPE_BLOCK		1
#define ARM_LPAE_PTE_TYPE_TABLE		3
#define ARM_LPAE_PTE_TYPE_PAGE		3

#define ARM_LPAE_PTE_NSTABLE		(((arm_lpae_iopte)1) << 63)
#define ARM_LPAE_PTE_XN			(((arm_lpae_iopte)3) << 53)
#define ARM_LPAE_PTE_AF			(((arm_lpae_iopte)1) << 10)
#define ARM_LPAE_PTE_SH_NS		(((arm_lpae_iopte)0) << 8)
#define ARM_LPAE_PTE_SH_OS		(((arm_lpae_iopte)2) << 8)
#define ARM_LPAE_PTE_SH_IS		(((arm_lpae_iopte)3) << 8)
#define ARM_LPAE_PTE_NS			(((arm_lpae_iopte)1) << 5)
#define ARM_LPAE_PTE_VALID		(((arm_lpae_iopte)1) << 0)

#define ARM_LPAE_PTE_ATTR_LO_MASK	(((arm_lpae_iopte)0x3ff) << 2)
/* Ignore the contiguous bit for block splitting */
#define ARM_LPAE_PTE_ATTR_HI_MASK	(((arm_lpae_iopte)6) << 52)
#define ARM_LPAE_PTE_ATTR_MASK		(ARM_LPAE_PTE_ATTR_LO_MASK |	\
					 ARM_LPAE_PTE_ATTR_HI_MASK)
/* Software bit for solving coherency races */
#define ARM_LPAE_PTE_SW_SYNC		(((arm_lpae_iopte)1) << 55)

/* Stage-1 PTE */
#define ARM_LPAE_PTE_AP_UNPRIV		(((arm_lpae_iopte)1) << 6)
#define ARM_LPAE_PTE_AP_RDONLY		(((arm_lpae_iopte)2) << 6)
#define ARM_LPAE_PTE_ATTRINDX_SHIFT	2
#define ARM_LPAE_PTE_nG			(((arm_lpae_iopte)1) << 11)

/* Stage-2 PTE */
#define ARM_LPAE_PTE_HAP_FAULT		(((arm_lpae_iopte)0) << 6)
#define ARM_LPAE_PTE_HAP_READ		(((arm_lpae_iopte)1) << 6)
#define ARM_LPAE_PTE_HAP_WRITE		(((arm_lpae_iopte)2) << 6)
#define ARM_LPAE_PTE_MEMATTR_OIWB	(((arm_lpae_iopte)0xf) << 2)
#define ARM_LPAE_PTE_MEMATTR_NC		(((arm_lpae_iopte)0x5) << 2)
#define ARM_LPAE_PTE_MEMATTR_DEV	(((arm_lpae_iopte)0x1) << 2)

/* Register bits */
#define ARM_32_LPAE_TCR_EAE		(1 << 31)
#define ARM_64_LPAE_S2_TCR_RES1		(1 << 31)

#define ARM_LPAE_TCR_EPD1		(1 << 23)

#define ARM_LPAE_TCR_TG0_4K		(0 << 14)
#define ARM_LPAE_TCR_TG0_64K		(1 << 14)
#define ARM_LPAE_TCR_TG0_16K		(2 << 14)

#define ARM_LPAE_TCR_SH0_SHIFT		12
#define ARM_LPAE_TCR_SH0_MASK		0x3
#define ARM_LPAE_TCR_SH_NS		0
#define ARM_LPAE_TCR_SH_OS		2
#define ARM_LPAE_TCR_SH_IS		3

#define ARM_LPAE_TCR_ORGN0_SHIFT	10
#define ARM_LPAE_TCR_IRGN0_SHIFT	8
#define ARM_LPAE_TCR_RGN_MASK		0x3
#define ARM_LPAE_TCR_RGN_NC		0
#define ARM_LPAE_TCR_RGN_WBWA		1
#define ARM_LPAE_TCR_RGN_WT		2
#define ARM_LPAE_TCR_RGN_WB		3

#define ARM_LPAE_TCR_SL0_SHIFT		6
#define ARM_LPAE_TCR_SL0_MASK		0x3

#define ARM_LPAE_TCR_T0SZ_SHIFT		0
#define ARM_LPAE_TCR_SZ_MASK		0xf

#define ARM_LPAE_TCR_PS_SHIFT		16
#define ARM_LPAE_TCR_PS_MASK		0x7

#define ARM_LPAE_TCR_IPS_SHIFT		32
#define ARM_LPAE_TCR_IPS_MASK		0x7

#define ARM_LPAE_TCR_PS_32_BIT		0x0ULL
#define ARM_LPAE_TCR_PS_36_BIT		0x1ULL
#define ARM_LPAE_TCR_PS_40_BIT		0x2ULL
#define ARM_LPAE_TCR_PS_42_BIT		0x3ULL
#define ARM_LPAE_TCR_PS_44_BIT		0x4ULL
#define ARM_LPAE_TCR_PS_48_BIT		0x5ULL

#define ARM_LPAE_MAIR_ATTR_SHIFT(n)	((n) << 3)
#define ARM_LPAE_MAIR_ATTR_MASK		0xff
#define ARM_LPAE_MAIR_ATTR_DEVICE	0x04
#define ARM_LPAE_MAIR_ATTR_NC		0x44
#define ARM_LPAE_MAIR_ATTR_WBRWA	0xff
#define ARM_LPAE_MAIR_ATTR_IDX_NC	0
#define ARM_LPAE_MAIR_ATTR_IDX_CACHE	1
#define ARM_LPAE_MAIR_ATTR_IDX_DEV	2

/* IOPTE accessors */
#define iopte_deref(pte,d)					\
	(__va((pte) & ((1ULL << ARM_LPAE_MAX_ADDR_BITS) - 1)	\
	& ~(ARM_LPAE_GRANULE(d) - 1ULL)))

#define iopte_type(pte,l)					\
	(((pte) >> ARM_LPAE_PTE_TYPE_SHIFT) & ARM_LPAE_PTE_TYPE_MASK)

#define iopte_prot(pte)	((pte) & ARM_LPAE_PTE_ATTR_MASK)

#define iopte_leaf(pte,l)					\
	(l == (ARM_LPAE_MAX_LEVELS - 1) ?			\
		(iopte_type(pte,l) == ARM_LPAE_PTE_TYPE_PAGE) :	\
		(iopte_type(pte,l) == ARM_LPAE_PTE_TYPE_BLOCK))

#define iopte_to_pfn(pte,d)					\
	(((pte) & ((1ULL << ARM_LPAE_MAX_ADDR_BITS) - 1)) >> (d)->pg_shift)

#define pfn_to_iopte(pfn,d)					\
	(((pfn) << (d)->pg_shift) & ((1ULL << ARM_LPAE_MAX_ADDR_BITS) - 1))

struct arm_lpae_io_pgtable {
	struct io_pgtable	iop;

	int			levels;
	size_t			pgd_size;
	unsigned long		pg_shift;
	unsigned long		bits_per_level;

	void			*pgd;
};

typedef u64 arm_lpae_iopte;

static bool selftest_running = false;

static dma_addr_t __arm_lpae_dma_addr(void *pages)
{
	return (dma_addr_t)virt_to_phys(pages);
}

static void *__arm_lpae_alloc_pages(size_t size, gfp_t gfp,
				    struct io_pgtable_cfg *cfg)
{
	struct device *dev = cfg->iommu_dev;
	dma_addr_t dma;
	void *pages = alloc_pages_exact(size, gfp | __GFP_ZERO);

	if (!pages)
		return NULL;

	if (!(cfg->quirks & IO_PGTABLE_QUIRK_NO_DMA)) {
		dma = dma_map_single(dev, pages, size, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, dma))
			goto out_free;
		/*
		 * We depend on the IOMMU being able to work with any physical
		 * address directly, so if the DMA layer suggests otherwise by
		 * translating or truncating them, that bodes very badly...
		 */
		if (dma != virt_to_phys(pages))
			goto out_unmap;
	}

	return pages;

out_unmap:
	dev_err(dev, "Cannot accommodate DMA translation for IOMMU page tables\n");
	dma_unmap_single(dev, dma, size, DMA_TO_DEVICE);
out_free:
	free_pages_exact(pages, size);
	return NULL;
}

static void __arm_lpae_free_pages(void *pages, size_t size,
				  struct io_pgtable_cfg *cfg)
{
	if (!(cfg->quirks & IO_PGTABLE_QUIRK_NO_DMA))
		dma_unmap_single(cfg->iommu_dev, __arm_lpae_dma_addr(pages),
				 size, DMA_TO_DEVICE);
	free_pages_exact(pages, size);
}

static void __arm_lpae_sync_pte(arm_lpae_iopte *ptep,
				struct io_pgtable_cfg *cfg)
{
	dma_sync_single_for_device(cfg->iommu_dev, __arm_lpae_dma_addr(ptep),
				   sizeof(*ptep), DMA_TO_DEVICE);
}

static void __arm_lpae_set_pte(arm_lpae_iopte *ptep, arm_lpae_iopte pte,
			       struct io_pgtable_cfg *cfg)
{
	*ptep = pte;

	if (!(cfg->quirks & IO_PGTABLE_QUIRK_NO_DMA))
		__arm_lpae_sync_pte(ptep, cfg);
}
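
/*
 * Note (illustrative summary): with IO_PGTABLE_QUIRK_NO_DMA the table
 * walker is assumed to be coherent with the CPU caches, so the plain
 * store above suffices; otherwise the updated PTE is cleaned to the
 * point of coherency via the DMA API before a non-coherent walker can
 * be expected to observe it.
 */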

static int __arm_lpae_unmap(struct arm_lpae_io_pgtable *data,
			    unsigned long iova, size_t size, int lvl,
			    arm_lpae_iopte *ptep);

static void __arm_lpae_init_pte(struct arm_lpae_io_pgtable *data,
				phys_addr_t paddr, arm_lpae_iopte prot,
				int lvl, arm_lpae_iopte *ptep)
{
	arm_lpae_iopte pte = prot;

	if (data->iop.cfg.quirks & IO_PGTABLE_QUIRK_ARM_NS)
		pte |= ARM_LPAE_PTE_NS;

	if (lvl == ARM_LPAE_MAX_LEVELS - 1)
		pte |= ARM_LPAE_PTE_TYPE_PAGE;
	else
		pte |= ARM_LPAE_PTE_TYPE_BLOCK;

	pte |= ARM_LPAE_PTE_AF | ARM_LPAE_PTE_SH_IS;
	pte |= pfn_to_iopte(paddr >> data->pg_shift, data);

	__arm_lpae_set_pte(ptep, pte, &data->iop.cfg);
}

static int arm_lpae_init_pte(struct arm_lpae_io_pgtable *data,
			     unsigned long iova, phys_addr_t paddr,
			     arm_lpae_iopte prot, int lvl,
			     arm_lpae_iopte *ptep)
{
	arm_lpae_iopte pte = *ptep;

	if (iopte_leaf(pte, lvl)) {
		/* We require an unmap first */
		WARN_ON(!selftest_running);
		return -EEXIST;
	} else if (iopte_type(pte, lvl) == ARM_LPAE_PTE_TYPE_TABLE) {
		/*
		 * We need to unmap and free the old table before
		 * overwriting it with a block entry.
		 */
		arm_lpae_iopte *tblp;
		size_t sz = ARM_LPAE_BLOCK_SIZE(lvl, data);

		tblp = ptep - ARM_LPAE_LVL_IDX(iova, lvl, data);
		if (WARN_ON(__arm_lpae_unmap(data, iova, sz, lvl, tblp) != sz))
			return -EINVAL;
	}

	__arm_lpae_init_pte(data, paddr, prot, lvl, ptep);
	return 0;
}

static arm_lpae_iopte arm_lpae_install_table(arm_lpae_iopte *table,
					     arm_lpae_iopte *ptep,
					     arm_lpae_iopte curr,
					     struct io_pgtable_cfg *cfg)
{
	arm_lpae_iopte old, new;

	new = __pa(table) | ARM_LPAE_PTE_TYPE_TABLE;
	if (cfg->quirks & IO_PGTABLE_QUIRK_ARM_NS)
		new |= ARM_LPAE_PTE_NSTABLE;

	/*
	 * Ensure the table itself is visible before its PTE can be.
	 * Whilst we could get away with cmpxchg64_release below, this
	 * doesn't have any ordering semantics when !CONFIG_SMP.
	 */
	dma_wmb();

	old = cmpxchg64_relaxed(ptep, curr, new);

	if ((cfg->quirks & IO_PGTABLE_QUIRK_NO_DMA) ||
	    (old & ARM_LPAE_PTE_SW_SYNC))
		return old;

	/* Even if it's not ours, there's no point waiting; just kick it */
	__arm_lpae_sync_pte(ptep, cfg);
	if (old == curr)
		WRITE_ONCE(*ptep, new | ARM_LPAE_PTE_SW_SYNC);

	return old;
}
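
/*
 * The ARM_LPAE_PTE_SW_SYNC protocol above, spelled out (illustrative
 * summary): if two mappers race to install a table, the cmpxchg loser
 * frees its allocation and reuses the winner's table. The winner
 * re-writes the PTE with SW_SYNC set once it has been cleaned to
 * memory, so later racers can tell a synced entry from one whose sync
 * is still pending.
 */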

static int __arm_lpae_map(struct arm_lpae_io_pgtable *data, unsigned long iova,
			  phys_addr_t paddr, size_t size, arm_lpae_iopte prot,
			  int lvl, arm_lpae_iopte *ptep)
{
	arm_lpae_iopte *cptep, pte;
	size_t block_size = ARM_LPAE_BLOCK_SIZE(lvl, data);
	size_t tblsz = ARM_LPAE_GRANULE(data);
	struct io_pgtable_cfg *cfg = &data->iop.cfg;

	/* Find our entry at the current level */
	ptep += ARM_LPAE_LVL_IDX(iova, lvl, data);

	/* If we can install a leaf entry at this level, then do so */
	if (size == block_size && (size & cfg->pgsize_bitmap))
		return arm_lpae_init_pte(data, iova, paddr, prot, lvl, ptep);

	/* We can't allocate tables at the final level */
	if (WARN_ON(lvl >= ARM_LPAE_MAX_LEVELS - 1))
		return -EINVAL;

	/* Grab a pointer to the next level */
	pte = READ_ONCE(*ptep);
	if (!pte) {
		cptep = __arm_lpae_alloc_pages(tblsz, GFP_ATOMIC, cfg);
		if (!cptep)
			return -ENOMEM;

		pte = arm_lpae_install_table(cptep, ptep, 0, cfg);
		if (pte)
			__arm_lpae_free_pages(cptep, tblsz, cfg);
	} else if (!(cfg->quirks & IO_PGTABLE_QUIRK_NO_DMA) &&
		   !(pte & ARM_LPAE_PTE_SW_SYNC)) {
		__arm_lpae_sync_pte(ptep, cfg);
	}

	if (pte && !iopte_leaf(pte, lvl)) {
		cptep = iopte_deref(pte, data);
	} else if (pte) {
		/* We require an unmap first */
		WARN_ON(!selftest_running);
		return -EEXIST;
	}

	/* Rinse, repeat */
	return __arm_lpae_map(data, iova, paddr, size, prot, lvl + 1, cptep);
}
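
/*
 * Example walk (illustrative, 4K granule): mapping 2M at a 2M-aligned
 * IOVA recurses from the start level until lvl = 2, where
 * ARM_LPAE_BLOCK_SIZE(2,d) == 2M and a block entry is installed; a 4K
 * mapping instead descends one level further and installs a page entry
 * at level 3, allocating intermediate tables along the way as needed.
 */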

static arm_lpae_iopte arm_lpae_prot_to_pte(struct arm_lpae_io_pgtable *data,
					   int prot)
{
	arm_lpae_iopte pte;

	if (data->iop.fmt == ARM_64_LPAE_S1 ||
	    data->iop.fmt == ARM_32_LPAE_S1) {
		pte = ARM_LPAE_PTE_nG;

		if (!(prot & IOMMU_WRITE) && (prot & IOMMU_READ))
			pte |= ARM_LPAE_PTE_AP_RDONLY;

		if (!(prot & IOMMU_PRIV))
			pte |= ARM_LPAE_PTE_AP_UNPRIV;

		if (prot & IOMMU_MMIO)
			pte |= (ARM_LPAE_MAIR_ATTR_IDX_DEV
				<< ARM_LPAE_PTE_ATTRINDX_SHIFT);
		else if (prot & IOMMU_CACHE)
			pte |= (ARM_LPAE_MAIR_ATTR_IDX_CACHE
				<< ARM_LPAE_PTE_ATTRINDX_SHIFT);
	} else {
		pte = ARM_LPAE_PTE_HAP_FAULT;
		if (prot & IOMMU_READ)
			pte |= ARM_LPAE_PTE_HAP_READ;
		if (prot & IOMMU_WRITE)
			pte |= ARM_LPAE_PTE_HAP_WRITE;
		if (prot & IOMMU_MMIO)
			pte |= ARM_LPAE_PTE_MEMATTR_DEV;
		else if (prot & IOMMU_CACHE)
			pte |= ARM_LPAE_PTE_MEMATTR_OIWB;
		else
			pte |= ARM_LPAE_PTE_MEMATTR_NC;
	}

	if (prot & IOMMU_NOEXEC)
		pte |= ARM_LPAE_PTE_XN;

	return pte;
}
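
/*
 * For illustration: at stage 1, IOMMU_READ | IOMMU_WRITE | IOMMU_CACHE
 * yields nG | AP_UNPRIV | ATTRINDX=CACHE; at stage 2 the same prot
 * yields HAP_READ | HAP_WRITE | MEMATTR_OIWB. The AF and shareability
 * bits are added later, in __arm_lpae_init_pte().
 */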

static int arm_lpae_map(struct io_pgtable_ops *ops, unsigned long iova,
			phys_addr_t paddr, size_t size, int iommu_prot)
{
	struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
	arm_lpae_iopte *ptep = data->pgd;
	int ret, lvl = ARM_LPAE_START_LVL(data);
	arm_lpae_iopte prot;

	/* If no access, then nothing to do */
	if (!(iommu_prot & (IOMMU_READ | IOMMU_WRITE)))
		return 0;

	if (WARN_ON(iova >= (1ULL << data->iop.cfg.ias) ||
		    paddr >= (1ULL << data->iop.cfg.oas)))
		return -ERANGE;

	prot = arm_lpae_prot_to_pte(data, iommu_prot);
	ret = __arm_lpae_map(data, iova, paddr, size, prot, lvl, ptep);
	/*
	 * Synchronise all PTE updates for the new mapping before there's
	 * a chance for anything to kick off a table walk for the new iova.
	 */
	wmb();

	return ret;
}

static void __arm_lpae_free_pgtable(struct arm_lpae_io_pgtable *data, int lvl,
				    arm_lpae_iopte *ptep)
{
	arm_lpae_iopte *start, *end;
	unsigned long table_size;

	if (lvl == ARM_LPAE_START_LVL(data))
		table_size = data->pgd_size;
	else
		table_size = ARM_LPAE_GRANULE(data);

	start = ptep;

	/* Only leaf entries at the last level */
	if (lvl == ARM_LPAE_MAX_LEVELS - 1)
		end = ptep;
	else
		end = (void *)ptep + table_size;

	while (ptep != end) {
		arm_lpae_iopte pte = *ptep++;

		if (!pte || iopte_leaf(pte, lvl))
			continue;

		__arm_lpae_free_pgtable(data, lvl + 1, iopte_deref(pte, data));
	}

	__arm_lpae_free_pages(start, table_size, &data->iop.cfg);
}

static void arm_lpae_free_pgtable(struct io_pgtable *iop)
{
	struct arm_lpae_io_pgtable *data = io_pgtable_to_data(iop);

	__arm_lpae_free_pgtable(data, ARM_LPAE_START_LVL(data), data->pgd);
	kfree(data);
}

static int arm_lpae_split_blk_unmap(struct arm_lpae_io_pgtable *data,
				    unsigned long iova, size_t size,
				    arm_lpae_iopte blk_pte, int lvl,
				    arm_lpae_iopte *ptep)
{
	struct io_pgtable_cfg *cfg = &data->iop.cfg;
	arm_lpae_iopte pte, *tablep;
	phys_addr_t blk_paddr;
	size_t tablesz = ARM_LPAE_GRANULE(data);
	size_t split_sz = ARM_LPAE_BLOCK_SIZE(lvl, data);
	int i, unmap_idx = -1;

	if (WARN_ON(lvl == ARM_LPAE_MAX_LEVELS))
		return 0;

	tablep = __arm_lpae_alloc_pages(tablesz, GFP_ATOMIC, cfg);
	if (!tablep)
		return 0; /* Bytes unmapped */

	if (size == split_sz)
		unmap_idx = ARM_LPAE_LVL_IDX(iova, lvl, data);

	blk_paddr = iopte_to_pfn(blk_pte, data) << data->pg_shift;
	pte = iopte_prot(blk_pte);

	for (i = 0; i < tablesz / sizeof(pte); i++, blk_paddr += split_sz) {
		/* Unmap! */
		if (i == unmap_idx)
			continue;

		__arm_lpae_init_pte(data, blk_paddr, pte, lvl, &tablep[i]);
	}

	pte = arm_lpae_install_table(tablep, ptep, blk_pte, cfg);
	if (pte != blk_pte) {
		__arm_lpae_free_pages(tablep, tablesz, cfg);
		/*
		 * We may race against someone unmapping another part of this
		 * block, but anything else is invalid. We can't misinterpret
		 * a page entry here since we're never at the last level.
		 */
		if (iopte_type(pte, lvl - 1) != ARM_LPAE_PTE_TYPE_TABLE)
			return 0;

		tablep = iopte_deref(pte, data);
	}

	if (unmap_idx < 0)
		return __arm_lpae_unmap(data, iova, size, lvl, tablep);

	io_pgtable_tlb_add_flush(&data->iop, iova, size, size, true);
	return size;
}
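
/*
 * Worked example (illustrative, 4K granule): unmapping 4K from the
 * middle of a 2M block allocates a level-3 table, fills 511 of its 512
 * slots with page entries inheriting the block's attributes, leaves
 * the slot being unmapped empty, and then swaps the table in over the
 * old block entry via arm_lpae_install_table().
 */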

static int __arm_lpae_unmap(struct arm_lpae_io_pgtable *data,
			    unsigned long iova, size_t size, int lvl,
			    arm_lpae_iopte *ptep)
{
	arm_lpae_iopte pte;
	struct io_pgtable *iop = &data->iop;

	/* Something went horribly wrong and we ran out of page table */
	if (WARN_ON(lvl == ARM_LPAE_MAX_LEVELS))
		return 0;

	ptep += ARM_LPAE_LVL_IDX(iova, lvl, data);
	pte = READ_ONCE(*ptep);
	if (WARN_ON(!pte))
		return 0;

	/* If the size matches this level, we're in the right place */
	if (size == ARM_LPAE_BLOCK_SIZE(lvl, data)) {
		__arm_lpae_set_pte(ptep, 0, &iop->cfg);

		if (!iopte_leaf(pte, lvl)) {
			/* Also flush any partial walks */
			io_pgtable_tlb_add_flush(iop, iova, size,
						 ARM_LPAE_GRANULE(data), false);
			io_pgtable_tlb_sync(iop);
			ptep = iopte_deref(pte, data);
			__arm_lpae_free_pgtable(data, lvl + 1, ptep);
		} else {
			io_pgtable_tlb_add_flush(iop, iova, size, size, true);
		}

		return size;
	} else if (iopte_leaf(pte, lvl)) {
		/*
		 * Insert a table at the next level to map the old region,
		 * minus the part we want to unmap
		 */
		return arm_lpae_split_blk_unmap(data, iova, size, pte,
						lvl + 1, ptep);
	}

	/* Keep on walkin' */
	ptep = iopte_deref(pte, data);
	return __arm_lpae_unmap(data, iova, size, lvl + 1, ptep);
}

static int arm_lpae_unmap(struct io_pgtable_ops *ops, unsigned long iova,
			  size_t size)
{
	struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
	arm_lpae_iopte *ptep = data->pgd;
	int lvl = ARM_LPAE_START_LVL(data);

	if (WARN_ON(iova >= (1ULL << data->iop.cfg.ias)))
		return 0;

	return __arm_lpae_unmap(data, iova, size, lvl, ptep);
}

static phys_addr_t arm_lpae_iova_to_phys(struct io_pgtable_ops *ops,
					 unsigned long iova)
{
	struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
	arm_lpae_iopte pte, *ptep = data->pgd;
	int lvl = ARM_LPAE_START_LVL(data);

	do {
		/* Valid IOPTE pointer? */
		if (!ptep)
			return 0;

		/* Grab the IOPTE we're interested in */
		ptep += ARM_LPAE_LVL_IDX(iova, lvl, data);
		pte = READ_ONCE(*ptep);

		/* Valid entry? */
		if (!pte)
			return 0;

		/* Leaf entry? */
		if (iopte_leaf(pte, lvl))
			goto found_translation;

		/* Take it to the next level */
		ptep = iopte_deref(pte, data);
	} while (++lvl < ARM_LPAE_MAX_LEVELS);

	/* Ran out of page tables to walk */
	return 0;

found_translation:
	iova &= (ARM_LPAE_BLOCK_SIZE(lvl, data) - 1);
	return ((phys_addr_t)iopte_to_pfn(pte, data) << data->pg_shift) | iova;
}

static void arm_lpae_restrict_pgsizes(struct io_pgtable_cfg *cfg)
{
	unsigned long granule;

	/*
	 * We need to restrict the supported page sizes to match the
	 * translation regime for a particular granule. Aim to match
	 * the CPU page size if possible, otherwise prefer smaller sizes.
	 * While we're at it, restrict the block sizes to match the
	 * chosen granule.
	 */
	if (cfg->pgsize_bitmap & PAGE_SIZE)
		granule = PAGE_SIZE;
	else if (cfg->pgsize_bitmap & ~PAGE_MASK)
		granule = 1UL << __fls(cfg->pgsize_bitmap & ~PAGE_MASK);
	else if (cfg->pgsize_bitmap & PAGE_MASK)
		granule = 1UL << __ffs(cfg->pgsize_bitmap & PAGE_MASK);
	else
		granule = 0;

	switch (granule) {
	case SZ_4K:
		cfg->pgsize_bitmap &= (SZ_4K | SZ_2M | SZ_1G);
		break;
	case SZ_16K:
		cfg->pgsize_bitmap &= (SZ_16K | SZ_32M);
		break;
	case SZ_64K:
		cfg->pgsize_bitmap &= (SZ_64K | SZ_512M);
		break;
	default:
		cfg->pgsize_bitmap = 0;
	}
}
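
/*
 * For example (illustrative): if PAGE_SIZE is 4K and the hardware
 * reports {4K, 64K, 2M, 32M, 512M, 1G}, the 4K granule is chosen and
 * the bitmap is restricted to 4K | 2M | 1G.
 */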

static struct arm_lpae_io_pgtable *
arm_lpae_alloc_pgtable(struct io_pgtable_cfg *cfg)
{
	unsigned long va_bits, pgd_bits;
	struct arm_lpae_io_pgtable *data;

	arm_lpae_restrict_pgsizes(cfg);

	if (!(cfg->pgsize_bitmap & (SZ_4K | SZ_16K | SZ_64K)))
		return NULL;

	if (cfg->ias > ARM_LPAE_MAX_ADDR_BITS)
		return NULL;

	if (cfg->oas > ARM_LPAE_MAX_ADDR_BITS)
		return NULL;

	if (!selftest_running && cfg->iommu_dev->dma_pfn_offset) {
		dev_err(cfg->iommu_dev, "Cannot accommodate DMA offset for IOMMU page tables\n");
		return NULL;
	}

	data = kmalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		return NULL;

	data->pg_shift = __ffs(cfg->pgsize_bitmap);
	data->bits_per_level = data->pg_shift - ilog2(sizeof(arm_lpae_iopte));

	va_bits = cfg->ias - data->pg_shift;
	data->levels = DIV_ROUND_UP(va_bits, data->bits_per_level);

	/* Calculate the actual size of our pgd (without concatenation) */
	pgd_bits = va_bits - (data->bits_per_level * (data->levels - 1));
	data->pgd_size = 1UL << (pgd_bits + ilog2(sizeof(arm_lpae_iopte)));

	data->iop.ops = (struct io_pgtable_ops) {
		.map		= arm_lpae_map,
		.unmap		= arm_lpae_unmap,
		.iova_to_phys	= arm_lpae_iova_to_phys,
	};

	return data;
}
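
/*
 * Worked numbers (illustrative): for a 48-bit IAS with a 4K granule,
 * pg_shift = 12, bits_per_level = 9, va_bits = 36 and levels = 4; the
 * pgd then covers 9 bits of VA, i.e. pgd_size = 512 * 8 bytes = 4K.
 */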

static struct io_pgtable *
arm_64_lpae_alloc_pgtable_s1(struct io_pgtable_cfg *cfg, void *cookie)
{
	u64 reg;
	struct arm_lpae_io_pgtable *data;

	if (cfg->quirks & ~(IO_PGTABLE_QUIRK_ARM_NS | IO_PGTABLE_QUIRK_NO_DMA))
		return NULL;

	data = arm_lpae_alloc_pgtable(cfg);
	if (!data)
		return NULL;

	/* TCR */
	reg = (ARM_LPAE_TCR_SH_IS << ARM_LPAE_TCR_SH0_SHIFT) |
	      (ARM_LPAE_TCR_RGN_WBWA << ARM_LPAE_TCR_IRGN0_SHIFT) |
	      (ARM_LPAE_TCR_RGN_WBWA << ARM_LPAE_TCR_ORGN0_SHIFT);

	switch (ARM_LPAE_GRANULE(data)) {
	case SZ_4K:
		reg |= ARM_LPAE_TCR_TG0_4K;
		break;
	case SZ_16K:
		reg |= ARM_LPAE_TCR_TG0_16K;
		break;
	case SZ_64K:
		reg |= ARM_LPAE_TCR_TG0_64K;
		break;
	}

	switch (cfg->oas) {
	case 32:
		reg |= (ARM_LPAE_TCR_PS_32_BIT << ARM_LPAE_TCR_IPS_SHIFT);
		break;
	case 36:
		reg |= (ARM_LPAE_TCR_PS_36_BIT << ARM_LPAE_TCR_IPS_SHIFT);
		break;
	case 40:
		reg |= (ARM_LPAE_TCR_PS_40_BIT << ARM_LPAE_TCR_IPS_SHIFT);
		break;
	case 42:
		reg |= (ARM_LPAE_TCR_PS_42_BIT << ARM_LPAE_TCR_IPS_SHIFT);
		break;
	case 44:
		reg |= (ARM_LPAE_TCR_PS_44_BIT << ARM_LPAE_TCR_IPS_SHIFT);
		break;
	case 48:
		reg |= (ARM_LPAE_TCR_PS_48_BIT << ARM_LPAE_TCR_IPS_SHIFT);
		break;
	default:
		goto out_free_data;
	}

	reg |= (64ULL - cfg->ias) << ARM_LPAE_TCR_T0SZ_SHIFT;

	/* Disable speculative walks through TTBR1 */
	reg |= ARM_LPAE_TCR_EPD1;
	cfg->arm_lpae_s1_cfg.tcr = reg;

	/* MAIRs */
	reg = (ARM_LPAE_MAIR_ATTR_NC
	       << ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_NC)) |
	      (ARM_LPAE_MAIR_ATTR_WBRWA
	       << ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_CACHE)) |
	      (ARM_LPAE_MAIR_ATTR_DEVICE
	       << ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_DEV));

	cfg->arm_lpae_s1_cfg.mair[0] = reg;
	cfg->arm_lpae_s1_cfg.mair[1] = 0;

	/* Looking good; allocate a pgd */
	data->pgd = __arm_lpae_alloc_pages(data->pgd_size, GFP_KERNEL, cfg);
	if (!data->pgd)
		goto out_free_data;

	/* Ensure the empty pgd is visible before any actual TTBR write */
	wmb();

	/* TTBRs */
	cfg->arm_lpae_s1_cfg.ttbr[0] = virt_to_phys(data->pgd);
	cfg->arm_lpae_s1_cfg.ttbr[1] = 0;
	return &data->iop;

out_free_data:
	kfree(data);
	return NULL;
}

static struct io_pgtable *
arm_64_lpae_alloc_pgtable_s2(struct io_pgtable_cfg *cfg, void *cookie)
{
	u64 reg, sl;
	struct arm_lpae_io_pgtable *data;

	/* The NS quirk doesn't apply at stage 2 */
	if (cfg->quirks & ~IO_PGTABLE_QUIRK_NO_DMA)
		return NULL;

	data = arm_lpae_alloc_pgtable(cfg);
	if (!data)
		return NULL;

	/*
	 * Concatenate PGDs at level 1 if possible in order to reduce
	 * the depth of the stage-2 walk.
	 */
	if (data->levels == ARM_LPAE_MAX_LEVELS) {
		unsigned long pgd_pages;

		pgd_pages = data->pgd_size >> ilog2(sizeof(arm_lpae_iopte));
		if (pgd_pages <= ARM_LPAE_S2_MAX_CONCAT_PAGES) {
			data->pgd_size = pgd_pages << data->pg_shift;
			data->levels--;
		}
	}
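
	/*
	 * Concatenation example (illustrative): a 40-bit IAS with a 4K
	 * granule would need 4 levels with a 2-entry level-0 table;
	 * instead, two level-1 tables are concatenated into an 8K pgd
	 * and the walk starts at level 1.
	 */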

	/* VTCR */
	reg = ARM_64_LPAE_S2_TCR_RES1 |
	     (ARM_LPAE_TCR_SH_IS << ARM_LPAE_TCR_SH0_SHIFT) |
	     (ARM_LPAE_TCR_RGN_WBWA << ARM_LPAE_TCR_IRGN0_SHIFT) |
	     (ARM_LPAE_TCR_RGN_WBWA << ARM_LPAE_TCR_ORGN0_SHIFT);

	sl = ARM_LPAE_START_LVL(data);

	switch (ARM_LPAE_GRANULE(data)) {
	case SZ_4K:
		reg |= ARM_LPAE_TCR_TG0_4K;
		sl++; /* SL0 format is different for 4K granule size */
		break;
	case SZ_16K:
		reg |= ARM_LPAE_TCR_TG0_16K;
		break;
	case SZ_64K:
		reg |= ARM_LPAE_TCR_TG0_64K;
		break;
	}

	switch (cfg->oas) {
	case 32:
		reg |= (ARM_LPAE_TCR_PS_32_BIT << ARM_LPAE_TCR_PS_SHIFT);
		break;
	case 36:
		reg |= (ARM_LPAE_TCR_PS_36_BIT << ARM_LPAE_TCR_PS_SHIFT);
		break;
	case 40:
		reg |= (ARM_LPAE_TCR_PS_40_BIT << ARM_LPAE_TCR_PS_SHIFT);
		break;
	case 42:
		reg |= (ARM_LPAE_TCR_PS_42_BIT << ARM_LPAE_TCR_PS_SHIFT);
		break;
	case 44:
		reg |= (ARM_LPAE_TCR_PS_44_BIT << ARM_LPAE_TCR_PS_SHIFT);
		break;
	case 48:
		reg |= (ARM_LPAE_TCR_PS_48_BIT << ARM_LPAE_TCR_PS_SHIFT);
		break;
	default:
		goto out_free_data;
	}

	reg |= (64ULL - cfg->ias) << ARM_LPAE_TCR_T0SZ_SHIFT;
	reg |= (~sl & ARM_LPAE_TCR_SL0_MASK) << ARM_LPAE_TCR_SL0_SHIFT;
	cfg->arm_lpae_s2_cfg.vtcr = reg;

	/* Allocate pgd pages */
	data->pgd = __arm_lpae_alloc_pages(data->pgd_size, GFP_KERNEL, cfg);
	if (!data->pgd)
		goto out_free_data;

	/* Ensure the empty pgd is visible before any actual TTBR write */
	wmb();

	/* VTTBR */
	cfg->arm_lpae_s2_cfg.vttbr = virt_to_phys(data->pgd);
	return &data->iop;

out_free_data:
	kfree(data);
	return NULL;
}

static struct io_pgtable *
arm_32_lpae_alloc_pgtable_s1(struct io_pgtable_cfg *cfg, void *cookie)
{
	struct io_pgtable *iop;

	if (cfg->ias > 32 || cfg->oas > 40)
		return NULL;

	cfg->pgsize_bitmap &= (SZ_4K | SZ_2M | SZ_1G);
	iop = arm_64_lpae_alloc_pgtable_s1(cfg, cookie);
	if (iop) {
		cfg->arm_lpae_s1_cfg.tcr |= ARM_32_LPAE_TCR_EAE;
		cfg->arm_lpae_s1_cfg.tcr &= 0xffffffff;
	}

	return iop;
}

static struct io_pgtable *
arm_32_lpae_alloc_pgtable_s2(struct io_pgtable_cfg *cfg, void *cookie)
{
	struct io_pgtable *iop;

	if (cfg->ias > 40 || cfg->oas > 40)
		return NULL;

	cfg->pgsize_bitmap &= (SZ_4K | SZ_2M | SZ_1G);
	iop = arm_64_lpae_alloc_pgtable_s2(cfg, cookie);
	if (iop)
		cfg->arm_lpae_s2_cfg.vtcr &= 0xffffffff;

	return iop;
}

struct io_pgtable_init_fns io_pgtable_arm_64_lpae_s1_init_fns = {
	.alloc	= arm_64_lpae_alloc_pgtable_s1,
	.free	= arm_lpae_free_pgtable,
};

struct io_pgtable_init_fns io_pgtable_arm_64_lpae_s2_init_fns = {
	.alloc	= arm_64_lpae_alloc_pgtable_s2,
	.free	= arm_lpae_free_pgtable,
};

struct io_pgtable_init_fns io_pgtable_arm_32_lpae_s1_init_fns = {
	.alloc	= arm_32_lpae_alloc_pgtable_s1,
	.free	= arm_lpae_free_pgtable,
};

struct io_pgtable_init_fns io_pgtable_arm_32_lpae_s2_init_fns = {
	.alloc	= arm_32_lpae_alloc_pgtable_s2,
	.free	= arm_lpae_free_pgtable,
};

#ifdef CONFIG_IOMMU_IO_PGTABLE_LPAE_SELFTEST

static struct io_pgtable_cfg *cfg_cookie;

static void dummy_tlb_flush_all(void *cookie)
{
	WARN_ON(cookie != cfg_cookie);
}

static void dummy_tlb_add_flush(unsigned long iova, size_t size,
				size_t granule, bool leaf, void *cookie)
{
	WARN_ON(cookie != cfg_cookie);
	WARN_ON(!(size & cfg_cookie->pgsize_bitmap));
}

static void dummy_tlb_sync(void *cookie)
{
	WARN_ON(cookie != cfg_cookie);
}

static const struct iommu_gather_ops dummy_tlb_ops __initconst = {
	.tlb_flush_all	= dummy_tlb_flush_all,
	.tlb_add_flush	= dummy_tlb_add_flush,
	.tlb_sync	= dummy_tlb_sync,
};
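
/*
 * The selftests below run against these dummy TLB ops, so no IOMMU
 * hardware is involved; the mappings are identity (iova == paddr),
 * which lets iova_to_phys() results be checked by simple arithmetic.
 */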

static void __init arm_lpae_dump_ops(struct io_pgtable_ops *ops)
{
	struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
	struct io_pgtable_cfg *cfg = &data->iop.cfg;

	pr_err("cfg: pgsize_bitmap 0x%lx, ias %u-bit\n",
		cfg->pgsize_bitmap, cfg->ias);
	pr_err("data: %d levels, 0x%zx pgd_size, %lu pg_shift, %lu bits_per_level, pgd @ %p\n",
		data->levels, data->pgd_size, data->pg_shift,
		data->bits_per_level, data->pgd);
}

#define __FAIL(ops, i)	({						\
		WARN(1, "selftest: test failed for fmt idx %d\n", (i));	\
		arm_lpae_dump_ops(ops);					\
		selftest_running = false;				\
		-EFAULT;						\
})

static int __init arm_lpae_run_tests(struct io_pgtable_cfg *cfg)
{
	static const enum io_pgtable_fmt fmts[] = {
		ARM_64_LPAE_S1,
		ARM_64_LPAE_S2,
	};

	int i, j;
	unsigned long iova;
	size_t size;
	struct io_pgtable_ops *ops;

	selftest_running = true;

	for (i = 0; i < ARRAY_SIZE(fmts); ++i) {
		cfg_cookie = cfg;
		ops = alloc_io_pgtable_ops(fmts[i], cfg, cfg);
		if (!ops) {
			pr_err("selftest: failed to allocate io pgtable ops\n");
			return -ENOMEM;
		}

		/*
		 * Initial sanity checks.
		 * Empty page tables shouldn't provide any translations.
		 */
		if (ops->iova_to_phys(ops, 42))
			return __FAIL(ops, i);

		if (ops->iova_to_phys(ops, SZ_1G + 42))
			return __FAIL(ops, i);

		if (ops->iova_to_phys(ops, SZ_2G + 42))
			return __FAIL(ops, i);

		/*
		 * Distinct mappings of different granule sizes.
		 */
		iova = 0;
		for_each_set_bit(j, &cfg->pgsize_bitmap, BITS_PER_LONG) {
			size = 1UL << j;

			if (ops->map(ops, iova, iova, size, IOMMU_READ |
							    IOMMU_WRITE |
							    IOMMU_NOEXEC |
							    IOMMU_CACHE))
				return __FAIL(ops, i);

			/* Overlapping mappings */
			if (!ops->map(ops, iova, iova + size, size,
				      IOMMU_READ | IOMMU_NOEXEC))
				return __FAIL(ops, i);

			if (ops->iova_to_phys(ops, iova + 42) != (iova + 42))
				return __FAIL(ops, i);

			iova += SZ_1G;
		}

		/* Partial unmap */
		size = 1UL << __ffs(cfg->pgsize_bitmap);
		if (ops->unmap(ops, SZ_1G + size, size) != size)
			return __FAIL(ops, i);

		/* Remap of partial unmap */
		if (ops->map(ops, SZ_1G + size, size, size, IOMMU_READ))
			return __FAIL(ops, i);

		if (ops->iova_to_phys(ops, SZ_1G + size + 42) != (size + 42))
			return __FAIL(ops, i);

		/* Full unmap */
		iova = 0;
		j = find_first_bit(&cfg->pgsize_bitmap, BITS_PER_LONG);
		while (j != BITS_PER_LONG) {
			size = 1UL << j;

			if (ops->unmap(ops, iova, size) != size)
				return __FAIL(ops, i);

			if (ops->iova_to_phys(ops, iova + 42))
				return __FAIL(ops, i);

			/* Remap full block */
			if (ops->map(ops, iova, iova, size, IOMMU_WRITE))
				return __FAIL(ops, i);

			if (ops->iova_to_phys(ops, iova + 42) != (iova + 42))
				return __FAIL(ops, i);

			iova += SZ_1G;
			j++;
			j = find_next_bit(&cfg->pgsize_bitmap, BITS_PER_LONG, j);
		}

		free_io_pgtable_ops(ops);
	}

	selftest_running = false;
	return 0;
}

static int __init arm_lpae_do_selftests(void)
{
	static const unsigned long pgsize[] = {
		SZ_4K | SZ_2M | SZ_1G,
		SZ_16K | SZ_32M,
		SZ_64K | SZ_512M,
	};

	static const unsigned int ias[] = {
		32, 36, 40, 42, 44, 48,
	};

	int i, j, pass = 0, fail = 0;
	struct io_pgtable_cfg cfg = {
		.tlb = &dummy_tlb_ops,
		.oas = 48,
		.quirks = IO_PGTABLE_QUIRK_NO_DMA,
	};

	for (i = 0; i < ARRAY_SIZE(pgsize); ++i) {
		for (j = 0; j < ARRAY_SIZE(ias); ++j) {
			cfg.pgsize_bitmap = pgsize[i];
			cfg.ias = ias[j];
			pr_info("selftest: pgsize_bitmap 0x%08lx, IAS %u\n",
				pgsize[i], ias[j]);
			if (arm_lpae_run_tests(&cfg))
				fail++;
			else
				pass++;
		}
	}

	pr_info("selftest: completed with %d PASS %d FAIL\n", pass, fail);
	return fail ? -EFAULT : 0;
}
subsys_initcall(arm_lpae_do_selftests);
#endif