/*
 * CPU-agnostic ARM page table allocator.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 *
 * Copyright (C) 2014 ARM Limited
 *
 * Author: Will Deacon <will.deacon@arm.com>
 */

#define pr_fmt(fmt)	"arm-lpae io-pgtable: " fmt

#include <linux/atomic.h>
#include <linux/bitops.h>
#include <linux/iommu.h>
#include <linux/kernel.h>
#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/dma-mapping.h>

#include <asm/barrier.h>

#include "io-pgtable.h"

#define ARM_LPAE_MAX_ADDR_BITS		52
#define ARM_LPAE_S2_MAX_CONCAT_PAGES	16
#define ARM_LPAE_MAX_LEVELS		4

/* Struct accessors */
#define io_pgtable_to_data(x)						\
	container_of((x), struct arm_lpae_io_pgtable, iop)

#define io_pgtable_ops_to_data(x)					\
	io_pgtable_to_data(io_pgtable_ops_to_pgtable(x))

/*
 * For consistency with the architecture, we always consider
 * ARM_LPAE_MAX_LEVELS levels, with the walk starting at level n >= 0.
 */
#define ARM_LPAE_START_LVL(d)	(ARM_LPAE_MAX_LEVELS - (d)->levels)

/*
 * Calculate the right shift amount to get to the portion describing level l
 * in a virtual address mapped by the pagetable in d.
 */
#define ARM_LPAE_LVL_SHIFT(l, d)					\
	((((d)->levels - ((l) - ARM_LPAE_START_LVL(d) + 1))		\
	  * (d)->bits_per_level) + (d)->pg_shift)

#define ARM_LPAE_GRANULE(d)	(1UL << (d)->pg_shift)

#define ARM_LPAE_PAGES_PER_PGD(d)					\
	DIV_ROUND_UP((d)->pgd_size, ARM_LPAE_GRANULE(d))

/*
 * Calculate the index at level l used to map virtual address a using the
 * pagetable in d.
 */
#define ARM_LPAE_PGD_IDX(l, d)						\
	((l) == ARM_LPAE_START_LVL(d) ? ilog2(ARM_LPAE_PAGES_PER_PGD(d)) : 0)

#define ARM_LPAE_LVL_IDX(a, l, d)					\
	(((u64)(a) >> ARM_LPAE_LVL_SHIFT(l, d)) &			\
	 ((1 << ((d)->bits_per_level + ARM_LPAE_PGD_IDX(l, d))) - 1))

/* Calculate the block/page mapping size at level l for pagetable in d. */
#define ARM_LPAE_BLOCK_SIZE(l, d)					\
	(1ULL << (ilog2(sizeof(arm_lpae_iopte)) +			\
		  ((ARM_LPAE_MAX_LEVELS - (l)) * (d)->bits_per_level)))

/* Page table bits */
#define ARM_LPAE_PTE_TYPE_SHIFT		0
#define ARM_LPAE_PTE_TYPE_MASK		0x3

#define ARM_LPAE_PTE_TYPE_BLOCK		1
#define ARM_LPAE_PTE_TYPE_TABLE		3
#define ARM_LPAE_PTE_TYPE_PAGE		3

#define ARM_LPAE_PTE_ADDR_MASK		GENMASK_ULL(47, 12)

#define ARM_LPAE_PTE_NSTABLE		(((arm_lpae_iopte)1) << 63)
#define ARM_LPAE_PTE_XN			(((arm_lpae_iopte)3) << 53)
#define ARM_LPAE_PTE_AF			(((arm_lpae_iopte)1) << 10)
#define ARM_LPAE_PTE_SH_NS		(((arm_lpae_iopte)0) << 8)
#define ARM_LPAE_PTE_SH_OS		(((arm_lpae_iopte)2) << 8)
#define ARM_LPAE_PTE_SH_IS		(((arm_lpae_iopte)3) << 8)
#define ARM_LPAE_PTE_NS			(((arm_lpae_iopte)1) << 5)
#define ARM_LPAE_PTE_VALID		(((arm_lpae_iopte)1) << 0)

#define ARM_LPAE_PTE_ATTR_LO_MASK	(((arm_lpae_iopte)0x3ff) << 2)
/* Ignore the contiguous bit for block splitting */
#define ARM_LPAE_PTE_ATTR_HI_MASK	(((arm_lpae_iopte)6) << 52)
#define ARM_LPAE_PTE_ATTR_MASK		(ARM_LPAE_PTE_ATTR_LO_MASK |	\
					 ARM_LPAE_PTE_ATTR_HI_MASK)
/* Software bit for solving coherency races */
#define ARM_LPAE_PTE_SW_SYNC		(((arm_lpae_iopte)1) << 55)

/* Stage-1 PTE */
#define ARM_LPAE_PTE_AP_UNPRIV		(((arm_lpae_iopte)1) << 6)
#define ARM_LPAE_PTE_AP_RDONLY		(((arm_lpae_iopte)2) << 6)
#define ARM_LPAE_PTE_ATTRINDX_SHIFT	2
#define ARM_LPAE_PTE_nG			(((arm_lpae_iopte)1) << 11)

/* Stage-2 PTE */
#define ARM_LPAE_PTE_HAP_FAULT		(((arm_lpae_iopte)0) << 6)
#define ARM_LPAE_PTE_HAP_READ		(((arm_lpae_iopte)1) << 6)
#define ARM_LPAE_PTE_HAP_WRITE		(((arm_lpae_iopte)2) << 6)
#define ARM_LPAE_PTE_MEMATTR_OIWB	(((arm_lpae_iopte)0xf) << 2)
#define ARM_LPAE_PTE_MEMATTR_NC		(((arm_lpae_iopte)0x5) << 2)
#define ARM_LPAE_PTE_MEMATTR_DEV	(((arm_lpae_iopte)0x1) << 2)

/* Register bits */
#define ARM_32_LPAE_TCR_EAE		(1 << 31)
#define ARM_64_LPAE_S2_TCR_RES1		(1 << 31)

#define ARM_LPAE_TCR_EPD1		(1 << 23)

#define ARM_LPAE_TCR_TG0_4K		(0 << 14)
#define ARM_LPAE_TCR_TG0_64K		(1 << 14)
#define ARM_LPAE_TCR_TG0_16K		(2 << 14)

#define ARM_LPAE_TCR_SH0_SHIFT		12
#define ARM_LPAE_TCR_SH0_MASK		0x3
#define ARM_LPAE_TCR_SH_NS		0
#define ARM_LPAE_TCR_SH_OS		2
#define ARM_LPAE_TCR_SH_IS		3

#define ARM_LPAE_TCR_ORGN0_SHIFT	10
#define ARM_LPAE_TCR_IRGN0_SHIFT	8
#define ARM_LPAE_TCR_RGN_MASK		0x3
#define ARM_LPAE_TCR_RGN_NC		0
#define ARM_LPAE_TCR_RGN_WBWA		1
#define ARM_LPAE_TCR_RGN_WT		2
#define ARM_LPAE_TCR_RGN_WB		3

#define ARM_LPAE_TCR_SL0_SHIFT		6
#define ARM_LPAE_TCR_SL0_MASK		0x3

#define ARM_LPAE_TCR_T0SZ_SHIFT		0
#define ARM_LPAE_TCR_SZ_MASK		0xf

#define ARM_LPAE_TCR_PS_SHIFT		16
#define ARM_LPAE_TCR_PS_MASK		0x7

#define ARM_LPAE_TCR_IPS_SHIFT		32
#define ARM_LPAE_TCR_IPS_MASK		0x7

#define ARM_LPAE_TCR_PS_32_BIT		0x0ULL
#define ARM_LPAE_TCR_PS_36_BIT		0x1ULL
#define ARM_LPAE_TCR_PS_40_BIT		0x2ULL
#define ARM_LPAE_TCR_PS_42_BIT		0x3ULL
#define ARM_LPAE_TCR_PS_44_BIT		0x4ULL
#define ARM_LPAE_TCR_PS_48_BIT		0x5ULL
#define ARM_LPAE_TCR_PS_52_BIT		0x6ULL

#define ARM_LPAE_MAIR_ATTR_SHIFT(n)	((n) << 3)
#define ARM_LPAE_MAIR_ATTR_MASK		0xff
#define ARM_LPAE_MAIR_ATTR_DEVICE	0x04
#define ARM_LPAE_MAIR_ATTR_NC		0x44
#define ARM_LPAE_MAIR_ATTR_WBRWA	0xff
#define ARM_LPAE_MAIR_ATTR_IDX_NC	0
#define ARM_LPAE_MAIR_ATTR_IDX_CACHE	1
#define ARM_LPAE_MAIR_ATTR_IDX_DEV	2

/* IOPTE accessors */
#define iopte_deref(pte, d)	__va(iopte_to_paddr(pte, d))

#define iopte_type(pte, l)					\
	(((pte) >> ARM_LPAE_PTE_TYPE_SHIFT) & ARM_LPAE_PTE_TYPE_MASK)

#define iopte_prot(pte)	((pte) & ARM_LPAE_PTE_ATTR_MASK)

#define iopte_leaf(pte, l)					\
	((l) == (ARM_LPAE_MAX_LEVELS - 1) ?			\
		(iopte_type(pte, l) == ARM_LPAE_PTE_TYPE_PAGE) :	\
		(iopte_type(pte, l) == ARM_LPAE_PTE_TYPE_BLOCK))

struct arm_lpae_io_pgtable {
	struct io_pgtable	iop;

	int			levels;
	size_t			pgd_size;
	unsigned long		pg_shift;
	unsigned long		bits_per_level;

	void			*pgd;
};

typedef u64 arm_lpae_iopte;

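/*
 * Convert between a physical address and the packed output-address field of
 * an IOPTE. The field proper covers PA bits 47:12; with the 64KB granule and
 * 52-bit output addresses, PA bits 51:48 are folded down into PTE bits 15:12
 * (which are RES0 for that granule) and unfolded again on the way back out.
 */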
static arm_lpae_iopte paddr_to_iopte(phys_addr_t paddr,
				     struct arm_lpae_io_pgtable *data)
{
	arm_lpae_iopte pte = paddr;

	/* Of the bits which overlap, either 51:48 or 15:12 are always RES0 */
	return (pte | (pte >> (48 - 12))) & ARM_LPAE_PTE_ADDR_MASK;
}

static phys_addr_t iopte_to_paddr(arm_lpae_iopte pte,
				  struct arm_lpae_io_pgtable *data)
{
	u64 paddr = pte & ARM_LPAE_PTE_ADDR_MASK;

	if (data->pg_shift < 16)
		return paddr;

	/* Rotate the packed high-order bits back to the top */
	return (paddr | (paddr << (48 - 12))) & (ARM_LPAE_PTE_ADDR_MASK << 4);
}

static bool selftest_running = false;

static dma_addr_t __arm_lpae_dma_addr(void *pages)
{
	return (dma_addr_t)virt_to_phys(pages);
}

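/*
 * Table memory is allocated as zeroed, naturally-sized page blocks from the
 * IOMMU device's NUMA node. Unless the walker is known to be cache-coherent
 * (IO_PGTABLE_QUIRK_NO_DMA), the pages are also DMA-mapped, and we insist
 * that the resulting DMA address equals the physical address: the walker is
 * handed raw physical pointers, so any translation or truncation by the DMA
 * layer would make the two views disagree.
 */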
static void *__arm_lpae_alloc_pages(size_t size, gfp_t gfp,
				    struct io_pgtable_cfg *cfg)
{
	struct device *dev = cfg->iommu_dev;
	int order = get_order(size);
	struct page *p;
	dma_addr_t dma;
	void *pages;

	VM_BUG_ON((gfp & __GFP_HIGHMEM));
	p = alloc_pages_node(dev ? dev_to_node(dev) : NUMA_NO_NODE,
			     gfp | __GFP_ZERO, order);
	if (!p)
		return NULL;

	pages = page_address(p);
	if (!(cfg->quirks & IO_PGTABLE_QUIRK_NO_DMA)) {
		dma = dma_map_single(dev, pages, size, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, dma))
			goto out_free;
		/*
		 * We depend on the IOMMU being able to work with any physical
		 * address directly, so if the DMA layer suggests otherwise by
		 * translating or truncating them, that bodes very badly...
		 */
		if (dma != virt_to_phys(pages))
			goto out_unmap;
	}

	return pages;

out_unmap:
	dev_err(dev, "Cannot accommodate DMA translation for IOMMU page tables\n");
	dma_unmap_single(dev, dma, size, DMA_TO_DEVICE);
out_free:
	__free_pages(p, order);
	return NULL;
}

static void __arm_lpae_free_pages(void *pages, size_t size,
				  struct io_pgtable_cfg *cfg)
{
	if (!(cfg->quirks & IO_PGTABLE_QUIRK_NO_DMA))
		dma_unmap_single(cfg->iommu_dev, __arm_lpae_dma_addr(pages),
				 size, DMA_TO_DEVICE);
	free_pages((unsigned long)pages, get_order(size));
}

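/*
 * A PTE update only becomes visible to a non-coherent table walker once the
 * CPU write has been pushed out to the point of coherency, which
 * __arm_lpae_sync_pte() does with a single-entry dma_sync. With
 * IO_PGTABLE_QUIRK_NO_DMA the walker snoops the CPU caches, so the plain
 * store in __arm_lpae_set_pte() suffices on its own.
 */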
static void __arm_lpae_sync_pte(arm_lpae_iopte *ptep,
				struct io_pgtable_cfg *cfg)
{
	dma_sync_single_for_device(cfg->iommu_dev, __arm_lpae_dma_addr(ptep),
				   sizeof(*ptep), DMA_TO_DEVICE);
}

static void __arm_lpae_set_pte(arm_lpae_iopte *ptep, arm_lpae_iopte pte,
			       struct io_pgtable_cfg *cfg)
{
	*ptep = pte;

	if (!(cfg->quirks & IO_PGTABLE_QUIRK_NO_DMA))
		__arm_lpae_sync_pte(ptep, cfg);
}

static size_t __arm_lpae_unmap(struct arm_lpae_io_pgtable *data,
			       unsigned long iova, size_t size, int lvl,
			       arm_lpae_iopte *ptep);

static void __arm_lpae_init_pte(struct arm_lpae_io_pgtable *data,
				phys_addr_t paddr, arm_lpae_iopte prot,
				int lvl, arm_lpae_iopte *ptep)
{
	arm_lpae_iopte pte = prot;

	if (data->iop.cfg.quirks & IO_PGTABLE_QUIRK_ARM_NS)
		pte |= ARM_LPAE_PTE_NS;

	if (lvl == ARM_LPAE_MAX_LEVELS - 1)
		pte |= ARM_LPAE_PTE_TYPE_PAGE;
	else
		pte |= ARM_LPAE_PTE_TYPE_BLOCK;

	pte |= ARM_LPAE_PTE_AF | ARM_LPAE_PTE_SH_IS;
	pte |= paddr_to_iopte(paddr, data);

	__arm_lpae_set_pte(ptep, pte, &data->iop.cfg);
}

static int arm_lpae_init_pte(struct arm_lpae_io_pgtable *data,
			     unsigned long iova, phys_addr_t paddr,
			     arm_lpae_iopte prot, int lvl,
			     arm_lpae_iopte *ptep)
{
	arm_lpae_iopte pte = *ptep;

	if (iopte_leaf(pte, lvl)) {
		/* We require an unmap first */
		WARN_ON(!selftest_running);
		return -EEXIST;
	} else if (iopte_type(pte, lvl) == ARM_LPAE_PTE_TYPE_TABLE) {
		/*
		 * We need to unmap and free the old table before
		 * overwriting it with a block entry.
		 */
		arm_lpae_iopte *tblp;
		size_t sz = ARM_LPAE_BLOCK_SIZE(lvl, data);

		tblp = ptep - ARM_LPAE_LVL_IDX(iova, lvl, data);
		if (WARN_ON(__arm_lpae_unmap(data, iova, sz, lvl, tblp) != sz))
			return -EINVAL;
	}

	__arm_lpae_init_pte(data, paddr, prot, lvl, ptep);
	return 0;
}

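/*
 * Install a new table pointer without taking a lock: dma_wmb() orders the
 * zero-filled table against the PTE that publishes it, cmpxchg64_relaxed()
 * resolves races with a concurrent mapper, and the ARM_LPAE_PTE_SW_SYNC
 * software bit records that an entry has already been DMA-synced so that
 * later observers need not sync it again.
 */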
static arm_lpae_iopte arm_lpae_install_table(arm_lpae_iopte *table,
					     arm_lpae_iopte *ptep,
					     arm_lpae_iopte curr,
					     struct io_pgtable_cfg *cfg)
{
	arm_lpae_iopte old, new;

	new = __pa(table) | ARM_LPAE_PTE_TYPE_TABLE;
	if (cfg->quirks & IO_PGTABLE_QUIRK_ARM_NS)
		new |= ARM_LPAE_PTE_NSTABLE;

	/*
	 * Ensure the table itself is visible before its PTE can be.
	 * Whilst we could get away with cmpxchg64_release below, this
	 * doesn't have any ordering semantics when !CONFIG_SMP.
	 */
	dma_wmb();

	old = cmpxchg64_relaxed(ptep, curr, new);

	if ((cfg->quirks & IO_PGTABLE_QUIRK_NO_DMA) ||
	    (old & ARM_LPAE_PTE_SW_SYNC))
		return old;

	/* Even if it's not ours, there's no point waiting; just kick it */
	__arm_lpae_sync_pte(ptep, cfg);
	if (old == curr)
		WRITE_ONCE(*ptep, new | ARM_LPAE_PTE_SW_SYNC);

	return old;
}

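/*
 * Recursive worker behind arm_lpae_map(): at each level, either install a
 * leaf (when the requested size is exactly this level's block size and that
 * size is in pgsize_bitmap) or ensure a next-level table exists, then recurse
 * into it. Next-level tables are allocated GFP_ATOMIC, so the walk stays safe
 * even if a caller maps from atomic context.
 */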
static int __arm_lpae_map(struct arm_lpae_io_pgtable *data, unsigned long iova,
			  phys_addr_t paddr, size_t size, arm_lpae_iopte prot,
			  int lvl, arm_lpae_iopte *ptep)
{
	arm_lpae_iopte *cptep, pte;
	size_t block_size = ARM_LPAE_BLOCK_SIZE(lvl, data);
	size_t tblsz = ARM_LPAE_GRANULE(data);
	struct io_pgtable_cfg *cfg = &data->iop.cfg;

	/* Find our entry at the current level */
	ptep += ARM_LPAE_LVL_IDX(iova, lvl, data);

	/* If we can install a leaf entry at this level, then do so */
	if (size == block_size && (size & cfg->pgsize_bitmap))
		return arm_lpae_init_pte(data, iova, paddr, prot, lvl, ptep);

	/* We can't allocate tables at the final level */
	if (WARN_ON(lvl >= ARM_LPAE_MAX_LEVELS - 1))
		return -EINVAL;

	/* Grab a pointer to the next level */
	pte = READ_ONCE(*ptep);
	if (!pte) {
		cptep = __arm_lpae_alloc_pages(tblsz, GFP_ATOMIC, cfg);
		if (!cptep)
			return -ENOMEM;

		pte = arm_lpae_install_table(cptep, ptep, 0, cfg);
		if (pte)
			__arm_lpae_free_pages(cptep, tblsz, cfg);
	} else if (!(cfg->quirks & IO_PGTABLE_QUIRK_NO_DMA) &&
		   !(pte & ARM_LPAE_PTE_SW_SYNC)) {
		__arm_lpae_sync_pte(ptep, cfg);
	}

	if (pte && !iopte_leaf(pte, lvl)) {
		cptep = iopte_deref(pte, data);
	} else if (pte) {
		/* We require an unmap first */
		WARN_ON(!selftest_running);
		return -EEXIST;
	}

	/* Rinse, repeat */
	return __arm_lpae_map(data, iova, paddr, size, prot, lvl + 1, cptep);
}

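/*
 * Translate IOMMU_* prot flags into LPAE PTE attribute bits. Stage-1 tables
 * use the AP permission bits plus a MAIR attribute index; stage-2 tables
 * instead use the HAP read/write bits and direct MemAttr encodings.
 * IOMMU_NOEXEC maps to the XN bits in either case.
 */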
static arm_lpae_iopte arm_lpae_prot_to_pte(struct arm_lpae_io_pgtable *data,
					   int prot)
{
	arm_lpae_iopte pte;

	if (data->iop.fmt == ARM_64_LPAE_S1 ||
	    data->iop.fmt == ARM_32_LPAE_S1) {
		pte = ARM_LPAE_PTE_nG;

		if (!(prot & IOMMU_WRITE) && (prot & IOMMU_READ))
			pte |= ARM_LPAE_PTE_AP_RDONLY;

		if (!(prot & IOMMU_PRIV))
			pte |= ARM_LPAE_PTE_AP_UNPRIV;

		if (prot & IOMMU_MMIO)
			pte |= (ARM_LPAE_MAIR_ATTR_IDX_DEV
				<< ARM_LPAE_PTE_ATTRINDX_SHIFT);
		else if (prot & IOMMU_CACHE)
			pte |= (ARM_LPAE_MAIR_ATTR_IDX_CACHE
				<< ARM_LPAE_PTE_ATTRINDX_SHIFT);
	} else {
		pte = ARM_LPAE_PTE_HAP_FAULT;
		if (prot & IOMMU_READ)
			pte |= ARM_LPAE_PTE_HAP_READ;
		if (prot & IOMMU_WRITE)
			pte |= ARM_LPAE_PTE_HAP_WRITE;
		if (prot & IOMMU_MMIO)
			pte |= ARM_LPAE_PTE_MEMATTR_DEV;
		else if (prot & IOMMU_CACHE)
			pte |= ARM_LPAE_PTE_MEMATTR_OIWB;
		else
			pte |= ARM_LPAE_PTE_MEMATTR_NC;
	}

	if (prot & IOMMU_NOEXEC)
		pte |= ARM_LPAE_PTE_XN;

	return pte;
}

static int arm_lpae_map(struct io_pgtable_ops *ops, unsigned long iova,
			phys_addr_t paddr, size_t size, int iommu_prot)
{
	struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
	arm_lpae_iopte *ptep = data->pgd;
	int ret, lvl = ARM_LPAE_START_LVL(data);
	arm_lpae_iopte prot;

	/* If no access, then nothing to do */
	if (!(iommu_prot & (IOMMU_READ | IOMMU_WRITE)))
		return 0;

	if (WARN_ON(iova >= (1ULL << data->iop.cfg.ias) ||
		    paddr >= (1ULL << data->iop.cfg.oas)))
		return -ERANGE;

	prot = arm_lpae_prot_to_pte(data, iommu_prot);
	ret = __arm_lpae_map(data, iova, paddr, size, prot, lvl, ptep);
	/*
	 * Synchronise all PTE updates for the new mapping before there's
	 * a chance for anything to kick off a table walk for the new iova.
	 */
	wmb();

	return ret;
}

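/*
 * Tear down a (sub-)tree of tables depth-first: walk every entry in this
 * table, recurse into anything that is a valid non-leaf pointer, then free
 * the table itself. At the final level there is nothing to recurse into, so
 * the walk is skipped and only the page is freed.
 */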
static void __arm_lpae_free_pgtable(struct arm_lpae_io_pgtable *data, int lvl,
				    arm_lpae_iopte *ptep)
{
	arm_lpae_iopte *start, *end;
	unsigned long table_size;

	if (lvl == ARM_LPAE_START_LVL(data))
		table_size = data->pgd_size;
	else
		table_size = ARM_LPAE_GRANULE(data);

	start = ptep;

	/* Only leaf entries at the last level */
	if (lvl == ARM_LPAE_MAX_LEVELS - 1)
		end = ptep;
	else
		end = (void *)ptep + table_size;

	while (ptep != end) {
		arm_lpae_iopte pte = *ptep++;

		if (!pte || iopte_leaf(pte, lvl))
			continue;

		__arm_lpae_free_pgtable(data, lvl + 1, iopte_deref(pte, data));
	}

	__arm_lpae_free_pages(start, table_size, &data->iop.cfg);
}

static void arm_lpae_free_pgtable(struct io_pgtable *iop)
{
	struct arm_lpae_io_pgtable *data = io_pgtable_to_data(iop);

	__arm_lpae_free_pgtable(data, ARM_LPAE_START_LVL(data), data->pgd);
	kfree(data);
}

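/*
 * Unmapping a region smaller than an existing block mapping requires
 * splitting the block: allocate a table, pre-populate it with entries
 * covering everything except the part being unmapped, then swap the new
 * table in over the old block PTE via arm_lpae_install_table(). If we lose
 * that race to a concurrent splitter, retry the unmap through whichever
 * table won.
 */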
static size_t arm_lpae_split_blk_unmap(struct arm_lpae_io_pgtable *data,
				       unsigned long iova, size_t size,
				       arm_lpae_iopte blk_pte, int lvl,
				       arm_lpae_iopte *ptep)
{
	struct io_pgtable_cfg *cfg = &data->iop.cfg;
	arm_lpae_iopte pte, *tablep;
	phys_addr_t blk_paddr;
	size_t tablesz = ARM_LPAE_GRANULE(data);
	size_t split_sz = ARM_LPAE_BLOCK_SIZE(lvl, data);
	int i, unmap_idx = -1;

	if (WARN_ON(lvl == ARM_LPAE_MAX_LEVELS))
		return 0;

	tablep = __arm_lpae_alloc_pages(tablesz, GFP_ATOMIC, cfg);
	if (!tablep)
		return 0; /* Bytes unmapped */

	if (size == split_sz)
		unmap_idx = ARM_LPAE_LVL_IDX(iova, lvl, data);

	blk_paddr = iopte_to_paddr(blk_pte, data);
	pte = iopte_prot(blk_pte);

	for (i = 0; i < tablesz / sizeof(pte); i++, blk_paddr += split_sz) {
		/* Unmap! */
		if (i == unmap_idx)
			continue;

		__arm_lpae_init_pte(data, blk_paddr, pte, lvl, &tablep[i]);
	}

	pte = arm_lpae_install_table(tablep, ptep, blk_pte, cfg);
	if (pte != blk_pte) {
		__arm_lpae_free_pages(tablep, tablesz, cfg);
		/*
		 * We may race against someone unmapping another part of this
		 * block, but anything else is invalid. We can't misinterpret
		 * a page entry here since we're never at the last level.
		 */
		if (iopte_type(pte, lvl - 1) != ARM_LPAE_PTE_TYPE_TABLE)
			return 0;

		tablep = iopte_deref(pte, data);
	} else if (unmap_idx >= 0) {
		io_pgtable_tlb_add_flush(&data->iop, iova, size, size, true);
		io_pgtable_tlb_sync(&data->iop);
		return size;
	}

	return __arm_lpae_unmap(data, iova, size, lvl, tablep);
}

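/*
 * Recursive worker behind arm_lpae_unmap(): walk down until the requested
 * size matches the block size at the current level, clear the PTE there and
 * flush the TLB. Removing a table pointer also flushes any partial walks and
 * frees the subtree; hitting a leaf larger than the request falls through to
 * arm_lpae_split_blk_unmap().
 */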
static size_t __arm_lpae_unmap(struct arm_lpae_io_pgtable *data,
			       unsigned long iova, size_t size, int lvl,
			       arm_lpae_iopte *ptep)
{
	arm_lpae_iopte pte;
	struct io_pgtable *iop = &data->iop;

	/* Something went horribly wrong and we ran out of page table */
	if (WARN_ON(lvl == ARM_LPAE_MAX_LEVELS))
		return 0;

	ptep += ARM_LPAE_LVL_IDX(iova, lvl, data);
	pte = READ_ONCE(*ptep);
	if (WARN_ON(!pte))
		return 0;

	/* If the size matches this level, we're in the right place */
	if (size == ARM_LPAE_BLOCK_SIZE(lvl, data)) {
		__arm_lpae_set_pte(ptep, 0, &iop->cfg);

		if (!iopte_leaf(pte, lvl)) {
			/* Also flush any partial walks */
			io_pgtable_tlb_add_flush(iop, iova, size,
						 ARM_LPAE_GRANULE(data), false);
			io_pgtable_tlb_sync(iop);
			ptep = iopte_deref(pte, data);
			__arm_lpae_free_pgtable(data, lvl + 1, ptep);
		} else if (iop->cfg.quirks & IO_PGTABLE_QUIRK_NON_STRICT) {
			/*
			 * Order the PTE update against queueing the IOVA, to
			 * guarantee that a flush callback from a different CPU
			 * has observed it before the TLBIALL can be issued.
			 */
			smp_wmb();
		} else {
			io_pgtable_tlb_add_flush(iop, iova, size, size, true);
		}

		return size;
	} else if (iopte_leaf(pte, lvl)) {
		/*
		 * Insert a table at the next level to map the old region,
		 * minus the part we want to unmap
		 */
		return arm_lpae_split_blk_unmap(data, iova, size, pte,
						lvl + 1, ptep);
	}

	/* Keep on walkin' */
	ptep = iopte_deref(pte, data);
	return __arm_lpae_unmap(data, iova, size, lvl + 1, ptep);
}

static size_t arm_lpae_unmap(struct io_pgtable_ops *ops, unsigned long iova,
			     size_t size)
{
	struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
	arm_lpae_iopte *ptep = data->pgd;
	int lvl = ARM_LPAE_START_LVL(data);

	if (WARN_ON(iova >= (1ULL << data->iop.cfg.ias)))
		return 0;

	return __arm_lpae_unmap(data, iova, size, lvl, ptep);
}

static phys_addr_t arm_lpae_iova_to_phys(struct io_pgtable_ops *ops,
					 unsigned long iova)
{
	struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
	arm_lpae_iopte pte, *ptep = data->pgd;
	int lvl = ARM_LPAE_START_LVL(data);

	do {
		/* Valid IOPTE pointer? */
		if (!ptep)
			return 0;

		/* Grab the IOPTE we're interested in */
		ptep += ARM_LPAE_LVL_IDX(iova, lvl, data);
		pte = READ_ONCE(*ptep);

		/* Valid entry? */
		if (!pte)
			return 0;

		/* Leaf entry? */
		if (iopte_leaf(pte, lvl))
			goto found_translation;

		/* Take it to the next level */
		ptep = iopte_deref(pte, data);
	} while (++lvl < ARM_LPAE_MAX_LEVELS);

	/* Ran out of page tables to walk */
	return 0;

found_translation:
	iova &= (ARM_LPAE_BLOCK_SIZE(lvl, data) - 1);
	return iopte_to_paddr(pte, data) | iova;
}

static void arm_lpae_restrict_pgsizes(struct io_pgtable_cfg *cfg)
{
	unsigned long granule, page_sizes;
	unsigned int max_addr_bits = 48;

	/*
	 * We need to restrict the supported page sizes to match the
	 * translation regime for a particular granule. Aim to match
	 * the CPU page size if possible, otherwise prefer smaller sizes.
	 * While we're at it, restrict the block sizes to match the
	 * chosen granule.
	 */
	if (cfg->pgsize_bitmap & PAGE_SIZE)
		granule = PAGE_SIZE;
	else if (cfg->pgsize_bitmap & ~PAGE_MASK)
		granule = 1UL << __fls(cfg->pgsize_bitmap & ~PAGE_MASK);
	else if (cfg->pgsize_bitmap & PAGE_MASK)
		granule = 1UL << __ffs(cfg->pgsize_bitmap & PAGE_MASK);
	else
		granule = 0;

	switch (granule) {
	case SZ_4K:
		page_sizes = (SZ_4K | SZ_2M | SZ_1G);
		break;
	case SZ_16K:
		page_sizes = (SZ_16K | SZ_32M);
		break;
	case SZ_64K:
		max_addr_bits = 52;
		page_sizes = (SZ_64K | SZ_512M);
		if (cfg->oas > 48)
			page_sizes |= 1ULL << 42; /* 4TB */
		break;
	default:
		page_sizes = 0;
	}

	cfg->pgsize_bitmap &= page_sizes;
	cfg->ias = min(cfg->ias, max_addr_bits);
	cfg->oas = min(cfg->oas, max_addr_bits);
}

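/*
 * Derive the page-table geometry from the configured granule and IAS. As a
 * worked example for a 4KB granule and 48-bit IAS: pg_shift = 12, so each
 * 8-byte IOPTE resolves bits_per_level = 12 - 3 = 9 bits; the remaining
 * va_bits = 48 - 12 = 36 bits then need DIV_ROUND_UP(36, 9) = 4 levels, and
 * the pgd covers 36 - 3 * 9 = 9 bits, i.e. a single 4KB page.
 */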
static struct arm_lpae_io_pgtable *
arm_lpae_alloc_pgtable(struct io_pgtable_cfg *cfg)
{
	unsigned long va_bits, pgd_bits;
	struct arm_lpae_io_pgtable *data;

	arm_lpae_restrict_pgsizes(cfg);

	if (!(cfg->pgsize_bitmap & (SZ_4K | SZ_16K | SZ_64K)))
		return NULL;

	if (cfg->ias > ARM_LPAE_MAX_ADDR_BITS)
		return NULL;

	if (cfg->oas > ARM_LPAE_MAX_ADDR_BITS)
		return NULL;

	if (!selftest_running && cfg->iommu_dev->dma_pfn_offset) {
		dev_err(cfg->iommu_dev, "Cannot accommodate DMA offset for IOMMU page tables\n");
		return NULL;
	}

	data = kmalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		return NULL;

	data->pg_shift = __ffs(cfg->pgsize_bitmap);
	data->bits_per_level = data->pg_shift - ilog2(sizeof(arm_lpae_iopte));

	va_bits = cfg->ias - data->pg_shift;
	data->levels = DIV_ROUND_UP(va_bits, data->bits_per_level);

	/* Calculate the actual size of our pgd (without concatenation) */
	pgd_bits = va_bits - (data->bits_per_level * (data->levels - 1));
	data->pgd_size = 1UL << (pgd_bits + ilog2(sizeof(arm_lpae_iopte)));

	data->iop.ops = (struct io_pgtable_ops) {
		.map		= arm_lpae_map,
		.unmap		= arm_lpae_unmap,
		.iova_to_phys	= arm_lpae_iova_to_phys,
	};

	return data;
}

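/*
 * Stage-1 allocation: compute a TCR (granule, shareability, cacheability,
 * T0SZ and IPS fields, with TTBR1 walks disabled via EPD1), a MAIR providing
 * the non-cacheable, write-back cacheable and device memory types at the
 * indices arm_lpae_prot_to_pte() expects, and finally the pgd, whose physical
 * address becomes TTBR0.
 */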
static struct io_pgtable *
arm_64_lpae_alloc_pgtable_s1(struct io_pgtable_cfg *cfg, void *cookie)
{
	u64 reg;
	struct arm_lpae_io_pgtable *data;

	if (cfg->quirks & ~(IO_PGTABLE_QUIRK_ARM_NS | IO_PGTABLE_QUIRK_NO_DMA |
			    IO_PGTABLE_QUIRK_NON_STRICT))
		return NULL;

	data = arm_lpae_alloc_pgtable(cfg);
	if (!data)
		return NULL;

	/* TCR */
	reg = (ARM_LPAE_TCR_SH_IS << ARM_LPAE_TCR_SH0_SHIFT) |
	      (ARM_LPAE_TCR_RGN_WBWA << ARM_LPAE_TCR_IRGN0_SHIFT) |
	      (ARM_LPAE_TCR_RGN_WBWA << ARM_LPAE_TCR_ORGN0_SHIFT);

	switch (ARM_LPAE_GRANULE(data)) {
	case SZ_4K:
		reg |= ARM_LPAE_TCR_TG0_4K;
		break;
	case SZ_16K:
		reg |= ARM_LPAE_TCR_TG0_16K;
		break;
	case SZ_64K:
		reg |= ARM_LPAE_TCR_TG0_64K;
		break;
	}

	switch (cfg->oas) {
	case 32:
		reg |= (ARM_LPAE_TCR_PS_32_BIT << ARM_LPAE_TCR_IPS_SHIFT);
		break;
	case 36:
		reg |= (ARM_LPAE_TCR_PS_36_BIT << ARM_LPAE_TCR_IPS_SHIFT);
		break;
	case 40:
		reg |= (ARM_LPAE_TCR_PS_40_BIT << ARM_LPAE_TCR_IPS_SHIFT);
		break;
	case 42:
		reg |= (ARM_LPAE_TCR_PS_42_BIT << ARM_LPAE_TCR_IPS_SHIFT);
		break;
	case 44:
		reg |= (ARM_LPAE_TCR_PS_44_BIT << ARM_LPAE_TCR_IPS_SHIFT);
		break;
	case 48:
		reg |= (ARM_LPAE_TCR_PS_48_BIT << ARM_LPAE_TCR_IPS_SHIFT);
		break;
	case 52:
		reg |= (ARM_LPAE_TCR_PS_52_BIT << ARM_LPAE_TCR_IPS_SHIFT);
		break;
	default:
		goto out_free_data;
	}

	reg |= (64ULL - cfg->ias) << ARM_LPAE_TCR_T0SZ_SHIFT;

	/* Disable speculative walks through TTBR1 */
	reg |= ARM_LPAE_TCR_EPD1;
	cfg->arm_lpae_s1_cfg.tcr = reg;

	/* MAIRs */
	reg = (ARM_LPAE_MAIR_ATTR_NC
	       << ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_NC)) |
	      (ARM_LPAE_MAIR_ATTR_WBRWA
	       << ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_CACHE)) |
	      (ARM_LPAE_MAIR_ATTR_DEVICE
	       << ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_DEV));

	cfg->arm_lpae_s1_cfg.mair[0] = reg;
	cfg->arm_lpae_s1_cfg.mair[1] = 0;

	/* Looking good; allocate a pgd */
	data->pgd = __arm_lpae_alloc_pages(data->pgd_size, GFP_KERNEL, cfg);
	if (!data->pgd)
		goto out_free_data;

	/* Ensure the empty pgd is visible before any actual TTBR write */
	wmb();

	/* TTBRs */
	cfg->arm_lpae_s1_cfg.ttbr[0] = virt_to_phys(data->pgd);
	cfg->arm_lpae_s1_cfg.ttbr[1] = 0;
	return &data->iop;

out_free_data:
	kfree(data);
	return NULL;
}

static struct io_pgtable *
arm_64_lpae_alloc_pgtable_s2(struct io_pgtable_cfg *cfg, void *cookie)
{
	u64 reg, sl;
	struct arm_lpae_io_pgtable *data;

	/* The NS quirk doesn't apply at stage 2 */
	if (cfg->quirks & ~(IO_PGTABLE_QUIRK_NO_DMA |
			    IO_PGTABLE_QUIRK_NON_STRICT))
		return NULL;

	data = arm_lpae_alloc_pgtable(cfg);
	if (!data)
		return NULL;

	/*
	 * Concatenate PGDs at level 1 if possible in order to reduce
	 * the depth of the stage-2 walk.
	 */
	if (data->levels == ARM_LPAE_MAX_LEVELS) {
		unsigned long pgd_pages;

		pgd_pages = data->pgd_size >> ilog2(sizeof(arm_lpae_iopte));
		if (pgd_pages <= ARM_LPAE_S2_MAX_CONCAT_PAGES) {
			data->pgd_size = pgd_pages << data->pg_shift;
			data->levels--;
		}
	}

	/* VTCR */
	reg = ARM_64_LPAE_S2_TCR_RES1 |
	      (ARM_LPAE_TCR_SH_IS << ARM_LPAE_TCR_SH0_SHIFT) |
	      (ARM_LPAE_TCR_RGN_WBWA << ARM_LPAE_TCR_IRGN0_SHIFT) |
	      (ARM_LPAE_TCR_RGN_WBWA << ARM_LPAE_TCR_ORGN0_SHIFT);

	sl = ARM_LPAE_START_LVL(data);

	switch (ARM_LPAE_GRANULE(data)) {
	case SZ_4K:
		reg |= ARM_LPAE_TCR_TG0_4K;
		sl++; /* SL0 format is different for 4K granule size */
		break;
	case SZ_16K:
		reg |= ARM_LPAE_TCR_TG0_16K;
		break;
	case SZ_64K:
		reg |= ARM_LPAE_TCR_TG0_64K;
		break;
	}

	switch (cfg->oas) {
	case 32:
		reg |= (ARM_LPAE_TCR_PS_32_BIT << ARM_LPAE_TCR_PS_SHIFT);
		break;
	case 36:
		reg |= (ARM_LPAE_TCR_PS_36_BIT << ARM_LPAE_TCR_PS_SHIFT);
		break;
	case 40:
		reg |= (ARM_LPAE_TCR_PS_40_BIT << ARM_LPAE_TCR_PS_SHIFT);
		break;
	case 42:
		reg |= (ARM_LPAE_TCR_PS_42_BIT << ARM_LPAE_TCR_PS_SHIFT);
		break;
	case 44:
		reg |= (ARM_LPAE_TCR_PS_44_BIT << ARM_LPAE_TCR_PS_SHIFT);
		break;
	case 48:
		reg |= (ARM_LPAE_TCR_PS_48_BIT << ARM_LPAE_TCR_PS_SHIFT);
		break;
	case 52:
		reg |= (ARM_LPAE_TCR_PS_52_BIT << ARM_LPAE_TCR_PS_SHIFT);
		break;
	default:
		goto out_free_data;
	}

	reg |= (64ULL - cfg->ias) << ARM_LPAE_TCR_T0SZ_SHIFT;
	reg |= (~sl & ARM_LPAE_TCR_SL0_MASK) << ARM_LPAE_TCR_SL0_SHIFT;
	cfg->arm_lpae_s2_cfg.vtcr = reg;

	/* Allocate pgd pages */
	data->pgd = __arm_lpae_alloc_pages(data->pgd_size, GFP_KERNEL, cfg);
	if (!data->pgd)
		goto out_free_data;

	/* Ensure the empty pgd is visible before any actual TTBR write */
	wmb();

	/* VTTBR */
	cfg->arm_lpae_s2_cfg.vttbr = virt_to_phys(data->pgd);
	return &data->iop;

out_free_data:
	kfree(data);
	return NULL;
}

static struct io_pgtable *
arm_32_lpae_alloc_pgtable_s1(struct io_pgtable_cfg *cfg, void *cookie)
{
	struct io_pgtable *iop;

	if (cfg->ias > 32 || cfg->oas > 40)
		return NULL;

	cfg->pgsize_bitmap &= (SZ_4K | SZ_2M | SZ_1G);
	iop = arm_64_lpae_alloc_pgtable_s1(cfg, cookie);
	if (iop) {
		cfg->arm_lpae_s1_cfg.tcr |= ARM_32_LPAE_TCR_EAE;
		cfg->arm_lpae_s1_cfg.tcr &= 0xffffffff;
	}

	return iop;
}

static struct io_pgtable *
arm_32_lpae_alloc_pgtable_s2(struct io_pgtable_cfg *cfg, void *cookie)
{
	struct io_pgtable *iop;

	if (cfg->ias > 40 || cfg->oas > 40)
		return NULL;

	cfg->pgsize_bitmap &= (SZ_4K | SZ_2M | SZ_1G);
	iop = arm_64_lpae_alloc_pgtable_s2(cfg, cookie);
	if (iop)
		cfg->arm_lpae_s2_cfg.vtcr &= 0xffffffff;

	return iop;
}

struct io_pgtable_init_fns io_pgtable_arm_64_lpae_s1_init_fns = {
	.alloc	= arm_64_lpae_alloc_pgtable_s1,
	.free	= arm_lpae_free_pgtable,
};

struct io_pgtable_init_fns io_pgtable_arm_64_lpae_s2_init_fns = {
	.alloc	= arm_64_lpae_alloc_pgtable_s2,
	.free	= arm_lpae_free_pgtable,
};

struct io_pgtable_init_fns io_pgtable_arm_32_lpae_s1_init_fns = {
	.alloc	= arm_32_lpae_alloc_pgtable_s1,
	.free	= arm_lpae_free_pgtable,
};

struct io_pgtable_init_fns io_pgtable_arm_32_lpae_s2_init_fns = {
	.alloc	= arm_32_lpae_alloc_pgtable_s2,
	.free	= arm_lpae_free_pgtable,
};

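/*
 * The self-test below doubles as a usage example for this interface: fill in
 * a struct io_pgtable_cfg, allocate ops with alloc_io_pgtable_ops(), then
 * drive the table purely through ops->map(), ops->unmap() and
 * ops->iova_to_phys(). The dummy TLB callbacks drive no hardware; they only
 * sanity-check the cookie and the flush sizes.
 */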
#ifdef CONFIG_IOMMU_IO_PGTABLE_LPAE_SELFTEST

static struct io_pgtable_cfg *cfg_cookie;

static void dummy_tlb_flush_all(void *cookie)
{
	WARN_ON(cookie != cfg_cookie);
}

static void dummy_tlb_add_flush(unsigned long iova, size_t size,
				size_t granule, bool leaf, void *cookie)
{
	WARN_ON(cookie != cfg_cookie);
	WARN_ON(!(size & cfg_cookie->pgsize_bitmap));
}

static void dummy_tlb_sync(void *cookie)
{
	WARN_ON(cookie != cfg_cookie);
}

static const struct iommu_gather_ops dummy_tlb_ops __initconst = {
	.tlb_flush_all	= dummy_tlb_flush_all,
	.tlb_add_flush	= dummy_tlb_add_flush,
	.tlb_sync	= dummy_tlb_sync,
};

static void __init arm_lpae_dump_ops(struct io_pgtable_ops *ops)
{
	struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
	struct io_pgtable_cfg *cfg = &data->iop.cfg;

	pr_err("cfg: pgsize_bitmap 0x%lx, ias %u-bit\n",
		cfg->pgsize_bitmap, cfg->ias);
	pr_err("data: %d levels, 0x%zx pgd_size, %lu pg_shift, %lu bits_per_level, pgd @ %p\n",
		data->levels, data->pgd_size, data->pg_shift,
		data->bits_per_level, data->pgd);
}

#define __FAIL(ops, i)	({						\
		WARN(1, "selftest: test failed for fmt idx %d\n", (i));	\
		arm_lpae_dump_ops(ops);					\
		selftest_running = false;				\
		-EFAULT;						\
})

static int __init arm_lpae_run_tests(struct io_pgtable_cfg *cfg)
{
	static const enum io_pgtable_fmt fmts[] = {
		ARM_64_LPAE_S1,
		ARM_64_LPAE_S2,
	};

	int i, j;
	unsigned long iova;
	size_t size;
	struct io_pgtable_ops *ops;

	selftest_running = true;

	for (i = 0; i < ARRAY_SIZE(fmts); ++i) {
		cfg_cookie = cfg;
		ops = alloc_io_pgtable_ops(fmts[i], cfg, cfg);
		if (!ops) {
			pr_err("selftest: failed to allocate io pgtable ops\n");
			return -ENOMEM;
		}

		/*
		 * Initial sanity checks.
		 * Empty page tables shouldn't provide any translations.
		 */
		if (ops->iova_to_phys(ops, 42))
			return __FAIL(ops, i);

		if (ops->iova_to_phys(ops, SZ_1G + 42))
			return __FAIL(ops, i);

		if (ops->iova_to_phys(ops, SZ_2G + 42))
			return __FAIL(ops, i);

		/*
		 * Distinct mappings of different granule sizes.
		 */
		iova = 0;
		for_each_set_bit(j, &cfg->pgsize_bitmap, BITS_PER_LONG) {
			size = 1UL << j;

			if (ops->map(ops, iova, iova, size, IOMMU_READ |
							    IOMMU_WRITE |
							    IOMMU_NOEXEC |
							    IOMMU_CACHE))
				return __FAIL(ops, i);

			/* Overlapping mappings */
			if (!ops->map(ops, iova, iova + size, size,
				      IOMMU_READ | IOMMU_NOEXEC))
				return __FAIL(ops, i);

			if (ops->iova_to_phys(ops, iova + 42) != (iova + 42))
				return __FAIL(ops, i);

			iova += SZ_1G;
		}

		/* Partial unmap */
		size = 1UL << __ffs(cfg->pgsize_bitmap);
		if (ops->unmap(ops, SZ_1G + size, size) != size)
			return __FAIL(ops, i);

		/* Remap of partial unmap */
		if (ops->map(ops, SZ_1G + size, size, size, IOMMU_READ))
			return __FAIL(ops, i);

		if (ops->iova_to_phys(ops, SZ_1G + size + 42) != (size + 42))
			return __FAIL(ops, i);

		/* Full unmap */
		iova = 0;
		for_each_set_bit(j, &cfg->pgsize_bitmap, BITS_PER_LONG) {
			size = 1UL << j;

			if (ops->unmap(ops, iova, size) != size)
				return __FAIL(ops, i);

			if (ops->iova_to_phys(ops, iova + 42))
				return __FAIL(ops, i);

			/* Remap full block */
			if (ops->map(ops, iova, iova, size, IOMMU_WRITE))
				return __FAIL(ops, i);

			if (ops->iova_to_phys(ops, iova + 42) != (iova + 42))
				return __FAIL(ops, i);

			iova += SZ_1G;
		}

		free_io_pgtable_ops(ops);
	}

	selftest_running = false;
	return 0;
}

static int __init arm_lpae_do_selftests(void)
{
	static const unsigned long pgsize[] = {
		SZ_4K | SZ_2M | SZ_1G,
		SZ_16K | SZ_32M,
		SZ_64K | SZ_512M,
	};

	static const unsigned int ias[] = {
		32, 36, 40, 42, 44, 48,
	};

	int i, j, pass = 0, fail = 0;
	struct io_pgtable_cfg cfg = {
		.tlb = &dummy_tlb_ops,
		.oas = 48,
		.quirks = IO_PGTABLE_QUIRK_NO_DMA,
	};

	for (i = 0; i < ARRAY_SIZE(pgsize); ++i) {
		for (j = 0; j < ARRAY_SIZE(ias); ++j) {
			cfg.pgsize_bitmap = pgsize[i];
			cfg.ias = ias[j];
			pr_info("selftest: pgsize_bitmap 0x%08lx, IAS %u\n",
				pgsize[i], ias[j]);
			if (arm_lpae_run_tests(&cfg))
				fail++;
			else
				pass++;
		}
	}

	pr_info("selftest: completed with %d PASS %d FAIL\n", pass, fail);
	return fail ? -EFAULT : 0;
}
subsys_initcall(arm_lpae_do_selftests);
#endif