// SPDX-License-Identifier: GPL-2.0-only
/*
 * Stand-alone page-table allocator for hyp stage-1 and guest stage-2.
 * No bombay mix was harmed in the writing of this file.
 *
 * Copyright (C) 2020 Google LLC
 * Author: Will Deacon <will@kernel.org>
 */

#include <linux/bitfield.h>
#include <asm/kvm_pgtable.h>
#include <asm/stage2_pgtable.h>


#define KVM_PTE_TYPE			BIT(1)
#define KVM_PTE_TYPE_BLOCK		0
#define KVM_PTE_TYPE_PAGE		1
#define KVM_PTE_TYPE_TABLE		1

#define KVM_PTE_LEAF_ATTR_LO		GENMASK(11, 2)

#define KVM_PTE_LEAF_ATTR_LO_S1_ATTRIDX	GENMASK(4, 2)
#define KVM_PTE_LEAF_ATTR_LO_S1_AP	GENMASK(7, 6)
#define KVM_PTE_LEAF_ATTR_LO_S1_AP_RO	3
#define KVM_PTE_LEAF_ATTR_LO_S1_AP_RW	1
#define KVM_PTE_LEAF_ATTR_LO_S1_SH	GENMASK(9, 8)
#define KVM_PTE_LEAF_ATTR_LO_S1_SH_IS	3
#define KVM_PTE_LEAF_ATTR_LO_S1_AF	BIT(10)

#define KVM_PTE_LEAF_ATTR_LO_S2_MEMATTR	GENMASK(5, 2)
#define KVM_PTE_LEAF_ATTR_LO_S2_S2AP_R	BIT(6)
#define KVM_PTE_LEAF_ATTR_LO_S2_S2AP_W	BIT(7)
#define KVM_PTE_LEAF_ATTR_LO_S2_SH	GENMASK(9, 8)
#define KVM_PTE_LEAF_ATTR_LO_S2_SH_IS	3
#define KVM_PTE_LEAF_ATTR_LO_S2_AF	BIT(10)

#define KVM_PTE_LEAF_ATTR_HI		GENMASK(63, 51)

#define KVM_PTE_LEAF_ATTR_HI_SW		GENMASK(58, 55)

#define KVM_PTE_LEAF_ATTR_HI_S1_XN	BIT(54)

#define KVM_PTE_LEAF_ATTR_HI_S2_XN	BIT(54)

#define KVM_PTE_LEAF_ATTR_S2_PERMS	(KVM_PTE_LEAF_ATTR_LO_S2_S2AP_R | \
					 KVM_PTE_LEAF_ATTR_LO_S2_S2AP_W | \
					 KVM_PTE_LEAF_ATTR_HI_S2_XN)

#define KVM_INVALID_PTE_OWNER_MASK	GENMASK(9, 2)
#define KVM_MAX_OWNER_ID		1

struct kvm_pgtable_walk_data {
	struct kvm_pgtable		*pgt;
	struct kvm_pgtable_walker	*walker;

	u64				addr;
	u64				end;
};

#define KVM_PHYS_INVALID		(-1ULL)

static bool kvm_phys_is_valid(u64 phys)
{
	return phys < BIT(id_aa64mmfr0_parange_to_phys_shift(ID_AA64MMFR0_PARANGE_MAX));
}

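/*
 * A block mapping can only be installed at this level if the level supports
 * block entries, the remaining [addr, end) range covers at least one whole
 * granule, and both the input address and (if valid) the physical address
 * are aligned to that granule.
 */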
static bool kvm_block_mapping_supported(u64 addr, u64 end, u64 phys, u32 level)
{
	u64 granule = kvm_granule_size(level);

	if (!kvm_level_supports_block_mapping(level))
		return false;

	if (granule > (end - addr))
		return false;

	if (kvm_phys_is_valid(phys) && !IS_ALIGNED(phys, granule))
		return false;

	return IS_ALIGNED(addr, granule);
}

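/* Index of the entry that maps 'data->addr' within a table at 'level'. */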
static u32 kvm_pgtable_idx(struct kvm_pgtable_walk_data *data, u32 level)
{
	u64 shift = kvm_granule_shift(level);
	u64 mask = BIT(PAGE_SHIFT - 3) - 1;

	return (data->addr >> shift) & mask;
}

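/*
 * Index of the page, within a potentially concatenated set of PGD pages,
 * that holds the start-level entry for 'addr'.
 */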
static u32 __kvm_pgd_page_idx(struct kvm_pgtable *pgt, u64 addr)
{
	u64 shift = kvm_granule_shift(pgt->start_level - 1); /* May underflow */
	u64 mask = BIT(pgt->ia_bits) - 1;

	return (addr & mask) >> shift;
}

static u32 kvm_pgd_page_idx(struct kvm_pgtable_walk_data *data)
{
	return __kvm_pgd_page_idx(data->pgt, data->addr);
}

static u32 kvm_pgd_pages(u32 ia_bits, u32 start_level)
{
	struct kvm_pgtable pgt = {
		.ia_bits	= ia_bits,
		.start_level	= start_level,
	};

	return __kvm_pgd_page_idx(&pgt, -1ULL) + 1;
}

static bool kvm_pte_table(kvm_pte_t pte, u32 level)
{
	if (level == KVM_PGTABLE_MAX_LEVELS - 1)
		return false;

	if (!kvm_pte_valid(pte))
		return false;

	return FIELD_GET(KVM_PTE_TYPE, pte) == KVM_PTE_TYPE_TABLE;
}

static kvm_pte_t kvm_phys_to_pte(u64 pa)
{
	kvm_pte_t pte = pa & KVM_PTE_ADDR_MASK;

	if (PAGE_SHIFT == 16)
		pte |= FIELD_PREP(KVM_PTE_ADDR_51_48, pa >> 48);

	return pte;
}

static kvm_pte_t *kvm_pte_follow(kvm_pte_t pte, struct kvm_pgtable_mm_ops *mm_ops)
{
	return mm_ops->phys_to_virt(kvm_pte_to_phys(pte));
}

static void kvm_clear_pte(kvm_pte_t *ptep)
{
	WRITE_ONCE(*ptep, 0);
}

static void kvm_set_table_pte(kvm_pte_t *ptep, kvm_pte_t *childp,
			      struct kvm_pgtable_mm_ops *mm_ops)
{
	kvm_pte_t old = *ptep, pte = kvm_phys_to_pte(mm_ops->virt_to_phys(childp));

	pte |= FIELD_PREP(KVM_PTE_TYPE, KVM_PTE_TYPE_TABLE);
	pte |= KVM_PTE_VALID;

	WARN_ON(kvm_pte_valid(old));
	smp_store_release(ptep, pte);
}

static kvm_pte_t kvm_init_valid_leaf_pte(u64 pa, kvm_pte_t attr, u32 level)
{
	kvm_pte_t pte = kvm_phys_to_pte(pa);
	u64 type = (level == KVM_PGTABLE_MAX_LEVELS - 1) ? KVM_PTE_TYPE_PAGE :
							   KVM_PTE_TYPE_BLOCK;

	pte |= attr & (KVM_PTE_LEAF_ATTR_LO | KVM_PTE_LEAF_ATTR_HI);
	pte |= FIELD_PREP(KVM_PTE_TYPE, type);
	pte |= KVM_PTE_VALID;

	return pte;
}

static kvm_pte_t kvm_init_invalid_leaf_owner(u8 owner_id)
{
	return FIELD_PREP(KVM_INVALID_PTE_OWNER_MASK, owner_id);
}

static int kvm_pgtable_visitor_cb(struct kvm_pgtable_walk_data *data, u64 addr,
				  u32 level, kvm_pte_t *ptep,
				  enum kvm_pgtable_walk_flags flag)
{
	struct kvm_pgtable_walker *walker = data->walker;
	return walker->cb(addr, data->end, level, ptep, flag, walker->arg);
}

static int __kvm_pgtable_walk(struct kvm_pgtable_walk_data *data,
			      kvm_pte_t *pgtable, u32 level);

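/*
 * Visit a single entry: invoke the TABLE_PRE callback for table entries on
 * the way down and the LEAF callback for leaf (or invalid) entries, then
 * recurse into child tables and finally invoke the TABLE_POST callback on
 * the way back up.
 */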
static inline int __kvm_pgtable_visit(struct kvm_pgtable_walk_data *data,
				      kvm_pte_t *ptep, u32 level)
{
	int ret = 0;
	u64 addr = data->addr;
	kvm_pte_t *childp, pte = *ptep;
	bool table = kvm_pte_table(pte, level);
	enum kvm_pgtable_walk_flags flags = data->walker->flags;

	if (table && (flags & KVM_PGTABLE_WALK_TABLE_PRE)) {
		ret = kvm_pgtable_visitor_cb(data, addr, level, ptep,
					     KVM_PGTABLE_WALK_TABLE_PRE);
	}

	if (!table && (flags & KVM_PGTABLE_WALK_LEAF)) {
		ret = kvm_pgtable_visitor_cb(data, addr, level, ptep,
					     KVM_PGTABLE_WALK_LEAF);
		pte = *ptep;
		table = kvm_pte_table(pte, level);
	}

	if (ret)
		goto out;

	if (!table) {
		data->addr = ALIGN_DOWN(data->addr, kvm_granule_size(level));
		data->addr += kvm_granule_size(level);
		goto out;
	}

	childp = kvm_pte_follow(pte, data->pgt->mm_ops);
	ret = __kvm_pgtable_walk(data, childp, level + 1);
	if (ret)
		goto out;

	if (flags & KVM_PGTABLE_WALK_TABLE_POST) {
		ret = kvm_pgtable_visitor_cb(data, addr, level, ptep,
					     KVM_PGTABLE_WALK_TABLE_POST);
	}

out:
	return ret;
}

static int __kvm_pgtable_walk(struct kvm_pgtable_walk_data *data,
			      kvm_pte_t *pgtable, u32 level)
{
	u32 idx;
	int ret = 0;

	if (WARN_ON_ONCE(level >= KVM_PGTABLE_MAX_LEVELS))
		return -EINVAL;

	for (idx = kvm_pgtable_idx(data, level); idx < PTRS_PER_PTE; ++idx) {
		kvm_pte_t *ptep = &pgtable[idx];

		if (data->addr >= data->end)
			break;

		ret = __kvm_pgtable_visit(data, ptep, level);
		if (ret)
			break;
	}

	return ret;
}

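/*
 * Walk every PGD page covering [data->addr, data->end), after checking that
 * the range fits within the configured input-address space.
 */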
static int _kvm_pgtable_walk(struct kvm_pgtable_walk_data *data)
{
	u32 idx;
	int ret = 0;
	struct kvm_pgtable *pgt = data->pgt;
	u64 limit = BIT(pgt->ia_bits);

	if (data->addr > limit || data->end > limit)
		return -ERANGE;

	if (!pgt->pgd)
		return -EINVAL;

	for (idx = kvm_pgd_page_idx(data); data->addr < data->end; ++idx) {
		kvm_pte_t *ptep = &pgt->pgd[idx * PTRS_PER_PTE];

		ret = __kvm_pgtable_walk(data, ptep, pgt->start_level);
		if (ret)
			break;
	}

	return ret;
}

int kvm_pgtable_walk(struct kvm_pgtable *pgt, u64 addr, u64 size,
		     struct kvm_pgtable_walker *walker)
{
	struct kvm_pgtable_walk_data walk_data = {
		.pgt	= pgt,
		.addr	= ALIGN_DOWN(addr, PAGE_SIZE),
		.end	= PAGE_ALIGN(walk_data.addr + size),
		.walker	= walker,
	};

	return _kvm_pgtable_walk(&walk_data);
}

struct leaf_walk_data {
	kvm_pte_t	pte;
	u32		level;
};

static int leaf_walker(u64 addr, u64 end, u32 level, kvm_pte_t *ptep,
		       enum kvm_pgtable_walk_flags flag, void * const arg)
{
	struct leaf_walk_data *data = arg;

	data->pte	= *ptep;
	data->level	= level;

	return 0;
}

int kvm_pgtable_get_leaf(struct kvm_pgtable *pgt, u64 addr,
			 kvm_pte_t *ptep, u32 *level)
{
	struct leaf_walk_data data;
	struct kvm_pgtable_walker walker = {
		.cb	= leaf_walker,
		.flags	= KVM_PGTABLE_WALK_LEAF,
		.arg	= &data,
	};
	int ret;

	ret = kvm_pgtable_walk(pgt, ALIGN_DOWN(addr, PAGE_SIZE),
			       PAGE_SIZE, &walker);
	if (!ret) {
		if (ptep)
			*ptep = data.pte;
		if (level)
			*level = data.level;
	}

	return ret;
}

struct hyp_map_data {
	u64				phys;
	kvm_pte_t			attr;
	struct kvm_pgtable_mm_ops	*mm_ops;
};

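/*
 * Convert KVM_PGTABLE_PROT_* flags into hyp stage-1 attribute bits. Mappings
 * must be readable, and executable mappings can be neither writable nor
 * device memory.
 */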
static int hyp_set_prot_attr(enum kvm_pgtable_prot prot, kvm_pte_t *ptep)
{
	bool device = prot & KVM_PGTABLE_PROT_DEVICE;
	u32 mtype = device ? MT_DEVICE_nGnRE : MT_NORMAL;
	kvm_pte_t attr = FIELD_PREP(KVM_PTE_LEAF_ATTR_LO_S1_ATTRIDX, mtype);
	u32 sh = KVM_PTE_LEAF_ATTR_LO_S1_SH_IS;
	u32 ap = (prot & KVM_PGTABLE_PROT_W) ? KVM_PTE_LEAF_ATTR_LO_S1_AP_RW :
					       KVM_PTE_LEAF_ATTR_LO_S1_AP_RO;

	if (!(prot & KVM_PGTABLE_PROT_R))
		return -EINVAL;

	if (prot & KVM_PGTABLE_PROT_X) {
		if (prot & KVM_PGTABLE_PROT_W)
			return -EINVAL;

		if (device)
			return -EINVAL;
	} else {
		attr |= KVM_PTE_LEAF_ATTR_HI_S1_XN;
	}

	attr |= FIELD_PREP(KVM_PTE_LEAF_ATTR_LO_S1_AP, ap);
	attr |= FIELD_PREP(KVM_PTE_LEAF_ATTR_LO_S1_SH, sh);
	attr |= KVM_PTE_LEAF_ATTR_LO_S1_AF;
	attr |= prot & KVM_PTE_LEAF_ATTR_HI_SW;
	*ptep = attr;

	return 0;
}

enum kvm_pgtable_prot kvm_pgtable_hyp_pte_prot(kvm_pte_t pte)
{
	enum kvm_pgtable_prot prot = pte & KVM_PTE_LEAF_ATTR_HI_SW;
	u32 ap;

	if (!kvm_pte_valid(pte))
		return prot;

	if (!(pte & KVM_PTE_LEAF_ATTR_HI_S1_XN))
		prot |= KVM_PGTABLE_PROT_X;

	ap = FIELD_GET(KVM_PTE_LEAF_ATTR_LO_S1_AP, pte);
	if (ap == KVM_PTE_LEAF_ATTR_LO_S1_AP_RO)
		prot |= KVM_PGTABLE_PROT_R;
	else if (ap == KVM_PTE_LEAF_ATTR_LO_S1_AP_RW)
		prot |= KVM_PGTABLE_PROT_RW;

	return prot;
}

static bool hyp_pte_needs_update(kvm_pte_t old, kvm_pte_t new)
{
	/*
	 * Tolerate KVM recreating the exact same mapping, or changing software
	 * bits if the existing mapping was valid.
	 */
	if (old == new)
		return false;

	if (!kvm_pte_valid(old))
		return true;

	return !WARN_ON((old ^ new) & ~KVM_PTE_LEAF_ATTR_HI_SW);
}

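/*
 * Try to install a leaf (block or page) entry for the current range at this
 * level. Returns true if a leaf was installed or was already present, in
 * which case data->phys is advanced by one granule.
 */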
static bool hyp_map_walker_try_leaf(u64 addr, u64 end, u32 level,
				    kvm_pte_t *ptep, struct hyp_map_data *data)
{
	kvm_pte_t new, old = *ptep;
	u64 granule = kvm_granule_size(level), phys = data->phys;

	if (!kvm_block_mapping_supported(addr, end, phys, level))
		return false;

	new = kvm_init_valid_leaf_pte(phys, data->attr, level);
	if (hyp_pte_needs_update(old, new))
		smp_store_release(ptep, new);

	data->phys += granule;
	return true;
}

static int hyp_map_walker(u64 addr, u64 end, u32 level, kvm_pte_t *ptep,
			  enum kvm_pgtable_walk_flags flag, void * const arg)
{
	kvm_pte_t *childp;
	struct hyp_map_data *data = arg;
	struct kvm_pgtable_mm_ops *mm_ops = data->mm_ops;

	if (hyp_map_walker_try_leaf(addr, end, level, ptep, arg))
		return 0;

	if (WARN_ON(level == KVM_PGTABLE_MAX_LEVELS - 1))
		return -EINVAL;

	childp = (kvm_pte_t *)mm_ops->zalloc_page(NULL);
	if (!childp)
		return -ENOMEM;

	kvm_set_table_pte(ptep, childp, mm_ops);
	return 0;
}

int kvm_pgtable_hyp_map(struct kvm_pgtable *pgt, u64 addr, u64 size, u64 phys,
			enum kvm_pgtable_prot prot)
{
	int ret;
	struct hyp_map_data map_data = {
		.phys	= ALIGN_DOWN(phys, PAGE_SIZE),
		.mm_ops	= pgt->mm_ops,
	};
	struct kvm_pgtable_walker walker = {
		.cb	= hyp_map_walker,
		.flags	= KVM_PGTABLE_WALK_LEAF,
		.arg	= &map_data,
	};

	ret = hyp_set_prot_attr(prot, &map_data.attr);
	if (ret)
		return ret;

	ret = kvm_pgtable_walk(pgt, addr, size, &walker);
	dsb(ishst);
	isb();
	return ret;
}

int kvm_pgtable_hyp_init(struct kvm_pgtable *pgt, u32 va_bits,
			 struct kvm_pgtable_mm_ops *mm_ops)
{
	u64 levels = ARM64_HW_PGTABLE_LEVELS(va_bits);

	pgt->pgd = (kvm_pte_t *)mm_ops->zalloc_page(NULL);
	if (!pgt->pgd)
		return -ENOMEM;

	pgt->ia_bits		= va_bits;
	pgt->start_level	= KVM_PGTABLE_MAX_LEVELS - levels;
	pgt->mm_ops		= mm_ops;
	pgt->mmu		= NULL;
	pgt->force_pte_cb	= NULL;

	return 0;
}

static int hyp_free_walker(u64 addr, u64 end, u32 level, kvm_pte_t *ptep,
			   enum kvm_pgtable_walk_flags flag, void * const arg)
{
	struct kvm_pgtable_mm_ops *mm_ops = arg;

	mm_ops->put_page((void *)kvm_pte_follow(*ptep, mm_ops));
	return 0;
}

void kvm_pgtable_hyp_destroy(struct kvm_pgtable *pgt)
{
	struct kvm_pgtable_walker walker = {
		.cb	= hyp_free_walker,
		.flags	= KVM_PGTABLE_WALK_TABLE_POST,
		.arg	= pgt->mm_ops,
	};

	WARN_ON(kvm_pgtable_walk(pgt, 0, BIT(pgt->ia_bits), &walker));
	pgt->mm_ops->put_page(pgt->pgd);
	pgt->pgd = NULL;
}

struct stage2_map_data {
	u64				phys;
	kvm_pte_t			attr;
	u8				owner_id;

	kvm_pte_t			*anchor;
	kvm_pte_t			*childp;

	struct kvm_s2_mmu		*mmu;
	void				*memcache;

	struct kvm_pgtable_mm_ops	*mm_ops;

	/* Force mappings to page granularity */
	bool				force_pte;
};

u64 kvm_get_vtcr(u64 mmfr0, u64 mmfr1, u32 phys_shift)
{
	u64 vtcr = VTCR_EL2_FLAGS;
	u8 lvls;

	vtcr |= kvm_get_parange(mmfr0) << VTCR_EL2_PS_SHIFT;
	vtcr |= VTCR_EL2_T0SZ(phys_shift);
	/*
	 * Use a minimum 2 level page table to prevent splitting
	 * host PMD huge pages at stage2.
	 */
	lvls = stage2_pgtable_levels(phys_shift);
	if (lvls < 2)
		lvls = 2;
	vtcr |= VTCR_EL2_LVLS_TO_SL0(lvls);

	/*
	 * Enable the Hardware Access Flag management, unconditionally
	 * on all CPUs. The feature is RES0 on CPUs without the support
	 * and must be ignored by those CPUs.
	 */
	vtcr |= VTCR_EL2_HA;

	/* Set the vmid bits */
	vtcr |= (get_vmid_bits(mmfr1) == 16) ?
		VTCR_EL2_VS_16BIT :
		VTCR_EL2_VS_8BIT;

	return vtcr;
}

static bool stage2_has_fwb(struct kvm_pgtable *pgt)
{
	if (!cpus_have_const_cap(ARM64_HAS_STAGE2_FWB))
		return false;

	return !(pgt->flags & KVM_PGTABLE_S2_NOFWB);
}

#define KVM_S2_MEMATTR(pgt, attr) PAGE_S2_MEMATTR(attr, stage2_has_fwb(pgt))

static int stage2_set_prot_attr(struct kvm_pgtable *pgt, enum kvm_pgtable_prot prot,
				kvm_pte_t *ptep)
{
	bool device = prot & KVM_PGTABLE_PROT_DEVICE;
	kvm_pte_t attr = device ? KVM_S2_MEMATTR(pgt, DEVICE_nGnRE) :
				  KVM_S2_MEMATTR(pgt, NORMAL);
	u32 sh = KVM_PTE_LEAF_ATTR_LO_S2_SH_IS;

	if (!(prot & KVM_PGTABLE_PROT_X))
		attr |= KVM_PTE_LEAF_ATTR_HI_S2_XN;
	else if (device)
		return -EINVAL;

	if (prot & KVM_PGTABLE_PROT_R)
		attr |= KVM_PTE_LEAF_ATTR_LO_S2_S2AP_R;

	if (prot & KVM_PGTABLE_PROT_W)
		attr |= KVM_PTE_LEAF_ATTR_LO_S2_S2AP_W;

	attr |= FIELD_PREP(KVM_PTE_LEAF_ATTR_LO_S2_SH, sh);
	attr |= KVM_PTE_LEAF_ATTR_LO_S2_AF;
	attr |= prot & KVM_PTE_LEAF_ATTR_HI_SW;
	*ptep = attr;

	return 0;
}

enum kvm_pgtable_prot kvm_pgtable_stage2_pte_prot(kvm_pte_t pte)
{
	enum kvm_pgtable_prot prot = pte & KVM_PTE_LEAF_ATTR_HI_SW;

	if (!kvm_pte_valid(pte))
		return prot;

	if (pte & KVM_PTE_LEAF_ATTR_LO_S2_S2AP_R)
		prot |= KVM_PGTABLE_PROT_R;
	if (pte & KVM_PTE_LEAF_ATTR_LO_S2_S2AP_W)
		prot |= KVM_PGTABLE_PROT_W;
	if (!(pte & KVM_PTE_LEAF_ATTR_HI_S2_XN))
		prot |= KVM_PGTABLE_PROT_X;

	return prot;
}

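/*
 * A valid PTE only needs to be replaced (with break-before-make) if bits
 * other than the access permissions or XN differ between the old and new
 * entries.
 */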
static bool stage2_pte_needs_update(kvm_pte_t old, kvm_pte_t new)
{
	if (!kvm_pte_valid(old) || !kvm_pte_valid(new))
		return true;

	return ((old ^ new) & (~KVM_PTE_LEAF_ATTR_S2_PERMS));
}

static bool stage2_pte_is_counted(kvm_pte_t pte)
{
	/*
	 * The refcount tracks valid entries as well as invalid entries if they
	 * encode ownership of a page by an entity other than the page-table
	 * owner, whose id is 0.
	 */
	return !!pte;
}

static void stage2_put_pte(kvm_pte_t *ptep, struct kvm_s2_mmu *mmu, u64 addr,
			   u32 level, struct kvm_pgtable_mm_ops *mm_ops)
{
	/*
	 * Clear the existing PTE, and perform break-before-make with
	 * TLB maintenance if it was valid.
	 */
	if (kvm_pte_valid(*ptep)) {
		kvm_clear_pte(ptep);
		kvm_call_hyp(__kvm_tlb_flush_vmid_ipa, mmu, addr, level);
	}

	mm_ops->put_page(ptep);
}

static bool stage2_pte_cacheable(struct kvm_pgtable *pgt, kvm_pte_t pte)
{
	u64 memattr = pte & KVM_PTE_LEAF_ATTR_LO_S2_MEMATTR;
	return memattr == KVM_S2_MEMATTR(pgt, NORMAL);
}

static bool stage2_pte_executable(kvm_pte_t pte)
{
	return !(pte & KVM_PTE_LEAF_ATTR_HI_S2_XN);
}

static bool stage2_leaf_mapping_allowed(u64 addr, u64 end, u32 level,
					struct stage2_map_data *data)
{
	if (data->force_pte && (level < (KVM_PGTABLE_MAX_LEVELS - 1)))
		return false;

	return kvm_block_mapping_supported(addr, end, data->phys, level);
}

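/*
 * Install a leaf entry at this level: either a valid mapping or an invalid
 * entry that records the owner of the page. Returns -E2BIG if a leaf cannot
 * be used here, and -EAGAIN if the existing entry is the same or differs
 * only in its permissions, leaving permission changes to the relax path.
 */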
static int stage2_map_walker_try_leaf(u64 addr, u64 end, u32 level,
				      kvm_pte_t *ptep,
				      struct stage2_map_data *data)
{
	kvm_pte_t new, old = *ptep;
	u64 granule = kvm_granule_size(level), phys = data->phys;
	struct kvm_pgtable *pgt = data->mmu->pgt;
	struct kvm_pgtable_mm_ops *mm_ops = data->mm_ops;

	if (!stage2_leaf_mapping_allowed(addr, end, level, data))
		return -E2BIG;

	if (kvm_phys_is_valid(phys))
		new = kvm_init_valid_leaf_pte(phys, data->attr, level);
	else
		new = kvm_init_invalid_leaf_owner(data->owner_id);

	if (stage2_pte_is_counted(old)) {
		/*
		 * Skip updating the PTE if we are trying to recreate the exact
		 * same mapping or only change the access permissions. Instead,
		 * the vCPU will exit one more time from guest if still needed
		 * and then go through the path of relaxing permissions.
		 */
		if (!stage2_pte_needs_update(old, new))
			return -EAGAIN;

		stage2_put_pte(ptep, data->mmu, addr, level, mm_ops);
	}

	/* Perform CMOs before installation of the guest stage-2 PTE */
	if (mm_ops->dcache_clean_inval_poc && stage2_pte_cacheable(pgt, new))
		mm_ops->dcache_clean_inval_poc(kvm_pte_follow(new, mm_ops),
					       granule);

	if (mm_ops->icache_inval_pou && stage2_pte_executable(new))
		mm_ops->icache_inval_pou(kvm_pte_follow(new, mm_ops), granule);

	smp_store_release(ptep, new);
	if (stage2_pte_is_counted(new))
		mm_ops->get_page(ptep);
	if (kvm_phys_is_valid(phys))
		data->phys += granule;
	return 0;
}

static int stage2_map_walk_table_pre(u64 addr, u64 end, u32 level,
				     kvm_pte_t *ptep,
				     struct stage2_map_data *data)
{
	if (data->anchor)
		return 0;

	if (!stage2_leaf_mapping_allowed(addr, end, level, data))
		return 0;

	data->childp = kvm_pte_follow(*ptep, data->mm_ops);
	kvm_clear_pte(ptep);

	/*
	 * Invalidate the whole stage-2, as we may have numerous leaf
	 * entries below us which would otherwise need invalidating
	 * individually.
	 */
	kvm_call_hyp(__kvm_tlb_flush_vmid, data->mmu);
	data->anchor = ptep;
	return 0;
}

static int stage2_map_walk_leaf(u64 addr, u64 end, u32 level, kvm_pte_t *ptep,
				struct stage2_map_data *data)
{
	struct kvm_pgtable_mm_ops *mm_ops = data->mm_ops;
	kvm_pte_t *childp, pte = *ptep;
	int ret;

	if (data->anchor) {
		if (stage2_pte_is_counted(pte))
			mm_ops->put_page(ptep);

		return 0;
	}

	ret = stage2_map_walker_try_leaf(addr, end, level, ptep, data);
	if (ret != -E2BIG)
		return ret;

	if (WARN_ON(level == KVM_PGTABLE_MAX_LEVELS - 1))
		return -EINVAL;

	if (!data->memcache)
		return -ENOMEM;

	childp = mm_ops->zalloc_page(data->memcache);
	if (!childp)
		return -ENOMEM;

	/*
	 * If we've run into an existing block mapping then replace it with
	 * a table. Accesses beyond 'end' that fall within the new table
	 * will be mapped lazily.
	 */
	if (stage2_pte_is_counted(pte))
		stage2_put_pte(ptep, data->mmu, addr, level, mm_ops);

	kvm_set_table_pte(ptep, childp, mm_ops);
	mm_ops->get_page(ptep);

	return 0;
}

static int stage2_map_walk_table_post(u64 addr, u64 end, u32 level,
				      kvm_pte_t *ptep,
				      struct stage2_map_data *data)
{
	struct kvm_pgtable_mm_ops *mm_ops = data->mm_ops;
	kvm_pte_t *childp;
	int ret = 0;

	if (!data->anchor)
		return 0;

	if (data->anchor == ptep) {
		childp = data->childp;
		data->anchor = NULL;
		data->childp = NULL;
		ret = stage2_map_walk_leaf(addr, end, level, ptep, data);
	} else {
		childp = kvm_pte_follow(*ptep, mm_ops);
	}

	mm_ops->put_page(childp);
	mm_ops->put_page(ptep);

	return ret;
}

/*
 * This is a little fiddly, as we use all three of the walk flags. The idea
 * is that the TABLE_PRE callback runs for table entries on the way down,
 * looking for table entries which we could conceivably replace with a
 * block entry for this mapping. If it finds one, then it sets the 'anchor'
 * field in 'struct stage2_map_data' to point at the table entry, before
 * clearing the entry to zero and descending into the now detached table.
 *
 * The behaviour of the LEAF callback then depends on whether or not the
 * anchor has been set. If not, then we're not using a block mapping higher
 * up the table and we perform the mapping at the existing leaves instead.
 * If, on the other hand, the anchor _is_ set, then we drop references to
 * all valid leaves so that the pages beneath the anchor can be freed.
 *
 * Finally, the TABLE_POST callback does nothing if the anchor has not
 * been set, but otherwise frees the page-table pages while walking back up
 * the page-table, installing the block entry when it revisits the anchor
 * pointer and clearing the anchor to NULL.
 */
static int stage2_map_walker(u64 addr, u64 end, u32 level, kvm_pte_t *ptep,
			     enum kvm_pgtable_walk_flags flag, void * const arg)
{
	struct stage2_map_data *data = arg;

	switch (flag) {
	case KVM_PGTABLE_WALK_TABLE_PRE:
		return stage2_map_walk_table_pre(addr, end, level, ptep, data);
	case KVM_PGTABLE_WALK_LEAF:
		return stage2_map_walk_leaf(addr, end, level, ptep, data);
	case KVM_PGTABLE_WALK_TABLE_POST:
		return stage2_map_walk_table_post(addr, end, level, ptep, data);
	}

	return -EINVAL;
}

int kvm_pgtable_stage2_map(struct kvm_pgtable *pgt, u64 addr, u64 size,
			   u64 phys, enum kvm_pgtable_prot prot,
			   void *mc)
{
	int ret;
	struct stage2_map_data map_data = {
		.phys		= ALIGN_DOWN(phys, PAGE_SIZE),
		.mmu		= pgt->mmu,
		.memcache	= mc,
		.mm_ops		= pgt->mm_ops,
		.force_pte	= pgt->force_pte_cb && pgt->force_pte_cb(addr, addr + size, prot),
	};
	struct kvm_pgtable_walker walker = {
		.cb		= stage2_map_walker,
		.flags		= KVM_PGTABLE_WALK_TABLE_PRE |
				  KVM_PGTABLE_WALK_LEAF |
				  KVM_PGTABLE_WALK_TABLE_POST,
		.arg		= &map_data,
	};

	if (WARN_ON((pgt->flags & KVM_PGTABLE_S2_IDMAP) && (addr != phys)))
		return -EINVAL;

	ret = stage2_set_prot_attr(pgt, prot, &map_data.attr);
	if (ret)
		return ret;

	ret = kvm_pgtable_walk(pgt, addr, size, &walker);
	dsb(ishst);
	return ret;
}

int kvm_pgtable_stage2_set_owner(struct kvm_pgtable *pgt, u64 addr, u64 size,
				 void *mc, u8 owner_id)
{
	int ret;
	struct stage2_map_data map_data = {
		.phys		= KVM_PHYS_INVALID,
		.mmu		= pgt->mmu,
		.memcache	= mc,
		.mm_ops		= pgt->mm_ops,
		.owner_id	= owner_id,
		.force_pte	= true,
	};
	struct kvm_pgtable_walker walker = {
		.cb		= stage2_map_walker,
		.flags		= KVM_PGTABLE_WALK_TABLE_PRE |
				  KVM_PGTABLE_WALK_LEAF |
				  KVM_PGTABLE_WALK_TABLE_POST,
		.arg		= &map_data,
	};

	if (owner_id > KVM_MAX_OWNER_ID)
		return -EINVAL;

	ret = kvm_pgtable_walk(pgt, addr, size, &walker);
	return ret;
}

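/*
 * Unmap a leaf entry, or free a table page once no counted entries remain
 * in it, using break-before-make. When FWB is not in use, cacheable data is
 * cleaned and invalidated to the PoC.
 */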
static int stage2_unmap_walker(u64 addr, u64 end, u32 level, kvm_pte_t *ptep,
			       enum kvm_pgtable_walk_flags flag,
			       void * const arg)
{
	struct kvm_pgtable *pgt = arg;
	struct kvm_s2_mmu *mmu = pgt->mmu;
	struct kvm_pgtable_mm_ops *mm_ops = pgt->mm_ops;
	kvm_pte_t pte = *ptep, *childp = NULL;
	bool need_flush = false;

	if (!kvm_pte_valid(pte)) {
		if (stage2_pte_is_counted(pte)) {
			kvm_clear_pte(ptep);
			mm_ops->put_page(ptep);
		}
		return 0;
	}

	if (kvm_pte_table(pte, level)) {
		childp = kvm_pte_follow(pte, mm_ops);

		if (mm_ops->page_count(childp) != 1)
			return 0;
	} else if (stage2_pte_cacheable(pgt, pte)) {
		need_flush = !stage2_has_fwb(pgt);
	}

	/*
	 * This is similar to the map() path in that we unmap the entire
	 * block entry and rely on the remaining portions being faulted
	 * back lazily.
	 */
	stage2_put_pte(ptep, mmu, addr, level, mm_ops);

	if (need_flush && mm_ops->dcache_clean_inval_poc)
		mm_ops->dcache_clean_inval_poc(kvm_pte_follow(pte, mm_ops),
					       kvm_granule_size(level));

	if (childp)
		mm_ops->put_page(childp);

	return 0;
}

int kvm_pgtable_stage2_unmap(struct kvm_pgtable *pgt, u64 addr, u64 size)
{
	struct kvm_pgtable_walker walker = {
		.cb	= stage2_unmap_walker,
		.arg	= pgt,
		.flags	= KVM_PGTABLE_WALK_LEAF | KVM_PGTABLE_WALK_TABLE_POST,
	};

	return kvm_pgtable_walk(pgt, addr, size, &walker);
}

struct stage2_attr_data {
	kvm_pte_t			attr_set;
	kvm_pte_t			attr_clr;
	kvm_pte_t			pte;
	u32				level;
	struct kvm_pgtable_mm_ops	*mm_ops;
};

static int stage2_attr_walker(u64 addr, u64 end, u32 level, kvm_pte_t *ptep,
			      enum kvm_pgtable_walk_flags flag,
			      void * const arg)
{
	kvm_pte_t pte = *ptep;
	struct stage2_attr_data *data = arg;
	struct kvm_pgtable_mm_ops *mm_ops = data->mm_ops;

	if (!kvm_pte_valid(pte))
		return 0;

	data->level = level;
	data->pte = pte;
	pte &= ~data->attr_clr;
	pte |= data->attr_set;

	/*
	 * We may race with the CPU trying to set the access flag here,
	 * but worst-case the access flag update gets lost and will be
	 * set on the next access instead.
	 */
	if (data->pte != pte) {
		/*
		 * Invalidate instruction cache before updating the guest
		 * stage-2 PTE if we are going to add executable permission.
		 */
		if (mm_ops->icache_inval_pou &&
		    stage2_pte_executable(pte) && !stage2_pte_executable(*ptep))
			mm_ops->icache_inval_pou(kvm_pte_follow(pte, mm_ops),
						 kvm_granule_size(level));
		WRITE_ONCE(*ptep, pte);
	}

	return 0;
}

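/*
 * Walk [addr, addr + size) and update the attribute bits of any valid leaf
 * entries: 'attr_set' bits are set and 'attr_clr' bits are cleared, limited
 * to the architectural attribute fields. The original PTE and its level are
 * returned via 'orig_pte' and 'level' when requested.
 */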
static int stage2_update_leaf_attrs(struct kvm_pgtable *pgt, u64 addr,
				    u64 size, kvm_pte_t attr_set,
				    kvm_pte_t attr_clr, kvm_pte_t *orig_pte,
				    u32 *level)
{
	int ret;
	kvm_pte_t attr_mask = KVM_PTE_LEAF_ATTR_LO | KVM_PTE_LEAF_ATTR_HI;
	struct stage2_attr_data data = {
		.attr_set	= attr_set & attr_mask,
		.attr_clr	= attr_clr & attr_mask,
		.mm_ops		= pgt->mm_ops,
	};
	struct kvm_pgtable_walker walker = {
		.cb		= stage2_attr_walker,
		.arg		= &data,
		.flags		= KVM_PGTABLE_WALK_LEAF,
	};

	ret = kvm_pgtable_walk(pgt, addr, size, &walker);
	if (ret)
		return ret;

	if (orig_pte)
		*orig_pte = data.pte;

	if (level)
		*level = data.level;
	return 0;
}

int kvm_pgtable_stage2_wrprotect(struct kvm_pgtable *pgt, u64 addr, u64 size)
{
	return stage2_update_leaf_attrs(pgt, addr, size, 0,
					KVM_PTE_LEAF_ATTR_LO_S2_S2AP_W,
					NULL, NULL);
}

kvm_pte_t kvm_pgtable_stage2_mkyoung(struct kvm_pgtable *pgt, u64 addr)
{
	kvm_pte_t pte = 0;
	stage2_update_leaf_attrs(pgt, addr, 1, KVM_PTE_LEAF_ATTR_LO_S2_AF, 0,
				 &pte, NULL);
	dsb(ishst);
	return pte;
}

kvm_pte_t kvm_pgtable_stage2_mkold(struct kvm_pgtable *pgt, u64 addr)
{
	kvm_pte_t pte = 0;
	stage2_update_leaf_attrs(pgt, addr, 1, 0, KVM_PTE_LEAF_ATTR_LO_S2_AF,
				 &pte, NULL);
	/*
	 * "But where's the TLBI?!", you scream.
	 * "Over in the core code", I sigh.
	 *
	 * See the '->clear_flush_young()' callback on the KVM mmu notifier.
	 */
	return pte;
}

bool kvm_pgtable_stage2_is_young(struct kvm_pgtable *pgt, u64 addr)
{
	kvm_pte_t pte = 0;
	stage2_update_leaf_attrs(pgt, addr, 1, 0, 0, &pte, NULL);
	return pte & KVM_PTE_LEAF_ATTR_LO_S2_AF;
}

int kvm_pgtable_stage2_relax_perms(struct kvm_pgtable *pgt, u64 addr,
				   enum kvm_pgtable_prot prot)
{
	int ret;
	u32 level;
	kvm_pte_t set = 0, clr = 0;

	if (prot & KVM_PTE_LEAF_ATTR_HI_SW)
		return -EINVAL;

	if (prot & KVM_PGTABLE_PROT_R)
		set |= KVM_PTE_LEAF_ATTR_LO_S2_S2AP_R;

	if (prot & KVM_PGTABLE_PROT_W)
		set |= KVM_PTE_LEAF_ATTR_LO_S2_S2AP_W;

	if (prot & KVM_PGTABLE_PROT_X)
		clr |= KVM_PTE_LEAF_ATTR_HI_S2_XN;

	ret = stage2_update_leaf_attrs(pgt, addr, 1, set, clr, NULL, &level);
	if (!ret)
		kvm_call_hyp(__kvm_tlb_flush_vmid_ipa, pgt->mmu, addr, level);
	return ret;
}

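/*
 * Clean and invalidate cacheable leaf mappings to the PoC. Only reached when
 * FWB is unavailable, since kvm_pgtable_stage2_flush() bails out early
 * otherwise.
 */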
static int stage2_flush_walker(u64 addr, u64 end, u32 level, kvm_pte_t *ptep,
			       enum kvm_pgtable_walk_flags flag,
			       void * const arg)
{
	struct kvm_pgtable *pgt = arg;
	struct kvm_pgtable_mm_ops *mm_ops = pgt->mm_ops;
	kvm_pte_t pte = *ptep;

	if (!kvm_pte_valid(pte) || !stage2_pte_cacheable(pgt, pte))
		return 0;

	if (mm_ops->dcache_clean_inval_poc)
		mm_ops->dcache_clean_inval_poc(kvm_pte_follow(pte, mm_ops),
					       kvm_granule_size(level));
	return 0;
}

int kvm_pgtable_stage2_flush(struct kvm_pgtable *pgt, u64 addr, u64 size)
{
	struct kvm_pgtable_walker walker = {
		.cb	= stage2_flush_walker,
		.flags	= KVM_PGTABLE_WALK_LEAF,
		.arg	= pgt,
	};

	if (stage2_has_fwb(pgt))
		return 0;

	return kvm_pgtable_walk(pgt, addr, size, &walker);
}


int __kvm_pgtable_stage2_init(struct kvm_pgtable *pgt, struct kvm_arch *arch,
			      struct kvm_pgtable_mm_ops *mm_ops,
			      enum kvm_pgtable_stage2_flags flags,
			      kvm_pgtable_force_pte_cb_t force_pte_cb)
{
	size_t pgd_sz;
	u64 vtcr = arch->vtcr;
	u32 ia_bits = VTCR_EL2_IPA(vtcr);
	u32 sl0 = FIELD_GET(VTCR_EL2_SL0_MASK, vtcr);
	u32 start_level = VTCR_EL2_TGRAN_SL0_BASE - sl0;

	pgd_sz = kvm_pgd_pages(ia_bits, start_level) * PAGE_SIZE;
	pgt->pgd = mm_ops->zalloc_pages_exact(pgd_sz);
	if (!pgt->pgd)
		return -ENOMEM;

	pgt->ia_bits		= ia_bits;
	pgt->start_level	= start_level;
	pgt->mm_ops		= mm_ops;
	pgt->mmu		= &arch->mmu;
	pgt->flags		= flags;
	pgt->force_pte_cb	= force_pte_cb;

	/* Ensure zeroed PGD pages are visible to the hardware walker */
	dsb(ishst);
	return 0;
}

static int stage2_free_walker(u64 addr, u64 end, u32 level, kvm_pte_t *ptep,
			      enum kvm_pgtable_walk_flags flag,
			      void * const arg)
{
	struct kvm_pgtable_mm_ops *mm_ops = arg;
	kvm_pte_t pte = *ptep;

	if (!stage2_pte_is_counted(pte))
		return 0;

	mm_ops->put_page(ptep);

	if (kvm_pte_table(pte, level))
		mm_ops->put_page(kvm_pte_follow(pte, mm_ops));

	return 0;
}

void kvm_pgtable_stage2_destroy(struct kvm_pgtable *pgt)
{
	size_t pgd_sz;
	struct kvm_pgtable_walker walker = {
		.cb	= stage2_free_walker,
		.flags	= KVM_PGTABLE_WALK_LEAF |
			  KVM_PGTABLE_WALK_TABLE_POST,
		.arg	= pgt->mm_ops,
	};

	WARN_ON(kvm_pgtable_walk(pgt, 0, BIT(pgt->ia_bits), &walker));
	pgd_sz = kvm_pgd_pages(pgt->ia_bits, pgt->start_level) * PAGE_SIZE;
	pgt->mm_ops->free_pages_exact(pgt->pgd, pgd_sz);
	pgt->pgd = NULL;
}