1 /*
2 * KVM guest address space mapping code
3 *
4 * Copyright IBM Corp. 2007, 2016
5 * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
6 */
7
8 #include <linux/kernel.h>
9 #include <linux/mm.h>
10 #include <linux/swap.h>
11 #include <linux/smp.h>
12 #include <linux/spinlock.h>
13 #include <linux/slab.h>
14 #include <linux/swapops.h>
15 #include <linux/ksm.h>
16 #include <linux/mman.h>
17
18 #include <asm/pgtable.h>
19 #include <asm/pgalloc.h>
20 #include <asm/gmap.h>
21 #include <asm/tlb.h>
22
23 /**
24 * gmap_alloc - allocate and initialize a guest address space
26 * @limit: maximum address of the gmap address space
27 *
28 * Returns a guest address space structure.
29 */
30 static struct gmap *gmap_alloc(unsigned long limit)
31 {
32 struct gmap *gmap;
33 struct page *page;
34 unsigned long *table;
35 unsigned long etype, atype;
36
37 if (limit < (1UL << 31)) {
38 limit = (1UL << 31) - 1;
39 atype = _ASCE_TYPE_SEGMENT;
40 etype = _SEGMENT_ENTRY_EMPTY;
41 } else if (limit < (1UL << 42)) {
42 limit = (1UL << 42) - 1;
43 atype = _ASCE_TYPE_REGION3;
44 etype = _REGION3_ENTRY_EMPTY;
45 } else if (limit < (1UL << 53)) {
46 limit = (1UL << 53) - 1;
47 atype = _ASCE_TYPE_REGION2;
48 etype = _REGION2_ENTRY_EMPTY;
49 } else {
50 limit = -1UL;
51 atype = _ASCE_TYPE_REGION1;
52 etype = _REGION1_ENTRY_EMPTY;
53 }
54 gmap = kzalloc(sizeof(struct gmap), GFP_KERNEL);
55 if (!gmap)
56 goto out;
57 INIT_LIST_HEAD(&gmap->crst_list);
58 INIT_LIST_HEAD(&gmap->children);
59 INIT_LIST_HEAD(&gmap->pt_list);
60 INIT_RADIX_TREE(&gmap->guest_to_host, GFP_KERNEL);
61 INIT_RADIX_TREE(&gmap->host_to_guest, GFP_ATOMIC);
62 INIT_RADIX_TREE(&gmap->host_to_rmap, GFP_ATOMIC);
63 spin_lock_init(&gmap->guest_table_lock);
64 spin_lock_init(&gmap->shadow_lock);
65 atomic_set(&gmap->ref_count, 1);
66 page = alloc_pages(GFP_KERNEL, 2);
67 if (!page)
68 goto out_free;
69 page->index = 0;
70 list_add(&page->lru, &gmap->crst_list);
71 table = (unsigned long *) page_to_phys(page);
72 crst_table_init(table, etype);
73 gmap->table = table;
74 gmap->asce = atype | _ASCE_TABLE_LENGTH |
75 _ASCE_USER_BITS | __pa(table);
76 gmap->asce_end = limit;
77 return gmap;
78
79 out_free:
80 kfree(gmap);
81 out:
82 return NULL;
83 }
84
85 /**
86 * gmap_create - create a guest address space
87 * @mm: pointer to the parent mm_struct
88 * @limit: maximum size of the gmap address space
89 *
90 * Returns a guest address space structure.
91 */
92 struct gmap *gmap_create(struct mm_struct *mm, unsigned long limit)
93 {
94 struct gmap *gmap;
95
96 gmap = gmap_alloc(limit);
97 if (!gmap)
98 return NULL;
99 gmap->mm = mm;
100 spin_lock(&mm->context.gmap_lock);
101 list_add_rcu(&gmap->list, &mm->context.gmap_list);
102 spin_unlock(&mm->context.gmap_lock);
103 return gmap;
104 }
105 EXPORT_SYMBOL_GPL(gmap_create);
106
107 static void gmap_flush_tlb(struct gmap *gmap)
108 {
109 if (MACHINE_HAS_IDTE)
110 __tlb_flush_asce(gmap->mm, gmap->asce);
111 else
112 __tlb_flush_global();
113 }
114
115 static void gmap_radix_tree_free(struct radix_tree_root *root)
116 {
117 struct radix_tree_iter iter;
118 unsigned long indices[16];
119 unsigned long index;
120 void **slot;
121 int i, nr;
122
123 /* A radix tree is freed by deleting all of its entries */
124 index = 0;
125 do {
126 nr = 0;
127 radix_tree_for_each_slot(slot, root, &iter, index) {
128 indices[nr] = iter.index;
129 if (++nr == 16)
130 break;
131 }
132 for (i = 0; i < nr; i++) {
133 index = indices[i];
134 radix_tree_delete(root, index);
135 }
136 } while (nr > 0);
137 }
138
139 static void gmap_rmap_radix_tree_free(struct radix_tree_root *root)
140 {
141 struct gmap_rmap *rmap, *rnext, *head;
142 struct radix_tree_iter iter;
143 unsigned long indices[16];
144 unsigned long index;
145 void **slot;
146 int i, nr;
147
148 /* A radix tree is freed by deleting all of its entries */
149 index = 0;
150 do {
151 nr = 0;
152 radix_tree_for_each_slot(slot, root, &iter, index) {
153 indices[nr] = iter.index;
154 if (++nr == 16)
155 break;
156 }
157 for (i = 0; i < nr; i++) {
158 index = indices[i];
159 head = radix_tree_delete(root, index);
160 gmap_for_each_rmap_safe(rmap, rnext, head)
161 kfree(rmap);
162 }
163 } while (nr > 0);
164 }
165
166 /**
167 * gmap_free - free a guest address space
168 * @gmap: pointer to the guest address space structure
169 *
170 * No locks required. There are no references to this gmap anymore.
171 */
172 static void gmap_free(struct gmap *gmap)
173 {
174 struct page *page, *next;
175
176 /* Free all segment & region tables. */
177 list_for_each_entry_safe(page, next, &gmap->crst_list, lru)
178 __free_pages(page, 2);
179 gmap_radix_tree_free(&gmap->guest_to_host);
180 gmap_radix_tree_free(&gmap->host_to_guest);
181
182 /* Free additional data for a shadow gmap */
183 if (gmap_is_shadow(gmap)) {
184 /* Free all page tables. */
185 list_for_each_entry_safe(page, next, &gmap->pt_list, lru)
186 page_table_free_pgste(page);
187 gmap_rmap_radix_tree_free(&gmap->host_to_rmap);
188 /* Release reference to the parent */
189 gmap_put(gmap->parent);
190 }
191
192 kfree(gmap);
193 }
194
195 /**
196 * gmap_get - increase reference counter for guest address space
197 * @gmap: pointer to the guest address space structure
198 *
199 * Returns the gmap pointer
200 */
201 struct gmap *gmap_get(struct gmap *gmap)
202 {
203 atomic_inc(&gmap->ref_count);
204 return gmap;
205 }
206 EXPORT_SYMBOL_GPL(gmap_get);
207
208 /**
209 * gmap_put - decrease reference counter for guest address space
210 * @gmap: pointer to the guest address space structure
211 *
212 * If the reference counter reaches zero the guest address space is freed.
213 */
214 void gmap_put(struct gmap *gmap)
215 {
216 if (atomic_dec_return(&gmap->ref_count) == 0)
217 gmap_free(gmap);
218 }
219 EXPORT_SYMBOL_GPL(gmap_put);
220
221 /**
222 * gmap_remove - remove a guest address space but do not free it yet
223 * @gmap: pointer to the guest address space structure
224 */
225 void gmap_remove(struct gmap *gmap)
226 {
227 struct gmap *sg, *next;
228
229 /* Flush tlb. */
230 gmap_flush_tlb(gmap);
231 /* Remove all shadow gmaps linked to this gmap */
232 if (!list_empty(&gmap->children)) {
233 spin_lock(&gmap->shadow_lock);
234 list_for_each_entry_safe(sg, next, &gmap->children, list) {
235 gmap_flush_tlb(sg);
236 list_del(&sg->list);
237 gmap_put(sg);
238 }
239 spin_unlock(&gmap->shadow_lock);
240 }
241 /* Remove gmap from the per-mm list */
242 spin_lock(&gmap->mm->context.gmap_lock);
243 list_del_rcu(&gmap->list);
244 spin_unlock(&gmap->mm->context.gmap_lock);
245 synchronize_rcu();
246 /* Put reference */
247 gmap_put(gmap);
248 }
249 EXPORT_SYMBOL_GPL(gmap_remove);
250
251 /**
252 * gmap_enable - switch primary space to the guest address space
253 * @gmap: pointer to the guest address space structure
254 */
255 void gmap_enable(struct gmap *gmap)
256 {
257 S390_lowcore.gmap = (unsigned long) gmap;
258 }
259 EXPORT_SYMBOL_GPL(gmap_enable);
260
261 /**
262 * gmap_disable - switch back to the standard primary address space
263 * @gmap: pointer to the guest address space structure
264 */
265 void gmap_disable(struct gmap *gmap)
266 {
267 S390_lowcore.gmap = 0UL;
268 }
269 EXPORT_SYMBOL_GPL(gmap_disable);
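
/*
 * Illustrative sketch, not part of the original file: how a hypothetical
 * KVM-style caller might drive the gmap life cycle defined above. The
 * function name and the example limit are made up.
 */
static inline int gmap_lifecycle_example(struct mm_struct *mm)
{
	struct gmap *guest;

	/* (1UL << 44) - 1 selects a region-2 type ASCE in gmap_alloc() */
	guest = gmap_create(mm, (1UL << 44) - 1);
	if (!guest)
		return -ENOMEM;
	gmap_enable(guest);	/* make this the current guest ASCE */
	/* ... enter SIE and run the guest ... */
	gmap_disable(guest);
	gmap_remove(guest);	/* unlink and drop the initial reference */
	return 0;
}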
270
271 /*
272 * gmap_alloc_table is assumed to be called with mmap_sem held
273 */
274 static int gmap_alloc_table(struct gmap *gmap, unsigned long *table,
275 unsigned long init, unsigned long gaddr)
276 {
277 struct page *page;
278 unsigned long *new;
279
280 /* since we don't free the gmap table until gmap_free we can unlock */
281 page = alloc_pages(GFP_KERNEL, 2);
282 if (!page)
283 return -ENOMEM;
284 new = (unsigned long *) page_to_phys(page);
285 crst_table_init(new, init);
286 spin_lock(&gmap->guest_table_lock);
287 if (*table & _REGION_ENTRY_INVALID) {
288 list_add(&page->lru, &gmap->crst_list);
289 *table = (unsigned long) new | _REGION_ENTRY_LENGTH |
290 (*table & _REGION_ENTRY_TYPE_MASK);
291 page->index = gaddr;
292 page = NULL;
293 }
294 spin_unlock(&gmap->guest_table_lock);
295 if (page)
296 __free_pages(page, 2);
297 return 0;
298 }
299
300 /**
301 * __gmap_segment_gaddr - find virtual address from segment pointer
302 * @entry: pointer to a segment table entry in the guest address space
303 *
304 * Returns the virtual address in the guest address space for the segment
305 */
306 static unsigned long __gmap_segment_gaddr(unsigned long *entry)
307 {
308 struct page *page;
309 unsigned long offset, mask;
310
311 offset = (unsigned long) entry / sizeof(unsigned long);
312 offset = (offset & (PTRS_PER_PMD - 1)) * PMD_SIZE;
313 mask = ~(PTRS_PER_PMD * sizeof(pmd_t) - 1);
314 page = virt_to_page((void *)((unsigned long) entry & mask));
315 return page->index + offset;
316 }
317
318 /**
319 * __gmap_unlink_by_vmaddr - unlink a single segment via a host address
320 * @gmap: pointer to the guest address space structure
321 * @vmaddr: address in the host process address space
322 *
323 * Returns 1 if a TLB flush is required
324 */
325 static int __gmap_unlink_by_vmaddr(struct gmap *gmap, unsigned long vmaddr)
326 {
327 unsigned long *entry;
328 int flush = 0;
329
330 BUG_ON(gmap_is_shadow(gmap));
331 spin_lock(&gmap->guest_table_lock);
332 entry = radix_tree_delete(&gmap->host_to_guest, vmaddr >> PMD_SHIFT);
333 if (entry) {
334 flush = (*entry != _SEGMENT_ENTRY_INVALID);
335 *entry = _SEGMENT_ENTRY_INVALID;
336 }
337 spin_unlock(&gmap->guest_table_lock);
338 return flush;
339 }
340
341 /**
342 * __gmap_unmap_by_gaddr - unmap a single segment via a guest address
343 * @gmap: pointer to the guest address space structure
344 * @gaddr: address in the guest address space
345 *
346 * Returns 1 if a TLB flush is required
347 */
348 static int __gmap_unmap_by_gaddr(struct gmap *gmap, unsigned long gaddr)
349 {
350 unsigned long vmaddr;
351
352 vmaddr = (unsigned long) radix_tree_delete(&gmap->guest_to_host,
353 gaddr >> PMD_SHIFT);
354 return vmaddr ? __gmap_unlink_by_vmaddr(gmap, vmaddr) : 0;
355 }
356
357 /**
358 * gmap_unmap_segment - unmap segment from the guest address space
359 * @gmap: pointer to the guest address space structure
360 * @to: address in the guest address space
361 * @len: length of the memory area to unmap
362 *
363 * Returns 0 if the unmap succeeded, -EINVAL if not.
364 */
365 int gmap_unmap_segment(struct gmap *gmap, unsigned long to, unsigned long len)
366 {
367 unsigned long off;
368 int flush;
369
370 BUG_ON(gmap_is_shadow(gmap));
371 if ((to | len) & (PMD_SIZE - 1))
372 return -EINVAL;
373 if (len == 0 || to + len < to)
374 return -EINVAL;
375
376 flush = 0;
377 down_write(&gmap->mm->mmap_sem);
378 for (off = 0; off < len; off += PMD_SIZE)
379 flush |= __gmap_unmap_by_gaddr(gmap, to + off);
380 up_write(&gmap->mm->mmap_sem);
381 if (flush)
382 gmap_flush_tlb(gmap);
383 return 0;
384 }
385 EXPORT_SYMBOL_GPL(gmap_unmap_segment);
386
387 /**
388 * gmap_map_segment - map a segment to the guest address space
389 * @gmap: pointer to the guest address space structure
390 * @from: source address in the parent address space
391 * @to: target address in the guest address space
392 * @len: length of the memory area to map
393 *
394 * Returns 0 if the mmap succeeded, -EINVAL or -ENOMEM if not.
395 */
396 int gmap_map_segment(struct gmap *gmap, unsigned long from,
397 unsigned long to, unsigned long len)
398 {
399 unsigned long off;
400 int flush;
401
402 BUG_ON(gmap_is_shadow(gmap));
403 if ((from | to | len) & (PMD_SIZE - 1))
404 return -EINVAL;
405 if (len == 0 || from + len < from || to + len < to ||
406 from + len - 1 > TASK_MAX_SIZE || to + len - 1 > gmap->asce_end)
407 return -EINVAL;
408
409 flush = 0;
410 down_write(&gmap->mm->mmap_sem);
411 for (off = 0; off < len; off += PMD_SIZE) {
412 /* Remove old translation */
413 flush |= __gmap_unmap_by_gaddr(gmap, to + off);
414 /* Store new translation */
415 if (radix_tree_insert(&gmap->guest_to_host,
416 (to + off) >> PMD_SHIFT,
417 (void *) from + off))
418 break;
419 }
420 up_write(&gmap->mm->mmap_sem);
421 if (flush)
422 gmap_flush_tlb(gmap);
423 if (off >= len)
424 return 0;
425 gmap_unmap_segment(gmap, to, len);
426 return -ENOMEM;
427 }
428 EXPORT_SYMBOL_GPL(gmap_map_segment);
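
/*
 * Illustrative sketch, not part of the original file: map a single 1 MB
 * segment of the host process at @from to guest address @to and tear it
 * down again. Both addresses must be PMD_SIZE aligned; the helper name
 * is hypothetical.
 */
static inline int gmap_map_example(struct gmap *gmap, unsigned long from,
				   unsigned long to)
{
	int rc;

	rc = gmap_map_segment(gmap, from, to, PMD_SIZE);
	if (rc)		/* -EINVAL on bad alignment, -ENOMEM otherwise */
		return rc;
	/* guest accesses in [to, to + PMD_SIZE) now resolve to @from */
	return gmap_unmap_segment(gmap, to, PMD_SIZE);
}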
429
430 /**
431 * __gmap_translate - translate a guest address to a user space address
432 * @gmap: pointer to guest mapping meta data structure
433 * @gaddr: guest address
434 *
435 * Returns user space address which corresponds to the guest address or
436 * -EFAULT if no such mapping exists.
437 * This function does not establish potentially missing page table entries.
438 * The mmap_sem of the mm that belongs to the address space must be held
439 * when this function gets called.
440 *
441 * Note: Can also be called for shadow gmaps.
442 */
443 unsigned long __gmap_translate(struct gmap *gmap, unsigned long gaddr)
444 {
445 unsigned long vmaddr;
446
447 vmaddr = (unsigned long)
448 radix_tree_lookup(&gmap->guest_to_host, gaddr >> PMD_SHIFT);
449 /* Note: guest_to_host is empty for a shadow gmap */
450 return vmaddr ? (vmaddr | (gaddr & ~PMD_MASK)) : -EFAULT;
451 }
452 EXPORT_SYMBOL_GPL(__gmap_translate);
453
454 /**
455 * gmap_translate - translate a guest address to a user space address
456 * @gmap: pointer to guest mapping meta data structure
457 * @gaddr: guest address
458 *
459 * Returns user space address which corresponds to the guest address or
460 * -EFAULT if no such mapping exists.
461 * This function does not establish potentially missing page table entries.
462 */
463 unsigned long gmap_translate(struct gmap *gmap, unsigned long gaddr)
464 {
465 unsigned long rc;
466
467 down_read(&gmap->mm->mmap_sem);
468 rc = __gmap_translate(gmap, gaddr);
469 up_read(&gmap->mm->mmap_sem);
470 return rc;
471 }
472 EXPORT_SYMBOL_GPL(gmap_translate);
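
/*
 * Illustrative sketch, not part of the original file: check whether a
 * guest address is currently backed by a gmap segment mapping. Errors
 * are encoded in the returned unsigned long, hence IS_ERR_VALUE().
 */
static inline bool gmap_example_is_mapped(struct gmap *gmap,
					  unsigned long gaddr)
{
	return !IS_ERR_VALUE(gmap_translate(gmap, gaddr));
}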
473
474 /**
475 * gmap_unlink - disconnect a page table from the gmap shadow tables
476 * @mm: pointer to the parent mm_struct
477 * @table: pointer to the host page table
478 * @vmaddr: vm address associated with the host page table
479 */
480 void gmap_unlink(struct mm_struct *mm, unsigned long *table,
481 unsigned long vmaddr)
482 {
483 struct gmap *gmap;
484 int flush;
485
486 rcu_read_lock();
487 list_for_each_entry_rcu(gmap, &mm->context.gmap_list, list) {
488 flush = __gmap_unlink_by_vmaddr(gmap, vmaddr);
489 if (flush)
490 gmap_flush_tlb(gmap);
491 }
492 rcu_read_unlock();
493 }
494
495 /**
496 * __gmap_link - set up shadow page tables to connect a host to a guest address
497 * @gmap: pointer to guest mapping meta data structure
498 * @gaddr: guest address
499 * @vmaddr: vm address
500 *
501 * Returns 0 on success, -ENOMEM for out of memory conditions, and -EFAULT
502 * if the vm address is already mapped to a different guest segment.
503 * The mmap_sem of the mm that belongs to the address space must be held
504 * when this function gets called.
505 */
506 int __gmap_link(struct gmap *gmap, unsigned long gaddr, unsigned long vmaddr)
507 {
508 struct mm_struct *mm;
509 unsigned long *table;
510 spinlock_t *ptl;
511 pgd_t *pgd;
512 pud_t *pud;
513 pmd_t *pmd;
514 int rc;
515
516 BUG_ON(gmap_is_shadow(gmap));
517 /* Create higher level tables in the gmap page table */
518 table = gmap->table;
519 if ((gmap->asce & _ASCE_TYPE_MASK) >= _ASCE_TYPE_REGION1) {
520 table += (gaddr >> 53) & 0x7ff;
521 if ((*table & _REGION_ENTRY_INVALID) &&
522 gmap_alloc_table(gmap, table, _REGION2_ENTRY_EMPTY,
523 gaddr & 0xffe0000000000000UL))
524 return -ENOMEM;
525 table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
526 }
527 if ((gmap->asce & _ASCE_TYPE_MASK) >= _ASCE_TYPE_REGION2) {
528 table += (gaddr >> 42) & 0x7ff;
529 if ((*table & _REGION_ENTRY_INVALID) &&
530 gmap_alloc_table(gmap, table, _REGION3_ENTRY_EMPTY,
531 gaddr & 0xfffffc0000000000UL))
532 return -ENOMEM;
533 table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
534 }
535 if ((gmap->asce & _ASCE_TYPE_MASK) >= _ASCE_TYPE_REGION3) {
536 table += (gaddr >> 31) & 0x7ff;
537 if ((*table & _REGION_ENTRY_INVALID) &&
538 gmap_alloc_table(gmap, table, _SEGMENT_ENTRY_EMPTY,
539 gaddr & 0xffffffff80000000UL))
540 return -ENOMEM;
541 table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
542 }
543 table += (gaddr >> 20) & 0x7ff;
544 /* Walk the parent mm page table */
545 mm = gmap->mm;
546 pgd = pgd_offset(mm, vmaddr);
547 VM_BUG_ON(pgd_none(*pgd));
548 pud = pud_offset(pgd, vmaddr);
549 VM_BUG_ON(pud_none(*pud));
550 pmd = pmd_offset(pud, vmaddr);
551 VM_BUG_ON(pmd_none(*pmd));
552 /* large pmds cannot yet be handled */
553 if (pmd_large(*pmd))
554 return -EFAULT;
555 /* Link gmap segment table entry location to page table. */
556 rc = radix_tree_preload(GFP_KERNEL);
557 if (rc)
558 return rc;
559 ptl = pmd_lock(mm, pmd);
560 spin_lock(&gmap->guest_table_lock);
561 if (*table == _SEGMENT_ENTRY_INVALID) {
562 rc = radix_tree_insert(&gmap->host_to_guest,
563 vmaddr >> PMD_SHIFT, table);
564 if (!rc)
565 *table = pmd_val(*pmd);
566 } else
567 rc = 0;
568 spin_unlock(&gmap->guest_table_lock);
569 spin_unlock(ptl);
570 radix_tree_preload_end();
571 return rc;
572 }
573
574 /**
575 * gmap_fault - resolve a fault on a guest address
576 * @gmap: pointer to guest mapping meta data structure
577 * @gaddr: guest address
578 * @fault_flags: flags to pass down to handle_mm_fault()
579 *
580 * Returns 0 on success, -ENOMEM for out of memory conditions, and -EFAULT
581 * if the vm address is already mapped to a different guest segment.
582 */
583 int gmap_fault(struct gmap *gmap, unsigned long gaddr,
584 unsigned int fault_flags)
585 {
586 unsigned long vmaddr;
587 int rc;
588 bool unlocked;
589
590 down_read(&gmap->mm->mmap_sem);
591
592 retry:
593 unlocked = false;
594 vmaddr = __gmap_translate(gmap, gaddr);
595 if (IS_ERR_VALUE(vmaddr)) {
596 rc = vmaddr;
597 goto out_up;
598 }
599 if (fixup_user_fault(current, gmap->mm, vmaddr, fault_flags,
600 &unlocked)) {
601 rc = -EFAULT;
602 goto out_up;
603 }
604 /*
605 * In case fixup_user_fault unlocked the mmap_sem during the fault-in,
606 * redo __gmap_translate to avoid racing with a map/unmap_segment.
607 */
608 if (unlocked)
609 goto retry;
610
611 rc = __gmap_link(gmap, gaddr, vmaddr);
612 out_up:
613 up_read(&gmap->mm->mmap_sem);
614 return rc;
615 }
616 EXPORT_SYMBOL_GPL(gmap_fault);
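
/*
 * Illustrative sketch, not part of the original file: how a hypothetical
 * SIE exit handler could resolve a guest write fault. 0 means the host
 * page table is now linked, -EFAULT that the guest address has no
 * segment mapping, -ENOMEM that memory ran out.
 */
static inline int gmap_fault_example(struct gmap *gmap, unsigned long gaddr)
{
	return gmap_fault(gmap, gaddr, FAULT_FLAG_WRITE);
}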
617
618 /*
619 * this function is assumed to be called with mmap_sem held
620 */
621 void __gmap_zap(struct gmap *gmap, unsigned long gaddr)
622 {
623 unsigned long vmaddr;
624 spinlock_t *ptl;
625 pte_t *ptep;
626
627 /* Find the vm address for the guest address */
628 vmaddr = (unsigned long) radix_tree_lookup(&gmap->guest_to_host,
629 gaddr >> PMD_SHIFT);
630 if (vmaddr) {
631 vmaddr |= gaddr & ~PMD_MASK;
632 /* Get pointer to the page table entry */
633 ptep = get_locked_pte(gmap->mm, vmaddr, &ptl);
634 if (likely(ptep)) {
635 ptep_zap_unused(gmap->mm, vmaddr, ptep, 0);
636 pte_unmap_unlock(ptep, ptl);
}
637 }
638 }
639 EXPORT_SYMBOL_GPL(__gmap_zap);
640
641 void gmap_discard(struct gmap *gmap, unsigned long from, unsigned long to)
642 {
643 unsigned long gaddr, vmaddr, size;
644 struct vm_area_struct *vma;
645
646 down_read(&gmap->mm->mmap_sem);
647 for (gaddr = from; gaddr < to;
648 gaddr = (gaddr + PMD_SIZE) & PMD_MASK) {
649 /* Find the vm address for the guest address */
650 vmaddr = (unsigned long)
651 radix_tree_lookup(&gmap->guest_to_host,
652 gaddr >> PMD_SHIFT);
653 if (!vmaddr)
654 continue;
655 vmaddr |= gaddr & ~PMD_MASK;
656 /* Find vma in the parent mm */
657 vma = find_vma(gmap->mm, vmaddr);
if (!vma)
continue;
658 size = min(to - gaddr, PMD_SIZE - (gaddr & ~PMD_MASK));
659 zap_page_range(vma, vmaddr, size, NULL);
660 }
661 up_read(&gmap->mm->mmap_sem);
662 }
663 EXPORT_SYMBOL_GPL(gmap_discard);
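
/*
 * Illustrative sketch, not part of the original file: drop the host
 * backing of one guest page, e.g. after the guest declared it unused.
 * The helper name is hypothetical.
 */
static inline void gmap_discard_example(struct gmap *gmap, unsigned long gaddr)
{
	gaddr &= PAGE_MASK;
	gmap_discard(gmap, gaddr, gaddr + PAGE_SIZE);
}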
664
665 static LIST_HEAD(gmap_notifier_list);
666 static DEFINE_SPINLOCK(gmap_notifier_lock);
667
668 /**
669 * gmap_register_pte_notifier - register a pte invalidation callback
670 * @nb: pointer to the gmap notifier block
671 */
672 void gmap_register_pte_notifier(struct gmap_notifier *nb)
673 {
674 spin_lock(&gmap_notifier_lock);
675 list_add_rcu(&nb->list, &gmap_notifier_list);
676 spin_unlock(&gmap_notifier_lock);
677 }
678 EXPORT_SYMBOL_GPL(gmap_register_pte_notifier);
679
680 /**
681 * gmap_unregister_pte_notifier - remove a pte invalidation callback
682 * @nb: pointer to the gmap notifier block
683 */
684 void gmap_unregister_pte_notifier(struct gmap_notifier *nb)
685 {
686 spin_lock(&gmap_notifier_lock);
687 list_del_rcu(&nb->list);
688 spin_unlock(&gmap_notifier_lock);
689 synchronize_rcu();
690 }
691 EXPORT_SYMBOL_GPL(gmap_unregister_pte_notifier);
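
/*
 * Illustrative sketch, not part of the original file: a minimal pte
 * invalidation callback and its registration. The callback runs from
 * gmap_call_notifier() below with the invalidated guest address range;
 * all names are hypothetical.
 */
static void gmap_example_notifier_call(struct gmap *gmap, unsigned long start,
				       unsigned long end)
{
	/* e.g. kick all vcpus that currently use this gmap */
}

static struct gmap_notifier gmap_example_notifier = {
	.notifier_call = gmap_example_notifier_call,
};

static inline void gmap_example_register(void)
{
	gmap_register_pte_notifier(&gmap_example_notifier);
}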
692
693 /**
694 * gmap_call_notifier - call all registered invalidation callbacks
695 * @gmap: pointer to guest mapping meta data structure
696 * @start: start virtual address in the guest address space
697 * @end: end virtual address in the guest address space
698 */
699 static void gmap_call_notifier(struct gmap *gmap, unsigned long start,
700 unsigned long end)
701 {
702 struct gmap_notifier *nb;
703
704 list_for_each_entry(nb, &gmap_notifier_list, list)
705 nb->notifier_call(gmap, start, end);
706 }
707
708 /**
709 * gmap_table_walk - walk the gmap page tables
710 * @gmap: pointer to guest mapping meta data structure
711 * @gaddr: virtual address in the guest address space
712 * @level: page table level to stop at
713 *
714 * Returns a table entry pointer for the given guest address and @level
715 * @level=0 : returns a pointer to a page table entry (or NULL)
716 * @level=1 : returns a pointer to a segment table entry (or NULL)
717 * @level=2 : returns a pointer to a region-3 table entry (or NULL)
718 * @level=3 : returns a pointer to a region-2 table entry (or NULL)
719 * @level=4 : returns a pointer to a region-1 table entry (or NULL)
720 *
721 * Returns NULL if the gmap page tables could not be walked to the
722 * requested level.
723 *
724 * Note: Can also be called for shadow gmaps.
725 */
726 static inline unsigned long *gmap_table_walk(struct gmap *gmap,
727 unsigned long gaddr, int level)
728 {
729 unsigned long *table;
730
731 if ((gmap->asce & _ASCE_TYPE_MASK) + 4 < (level * 4))
732 return NULL;
733 if (gmap_is_shadow(gmap) && gmap->removed)
734 return NULL;
735 if (gaddr & (-1UL << (31 + ((gmap->asce & _ASCE_TYPE_MASK) >> 2)*11)))
736 return NULL;
737 table = gmap->table;
738 switch (gmap->asce & _ASCE_TYPE_MASK) {
739 case _ASCE_TYPE_REGION1:
740 table += (gaddr >> 53) & 0x7ff;
741 if (level == 4)
742 break;
743 if (*table & _REGION_ENTRY_INVALID)
744 return NULL;
745 table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
746 /* Fallthrough */
747 case _ASCE_TYPE_REGION2:
748 table += (gaddr >> 42) & 0x7ff;
749 if (level == 3)
750 break;
751 if (*table & _REGION_ENTRY_INVALID)
752 return NULL;
753 table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
754 /* Fallthrough */
755 case _ASCE_TYPE_REGION3:
756 table += (gaddr >> 31) & 0x7ff;
757 if (level == 2)
758 break;
759 if (*table & _REGION_ENTRY_INVALID)
760 return NULL;
761 table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
762 /* Fallthrough */
763 case _ASCE_TYPE_SEGMENT:
764 table += (gaddr >> 20) & 0x7ff;
765 if (level == 1)
766 break;
767 if (*table & _REGION_ENTRY_INVALID)
768 return NULL;
769 table = (unsigned long *)(*table & _SEGMENT_ENTRY_ORIGIN);
770 table += (gaddr >> 12) & 0xff;
771 }
772 return table;
773 }
774
775 /**
776 * gmap_pte_op_walk - walk the gmap page table, get the page table lock
777 * and return the pte pointer
778 * @gmap: pointer to guest mapping meta data structure
779 * @gaddr: virtual address in the guest address space
780 * @ptl: pointer to the spinlock pointer
781 *
782 * Returns a pointer to the locked pte for a guest address, or NULL
783 *
784 * Note: Can also be called for shadow gmaps.
785 */
786 static pte_t *gmap_pte_op_walk(struct gmap *gmap, unsigned long gaddr,
787 spinlock_t **ptl)
788 {
789 unsigned long *table;
790
791 if (gmap_is_shadow(gmap))
792 spin_lock(&gmap->guest_table_lock);
793 /* Walk the gmap page table, lock and get pte pointer */
794 table = gmap_table_walk(gmap, gaddr, 1); /* get segment pointer */
795 if (!table || *table & _SEGMENT_ENTRY_INVALID) {
796 if (gmap_is_shadow(gmap))
797 spin_unlock(&gmap->guest_table_lock);
798 return NULL;
799 }
800 if (gmap_is_shadow(gmap)) {
801 *ptl = &gmap->guest_table_lock;
802 return pte_offset_map((pmd_t *) table, gaddr);
803 }
804 return pte_alloc_map_lock(gmap->mm, (pmd_t *) table, gaddr, ptl);
805 }
806
807 /**
808 * gmap_pte_op_fixup - force a page in and connect the gmap page table
809 * @gmap: pointer to guest mapping meta data structure
810 * @gaddr: virtual address in the guest address space
811 * @vmaddr: address in the host process address space
812 *
813 * Returns 0 if the caller can retry __gmap_translate (might fail again),
814 * -ENOMEM if out of memory and -EFAULT if anything goes wrong while fixing
815 * up or connecting the gmap page table.
816 */
817 static int gmap_pte_op_fixup(struct gmap *gmap, unsigned long gaddr,
818 unsigned long vmaddr)
819 {
820 struct mm_struct *mm = gmap->mm;
821 bool unlocked = false;
822
823 BUG_ON(gmap_is_shadow(gmap));
824 if (fixup_user_fault(current, mm, vmaddr, FAULT_FLAG_WRITE, &unlocked))
825 return -EFAULT;
826 if (unlocked)
827 /* lost mmap_sem, caller has to retry __gmap_translate */
828 return 0;
829 /* Connect the page tables */
830 return __gmap_link(gmap, gaddr, vmaddr);
831 }
832
833 /**
834 * gmap_pte_op_end - release the page table lock
835 * @ptl: pointer to the spinlock pointer
836 */
837 static void gmap_pte_op_end(spinlock_t *ptl)
838 {
839 spin_unlock(ptl);
840 }
841
842 /*
843 * gmap_protect_range - remove access rights to memory and set pgste bits
844 * @gmap: pointer to guest mapping meta data structure
845 * @gaddr: virtual address in the guest address space
846 * @len: size of area
847 * @prot: indicates access rights: PROT_NONE, PROT_READ or PROT_WRITE
848 * @bits: pgste notification bits to set
849 *
850 * Returns 0 if successfully protected, -ENOMEM if out of memory and
851 * -EFAULT if gaddr is invalid (or mapping for shadows is missing).
852 *
853 * Called with sg->mm->mmap_sem in read.
854 *
855 * Note: Can also be called for shadow gmaps.
856 */
857 static int gmap_protect_range(struct gmap *gmap, unsigned long gaddr,
858 unsigned long len, int prot, unsigned long bits)
859 {
860 unsigned long vmaddr;
861 spinlock_t *ptl;
862 pte_t *ptep;
863 int rc;
864
865 while (len) {
866 rc = -EAGAIN;
867 ptep = gmap_pte_op_walk(gmap, gaddr, &ptl);
868 if (ptep) {
869 rc = ptep_force_prot(gmap->mm, gaddr, ptep, prot, bits);
870 gmap_pte_op_end(ptl);
871 }
872 if (rc) {
873 vmaddr = __gmap_translate(gmap, gaddr);
874 if (IS_ERR_VALUE(vmaddr))
875 return vmaddr;
876 rc = gmap_pte_op_fixup(gmap, gaddr, vmaddr);
877 if (rc)
878 return rc;
879 continue;
880 }
881 gaddr += PAGE_SIZE;
882 len -= PAGE_SIZE;
883 }
884 return 0;
885 }
886
887 /**
888 * gmap_mprotect_notify - change access rights for a range of ptes and
889 * call the notifier if any pte changes again
890 * @gmap: pointer to guest mapping meta data structure
891 * @gaddr: virtual address in the guest address space
892 * @len: size of area
893 * @prot: indicates access rights: PROT_NONE, PROT_READ or PROT_WRITE
894 *
895 * Returns 0 if for each page in the given range a gmap mapping exists,
896 * the new access rights could be set and the notifier could be armed.
897 * If the gmap mapping is missing for one or more pages -EFAULT is
898 * returned. If no memory could be allocated -ENOMEM is returned.
899 * This function establishes missing page table entries.
900 */
901 int gmap_mprotect_notify(struct gmap *gmap, unsigned long gaddr,
902 unsigned long len, int prot)
903 {
904 int rc;
905
906 if ((gaddr & ~PAGE_MASK) || (len & ~PAGE_MASK) || gmap_is_shadow(gmap))
907 return -EINVAL;
908 if (!MACHINE_HAS_ESOP && prot == PROT_READ)
909 return -EINVAL;
910 down_read(&gmap->mm->mmap_sem);
911 rc = gmap_protect_range(gmap, gaddr, len, prot, PGSTE_IN_BIT);
912 up_read(&gmap->mm->mmap_sem);
913 return rc;
914 }
915 EXPORT_SYMBOL_GPL(gmap_mprotect_notify);
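
/*
 * Illustrative sketch, not part of the original file: write-protect one
 * guest page and arm the PGSTE notification bit, so that the registered
 * notifiers fire on the next change to that pte. PROT_READ is only
 * accepted when the machine has ESOP (see the check above); the helper
 * name is hypothetical.
 */
static inline int gmap_example_protect_page(struct gmap *gmap,
					    unsigned long gaddr)
{
	return gmap_mprotect_notify(gmap, gaddr & PAGE_MASK, PAGE_SIZE,
				    PROT_READ);
}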
916
917 /**
918 * gmap_read_table - get an unsigned long value from a guest page table using
919 * absolute addressing, without marking the page referenced.
920 * @gmap: pointer to guest mapping meta data structure
921 * @gaddr: virtual address in the guest address space
922 * @val: pointer to the unsigned long value to return
923 *
924 * Returns 0 if the value was read, -ENOMEM if out of memory and -EFAULT
925 * if reading using the virtual address failed.
926 *
927 * Called with gmap->mm->mmap_sem in read.
928 */
929 int gmap_read_table(struct gmap *gmap, unsigned long gaddr, unsigned long *val)
930 {
931 unsigned long address, vmaddr;
932 spinlock_t *ptl;
933 pte_t *ptep, pte;
934 int rc;
935
936 while (1) {
937 rc = -EAGAIN;
938 ptep = gmap_pte_op_walk(gmap, gaddr, &ptl);
939 if (ptep) {
940 pte = *ptep;
941 if (pte_present(pte) && (pte_val(pte) & _PAGE_READ)) {
942 address = pte_val(pte) & PAGE_MASK;
943 address += gaddr & ~PAGE_MASK;
944 *val = *(unsigned long *) address;
945 pte_val(*ptep) |= _PAGE_YOUNG;
946 /* Do *NOT* clear the _PAGE_INVALID bit! */
947 rc = 0;
948 }
949 gmap_pte_op_end(ptl);
950 }
951 if (!rc)
952 break;
953 vmaddr = __gmap_translate(gmap, gaddr);
954 if (IS_ERR_VALUE(vmaddr)) {
955 rc = vmaddr;
956 break;
957 }
958 rc = gmap_pte_op_fixup(gmap, gaddr, vmaddr);
959 if (rc)
960 break;
961 }
962 return rc;
963 }
964 EXPORT_SYMBOL_GPL(gmap_read_table);
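
/*
 * Illustrative sketch, not part of the original file: read a 64-bit
 * value from guest memory via gmap_read_table(). As the kerneldoc above
 * states, the caller must hold mmap_sem for reading; the helper name is
 * hypothetical.
 */
static inline int gmap_example_read(struct gmap *gmap, unsigned long gaddr,
				    unsigned long *val)
{
	int rc;

	down_read(&gmap->mm->mmap_sem);
	rc = gmap_read_table(gmap, gaddr, val);
	up_read(&gmap->mm->mmap_sem);
	return rc;
}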
965
966 /**
967 * gmap_insert_rmap - add a rmap to the host_to_rmap radix tree
968 * @sg: pointer to the shadow guest address space structure
969 * @vmaddr: vm address associated with the rmap
970 * @rmap: pointer to the rmap structure
971 *
972 * Called with the sg->guest_table_lock
973 */
974 static inline void gmap_insert_rmap(struct gmap *sg, unsigned long vmaddr,
975 struct gmap_rmap *rmap)
976 {
977 void **slot;
978
979 BUG_ON(!gmap_is_shadow(sg));
980 slot = radix_tree_lookup_slot(&sg->host_to_rmap, vmaddr >> PAGE_SHIFT);
981 if (slot) {
982 rmap->next = radix_tree_deref_slot_protected(slot,
983 &sg->guest_table_lock);
984 radix_tree_replace_slot(slot, rmap);
985 } else {
986 rmap->next = NULL;
987 radix_tree_insert(&sg->host_to_rmap, vmaddr >> PAGE_SHIFT,
988 rmap);
989 }
990 }
991
992 /**
993 * gmap_protect_rmap - modify access rights to memory and create an rmap
994 * @sg: pointer to the shadow guest address space structure
995 * @raddr: rmap address in the shadow gmap
996 * @paddr: address in the parent guest address space
997 * @len: length of the memory area to protect
998 * @prot: indicates access rights: none, read-only or read-write
999 *
1000 * Returns 0 if successfully protected and the rmap was created, -ENOMEM
1001 * if out of memory and -EFAULT if paddr is invalid.
1002 */
1003 static int gmap_protect_rmap(struct gmap *sg, unsigned long raddr,
1004 unsigned long paddr, unsigned long len, int prot)
1005 {
1006 struct gmap *parent;
1007 struct gmap_rmap *rmap;
1008 unsigned long vmaddr;
1009 spinlock_t *ptl;
1010 pte_t *ptep;
1011 int rc;
1012
1013 BUG_ON(!gmap_is_shadow(sg));
1014 parent = sg->parent;
1015 while (len) {
1016 vmaddr = __gmap_translate(parent, paddr);
1017 if (IS_ERR_VALUE(vmaddr))
1018 return vmaddr;
1019 rmap = kzalloc(sizeof(*rmap), GFP_KERNEL);
1020 if (!rmap)
1021 return -ENOMEM;
1022 rmap->raddr = raddr;
1023 rc = radix_tree_preload(GFP_KERNEL);
1024 if (rc) {
1025 kfree(rmap);
1026 return rc;
1027 }
1028 rc = -EAGAIN;
1029 ptep = gmap_pte_op_walk(parent, paddr, &ptl);
1030 if (ptep) {
1031 spin_lock(&sg->guest_table_lock);
1032 rc = ptep_force_prot(parent->mm, paddr, ptep, prot,
1033 PGSTE_VSIE_BIT);
1034 if (!rc)
1035 gmap_insert_rmap(sg, vmaddr, rmap);
1036 spin_unlock(&sg->guest_table_lock);
1037 gmap_pte_op_end(ptl);
1038 }
1039 radix_tree_preload_end();
1040 if (rc) {
1041 kfree(rmap);
1042 rc = gmap_pte_op_fixup(parent, paddr, vmaddr);
1043 if (rc)
1044 return rc;
1045 continue;
1046 }
1047 paddr += PAGE_SIZE;
1048 len -= PAGE_SIZE;
1049 }
1050 return 0;
1051 }
1052
1053 #define _SHADOW_RMAP_MASK 0x7
1054 #define _SHADOW_RMAP_REGION1 0x5
1055 #define _SHADOW_RMAP_REGION2 0x4
1056 #define _SHADOW_RMAP_REGION3 0x3
1057 #define _SHADOW_RMAP_SEGMENT 0x2
1058 #define _SHADOW_RMAP_PGTABLE 0x1
1059
1060 /**
1061 * gmap_idte_one - invalidate a single region or segment table entry
1062 * @asce: region or segment table *origin* + table-type bits
1063 * @vaddr: virtual address to identify the table entry to flush
1064 *
1065 * The invalid bit of a single region or segment table entry is set
1066 * and the associated TLB entries depending on the entry are flushed.
1067 * The table-type of the @asce identifies the portion of the @vaddr
1068 * that is used as the invalidation index.
1069 */
1070 static inline void gmap_idte_one(unsigned long asce, unsigned long vaddr)
1071 {
1072 asm volatile(
1073 " .insn rrf,0xb98e0000,%0,%1,0,0"
1074 : : "a" (asce), "a" (vaddr) : "cc", "memory");
1075 }
1076
1077 /**
1078 * gmap_unshadow_page - remove a page from a shadow page table
1079 * @sg: pointer to the shadow guest address space structure
1080 * @raddr: rmap address in the shadow guest address space
1081 *
1082 * Called with the sg->guest_table_lock
1083 */
1084 static void gmap_unshadow_page(struct gmap *sg, unsigned long raddr)
1085 {
1086 unsigned long *table;
1087
1088 BUG_ON(!gmap_is_shadow(sg));
1089 table = gmap_table_walk(sg, raddr, 0); /* get page table pointer */
1090 if (!table || *table & _PAGE_INVALID)
1091 return;
1092 gmap_call_notifier(sg, raddr, raddr + (1UL << 12) - 1);
1093 ptep_unshadow_pte(sg->mm, raddr, (pte_t *) table);
1094 }
1095
1096 /**
1097 * __gmap_unshadow_pgt - remove all entries from a shadow page table
1098 * @sg: pointer to the shadow guest address space structure
1099 * @raddr: rmap address in the shadow guest address space
1100 * @pgt: pointer to the start of a shadow page table
1101 *
1102 * Called with the sg->guest_table_lock
1103 */
1104 static void __gmap_unshadow_pgt(struct gmap *sg, unsigned long raddr,
1105 unsigned long *pgt)
1106 {
1107 int i;
1108
1109 BUG_ON(!gmap_is_shadow(sg));
1110 for (i = 0; i < 256; i++, raddr += 1UL << 12)
1111 pgt[i] = _PAGE_INVALID;
1112 }
1113
1114 /**
1115 * gmap_unshadow_pgt - remove a shadow page table from a segment entry
1116 * @sg: pointer to the shadow guest address space structure
1117 * @raddr: address in the shadow guest address space
1118 *
1119 * Called with the sg->guest_table_lock
1120 */
1121 static void gmap_unshadow_pgt(struct gmap *sg, unsigned long raddr)
1122 {
1123 unsigned long sto, *ste, *pgt;
1124 struct page *page;
1125
1126 BUG_ON(!gmap_is_shadow(sg));
1127 ste = gmap_table_walk(sg, raddr, 1); /* get segment pointer */
1128 if (!ste || *ste & _SEGMENT_ENTRY_INVALID)
1129 return;
1130 gmap_call_notifier(sg, raddr, raddr + (1UL << 20) - 1);
1131 sto = (unsigned long) (ste - ((raddr >> 20) & 0x7ff));
1132 gmap_idte_one(sto | _ASCE_TYPE_SEGMENT, raddr);
1133 pgt = (unsigned long *)(*ste & _SEGMENT_ENTRY_ORIGIN);
1134 *ste = _SEGMENT_ENTRY_EMPTY;
1135 __gmap_unshadow_pgt(sg, raddr, pgt);
1136 /* Free page table */
1137 page = pfn_to_page(__pa(pgt) >> PAGE_SHIFT);
1138 list_del(&page->lru);
1139 page_table_free_pgste(page);
1140 }
1141
1142 /**
1143 * __gmap_unshadow_sgt - remove all entries from a shadow segment table
1144 * @sg: pointer to the shadow guest address space structure
1145 * @raddr: rmap address in the shadow guest address space
1146 * @sgt: pointer to the start of a shadow segment table
1147 *
1148 * Called with the sg->guest_table_lock
1149 */
1150 static void __gmap_unshadow_sgt(struct gmap *sg, unsigned long raddr,
1151 unsigned long *sgt)
1152 {
1153 unsigned long asce, *pgt;
1154 struct page *page;
1155 int i;
1156
1157 BUG_ON(!gmap_is_shadow(sg));
1158 asce = (unsigned long) sgt | _ASCE_TYPE_SEGMENT;
1159 for (i = 0; i < 2048; i++, raddr += 1UL << 20) {
1160 if (sgt[i] & _SEGMENT_ENTRY_INVALID)
1161 continue;
1162 pgt = (unsigned long *)(sgt[i] & _REGION_ENTRY_ORIGIN);
1163 sgt[i] = _SEGMENT_ENTRY_EMPTY;
1164 __gmap_unshadow_pgt(sg, raddr, pgt);
1165 /* Free page table */
1166 page = pfn_to_page(__pa(pgt) >> PAGE_SHIFT);
1167 list_del(&page->lru);
1168 page_table_free_pgste(page);
1169 }
1170 }
1171
1172 /**
1173 * gmap_unshadow_sgt - remove a shadow segment table from a region-3 entry
1174 * @sg: pointer to the shadow guest address space structure
1175 * @raddr: rmap address in the shadow guest address space
1176 *
1177 * Called with the shadow->guest_table_lock
1178 */
1179 static void gmap_unshadow_sgt(struct gmap *sg, unsigned long raddr)
1180 {
1181 unsigned long r3o, *r3e, *sgt;
1182 struct page *page;
1183
1184 BUG_ON(!gmap_is_shadow(sg));
1185 r3e = gmap_table_walk(sg, raddr, 2); /* get region-3 pointer */
1186 if (!r3e || *r3e & _REGION_ENTRY_INVALID)
1187 return;
1188 gmap_call_notifier(sg, raddr, raddr + (1UL << 31) - 1);
1189 r3o = (unsigned long) (r3e - ((raddr >> 31) & 0x7ff));
1190 gmap_idte_one(r3o | _ASCE_TYPE_REGION3, raddr);
1191 sgt = (unsigned long *)(*r3e & _REGION_ENTRY_ORIGIN);
1192 *r3e = _REGION3_ENTRY_EMPTY;
1193 __gmap_unshadow_sgt(sg, raddr, sgt);
1194 /* Free segment table */
1195 page = pfn_to_page(__pa(sgt) >> PAGE_SHIFT);
1196 list_del(&page->lru);
1197 __free_pages(page, 2);
1198 }
1199
1200 /**
1201 * __gmap_unshadow_r3t - remove all entries from a shadow region-3 table
1202 * @sg: pointer to the shadow guest address space structure
1203 * @raddr: address in the shadow guest address space
1204 * @r3t: pointer to the start of a shadow region-3 table
1205 *
1206 * Called with the sg->guest_table_lock
1207 */
1208 static void __gmap_unshadow_r3t(struct gmap *sg, unsigned long raddr,
1209 unsigned long *r3t)
1210 {
1211 unsigned long asce, *sgt;
1212 struct page *page;
1213 int i;
1214
1215 BUG_ON(!gmap_is_shadow(sg));
1216 asce = (unsigned long) r3t | _ASCE_TYPE_REGION3;
1217 for (i = 0; i < 2048; i++, raddr += 1UL << 31) {
1218 if (r3t[i] & _REGION_ENTRY_INVALID)
1219 continue;
1220 sgt = (unsigned long *)(r3t[i] & _REGION_ENTRY_ORIGIN);
1221 r3t[i] = _REGION3_ENTRY_EMPTY;
1222 __gmap_unshadow_sgt(sg, raddr, sgt);
1223 /* Free segment table */
1224 page = pfn_to_page(__pa(sgt) >> PAGE_SHIFT);
1225 list_del(&page->lru);
1226 __free_pages(page, 2);
1227 }
1228 }
1229
1230 /**
1231 * gmap_unshadow_r3t - remove a shadow region-3 table from a region-2 entry
1232 * @sg: pointer to the shadow guest address space structure
1233 * @raddr: rmap address in the shadow guest address space
1234 *
1235 * Called with the sg->guest_table_lock
1236 */
1237 static void gmap_unshadow_r3t(struct gmap *sg, unsigned long raddr)
1238 {
1239 unsigned long r2o, *r2e, *r3t;
1240 struct page *page;
1241
1242 BUG_ON(!gmap_is_shadow(sg));
1243 r2e = gmap_table_walk(sg, raddr, 3); /* get region-2 pointer */
1244 if (!r2e || *r2e & _REGION_ENTRY_INVALID)
1245 return;
1246 gmap_call_notifier(sg, raddr, raddr + (1UL << 42) - 1);
1247 r2o = (unsigned long) (r2e - ((raddr >> 42) & 0x7ff));
1248 gmap_idte_one(r2o | _ASCE_TYPE_REGION2, raddr);
1249 r3t = (unsigned long *)(*r2e & _REGION_ENTRY_ORIGIN);
1250 *r2e = _REGION2_ENTRY_EMPTY;
1251 __gmap_unshadow_r3t(sg, raddr, r3t);
1252 /* Free region 3 table */
1253 page = pfn_to_page(__pa(r3t) >> PAGE_SHIFT);
1254 list_del(&page->lru);
1255 __free_pages(page, 2);
1256 }
1257
1258 /**
1259 * __gmap_unshadow_r2t - remove all entries from a shadow region-2 table
1260 * @sg: pointer to the shadow guest address space structure
1261 * @raddr: rmap address in the shadow guest address space
1262 * @r2t: pointer to the start of a shadow region-2 table
1263 *
1264 * Called with the sg->guest_table_lock
1265 */
1266 static void __gmap_unshadow_r2t(struct gmap *sg, unsigned long raddr,
1267 unsigned long *r2t)
1268 {
1269 unsigned long asce, *r3t;
1270 struct page *page;
1271 int i;
1272
1273 BUG_ON(!gmap_is_shadow(sg));
1274 asce = (unsigned long) r2t | _ASCE_TYPE_REGION2;
1275 for (i = 0; i < 2048; i++, raddr += 1UL << 42) {
1276 if (r2t[i] & _REGION_ENTRY_INVALID)
1277 continue;
1278 r3t = (unsigned long *)(r2t[i] & _REGION_ENTRY_ORIGIN);
1279 r2t[i] = _REGION2_ENTRY_EMPTY;
1280 __gmap_unshadow_r3t(sg, raddr, r3t);
1281 /* Free region 3 table */
1282 page = pfn_to_page(__pa(r3t) >> PAGE_SHIFT);
1283 list_del(&page->lru);
1284 __free_pages(page, 2);
1285 }
1286 }
1287
1288 /**
1289 * gmap_unshadow_r2t - remove a shadow region-2 table from a region-1 entry
1290 * @sg: pointer to the shadow guest address space structure
1291 * @raddr: rmap address in the shadow guest address space
1292 *
1293 * Called with the sg->guest_table_lock
1294 */
1295 static void gmap_unshadow_r2t(struct gmap *sg, unsigned long raddr)
1296 {
1297 unsigned long r1o, *r1e, *r2t;
1298 struct page *page;
1299
1300 BUG_ON(!gmap_is_shadow(sg));
1301 r1e = gmap_table_walk(sg, raddr, 4); /* get region-1 pointer */
1302 if (!r1e || *r1e & _REGION_ENTRY_INVALID)
1303 return;
1304 gmap_call_notifier(sg, raddr, raddr + (1UL << 53) - 1);
1305 r1o = (unsigned long) (r1e - ((raddr >> 53) & 0x7ff));
1306 gmap_idte_one(r1o | _ASCE_TYPE_REGION1, raddr);
1307 r2t = (unsigned long *)(*r1e & _REGION_ENTRY_ORIGIN);
1308 *r1e = _REGION1_ENTRY_EMPTY;
1309 __gmap_unshadow_r2t(sg, raddr, r2t);
1310 /* Free region 2 table */
1311 page = pfn_to_page(__pa(r2t) >> PAGE_SHIFT);
1312 list_del(&page->lru);
1313 __free_pages(page, 2);
1314 }
1315
1316 /**
1317 * __gmap_unshadow_r1t - remove all entries from a shadow region-1 table
1318 * @sg: pointer to the shadow guest address space structure
1319 * @raddr: rmap address in the shadow guest address space
1320 * @r1t: pointer to the start of a shadow region-1 table
1321 *
1322 * Called with the shadow->guest_table_lock
1323 */
1324 static void __gmap_unshadow_r1t(struct gmap *sg, unsigned long raddr,
1325 unsigned long *r1t)
1326 {
1327 unsigned long asce, *r2t;
1328 struct page *page;
1329 int i;
1330
1331 BUG_ON(!gmap_is_shadow(sg));
1332 asce = (unsigned long) r1t | _ASCE_TYPE_REGION1;
1333 for (i = 0; i < 2048; i++, raddr += 1UL << 53) {
1334 if (r1t[i] & _REGION_ENTRY_INVALID)
1335 continue;
1336 r2t = (unsigned long *)(r1t[i] & _REGION_ENTRY_ORIGIN);
1337 __gmap_unshadow_r2t(sg, raddr, r2t);
1338 /* Clear entry and flush translation r1t -> r2t */
1339 gmap_idte_one(asce, raddr);
1340 r1t[i] = _REGION1_ENTRY_EMPTY;
1341 /* Free region 2 table */
1342 page = pfn_to_page(__pa(r2t) >> PAGE_SHIFT);
1343 list_del(&page->lru);
1344 __free_pages(page, 2);
1345 }
1346 }
1347
1348 /**
1349 * gmap_unshadow - remove a shadow page table completely
1350 * @sg: pointer to the shadow guest address space structure
1351 *
1352 * Called with sg->guest_table_lock
1353 */
1354 static void gmap_unshadow(struct gmap *sg)
1355 {
1356 unsigned long *table;
1357
1358 BUG_ON(!gmap_is_shadow(sg));
1359 if (sg->removed)
1360 return;
1361 sg->removed = 1;
1362 gmap_call_notifier(sg, 0, -1UL);
1363 table = (unsigned long *)(sg->asce & _ASCE_ORIGIN);
1364 switch (sg->asce & _ASCE_TYPE_MASK) {
1365 case _ASCE_TYPE_REGION1:
1366 __gmap_unshadow_r1t(sg, 0, table);
1367 break;
1368 case _ASCE_TYPE_REGION2:
1369 __gmap_unshadow_r2t(sg, 0, table);
1370 break;
1371 case _ASCE_TYPE_REGION3:
1372 __gmap_unshadow_r3t(sg, 0, table);
1373 break;
1374 case _ASCE_TYPE_SEGMENT:
1375 __gmap_unshadow_sgt(sg, 0, table);
1376 break;
1377 }
1378 }
1379
1380 /**
1381 * gmap_find_shadow - find a specific asce in the list of shadow tables
1382 * @parent: pointer to the parent gmap
1383 * @asce: ASCE for which the shadow table is created
1384 *
1385 * Returns the pointer to a gmap if a shadow table with the given asce is
1386 * already available, otherwise NULL
1387 */
1388 static struct gmap *gmap_find_shadow(struct gmap *parent, unsigned long asce)
1389 {
1390 struct gmap *sg;
1391
1392 list_for_each_entry(sg, &parent->children, list) {
1393 if (sg->orig_asce != asce || sg->removed)
1394 continue;
1395 atomic_inc(&sg->ref_count);
1396 return sg;
1397 }
1398 return NULL;
1399 }
1400
1401 /**
1402 * gmap_shadow - create/find a shadow guest address space
1403 * @parent: pointer to the parent gmap
1404 * @asce: ASCE for which the shadow table is created
1405 *
1406 * The pages of the top level page table referred to by the asce parameter
1407 * will be set to read-only and marked in the PGSTEs of the kvm process.
1408 * The shadow table will be removed automatically on any change to the
1409 * PTE mapping for the source table.
1410 *
1411 * Returns a guest address space structure, NULL if out of memory or if
1412 * anything goes wrong while protecting the top level pages.
1413 */
1414 struct gmap *gmap_shadow(struct gmap *parent, unsigned long asce)
1415 {
1416 struct gmap *sg, *new;
1417 unsigned long limit;
1418 int rc;
1419
1420 BUG_ON(gmap_is_shadow(parent));
1421 spin_lock(&parent->shadow_lock);
1422 sg = gmap_find_shadow(parent, asce);
1423 spin_unlock(&parent->shadow_lock);
1424 if (sg)
1425 return sg;
1426 /* Create a new shadow gmap */
1427 limit = -1UL >> (33 - (((asce & _ASCE_TYPE_MASK) >> 2) * 11));
1428 new = gmap_alloc(limit);
1429 if (!new)
1430 return NULL;
1431 new->mm = parent->mm;
1432 new->parent = gmap_get(parent);
1433 new->orig_asce = asce;
1434 down_read(&parent->mm->mmap_sem);
1435 rc = gmap_protect_range(parent, asce & _ASCE_ORIGIN,
1436 ((asce & _ASCE_TABLE_LENGTH) + 1) * 4096,
1437 PROT_READ, PGSTE_VSIE_BIT);
1438 up_read(&parent->mm->mmap_sem);
1439 if (rc) {
1440 atomic_set(&new->ref_count, 2);
1441 spin_lock(&parent->shadow_lock);
1442 /* Recheck if another CPU created the same shadow */
1443 sg = gmap_find_shadow(parent, asce);
1444 if (!sg) {
1445 list_add(&new->list, &parent->children);
1446 sg = new;
1447 new = NULL;
1448 }
1449 spin_unlock(&parent->shadow_lock);
1450 }
1451 if (new)
1452 gmap_free(new);
1453 return sg;
1454 }
1455 EXPORT_SYMBOL_GPL(gmap_shadow);
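
/*
 * Illustrative sketch, not part of the original file: obtain (or reuse)
 * a shadow gmap for a nested guest ASCE and release it again when the
 * vsie context goes away. gmap_shadow() returns with an extra reference
 * that gmap_put() drops; all names are hypothetical.
 */
static inline void gmap_example_vsie(struct gmap *parent,
				     unsigned long guest_asce)
{
	struct gmap *sg;

	sg = gmap_shadow(parent, guest_asce);
	if (!sg)
		return;
	/* ... populate shadow tables on demand, run the nested guest ... */
	gmap_put(sg);
}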
1456
1457 /**
1458 * gmap_shadow_r2t - create an empty shadow region 2 table
1459 * @sg: pointer to the shadow guest address space structure
1460 * @saddr: faulting address in the shadow gmap
1461 * @r2t: parent gmap address of the region 2 table to get shadowed
1462 *
1463 * The r2t parameter specifies the address of the source table. The
1464 * four pages of the source table are made read-only in the parent gmap
1465 * address space. A write to the source table area @r2t will automatically
1466 * remove the shadow r2 table and all of its descendants.
1467 *
1468 * Returns 0 if successfully shadowed or already shadowed, -EAGAIN if the
1469 * shadow table structure is incomplete, -ENOMEM if out of memory and
1470 * -EFAULT if an address in the parent gmap could not be resolved.
1471 *
1472 * Called with sg->mm->mmap_sem in read.
1473 */
1474 int gmap_shadow_r2t(struct gmap *sg, unsigned long saddr, unsigned long r2t)
1475 {
1476 unsigned long raddr, origin, offset, len;
1477 unsigned long *s_r2t, *table;
1478 struct page *page;
1479 int rc;
1480
1481 BUG_ON(!gmap_is_shadow(sg));
1482 /* Allocate a shadow region second table */
1483 page = alloc_pages(GFP_KERNEL, 2);
1484 if (!page)
1485 return -ENOMEM;
1486 page->index = r2t & _REGION_ENTRY_ORIGIN;
1487 s_r2t = (unsigned long *) page_to_phys(page);
1488 /* Install shadow region second table */
1489 spin_lock(&sg->guest_table_lock);
1490 table = gmap_table_walk(sg, saddr, 4); /* get region-1 pointer */
1491 if (!table) {
1492 rc = -EAGAIN; /* Race with unshadow */
1493 goto out_free;
1494 }
1495 if (!(*table & _REGION_ENTRY_INVALID)) {
1496 rc = 0; /* Already established */
1497 goto out_free;
1498 }
1499 crst_table_init(s_r2t, _REGION2_ENTRY_EMPTY);
1500 *table = (unsigned long) s_r2t |
1501 _REGION_ENTRY_LENGTH | _REGION_ENTRY_TYPE_R1;
1502 list_add(&page->lru, &sg->crst_list);
1503 spin_unlock(&sg->guest_table_lock);
1504 /* Make r2t read-only in parent gmap page table */
1505 raddr = (saddr & 0xffe0000000000000UL) | _SHADOW_RMAP_REGION1;
1506 origin = r2t & _REGION_ENTRY_ORIGIN;
1507 offset = ((r2t & _REGION_ENTRY_OFFSET) >> 6) * 4096;
1508 len = ((r2t & _REGION_ENTRY_LENGTH) + 1) * 4096 - offset;
1509 rc = gmap_protect_rmap(sg, raddr, origin + offset, len, PROT_READ);
1510 if (rc) {
1511 spin_lock(&sg->guest_table_lock);
1512 gmap_unshadow_r2t(sg, raddr);
1513 spin_unlock(&sg->guest_table_lock);
1514 }
1515 return rc;
1516 out_free:
1517 spin_unlock(&sg->guest_table_lock);
1518 __free_pages(page, 2);
1519 return rc;
1520 }
1521 EXPORT_SYMBOL_GPL(gmap_shadow_r2t);
1522
1523 /**
1524 * gmap_shadow_r3t - create a shadow region 3 table
1525 * @sg: pointer to the shadow guest address space structure
1526 * @saddr: faulting address in the shadow gmap
1527 * @r3t: parent gmap address of the region 3 table to get shadowed
1528 *
1529 * Returns 0 if successfully shadowed or already shadowed, -EAGAIN if the
1530 * shadow table structure is incomplete, -ENOMEM if out of memory and
1531 * -EFAULT if an address in the parent gmap could not be resolved.
1532 *
1533 * Called with sg->mm->mmap_sem in read.
1534 */
1535 int gmap_shadow_r3t(struct gmap *sg, unsigned long saddr, unsigned long r3t)
1536 {
1537 unsigned long raddr, origin, offset, len;
1538 unsigned long *s_r3t, *table;
1539 struct page *page;
1540 int rc;
1541
1542 BUG_ON(!gmap_is_shadow(sg));
1543 /* Allocate a shadow region third table */
1544 page = alloc_pages(GFP_KERNEL, 2);
1545 if (!page)
1546 return -ENOMEM;
1547 page->index = r3t & _REGION_ENTRY_ORIGIN;
1548 s_r3t = (unsigned long *) page_to_phys(page);
1549 /* Install shadow region third table */
1550 spin_lock(&sg->guest_table_lock);
1551 table = gmap_table_walk(sg, saddr, 3); /* get region-2 pointer */
1552 if (!table) {
1553 rc = -EAGAIN; /* Race with unshadow */
1554 goto out_free;
1555 }
1556 if (!(*table & _REGION_ENTRY_INVALID)) {
1557 rc = 0; /* Already established */
1558 goto out_free;
1559 }
1560 crst_table_init(s_r3t, _REGION3_ENTRY_EMPTY);
1561 *table = (unsigned long) s_r3t |
1562 _REGION_ENTRY_LENGTH | _REGION_ENTRY_TYPE_R2;
1563 list_add(&page->lru, &sg->crst_list);
1564 spin_unlock(&sg->guest_table_lock);
1565 /* Make r3t read-only in parent gmap page table */
1566 raddr = (saddr & 0xfffffc0000000000UL) | _SHADOW_RMAP_REGION2;
1567 origin = r3t & _REGION_ENTRY_ORIGIN;
1568 offset = ((r3t & _REGION_ENTRY_OFFSET) >> 6) * 4096;
1569 len = ((r3t & _REGION_ENTRY_LENGTH) + 1) * 4096 - offset;
1570 rc = gmap_protect_rmap(sg, raddr, origin + offset, len, PROT_READ);
1571 if (rc) {
1572 spin_lock(&sg->guest_table_lock);
1573 gmap_unshadow_r3t(sg, raddr);
1574 spin_unlock(&sg->guest_table_lock);
1575 }
1576 return rc;
1577 out_free:
1578 spin_unlock(&sg->guest_table_lock);
1579 __free_pages(page, 2);
1580 return rc;
1581 }
1582 EXPORT_SYMBOL_GPL(gmap_shadow_r3t);
1583
1584 /**
1585 * gmap_shadow_sgt - create a shadow segment table
1586 * @sg: pointer to the shadow guest address space structure
1587 * @saddr: faulting address in the shadow gmap
1588 * @sgt: parent gmap address of the segment table to get shadowed
1589 *
1590 * Returns: 0 if successfully shadowed or already shadowed, -EAGAIN if the
1591 * shadow table structure is incomplete, -ENOMEM if out of memory and
1592 * -EFAULT if an address in the parent gmap could not be resolved.
1593 *
1594 * Called with sg->mm->mmap_sem in read.
1595 */
1596 int gmap_shadow_sgt(struct gmap *sg, unsigned long saddr, unsigned long sgt)
1597 {
1598 unsigned long raddr, origin, offset, len;
1599 unsigned long *s_sgt, *table;
1600 struct page *page;
1601 int rc;
1602
1603 BUG_ON(!gmap_is_shadow(sg));
1604 /* Allocate a shadow segment table */
1605 page = alloc_pages(GFP_KERNEL, 2);
1606 if (!page)
1607 return -ENOMEM;
1608 page->index = sgt & _REGION_ENTRY_ORIGIN;
1609 s_sgt = (unsigned long *) page_to_phys(page);
1610 /* Install shadow segment table */
1611 spin_lock(&sg->guest_table_lock);
1612 table = gmap_table_walk(sg, saddr, 2); /* get region-3 pointer */
1613 if (!table) {
1614 rc = -EAGAIN; /* Race with unshadow */
1615 goto out_free;
1616 }
1617 if (!(*table & _REGION_ENTRY_INVALID)) {
1618 rc = 0; /* Already established */
1619 goto out_free;
1620 }
1621 crst_table_init(s_sgt, _SEGMENT_ENTRY_EMPTY);
1622 *table = (unsigned long) s_sgt |
1623 _REGION_ENTRY_LENGTH | _REGION_ENTRY_TYPE_R3;
1624 list_add(&page->lru, &sg->crst_list);
1625 spin_unlock(&sg->guest_table_lock);
1626 /* Make sgt read-only in parent gmap page table */
1627 raddr = (saddr & 0xffffffff80000000UL) | _SHADOW_RMAP_REGION3;
1628 origin = sgt & _REGION_ENTRY_ORIGIN;
1629 offset = ((sgt & _REGION_ENTRY_OFFSET) >> 6) * 4096;
1630 len = ((sgt & _REGION_ENTRY_LENGTH) + 1) * 4096 - offset;
1631 rc = gmap_protect_rmap(sg, raddr, origin + offset, len, PROT_READ);
1632 if (rc) {
1633 spin_lock(&sg->guest_table_lock);
1634 gmap_unshadow_sgt(sg, raddr);
1635 spin_unlock(&sg->guest_table_lock);
1636 }
1637 return rc;
1638 out_free:
1639 spin_unlock(&sg->guest_table_lock);
1640 __free_pages(page, 2);
1641 return rc;
1642 }
1643 EXPORT_SYMBOL_GPL(gmap_shadow_sgt);
1644
1645 /**
1646 * gmap_shadow_pgt_lookup - find a shadow page table
1647 * @sg: pointer to the shadow guest address space structure
1648 * @saddr: the address in the shadow guest address space
1649 * @pgt: parent gmap address of the page table to get shadowed
1650 * @dat_protection: if the pgtable is marked as protected by dat
1651 *
1652 * Returns 0 if the shadow page table was found and -EAGAIN if the page
1653 * table was not found.
1654 *
1655 * Called with sg->mm->mmap_sem in read.
1656 */
1657 int gmap_shadow_pgt_lookup(struct gmap *sg, unsigned long saddr,
1658 unsigned long *pgt, int *dat_protection)
1659 {
1660 unsigned long *table;
1661 struct page *page;
1662 int rc;
1663
1664 BUG_ON(!gmap_is_shadow(sg));
1665 spin_lock(&sg->guest_table_lock);
1666 table = gmap_table_walk(sg, saddr, 1); /* get segment pointer */
1667 if (table && !(*table & _SEGMENT_ENTRY_INVALID)) {
1668 /* Shadow page tables are full pages (pte+pgste) */
1669 page = pfn_to_page(*table >> PAGE_SHIFT);
1670 *pgt = page->index;
1671 *dat_protection = !!(*table & _SEGMENT_ENTRY_PROTECT);
1672 rc = 0;
1673 } else {
1674 rc = -EAGAIN;
1675 }
1676 spin_unlock(&sg->guest_table_lock);
1677 return rc;
1678
1679 }
1680 EXPORT_SYMBOL_GPL(gmap_shadow_pgt_lookup);
1681
1682 /**
1683 * gmap_shadow_pgt - instantiate a shadow page table
1684 * @sg: pointer to the shadow guest address space structure
1685 * @saddr: faulting address in the shadow gmap
1686 * @pgt: parent gmap address of the page table to get shadowed
1687 *
1688 * Returns 0 if successfully shadowed or already shadowed, -EAGAIN if the
1689 * shadow table structure is incomplete, -ENOMEM if out of memory and
1690 * -EFAULT if an address in the parent gmap could not be resolved.
1691 *
1692 * Called with sg->mm->mmap_sem in read.
1693 */
1694 int gmap_shadow_pgt(struct gmap *sg, unsigned long saddr, unsigned long pgt)
1695 {
1696 unsigned long raddr, origin;
1697 unsigned long *s_pgt, *table;
1698 struct page *page;
1699 int rc;
1700
1701 BUG_ON(!gmap_is_shadow(sg));
1702 /* Allocate a shadow page table */
1703 page = page_table_alloc_pgste(sg->mm);
1704 if (!page)
1705 return -ENOMEM;
1706 page->index = pgt & _SEGMENT_ENTRY_ORIGIN;
1707 s_pgt = (unsigned long *) page_to_phys(page);
1708 /* Install shadow page table */
1709 spin_lock(&sg->guest_table_lock);
1710 table = gmap_table_walk(sg, saddr, 1); /* get segment pointer */
1711 if (!table) {
1712 rc = -EAGAIN; /* Race with unshadow */
1713 goto out_free;
1714 }
1715 if (!(*table & _SEGMENT_ENTRY_INVALID)) {
1716 rc = 0; /* Already established */
1717 goto out_free;
1718 }
1719 *table = (unsigned long) s_pgt | _SEGMENT_ENTRY |
1720 (pgt & _SEGMENT_ENTRY_PROTECT);
1721 list_add(&page->lru, &sg->pt_list);
1722 spin_unlock(&sg->guest_table_lock);
1723 /* Make pgt read-only in parent gmap page table (not the pgste) */
1724 raddr = (saddr & 0xfffffffffff00000UL) | _SHADOW_RMAP_SEGMENT;
1725 origin = pgt & _SEGMENT_ENTRY_ORIGIN & PAGE_MASK;
1726 rc = gmap_protect_rmap(sg, raddr, origin, PAGE_SIZE, PROT_READ);
1727 if (rc) {
1728 spin_lock(&sg->guest_table_lock);
1729 gmap_unshadow_pgt(sg, raddr);
1730 spin_unlock(&sg->guest_table_lock);
1731 }
1732 return rc;
1733 out_free:
1734 spin_unlock(&sg->guest_table_lock);
1735 page_table_free_pgste(page);
1736 return rc;
1737
1738 }
1739 EXPORT_SYMBOL_GPL(gmap_shadow_pgt);
1740
1741 /**
1742 * gmap_shadow_page - create a shadow page mapping
1743 * @sg: pointer to the shadow guest address space structure
1744 * @saddr: faulting address in the shadow gmap
1745 * @paddr: parent gmap address to get mapped at @saddr
1746 * @write: =1 map r/w, =0 map r/o
1747 *
1748 * Returns 0 if successfully shadowed or already shadowed, -EAGAIN if the
1749 * shadow table structure is incomplete, -ENOMEM if out of memory and
1750 * -EFAULT if an address in the parent gmap could not be resolved.
1751 *
1752 * Called with sg->mm->mmap_sem in read.
1753 */
1754 int gmap_shadow_page(struct gmap *sg, unsigned long saddr,
1755 unsigned long paddr, int write)
1756 {
1757 struct gmap *parent;
1758 struct gmap_rmap *rmap;
1759 unsigned long vmaddr;
1760 spinlock_t *ptl;
1761 pte_t *sptep, *tptep;
1762 int rc;
1763
1764 BUG_ON(!gmap_is_shadow(sg));
1765 parent = sg->parent;
1766
1767 rmap = kzalloc(sizeof(*rmap), GFP_KERNEL);
1768 if (!rmap)
1769 return -ENOMEM;
1770 rmap->raddr = (saddr & PAGE_MASK) | _SHADOW_RMAP_PGTABLE;
1771
1772 while (1) {
1773 vmaddr = __gmap_translate(parent, paddr);
1774 if (IS_ERR_VALUE(vmaddr)) {
1775 rc = vmaddr;
1776 break;
1777 }
1778 rc = radix_tree_preload(GFP_KERNEL);
1779 if (rc)
1780 break;
1781 rc = -EAGAIN;
1782 sptep = gmap_pte_op_walk(parent, paddr, &ptl);
1783 if (sptep) {
1784 spin_lock(&sg->guest_table_lock);
1785 /* Get page table pointer */
1786 tptep = (pte_t *) gmap_table_walk(sg, saddr, 0);
1787 if (!tptep) {
1788 spin_unlock(&sg->guest_table_lock);
1789 gmap_pte_op_end(ptl);
1790 radix_tree_preload_end();
1791 break;
1792 }
1793 rc = ptep_shadow_pte(sg->mm, saddr,
1794 sptep, tptep, write);
1795 if (rc > 0) {
1796 /* Success and a new mapping */
1797 gmap_insert_rmap(sg, vmaddr, rmap);
1798 rmap = NULL;
1799 rc = 0;
1800 }
1801 gmap_pte_op_end(ptl);
1802 spin_unlock(&sg->guest_table_lock);
1803 }
1804 radix_tree_preload_end();
1805 if (!rc)
1806 break;
1807 rc = gmap_pte_op_fixup(parent, paddr, vmaddr);
1808 if (rc)
1809 break;
1810 }
1811 kfree(rmap);
1812 return rc;
1813 }
1814 EXPORT_SYMBOL_GPL(gmap_shadow_page);
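/*
 * Sketch of the final step of a shadow fault: mapping the page itself.
 * "guest_page_addr" (the parent gmap address taken from the guest's pte)
 * and "dat_write" (whether every level of the guest tables permits a
 * writable mapping) are hypothetical parameters provided by the caller.
 */
static int shadow_page_fault_sketch(struct gmap *sg, unsigned long saddr,
				    unsigned long guest_page_addr,
				    int dat_write)
{
	/*
	 * -EAGAIN means part of the shadow table hierarchy was torn down
	 * concurrently; the caller restarts the whole translation.
	 */
	return gmap_shadow_page(sg, saddr, guest_page_addr, dat_write);
}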
1815
1816 /**
1817 * gmap_shadow_notify - handle notifications for shadow gmap
1818 * @sg: pointer to the shadow guest address space structure
 * @vmaddr: host virtual address covered by the invalidated pte
 * @offset: byte offset of the affected page within its 1 MB segment
 * @pte: pointer to the invalidated page table entry
 *
1819 * Called with sg->parent->shadow_lock.
1820 */
1821 static void gmap_shadow_notify(struct gmap *sg, unsigned long vmaddr,
1822 unsigned long offset, pte_t *pte)
1823 {
1824 struct gmap_rmap *rmap, *rnext, *head;
1825 unsigned long gaddr, start, end, bits, raddr;
1826 unsigned long *table;
1827
1828 BUG_ON(!gmap_is_shadow(sg));
1829 spin_lock(&sg->parent->guest_table_lock);
1830 table = radix_tree_lookup(&sg->parent->host_to_guest,
1831 vmaddr >> PMD_SHIFT);
1832 gaddr = table ? __gmap_segment_gaddr(table) + offset : 0;
1833 spin_unlock(&sg->parent->guest_table_lock);
1834 if (!table)
1835 return;
1836
1837 spin_lock(&sg->guest_table_lock);
1838 if (sg->removed) {
1839 spin_unlock(&sg->guest_table_lock);
1840 return;
1841 }
1842 /* Check for top level table */
1843 start = sg->orig_asce & _ASCE_ORIGIN;
1844 end = start + ((sg->orig_asce & _ASCE_TABLE_LENGTH) + 1) * 4096;
1845 if (gaddr >= start && gaddr < end) {
1846 /* The complete shadow table has to go */
1847 gmap_unshadow(sg);
1848 spin_unlock(&sg->guest_table_lock);
1849 list_del(&sg->list);
1850 gmap_put(sg);
1851 return;
1852 }
1853 /* Remove the page table tree for one specific entry */
1854 head = radix_tree_delete(&sg->host_to_rmap, vmaddr >> PAGE_SHIFT);
1855 gmap_for_each_rmap_safe(rmap, rnext, head) {
1856 bits = rmap->raddr & _SHADOW_RMAP_MASK;
1857 raddr = rmap->raddr ^ bits;
1858 switch (bits) {
1859 case _SHADOW_RMAP_REGION1:
1860 gmap_unshadow_r2t(sg, raddr);
1861 break;
1862 case _SHADOW_RMAP_REGION2:
1863 gmap_unshadow_r3t(sg, raddr);
1864 break;
1865 case _SHADOW_RMAP_REGION3:
1866 gmap_unshadow_sgt(sg, raddr);
1867 break;
1868 case _SHADOW_RMAP_SEGMENT:
1869 gmap_unshadow_pgt(sg, raddr);
1870 break;
1871 case _SHADOW_RMAP_PGTABLE:
1872 gmap_unshadow_page(sg, raddr);
1873 break;
1874 }
1875 kfree(rmap);
1876 }
1877 spin_unlock(&sg->guest_table_lock);
1878 }
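/*
 * The rmap entries consumed above carry both a shadow address and the
 * level of the shadow table to tear down, encoded in the low bits. A
 * small sketch of the encode/decode pair as used by gmap_shadow_page()
 * and gmap_shadow_notify(); the function names are illustrative.
 */
static unsigned long rmap_encode_sketch(unsigned long saddr)
{
	/* a page level entry, as created in gmap_shadow_page() */
	return (saddr & PAGE_MASK) | _SHADOW_RMAP_PGTABLE;
}

static void rmap_decode_sketch(unsigned long raddr, unsigned long *addr,
			       unsigned long *level_bits)
{
	*level_bits = raddr & _SHADOW_RMAP_MASK; /* which level to unshadow */
	*addr = raddr ^ *level_bits;		 /* shadow address, bits cleared */
}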
1879
1880 /**
1881 * ptep_notify - call all invalidation callbacks for a specific pte.
1882 * @mm: pointer to the process mm_struct
1883 * @vmaddr: virtual address in the process address space
1884 * @pte: pointer to the page table entry
1885 * @bits: bits from the pgste that caused the notify call
1886 *
1887 * This function is assumed to be called with the page table lock held
1888 * for the pte to notify.
1889 */
1890 void ptep_notify(struct mm_struct *mm, unsigned long vmaddr,
1891 pte_t *pte, unsigned long bits)
1892 {
1893 unsigned long offset, gaddr;
1894 unsigned long *table;
1895 struct gmap *gmap, *sg, *next;
1896
1897 offset = ((unsigned long) pte) & (255 * sizeof(pte_t));
1898 offset = offset * (4096 / sizeof(pte_t));
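/*
 * Worked example for the two lines above, with 4 KB pages and 8 byte
 * ptes as on s390: a pte at index 5 of its 256 entry page table lies at
 * byte offset 5 * 8 = 40; masking with 255 * 8 = 2040 keeps exactly that
 * offset, and multiplying by 4096 / 8 = 512 gives 40 * 512 = 5 * 4096,
 * i.e. the byte offset of guest page 5 within its 1 MB segment.
 */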
1899 rcu_read_lock();
1900 list_for_each_entry_rcu(gmap, &mm->context.gmap_list, list) {
1901 if (!list_empty(&gmap->children) && (bits & PGSTE_VSIE_BIT)) {
1902 spin_lock(&gmap->shadow_lock);
1903 list_for_each_entry_safe(sg, next,
1904 &gmap->children, list)
1905 gmap_shadow_notify(sg, vmaddr, offset, pte);
1906 spin_unlock(&gmap->shadow_lock);
1907 }
1908 if (!(bits & PGSTE_IN_BIT))
1909 continue;
1910 spin_lock(&gmap->guest_table_lock);
1911 table = radix_tree_lookup(&gmap->host_to_guest,
1912 vmaddr >> PMD_SHIFT);
1913 if (table)
1914 gaddr = __gmap_segment_gaddr(table) + offset;
1915 spin_unlock(&gmap->guest_table_lock);
1916 if (table)
1917 gmap_call_notifier(gmap, gaddr, gaddr + PAGE_SIZE - 1);
1918 }
1919 rcu_read_unlock();
1920 }
1921 EXPORT_SYMBOL_GPL(ptep_notify);
1922
1923 static inline void thp_split_mm(struct mm_struct *mm)
1924 {
1925 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
1926 struct vm_area_struct *vma;
1927 unsigned long addr;
1928
1929 for (vma = mm->mmap; vma != NULL; vma = vma->vm_next) {
1930 for (addr = vma->vm_start;
1931 addr < vma->vm_end;
1932 addr += PAGE_SIZE)
1933 follow_page(vma, addr, FOLL_SPLIT);
1934 vma->vm_flags &= ~VM_HUGEPAGE;
1935 vma->vm_flags |= VM_NOHUGEPAGE;
1936 }
1937 mm->def_flags |= VM_NOHUGEPAGE;
1938 #endif
1939 }
1940
1941 /*
1942 * switch on pgstes for the current userspace process (for kvm)
1943 */
1944 int s390_enable_sie(void)
1945 {
1946 struct mm_struct *mm = current->mm;
1947
1948 /* Do we have pgstes? If yes, we are done */
1949 if (mm_has_pgste(mm))
1950 return 0;
1951 /* Fail if the page tables are 2K */
1952 if (!mm_alloc_pgste(mm))
1953 return -EINVAL;
1954 down_write(&mm->mmap_sem);
1955 mm->context.has_pgste = 1;
1956 /* split thp mappings and disable thp for future mappings */
1957 thp_split_mm(mm);
1958 up_write(&mm->mmap_sem);
1959 return 0;
1960 }
1961 EXPORT_SYMBOL_GPL(s390_enable_sie);
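/*
 * A minimal sketch of how a hypervisor backend would use this: call it
 * once before creating guest mappings and only proceed if the mm really
 * carries pgstes. The wrapper name is illustrative only.
 */
static int prepare_mm_for_sie_sketch(void)
{
	int rc;

	/* marks current->mm as using pgstes and splits thp mappings */
	rc = s390_enable_sie();
	if (rc)
		return rc; /* -EINVAL: process runs with 2K page tables */
	/* from here on a gmap can be created for this mm */
	return 0;
}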
1962
1963 /*
1964 * Enable storage key handling from now on and initialize the storage
1965 * keys with the default key.
1966 */
1967 static int __s390_enable_skey(pte_t *pte, unsigned long addr,
1968 unsigned long next, struct mm_walk *walk)
1969 {
1970 /*
1971 * Remove all zero page mappings. After a policy forbidding zero page
1972 * mappings has been established, subsequent faults on these pages
1973 * will get fresh anonymous pages.
1974 */
1975 if (is_zero_pfn(pte_pfn(*pte)))
1976 ptep_xchg_direct(walk->mm, addr, pte, __pte(_PAGE_INVALID));
1977 /* Clear storage key */
1978 ptep_zap_key(walk->mm, addr, pte);
1979 return 0;
1980 }
1981
1982 int s390_enable_skey(void)
1983 {
1984 struct mm_walk walk = { .pte_entry = __s390_enable_skey };
1985 struct mm_struct *mm = current->mm;
1986 struct vm_area_struct *vma;
1987 int rc = 0;
1988
1989 down_write(&mm->mmap_sem);
1990 if (mm_use_skey(mm))
1991 goto out_up;
1992
1993 mm->context.use_skey = 1;
1994 for (vma = mm->mmap; vma; vma = vma->vm_next) {
1995 if (ksm_madvise(vma, vma->vm_start, vma->vm_end,
1996 MADV_UNMERGEABLE, &vma->vm_flags)) {
1997 mm->context.use_skey = 0;
1998 rc = -ENOMEM;
1999 goto out_up;
2000 }
2001 }
2002 mm->def_flags &= ~VM_MERGEABLE;
2003
2004 walk.mm = mm;
2005 walk_page_range(0, TASK_SIZE, &walk);
2006
2007 out_up:
2008 up_write(&mm->mmap_sem);
2009 return rc;
2010 }
2011 EXPORT_SYMBOL_GPL(s390_enable_skey);
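/*
 * Sketch of the lazy enablement pattern: storage key handling is only
 * switched on the first time the guest touches a storage key, e.g. from
 * an instruction handler. The wrapper name is illustrative only.
 */
static int enable_skey_on_first_use_sketch(void)
{
	if (mm_use_skey(current->mm))
		return 0;	/* fast path, already enabled */
	/* unmerges KSM pages and initializes storage keys; may -ENOMEM */
	return s390_enable_skey();
}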
2012
2013 /*
2014 * Reset CMMA state, make all pages stable again.
2015 */
2016 static int __s390_reset_cmma(pte_t *pte, unsigned long addr,
2017 unsigned long next, struct mm_walk *walk)
2018 {
2019 ptep_zap_unused(walk->mm, addr, pte, 1);
2020 return 0;
2021 }
2022
2023 void s390_reset_cmma(struct mm_struct *mm)
2024 {
2025 struct mm_walk walk = { .pte_entry = __s390_reset_cmma };
2026
2027 down_write(&mm->mmap_sem);
2028 walk.mm = mm;
2029 walk_page_range(0, TASK_SIZE, &walk);
2030 up_write(&mm->mmap_sem);
2031 }
2032 EXPORT_SYMBOL_GPL(s390_reset_cmma);
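/*
 * Sketch of a reset path: to drop all CMMA page state, e.g. across a
 * guest reboot, the hypervisor resets the mm behind the guest mapping
 * wholesale. The wrapper name is illustrative only.
 */
static void reset_guest_cmma_sketch(struct gmap *gmap)
{
	/* walk every pte of the mm and make the pages stable again */
	s390_reset_cmma(gmap->mm);
}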