/*
 * KVM guest address space mapping code
 *
 * Copyright IBM Corp. 2007, 2016
 * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
 */

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/smp.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/swapops.h>
#include <linux/ksm.h>
#include <linux/mman.h>

#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/gmap.h>
#include <asm/tlb.h>
/**
 * gmap_alloc - allocate and initialize a guest address space
 * @limit: maximum address of the gmap address space
 *
 * Returns a guest address space structure.
 */
static struct gmap *gmap_alloc(unsigned long limit)
{
	struct gmap *gmap;
	struct page *page;
	unsigned long *table;
	unsigned long etype, atype;

	if (limit < (1UL << 31)) {
		limit = (1UL << 31) - 1;
		atype = _ASCE_TYPE_SEGMENT;
		etype = _SEGMENT_ENTRY_EMPTY;
	} else if (limit < (1UL << 42)) {
		limit = (1UL << 42) - 1;
		atype = _ASCE_TYPE_REGION3;
		etype = _REGION3_ENTRY_EMPTY;
	} else if (limit < (1UL << 53)) {
		limit = (1UL << 53) - 1;
		atype = _ASCE_TYPE_REGION2;
		etype = _REGION2_ENTRY_EMPTY;
	} else {
		limit = -1UL;
		atype = _ASCE_TYPE_REGION1;
		etype = _REGION1_ENTRY_EMPTY;
	}
	gmap = kzalloc(sizeof(struct gmap), GFP_KERNEL);
	if (!gmap)
		goto out;
	INIT_LIST_HEAD(&gmap->crst_list);
	INIT_LIST_HEAD(&gmap->children);
	INIT_LIST_HEAD(&gmap->pt_list);
	INIT_RADIX_TREE(&gmap->guest_to_host, GFP_KERNEL);
	INIT_RADIX_TREE(&gmap->host_to_guest, GFP_ATOMIC);
	INIT_RADIX_TREE(&gmap->host_to_rmap, GFP_ATOMIC);
	spin_lock_init(&gmap->guest_table_lock);
	spin_lock_init(&gmap->shadow_lock);
	atomic_set(&gmap->ref_count, 1);
	page = alloc_pages(GFP_KERNEL, 2);
	if (!page)
		goto out_free;
	list_add(&page->lru, &gmap->crst_list);
	table = (unsigned long *) page_to_phys(page);
	crst_table_init(table, etype);
	gmap->table = table;
	gmap->asce = atype | _ASCE_TABLE_LENGTH |
		_ASCE_USER_BITS | __pa(table);
	gmap->asce_end = limit;
	return gmap;

out_free:
	kfree(gmap);
out:
	return NULL;
}
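/*
 * The limit checks in gmap_alloc() pick the smallest DAT table type that
 * covers the requested guest address space: a segment table spans 2 GB
 * (2048 x 1 MB segments), a region-3 table 4 TB, a region-2 table 8 PB,
 * and a region-1 table the full 64-bit space.  A 16 GB guest, for
 * example, falls into the "< 1UL << 42" branch and is given a region-3
 * table with the limit rounded up to (1UL << 42) - 1.
 */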
/**
 * gmap_create - create a guest address space
 * @mm: pointer to the parent mm_struct
 * @limit: maximum size of the gmap address space
 *
 * Returns a guest address space structure.
 */
struct gmap *gmap_create(struct mm_struct *mm, unsigned long limit)
{
	struct gmap *gmap;

	gmap = gmap_alloc(limit);
	if (!gmap)
		return NULL;
	gmap->mm = mm;
	spin_lock(&mm->context.gmap_lock);
	list_add_rcu(&gmap->list, &mm->context.gmap_list);
	spin_unlock(&mm->context.gmap_lock);
	return gmap;
}
EXPORT_SYMBOL_GPL(gmap_create);
static void gmap_flush_tlb(struct gmap *gmap)
{
	if (MACHINE_HAS_IDTE)
		__tlb_flush_asce(gmap->mm, gmap->asce);
	else
		__tlb_flush_global();
}
static void gmap_radix_tree_free(struct radix_tree_root *root)
{
	struct radix_tree_iter iter;
	unsigned long indices[16];
	unsigned long index;
	void **slot;
	int i, nr;

	/* A radix tree is freed by deleting all of its entries */
	index = 0;
	do {
		nr = 0;
		radix_tree_for_each_slot(slot, root, &iter, index) {
			indices[nr] = iter.index;
			if (++nr == 16)
				break;
		}
		for (i = 0; i < nr; i++) {
			index = indices[i];
			radix_tree_delete(root, index);
		}
	} while (nr > 0);
}
static void gmap_rmap_radix_tree_free(struct radix_tree_root *root)
{
	struct gmap_rmap *rmap, *rnext, *head;
	struct radix_tree_iter iter;
	unsigned long indices[16];
	unsigned long index;
	void **slot;
	int i, nr;

	/* A radix tree is freed by deleting all of its entries */
	index = 0;
	do {
		nr = 0;
		radix_tree_for_each_slot(slot, root, &iter, index) {
			indices[nr] = iter.index;
			if (++nr == 16)
				break;
		}
		for (i = 0; i < nr; i++) {
			index = indices[i];
			head = radix_tree_delete(root, index);
			gmap_for_each_rmap_safe(rmap, rnext, head)
				kfree(rmap);
		}
	} while (nr > 0);
}
/**
 * gmap_free - free a guest address space
 * @gmap: pointer to the guest address space structure
 *
 * No locks required. There are no references to this gmap anymore.
 */
static void gmap_free(struct gmap *gmap)
{
	struct page *page, *next;

	/* Free all segment & region tables. */
	list_for_each_entry_safe(page, next, &gmap->crst_list, lru)
		__free_pages(page, 2);
	gmap_radix_tree_free(&gmap->guest_to_host);
	gmap_radix_tree_free(&gmap->host_to_guest);

	/* Free additional data for a shadow gmap */
	if (gmap_is_shadow(gmap)) {
		/* Free all page tables. */
		list_for_each_entry_safe(page, next, &gmap->pt_list, lru)
			page_table_free_pgste(page);
		gmap_rmap_radix_tree_free(&gmap->host_to_rmap);
		/* Release reference to the parent */
		gmap_put(gmap->parent);
	}

	kfree(gmap);
}
/**
 * gmap_get - increase reference counter for guest address space
 * @gmap: pointer to the guest address space structure
 *
 * Returns the gmap pointer
 */
struct gmap *gmap_get(struct gmap *gmap)
{
	atomic_inc(&gmap->ref_count);
	return gmap;
}
EXPORT_SYMBOL_GPL(gmap_get);
/**
 * gmap_put - decrease reference counter for guest address space
 * @gmap: pointer to the guest address space structure
 *
 * If the reference counter reaches zero the guest address space is freed.
 */
void gmap_put(struct gmap *gmap)
{
	if (atomic_dec_return(&gmap->ref_count) == 0)
		gmap_free(gmap);
}
EXPORT_SYMBOL_GPL(gmap_put);
/**
 * gmap_remove - remove a guest address space but do not free it yet
 * @gmap: pointer to the guest address space structure
 */
void gmap_remove(struct gmap *gmap)
{
	struct gmap *sg, *next;

	/* Flush tlb. */
	gmap_flush_tlb(gmap);
	/* Remove all shadow gmaps linked to this gmap */
	if (!list_empty(&gmap->children)) {
		spin_lock(&gmap->shadow_lock);
		list_for_each_entry_safe(sg, next, &gmap->children, list) {
			list_del(&sg->list);
			gmap_put(sg);
		}
		spin_unlock(&gmap->shadow_lock);
	}
	/* Remove gmap from the per-mm list */
	spin_lock(&gmap->mm->context.gmap_lock);
	list_del_rcu(&gmap->list);
	spin_unlock(&gmap->mm->context.gmap_lock);
	synchronize_rcu();
	/* Put reference */
	gmap_put(gmap);
}
EXPORT_SYMBOL_GPL(gmap_remove);
/**
 * gmap_enable - switch primary space to the guest address space
 * @gmap: pointer to the guest address space structure
 */
void gmap_enable(struct gmap *gmap)
{
	S390_lowcore.gmap = (unsigned long) gmap;
}
EXPORT_SYMBOL_GPL(gmap_enable);

/**
 * gmap_disable - switch back to the standard primary address space
 * @gmap: pointer to the guest address space structure
 */
void gmap_disable(struct gmap *gmap)
{
	S390_lowcore.gmap = 0UL;
}
EXPORT_SYMBOL_GPL(gmap_disable);
/*
 * gmap_alloc_table is assumed to be called with mmap_sem held
 */
static int gmap_alloc_table(struct gmap *gmap, unsigned long *table,
			    unsigned long init, unsigned long gaddr)
{
	struct page *page;
	unsigned long *new;

	/* since we don't free the gmap table until gmap_free we can unlock */
	page = alloc_pages(GFP_KERNEL, 2);
	if (!page)
		return -ENOMEM;
	new = (unsigned long *) page_to_phys(page);
	crst_table_init(new, init);
	spin_lock(&gmap->guest_table_lock);
	if (*table & _REGION_ENTRY_INVALID) {
		list_add(&page->lru, &gmap->crst_list);
		*table = (unsigned long) new | _REGION_ENTRY_LENGTH |
			(*table & _REGION_ENTRY_TYPE_MASK);
		page->index = gaddr;
		page = NULL;
	}
	spin_unlock(&gmap->guest_table_lock);
	if (page)
		__free_pages(page, 2);
	return 0;
}
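/*
 * Note the allocate-first pattern above: the new crst page is allocated
 * and initialized before guest_table_lock is taken, so the allocation
 * may sleep.  If another CPU installed an entry in the meantime (*table
 * no longer invalid), the speculatively allocated page is simply freed
 * again after the lock is dropped.
 */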
/**
 * __gmap_segment_gaddr - find virtual address from segment pointer
 * @entry: pointer to a segment table entry in the guest address space
 *
 * Returns the virtual address in the guest address space for the segment
 */
static unsigned long __gmap_segment_gaddr(unsigned long *entry)
{
	struct page *page;
	unsigned long offset, mask;

	offset = (unsigned long) entry / sizeof(unsigned long);
	offset = (offset & (PTRS_PER_PMD - 1)) * PMD_SIZE;
	mask = ~(PTRS_PER_PMD * sizeof(pmd_t) - 1);
	page = virt_to_page((void *)((unsigned long) entry & mask));
	return page->index + offset;
}
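/*
 * Worked example for __gmap_segment_gaddr(): a segment table occupies
 * four pages (2048 entries * 8 bytes).  Masking the entry pointer with
 * ~(PTRS_PER_PMD * sizeof(pmd_t) - 1) yields the table origin, whose
 * struct page carries the guest address of the mapped region in
 * page->index (stored by gmap_alloc_table).  The entry's index within
 * the table, times PMD_SIZE (1 MB), is then the offset of the segment.
 */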
/**
 * __gmap_unlink_by_vmaddr - unlink a single segment via a host address
 * @gmap: pointer to the guest address space structure
 * @vmaddr: address in the host process address space
 *
 * Returns 1 if a TLB flush is required
 */
static int __gmap_unlink_by_vmaddr(struct gmap *gmap, unsigned long vmaddr)
{
	unsigned long *entry;
	int flush = 0;

	BUG_ON(gmap_is_shadow(gmap));
	spin_lock(&gmap->guest_table_lock);
	entry = radix_tree_delete(&gmap->host_to_guest, vmaddr >> PMD_SHIFT);
	if (entry) {
		flush = (*entry != _SEGMENT_ENTRY_INVALID);
		*entry = _SEGMENT_ENTRY_INVALID;
	}
	spin_unlock(&gmap->guest_table_lock);
	return flush;
}
/**
 * __gmap_unmap_by_gaddr - unmap a single segment via a guest address
 * @gmap: pointer to the guest address space structure
 * @gaddr: address in the guest address space
 *
 * Returns 1 if a TLB flush is required
 */
static int __gmap_unmap_by_gaddr(struct gmap *gmap, unsigned long gaddr)
{
	unsigned long vmaddr;

	vmaddr = (unsigned long) radix_tree_delete(&gmap->guest_to_host,
						   gaddr >> PMD_SHIFT);
	return vmaddr ? __gmap_unlink_by_vmaddr(gmap, vmaddr) : 0;
}
/**
 * gmap_unmap_segment - unmap segment from the guest address space
 * @gmap: pointer to the guest address space structure
 * @to: address in the guest address space
 * @len: length of the memory area to unmap
 *
 * Returns 0 if the unmap succeeded, -EINVAL if not.
 */
int gmap_unmap_segment(struct gmap *gmap, unsigned long to, unsigned long len)
{
	unsigned long off;
	int flush;

	BUG_ON(gmap_is_shadow(gmap));
	if ((to | len) & (PMD_SIZE - 1))
		return -EINVAL;
	if (len == 0 || to + len < to)
		return -EINVAL;

	flush = 0;
	down_write(&gmap->mm->mmap_sem);
	for (off = 0; off < len; off += PMD_SIZE)
		flush |= __gmap_unmap_by_gaddr(gmap, to + off);
	up_write(&gmap->mm->mmap_sem);
	if (flush)
		gmap_flush_tlb(gmap);
	return 0;
}
EXPORT_SYMBOL_GPL(gmap_unmap_segment);
/**
 * gmap_map_segment - map a segment to the guest address space
 * @gmap: pointer to the guest address space structure
 * @from: source address in the parent address space
 * @to: target address in the guest address space
 * @len: length of the memory area to map
 *
 * Returns 0 if the mmap succeeded, -EINVAL or -ENOMEM if not.
 */
int gmap_map_segment(struct gmap *gmap, unsigned long from,
		     unsigned long to, unsigned long len)
{
	unsigned long off;
	int flush;

	BUG_ON(gmap_is_shadow(gmap));
	if ((from | to | len) & (PMD_SIZE - 1))
		return -EINVAL;
	if (len == 0 || from + len < from || to + len < to ||
	    from + len - 1 > TASK_MAX_SIZE || to + len - 1 > gmap->asce_end)
		return -EINVAL;

	flush = 0;
	down_write(&gmap->mm->mmap_sem);
	for (off = 0; off < len; off += PMD_SIZE) {
		/* Remove old translation */
		flush |= __gmap_unmap_by_gaddr(gmap, to + off);
		/* Store new translation */
		if (radix_tree_insert(&gmap->guest_to_host,
				      (to + off) >> PMD_SHIFT,
				      (void *) from + off))
			break;
	}
	up_write(&gmap->mm->mmap_sem);
	if (flush)
		gmap_flush_tlb(gmap);
	if (off >= len)
		return 0;
	gmap_unmap_segment(gmap, to, len);
	return -ENOMEM;
}
EXPORT_SYMBOL_GPL(gmap_map_segment);
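/*
 * Usage sketch (not part of this file): a typical caller creates the
 * gmap once and then populates it per memory slot, e.g.
 *
 *	gmap = gmap_create(current->mm, mem_limit);
 *	rc = gmap_map_segment(gmap, userspace_addr, guest_phys_addr, size);
 *
 * from/to/len must all be 1 MB segment aligned, otherwise -EINVAL is
 * returned.
 */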
/**
 * __gmap_translate - translate a guest address to a user space address
 * @gmap: pointer to guest mapping meta data structure
 * @gaddr: guest address
 *
 * Returns user space address which corresponds to the guest address or
 * -EFAULT if no such mapping exists.
 * This function does not establish potentially missing page table entries.
 * The mmap_sem of the mm that belongs to the address space must be held
 * when this function gets called.
 *
 * Note: Can also be called for shadow gmaps.
 */
unsigned long __gmap_translate(struct gmap *gmap, unsigned long gaddr)
{
	unsigned long vmaddr;

	vmaddr = (unsigned long)
		radix_tree_lookup(&gmap->guest_to_host, gaddr >> PMD_SHIFT);
	/* Note: guest_to_host is empty for a shadow gmap */
	return vmaddr ? (vmaddr | (gaddr & ~PMD_MASK)) : -EFAULT;
}
EXPORT_SYMBOL_GPL(__gmap_translate);
/**
 * gmap_translate - translate a guest address to a user space address
 * @gmap: pointer to guest mapping meta data structure
 * @gaddr: guest address
 *
 * Returns user space address which corresponds to the guest address or
 * -EFAULT if no such mapping exists.
 * This function does not establish potentially missing page table entries.
 */
unsigned long gmap_translate(struct gmap *gmap, unsigned long gaddr)
{
	unsigned long rc;

	down_read(&gmap->mm->mmap_sem);
	rc = __gmap_translate(gmap, gaddr);
	up_read(&gmap->mm->mmap_sem);
	return rc;
}
EXPORT_SYMBOL_GPL(gmap_translate);
/**
 * gmap_unlink - disconnect a page table from the gmap shadow tables
 * @mm: pointer to the parent mm_struct
 * @table: pointer to the host page table
 * @vmaddr: vm address associated with the host page table
 */
void gmap_unlink(struct mm_struct *mm, unsigned long *table,
		 unsigned long vmaddr)
{
	struct gmap *gmap;
	int flush;

	rcu_read_lock();
	list_for_each_entry_rcu(gmap, &mm->context.gmap_list, list) {
		flush = __gmap_unlink_by_vmaddr(gmap, vmaddr);
		if (flush)
			gmap_flush_tlb(gmap);
	}
	rcu_read_unlock();
}
/**
 * __gmap_link - set up shadow page tables to connect a host to a guest address
 * @gmap: pointer to guest mapping meta data structure
 * @gaddr: guest address
 * @vmaddr: vm address
 *
 * Returns 0 on success, -ENOMEM for out of memory conditions, and -EFAULT
 * if the vm address is already mapped to a different guest segment.
 * The mmap_sem of the mm that belongs to the address space must be held
 * when this function gets called.
 */
int __gmap_link(struct gmap *gmap, unsigned long gaddr, unsigned long vmaddr)
{
	struct mm_struct *mm;
	unsigned long *table;
	spinlock_t *ptl;
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	int rc;

	BUG_ON(gmap_is_shadow(gmap));
	/* Create higher level tables in the gmap page table */
	table = gmap->table;
	if ((gmap->asce & _ASCE_TYPE_MASK) >= _ASCE_TYPE_REGION1) {
		table += (gaddr >> 53) & 0x7ff;
		if ((*table & _REGION_ENTRY_INVALID) &&
		    gmap_alloc_table(gmap, table, _REGION2_ENTRY_EMPTY,
				     gaddr & 0xffe0000000000000UL))
			return -ENOMEM;
		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
	}
	if ((gmap->asce & _ASCE_TYPE_MASK) >= _ASCE_TYPE_REGION2) {
		table += (gaddr >> 42) & 0x7ff;
		if ((*table & _REGION_ENTRY_INVALID) &&
		    gmap_alloc_table(gmap, table, _REGION3_ENTRY_EMPTY,
				     gaddr & 0xfffffc0000000000UL))
			return -ENOMEM;
		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
	}
	if ((gmap->asce & _ASCE_TYPE_MASK) >= _ASCE_TYPE_REGION3) {
		table += (gaddr >> 31) & 0x7ff;
		if ((*table & _REGION_ENTRY_INVALID) &&
		    gmap_alloc_table(gmap, table, _SEGMENT_ENTRY_EMPTY,
				     gaddr & 0xffffffff80000000UL))
			return -ENOMEM;
		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
	}
	table += (gaddr >> 20) & 0x7ff;
	/* Walk the parent mm page table */
	mm = gmap->mm;
	pgd = pgd_offset(mm, vmaddr);
	VM_BUG_ON(pgd_none(*pgd));
	pud = pud_offset(pgd, vmaddr);
	VM_BUG_ON(pud_none(*pud));
	pmd = pmd_offset(pud, vmaddr);
	VM_BUG_ON(pmd_none(*pmd));
	/* large pmds cannot yet be handled */
	if (pmd_large(*pmd))
		return -EFAULT;
	/* Link gmap segment table entry location to page table. */
	rc = radix_tree_preload(GFP_KERNEL);
	if (rc)
		return rc;
	ptl = pmd_lock(mm, pmd);
	spin_lock(&gmap->guest_table_lock);
	if (*table == _SEGMENT_ENTRY_INVALID) {
		rc = radix_tree_insert(&gmap->host_to_guest,
				       vmaddr >> PMD_SHIFT, table);
		if (!rc)
			*table = pmd_val(*pmd);
	} else
		rc = 0;
	spin_unlock(&gmap->guest_table_lock);
	spin_unlock(ptl);
	radix_tree_preload_end();
	return rc;
}
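/*
 * The shift/mask pairs above mirror the s390 DAT index layout: bits
 * 63:53 of a guest address index the region-1 table, bits 52:42 the
 * region-2 table, bits 41:31 the region-3 table and bits 30:20 the
 * segment table; each index is 11 bits wide, hence the 0x7ff masks and
 * the 2048-entry tables.
 */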
/**
 * gmap_fault - resolve a fault on a guest address
 * @gmap: pointer to guest mapping meta data structure
 * @gaddr: guest address
 * @fault_flags: flags to pass down to handle_mm_fault()
 *
 * Returns 0 on success, -ENOMEM for out of memory conditions, and -EFAULT
 * if the vm address is already mapped to a different guest segment.
 */
int gmap_fault(struct gmap *gmap, unsigned long gaddr,
	       unsigned int fault_flags)
{
	unsigned long vmaddr;
	int rc;
	bool unlocked;

	down_read(&gmap->mm->mmap_sem);

retry:
	unlocked = false;
	vmaddr = __gmap_translate(gmap, gaddr);
	if (IS_ERR_VALUE(vmaddr)) {
		rc = vmaddr;
		goto out_up;
	}
	if (fixup_user_fault(current, gmap->mm, vmaddr, fault_flags,
			     &unlocked)) {
		rc = -EFAULT;
		goto out_up;
	}
	/*
	 * In the case that fixup_user_fault unlocked the mmap_sem during
	 * fault-in, redo __gmap_translate to not race with a map/unmap_segment.
	 */
	if (unlocked)
		goto retry;

	rc = __gmap_link(gmap, gaddr, vmaddr);
out_up:
	up_read(&gmap->mm->mmap_sem);
	return rc;
}
EXPORT_SYMBOL_GPL(gmap_fault);
/*
 * this function is assumed to be called with mmap_sem held
 */
void __gmap_zap(struct gmap *gmap, unsigned long gaddr)
{
	unsigned long vmaddr;
	spinlock_t *ptl;
	pte_t *ptep;

	/* Find the vm address for the guest address */
	vmaddr = (unsigned long) radix_tree_lookup(&gmap->guest_to_host,
						   gaddr >> PMD_SHIFT);
	if (vmaddr) {
		vmaddr |= gaddr & ~PMD_MASK;
		/* Get pointer to the page table entry */
		ptep = get_locked_pte(gmap->mm, vmaddr, &ptl);
		if (likely(ptep))
			ptep_zap_unused(gmap->mm, vmaddr, ptep, 0);
		pte_unmap_unlock(ptep, ptl);
	}
}
EXPORT_SYMBOL_GPL(__gmap_zap);
void gmap_discard(struct gmap *gmap, unsigned long from, unsigned long to)
{
	unsigned long gaddr, vmaddr, size;
	struct vm_area_struct *vma;

	down_read(&gmap->mm->mmap_sem);
	for (gaddr = from; gaddr < to;
	     gaddr = (gaddr + PMD_SIZE) & PMD_MASK) {
		/* Find the vm address for the guest address */
		vmaddr = (unsigned long)
			radix_tree_lookup(&gmap->guest_to_host,
					  gaddr >> PMD_SHIFT);
		if (!vmaddr)
			continue;
		vmaddr |= gaddr & ~PMD_MASK;
		/* Find vma in the parent mm */
		vma = find_vma(gmap->mm, vmaddr);
		size = min(to - gaddr, PMD_SIZE - (gaddr & ~PMD_MASK));
		zap_page_range(vma, vmaddr, size, NULL);
	}
	up_read(&gmap->mm->mmap_sem);
}
EXPORT_SYMBOL_GPL(gmap_discard);
static LIST_HEAD(gmap_notifier_list);
static DEFINE_SPINLOCK(gmap_notifier_lock);
/**
 * gmap_register_pte_notifier - register a pte invalidation callback
 * @nb: pointer to the gmap notifier block
 */
void gmap_register_pte_notifier(struct gmap_notifier *nb)
{
	spin_lock(&gmap_notifier_lock);
	list_add_rcu(&nb->list, &gmap_notifier_list);
	spin_unlock(&gmap_notifier_lock);
}
EXPORT_SYMBOL_GPL(gmap_register_pte_notifier);

/**
 * gmap_unregister_pte_notifier - remove a pte invalidation callback
 * @nb: pointer to the gmap notifier block
 */
void gmap_unregister_pte_notifier(struct gmap_notifier *nb)
{
	spin_lock(&gmap_notifier_lock);
	list_del_rcu(&nb->list);
	spin_unlock(&gmap_notifier_lock);
	synchronize_rcu();
}
EXPORT_SYMBOL_GPL(gmap_unregister_pte_notifier);
/**
 * gmap_call_notifier - call all registered invalidation callbacks
 * @gmap: pointer to guest mapping meta data structure
 * @start: start virtual address in the guest address space
 * @end: end virtual address in the guest address space
 */
static void gmap_call_notifier(struct gmap *gmap, unsigned long start,
			       unsigned long end)
{
	struct gmap_notifier *nb;

	list_for_each_entry(nb, &gmap_notifier_list, list)
		nb->notifier_call(gmap, start, end);
}
/**
 * gmap_table_walk - walk the gmap page tables
 * @gmap: pointer to guest mapping meta data structure
 * @gaddr: virtual address in the guest address space
 * @level: page table level to stop at
 *
 * Returns a table entry pointer for the given guest address and @level
 * @level=0 : returns a pointer to a page table entry (or NULL)
 * @level=1 : returns a pointer to a segment table entry (or NULL)
 * @level=2 : returns a pointer to a region-3 table entry (or NULL)
 * @level=3 : returns a pointer to a region-2 table entry (or NULL)
 * @level=4 : returns a pointer to a region-1 table entry (or NULL)
 *
 * Returns NULL if the gmap page tables could not be walked to the
 * requested level.
 *
 * Note: Can also be called for shadow gmaps.
 */
static inline unsigned long *gmap_table_walk(struct gmap *gmap,
					     unsigned long gaddr, int level)
{
	unsigned long *table;

	if ((gmap->asce & _ASCE_TYPE_MASK) + 4 < (level * 4))
		return NULL;
	if (gmap_is_shadow(gmap) && gmap->removed)
		return NULL;
	if (gaddr & (-1UL << (31 + ((gmap->asce & _ASCE_TYPE_MASK) >> 2)*11)))
		return NULL;
	table = (unsigned long *)(gmap->asce & _ASCE_ORIGIN);
	switch (gmap->asce & _ASCE_TYPE_MASK) {
	case _ASCE_TYPE_REGION1:
		table += (gaddr >> 53) & 0x7ff;
		if (level == 4)
			break;
		if (*table & _REGION_ENTRY_INVALID)
			return NULL;
		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
		/* fallthrough */
	case _ASCE_TYPE_REGION2:
		table += (gaddr >> 42) & 0x7ff;
		if (level == 3)
			break;
		if (*table & _REGION_ENTRY_INVALID)
			return NULL;
		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
		/* fallthrough */
	case _ASCE_TYPE_REGION3:
		table += (gaddr >> 31) & 0x7ff;
		if (level == 2)
			break;
		if (*table & _REGION_ENTRY_INVALID)
			return NULL;
		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
		/* fallthrough */
	case _ASCE_TYPE_SEGMENT:
		table += (gaddr >> 20) & 0x7ff;
		if (level == 1)
			break;
		if (*table & _REGION_ENTRY_INVALID)
			return NULL;
		table = (unsigned long *)(*table & _SEGMENT_ENTRY_ORIGIN);
		table += (gaddr >> 12) & 0xff;
	}
	return table;
}
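/*
 * The switch above deliberately falls through from the topmost table
 * type of the ASCE down to the requested @level, following one origin
 * pointer per step, exactly like the hardware DAT walk would.
 */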
/**
 * gmap_pte_op_walk - walk the gmap page table, get the page table lock
 *		      and return the pte pointer
 * @gmap: pointer to guest mapping meta data structure
 * @gaddr: virtual address in the guest address space
 * @ptl: pointer to the spinlock pointer
 *
 * Returns a pointer to the locked pte for a guest address, or NULL
 *
 * Note: Can also be called for shadow gmaps.
 */
static pte_t *gmap_pte_op_walk(struct gmap *gmap, unsigned long gaddr,
			       spinlock_t **ptl)
{
	unsigned long *table;

	if (gmap_is_shadow(gmap))
		spin_lock(&gmap->guest_table_lock);
	/* Walk the gmap page table, lock and get pte pointer */
	table = gmap_table_walk(gmap, gaddr, 1); /* get segment pointer */
	if (!table || *table & _SEGMENT_ENTRY_INVALID) {
		if (gmap_is_shadow(gmap))
			spin_unlock(&gmap->guest_table_lock);
		return NULL;
	}
	if (gmap_is_shadow(gmap)) {
		*ptl = &gmap->guest_table_lock;
		return pte_offset_map((pmd_t *) table, gaddr);
	}
	return pte_alloc_map_lock(gmap->mm, (pmd_t *) table, gaddr, ptl);
}
/**
 * gmap_pte_op_fixup - force a page in and connect the gmap page table
 * @gmap: pointer to guest mapping meta data structure
 * @gaddr: virtual address in the guest address space
 * @vmaddr: address in the host process address space
 *
 * Returns 0 if the caller can retry __gmap_translate (might fail again),
 * -ENOMEM if out of memory and -EFAULT if anything goes wrong while fixing
 * up or connecting the gmap page table.
 */
static int gmap_pte_op_fixup(struct gmap *gmap, unsigned long gaddr,
			     unsigned long vmaddr)
{
	struct mm_struct *mm = gmap->mm;
	bool unlocked = false;

	BUG_ON(gmap_is_shadow(gmap));
	if (fixup_user_fault(current, mm, vmaddr, FAULT_FLAG_WRITE, &unlocked))
		return -EFAULT;
	if (unlocked)
		/* lost mmap_sem, caller has to retry __gmap_translate */
		return 0;
	/* Connect the page tables */
	return __gmap_link(gmap, gaddr, vmaddr);
}
/**
 * gmap_pte_op_end - release the page table lock
 * @ptl: pointer to the spinlock pointer
 */
static void gmap_pte_op_end(spinlock_t *ptl)
{
	spin_unlock(ptl);
}
/**
 * gmap_protect_range - remove access rights to memory and set pgste bits
 * @gmap: pointer to guest mapping meta data structure
 * @gaddr: virtual address in the guest address space
 * @len: size of area
 * @prot: indicates access rights: PROT_NONE, PROT_READ or PROT_WRITE
 * @bits: pgste notification bits to set
 *
 * Returns 0 if successfully protected, -ENOMEM if out of memory and
 * -EFAULT if gaddr is invalid (or mapping for shadows is missing).
 *
 * Called with sg->mm->mmap_sem in read.
 *
 * Note: Can also be called for shadow gmaps.
 */
static int gmap_protect_range(struct gmap *gmap, unsigned long gaddr,
			      unsigned long len, int prot, unsigned long bits)
{
	unsigned long vmaddr;
	spinlock_t *ptl;
	pte_t *ptep;
	int rc;

	while (len) {
		rc = -EAGAIN;
		ptep = gmap_pte_op_walk(gmap, gaddr, &ptl);
		if (ptep) {
			rc = ptep_force_prot(gmap->mm, gaddr, ptep, prot, bits);
			gmap_pte_op_end(ptl);
		}
		if (rc) {
			vmaddr = __gmap_translate(gmap, gaddr);
			if (IS_ERR_VALUE(vmaddr))
				return vmaddr;
			rc = gmap_pte_op_fixup(gmap, gaddr, vmaddr);
			if (rc)
				return rc;
			continue;
		}
		gaddr += PAGE_SIZE;
		len -= PAGE_SIZE;
	}
	return 0;
}
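/*
 * gmap_protect_range() uses the fault-and-retry pattern common to this
 * file: if the pte walk fails, or ptep_force_prot() cannot be applied,
 * the guest address is translated, the page is faulted in and linked
 * via gmap_pte_op_fixup(), and the same page is retried before the
 * loop advances by PAGE_SIZE.
 */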
/**
 * gmap_mprotect_notify - change access rights for a range of ptes and
 *                        call the notifier if any pte changes again
 * @gmap: pointer to guest mapping meta data structure
 * @gaddr: virtual address in the guest address space
 * @len: size of area
 * @prot: indicates access rights: PROT_NONE, PROT_READ or PROT_WRITE
 *
 * Returns 0 if for each page in the given range a gmap mapping exists,
 * the new access rights could be set and the notifier could be armed.
 * If the gmap mapping is missing for one or more pages -EFAULT is
 * returned. If no memory could be allocated -ENOMEM is returned.
 * This function establishes missing page table entries.
 */
int gmap_mprotect_notify(struct gmap *gmap, unsigned long gaddr,
			 unsigned long len, int prot)
{
	int rc;

	if ((gaddr & ~PAGE_MASK) || (len & ~PAGE_MASK) || gmap_is_shadow(gmap))
		return -EINVAL;
	if (!MACHINE_HAS_ESOP && prot == PROT_READ)
		return -EINVAL;
	down_read(&gmap->mm->mmap_sem);
	rc = gmap_protect_range(gmap, gaddr, len, prot, PGSTE_IN_BIT);
	up_read(&gmap->mm->mmap_sem);
	return rc;
}
EXPORT_SYMBOL_GPL(gmap_mprotect_notify);
/**
 * gmap_read_table - get an unsigned long value from a guest page table using
 *                   absolute addressing, without marking the page referenced.
 * @gmap: pointer to guest mapping meta data structure
 * @gaddr: virtual address in the guest address space
 * @val: pointer to the unsigned long value to return
 *
 * Returns 0 if the value was read, -ENOMEM if out of memory and -EFAULT
 * if reading using the virtual address failed.
 *
 * Called with gmap->mm->mmap_sem in read.
 */
int gmap_read_table(struct gmap *gmap, unsigned long gaddr, unsigned long *val)
{
	unsigned long address, vmaddr;
	spinlock_t *ptl;
	pte_t *ptep, pte;
	int rc;

	while (1) {
		rc = -EAGAIN;
		ptep = gmap_pte_op_walk(gmap, gaddr, &ptl);
		if (ptep) {
			pte = *ptep;
			if (pte_present(pte) && (pte_val(pte) & _PAGE_READ)) {
				address = pte_val(pte) & PAGE_MASK;
				address += gaddr & ~PAGE_MASK;
				*val = *(unsigned long *) address;
				pte_val(*ptep) |= _PAGE_YOUNG;
				/* Do *NOT* clear the _PAGE_INVALID bit! */
				rc = 0;
			}
			gmap_pte_op_end(ptl);
		}
		if (!rc)
			break;
		vmaddr = __gmap_translate(gmap, gaddr);
		if (IS_ERR_VALUE(vmaddr)) {
			rc = vmaddr;
			break;
		}
		rc = gmap_pte_op_fixup(gmap, gaddr, vmaddr);
		if (rc)
			break;
	}
	return rc;
}
EXPORT_SYMBOL_GPL(gmap_read_table);
/**
 * gmap_insert_rmap - add a rmap to the host_to_rmap radix tree
 * @sg: pointer to the shadow guest address space structure
 * @vmaddr: vm address associated with the rmap
 * @rmap: pointer to the rmap structure
 *
 * Called with the sg->guest_table_lock
 */
static inline void gmap_insert_rmap(struct gmap *sg, unsigned long vmaddr,
				    struct gmap_rmap *rmap)
{
	void **slot;

	BUG_ON(!gmap_is_shadow(sg));
	slot = radix_tree_lookup_slot(&sg->host_to_rmap, vmaddr >> PAGE_SHIFT);
	if (slot) {
		rmap->next = radix_tree_deref_slot_protected(slot,
							&sg->guest_table_lock);
		radix_tree_replace_slot(slot, rmap);
	} else {
		rmap->next = NULL;
		radix_tree_insert(&sg->host_to_rmap, vmaddr >> PAGE_SHIFT,
				  rmap);
	}
}
/**
 * gmap_protect_rmap - modify access rights to memory and create an rmap
 * @sg: pointer to the shadow guest address space structure
 * @raddr: rmap address in the shadow gmap
 * @paddr: address in the parent guest address space
 * @len: length of the memory area to protect
 * @prot: indicates access rights: none, read-only or read-write
 *
 * Returns 0 if successfully protected and the rmap was created, -ENOMEM
 * if out of memory and -EFAULT if paddr is invalid.
 */
static int gmap_protect_rmap(struct gmap *sg, unsigned long raddr,
			     unsigned long paddr, unsigned long len, int prot)
{
	struct gmap *parent;
	struct gmap_rmap *rmap;
	unsigned long vmaddr;
	spinlock_t *ptl;
	pte_t *ptep;
	int rc;

	BUG_ON(!gmap_is_shadow(sg));
	parent = sg->parent;
	while (len) {
		vmaddr = __gmap_translate(parent, paddr);
		if (IS_ERR_VALUE(vmaddr))
			return vmaddr;
		rmap = kzalloc(sizeof(*rmap), GFP_KERNEL);
		if (!rmap)
			return -ENOMEM;
		rmap->raddr = raddr;
		rc = radix_tree_preload(GFP_KERNEL);
		if (rc) {
			kfree(rmap);
			return rc;
		}
		rc = -EAGAIN;
		ptep = gmap_pte_op_walk(parent, paddr, &ptl);
		if (ptep) {
			spin_lock(&sg->guest_table_lock);
			rc = ptep_force_prot(parent->mm, paddr, ptep, prot,
					     PGSTE_VSIE_BIT);
			if (!rc)
				gmap_insert_rmap(sg, vmaddr, rmap);
			spin_unlock(&sg->guest_table_lock);
			gmap_pte_op_end(ptl);
		}
		radix_tree_preload_end();
		if (rc) {
			kfree(rmap);
			rc = gmap_pte_op_fixup(parent, paddr, vmaddr);
			if (rc)
				return rc;
			continue;
		}
		paddr += PAGE_SIZE;
		len -= PAGE_SIZE;
	}
	return 0;
}
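/*
 * An rmap entry is allocated for every parent page that is protected on
 * behalf of the shadow gmap; it is only inserted into host_to_rmap
 * (under guest_table_lock) once ptep_force_prot() succeeded, so a
 * failed or retried iteration frees the rmap again before looping.
 */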
#define _SHADOW_RMAP_MASK	0x7
#define _SHADOW_RMAP_REGION1	0x5
#define _SHADOW_RMAP_REGION2	0x4
#define _SHADOW_RMAP_REGION3	0x3
#define _SHADOW_RMAP_SEGMENT	0x2
#define _SHADOW_RMAP_PGTABLE	0x1
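/*
 * The three low bits of an rmap address encode which table level has to
 * be unshadowed when the parent page changes; table origins are at
 * least 8-byte aligned, so these bits are free.  gmap_shadow_page(),
 * for instance, stores (saddr & PAGE_MASK) | _SHADOW_RMAP_PGTABLE, and
 * gmap_shadow_notify() strips the tag with _SHADOW_RMAP_MASK again.
 */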
/**
 * gmap_idte_one - invalidate a single region or segment table entry
 * @asce: region or segment table *origin* + table-type bits
 * @vaddr: virtual address to identify the table entry to flush
 *
 * The invalid bit of a single region or segment table entry is set
 * and the associated TLB entries depending on the entry are flushed.
 * The table-type of the @asce identifies the portion of the @vaddr
 * that is used as the invalidation index.
 */
static inline void gmap_idte_one(unsigned long asce, unsigned long vaddr)
{
	asm volatile(
		"	.insn	rrf,0xb98e0000,%0,%1,0,0"
		: : "a" (asce), "a" (vaddr) : "cc", "memory");
}
/**
 * gmap_unshadow_page - remove a page from a shadow page table
 * @sg: pointer to the shadow guest address space structure
 * @raddr: rmap address in the shadow guest address space
 *
 * Called with the sg->guest_table_lock
 */
static void gmap_unshadow_page(struct gmap *sg, unsigned long raddr)
{
	unsigned long *table;

	BUG_ON(!gmap_is_shadow(sg));
	table = gmap_table_walk(sg, raddr, 0); /* get page table pointer */
	if (!table || *table & _PAGE_INVALID)
		return;
	gmap_call_notifier(sg, raddr, raddr + (1UL << 12) - 1);
	ptep_unshadow_pte(sg->mm, raddr, (pte_t *) table);
}
/**
 * __gmap_unshadow_pgt - remove all entries from a shadow page table
 * @sg: pointer to the shadow guest address space structure
 * @raddr: rmap address in the shadow guest address space
 * @pgt: pointer to the start of a shadow page table
 *
 * Called with the sg->guest_table_lock
 */
static void __gmap_unshadow_pgt(struct gmap *sg, unsigned long raddr,
				unsigned long *pgt)
{
	int i;

	BUG_ON(!gmap_is_shadow(sg));
	for (i = 0; i < 256; i++, raddr += 1UL << 12)
		pgt[i] = _PAGE_INVALID;
}
/**
 * gmap_unshadow_pgt - remove a shadow page table from a segment entry
 * @sg: pointer to the shadow guest address space structure
 * @raddr: address in the shadow guest address space
 *
 * Called with the sg->guest_table_lock
 */
static void gmap_unshadow_pgt(struct gmap *sg, unsigned long raddr)
{
	unsigned long sto, *ste, *pgt;
	struct page *page;

	BUG_ON(!gmap_is_shadow(sg));
	ste = gmap_table_walk(sg, raddr, 1); /* get segment pointer */
	if (!ste || *ste & _SEGMENT_ENTRY_INVALID)
		return;
	gmap_call_notifier(sg, raddr, raddr + (1UL << 20) - 1);
	sto = (unsigned long) (ste - ((raddr >> 20) & 0x7ff));
	gmap_idte_one(sto | _ASCE_TYPE_SEGMENT, raddr);
	pgt = (unsigned long *)(*ste & _SEGMENT_ENTRY_ORIGIN);
	*ste = _SEGMENT_ENTRY_EMPTY;
	__gmap_unshadow_pgt(sg, raddr, pgt);
	/* Free page table */
	page = pfn_to_page(__pa(pgt) >> PAGE_SHIFT);
	list_del(&page->lru);
	page_table_free_pgste(page);
}
/**
 * __gmap_unshadow_sgt - remove all entries from a shadow segment table
 * @sg: pointer to the shadow guest address space structure
 * @raddr: rmap address in the shadow guest address space
 * @sgt: pointer to the start of a shadow segment table
 *
 * Called with the sg->guest_table_lock
 */
static void __gmap_unshadow_sgt(struct gmap *sg, unsigned long raddr,
				unsigned long *sgt)
{
	unsigned long asce, *pgt;
	struct page *page;
	int i;

	BUG_ON(!gmap_is_shadow(sg));
	asce = (unsigned long) sgt | _ASCE_TYPE_SEGMENT;
	for (i = 0; i < 2048; i++, raddr += 1UL << 20) {
		if (sgt[i] & _SEGMENT_ENTRY_INVALID)
			continue;
		pgt = (unsigned long *)(sgt[i] & _REGION_ENTRY_ORIGIN);
		sgt[i] = _SEGMENT_ENTRY_EMPTY;
		__gmap_unshadow_pgt(sg, raddr, pgt);
		/* Free page table */
		page = pfn_to_page(__pa(pgt) >> PAGE_SHIFT);
		list_del(&page->lru);
		page_table_free_pgste(page);
	}
}
/**
 * gmap_unshadow_sgt - remove a shadow segment table from a region-3 entry
 * @sg: pointer to the shadow guest address space structure
 * @raddr: rmap address in the shadow guest address space
 *
 * Called with the shadow->guest_table_lock
 */
static void gmap_unshadow_sgt(struct gmap *sg, unsigned long raddr)
{
	unsigned long r3o, *r3e, *sgt;
	struct page *page;

	BUG_ON(!gmap_is_shadow(sg));
	r3e = gmap_table_walk(sg, raddr, 2); /* get region-3 pointer */
	if (!r3e || *r3e & _REGION_ENTRY_INVALID)
		return;
	gmap_call_notifier(sg, raddr, raddr + (1UL << 31) - 1);
	r3o = (unsigned long) (r3e - ((raddr >> 31) & 0x7ff));
	gmap_idte_one(r3o | _ASCE_TYPE_REGION3, raddr);
	sgt = (unsigned long *)(*r3e & _REGION_ENTRY_ORIGIN);
	*r3e = _REGION3_ENTRY_EMPTY;
	__gmap_unshadow_sgt(sg, raddr, sgt);
	/* Free segment table */
	page = pfn_to_page(__pa(sgt) >> PAGE_SHIFT);
	list_del(&page->lru);
	__free_pages(page, 2);
}
/**
 * __gmap_unshadow_r3t - remove all entries from a shadow region-3 table
 * @sg: pointer to the shadow guest address space structure
 * @raddr: address in the shadow guest address space
 * @r3t: pointer to the start of a shadow region-3 table
 *
 * Called with the sg->guest_table_lock
 */
static void __gmap_unshadow_r3t(struct gmap *sg, unsigned long raddr,
				unsigned long *r3t)
{
	unsigned long asce, *sgt;
	struct page *page;
	int i;

	BUG_ON(!gmap_is_shadow(sg));
	asce = (unsigned long) r3t | _ASCE_TYPE_REGION3;
	for (i = 0; i < 2048; i++, raddr += 1UL << 31) {
		if (r3t[i] & _REGION_ENTRY_INVALID)
			continue;
		sgt = (unsigned long *)(r3t[i] & _REGION_ENTRY_ORIGIN);
		r3t[i] = _REGION3_ENTRY_EMPTY;
		__gmap_unshadow_sgt(sg, raddr, sgt);
		/* Free segment table */
		page = pfn_to_page(__pa(sgt) >> PAGE_SHIFT);
		list_del(&page->lru);
		__free_pages(page, 2);
	}
}
/**
 * gmap_unshadow_r3t - remove a shadow region-3 table from a region-2 entry
 * @sg: pointer to the shadow guest address space structure
 * @raddr: rmap address in the shadow guest address space
 *
 * Called with the sg->guest_table_lock
 */
static void gmap_unshadow_r3t(struct gmap *sg, unsigned long raddr)
{
	unsigned long r2o, *r2e, *r3t;
	struct page *page;

	BUG_ON(!gmap_is_shadow(sg));
	r2e = gmap_table_walk(sg, raddr, 3); /* get region-2 pointer */
	if (!r2e || *r2e & _REGION_ENTRY_INVALID)
		return;
	gmap_call_notifier(sg, raddr, raddr + (1UL << 42) - 1);
	r2o = (unsigned long) (r2e - ((raddr >> 42) & 0x7ff));
	gmap_idte_one(r2o | _ASCE_TYPE_REGION2, raddr);
	r3t = (unsigned long *)(*r2e & _REGION_ENTRY_ORIGIN);
	*r2e = _REGION2_ENTRY_EMPTY;
	__gmap_unshadow_r3t(sg, raddr, r3t);
	/* Free region 3 table */
	page = pfn_to_page(__pa(r3t) >> PAGE_SHIFT);
	list_del(&page->lru);
	__free_pages(page, 2);
}
/**
 * __gmap_unshadow_r2t - remove all entries from a shadow region-2 table
 * @sg: pointer to the shadow guest address space structure
 * @raddr: rmap address in the shadow guest address space
 * @r2t: pointer to the start of a shadow region-2 table
 *
 * Called with the sg->guest_table_lock
 */
static void __gmap_unshadow_r2t(struct gmap *sg, unsigned long raddr,
				unsigned long *r2t)
{
	unsigned long asce, *r3t;
	struct page *page;
	int i;

	BUG_ON(!gmap_is_shadow(sg));
	asce = (unsigned long) r2t | _ASCE_TYPE_REGION2;
	for (i = 0; i < 2048; i++, raddr += 1UL << 42) {
		if (r2t[i] & _REGION_ENTRY_INVALID)
			continue;
		r3t = (unsigned long *)(r2t[i] & _REGION_ENTRY_ORIGIN);
		r2t[i] = _REGION2_ENTRY_EMPTY;
		__gmap_unshadow_r3t(sg, raddr, r3t);
		/* Free region 3 table */
		page = pfn_to_page(__pa(r3t) >> PAGE_SHIFT);
		list_del(&page->lru);
		__free_pages(page, 2);
	}
}
/**
 * gmap_unshadow_r2t - remove a shadow region-2 table from a region-1 entry
 * @sg: pointer to the shadow guest address space structure
 * @raddr: rmap address in the shadow guest address space
 *
 * Called with the sg->guest_table_lock
 */
static void gmap_unshadow_r2t(struct gmap *sg, unsigned long raddr)
{
	unsigned long r1o, *r1e, *r2t;
	struct page *page;

	BUG_ON(!gmap_is_shadow(sg));
	r1e = gmap_table_walk(sg, raddr, 4); /* get region-1 pointer */
	if (!r1e || *r1e & _REGION_ENTRY_INVALID)
		return;
	gmap_call_notifier(sg, raddr, raddr + (1UL << 53) - 1);
	r1o = (unsigned long) (r1e - ((raddr >> 53) & 0x7ff));
	gmap_idte_one(r1o | _ASCE_TYPE_REGION1, raddr);
	r2t = (unsigned long *)(*r1e & _REGION_ENTRY_ORIGIN);
	*r1e = _REGION1_ENTRY_EMPTY;
	__gmap_unshadow_r2t(sg, raddr, r2t);
	/* Free region 2 table */
	page = pfn_to_page(__pa(r2t) >> PAGE_SHIFT);
	list_del(&page->lru);
	__free_pages(page, 2);
}
/**
 * __gmap_unshadow_r1t - remove all entries from a shadow region-1 table
 * @sg: pointer to the shadow guest address space structure
 * @raddr: rmap address in the shadow guest address space
 * @r1t: pointer to the start of a shadow region-1 table
 *
 * Called with the shadow->guest_table_lock
 */
static void __gmap_unshadow_r1t(struct gmap *sg, unsigned long raddr,
				unsigned long *r1t)
{
	unsigned long asce, *r2t;
	struct page *page;
	int i;

	BUG_ON(!gmap_is_shadow(sg));
	asce = (unsigned long) r1t | _ASCE_TYPE_REGION1;
	for (i = 0; i < 2048; i++, raddr += 1UL << 53) {
		if (r1t[i] & _REGION_ENTRY_INVALID)
			continue;
		r2t = (unsigned long *)(r1t[i] & _REGION_ENTRY_ORIGIN);
		__gmap_unshadow_r2t(sg, raddr, r2t);
		/* Clear entry and flush translation r1t -> r2t */
		gmap_idte_one(asce, raddr);
		r1t[i] = _REGION1_ENTRY_EMPTY;
		/* Free region 2 table */
		page = pfn_to_page(__pa(r2t) >> PAGE_SHIFT);
		list_del(&page->lru);
		__free_pages(page, 2);
	}
}
/**
 * gmap_unshadow - remove a shadow page table completely
 * @sg: pointer to the shadow guest address space structure
 *
 * Called with sg->guest_table_lock
 */
static void gmap_unshadow(struct gmap *sg)
{
	unsigned long *table;

	BUG_ON(!gmap_is_shadow(sg));
	if (sg->removed)
		return;
	sg->removed = 1;
	gmap_call_notifier(sg, 0, -1UL);
	table = (unsigned long *)(sg->asce & _ASCE_ORIGIN);
	switch (sg->asce & _ASCE_TYPE_MASK) {
	case _ASCE_TYPE_REGION1:
		__gmap_unshadow_r1t(sg, 0, table);
		break;
	case _ASCE_TYPE_REGION2:
		__gmap_unshadow_r2t(sg, 0, table);
		break;
	case _ASCE_TYPE_REGION3:
		__gmap_unshadow_r3t(sg, 0, table);
		break;
	case _ASCE_TYPE_SEGMENT:
		__gmap_unshadow_sgt(sg, 0, table);
		break;
	}
}
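/*
 * Teardown is strictly top-down: gmap_unshadow() dispatches on the
 * table type of the shadow ASCE, and each __gmap_unshadow_*() level
 * clears its entries, flushes translations via gmap_idte_one() where
 * needed, and frees the lower-level tables before its own table is
 * released.
 */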
/**
 * gmap_find_shadow - find a specific asce in the list of shadow tables
 * @parent: pointer to the parent gmap
 * @asce: ASCE for which the shadow table is created
 *
 * Returns the pointer to a gmap if a shadow table with the given asce is
 * already available, otherwise NULL
 */
static struct gmap *gmap_find_shadow(struct gmap *parent, unsigned long asce)
{
	struct gmap *sg;

	list_for_each_entry(sg, &parent->children, list) {
		if (sg->orig_asce != asce || sg->removed)
			continue;
		atomic_inc(&sg->ref_count);
		return sg;
	}
	return NULL;
}
/**
 * gmap_shadow - create/find a shadow guest address space
 * @parent: pointer to the parent gmap
 * @asce: ASCE for which the shadow table is created
 *
 * The pages of the top level page table referred by the asce parameter
 * will be set to read-only and marked in the PGSTEs of the kvm process.
 * The shadow table will be removed automatically on any change to the
 * PTE mapping for the source table.
 *
 * Returns a guest address space structure, NULL if out of memory or if
 * anything goes wrong while protecting the top level pages.
 */
struct gmap *gmap_shadow(struct gmap *parent, unsigned long asce)
{
	struct gmap *sg, *new;
	unsigned long limit;
	int rc;

	BUG_ON(gmap_is_shadow(parent));
	spin_lock(&parent->shadow_lock);
	sg = gmap_find_shadow(parent, asce);
	spin_unlock(&parent->shadow_lock);
	if (sg)
		return sg;
	/* Create a new shadow gmap */
	limit = -1UL >> (33 - (((asce & _ASCE_TYPE_MASK) >> 2) * 11));
	new = gmap_alloc(limit);
	if (!new)
		return NULL;
	new->mm = parent->mm;
	new->parent = gmap_get(parent);
	new->orig_asce = asce;
	down_read(&parent->mm->mmap_sem);
	rc = gmap_protect_range(parent, asce & _ASCE_ORIGIN,
				((asce & _ASCE_TABLE_LENGTH) + 1) * 4096,
				PROT_READ, PGSTE_VSIE_BIT);
	up_read(&parent->mm->mmap_sem);
	if (rc) {
		gmap_free(new);
		return NULL;
	}
	atomic_set(&new->ref_count, 2);
	spin_lock(&parent->shadow_lock);
	/* Recheck if another CPU created the same shadow */
	sg = gmap_find_shadow(parent, asce);
	if (sg) {
		spin_unlock(&parent->shadow_lock);
		gmap_free(new);
		return sg;
	}
	list_add(&new->list, &parent->children);
	spin_unlock(&parent->shadow_lock);
	return new;
}
EXPORT_SYMBOL_GPL(gmap_shadow);
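/*
 * A fresh shadow gmap starts with a reference count of 2: one reference
 * is held by the parent's children list and is dropped in gmap_remove()
 * or gmap_shadow_notify(), the other belongs to the caller of
 * gmap_shadow().
 */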
/**
 * gmap_shadow_r2t - create an empty shadow region 2 table
 * @sg: pointer to the shadow guest address space structure
 * @saddr: faulting address in the shadow gmap
 * @r2t: parent gmap address of the region 2 table to get shadowed
 *
 * The r2t parameter specifies the address of the source table. The
 * four pages of the source table are made read-only in the parent gmap
 * address space. A write to the source table area @r2t will automatically
 * remove the shadow r2 table and all of its descendants.
 *
 * Returns 0 if successfully shadowed or already shadowed, -EAGAIN if the
 * shadow table structure is incomplete, -ENOMEM if out of memory and
 * -EFAULT if an address in the parent gmap could not be resolved.
 *
 * Called with sg->mm->mmap_sem in read.
 */
int gmap_shadow_r2t(struct gmap *sg, unsigned long saddr, unsigned long r2t)
{
	unsigned long raddr, origin, offset, len;
	unsigned long *s_r2t, *table;
	struct page *page;
	int rc;

	BUG_ON(!gmap_is_shadow(sg));
	/* Allocate a shadow region second table */
	page = alloc_pages(GFP_KERNEL, 2);
	if (!page)
		return -ENOMEM;
	page->index = r2t & _REGION_ENTRY_ORIGIN;
	s_r2t = (unsigned long *) page_to_phys(page);
	/* Install shadow region second table */
	spin_lock(&sg->guest_table_lock);
	table = gmap_table_walk(sg, saddr, 4); /* get region-1 pointer */
	if (!table) {
		rc = -EAGAIN;		/* Race with unshadow */
		goto out_free;
	}
	if (!(*table & _REGION_ENTRY_INVALID)) {
		rc = 0;			/* Already established */
		goto out_free;
	}
	crst_table_init(s_r2t, _REGION2_ENTRY_EMPTY);
	*table = (unsigned long) s_r2t |
		 _REGION_ENTRY_LENGTH | _REGION_ENTRY_TYPE_R1;
	list_add(&page->lru, &sg->crst_list);
	spin_unlock(&sg->guest_table_lock);
	/* Make r2t read-only in parent gmap page table */
	raddr = (saddr & 0xffe0000000000000UL) | _SHADOW_RMAP_REGION1;
	origin = r2t & _REGION_ENTRY_ORIGIN;
	offset = ((r2t & _REGION_ENTRY_OFFSET) >> 6) * 4096;
	len = ((r2t & _REGION_ENTRY_LENGTH) + 1) * 4096 - offset;
	rc = gmap_protect_rmap(sg, raddr, origin + offset, len, PROT_READ);
	if (rc) {
		spin_lock(&sg->guest_table_lock);
		gmap_unshadow_r2t(sg, raddr);
		spin_unlock(&sg->guest_table_lock);
	}
	return rc;
out_free:
	spin_unlock(&sg->guest_table_lock);
	__free_pages(page, 2);
	return rc;
}
EXPORT_SYMBOL_GPL(gmap_shadow_r2t);
/**
 * gmap_shadow_r3t - create a shadow region 3 table
 * @sg: pointer to the shadow guest address space structure
 * @saddr: faulting address in the shadow gmap
 * @r3t: parent gmap address of the region 3 table to get shadowed
 *
 * Returns 0 if successfully shadowed or already shadowed, -EAGAIN if the
 * shadow table structure is incomplete, -ENOMEM if out of memory and
 * -EFAULT if an address in the parent gmap could not be resolved.
 *
 * Called with sg->mm->mmap_sem in read.
 */
int gmap_shadow_r3t(struct gmap *sg, unsigned long saddr, unsigned long r3t)
{
	unsigned long raddr, origin, offset, len;
	unsigned long *s_r3t, *table;
	struct page *page;
	int rc;

	BUG_ON(!gmap_is_shadow(sg));
	/* Allocate a shadow region third table */
	page = alloc_pages(GFP_KERNEL, 2);
	if (!page)
		return -ENOMEM;
	page->index = r3t & _REGION_ENTRY_ORIGIN;
	s_r3t = (unsigned long *) page_to_phys(page);
	/* Install shadow region third table */
	spin_lock(&sg->guest_table_lock);
	table = gmap_table_walk(sg, saddr, 3); /* get region-2 pointer */
	if (!table) {
		rc = -EAGAIN;		/* Race with unshadow */
		goto out_free;
	}
	if (!(*table & _REGION_ENTRY_INVALID)) {
		rc = 0;			/* Already established */
		goto out_free;
	}
	crst_table_init(s_r3t, _REGION3_ENTRY_EMPTY);
	*table = (unsigned long) s_r3t |
		 _REGION_ENTRY_LENGTH | _REGION_ENTRY_TYPE_R2;
	list_add(&page->lru, &sg->crst_list);
	spin_unlock(&sg->guest_table_lock);
	/* Make r3t read-only in parent gmap page table */
	raddr = (saddr & 0xfffffc0000000000UL) | _SHADOW_RMAP_REGION2;
	origin = r3t & _REGION_ENTRY_ORIGIN;
	offset = ((r3t & _REGION_ENTRY_OFFSET) >> 6) * 4096;
	len = ((r3t & _REGION_ENTRY_LENGTH) + 1) * 4096 - offset;
	rc = gmap_protect_rmap(sg, raddr, origin + offset, len, PROT_READ);
	if (rc) {
		spin_lock(&sg->guest_table_lock);
		gmap_unshadow_r3t(sg, raddr);
		spin_unlock(&sg->guest_table_lock);
	}
	return rc;
out_free:
	spin_unlock(&sg->guest_table_lock);
	__free_pages(page, 2);
	return rc;
}
EXPORT_SYMBOL_GPL(gmap_shadow_r3t);
/**
 * gmap_shadow_sgt - create a shadow segment table
 * @sg: pointer to the shadow guest address space structure
 * @saddr: faulting address in the shadow gmap
 * @sgt: parent gmap address of the segment table to get shadowed
 *
 * Returns 0 if successfully shadowed or already shadowed, -EAGAIN if the
 * shadow table structure is incomplete, -ENOMEM if out of memory and
 * -EFAULT if an address in the parent gmap could not be resolved.
 *
 * Called with sg->mm->mmap_sem in read.
 */
int gmap_shadow_sgt(struct gmap *sg, unsigned long saddr, unsigned long sgt)
{
	unsigned long raddr, origin, offset, len;
	unsigned long *s_sgt, *table;
	struct page *page;
	int rc;

	BUG_ON(!gmap_is_shadow(sg));
	/* Allocate a shadow segment table */
	page = alloc_pages(GFP_KERNEL, 2);
	if (!page)
		return -ENOMEM;
	page->index = sgt & _REGION_ENTRY_ORIGIN;
	s_sgt = (unsigned long *) page_to_phys(page);
	/* Install shadow segment table */
	spin_lock(&sg->guest_table_lock);
	table = gmap_table_walk(sg, saddr, 2); /* get region-3 pointer */
	if (!table) {
		rc = -EAGAIN;		/* Race with unshadow */
		goto out_free;
	}
	if (!(*table & _REGION_ENTRY_INVALID)) {
		rc = 0;			/* Already established */
		goto out_free;
	}
	crst_table_init(s_sgt, _SEGMENT_ENTRY_EMPTY);
	*table = (unsigned long) s_sgt |
		 _REGION_ENTRY_LENGTH | _REGION_ENTRY_TYPE_R3;
	list_add(&page->lru, &sg->crst_list);
	spin_unlock(&sg->guest_table_lock);
	/* Make sgt read-only in parent gmap page table */
	raddr = (saddr & 0xffffffff80000000UL) | _SHADOW_RMAP_REGION3;
	origin = sgt & _REGION_ENTRY_ORIGIN;
	offset = ((sgt & _REGION_ENTRY_OFFSET) >> 6) * 4096;
	len = ((sgt & _REGION_ENTRY_LENGTH) + 1) * 4096 - offset;
	rc = gmap_protect_rmap(sg, raddr, origin + offset, len, PROT_READ);
	if (rc) {
		spin_lock(&sg->guest_table_lock);
		gmap_unshadow_sgt(sg, raddr);
		spin_unlock(&sg->guest_table_lock);
	}
	return rc;
out_free:
	spin_unlock(&sg->guest_table_lock);
	__free_pages(page, 2);
	return rc;
}
EXPORT_SYMBOL_GPL(gmap_shadow_sgt);
/**
 * gmap_shadow_pgt_lookup - find a shadow page table
 * @sg: pointer to the shadow guest address space structure
 * @saddr: the address in the shadow guest address space
 * @pgt: parent gmap address of the page table to get shadowed
 * @dat_protection: if the pgtable is marked as protected by dat
 *
 * Returns 0 if the shadow page table was found and -EAGAIN if the page
 * table was not found.
 *
 * Called with sg->mm->mmap_sem in read.
 */
int gmap_shadow_pgt_lookup(struct gmap *sg, unsigned long saddr,
			   unsigned long *pgt, int *dat_protection)
{
	unsigned long *table;
	struct page *page;
	int rc;

	BUG_ON(!gmap_is_shadow(sg));
	spin_lock(&sg->guest_table_lock);
	table = gmap_table_walk(sg, saddr, 1); /* get segment pointer */
	if (table && !(*table & _SEGMENT_ENTRY_INVALID)) {
		/* Shadow page tables are full pages (pte+pgste) */
		page = pfn_to_page(*table >> PAGE_SHIFT);
		*pgt = page->index;
		*dat_protection = !!(*table & _SEGMENT_ENTRY_PROTECT);
		rc = 0;
	} else {
		rc = -EAGAIN;
	}
	spin_unlock(&sg->guest_table_lock);
	return rc;
}
EXPORT_SYMBOL_GPL(gmap_shadow_pgt_lookup);
/**
 * gmap_shadow_pgt - instantiate a shadow page table
 * @sg: pointer to the shadow guest address space structure
 * @saddr: faulting address in the shadow gmap
 * @pgt: parent gmap address of the page table to get shadowed
 *
 * Returns 0 if successfully shadowed or already shadowed, -EAGAIN if the
 * shadow table structure is incomplete, -ENOMEM if out of memory and
 * -EFAULT if an address in the parent gmap could not be resolved.
 *
 * Called with gmap->mm->mmap_sem in read
 */
int gmap_shadow_pgt(struct gmap *sg, unsigned long saddr, unsigned long pgt)
{
	unsigned long raddr, origin;
	unsigned long *s_pgt, *table;
	struct page *page;
	int rc;

	BUG_ON(!gmap_is_shadow(sg));
	/* Allocate a shadow page table */
	page = page_table_alloc_pgste(sg->mm);
	if (!page)
		return -ENOMEM;
	page->index = pgt & _SEGMENT_ENTRY_ORIGIN;
	s_pgt = (unsigned long *) page_to_phys(page);
	/* Install shadow page table */
	spin_lock(&sg->guest_table_lock);
	table = gmap_table_walk(sg, saddr, 1); /* get segment pointer */
	if (!table) {
		rc = -EAGAIN;		/* Race with unshadow */
		goto out_free;
	}
	if (!(*table & _SEGMENT_ENTRY_INVALID)) {
		rc = 0;			/* Already established */
		goto out_free;
	}
	*table = (unsigned long) s_pgt | _SEGMENT_ENTRY |
		 (pgt & _SEGMENT_ENTRY_PROTECT);
	list_add(&page->lru, &sg->pt_list);
	spin_unlock(&sg->guest_table_lock);
	/* Make pgt read-only in parent gmap page table (not the pgste) */
	raddr = (saddr & 0xfffffffffff00000UL) | _SHADOW_RMAP_SEGMENT;
	origin = pgt & _SEGMENT_ENTRY_ORIGIN & PAGE_MASK;
	rc = gmap_protect_rmap(sg, raddr, origin, PAGE_SIZE, PROT_READ);
	if (rc) {
		spin_lock(&sg->guest_table_lock);
		gmap_unshadow_pgt(sg, raddr);
		spin_unlock(&sg->guest_table_lock);
	}
	return rc;
out_free:
	spin_unlock(&sg->guest_table_lock);
	page_table_free_pgste(page);
	return rc;
}
EXPORT_SYMBOL_GPL(gmap_shadow_pgt);
/**
 * gmap_shadow_page - create a shadow page mapping
 * @sg: pointer to the shadow guest address space structure
 * @saddr: faulting address in the shadow gmap
 * @paddr: parent gmap address to get mapped at @saddr
 * @write: =1 map r/w, =0 map r/o
 *
 * Returns 0 if successfully shadowed or already shadowed, -EAGAIN if the
 * shadow table structure is incomplete, -ENOMEM if out of memory and
 * -EFAULT if an address in the parent gmap could not be resolved.
 *
 * Called with sg->mm->mmap_sem in read.
 */
int gmap_shadow_page(struct gmap *sg, unsigned long saddr,
		     unsigned long paddr, int write)
{
	struct gmap *parent;
	struct gmap_rmap *rmap;
	unsigned long vmaddr;
	spinlock_t *ptl;
	pte_t *sptep, *tptep;
	int rc;

	BUG_ON(!gmap_is_shadow(sg));
	parent = sg->parent;

	rmap = kzalloc(sizeof(*rmap), GFP_KERNEL);
	if (!rmap)
		return -ENOMEM;
	rmap->raddr = (saddr & PAGE_MASK) | _SHADOW_RMAP_PGTABLE;

	while (1) {
		vmaddr = __gmap_translate(parent, paddr);
		if (IS_ERR_VALUE(vmaddr)) {
			rc = vmaddr;
			break;
		}
		rc = radix_tree_preload(GFP_KERNEL);
		if (rc)
			break;
		rc = -EAGAIN;
		sptep = gmap_pte_op_walk(parent, paddr, &ptl);
		if (sptep) {
			spin_lock(&sg->guest_table_lock);
			/* Get page table pointer */
			tptep = (pte_t *) gmap_table_walk(sg, saddr, 0);
			if (!tptep) {
				spin_unlock(&sg->guest_table_lock);
				gmap_pte_op_end(ptl);
				radix_tree_preload_end();
				break;
			}
			rc = ptep_shadow_pte(sg->mm, saddr,
					     sptep, tptep, write);
			if (rc > 0) {
				/* Success and a new mapping */
				gmap_insert_rmap(sg, vmaddr, rmap);
				rmap = NULL;
				rc = 0;
			}
			gmap_pte_op_end(ptl);
			spin_unlock(&sg->guest_table_lock);
		}
		radix_tree_preload_end();
		if (!rc)
			break;
		rc = gmap_pte_op_fixup(parent, paddr, vmaddr);
		if (rc)
			break;
	}
	kfree(rmap);
	return rc;
}
EXPORT_SYMBOL_GPL(gmap_shadow_page);
/**
 * gmap_shadow_notify - handle notifications for shadow gmap
 * @sg: pointer to the shadow guest address space structure
 * @vmaddr: vm address associated with the changed pte
 * @offset: offset of the changed pte within its page table
 * @pte: pointer to the changed page table entry
 *
 * Called with sg->parent->shadow_lock.
 */
static void gmap_shadow_notify(struct gmap *sg, unsigned long vmaddr,
			       unsigned long offset, pte_t *pte)
{
	struct gmap_rmap *rmap, *rnext, *head;
	unsigned long gaddr, start, end, bits, raddr;
	unsigned long *table;

	BUG_ON(!gmap_is_shadow(sg));
	spin_lock(&sg->parent->guest_table_lock);
	table = radix_tree_lookup(&sg->parent->host_to_guest,
				  vmaddr >> PMD_SHIFT);
	gaddr = table ? __gmap_segment_gaddr(table) + offset : 0;
	spin_unlock(&sg->parent->guest_table_lock);
	if (!table)
		return;

	spin_lock(&sg->guest_table_lock);
	if (sg->removed) {
		spin_unlock(&sg->guest_table_lock);
		return;
	}
	/* Check for top level table */
	start = sg->orig_asce & _ASCE_ORIGIN;
	end = start + ((sg->orig_asce & _ASCE_TABLE_LENGTH) + 1) * 4096;
	if (gaddr >= start && gaddr < end) {
		/* The complete shadow table has to go */
		gmap_unshadow(sg);
		spin_unlock(&sg->guest_table_lock);
		list_del(&sg->list);
		gmap_put(sg);
		return;
	}
	/* Remove the page table tree from one specific entry */
	head = radix_tree_delete(&sg->host_to_rmap, vmaddr >> 12);
	gmap_for_each_rmap_safe(rmap, rnext, head) {
		bits = rmap->raddr & _SHADOW_RMAP_MASK;
		raddr = rmap->raddr ^ bits;
		switch (bits) {
		case _SHADOW_RMAP_REGION1:
			gmap_unshadow_r2t(sg, raddr);
			break;
		case _SHADOW_RMAP_REGION2:
			gmap_unshadow_r3t(sg, raddr);
			break;
		case _SHADOW_RMAP_REGION3:
			gmap_unshadow_sgt(sg, raddr);
			break;
		case _SHADOW_RMAP_SEGMENT:
			gmap_unshadow_pgt(sg, raddr);
			break;
		case _SHADOW_RMAP_PGTABLE:
			gmap_unshadow_page(sg, raddr);
			break;
		}
		kfree(rmap);
	}
	spin_unlock(&sg->guest_table_lock);
}
/**
 * ptep_notify - call all invalidation callbacks for a specific pte.
 * @mm: pointer to the process mm_struct
 * @vmaddr: virtual address in the process address space
 * @pte: pointer to the page table entry
 * @bits: bits from the pgste that caused the notify call
 *
 * This function is assumed to be called with the page table lock held
 * for the pte to notify.
 */
void ptep_notify(struct mm_struct *mm, unsigned long vmaddr,
		 pte_t *pte, unsigned long bits)
{
	unsigned long offset, gaddr;
	unsigned long *table;
	struct gmap *gmap, *sg, *next;

	offset = ((unsigned long) pte) & (255 * sizeof(pte_t));
	offset = offset * (4096 / sizeof(pte_t));
	rcu_read_lock();
	list_for_each_entry_rcu(gmap, &mm->context.gmap_list, list) {
		if (!list_empty(&gmap->children) && (bits & PGSTE_VSIE_BIT)) {
			spin_lock(&gmap->shadow_lock);
			list_for_each_entry_safe(sg, next,
						 &gmap->children, list)
				gmap_shadow_notify(sg, vmaddr, offset, pte);
			spin_unlock(&gmap->shadow_lock);
		}
		if (!(bits & PGSTE_IN_BIT))
			continue;
		spin_lock(&gmap->guest_table_lock);
		table = radix_tree_lookup(&gmap->host_to_guest,
					  vmaddr >> PMD_SHIFT);
		if (table)
			gaddr = __gmap_segment_gaddr(table) + offset;
		spin_unlock(&gmap->guest_table_lock);
		if (table)
			gmap_call_notifier(gmap, gaddr, gaddr + PAGE_SIZE - 1);
	}
	rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(ptep_notify);
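/*
 * Offset arithmetic in ptep_notify(): a guest page table has 256
 * entries, so "pte & (255 * sizeof(pte_t))" is the byte offset of the
 * entry within its table.  Multiplying by 4096 / sizeof(pte_t) = 512
 * converts it to the guest-address offset of the 4 KB page inside the
 * 1 MB segment (entry i sits at byte 8 * i, and 8 * i * 512 = i * 4096).
 */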
static inline void thp_split_mm(struct mm_struct *mm)
{
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	struct vm_area_struct *vma;
	unsigned long addr;

	for (vma = mm->mmap; vma != NULL; vma = vma->vm_next) {
		for (addr = vma->vm_start;
		     addr < vma->vm_end;
		     addr += PAGE_SIZE)
			follow_page(vma, addr, FOLL_SPLIT);
		vma->vm_flags &= ~VM_HUGEPAGE;
		vma->vm_flags |= VM_NOHUGEPAGE;
	}
	mm->def_flags |= VM_NOHUGEPAGE;
#endif
}
/*
 * switch on pgstes for its userspace process (for kvm)
 */
int s390_enable_sie(void)
{
	struct mm_struct *mm = current->mm;

	/* Do we have pgstes? if yes, we are done */
	if (mm_has_pgste(mm))
		return 0;
	/* Fail if the page tables are 2K */
	if (!mm_alloc_pgste(mm))
		return -EINVAL;
	down_write(&mm->mmap_sem);
	mm->context.has_pgste = 1;
	/* split thp mappings and disable thp for future mappings */
	thp_split_mm(mm);
	up_write(&mm->mmap_sem);
	return 0;
}
EXPORT_SYMBOL_GPL(s390_enable_sie);
/*
 * Enable storage key handling from now on and initialize the storage
 * keys with the default key.
 */
static int __s390_enable_skey(pte_t *pte, unsigned long addr,
			      unsigned long next, struct mm_walk *walk)
{
	/*
	 * Remove all zero page mappings with a policy to forbid zero page
	 * mappings; subsequent faults for such pages will get fresh
	 * anonymous pages.
	 */
	if (is_zero_pfn(pte_pfn(*pte)))
		ptep_xchg_direct(walk->mm, addr, pte, __pte(_PAGE_INVALID));
	/* Clear storage key */
	ptep_zap_key(walk->mm, addr, pte);
	return 0;
}
int s390_enable_skey(void)
{
	struct mm_walk walk = { .pte_entry = __s390_enable_skey };
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	int rc = 0;

	down_write(&mm->mmap_sem);
	if (mm_use_skey(mm))
		goto out_up;

	mm->context.use_skey = 1;
	for (vma = mm->mmap; vma; vma = vma->vm_next) {
		if (ksm_madvise(vma, vma->vm_start, vma->vm_end,
				MADV_UNMERGEABLE, &vma->vm_flags)) {
			mm->context.use_skey = 0;
			rc = -ENOMEM;
			goto out_up;
		}
	}
	mm->def_flags &= ~VM_MERGEABLE;

	walk.mm = mm;
	walk_page_range(0, TASK_SIZE, &walk);

out_up:
	up_write(&mm->mmap_sem);
	return rc;
}
EXPORT_SYMBOL_GPL(s390_enable_skey);
/*
 * Reset CMMA state, make all pages stable again.
 */
static int __s390_reset_cmma(pte_t *pte, unsigned long addr,
			     unsigned long next, struct mm_walk *walk)
{
	ptep_zap_unused(walk->mm, addr, pte, 1);
	return 0;
}

void s390_reset_cmma(struct mm_struct *mm)
{
	struct mm_walk walk = { .pte_entry = __s390_reset_cmma };

	down_write(&mm->mmap_sem);
	walk.mm = mm;
	walk_page_range(0, TASK_SIZE, &walk);
	up_write(&mm->mmap_sem);
}
EXPORT_SYMBOL_GPL(s390_reset_cmma);