// SPDX-License-Identifier: GPL-2.0
/*
 * KVM guest address space mapping code
 *
 * Copyright IBM Corp. 2007, 2016
 * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
 */

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/smp.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/swapops.h>
#include <linux/ksm.h>
#include <linux/mman.h>

#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/gmap.h>
#include <asm/tlb.h>

#define GMAP_SHADOW_FAKE_TABLE 1ULL
/**
 * gmap_alloc - allocate and initialize a guest address space
 * @limit: maximum address of the gmap address space
 *
 * Returns a guest address space structure.
 */
static struct gmap *gmap_alloc(unsigned long limit)
{
	struct gmap *gmap;
	struct page *page;
	unsigned long *table;
	unsigned long etype, atype;

	if (limit < _REGION3_SIZE) {
		limit = _REGION3_SIZE - 1;
		atype = _ASCE_TYPE_SEGMENT;
		etype = _SEGMENT_ENTRY_EMPTY;
	} else if (limit < _REGION2_SIZE) {
		limit = _REGION2_SIZE - 1;
		atype = _ASCE_TYPE_REGION3;
		etype = _REGION3_ENTRY_EMPTY;
	} else if (limit < _REGION1_SIZE) {
		limit = _REGION1_SIZE - 1;
		atype = _ASCE_TYPE_REGION2;
		etype = _REGION2_ENTRY_EMPTY;
	} else {
		limit = -1UL;
		atype = _ASCE_TYPE_REGION1;
		etype = _REGION1_ENTRY_EMPTY;
	}
	gmap = kzalloc(sizeof(struct gmap), GFP_KERNEL);
	if (!gmap)
		goto out;
	INIT_LIST_HEAD(&gmap->crst_list);
	INIT_LIST_HEAD(&gmap->children);
	INIT_LIST_HEAD(&gmap->pt_list);
	INIT_RADIX_TREE(&gmap->guest_to_host, GFP_KERNEL);
	INIT_RADIX_TREE(&gmap->host_to_guest, GFP_ATOMIC);
	INIT_RADIX_TREE(&gmap->host_to_rmap, GFP_ATOMIC);
	spin_lock_init(&gmap->guest_table_lock);
	spin_lock_init(&gmap->shadow_lock);
	atomic_set(&gmap->ref_count, 1);
	page = alloc_pages(GFP_KERNEL, CRST_ALLOC_ORDER);
	if (!page)
		goto out_free;
	page->index = 0;
	list_add(&page->lru, &gmap->crst_list);
	table = (unsigned long *) page_to_phys(page);
	crst_table_init(table, etype);
	gmap->table = table;
	gmap->asce = atype | _ASCE_TABLE_LENGTH |
		_ASCE_USER_BITS | __pa(table);
	gmap->asce_end = limit;
	return gmap;

out_free:
	kfree(gmap);
out:
	return NULL;
}
/**
 * gmap_create - create a guest address space
 * @mm: pointer to the parent mm_struct
 * @limit: maximum size of the gmap address space
 *
 * Returns a guest address space structure.
 */
struct gmap *gmap_create(struct mm_struct *mm, unsigned long limit)
{
	struct gmap *gmap;
	unsigned long gmap_asce;

	gmap = gmap_alloc(limit);
	if (!gmap)
		return NULL;
	gmap->mm = mm;
	spin_lock(&mm->context.lock);
	list_add_rcu(&gmap->list, &mm->context.gmap_list);
	if (list_is_singular(&mm->context.gmap_list))
		gmap_asce = gmap->asce;
	else
		gmap_asce = -1UL;
	WRITE_ONCE(mm->context.gmap_asce, gmap_asce);
	spin_unlock(&mm->context.lock);
	return gmap;
}
EXPORT_SYMBOL_GPL(gmap_create);
static void gmap_flush_tlb(struct gmap *gmap)
{
	if (MACHINE_HAS_IDTE)
		__tlb_flush_idte(gmap->asce);
	else
		__tlb_flush_global();
}
static void gmap_radix_tree_free(struct radix_tree_root *root)
{
	struct radix_tree_iter iter;
	unsigned long indices[16];
	unsigned long index;
	void __rcu **slot;
	int i, nr;

	/* A radix tree is freed by deleting all of its entries */
	index = 0;
	do {
		nr = 0;
		radix_tree_for_each_slot(slot, root, &iter, index) {
			indices[nr] = iter.index;
			if (++nr == 16)
				break;
		}
		for (i = 0; i < nr; i++) {
			index = indices[i];
			radix_tree_delete(root, index);
		}
	} while (nr > 0);
}
static void gmap_rmap_radix_tree_free(struct radix_tree_root *root)
{
	struct gmap_rmap *rmap, *rnext, *head;
	struct radix_tree_iter iter;
	unsigned long indices[16];
	unsigned long index;
	void __rcu **slot;
	int i, nr;

	/* A radix tree is freed by deleting all of its entries */
	index = 0;
	do {
		nr = 0;
		radix_tree_for_each_slot(slot, root, &iter, index) {
			indices[nr] = iter.index;
			if (++nr == 16)
				break;
		}
		for (i = 0; i < nr; i++) {
			index = indices[i];
			head = radix_tree_delete(root, index);
			gmap_for_each_rmap_safe(rmap, rnext, head)
				kfree(rmap);
		}
	} while (nr > 0);
}
/**
 * gmap_free - free a guest address space
 * @gmap: pointer to the guest address space structure
 *
 * No locks required. There are no references to this gmap anymore.
 */
static void gmap_free(struct gmap *gmap)
{
	struct page *page, *next;

	/* Flush tlb of all gmaps (if not already done for shadows) */
	if (!(gmap_is_shadow(gmap) && gmap->removed))
		gmap_flush_tlb(gmap);
	/* Free all segment & region tables. */
	list_for_each_entry_safe(page, next, &gmap->crst_list, lru)
		__free_pages(page, CRST_ALLOC_ORDER);
	gmap_radix_tree_free(&gmap->guest_to_host);
	gmap_radix_tree_free(&gmap->host_to_guest);

	/* Free additional data for a shadow gmap */
	if (gmap_is_shadow(gmap)) {
		/* Free all page tables. */
		list_for_each_entry_safe(page, next, &gmap->pt_list, lru)
			page_table_free_pgste(page);
		gmap_rmap_radix_tree_free(&gmap->host_to_rmap);
		/* Release reference to the parent */
		gmap_put(gmap->parent);
	}

	kfree(gmap);
}
/**
 * gmap_get - increase reference counter for guest address space
 * @gmap: pointer to the guest address space structure
 *
 * Returns the gmap pointer
 */
struct gmap *gmap_get(struct gmap *gmap)
{
	atomic_inc(&gmap->ref_count);
	return gmap;
}
EXPORT_SYMBOL_GPL(gmap_get);
/**
 * gmap_put - decrease reference counter for guest address space
 * @gmap: pointer to the guest address space structure
 *
 * If the reference counter reaches zero the guest address space is freed.
 */
void gmap_put(struct gmap *gmap)
{
	if (atomic_dec_return(&gmap->ref_count) == 0)
		gmap_free(gmap);
}
EXPORT_SYMBOL_GPL(gmap_put);
/**
 * gmap_remove - remove a guest address space but do not free it yet
 * @gmap: pointer to the guest address space structure
 */
void gmap_remove(struct gmap *gmap)
{
	struct gmap *sg, *next;
	unsigned long gmap_asce;

	/* Remove all shadow gmaps linked to this gmap */
	if (!list_empty(&gmap->children)) {
		spin_lock(&gmap->shadow_lock);
		list_for_each_entry_safe(sg, next, &gmap->children, list) {
			list_del(&sg->list);
			gmap_put(sg);
		}
		spin_unlock(&gmap->shadow_lock);
	}
	/* Remove gmap from the pre-mm list */
	spin_lock(&gmap->mm->context.lock);
	list_del_rcu(&gmap->list);
	if (list_empty(&gmap->mm->context.gmap_list))
		gmap_asce = 0;
	else if (list_is_singular(&gmap->mm->context.gmap_list))
		gmap_asce = list_first_entry(&gmap->mm->context.gmap_list,
					     struct gmap, list)->asce;
	else
		gmap_asce = -1UL;
	WRITE_ONCE(gmap->mm->context.gmap_asce, gmap_asce);
	spin_unlock(&gmap->mm->context.lock);
	synchronize_rcu();
	/* Put reference */
	gmap_put(gmap);
}
EXPORT_SYMBOL_GPL(gmap_remove);
/**
 * gmap_enable - switch primary space to the guest address space
 * @gmap: pointer to the guest address space structure
 */
void gmap_enable(struct gmap *gmap)
{
	S390_lowcore.gmap = (unsigned long) gmap;
}
EXPORT_SYMBOL_GPL(gmap_enable);

/**
 * gmap_disable - switch back to the standard primary address space
 * @gmap: pointer to the guest address space structure
 */
void gmap_disable(struct gmap *gmap)
{
	S390_lowcore.gmap = 0UL;
}
EXPORT_SYMBOL_GPL(gmap_disable);

/**
 * gmap_get_enabled - get a pointer to the currently enabled gmap
 *
 * Returns a pointer to the currently enabled gmap. 0 if none is enabled.
 */
struct gmap *gmap_get_enabled(void)
{
	return (struct gmap *) S390_lowcore.gmap;
}
EXPORT_SYMBOL_GPL(gmap_get_enabled);
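
/*
 * Illustrative sketch, not part of the original file: the typical
 * lifecycle of a gmap as a hypervisor-like caller would drive it.
 * The function name and the 16TB limit are assumptions for the example;
 * only the gmap_* calls below are the real API from this file.
 */
static int example_gmap_lifecycle(struct mm_struct *mm)
{
	struct gmap *gmap;

	gmap = gmap_create(mm, (1UL << 44) - 1);	/* up to 16TB guest */
	if (!gmap)
		return -ENOMEM;
	gmap_enable(gmap);	/* make it the active guest address space */
	/* ... run the guest, resolving faults via gmap_fault() ... */
	gmap_disable(gmap);
	gmap_remove(gmap);	/* drops the initial reference */
	return 0;
}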
/*
 * gmap_alloc_table is assumed to be called with mmap_sem held
 */
static int gmap_alloc_table(struct gmap *gmap, unsigned long *table,
			    unsigned long init, unsigned long gaddr)
{
	struct page *page;
	unsigned long *new;

	/* since we dont free the gmap table until gmap_free we can unlock */
	page = alloc_pages(GFP_KERNEL, CRST_ALLOC_ORDER);
	if (!page)
		return -ENOMEM;
	new = (unsigned long *) page_to_phys(page);
	crst_table_init(new, init);
	spin_lock(&gmap->guest_table_lock);
	if (*table & _REGION_ENTRY_INVALID) {
		list_add(&page->lru, &gmap->crst_list);
		*table = (unsigned long) new | _REGION_ENTRY_LENGTH |
			(*table & _REGION_ENTRY_TYPE_MASK);
		page->index = gaddr;
		page = NULL;
	}
	spin_unlock(&gmap->guest_table_lock);
	if (page)
		__free_pages(page, CRST_ALLOC_ORDER);
	return 0;
}
/**
 * __gmap_segment_gaddr - find virtual address from segment pointer
 * @entry: pointer to a segment table entry in the guest address space
 *
 * Returns the virtual address in the guest address space for the segment
 */
static unsigned long __gmap_segment_gaddr(unsigned long *entry)
{
	struct page *page;
	unsigned long offset, mask;

	offset = (unsigned long) entry / sizeof(unsigned long);
	offset = (offset & (PTRS_PER_PMD - 1)) * PMD_SIZE;
	mask = ~(PTRS_PER_PMD * sizeof(pmd_t) - 1);
	page = virt_to_page((void *)((unsigned long) entry & mask));
	return page->index + offset;
}
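
/*
 * Illustrative sketch, not part of the original file: the same index
 * arithmetic as __gmap_segment_gaddr(), spelled out. "crst_page" is an
 * assumption for the page backing the segment table; its page->index was
 * set to the guest address of the table by gmap_alloc_table() above.
 */
static unsigned long example_segment_entry_gaddr(struct page *crst_page,
						 unsigned long *entry)
{
	/* slot number of the entry inside its segment table */
	unsigned long slot = ((unsigned long) entry / sizeof(unsigned long)) &
			     (PTRS_PER_PMD - 1);

	/* each slot covers one segment of PMD_SIZE bytes of guest memory */
	return crst_page->index + slot * PMD_SIZE;
}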
/**
 * __gmap_unlink_by_vmaddr - unlink a single segment via a host address
 * @gmap: pointer to the guest address space structure
 * @vmaddr: address in the host process address space
 *
 * Returns 1 if a TLB flush is required
 */
static int __gmap_unlink_by_vmaddr(struct gmap *gmap, unsigned long vmaddr)
{
	unsigned long *entry;
	int flush = 0;

	BUG_ON(gmap_is_shadow(gmap));
	spin_lock(&gmap->guest_table_lock);
	entry = radix_tree_delete(&gmap->host_to_guest, vmaddr >> PMD_SHIFT);
	if (entry) {
		flush = (*entry != _SEGMENT_ENTRY_EMPTY);
		*entry = _SEGMENT_ENTRY_EMPTY;
	}
	spin_unlock(&gmap->guest_table_lock);
	return flush;
}
/**
 * __gmap_unmap_by_gaddr - unmap a single segment via a guest address
 * @gmap: pointer to the guest address space structure
 * @gaddr: address in the guest address space
 *
 * Returns 1 if a TLB flush is required
 */
static int __gmap_unmap_by_gaddr(struct gmap *gmap, unsigned long gaddr)
{
	unsigned long vmaddr;

	vmaddr = (unsigned long) radix_tree_delete(&gmap->guest_to_host,
						   gaddr >> PMD_SHIFT);
	return vmaddr ? __gmap_unlink_by_vmaddr(gmap, vmaddr) : 0;
}
/**
 * gmap_unmap_segment - unmap segment from the guest address space
 * @gmap: pointer to the guest address space structure
 * @to: address in the guest address space
 * @len: length of the memory area to unmap
 *
 * Returns 0 if the unmap succeeded, -EINVAL if not.
 */
int gmap_unmap_segment(struct gmap *gmap, unsigned long to, unsigned long len)
{
	unsigned long off;
	int flush;

	BUG_ON(gmap_is_shadow(gmap));
	if ((to | len) & (PMD_SIZE - 1))
		return -EINVAL;
	if (len == 0 || to + len < to)
		return -EINVAL;

	flush = 0;
	down_write(&gmap->mm->mmap_sem);
	for (off = 0; off < len; off += PMD_SIZE)
		flush |= __gmap_unmap_by_gaddr(gmap, to + off);
	up_write(&gmap->mm->mmap_sem);
	if (flush)
		gmap_flush_tlb(gmap);
	return 0;
}
EXPORT_SYMBOL_GPL(gmap_unmap_segment);
/**
 * gmap_map_segment - map a segment to the guest address space
 * @gmap: pointer to the guest address space structure
 * @from: source address in the parent address space
 * @to: target address in the guest address space
 * @len: length of the memory area to map
 *
 * Returns 0 if the mmap succeeded, -EINVAL or -ENOMEM if not.
 */
int gmap_map_segment(struct gmap *gmap, unsigned long from,
		     unsigned long to, unsigned long len)
{
	unsigned long off;
	int flush;

	BUG_ON(gmap_is_shadow(gmap));
	if ((from | to | len) & (PMD_SIZE - 1))
		return -EINVAL;
	if (len == 0 || from + len < from || to + len < to ||
	    from + len - 1 > TASK_SIZE_MAX || to + len - 1 > gmap->asce_end)
		return -EINVAL;

	flush = 0;
	down_write(&gmap->mm->mmap_sem);
	for (off = 0; off < len; off += PMD_SIZE) {
		/* Remove old translation */
		flush |= __gmap_unmap_by_gaddr(gmap, to + off);
		/* Store new translation */
		if (radix_tree_insert(&gmap->guest_to_host,
				      (to + off) >> PMD_SHIFT,
				      (void *) from + off))
			break;
	}
	up_write(&gmap->mm->mmap_sem);
	if (flush)
		gmap_flush_tlb(gmap);
	if (off >= len)
		return 0;
	gmap_unmap_segment(gmap, to, len);
	return -ENOMEM;
}
EXPORT_SYMBOL_GPL(gmap_map_segment);
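
/*
 * Illustrative sketch, not part of the original file: make 1MB of the
 * host process visible at guest address 0. Both addresses and the length
 * must be segment (PMD_SIZE, 1MB on s390) aligned; the concrete values
 * are assumptions for the example.
 */
static int example_map_guest_ram(struct gmap *gmap, unsigned long host_base)
{
	/* host_base must be PMD_SIZE aligned in the parent address space */
	return gmap_map_segment(gmap, host_base, 0x0UL, 0x100000UL);
}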
/**
 * __gmap_translate - translate a guest address to a user space address
 * @gmap: pointer to guest mapping meta data structure
 * @gaddr: guest address
 *
 * Returns user space address which corresponds to the guest address or
 * -EFAULT if no such mapping exists.
 * This function does not establish potentially missing page table entries.
 * The mmap_sem of the mm that belongs to the address space must be held
 * when this function gets called.
 *
 * Note: Can also be called for shadow gmaps.
 */
unsigned long __gmap_translate(struct gmap *gmap, unsigned long gaddr)
{
	unsigned long vmaddr;

	vmaddr = (unsigned long)
		radix_tree_lookup(&gmap->guest_to_host, gaddr >> PMD_SHIFT);
	/* Note: guest_to_host is empty for a shadow gmap */
	return vmaddr ? (vmaddr | (gaddr & ~PMD_MASK)) : -EFAULT;
}
EXPORT_SYMBOL_GPL(__gmap_translate);
/**
 * gmap_translate - translate a guest address to a user space address
 * @gmap: pointer to guest mapping meta data structure
 * @gaddr: guest address
 *
 * Returns user space address which corresponds to the guest address or
 * -EFAULT if no such mapping exists.
 * This function does not establish potentially missing page table entries.
 */
unsigned long gmap_translate(struct gmap *gmap, unsigned long gaddr)
{
	unsigned long rc;

	down_read(&gmap->mm->mmap_sem);
	rc = __gmap_translate(gmap, gaddr);
	up_read(&gmap->mm->mmap_sem);
	return rc;
}
EXPORT_SYMBOL_GPL(gmap_translate);
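
/*
 * Illustrative sketch, not part of the original file: translation results
 * come back as an unsigned long, so failure has to be detected with
 * IS_ERR_VALUE() rather than a NULL check. The helper name is hypothetical.
 */
static bool example_gaddr_is_mapped(struct gmap *gmap, unsigned long gaddr)
{
	unsigned long vmaddr = gmap_translate(gmap, gaddr);

	return !IS_ERR_VALUE(vmaddr);
}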
/**
 * gmap_unlink - disconnect a page table from the gmap shadow tables
 * @mm: pointer to the parent mm_struct
 * @table: pointer to the host page table
 * @vmaddr: vm address associated with the host page table
 */
void gmap_unlink(struct mm_struct *mm, unsigned long *table,
		 unsigned long vmaddr)
{
	struct gmap *gmap;
	int flush;

	rcu_read_lock();
	list_for_each_entry_rcu(gmap, &mm->context.gmap_list, list) {
		flush = __gmap_unlink_by_vmaddr(gmap, vmaddr);
		if (flush)
			gmap_flush_tlb(gmap);
	}
	rcu_read_unlock();
}
/**
 * __gmap_link - set up shadow page tables to connect a host to a guest address
 * @gmap: pointer to guest mapping meta data structure
 * @gaddr: guest address
 * @vmaddr: vm address
 *
 * Returns 0 on success, -ENOMEM for out of memory conditions, and -EFAULT
 * if the vm address is already mapped to a different guest segment.
 * The mmap_sem of the mm that belongs to the address space must be held
 * when this function gets called.
 */
int __gmap_link(struct gmap *gmap, unsigned long gaddr, unsigned long vmaddr)
{
	struct mm_struct *mm;
	unsigned long *table;
	spinlock_t *ptl;
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;
	int rc;

	BUG_ON(gmap_is_shadow(gmap));
	/* Create higher level tables in the gmap page table */
	table = gmap->table;
	if ((gmap->asce & _ASCE_TYPE_MASK) >= _ASCE_TYPE_REGION1) {
		table += (gaddr & _REGION1_INDEX) >> _REGION1_SHIFT;
		if ((*table & _REGION_ENTRY_INVALID) &&
		    gmap_alloc_table(gmap, table, _REGION2_ENTRY_EMPTY,
				     gaddr & _REGION1_MASK))
			return -ENOMEM;
		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
	}
	if ((gmap->asce & _ASCE_TYPE_MASK) >= _ASCE_TYPE_REGION2) {
		table += (gaddr & _REGION2_INDEX) >> _REGION2_SHIFT;
		if ((*table & _REGION_ENTRY_INVALID) &&
		    gmap_alloc_table(gmap, table, _REGION3_ENTRY_EMPTY,
				     gaddr & _REGION2_MASK))
			return -ENOMEM;
		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
	}
	if ((gmap->asce & _ASCE_TYPE_MASK) >= _ASCE_TYPE_REGION3) {
		table += (gaddr & _REGION3_INDEX) >> _REGION3_SHIFT;
		if ((*table & _REGION_ENTRY_INVALID) &&
		    gmap_alloc_table(gmap, table, _SEGMENT_ENTRY_EMPTY,
				     gaddr & _REGION3_MASK))
			return -ENOMEM;
		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
	}
	table += (gaddr & _SEGMENT_INDEX) >> _SEGMENT_SHIFT;
	/* Walk the parent mm page table */
	mm = gmap->mm;
	pgd = pgd_offset(mm, vmaddr);
	VM_BUG_ON(pgd_none(*pgd));
	p4d = p4d_offset(pgd, vmaddr);
	VM_BUG_ON(p4d_none(*p4d));
	pud = pud_offset(p4d, vmaddr);
	VM_BUG_ON(pud_none(*pud));
	/* large puds cannot yet be handled */
	if (pud_large(*pud))
		return -EFAULT;
	pmd = pmd_offset(pud, vmaddr);
	VM_BUG_ON(pmd_none(*pmd));
	/* large pmds cannot yet be handled */
	if (pmd_large(*pmd))
		return -EFAULT;
	/* Link gmap segment table entry location to page table. */
	rc = radix_tree_preload(GFP_KERNEL);
	if (rc)
		return rc;
	ptl = pmd_lock(mm, pmd);
	spin_lock(&gmap->guest_table_lock);
	if (*table == _SEGMENT_ENTRY_EMPTY) {
		rc = radix_tree_insert(&gmap->host_to_guest,
				       vmaddr >> PMD_SHIFT, table);
		if (!rc)
			*table = pmd_val(*pmd);
	} else
		rc = 0;
	spin_unlock(&gmap->guest_table_lock);
	spin_unlock(ptl);
	radix_tree_preload_end();
	return rc;
}
/**
 * gmap_fault - resolve a fault on a guest address
 * @gmap: pointer to guest mapping meta data structure
 * @gaddr: guest address
 * @fault_flags: flags to pass down to handle_mm_fault()
 *
 * Returns 0 on success, -ENOMEM for out of memory conditions, and -EFAULT
 * if the vm address is already mapped to a different guest segment.
 */
int gmap_fault(struct gmap *gmap, unsigned long gaddr,
	       unsigned int fault_flags)
{
	unsigned long vmaddr;
	int rc;
	bool unlocked;

	down_read(&gmap->mm->mmap_sem);

retry:
	unlocked = false;
	vmaddr = __gmap_translate(gmap, gaddr);
	if (IS_ERR_VALUE(vmaddr)) {
		rc = vmaddr;
		goto out_up;
	}
	if (fixup_user_fault(current, gmap->mm, vmaddr, fault_flags,
			     &unlocked)) {
		rc = -EFAULT;
		goto out_up;
	}
	/*
	 * In the case that fixup_user_fault unlocked the mmap_sem during
	 * fault-in, redo __gmap_translate to not race with a map/unmap_segment.
	 */
	if (unlocked)
		goto retry;

	rc = __gmap_link(gmap, gaddr, vmaddr);
out_up:
	up_read(&gmap->mm->mmap_sem);
	return rc;
}
EXPORT_SYMBOL_GPL(gmap_fault);
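
/*
 * Illustrative sketch, not part of the original file: resolving a guest
 * write fault, as a hypervisor exit handler might do. The helper name is
 * hypothetical; FAULT_FLAG_WRITE comes from linux/mm.h.
 */
static int example_resolve_write_fault(struct gmap *gmap, unsigned long gaddr)
{
	return gmap_fault(gmap, gaddr, FAULT_FLAG_WRITE);
}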
/*
 * this function is assumed to be called with mmap_sem held
 */
void __gmap_zap(struct gmap *gmap, unsigned long gaddr)
{
	unsigned long vmaddr;
	spinlock_t *ptl;
	pte_t *ptep;

	/* Find the vm address for the guest address */
	vmaddr = (unsigned long) radix_tree_lookup(&gmap->guest_to_host,
						   gaddr >> PMD_SHIFT);
	if (vmaddr) {
		vmaddr |= gaddr & ~PMD_MASK;
		/* Get pointer to the page table entry */
		ptep = get_locked_pte(gmap->mm, vmaddr, &ptl);
		if (likely(ptep))
			ptep_zap_unused(gmap->mm, vmaddr, ptep, 0);
		pte_unmap_unlock(ptep, ptl);
	}
}
EXPORT_SYMBOL_GPL(__gmap_zap);
void gmap_discard(struct gmap *gmap, unsigned long from, unsigned long to)
{
	unsigned long gaddr, vmaddr, size;
	struct vm_area_struct *vma;

	down_read(&gmap->mm->mmap_sem);
	for (gaddr = from; gaddr < to;
	     gaddr = (gaddr + PMD_SIZE) & PMD_MASK) {
		/* Find the vm address for the guest address */
		vmaddr = (unsigned long)
			radix_tree_lookup(&gmap->guest_to_host,
					  gaddr >> PMD_SHIFT);
		if (!vmaddr)
			continue;
		vmaddr |= gaddr & ~PMD_MASK;
		/* Find vma in the parent mm */
		vma = find_vma(gmap->mm, vmaddr);
		size = min(to - gaddr, PMD_SIZE - (gaddr & ~PMD_MASK));
		zap_page_range(vma, vmaddr, size);
	}
	up_read(&gmap->mm->mmap_sem);
}
EXPORT_SYMBOL_GPL(gmap_discard);
static LIST_HEAD(gmap_notifier_list);
static DEFINE_SPINLOCK(gmap_notifier_lock);

/**
 * gmap_register_pte_notifier - register a pte invalidation callback
 * @nb: pointer to the gmap notifier block
 */
void gmap_register_pte_notifier(struct gmap_notifier *nb)
{
	spin_lock(&gmap_notifier_lock);
	list_add_rcu(&nb->list, &gmap_notifier_list);
	spin_unlock(&gmap_notifier_lock);
}
EXPORT_SYMBOL_GPL(gmap_register_pte_notifier);

/**
 * gmap_unregister_pte_notifier - remove a pte invalidation callback
 * @nb: pointer to the gmap notifier block
 */
void gmap_unregister_pte_notifier(struct gmap_notifier *nb)
{
	spin_lock(&gmap_notifier_lock);
	list_del_rcu(&nb->list);
	spin_unlock(&gmap_notifier_lock);
	synchronize_rcu();
}
EXPORT_SYMBOL_GPL(gmap_unregister_pte_notifier);
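
/*
 * Illustrative sketch, not part of the original file: how a client such
 * as KVM might hook the pte invalidation notifier. The callback and
 * variable names are hypothetical; the gmap_notifier structure and the
 * register call are the real API from this file.
 */
static void example_notifier_call(struct gmap *gmap, unsigned long start,
				  unsigned long end)
{
	/* react to the invalidation of guest range [start, end] */
}

static struct gmap_notifier example_notifier = {
	.notifier_call = example_notifier_call,
};

static void example_register_notifier(void)
{
	gmap_register_pte_notifier(&example_notifier);
}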
/**
 * gmap_call_notifier - call all registered invalidation callbacks
 * @gmap: pointer to guest mapping meta data structure
 * @start: start virtual address in the guest address space
 * @end: end virtual address in the guest address space
 */
static void gmap_call_notifier(struct gmap *gmap, unsigned long start,
			       unsigned long end)
{
	struct gmap_notifier *nb;

	list_for_each_entry(nb, &gmap_notifier_list, list)
		nb->notifier_call(gmap, start, end);
}
/**
 * gmap_table_walk - walk the gmap page tables
 * @gmap: pointer to guest mapping meta data structure
 * @gaddr: virtual address in the guest address space
 * @level: page table level to stop at
 *
 * Returns a table entry pointer for the given guest address and @level
 * @level=0 : returns a pointer to a page table entry (or NULL)
 * @level=1 : returns a pointer to a segment table entry (or NULL)
 * @level=2 : returns a pointer to a region-3 table entry (or NULL)
 * @level=3 : returns a pointer to a region-2 table entry (or NULL)
 * @level=4 : returns a pointer to a region-1 table entry (or NULL)
 *
 * Returns NULL if the gmap page tables could not be walked to the
 * requested level.
 *
 * Note: Can also be called for shadow gmaps.
 */
static inline unsigned long *gmap_table_walk(struct gmap *gmap,
					     unsigned long gaddr, int level)
{
	unsigned long *table;

	if ((gmap->asce & _ASCE_TYPE_MASK) + 4 < (level * 4))
		return NULL;
	if (gmap_is_shadow(gmap) && gmap->removed)
		return NULL;
	if (gaddr & (-1UL << (31 + ((gmap->asce & _ASCE_TYPE_MASK) >> 2) * 11)))
		return NULL;
	table = (unsigned long *)(gmap->asce & _ASCE_ORIGIN);
	switch (gmap->asce & _ASCE_TYPE_MASK) {
	case _ASCE_TYPE_REGION1:
		table += (gaddr & _REGION1_INDEX) >> _REGION1_SHIFT;
		if (level == 4)
			break;
		if (*table & _REGION_ENTRY_INVALID)
			return NULL;
		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
		/* fallthrough */
	case _ASCE_TYPE_REGION2:
		table += (gaddr & _REGION2_INDEX) >> _REGION2_SHIFT;
		if (level == 3)
			break;
		if (*table & _REGION_ENTRY_INVALID)
			return NULL;
		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
		/* fallthrough */
	case _ASCE_TYPE_REGION3:
		table += (gaddr & _REGION3_INDEX) >> _REGION3_SHIFT;
		if (level == 2)
			break;
		if (*table & _REGION_ENTRY_INVALID)
			return NULL;
		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
		/* fallthrough */
	case _ASCE_TYPE_SEGMENT:
		table += (gaddr & _SEGMENT_INDEX) >> _SEGMENT_SHIFT;
		if (level == 1)
			break;
		if (*table & _REGION_ENTRY_INVALID)
			return NULL;
		table = (unsigned long *)(*table & _SEGMENT_ENTRY_ORIGIN);
		table += (gaddr & _PAGE_INDEX) >> _PAGE_SHIFT;
	}
	return table;
}
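
/*
 * Illustrative sketch, not part of the original file: fetching the segment
 * table entry that covers a guest address by stopping the walk at level 1.
 * For a shadow gmap the caller would have to hold sg->guest_table_lock;
 * the helper name is hypothetical.
 */
static unsigned long example_read_segment_entry(struct gmap *gmap,
						unsigned long gaddr)
{
	unsigned long *ste = gmap_table_walk(gmap, gaddr, 1);

	return ste ? *ste : _SEGMENT_ENTRY_EMPTY;
}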
/**
 * gmap_pte_op_walk - walk the gmap page table, get the page table lock
 *		      and return the pte pointer
 * @gmap: pointer to guest mapping meta data structure
 * @gaddr: virtual address in the guest address space
 * @ptl: pointer to the spinlock pointer
 *
 * Returns a pointer to the locked pte for a guest address, or NULL
 *
 * Note: Can also be called for shadow gmaps.
 */
static pte_t *gmap_pte_op_walk(struct gmap *gmap, unsigned long gaddr,
			       spinlock_t **ptl)
{
	unsigned long *table;

	if (gmap_is_shadow(gmap))
		spin_lock(&gmap->guest_table_lock);
	/* Walk the gmap page table, lock and get pte pointer */
	table = gmap_table_walk(gmap, gaddr, 1); /* get segment pointer */
	if (!table || *table & _SEGMENT_ENTRY_INVALID) {
		if (gmap_is_shadow(gmap))
			spin_unlock(&gmap->guest_table_lock);
		return NULL;
	}
	if (gmap_is_shadow(gmap)) {
		*ptl = &gmap->guest_table_lock;
		return pte_offset_map((pmd_t *) table, gaddr);
	}
	return pte_alloc_map_lock(gmap->mm, (pmd_t *) table, gaddr, ptl);
}
/**
 * gmap_pte_op_fixup - force a page in and connect the gmap page table
 * @gmap: pointer to guest mapping meta data structure
 * @gaddr: virtual address in the guest address space
 * @vmaddr: address in the host process address space
 * @prot: indicates access rights: PROT_NONE, PROT_READ or PROT_WRITE
 *
 * Returns 0 if the caller can retry __gmap_translate (might fail again),
 * -ENOMEM if out of memory and -EFAULT if anything goes wrong while fixing
 * up or connecting the gmap page table.
 */
static int gmap_pte_op_fixup(struct gmap *gmap, unsigned long gaddr,
			     unsigned long vmaddr, int prot)
{
	struct mm_struct *mm = gmap->mm;
	unsigned int fault_flags;
	bool unlocked = false;

	BUG_ON(gmap_is_shadow(gmap));
	fault_flags = (prot == PROT_WRITE) ? FAULT_FLAG_WRITE : 0;
	if (fixup_user_fault(current, mm, vmaddr, fault_flags, &unlocked))
		return -EFAULT;
	if (unlocked)
		/* lost mmap_sem, caller has to retry __gmap_translate */
		return 0;
	/* Connect the page tables */
	return __gmap_link(gmap, gaddr, vmaddr);
}
/**
 * gmap_pte_op_end - release the page table lock
 * @ptl: pointer to the spinlock pointer
 */
static void gmap_pte_op_end(spinlock_t *ptl)
{
	spin_unlock(ptl);
}
/**
 * gmap_protect_range - remove access rights to memory and set pgste bits
 * @gmap: pointer to guest mapping meta data structure
 * @gaddr: virtual address in the guest address space
 * @len: size of area
 * @prot: indicates access rights: PROT_NONE, PROT_READ or PROT_WRITE
 * @bits: pgste notification bits to set
 *
 * Returns 0 if successfully protected, -ENOMEM if out of memory and
 * -EFAULT if gaddr is invalid (or mapping for shadows is missing).
 *
 * Called with sg->mm->mmap_sem in read.
 *
 * Note: Can also be called for shadow gmaps.
 */
static int gmap_protect_range(struct gmap *gmap, unsigned long gaddr,
			      unsigned long len, int prot, unsigned long bits)
{
	unsigned long vmaddr;
	spinlock_t *ptl;
	pte_t *ptep;
	int rc;

	while (len) {
		rc = -EAGAIN;
		ptep = gmap_pte_op_walk(gmap, gaddr, &ptl);
		if (ptep) {
			rc = ptep_force_prot(gmap->mm, gaddr, ptep, prot, bits);
			gmap_pte_op_end(ptl);
		}
		if (rc) {
			vmaddr = __gmap_translate(gmap, gaddr);
			if (IS_ERR_VALUE(vmaddr))
				return vmaddr;
			rc = gmap_pte_op_fixup(gmap, gaddr, vmaddr, prot);
			if (rc)
				return rc;
			continue;
		}
		gaddr += PAGE_SIZE;
		len -= PAGE_SIZE;
	}
	return 0;
}
/**
 * gmap_mprotect_notify - change access rights for a range of ptes and
 *                        call the notifier if any pte changes again
 * @gmap: pointer to guest mapping meta data structure
 * @gaddr: virtual address in the guest address space
 * @len: size of area
 * @prot: indicates access rights: PROT_NONE, PROT_READ or PROT_WRITE
 *
 * Returns 0 if for each page in the given range a gmap mapping exists,
 * the new access rights could be set and the notifier could be armed.
 * If the gmap mapping is missing for one or more pages -EFAULT is
 * returned. If no memory could be allocated -ENOMEM is returned.
 * This function establishes missing page table entries.
 */
int gmap_mprotect_notify(struct gmap *gmap, unsigned long gaddr,
			 unsigned long len, int prot)
{
	int rc;

	if ((gaddr & ~PAGE_MASK) || (len & ~PAGE_MASK) || gmap_is_shadow(gmap))
		return -EINVAL;
	if (!MACHINE_HAS_ESOP && prot == PROT_READ)
		return -EINVAL;
	down_read(&gmap->mm->mmap_sem);
	rc = gmap_protect_range(gmap, gaddr, len, prot, PGSTE_IN_BIT);
	up_read(&gmap->mm->mmap_sem);
	return rc;
}
EXPORT_SYMBOL_GPL(gmap_mprotect_notify);
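
/*
 * Illustrative sketch, not part of the original file: write-protect a
 * single guest page and arm the invalidation notifier for it, as a caller
 * tracking guest-visible structures might do. The helper name is
 * hypothetical.
 */
static int example_watch_guest_page(struct gmap *gmap, unsigned long gaddr)
{
	return gmap_mprotect_notify(gmap, gaddr & PAGE_MASK, PAGE_SIZE,
				    PROT_READ);
}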
/**
 * gmap_read_table - get an unsigned long value from a guest page table using
 *                   absolute addressing, without marking the page referenced.
 * @gmap: pointer to guest mapping meta data structure
 * @gaddr: virtual address in the guest address space
 * @val: pointer to the unsigned long value to return
 *
 * Returns 0 if the value was read, -ENOMEM if out of memory and -EFAULT
 * if reading using the virtual address failed.
 *
 * Called with gmap->mm->mmap_sem in read.
 */
int gmap_read_table(struct gmap *gmap, unsigned long gaddr, unsigned long *val)
{
	unsigned long address, vmaddr;
	spinlock_t *ptl;
	pte_t *ptep, pte;
	int rc;

	while (1) {
		rc = -EAGAIN;
		ptep = gmap_pte_op_walk(gmap, gaddr, &ptl);
		if (ptep) {
			pte = *ptep;
			if (pte_present(pte) && (pte_val(pte) & _PAGE_READ)) {
				address = pte_val(pte) & PAGE_MASK;
				address += gaddr & ~PAGE_MASK;
				*val = *(unsigned long *) address;
				pte_val(*ptep) |= _PAGE_YOUNG;
				/* Do *NOT* clear the _PAGE_INVALID bit! */
				rc = 0;
			}
			gmap_pte_op_end(ptl);
		}
		if (!rc)
			break;
		vmaddr = __gmap_translate(gmap, gaddr);
		if (IS_ERR_VALUE(vmaddr)) {
			rc = vmaddr;
			break;
		}
		rc = gmap_pte_op_fixup(gmap, gaddr, vmaddr, PROT_READ);
		if (rc)
			break;
	}
	return rc;
}
EXPORT_SYMBOL_GPL(gmap_read_table);
/**
 * gmap_insert_rmap - add a rmap to the host_to_rmap radix tree
 * @sg: pointer to the shadow guest address space structure
 * @vmaddr: vm address associated with the rmap
 * @rmap: pointer to the rmap structure
 *
 * Called with the sg->guest_table_lock
 */
static inline void gmap_insert_rmap(struct gmap *sg, unsigned long vmaddr,
				    struct gmap_rmap *rmap)
{
	void __rcu **slot;

	BUG_ON(!gmap_is_shadow(sg));
	slot = radix_tree_lookup_slot(&sg->host_to_rmap, vmaddr >> PAGE_SHIFT);
	if (slot) {
		rmap->next = radix_tree_deref_slot_protected(slot,
							&sg->guest_table_lock);
		radix_tree_replace_slot(&sg->host_to_rmap, slot, rmap);
	} else {
		rmap->next = NULL;
		radix_tree_insert(&sg->host_to_rmap, vmaddr >> PAGE_SHIFT,
				  rmap);
	}
}
/**
 * gmap_protect_rmap - modify access rights to memory and create an rmap
 * @sg: pointer to the shadow guest address space structure
 * @raddr: rmap address in the shadow gmap
 * @paddr: address in the parent guest address space
 * @len: length of the memory area to protect
 * @prot: indicates access rights: none, read-only or read-write
 *
 * Returns 0 if successfully protected and the rmap was created, -ENOMEM
 * if out of memory and -EFAULT if paddr is invalid.
 */
static int gmap_protect_rmap(struct gmap *sg, unsigned long raddr,
			     unsigned long paddr, unsigned long len, int prot)
{
	struct gmap *parent;
	struct gmap_rmap *rmap;
	unsigned long vmaddr;
	spinlock_t *ptl;
	pte_t *ptep;
	int rc;

	BUG_ON(!gmap_is_shadow(sg));
	parent = sg->parent;
	while (len) {
		vmaddr = __gmap_translate(parent, paddr);
		if (IS_ERR_VALUE(vmaddr))
			return vmaddr;
		rmap = kzalloc(sizeof(*rmap), GFP_KERNEL);
		if (!rmap)
			return -ENOMEM;
		rmap->raddr = raddr;
		rc = radix_tree_preload(GFP_KERNEL);
		if (rc) {
			kfree(rmap);
			return rc;
		}
		rc = -EAGAIN;
		ptep = gmap_pte_op_walk(parent, paddr, &ptl);
		if (ptep) {
			spin_lock(&sg->guest_table_lock);
			rc = ptep_force_prot(parent->mm, paddr, ptep, prot,
					     PGSTE_VSIE_BIT);
			if (!rc)
				gmap_insert_rmap(sg, vmaddr, rmap);
			spin_unlock(&sg->guest_table_lock);
			gmap_pte_op_end(ptl);
		}
		radix_tree_preload_end();
		if (rc) {
			kfree(rmap);
			rc = gmap_pte_op_fixup(parent, paddr, vmaddr, prot);
			if (rc)
				return rc;
			continue;
		}
		paddr += PAGE_SIZE;
		len -= PAGE_SIZE;
	}
	return 0;
}
#define _SHADOW_RMAP_MASK	0x7
#define _SHADOW_RMAP_REGION1	0x5
#define _SHADOW_RMAP_REGION2	0x4
#define _SHADOW_RMAP_REGION3	0x3
#define _SHADOW_RMAP_SEGMENT	0x2
#define _SHADOW_RMAP_PGTABLE	0x1
/**
 * gmap_idte_one - invalidate a single region or segment table entry
 * @asce: region or segment table *origin* + table-type bits
 * @vaddr: virtual address to identify the table entry to flush
 *
 * The invalid bit of a single region or segment table entry is set
 * and the associated TLB entries depending on the entry are flushed.
 * The table-type of the @asce identifies the portion of the @vaddr
 * that is used as the invalidation index.
 */
static inline void gmap_idte_one(unsigned long asce, unsigned long vaddr)
{
	asm volatile(
		"	.insn	rrf,0xb98e0000,%0,%1,0,0"
		: : "a" (asce), "a" (vaddr) : "cc", "memory");
}
/**
 * gmap_unshadow_page - remove a page from a shadow page table
 * @sg: pointer to the shadow guest address space structure
 * @raddr: rmap address in the shadow guest address space
 *
 * Called with the sg->guest_table_lock
 */
static void gmap_unshadow_page(struct gmap *sg, unsigned long raddr)
{
	unsigned long *table;

	BUG_ON(!gmap_is_shadow(sg));
	table = gmap_table_walk(sg, raddr, 0); /* get page table pointer */
	if (!table || *table & _PAGE_INVALID)
		return;
	gmap_call_notifier(sg, raddr, raddr + _PAGE_SIZE - 1);
	ptep_unshadow_pte(sg->mm, raddr, (pte_t *) table);
}
/**
 * __gmap_unshadow_pgt - remove all entries from a shadow page table
 * @sg: pointer to the shadow guest address space structure
 * @raddr: rmap address in the shadow guest address space
 * @pgt: pointer to the start of a shadow page table
 *
 * Called with the sg->guest_table_lock
 */
static void __gmap_unshadow_pgt(struct gmap *sg, unsigned long raddr,
				unsigned long *pgt)
{
	int i;

	BUG_ON(!gmap_is_shadow(sg));
	for (i = 0; i < _PAGE_ENTRIES; i++, raddr += _PAGE_SIZE)
		pgt[i] = _PAGE_INVALID;
}
/**
 * gmap_unshadow_pgt - remove a shadow page table from a segment entry
 * @sg: pointer to the shadow guest address space structure
 * @raddr: address in the shadow guest address space
 *
 * Called with the sg->guest_table_lock
 */
static void gmap_unshadow_pgt(struct gmap *sg, unsigned long raddr)
{
	unsigned long sto, *ste, *pgt;
	struct page *page;

	BUG_ON(!gmap_is_shadow(sg));
	ste = gmap_table_walk(sg, raddr, 1); /* get segment pointer */
	if (!ste || !(*ste & _SEGMENT_ENTRY_ORIGIN))
		return;
	gmap_call_notifier(sg, raddr, raddr + _SEGMENT_SIZE - 1);
	sto = (unsigned long) (ste - ((raddr & _SEGMENT_INDEX) >> _SEGMENT_SHIFT));
	gmap_idte_one(sto | _ASCE_TYPE_SEGMENT, raddr);
	pgt = (unsigned long *)(*ste & _SEGMENT_ENTRY_ORIGIN);
	*ste = _SEGMENT_ENTRY_EMPTY;
	__gmap_unshadow_pgt(sg, raddr, pgt);
	/* Free page table */
	page = pfn_to_page(__pa(pgt) >> PAGE_SHIFT);
	list_del(&page->lru);
	page_table_free_pgste(page);
}
/**
 * __gmap_unshadow_sgt - remove all entries from a shadow segment table
 * @sg: pointer to the shadow guest address space structure
 * @raddr: rmap address in the shadow guest address space
 * @sgt: pointer to the start of a shadow segment table
 *
 * Called with the sg->guest_table_lock
 */
static void __gmap_unshadow_sgt(struct gmap *sg, unsigned long raddr,
				unsigned long *sgt)
{
	unsigned long *pgt;
	struct page *page;
	int i;

	BUG_ON(!gmap_is_shadow(sg));
	for (i = 0; i < _CRST_ENTRIES; i++, raddr += _SEGMENT_SIZE) {
		if (!(sgt[i] & _SEGMENT_ENTRY_ORIGIN))
			continue;
		pgt = (unsigned long *)(sgt[i] & _REGION_ENTRY_ORIGIN);
		sgt[i] = _SEGMENT_ENTRY_EMPTY;
		__gmap_unshadow_pgt(sg, raddr, pgt);
		/* Free page table */
		page = pfn_to_page(__pa(pgt) >> PAGE_SHIFT);
		list_del(&page->lru);
		page_table_free_pgste(page);
	}
}
/**
 * gmap_unshadow_sgt - remove a shadow segment table from a region-3 entry
 * @sg: pointer to the shadow guest address space structure
 * @raddr: rmap address in the shadow guest address space
 *
 * Called with the shadow->guest_table_lock
 */
static void gmap_unshadow_sgt(struct gmap *sg, unsigned long raddr)
{
	unsigned long r3o, *r3e, *sgt;
	struct page *page;

	BUG_ON(!gmap_is_shadow(sg));
	r3e = gmap_table_walk(sg, raddr, 2); /* get region-3 pointer */
	if (!r3e || !(*r3e & _REGION_ENTRY_ORIGIN))
		return;
	gmap_call_notifier(sg, raddr, raddr + _REGION3_SIZE - 1);
	r3o = (unsigned long) (r3e - ((raddr & _REGION3_INDEX) >> _REGION3_SHIFT));
	gmap_idte_one(r3o | _ASCE_TYPE_REGION3, raddr);
	sgt = (unsigned long *)(*r3e & _REGION_ENTRY_ORIGIN);
	*r3e = _REGION3_ENTRY_EMPTY;
	__gmap_unshadow_sgt(sg, raddr, sgt);
	/* Free segment table */
	page = pfn_to_page(__pa(sgt) >> PAGE_SHIFT);
	list_del(&page->lru);
	__free_pages(page, CRST_ALLOC_ORDER);
}
/**
 * __gmap_unshadow_r3t - remove all entries from a shadow region-3 table
 * @sg: pointer to the shadow guest address space structure
 * @raddr: address in the shadow guest address space
 * @r3t: pointer to the start of a shadow region-3 table
 *
 * Called with the sg->guest_table_lock
 */
static void __gmap_unshadow_r3t(struct gmap *sg, unsigned long raddr,
				unsigned long *r3t)
{
	unsigned long *sgt;
	struct page *page;
	int i;

	BUG_ON(!gmap_is_shadow(sg));
	for (i = 0; i < _CRST_ENTRIES; i++, raddr += _REGION3_SIZE) {
		if (!(r3t[i] & _REGION_ENTRY_ORIGIN))
			continue;
		sgt = (unsigned long *)(r3t[i] & _REGION_ENTRY_ORIGIN);
		r3t[i] = _REGION3_ENTRY_EMPTY;
		__gmap_unshadow_sgt(sg, raddr, sgt);
		/* Free segment table */
		page = pfn_to_page(__pa(sgt) >> PAGE_SHIFT);
		list_del(&page->lru);
		__free_pages(page, CRST_ALLOC_ORDER);
	}
}
/**
 * gmap_unshadow_r3t - remove a shadow region-3 table from a region-2 entry
 * @sg: pointer to the shadow guest address space structure
 * @raddr: rmap address in the shadow guest address space
 *
 * Called with the sg->guest_table_lock
 */
static void gmap_unshadow_r3t(struct gmap *sg, unsigned long raddr)
{
	unsigned long r2o, *r2e, *r3t;
	struct page *page;

	BUG_ON(!gmap_is_shadow(sg));
	r2e = gmap_table_walk(sg, raddr, 3); /* get region-2 pointer */
	if (!r2e || !(*r2e & _REGION_ENTRY_ORIGIN))
		return;
	gmap_call_notifier(sg, raddr, raddr + _REGION2_SIZE - 1);
	r2o = (unsigned long) (r2e - ((raddr & _REGION2_INDEX) >> _REGION2_SHIFT));
	gmap_idte_one(r2o | _ASCE_TYPE_REGION2, raddr);
	r3t = (unsigned long *)(*r2e & _REGION_ENTRY_ORIGIN);
	*r2e = _REGION2_ENTRY_EMPTY;
	__gmap_unshadow_r3t(sg, raddr, r3t);
	/* Free region 3 table */
	page = pfn_to_page(__pa(r3t) >> PAGE_SHIFT);
	list_del(&page->lru);
	__free_pages(page, CRST_ALLOC_ORDER);
}
/**
 * __gmap_unshadow_r2t - remove all entries from a shadow region-2 table
 * @sg: pointer to the shadow guest address space structure
 * @raddr: rmap address in the shadow guest address space
 * @r2t: pointer to the start of a shadow region-2 table
 *
 * Called with the sg->guest_table_lock
 */
static void __gmap_unshadow_r2t(struct gmap *sg, unsigned long raddr,
				unsigned long *r2t)
{
	unsigned long *r3t;
	struct page *page;
	int i;

	BUG_ON(!gmap_is_shadow(sg));
	for (i = 0; i < _CRST_ENTRIES; i++, raddr += _REGION2_SIZE) {
		if (!(r2t[i] & _REGION_ENTRY_ORIGIN))
			continue;
		r3t = (unsigned long *)(r2t[i] & _REGION_ENTRY_ORIGIN);
		r2t[i] = _REGION2_ENTRY_EMPTY;
		__gmap_unshadow_r3t(sg, raddr, r3t);
		/* Free region 3 table */
		page = pfn_to_page(__pa(r3t) >> PAGE_SHIFT);
		list_del(&page->lru);
		__free_pages(page, CRST_ALLOC_ORDER);
	}
}
/**
 * gmap_unshadow_r2t - remove a shadow region-2 table from a region-1 entry
 * @sg: pointer to the shadow guest address space structure
 * @raddr: rmap address in the shadow guest address space
 *
 * Called with the sg->guest_table_lock
 */
static void gmap_unshadow_r2t(struct gmap *sg, unsigned long raddr)
{
	unsigned long r1o, *r1e, *r2t;
	struct page *page;

	BUG_ON(!gmap_is_shadow(sg));
	r1e = gmap_table_walk(sg, raddr, 4); /* get region-1 pointer */
	if (!r1e || !(*r1e & _REGION_ENTRY_ORIGIN))
		return;
	gmap_call_notifier(sg, raddr, raddr + _REGION1_SIZE - 1);
	r1o = (unsigned long) (r1e - ((raddr & _REGION1_INDEX) >> _REGION1_SHIFT));
	gmap_idte_one(r1o | _ASCE_TYPE_REGION1, raddr);
	r2t = (unsigned long *)(*r1e & _REGION_ENTRY_ORIGIN);
	*r1e = _REGION1_ENTRY_EMPTY;
	__gmap_unshadow_r2t(sg, raddr, r2t);
	/* Free region 2 table */
	page = pfn_to_page(__pa(r2t) >> PAGE_SHIFT);
	list_del(&page->lru);
	__free_pages(page, CRST_ALLOC_ORDER);
}
/**
 * __gmap_unshadow_r1t - remove all entries from a shadow region-1 table
 * @sg: pointer to the shadow guest address space structure
 * @raddr: rmap address in the shadow guest address space
 * @r1t: pointer to the start of a shadow region-1 table
 *
 * Called with the shadow->guest_table_lock
 */
static void __gmap_unshadow_r1t(struct gmap *sg, unsigned long raddr,
				unsigned long *r1t)
{
	unsigned long asce, *r2t;
	struct page *page;
	int i;

	BUG_ON(!gmap_is_shadow(sg));
	asce = (unsigned long) r1t | _ASCE_TYPE_REGION1;
	for (i = 0; i < _CRST_ENTRIES; i++, raddr += _REGION1_SIZE) {
		if (!(r1t[i] & _REGION_ENTRY_ORIGIN))
			continue;
		r2t = (unsigned long *)(r1t[i] & _REGION_ENTRY_ORIGIN);
		__gmap_unshadow_r2t(sg, raddr, r2t);
		/* Clear entry and flush translation r1t -> r2t */
		gmap_idte_one(asce, raddr);
		r1t[i] = _REGION1_ENTRY_EMPTY;
		/* Free region 2 table */
		page = pfn_to_page(__pa(r2t) >> PAGE_SHIFT);
		list_del(&page->lru);
		__free_pages(page, CRST_ALLOC_ORDER);
	}
}
/**
 * gmap_unshadow - remove a shadow page table completely
 * @sg: pointer to the shadow guest address space structure
 *
 * Called with sg->guest_table_lock
 */
static void gmap_unshadow(struct gmap *sg)
{
	unsigned long *table;

	BUG_ON(!gmap_is_shadow(sg));
	if (sg->removed)
		return;
	sg->removed = 1;
	gmap_call_notifier(sg, 0, -1UL);
	gmap_flush_tlb(sg);
	table = (unsigned long *)(sg->asce & _ASCE_ORIGIN);
	switch (sg->asce & _ASCE_TYPE_MASK) {
	case _ASCE_TYPE_REGION1:
		__gmap_unshadow_r1t(sg, 0, table);
		break;
	case _ASCE_TYPE_REGION2:
		__gmap_unshadow_r2t(sg, 0, table);
		break;
	case _ASCE_TYPE_REGION3:
		__gmap_unshadow_r3t(sg, 0, table);
		break;
	case _ASCE_TYPE_SEGMENT:
		__gmap_unshadow_sgt(sg, 0, table);
		break;
	}
}
/**
 * gmap_find_shadow - find a specific asce in the list of shadow tables
 * @parent: pointer to the parent gmap
 * @asce: ASCE for which the shadow table is created
 * @edat_level: edat level to be used for the shadow translation
 *
 * Returns the pointer to a gmap if a shadow table with the given asce is
 * already available, ERR_PTR(-EAGAIN) if another one is just being created,
 * otherwise NULL
 */
static struct gmap *gmap_find_shadow(struct gmap *parent, unsigned long asce,
				     int edat_level)
{
	struct gmap *sg;

	list_for_each_entry(sg, &parent->children, list) {
		if (sg->orig_asce != asce || sg->edat_level != edat_level ||
		    sg->removed)
			continue;
		if (!sg->initialized)
			return ERR_PTR(-EAGAIN);
		atomic_inc(&sg->ref_count);
		return sg;
	}
	return NULL;
}
/**
 * gmap_shadow_valid - check if a shadow guest address space matches the
 *                     given properties and is still valid
 * @sg: pointer to the shadow guest address space structure
 * @asce: ASCE for which the shadow table is requested
 * @edat_level: edat level to be used for the shadow translation
 *
 * Returns 1 if the gmap shadow is still valid and matches the given
 * properties, the caller can continue using it. Returns 0 otherwise, the
 * caller has to request a new shadow gmap in this case.
 */
int gmap_shadow_valid(struct gmap *sg, unsigned long asce, int edat_level)
{
	if (sg->removed)
		return 0;
	return sg->orig_asce == asce && sg->edat_level == edat_level;
}
EXPORT_SYMBOL_GPL(gmap_shadow_valid);
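
/*
 * Illustrative sketch, not part of the original file: re-validating a
 * cached shadow gmap before reuse and falling back to gmap_shadow() when
 * it no longer matches the guest's ASCE. The helper name is hypothetical.
 */
static struct gmap *example_get_shadow(struct gmap *parent,
				       struct gmap *cached,
				       unsigned long asce, int edat_level)
{
	if (cached && gmap_shadow_valid(cached, asce, edat_level))
		return cached;
	return gmap_shadow(parent, asce, edat_level);
}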
/**
 * gmap_shadow - create/find a shadow guest address space
 * @parent: pointer to the parent gmap
 * @asce: ASCE for which the shadow table is created
 * @edat_level: edat level to be used for the shadow translation
 *
 * The pages of the top level page table referred by the asce parameter
 * will be set to read-only and marked in the PGSTEs of the kvm process.
 * The shadow table will be removed automatically on any change to the
 * PTE mapping for the source table.
 *
 * Returns a guest address space structure, ERR_PTR(-ENOMEM) if out of memory,
 * ERR_PTR(-EAGAIN) if the caller has to retry and ERR_PTR(-EFAULT) if the
 * parent gmap table could not be protected.
 */
struct gmap *gmap_shadow(struct gmap *parent, unsigned long asce,
			 int edat_level)
{
	struct gmap *sg, *new;
	unsigned long limit;
	int rc;

	BUG_ON(gmap_is_shadow(parent));
	spin_lock(&parent->shadow_lock);
	sg = gmap_find_shadow(parent, asce, edat_level);
	spin_unlock(&parent->shadow_lock);
	if (sg)
		return sg;
	/* Create a new shadow gmap */
	limit = -1UL >> (33 - (((asce & _ASCE_TYPE_MASK) >> 2) * 11));
	if (asce & _ASCE_REAL_SPACE)
		limit = -1UL;
	new = gmap_alloc(limit);
	if (!new)
		return ERR_PTR(-ENOMEM);
	new->mm = parent->mm;
	new->parent = gmap_get(parent);
	new->orig_asce = asce;
	new->edat_level = edat_level;
	new->initialized = false;
	spin_lock(&parent->shadow_lock);
	/* Recheck if another CPU created the same shadow */
	sg = gmap_find_shadow(parent, asce, edat_level);
	if (sg) {
		spin_unlock(&parent->shadow_lock);
		gmap_free(new);
		return sg;
	}
	if (asce & _ASCE_REAL_SPACE) {
		/* only allow one real-space gmap shadow */
		list_for_each_entry(sg, &parent->children, list) {
			if (sg->orig_asce & _ASCE_REAL_SPACE) {
				spin_lock(&sg->guest_table_lock);
				gmap_unshadow(sg);
				spin_unlock(&sg->guest_table_lock);
				list_del(&sg->list);
				gmap_put(sg);
				break;
			}
		}
	}
	atomic_set(&new->ref_count, 2);
	list_add(&new->list, &parent->children);
	if (asce & _ASCE_REAL_SPACE) {
		/* nothing to protect, return right away */
		new->initialized = true;
		spin_unlock(&parent->shadow_lock);
		return new;
	}
	spin_unlock(&parent->shadow_lock);
	/* protect after insertion, so it will get properly invalidated */
	down_read(&parent->mm->mmap_sem);
	rc = gmap_protect_range(parent, asce & _ASCE_ORIGIN,
				((asce & _ASCE_TABLE_LENGTH) + 1) * PAGE_SIZE,
				PROT_READ, PGSTE_VSIE_BIT);
	up_read(&parent->mm->mmap_sem);
	spin_lock(&parent->shadow_lock);
	new->initialized = true;
	if (rc) {
		list_del(&new->list);
		gmap_free(new);
		new = ERR_PTR(rc);
	}
	spin_unlock(&parent->shadow_lock);
	return new;
}
EXPORT_SYMBOL_GPL(gmap_shadow);
/**
 * gmap_shadow_r2t - create an empty shadow region 2 table
 * @sg: pointer to the shadow guest address space structure
 * @saddr: faulting address in the shadow gmap
 * @r2t: parent gmap address of the region 2 table to get shadowed
 * @fake: r2t references contiguous guest memory block, not a r2t
 *
 * The r2t parameter specifies the address of the source table. The
 * four pages of the source table are made read-only in the parent gmap
 * address space. A write to the source table area @r2t will automatically
 * remove the shadow r2 table and all of its descendants.
 *
 * Returns 0 if successfully shadowed or already shadowed, -EAGAIN if the
 * shadow table structure is incomplete, -ENOMEM if out of memory and
 * -EFAULT if an address in the parent gmap could not be resolved.
 *
 * Called with sg->mm->mmap_sem in read.
 */
int gmap_shadow_r2t(struct gmap *sg, unsigned long saddr, unsigned long r2t,
		    int fake)
{
	unsigned long raddr, origin, offset, len;
	unsigned long *s_r2t, *table;
	struct page *page;
	int rc;

	BUG_ON(!gmap_is_shadow(sg));
	/* Allocate a shadow region second table */
	page = alloc_pages(GFP_KERNEL, CRST_ALLOC_ORDER);
	if (!page)
		return -ENOMEM;
	page->index = r2t & _REGION_ENTRY_ORIGIN;
	if (fake)
		page->index |= GMAP_SHADOW_FAKE_TABLE;
	s_r2t = (unsigned long *) page_to_phys(page);
	/* Install shadow region second table */
	spin_lock(&sg->guest_table_lock);
	table = gmap_table_walk(sg, saddr, 4); /* get region-1 pointer */
	if (!table) {
		rc = -EAGAIN;		/* Race with unshadow */
		goto out_free;
	}
	if (!(*table & _REGION_ENTRY_INVALID)) {
		rc = 0;			/* Already established */
		goto out_free;
	} else if (*table & _REGION_ENTRY_ORIGIN) {
		rc = -EAGAIN;		/* Race with shadow */
		goto out_free;
	}
	crst_table_init(s_r2t, _REGION2_ENTRY_EMPTY);
	/* mark as invalid as long as the parent table is not protected */
	*table = (unsigned long) s_r2t | _REGION_ENTRY_LENGTH |
		 _REGION_ENTRY_TYPE_R1 | _REGION_ENTRY_INVALID;
	if (sg->edat_level >= 1)
		*table |= (r2t & _REGION_ENTRY_PROTECT);
	list_add(&page->lru, &sg->crst_list);
	if (fake) {
		/* nothing to protect for fake tables */
		*table &= ~_REGION_ENTRY_INVALID;
		spin_unlock(&sg->guest_table_lock);
		return 0;
	}
	spin_unlock(&sg->guest_table_lock);
	/* Make r2t read-only in parent gmap page table */
	raddr = (saddr & _REGION1_MASK) | _SHADOW_RMAP_REGION1;
	origin = r2t & _REGION_ENTRY_ORIGIN;
	offset = ((r2t & _REGION_ENTRY_OFFSET) >> 6) * PAGE_SIZE;
	len = ((r2t & _REGION_ENTRY_LENGTH) + 1) * PAGE_SIZE - offset;
	rc = gmap_protect_rmap(sg, raddr, origin + offset, len, PROT_READ);
	spin_lock(&sg->guest_table_lock);
	if (!rc) {
		table = gmap_table_walk(sg, saddr, 4);
		if (!table || (*table & _REGION_ENTRY_ORIGIN) !=
			      (unsigned long) s_r2t)
			rc = -EAGAIN;		/* Race with unshadow */
		else
			*table &= ~_REGION_ENTRY_INVALID;
	} else {
		gmap_unshadow_r2t(sg, raddr);
	}
	spin_unlock(&sg->guest_table_lock);
	return rc;
out_free:
	spin_unlock(&sg->guest_table_lock);
	__free_pages(page, CRST_ALLOC_ORDER);
	return rc;
}
EXPORT_SYMBOL_GPL(gmap_shadow_r2t);
/**
 * gmap_shadow_r3t - create a shadow region 3 table
 * @sg: pointer to the shadow guest address space structure
 * @saddr: faulting address in the shadow gmap
 * @r3t: parent gmap address of the region 3 table to get shadowed
 * @fake: r3t references contiguous guest memory block, not a r3t
 *
 * Returns 0 if successfully shadowed or already shadowed, -EAGAIN if the
 * shadow table structure is incomplete, -ENOMEM if out of memory and
 * -EFAULT if an address in the parent gmap could not be resolved.
 *
 * Called with sg->mm->mmap_sem in read.
 */
int gmap_shadow_r3t(struct gmap *sg, unsigned long saddr, unsigned long r3t,
		    int fake)
{
	unsigned long raddr, origin, offset, len;
	unsigned long *s_r3t, *table;
	struct page *page;
	int rc;

	BUG_ON(!gmap_is_shadow(sg));
	/* Allocate a shadow region third table */
	page = alloc_pages(GFP_KERNEL, CRST_ALLOC_ORDER);
	if (!page)
		return -ENOMEM;
	page->index = r3t & _REGION_ENTRY_ORIGIN;
	if (fake)
		page->index |= GMAP_SHADOW_FAKE_TABLE;
	s_r3t = (unsigned long *) page_to_phys(page);
	/* Install shadow region third table */
	spin_lock(&sg->guest_table_lock);
	table = gmap_table_walk(sg, saddr, 3); /* get region-2 pointer */
	if (!table) {
		rc = -EAGAIN;		/* Race with unshadow */
		goto out_free;
	}
	if (!(*table & _REGION_ENTRY_INVALID)) {
		rc = 0;			/* Already established */
		goto out_free;
	} else if (*table & _REGION_ENTRY_ORIGIN) {
		rc = -EAGAIN;		/* Race with shadow */
		goto out_free;
	}
	crst_table_init(s_r3t, _REGION3_ENTRY_EMPTY);
	/* mark as invalid as long as the parent table is not protected */
	*table = (unsigned long) s_r3t | _REGION_ENTRY_LENGTH |
		 _REGION_ENTRY_TYPE_R2 | _REGION_ENTRY_INVALID;
	if (sg->edat_level >= 1)
		*table |= (r3t & _REGION_ENTRY_PROTECT);
	list_add(&page->lru, &sg->crst_list);
	if (fake) {
		/* nothing to protect for fake tables */
		*table &= ~_REGION_ENTRY_INVALID;
		spin_unlock(&sg->guest_table_lock);
		return 0;
	}
	spin_unlock(&sg->guest_table_lock);
	/* Make r3t read-only in parent gmap page table */
	raddr = (saddr & _REGION2_MASK) | _SHADOW_RMAP_REGION2;
	origin = r3t & _REGION_ENTRY_ORIGIN;
	offset = ((r3t & _REGION_ENTRY_OFFSET) >> 6) * PAGE_SIZE;
	len = ((r3t & _REGION_ENTRY_LENGTH) + 1) * PAGE_SIZE - offset;
	rc = gmap_protect_rmap(sg, raddr, origin + offset, len, PROT_READ);
	spin_lock(&sg->guest_table_lock);
	if (!rc) {
		table = gmap_table_walk(sg, saddr, 3);
		if (!table || (*table & _REGION_ENTRY_ORIGIN) !=
			      (unsigned long) s_r3t)
			rc = -EAGAIN;		/* Race with unshadow */
		else
			*table &= ~_REGION_ENTRY_INVALID;
	} else {
		gmap_unshadow_r3t(sg, raddr);
	}
	spin_unlock(&sg->guest_table_lock);
	return rc;
out_free:
	spin_unlock(&sg->guest_table_lock);
	__free_pages(page, CRST_ALLOC_ORDER);
	return rc;
}
EXPORT_SYMBOL_GPL(gmap_shadow_r3t);
/**
 * gmap_shadow_sgt - create a shadow segment table
 * @sg: pointer to the shadow guest address space structure
 * @saddr: faulting address in the shadow gmap
 * @sgt: parent gmap address of the segment table to get shadowed
 * @fake: sgt references contiguous guest memory block, not a sgt
 *
 * Returns: 0 if successfully shadowed or already shadowed, -EAGAIN if the
 * shadow table structure is incomplete, -ENOMEM if out of memory and
 * -EFAULT if an address in the parent gmap could not be resolved.
 *
 * Called with sg->mm->mmap_sem in read.
 */
int gmap_shadow_sgt(struct gmap *sg, unsigned long saddr, unsigned long sgt,
		    int fake)
{
	unsigned long raddr, origin, offset, len;
	unsigned long *s_sgt, *table;
	struct page *page;
	int rc;

	BUG_ON(!gmap_is_shadow(sg) || (sgt & _REGION3_ENTRY_LARGE));
	/* Allocate a shadow segment table */
	page = alloc_pages(GFP_KERNEL, CRST_ALLOC_ORDER);
	if (!page)
		return -ENOMEM;
	page->index = sgt & _REGION_ENTRY_ORIGIN;
	if (fake)
		page->index |= GMAP_SHADOW_FAKE_TABLE;
	s_sgt = (unsigned long *) page_to_phys(page);
	/* Install shadow segment table */
	spin_lock(&sg->guest_table_lock);
	table = gmap_table_walk(sg, saddr, 2); /* get region-3 pointer */
	if (!table) {
		rc = -EAGAIN;		/* Race with unshadow */
		goto out_free;
	}
	if (!(*table & _REGION_ENTRY_INVALID)) {
		rc = 0;			/* Already established */
		goto out_free;
	} else if (*table & _REGION_ENTRY_ORIGIN) {
		rc = -EAGAIN;		/* Race with shadow */
		goto out_free;
	}
	crst_table_init(s_sgt, _SEGMENT_ENTRY_EMPTY);
	/* mark as invalid as long as the parent table is not protected */
	*table = (unsigned long) s_sgt | _REGION_ENTRY_LENGTH |
		 _REGION_ENTRY_TYPE_R3 | _REGION_ENTRY_INVALID;
	if (sg->edat_level >= 1)
		*table |= sgt & _REGION_ENTRY_PROTECT;
	list_add(&page->lru, &sg->crst_list);
	if (fake) {
		/* nothing to protect for fake tables */
		*table &= ~_REGION_ENTRY_INVALID;
		spin_unlock(&sg->guest_table_lock);
		return 0;
	}
	spin_unlock(&sg->guest_table_lock);
	/* Make sgt read-only in parent gmap page table */
	raddr = (saddr & _REGION3_MASK) | _SHADOW_RMAP_REGION3;
	origin = sgt & _REGION_ENTRY_ORIGIN;
	offset = ((sgt & _REGION_ENTRY_OFFSET) >> 6) * PAGE_SIZE;
	len = ((sgt & _REGION_ENTRY_LENGTH) + 1) * PAGE_SIZE - offset;
	rc = gmap_protect_rmap(sg, raddr, origin + offset, len, PROT_READ);
	spin_lock(&sg->guest_table_lock);
	if (!rc) {
		table = gmap_table_walk(sg, saddr, 2);
		if (!table || (*table & _REGION_ENTRY_ORIGIN) !=
			      (unsigned long) s_sgt)
			rc = -EAGAIN;		/* Race with unshadow */
		else
			*table &= ~_REGION_ENTRY_INVALID;
	} else {
		gmap_unshadow_sgt(sg, raddr);
	}
	spin_unlock(&sg->guest_table_lock);
	return rc;
out_free:
	spin_unlock(&sg->guest_table_lock);
	__free_pages(page, CRST_ALLOC_ORDER);
	return rc;
}
EXPORT_SYMBOL_GPL(gmap_shadow_sgt);
/**
 * gmap_shadow_pgt_lookup - find a shadow page table
 * @sg: pointer to the shadow guest address space structure
 * @saddr: the address in the shadow guest address space
 * @pgt: parent gmap address of the page table to get shadowed
 * @dat_protection: if the pgtable is marked as protected by dat
 * @fake: pgt references contiguous guest memory block, not a pgtable
 *
 * Returns 0 if the shadow page table was found and -EAGAIN if the page
 * table was not found.
 *
 * Called with sg->mm->mmap_sem in read.
 */
int gmap_shadow_pgt_lookup(struct gmap *sg, unsigned long saddr,
			   unsigned long *pgt, int *dat_protection,
			   int *fake)
{
	unsigned long *table;
	struct page *page;
	int rc;

	BUG_ON(!gmap_is_shadow(sg));
	spin_lock(&sg->guest_table_lock);
	table = gmap_table_walk(sg, saddr, 1); /* get segment pointer */
	if (table && !(*table & _SEGMENT_ENTRY_INVALID)) {
		/* Shadow page tables are full pages (pte+pgste) */
		page = pfn_to_page(*table >> PAGE_SHIFT);
		*pgt = page->index & ~GMAP_SHADOW_FAKE_TABLE;
		*dat_protection = !!(*table & _SEGMENT_ENTRY_PROTECT);
		*fake = !!(page->index & GMAP_SHADOW_FAKE_TABLE);
		rc = 0;
	} else {
		rc = -EAGAIN;
	}
	spin_unlock(&sg->guest_table_lock);
	return rc;
}
EXPORT_SYMBOL_GPL(gmap_shadow_pgt_lookup);
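
/*
 * Illustrative sketch, not part of the original file: how a VSIE-style
 * fault handler might combine the lookup with gmap_shadow_pgt(). The
 * parent-table walk that produces parent_pgt/parent_fake is elided; those
 * parameters and the helper name are assumptions for the example.
 */
static int example_get_shadow_pgt(struct gmap *sg, unsigned long saddr,
				  unsigned long parent_pgt, int parent_fake,
				  int *dat_protection)
{
	unsigned long pgt;
	int fake;
	int rc;

	rc = gmap_shadow_pgt_lookup(sg, saddr, &pgt, dat_protection, &fake);
	if (rc != -EAGAIN)
		return rc;	/* found it, or a real error */
	/* not shadowed yet: instantiate a shadow of the parent's table */
	return gmap_shadow_pgt(sg, saddr, parent_pgt, parent_fake);
}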
/**
 * gmap_shadow_pgt - instantiate a shadow page table
 * @sg: pointer to the shadow guest address space structure
 * @saddr: faulting address in the shadow gmap
 * @pgt: parent gmap address of the page table to get shadowed
 * @fake: pgt references contiguous guest memory block, not a pgtable
 *
 * Returns 0 if successfully shadowed or already shadowed, -EAGAIN if the
 * shadow table structure is incomplete, -ENOMEM if out of memory and
 * -EFAULT if an address in the parent gmap could not be resolved.
 *
 * Called with gmap->mm->mmap_sem in read
 */
int gmap_shadow_pgt(struct gmap *sg, unsigned long saddr, unsigned long pgt,
		    int fake)
{
	unsigned long raddr, origin;
	unsigned long *s_pgt, *table;
	struct page *page;
	int rc;

	BUG_ON(!gmap_is_shadow(sg) || (pgt & _SEGMENT_ENTRY_LARGE));
	/* Allocate a shadow page table */
	page = page_table_alloc_pgste(sg->mm);
	if (!page)
		return -ENOMEM;
	page->index = pgt & _SEGMENT_ENTRY_ORIGIN;
	if (fake)
		page->index |= GMAP_SHADOW_FAKE_TABLE;
	s_pgt = (unsigned long *) page_to_phys(page);
	/* Install shadow page table */
	spin_lock(&sg->guest_table_lock);
	table = gmap_table_walk(sg, saddr, 1); /* get segment pointer */
	if (!table) {
		rc = -EAGAIN;		/* Race with unshadow */
		goto out_free;
	}
	if (!(*table & _SEGMENT_ENTRY_INVALID)) {
		rc = 0;			/* Already established */
		goto out_free;
	} else if (*table & _SEGMENT_ENTRY_ORIGIN) {
		rc = -EAGAIN;		/* Race with shadow */
		goto out_free;
	}
	/* mark as invalid as long as the parent table is not protected */
	*table = (unsigned long) s_pgt | _SEGMENT_ENTRY |
		 (pgt & _SEGMENT_ENTRY_PROTECT) | _SEGMENT_ENTRY_INVALID;
	list_add(&page->lru, &sg->pt_list);
	if (fake) {
		/* nothing to protect for fake tables */
		*table &= ~_SEGMENT_ENTRY_INVALID;
		spin_unlock(&sg->guest_table_lock);
		return 0;
	}
	spin_unlock(&sg->guest_table_lock);
	/* Make pgt read-only in parent gmap page table (not the pgste) */
	raddr = (saddr & _SEGMENT_MASK) | _SHADOW_RMAP_SEGMENT;
	origin = pgt & _SEGMENT_ENTRY_ORIGIN & PAGE_MASK;
	rc = gmap_protect_rmap(sg, raddr, origin, PAGE_SIZE, PROT_READ);
	spin_lock(&sg->guest_table_lock);
	if (!rc) {
		table = gmap_table_walk(sg, saddr, 1);
		if (!table || (*table & _SEGMENT_ENTRY_ORIGIN) !=
			      (unsigned long) s_pgt)
			rc = -EAGAIN;		/* Race with unshadow */
		else
			*table &= ~_SEGMENT_ENTRY_INVALID;
	} else {
		gmap_unshadow_pgt(sg, raddr);
	}
	spin_unlock(&sg->guest_table_lock);
	return rc;
out_free:
	spin_unlock(&sg->guest_table_lock);
	page_table_free_pgste(page);
	return rc;
}
EXPORT_SYMBOL_GPL(gmap_shadow_pgt);
/**
 * gmap_shadow_page - create a shadow page mapping
 * @sg: pointer to the shadow guest address space structure
 * @saddr: faulting address in the shadow gmap
 * @pte: pte in parent gmap address space to get shadowed
 *
 * Returns 0 if successfully shadowed or already shadowed, -EAGAIN if the
 * shadow table structure is incomplete, -ENOMEM if out of memory and
 * -EFAULT if an address in the parent gmap could not be resolved.
 *
 * Called with sg->mm->mmap_sem in read.
 */
int gmap_shadow_page(struct gmap *sg, unsigned long saddr, pte_t pte)
{
	struct gmap *parent;
	struct gmap_rmap *rmap;
	unsigned long vmaddr, paddr;
	spinlock_t *ptl;
	pte_t *sptep, *tptep;
	int prot;
	int rc;

	BUG_ON(!gmap_is_shadow(sg));
	parent = sg->parent;
	prot = (pte_val(pte) & _PAGE_PROTECT) ? PROT_READ : PROT_WRITE;

	rmap = kzalloc(sizeof(*rmap), GFP_KERNEL);
	if (!rmap)
		return -ENOMEM;
	rmap->raddr = (saddr & PAGE_MASK) | _SHADOW_RMAP_PGTABLE;

	while (1) {
		paddr = pte_val(pte) & PAGE_MASK;
		vmaddr = __gmap_translate(parent, paddr);
		if (IS_ERR_VALUE(vmaddr)) {
			rc = vmaddr;
			break;
		}
		rc = radix_tree_preload(GFP_KERNEL);
		if (rc)
			break;
		rc = -EAGAIN;
		sptep = gmap_pte_op_walk(parent, paddr, &ptl);
		if (sptep) {
			spin_lock(&sg->guest_table_lock);
			/* Get page table pointer */
			tptep = (pte_t *) gmap_table_walk(sg, saddr, 0);
			if (!tptep) {
				spin_unlock(&sg->guest_table_lock);
				gmap_pte_op_end(ptl);
				radix_tree_preload_end();
				break;
			}
			rc = ptep_shadow_pte(sg->mm, saddr, sptep, tptep, pte);
			if (rc > 0) {
				/* Success and a new mapping */
				gmap_insert_rmap(sg, vmaddr, rmap);
				rmap = NULL;
				rc = 0;
			}
			gmap_pte_op_end(ptl);
			spin_unlock(&sg->guest_table_lock);
		}
		radix_tree_preload_end();
		if (!rc)
			break;
		rc = gmap_pte_op_fixup(parent, paddr, vmaddr, prot);
		if (rc)
			break;
	}
	kfree(rmap);
	return rc;
}
EXPORT_SYMBOL_GPL(gmap_shadow_page);
/**
 * gmap_shadow_notify - handle notifications for shadow gmap
 * @sg: pointer to the shadow guest address space structure
 * @vmaddr: affected host address
 * @gaddr: affected guest address in the parent gmap
 * @pte: pointer to the page table entry that was changed
 *
 * Called with sg->parent->shadow_lock.
 */
static void gmap_shadow_notify(struct gmap *sg, unsigned long vmaddr,
			       unsigned long gaddr, pte_t *pte)
{
	struct gmap_rmap *rmap, *rnext, *head;
	unsigned long start, end, bits, raddr;

	BUG_ON(!gmap_is_shadow(sg));

	spin_lock(&sg->guest_table_lock);
	if (sg->removed) {
		spin_unlock(&sg->guest_table_lock);
		return;
	}
	/* Check for top level table */
	start = sg->orig_asce & _ASCE_ORIGIN;
	end = start + ((sg->orig_asce & _ASCE_TABLE_LENGTH) + 1) * PAGE_SIZE;
	if (!(sg->orig_asce & _ASCE_REAL_SPACE) && gaddr >= start &&
	    gaddr < end) {
		/* The complete shadow table has to go */
		gmap_unshadow(sg);
		spin_unlock(&sg->guest_table_lock);
		list_del(&sg->list);
		gmap_put(sg);
		return;
	}
	/* Remove the page table tree from one specific entry */
	head = radix_tree_delete(&sg->host_to_rmap, vmaddr >> PAGE_SHIFT);
	gmap_for_each_rmap_safe(rmap, rnext, head) {
		bits = rmap->raddr & _SHADOW_RMAP_MASK;
		raddr = rmap->raddr ^ bits;
		switch (bits) {
		case _SHADOW_RMAP_REGION1:
			gmap_unshadow_r2t(sg, raddr);
			break;
		case _SHADOW_RMAP_REGION2:
			gmap_unshadow_r3t(sg, raddr);
			break;
		case _SHADOW_RMAP_REGION3:
			gmap_unshadow_sgt(sg, raddr);
			break;
		case _SHADOW_RMAP_SEGMENT:
			gmap_unshadow_pgt(sg, raddr);
			break;
		case _SHADOW_RMAP_PGTABLE:
			gmap_unshadow_page(sg, raddr);
			break;
		}
		kfree(rmap);
	}
	spin_unlock(&sg->guest_table_lock);
}
/**
 * ptep_notify - call all invalidation callbacks for a specific pte.
 * @mm: pointer to the process mm_struct
 * @vmaddr: virtual address in the process address space
 * @pte: pointer to the page table entry
 * @bits: bits from the pgste that caused the notify call
 *
 * This function is assumed to be called with the page table lock held
 * for the pte to notify.
 */
void ptep_notify(struct mm_struct *mm, unsigned long vmaddr,
		 pte_t *pte, unsigned long bits)
{
	unsigned long offset, gaddr = 0;
	unsigned long *table;
	struct gmap *gmap, *sg, *next;

	offset = ((unsigned long) pte) & (255 * sizeof(pte_t));
	offset = offset * (PAGE_SIZE / sizeof(pte_t));
	rcu_read_lock();
	list_for_each_entry_rcu(gmap, &mm->context.gmap_list, list) {
		spin_lock(&gmap->guest_table_lock);
		table = radix_tree_lookup(&gmap->host_to_guest,
					  vmaddr >> PMD_SHIFT);
		if (table)
			gaddr = __gmap_segment_gaddr(table) + offset;
		spin_unlock(&gmap->guest_table_lock);
		if (!table)
			continue;

		if (!list_empty(&gmap->children) && (bits & PGSTE_VSIE_BIT)) {
			spin_lock(&gmap->shadow_lock);
			list_for_each_entry_safe(sg, next,
						 &gmap->children, list)
				gmap_shadow_notify(sg, vmaddr, gaddr, pte);
			spin_unlock(&gmap->shadow_lock);
		}
		if (bits & PGSTE_IN_BIT)
			gmap_call_notifier(gmap, gaddr, gaddr + PAGE_SIZE - 1);
	}
	rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(ptep_notify);
static inline void thp_split_mm(struct mm_struct *mm)
{
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	struct vm_area_struct *vma;
	unsigned long addr;

	for (vma = mm->mmap; vma != NULL; vma = vma->vm_next) {
		for (addr = vma->vm_start;
		     addr < vma->vm_end;
		     addr += PAGE_SIZE)
			follow_page(vma, addr, FOLL_SPLIT);
		vma->vm_flags &= ~VM_HUGEPAGE;
		vma->vm_flags |= VM_NOHUGEPAGE;
	}
	mm->def_flags |= VM_NOHUGEPAGE;
#endif
}
/*
 * Remove all empty zero pages from the mapping for lazy refaulting
 * - This must be called after mm->context.has_pgste is set, to avoid
 *   future creation of zero pages
 * - This must be called after THP was enabled
 */
static int __zap_zero_pages(pmd_t *pmd, unsigned long start,
			    unsigned long end, struct mm_walk *walk)
{
	unsigned long addr;

	for (addr = start; addr != end; addr += PAGE_SIZE) {
		pte_t *ptep;
		spinlock_t *ptl;

		ptep = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
		if (is_zero_pfn(pte_pfn(*ptep)))
			ptep_xchg_direct(walk->mm, addr, ptep, __pte(_PAGE_INVALID));
		pte_unmap_unlock(ptep, ptl);
	}
	return 0;
}

static inline void zap_zero_pages(struct mm_struct *mm)
{
	struct mm_walk walk = { .pmd_entry = __zap_zero_pages };

	walk.mm = mm;
	walk_page_range(0, TASK_SIZE, &walk);
}
/*
 * switch on pgstes for its userspace process (for kvm)
 */
int s390_enable_sie(void)
{
	struct mm_struct *mm = current->mm;

	/* Do we have pgstes? if yes, we are done */
	if (mm_has_pgste(mm))
		return 0;
	/* Fail if the page tables are 2K */
	if (!mm_alloc_pgste(mm))
		return -EINVAL;
	down_write(&mm->mmap_sem);
	mm->context.has_pgste = 1;
	/* split thp mappings and disable thp for future mappings */
	thp_split_mm(mm);
	zap_zero_pages(mm);
	up_write(&mm->mmap_sem);
	return 0;
}
EXPORT_SYMBOL_GPL(s390_enable_sie);
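
/*
 * Illustrative sketch, not part of the original file: enabling SIE for
 * the current process before creating any gmap, as a KVM-like caller
 * would. The helper name is hypothetical.
 */
static struct gmap *example_setup_guest(unsigned long limit)
{
	if (s390_enable_sie())
		return NULL;	/* 2K page tables, cannot run SIE */
	return gmap_create(current->mm, limit);
}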
/*
 * Enable storage key handling from now on and initialize the storage
 * keys with the default key.
 */
static int __s390_enable_skey(pte_t *pte, unsigned long addr,
			      unsigned long next, struct mm_walk *walk)
{
	/* Clear storage key */
	ptep_zap_key(walk->mm, addr, pte);
	return 0;
}

int s390_enable_skey(void)
{
	struct mm_walk walk = { .pte_entry = __s390_enable_skey };
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	int rc = 0;

	down_write(&mm->mmap_sem);
	if (mm_use_skey(mm))
		goto out_up;

	mm->context.use_skey = 1;
	for (vma = mm->mmap; vma; vma = vma->vm_next) {
		if (ksm_madvise(vma, vma->vm_start, vma->vm_end,
				MADV_UNMERGEABLE, &vma->vm_flags)) {
			mm->context.use_skey = 0;
			rc = -ENOMEM;
			goto out_up;
		}
	}
	mm->def_flags &= ~VM_MERGEABLE;

	walk.mm = mm;
	walk_page_range(0, TASK_SIZE, &walk);

out_up:
	up_write(&mm->mmap_sem);
	return rc;
}
EXPORT_SYMBOL_GPL(s390_enable_skey);
/*
 * Reset CMMA state, make all pages stable again.
 */
static int __s390_reset_cmma(pte_t *pte, unsigned long addr,
			     unsigned long next, struct mm_walk *walk)
{
	ptep_zap_unused(walk->mm, addr, pte, 1);
	return 0;
}

void s390_reset_cmma(struct mm_struct *mm)
{
	struct mm_walk walk = { .pte_entry = __s390_reset_cmma };

	down_write(&mm->mmap_sem);
	walk.mm = mm;
	walk_page_range(0, TASK_SIZE, &walk);
	up_write(&mm->mmap_sem);
}
EXPORT_SYMBOL_GPL(s390_reset_cmma);