/*
 *    Copyright IBM Corp. 2007, 2011
 *    Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
 */
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/smp.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/spinlock.h>
#include <linux/module.h>
#include <linux/quicklist.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/swapops.h>
#include <linux/ksm.h>
#include <linux/mman.h>

#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/mmu_context.h>
#ifndef CONFIG_64BIT
#define ALLOC_ORDER	1
#define FRAG_MASK	0x0f
#else
#define ALLOC_ORDER	2
#define FRAG_MASK	0x03
#endif
unsigned long *crst_table_alloc(struct mm_struct *mm)
{
	struct page *page = alloc_pages(GFP_KERNEL, ALLOC_ORDER);

	if (!page)
		return NULL;
	return (unsigned long *) page_to_phys(page);
}

void crst_table_free(struct mm_struct *mm, unsigned long *table)
{
	free_pages((unsigned long) table, ALLOC_ORDER);
}
#ifdef CONFIG_64BIT
static void __crst_table_upgrade(void *arg)
{
	struct mm_struct *mm = arg;

	if (current->active_mm == mm) {
		clear_user_asce();
		set_user_asce(mm);
	}
	__tlb_flush_local();
}
int crst_table_upgrade(struct mm_struct *mm, unsigned long limit)
{
	unsigned long *table, *pgd;
	unsigned long entry;
	int flush;

	BUG_ON(limit > (1UL << 53));
	flush = 0;
repeat:
	table = crst_table_alloc(mm);
	if (!table)
		return -ENOMEM;
	spin_lock_bh(&mm->page_table_lock);
	if (mm->context.asce_limit < limit) {
		pgd = (unsigned long *) mm->pgd;
		if (mm->context.asce_limit <= (1UL << 31)) {
			entry = _REGION3_ENTRY_EMPTY;
			mm->context.asce_limit = 1UL << 42;
			mm->context.asce_bits = _ASCE_TABLE_LENGTH |
						_ASCE_USER_BITS |
						_ASCE_TYPE_REGION3;
		} else {
			entry = _REGION2_ENTRY_EMPTY;
			mm->context.asce_limit = 1UL << 53;
			mm->context.asce_bits = _ASCE_TABLE_LENGTH |
						_ASCE_USER_BITS |
						_ASCE_TYPE_REGION2;
		}
		crst_table_init(table, entry);
		pgd_populate(mm, (pgd_t *) table, (pud_t *) pgd);
		mm->pgd = (pgd_t *) table;
		mm->task_size = mm->context.asce_limit;
		table = NULL;
		flush = 1;
	}
	spin_unlock_bh(&mm->page_table_lock);
	if (table)
		crst_table_free(mm, table);
	if (mm->context.asce_limit < limit)
		goto repeat;
	if (flush)
		on_each_cpu(__crst_table_upgrade, mm, 0);
	return 0;
}
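/*
 * Illustrative sketch (not from this file): the upgrade is typically driven
 * from the mmap path when a mapping is requested above the current ASCE
 * limit; the exact caller shown here is hypothetical.
 *
 *	if (addr + len > mm->context.asce_limit &&
 *	    addr + len <= TASK_MAX_SIZE)
 *		rc = crst_table_upgrade(mm, addr + len);
 */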
void crst_table_downgrade(struct mm_struct *mm, unsigned long limit)
{
	pgd_t *pgd;

	if (current->active_mm == mm) {
		clear_user_asce();
		__tlb_flush_mm(mm);
	}
	while (mm->context.asce_limit > limit) {
		pgd = mm->pgd;
		switch (pgd_val(*pgd) & _REGION_ENTRY_TYPE_MASK) {
		case _REGION_ENTRY_TYPE_R2:
			mm->context.asce_limit = 1UL << 42;
			mm->context.asce_bits = _ASCE_TABLE_LENGTH |
						_ASCE_USER_BITS |
						_ASCE_TYPE_REGION3;
			break;
		case _REGION_ENTRY_TYPE_R3:
			mm->context.asce_limit = 1UL << 31;
			mm->context.asce_bits = _ASCE_TABLE_LENGTH |
						_ASCE_USER_BITS |
						_ASCE_TYPE_SEGMENT;
			break;
		default:
			BUG();
		}
		mm->pgd = (pgd_t *) (pgd_val(*pgd) & _REGION_ENTRY_ORIGIN);
		mm->task_size = mm->context.asce_limit;
		crst_table_free(mm, (unsigned long *) pgd);
	}
	if (current->active_mm == mm)
		set_user_asce(mm);
}
#endif /* CONFIG_64BIT */
#ifdef CONFIG_PGSTE

/**
 * gmap_alloc - allocate a guest address space
 * @mm: pointer to the parent mm_struct
 * @limit: maximum size of the gmap address space
 *
 * Returns a guest address space structure.
 */
struct gmap *gmap_alloc(struct mm_struct *mm, unsigned long limit)
{
	struct gmap *gmap;
	struct page *page;
	unsigned long *table;
	unsigned long etype, atype;

	if (limit < (1UL << 31)) {
		limit = (1UL << 31) - 1;
		atype = _ASCE_TYPE_SEGMENT;
		etype = _SEGMENT_ENTRY_EMPTY;
	} else if (limit < (1UL << 42)) {
		limit = (1UL << 42) - 1;
		atype = _ASCE_TYPE_REGION3;
		etype = _REGION3_ENTRY_EMPTY;
	} else if (limit < (1UL << 53)) {
		limit = (1UL << 53) - 1;
		atype = _ASCE_TYPE_REGION2;
		etype = _REGION2_ENTRY_EMPTY;
	} else {
		limit = -1UL;
		atype = _ASCE_TYPE_REGION1;
		etype = _REGION1_ENTRY_EMPTY;
	}
	gmap = kzalloc(sizeof(struct gmap), GFP_KERNEL);
	if (!gmap)
		goto out;
	INIT_LIST_HEAD(&gmap->crst_list);
	INIT_RADIX_TREE(&gmap->guest_to_host, GFP_KERNEL);
	INIT_RADIX_TREE(&gmap->host_to_guest, GFP_ATOMIC);
	spin_lock_init(&gmap->guest_table_lock);
	gmap->mm = mm;
	page = alloc_pages(GFP_KERNEL, ALLOC_ORDER);
	if (!page)
		goto out_free;
	page->index = 0;
	list_add(&page->lru, &gmap->crst_list);
	table = (unsigned long *) page_to_phys(page);
	crst_table_init(table, etype);
	gmap->table = table;
	gmap->asce = atype | _ASCE_TABLE_LENGTH |
		_ASCE_USER_BITS | __pa(table);
	gmap->asce_end = limit;
	down_write(&mm->mmap_sem);
	list_add(&gmap->list, &mm->context.gmap_list);
	up_write(&mm->mmap_sem);
	return gmap;

out_free:
	kfree(gmap);
out:
	return NULL;
}
EXPORT_SYMBOL_GPL(gmap_alloc);
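/*
 * Usage sketch (illustrative, assuming a KVM-like caller; values made up):
 * allocate a guest address space sized for the guest memory, and release it
 * with gmap_free() when the VM goes away.
 *
 *	struct gmap *gmap;
 *
 *	gmap = gmap_alloc(current->mm, (1UL << 44) - 1);
 *	if (!gmap)
 *		return -ENOMEM;
 *	...
 *	gmap_free(gmap);
 */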
static void gmap_flush_tlb(struct gmap *gmap)
{
	if (MACHINE_HAS_IDTE)
		__tlb_flush_asce(gmap->mm, gmap->asce);
	else
		__tlb_flush_global();
}
static void gmap_radix_tree_free(struct radix_tree_root *root)
{
	struct radix_tree_iter iter;
	unsigned long indices[16];
	unsigned long index;
	void **slot;
	int i, nr;

	/* A radix tree is freed by deleting all of its entries */
	index = 0;
	do {
		nr = 0;
		radix_tree_for_each_slot(slot, root, &iter, index) {
			indices[nr] = iter.index;
			if (++nr == 16)
				break;
		}
		for (i = 0; i < nr; i++) {
			index = indices[i];
			radix_tree_delete(root, index);
		}
	} while (nr > 0);
}
/**
 * gmap_free - free a guest address space
 * @gmap: pointer to the guest address space structure
 */
void gmap_free(struct gmap *gmap)
{
	struct page *page, *next;

	/* Flush tlb. */
	if (MACHINE_HAS_IDTE)
		__tlb_flush_asce(gmap->mm, gmap->asce);
	else
		__tlb_flush_global();

	/* Free all segment & region tables. */
	list_for_each_entry_safe(page, next, &gmap->crst_list, lru)
		__free_pages(page, ALLOC_ORDER);
	gmap_radix_tree_free(&gmap->guest_to_host);
	gmap_radix_tree_free(&gmap->host_to_guest);
	down_write(&gmap->mm->mmap_sem);
	list_del(&gmap->list);
	up_write(&gmap->mm->mmap_sem);
	kfree(gmap);
}
EXPORT_SYMBOL_GPL(gmap_free);
/**
 * gmap_enable - switch primary space to the guest address space
 * @gmap: pointer to the guest address space structure
 */
void gmap_enable(struct gmap *gmap)
{
	S390_lowcore.gmap = (unsigned long) gmap;
}
EXPORT_SYMBOL_GPL(gmap_enable);
/**
 * gmap_disable - switch back to the standard primary address space
 * @gmap: pointer to the guest address space structure
 */
void gmap_disable(struct gmap *gmap)
{
	S390_lowcore.gmap = 0UL;
}
EXPORT_SYMBOL_GPL(gmap_disable);
/*
 * gmap_alloc_table is assumed to be called with mmap_sem held
 */
static int gmap_alloc_table(struct gmap *gmap, unsigned long *table,
			    unsigned long init, unsigned long gaddr)
{
	struct page *page;
	unsigned long *new;

	/* since we don't free the gmap table until gmap_free we can unlock */
	page = alloc_pages(GFP_KERNEL, ALLOC_ORDER);
	if (!page)
		return -ENOMEM;
	new = (unsigned long *) page_to_phys(page);
	crst_table_init(new, init);
	spin_lock(&gmap->mm->page_table_lock);
	if (*table & _REGION_ENTRY_INVALID) {
		list_add(&page->lru, &gmap->crst_list);
		*table = (unsigned long) new | _REGION_ENTRY_LENGTH |
			(*table & _REGION_ENTRY_TYPE_MASK);
		page->index = gaddr;
		page = NULL;
	}
	spin_unlock(&gmap->mm->page_table_lock);
	if (page)
		__free_pages(page, ALLOC_ORDER);
	return 0;
}
/**
 * __gmap_segment_gaddr - find virtual address from segment pointer
 * @entry: pointer to a segment table entry in the guest address space
 *
 * Returns the virtual address in the guest address space for the segment
 */
static unsigned long __gmap_segment_gaddr(unsigned long *entry)
{
	struct page *page;
	unsigned long offset;

	offset = (unsigned long) entry / sizeof(unsigned long);
	offset = (offset & (PTRS_PER_PMD - 1)) * PMD_SIZE;
	page = pmd_to_page((pmd_t *) entry);
	return page->index + offset;
}
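/*
 * Worked example for the arithmetic above (illustrative): a segment table
 * entry lives in a page holding PTRS_PER_PMD (2048) entries, so
 * "entry / sizeof(unsigned long)" modulo PTRS_PER_PMD is the entry index,
 * and index * PMD_SIZE (1 MB) is the byte offset of that segment within the
 * region covered by the table. page->index holds the guest address of the
 * region start, set by gmap_alloc_table(), so entry 5 of a table whose page
 * has index 0x80000000 resolves to guest address 0x80500000.
 */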
/**
 * __gmap_unlink_by_vmaddr - unlink a single segment via a host address
 * @gmap: pointer to the guest address space structure
 * @vmaddr: address in the host process address space
 *
 * Returns 1 if a TLB flush is required
 */
static int __gmap_unlink_by_vmaddr(struct gmap *gmap, unsigned long vmaddr)
{
	unsigned long *entry;
	int flush = 0;

	spin_lock(&gmap->guest_table_lock);
	entry = radix_tree_delete(&gmap->host_to_guest, vmaddr >> PMD_SHIFT);
	if (entry) {
		flush = (*entry != _SEGMENT_ENTRY_INVALID);
		*entry = _SEGMENT_ENTRY_INVALID;
	}
	spin_unlock(&gmap->guest_table_lock);
	return flush;
}
/**
 * __gmap_unmap_by_gaddr - unmap a single segment via a guest address
 * @gmap: pointer to the guest address space structure
 * @gaddr: address in the guest address space
 *
 * Returns 1 if a TLB flush is required
 */
static int __gmap_unmap_by_gaddr(struct gmap *gmap, unsigned long gaddr)
{
	unsigned long vmaddr;

	vmaddr = (unsigned long) radix_tree_delete(&gmap->guest_to_host,
						   gaddr >> PMD_SHIFT);
	return vmaddr ? __gmap_unlink_by_vmaddr(gmap, vmaddr) : 0;
}
/**
 * gmap_unmap_segment - unmap segment from the guest address space
 * @gmap: pointer to the guest address space structure
 * @to: address in the guest address space
 * @len: length of the memory area to unmap
 *
 * Returns 0 if the unmap succeeded, -EINVAL if not.
 */
int gmap_unmap_segment(struct gmap *gmap, unsigned long to, unsigned long len)
{
	unsigned long off;
	int flush;

	if ((to | len) & (PMD_SIZE - 1))
		return -EINVAL;
	if (len == 0 || to + len < to)
		return -EINVAL;

	flush = 0;
	down_write(&gmap->mm->mmap_sem);
	for (off = 0; off < len; off += PMD_SIZE)
		flush |= __gmap_unmap_by_gaddr(gmap, to + off);
	up_write(&gmap->mm->mmap_sem);
	if (flush)
		gmap_flush_tlb(gmap);
	return 0;
}
EXPORT_SYMBOL_GPL(gmap_unmap_segment);
/**
 * gmap_map_segment - map a segment to the guest address space
 * @gmap: pointer to the guest address space structure
 * @from: source address in the parent address space
 * @to: target address in the guest address space
 * @len: length of the memory area to map
 *
 * Returns 0 if the mmap succeeded, -EINVAL or -ENOMEM if not.
 */
int gmap_map_segment(struct gmap *gmap, unsigned long from,
		     unsigned long to, unsigned long len)
{
	unsigned long off;
	int flush;

	if ((from | to | len) & (PMD_SIZE - 1))
		return -EINVAL;
	if (len == 0 || from + len < from || to + len < to ||
	    from + len > TASK_MAX_SIZE || to + len > gmap->asce_end)
		return -EINVAL;

	flush = 0;
	down_write(&gmap->mm->mmap_sem);
	for (off = 0; off < len; off += PMD_SIZE) {
		/* Remove old translation */
		flush |= __gmap_unmap_by_gaddr(gmap, to + off);
		/* Store new translation */
		if (radix_tree_insert(&gmap->guest_to_host,
				      (to + off) >> PMD_SHIFT,
				      (void *) from + off))
			break;
	}
	up_write(&gmap->mm->mmap_sem);
	if (flush)
		gmap_flush_tlb(gmap);
	if (off >= len)
		return 0;
	gmap_unmap_segment(gmap, to, len);
	return -ENOMEM;
}
EXPORT_SYMBOL_GPL(gmap_map_segment);
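/*
 * Usage sketch (illustrative; addresses are made up): make 16 MB of the
 * parent's address space, starting at "from", appear at guest address 0.
 * All three values must be 1 MB (PMD_SIZE) aligned.
 *
 *	rc = gmap_map_segment(gmap, from, 0x0UL, 16UL << 20);
 *	if (rc)
 *		return rc;
 *	...
 *	rc = gmap_unmap_segment(gmap, 0x0UL, 16UL << 20);
 */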
/**
 * __gmap_translate - translate a guest address to a user space address
 * @gmap: pointer to guest mapping meta data structure
 * @gaddr: guest address
 *
 * Returns user space address which corresponds to the guest address or
 * -EFAULT if no such mapping exists.
 * This function does not establish potentially missing page table entries.
 * The mmap_sem of the mm that belongs to the address space must be held
 * when this function gets called.
 */
unsigned long __gmap_translate(struct gmap *gmap, unsigned long gaddr)
{
	unsigned long vmaddr;

	vmaddr = (unsigned long)
		radix_tree_lookup(&gmap->guest_to_host, gaddr >> PMD_SHIFT);
	return vmaddr ? (vmaddr | (gaddr & ~PMD_MASK)) : -EFAULT;
}
EXPORT_SYMBOL_GPL(__gmap_translate);
/**
 * gmap_translate - translate a guest address to a user space address
 * @gmap: pointer to guest mapping meta data structure
 * @gaddr: guest address
 *
 * Returns user space address which corresponds to the guest address or
 * -EFAULT if no such mapping exists.
 * This function does not establish potentially missing page table entries.
 */
unsigned long gmap_translate(struct gmap *gmap, unsigned long gaddr)
{
	unsigned long rc;

	down_read(&gmap->mm->mmap_sem);
	rc = __gmap_translate(gmap, gaddr);
	up_read(&gmap->mm->mmap_sem);
	return rc;
}
EXPORT_SYMBOL_GPL(gmap_translate);
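/*
 * Example of the translation arithmetic (illustrative; addresses made up):
 * the radix tree is indexed by guest segment number (gaddr >> PMD_SHIFT)
 * and stores the host segment base, so the result is the stored base plus
 * the byte offset within the 1 MB segment:
 *
 *	vmaddr = base | (gaddr & ~PMD_MASK);
 *
 * e.g. gaddr 0x00500123 (segment 5) mapped at host base 0x3f500000
 * translates to vmaddr 0x3f500123.
 */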
/**
 * gmap_unlink - disconnect a page table from the gmap shadow tables
 * @mm: pointer to the process mm_struct
 * @table: pointer to the host page table
 * @vmaddr: vm address associated with the host page table
 */
static void gmap_unlink(struct mm_struct *mm, unsigned long *table,
			unsigned long vmaddr)
{
	struct gmap *gmap;
	int flush;

	list_for_each_entry(gmap, &mm->context.gmap_list, list) {
		flush = __gmap_unlink_by_vmaddr(gmap, vmaddr);
		if (flush)
			gmap_flush_tlb(gmap);
	}
}
/**
 * __gmap_link - set up shadow page tables to connect a host to a guest address
 * @gmap: pointer to guest mapping meta data structure
 * @gaddr: guest address
 * @vmaddr: vm address
 *
 * Returns 0 on success, -ENOMEM for out of memory conditions, and -EFAULT
 * if the vm address is already mapped to a different guest segment.
 * The mmap_sem of the mm that belongs to the address space must be held
 * when this function gets called.
 */
int __gmap_link(struct gmap *gmap, unsigned long gaddr, unsigned long vmaddr)
{
	struct mm_struct *mm;
	unsigned long *table;
	spinlock_t *ptl;
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	int rc;

	/* Create higher level tables in the gmap page table */
	table = gmap->table;
	if ((gmap->asce & _ASCE_TYPE_MASK) >= _ASCE_TYPE_REGION1) {
		table += (gaddr >> 53) & 0x7ff;
		if ((*table & _REGION_ENTRY_INVALID) &&
		    gmap_alloc_table(gmap, table, _REGION2_ENTRY_EMPTY,
				     gaddr & 0xffe0000000000000))
			return -ENOMEM;
		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
	}
	if ((gmap->asce & _ASCE_TYPE_MASK) >= _ASCE_TYPE_REGION2) {
		table += (gaddr >> 42) & 0x7ff;
		if ((*table & _REGION_ENTRY_INVALID) &&
		    gmap_alloc_table(gmap, table, _REGION3_ENTRY_EMPTY,
				     gaddr & 0xfffffc0000000000))
			return -ENOMEM;
		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
	}
	if ((gmap->asce & _ASCE_TYPE_MASK) >= _ASCE_TYPE_REGION3) {
		table += (gaddr >> 31) & 0x7ff;
		if ((*table & _REGION_ENTRY_INVALID) &&
		    gmap_alloc_table(gmap, table, _SEGMENT_ENTRY_EMPTY,
				     gaddr & 0xffffffff80000000))
			return -ENOMEM;
		table = (unsigned long *)(*table & _REGION_ENTRY_ORIGIN);
	}
	table += (gaddr >> 20) & 0x7ff;
	/* Walk the parent mm page table */
	mm = gmap->mm;
	pgd = pgd_offset(mm, vmaddr);
	VM_BUG_ON(pgd_none(*pgd));
	pud = pud_offset(pgd, vmaddr);
	VM_BUG_ON(pud_none(*pud));
	pmd = pmd_offset(pud, vmaddr);
	VM_BUG_ON(pmd_none(*pmd));
	/* large pmds cannot yet be handled */
	if (pmd_large(*pmd))
		return -EFAULT;
	/* Link gmap segment table entry location to page table. */
	rc = radix_tree_preload(GFP_KERNEL);
	if (rc)
		return rc;
	ptl = pmd_lock(mm, pmd);
	spin_lock(&gmap->guest_table_lock);
	if (*table == _SEGMENT_ENTRY_INVALID) {
		rc = radix_tree_insert(&gmap->host_to_guest,
				       vmaddr >> PMD_SHIFT, table);
		if (!rc)
			*table = pmd_val(*pmd);
	} else
		rc = 0;
	spin_unlock(&gmap->guest_table_lock);
	spin_unlock(ptl);
	radix_tree_preload_end();
	return rc;
}
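/*
 * Note on the shift constants above (illustrative): with 11-bit table
 * indices and a 20-bit segment offset, a 64-bit guest address decomposes as
 *
 *	| R1: bits 63-53 | R2: 52-42 | R3: 41-31 | SX: 30-20 | offset: 19-0 |
 *
 * which is why each level extracts "(gaddr >> shift) & 0x7ff", and why the
 * masks 0xffe0000000000000, 0xfffffc0000000000 and 0xffffffff80000000 round
 * gaddr down to the start of the region the newly allocated table covers.
 */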
/**
 * gmap_fault - resolve a fault on a guest address
 * @gmap: pointer to guest mapping meta data structure
 * @gaddr: guest address
 * @fault_flags: flags to pass down to handle_mm_fault()
 *
 * Returns 0 on success, -ENOMEM for out of memory conditions, and -EFAULT
 * if the vm address is already mapped to a different guest segment.
 */
int gmap_fault(struct gmap *gmap, unsigned long gaddr,
	       unsigned int fault_flags)
{
	unsigned long vmaddr;
	int rc;

	down_read(&gmap->mm->mmap_sem);
	vmaddr = __gmap_translate(gmap, gaddr);
	if (IS_ERR_VALUE(vmaddr)) {
		rc = vmaddr;
		goto out_up;
	}
	if (fixup_user_fault(current, gmap->mm, vmaddr, fault_flags)) {
		rc = -EFAULT;
		goto out_up;
	}
	rc = __gmap_link(gmap, gaddr, vmaddr);
out_up:
	up_read(&gmap->mm->mmap_sem);
	return rc;
}
EXPORT_SYMBOL_GPL(gmap_fault);
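/*
 * Usage sketch (illustrative, loosely modeled on how a hypervisor's guest
 * fault handler might resolve a guest page fault; surrounding code is
 * hypothetical):
 *
 *	rc = gmap_fault(gmap, gaddr, FAULT_FLAG_WRITE);
 *	if (rc == -EFAULT)
 *		... inject an addressing exception into the guest ...
 *	else if (rc)
 *		... retry later or fail with -ENOMEM ...
 */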
static void gmap_zap_swap_entry(swp_entry_t entry, struct mm_struct *mm)
{
	if (!non_swap_entry(entry))
		dec_mm_counter(mm, MM_SWAPENTS);
	else if (is_migration_entry(entry)) {
		struct page *page = migration_entry_to_page(entry);

		if (PageAnon(page))
			dec_mm_counter(mm, MM_ANONPAGES);
		else
			dec_mm_counter(mm, MM_FILEPAGES);
	}
	free_swap_and_cache(entry);
}
/*
 * this function is assumed to be called with mmap_sem held
 */
void __gmap_zap(struct gmap *gmap, unsigned long gaddr)
{
	unsigned long vmaddr, ptev, pgstev;
	pte_t *ptep, pte;
	spinlock_t *ptl;
	pgste_t pgste;

	/* Find the vm address for the guest address */
	vmaddr = (unsigned long) radix_tree_lookup(&gmap->guest_to_host,
						   gaddr >> PMD_SHIFT);
	if (!vmaddr)
		return;
	vmaddr |= gaddr & ~PMD_MASK;
	/* Get pointer to the page table entry */
	ptep = get_locked_pte(gmap->mm, vmaddr, &ptl);
	if (unlikely(!ptep))
		return;
	pte = *ptep;
	if (!pte_swap(pte))
		goto out_pte;
	/* Zap unused and logically-zero pages */
	pgste = pgste_get_lock(ptep);
	pgstev = pgste_val(pgste);
	ptev = pte_val(pte);
	if (((pgstev & _PGSTE_GPS_USAGE_MASK) == _PGSTE_GPS_USAGE_UNUSED) ||
	    ((pgstev & _PGSTE_GPS_ZERO) && (ptev & _PAGE_INVALID))) {
		gmap_zap_swap_entry(pte_to_swp_entry(pte), gmap->mm);
		pte_clear(gmap->mm, vmaddr, ptep);
	}
	pgste_set_unlock(ptep, pgste);
out_pte:
	pte_unmap_unlock(ptep, ptl);
}
EXPORT_SYMBOL_GPL(__gmap_zap);
void gmap_discard(struct gmap *gmap, unsigned long from, unsigned long to)
{
	unsigned long gaddr, vmaddr, size;
	struct vm_area_struct *vma;

	down_read(&gmap->mm->mmap_sem);
	for (gaddr = from; gaddr < to;
	     gaddr = (gaddr + PMD_SIZE) & PMD_MASK) {
		/* Find the vm address for the guest address */
		vmaddr = (unsigned long)
			radix_tree_lookup(&gmap->guest_to_host,
					  gaddr >> PMD_SHIFT);
		if (!vmaddr)
			continue;
		vmaddr |= gaddr & ~PMD_MASK;
		/* Find vma in the parent mm */
		vma = find_vma(gmap->mm, vmaddr);
		size = min(to - gaddr, PMD_SIZE - (gaddr & ~PMD_MASK));
		zap_page_range(vma, vmaddr, size, NULL);
	}
	up_read(&gmap->mm->mmap_sem);
}
EXPORT_SYMBOL_GPL(gmap_discard);
static LIST_HEAD(gmap_notifier_list);
static DEFINE_SPINLOCK(gmap_notifier_lock);
/**
 * gmap_register_ipte_notifier - register a pte invalidation callback
 * @nb: pointer to the gmap notifier block
 */
void gmap_register_ipte_notifier(struct gmap_notifier *nb)
{
	spin_lock(&gmap_notifier_lock);
	list_add(&nb->list, &gmap_notifier_list);
	spin_unlock(&gmap_notifier_lock);
}
EXPORT_SYMBOL_GPL(gmap_register_ipte_notifier);
/**
 * gmap_unregister_ipte_notifier - remove a pte invalidation callback
 * @nb: pointer to the gmap notifier block
 */
void gmap_unregister_ipte_notifier(struct gmap_notifier *nb)
{
	spin_lock(&gmap_notifier_lock);
	list_del_init(&nb->list);
	spin_unlock(&gmap_notifier_lock);
}
EXPORT_SYMBOL_GPL(gmap_unregister_ipte_notifier);
/**
 * gmap_ipte_notify - mark a range of ptes for invalidation notification
 * @gmap: pointer to guest mapping meta data structure
 * @gaddr: virtual address in the guest address space
 * @len: size of area
 *
 * Returns 0 if for each page in the given range a gmap mapping exists and
 * the invalidation notification could be set. If the gmap mapping is missing
 * for one or more pages -EFAULT is returned. If no memory could be allocated
 * -ENOMEM is returned. This function establishes missing page table entries.
 */
int gmap_ipte_notify(struct gmap *gmap, unsigned long gaddr, unsigned long len)
{
	unsigned long addr;
	spinlock_t *ptl;
	pte_t *ptep, entry;
	pgste_t pgste;
	int rc = 0;

	if ((gaddr & ~PAGE_MASK) || (len & ~PAGE_MASK))
		return -EINVAL;
	down_read(&gmap->mm->mmap_sem);
	while (len) {
		/* Convert gmap address and connect the page tables */
		addr = __gmap_translate(gmap, gaddr);
		if (IS_ERR_VALUE(addr)) {
			rc = addr;
			break;
		}
		/* Get the page mapped */
		if (fixup_user_fault(current, gmap->mm, addr, FAULT_FLAG_WRITE)) {
			rc = -EFAULT;
			break;
		}
		rc = __gmap_link(gmap, gaddr, addr);
		if (rc)
			break;
		/* Walk the process page table, lock and get pte pointer */
		ptep = get_locked_pte(gmap->mm, addr, &ptl);
		VM_BUG_ON(!ptep);
		/* Set notification bit in the pgste of the pte */
		entry = *ptep;
		if ((pte_val(entry) & (_PAGE_INVALID | _PAGE_PROTECT)) == 0) {
			pgste = pgste_get_lock(ptep);
			pgste_val(pgste) |= PGSTE_IN_BIT;
			pgste_set_unlock(ptep, pgste);
			gaddr += PAGE_SIZE;
			len -= PAGE_SIZE;
		}
		pte_unmap_unlock(ptep, ptl);
	}
	up_read(&gmap->mm->mmap_sem);
	return rc;
}
EXPORT_SYMBOL_GPL(gmap_ipte_notify);
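/*
 * Usage sketch (illustrative; the names below are hypothetical): a consumer
 * registers a notifier block once and then arms the guest range it wants to
 * be told about.
 *
 *	static void my_notifier_call(struct gmap *gmap, unsigned long gaddr)
 *	{
 *		... react to the invalidation of gaddr ...
 *	}
 *	static struct gmap_notifier my_nb = {
 *		.notifier_call = my_notifier_call,
 *	};
 *
 *	gmap_register_ipte_notifier(&my_nb);
 *	rc = gmap_ipte_notify(gmap, gaddr, PAGE_SIZE);
 */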
/**
 * gmap_do_ipte_notify - call all invalidation callbacks for a specific pte.
 * @mm: pointer to the process mm_struct
 * @vmaddr: virtual address in the process address space
 * @pte: pointer to the page table entry
 *
 * This function is assumed to be called with the page table lock held
 * for the pte to notify.
 */
void gmap_do_ipte_notify(struct mm_struct *mm, unsigned long vmaddr, pte_t *pte)
{
	unsigned long offset, gaddr;
	unsigned long *table;
	struct gmap_notifier *nb;
	struct gmap *gmap;

	offset = ((unsigned long) pte) & (255 * sizeof(pte_t));
	offset = offset * (4096 / sizeof(pte_t));
	spin_lock(&gmap_notifier_lock);
	list_for_each_entry(gmap, &mm->context.gmap_list, list) {
		table = radix_tree_lookup(&gmap->host_to_guest,
					  vmaddr >> PMD_SHIFT);
		if (!table)
			continue;
		gaddr = __gmap_segment_gaddr(table) + offset;
		list_for_each_entry(nb, &gmap_notifier_list, list)
			nb->notifier_call(gmap, gaddr);
	}
	spin_unlock(&gmap_notifier_lock);
}
EXPORT_SYMBOL_GPL(gmap_do_ipte_notify);
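/*
 * Worked example for the offset computation above (illustrative): a page
 * table occupies the first 256 pte slots (2 KB) of its page, so
 * "pte & (255 * sizeof(pte_t))" is the byte offset of the pte within the
 * table, and multiplying by 4096 / sizeof(pte_t) == 512 converts 8-byte pte
 * steps into 4 KB page steps, i.e. the byte offset within the 1 MB segment.
 * For pte index 3: 3 * 8 = 24, and 24 * 512 = 0x3000 = 3 pages.
 */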
static inline int page_table_with_pgste(struct page *page)
{
	return atomic_read(&page->_mapcount) == 0;
}
static inline unsigned long *page_table_alloc_pgste(struct mm_struct *mm)
{
	struct page *page;
	unsigned long *table;

	page = alloc_page(GFP_KERNEL|__GFP_REPEAT);
	if (!page)
		return NULL;
	if (!pgtable_page_ctor(page)) {
		__free_page(page);
		return NULL;
	}
	atomic_set(&page->_mapcount, 0);
	table = (unsigned long *) page_to_phys(page);
	clear_table(table, _PAGE_INVALID, PAGE_SIZE/2);
	clear_table(table + PTRS_PER_PTE, 0, PAGE_SIZE/2);
	return table;
}
static inline void page_table_free_pgste(unsigned long *table)
{
	struct page *page;

	page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
	pgtable_page_dtor(page);
	atomic_set(&page->_mapcount, -1);
	__free_page(page);
}
int set_guest_storage_key(struct mm_struct *mm, unsigned long addr,
			  unsigned long key, bool nq)
{
	spinlock_t *ptl;
	pgste_t old, new;
	pte_t *ptep;

	down_read(&mm->mmap_sem);
retry:
	ptep = get_locked_pte(mm, addr, &ptl);
	if (unlikely(!ptep)) {
		up_read(&mm->mmap_sem);
		return -EFAULT;
	}
	if (!(pte_val(*ptep) & _PAGE_INVALID) &&
	     (pte_val(*ptep) & _PAGE_PROTECT)) {
		pte_unmap_unlock(ptep, ptl);
		if (fixup_user_fault(current, mm, addr, FAULT_FLAG_WRITE)) {
			up_read(&mm->mmap_sem);
			return -EFAULT;
		}
		goto retry;
	}

	new = old = pgste_get_lock(ptep);
	pgste_val(new) &= ~(PGSTE_GR_BIT | PGSTE_GC_BIT |
			    PGSTE_ACC_BITS | PGSTE_FP_BIT);
	pgste_val(new) |= (key & (_PAGE_CHANGED | _PAGE_REFERENCED)) << 48;
	pgste_val(new) |= (key & (_PAGE_ACC_BITS | _PAGE_FP_BIT)) << 56;
	if (!(pte_val(*ptep) & _PAGE_INVALID)) {
		unsigned long address, bits, skey;

		address = pte_val(*ptep) & PAGE_MASK;
		skey = (unsigned long) page_get_storage_key(address);
		bits = skey & (_PAGE_CHANGED | _PAGE_REFERENCED);
		skey = key & (_PAGE_ACC_BITS | _PAGE_FP_BIT);
		/* Set storage key ACC and FP */
		page_set_storage_key(address, skey, !nq);
		/* Merge host changed & referenced into pgste */
		pgste_val(new) |= bits << 52;
	}
	/* changing the guest storage key is considered a change of the page */
	if ((pgste_val(new) ^ pgste_val(old)) &
	    (PGSTE_ACC_BITS | PGSTE_FP_BIT | PGSTE_GR_BIT | PGSTE_GC_BIT))
		pgste_val(new) |= PGSTE_UC_BIT;

	pgste_set_unlock(ptep, new);
	pte_unmap_unlock(ptep, ptl);
	up_read(&mm->mmap_sem);
	return 0;
}
EXPORT_SYMBOL(set_guest_storage_key);
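/*
 * Usage sketch (illustrative; values and surrounding code are made up): a
 * handler for the guest's SSKE instruction would set the guest view of a
 * storage key roughly like this, with "nq" selecting the nonquiescing
 * variant:
 *
 *	rc = set_guest_storage_key(current->mm, hva, key, nq);
 *	if (rc < 0)
 *		return rc;
 */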
unsigned long get_guest_storage_key(struct mm_struct *mm, unsigned long addr)
{
	spinlock_t *ptl;
	pgste_t pgste;
	pte_t *ptep;
	uint64_t physaddr;
	unsigned long key = 0;

	down_read(&mm->mmap_sem);
	ptep = get_locked_pte(mm, addr, &ptl);
	if (unlikely(!ptep)) {
		up_read(&mm->mmap_sem);
		return -EFAULT;
	}
	pgste = pgste_get_lock(ptep);

	if (pte_val(*ptep) & _PAGE_INVALID) {
		key |= (pgste_val(pgste) & PGSTE_ACC_BITS) >> 56;
		key |= (pgste_val(pgste) & PGSTE_FP_BIT) >> 56;
		key |= (pgste_val(pgste) & PGSTE_GR_BIT) >> 48;
		key |= (pgste_val(pgste) & PGSTE_GC_BIT) >> 48;
	} else {
		physaddr = pte_val(*ptep) & PAGE_MASK;
		key = page_get_storage_key(physaddr);

		/* Reflect guest's logical view, not physical */
		if (pgste_val(pgste) & PGSTE_GR_BIT)
			key |= _PAGE_REFERENCED;
		if (pgste_val(pgste) & PGSTE_GC_BIT)
			key |= _PAGE_CHANGED;
	}

	pgste_set_unlock(ptep, pgste);
	pte_unmap_unlock(ptep, ptl);
	up_read(&mm->mmap_sem);
	return key;
}
EXPORT_SYMBOL(get_guest_storage_key);
#else /* CONFIG_PGSTE */

static inline int page_table_with_pgste(struct page *page)
{
	return 0;
}

static inline unsigned long *page_table_alloc_pgste(struct mm_struct *mm)
{
	return NULL;
}

static inline void page_table_free_pgste(unsigned long *table)
{
}

static inline void gmap_unlink(struct mm_struct *mm, unsigned long *table,
			unsigned long vmaddr)
{
}

#endif /* CONFIG_PGSTE */
static inline unsigned int atomic_xor_bits(atomic_t *v, unsigned int bits)
{
	unsigned int old, new;

	do {
		old = atomic_read(v);
		new = old ^ bits;
	} while (atomic_cmpxchg(v, old, new) != old);
	return new;
}
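/*
 * Note (illustrative): page->_mapcount is reused here as an allocation
 * bitmap for the 1K/2K page table fragments of a 4K page. The low FRAG_MASK
 * bits track allocated fragments; the same bits shifted left by four mark
 * fragments pending RCU removal. atomic_xor_bits() toggles one bit and
 * returns the new mask, e.g. allocating the second 2K half of a page:
 *
 *	mask = atomic_xor_bits(&page->_mapcount, 2);	// 0x01 -> 0x03
 */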
/*
 * page table entry allocation/free routines.
 */
unsigned long *page_table_alloc(struct mm_struct *mm)
{
	unsigned long *uninitialized_var(table);
	struct page *uninitialized_var(page);
	unsigned int mask, bit;

	if (mm_has_pgste(mm))
		return page_table_alloc_pgste(mm);
	/* Allocate fragments of a 4K page as 1K/2K page table */
	spin_lock_bh(&mm->context.list_lock);
	mask = FRAG_MASK;
	if (!list_empty(&mm->context.pgtable_list)) {
		page = list_first_entry(&mm->context.pgtable_list,
					struct page, lru);
		table = (unsigned long *) page_to_phys(page);
		mask = atomic_read(&page->_mapcount);
		mask = mask | (mask >> 4);
	}
	if ((mask & FRAG_MASK) == FRAG_MASK) {
		spin_unlock_bh(&mm->context.list_lock);
		page = alloc_page(GFP_KERNEL|__GFP_REPEAT);
		if (!page)
			return NULL;
		if (!pgtable_page_ctor(page)) {
			__free_page(page);
			return NULL;
		}
		atomic_set(&page->_mapcount, 1);
		table = (unsigned long *) page_to_phys(page);
		clear_table(table, _PAGE_INVALID, PAGE_SIZE);
		spin_lock_bh(&mm->context.list_lock);
		list_add(&page->lru, &mm->context.pgtable_list);
	} else {
		for (bit = 1; mask & bit; bit <<= 1)
			table += PTRS_PER_PTE;
		mask = atomic_xor_bits(&page->_mapcount, bit);
		if ((mask & FRAG_MASK) == FRAG_MASK)
			list_del(&page->lru);
	}
	spin_unlock_bh(&mm->context.list_lock);
	return table;
}
void page_table_free(struct mm_struct *mm, unsigned long *table)
{
	struct page *page;
	unsigned int bit, mask;

	page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
	if (page_table_with_pgste(page))
		return page_table_free_pgste(table);
	/* Free 1K/2K page table fragment of a 4K page */
	bit = 1 << ((__pa(table) & ~PAGE_MASK)/(PTRS_PER_PTE*sizeof(pte_t)));
	spin_lock_bh(&mm->context.list_lock);
	if ((atomic_read(&page->_mapcount) & FRAG_MASK) != FRAG_MASK)
		list_del(&page->lru);
	mask = atomic_xor_bits(&page->_mapcount, bit);
	if (mask & FRAG_MASK)
		list_add(&page->lru, &mm->context.pgtable_list);
	spin_unlock_bh(&mm->context.list_lock);
	if (mask == 0) {
		pgtable_page_dtor(page);
		atomic_set(&page->_mapcount, -1);
		__free_page(page);
	}
}
static void __page_table_free_rcu(void *table, unsigned bit)
{
	struct page *page;

	if (bit == FRAG_MASK)
		return page_table_free_pgste(table);
	/* Free 1K/2K page table fragment of a 4K page */
	page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
	if (atomic_xor_bits(&page->_mapcount, bit) == 0) {
		pgtable_page_dtor(page);
		atomic_set(&page->_mapcount, -1);
		__free_page(page);
	}
}
void page_table_free_rcu(struct mmu_gather *tlb, unsigned long *table,
			 unsigned long vmaddr)
{
	struct mm_struct *mm;
	struct page *page;
	unsigned int bit, mask;

	mm = tlb->mm;
	page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
	if (page_table_with_pgste(page)) {
		gmap_unlink(mm, table, vmaddr);
		table = (unsigned long *) (__pa(table) | FRAG_MASK);
		tlb_remove_table(tlb, table);
		return;
	}
	bit = 1 << ((__pa(table) & ~PAGE_MASK) / (PTRS_PER_PTE*sizeof(pte_t)));
	spin_lock_bh(&mm->context.list_lock);
	if ((atomic_read(&page->_mapcount) & FRAG_MASK) != FRAG_MASK)
		list_del(&page->lru);
	mask = atomic_xor_bits(&page->_mapcount, bit | (bit << 4));
	if (mask & FRAG_MASK)
		list_add_tail(&page->lru, &mm->context.pgtable_list);
	spin_unlock_bh(&mm->context.list_lock);
	table = (unsigned long *) (__pa(table) | (bit << 4));
	tlb_remove_table(tlb, table);
}
static void __tlb_remove_table(void *_table)
{
	const unsigned long mask = (FRAG_MASK << 4) | FRAG_MASK;
	void *table = (void *)((unsigned long) _table & ~mask);
	unsigned type = (unsigned long) _table & mask;

	if (type)
		__page_table_free_rcu(table, type);
	else
		free_pages((unsigned long) table, ALLOC_ORDER);
}
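/*
 * Note (illustrative): the table pointers queued via tlb_remove_table()
 * carry a type tag in their low bits, which are always zero for the aligned
 * tables themselves. A pgste page table is queued as
 *
 *	table = (unsigned long *) (__pa(table) | FRAG_MASK);
 *
 * a 1K/2K fragment carries its fragment bit shifted left by four, and an
 * untagged pointer (type == 0) is a CRST table freed with free_pages().
 * __tlb_remove_table() above strips the tag and dispatches accordingly.
 */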
static void tlb_remove_table_smp_sync(void *arg)
{
	/* Simply deliver the interrupt */
}
static void tlb_remove_table_one(void *table)
{
	/*
	 * This isn't an RCU grace period and hence the page-tables cannot be
	 * assumed to be actually RCU-freed.
	 *
	 * It is however sufficient for software page-table walkers that rely
	 * on IRQ disabling. See the comment near struct mmu_table_batch.
	 */
	smp_call_function(tlb_remove_table_smp_sync, NULL, 1);
	__tlb_remove_table(table);
}
static void tlb_remove_table_rcu(struct rcu_head *head)
{
	struct mmu_table_batch *batch;
	int i;

	batch = container_of(head, struct mmu_table_batch, rcu);

	for (i = 0; i < batch->nr; i++)
		__tlb_remove_table(batch->tables[i]);

	free_page((unsigned long)batch);
}
void tlb_table_flush(struct mmu_gather *tlb)
{
	struct mmu_table_batch **batch = &tlb->batch;

	if (*batch) {
		call_rcu_sched(&(*batch)->rcu, tlb_remove_table_rcu);
		*batch = NULL;
	}
}
void tlb_remove_table(struct mmu_gather *tlb, void *table)
{
	struct mmu_table_batch **batch = &tlb->batch;

	tlb->mm->context.flush_mm = 1;
	if (*batch == NULL) {
		*batch = (struct mmu_table_batch *)
			__get_free_page(GFP_NOWAIT | __GFP_NOWARN);
		if (*batch == NULL) {
			__tlb_flush_mm_lazy(tlb->mm);
			tlb_remove_table_one(table);
			return;
		}
		(*batch)->nr = 0;
	}
	(*batch)->tables[(*batch)->nr++] = table;
	if ((*batch)->nr == MAX_TABLE_BATCH)
		tlb_flush_mmu(tlb);
}
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static inline void thp_split_vma(struct vm_area_struct *vma)
{
	unsigned long addr;

	for (addr = vma->vm_start; addr < vma->vm_end; addr += PAGE_SIZE)
		follow_page(vma, addr, FOLL_SPLIT);
}
static inline void thp_split_mm(struct mm_struct *mm)
{
	struct vm_area_struct *vma;

	for (vma = mm->mmap; vma != NULL; vma = vma->vm_next) {
		thp_split_vma(vma);
		vma->vm_flags &= ~VM_HUGEPAGE;
		vma->vm_flags |= VM_NOHUGEPAGE;
	}
	mm->def_flags |= VM_NOHUGEPAGE;
}
#else
static inline void thp_split_mm(struct mm_struct *mm)
{
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
static unsigned long page_table_realloc_pmd(struct mmu_gather *tlb,
				struct mm_struct *mm, pud_t *pud,
				unsigned long addr, unsigned long end)
{
	unsigned long next, *table, *new;
	struct page *page;
	spinlock_t *ptl;
	pmd_t *pmd;

	pmd = pmd_offset(pud, addr);
	do {
		next = pmd_addr_end(addr, end);
again:
		if (pmd_none_or_clear_bad(pmd))
			continue;
		table = (unsigned long *) pmd_deref(*pmd);
		page = pfn_to_page(__pa(table) >> PAGE_SHIFT);
		if (page_table_with_pgste(page))
			continue;
		/* Allocate new page table with pgstes */
		new = page_table_alloc_pgste(mm);
		if (!new)
			return -ENOMEM;

		ptl = pmd_lock(mm, pmd);
		if (likely((unsigned long *) pmd_deref(*pmd) == table)) {
			/* Nuke pmd entry pointing to the "short" page table */
			pmdp_flush_lazy(mm, addr, pmd);
			pmd_clear(pmd);
			/* Copy ptes from old table to new table */
			memcpy(new, table, PAGE_SIZE/2);
			clear_table(table, _PAGE_INVALID, PAGE_SIZE/2);
			/* Establish new table */
			pmd_populate(mm, pmd, (pte_t *) new);
			/* Free old table with rcu, there might be a walker! */
			page_table_free_rcu(tlb, table, addr);
			new = NULL;
		}
		spin_unlock(ptl);
		if (new) {
			page_table_free_pgste(new);
			goto again;
		}
	} while (pmd++, addr = next, addr != end);

	return addr;
}
static unsigned long page_table_realloc_pud(struct mmu_gather *tlb,
				   struct mm_struct *mm, pgd_t *pgd,
				   unsigned long addr, unsigned long end)
{
	unsigned long next;
	pud_t *pud;

	pud = pud_offset(pgd, addr);
	do {
		next = pud_addr_end(addr, end);
		if (pud_none_or_clear_bad(pud))
			continue;
		next = page_table_realloc_pmd(tlb, mm, pud, addr, next);
		if (unlikely(IS_ERR_VALUE(next)))
			return next;
	} while (pud++, addr = next, addr != end);

	return addr;
}
static unsigned long page_table_realloc(struct mmu_gather *tlb, struct mm_struct *mm,
					unsigned long addr, unsigned long end)
{
	unsigned long next;
	pgd_t *pgd;

	pgd = pgd_offset(mm, addr);
	do {
		next = pgd_addr_end(addr, end);
		if (pgd_none_or_clear_bad(pgd))
			continue;
		next = page_table_realloc_pud(tlb, mm, pgd, addr, next);
		if (unlikely(IS_ERR_VALUE(next)))
			return next;
	} while (pgd++, addr = next, addr != end);

	return 0;
}
/*
 * switch on pgstes for its userspace process (for kvm)
 */
int s390_enable_sie(void)
{
	struct task_struct *tsk = current;
	struct mm_struct *mm = tsk->mm;
	struct mmu_gather tlb;

	/* Do we have pgstes? if yes, we are done */
	if (mm_has_pgste(tsk->mm))
		return 0;

	down_write(&mm->mmap_sem);
	/* split thp mappings and disable thp for future mappings */
	thp_split_mm(mm);
	/* Reallocate the page tables with pgstes */
	tlb_gather_mmu(&tlb, mm, 0, TASK_SIZE);
	if (!page_table_realloc(&tlb, mm, 0, TASK_SIZE))
		mm->context.has_pgste = 1;
	tlb_finish_mmu(&tlb, 0, TASK_SIZE);
	up_write(&mm->mmap_sem);
	return mm->context.has_pgste ? 0 : -ENOMEM;
}
EXPORT_SYMBOL_GPL(s390_enable_sie);
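/*
 * Note (illustrative): a hypervisor would call s390_enable_sie() once per
 * VM before the first SIE entry, e.g. from its arch init path:
 *
 *	rc = s390_enable_sie();
 *	if (rc)
 *		return rc;
 *
 * Once it succeeds, every page table of the mm carries pgstes and
 * mm_has_pgste() is true, so subsequent calls return immediately.
 */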
/*
 * Enable storage key handling from now on and initialize the storage
 * keys with the default key.
 */
static int __s390_enable_skey(pte_t *pte, unsigned long addr,
			      unsigned long next, struct mm_walk *walk)
{
	unsigned long ptev;
	pgste_t pgste;

	pgste = pgste_get_lock(pte);
	/*
	 * Remove all zero page mappings,
	 * after establishing a policy to forbid zero page mappings
	 * following faults for that page will get fresh anonymous pages
	 */
	if (is_zero_pfn(pte_pfn(*pte))) {
		ptep_flush_direct(walk->mm, addr, pte);
		pte_val(*pte) = _PAGE_INVALID;
	}
	/* Clear storage key */
	pgste_val(pgste) &= ~(PGSTE_ACC_BITS | PGSTE_FP_BIT |
			      PGSTE_GR_BIT | PGSTE_GC_BIT);
	ptev = pte_val(*pte);
	if (!(ptev & _PAGE_INVALID) && (ptev & _PAGE_WRITE))
		page_set_storage_key(ptev & PAGE_MASK, PAGE_DEFAULT_KEY, 1);
	pgste_set_unlock(pte, pgste);
	return 0;
}
int s390_enable_skey(void)
{
	struct mm_walk walk = { .pte_entry = __s390_enable_skey };
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	int rc = 0;

	down_write(&mm->mmap_sem);
	if (mm_use_skey(mm))
		goto out_up;

	mm->context.use_skey = 1;
	for (vma = mm->mmap; vma; vma = vma->vm_next) {
		if (ksm_madvise(vma, vma->vm_start, vma->vm_end,
				MADV_UNMERGEABLE, &vma->vm_flags)) {
			mm->context.use_skey = 0;
			rc = -ENOMEM;
			goto out_up;
		}
	}
	mm->def_flags &= ~VM_MERGEABLE;

	walk.mm = mm;
	walk_page_range(0, TASK_SIZE, &walk);

out_up:
	up_write(&mm->mmap_sem);
	return rc;
}
EXPORT_SYMBOL_GPL(s390_enable_skey);
/*
 * Reset CMMA state, make all pages stable again.
 */
static int __s390_reset_cmma(pte_t *pte, unsigned long addr,
			     unsigned long next, struct mm_walk *walk)
{
	pgste_t pgste;

	pgste = pgste_get_lock(pte);
	pgste_val(pgste) &= ~_PGSTE_GPS_USAGE_MASK;
	pgste_set_unlock(pte, pgste);
	return 0;
}

void s390_reset_cmma(struct mm_struct *mm)
{
	struct mm_walk walk = { .pte_entry = __s390_reset_cmma };

	down_write(&mm->mmap_sem);
	walk.mm = mm;
	walk_page_range(0, TASK_SIZE, &walk);
	up_write(&mm->mmap_sem);
}
EXPORT_SYMBOL_GPL(s390_reset_cmma);
/*
 * Test and reset if a guest page is dirty
 */
bool gmap_test_and_clear_dirty(unsigned long address, struct gmap *gmap)
{
	pte_t *pte;
	spinlock_t *ptl;
	bool dirty = false;

	pte = get_locked_pte(gmap->mm, address, &ptl);
	if (unlikely(!pte))
		return false;

	if (ptep_test_and_clear_user_dirty(gmap->mm, address, pte))
		dirty = true;

	spin_unlock(ptl);
	return dirty;
}
EXPORT_SYMBOL_GPL(gmap_test_and_clear_dirty);
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
int pmdp_clear_flush_young(struct vm_area_struct *vma, unsigned long address,
			   pmd_t *pmdp)
{
	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
	/* No need to flush TLB
	 * On s390 reference bits are in storage key and never in TLB */
	return pmdp_test_and_clear_young(vma, address, pmdp);
}
int pmdp_set_access_flags(struct vm_area_struct *vma,
			  unsigned long address, pmd_t *pmdp,
			  pmd_t entry, int dirty)
{
	VM_BUG_ON(address & ~HPAGE_PMD_MASK);

	entry = pmd_mkyoung(entry);
	if (dirty)
		entry = pmd_mkdirty(entry);
	if (pmd_same(*pmdp, entry))
		return 0;
	pmdp_invalidate(vma, address, pmdp);
	set_pmd_at(vma->vm_mm, address, pmdp, entry);
	return 1;
}
static void pmdp_splitting_flush_sync(void *arg)
{
	/* Simply deliver the interrupt */
}

void pmdp_splitting_flush(struct vm_area_struct *vma, unsigned long address,
			  pmd_t *pmdp)
{
	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
	if (!test_and_set_bit(_SEGMENT_ENTRY_SPLIT_BIT,
			      (unsigned long *) pmdp)) {
		/* need to serialize against gup-fast (IRQ disabled) */
		smp_call_function(pmdp_splitting_flush_sync, NULL, 1);
	}
}
void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
				pgtable_t pgtable)
{
	struct list_head *lh = (struct list_head *) pgtable;

	assert_spin_locked(pmd_lockptr(mm, pmdp));

	/* FIFO */
	if (!pmd_huge_pte(mm, pmdp))
		INIT_LIST_HEAD(lh);
	else
		list_add(lh, (struct list_head *) pmd_huge_pte(mm, pmdp));
	pmd_huge_pte(mm, pmdp) = pgtable;
}
pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp)
{
	struct list_head *lh;
	pgtable_t pgtable;
	pte_t *ptep;

	assert_spin_locked(pmd_lockptr(mm, pmdp));

	/* FIFO */
	pgtable = pmd_huge_pte(mm, pmdp);
	lh = (struct list_head *) pgtable;
	if (list_empty(lh))
		pmd_huge_pte(mm, pmdp) = NULL;
	else {
		pmd_huge_pte(mm, pmdp) = (pgtable_t) lh->next;
		list_del(lh);
	}
	ptep = (pte_t *) pgtable;
	pte_val(*ptep) = _PAGE_INVALID;
	ptep++;
	pte_val(*ptep) = _PAGE_INVALID;
	return pgtable;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */