// SPDX-License-Identifier: GPL-2.0
#include <linux/mm.h>
#include <linux/gfp.h>
#include <linux/hugetlb.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/tlb.h>
#include <asm/fixmap.h>
#include <asm/mtrr.h>

#define PGALLOC_GFP (GFP_KERNEL_ACCOUNT | __GFP_ZERO)

#ifdef CONFIG_HIGHPTE
#define PGALLOC_USER_GFP __GFP_HIGHMEM
#else
#define PGALLOC_USER_GFP 0
#endif

gfp_t __userpte_alloc_gfp = PGALLOC_GFP | PGALLOC_USER_GFP;
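
/*
 * All page-table pages are allocated zeroed. GFP_KERNEL_ACCOUNT charges
 * user page-table pages to the allocating process's memory cgroup, and
 * __GFP_HIGHMEM (with CONFIG_HIGHPTE) lets 32-bit kernels place user PTE
 * pages in high memory.
 */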

pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
{
	return (pte_t *)__get_free_page(PGALLOC_GFP & ~__GFP_ACCOUNT);
}
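
/*
 * Note that pte_alloc_one_kernel() masks off __GFP_ACCOUNT: kernel page
 * tables belong to the kernel itself and are not charged to any memory
 * cgroup.
 */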

pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long address)
{
	struct page *pte;

	pte = alloc_pages(__userpte_alloc_gfp, 0);
	if (!pte)
		return NULL;
	if (!pgtable_page_ctor(pte)) {
		__free_page(pte);
		return NULL;
	}
	return pte;
}

static int __init setup_userpte(char *arg)
{
	if (!arg)
		return -EINVAL;

	/*
	 * "userpte=nohigh" disables allocation of user pagetables in
	 * high memory.
	 */
	if (strcmp(arg, "nohigh") == 0)
		__userpte_alloc_gfp &= ~__GFP_HIGHMEM;
	else
		return -EINVAL;
	return 0;
}
early_param("userpte", setup_userpte);

void ___pte_free_tlb(struct mmu_gather *tlb, struct page *pte)
{
	pgtable_page_dtor(pte);
	paravirt_release_pte(page_to_pfn(pte));
	tlb_remove_table(tlb, pte);
}
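
/*
 * tlb_remove_table() hands the page to the mmu_gather batching machinery,
 * which delays the actual free until concurrent lockless page-table
 * walkers (e.g. fast GUP) can no longer be referencing it.
 */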

#if CONFIG_PGTABLE_LEVELS > 2
void ___pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd)
{
	struct page *page = virt_to_page(pmd);

	paravirt_release_pmd(__pa(pmd) >> PAGE_SHIFT);
	/*
	 * NOTE! For PAE, any changes to the top page-directory-pointer-table
	 * entries need a full cr3 reload to flush.
	 */
#ifdef CONFIG_X86_PAE
	tlb->need_flush_all = 1;
#endif
	pgtable_pmd_page_dtor(page);
	tlb_remove_table(tlb, page);
}

#if CONFIG_PGTABLE_LEVELS > 3
void ___pud_free_tlb(struct mmu_gather *tlb, pud_t *pud)
{
	paravirt_release_pud(__pa(pud) >> PAGE_SHIFT);
	tlb_remove_table(tlb, virt_to_page(pud));
}

#if CONFIG_PGTABLE_LEVELS > 4
void ___p4d_free_tlb(struct mmu_gather *tlb, p4d_t *p4d)
{
	paravirt_release_p4d(__pa(p4d) >> PAGE_SHIFT);
	tlb_remove_table(tlb, virt_to_page(p4d));
}
#endif	/* CONFIG_PGTABLE_LEVELS > 4 */
#endif	/* CONFIG_PGTABLE_LEVELS > 3 */
#endif	/* CONFIG_PGTABLE_LEVELS > 2 */

static inline void pgd_list_add(pgd_t *pgd)
{
	struct page *page = virt_to_page(pgd);

	list_add(&page->lru, &pgd_list);
}

static inline void pgd_list_del(pgd_t *pgd)
{
	struct page *page = virt_to_page(pgd);

	list_del(&page->lru);
}

#define UNSHARED_PTRS_PER_PGD				\
	(SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)
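
/*
 * UNSHARED_PTRS_PER_PGD is the number of pgd entries private to each
 * pagetable: only the user entries (below KERNEL_PGD_BOUNDARY) when the
 * kernel pmd is shared, all PTRS_PER_PGD entries otherwise.
 */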

static void pgd_set_mm(pgd_t *pgd, struct mm_struct *mm)
{
	BUILD_BUG_ON(sizeof(virt_to_page(pgd)->index) < sizeof(mm));
	virt_to_page(pgd)->index = (pgoff_t)mm;
}

struct mm_struct *pgd_page_get_mm(struct page *page)
{
	return (struct mm_struct *)page->index;
}
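
/*
 * The otherwise-unused page->index field of a pgd page is used to stash
 * the owning mm, so that pageattr.c can find the mm for any pgd it
 * encounters on pgd_list.
 */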

static void pgd_ctor(struct mm_struct *mm, pgd_t *pgd)
{
	/* If the pgd points to a shared pagetable level (either the
	   ptes in non-PAE, or shared PMD in PAE), then just copy the
	   references from swapper_pg_dir. */
	if (CONFIG_PGTABLE_LEVELS == 2 ||
	    (CONFIG_PGTABLE_LEVELS == 3 && SHARED_KERNEL_PMD) ||
	    CONFIG_PGTABLE_LEVELS >= 4) {
		clone_pgd_range(pgd + KERNEL_PGD_BOUNDARY,
				swapper_pg_dir + KERNEL_PGD_BOUNDARY,
				KERNEL_PGD_PTRS);
	}

	/* list required to sync kernel mapping updates */
	if (!SHARED_KERNEL_PMD) {
		pgd_set_mm(pgd, mm);
		pgd_list_add(pgd);
	}
}

static void pgd_dtor(pgd_t *pgd)
{
	if (SHARED_KERNEL_PMD)
		return;

	spin_lock(&pgd_lock);
	pgd_list_del(pgd);
	spin_unlock(&pgd_lock);
}

/*
 * List of all pgd's needed for non-PAE so it can invalidate entries
 * in both cached and uncached pgd's; not needed for PAE since the
 * kernel pmd is shared. If PAE were not to share the pmd a similar
 * tactic would be needed. This is essentially codepath-based locking
 * against pageattr.c; it is the unique case in which a valid change
 * of kernel pagetables can't be lazily synchronized by vmalloc faults.
 * vmalloc faults work because attached pagetables are never freed.
 */

#ifdef CONFIG_X86_PAE
/*
 * In PAE mode, we need to do a cr3 reload (=tlb flush) when
 * updating the top-level pagetable entries to guarantee the
 * processor notices the update. Since this is expensive, and
 * all 4 top-level entries are used almost immediately in a
 * new process's life, we just pre-populate them here.
 *
 * Also, if we're in a paravirt environment where the kernel pmd is
 * not shared between pagetables (!SHARED_KERNEL_PMDS), we allocate
 * and initialize the kernel pmds here.
 */
#define PREALLOCATED_PMDS	UNSHARED_PTRS_PER_PGD

/*
 * We allocate separate PMDs for the kernel part of the user page-table
 * when PTI is enabled. We need them to map the per-process LDT into the
 * user-space page-table.
 */
#define PREALLOCATED_USER_PMDS	(static_cpu_has(X86_FEATURE_PTI) ? \
					KERNEL_PGD_PTRS : 0)

void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
{
	paravirt_alloc_pmd(mm, __pa(pmd) >> PAGE_SHIFT);

	/* Note: almost everything apart from _PAGE_PRESENT is
	   reserved at the pmd (PDPT) level. */
	set_pud(pudp, __pud(__pa(pmd) | _PAGE_PRESENT));

	/*
	 * According to Intel App note "TLBs, Paging-Structure Caches,
	 * and Their Invalidation", April 2007, document 317080-001,
	 * section 8.1: in PAE mode we explicitly have to flush the
	 * TLB via cr3 if the top-level pgd is changed...
	 */
	flush_tlb_mm(mm);
}
#else  /* !CONFIG_X86_PAE */

/* No need to prepopulate any pagetable entries in non-PAE modes. */
#define PREALLOCATED_PMDS	0
#define PREALLOCATED_USER_PMDS	0
#endif	/* CONFIG_X86_PAE */

static void free_pmds(struct mm_struct *mm, pmd_t *pmds[], int count)
{
	int i;

	for (i = 0; i < count; i++)
		if (pmds[i]) {
			pgtable_pmd_page_dtor(virt_to_page(pmds[i]));
			free_page((unsigned long)pmds[i]);
			mm_dec_nr_pmds(mm);
		}
}

static int preallocate_pmds(struct mm_struct *mm, pmd_t *pmds[], int count)
{
	int i;
	bool failed = false;
	gfp_t gfp = PGALLOC_GFP;

	if (mm == &init_mm)
		gfp &= ~__GFP_ACCOUNT;

	for (i = 0; i < count; i++) {
		pmd_t *pmd = (pmd_t *)__get_free_page(gfp);

		if (!pmd)
			failed = true;
		if (pmd && !pgtable_pmd_page_ctor(virt_to_page(pmd))) {
			free_page((unsigned long)pmd);
			pmd = NULL;
			failed = true;
		}
		if (pmd)
			mm_inc_nr_pmds(mm);
		pmds[i] = pmd;
	}

	if (failed) {
		free_pmds(mm, pmds, count);
		return -ENOMEM;
	}

	return 0;
}
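
/*
 * preallocate_pmds() is all-or-nothing: if any of the 'count' pmd pages
 * cannot be allocated, everything allocated so far is freed again and
 * -ENOMEM is returned.
 */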

/*
 * Mop up any pmd pages which may still be attached to the pgd.
 * Normally they will be freed by munmap/exit_mmap, but any pmd we
 * preallocate which never got a corresponding vma will need to be
 * freed manually.
 */
static void mop_up_one_pmd(struct mm_struct *mm, pgd_t *pgdp)
{
	pgd_t pgd = *pgdp;

	if (pgd_val(pgd) != 0) {
		pmd_t *pmd = (pmd_t *)pgd_page_vaddr(pgd);

		*pgdp = native_make_pgd(0);

		paravirt_release_pmd(pgd_val(pgd) >> PAGE_SHIFT);
		pmd_free(mm, pmd);
		mm_dec_nr_pmds(mm);
	}
}

static void pgd_mop_up_pmds(struct mm_struct *mm, pgd_t *pgdp)
{
	int i;

	for (i = 0; i < PREALLOCATED_PMDS; i++)
		mop_up_one_pmd(mm, &pgdp[i]);

#ifdef CONFIG_PAGE_TABLE_ISOLATION
	if (!static_cpu_has(X86_FEATURE_PTI))
		return;

	pgdp = kernel_to_user_pgdp(pgdp);

	for (i = 0; i < PREALLOCATED_USER_PMDS; i++)
		mop_up_one_pmd(mm, &pgdp[i + KERNEL_PGD_BOUNDARY]);
#endif
}
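
/*
 * With PTI, the user half of the pgd has its own preallocated
 * kernel-range pmds (used to map the per-process LDT, see above), so
 * those must be mopped up as well.
 */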

static void pgd_prepopulate_pmd(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmds[])
{
	p4d_t *p4d;
	pud_t *pud;
	int i;

	if (PREALLOCATED_PMDS == 0) /* Work around gcc-3.4.x bug */
		return;

	p4d = p4d_offset(pgd, 0);
	pud = pud_offset(p4d, 0);

	for (i = 0; i < PREALLOCATED_PMDS; i++, pud++) {
		pmd_t *pmd = pmds[i];

		if (i >= KERNEL_PGD_BOUNDARY)
			memcpy(pmd, (pmd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
			       sizeof(pmd_t) * PTRS_PER_PMD);

		pud_populate(mm, pud, pmd);
	}
}
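
/*
 * Entries at or above KERNEL_PGD_BOUNDARY exist in the loop above only
 * when the kernel pmd is not shared (e.g. Xen PAE); their contents are
 * copied from the init pagetable, swapper_pg_dir, before being hooked in.
 */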

#ifdef CONFIG_PAGE_TABLE_ISOLATION
static void pgd_prepopulate_user_pmd(struct mm_struct *mm,
				     pgd_t *k_pgd, pmd_t *pmds[])
{
	pgd_t *s_pgd = kernel_to_user_pgdp(swapper_pg_dir);
	pgd_t *u_pgd = kernel_to_user_pgdp(k_pgd);
	p4d_t *u_p4d;
	pud_t *u_pud;
	int i;

	u_p4d = p4d_offset(u_pgd, 0);
	u_pud = pud_offset(u_p4d, 0);

	s_pgd += KERNEL_PGD_BOUNDARY;
	u_pud += KERNEL_PGD_BOUNDARY;

	for (i = 0; i < PREALLOCATED_USER_PMDS; i++, u_pud++, s_pgd++) {
		pmd_t *pmd = pmds[i];

		memcpy(pmd, (pmd_t *)pgd_page_vaddr(*s_pgd),
		       sizeof(pmd_t) * PTRS_PER_PMD);

		pud_populate(mm, u_pud, pmd);
	}
}
#else
static void pgd_prepopulate_user_pmd(struct mm_struct *mm,
				     pgd_t *k_pgd, pmd_t *pmds[])
{
}
#endif

/*
 * Xen paravirt assumes pgd table should be in one page. 64 bit kernel also
 * assumes that pgd should be in one page.
 *
 * But kernel with PAE paging that is not running as a Xen domain
 * only needs to allocate 32 bytes for pgd instead of one page.
 */
#ifdef CONFIG_X86_PAE

#include <linux/slab.h>

#define PGD_SIZE	(PTRS_PER_PGD * sizeof(pgd_t))
#define PGD_ALIGN	32

static struct kmem_cache *pgd_cache;

static int __init pgd_cache_init(void)
{
	/*
	 * When PAE kernel is running as a Xen domain, it does not use
	 * shared kernel pmd. And this requires a whole page for pgd.
	 */
	if (!SHARED_KERNEL_PMD)
		return 0;

	/*
	 * When PAE kernel is not running as a Xen domain, it uses
	 * shared kernel pmd. Shared kernel pmd does not require a whole
	 * page for pgd. We are able to just allocate a 32-byte slab for pgd.
	 * During boot time, we create a 32-byte slab for pgd table allocation.
	 */
	pgd_cache = kmem_cache_create("pgd_cache", PGD_SIZE, PGD_ALIGN,
				      SLAB_PANIC, NULL);
	return 0;
}
core_initcall(pgd_cache_init);

static inline pgd_t *_pgd_alloc(void)
{
	/*
	 * If no SHARED_KERNEL_PMD, PAE kernel is running as a Xen domain.
	 * We allocate one page for pgd.
	 */
	if (!SHARED_KERNEL_PMD)
		return (pgd_t *)__get_free_pages(PGALLOC_GFP,
						 PGD_ALLOCATION_ORDER);

	/*
	 * Now PAE kernel is not running as a Xen domain. We can allocate
	 * a 32-byte slab for pgd to save memory space.
	 */
	return kmem_cache_alloc(pgd_cache, PGALLOC_GFP);
}

static inline void _pgd_free(pgd_t *pgd)
{
	if (!SHARED_KERNEL_PMD)
		free_pages((unsigned long)pgd, PGD_ALLOCATION_ORDER);
	else
		kmem_cache_free(pgd_cache, pgd);
}
#else

static inline pgd_t *_pgd_alloc(void)
{
	return (pgd_t *)__get_free_pages(PGALLOC_GFP, PGD_ALLOCATION_ORDER);
}

static inline void _pgd_free(pgd_t *pgd)
{
	free_pages((unsigned long)pgd, PGD_ALLOCATION_ORDER);
}
#endif /* CONFIG_X86_PAE */

pgd_t *pgd_alloc(struct mm_struct *mm)
{
	pgd_t *pgd;
	pmd_t *u_pmds[PREALLOCATED_USER_PMDS];
	pmd_t *pmds[PREALLOCATED_PMDS];

	pgd = _pgd_alloc();
	if (pgd == NULL)
		goto out;

	mm->pgd = pgd;

	if (preallocate_pmds(mm, pmds, PREALLOCATED_PMDS) != 0)
		goto out_free_pgd;

	if (preallocate_pmds(mm, u_pmds, PREALLOCATED_USER_PMDS) != 0)
		goto out_free_pmds;

	if (paravirt_pgd_alloc(mm) != 0)
		goto out_free_user_pmds;

	/*
	 * Make sure that pre-populating the pmds is atomic with
	 * respect to anything walking the pgd_list, so that they
	 * never see a partially populated pgd.
	 */
	spin_lock(&pgd_lock);

	pgd_ctor(mm, pgd);
	pgd_prepopulate_pmd(mm, pgd, pmds);
	pgd_prepopulate_user_pmd(mm, pgd, u_pmds);

	spin_unlock(&pgd_lock);

	return pgd;

out_free_user_pmds:
	free_pmds(mm, u_pmds, PREALLOCATED_USER_PMDS);
out_free_pmds:
	free_pmds(mm, pmds, PREALLOCATED_PMDS);
out_free_pgd:
	_pgd_free(pgd);
out:
	return NULL;
}
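
/*
 * The error unwinding above releases resources in the reverse order of
 * their allocation: user pmds first, then kernel pmds, then the pgd page
 * itself.
 */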

void pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
	pgd_mop_up_pmds(mm, pgd);
	pgd_dtor(pgd);
	paravirt_pgd_free(mm, pgd);
	_pgd_free(pgd);
}

/*
 * Used to set accessed or dirty bits in the page table entries
 * on other architectures. On x86, the accessed and dirty bits
 * are tracked by hardware. However, do_wp_page calls this function
 * to also make the pte writeable at the same time the dirty bit is
 * set. In that case we do actually need to write the PTE.
 */
int ptep_set_access_flags(struct vm_area_struct *vma,
			  unsigned long address, pte_t *ptep,
			  pte_t entry, int dirty)
{
	int changed = !pte_same(*ptep, entry);

	if (changed && dirty)
		set_pte(ptep, entry);

	return changed;
}
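
/*
 * Note that no TLB flush is done above: the update only ever makes the
 * entry more permissive, and at worst x86 takes a spurious fault that
 * reloads the new entry (same reasoning as for the pmd/pud variants
 * below).
 */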

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
int pmdp_set_access_flags(struct vm_area_struct *vma,
			  unsigned long address, pmd_t *pmdp,
			  pmd_t entry, int dirty)
{
	int changed = !pmd_same(*pmdp, entry);

	VM_BUG_ON(address & ~HPAGE_PMD_MASK);

	if (changed && dirty) {
		set_pmd(pmdp, entry);
		/*
		 * We had a write-protection fault here and changed the pmd
		 * to a more permissive one. No need to flush the TLB for
		 * that, #PF is architecturally guaranteed to do that and in
		 * the worst-case we'll generate a spurious fault.
		 */
	}

	return changed;
}

int pudp_set_access_flags(struct vm_area_struct *vma, unsigned long address,
			  pud_t *pudp, pud_t entry, int dirty)
{
	int changed = !pud_same(*pudp, entry);

	VM_BUG_ON(address & ~HPAGE_PUD_MASK);

	if (changed && dirty) {
		set_pud(pudp, entry);
		/*
		 * We had a write-protection fault here and changed the pud
		 * to a more permissive one. No need to flush the TLB for
		 * that, #PF is architecturally guaranteed to do that and in
		 * the worst-case we'll generate a spurious fault.
		 */
	}

	return changed;
}
#endif

int ptep_test_and_clear_young(struct vm_area_struct *vma,
			      unsigned long addr, pte_t *ptep)
{
	int ret = 0;

	if (pte_young(*ptep))
		ret = test_and_clear_bit(_PAGE_BIT_ACCESSED,
					 (unsigned long *) &ptep->pte);

	return ret;
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
int pmdp_test_and_clear_young(struct vm_area_struct *vma,
			      unsigned long addr, pmd_t *pmdp)
{
	int ret = 0;

	if (pmd_young(*pmdp))
		ret = test_and_clear_bit(_PAGE_BIT_ACCESSED,
					 (unsigned long *)pmdp);

	return ret;
}

int pudp_test_and_clear_young(struct vm_area_struct *vma,
			      unsigned long addr, pud_t *pudp)
{
	int ret = 0;

	if (pud_young(*pudp))
		ret = test_and_clear_bit(_PAGE_BIT_ACCESSED,
					 (unsigned long *)pudp);

	return ret;
}
#endif

int ptep_clear_flush_young(struct vm_area_struct *vma,
			   unsigned long address, pte_t *ptep)
{
	/*
	 * On x86 CPUs, clearing the accessed bit without a TLB flush
	 * doesn't cause data corruption. [ It could cause incorrect
	 * page aging and the (mistaken) reclaim of hot pages, but the
	 * chance of that should be relatively low. ]
	 *
	 * So as a performance optimization don't flush the TLB when
	 * clearing the accessed bit, it will eventually be flushed by
	 * a context switch or a VM operation anyway. [ In the rare
	 * event of it not getting flushed for a long time the delay
	 * shouldn't really matter because there's no real memory
	 * pressure for swapout to react to. ]
	 */
	return ptep_test_and_clear_young(vma, address, ptep);
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
int pmdp_clear_flush_young(struct vm_area_struct *vma,
			   unsigned long address, pmd_t *pmdp)
{
	int young;

	VM_BUG_ON(address & ~HPAGE_PMD_MASK);

	young = pmdp_test_and_clear_young(vma, address, pmdp);
	if (young)
		flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE);

	return young;
}
#endif

/**
 * reserve_top_address - reserves a hole in the top of kernel address space
 * @reserve - size of hole to reserve
 *
 * Can be used to relocate the fixmap area and poke a hole in the top
 * of kernel address space to make room for a hypervisor.
 */
void __init reserve_top_address(unsigned long reserve)
{
#ifdef CONFIG_X86_32
	BUG_ON(fixmaps_set > 0);
	__FIXADDR_TOP = round_down(-reserve, 1 << PMD_SHIFT) - PAGE_SIZE;
	printk(KERN_INFO "Reserving virtual address space above 0x%08lx (rounded to 0x%08lx)\n",
	       -reserve, __FIXADDR_TOP + PAGE_SIZE);
#endif
}

int fixmaps_set;

void __native_set_fixmap(enum fixed_addresses idx, pte_t pte)
{
	unsigned long address = __fix_to_virt(idx);

#ifdef CONFIG_X86_64
	/*
	 * Ensure that the static initial page tables are covering the
	 * fixmap completely.
	 */
	BUILD_BUG_ON(__end_of_permanent_fixed_addresses >
		     (FIXMAP_PMD_NUM * PTRS_PER_PTE));
#endif

	if (idx >= __end_of_fixed_addresses) {
		BUG();
		return;
	}
	set_pte_vaddr(address, pte);
	fixmaps_set++;
}

void native_set_fixmap(unsigned /* enum fixed_addresses */ idx,
		       phys_addr_t phys, pgprot_t flags)
{
	/* Sanitize 'flags' against any unsupported bits: */
	pgprot_val(flags) &= __default_kernel_pte_mask;

	__native_set_fixmap(idx, pfn_pte(phys >> PAGE_SHIFT, flags));
}
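
/*
 * __default_kernel_pte_mask clears protection bits the running CPU does
 * not support (e.g. _PAGE_NX on hardware without NX, or _PAGE_GLOBAL
 * when global pages are disabled), so callers may pass generic pgprots.
 */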

#ifdef CONFIG_HAVE_ARCH_HUGE_VMAP
#ifdef CONFIG_X86_5LEVEL
/**
 * p4d_set_huge - setup kernel P4D mapping
 *
 * No 512GB pages yet -- always return 0
 */
int p4d_set_huge(p4d_t *p4d, phys_addr_t addr, pgprot_t prot)
{
	return 0;
}

/**
 * p4d_clear_huge - clear kernel P4D mapping when it is set
 *
 * No 512GB pages yet -- always return 0
 */
int p4d_clear_huge(p4d_t *p4d)
{
	return 0;
}
#endif

/**
 * pud_set_huge - setup kernel PUD mapping
 *
 * MTRRs can override PAT memory types with 4KiB granularity. Therefore, this
 * function sets up a huge page only if any of the following conditions are met:
 *
 * - MTRRs are disabled, or
 *
 * - MTRRs are enabled and the range is completely covered by a single MTRR, or
 *
 * - MTRRs are enabled and the corresponding MTRR memory type is WB, which
 *   has no effect on the requested PAT memory type.
 *
 * Callers should try to decrease page size (1GB -> 2MB -> 4K) if the bigger
 * page mapping attempt fails.
 *
 * Returns 1 on success and 0 on failure.
 */
int pud_set_huge(pud_t *pud, phys_addr_t addr, pgprot_t prot)
{
	u8 mtrr, uniform;

	mtrr = mtrr_type_lookup(addr, addr + PUD_SIZE, &uniform);
	if ((mtrr != MTRR_TYPE_INVALID) && (!uniform) &&
	    (mtrr != MTRR_TYPE_WRBACK))
		return 0;

	/* Bail out if we are on a populated non-leaf entry: */
	if (pud_present(*pud) && !pud_huge(*pud))
		return 0;

	prot = pgprot_4k_2_large(prot);

	set_pte((pte_t *)pud, pfn_pte(
		(u64)addr >> PAGE_SHIFT,
		__pgprot(pgprot_val(prot) | _PAGE_PSE)));

	return 1;
}
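
/*
 * The typical caller is the generic huge-vmap/ioremap code, which tries
 * pud_set_huge() first and, per the comment above, falls back to
 * pmd_set_huge() and then to 4k mappings when a huge mapping is refused.
 */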

/**
 * pmd_set_huge - setup kernel PMD mapping
 *
 * See text over pud_set_huge() above.
 *
 * Returns 1 on success and 0 on failure.
 */
int pmd_set_huge(pmd_t *pmd, phys_addr_t addr, pgprot_t prot)
{
	u8 mtrr, uniform;

	mtrr = mtrr_type_lookup(addr, addr + PMD_SIZE, &uniform);
	if ((mtrr != MTRR_TYPE_INVALID) && (!uniform) &&
	    (mtrr != MTRR_TYPE_WRBACK)) {
		pr_warn_once("%s: Cannot satisfy [mem %#010llx-%#010llx] with a huge-page mapping due to MTRR override.\n",
			     __func__, addr, addr + PMD_SIZE);
		return 0;
	}

	/* Bail out if we are on a populated non-leaf entry: */
	if (pmd_present(*pmd) && !pmd_huge(*pmd))
		return 0;

	prot = pgprot_4k_2_large(prot);

	set_pte((pte_t *)pmd, pfn_pte(
		(u64)addr >> PAGE_SHIFT,
		__pgprot(pgprot_val(prot) | _PAGE_PSE)));

	return 1;
}

/**
 * pud_clear_huge - clear kernel PUD mapping when it is set
 *
 * Returns 1 on success and 0 on failure (no PUD map is found).
 */
int pud_clear_huge(pud_t *pud)
{
	if (pud_large(*pud)) {
		pud_clear(pud);
		return 1;
	}

	return 0;
}

/**
 * pmd_clear_huge - clear kernel PMD mapping when it is set
 *
 * Returns 1 on success and 0 on failure (no PMD map is found).
 */
int pmd_clear_huge(pmd_t *pmd)
{
	if (pmd_large(*pmd)) {
		pmd_clear(pmd);
		return 1;
	}

	return 0;
}

#ifdef CONFIG_X86_64
/**
 * pud_free_pmd_page - Clear pud entry and free pmd page.
 * @pud: Pointer to a PUD.
 * @addr: Virtual address associated with pud.
 *
 * Context: The pud range has been unmapped and TLB purged.
 * Return: 1 if clearing the entry succeeded. 0 otherwise.
 *
 * NOTE: Callers must allow a single page allocation.
 */
int pud_free_pmd_page(pud_t *pud, unsigned long addr)
{
	pmd_t *pmd, *pmd_sv;
	pte_t *pte;
	int i;

	pmd = (pmd_t *)pud_page_vaddr(*pud);
	pmd_sv = (pmd_t *)__get_free_page(GFP_KERNEL);
	if (!pmd_sv)
		return 0;

	for (i = 0; i < PTRS_PER_PMD; i++) {
		pmd_sv[i] = pmd[i];
		if (!pmd_none(pmd[i]))
			pmd_clear(&pmd[i]);
	}

	pud_clear(pud);

	/* INVLPG to clear all paging-structure caches */
	flush_tlb_kernel_range(addr, addr + PAGE_SIZE-1);

	for (i = 0; i < PTRS_PER_PMD; i++) {
		if (!pmd_none(pmd_sv[i])) {
			pte = (pte_t *)pmd_page_vaddr(pmd_sv[i]);
			free_page((unsigned long)pte);
		}
	}

	free_page((unsigned long)pmd_sv);
	free_page((unsigned long)pmd);

	return 1;
}
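
/*
 * The pmd entries are saved in pmd_sv before being cleared so that the
 * pte pages they point to are freed only after the paging-structure
 * caches have been flushed; freeing them while still reachable could let
 * a speculative walk use a freed page.
 */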

/**
 * pmd_free_pte_page - Clear pmd entry and free pte page.
 * @pmd: Pointer to a PMD.
 * @addr: Virtual address associated with pmd.
 *
 * Context: The pmd range has been unmapped and TLB purged.
 * Return: 1 if clearing the entry succeeded. 0 otherwise.
 */
int pmd_free_pte_page(pmd_t *pmd, unsigned long addr)
{
	pte_t *pte;

	pte = (pte_t *)pmd_page_vaddr(*pmd);
	pmd_clear(pmd);

	/* INVLPG to clear all paging-structure caches */
	flush_tlb_kernel_range(addr, addr + PAGE_SIZE-1);

	free_page((unsigned long)pte);

	return 1;
}

#else /* !CONFIG_X86_64 */

int pud_free_pmd_page(pud_t *pud, unsigned long addr)
{
	return pud_none(*pud);
}

/*
 * Disable free page handling on x86-PAE. This assures that ioremap()
 * does not update sync'd pmd entries. See vmalloc_sync_one().
 */
int pmd_free_pte_page(pmd_t *pmd, unsigned long addr)
{
	return pmd_none(*pmd);
}

#endif /* CONFIG_X86_64 */
#endif	/* CONFIG_HAVE_ARCH_HUGE_VMAP */