arch/x86/mm/pgtable.c (git.proxmox.com, mirror_ubuntu-eoan-kernel.git)
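/*
 * Common x86 page-table management: allocation and freeing of pte, pmd,
 * pud and pgd pages, shared by the 32-bit and 64-bit builds, plus the
 * accessed/dirty-bit helpers used by the generic mm layer.
 */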
#include <linux/mm.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/tlb.h>
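/*
 * Allocate one zeroed page to hold a kernel page table. __GFP_REPEAT
 * asks the allocator to retry harder before giving up; __GFP_ZERO
 * means every entry starts out as pte_none().
 */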
pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
{
	return (pte_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO);
}
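/*
 * Allocate a pte page for user mappings. With CONFIG_HIGHPTE the page
 * may come from highmem and is mapped on demand; pgtable_page_ctor()
 * initializes the per-page pte lock and pagetable accounting.
 */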
pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long address)
{
	struct page *pte;

#ifdef CONFIG_HIGHPTE
	pte = alloc_pages(GFP_KERNEL|__GFP_HIGHMEM|__GFP_REPEAT|__GFP_ZERO, 0);
#else
	pte = alloc_pages(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO, 0);
#endif
	if (pte)
		pgtable_page_ctor(pte);
	return pte;
}
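/*
 * Free a pte page through the mmu_gather so the page is only returned
 * to the allocator after the TLB has been flushed; the paravirt hook
 * tells any hypervisor the page is no longer a pagetable.
 */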
void __pte_free_tlb(struct mmu_gather *tlb, struct page *pte)
{
	pgtable_page_dtor(pte);
	paravirt_release_pte(page_to_pfn(pte));
	tlb_remove_page(tlb, pte);
}
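/*
 * The same deferred-free pattern one and two levels up, compiled only
 * when the configuration has three or four pagetable levels.
 */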
#if PAGETABLE_LEVELS > 2
void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd)
{
	paravirt_release_pmd(__pa(pmd) >> PAGE_SHIFT);
	tlb_remove_page(tlb, virt_to_page(pmd));
}
#if PAGETABLE_LEVELS > 3
void __pud_free_tlb(struct mmu_gather *tlb, pud_t *pud)
{
	paravirt_release_pud(__pa(pud) >> PAGE_SHIFT);
	tlb_remove_page(tlb, virt_to_page(pud));
}
#endif	/* PAGETABLE_LEVELS > 3 */
#endif	/* PAGETABLE_LEVELS > 2 */
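/*
 * pgds on pgd_list can all be walked when the kernel part of the
 * address space changes. 64-bit adds every pgd to the list; 32-bit
 * only does so when the kernel pmd is not shared (see pgd_ctor below).
 */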
static inline void pgd_list_add(pgd_t *pgd)
{
	struct page *page = virt_to_page(pgd);

	list_add(&page->lru, &pgd_list);
}

static inline void pgd_list_del(pgd_t *pgd)
{
	struct page *page = virt_to_page(pgd);

	list_del(&page->lru);
}
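/*
 * 64-bit pgd allocation: link the new pgd onto pgd_list, clear the
 * user half and copy the kernel half from init_level4_pgt, which
 * never changes after boot.
 */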
#ifdef CONFIG_X86_64
pgd_t *pgd_alloc(struct mm_struct *mm)
{
	unsigned boundary;
	pgd_t *pgd = (pgd_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT);
	unsigned long flags;
	if (!pgd)
		return NULL;
	spin_lock_irqsave(&pgd_lock, flags);
	pgd_list_add(pgd);
	spin_unlock_irqrestore(&pgd_lock, flags);
	/*
	 * Copy kernel pointers in from init.
	 * Could keep a freelist or slab cache of those because the kernel
	 * part never changes.
	 */
	boundary = pgd_index(__PAGE_OFFSET);
	memset(pgd, 0, boundary * sizeof(pgd_t));
	memcpy(pgd + boundary,
	       init_level4_pgt + boundary,
	       (PTRS_PER_PGD - boundary) * sizeof(pgd_t));
	return pgd;
}
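/* Unlink the pgd from pgd_list and return the page to the allocator. */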
void pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
	unsigned long flags;
	BUG_ON((unsigned long)pgd & (PAGE_SIZE-1));
	spin_lock_irqsave(&pgd_lock, flags);
	pgd_list_del(pgd);
	spin_unlock_irqrestore(&pgd_lock, flags);
	free_page((unsigned long)pgd);
}
#else	/* !CONFIG_X86_64 */
/*
 * List of all pgd's needed for non-PAE so it can invalidate entries
 * in both cached and uncached pgd's; not needed for PAE since the
 * kernel pmd is shared. If PAE were not to share the pmd a similar
 * tactic would be needed. This is essentially codepath-based locking
 * against pageattr.c; it is the unique case in which a valid change
 * of kernel pagetables can't be lazily synchronized by vmalloc faults.
 * vmalloc faults work because attached pagetables are never freed.
 * -- wli
 */
#define UNSHARED_PTRS_PER_PGD				\
	(SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)
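/*
 * Set up a freshly allocated 32-bit pgd: the user entries start out
 * clear, the kernel entries are cloned from swapper_pg_dir when the
 * kernel levels are shared, and otherwise the pgd goes on pgd_list so
 * kernel mapping updates can reach it.
 */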
static void pgd_ctor(void *p)
{
	pgd_t *pgd = p;
	unsigned long flags;

	/* Clear usermode parts of PGD */
	memset(pgd, 0, KERNEL_PGD_BOUNDARY*sizeof(pgd_t));

	spin_lock_irqsave(&pgd_lock, flags);

	/* If the pgd points to a shared pagetable level (either the
	   ptes in non-PAE, or shared PMD in PAE), then just copy the
	   references from swapper_pg_dir. */
	if (PAGETABLE_LEVELS == 2 ||
	    (PAGETABLE_LEVELS == 3 && SHARED_KERNEL_PMD)) {
		clone_pgd_range(pgd + KERNEL_PGD_BOUNDARY,
				swapper_pg_dir + KERNEL_PGD_BOUNDARY,
				KERNEL_PGD_PTRS);
		paravirt_alloc_pmd_clone(__pa(pgd) >> PAGE_SHIFT,
					 __pa(swapper_pg_dir) >> PAGE_SHIFT,
					 KERNEL_PGD_BOUNDARY,
					 KERNEL_PGD_PTRS);
	}

	/* list required to sync kernel mapping updates */
	if (!SHARED_KERNEL_PMD)
		pgd_list_add(pgd);

	spin_unlock_irqrestore(&pgd_lock, flags);
}
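/*
 * Undo pgd_ctor: take the pgd off pgd_list again. With a shared
 * kernel pmd there is nothing to do, since such pgds were never added.
 */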
static void pgd_dtor(void *pgd)
{
	unsigned long flags; /* can be called from interrupt context */

	if (SHARED_KERNEL_PMD)
		return;

	spin_lock_irqsave(&pgd_lock, flags);
	pgd_list_del(pgd);
	spin_unlock_irqrestore(&pgd_lock, flags);
}
#ifdef CONFIG_X86_PAE
/*
 * Mop up any pmd pages which may still be attached to the pgd.
 * Normally they will be freed by munmap/exit_mmap, but any pmd we
 * preallocate which never got a corresponding vma will need to be
 * freed manually.
 */
static void pgd_mop_up_pmds(struct mm_struct *mm, pgd_t *pgdp)
{
	int i;

	for(i = 0; i < UNSHARED_PTRS_PER_PGD; i++) {
		pgd_t pgd = pgdp[i];

		if (pgd_val(pgd) != 0) {
			pmd_t *pmd = (pmd_t *)pgd_page_vaddr(pgd);

			pgdp[i] = native_make_pgd(0);

			paravirt_release_pmd(pgd_val(pgd) >> PAGE_SHIFT);
			pmd_free(mm, pmd);
		}
	}
}
/*
 * In PAE mode, we need to do a cr3 reload (=tlb flush) when
 * updating the top-level pagetable entries to guarantee the
 * processor notices the update. Since this is expensive, and
 * all 4 top-level entries are used almost immediately in a
 * new process's life, we just pre-populate them here.
 *
 * Also, if we're in a paravirt environment where the kernel pmd is
 * not shared between pagetables (!SHARED_KERNEL_PMDS), we allocate
 * and initialize the kernel pmds here.
 */
static int pgd_prepopulate_pmd(struct mm_struct *mm, pgd_t *pgd)
{
	pud_t *pud;
	unsigned long addr;
	int i;

	pud = pud_offset(pgd, 0);
	for (addr = i = 0; i < UNSHARED_PTRS_PER_PGD;
	     i++, pud++, addr += PUD_SIZE) {
		pmd_t *pmd = pmd_alloc_one(mm, addr);

		if (!pmd) {
			pgd_mop_up_pmds(mm, pgd);
			return 0;
		}

		if (i >= KERNEL_PGD_BOUNDARY)
			memcpy(pmd, (pmd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
			       sizeof(pmd_t) * PTRS_PER_PMD);

		pud_populate(mm, pud, pmd);
	}

	return 1;
}
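/*
 * Install a pmd page into a PAE top-level (PDPT) entry; see the notes
 * below on the restricted PDPT format and the required cr3 reload.
 */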
void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
{
	paravirt_alloc_pmd(mm, __pa(pmd) >> PAGE_SHIFT);

	/* Note: almost everything apart from _PAGE_PRESENT is
	   reserved at the pmd (PDPT) level. */
	set_pud(pudp, __pud(__pa(pmd) | _PAGE_PRESENT));

	/*
	 * According to Intel App note "TLBs, Paging-Structure Caches,
	 * and Their Invalidation", April 2007, document 317080-001,
	 * section 8.1: in PAE mode we explicitly have to flush the
	 * TLB via cr3 if the top-level pgd is changed...
	 */
	if (mm == current->active_mm)
		write_cr3(read_cr3());
}
#else  /* !CONFIG_X86_PAE */
/* No need to prepopulate any pagetable entries in non-PAE modes. */
static int pgd_prepopulate_pmd(struct mm_struct *mm, pgd_t *pgd)
{
	return 1;
}

static void pgd_mop_up_pmds(struct mm_struct *mm, pgd_t *pgd)
{
}
#endif	/* CONFIG_X86_PAE */
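/*
 * 32-bit pgd allocation: get a zeroed page, run the ctor, and (under
 * PAE) prepopulate the pmds; any failure unwinds the partial setup.
 */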
pgd_t *pgd_alloc(struct mm_struct *mm)
{
	pgd_t *pgd = (pgd_t *)__get_free_page(GFP_KERNEL | __GFP_ZERO);

	/* so that alloc_pmd can use it */
	mm->pgd = pgd;
	if (pgd)
		pgd_ctor(pgd);

	if (pgd && !pgd_prepopulate_pmd(mm, pgd)) {
		pgd_dtor(pgd);
		free_page((unsigned long)pgd);
		pgd = NULL;
	}

	return pgd;
}
void pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
	pgd_mop_up_pmds(mm, pgd);
	pgd_dtor(pgd);
	free_page((unsigned long)pgd);
}
#endif	/* CONFIG_X86_64 */
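/*
 * Used by the generic fault path to upgrade a pte's access flags.
 * Only a dirtying access that actually changes the entry requires the
 * deferred paravirt update and a TLB flush.
 */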
int ptep_set_access_flags(struct vm_area_struct *vma,
			  unsigned long address, pte_t *ptep,
			  pte_t entry, int dirty)
{
	int changed = !pte_same(*ptep, entry);

	if (changed && dirty) {
		*ptep = entry;
		pte_update_defer(vma->vm_mm, address, ptep);
		flush_tlb_page(vma, address);
	}

	return changed;
}
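/*
 * Atomically clear the accessed bit and report whether it was set;
 * pte_update() lets a paravirt backend observe the change. Page
 * reclaim uses this to track references.
 */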
int ptep_test_and_clear_young(struct vm_area_struct *vma,
			      unsigned long addr, pte_t *ptep)
{
	int ret = 0;

	if (pte_young(*ptep))
		ret = test_and_clear_bit(_PAGE_BIT_ACCESSED,
					 (unsigned long *) &ptep->pte);

	if (ret)
		pte_update(vma->vm_mm, addr, ptep);

	return ret;
}
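/*
 * As above, but also flush the stale TLB entry when the accessed bit
 * really was cleared.
 */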
int ptep_clear_flush_young(struct vm_area_struct *vma,
			   unsigned long address, pte_t *ptep)
{
	int young;

	young = ptep_test_and_clear_young(vma, address, ptep);
	if (young)
		flush_tlb_page(vma, address);

	return young;
}