/* arch/x86/mm/pgtable.c */

#include <linux/mm.h>
#include <linux/gfp.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/tlb.h>
#include <asm/fixmap.h>

#define PGALLOC_GFP (GFP_KERNEL | __GFP_NOTRACK | __GFP_REPEAT | __GFP_ZERO)

#ifdef CONFIG_HIGHPTE
#define PGALLOC_USER_GFP __GFP_HIGHMEM
#else
#define PGALLOC_USER_GFP 0
#endif

gfp_t __userpte_alloc_gfp = PGALLOC_GFP | PGALLOC_USER_GFP;

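/*
 * Pagetable page allocation.  Kernel pagetables always come from
 * lowmem (PGALLOC_GFP), while user pagetables honour
 * __userpte_alloc_gfp and so may be placed in highmem when
 * CONFIG_HIGHPTE is enabled (see the "userpte=" parameter below).
 */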
pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
{
	return (pte_t *)__get_free_page(PGALLOC_GFP);
}

pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long address)
{
	struct page *pte;

	pte = alloc_pages(__userpte_alloc_gfp, 0);
	if (pte)
		pgtable_page_ctor(pte);
	return pte;
}

static int __init setup_userpte(char *arg)
{
	if (!arg)
		return -EINVAL;

	/*
	 * "userpte=nohigh" disables allocation of user pagetables in
	 * high memory.
	 */
	if (strcmp(arg, "nohigh") == 0)
		__userpte_alloc_gfp &= ~__GFP_HIGHMEM;
	else
		return -EINVAL;
	return 0;
}
early_param("userpte", setup_userpte);

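/*
 * Release pagetable pages through the mmu_gather machinery, which
 * batches them and frees them only after the relevant TLB flush; the
 * paravirt hooks give a hypervisor the chance to unpin each page
 * first.
 */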
void ___pte_free_tlb(struct mmu_gather *tlb, struct page *pte)
{
	pgtable_page_dtor(pte);
	paravirt_release_pte(page_to_pfn(pte));
	tlb_remove_page(tlb, pte);
}

#if PAGETABLE_LEVELS > 2
void ___pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd)
{
	paravirt_release_pmd(__pa(pmd) >> PAGE_SHIFT);
	tlb_remove_page(tlb, virt_to_page(pmd));
}

#if PAGETABLE_LEVELS > 3
void ___pud_free_tlb(struct mmu_gather *tlb, pud_t *pud)
{
	paravirt_release_pud(__pa(pud) >> PAGE_SHIFT);
	tlb_remove_page(tlb, virt_to_page(pud));
}
#endif	/* PAGETABLE_LEVELS > 3 */
#endif	/* PAGETABLE_LEVELS > 2 */

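/*
 * pgd_list links the struct page of every pgd in the system through
 * its otherwise-unused lru field, so that changes to the kernel part
 * of the address space can be propagated to all pagetables when the
 * kernel pmd is not shared.  Callers must hold pgd_lock.
 */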
static inline void pgd_list_add(pgd_t *pgd)
{
	struct page *page = virt_to_page(pgd);

	list_add(&page->lru, &pgd_list);
}

static inline void pgd_list_del(pgd_t *pgd)
{
	struct page *page = virt_to_page(pgd);

	list_del(&page->lru);
}

#define UNSHARED_PTRS_PER_PGD				\
	(SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)

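/*
 * Stash the mm that owns a pgd in the pgd page's struct page, reusing
 * the index field (unused for pagetable pages).  pgd_page_get_mm() is
 * the reader side, for code that walks pgd_list and needs the mm
 * behind each pgd.
 */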
static void pgd_set_mm(pgd_t *pgd, struct mm_struct *mm)
{
	BUILD_BUG_ON(sizeof(virt_to_page(pgd)->index) < sizeof(mm));
	virt_to_page(pgd)->index = (pgoff_t)mm;
}

struct mm_struct *pgd_page_get_mm(struct page *page)
{
	return (struct mm_struct *)page->index;
}

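/*
 * Illustrative sketch (not from this file): a pgd_list walker, such as
 * the pageattr code, would pair these helpers roughly like this.
 * sync_one_pgd() is a made-up name, shown only to make the page->index
 * trick concrete:
 *
 *	struct page *page;
 *
 *	spin_lock(&pgd_lock);
 *	list_for_each_entry(page, &pgd_list, lru) {
 *		pgd_t *pgd = (pgd_t *)page_address(page);
 *		struct mm_struct *mm = pgd_page_get_mm(page);
 *
 *		sync_one_pgd(mm, pgd);
 *	}
 *	spin_unlock(&pgd_lock);
 */
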
static void pgd_ctor(struct mm_struct *mm, pgd_t *pgd)
{
	/*
	 * If the pgd points to a shared pagetable level (either the
	 * ptes in non-PAE, or shared PMD in PAE), then just copy the
	 * references from swapper_pg_dir.
	 */
	if (PAGETABLE_LEVELS == 2 ||
	    (PAGETABLE_LEVELS == 3 && SHARED_KERNEL_PMD) ||
	    PAGETABLE_LEVELS == 4) {
		clone_pgd_range(pgd + KERNEL_PGD_BOUNDARY,
				swapper_pg_dir + KERNEL_PGD_BOUNDARY,
				KERNEL_PGD_PTRS);
	}

	/* list required to sync kernel mapping updates */
	if (!SHARED_KERNEL_PMD) {
		pgd_set_mm(pgd, mm);
		pgd_list_add(pgd);
	}
}

static void pgd_dtor(pgd_t *pgd)
{
	if (SHARED_KERNEL_PMD)
		return;

	spin_lock(&pgd_lock);
	pgd_list_del(pgd);
	spin_unlock(&pgd_lock);
}

/*
 * List of all pgd's needed for non-PAE so it can invalidate entries
 * in both cached and uncached pgd's; not needed for PAE since the
 * kernel pmd is shared. If PAE were not to share the pmd a similar
 * tactic would be needed. This is essentially codepath-based locking
 * against pageattr.c; it is the unique case in which a valid change
 * of kernel pagetables can't be lazily synchronized by vmalloc faults.
 * vmalloc faults work because attached pagetables are never freed.
 * -- wli
 */

#ifdef CONFIG_X86_PAE
/*
 * In PAE mode, we need to do a cr3 reload (= tlb flush) when updating
 * the top-level pagetable entries to guarantee the processor notices
 * the update. Since this is expensive, and all 4 top-level entries are
 * used almost immediately in a new process's life, we just
 * pre-populate them here.
 *
 * Also, if we're in a paravirt environment where the kernel pmd is not
 * shared between pagetables (!SHARED_KERNEL_PMD), we allocate and
 * initialize the kernel pmds here.
 */
#define PREALLOCATED_PMDS	UNSHARED_PTRS_PER_PGD

void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
{
	paravirt_alloc_pmd(mm, __pa(pmd) >> PAGE_SHIFT);

	/*
	 * Note: almost everything apart from _PAGE_PRESENT is
	 * reserved at the pmd (PDPT) level.
	 */
	set_pud(pudp, __pud(__pa(pmd) | _PAGE_PRESENT));

	/*
	 * According to Intel App note "TLBs, Paging-Structure Caches,
	 * and Their Invalidation", April 2007, document 317080-001,
	 * section 8.1: in PAE mode we explicitly have to flush the
	 * TLB via cr3 if the top-level pgd is changed...
	 */
	if (mm == current->active_mm)
		write_cr3(read_cr3());
}
#else  /* !CONFIG_X86_PAE */

/* No need to prepopulate any pagetable entries in non-PAE modes. */
#define PREALLOCATED_PMDS	0

#endif	/* CONFIG_X86_PAE */

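/*
 * All-or-nothing preallocation of the pmds needed above: on failure,
 * preallocate_pmds() hands everything back via free_pmds() and
 * returns -ENOMEM.  With PREALLOCATED_PMDS == 0 both loops vanish.
 */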
static void free_pmds(pmd_t *pmds[])
{
	int i;

	for (i = 0; i < PREALLOCATED_PMDS; i++)
		if (pmds[i])
			free_page((unsigned long)pmds[i]);
}

static int preallocate_pmds(pmd_t *pmds[])
{
	int i;
	bool failed = false;

	for (i = 0; i < PREALLOCATED_PMDS; i++) {
		pmd_t *pmd = (pmd_t *)__get_free_page(PGALLOC_GFP);
		if (pmd == NULL)
			failed = true;
		pmds[i] = pmd;
	}

	if (failed) {
		free_pmds(pmds);
		return -ENOMEM;
	}

	return 0;
}

/*
 * Mop up any pmd pages which may still be attached to the pgd.
 * Normally they will be freed by munmap/exit_mmap, but any pmd we
 * preallocate which never got a corresponding vma will need to be
 * freed manually.
 */
static void pgd_mop_up_pmds(struct mm_struct *mm, pgd_t *pgdp)
{
	int i;

	for (i = 0; i < PREALLOCATED_PMDS; i++) {
		pgd_t pgd = pgdp[i];

		if (pgd_val(pgd) != 0) {
			pmd_t *pmd = (pmd_t *)pgd_page_vaddr(pgd);

			pgdp[i] = native_make_pgd(0);

			paravirt_release_pmd(pgd_val(pgd) >> PAGE_SHIFT);
			pmd_free(mm, pmd);
		}
	}
}

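/*
 * Wire the preallocated pmds into a fresh pgd.  Slots covering kernel
 * space (i >= KERNEL_PGD_BOUNDARY) first get a copy of the matching
 * init_mm entries from swapper_pg_dir.
 */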
static void pgd_prepopulate_pmd(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmds[])
{
	pud_t *pud;
	unsigned long addr;
	int i;

	if (PREALLOCATED_PMDS == 0) /* Work around gcc-3.4.x bug */
		return;

	pud = pud_offset(pgd, 0);

	for (addr = i = 0; i < PREALLOCATED_PMDS;
	     i++, pud++, addr += PUD_SIZE) {
		pmd_t *pmd = pmds[i];

		if (i >= KERNEL_PGD_BOUNDARY)
			memcpy(pmd, (pmd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
			       sizeof(pmd_t) * PTRS_PER_PMD);

		pud_populate(mm, pud, pmd);
	}
}

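/*
 * Allocate and initialize a new pgd: grab the page, preallocate the
 * pmds, then, under pgd_lock, hook the pgd into pgd_list and populate
 * it, so that walkers of the list never see a half-constructed pgd.
 */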
pgd_t *pgd_alloc(struct mm_struct *mm)
{
	pgd_t *pgd;
	pmd_t *pmds[PREALLOCATED_PMDS];

	pgd = (pgd_t *)__get_free_page(PGALLOC_GFP);

	if (pgd == NULL)
		goto out;

	mm->pgd = pgd;

	if (preallocate_pmds(pmds) != 0)
		goto out_free_pgd;

	if (paravirt_pgd_alloc(mm) != 0)
		goto out_free_pmds;

	/*
	 * Make sure that pre-populating the pmds is atomic with
	 * respect to anything walking the pgd_list, so that they
	 * never see a partially populated pgd.
	 */
	spin_lock(&pgd_lock);

	pgd_ctor(mm, pgd);
	pgd_prepopulate_pmd(mm, pgd, pmds);

	spin_unlock(&pgd_lock);

	return pgd;

out_free_pmds:
	free_pmds(pmds);
out_free_pgd:
	free_page((unsigned long)pgd);
out:
	return NULL;
}

void pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
	pgd_mop_up_pmds(mm, pgd);
	pgd_dtor(pgd);
	paravirt_pgd_free(mm, pgd);
	free_page((unsigned long)pgd);
}

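/*
 * Install updated access/dirty flags in a pte, returning whether the
 * new entry differed from the old one.  The write and the TLB flush
 * are skipped unless the entry changed and the update marks it dirty.
 * pmdp_set_access_flags() below is the transparent-hugepage analogue.
 */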
int ptep_set_access_flags(struct vm_area_struct *vma,
			  unsigned long address, pte_t *ptep,
			  pte_t entry, int dirty)
{
	int changed = !pte_same(*ptep, entry);

	if (changed && dirty) {
		*ptep = entry;
		pte_update_defer(vma->vm_mm, address, ptep);
		flush_tlb_page(vma, address);
	}

	return changed;
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
int pmdp_set_access_flags(struct vm_area_struct *vma,
			  unsigned long address, pmd_t *pmdp,
			  pmd_t entry, int dirty)
{
	int changed = !pmd_same(*pmdp, entry);

	VM_BUG_ON(address & ~HPAGE_PMD_MASK);

	if (changed && dirty) {
		*pmdp = entry;
		pmd_update_defer(vma->vm_mm, address, pmdp);
		flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
	}

	return changed;
}
#endif

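/*
 * Atomically clear the accessed bit and report whether it was set.
 * test_and_clear_bit() guards against racing hardware updates of the
 * entry; the paravirt update hook only fires when the bit flipped.
 */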
int ptep_test_and_clear_young(struct vm_area_struct *vma,
			      unsigned long addr, pte_t *ptep)
{
	int ret = 0;

	if (pte_young(*ptep))
		ret = test_and_clear_bit(_PAGE_BIT_ACCESSED,
					 (unsigned long *)&ptep->pte);

	if (ret)
		pte_update(vma->vm_mm, addr, ptep);

	return ret;
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
int pmdp_test_and_clear_young(struct vm_area_struct *vma,
			      unsigned long addr, pmd_t *pmdp)
{
	int ret = 0;

	if (pmd_young(*pmdp))
		ret = test_and_clear_bit(_PAGE_BIT_ACCESSED,
					 (unsigned long *)pmdp);

	if (ret)
		pmd_update(vma->vm_mm, addr, pmdp);

	return ret;
}
#endif

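/*
 * As above, but also flush the TLB entry when the accessed bit was
 * set, forcing hardware to mark the entry young again on next use.
 */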
int ptep_clear_flush_young(struct vm_area_struct *vma,
			   unsigned long address, pte_t *ptep)
{
	int young;

	young = ptep_test_and_clear_young(vma, address, ptep);
	if (young)
		flush_tlb_page(vma, address);

	return young;
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
int pmdp_clear_flush_young(struct vm_area_struct *vma,
			   unsigned long address, pmd_t *pmdp)
{
	int young;

	VM_BUG_ON(address & ~HPAGE_PMD_MASK);

	young = pmdp_test_and_clear_young(vma, address, pmdp);
	if (young)
		flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE);

	return young;
}

void pmdp_splitting_flush(struct vm_area_struct *vma,
			  unsigned long address, pmd_t *pmdp)
{
	int set;

	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
	set = !test_and_set_bit(_PAGE_BIT_SPLITTING,
				(unsigned long *)pmdp);
	if (set) {
		pmd_update(vma->vm_mm, address, pmdp);
		/* need tlb flush only to serialize against gup-fast */
		flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
	}
}
#endif

/**
 * reserve_top_address - reserves a hole in the top of kernel address space
 * @reserve: size of hole to reserve
 *
 * Can be used to relocate the fixmap area and poke a hole in the top
 * of kernel address space to make room for a hypervisor.
 */
void __init reserve_top_address(unsigned long reserve)
{
#ifdef CONFIG_X86_32
	BUG_ON(fixmaps_set > 0);
	printk(KERN_INFO "Reserving virtual address space above 0x%08x\n",
	       (int)-reserve);
	__FIXADDR_TOP = -reserve - PAGE_SIZE;
#endif
}

int fixmaps_set;

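/*
 * Fixmap plumbing: __native_set_fixmap() installs a ready-made pte for
 * a compile-time fixmap slot, and native_set_fixmap() builds that pte
 * from a physical address plus protection bits.
 *
 * Illustrative (hypothetical) use, mapping a device page uncached:
 *
 *	native_set_fixmap(FIX_APIC_BASE, mp_lapic_addr,
 *			  PAGE_KERNEL_NOCACHE);
 */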
void __native_set_fixmap(enum fixed_addresses idx, pte_t pte)
{
	unsigned long address = __fix_to_virt(idx);

	if (idx >= __end_of_fixed_addresses) {
		BUG();
		return;
	}
	set_pte_vaddr(address, pte);
	fixmaps_set++;
}

void native_set_fixmap(enum fixed_addresses idx, phys_addr_t phys,
		       pgprot_t flags)
{
	__native_set_fixmap(idx, pfn_pte(phys >> PAGE_SHIFT, flags));
}