arch/x86/mm/pgtable.c
1 // SPDX-License-Identifier: GPL-2.0
2 #include <linux/mm.h>
3 #include <linux/gfp.h>
4 #include <linux/hugetlb.h>
5 #include <asm/pgalloc.h>
6 #include <asm/pgtable.h>
7 #include <asm/tlb.h>
8 #include <asm/fixmap.h>
9 #include <asm/mtrr.h>
10
11 #ifdef CONFIG_DYNAMIC_PHYSICAL_MASK
12 phys_addr_t physical_mask __ro_after_init = (1ULL << __PHYSICAL_MASK_SHIFT) - 1;
13 EXPORT_SYMBOL(physical_mask);
14 #endif
15
16 #define PGALLOC_GFP (GFP_KERNEL_ACCOUNT | __GFP_ZERO)
17
18 #ifdef CONFIG_HIGHPTE
19 #define PGALLOC_USER_GFP __GFP_HIGHMEM
20 #else
21 #define PGALLOC_USER_GFP 0
22 #endif
23
24 gfp_t __userpte_alloc_gfp = PGALLOC_GFP | PGALLOC_USER_GFP;
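/*
 * Note on the GFP flags above: PGALLOC_GFP gives zeroed pages that are
 * charged to the caller's memory cgroup (GFP_KERNEL_ACCOUNT | __GFP_ZERO).
 * Allocations for kernel page tables (pte_alloc_one_kernel(), and
 * preallocate_pmds() for init_mm) strip __GFP_ACCOUNT again, since kernel
 * tables should not be charged to a cgroup.  On CONFIG_HIGHPTE kernels,
 * user PTE pages may additionally come from highmem unless "userpte=nohigh"
 * is passed (see setup_userpte() below).
 */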
25
26 pte_t *pte_alloc_one_kernel(struct mm_struct *mm)
27 {
28 return (pte_t *)__get_free_page(PGALLOC_GFP & ~__GFP_ACCOUNT);
29 }
30
31 pgtable_t pte_alloc_one(struct mm_struct *mm)
32 {
33 struct page *pte;
34
35 pte = alloc_pages(__userpte_alloc_gfp, 0);
36 if (!pte)
37 return NULL;
38 if (!pgtable_page_ctor(pte)) {
39 __free_page(pte);
40 return NULL;
41 }
42 return pte;
43 }
44
45 static int __init setup_userpte(char *arg)
46 {
47 if (!arg)
48 return -EINVAL;
49
50 /*
51 * "userpte=nohigh" disables allocation of user pagetables in
52 * high memory.
53 */
54 if (strcmp(arg, "nohigh") == 0)
55 __userpte_alloc_gfp &= ~__GFP_HIGHMEM;
56 else
57 return -EINVAL;
58 return 0;
59 }
60 early_param("userpte", setup_userpte);
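/*
 * Example: booting a CONFIG_HIGHPTE kernel with "userpte=nohigh" clears
 * __GFP_HIGHMEM from __userpte_alloc_gfp above, so user PTE pages are
 * always allocated from lowmem.
 */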
61
62 void ___pte_free_tlb(struct mmu_gather *tlb, struct page *pte)
63 {
64 pgtable_page_dtor(pte);
65 paravirt_release_pte(page_to_pfn(pte));
66 paravirt_tlb_remove_table(tlb, pte);
67 }
68
69 #if CONFIG_PGTABLE_LEVELS > 2
70 void ___pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd)
71 {
72 struct page *page = virt_to_page(pmd);
73 paravirt_release_pmd(__pa(pmd) >> PAGE_SHIFT);
74 /*
75 * NOTE! For PAE, any changes to the top page-directory-pointer-table
76 * entries need a full cr3 reload to flush.
77 */
78 #ifdef CONFIG_X86_PAE
79 tlb->need_flush_all = 1;
80 #endif
81 pgtable_pmd_page_dtor(page);
82 paravirt_tlb_remove_table(tlb, page);
83 }
84
85 #if CONFIG_PGTABLE_LEVELS > 3
86 void ___pud_free_tlb(struct mmu_gather *tlb, pud_t *pud)
87 {
88 paravirt_release_pud(__pa(pud) >> PAGE_SHIFT);
89 paravirt_tlb_remove_table(tlb, virt_to_page(pud));
90 }
91
92 #if CONFIG_PGTABLE_LEVELS > 4
93 void ___p4d_free_tlb(struct mmu_gather *tlb, p4d_t *p4d)
94 {
95 paravirt_release_p4d(__pa(p4d) >> PAGE_SHIFT);
96 paravirt_tlb_remove_table(tlb, virt_to_page(p4d));
97 }
98 #endif /* CONFIG_PGTABLE_LEVELS > 4 */
99 #endif /* CONFIG_PGTABLE_LEVELS > 3 */
100 #endif /* CONFIG_PGTABLE_LEVELS > 2 */
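/*
 * Note on the ___*_free_tlb() helpers above: the page-table pages are not
 * freed directly.  paravirt_tlb_remove_table() queues them in the
 * mmu_gather, and they are only released after the TLB has been flushed,
 * so the pages cannot be reused while another CPU (e.g. a lockless
 * fast-GUP walker) might still be walking them.
 */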
101
102 static inline void pgd_list_add(pgd_t *pgd)
103 {
104 struct page *page = virt_to_page(pgd);
105
106 list_add(&page->lru, &pgd_list);
107 }
108
109 static inline void pgd_list_del(pgd_t *pgd)
110 {
111 struct page *page = virt_to_page(pgd);
112
113 list_del(&page->lru);
114 }
115
116 #define UNSHARED_PTRS_PER_PGD \
117 (SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)
118 #define MAX_UNSHARED_PTRS_PER_PGD \
119 max_t(size_t, KERNEL_PGD_BOUNDARY, PTRS_PER_PGD)
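/*
 * UNSHARED_PTRS_PER_PGD is the number of pgd entries private to each mm:
 * if the kernel pmds are shared with swapper_pg_dir, only the user portion
 * below KERNEL_PGD_BOUNDARY is per-mm, otherwise every entry is.  Since
 * SHARED_KERNEL_PMD need not be a compile-time constant, the MAX_* variant
 * provides a constant upper bound used to size on-stack arrays in
 * pgd_alloc().
 */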
120
121
122 static void pgd_set_mm(pgd_t *pgd, struct mm_struct *mm)
123 {
124 virt_to_page(pgd)->pt_mm = mm;
125 }
126
127 struct mm_struct *pgd_page_get_mm(struct page *page)
128 {
129 return page->pt_mm;
130 }
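/*
 * The owning mm is stashed in the pgd page's struct page (pt_mm) so that
 * code walking pgd_list (for example the pageattr/vmalloc synchronization
 * described in the comment below) can get back from a pgd page to its mm.
 */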
131
132 static void pgd_ctor(struct mm_struct *mm, pgd_t *pgd)
133 {
134 /* If the pgd points to a shared pagetable level (either the
135 ptes in non-PAE, or shared PMD in PAE), then just copy the
136 references from swapper_pg_dir. */
137 if (CONFIG_PGTABLE_LEVELS == 2 ||
138 (CONFIG_PGTABLE_LEVELS == 3 && SHARED_KERNEL_PMD) ||
139 CONFIG_PGTABLE_LEVELS >= 4) {
140 clone_pgd_range(pgd + KERNEL_PGD_BOUNDARY,
141 swapper_pg_dir + KERNEL_PGD_BOUNDARY,
142 KERNEL_PGD_PTRS);
143 }
144
145 /* list required to sync kernel mapping updates */
146 if (!SHARED_KERNEL_PMD) {
147 pgd_set_mm(pgd, mm);
148 pgd_list_add(pgd);
149 }
150 }
151
152 static void pgd_dtor(pgd_t *pgd)
153 {
154 if (SHARED_KERNEL_PMD)
155 return;
156
157 spin_lock(&pgd_lock);
158 pgd_list_del(pgd);
159 spin_unlock(&pgd_lock);
160 }
161
162 /*
163 * List of all pgd's needed for non-PAE so it can invalidate entries
164 * in both cached and uncached pgd's; not needed for PAE since the
165 * kernel pmd is shared. If PAE were not to share the pmd a similar
166 * tactic would be needed. This is essentially codepath-based locking
167 * against pageattr.c; it is the unique case in which a valid change
168 * of kernel pagetables can't be lazily synchronized by vmalloc faults.
169 * vmalloc faults work because attached pagetables are never freed.
170 * -- nyc
171 */
172
173 #ifdef CONFIG_X86_PAE
174 /*
175 * In PAE mode, we need to do a cr3 reload (=tlb flush) when
176 * updating the top-level pagetable entries to guarantee the
177 * processor notices the update. Since this is expensive, and
178 * all 4 top-level entries are used almost immediately in a
179 * new process's life, we just pre-populate them here.
180 *
181 * Also, if we're in a paravirt environment where the kernel pmd is
182  * not shared between pagetables (!SHARED_KERNEL_PMD), we allocate
183 * and initialize the kernel pmds here.
184 */
185 #define PREALLOCATED_PMDS UNSHARED_PTRS_PER_PGD
186 #define MAX_PREALLOCATED_PMDS MAX_UNSHARED_PTRS_PER_PGD
187
188 /*
189 * We allocate separate PMDs for the kernel part of the user page-table
190 * when PTI is enabled. We need them to map the per-process LDT into the
191 * user-space page-table.
192 */
193 #define PREALLOCATED_USER_PMDS (static_cpu_has(X86_FEATURE_PTI) ? \
194 KERNEL_PGD_PTRS : 0)
195 #define MAX_PREALLOCATED_USER_PMDS KERNEL_PGD_PTRS
196
197 void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
198 {
199 paravirt_alloc_pmd(mm, __pa(pmd) >> PAGE_SHIFT);
200
201 /* Note: almost everything apart from _PAGE_PRESENT is
202 reserved at the pmd (PDPT) level. */
203 set_pud(pudp, __pud(__pa(pmd) | _PAGE_PRESENT));
204
205 /*
206 * According to Intel App note "TLBs, Paging-Structure Caches,
207 * and Their Invalidation", April 2007, document 317080-001,
208 * section 8.1: in PAE mode we explicitly have to flush the
209 * TLB via cr3 if the top-level pgd is changed...
210 */
211 flush_tlb_mm(mm);
212 }
213 #else /* !CONFIG_X86_PAE */
214
215 /* No need to prepopulate any pagetable entries in non-PAE modes. */
216 #define PREALLOCATED_PMDS 0
217 #define MAX_PREALLOCATED_PMDS 0
218 #define PREALLOCATED_USER_PMDS 0
219 #define MAX_PREALLOCATED_USER_PMDS 0
220 #endif /* CONFIG_X86_PAE */
221
222 static void free_pmds(struct mm_struct *mm, pmd_t *pmds[], int count)
223 {
224 int i;
225
226 for (i = 0; i < count; i++)
227 if (pmds[i]) {
228 pgtable_pmd_page_dtor(virt_to_page(pmds[i]));
229 free_page((unsigned long)pmds[i]);
230 mm_dec_nr_pmds(mm);
231 }
232 }
233
234 static int preallocate_pmds(struct mm_struct *mm, pmd_t *pmds[], int count)
235 {
236 int i;
237 bool failed = false;
238 gfp_t gfp = PGALLOC_GFP;
239
240 if (mm == &init_mm)
241 gfp &= ~__GFP_ACCOUNT;
242
243 for (i = 0; i < count; i++) {
244 pmd_t *pmd = (pmd_t *)__get_free_page(gfp);
245 if (!pmd)
246 failed = true;
247 if (pmd && !pgtable_pmd_page_ctor(virt_to_page(pmd))) {
248 free_page((unsigned long)pmd);
249 pmd = NULL;
250 failed = true;
251 }
252 if (pmd)
253 mm_inc_nr_pmds(mm);
254 pmds[i] = pmd;
255 }
256
257 if (failed) {
258 free_pmds(mm, pmds, count);
259 return -ENOMEM;
260 }
261
262 return 0;
263 }
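/*
 * preallocate_pmds() is all-or-nothing: if any of the 'count' pmd pages
 * cannot be allocated, or its page-table ctor fails, everything allocated
 * so far is torn down again and -ENOMEM is returned.  pgd_alloc() below
 * relies on this when preallocating the kernel pmds and, with PTI, the
 * user pmds.
 */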
264
265 /*
266 * Mop up any pmd pages which may still be attached to the pgd.
267 * Normally they will be freed by munmap/exit_mmap, but any pmd we
268 * preallocate which never got a corresponding vma will need to be
269 * freed manually.
270 */
271 static void mop_up_one_pmd(struct mm_struct *mm, pgd_t *pgdp)
272 {
273 pgd_t pgd = *pgdp;
274
275 if (pgd_val(pgd) != 0) {
276 pmd_t *pmd = (pmd_t *)pgd_page_vaddr(pgd);
277
278 pgd_clear(pgdp);
279
280 paravirt_release_pmd(pgd_val(pgd) >> PAGE_SHIFT);
281 pmd_free(mm, pmd);
282 mm_dec_nr_pmds(mm);
283 }
284 }
285
286 static void pgd_mop_up_pmds(struct mm_struct *mm, pgd_t *pgdp)
287 {
288 int i;
289
290 for (i = 0; i < PREALLOCATED_PMDS; i++)
291 mop_up_one_pmd(mm, &pgdp[i]);
292
293 #ifdef CONFIG_PAGE_TABLE_ISOLATION
294
295 if (!static_cpu_has(X86_FEATURE_PTI))
296 return;
297
298 pgdp = kernel_to_user_pgdp(pgdp);
299
300 for (i = 0; i < PREALLOCATED_USER_PMDS; i++)
301 mop_up_one_pmd(mm, &pgdp[i + KERNEL_PGD_BOUNDARY]);
302 #endif
303 }
304
305 static void pgd_prepopulate_pmd(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmds[])
306 {
307 p4d_t *p4d;
308 pud_t *pud;
309 int i;
310
311 if (PREALLOCATED_PMDS == 0) /* Work around gcc-3.4.x bug */
312 return;
313
314 p4d = p4d_offset(pgd, 0);
315 pud = pud_offset(p4d, 0);
316
317 for (i = 0; i < PREALLOCATED_PMDS; i++, pud++) {
318 pmd_t *pmd = pmds[i];
319
320 if (i >= KERNEL_PGD_BOUNDARY)
321 memcpy(pmd, (pmd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
322 sizeof(pmd_t) * PTRS_PER_PMD);
323
324 pud_populate(mm, pud, pmd);
325 }
326 }
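/*
 * On PAE the pmds hooked in above must carry the kernel mappings: entries
 * at or above KERNEL_PGD_BOUNDARY are filled from swapper_pg_dir before
 * being installed, so every new mm immediately sees the kernel address
 * space.  On non-PAE configurations PREALLOCATED_PMDS is 0 and this
 * function is a no-op.
 */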
327
328 #ifdef CONFIG_PAGE_TABLE_ISOLATION
329 static void pgd_prepopulate_user_pmd(struct mm_struct *mm,
330 pgd_t *k_pgd, pmd_t *pmds[])
331 {
332 pgd_t *s_pgd = kernel_to_user_pgdp(swapper_pg_dir);
333 pgd_t *u_pgd = kernel_to_user_pgdp(k_pgd);
334 p4d_t *u_p4d;
335 pud_t *u_pud;
336 int i;
337
338 u_p4d = p4d_offset(u_pgd, 0);
339 u_pud = pud_offset(u_p4d, 0);
340
341 s_pgd += KERNEL_PGD_BOUNDARY;
342 u_pud += KERNEL_PGD_BOUNDARY;
343
344 for (i = 0; i < PREALLOCATED_USER_PMDS; i++, u_pud++, s_pgd++) {
345 pmd_t *pmd = pmds[i];
346
347 memcpy(pmd, (pmd_t *)pgd_page_vaddr(*s_pgd),
348 sizeof(pmd_t) * PTRS_PER_PMD);
349
350 pud_populate(mm, u_pud, pmd);
351 }
352
353 }
354 #else
355 static void pgd_prepopulate_user_pmd(struct mm_struct *mm,
356 pgd_t *k_pgd, pmd_t *pmds[])
357 {
358 }
359 #endif
360 /*
361  * Xen paravirt assumes that the pgd table occupies one whole page, and the
362  * 64-bit kernel makes the same assumption.
363  *
364  * A kernel using PAE paging that is not running as a Xen domain, however,
365  * only needs to allocate 32 bytes for the pgd instead of one page.
366 */
367 #ifdef CONFIG_X86_PAE
368
369 #include <linux/slab.h>
370
371 #define PGD_SIZE (PTRS_PER_PGD * sizeof(pgd_t))
372 #define PGD_ALIGN 32
373
374 static struct kmem_cache *pgd_cache;
375
376 void __init pgd_cache_init(void)
377 {
378 /*
379  * When the PAE kernel is running as a Xen domain, it does not use a
380  * shared kernel pmd, which in turn requires a whole page for the pgd.
381 */
382 if (!SHARED_KERNEL_PMD)
383 return;
384
385 /*
386  * When the PAE kernel is not running as a Xen domain, it uses a
387  * shared kernel pmd, which does not require a whole page for the
388  * pgd; 32 bytes are enough. During boot we therefore create a
389  * 32-byte slab for pgd table allocations.
390 */
391 pgd_cache = kmem_cache_create("pgd_cache", PGD_SIZE, PGD_ALIGN,
392 SLAB_PANIC, NULL);
393 }
394
395 static inline pgd_t *_pgd_alloc(void)
396 {
397 /*
398  * Without SHARED_KERNEL_PMD the PAE kernel is running as a Xen
399  * domain, so allocate a whole page for the pgd.
400 */
401 if (!SHARED_KERNEL_PMD)
402 return (pgd_t *)__get_free_pages(PGALLOC_GFP,
403 PGD_ALLOCATION_ORDER);
404
405 /*
406  * Otherwise the PAE kernel is not running as a Xen domain, so a
407  * 32-byte slab object is enough and saves memory.
408 */
409 return kmem_cache_alloc(pgd_cache, PGALLOC_GFP);
410 }
411
412 static inline void _pgd_free(pgd_t *pgd)
413 {
414 if (!SHARED_KERNEL_PMD)
415 free_pages((unsigned long)pgd, PGD_ALLOCATION_ORDER);
416 else
417 kmem_cache_free(pgd_cache, pgd);
418 }
419 #else
420
421 void __init pgd_cache_init(void)
422 {
423 }
424
425 static inline pgd_t *_pgd_alloc(void)
426 {
427 return (pgd_t *)__get_free_pages(PGALLOC_GFP, PGD_ALLOCATION_ORDER);
428 }
429
430 static inline void _pgd_free(pgd_t *pgd)
431 {
432 free_pages((unsigned long)pgd, PGD_ALLOCATION_ORDER);
433 }
434 #endif /* CONFIG_X86_PAE */
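/*
 * _pgd_alloc()/_pgd_free() hide the two allocation strategies from
 * pgd_alloc(): a 32-byte slab object for the shared-pmd PAE case, or
 * __get_free_pages(..., PGD_ALLOCATION_ORDER) otherwise.  With page-table
 * isolation the allocation order is raised so that the user-space copy of
 * the pgd can live in the page right next to the kernel one; see
 * kernel_to_user_pgdp() for how the two are switched between.
 */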
435
436 pgd_t *pgd_alloc(struct mm_struct *mm)
437 {
438 pgd_t *pgd;
439 pmd_t *u_pmds[MAX_PREALLOCATED_USER_PMDS];
440 pmd_t *pmds[MAX_PREALLOCATED_PMDS];
441
442 pgd = _pgd_alloc();
443
444 if (pgd == NULL)
445 goto out;
446
447 mm->pgd = pgd;
448
449 if (preallocate_pmds(mm, pmds, PREALLOCATED_PMDS) != 0)
450 goto out_free_pgd;
451
452 if (preallocate_pmds(mm, u_pmds, PREALLOCATED_USER_PMDS) != 0)
453 goto out_free_pmds;
454
455 if (paravirt_pgd_alloc(mm) != 0)
456 goto out_free_user_pmds;
457
458 /*
459 * Make sure that pre-populating the pmds is atomic with
460 * respect to anything walking the pgd_list, so that they
461 * never see a partially populated pgd.
462 */
463 spin_lock(&pgd_lock);
464
465 pgd_ctor(mm, pgd);
466 pgd_prepopulate_pmd(mm, pgd, pmds);
467 pgd_prepopulate_user_pmd(mm, pgd, u_pmds);
468
469 spin_unlock(&pgd_lock);
470
471 return pgd;
472
473 out_free_user_pmds:
474 free_pmds(mm, u_pmds, PREALLOCATED_USER_PMDS);
475 out_free_pmds:
476 free_pmds(mm, pmds, PREALLOCATED_PMDS);
477 out_free_pgd:
478 _pgd_free(pgd);
479 out:
480 return NULL;
481 }
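/*
 * Illustrative caller, simplified from mm_alloc_pgd() in kernel/fork.c,
 * which runs for every new mm created by fork()/exec():
 *
 *	mm->pgd = pgd_alloc(mm);
 *	if (unlikely(!mm->pgd))
 *		return -ENOMEM;
 */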
482
483 void pgd_free(struct mm_struct *mm, pgd_t *pgd)
484 {
485 pgd_mop_up_pmds(mm, pgd);
486 pgd_dtor(pgd);
487 paravirt_pgd_free(mm, pgd);
488 _pgd_free(pgd);
489 }
490
491 /*
492 * Used to set accessed or dirty bits in the page table entries
493 * on other architectures. On x86, the accessed and dirty bits
494 * are tracked by hardware. However, do_wp_page calls this function
495 * to also make the pte writeable at the same time the dirty bit is
496 * set. In that case we do actually need to write the PTE.
497 */
498 int ptep_set_access_flags(struct vm_area_struct *vma,
499 unsigned long address, pte_t *ptep,
500 pte_t entry, int dirty)
501 {
502 int changed = !pte_same(*ptep, entry);
503
504 if (changed && dirty)
505 set_pte(ptep, entry);
506
507 return changed;
508 }
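/*
 * The return value tells the generic MM code whether the PTE actually
 * changed: callers typically use it to decide whether secondary MMU state
 * needs updating, and a return of 0 usually means the fault was spurious
 * (another CPU already updated the entry) and can simply be retried.
 */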
509
510 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
511 int pmdp_set_access_flags(struct vm_area_struct *vma,
512 unsigned long address, pmd_t *pmdp,
513 pmd_t entry, int dirty)
514 {
515 int changed = !pmd_same(*pmdp, entry);
516
517 VM_BUG_ON(address & ~HPAGE_PMD_MASK);
518
519 if (changed && dirty) {
520 set_pmd(pmdp, entry);
521 /*
522 * We had a write-protection fault here and changed the pmd
523  * to be more permissive. No need to flush the TLB for that,
524 * #PF is architecturally guaranteed to do that and in the
525 * worst-case we'll generate a spurious fault.
526 */
527 }
528
529 return changed;
530 }
531
532 int pudp_set_access_flags(struct vm_area_struct *vma, unsigned long address,
533 pud_t *pudp, pud_t entry, int dirty)
534 {
535 int changed = !pud_same(*pudp, entry);
536
537 VM_BUG_ON(address & ~HPAGE_PUD_MASK);
538
539 if (changed && dirty) {
540 set_pud(pudp, entry);
541 /*
542 * We had a write-protection fault here and changed the pud
543  * to be more permissive. No need to flush the TLB for that,
544 * #PF is architecturally guaranteed to do that and in the
545 * worst-case we'll generate a spurious fault.
546 */
547 }
548
549 return changed;
550 }
551 #endif
552
553 int ptep_test_and_clear_young(struct vm_area_struct *vma,
554 unsigned long addr, pte_t *ptep)
555 {
556 int ret = 0;
557
558 if (pte_young(*ptep))
559 ret = test_and_clear_bit(_PAGE_BIT_ACCESSED,
560 (unsigned long *) &ptep->pte);
561
562 return ret;
563 }
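/*
 * Note that the Accessed bit is cleared with an atomic test_and_clear_bit()
 * on the pte word, so it cannot race with the CPU setting Dirty/Accessed
 * in the same entry.  No TLB flush is done here; see
 * ptep_clear_flush_young() below for why that is safe on x86.
 */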
564
565 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
566 int pmdp_test_and_clear_young(struct vm_area_struct *vma,
567 unsigned long addr, pmd_t *pmdp)
568 {
569 int ret = 0;
570
571 if (pmd_young(*pmdp))
572 ret = test_and_clear_bit(_PAGE_BIT_ACCESSED,
573 (unsigned long *)pmdp);
574
575 return ret;
576 }
577 int pudp_test_and_clear_young(struct vm_area_struct *vma,
578 unsigned long addr, pud_t *pudp)
579 {
580 int ret = 0;
581
582 if (pud_young(*pudp))
583 ret = test_and_clear_bit(_PAGE_BIT_ACCESSED,
584 (unsigned long *)pudp);
585
586 return ret;
587 }
588 #endif
589
590 int ptep_clear_flush_young(struct vm_area_struct *vma,
591 unsigned long address, pte_t *ptep)
592 {
593 /*
594 * On x86 CPUs, clearing the accessed bit without a TLB flush
595 * doesn't cause data corruption. [ It could cause incorrect
596 * page aging and the (mistaken) reclaim of hot pages, but the
597 * chance of that should be relatively low. ]
598 *
599 * So as a performance optimization don't flush the TLB when
600 * clearing the accessed bit, it will eventually be flushed by
601 * a context switch or a VM operation anyway. [ In the rare
602 * event of it not getting flushed for a long time the delay
603 * shouldn't really matter because there's no real memory
604 * pressure for swapout to react to. ]
605 */
606 return ptep_test_and_clear_young(vma, address, ptep);
607 }
608
609 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
610 int pmdp_clear_flush_young(struct vm_area_struct *vma,
611 unsigned long address, pmd_t *pmdp)
612 {
613 int young;
614
615 VM_BUG_ON(address & ~HPAGE_PMD_MASK);
616
617 young = pmdp_test_and_clear_young(vma, address, pmdp);
618 if (young)
619 flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
620
621 return young;
622 }
623 #endif
624
625 /**
626 * reserve_top_address - reserves a hole in the top of kernel address space
627  * @reserve: size of hole to reserve
628 *
629 * Can be used to relocate the fixmap area and poke a hole in the top
630 * of kernel address space to make room for a hypervisor.
631 */
632 void __init reserve_top_address(unsigned long reserve)
633 {
634 #ifdef CONFIG_X86_32
635 BUG_ON(fixmaps_set > 0);
636 __FIXADDR_TOP = round_down(-reserve, 1 << PMD_SHIFT) - PAGE_SIZE;
637 printk(KERN_INFO "Reserving virtual address space above 0x%08lx (rounded to 0x%08lx)\n",
638 -reserve, __FIXADDR_TOP + PAGE_SIZE);
639 #endif
640 }
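/*
 * Illustrative (hypothetical) use: a 32-bit paravirtualized guest that has
 * to leave, say, the top 64 MB of the virtual address space to its
 * hypervisor would call reserve_top_address(64 * 1024 * 1024) early in
 * boot, before any fixmap entry is set (hence the BUG_ON(fixmaps_set > 0)).
 */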
641
642 int fixmaps_set;
643
644 void __native_set_fixmap(enum fixed_addresses idx, pte_t pte)
645 {
646 unsigned long address = __fix_to_virt(idx);
647
648 #ifdef CONFIG_X86_64
649 /*
650 * Ensure that the static initial page tables are covering the
651 * fixmap completely.
652 */
653 BUILD_BUG_ON(__end_of_permanent_fixed_addresses >
654 (FIXMAP_PMD_NUM * PTRS_PER_PTE));
655 #endif
656
657 if (idx >= __end_of_fixed_addresses) {
658 BUG();
659 return;
660 }
661 set_pte_vaddr(address, pte);
662 fixmaps_set++;
663 }
664
665 void native_set_fixmap(enum fixed_addresses idx, phys_addr_t phys,
666 pgprot_t flags)
667 {
668 /* Sanitize 'prot' against any unsupported bits: */
669 pgprot_val(flags) &= __default_kernel_pte_mask;
670
671 __native_set_fixmap(idx, pfn_pte(phys >> PAGE_SHIFT, flags));
672 }
673
674 #ifdef CONFIG_HAVE_ARCH_HUGE_VMAP
675 #ifdef CONFIG_X86_5LEVEL
676 /**
677 * p4d_set_huge - setup kernel P4D mapping
678 *
679 * No 512GB pages yet -- always return 0
680 */
681 int p4d_set_huge(p4d_t *p4d, phys_addr_t addr, pgprot_t prot)
682 {
683 return 0;
684 }
685
686 /**
687 * p4d_clear_huge - clear kernel P4D mapping when it is set
688 *
689 * No 512GB pages yet -- always return 0
690 */
691 int p4d_clear_huge(p4d_t *p4d)
692 {
693 return 0;
694 }
695 #endif
696
697 /**
698 * pud_set_huge - setup kernel PUD mapping
699 *
700 * MTRRs can override PAT memory types with 4KiB granularity. Therefore, this
701 * function sets up a huge page only if any of the following conditions are met:
702 *
703 * - MTRRs are disabled, or
704 *
705 * - MTRRs are enabled and the range is completely covered by a single MTRR, or
706 *
707 * - MTRRs are enabled and the corresponding MTRR memory type is WB, which
708 * has no effect on the requested PAT memory type.
709 *
710 * Callers should try to decrease page size (1GB -> 2MB -> 4K) if the bigger
711 * page mapping attempt fails.
712 *
713 * Returns 1 on success and 0 on failure.
714 */
715 int pud_set_huge(pud_t *pud, phys_addr_t addr, pgprot_t prot)
716 {
717 u8 mtrr, uniform;
718
719 mtrr = mtrr_type_lookup(addr, addr + PUD_SIZE, &uniform);
720 if ((mtrr != MTRR_TYPE_INVALID) && (!uniform) &&
721 (mtrr != MTRR_TYPE_WRBACK))
722 return 0;
723
724	/* Bail out if we are on a populated non-leaf entry: */
725 if (pud_present(*pud) && !pud_huge(*pud))
726 return 0;
727
728 prot = pgprot_4k_2_large(prot);
729
730 set_pte((pte_t *)pud, pfn_pte(
731 (u64)addr >> PAGE_SHIFT,
732 __pgprot(pgprot_val(prot) | _PAGE_PSE)));
733
734 return 1;
735 }
736
737 /**
738 * pmd_set_huge - setup kernel PMD mapping
739 *
740 * See text over pud_set_huge() above.
741 *
742 * Returns 1 on success and 0 on failure.
743 */
744 int pmd_set_huge(pmd_t *pmd, phys_addr_t addr, pgprot_t prot)
745 {
746 u8 mtrr, uniform;
747
748 mtrr = mtrr_type_lookup(addr, addr + PMD_SIZE, &uniform);
749 if ((mtrr != MTRR_TYPE_INVALID) && (!uniform) &&
750 (mtrr != MTRR_TYPE_WRBACK)) {
751 pr_warn_once("%s: Cannot satisfy [mem %#010llx-%#010llx] with a huge-page mapping due to MTRR override.\n",
752 __func__, addr, addr + PMD_SIZE);
753 return 0;
754 }
755
756	/* Bail out if we are on a populated non-leaf entry: */
757 if (pmd_present(*pmd) && !pmd_huge(*pmd))
758 return 0;
759
760 prot = pgprot_4k_2_large(prot);
761
762 set_pte((pte_t *)pmd, pfn_pte(
763 (u64)addr >> PAGE_SHIFT,
764 __pgprot(pgprot_val(prot) | _PAGE_PSE)));
765
766 return 1;
767 }
768
769 /**
770 * pud_clear_huge - clear kernel PUD mapping when it is set
771 *
772 * Returns 1 on success and 0 on failure (no PUD map is found).
773 */
774 int pud_clear_huge(pud_t *pud)
775 {
776 if (pud_large(*pud)) {
777 pud_clear(pud);
778 return 1;
779 }
780
781 return 0;
782 }
783
784 /**
785 * pmd_clear_huge - clear kernel PMD mapping when it is set
786 *
787 * Returns 1 on success and 0 on failure (no PMD map is found).
788 */
789 int pmd_clear_huge(pmd_t *pmd)
790 {
791 if (pmd_large(*pmd)) {
792 pmd_clear(pmd);
793 return 1;
794 }
795
796 return 0;
797 }
798
799 /*
800 * Until we support 512GB pages, skip them in the vmap area.
801 */
802 int p4d_free_pud_page(p4d_t *p4d, unsigned long addr)
803 {
804 return 0;
805 }
806
807 #ifdef CONFIG_X86_64
808 /**
809 * pud_free_pmd_page - Clear pud entry and free pmd page.
810 * @pud: Pointer to a PUD.
811 * @addr: Virtual address associated with pud.
812 *
813 * Context: The pud range has been unmapped and TLB purged.
814 * Return: 1 if clearing the entry succeeded. 0 otherwise.
815 *
816  * NOTE: Callers must be able to satisfy a single (GFP_KERNEL) page allocation.
817 */
818 int pud_free_pmd_page(pud_t *pud, unsigned long addr)
819 {
820 pmd_t *pmd, *pmd_sv;
821 pte_t *pte;
822 int i;
823
824 pmd = (pmd_t *)pud_page_vaddr(*pud);
825 pmd_sv = (pmd_t *)__get_free_page(GFP_KERNEL);
826 if (!pmd_sv)
827 return 0;
828
829 for (i = 0; i < PTRS_PER_PMD; i++) {
830 pmd_sv[i] = pmd[i];
831 if (!pmd_none(pmd[i]))
832 pmd_clear(&pmd[i]);
833 }
834
835 pud_clear(pud);
836
837 /* INVLPG to clear all paging-structure caches */
838 flush_tlb_kernel_range(addr, addr + PAGE_SIZE-1);
839
840 for (i = 0; i < PTRS_PER_PMD; i++) {
841 if (!pmd_none(pmd_sv[i])) {
842 pte = (pte_t *)pmd_page_vaddr(pmd_sv[i]);
843 free_page((unsigned long)pte);
844 }
845 }
846
847 free_page((unsigned long)pmd_sv);
848 free_page((unsigned long)pmd);
849
850 return 1;
851 }
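/*
 * The copy into pmd_sv above is what allows the ordering "clear entries,
 * flush TLB, then free": the pmd entries must be cleared and flushed
 * before any of the pte pages they reference may be freed, but once
 * cleared the pointers would be lost, so they are saved in a spare page
 * first and the freeing is done from that copy.
 */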
852
853 /**
854 * pmd_free_pte_page - Clear pmd entry and free pte page.
855 * @pmd: Pointer to a PMD.
856 * @addr: Virtual address associated with pmd.
857 *
858 * Context: The pmd range has been unmapped and TLB purged.
859 * Return: 1 if clearing the entry succeeded. 0 otherwise.
860 */
861 int pmd_free_pte_page(pmd_t *pmd, unsigned long addr)
862 {
863 pte_t *pte;
864
865 pte = (pte_t *)pmd_page_vaddr(*pmd);
866 pmd_clear(pmd);
867
868 /* INVLPG to clear all paging-structure caches */
869 flush_tlb_kernel_range(addr, addr + PAGE_SIZE-1);
870
871 free_page((unsigned long)pte);
872
873 return 1;
874 }
875
876 #else /* !CONFIG_X86_64 */
877
878 int pud_free_pmd_page(pud_t *pud, unsigned long addr)
879 {
880 return pud_none(*pud);
881 }
882
883 /*
884  * Disable free page handling on x86-PAE. This ensures that ioremap()
885 * does not update sync'd pmd entries. See vmalloc_sync_one().
886 */
887 int pmd_free_pte_page(pmd_t *pmd, unsigned long addr)
888 {
889 return pmd_none(*pmd);
890 }
891
892 #endif /* CONFIG_X86_64 */
893 #endif /* CONFIG_HAVE_ARCH_HUGE_VMAP */