#include <linux/mm.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/tlb.h>
#include <asm/fixmap.h>

#define PGALLOC_GFP (GFP_KERNEL | __GFP_NOTRACK | __GFP_REPEAT | __GFP_ZERO)

#ifdef CONFIG_HIGHPTE
#define PGALLOC_USER_GFP __GFP_HIGHMEM
#else
#define PGALLOC_USER_GFP 0
#endif

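/*
 * GFP flags for user pagetable pages. With CONFIG_HIGHPTE these pages
 * may come from highmem; booting with "userpte=nohigh" clears
 * __GFP_HIGHMEM again (see setup_userpte() below).
 */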
gfp_t __userpte_alloc_gfp = PGALLOC_GFP | PGALLOC_USER_GFP;

pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
{
	return (pte_t *)__get_free_page(PGALLOC_GFP);
}

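/*
 * Allocate a pte page for userspace. Unlike the kernel variant above,
 * the page goes through pgtable_page_ctor() so it is set up for the
 * split pagetable lock. Returns NULL if no page could be allocated.
 */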
pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long address)
{
	struct page *pte;

	pte = alloc_pages(__userpte_alloc_gfp, 0);
	if (pte)
		pgtable_page_ctor(pte);
	return pte;
}

static int __init setup_userpte(char *arg)
{
	if (!arg)
		return -EINVAL;

	/*
	 * "userpte=nohigh" disables allocation of user pagetables in
	 * high memory.
	 */
	if (strcmp(arg, "nohigh") == 0)
		__userpte_alloc_gfp &= ~__GFP_HIGHMEM;
	else
		return -EINVAL;
	return 0;
}
early_param("userpte", setup_userpte);

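/*
 * Free a user pte page via the mmu_gather, so the page is only reused
 * after the TLB has been flushed; until then, other CPUs may still
 * hold cached translations that go through it.
 */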
void ___pte_free_tlb(struct mmu_gather *tlb, struct page *pte)
{
	pgtable_page_dtor(pte);
	paravirt_release_pte(page_to_pfn(pte));
	tlb_remove_page(tlb, pte);
}

#if PAGETABLE_LEVELS > 2
void ___pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd)
{
	paravirt_release_pmd(__pa(pmd) >> PAGE_SHIFT);
	tlb_remove_page(tlb, virt_to_page(pmd));
}

#if PAGETABLE_LEVELS > 3
void ___pud_free_tlb(struct mmu_gather *tlb, pud_t *pud)
{
	paravirt_release_pud(__pa(pud) >> PAGE_SHIFT);
	tlb_remove_page(tlb, virt_to_page(pud));
}
#endif	/* PAGETABLE_LEVELS > 3 */
#endif	/* PAGETABLE_LEVELS > 2 */

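/*
 * pgd_list links every pgd page in the system through its struct
 * page's lru entry, so changes to the kernel part of the mappings can
 * be propagated to all pagetables. All updates happen under pgd_lock.
 */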
static inline void pgd_list_add(pgd_t *pgd)
{
	struct page *page = virt_to_page(pgd);

	list_add(&page->lru, &pgd_list);
}

static inline void pgd_list_del(pgd_t *pgd)
{
	struct page *page = virt_to_page(pgd);

	list_del(&page->lru);
}

#define UNSHARED_PTRS_PER_PGD				\
	(SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)

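/*
 * UNSHARED_PTRS_PER_PGD is the number of pgd entries private to each
 * pagetable: only the user entries below KERNEL_PGD_BOUNDARY when the
 * kernel pmd is shared, all of PTRS_PER_PGD otherwise.
 */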
static void pgd_ctor(pgd_t *pgd)
{
	/* If the pgd points to a shared pagetable level (either the
	   ptes in non-PAE, or shared PMD in PAE), then just copy the
	   references from swapper_pg_dir. */
	if (PAGETABLE_LEVELS == 2 ||
	    (PAGETABLE_LEVELS == 3 && SHARED_KERNEL_PMD) ||
	    PAGETABLE_LEVELS == 4) {
		clone_pgd_range(pgd + KERNEL_PGD_BOUNDARY,
				swapper_pg_dir + KERNEL_PGD_BOUNDARY,
				KERNEL_PGD_PTRS);
		paravirt_alloc_pmd_clone(__pa(pgd) >> PAGE_SHIFT,
					 __pa(swapper_pg_dir) >> PAGE_SHIFT,
					 KERNEL_PGD_BOUNDARY,
					 KERNEL_PGD_PTRS);
	}

	/* list required to sync kernel mapping updates */
	if (!SHARED_KERNEL_PMD)
		pgd_list_add(pgd);
}

static void pgd_dtor(pgd_t *pgd)
{
	unsigned long flags; /* can be called from interrupt context */

	if (SHARED_KERNEL_PMD)
		return;

	spin_lock_irqsave(&pgd_lock, flags);
	pgd_list_del(pgd);
	spin_unlock_irqrestore(&pgd_lock, flags);
}

/*
 * List of all pgd's needed for non-PAE so it can invalidate entries
 * in both cached and uncached pgd's; not needed for PAE since the
 * kernel pmd is shared. If PAE were not to share the pmd a similar
 * tactic would be needed. This is essentially codepath-based locking
 * against pageattr.c; it is the unique case in which a valid change
 * of kernel pagetables can't be lazily synchronized by vmalloc faults.
 * vmalloc faults work because attached pagetables are never freed.
 */

#ifdef CONFIG_X86_PAE
/*
 * In PAE mode, we need to do a cr3 reload (=tlb flush) when
 * updating the top-level pagetable entries to guarantee the
 * processor notices the update. Since this is expensive, and
 * all 4 top-level entries are used almost immediately in a
 * new process's life, we just pre-populate them here.
 *
 * Also, if we're in a paravirt environment where the kernel pmd is
 * not shared between pagetables (!SHARED_KERNEL_PMDS), we allocate
 * and initialize the kernel pmds here.
 */
#define PREALLOCATED_PMDS	UNSHARED_PTRS_PER_PGD

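/*
 * Attach a pmd page to a PAE pud (PDPT) entry. Besides writing the
 * entry, this reloads cr3 when the current address space is affected,
 * because the processor only re-reads PDPT entries from memory on a
 * cr3 load.
 */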
void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
{
	paravirt_alloc_pmd(mm, __pa(pmd) >> PAGE_SHIFT);

	/* Note: almost everything apart from _PAGE_PRESENT is
	   reserved at the pmd (PDPT) level. */
	set_pud(pudp, __pud(__pa(pmd) | _PAGE_PRESENT));

	/*
	 * According to Intel App note "TLBs, Paging-Structure Caches,
	 * and Their Invalidation", April 2007, document 317080-001,
	 * section 8.1: in PAE mode we explicitly have to flush the
	 * TLB via cr3 if the top-level pgd is changed...
	 */
	if (mm == current->active_mm)
		write_cr3(read_cr3());
}

#else  /* !CONFIG_X86_PAE */

/* No need to prepopulate any pagetable entries in non-PAE modes. */
#define PREALLOCATED_PMDS	0

#endif	/* CONFIG_X86_PAE */

static void free_pmds(pmd_t *pmds[])
{
	int i;

	for (i = 0; i < PREALLOCATED_PMDS; i++)
		if (pmds[i])
			free_page((unsigned long)pmds[i]);
}

static int preallocate_pmds(pmd_t *pmds[])
{
	int i;
	bool failed = false;

	for (i = 0; i < PREALLOCATED_PMDS; i++) {
		pmd_t *pmd = (pmd_t *)__get_free_page(PGALLOC_GFP);
		if (pmd == NULL)
			failed = true;
		pmds[i] = pmd;
	}

	if (failed) {
		free_pmds(pmds);
		return -ENOMEM;
	}

	return 0;
}

/*
 * Mop up any pmd pages which may still be attached to the pgd.
 * Normally they will be freed by munmap/exit_mmap, but any pmd we
 * preallocate which never got a corresponding vma will need to be
 * freed manually.
 */
static void pgd_mop_up_pmds(struct mm_struct *mm, pgd_t *pgdp)
{
	int i;

	for (i = 0; i < PREALLOCATED_PMDS; i++) {
		pgd_t pgd = pgdp[i];

		if (pgd_val(pgd) != 0) {
			pmd_t *pmd = (pmd_t *)pgd_page_vaddr(pgd);

			pgdp[i] = native_make_pgd(0);

			paravirt_release_pmd(pgd_val(pgd) >> PAGE_SHIFT);
			pmd_free(mm, pmd);
		}
	}
}

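/*
 * Wire the preallocated pmds into a fresh pgd. Entries at or above
 * KERNEL_PGD_BOUNDARY are kernel mappings, so those pmds are first
 * filled from the reference pagetable, swapper_pg_dir.
 */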
static void pgd_prepopulate_pmd(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmds[])
{
	pud_t *pud;
	unsigned long addr;
	int i;

	if (PREALLOCATED_PMDS == 0) /* Work around gcc-3.4.x bug */
		return;

	pud = pud_offset(pgd, 0);

	for (addr = i = 0; i < PREALLOCATED_PMDS;
	     i++, pud++, addr += PUD_SIZE) {
		pmd_t *pmd = pmds[i];

		if (i >= KERNEL_PGD_BOUNDARY)
			memcpy(pmd, (pmd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
			       sizeof(pmd_t) * PTRS_PER_PMD);

		pud_populate(mm, pud, pmd);
	}
}

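/*
 * Allocate and set up a fresh pgd: take a zeroed page for the pgd
 * itself, preallocate any pmds that PAE needs, then initialize the
 * kernel part and attach the pmds under pgd_lock, so walkers of
 * pgd_list never see a partially populated pgd.
 */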
pgd_t *pgd_alloc(struct mm_struct *mm)
{
	pgd_t *pgd;
	pmd_t *pmds[PREALLOCATED_PMDS];
	unsigned long flags;

	pgd = (pgd_t *)__get_free_page(PGALLOC_GFP);
	if (pgd == NULL)
		goto out;

	mm->pgd = pgd;

	if (preallocate_pmds(pmds) != 0)
		goto out_free_pgd;

	if (paravirt_pgd_alloc(mm) != 0)
		goto out_free_pmds;

	/*
	 * Make sure that pre-populating the pmds is atomic with
	 * respect to anything walking the pgd_list, so that they
	 * never see a partially populated pgd.
	 */
	spin_lock_irqsave(&pgd_lock, flags);

	pgd_ctor(pgd);
	pgd_prepopulate_pmd(mm, pgd, pmds);

	spin_unlock_irqrestore(&pgd_lock, flags);

	return pgd;

out_free_pmds:
	free_pmds(pmds);
out_free_pgd:
	free_page((unsigned long)pgd);
out:
	return NULL;
}

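/*
 * Release a pgd: free any still-attached preallocated pmds, drop the
 * pgd from pgd_list via pgd_dtor(), then free the page itself.
 */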
void pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
	pgd_mop_up_pmds(mm, pgd);
	pgd_dtor(pgd);
	paravirt_pgd_free(mm, pgd);
	free_page((unsigned long)pgd);
}

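/*
 * Called by the generic mm code from a page fault to upgrade a pte's
 * access flags. Returns non-zero if the pte changed; the update and
 * TLB flush are only done for write (dirty) faults.
 */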
int ptep_set_access_flags(struct vm_area_struct *vma,
			  unsigned long address, pte_t *ptep,
			  pte_t entry, int dirty)
{
	int changed = !pte_same(*ptep, entry);

	if (changed && dirty) {
		*ptep = entry;
		pte_update_defer(vma->vm_mm, address, ptep);
		flush_tlb_page(vma, address);
	}

	return changed;
}

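/*
 * Atomically clear the accessed bit of a pte and report whether it
 * was set. No TLB flush is done here; the worst a stale TLB entry
 * causes is a missed accessed-bit update, which only skews page
 * aging.
 */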
int ptep_test_and_clear_young(struct vm_area_struct *vma,
			      unsigned long addr, pte_t *ptep)
{
	int ret = 0;

	if (pte_young(*ptep))
		ret = test_and_clear_bit(_PAGE_BIT_ACCESSED,
					 (unsigned long *) &ptep->pte);

	if (ret)
		pte_update(vma->vm_mm, addr, ptep);

	return ret;
}

int ptep_clear_flush_young(struct vm_area_struct *vma,
			   unsigned long address, pte_t *ptep)
{
	int young;

	young = ptep_test_and_clear_young(vma, address, ptep);
	if (young)
		flush_tlb_page(vma, address);

	return young;
}

/**
 * reserve_top_address - reserves a hole in the top of kernel address space
 * @reserve - size of hole to reserve
 *
 * Can be used to relocate the fixmap area and poke a hole in the top
 * of kernel address space to make room for a hypervisor.
 */
void __init reserve_top_address(unsigned long reserve)
{
#ifdef CONFIG_X86_32
	BUG_ON(fixmaps_set > 0);
	printk(KERN_INFO "Reserving virtual address space above 0x%08x\n",
	       (int)-reserve);
	__FIXADDR_TOP = -reserve - PAGE_SIZE;
#endif
}

/* how many fixmap slots have been installed so far */
int fixmaps_set;

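/*
 * Install a pte for a fixmap slot. The fixmap region is a set of
 * compile-time-fixed virtual addresses at the top of the address
 * space; idx selects the slot and must be below
 * __end_of_fixed_addresses.
 */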
void __native_set_fixmap(enum fixed_addresses idx, pte_t pte)
{
	unsigned long address = __fix_to_virt(idx);

	if (idx >= __end_of_fixed_addresses) {
		BUG();
		return;
	}
	set_pte_vaddr(address, pte);
	fixmaps_set++;
}

void native_set_fixmap(enum fixed_addresses idx, phys_addr_t phys,
		       pgprot_t flags)
{
	__native_set_fixmap(idx, pfn_pte(phys >> PAGE_SHIFT, flags));
}