]>
Commit | Line | Data |
---|---|---|
4f76cd38 JF |
1 | #include <linux/mm.h> |
2 | #include <asm/pgalloc.h> | |
ee5aa8d3 | 3 | #include <asm/pgtable.h> |
4f76cd38 | 4 | #include <asm/tlb.h> |
a1d5a869 | 5 | #include <asm/fixmap.h> |
4f76cd38 | 6 | |
9e730237 VN |
7 | #define PGALLOC_GFP GFP_KERNEL | __GFP_NOTRACK | __GFP_REPEAT | __GFP_ZERO |
8 | ||
4f76cd38 JF |
9 | pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address) |
10 | { | |
9e730237 | 11 | return (pte_t *)__get_free_page(PGALLOC_GFP); |
4f76cd38 JF |
12 | } |
13 | ||
14 | pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long address) | |
15 | { | |
16 | struct page *pte; | |
17 | ||
18 | #ifdef CONFIG_HIGHPTE | |
9e730237 | 19 | pte = alloc_pages(PGALLOC_GFP | __GFP_HIGHMEM, 0); |
4f76cd38 | 20 | #else |
9e730237 | 21 | pte = alloc_pages(PGALLOC_GFP, 0); |
4f76cd38 JF |
22 | #endif |
23 | if (pte) | |
24 | pgtable_page_ctor(pte); | |
25 | return pte; | |
26 | } | |
27 | ||
/*
 * Free a pte page via the mmu_gather: run the dtor, notify the
 * paravirt backend, then queue the page for TLB-safe freeing.
 */
void ___pte_free_tlb(struct mmu_gather *tlb, struct page *pte)
{
	pgtable_page_dtor(pte);
	paravirt_release_pte(page_to_pfn(pte));
	tlb_remove_page(tlb, pte);
}
34 | ||
170fdff7 | 35 | #if PAGETABLE_LEVELS > 2 |
9e1b32ca | 36 | void ___pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd) |
170fdff7 | 37 | { |
6944a9c8 | 38 | paravirt_release_pmd(__pa(pmd) >> PAGE_SHIFT); |
170fdff7 JF |
39 | tlb_remove_page(tlb, virt_to_page(pmd)); |
40 | } | |
5a5f8f42 JF |
41 | |
42 | #if PAGETABLE_LEVELS > 3 | |
9e1b32ca | 43 | void ___pud_free_tlb(struct mmu_gather *tlb, pud_t *pud) |
5a5f8f42 | 44 | { |
2761fa09 | 45 | paravirt_release_pud(__pa(pud) >> PAGE_SHIFT); |
5a5f8f42 JF |
46 | tlb_remove_page(tlb, virt_to_page(pud)); |
47 | } | |
48 | #endif /* PAGETABLE_LEVELS > 3 */ | |
170fdff7 JF |
49 | #endif /* PAGETABLE_LEVELS > 2 */ |
50 | ||
4f76cd38 JF |
51 | static inline void pgd_list_add(pgd_t *pgd) |
52 | { | |
53 | struct page *page = virt_to_page(pgd); | |
4f76cd38 | 54 | |
4f76cd38 | 55 | list_add(&page->lru, &pgd_list); |
4f76cd38 JF |
56 | } |
57 | ||
58 | static inline void pgd_list_del(pgd_t *pgd) | |
59 | { | |
60 | struct page *page = virt_to_page(pgd); | |
4f76cd38 | 61 | |
4f76cd38 | 62 | list_del(&page->lru); |
4f76cd38 JF |
63 | } |
64 | ||
/*
 * Number of pgd entries private to each pagetable, i.e. not shared
 * via the kernel pmd: everything below KERNEL_PGD_BOUNDARY when the
 * kernel pmd is shared, otherwise the whole pgd.
 */
#define UNSHARED_PTRS_PER_PGD \
	(SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)
17b74627 | 68 | static void pgd_ctor(pgd_t *pgd) |
4f76cd38 | 69 | { |
4f76cd38 JF |
70 | /* If the pgd points to a shared pagetable level (either the |
71 | ptes in non-PAE, or shared PMD in PAE), then just copy the | |
72 | references from swapper_pg_dir. */ | |
73 | if (PAGETABLE_LEVELS == 2 || | |
85958b46 JF |
74 | (PAGETABLE_LEVELS == 3 && SHARED_KERNEL_PMD) || |
75 | PAGETABLE_LEVELS == 4) { | |
68db065c JF |
76 | clone_pgd_range(pgd + KERNEL_PGD_BOUNDARY, |
77 | swapper_pg_dir + KERNEL_PGD_BOUNDARY, | |
4f76cd38 | 78 | KERNEL_PGD_PTRS); |
6944a9c8 JF |
79 | paravirt_alloc_pmd_clone(__pa(pgd) >> PAGE_SHIFT, |
80 | __pa(swapper_pg_dir) >> PAGE_SHIFT, | |
68db065c | 81 | KERNEL_PGD_BOUNDARY, |
6944a9c8 | 82 | KERNEL_PGD_PTRS); |
4f76cd38 JF |
83 | } |
84 | ||
85 | /* list required to sync kernel mapping updates */ | |
86 | if (!SHARED_KERNEL_PMD) | |
87 | pgd_list_add(pgd); | |
4f76cd38 JF |
88 | } |
89 | ||
17b74627 | 90 | static void pgd_dtor(pgd_t *pgd) |
4f76cd38 JF |
91 | { |
92 | unsigned long flags; /* can be called from interrupt context */ | |
93 | ||
94 | if (SHARED_KERNEL_PMD) | |
95 | return; | |
96 | ||
97 | spin_lock_irqsave(&pgd_lock, flags); | |
98 | pgd_list_del(pgd); | |
99 | spin_unlock_irqrestore(&pgd_lock, flags); | |
100 | } | |
101 | ||
/*
 * List of all pgd's needed for non-PAE so it can invalidate entries
 * in both cached and uncached pgd's; not needed for PAE since the
 * kernel pmd is shared. If PAE were not to share the pmd a similar
 * tactic would be needed. This is essentially codepath-based locking
 * against pageattr.c; it is the unique case in which a valid change
 * of kernel pagetables can't be lazily synchronized by vmalloc faults.
 * vmalloc faults work because attached pagetables are never freed.
 * -- wli
 */
#ifdef CONFIG_X86_PAE
/*
 * In PAE mode, we need to do a cr3 reload (=tlb flush) when
 * updating the top-level pagetable entries to guarantee the
 * processor notices the update. Since this is expensive, and
 * all 4 top-level entries are used almost immediately in a
 * new process's life, we just pre-populate them here.
 *
 * Also, if we're in a paravirt environment where the kernel pmd is
 * not shared between pagetables (!SHARED_KERNEL_PMDS), we allocate
 * and initialize the kernel pmds here.
 */
#define PREALLOCATED_PMDS	UNSHARED_PTRS_PER_PGD

/* Install @pmd into @pudp, flushing the TLB when required by PAE. */
void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
{
	paravirt_alloc_pmd(mm, __pa(pmd) >> PAGE_SHIFT);

	/* Note: almost everything apart from _PAGE_PRESENT is
	   reserved at the pmd (PDPT) level. */
	set_pud(pudp, __pud(__pa(pmd) | _PAGE_PRESENT));

	/*
	 * According to Intel App note "TLBs, Paging-Structure Caches,
	 * and Their Invalidation", April 2007, document 317080-001,
	 * section 8.1: in PAE mode we explicitly have to flush the
	 * TLB via cr3 if the top-level pgd is changed...
	 */
	if (mm == current->active_mm)
		write_cr3(read_cr3());
}
#else  /* !CONFIG_X86_PAE */

/* No need to prepopulate any pagetable entries in non-PAE modes. */
#define PREALLOCATED_PMDS	0

#endif	/* CONFIG_X86_PAE */
150 | ||
151 | static void free_pmds(pmd_t *pmds[]) | |
152 | { | |
153 | int i; | |
154 | ||
155 | for(i = 0; i < PREALLOCATED_PMDS; i++) | |
156 | if (pmds[i]) | |
157 | free_page((unsigned long)pmds[i]); | |
158 | } | |
159 | ||
160 | static int preallocate_pmds(pmd_t *pmds[]) | |
161 | { | |
162 | int i; | |
163 | bool failed = false; | |
164 | ||
165 | for(i = 0; i < PREALLOCATED_PMDS; i++) { | |
9e730237 | 166 | pmd_t *pmd = (pmd_t *)__get_free_page(PGALLOC_GFP); |
d8d5900e JF |
167 | if (pmd == NULL) |
168 | failed = true; | |
169 | pmds[i] = pmd; | |
170 | } | |
171 | ||
172 | if (failed) { | |
173 | free_pmds(pmds); | |
174 | return -ENOMEM; | |
175 | } | |
176 | ||
177 | return 0; | |
178 | } | |
179 | ||
4f76cd38 JF |
180 | /* |
181 | * Mop up any pmd pages which may still be attached to the pgd. | |
182 | * Normally they will be freed by munmap/exit_mmap, but any pmd we | |
183 | * preallocate which never got a corresponding vma will need to be | |
184 | * freed manually. | |
185 | */ | |
186 | static void pgd_mop_up_pmds(struct mm_struct *mm, pgd_t *pgdp) | |
187 | { | |
188 | int i; | |
189 | ||
d8d5900e | 190 | for(i = 0; i < PREALLOCATED_PMDS; i++) { |
4f76cd38 JF |
191 | pgd_t pgd = pgdp[i]; |
192 | ||
193 | if (pgd_val(pgd) != 0) { | |
194 | pmd_t *pmd = (pmd_t *)pgd_page_vaddr(pgd); | |
195 | ||
196 | pgdp[i] = native_make_pgd(0); | |
197 | ||
6944a9c8 | 198 | paravirt_release_pmd(pgd_val(pgd) >> PAGE_SHIFT); |
4f76cd38 JF |
199 | pmd_free(mm, pmd); |
200 | } | |
201 | } | |
202 | } | |
203 | ||
d8d5900e | 204 | static void pgd_prepopulate_pmd(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmds[]) |
4f76cd38 JF |
205 | { |
206 | pud_t *pud; | |
207 | unsigned long addr; | |
208 | int i; | |
209 | ||
cf3e5050 JF |
210 | if (PREALLOCATED_PMDS == 0) /* Work around gcc-3.4.x bug */ |
211 | return; | |
212 | ||
4f76cd38 | 213 | pud = pud_offset(pgd, 0); |
4f76cd38 | 214 | |
d8d5900e JF |
215 | for (addr = i = 0; i < PREALLOCATED_PMDS; |
216 | i++, pud++, addr += PUD_SIZE) { | |
217 | pmd_t *pmd = pmds[i]; | |
4f76cd38 | 218 | |
68db065c | 219 | if (i >= KERNEL_PGD_BOUNDARY) |
4f76cd38 JF |
220 | memcpy(pmd, (pmd_t *)pgd_page_vaddr(swapper_pg_dir[i]), |
221 | sizeof(pmd_t) * PTRS_PER_PMD); | |
222 | ||
223 | pud_populate(mm, pud, pmd); | |
224 | } | |
4f76cd38 | 225 | } |
1ec1fe73 | 226 | |
d8d5900e | 227 | pgd_t *pgd_alloc(struct mm_struct *mm) |
1ec1fe73 | 228 | { |
d8d5900e JF |
229 | pgd_t *pgd; |
230 | pmd_t *pmds[PREALLOCATED_PMDS]; | |
231 | unsigned long flags; | |
1ec1fe73 | 232 | |
9e730237 | 233 | pgd = (pgd_t *)__get_free_page(PGALLOC_GFP); |
d8d5900e JF |
234 | |
235 | if (pgd == NULL) | |
236 | goto out; | |
237 | ||
238 | mm->pgd = pgd; | |
239 | ||
240 | if (preallocate_pmds(pmds) != 0) | |
241 | goto out_free_pgd; | |
242 | ||
243 | if (paravirt_pgd_alloc(mm) != 0) | |
244 | goto out_free_pmds; | |
1ec1fe73 IM |
245 | |
246 | /* | |
d8d5900e JF |
247 | * Make sure that pre-populating the pmds is atomic with |
248 | * respect to anything walking the pgd_list, so that they | |
249 | * never see a partially populated pgd. | |
1ec1fe73 | 250 | */ |
d8d5900e | 251 | spin_lock_irqsave(&pgd_lock, flags); |
4f76cd38 | 252 | |
d8d5900e JF |
253 | pgd_ctor(pgd); |
254 | pgd_prepopulate_pmd(mm, pgd, pmds); | |
4f76cd38 | 255 | |
d8d5900e | 256 | spin_unlock_irqrestore(&pgd_lock, flags); |
4f76cd38 JF |
257 | |
258 | return pgd; | |
d8d5900e JF |
259 | |
260 | out_free_pmds: | |
261 | free_pmds(pmds); | |
262 | out_free_pgd: | |
263 | free_page((unsigned long)pgd); | |
264 | out: | |
265 | return NULL; | |
4f76cd38 JF |
266 | } |
267 | ||
268 | void pgd_free(struct mm_struct *mm, pgd_t *pgd) | |
269 | { | |
270 | pgd_mop_up_pmds(mm, pgd); | |
271 | pgd_dtor(pgd); | |
eba0045f | 272 | paravirt_pgd_free(mm, pgd); |
4f76cd38 JF |
273 | free_page((unsigned long)pgd); |
274 | } | |
ee5aa8d3 JF |
275 | |
276 | int ptep_set_access_flags(struct vm_area_struct *vma, | |
277 | unsigned long address, pte_t *ptep, | |
278 | pte_t entry, int dirty) | |
279 | { | |
280 | int changed = !pte_same(*ptep, entry); | |
281 | ||
282 | if (changed && dirty) { | |
283 | *ptep = entry; | |
284 | pte_update_defer(vma->vm_mm, address, ptep); | |
285 | flush_tlb_page(vma, address); | |
286 | } | |
287 | ||
288 | return changed; | |
289 | } | |
f9fbf1a3 JF |
290 | |
291 | int ptep_test_and_clear_young(struct vm_area_struct *vma, | |
292 | unsigned long addr, pte_t *ptep) | |
293 | { | |
294 | int ret = 0; | |
295 | ||
296 | if (pte_young(*ptep)) | |
297 | ret = test_and_clear_bit(_PAGE_BIT_ACCESSED, | |
48e23957 | 298 | (unsigned long *) &ptep->pte); |
f9fbf1a3 JF |
299 | |
300 | if (ret) | |
301 | pte_update(vma->vm_mm, addr, ptep); | |
302 | ||
303 | return ret; | |
304 | } | |
c20311e1 JF |
305 | |
306 | int ptep_clear_flush_young(struct vm_area_struct *vma, | |
307 | unsigned long address, pte_t *ptep) | |
308 | { | |
309 | int young; | |
310 | ||
311 | young = ptep_test_and_clear_young(vma, address, ptep); | |
312 | if (young) | |
313 | flush_tlb_page(vma, address); | |
314 | ||
315 | return young; | |
316 | } | |
7c7e6e07 | 317 | |
fd862dde GP |
318 | /** |
319 | * reserve_top_address - reserves a hole in the top of kernel address space | |
320 | * @reserve - size of hole to reserve | |
321 | * | |
322 | * Can be used to relocate the fixmap area and poke a hole in the top | |
323 | * of kernel address space to make room for a hypervisor. | |
324 | */ | |
325 | void __init reserve_top_address(unsigned long reserve) | |
326 | { | |
327 | #ifdef CONFIG_X86_32 | |
328 | BUG_ON(fixmaps_set > 0); | |
329 | printk(KERN_INFO "Reserving virtual address space above 0x%08x\n", | |
330 | (int)-reserve); | |
331 | __FIXADDR_TOP = -reserve - PAGE_SIZE; | |
fd862dde GP |
332 | #endif |
333 | } | |
334 | ||
7c7e6e07 JF |
335 | int fixmaps_set; |
336 | ||
aeaaa59c | 337 | void __native_set_fixmap(enum fixed_addresses idx, pte_t pte) |
7c7e6e07 JF |
338 | { |
339 | unsigned long address = __fix_to_virt(idx); | |
340 | ||
341 | if (idx >= __end_of_fixed_addresses) { | |
342 | BUG(); | |
343 | return; | |
344 | } | |
aeaaa59c | 345 | set_pte_vaddr(address, pte); |
7c7e6e07 JF |
346 | fixmaps_set++; |
347 | } | |
aeaaa59c | 348 | |
3b3809ac MH |
349 | void native_set_fixmap(enum fixed_addresses idx, phys_addr_t phys, |
350 | pgprot_t flags) | |
aeaaa59c JF |
351 | { |
352 | __native_set_fixmap(idx, pfn_pte(phys >> PAGE_SHIFT, flags)); | |
353 | } |