#include <linux/mm.h>
#include <asm/pgalloc.h>
#include <asm/tlb.h>
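
/*
 * Allocate a zeroed page to hold a kernel page table.  __GFP_REPEAT
 * asks the allocator to try harder before failing, since callers
 * generally have no good way to recover from a missing pte page.
 */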
pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
{
        return (pte_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO);
}
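
/*
 * Allocate a pte page for user mappings.  With CONFIG_HIGHPTE the page
 * may live in highmem and must be mapped (e.g. via kmap_atomic) before
 * it is touched.  pgtable_page_ctor() initializes the split page-table
 * lock and the page-table accounting for the new page.
 */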
pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long address)
{
        struct page *pte;

#ifdef CONFIG_HIGHPTE
        pte = alloc_pages(GFP_KERNEL|__GFP_HIGHMEM|__GFP_REPEAT|__GFP_ZERO, 0);
#else
        pte = alloc_pages(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO, 0);
#endif
        if (pte)
                pgtable_page_ctor(pte);
        return pte;
}
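
/*
 * Free a pte page during address-space teardown: undo the ctor, let
 * any paravirt backend know the pfn no longer holds a page table, and
 * defer the actual free until after the TLB flush via
 * tlb_remove_page().
 */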
void __pte_free_tlb(struct mmu_gather *tlb, struct page *pte)
{
        pgtable_page_dtor(pte);
        paravirt_release_pt(page_to_pfn(pte));
        tlb_remove_page(tlb, pte);
}

#ifdef CONFIG_X86_64
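/*
 * Every pgd is kept on pgd_list so that changes to the kernel part of
 * the page tables can be propagated to all of them.  pgd_lock may be
 * taken from interrupt context, hence the irqsave variants.
 */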
static inline void pgd_list_add(pgd_t *pgd)
{
        struct page *page = virt_to_page(pgd);
        unsigned long flags;

        spin_lock_irqsave(&pgd_lock, flags);
        list_add(&page->lru, &pgd_list);
        spin_unlock_irqrestore(&pgd_lock, flags);
}

static inline void pgd_list_del(pgd_t *pgd)
{
        struct page *page = virt_to_page(pgd);
        unsigned long flags;

        spin_lock_irqsave(&pgd_lock, flags);
        list_del(&page->lru);
        spin_unlock_irqrestore(&pgd_lock, flags);
}
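
/*
 * Allocate a fresh pgd: the user half starts out empty and the kernel
 * half is copied from init_level4_pgt, whose kernel part never changes
 * after boot.
 */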
pgd_t *pgd_alloc(struct mm_struct *mm)
{
        unsigned boundary;
        pgd_t *pgd = (pgd_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT);
        if (!pgd)
                return NULL;
        pgd_list_add(pgd);
        /*
         * Copy kernel pointers in from init.
         * Could keep a freelist or slab cache of those because the kernel
         * part never changes.
         */
        boundary = pgd_index(__PAGE_OFFSET);
        memset(pgd, 0, boundary * sizeof(pgd_t));
        memcpy(pgd + boundary,
               init_level4_pgt + boundary,
               (PTRS_PER_PGD - boundary) * sizeof(pgd_t));
        return pgd;
}
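
/*
 * Free a pgd allocated by pgd_alloc() above; the BUG_ON catches callers
 * passing in something that is not a page-aligned pgd page.
 */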
void pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
        BUG_ON((unsigned long)pgd & (PAGE_SIZE-1));
        pgd_list_del(pgd);
        free_page((unsigned long)pgd);
}
#else  /* !CONFIG_X86_64 */
/*
 * List of all pgd's needed for non-PAE so it can invalidate entries
 * in both cached and uncached pgd's; not needed for PAE since the
 * kernel pmd is shared. If PAE were not to share the pmd a similar
 * tactic would be needed. This is essentially codepath-based locking
 * against pageattr.c; it is the unique case in which a valid change
 * of kernel pagetables can't be lazily synchronized by vmalloc faults.
 * vmalloc faults work because attached pagetables are never freed.
 * -- wli
 */
static inline void pgd_list_add(pgd_t *pgd)
{
        struct page *page = virt_to_page(pgd);

        list_add(&page->lru, &pgd_list);
}

static inline void pgd_list_del(pgd_t *pgd)
{
        struct page *page = virt_to_page(pgd);

        list_del(&page->lru);
}
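
/*
 * Number of pgd entries private to each page table: all of them when
 * every page table carries its own kernel pmds, otherwise only the
 * user portion.
 */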
#define UNSHARED_PTRS_PER_PGD \
        (SHARED_KERNEL_PMD ? USER_PTRS_PER_PGD : PTRS_PER_PGD)
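
/*
 * Set up a freshly allocated pgd: clear the user entries and, when the
 * kernel half is shared, copy it from swapper_pg_dir.  Runs under
 * pgd_lock, so it cannot race with updates to the kernel mappings.
 */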
static void pgd_ctor(void *p)
{
        pgd_t *pgd = p;
        unsigned long flags;

        /* Clear usermode parts of PGD */
        memset(pgd, 0, USER_PTRS_PER_PGD*sizeof(pgd_t));

        spin_lock_irqsave(&pgd_lock, flags);

        /* If the pgd points to a shared pagetable level (either the
           ptes in non-PAE, or shared PMD in PAE), then just copy the
           references from swapper_pg_dir. */
        if (PAGETABLE_LEVELS == 2 ||
            (PAGETABLE_LEVELS == 3 && SHARED_KERNEL_PMD)) {
                clone_pgd_range(pgd + USER_PTRS_PER_PGD,
                                swapper_pg_dir + USER_PTRS_PER_PGD,
                                KERNEL_PGD_PTRS);
                paravirt_alloc_pd_clone(__pa(pgd) >> PAGE_SHIFT,
                                        __pa(swapper_pg_dir) >> PAGE_SHIFT,
                                        USER_PTRS_PER_PGD,
                                        KERNEL_PGD_PTRS);
        }

        /* list required to sync kernel mapping updates */
        if (!SHARED_KERNEL_PMD)
                pgd_list_add(pgd);

        spin_unlock_irqrestore(&pgd_lock, flags);
}
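
/*
 * Unlink a dying pgd from pgd_list.  This is a no-op when the kernel
 * pmd is shared, because such pgds are never put on the list.
 */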
static void pgd_dtor(void *pgd)
{
        unsigned long flags; /* can be called from interrupt context */

        if (SHARED_KERNEL_PMD)
                return;

        spin_lock_irqsave(&pgd_lock, flags);
        pgd_list_del(pgd);
        spin_unlock_irqrestore(&pgd_lock, flags);
}

#ifdef CONFIG_X86_PAE
/*
 * Mop up any pmd pages which may still be attached to the pgd.
 * Normally they will be freed by munmap/exit_mmap, but any pmd we
 * preallocate which never got a corresponding vma will need to be
 * freed manually.
 */
static void pgd_mop_up_pmds(struct mm_struct *mm, pgd_t *pgdp)
{
        int i;

        for (i = 0; i < UNSHARED_PTRS_PER_PGD; i++) {
                pgd_t pgd = pgdp[i];

                if (pgd_val(pgd) != 0) {
                        pmd_t *pmd = (pmd_t *)pgd_page_vaddr(pgd);

                        pgdp[i] = native_make_pgd(0);

                        paravirt_release_pd(pgd_val(pgd) >> PAGE_SHIFT);
                        pmd_free(mm, pmd);
                }
        }
}

/*
 * In PAE mode, we need to do a cr3 reload (=tlb flush) when
 * updating the top-level pagetable entries to guarantee the
 * processor notices the update. Since this is expensive, and
 * all 4 top-level entries are used almost immediately in a
 * new process's life, we just pre-populate them here.
 *
 * Also, if we're in a paravirt environment where the kernel pmd is
 * not shared between pagetables (!SHARED_KERNEL_PMD), we allocate
 * and initialize the kernel pmds here.
 */
static int pgd_prepopulate_pmd(struct mm_struct *mm, pgd_t *pgd)
{
        pud_t *pud;
        unsigned long addr;
        int i;

        pud = pud_offset(pgd, 0);
        for (addr = i = 0; i < UNSHARED_PTRS_PER_PGD;
             i++, pud++, addr += PUD_SIZE) {
                pmd_t *pmd = pmd_alloc_one(mm, addr);

                if (!pmd) {
                        pgd_mop_up_pmds(mm, pgd);
                        return 0;
                }

                if (i >= USER_PTRS_PER_PGD)
                        memcpy(pmd, (pmd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
                               sizeof(pmd_t) * PTRS_PER_PMD);

                pud_populate(mm, pud, pmd);
        }

        return 1;
}

void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
{
        paravirt_alloc_pd(mm, __pa(pmd) >> PAGE_SHIFT);

        /* Note: almost everything apart from _PAGE_PRESENT is
           reserved at the pmd (PDPT) level. */
        set_pud(pudp, __pud(__pa(pmd) | _PAGE_PRESENT));

        /*
         * According to Intel App note "TLBs, Paging-Structure Caches,
         * and Their Invalidation", April 2007, document 317080-001,
         * section 8.1: in PAE mode we explicitly have to flush the
         * TLB via cr3 if the top-level pgd is changed...
         */
        if (mm == current->active_mm)
                write_cr3(read_cr3());
}
#else  /* !CONFIG_X86_PAE */
/* No need to prepopulate any pagetable entries in non-PAE modes. */
static int pgd_prepopulate_pmd(struct mm_struct *mm, pgd_t *pgd)
{
        return 1;
}

static void pgd_mop_up_pmds(struct mm_struct *mm, pgd_t *pgd)
{
}
#endif  /* CONFIG_X86_PAE */
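
/*
 * Allocate a 32-bit pgd.  mm->pgd is set before pgd_ctor() runs so
 * that code called from the ctor can already see it; if pmd
 * prepopulation fails, the partially constructed pgd is torn down
 * again and NULL is returned.
 */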
pgd_t *pgd_alloc(struct mm_struct *mm)
{
        pgd_t *pgd = (pgd_t *)__get_free_page(GFP_KERNEL | __GFP_ZERO);

        /* so that alloc_pd can use it */
        mm->pgd = pgd;
        if (pgd)
                pgd_ctor(pgd);

        if (pgd && !pgd_prepopulate_pmd(mm, pgd)) {
                pgd_dtor(pgd);
                free_page((unsigned long)pgd);
                pgd = NULL;
        }

        return pgd;
}
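
/*
 * Free a 32-bit pgd: release any preallocated pmds first, then unlink
 * the pgd from pgd_list and free the page itself.
 */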
void pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
        pgd_mop_up_pmds(mm, pgd);
        pgd_dtor(pgd);
        free_page((unsigned long)pgd);
}
#endif  /* CONFIG_X86_64 */