/*
 * Xen mmu operations
 *
 * This file contains the various mmu fetch and update operations.
 * The most important job they must perform is the mapping between the
 * domain's pfn and the overall machine mfns.
 *
 * Xen allows guests to directly update the pagetable, in a controlled
 * fashion.  In other words, the guest modifies the same pagetable
 * that the CPU actually uses, which eliminates the overhead of having
 * a separate shadow pagetable.
 *
 * In order to allow this, it falls on the guest domain to map its
 * notion of a "physical" pfn - which is just a domain-local linear
 * address - into a real "machine address" which the CPU's MMU can
 * use.
 *
 * A pgd_t/pmd_t/pte_t will typically contain an mfn, and so can be
 * inserted directly into the pagetable.  When creating a new
 * pte/pmd/pgd, it converts the passed pfn into an mfn.  Conversely,
 * when reading the content back with __(pgd|pmd|pte)_val, it converts
 * the mfn back into a pfn.
 *
 * The other constraint is that all pages which make up a pagetable
 * must be mapped read-only in the guest.  This prevents uncontrolled
 * guest updates to the pagetable.  Xen strictly enforces this, and
 * will disallow any pagetable update which will end up mapping a
 * pagetable page RW, and will disallow using any writable page as a
 * pagetable.
 *
 * Naively, when loading %cr3 with the base of a new pagetable, Xen
 * would need to validate the whole pagetable before going on.
 * Naturally, this is quite slow.  The solution is to "pin" a
 * pagetable, which enforces all the constraints on the pagetable even
 * when it is not actively in use.  This means that Xen can be assured
 * that it is still valid when you do load it into %cr3, and doesn't
 * need to revalidate it.
 *
 * Jeremy Fitzhardinge <jeremy@xensource.com>, XenSource Inc, 2007
 */
#include <linux/sched.h>
#include <linux/highmem.h>
#include <linux/debugfs.h>
#include <linux/bug.h>
#include <linux/vmalloc.h>
#include <linux/module.h>
#include <linux/gfp.h>
#include <linux/memblock.h>
#include <linux/seq_file.h>
#include <linux/crash_dump.h>

#include <trace/events/xen.h>

#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/fixmap.h>
#include <asm/mmu_context.h>
#include <asm/setup.h>
#include <asm/paravirt.h>
#include <asm/e820.h>
#include <asm/linkage.h>
#include <asm/page.h>
#include <asm/init.h>
#include <asm/pat.h>
#include <asm/smp.h>

#include <asm/xen/hypercall.h>
#include <asm/xen/hypervisor.h>

#include <xen/xen.h>
#include <xen/page.h>
#include <xen/interface/xen.h>
#include <xen/interface/hvm/hvm_op.h>
#include <xen/interface/version.h>
#include <xen/interface/memory.h>
#include <xen/hvc-console.h>

#include "multicalls.h"
#include "mmu.h"
#include "debugfs.h"

/*
 * Protects atomic reservation decrease/increase against concurrent increases.
 * Also protects non-atomic updates of current_pages and balloon lists.
 */
DEFINE_SPINLOCK(xen_reservation_lock);

#ifdef CONFIG_X86_32
/*
 * Identity map, in addition to plain kernel map.  This needs to be
 * large enough to allocate page table pages to allocate the rest.
 * Each page can map 2MB.
 */
#define LEVEL1_IDENT_ENTRIES	(PTRS_PER_PTE * 4)
static RESERVE_BRK_ARRAY(pte_t, level1_ident_pgt, LEVEL1_IDENT_ENTRIES);
#endif
#ifdef CONFIG_X86_64
/* l3 pud for userspace vsyscall mapping */
static pud_t level3_user_vsyscall[PTRS_PER_PUD] __page_aligned_bss;
#endif /* CONFIG_X86_64 */

/*
 * Note about cr3 (pagetable base) values:
 *
 * xen_cr3 contains the current logical cr3 value; it contains the
 * last set cr3.  This may not be the current effective cr3, because
 * its update may be being lazily deferred.  However, a vcpu looking
 * at its own cr3 can use this value knowing that everything will be
 * self-consistent.
 *
 * xen_current_cr3 contains the actual vcpu cr3; it is set once the
 * hypercall to set the vcpu cr3 is complete (so it may be a little
 * out of date, but it will never be set early).  If one vcpu is
 * looking at another vcpu's cr3 value, it should use this variable.
 */
DEFINE_PER_CPU(unsigned long, xen_cr3);		/* cr3 stored as physaddr */
DEFINE_PER_CPU(unsigned long, xen_current_cr3);	/* actual vcpu cr3 */

static phys_addr_t xen_pt_base, xen_pt_size __initdata;

/*
 * Just beyond the highest usermode address.  STACK_TOP_MAX has a
 * redzone above it, so round it up to a PGD boundary.
 */
#define USER_LIMIT	((STACK_TOP_MAX + PGDIR_SIZE - 1) & PGDIR_MASK)

unsigned long arbitrary_virt_to_mfn(void *vaddr)
{
	xmaddr_t maddr = arbitrary_virt_to_machine(vaddr);

	return PFN_DOWN(maddr.maddr);
}

xmaddr_t arbitrary_virt_to_machine(void *vaddr)
{
	unsigned long address = (unsigned long)vaddr;
	unsigned int level;
	pte_t *pte;
	unsigned offset;

	/*
	 * if the PFN is in the linear mapped vaddr range, we can just use
	 * the (quick) virt_to_machine() p2m lookup
	 */
	if (virt_addr_valid(vaddr))
		return virt_to_machine(vaddr);

	/* otherwise we have to do a (slower) full page-table walk */

	pte = lookup_address(address, &level);
	BUG_ON(pte == NULL);
	offset = address & ~PAGE_MASK;
	return XMADDR(((phys_addr_t)pte_mfn(*pte) << PAGE_SHIFT) + offset);
}
EXPORT_SYMBOL_GPL(arbitrary_virt_to_machine);

void make_lowmem_page_readonly(void *vaddr)
{
	pte_t *pte, ptev;
	unsigned long address = (unsigned long)vaddr;
	unsigned int level;

	pte = lookup_address(address, &level);
	if (pte == NULL)
		return;		/* vaddr missing */

	ptev = pte_wrprotect(*pte);

	if (HYPERVISOR_update_va_mapping(address, ptev, 0))
		BUG();
}

void make_lowmem_page_readwrite(void *vaddr)
{
	pte_t *pte, ptev;
	unsigned long address = (unsigned long)vaddr;
	unsigned int level;

	pte = lookup_address(address, &level);
	if (pte == NULL)
		return;		/* vaddr missing */

	ptev = pte_mkwrite(*pte);

	if (HYPERVISOR_update_va_mapping(address, ptev, 0))
		BUG();
}


static bool xen_page_pinned(void *ptr)
{
	struct page *page = virt_to_page(ptr);

	return PagePinned(page);
}

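/*
 * Set a pte with an explicit target domain id, as a single batched
 * mmu_update hypercall.  Unlike the DOMID_SELF paths below, this
 * allows the update to name another domain's frames (e.g. for
 * mappings of a foreign domain's pages), which is why it is exported.
 */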
void xen_set_domain_pte(pte_t *ptep, pte_t pteval, unsigned domid)
{
	struct multicall_space mcs;
	struct mmu_update *u;

	trace_xen_mmu_set_domain_pte(ptep, pteval, domid);

	mcs = xen_mc_entry(sizeof(*u));
	u = mcs.args;

	/* ptep might be kmapped when using 32-bit HIGHPTE */
	u->ptr = virt_to_machine(ptep).maddr;
	u->val = pte_val_ma(pteval);

	MULTI_mmu_update(mcs.mc, mcs.args, 1, NULL, domid);

	xen_mc_issue(PARAVIRT_LAZY_MMU);
}
EXPORT_SYMBOL_GPL(xen_set_domain_pte);

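/*
 * Append an mmu_update to the current multicall batch: if the batch
 * already ends in an mmu_update hypercall, extend its argument list
 * and bump the op count in place; otherwise add a fresh one-element
 * mmu_update call to the batch.
 */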
static void xen_extend_mmu_update(const struct mmu_update *update)
{
	struct multicall_space mcs;
	struct mmu_update *u;

	mcs = xen_mc_extend_args(__HYPERVISOR_mmu_update, sizeof(*u));

	if (mcs.mc != NULL) {
		mcs.mc->args[1]++;
	} else {
		mcs = __xen_mc_entry(sizeof(*u));
		MULTI_mmu_update(mcs.mc, mcs.args, 1, NULL, DOMID_SELF);
	}

	u = mcs.args;
	*u = *update;
}

static void xen_extend_mmuext_op(const struct mmuext_op *op)
{
	struct multicall_space mcs;
	struct mmuext_op *u;

	mcs = xen_mc_extend_args(__HYPERVISOR_mmuext_op, sizeof(*u));

	if (mcs.mc != NULL) {
		mcs.mc->args[1]++;
	} else {
		mcs = __xen_mc_entry(sizeof(*u));
		MULTI_mmuext_op(mcs.mc, mcs.args, 1, NULL, DOMID_SELF);
	}

	u = mcs.args;
	*u = *op;
}

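/*
 * Update a pmd entry via hypercall, batched under lazy MMU mode.
 * Needed when the containing page is pinned and therefore mapped
 * read-only, so the entry cannot simply be written directly.
 */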
static void xen_set_pmd_hyper(pmd_t *ptr, pmd_t val)
{
	struct mmu_update u;

	preempt_disable();

	xen_mc_batch();

	/* ptr may be ioremapped for 64-bit pagetable setup */
	u.ptr = arbitrary_virt_to_machine(ptr).maddr;
	u.val = pmd_val_ma(val);
	xen_extend_mmu_update(&u);

	xen_mc_issue(PARAVIRT_LAZY_MMU);

	preempt_enable();
}

static void xen_set_pmd(pmd_t *ptr, pmd_t val)
{
	trace_xen_mmu_set_pmd(ptr, val);

	/* If page is not pinned, we can just update the entry
	   directly */
	if (!xen_page_pinned(ptr)) {
		*ptr = val;
		return;
	}

	xen_set_pmd_hyper(ptr, val);
}

/*
 * Associate a virtual page frame with a given physical page frame
 * and protection flags for that frame.
 */
void set_pte_mfn(unsigned long vaddr, unsigned long mfn, pgprot_t flags)
{
	set_pte_vaddr(vaddr, mfn_pte(mfn, flags));
}

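/*
 * Try to queue a pte update on the current lazy-MMU multicall batch.
 * Returns false when we are not in lazy MMU mode, in which case the
 * caller must apply the update itself.
 */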
static bool xen_batched_set_pte(pte_t *ptep, pte_t pteval)
{
	struct mmu_update u;

	if (paravirt_get_lazy_mode() != PARAVIRT_LAZY_MMU)
		return false;

	xen_mc_batch();

	u.ptr = virt_to_machine(ptep).maddr | MMU_NORMAL_PT_UPDATE;
	u.val = pte_val_ma(pteval);
	xen_extend_mmu_update(&u);

	xen_mc_issue(PARAVIRT_LAZY_MMU);

	return true;
}

static inline void __xen_set_pte(pte_t *ptep, pte_t pteval)
{
	if (!xen_batched_set_pte(ptep, pteval)) {
		/*
		 * Could call native_set_pte() here and trap and
		 * emulate the PTE write but with 32-bit guests this
		 * needs two traps (one for each of the two 32-bit
		 * words in the PTE) so do one hypercall directly
		 * instead.
		 */
		struct mmu_update u;

		u.ptr = virt_to_machine(ptep).maddr | MMU_NORMAL_PT_UPDATE;
		u.val = pte_val_ma(pteval);
		HYPERVISOR_mmu_update(&u, 1, NULL, DOMID_SELF);
	}
}

static void xen_set_pte(pte_t *ptep, pte_t pteval)
{
	trace_xen_mmu_set_pte(ptep, pteval);
	__xen_set_pte(ptep, pteval);
}

static void xen_set_pte_at(struct mm_struct *mm, unsigned long addr,
			   pte_t *ptep, pte_t pteval)
{
	trace_xen_mmu_set_pte_at(mm, addr, ptep, pteval);
	__xen_set_pte(ptep, pteval);
}

pte_t xen_ptep_modify_prot_start(struct mm_struct *mm,
				 unsigned long addr, pte_t *ptep)
{
	/* Just return the pte as-is.  We preserve the bits on commit */
	trace_xen_mmu_ptep_modify_prot_start(mm, addr, ptep, *ptep);
	return *ptep;
}

void xen_ptep_modify_prot_commit(struct mm_struct *mm, unsigned long addr,
				 pte_t *ptep, pte_t pte)
{
	struct mmu_update u;

	trace_xen_mmu_ptep_modify_prot_commit(mm, addr, ptep, pte);
	xen_mc_batch();

	u.ptr = virt_to_machine(ptep).maddr | MMU_PT_UPDATE_PRESERVE_AD;
	u.val = pte_val_ma(pte);
	xen_extend_mmu_update(&u);

	xen_mc_issue(PARAVIRT_LAZY_MMU);
}

/* Assume pteval_t is equivalent to all the other *val_t types. */
static pteval_t pte_mfn_to_pfn(pteval_t val)
{
	if (val & _PAGE_PRESENT) {
		unsigned long mfn = (val & PTE_PFN_MASK) >> PAGE_SHIFT;
		unsigned long pfn = mfn_to_pfn(mfn);

		pteval_t flags = val & PTE_FLAGS_MASK;
		if (unlikely(pfn == ~0))
			val = flags & ~_PAGE_PRESENT;
		else
			val = ((pteval_t)pfn << PAGE_SHIFT) | flags;
	}

	return val;
}

static pteval_t pte_pfn_to_mfn(pteval_t val)
{
	if (val & _PAGE_PRESENT) {
		unsigned long pfn = (val & PTE_PFN_MASK) >> PAGE_SHIFT;
		pteval_t flags = val & PTE_FLAGS_MASK;
		unsigned long mfn;

		if (!xen_feature(XENFEAT_auto_translated_physmap))
			mfn = __pfn_to_mfn(pfn);
		else
			mfn = pfn;
		/*
		 * If there's no mfn for the pfn, then just create an
		 * empty non-present pte.  Unfortunately this loses
		 * information about the original pfn, so
		 * pte_mfn_to_pfn is asymmetric.
		 */
		if (unlikely(mfn == INVALID_P2M_ENTRY)) {
			mfn = 0;
			flags = 0;
		} else
			mfn &= ~(FOREIGN_FRAME_BIT | IDENTITY_FRAME_BIT);
		val = ((pteval_t)mfn << PAGE_SHIFT) | flags;
	}

	return val;
}

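/*
 * For example, a present pte holding pfn 0x1234 keeps all of its flag
 * bits across pte_pfn_to_mfn(); only the frame-number field changes,
 * becoming ((pteval_t)__pfn_to_mfn(0x1234) << PAGE_SHIFT) | flags.
 * The xen_*_val()/xen_make_*() accessors below apply these two
 * conversions on every pagetable entry read and write.
 */
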
__visible pteval_t xen_pte_val(pte_t pte)
{
	pteval_t pteval = pte.pte;

	return pte_mfn_to_pfn(pteval);
}
PV_CALLEE_SAVE_REGS_THUNK(xen_pte_val);

__visible pgdval_t xen_pgd_val(pgd_t pgd)
{
	return pte_mfn_to_pfn(pgd.pgd);
}
PV_CALLEE_SAVE_REGS_THUNK(xen_pgd_val);

__visible pte_t xen_make_pte(pteval_t pte)
{
	pte = pte_pfn_to_mfn(pte);

	return native_make_pte(pte);
}
PV_CALLEE_SAVE_REGS_THUNK(xen_make_pte);

__visible pgd_t xen_make_pgd(pgdval_t pgd)
{
	pgd = pte_pfn_to_mfn(pgd);
	return native_make_pgd(pgd);
}
PV_CALLEE_SAVE_REGS_THUNK(xen_make_pgd);

__visible pmdval_t xen_pmd_val(pmd_t pmd)
{
	return pte_mfn_to_pfn(pmd.pmd);
}
PV_CALLEE_SAVE_REGS_THUNK(xen_pmd_val);

static void xen_set_pud_hyper(pud_t *ptr, pud_t val)
{
	struct mmu_update u;

	preempt_disable();

	xen_mc_batch();

	/* ptr may be ioremapped for 64-bit pagetable setup */
	u.ptr = arbitrary_virt_to_machine(ptr).maddr;
	u.val = pud_val_ma(val);
	xen_extend_mmu_update(&u);

	xen_mc_issue(PARAVIRT_LAZY_MMU);

	preempt_enable();
}

static void xen_set_pud(pud_t *ptr, pud_t val)
{
	trace_xen_mmu_set_pud(ptr, val);

	/* If page is not pinned, we can just update the entry
	   directly */
	if (!xen_page_pinned(ptr)) {
		*ptr = val;
		return;
	}

	xen_set_pud_hyper(ptr, val);
}

#ifdef CONFIG_X86_PAE
static void xen_set_pte_atomic(pte_t *ptep, pte_t pte)
{
	trace_xen_mmu_set_pte_atomic(ptep, pte);
	set_64bit((u64 *)ptep, native_pte_val(pte));
}

static void xen_pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
	trace_xen_mmu_pte_clear(mm, addr, ptep);
	if (!xen_batched_set_pte(ptep, native_make_pte(0)))
		native_pte_clear(mm, addr, ptep);
}

static void xen_pmd_clear(pmd_t *pmdp)
{
	trace_xen_mmu_pmd_clear(pmdp);
	set_pmd(pmdp, __pmd(0));
}
#endif	/* CONFIG_X86_PAE */

__visible pmd_t xen_make_pmd(pmdval_t pmd)
{
	pmd = pte_pfn_to_mfn(pmd);
	return native_make_pmd(pmd);
}
PV_CALLEE_SAVE_REGS_THUNK(xen_make_pmd);

#if CONFIG_PGTABLE_LEVELS == 4
__visible pudval_t xen_pud_val(pud_t pud)
{
	return pte_mfn_to_pfn(pud.pud);
}
PV_CALLEE_SAVE_REGS_THUNK(xen_pud_val);

__visible pud_t xen_make_pud(pudval_t pud)
{
	pud = pte_pfn_to_mfn(pud);

	return native_make_pud(pud);
}
PV_CALLEE_SAVE_REGS_THUNK(xen_make_pud);

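/*
 * On 64-bit, each kernel pgd may have a shadow "user" pgd, allocated
 * in xen_pgd_alloc() below and stashed in the pgd page's
 * page->private.  Return the corresponding entry in the user pgd, or
 * NULL if there is no user pagetable or the entry lies above
 * USER_LIMIT.
 */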
static pgd_t *xen_get_user_pgd(pgd_t *pgd)
{
	pgd_t *pgd_page = (pgd_t *)(((unsigned long)pgd) & PAGE_MASK);
	unsigned offset = pgd - pgd_page;
	pgd_t *user_ptr = NULL;

	if (offset < pgd_index(USER_LIMIT)) {
		struct page *page = virt_to_page(pgd_page);
		user_ptr = (pgd_t *)page->private;
		if (user_ptr)
			user_ptr += offset;
	}

	return user_ptr;
}

static void __xen_set_pgd_hyper(pgd_t *ptr, pgd_t val)
{
	struct mmu_update u;

	u.ptr = virt_to_machine(ptr).maddr;
	u.val = pgd_val_ma(val);
	xen_extend_mmu_update(&u);
}

/*
 * Raw hypercall-based set_pgd, intended for use in early boot before
 * there's a page structure.  This implies:
 *  1. The only existing pagetable is the kernel's
 *  2. It is always pinned
 *  3. It has no user pagetable attached to it
 */
static void __init xen_set_pgd_hyper(pgd_t *ptr, pgd_t val)
{
	preempt_disable();

	xen_mc_batch();

	__xen_set_pgd_hyper(ptr, val);

	xen_mc_issue(PARAVIRT_LAZY_MMU);

	preempt_enable();
}

static void xen_set_pgd(pgd_t *ptr, pgd_t val)
{
	pgd_t *user_ptr = xen_get_user_pgd(ptr);

	trace_xen_mmu_set_pgd(ptr, user_ptr, val);

	/* If page is not pinned, we can just update the entry
	   directly */
	if (!xen_page_pinned(ptr)) {
		*ptr = val;
		if (user_ptr) {
			WARN_ON(xen_page_pinned(user_ptr));
			*user_ptr = val;
		}
		return;
	}

	/* If it's pinned, then we can at least batch the kernel and
	   user updates together. */
	xen_mc_batch();

	__xen_set_pgd_hyper(ptr, val);
	if (user_ptr)
		__xen_set_pgd_hyper(user_ptr, val);

	xen_mc_issue(PARAVIRT_LAZY_MMU);
}
#endif	/* CONFIG_PGTABLE_LEVELS == 4 */

/*
 * (Yet another) pagetable walker.  This one is intended for pinning a
 * pagetable.  This means that it walks a pagetable and calls the
 * callback function on each page it finds making up the page table,
 * at every level.  It walks the entire pagetable, but it only bothers
 * pinning pte pages which are below limit.  In the normal case this
 * will be STACK_TOP_MAX, but at boot we need to pin up to
 * FIXADDR_TOP.
 *
 * For 32-bit the important bit is that we don't pin beyond there,
 * because then we start getting into Xen's ptes.
 *
 * For 64-bit, we must skip the Xen hole in the middle of the address
 * space, just after the big x86-64 virtual hole.
 */
static int __xen_pgd_walk(struct mm_struct *mm, pgd_t *pgd,
			  int (*func)(struct mm_struct *mm, struct page *,
				      enum pt_level),
			  unsigned long limit)
{
	int flush = 0;
	unsigned hole_low, hole_high;
	unsigned pgdidx_limit, pudidx_limit, pmdidx_limit;
	unsigned pgdidx, pudidx, pmdidx;

	/* The limit is the last byte to be touched */
	limit--;
	BUG_ON(limit >= FIXADDR_TOP);

	if (xen_feature(XENFEAT_auto_translated_physmap))
		return 0;

	/*
	 * 64-bit has a great big hole in the middle of the address
	 * space, which contains the Xen mappings.  On 32-bit these
	 * will end up making a zero-sized hole and so is a no-op.
	 */
	hole_low = pgd_index(USER_LIMIT);
	hole_high = pgd_index(PAGE_OFFSET);

	pgdidx_limit = pgd_index(limit);
#if PTRS_PER_PUD > 1
	pudidx_limit = pud_index(limit);
#else
	pudidx_limit = 0;
#endif
#if PTRS_PER_PMD > 1
	pmdidx_limit = pmd_index(limit);
#else
	pmdidx_limit = 0;
#endif

	for (pgdidx = 0; pgdidx <= pgdidx_limit; pgdidx++) {
		pud_t *pud;

		if (pgdidx >= hole_low && pgdidx < hole_high)
			continue;

		if (!pgd_val(pgd[pgdidx]))
			continue;

		pud = pud_offset(&pgd[pgdidx], 0);

		if (PTRS_PER_PUD > 1) /* not folded */
			flush |= (*func)(mm, virt_to_page(pud), PT_PUD);

		for (pudidx = 0; pudidx < PTRS_PER_PUD; pudidx++) {
			pmd_t *pmd;

			if (pgdidx == pgdidx_limit &&
			    pudidx > pudidx_limit)
				goto out;

			if (pud_none(pud[pudidx]))
				continue;

			pmd = pmd_offset(&pud[pudidx], 0);

			if (PTRS_PER_PMD > 1) /* not folded */
				flush |= (*func)(mm, virt_to_page(pmd), PT_PMD);

			for (pmdidx = 0; pmdidx < PTRS_PER_PMD; pmdidx++) {
				struct page *pte;

				if (pgdidx == pgdidx_limit &&
				    pudidx == pudidx_limit &&
				    pmdidx > pmdidx_limit)
					goto out;

				if (pmd_none(pmd[pmdidx]))
					continue;

				pte = pmd_page(pmd[pmdidx]);
				flush |= (*func)(mm, pte, PT_PTE);
			}
		}
	}

out:
	/* Do the top level last, so that the callbacks can use it as
	   a cue to do final things like tlb flushes. */
	flush |= (*func)(mm, virt_to_page(pgd), PT_PGD);

	return flush;
}

static int xen_pgd_walk(struct mm_struct *mm,
			int (*func)(struct mm_struct *mm, struct page *,
				    enum pt_level),
			unsigned long limit)
{
	return __xen_pgd_walk(mm, mm->pgd, func, limit);
}

/* If we're using split pte locks, then take the page's lock and
   return a pointer to it.  Otherwise return NULL. */
static spinlock_t *xen_pte_lock(struct page *page, struct mm_struct *mm)
{
	spinlock_t *ptl = NULL;

#if USE_SPLIT_PTE_PTLOCKS
	ptl = ptlock_ptr(page);
	spin_lock_nest_lock(ptl, &mm->page_table_lock);
#endif

	return ptl;
}

static void xen_pte_unlock(void *v)
{
	spinlock_t *ptl = v;
	spin_unlock(ptl);
}

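/*
 * Queue a pin or unpin mmuext op (MMUEXT_[UN]PIN_*_TABLE, passed as
 * @level) for the page at @pfn on the current multicall batch.
 */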
static void xen_do_pin(unsigned level, unsigned long pfn)
{
	struct mmuext_op op;

	op.cmd = level;
	op.arg1.mfn = pfn_to_mfn(pfn);

	xen_extend_mmuext_op(&op);
}

static int xen_pin_page(struct mm_struct *mm, struct page *page,
			enum pt_level level)
{
	unsigned pgfl = TestSetPagePinned(page);
	int flush;

	if (pgfl)
		flush = 0;		/* already pinned */
	else if (PageHighMem(page))
		/* kmaps need flushing if we found an unpinned
		   highpage */
		flush = 1;
	else {
		void *pt = lowmem_page_address(page);
		unsigned long pfn = page_to_pfn(page);
		struct multicall_space mcs = __xen_mc_entry(0);
		spinlock_t *ptl;

		flush = 0;

		/*
		 * We need to hold the pagetable lock between the time
		 * we make the pagetable RO and when we actually pin
		 * it.  If we don't, then other users may come in and
		 * attempt to update the pagetable by writing it,
		 * which will fail because the memory is RO but not
		 * pinned, so Xen won't do the trap'n'emulate.
		 *
		 * If we're using split pte locks, we can't hold the
		 * entire pagetable's worth of locks during the
		 * traverse, because we may wrap the preempt count (8
		 * bits).  The solution is to mark RO and pin each PTE
		 * page while holding the lock.  This means the number
		 * of locks we end up holding is never more than a
		 * batch size (~32 entries, at present).
		 *
		 * If we're not using split pte locks, we needn't pin
		 * the PTE pages independently, because we're
		 * protected by the overall pagetable lock.
		 */
		ptl = NULL;
		if (level == PT_PTE)
			ptl = xen_pte_lock(page, mm);

		MULTI_update_va_mapping(mcs.mc, (unsigned long)pt,
					pfn_pte(pfn, PAGE_KERNEL_RO),
					level == PT_PGD ? UVMF_TLB_FLUSH : 0);

		if (ptl) {
			xen_do_pin(MMUEXT_PIN_L1_TABLE, pfn);

			/* Queue a deferred unlock for when this batch
			   is completed. */
			xen_mc_callback(xen_pte_unlock, ptl);
		}
	}

	return flush;
}

/* This is called just after a mm has been created, but it has not
   been used yet.  We need to make sure that its pagetable is all
   read-only, and can be pinned. */
static void __xen_pgd_pin(struct mm_struct *mm, pgd_t *pgd)
{
	trace_xen_mmu_pgd_pin(mm, pgd);

	xen_mc_batch();

	if (__xen_pgd_walk(mm, pgd, xen_pin_page, USER_LIMIT)) {
		/* re-enable interrupts for flushing */
		xen_mc_issue(0);

		kmap_flush_unused();

		xen_mc_batch();
	}

#ifdef CONFIG_X86_64
	{
		pgd_t *user_pgd = xen_get_user_pgd(pgd);

		xen_do_pin(MMUEXT_PIN_L4_TABLE, PFN_DOWN(__pa(pgd)));

		if (user_pgd) {
			xen_pin_page(mm, virt_to_page(user_pgd), PT_PGD);
			xen_do_pin(MMUEXT_PIN_L4_TABLE,
				   PFN_DOWN(__pa(user_pgd)));
		}
	}
#else /* CONFIG_X86_32 */
#ifdef CONFIG_X86_PAE
	/* Need to make sure unshared kernel PMD is pinnable */
	xen_pin_page(mm, pgd_page(pgd[pgd_index(TASK_SIZE)]),
		     PT_PMD);
#endif
	xen_do_pin(MMUEXT_PIN_L3_TABLE, PFN_DOWN(__pa(pgd)));
#endif /* CONFIG_X86_64 */
	xen_mc_issue(0);
}

static void xen_pgd_pin(struct mm_struct *mm)
{
	__xen_pgd_pin(mm, mm->pgd);
}

/*
 * On save, we need to pin all pagetables to make sure they get their
 * mfns turned into pfns.  Search the list for any unpinned pgds and pin
 * them (unpinned pgds are not currently in use, probably because the
 * process is under construction or destruction).
 *
 * Expected to be called in stop_machine() ("equivalent to taking
 * every spinlock in the system"), so the locking doesn't really
 * matter all that much.
 */
void xen_mm_pin_all(void)
{
	struct page *page;

	spin_lock(&pgd_lock);

	list_for_each_entry(page, &pgd_list, lru) {
		if (!PagePinned(page)) {
			__xen_pgd_pin(&init_mm, (pgd_t *)page_address(page));
			SetPageSavePinned(page);
		}
	}

	spin_unlock(&pgd_lock);
}

/*
 * The init_mm pagetable is really pinned as soon as it's created, but
 * that's before we have page structures to store the bits.  So do all
 * the book-keeping now.
 */
static int __init xen_mark_pinned(struct mm_struct *mm, struct page *page,
				  enum pt_level level)
{
	SetPagePinned(page);
	return 0;
}

static void __init xen_mark_init_mm_pinned(void)
{
	xen_pgd_walk(&init_mm, xen_mark_pinned, FIXADDR_TOP);
}

static int xen_unpin_page(struct mm_struct *mm, struct page *page,
			  enum pt_level level)
{
	unsigned pgfl = TestClearPagePinned(page);

	if (pgfl && !PageHighMem(page)) {
		void *pt = lowmem_page_address(page);
		unsigned long pfn = page_to_pfn(page);
		spinlock_t *ptl = NULL;
		struct multicall_space mcs;

		/*
		 * Do the converse to pin_page.  If we're using split
		 * pte locks, we must be holding the lock while the
		 * pte page is unpinned but still RO to prevent
		 * concurrent updates from seeing it in this
		 * partially-pinned state.
		 */
		if (level == PT_PTE) {
			ptl = xen_pte_lock(page, mm);

			if (ptl)
				xen_do_pin(MMUEXT_UNPIN_TABLE, pfn);
		}

		mcs = __xen_mc_entry(0);

		MULTI_update_va_mapping(mcs.mc, (unsigned long)pt,
					pfn_pte(pfn, PAGE_KERNEL),
					level == PT_PGD ? UVMF_TLB_FLUSH : 0);

		if (ptl) {
			/* unlock when batch completed */
			xen_mc_callback(xen_pte_unlock, ptl);
		}
	}

	return 0;		/* never need to flush on unpin */
}

/* Release a pagetable's pages back as normal RW */
static void __xen_pgd_unpin(struct mm_struct *mm, pgd_t *pgd)
{
	trace_xen_mmu_pgd_unpin(mm, pgd);

	xen_mc_batch();

	xen_do_pin(MMUEXT_UNPIN_TABLE, PFN_DOWN(__pa(pgd)));

#ifdef CONFIG_X86_64
	{
		pgd_t *user_pgd = xen_get_user_pgd(pgd);

		if (user_pgd) {
			xen_do_pin(MMUEXT_UNPIN_TABLE,
				   PFN_DOWN(__pa(user_pgd)));
			xen_unpin_page(mm, virt_to_page(user_pgd), PT_PGD);
		}
	}
#endif

#ifdef CONFIG_X86_PAE
	/* Need to make sure unshared kernel PMD is unpinned */
	xen_unpin_page(mm, pgd_page(pgd[pgd_index(TASK_SIZE)]),
		       PT_PMD);
#endif

	__xen_pgd_walk(mm, pgd, xen_unpin_page, USER_LIMIT);

	xen_mc_issue(0);
}

static void xen_pgd_unpin(struct mm_struct *mm)
{
	__xen_pgd_unpin(mm, mm->pgd);
}

/*
 * On resume, undo any pinning done at save, so that the rest of the
 * kernel doesn't see any unexpected pinned pagetables.
 */
void xen_mm_unpin_all(void)
{
	struct page *page;

	spin_lock(&pgd_lock);

	list_for_each_entry(page, &pgd_list, lru) {
		if (PageSavePinned(page)) {
			BUG_ON(!PagePinned(page));
			__xen_pgd_unpin(&init_mm, (pgd_t *)page_address(page));
			ClearPageSavePinned(page);
		}
	}

	spin_unlock(&pgd_lock);
}

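/*
 * Pin an mm's pagetable as soon as it may be loaded into %cr3: at
 * activate_mm() (first use, e.g. after exec) and at dup_mmap()
 * (fork), holding the mm's page_table_lock across the walk.
 */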
static void xen_activate_mm(struct mm_struct *prev, struct mm_struct *next)
{
	spin_lock(&next->page_table_lock);
	xen_pgd_pin(next);
	spin_unlock(&next->page_table_lock);
}

static void xen_dup_mmap(struct mm_struct *oldmm, struct mm_struct *mm)
{
	spin_lock(&mm->page_table_lock);
	xen_pgd_pin(mm);
	spin_unlock(&mm->page_table_lock);
}


#ifdef CONFIG_SMP
/* Another cpu may still have their %cr3 pointing at the pagetable, so
   we need to repoint it somewhere else before we can unpin it. */
static void drop_other_mm_ref(void *info)
{
	struct mm_struct *mm = info;
	struct mm_struct *active_mm;

	active_mm = this_cpu_read(cpu_tlbstate.active_mm);

	if (active_mm == mm && this_cpu_read(cpu_tlbstate.state) != TLBSTATE_OK)
		leave_mm(smp_processor_id());

	/* If this cpu still has a stale cr3 reference, then make sure
	   it has been flushed. */
	if (this_cpu_read(xen_current_cr3) == __pa(mm->pgd))
		load_cr3(swapper_pg_dir);
}

static void xen_drop_mm_ref(struct mm_struct *mm)
{
	cpumask_var_t mask;
	unsigned cpu;

	if (current->active_mm == mm) {
		if (current->mm == mm)
			load_cr3(swapper_pg_dir);
		else
			leave_mm(smp_processor_id());
	}

	/* Get the "official" set of cpus referring to our pagetable. */
	if (!alloc_cpumask_var(&mask, GFP_ATOMIC)) {
		for_each_online_cpu(cpu) {
			if (!cpumask_test_cpu(cpu, mm_cpumask(mm))
			    && per_cpu(xen_current_cr3, cpu) != __pa(mm->pgd))
				continue;
			smp_call_function_single(cpu, drop_other_mm_ref, mm, 1);
		}
		return;
	}
	cpumask_copy(mask, mm_cpumask(mm));

	/* It's possible that a vcpu may have a stale reference to our
	   cr3, because it's in lazy mode and hasn't yet flushed its
	   set of pending hypercalls.  In this case, we can look at
	   its actual current cr3 value, and force it to flush if
	   needed. */
	for_each_online_cpu(cpu) {
		if (per_cpu(xen_current_cr3, cpu) == __pa(mm->pgd))
			cpumask_set_cpu(cpu, mask);
	}

	if (!cpumask_empty(mask))
		smp_call_function_many(mask, drop_other_mm_ref, mm, 1);
	free_cpumask_var(mask);
}
#else
static void xen_drop_mm_ref(struct mm_struct *mm)
{
	if (current->active_mm == mm)
		load_cr3(swapper_pg_dir);
}
#endif

/*
 * While a process runs, Xen pins its pagetables, which means that the
 * hypervisor forces it to be read-only, and it controls all updates
 * to it.  This means that all pagetable updates have to go via the
 * hypervisor, which is moderately expensive.
 *
 * Since we're pulling the pagetable down, we switch to use init_mm,
 * unpin old process pagetable and mark it all read-write, which
 * allows further operations on it to be simple memory accesses.
 *
 * The only subtle point is that another CPU may be still using the
 * pagetable because of lazy tlb flushing.  This means we need to
 * switch all CPUs off this pagetable before we can unpin it.
 */
static void xen_exit_mmap(struct mm_struct *mm)
{
	get_cpu();		/* make sure we don't move around */
	xen_drop_mm_ref(mm);
	put_cpu();

	spin_lock(&mm->page_table_lock);

	/* pgd may not be pinned in the error exit path of execve */
	if (xen_page_pinned(mm->pgd))
		xen_pgd_unpin(mm);

	spin_unlock(&mm->page_table_lock);
}

static void xen_post_allocator_init(void);

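/*
 * Pin or unpin a pagetable page with an immediate, unbatched mmuext
 * hypercall, rather than queueing the op on a multicall batch as
 * xen_do_pin() does.
 */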
static void __init pin_pagetable_pfn(unsigned cmd, unsigned long pfn)
{
	struct mmuext_op op;

	op.cmd = cmd;
	op.arg1.mfn = pfn_to_mfn(pfn);
	if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF))
		BUG();
}

#ifdef CONFIG_X86_64
static void __init xen_cleanhighmap(unsigned long vaddr,
				    unsigned long vaddr_end)
{
	unsigned long kernel_end = roundup((unsigned long)_brk_end, PMD_SIZE) - 1;
	pmd_t *pmd = level2_kernel_pgt + pmd_index(vaddr);

	/* NOTE: The loop is more greedy than the cleanup_highmap variant.
	 * We include the PMD passed in on _both_ boundaries. */
	for (; vaddr <= vaddr_end && (pmd < (level2_kernel_pgt + PTRS_PER_PMD));
			pmd++, vaddr += PMD_SIZE) {
		if (pmd_none(*pmd))
			continue;
		if (vaddr < (unsigned long) _text || vaddr > kernel_end)
			set_pmd(pmd, __pmd(0));
	}
	/* In case we did something silly, we should crash in this function
	 * instead of somewhere later and be confusing. */
	xen_mc_flush();
}

/*
 * Make a page range writeable and free it.
 */
static void __init xen_free_ro_pages(unsigned long paddr, unsigned long size)
{
	void *vaddr = __va(paddr);
	void *vaddr_end = vaddr + size;

	for (; vaddr < vaddr_end; vaddr += PAGE_SIZE)
		make_lowmem_page_readwrite(vaddr);

	memblock_free(paddr, size);
}

static void __init xen_cleanmfnmap_free_pgtbl(void *pgtbl, bool unpin)
{
	unsigned long pa = __pa(pgtbl) & PHYSICAL_PAGE_MASK;

	if (unpin)
		pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, PFN_DOWN(pa));
	ClearPagePinned(virt_to_page(__va(pa)));
	xen_free_ro_pages(pa, PAGE_SIZE);
}

/*
 * Since it is well isolated we can (and since it is perhaps large we should)
 * also free the page tables mapping the initial P->M table.
 */
static void __init xen_cleanmfnmap(unsigned long vaddr)
{
	unsigned long va = vaddr & PMD_MASK;
	unsigned long pa;
	pgd_t *pgd = pgd_offset_k(va);
	pud_t *pud_page = pud_offset(pgd, 0);
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	unsigned int i;
	bool unpin;

	unpin = (vaddr == 2 * PGDIR_SIZE);
	set_pgd(pgd, __pgd(0));
	do {
		pud = pud_page + pud_index(va);
		if (pud_none(*pud)) {
			va += PUD_SIZE;
		} else if (pud_large(*pud)) {
			pa = pud_val(*pud) & PHYSICAL_PAGE_MASK;
			xen_free_ro_pages(pa, PUD_SIZE);
			va += PUD_SIZE;
		} else {
			pmd = pmd_offset(pud, va);
			if (pmd_large(*pmd)) {
				pa = pmd_val(*pmd) & PHYSICAL_PAGE_MASK;
				xen_free_ro_pages(pa, PMD_SIZE);
			} else if (!pmd_none(*pmd)) {
				pte = pte_offset_kernel(pmd, va);
				set_pmd(pmd, __pmd(0));
				for (i = 0; i < PTRS_PER_PTE; ++i) {
					if (pte_none(pte[i]))
						break;
					pa = pte_pfn(pte[i]) << PAGE_SHIFT;
					xen_free_ro_pages(pa, PAGE_SIZE);
				}
				xen_cleanmfnmap_free_pgtbl(pte, unpin);
			}
			va += PMD_SIZE;
			if (pmd_index(va))
				continue;
			set_pud(pud, __pud(0));
			xen_cleanmfnmap_free_pgtbl(pmd, unpin);
		}

	} while (pud_index(va) || pmd_index(va));
	xen_cleanmfnmap_free_pgtbl(pud_page, unpin);
}

static void __init xen_pagetable_p2m_free(void)
{
	unsigned long size;
	unsigned long addr;

	size = PAGE_ALIGN(xen_start_info->nr_pages * sizeof(unsigned long));

	/* No memory or already called. */
	if ((unsigned long)xen_p2m_addr == xen_start_info->mfn_list)
		return;

	/* using __ka address and sticking INVALID_P2M_ENTRY! */
	memset((void *)xen_start_info->mfn_list, 0xff, size);

	addr = xen_start_info->mfn_list;
	/*
	 * We could be in __ka space.
	 * We roundup to the PMD, which means that if anybody at this stage is
	 * using the __ka address of xen_start_info or
	 * xen_start_info->shared_info they are going to crash.  Fortunately
	 * we have already revectored in xen_setup_kernel_pagetable and in
	 * xen_setup_shared_info.
	 */
	size = roundup(size, PMD_SIZE);

	if (addr >= __START_KERNEL_map) {
		xen_cleanhighmap(addr, addr + size);
		size = PAGE_ALIGN(xen_start_info->nr_pages *
				  sizeof(unsigned long));
		memblock_free(__pa(addr), size);
	} else {
		xen_cleanmfnmap(addr);
	}
}

static void __init xen_pagetable_cleanhighmap(void)
{
	unsigned long size;
	unsigned long addr;

	/* At this stage, cleanup_highmap has already cleaned __ka space
	 * from _brk_limit way up to the max_pfn_mapped (which is the end of
	 * the ramdisk). We continue on, erasing PMD entries that point to page
	 * tables - do note that they are accessible at this stage via __va.
	 * For good measure we also round up to the PMD - which means that if
	 * anybody is using a __ka address to the initial boot-stack and tries
	 * to use it - they are going to crash.  The xen_start_info has been
	 * taken care of already in xen_setup_kernel_pagetable. */
	addr = xen_start_info->pt_base;
	size = roundup(xen_start_info->nr_pt_frames * PAGE_SIZE, PMD_SIZE);

	xen_cleanhighmap(addr, addr + size);
	xen_start_info->pt_base = (unsigned long)__va(__pa(xen_start_info->pt_base));
#ifdef DEBUG
	/* This is superfluous and not strictly necessary, but you know what,
	 * let's do it.  The MODULES_VADDR -> MODULES_END should be clear of
	 * anything at this stage. */
	xen_cleanhighmap(MODULES_VADDR, roundup(MODULES_VADDR, PUD_SIZE) - 1);
#endif
}
#endif

static void __init xen_pagetable_p2m_setup(void)
{
	if (xen_feature(XENFEAT_auto_translated_physmap))
		return;

	xen_vmalloc_p2m_tree();

#ifdef CONFIG_X86_64
	xen_pagetable_p2m_free();

	xen_pagetable_cleanhighmap();
#endif
	/* And revector! Bye bye old array */
	xen_start_info->mfn_list = (unsigned long)xen_p2m_addr;
}

static void __init xen_pagetable_init(void)
{
	paging_init();
	xen_post_allocator_init();

	xen_pagetable_p2m_setup();

	/* Allocate and initialize top and mid mfn levels for p2m structure */
	xen_build_mfn_list_list();

	/* Remap memory freed due to conflicts with E820 map */
	if (!xen_feature(XENFEAT_auto_translated_physmap))
		xen_remap_memory();

	xen_setup_shared_info();
}

static void xen_write_cr2(unsigned long cr2)
{
	this_cpu_read(xen_vcpu)->arch.cr2 = cr2;
}

static unsigned long xen_read_cr2(void)
{
	return this_cpu_read(xen_vcpu)->arch.cr2;
}

unsigned long xen_read_cr2_direct(void)
{
	return this_cpu_read(xen_vcpu_info.arch.cr2);
}

void xen_flush_tlb_all(void)
{
	struct mmuext_op *op;
	struct multicall_space mcs;

	trace_xen_mmu_flush_tlb_all(0);

	preempt_disable();

	mcs = xen_mc_entry(sizeof(*op));

	op = mcs.args;
	op->cmd = MMUEXT_TLB_FLUSH_ALL;
	MULTI_mmuext_op(mcs.mc, op, 1, NULL, DOMID_SELF);

	xen_mc_issue(PARAVIRT_LAZY_MMU);

	preempt_enable();
}
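
/*
 * Local-only variant of xen_flush_tlb_all() above: flushes this
 * vcpu's TLB (MMUEXT_TLB_FLUSH_LOCAL) instead of every vcpu's.
 */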
static void xen_flush_tlb(void)
{
	struct mmuext_op *op;
	struct multicall_space mcs;

	trace_xen_mmu_flush_tlb(0);

	preempt_disable();

	mcs = xen_mc_entry(sizeof(*op));

	op = mcs.args;
	op->cmd = MMUEXT_TLB_FLUSH_LOCAL;
	MULTI_mmuext_op(mcs.mc, op, 1, NULL, DOMID_SELF);

	xen_mc_issue(PARAVIRT_LAZY_MMU);

	preempt_enable();
}

static void xen_flush_tlb_single(unsigned long addr)
{
	struct mmuext_op *op;
	struct multicall_space mcs;

	trace_xen_mmu_flush_tlb_single(addr);

	preempt_disable();

	mcs = xen_mc_entry(sizeof(*op));
	op = mcs.args;
	op->cmd = MMUEXT_INVLPG_LOCAL;
	op->arg1.linear_addr = addr & PAGE_MASK;
	MULTI_mmuext_op(mcs.mc, op, 1, NULL, DOMID_SELF);

	xen_mc_issue(PARAVIRT_LAZY_MMU);

	preempt_enable();
}

static void xen_flush_tlb_others(const struct cpumask *cpus,
				 struct mm_struct *mm, unsigned long start,
				 unsigned long end)
{
	struct {
		struct mmuext_op op;
#ifdef CONFIG_SMP
		DECLARE_BITMAP(mask, num_processors);
#else
		DECLARE_BITMAP(mask, NR_CPUS);
#endif
	} *args;
	struct multicall_space mcs;

	trace_xen_mmu_flush_tlb_others(cpus, mm, start, end);

	if (cpumask_empty(cpus))
		return;		/* nothing to do */

	mcs = xen_mc_entry(sizeof(*args));
	args = mcs.args;
	args->op.arg2.vcpumask = to_cpumask(args->mask);

	/* Remove us, and any offline CPUS. */
	cpumask_and(to_cpumask(args->mask), cpus, cpu_online_mask);
	cpumask_clear_cpu(smp_processor_id(), to_cpumask(args->mask));

	args->op.cmd = MMUEXT_TLB_FLUSH_MULTI;
	if (end != TLB_FLUSH_ALL && (end - start) <= PAGE_SIZE) {
		args->op.cmd = MMUEXT_INVLPG_MULTI;
		args->op.arg1.linear_addr = start;
	}

	MULTI_mmuext_op(mcs.mc, &args->op, 1, NULL, DOMID_SELF);

	xen_mc_issue(PARAVIRT_LAZY_MMU);
}

static unsigned long xen_read_cr3(void)
{
	return this_cpu_read(xen_cr3);
}

static void set_current_cr3(void *v)
{
	this_cpu_write(xen_current_cr3, (unsigned long)v);
}

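/*
 * Queue a NEW_BASEPTR (or NEW_USER_BASEPTR) mmuext op for @cr3 on the
 * current multicall batch.  The per-cpu xen_current_cr3 is only
 * updated via a multicall callback, once the batch carrying the op
 * has actually been issued.
 */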
static void __xen_write_cr3(bool kernel, unsigned long cr3)
{
	struct mmuext_op op;
	unsigned long mfn;

	trace_xen_mmu_write_cr3(kernel, cr3);

	if (cr3)
		mfn = pfn_to_mfn(PFN_DOWN(cr3));
	else
		mfn = 0;

	WARN_ON(mfn == 0 && kernel);

	op.cmd = kernel ? MMUEXT_NEW_BASEPTR : MMUEXT_NEW_USER_BASEPTR;
	op.arg1.mfn = mfn;

	xen_extend_mmuext_op(&op);

	if (kernel) {
		this_cpu_write(xen_cr3, cr3);

		/* Update xen_current_cr3 once the batch has actually
		   been submitted. */
		xen_mc_callback(set_current_cr3, (void *)cr3);
	}
}

static void xen_write_cr3(unsigned long cr3)
{
	BUG_ON(preemptible());

	xen_mc_batch();  /* disables interrupts */

	/* Update while interrupts are disabled, so it's atomic with
	   respect to ipis */
	this_cpu_write(xen_cr3, cr3);

	__xen_write_cr3(true, cr3);

#ifdef CONFIG_X86_64
	{
		pgd_t *user_pgd = xen_get_user_pgd(__va(cr3));
		if (user_pgd)
			__xen_write_cr3(false, __pa(user_pgd));
		else
			__xen_write_cr3(false, 0);
	}
#endif

	xen_mc_issue(PARAVIRT_LAZY_CPU);  /* interrupts restored */
}

0cc9129d KRW |
1473 | #ifdef CONFIG_X86_64 |
1474 | /* | |
1475 | * At the start of the day - when Xen launches a guest, it has already | |
1476 | * built pagetables for the guest. We diligently look over them | |
6a6256f9 | 1477 | * in xen_setup_kernel_pagetable and graft as appropriate them in the |
0cc9129d KRW |
1478 | * init_level4_pgt and its friends. Then when we are happy we load |
1479 | * the new init_level4_pgt - and continue on. | |
1480 | * | |
1481 | * The generic code starts (start_kernel) and 'init_mem_mapping' sets | |
1482 | * up the rest of the pagetables. When it has completed it loads the cr3. | |
1483 | * N.B. that baremetal would start at 'start_kernel' (and the early | |
1484 | * #PF handler would create bootstrap pagetables) - so we are running | |
1485 | * under the same assumptions about what write_cr3 must do at |
1486 | * this point. |
1487 | * | |
1488 | * Since there are no user-page tables at all, we have two variants | |
1489 | * of xen_write_cr3 - the early bootup (this one), and the late one | |
1490 | * (xen_write_cr3). The reason we have to do that is that in 64-bit | |
1491 | * the Linux kernel and user-space are both in ring 3 while the | |
1492 | * hypervisor is in ring 0. | |
1493 | */ | |
1494 | static void __init xen_write_cr3_init(unsigned long cr3) | |
1495 | { | |
1496 | BUG_ON(preemptible()); | |
1497 | ||
1498 | xen_mc_batch(); /* disables interrupts */ | |
1499 | ||
1500 | /* Update while interrupts are disabled, so it's atomic with |
1501 | respect to IPIs. */ | |
1502 | this_cpu_write(xen_cr3, cr3); | |
1503 | ||
1504 | __xen_write_cr3(true, cr3); | |
1505 | ||
1506 | xen_mc_issue(PARAVIRT_LAZY_CPU); /* interrupts restored */ | |
0cc9129d KRW |
1507 | } |
1508 | #endif | |
1509 | ||
319f3ba5 JF |
1510 | static int xen_pgd_alloc(struct mm_struct *mm) |
1511 | { | |
1512 | pgd_t *pgd = mm->pgd; | |
1513 | int ret = 0; | |
1514 | ||
1515 | BUG_ON(PagePinned(virt_to_page(pgd))); | |
1516 | ||
1517 | #ifdef CONFIG_X86_64 | |
1518 | { | |
1519 | struct page *page = virt_to_page(pgd); | |
1520 | pgd_t *user_pgd; | |
1521 | ||
1522 | BUG_ON(page->private != 0); | |
1523 | ||
1524 | ret = -ENOMEM; | |
1525 | ||
1526 | user_pgd = (pgd_t *)__get_free_page(GFP_KERNEL | __GFP_ZERO); | |
1527 | page->private = (unsigned long)user_pgd; | |
1528 | ||
1529 | if (user_pgd != NULL) { | |
1ad83c85 | 1530 | #ifdef CONFIG_X86_VSYSCALL_EMULATION |
f40c3300 | 1531 | user_pgd[pgd_index(VSYSCALL_ADDR)] = |
319f3ba5 | 1532 | __pgd(__pa(level3_user_vsyscall) | _PAGE_TABLE); |
1ad83c85 | 1533 | #endif |
319f3ba5 JF |
1534 | ret = 0; |
1535 | } | |
1536 | ||
1537 | BUG_ON(PagePinned(virt_to_page(xen_get_user_pgd(pgd)))); | |
1538 | } | |
1539 | #endif | |
1540 | ||
1541 | return ret; | |
1542 | } | |
1543 | ||
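/*
 * Editorial sketch: xen_pgd_alloc() above stashes the user pgd in
 * page->private of the kernel pgd's page.  A simplified view of how
 * xen_get_user_pgd() (defined earlier in this file) recovers it; the
 * real helper also handles pointers into the middle of the pgd page.
 */
static pgd_t *example_get_user_pgd(pgd_t *pgd)
{
	struct page *page = virt_to_page(pgd);

	/* NULL if the allocation in xen_pgd_alloc() failed. */
	return (pgd_t *)page->private;
}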
1544 | static void xen_pgd_free(struct mm_struct *mm, pgd_t *pgd) | |
1545 | { | |
1546 | #ifdef CONFIG_X86_64 | |
1547 | pgd_t *user_pgd = xen_get_user_pgd(pgd); | |
1548 | ||
1549 | if (user_pgd) | |
1550 | free_page((unsigned long)user_pgd); | |
1551 | #endif | |
1552 | } | |
1553 | ||
d095d43e DV |
1554 | /* |
1555 | * Init-time set_pte while constructing initial pagetables, which | |
1556 | * doesn't allow RO page table pages to be remapped RW. | |
1557 | * | |
66a27dde DV |
1558 | * If there is no MFN for this PFN then this page is initially |
1559 | * ballooned out so clear the PTE (as in decrease_reservation() in | |
1560 | * drivers/xen/balloon.c). | |
1561 | * | |
d095d43e DV |
1562 | * Many of these PTE updates are done on unpinned and writable pages |
1563 | * and doing a hypercall for these is unnecessary and expensive. At | |
1564 | * this point it is not possible to tell if a page is pinned or not, | |
1565 | * so always write the PTE directly and rely on Xen trapping and | |
1566 | * emulating any updates as necessary. | |
1567 | */ | |
d6b186c1 | 1568 | __visible pte_t xen_make_pte_init(pteval_t pte) |
1f4f9315 | 1569 | { |
d6b186c1 DV |
1570 | #ifdef CONFIG_X86_64 |
1571 | unsigned long pfn; | |
1572 | ||
1573 | /* | |
1574 | * Pages belonging to the initial p2m list mapped outside the default | |
1575 | * address range must be mapped read-only. This region contains the | |
1576 | * page tables for mapping the p2m list, too, and page tables MUST be | |
1577 | * mapped read-only. | |
1578 | */ | |
1579 | pfn = (pte & PTE_PFN_MASK) >> PAGE_SHIFT; | |
1580 | if (xen_start_info->mfn_list < __START_KERNEL_map && | |
1581 | pfn >= xen_start_info->first_p2m_pfn && | |
1582 | pfn < xen_start_info->first_p2m_pfn + xen_start_info->nr_p2m_frames) | |
1583 | pte &= ~_PAGE_RW; | |
1584 | #endif | |
1585 | pte = pte_pfn_to_mfn(pte); | |
1586 | return native_make_pte(pte); | |
1587 | } | |
1588 | PV_CALLEE_SAVE_REGS_THUNK(xen_make_pte_init); | |
1f4f9315 | 1589 | |
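/*
 * Editorial sketch: for a present pte, the pte_pfn_to_mfn() call used
 * above essentially swaps the frame number while preserving the flag
 * bits.  The real helper additionally copes with ballooned pages and
 * identity-mapped ranges.  Hypothetical simplified form:
 */
static pteval_t example_pte_pfn_to_mfn(pteval_t val)
{
	unsigned long pfn = (val & PTE_PFN_MASK) >> PAGE_SHIFT;
	unsigned long mfn = pfn_to_mfn(pfn);

	return (val & ~PTE_PFN_MASK) | ((pteval_t)mfn << PAGE_SHIFT);
}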
d6b186c1 DV |
1590 | static void __init xen_set_pte_init(pte_t *ptep, pte_t pte) |
1591 | { | |
1592 | #ifdef CONFIG_X86_32 | |
1593 | /* If there's an existing pte, then don't allow _PAGE_RW to be set */ | |
1594 | if (pte_mfn(pte) != INVALID_P2M_ENTRY | |
1595 | && pte_val_ma(*ptep) & _PAGE_PRESENT) | |
1596 | pte = __pte_ma(((pte_val_ma(*ptep) & _PAGE_RW) | ~_PAGE_RW) & | |
1597 | pte_val_ma(pte)); | |
1598 | #endif | |
d095d43e | 1599 | native_set_pte(ptep, pte); |
1f4f9315 | 1600 | } |
319f3ba5 JF |
1601 | |
1602 | /* Early in boot, while setting up the initial pagetable, assume | |
1603 | everything is pinned. */ | |
3f508953 | 1604 | static void __init xen_alloc_pte_init(struct mm_struct *mm, unsigned long pfn) |
319f3ba5 | 1605 | { |
b96229b5 JF |
1606 | #ifdef CONFIG_FLATMEM |
1607 | BUG_ON(mem_map); /* should only be used early */ | |
1608 | #endif | |
1609 | make_lowmem_page_readonly(__va(PFN_PHYS(pfn))); | |
1610 | pin_pagetable_pfn(MMUEXT_PIN_L1_TABLE, pfn); | |
1611 | } | |
1612 | ||
1613 | /* Used for pmd and pud */ | |
3f508953 | 1614 | static void __init xen_alloc_pmd_init(struct mm_struct *mm, unsigned long pfn) |
b96229b5 | 1615 | { |
319f3ba5 JF |
1616 | #ifdef CONFIG_FLATMEM |
1617 | BUG_ON(mem_map); /* should only be used early */ | |
1618 | #endif | |
1619 | make_lowmem_page_readonly(__va(PFN_PHYS(pfn))); | |
1620 | } | |
1621 | ||
1622 | /* Early release_pte assumes that all pts are pinned, since there's | |
1623 | only init_mm and anything attached to that is pinned. */ | |
3f508953 | 1624 | static void __init xen_release_pte_init(unsigned long pfn) |
319f3ba5 | 1625 | { |
b96229b5 | 1626 | pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, pfn); |
319f3ba5 JF |
1627 | make_lowmem_page_readwrite(__va(PFN_PHYS(pfn))); |
1628 | } | |
1629 | ||
3f508953 | 1630 | static void __init xen_release_pmd_init(unsigned long pfn) |
319f3ba5 | 1631 | { |
b96229b5 | 1632 | make_lowmem_page_readwrite(__va(PFN_PHYS(pfn))); |
319f3ba5 JF |
1633 | } |
1634 | ||
bc7fe1d9 JF |
1635 | static inline void __pin_pagetable_pfn(unsigned cmd, unsigned long pfn) |
1636 | { | |
1637 | struct multicall_space mcs; | |
1638 | struct mmuext_op *op; | |
1639 | ||
1640 | mcs = __xen_mc_entry(sizeof(*op)); | |
1641 | op = mcs.args; | |
1642 | op->cmd = cmd; | |
1643 | op->arg1.mfn = pfn_to_mfn(pfn); | |
1644 | ||
1645 | MULTI_mmuext_op(mcs.mc, mcs.args, 1, NULL, DOMID_SELF); | |
1646 | } | |
1647 | ||
1648 | static inline void __set_pfn_prot(unsigned long pfn, pgprot_t prot) | |
1649 | { | |
1650 | struct multicall_space mcs; | |
1651 | unsigned long addr = (unsigned long)__va(pfn << PAGE_SHIFT); | |
1652 | ||
1653 | mcs = __xen_mc_entry(0); | |
1654 | MULTI_update_va_mapping(mcs.mc, (unsigned long)addr, | |
1655 | pfn_pte(pfn, prot), 0); | |
1656 | } | |
1657 | ||
319f3ba5 JF |
1658 | /* This needs to make sure the new pte page is pinned iff it's being |
1659 | attached to a pinned pagetable. */ | |
bc7fe1d9 JF |
1660 | static inline void xen_alloc_ptpage(struct mm_struct *mm, unsigned long pfn, |
1661 | unsigned level) | |
319f3ba5 | 1662 | { |
bc7fe1d9 JF |
1663 | bool pinned = PagePinned(virt_to_page(mm->pgd)); |
1664 | ||
c2ba050d | 1665 | trace_xen_mmu_alloc_ptpage(mm, pfn, level, pinned); |
319f3ba5 | 1666 | |
c2ba050d | 1667 | if (pinned) { |
bc7fe1d9 | 1668 | struct page *page = pfn_to_page(pfn); |
319f3ba5 | 1669 | |
319f3ba5 JF |
1670 | SetPagePinned(page); |
1671 | ||
319f3ba5 | 1672 | if (!PageHighMem(page)) { |
bc7fe1d9 JF |
1673 | xen_mc_batch(); |
1674 | ||
1675 | __set_pfn_prot(pfn, PAGE_KERNEL_RO); | |
1676 | ||
57c1ffce | 1677 | if (level == PT_PTE && USE_SPLIT_PTE_PTLOCKS) |
bc7fe1d9 JF |
1678 | __pin_pagetable_pfn(MMUEXT_PIN_L1_TABLE, pfn); |
1679 | ||
1680 | xen_mc_issue(PARAVIRT_LAZY_MMU); | |
319f3ba5 JF |
1681 | } else { |
1682 | /* make sure there are no stray mappings of | |
1683 | this page */ | |
1684 | kmap_flush_unused(); | |
1685 | } | |
1686 | } | |
1687 | } | |
1688 | ||
1689 | static void xen_alloc_pte(struct mm_struct *mm, unsigned long pfn) | |
1690 | { | |
1691 | xen_alloc_ptpage(mm, pfn, PT_PTE); | |
1692 | } | |
1693 | ||
1694 | static void xen_alloc_pmd(struct mm_struct *mm, unsigned long pfn) | |
1695 | { | |
1696 | xen_alloc_ptpage(mm, pfn, PT_PMD); | |
1697 | } | |
1698 | ||
1699 | /* This should never happen until we're OK to use struct page */ | |
bc7fe1d9 | 1700 | static inline void xen_release_ptpage(unsigned long pfn, unsigned level) |
319f3ba5 JF |
1701 | { |
1702 | struct page *page = pfn_to_page(pfn); | |
c2ba050d | 1703 | bool pinned = PagePinned(page); |
319f3ba5 | 1704 | |
c2ba050d | 1705 | trace_xen_mmu_release_ptpage(pfn, level, pinned); |
319f3ba5 | 1706 | |
c2ba050d | 1707 | if (pinned) { |
319f3ba5 | 1708 | if (!PageHighMem(page)) { |
bc7fe1d9 JF |
1709 | xen_mc_batch(); |
1710 | ||
57c1ffce | 1711 | if (level == PT_PTE && USE_SPLIT_PTE_PTLOCKS) |
bc7fe1d9 JF |
1712 | __pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, pfn); |
1713 | ||
1714 | __set_pfn_prot(pfn, PAGE_KERNEL); | |
1715 | ||
1716 | xen_mc_issue(PARAVIRT_LAZY_MMU); | |
319f3ba5 JF |
1717 | } |
1718 | ClearPagePinned(page); | |
1719 | } | |
1720 | } | |
1721 | ||
1722 | static void xen_release_pte(unsigned long pfn) | |
1723 | { | |
1724 | xen_release_ptpage(pfn, PT_PTE); | |
1725 | } | |
1726 | ||
1727 | static void xen_release_pmd(unsigned long pfn) | |
1728 | { | |
1729 | xen_release_ptpage(pfn, PT_PMD); | |
1730 | } | |
1731 | ||
98233368 | 1732 | #if CONFIG_PGTABLE_LEVELS == 4 |
319f3ba5 JF |
1733 | static void xen_alloc_pud(struct mm_struct *mm, unsigned long pfn) |
1734 | { | |
1735 | xen_alloc_ptpage(mm, pfn, PT_PUD); | |
1736 | } | |
1737 | ||
1738 | static void xen_release_pud(unsigned long pfn) | |
1739 | { | |
1740 | xen_release_ptpage(pfn, PT_PUD); | |
1741 | } | |
1742 | #endif | |
1743 | ||
1744 | void __init xen_reserve_top(void) | |
1745 | { | |
1746 | #ifdef CONFIG_X86_32 | |
1747 | unsigned long top = HYPERVISOR_VIRT_START; | |
1748 | struct xen_platform_parameters pp; | |
1749 | ||
1750 | if (HYPERVISOR_xen_version(XENVER_platform_parameters, &pp) == 0) | |
1751 | top = pp.virt_start; | |
1752 | ||
1753 | reserve_top_address(-top); | |
1754 | #endif /* CONFIG_X86_32 */ | |
1755 | } | |
1756 | ||
1757 | /* | |
1758 | * Like __va(), but returns the address in the kernel mapping (which is |
1759 | * all we have until the physical memory mapping has been set up). |
1760 | */ | |
bf9d834a | 1761 | static void * __init __ka(phys_addr_t paddr) |
319f3ba5 JF |
1762 | { |
1763 | #ifdef CONFIG_X86_64 | |
1764 | return (void *)(paddr + __START_KERNEL_map); | |
1765 | #else | |
1766 | return __va(paddr); | |
1767 | #endif | |
1768 | } | |
1769 | ||
1770 | /* Convert a machine address to physical address */ | |
bf9d834a | 1771 | static unsigned long __init m2p(phys_addr_t maddr) |
319f3ba5 JF |
1772 | { |
1773 | phys_addr_t paddr; | |
1774 | ||
1775 | maddr &= PTE_PFN_MASK; | |
1776 | paddr = mfn_to_pfn(maddr >> PAGE_SHIFT) << PAGE_SHIFT; | |
1777 | ||
1778 | return paddr; | |
1779 | } | |
1780 | ||
1781 | /* Convert a machine address to kernel virtual */ | |
bf9d834a | 1782 | static void * __init m2v(phys_addr_t maddr) |
319f3ba5 JF |
1783 | { |
1784 | return __ka(m2p(maddr)); | |
1785 | } | |
1786 | ||
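/*
 * Editorial example: m2p()/m2v() are typically applied to raw
 * page-table entries handed over by Xen, which contain machine
 * addresses.  E.g. walking from a pmd entry to the kernel-virtual
 * address of the pte page it references, as xen_map_identity_early()
 * does below (hypothetical wrapper):
 */
static pte_t * __init example_pte_page_of(pmd_t *pmd, unsigned int pmdidx)
{
	/* pmd[pmdidx].pmd holds a machine address; map it back. */
	return (pte_t *)m2v(pmd[pmdidx].pmd);
}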
4ec5387c | 1787 | /* Set the page permissions on identity-mapped pages */ |
bf9d834a JG |
1788 | static void __init set_page_prot_flags(void *addr, pgprot_t prot, |
1789 | unsigned long flags) | |
319f3ba5 JF |
1790 | { |
1791 | unsigned long pfn = __pa(addr) >> PAGE_SHIFT; | |
1792 | pte_t pte = pfn_pte(pfn, prot); | |
1793 | ||
4e44e44b MR |
1794 | /* For PVH there is no need to set pages R/O or R/W to pin or unpin them. */ |
1795 | if (xen_feature(XENFEAT_auto_translated_physmap)) | |
1796 | return; | |
1797 | ||
b2222794 | 1798 | if (HYPERVISOR_update_va_mapping((unsigned long)addr, pte, flags)) |
319f3ba5 JF |
1799 | BUG(); |
1800 | } | |
bf9d834a | 1801 | static void __init set_page_prot(void *addr, pgprot_t prot) |
b2222794 KRW |
1802 | { |
1803 | return set_page_prot_flags(addr, prot, UVMF_NONE); | |
1804 | } | |
caaf9ecf | 1805 | #ifdef CONFIG_X86_32 |
3f508953 | 1806 | static void __init xen_map_identity_early(pmd_t *pmd, unsigned long max_pfn) |
319f3ba5 JF |
1807 | { |
1808 | unsigned pmdidx, pteidx; | |
1809 | unsigned ident_pte; | |
1810 | unsigned long pfn; | |
1811 | ||
764f0138 JF |
1812 | level1_ident_pgt = extend_brk(sizeof(pte_t) * LEVEL1_IDENT_ENTRIES, |
1813 | PAGE_SIZE); | |
1814 | ||
319f3ba5 JF |
1815 | ident_pte = 0; |
1816 | pfn = 0; | |
1817 | for (pmdidx = 0; pmdidx < PTRS_PER_PMD && pfn < max_pfn; pmdidx++) { | |
1818 | pte_t *pte_page; | |
1819 | ||
1820 | /* Reuse or allocate a page of ptes */ | |
1821 | if (pmd_present(pmd[pmdidx])) | |
1822 | pte_page = m2v(pmd[pmdidx].pmd); | |
1823 | else { | |
1824 | /* Check for free pte pages */ | |
764f0138 | 1825 | if (ident_pte == LEVEL1_IDENT_ENTRIES) |
319f3ba5 JF |
1826 | break; |
1827 | ||
1828 | pte_page = &level1_ident_pgt[ident_pte]; | |
1829 | ident_pte += PTRS_PER_PTE; | |
1830 | ||
1831 | pmd[pmdidx] = __pmd(__pa(pte_page) | _PAGE_TABLE); | |
1832 | } | |
1833 | ||
1834 | /* Install mappings */ | |
1835 | for (pteidx = 0; pteidx < PTRS_PER_PTE; pteidx++, pfn++) { | |
1836 | pte_t pte; | |
1837 | ||
a91d9287 SS |
1838 | if (pfn > max_pfn_mapped) |
1839 | max_pfn_mapped = pfn; | |
a91d9287 | 1840 | |
319f3ba5 JF |
1841 | if (!pte_none(pte_page[pteidx])) |
1842 | continue; | |
1843 | ||
1844 | pte = pfn_pte(pfn, PAGE_KERNEL_EXEC); | |
1845 | pte_page[pteidx] = pte; | |
1846 | } | |
1847 | } | |
1848 | ||
1849 | for (pteidx = 0; pteidx < ident_pte; pteidx += PTRS_PER_PTE) | |
1850 | set_page_prot(&level1_ident_pgt[pteidx], PAGE_KERNEL_RO); | |
1851 | ||
1852 | set_page_prot(pmd, PAGE_KERNEL_RO); | |
1853 | } | |
caaf9ecf | 1854 | #endif |
7e77506a IC |
1855 | void __init xen_setup_machphys_mapping(void) |
1856 | { | |
1857 | struct xen_machphys_mapping mapping; | |
7e77506a IC |
1858 | |
1859 | if (HYPERVISOR_memory_op(XENMEM_machphys_mapping, &mapping) == 0) { | |
1860 | machine_to_phys_mapping = (unsigned long *)mapping.v_start; | |
ccbcdf7c | 1861 | machine_to_phys_nr = mapping.max_mfn + 1; |
7e77506a | 1862 | } else { |
ccbcdf7c | 1863 | machine_to_phys_nr = MACH2PHYS_NR_ENTRIES; |
7e77506a | 1864 | } |
ccbcdf7c | 1865 | #ifdef CONFIG_X86_32 |
61cca2fa JB |
1866 | WARN_ON((machine_to_phys_mapping + (machine_to_phys_nr - 1)) |
1867 | < machine_to_phys_mapping); | |
ccbcdf7c | 1868 | #endif |
7e77506a IC |
1869 | } |
1870 | ||
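/*
 * Editorial sketch: machine_to_phys_mapping set up above is the
 * hypervisor-provided M2P array.  The core of an mfn_to_pfn() lookup
 * is just a bounds-checked array read; the real implementation also
 * guards the access against faults on missing M2P pages.
 */
static unsigned long example_m2p_lookup(unsigned long mfn)
{
	if (unlikely(mfn >= machine_to_phys_nr))
		return ~0UL;	/* no M2P entry for this mfn */

	return machine_to_phys_mapping[mfn];
}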
319f3ba5 | 1871 | #ifdef CONFIG_X86_64 |
bf9d834a | 1872 | static void __init convert_pfn_mfn(void *v) |
319f3ba5 JF |
1873 | { |
1874 | pte_t *pte = v; | |
1875 | int i; | |
1876 | ||
1877 | /* All levels are converted the same way, so just treat them | |
1878 | as ptes. */ | |
1879 | for (i = 0; i < PTRS_PER_PTE; i++) | |
1880 | pte[i] = xen_make_pte(pte[i].pte); | |
1881 | } | |
488f046d KRW |
1882 | static void __init check_pt_base(unsigned long *pt_base, unsigned long *pt_end, |
1883 | unsigned long addr) | |
1884 | { | |
1885 | if (*pt_base == PFN_DOWN(__pa(addr))) { | |
b2222794 | 1886 | set_page_prot_flags((void *)addr, PAGE_KERNEL, UVMF_INVLPG); |
488f046d KRW |
1887 | clear_page((void *)addr); |
1888 | (*pt_base)++; | |
1889 | } | |
1890 | if (*pt_end == PFN_DOWN(__pa(addr))) { | |
b2222794 | 1891 | set_page_prot_flags((void *)addr, PAGE_KERNEL, UVMF_INVLPG); |
488f046d KRW |
1892 | clear_page((void *)addr); |
1893 | (*pt_end)--; | |
1894 | } | |
1895 | } | |
319f3ba5 | 1896 | /* |
0d2eb44f | 1897 | * Set up the initial kernel pagetable. |
319f3ba5 JF |
1898 | * |
1899 | * We can construct this by grafting the Xen provided pagetable into | |
1900 | * head_64.S's preconstructed pagetables. We copy the Xen L2's into | |
0b5a5063 SB |
1901 | * level2_ident_pgt, and level2_kernel_pgt. This means that only the |
1902 | * kernel has a physical mapping to start with - but that's enough to | |
1903 | * get __va working. We need to fill in the rest of the physical | |
1904 | * mapping once some sort of allocator has been set up. NOTE: for | |
1905 | * PVH, the page tables are native. | |
319f3ba5 | 1906 | */ |
3699aad0 | 1907 | void __init xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn) |
319f3ba5 JF |
1908 | { |
1909 | pud_t *l3; | |
1910 | pmd_t *l2; | |
488f046d KRW |
1911 | unsigned long addr[3]; |
1912 | unsigned long pt_base, pt_end; | |
1913 | unsigned i; | |
319f3ba5 | 1914 | |
14988a4d SS |
1915 | /* max_pfn_mapped is the last pfn mapped in the initial memory |
1916 | * mappings. Considering that on Xen, after the kernel mappings, we |
1917 | * also map some pages that don't exist in pfn space, we |
1918 | * set max_pfn_mapped to the last real pfn mapped. */ | |
8f5b0c63 JG |
1919 | if (xen_start_info->mfn_list < __START_KERNEL_map) |
1920 | max_pfn_mapped = xen_start_info->first_p2m_pfn; | |
1921 | else | |
1922 | max_pfn_mapped = PFN_DOWN(__pa(xen_start_info->mfn_list)); | |
14988a4d | 1923 | |
488f046d KRW |
1924 | pt_base = PFN_DOWN(__pa(xen_start_info->pt_base)); |
1925 | pt_end = pt_base + xen_start_info->nr_pt_frames; | |
1926 | ||
319f3ba5 JF |
1927 | /* Zap identity mapping */ |
1928 | init_level4_pgt[0] = __pgd(0); | |
1929 | ||
4e44e44b MR |
1930 | if (!xen_feature(XENFEAT_auto_translated_physmap)) { |
1931 | /* Pre-constructed entries are in pfn, so convert to mfn */ | |
1932 | /* L4[272] -> level3_ident_pgt | |
1933 | * L4[511] -> level3_kernel_pgt */ | |
1934 | convert_pfn_mfn(init_level4_pgt); | |
1935 | ||
1936 | /* L3_i[0] -> level2_ident_pgt */ | |
1937 | convert_pfn_mfn(level3_ident_pgt); | |
1938 | /* L3_k[510] -> level2_kernel_pgt | |
0b5a5063 | 1939 | * L3_k[511] -> level2_fixmap_pgt */ |
4e44e44b | 1940 | convert_pfn_mfn(level3_kernel_pgt); |
0b5a5063 SB |
1941 | |
1942 | /* L3_k[511][506] -> level1_fixmap_pgt */ | |
1943 | convert_pfn_mfn(level2_fixmap_pgt); | |
4e44e44b | 1944 | } |
4fac153a | 1945 | /* We get [511][511] and have Xen's version of level2_kernel_pgt */ |
319f3ba5 JF |
1946 | l3 = m2v(pgd[pgd_index(__START_KERNEL_map)].pgd); |
1947 | l2 = m2v(l3[pud_index(__START_KERNEL_map)].pud); | |
1948 | ||
488f046d KRW |
1949 | addr[0] = (unsigned long)pgd; |
1950 | addr[1] = (unsigned long)l3; | |
1951 | addr[2] = (unsigned long)l2; | |
4fac153a | 1952 | /* Graft it onto L4[272][0]. Note that we are creating an aliasing problem: |
0b5a5063 | 1953 | * Both L4[272][0] and L4[511][510] have entries that point to the same |
4fac153a KRW |
1954 | * L2 (PMD) tables. Meaning that if you modify it in __va space |
1955 | * it will also be modified in the __ka space! (But if you just |
1956 | * modify the PMD table to point to other PTE's or none, then you | |
1957 | * are OK - which is what cleanup_highmap does) */ | |
ae895ed7 | 1958 | copy_page(level2_ident_pgt, l2); |
0b5a5063 | 1959 | /* Graft it onto L4[511][510] */ |
ae895ed7 | 1960 | copy_page(level2_kernel_pgt, l2); |
319f3ba5 | 1961 | |
8f5b0c63 JG |
1962 | /* Copy the initial P->M table mappings if necessary. */ |
1963 | i = pgd_index(xen_start_info->mfn_list); | |
1964 | if (i && i < pgd_index(__START_KERNEL_map)) | |
1965 | init_level4_pgt[i] = ((pgd_t *)xen_start_info->pt_base)[i]; | |
1966 | ||
4e44e44b MR |
1967 | if (!xen_feature(XENFEAT_auto_translated_physmap)) { |
1968 | /* Make pagetable pieces RO */ | |
1969 | set_page_prot(init_level4_pgt, PAGE_KERNEL_RO); | |
1970 | set_page_prot(level3_ident_pgt, PAGE_KERNEL_RO); | |
1971 | set_page_prot(level3_kernel_pgt, PAGE_KERNEL_RO); | |
1972 | set_page_prot(level3_user_vsyscall, PAGE_KERNEL_RO); | |
1973 | set_page_prot(level2_ident_pgt, PAGE_KERNEL_RO); | |
1974 | set_page_prot(level2_kernel_pgt, PAGE_KERNEL_RO); | |
1975 | set_page_prot(level2_fixmap_pgt, PAGE_KERNEL_RO); | |
0b5a5063 | 1976 | set_page_prot(level1_fixmap_pgt, PAGE_KERNEL_RO); |
4e44e44b MR |
1977 | |
1978 | /* Pin down new L4 */ | |
1979 | pin_pagetable_pfn(MMUEXT_PIN_L4_TABLE, | |
1980 | PFN_DOWN(__pa_symbol(init_level4_pgt))); | |
1981 | ||
1982 | /* Unpin Xen-provided one */ | |
1983 | pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, PFN_DOWN(__pa(pgd))); | |
319f3ba5 | 1984 | |
4e44e44b MR |
1985 | /* |
1986 | * At this stage there can be no user pgd, and no page | |
1987 | * structure to attach it to, so make sure we just set kernel | |
1988 | * pgd. | |
1989 | */ | |
1990 | xen_mc_batch(); | |
1991 | __xen_write_cr3(true, __pa(init_level4_pgt)); | |
1992 | xen_mc_issue(PARAVIRT_LAZY_CPU); | |
1993 | } else | |
1994 | native_write_cr3(__pa(init_level4_pgt)); | |
319f3ba5 | 1995 | |
488f046d KRW |
1996 | /* We can't rip out L3 and L2 that easily, as the Xen pagetables are |
1997 | * set out this way: [L4], [L1], [L2], [L3], [L1], [L1] ... for | |
1998 | * the initial domain. For guests using the toolstack, they are in: | |
1999 | * [L4], [L3], [L2], [L1], [L1] ... order. So for dom0 we can only |
2000 | * rip out the [L4] (pgd), but for guests we shave off three pages. | |
2001 | */ | |
2002 | for (i = 0; i < ARRAY_SIZE(addr); i++) | |
2003 | check_pt_base(&pt_base, &pt_end, addr[i]); | |
319f3ba5 | 2004 | |
488f046d | 2005 | /* Reserve the (now up to three pages smaller) Xen pagetable we still use */ |
04414baa JG |
2006 | xen_pt_base = PFN_PHYS(pt_base); |
2007 | xen_pt_size = (pt_end - pt_base) * PAGE_SIZE; | |
2008 | memblock_reserve(xen_pt_base, xen_pt_size); | |
70e61199 | 2009 | |
7f914062 KRW |
2010 | /* Revector the xen_start_info */ |
2011 | xen_start_info = (struct start_info *)__va(__pa(xen_start_info)); | |
319f3ba5 | 2012 | } |
70e61199 JG |
2013 | |
2014 | /* | |
2015 | * Read a value from a physical address. | |
2016 | */ | |
2017 | static unsigned long __init xen_read_phys_ulong(phys_addr_t addr) | |
2018 | { | |
2019 | unsigned long *vaddr; | |
2020 | unsigned long val; | |
2021 | ||
2022 | vaddr = early_memremap_ro(addr, sizeof(val)); | |
2023 | val = *vaddr; | |
2024 | early_memunmap(vaddr, sizeof(val)); | |
2025 | return val; | |
2026 | } | |
2027 | ||
2028 | /* | |
2029 | * Translate a virtual address to a physical one without relying on mapped | |
2030 | * page tables. | |
2031 | */ | |
2032 | static phys_addr_t __init xen_early_virt_to_phys(unsigned long vaddr) | |
2033 | { | |
2034 | phys_addr_t pa; | |
2035 | pgd_t pgd; | |
2036 | pud_t pud; | |
2037 | pmd_t pmd; | |
2038 | pte_t pte; | |
2039 | ||
2040 | pa = read_cr3(); | |
2041 | pgd = native_make_pgd(xen_read_phys_ulong(pa + pgd_index(vaddr) * | |
2042 | sizeof(pgd))); | |
2043 | if (!pgd_present(pgd)) | |
2044 | return 0; | |
2045 | ||
2046 | pa = pgd_val(pgd) & PTE_PFN_MASK; | |
2047 | pud = native_make_pud(xen_read_phys_ulong(pa + pud_index(vaddr) * | |
2048 | sizeof(pud))); | |
2049 | if (!pud_present(pud)) | |
2050 | return 0; | |
2051 | pa = pud_pfn(pud) << PAGE_SHIFT; | |
2052 | if (pud_large(pud)) | |
2053 | return pa + (vaddr & ~PUD_MASK); | |
2054 | ||
2055 | pmd = native_make_pmd(xen_read_phys_ulong(pa + pmd_index(vaddr) * | |
2056 | sizeof(pmd))); | |
2057 | if (!pmd_present(pmd)) | |
2058 | return 0; | |
2059 | pa = pmd_pfn(pmd) << PAGE_SHIFT; | |
2060 | if (pmd_large(pmd)) | |
2061 | return pa + (vaddr & ~PMD_MASK); | |
2062 | ||
2063 | pte = native_make_pte(xen_read_phys_ulong(pa + pte_index(vaddr) * | |
2064 | sizeof(pte))); | |
2065 | if (!pte_present(pte)) | |
2066 | return 0; | |
2067 | pa = pte_pfn(pte) << PAGE_SHIFT; | |
2068 | ||
2069 | return pa | (vaddr & ~PAGE_MASK); | |
2070 | } | |
2071 | ||
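/*
 * Editorial usage note: xen_relocate_p2m() below uses this helper to
 * locate the initial p2m list, e.g.
 *
 *	p2m_pfn = PFN_DOWN(xen_early_virt_to_phys(xen_start_info->mfn_list));
 *
 * which works even while kernel mappings are in flux, because the
 * walk reads each page-table level through early_memremap_ro()
 * rather than relying on an existing virtual mapping.
 */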
2072 | /* | |
2073 | * Find a new area for the hypervisor supplied p2m list and relocate the p2m to | |
2074 | * this area. | |
2075 | */ | |
2076 | void __init xen_relocate_p2m(void) | |
2077 | { | |
2078 | phys_addr_t size, new_area, pt_phys, pmd_phys, pud_phys; | |
2079 | unsigned long p2m_pfn, p2m_pfn_end, n_frames, pfn, pfn_end; | |
2080 | int n_pte, n_pt, n_pmd, n_pud, idx_pte, idx_pt, idx_pmd, idx_pud; | |
2081 | pte_t *pt; | |
2082 | pmd_t *pmd; | |
2083 | pud_t *pud; | |
2084 | pgd_t *pgd; | |
2085 | unsigned long *new_p2m; | |
2086 | ||
2087 | size = PAGE_ALIGN(xen_start_info->nr_pages * sizeof(unsigned long)); | |
2088 | n_pte = roundup(size, PAGE_SIZE) >> PAGE_SHIFT; | |
2089 | n_pt = roundup(size, PMD_SIZE) >> PMD_SHIFT; | |
2090 | n_pmd = roundup(size, PUD_SIZE) >> PUD_SHIFT; | |
2091 | n_pud = roundup(size, PGDIR_SIZE) >> PGDIR_SHIFT; | |
2092 | n_frames = n_pte + n_pt + n_pmd + n_pud; | |
2093 | ||
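	/*
	 * Editorial worked example for the sizing above: a domain with
	 * nr_pages = 1048576 (4 GiB) needs size = 8 MiB of p2m data,
	 * hence n_pte = 2048 list frames, n_pt = 4 pte pages,
	 * n_pmd = 1 pmd page and n_pud = 1 pud page - 2054 frames in
	 * total for the new area.
	 */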
2094 | new_area = xen_find_free_area(PFN_PHYS(n_frames)); | |
2095 | if (!new_area) { | |
2096 | xen_raw_console_write("Can't find new memory area for p2m needed due to E820 map conflict\n"); | |
2097 | BUG(); | |
2098 | } | |
2099 | ||
2100 | /* | |
2101 | * Set up the page tables for addressing the new p2m list. |
2102 | * We have asked the hypervisor to map the p2m list at the user address | |
2103 | * PUD_SIZE. It may have done so, or it may have used a kernel space | |
2104 | * address depending on the Xen version. | |
2105 | * To avoid any possible virtual address collision, just use | |
2106 | * 2 * PUD_SIZE for the new area. | |
2107 | */ | |
2108 | pud_phys = new_area; | |
2109 | pmd_phys = pud_phys + PFN_PHYS(n_pud); | |
2110 | pt_phys = pmd_phys + PFN_PHYS(n_pmd); | |
2111 | p2m_pfn = PFN_DOWN(pt_phys) + n_pt; | |
2112 | ||
2113 | pgd = __va(read_cr3()); | |
2114 | new_p2m = (unsigned long *)(2 * PGDIR_SIZE); | |
2115 | for (idx_pud = 0; idx_pud < n_pud; idx_pud++) { | |
2116 | pud = early_memremap(pud_phys, PAGE_SIZE); | |
2117 | clear_page(pud); | |
2118 | for (idx_pmd = 0; idx_pmd < min(n_pmd, PTRS_PER_PUD); | |
2119 | idx_pmd++) { | |
2120 | pmd = early_memremap(pmd_phys, PAGE_SIZE); | |
2121 | clear_page(pmd); | |
2122 | for (idx_pt = 0; idx_pt < min(n_pt, PTRS_PER_PMD); | |
2123 | idx_pt++) { | |
2124 | pt = early_memremap(pt_phys, PAGE_SIZE); | |
2125 | clear_page(pt); | |
2126 | for (idx_pte = 0; | |
2127 | idx_pte < min(n_pte, PTRS_PER_PTE); | |
2128 | idx_pte++) { | |
2129 | set_pte(pt + idx_pte, | |
2130 | pfn_pte(p2m_pfn, PAGE_KERNEL)); | |
2131 | p2m_pfn++; | |
2132 | } | |
2133 | n_pte -= PTRS_PER_PTE; | |
2134 | early_memunmap(pt, PAGE_SIZE); | |
2135 | make_lowmem_page_readonly(__va(pt_phys)); | |
2136 | pin_pagetable_pfn(MMUEXT_PIN_L1_TABLE, | |
2137 | PFN_DOWN(pt_phys)); | |
2138 | set_pmd(pmd + idx_pt, | |
2139 | __pmd(_PAGE_TABLE | pt_phys)); | |
2140 | pt_phys += PAGE_SIZE; | |
2141 | } | |
2142 | n_pt -= PTRS_PER_PMD; | |
2143 | early_memunmap(pmd, PAGE_SIZE); | |
2144 | make_lowmem_page_readonly(__va(pmd_phys)); | |
2145 | pin_pagetable_pfn(MMUEXT_PIN_L2_TABLE, | |
2146 | PFN_DOWN(pmd_phys)); | |
2147 | set_pud(pud + idx_pmd, __pud(_PAGE_TABLE | pmd_phys)); | |
2148 | pmd_phys += PAGE_SIZE; | |
2149 | } | |
2150 | n_pmd -= PTRS_PER_PUD; | |
2151 | early_memunmap(pud, PAGE_SIZE); | |
2152 | make_lowmem_page_readonly(__va(pud_phys)); | |
2153 | pin_pagetable_pfn(MMUEXT_PIN_L3_TABLE, PFN_DOWN(pud_phys)); | |
2154 | set_pgd(pgd + 2 + idx_pud, __pgd(_PAGE_TABLE | pud_phys)); | |
2155 | pud_phys += PAGE_SIZE; | |
2156 | } | |
2157 | ||
2158 | /* Now copy the old p2m info to the new area. */ | |
2159 | memcpy(new_p2m, xen_p2m_addr, size); | |
2160 | xen_p2m_addr = new_p2m; | |
2161 | ||
2162 | /* Release the old p2m list and set new list info. */ | |
2163 | p2m_pfn = PFN_DOWN(xen_early_virt_to_phys(xen_start_info->mfn_list)); | |
2164 | BUG_ON(!p2m_pfn); | |
2165 | p2m_pfn_end = p2m_pfn + PFN_DOWN(size); | |
2166 | ||
2167 | if (xen_start_info->mfn_list < __START_KERNEL_map) { | |
2168 | pfn = xen_start_info->first_p2m_pfn; | |
2169 | pfn_end = xen_start_info->first_p2m_pfn + | |
2170 | xen_start_info->nr_p2m_frames; | |
2171 | set_pgd(pgd + 1, __pgd(0)); | |
2172 | } else { | |
2173 | pfn = p2m_pfn; | |
2174 | pfn_end = p2m_pfn_end; | |
2175 | } | |
2176 | ||
2177 | memblock_free(PFN_PHYS(pfn), PAGE_SIZE * (pfn_end - pfn)); | |
2178 | while (pfn < pfn_end) { | |
2179 | if (pfn == p2m_pfn) { | |
2180 | pfn = p2m_pfn_end; | |
2181 | continue; | |
2182 | } | |
2183 | make_lowmem_page_readwrite(__va(PFN_PHYS(pfn))); | |
2184 | pfn++; | |
2185 | } | |
2186 | ||
2187 | xen_start_info->mfn_list = (unsigned long)xen_p2m_addr; | |
2188 | xen_start_info->first_p2m_pfn = PFN_DOWN(new_area); | |
2189 | xen_start_info->nr_p2m_frames = n_frames; | |
2190 | } | |
2191 | ||
319f3ba5 | 2192 | #else /* !CONFIG_X86_64 */ |
5b5c1af1 IC |
2193 | static RESERVE_BRK_ARRAY(pmd_t, initial_kernel_pmd, PTRS_PER_PMD); |
2194 | static RESERVE_BRK_ARRAY(pmd_t, swapper_kernel_pmd, PTRS_PER_PMD); | |
2195 | ||
3f508953 | 2196 | static void __init xen_write_cr3_init(unsigned long cr3) |
5b5c1af1 IC |
2197 | { |
2198 | unsigned long pfn = PFN_DOWN(__pa(swapper_pg_dir)); | |
2199 | ||
2200 | BUG_ON(read_cr3() != __pa(initial_page_table)); | |
2201 | BUG_ON(cr3 != __pa(swapper_pg_dir)); | |
2202 | ||
2203 | /* | |
2204 | * We are switching to swapper_pg_dir for the first time (from | |
2205 | * initial_page_table) and therefore need to mark that page | |
2206 | * read-only and then pin it. | |
2207 | * | |
2208 | * Xen disallows sharing of kernel PMDs for PAE | |
2209 | * guests. Therefore we must copy the kernel PMD from | |
2210 | * initial_page_table into a new kernel PMD to be used in | |
2211 | * swapper_pg_dir. | |
2212 | */ | |
2213 | swapper_kernel_pmd = | |
2214 | extend_brk(sizeof(pmd_t) * PTRS_PER_PMD, PAGE_SIZE); | |
ae895ed7 | 2215 | copy_page(swapper_kernel_pmd, initial_kernel_pmd); |
5b5c1af1 IC |
2216 | swapper_pg_dir[KERNEL_PGD_BOUNDARY] = |
2217 | __pgd(__pa(swapper_kernel_pmd) | _PAGE_PRESENT); | |
2218 | set_page_prot(swapper_kernel_pmd, PAGE_KERNEL_RO); | |
2219 | ||
2220 | set_page_prot(swapper_pg_dir, PAGE_KERNEL_RO); | |
2221 | xen_write_cr3(cr3); | |
2222 | pin_pagetable_pfn(MMUEXT_PIN_L3_TABLE, pfn); | |
2223 | ||
2224 | pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, | |
2225 | PFN_DOWN(__pa(initial_page_table))); | |
2226 | set_page_prot(initial_page_table, PAGE_KERNEL); | |
2227 | set_page_prot(initial_kernel_pmd, PAGE_KERNEL); | |
2228 | ||
2229 | pv_mmu_ops.write_cr3 = &xen_write_cr3; | |
2230 | } | |
319f3ba5 | 2231 | |
70e61199 JG |
2232 | /* |
2233 | * For 32-bit domains xen_start_info->pt_base is the pgd address, which might |
2234 | * not be the first page table in the page table pool. |
2235 | * Iterate through the initial page tables to find the real page table base. | |
2236 | */ | |
2237 | static phys_addr_t xen_find_pt_base(pmd_t *pmd) | |
2238 | { | |
2239 | phys_addr_t pt_base, paddr; | |
2240 | unsigned pmdidx; | |
2241 | ||
2242 | pt_base = min(__pa(xen_start_info->pt_base), __pa(pmd)); | |
2243 | ||
2244 | for (pmdidx = 0; pmdidx < PTRS_PER_PMD; pmdidx++) | |
2245 | if (pmd_present(pmd[pmdidx]) && !pmd_large(pmd[pmdidx])) { | |
2246 | paddr = m2p(pmd[pmdidx].pmd); | |
2247 | pt_base = min(pt_base, paddr); | |
2248 | } | |
2249 | ||
2250 | return pt_base; | |
2251 | } | |
2252 | ||
3699aad0 | 2253 | void __init xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn) |
319f3ba5 JF |
2254 | { |
2255 | pmd_t *kernel_pmd; | |
2256 | ||
70e61199 JG |
2257 | kernel_pmd = m2v(pgd[KERNEL_PGD_BOUNDARY].pgd); |
2258 | ||
2259 | xen_pt_base = xen_find_pt_base(kernel_pmd); | |
2260 | xen_pt_size = xen_start_info->nr_pt_frames * PAGE_SIZE; | |
2261 | ||
5b5c1af1 IC |
2262 | initial_kernel_pmd = |
2263 | extend_brk(sizeof(pmd_t) * PTRS_PER_PMD, PAGE_SIZE); | |
f0991802 | 2264 | |
70e61199 | 2265 | max_pfn_mapped = PFN_DOWN(xen_pt_base + xen_pt_size + 512 * 1024); |
319f3ba5 | 2266 | |
ae895ed7 | 2267 | copy_page(initial_kernel_pmd, kernel_pmd); |
319f3ba5 | 2268 | |
5b5c1af1 | 2269 | xen_map_identity_early(initial_kernel_pmd, max_pfn); |
319f3ba5 | 2270 | |
ae895ed7 | 2271 | copy_page(initial_page_table, pgd); |
5b5c1af1 IC |
2272 | initial_page_table[KERNEL_PGD_BOUNDARY] = |
2273 | __pgd(__pa(initial_kernel_pmd) | _PAGE_PRESENT); | |
319f3ba5 | 2274 | |
5b5c1af1 IC |
2275 | set_page_prot(initial_kernel_pmd, PAGE_KERNEL_RO); |
2276 | set_page_prot(initial_page_table, PAGE_KERNEL_RO); | |
319f3ba5 JF |
2277 | set_page_prot(empty_zero_page, PAGE_KERNEL_RO); |
2278 | ||
2279 | pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, PFN_DOWN(__pa(pgd))); | |
2280 | ||
5b5c1af1 IC |
2281 | pin_pagetable_pfn(MMUEXT_PIN_L3_TABLE, |
2282 | PFN_DOWN(__pa(initial_page_table))); | |
2283 | xen_write_cr3(__pa(initial_page_table)); | |
319f3ba5 | 2284 | |
04414baa | 2285 | memblock_reserve(xen_pt_base, xen_pt_size); |
319f3ba5 JF |
2286 | } |
2287 | #endif /* CONFIG_X86_64 */ | |
2288 | ||
6c2681c8 JG |
2289 | void __init xen_reserve_special_pages(void) |
2290 | { | |
2291 | phys_addr_t paddr; | |
2292 | ||
2293 | memblock_reserve(__pa(xen_start_info), PAGE_SIZE); | |
2294 | if (xen_start_info->store_mfn) { | |
2295 | paddr = PFN_PHYS(mfn_to_pfn(xen_start_info->store_mfn)); | |
2296 | memblock_reserve(paddr, PAGE_SIZE); | |
2297 | } | |
2298 | if (!xen_initial_domain()) { | |
2299 | paddr = PFN_PHYS(mfn_to_pfn(xen_start_info->console.domU.mfn)); | |
2300 | memblock_reserve(paddr, PAGE_SIZE); | |
2301 | } | |
2302 | } | |
2303 | ||
04414baa JG |
2304 | void __init xen_pt_check_e820(void) |
2305 | { | |
2306 | if (xen_is_e820_reserved(xen_pt_base, xen_pt_size)) { | |
2307 | xen_raw_console_write("Xen hypervisor allocated page table memory conflicts with E820 map\n"); | |
2308 | BUG(); | |
2309 | } | |
2310 | } | |
2311 | ||
98511f35 JF |
2312 | static unsigned char dummy_mapping[PAGE_SIZE] __page_aligned_bss; |
2313 | ||
3b3809ac | 2314 | static void xen_set_fixmap(unsigned idx, phys_addr_t phys, pgprot_t prot) |
319f3ba5 JF |
2315 | { |
2316 | pte_t pte; | |
2317 | ||
2318 | phys >>= PAGE_SHIFT; | |
2319 | ||
2320 | switch (idx) { | |
2321 | case FIX_BTMAP_END ... FIX_BTMAP_BEGIN: | |
4eefbe79 | 2322 | case FIX_RO_IDT: |
319f3ba5 JF |
2323 | #ifdef CONFIG_X86_32 |
2324 | case FIX_WP_TEST: | |
319f3ba5 JF |
2325 | # ifdef CONFIG_HIGHMEM |
2326 | case FIX_KMAP_BEGIN ... FIX_KMAP_END: | |
2327 | # endif | |
1ad83c85 | 2328 | #elif defined(CONFIG_X86_VSYSCALL_EMULATION) |
f40c3300 | 2329 | case VSYSCALL_PAGE: |
319f3ba5 | 2330 | #endif |
3ecb1b7d JF |
2331 | case FIX_TEXT_POKE0: |
2332 | case FIX_TEXT_POKE1: | |
2333 | /* All local page mappings */ | |
319f3ba5 JF |
2334 | pte = pfn_pte(phys, prot); |
2335 | break; | |
2336 | ||
98511f35 JF |
2337 | #ifdef CONFIG_X86_LOCAL_APIC |
2338 | case FIX_APIC_BASE: /* maps dummy local APIC */ | |
2339 | pte = pfn_pte(PFN_DOWN(__pa(dummy_mapping)), PAGE_KERNEL); | |
2340 | break; | |
2341 | #endif | |
2342 | ||
2343 | #ifdef CONFIG_X86_IO_APIC | |
2344 | case FIX_IO_APIC_BASE_0 ... FIX_IO_APIC_BASE_END: | |
2345 | /* | |
2346 | * We just don't map the IO APIC - all access is via | |
2347 | * hypercalls. Map a dummy page instead of the real address. |
2348 | */ | |
27abd14b | 2349 | pte = pfn_pte(PFN_DOWN(__pa(dummy_mapping)), PAGE_KERNEL); |
98511f35 JF |
2350 | break; |
2351 | #endif | |
2352 | ||
c0011dbf JF |
2353 | case FIX_PARAVIRT_BOOTMAP: |
2354 | /* This is an MFN, but it isn't an IO mapping from the | |
2355 | IO domain */ | |
319f3ba5 JF |
2356 | pte = mfn_pte(phys, prot); |
2357 | break; | |
c0011dbf JF |
2358 | |
2359 | default: | |
2360 | /* By default, set_fixmap is used for hardware mappings */ | |
7f2f8822 | 2361 | pte = mfn_pte(phys, prot); |
c0011dbf | 2362 | break; |
319f3ba5 JF |
2363 | } |
2364 | ||
2365 | __native_set_fixmap(idx, pte); | |
2366 | ||
1ad83c85 | 2367 | #ifdef CONFIG_X86_VSYSCALL_EMULATION |
319f3ba5 JF |
2368 | /* Replicate changes to map the vsyscall page into the user |
2369 | pagetable vsyscall mapping. */ | |
f40c3300 | 2370 | if (idx == VSYSCALL_PAGE) { |
319f3ba5 JF |
2371 | unsigned long vaddr = __fix_to_virt(idx); |
2372 | set_pte_vaddr_pud(level3_user_vsyscall, vaddr, pte); | |
2373 | } | |
2374 | #endif | |
2375 | } | |
2376 | ||
3f508953 | 2377 | static void __init xen_post_allocator_init(void) |
319f3ba5 | 2378 | { |
4e44e44b MR |
2379 | if (xen_feature(XENFEAT_auto_translated_physmap)) |
2380 | return; | |
2381 | ||
319f3ba5 JF |
2382 | pv_mmu_ops.set_pte = xen_set_pte; |
2383 | pv_mmu_ops.set_pmd = xen_set_pmd; | |
2384 | pv_mmu_ops.set_pud = xen_set_pud; | |
98233368 | 2385 | #if CONFIG_PGTABLE_LEVELS == 4 |
319f3ba5 JF |
2386 | pv_mmu_ops.set_pgd = xen_set_pgd; |
2387 | #endif | |
2388 | ||
2389 | /* This will work as long as patching hasn't happened yet | |
2390 | (which it hasn't) */ | |
2391 | pv_mmu_ops.alloc_pte = xen_alloc_pte; | |
2392 | pv_mmu_ops.alloc_pmd = xen_alloc_pmd; | |
2393 | pv_mmu_ops.release_pte = xen_release_pte; | |
2394 | pv_mmu_ops.release_pmd = xen_release_pmd; | |
98233368 | 2395 | #if CONFIG_PGTABLE_LEVELS == 4 |
319f3ba5 JF |
2396 | pv_mmu_ops.alloc_pud = xen_alloc_pud; |
2397 | pv_mmu_ops.release_pud = xen_release_pud; | |
2398 | #endif | |
d6b186c1 | 2399 | pv_mmu_ops.make_pte = PV_CALLEE_SAVE(xen_make_pte); |
319f3ba5 JF |
2400 | |
2401 | #ifdef CONFIG_X86_64 | |
d3eb2c89 | 2402 | pv_mmu_ops.write_cr3 = &xen_write_cr3; |
319f3ba5 JF |
2403 | SetPagePinned(virt_to_page(level3_user_vsyscall)); |
2404 | #endif | |
2405 | xen_mark_init_mm_pinned(); | |
2406 | } | |
2407 | ||
b407fc57 JF |
2408 | static void xen_leave_lazy_mmu(void) |
2409 | { | |
5caecb94 | 2410 | preempt_disable(); |
b407fc57 JF |
2411 | xen_mc_flush(); |
2412 | paravirt_leave_lazy_mmu(); | |
5caecb94 | 2413 | preempt_enable(); |
b407fc57 | 2414 | } |
319f3ba5 | 2415 | |
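/*
 * Editorial note on the lazy_mode hooks wired up below: the generic mm
 * code brackets batches of page-table updates with
 *
 *	arch_enter_lazy_mmu_mode();   ->  paravirt_enter_lazy_mmu()
 *	... set_pte_at()/pmd updates, queued as multicalls ...
 *	arch_leave_lazy_mmu_mode();   ->  xen_leave_lazy_mmu() above,
 *	                                  which flushes the queue
 *
 * so many hypercalls collapse into a single multicall flush.
 */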
3f508953 | 2416 | static const struct pv_mmu_ops xen_mmu_ops __initconst = { |
319f3ba5 JF |
2417 | .read_cr2 = xen_read_cr2, |
2418 | .write_cr2 = xen_write_cr2, | |
2419 | ||
2420 | .read_cr3 = xen_read_cr3, | |
5b5c1af1 | 2421 | .write_cr3 = xen_write_cr3_init, |
319f3ba5 JF |
2422 | |
2423 | .flush_tlb_user = xen_flush_tlb, | |
2424 | .flush_tlb_kernel = xen_flush_tlb, | |
2425 | .flush_tlb_single = xen_flush_tlb_single, | |
2426 | .flush_tlb_others = xen_flush_tlb_others, | |
2427 | ||
2428 | .pte_update = paravirt_nop, | |
319f3ba5 JF |
2429 | |
2430 | .pgd_alloc = xen_pgd_alloc, | |
2431 | .pgd_free = xen_pgd_free, | |
2432 | ||
2433 | .alloc_pte = xen_alloc_pte_init, | |
2434 | .release_pte = xen_release_pte_init, | |
b96229b5 | 2435 | .alloc_pmd = xen_alloc_pmd_init, |
b96229b5 | 2436 | .release_pmd = xen_release_pmd_init, |
319f3ba5 | 2437 | |
319f3ba5 | 2438 | .set_pte = xen_set_pte_init, |
319f3ba5 JF |
2439 | .set_pte_at = xen_set_pte_at, |
2440 | .set_pmd = xen_set_pmd_hyper, | |
2441 | ||
2442 | .ptep_modify_prot_start = __ptep_modify_prot_start, | |
2443 | .ptep_modify_prot_commit = __ptep_modify_prot_commit, | |
2444 | ||
da5de7c2 JF |
2445 | .pte_val = PV_CALLEE_SAVE(xen_pte_val), |
2446 | .pgd_val = PV_CALLEE_SAVE(xen_pgd_val), | |
319f3ba5 | 2447 | |
d6b186c1 | 2448 | .make_pte = PV_CALLEE_SAVE(xen_make_pte_init), |
da5de7c2 | 2449 | .make_pgd = PV_CALLEE_SAVE(xen_make_pgd), |
319f3ba5 JF |
2450 | |
2451 | #ifdef CONFIG_X86_PAE | |
2452 | .set_pte_atomic = xen_set_pte_atomic, | |
319f3ba5 JF |
2453 | .pte_clear = xen_pte_clear, |
2454 | .pmd_clear = xen_pmd_clear, | |
2455 | #endif /* CONFIG_X86_PAE */ | |
2456 | .set_pud = xen_set_pud_hyper, | |
2457 | ||
da5de7c2 JF |
2458 | .make_pmd = PV_CALLEE_SAVE(xen_make_pmd), |
2459 | .pmd_val = PV_CALLEE_SAVE(xen_pmd_val), | |
319f3ba5 | 2460 | |
98233368 | 2461 | #if CONFIG_PGTABLE_LEVELS == 4 |
da5de7c2 JF |
2462 | .pud_val = PV_CALLEE_SAVE(xen_pud_val), |
2463 | .make_pud = PV_CALLEE_SAVE(xen_make_pud), | |
319f3ba5 JF |
2464 | .set_pgd = xen_set_pgd_hyper, |
2465 | ||
b96229b5 JF |
2466 | .alloc_pud = xen_alloc_pmd_init, |
2467 | .release_pud = xen_release_pmd_init, | |
98233368 | 2468 | #endif /* CONFIG_PGTABLE_LEVELS == 4 */ |
319f3ba5 JF |
2469 | |
2470 | .activate_mm = xen_activate_mm, | |
2471 | .dup_mmap = xen_dup_mmap, | |
2472 | .exit_mmap = xen_exit_mmap, | |
2473 | ||
2474 | .lazy_mode = { | |
2475 | .enter = paravirt_enter_lazy_mmu, | |
b407fc57 | 2476 | .leave = xen_leave_lazy_mmu, |
511ba86e | 2477 | .flush = paravirt_flush_lazy_mmu, |
319f3ba5 JF |
2478 | }, |
2479 | ||
2480 | .set_fixmap = xen_set_fixmap, | |
2481 | }; | |
2482 | ||
030cb6c0 TG |
2483 | void __init xen_init_mmu_ops(void) |
2484 | { | |
7737b215 | 2485 | x86_init.paging.pagetable_init = xen_pagetable_init; |
76bcceff | 2486 | |
20f36e03 | 2487 | if (xen_feature(XENFEAT_auto_translated_physmap)) |
76bcceff | 2488 | return; |
20f36e03 | 2489 | |
030cb6c0 | 2490 | pv_mmu_ops = xen_mmu_ops; |
d2cb2145 | 2491 | |
98511f35 | 2492 | memset(dummy_mapping, 0xff, PAGE_SIZE); |
030cb6c0 | 2493 | } |
319f3ba5 | 2494 | |
08bbc9da AN |
2495 | /* Protected by xen_reservation_lock. */ |
2496 | #define MAX_CONTIG_ORDER 9 /* 2MB */ | |
2497 | static unsigned long discontig_frames[1<<MAX_CONTIG_ORDER]; | |
2498 | ||
2499 | #define VOID_PTE (mfn_pte(0, __pgprot(0))) | |
2500 | static void xen_zap_pfn_range(unsigned long vaddr, unsigned int order, | |
2501 | unsigned long *in_frames, | |
2502 | unsigned long *out_frames) | |
2503 | { | |
2504 | int i; | |
2505 | struct multicall_space mcs; | |
2506 | ||
2507 | xen_mc_batch(); | |
2508 | for (i = 0; i < (1UL<<order); i++, vaddr += PAGE_SIZE) { | |
2509 | mcs = __xen_mc_entry(0); | |
2510 | ||
2511 | if (in_frames) | |
2512 | in_frames[i] = virt_to_mfn(vaddr); | |
2513 | ||
2514 | MULTI_update_va_mapping(mcs.mc, vaddr, VOID_PTE, 0); | |
6eaa412f | 2515 | __set_phys_to_machine(virt_to_pfn(vaddr), INVALID_P2M_ENTRY); |
08bbc9da AN |
2516 | |
2517 | if (out_frames) | |
2518 | out_frames[i] = virt_to_pfn(vaddr); | |
2519 | } | |
2520 | xen_mc_issue(0); | |
2521 | } | |
2522 | ||
2523 | /* | |
2524 | * Update the pfn-to-mfn mappings for a virtual address range, either to | |
2525 | * point to an array of mfns, or contiguously from a single starting | |
2526 | * mfn. | |
2527 | */ | |
2528 | static void xen_remap_exchanged_ptes(unsigned long vaddr, int order, | |
2529 | unsigned long *mfns, | |
2530 | unsigned long first_mfn) | |
2531 | { | |
2532 | unsigned i, limit; | |
2533 | unsigned long mfn; | |
2534 | ||
2535 | xen_mc_batch(); | |
2536 | ||
2537 | limit = 1u << order; | |
2538 | for (i = 0; i < limit; i++, vaddr += PAGE_SIZE) { | |
2539 | struct multicall_space mcs; | |
2540 | unsigned flags; | |
2541 | ||
2542 | mcs = __xen_mc_entry(0); | |
2543 | if (mfns) | |
2544 | mfn = mfns[i]; | |
2545 | else | |
2546 | mfn = first_mfn + i; | |
2547 | ||
2548 | if (i < (limit - 1)) | |
2549 | flags = 0; | |
2550 | else { | |
2551 | if (order == 0) | |
2552 | flags = UVMF_INVLPG | UVMF_ALL; | |
2553 | else | |
2554 | flags = UVMF_TLB_FLUSH | UVMF_ALL; | |
2555 | } | |
2556 | ||
2557 | MULTI_update_va_mapping(mcs.mc, vaddr, | |
2558 | mfn_pte(mfn, PAGE_KERNEL), flags); | |
2559 | ||
2560 | set_phys_to_machine(virt_to_pfn(vaddr), mfn); | |
2561 | } | |
2562 | ||
2563 | xen_mc_issue(0); | |
2564 | } | |
2565 | ||
2566 | /* | |
2567 | * Perform the hypercall to exchange a region of our pfns to point to | |
2568 | * memory with the required contiguous alignment. Takes the pfns as | |
2569 | * input, and populates mfns as output. | |
2570 | * | |
2571 | * Returns a success code indicating whether the hypervisor was able to | |
2572 | * satisfy the request or not. | |
2573 | */ | |
2574 | static int xen_exchange_memory(unsigned long extents_in, unsigned int order_in, | |
2575 | unsigned long *pfns_in, | |
2576 | unsigned long extents_out, | |
2577 | unsigned int order_out, | |
2578 | unsigned long *mfns_out, | |
2579 | unsigned int address_bits) | |
2580 | { | |
2581 | long rc; | |
2582 | int success; | |
2583 | ||
2584 | struct xen_memory_exchange exchange = { | |
2585 | .in = { | |
2586 | .nr_extents = extents_in, | |
2587 | .extent_order = order_in, | |
2588 | .extent_start = pfns_in, | |
2589 | .domid = DOMID_SELF | |
2590 | }, | |
2591 | .out = { | |
2592 | .nr_extents = extents_out, | |
2593 | .extent_order = order_out, | |
2594 | .extent_start = mfns_out, | |
2595 | .address_bits = address_bits, | |
2596 | .domid = DOMID_SELF | |
2597 | } | |
2598 | }; | |
2599 | ||
2600 | BUG_ON(extents_in << order_in != extents_out << order_out); | |
2601 | ||
2602 | rc = HYPERVISOR_memory_op(XENMEM_exchange, &exchange); | |
2603 | success = (exchange.nr_exchanged == extents_in); | |
2604 | ||
2605 | BUG_ON(!success && ((exchange.nr_exchanged != 0) || (rc == 0))); | |
2606 | BUG_ON(success && (rc != 0)); | |
2607 | ||
2608 | return success; | |
2609 | } | |
2610 | ||
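/*
 * Editorial example: trading 512 scattered 4 KiB frames for one 2 MiB
 * machine-contiguous extent satisfies the invariant checked above,
 * since extents_in << order_in == 512 << 0 == 1 << 9 ==
 * extents_out << order_out.  A hypothetical call:
 *
 *	success = xen_exchange_memory(512, 0, in_pfns,
 *				      1, 9, &out_mfn, 32);
 *
 * where address_bits == 32 asks Xen for memory addressable below 4 GiB.
 */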
1b65c4e5 | 2611 | int xen_create_contiguous_region(phys_addr_t pstart, unsigned int order, |
69908907 SS |
2612 | unsigned int address_bits, |
2613 | dma_addr_t *dma_handle) | |
08bbc9da AN |
2614 | { |
2615 | unsigned long *in_frames = discontig_frames, out_frame; | |
2616 | unsigned long flags; | |
2617 | int success; | |
1b65c4e5 | 2618 | unsigned long vstart = (unsigned long)phys_to_virt(pstart); |
08bbc9da AN |
2619 | |
2620 | /* | |
2621 | * Currently an auto-translated guest will not perform I/O, nor will | |
2622 | * it require PAE page directories below 4GB. Therefore any calls to | |
2623 | * this function are redundant and can be ignored. | |
2624 | */ | |
2625 | ||
2626 | if (xen_feature(XENFEAT_auto_translated_physmap)) | |
2627 | return 0; | |
2628 | ||
2629 | if (unlikely(order > MAX_CONTIG_ORDER)) | |
2630 | return -ENOMEM; | |
2631 | ||
2632 | memset((void *) vstart, 0, PAGE_SIZE << order); | |
2633 | ||
08bbc9da AN |
2634 | spin_lock_irqsave(&xen_reservation_lock, flags); |
2635 | ||
2636 | /* 1. Zap current PTEs, remembering MFNs. */ | |
2637 | xen_zap_pfn_range(vstart, order, in_frames, NULL); | |
2638 | ||
2639 | /* 2. Get a new contiguous memory extent. */ | |
2640 | out_frame = virt_to_pfn(vstart); | |
2641 | success = xen_exchange_memory(1UL << order, 0, in_frames, | |
2642 | 1, order, &out_frame, | |
2643 | address_bits); | |
2644 | ||
2645 | /* 3. Map the new extent in place of old pages. */ | |
2646 | if (success) | |
2647 | xen_remap_exchanged_ptes(vstart, order, NULL, out_frame); | |
2648 | else | |
2649 | xen_remap_exchanged_ptes(vstart, order, in_frames, 0); | |
2650 | ||
2651 | spin_unlock_irqrestore(&xen_reservation_lock, flags); | |
2652 | ||
69908907 | 2653 | *dma_handle = virt_to_machine(vstart).maddr; |
08bbc9da AN |
2654 | return success ? 0 : -ENOMEM; |
2655 | } | |
2656 | EXPORT_SYMBOL_GPL(xen_create_contiguous_region); | |
2657 | ||
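/*
 * Editorial usage sketch (hypothetical caller, error paths trimmed):
 * a PV driver needing a machine-contiguous, 32-bit-addressable DMA
 * buffer could combine a normal page allocation with the exchange
 * above.  order must not exceed MAX_CONTIG_ORDER.
 */
static void *example_alloc_dma_buffer(unsigned int order, dma_addr_t *dma)
{
	void *buf = (void *)__get_free_pages(GFP_KERNEL, order);

	if (!buf)
		return NULL;

	if (xen_create_contiguous_region(virt_to_phys(buf), order,
					 32, dma)) {
		free_pages((unsigned long)buf, order);
		return NULL;
	}
	return buf;	/* machine-contiguous; *dma holds the bus address */
}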
1b65c4e5 | 2658 | void xen_destroy_contiguous_region(phys_addr_t pstart, unsigned int order) |
08bbc9da AN |
2659 | { |
2660 | unsigned long *out_frames = discontig_frames, in_frame; | |
2661 | unsigned long flags; | |
2662 | int success; | |
1b65c4e5 | 2663 | unsigned long vstart; |
08bbc9da AN |
2664 | |
2665 | if (xen_feature(XENFEAT_auto_translated_physmap)) | |
2666 | return; | |
2667 | ||
2668 | if (unlikely(order > MAX_CONTIG_ORDER)) | |
2669 | return; | |
2670 | ||
1b65c4e5 | 2671 | vstart = (unsigned long)phys_to_virt(pstart); |
08bbc9da AN |
2672 | memset((void *) vstart, 0, PAGE_SIZE << order); |
2673 | ||
08bbc9da AN |
2674 | spin_lock_irqsave(&xen_reservation_lock, flags); |
2675 | ||
2676 | /* 1. Find start MFN of contiguous extent. */ | |
2677 | in_frame = virt_to_mfn(vstart); | |
2678 | ||
2679 | /* 2. Zap current PTEs. */ | |
2680 | xen_zap_pfn_range(vstart, order, NULL, out_frames); | |
2681 | ||
2682 | /* 3. Do the exchange for non-contiguous MFNs. */ | |
2683 | success = xen_exchange_memory(1, order, &in_frame, 1UL << order, | |
2684 | 0, out_frames, 0); | |
2685 | ||
2686 | /* 4. Map new pages in place of old pages. */ | |
2687 | if (success) | |
2688 | xen_remap_exchanged_ptes(vstart, order, out_frames, 0); | |
2689 | else | |
2690 | xen_remap_exchanged_ptes(vstart, order, NULL, in_frame); | |
2691 | ||
2692 | spin_unlock_irqrestore(&xen_reservation_lock, flags); | |
030cb6c0 | 2693 | } |
08bbc9da | 2694 | EXPORT_SYMBOL_GPL(xen_destroy_contiguous_region); |
319f3ba5 | 2695 | |
ca65f9fc | 2696 | #ifdef CONFIG_XEN_PVHVM |
34b6f01a OH |
2697 | #ifdef CONFIG_PROC_VMCORE |
2698 | /* | |
2699 | * This function is used in two contexts: | |
2700 | * - the kdump kernel has to check whether a pfn of the crashed kernel | |
2701 | * was a ballooned page. vmcore is using this function to decide | |
2702 | * whether to access a pfn of the crashed kernel. | |
2703 | * - the kexec kernel has to check whether a pfn was ballooned by the | |
2704 | * previous kernel. If the pfn is ballooned, handle it properly. | |
2705 | * Returns 0 if the pfn is not backed by a RAM page; the caller may |
2706 | * treat the pfn specially in this case. |
2707 | */ | |
2708 | static int xen_oldmem_pfn_is_ram(unsigned long pfn) | |
2709 | { | |
2710 | struct xen_hvm_get_mem_type a = { | |
2711 | .domid = DOMID_SELF, | |
2712 | .pfn = pfn, | |
2713 | }; | |
2714 | int ram; | |
2715 | ||
2716 | if (HYPERVISOR_hvm_op(HVMOP_get_mem_type, &a)) | |
2717 | return -ENXIO; | |
2718 | ||
2719 | switch (a.mem_type) { | |
2720 | case HVMMEM_mmio_dm: | |
2721 | ram = 0; | |
2722 | break; | |
2723 | case HVMMEM_ram_rw: | |
2724 | case HVMMEM_ram_ro: | |
2725 | default: | |
2726 | ram = 1; | |
2727 | break; | |
2728 | } | |
2729 | ||
2730 | return ram; | |
2731 | } | |
2732 | #endif | |
2733 | ||
59151001 SS |
2734 | static void xen_hvm_exit_mmap(struct mm_struct *mm) |
2735 | { | |
2736 | struct xen_hvm_pagetable_dying a; | |
2737 | int rc; | |
2738 | ||
2739 | a.domid = DOMID_SELF; | |
2740 | a.gpa = __pa(mm->pgd); | |
2741 | rc = HYPERVISOR_hvm_op(HVMOP_pagetable_dying, &a); | |
2742 | WARN_ON_ONCE(rc < 0); | |
2743 | } | |
2744 | ||
2745 | static int is_pagetable_dying_supported(void) | |
2746 | { | |
2747 | struct xen_hvm_pagetable_dying a; | |
2748 | int rc = 0; | |
2749 | ||
2750 | a.domid = DOMID_SELF; | |
2751 | a.gpa = 0x00; | |
2752 | rc = HYPERVISOR_hvm_op(HVMOP_pagetable_dying, &a); | |
2753 | if (rc < 0) { | |
2754 | printk(KERN_DEBUG "HVMOP_pagetable_dying not supported\n"); | |
2755 | return 0; | |
2756 | } | |
2757 | return 1; | |
2758 | } | |
2759 | ||
2760 | void __init xen_hvm_init_mmu_ops(void) | |
2761 | { | |
2762 | if (is_pagetable_dying_supported()) | |
2763 | pv_mmu_ops.exit_mmap = xen_hvm_exit_mmap; | |
34b6f01a OH |
2764 | #ifdef CONFIG_PROC_VMCORE |
2765 | register_oldmem_pfn_is_ram(&xen_oldmem_pfn_is_ram); | |
2766 | #endif | |
59151001 | 2767 | } |
ca65f9fc | 2768 | #endif |
59151001 | 2769 | |
de1ef206 IC |
2770 | #define REMAP_BATCH_SIZE 16 |
2771 | ||
2772 | struct remap_data { | |
4e8c0c8c DV |
2773 | xen_pfn_t *mfn; |
2774 | bool contiguous; | |
de1ef206 IC |
2775 | pgprot_t prot; |
2776 | struct mmu_update *mmu_update; | |
2777 | }; | |
2778 | ||
2779 | static int remap_area_mfn_pte_fn(pte_t *ptep, pgtable_t token, | |
2780 | unsigned long addr, void *data) | |
2781 | { | |
2782 | struct remap_data *rmd = data; | |
4e8c0c8c DV |
2783 | pte_t pte = pte_mkspecial(mfn_pte(*rmd->mfn, rmd->prot)); |
2784 | ||
6a6256f9 | 2785 | /* If we have a contiguous range, just update the mfn itself, |
4e8c0c8c DV |
2786 | else advance the pointer to the next mfn. */ |
2787 | if (rmd->contiguous) | |
2788 | (*rmd->mfn)++; | |
2789 | else | |
2790 | rmd->mfn++; | |
de1ef206 | 2791 | |
d5108316 | 2792 | rmd->mmu_update->ptr = virt_to_machine(ptep).maddr; |
de1ef206 IC |
2793 | rmd->mmu_update->val = pte_val_ma(pte); |
2794 | rmd->mmu_update++; | |
2795 | ||
2796 | return 0; | |
2797 | } | |
2798 | ||
a13d7201 | 2799 | static int do_remap_gfn(struct vm_area_struct *vma, |
4e8c0c8c | 2800 | unsigned long addr, |
a13d7201 | 2801 | xen_pfn_t *gfn, int nr, |
4e8c0c8c DV |
2802 | int *err_ptr, pgprot_t prot, |
2803 | unsigned domid, | |
2804 | struct page **pages) | |
de1ef206 | 2805 | { |
4e8c0c8c | 2806 | int err = 0; |
de1ef206 IC |
2807 | struct remap_data rmd; |
2808 | struct mmu_update mmu_update[REMAP_BATCH_SIZE]; | |
de1ef206 | 2809 | unsigned long range; |
4e8c0c8c | 2810 | int mapped = 0; |
de1ef206 | 2811 | |
314e51b9 | 2812 | BUG_ON(!((vma->vm_flags & (VM_PFNMAP | VM_IO)) == (VM_PFNMAP | VM_IO))); |
de1ef206 | 2813 | |
77945ca7 MR |
2814 | if (xen_feature(XENFEAT_auto_translated_physmap)) { |
2815 | #ifdef CONFIG_XEN_PVH | |
2816 | /* We need to update the local page tables and the xen HAP */ | |
a13d7201 | 2817 | return xen_xlate_remap_gfn_array(vma, addr, gfn, nr, err_ptr, |
4e8c0c8c | 2818 | prot, domid, pages); |
77945ca7 MR |
2819 | #else |
2820 | return -EINVAL; | |
2821 | #endif | |
2822 | } | |
2823 | ||
a13d7201 | 2824 | rmd.mfn = gfn; |
de1ef206 | 2825 | rmd.prot = prot; |
6a6256f9 | 2826 | /* We use the err_ptr to indicate whether we are doing a contiguous |
4e8c0c8c DV |
2827 | * mapping or a discontiguous mapping. */ |
2828 | rmd.contiguous = !err_ptr; | |
de1ef206 IC |
2829 | |
2830 | while (nr) { | |
4e8c0c8c DV |
2831 | int index = 0; |
2832 | int done = 0; | |
2833 | int batch = min(REMAP_BATCH_SIZE, nr); | |
2834 | int batch_left = batch; | |
de1ef206 IC |
2835 | range = (unsigned long)batch << PAGE_SHIFT; |
2836 | ||
2837 | rmd.mmu_update = mmu_update; | |
2838 | err = apply_to_page_range(vma->vm_mm, addr, range, | |
2839 | remap_area_mfn_pte_fn, &rmd); | |
2840 | if (err) | |
2841 | goto out; | |
2842 | ||
4e8c0c8c DV |
2843 | /* We record the error for each page that fails, but |
2844 | * continue mapping until the whole set is done */ | |
2845 | do { | |
2846 | int i; | |
2847 | ||
2848 | err = HYPERVISOR_mmu_update(&mmu_update[index], | |
2849 | batch_left, &done, domid); | |
2850 | ||
2851 | /* | |
a13d7201 JG |
2852 | * @err_ptr may be the same buffer as @gfn, so |
2853 | * only clear it after each chunk of @gfn is | |
4e8c0c8c DV |
2854 | * used. |
2855 | */ | |
2856 | if (err_ptr) { | |
2857 | for (i = index; i < index + done; i++) | |
2858 | err_ptr[i] = 0; | |
2859 | } | |
2860 | if (err < 0) { | |
2861 | if (!err_ptr) | |
2862 | goto out; | |
2863 | err_ptr[i] = err; | |
2864 | done++; /* Skip failed frame. */ | |
2865 | } else | |
2866 | mapped += done; | |
2867 | batch_left -= done; | |
2868 | index += done; | |
2869 | } while (batch_left); | |
de1ef206 IC |
2870 | |
2871 | nr -= batch; | |
2872 | addr += range; | |
4e8c0c8c DV |
2873 | if (err_ptr) |
2874 | err_ptr += batch; | |
914beb9f | 2875 | cond_resched(); |
de1ef206 | 2876 | } |
de1ef206 IC |
2877 | out: |
2878 | ||
95a7d768 | 2879 | xen_flush_tlb_all(); |
de1ef206 | 2880 | |
4e8c0c8c DV |
2881 | return err < 0 ? err : mapped; |
2882 | } | |
2883 | ||
a13d7201 | 2884 | int xen_remap_domain_gfn_range(struct vm_area_struct *vma, |
4e8c0c8c | 2885 | unsigned long addr, |
a13d7201 | 2886 | xen_pfn_t gfn, int nr, |
4e8c0c8c DV |
2887 | pgprot_t prot, unsigned domid, |
2888 | struct page **pages) | |
2889 | { | |
a13d7201 | 2890 | return do_remap_gfn(vma, addr, &gfn, nr, NULL, prot, domid, pages); |
de1ef206 | 2891 | } |
a13d7201 | 2892 | EXPORT_SYMBOL_GPL(xen_remap_domain_gfn_range); |
9a032e39 | 2893 | |
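/*
 * Editorial usage sketch: mapping a single foreign frame into a
 * userspace VMA, in the style of the privcmd driver.  Hypothetical
 * wrapper; the VMA must already be VM_IO | VM_PFNMAP, and pages may
 * be NULL for PV domains.
 */
static int example_map_foreign_frame(struct vm_area_struct *vma,
				     unsigned long addr,
				     xen_pfn_t gfn, domid_t domid)
{
	return xen_remap_domain_gfn_range(vma, addr, gfn, 1,
					  vma->vm_page_prot, domid, NULL);
}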
a13d7201 | 2894 | int xen_remap_domain_gfn_array(struct vm_area_struct *vma, |
4e8c0c8c | 2895 | unsigned long addr, |
a13d7201 | 2896 | xen_pfn_t *gfn, int nr, |
4e8c0c8c DV |
2897 | int *err_ptr, pgprot_t prot, |
2898 | unsigned domid, struct page **pages) | |
2899 | { | |
2900 | /* We BUG_ON because it's a programmer error to pass a NULL err_ptr, | |
2901 | * and it is otherwise quite hard to detect later what the actual |
2902 | * cause of "wrong memory was mapped in" was. |
2903 | */ | |
2904 | BUG_ON(err_ptr == NULL); | |
a13d7201 | 2905 | return do_remap_gfn(vma, addr, gfn, nr, err_ptr, prot, domid, pages); |
4e8c0c8c | 2906 | } |
a13d7201 | 2907 | EXPORT_SYMBOL_GPL(xen_remap_domain_gfn_array); |
4e8c0c8c DV |
2908 | |
2909 | ||
9a032e39 | 2910 | /* Returns: 0 success */ |
a13d7201 | 2911 | int xen_unmap_domain_gfn_range(struct vm_area_struct *vma, |
9a032e39 IC |
2912 | int numpgs, struct page **pages) |
2913 | { | |
2914 | if (!pages || !xen_feature(XENFEAT_auto_translated_physmap)) | |
2915 | return 0; | |
2916 | ||
77945ca7 | 2917 | #ifdef CONFIG_XEN_PVH |
628c28ee | 2918 | return xen_xlate_unmap_gfn_range(vma, numpgs, pages); |
77945ca7 | 2919 | #else |
9a032e39 | 2920 | return -EINVAL; |
77945ca7 | 2921 | #endif |
9a032e39 | 2922 | } |
a13d7201 | 2923 | EXPORT_SYMBOL_GPL(xen_unmap_domain_gfn_range); |