/*
 * Xen mmu operations
 *
 * This file contains the various mmu fetch and update operations.
 * The most important job they must perform is the mapping between the
 * domain's pfn and the overall machine mfns.
 *
 * Xen allows guests to directly update the pagetable, in a controlled
 * fashion. In other words, the guest modifies the same pagetable
 * that the CPU actually uses, which eliminates the overhead of having
 * a separate shadow pagetable.
 *
 * In order to allow this, it falls on the guest domain to map its
 * notion of a "physical" pfn - which is just a domain-local linear
 * address - into a real "machine address" which the CPU's MMU can
 * use.
 *
 * A pgd_t/pmd_t/pte_t will typically contain an mfn, and so can be
 * inserted directly into the pagetable. When creating a new
 * pte/pmd/pgd, it converts the passed pfn into an mfn. Conversely,
 * when reading the content back with __(pgd|pmd|pte)_val, it converts
 * the mfn back into a pfn.
 *
 * The other constraint is that all pages which make up a pagetable
 * must be mapped read-only in the guest. This prevents uncontrolled
 * guest updates to the pagetable. Xen strictly enforces this, and
 * will disallow any pagetable update which will end up mapping a
 * pagetable page RW, and will disallow using any writable page as a
 * pagetable.
 *
 * Naively, when loading %cr3 with the base of a new pagetable, Xen
 * would need to validate the whole pagetable before going on.
 * Naturally, this is quite slow. The solution is to "pin" a
 * pagetable, which enforces all the constraints on the pagetable even
 * when it is not actively in use. This means that Xen can be assured
 * that it is still valid when you do load it into %cr3, and doesn't
 * need to revalidate it.
 *
 * Jeremy Fitzhardinge <jeremy@xensource.com>, XenSource Inc, 2007
 */
#include <linux/sched/mm.h>
#include <linux/highmem.h>
#include <linux/debugfs.h>
#include <linux/bug.h>
#include <linux/vmalloc.h>
#include <linux/export.h>
#include <linux/init.h>
#include <linux/gfp.h>
#include <linux/memblock.h>
#include <linux/seq_file.h>
#include <linux/crash_dump.h>
#ifdef CONFIG_KEXEC_CORE
#include <linux/kexec.h>
#endif

#include <trace/events/xen.h>

#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/fixmap.h>
#include <asm/mmu_context.h>
#include <asm/setup.h>
#include <asm/paravirt.h>
#include <asm/e820/api.h>
#include <asm/linkage.h>
#include <asm/page.h>
#include <asm/init.h>
#include <asm/pat.h>
#include <asm/smp.h>

#include <asm/xen/hypercall.h>
#include <asm/xen/hypervisor.h>

#include <xen/xen.h>
#include <xen/page.h>
#include <xen/interface/xen.h>
#include <xen/interface/hvm/hvm_op.h>
#include <xen/interface/version.h>
#include <xen/interface/memory.h>
#include <xen/hvc-console.h>

#include "multicalls.h"
#include "mmu.h"
#include "debugfs.h"

#ifdef CONFIG_X86_32
/*
 * Identity map, in addition to plain kernel map. This needs to be
 * large enough to allocate page table pages to allocate the rest.
 * Each page can map 2MB.
 */
#define LEVEL1_IDENT_ENTRIES	(PTRS_PER_PTE * 4)
static RESERVE_BRK_ARRAY(pte_t, level1_ident_pgt, LEVEL1_IDENT_ENTRIES);
#endif
#ifdef CONFIG_X86_64
/* l3 pud for userspace vsyscall mapping */
static pud_t level3_user_vsyscall[PTRS_PER_PUD] __page_aligned_bss;
#endif /* CONFIG_X86_64 */

/*
 * Note about cr3 (pagetable base) values:
 *
 * xen_cr3 contains the current logical cr3 value; it contains the
 * last set cr3. This may not be the current effective cr3, because
 * its update may be being lazily deferred. However, a vcpu looking
 * at its own cr3 can use this value knowing that everything will
 * be self-consistent.
 *
 * xen_current_cr3 contains the actual vcpu cr3; it is set once the
 * hypercall to set the vcpu cr3 is complete (so it may be a little
 * out of date, but it will never be set early). If one vcpu is
 * looking at another vcpu's cr3 value, it should use this variable.
 */
DEFINE_PER_CPU(unsigned long, xen_cr3);		/* cr3 stored as physaddr */
DEFINE_PER_CPU(unsigned long, xen_current_cr3);	/* actual vcpu cr3 */

static phys_addr_t xen_pt_base, xen_pt_size __initdata;

/*
 * Just beyond the highest usermode address. STACK_TOP_MAX has a
 * redzone above it, so round it up to a PGD boundary.
 */
#define USER_LIMIT	((STACK_TOP_MAX + PGDIR_SIZE - 1) & PGDIR_MASK)

void make_lowmem_page_readonly(void *vaddr)
{
        pte_t *pte, ptev;
        unsigned long address = (unsigned long)vaddr;
        unsigned int level;

        pte = lookup_address(address, &level);
        if (pte == NULL)
                return;		/* vaddr missing */

        ptev = pte_wrprotect(*pte);

        if (HYPERVISOR_update_va_mapping(address, ptev, 0))
                BUG();
}

void make_lowmem_page_readwrite(void *vaddr)
{
        pte_t *pte, ptev;
        unsigned long address = (unsigned long)vaddr;
        unsigned int level;

        pte = lookup_address(address, &level);
        if (pte == NULL)
                return;		/* vaddr missing */

        ptev = pte_mkwrite(*pte);

        if (HYPERVISOR_update_va_mapping(address, ptev, 0))
                BUG();
}


static bool xen_page_pinned(void *ptr)
{
        struct page *page = virt_to_page(ptr);

        return PagePinned(page);
}

void xen_set_domain_pte(pte_t *ptep, pte_t pteval, unsigned domid)
{
        struct multicall_space mcs;
        struct mmu_update *u;

        trace_xen_mmu_set_domain_pte(ptep, pteval, domid);

        mcs = xen_mc_entry(sizeof(*u));
        u = mcs.args;

        /* ptep might be kmapped when using 32-bit HIGHPTE */
        u->ptr = virt_to_machine(ptep).maddr;
        u->val = pte_val_ma(pteval);

        MULTI_mmu_update(mcs.mc, mcs.args, 1, NULL, domid);

        xen_mc_issue(PARAVIRT_LAZY_MMU);
}
EXPORT_SYMBOL_GPL(xen_set_domain_pte);

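/*
 * Add an mmu_update entry to the multicall batch currently being built: if
 * the batch already ends in an mmu_update hypercall, just extend its argument
 * count, otherwise append a fresh mmu_update multicall entry.
 */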
static void xen_extend_mmu_update(const struct mmu_update *update)
{
        struct multicall_space mcs;
        struct mmu_update *u;

        mcs = xen_mc_extend_args(__HYPERVISOR_mmu_update, sizeof(*u));

        if (mcs.mc != NULL) {
                mcs.mc->args[1]++;
        } else {
                mcs = __xen_mc_entry(sizeof(*u));
                MULTI_mmu_update(mcs.mc, mcs.args, 1, NULL, DOMID_SELF);
        }

        u = mcs.args;
        *u = *update;
}

static void xen_extend_mmuext_op(const struct mmuext_op *op)
{
        struct multicall_space mcs;
        struct mmuext_op *u;

        mcs = xen_mc_extend_args(__HYPERVISOR_mmuext_op, sizeof(*u));

        if (mcs.mc != NULL) {
                mcs.mc->args[1]++;
        } else {
                mcs = __xen_mc_entry(sizeof(*u));
                MULTI_mmuext_op(mcs.mc, mcs.args, 1, NULL, DOMID_SELF);
        }

        u = mcs.args;
        *u = *op;
}

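/*
 * Update a pmd entry via a batched mmu_update hypercall; xen_set_pmd() takes
 * this path when the containing page table page is pinned (and therefore
 * mapped read-only in the guest).
 */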
static void xen_set_pmd_hyper(pmd_t *ptr, pmd_t val)
{
        struct mmu_update u;

        preempt_disable();

        xen_mc_batch();

        /* ptr may be ioremapped for 64-bit pagetable setup */
        u.ptr = arbitrary_virt_to_machine(ptr).maddr;
        u.val = pmd_val_ma(val);
        xen_extend_mmu_update(&u);

        xen_mc_issue(PARAVIRT_LAZY_MMU);

        preempt_enable();
}

static void xen_set_pmd(pmd_t *ptr, pmd_t val)
{
        trace_xen_mmu_set_pmd(ptr, val);

        /* If page is not pinned, we can just update the entry
           directly */
        if (!xen_page_pinned(ptr)) {
                *ptr = val;
                return;
        }

        xen_set_pmd_hyper(ptr, val);
}

/*
 * Associate a virtual page frame with a given physical page frame
 * and protection flags for that frame.
 */
void set_pte_mfn(unsigned long vaddr, unsigned long mfn, pgprot_t flags)
{
        set_pte_vaddr(vaddr, mfn_pte(mfn, flags));
}

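/*
 * Try to queue a pte update on the currently open lazy-MMU multicall batch.
 * Returns false when not in lazy MMU mode, in which case the caller has to
 * fall back to a direct write or a single hypercall.
 */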
static bool xen_batched_set_pte(pte_t *ptep, pte_t pteval)
{
        struct mmu_update u;

        if (paravirt_get_lazy_mode() != PARAVIRT_LAZY_MMU)
                return false;

        xen_mc_batch();

        u.ptr = virt_to_machine(ptep).maddr | MMU_NORMAL_PT_UPDATE;
        u.val = pte_val_ma(pteval);
        xen_extend_mmu_update(&u);

        xen_mc_issue(PARAVIRT_LAZY_MMU);

        return true;
}

static inline void __xen_set_pte(pte_t *ptep, pte_t pteval)
{
        if (!xen_batched_set_pte(ptep, pteval)) {
                /*
                 * Could call native_set_pte() here and trap and
                 * emulate the PTE write but with 32-bit guests this
                 * needs two traps (one for each of the two 32-bit
                 * words in the PTE) so do one hypercall directly
                 * instead.
                 */
                struct mmu_update u;

                u.ptr = virt_to_machine(ptep).maddr | MMU_NORMAL_PT_UPDATE;
                u.val = pte_val_ma(pteval);
                HYPERVISOR_mmu_update(&u, 1, NULL, DOMID_SELF);
        }
}

static void xen_set_pte(pte_t *ptep, pte_t pteval)
{
        trace_xen_mmu_set_pte(ptep, pteval);
        __xen_set_pte(ptep, pteval);
}

static void xen_set_pte_at(struct mm_struct *mm, unsigned long addr,
                           pte_t *ptep, pte_t pteval)
{
        trace_xen_mmu_set_pte_at(mm, addr, ptep, pteval);
        __xen_set_pte(ptep, pteval);
}

pte_t xen_ptep_modify_prot_start(struct mm_struct *mm,
                                 unsigned long addr, pte_t *ptep)
{
        /* Just return the pte as-is. We preserve the bits on commit */
        trace_xen_mmu_ptep_modify_prot_start(mm, addr, ptep, *ptep);
        return *ptep;
}

void xen_ptep_modify_prot_commit(struct mm_struct *mm, unsigned long addr,
                                 pte_t *ptep, pte_t pte)
{
        struct mmu_update u;

        trace_xen_mmu_ptep_modify_prot_commit(mm, addr, ptep, pte);
        xen_mc_batch();

        u.ptr = virt_to_machine(ptep).maddr | MMU_PT_UPDATE_PRESERVE_AD;
        u.val = pte_val_ma(pte);
        xen_extend_mmu_update(&u);

        xen_mc_issue(PARAVIRT_LAZY_MMU);
}

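/*
 * Convert a pte value holding a machine frame number back into one holding
 * the pseudo-physical frame number; a foreign mfn with no local pfn
 * (mfn_to_pfn() returns ~0) becomes a non-present pte.
 */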
/* Assume pteval_t is equivalent to all the other *val_t types. */
static pteval_t pte_mfn_to_pfn(pteval_t val)
{
        if (val & _PAGE_PRESENT) {
                unsigned long mfn = (val & PTE_PFN_MASK) >> PAGE_SHIFT;
                unsigned long pfn = mfn_to_pfn(mfn);

                pteval_t flags = val & PTE_FLAGS_MASK;
                if (unlikely(pfn == ~0))
                        val = flags & ~_PAGE_PRESENT;
                else
                        val = ((pteval_t)pfn << PAGE_SHIFT) | flags;
        }

        return val;
}

static pteval_t pte_pfn_to_mfn(pteval_t val)
{
        if (val & _PAGE_PRESENT) {
                unsigned long pfn = (val & PTE_PFN_MASK) >> PAGE_SHIFT;
                pteval_t flags = val & PTE_FLAGS_MASK;
                unsigned long mfn;

                mfn = __pfn_to_mfn(pfn);

                /*
                 * If there's no mfn for the pfn, then just create an
                 * empty non-present pte. Unfortunately this loses
                 * information about the original pfn, so
                 * pte_mfn_to_pfn is asymmetric.
                 */
                if (unlikely(mfn == INVALID_P2M_ENTRY)) {
                        mfn = 0;
                        flags = 0;
                } else
                        mfn &= ~(FOREIGN_FRAME_BIT | IDENTITY_FRAME_BIT);
                val = ((pteval_t)mfn << PAGE_SHIFT) | flags;
        }

        return val;
}

__visible pteval_t xen_pte_val(pte_t pte)
{
        pteval_t pteval = pte.pte;

        return pte_mfn_to_pfn(pteval);
}
PV_CALLEE_SAVE_REGS_THUNK(xen_pte_val);

__visible pgdval_t xen_pgd_val(pgd_t pgd)
{
        return pte_mfn_to_pfn(pgd.pgd);
}
PV_CALLEE_SAVE_REGS_THUNK(xen_pgd_val);

__visible pte_t xen_make_pte(pteval_t pte)
{
        pte = pte_pfn_to_mfn(pte);

        return native_make_pte(pte);
}
PV_CALLEE_SAVE_REGS_THUNK(xen_make_pte);

__visible pgd_t xen_make_pgd(pgdval_t pgd)
{
        pgd = pte_pfn_to_mfn(pgd);
        return native_make_pgd(pgd);
}
PV_CALLEE_SAVE_REGS_THUNK(xen_make_pgd);

__visible pmdval_t xen_pmd_val(pmd_t pmd)
{
        return pte_mfn_to_pfn(pmd.pmd);
}
PV_CALLEE_SAVE_REGS_THUNK(xen_pmd_val);

static void xen_set_pud_hyper(pud_t *ptr, pud_t val)
{
        struct mmu_update u;

        preempt_disable();

        xen_mc_batch();

        /* ptr may be ioremapped for 64-bit pagetable setup */
        u.ptr = arbitrary_virt_to_machine(ptr).maddr;
        u.val = pud_val_ma(val);
        xen_extend_mmu_update(&u);

        xen_mc_issue(PARAVIRT_LAZY_MMU);

        preempt_enable();
}

static void xen_set_pud(pud_t *ptr, pud_t val)
{
        trace_xen_mmu_set_pud(ptr, val);

        /* If page is not pinned, we can just update the entry
           directly */
        if (!xen_page_pinned(ptr)) {
                *ptr = val;
                return;
        }

        xen_set_pud_hyper(ptr, val);
}

#ifdef CONFIG_X86_PAE
static void xen_set_pte_atomic(pte_t *ptep, pte_t pte)
{
        trace_xen_mmu_set_pte_atomic(ptep, pte);
        set_64bit((u64 *)ptep, native_pte_val(pte));
}

static void xen_pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
        trace_xen_mmu_pte_clear(mm, addr, ptep);
        if (!xen_batched_set_pte(ptep, native_make_pte(0)))
                native_pte_clear(mm, addr, ptep);
}

static void xen_pmd_clear(pmd_t *pmdp)
{
        trace_xen_mmu_pmd_clear(pmdp);
        set_pmd(pmdp, __pmd(0));
}
#endif	/* CONFIG_X86_PAE */

__visible pmd_t xen_make_pmd(pmdval_t pmd)
{
        pmd = pte_pfn_to_mfn(pmd);
        return native_make_pmd(pmd);
}
PV_CALLEE_SAVE_REGS_THUNK(xen_make_pmd);

#if CONFIG_PGTABLE_LEVELS == 4
__visible pudval_t xen_pud_val(pud_t pud)
{
        return pte_mfn_to_pfn(pud.pud);
}
PV_CALLEE_SAVE_REGS_THUNK(xen_pud_val);

__visible pud_t xen_make_pud(pudval_t pud)
{
        pud = pte_pfn_to_mfn(pud);

        return native_make_pud(pud);
}
PV_CALLEE_SAVE_REGS_THUNK(xen_make_pud);

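/*
 * On 64-bit, a pgd may have a separate user pagetable whose address is kept
 * in page->private of the page backing the kernel pgd. Return the matching
 * slot in the user pgd, or NULL if there is none or the entry is beyond the
 * user address range.
 */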
static pgd_t *xen_get_user_pgd(pgd_t *pgd)
{
        pgd_t *pgd_page = (pgd_t *)(((unsigned long)pgd) & PAGE_MASK);
        unsigned offset = pgd - pgd_page;
        pgd_t *user_ptr = NULL;

        if (offset < pgd_index(USER_LIMIT)) {
                struct page *page = virt_to_page(pgd_page);
                user_ptr = (pgd_t *)page->private;
                if (user_ptr)
                        user_ptr += offset;
        }

        return user_ptr;
}

static void __xen_set_p4d_hyper(p4d_t *ptr, p4d_t val)
{
        struct mmu_update u;

        u.ptr = virt_to_machine(ptr).maddr;
        u.val = p4d_val_ma(val);
        xen_extend_mmu_update(&u);
}

/*
 * Raw hypercall-based set_p4d, intended for use in early boot before
 * there's a page structure. This implies:
 *  1. The only existing pagetable is the kernel's
 *  2. It is always pinned
 *  3. It has no user pagetable attached to it
 */
static void __init xen_set_p4d_hyper(p4d_t *ptr, p4d_t val)
{
        preempt_disable();

        xen_mc_batch();

        __xen_set_p4d_hyper(ptr, val);

        xen_mc_issue(PARAVIRT_LAZY_MMU);

        preempt_enable();
}

static void xen_set_p4d(p4d_t *ptr, p4d_t val)
{
        pgd_t *user_ptr = xen_get_user_pgd((pgd_t *)ptr);
        pgd_t pgd_val;

        trace_xen_mmu_set_p4d(ptr, (p4d_t *)user_ptr, val);

        /* If page is not pinned, we can just update the entry
           directly */
        if (!xen_page_pinned(ptr)) {
                *ptr = val;
                if (user_ptr) {
                        WARN_ON(xen_page_pinned(user_ptr));
                        pgd_val.pgd = p4d_val_ma(val);
                        *user_ptr = pgd_val;
                }
                return;
        }

        /* If it's pinned, then we can at least batch the kernel and
           user updates together. */
        xen_mc_batch();

        __xen_set_p4d_hyper(ptr, val);
        if (user_ptr)
                __xen_set_p4d_hyper((p4d_t *)user_ptr, val);

        xen_mc_issue(PARAVIRT_LAZY_MMU);
}
#endif	/* CONFIG_PGTABLE_LEVELS == 4 */

static int xen_pmd_walk(struct mm_struct *mm, pmd_t *pmd,
                int (*func)(struct mm_struct *mm, struct page *, enum pt_level),
                bool last, unsigned long limit)
{
        int i, nr, flush = 0;

        nr = last ? pmd_index(limit) + 1 : PTRS_PER_PMD;
        for (i = 0; i < nr; i++) {
                if (!pmd_none(pmd[i]))
                        flush |= (*func)(mm, pmd_page(pmd[i]), PT_PTE);
        }
        return flush;
}

static int xen_pud_walk(struct mm_struct *mm, pud_t *pud,
                int (*func)(struct mm_struct *mm, struct page *, enum pt_level),
                bool last, unsigned long limit)
{
        int i, nr, flush = 0;

        nr = last ? pud_index(limit) + 1 : PTRS_PER_PUD;
        for (i = 0; i < nr; i++) {
                pmd_t *pmd;

                if (pud_none(pud[i]))
                        continue;

                pmd = pmd_offset(&pud[i], 0);
                if (PTRS_PER_PMD > 1)
                        flush |= (*func)(mm, virt_to_page(pmd), PT_PMD);
                flush |= xen_pmd_walk(mm, pmd, func,
                                      last && i == nr - 1, limit);
        }
        return flush;
}

static int xen_p4d_walk(struct mm_struct *mm, p4d_t *p4d,
                int (*func)(struct mm_struct *mm, struct page *, enum pt_level),
                bool last, unsigned long limit)
{
        int i, nr, flush = 0;

        nr = last ? p4d_index(limit) + 1 : PTRS_PER_P4D;
        for (i = 0; i < nr; i++) {
                pud_t *pud;

                if (p4d_none(p4d[i]))
                        continue;

                pud = pud_offset(&p4d[i], 0);
                if (PTRS_PER_PUD > 1)
                        flush |= (*func)(mm, virt_to_page(pud), PT_PUD);
                flush |= xen_pud_walk(mm, pud, func,
                                      last && i == nr - 1, limit);
        }
        return flush;
}

/*
 * (Yet another) pagetable walker. This one is intended for pinning a
 * pagetable. This means that it walks a pagetable and calls the
 * callback function on each page it finds making up the page table,
 * at every level. It walks the entire pagetable, but it only bothers
 * pinning pte pages which are below limit. In the normal case this
 * will be STACK_TOP_MAX, but at boot we need to pin up to
 * FIXADDR_TOP.
 *
 * For 32-bit the important bit is that we don't pin beyond there,
 * because then we start getting into Xen's ptes.
 *
 * For 64-bit, we must skip the Xen hole in the middle of the address
 * space, just after the big x86-64 virtual hole.
 */
static int __xen_pgd_walk(struct mm_struct *mm, pgd_t *pgd,
                          int (*func)(struct mm_struct *mm, struct page *,
                                      enum pt_level),
                          unsigned long limit)
{
        int i, nr, flush = 0;
        unsigned hole_low, hole_high;

        /* The limit is the last byte to be touched */
        limit--;
        BUG_ON(limit >= FIXADDR_TOP);

        /*
         * 64-bit has a great big hole in the middle of the address
         * space, which contains the Xen mappings. On 32-bit these
         * will end up making a zero-sized hole and so is a no-op.
         */
        hole_low = pgd_index(USER_LIMIT);
        hole_high = pgd_index(PAGE_OFFSET);

        nr = pgd_index(limit) + 1;
        for (i = 0; i < nr; i++) {
                p4d_t *p4d;

                if (i >= hole_low && i < hole_high)
                        continue;

                if (pgd_none(pgd[i]))
                        continue;

                p4d = p4d_offset(&pgd[i], 0);
                if (PTRS_PER_P4D > 1)
                        flush |= (*func)(mm, virt_to_page(p4d), PT_P4D);
                flush |= xen_p4d_walk(mm, p4d, func, i == nr - 1, limit);
        }

        /* Do the top level last, so that the callbacks can use it as
           a cue to do final things like tlb flushes. */
        flush |= (*func)(mm, virt_to_page(pgd), PT_PGD);

        return flush;
}

static int xen_pgd_walk(struct mm_struct *mm,
                        int (*func)(struct mm_struct *mm, struct page *,
                                    enum pt_level),
                        unsigned long limit)
{
        return __xen_pgd_walk(mm, mm->pgd, func, limit);
}

/* If we're using split pte locks, then take the page's lock and
   return a pointer to it. Otherwise return NULL. */
static spinlock_t *xen_pte_lock(struct page *page, struct mm_struct *mm)
{
        spinlock_t *ptl = NULL;

#if USE_SPLIT_PTE_PTLOCKS
        ptl = ptlock_ptr(page);
        spin_lock_nest_lock(ptl, &mm->page_table_lock);
#endif

        return ptl;
}

static void xen_pte_unlock(void *v)
{
        spinlock_t *ptl = v;
        spin_unlock(ptl);
}

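/*
 * Queue a single MMUEXT pin/unpin operation (MMUEXT_PIN_L*_TABLE or
 * MMUEXT_UNPIN_TABLE) for the page at @pfn on the current multicall batch.
 */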
static void xen_do_pin(unsigned level, unsigned long pfn)
{
        struct mmuext_op op;

        op.cmd = level;
        op.arg1.mfn = pfn_to_mfn(pfn);

        xen_extend_mmuext_op(&op);
}

static int xen_pin_page(struct mm_struct *mm, struct page *page,
                        enum pt_level level)
{
        unsigned pgfl = TestSetPagePinned(page);
        int flush;

        if (pgfl)
                flush = 0;		/* already pinned */
        else if (PageHighMem(page))
                /* kmaps need flushing if we found an unpinned
                   highpage */
                flush = 1;
        else {
                void *pt = lowmem_page_address(page);
                unsigned long pfn = page_to_pfn(page);
                struct multicall_space mcs = __xen_mc_entry(0);
                spinlock_t *ptl;

                flush = 0;

                /*
                 * We need to hold the pagetable lock between the time
                 * we make the pagetable RO and when we actually pin
                 * it. If we don't, then other users may come in and
                 * attempt to update the pagetable by writing it,
                 * which will fail because the memory is RO but not
                 * pinned, so Xen won't do the trap'n'emulate.
                 *
                 * If we're using split pte locks, we can't hold the
                 * entire pagetable's worth of locks during the
                 * traverse, because we may wrap the preempt count (8
                 * bits). The solution is to mark RO and pin each PTE
                 * page while holding the lock. This means the number
                 * of locks we end up holding is never more than a
                 * batch size (~32 entries, at present).
                 *
                 * If we're not using split pte locks, we needn't pin
                 * the PTE pages independently, because we're
                 * protected by the overall pagetable lock.
                 */
                ptl = NULL;
                if (level == PT_PTE)
                        ptl = xen_pte_lock(page, mm);

                MULTI_update_va_mapping(mcs.mc, (unsigned long)pt,
                                        pfn_pte(pfn, PAGE_KERNEL_RO),
                                        level == PT_PGD ? UVMF_TLB_FLUSH : 0);

                if (ptl) {
                        xen_do_pin(MMUEXT_PIN_L1_TABLE, pfn);

                        /* Queue a deferred unlock for when this batch
                           is completed. */
                        xen_mc_callback(xen_pte_unlock, ptl);
                }
        }

        return flush;
}

/* This is called just after a mm has been created, but it has not
   been used yet. We need to make sure that its pagetable is all
   read-only, and can be pinned. */
static void __xen_pgd_pin(struct mm_struct *mm, pgd_t *pgd)
{
        trace_xen_mmu_pgd_pin(mm, pgd);

        xen_mc_batch();

        if (__xen_pgd_walk(mm, pgd, xen_pin_page, USER_LIMIT)) {
                /* re-enable interrupts for flushing */
                xen_mc_issue(0);

                kmap_flush_unused();

                xen_mc_batch();
        }

#ifdef CONFIG_X86_64
        {
                pgd_t *user_pgd = xen_get_user_pgd(pgd);

                xen_do_pin(MMUEXT_PIN_L4_TABLE, PFN_DOWN(__pa(pgd)));

                if (user_pgd) {
                        xen_pin_page(mm, virt_to_page(user_pgd), PT_PGD);
                        xen_do_pin(MMUEXT_PIN_L4_TABLE,
                                   PFN_DOWN(__pa(user_pgd)));
                }
        }
#else /* CONFIG_X86_32 */
#ifdef CONFIG_X86_PAE
        /* Need to make sure unshared kernel PMD is pinnable */
        xen_pin_page(mm, pgd_page(pgd[pgd_index(TASK_SIZE)]),
                     PT_PMD);
#endif
        xen_do_pin(MMUEXT_PIN_L3_TABLE, PFN_DOWN(__pa(pgd)));
#endif /* CONFIG_X86_64 */
        xen_mc_issue(0);
}

static void xen_pgd_pin(struct mm_struct *mm)
{
        __xen_pgd_pin(mm, mm->pgd);
}

/*
 * On save, we need to pin all pagetables to make sure they get their
 * mfns turned into pfns. Search the list for any unpinned pgds and pin
 * them (unpinned pgds are not currently in use, probably because the
 * process is under construction or destruction).
 *
 * Expected to be called in stop_machine() ("equivalent to taking
 * every spinlock in the system"), so the locking doesn't really
 * matter all that much.
 */
void xen_mm_pin_all(void)
{
        struct page *page;

        spin_lock(&pgd_lock);

        list_for_each_entry(page, &pgd_list, lru) {
                if (!PagePinned(page)) {
                        __xen_pgd_pin(&init_mm, (pgd_t *)page_address(page));
                        SetPageSavePinned(page);
                }
        }

        spin_unlock(&pgd_lock);
}

/*
 * The init_mm pagetable is really pinned as soon as it's created, but
 * that's before we have page structures to store the bits. So do all
 * the book-keeping now.
 */
static int __init xen_mark_pinned(struct mm_struct *mm, struct page *page,
                                  enum pt_level level)
{
        SetPagePinned(page);
        return 0;
}

static void __init xen_mark_init_mm_pinned(void)
{
        xen_pgd_walk(&init_mm, xen_mark_pinned, FIXADDR_TOP);
}

static int xen_unpin_page(struct mm_struct *mm, struct page *page,
                          enum pt_level level)
{
        unsigned pgfl = TestClearPagePinned(page);

        if (pgfl && !PageHighMem(page)) {
                void *pt = lowmem_page_address(page);
                unsigned long pfn = page_to_pfn(page);
                spinlock_t *ptl = NULL;
                struct multicall_space mcs;

                /*
                 * Do the converse to pin_page. If we're using split
                 * pte locks, we must be holding the lock while the
                 * pte page is unpinned but still RO to prevent
                 * concurrent updates from seeing it in this
                 * partially-pinned state.
                 */
                if (level == PT_PTE) {
                        ptl = xen_pte_lock(page, mm);

                        if (ptl)
                                xen_do_pin(MMUEXT_UNPIN_TABLE, pfn);
                }

                mcs = __xen_mc_entry(0);

                MULTI_update_va_mapping(mcs.mc, (unsigned long)pt,
                                        pfn_pte(pfn, PAGE_KERNEL),
                                        level == PT_PGD ? UVMF_TLB_FLUSH : 0);

                if (ptl) {
                        /* unlock when batch completed */
                        xen_mc_callback(xen_pte_unlock, ptl);
                }
        }

        return 0;		/* never need to flush on unpin */
}

/* Release a pagetable's pages back as normal RW */
static void __xen_pgd_unpin(struct mm_struct *mm, pgd_t *pgd)
{
        trace_xen_mmu_pgd_unpin(mm, pgd);

        xen_mc_batch();

        xen_do_pin(MMUEXT_UNPIN_TABLE, PFN_DOWN(__pa(pgd)));

#ifdef CONFIG_X86_64
        {
                pgd_t *user_pgd = xen_get_user_pgd(pgd);

                if (user_pgd) {
                        xen_do_pin(MMUEXT_UNPIN_TABLE,
                                   PFN_DOWN(__pa(user_pgd)));
                        xen_unpin_page(mm, virt_to_page(user_pgd), PT_PGD);
                }
        }
#endif

#ifdef CONFIG_X86_PAE
        /* Need to make sure unshared kernel PMD is unpinned */
        xen_unpin_page(mm, pgd_page(pgd[pgd_index(TASK_SIZE)]),
                       PT_PMD);
#endif

        __xen_pgd_walk(mm, pgd, xen_unpin_page, USER_LIMIT);

        xen_mc_issue(0);
}

static void xen_pgd_unpin(struct mm_struct *mm)
{
        __xen_pgd_unpin(mm, mm->pgd);
}

/*
 * On resume, undo any pinning done at save, so that the rest of the
 * kernel doesn't see any unexpected pinned pagetables.
 */
void xen_mm_unpin_all(void)
{
        struct page *page;

        spin_lock(&pgd_lock);

        list_for_each_entry(page, &pgd_list, lru) {
                if (PageSavePinned(page)) {
                        BUG_ON(!PagePinned(page));
                        __xen_pgd_unpin(&init_mm, (pgd_t *)page_address(page));
                        ClearPageSavePinned(page);
                }
        }

        spin_unlock(&pgd_lock);
}

static void xen_activate_mm(struct mm_struct *prev, struct mm_struct *next)
{
        spin_lock(&next->page_table_lock);
        xen_pgd_pin(next);
        spin_unlock(&next->page_table_lock);
}

static void xen_dup_mmap(struct mm_struct *oldmm, struct mm_struct *mm)
{
        spin_lock(&mm->page_table_lock);
        xen_pgd_pin(mm);
        spin_unlock(&mm->page_table_lock);
}


#ifdef CONFIG_SMP
/* Another cpu may still have its %cr3 pointing at the pagetable, so
   we need to repoint it somewhere else before we can unpin it. */
static void drop_other_mm_ref(void *info)
{
        struct mm_struct *mm = info;
        struct mm_struct *active_mm;

        active_mm = this_cpu_read(cpu_tlbstate.active_mm);

        if (active_mm == mm && this_cpu_read(cpu_tlbstate.state) != TLBSTATE_OK)
                leave_mm(smp_processor_id());

        /* If this cpu still has a stale cr3 reference, then make sure
           it has been flushed. */
        if (this_cpu_read(xen_current_cr3) == __pa(mm->pgd))
                load_cr3(swapper_pg_dir);
}

static void xen_drop_mm_ref(struct mm_struct *mm)
{
        cpumask_var_t mask;
        unsigned cpu;

        if (current->active_mm == mm) {
                if (current->mm == mm)
                        load_cr3(swapper_pg_dir);
                else
                        leave_mm(smp_processor_id());
        }

        /* Get the "official" set of cpus referring to our pagetable. */
        if (!alloc_cpumask_var(&mask, GFP_ATOMIC)) {
                for_each_online_cpu(cpu) {
                        if (!cpumask_test_cpu(cpu, mm_cpumask(mm))
                            && per_cpu(xen_current_cr3, cpu) != __pa(mm->pgd))
                                continue;
                        smp_call_function_single(cpu, drop_other_mm_ref, mm, 1);
                }
                return;
        }
        cpumask_copy(mask, mm_cpumask(mm));

        /* It's possible that a vcpu may have a stale reference to our
           cr3, because it's in lazy mode, and it hasn't yet flushed
           its set of pending hypercalls. In this case, we can
           look at its actual current cr3 value, and force it to flush
           if needed. */
        for_each_online_cpu(cpu) {
                if (per_cpu(xen_current_cr3, cpu) == __pa(mm->pgd))
                        cpumask_set_cpu(cpu, mask);
        }

        if (!cpumask_empty(mask))
                smp_call_function_many(mask, drop_other_mm_ref, mm, 1);
        free_cpumask_var(mask);
}
#else
static void xen_drop_mm_ref(struct mm_struct *mm)
{
        if (current->active_mm == mm)
                load_cr3(swapper_pg_dir);
}
#endif

/*
 * While a process runs, Xen pins its pagetables, which means that the
 * hypervisor forces it to be read-only, and it controls all updates
 * to it. This means that all pagetable updates have to go via the
 * hypervisor, which is moderately expensive.
 *
 * Since we're pulling the pagetable down, we switch to use init_mm,
 * unpin old process pagetable and mark it all read-write, which
 * allows further operations on it to be simple memory accesses.
 *
 * The only subtle point is that another CPU may be still using the
 * pagetable because of lazy tlb flushing. This means we need to
 * switch all CPUs off this pagetable before we can unpin it.
 */
static void xen_exit_mmap(struct mm_struct *mm)
{
        get_cpu();		/* make sure we don't move around */
        xen_drop_mm_ref(mm);
        put_cpu();

        spin_lock(&mm->page_table_lock);

        /* pgd may not be pinned in the error exit path of execve */
        if (xen_page_pinned(mm->pgd))
                xen_pgd_unpin(mm);

        spin_unlock(&mm->page_table_lock);
}

static void xen_post_allocator_init(void);

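/*
 * Pin or unpin a single page table page with an immediate (non-batched)
 * mmuext hypercall, and BUG() if the hypervisor rejects it. Only used at
 * init time.
 */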
static void __init pin_pagetable_pfn(unsigned cmd, unsigned long pfn)
{
        struct mmuext_op op;

        op.cmd = cmd;
        op.arg1.mfn = pfn_to_mfn(pfn);
        if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF))
                BUG();
}

#ifdef CONFIG_X86_64
static void __init xen_cleanhighmap(unsigned long vaddr,
                                    unsigned long vaddr_end)
{
        unsigned long kernel_end = roundup((unsigned long)_brk_end, PMD_SIZE) - 1;
        pmd_t *pmd = level2_kernel_pgt + pmd_index(vaddr);

        /* NOTE: The loop is more greedy than the cleanup_highmap variant.
         * We include the PMD passed in on _both_ boundaries. */
        for (; vaddr <= vaddr_end && (pmd < (level2_kernel_pgt + PTRS_PER_PMD));
                        pmd++, vaddr += PMD_SIZE) {
                if (pmd_none(*pmd))
                        continue;
                if (vaddr < (unsigned long) _text || vaddr > kernel_end)
                        set_pmd(pmd, __pmd(0));
        }
        /* In case we did something silly, we should crash in this function
         * instead of somewhere later and be confusing. */
        xen_mc_flush();
}

/*
 * Make a page range writeable and free it.
 */
static void __init xen_free_ro_pages(unsigned long paddr, unsigned long size)
{
        void *vaddr = __va(paddr);
        void *vaddr_end = vaddr + size;

        for (; vaddr < vaddr_end; vaddr += PAGE_SIZE)
                make_lowmem_page_readwrite(vaddr);

        memblock_free(paddr, size);
}

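/*
 * Hand back the page holding a no-longer-needed page table: optionally unpin
 * it, clear its Pinned flag and release it as an ordinary RW page.
 */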
static void __init xen_cleanmfnmap_free_pgtbl(void *pgtbl, bool unpin)
{
        unsigned long pa = __pa(pgtbl) & PHYSICAL_PAGE_MASK;

        if (unpin)
                pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, PFN_DOWN(pa));
        ClearPagePinned(virt_to_page(__va(pa)));
        xen_free_ro_pages(pa, PAGE_SIZE);
}

static void __init xen_cleanmfnmap_pmd(pmd_t *pmd, bool unpin)
{
        unsigned long pa;
        pte_t *pte_tbl;
        int i;

        if (pmd_large(*pmd)) {
                pa = pmd_val(*pmd) & PHYSICAL_PAGE_MASK;
                xen_free_ro_pages(pa, PMD_SIZE);
                return;
        }

        pte_tbl = pte_offset_kernel(pmd, 0);
        for (i = 0; i < PTRS_PER_PTE; i++) {
                if (pte_none(pte_tbl[i]))
                        continue;
                pa = pte_pfn(pte_tbl[i]) << PAGE_SHIFT;
                xen_free_ro_pages(pa, PAGE_SIZE);
        }
        set_pmd(pmd, __pmd(0));
        xen_cleanmfnmap_free_pgtbl(pte_tbl, unpin);
}

static void __init xen_cleanmfnmap_pud(pud_t *pud, bool unpin)
{
        unsigned long pa;
        pmd_t *pmd_tbl;
        int i;

        if (pud_large(*pud)) {
                pa = pud_val(*pud) & PHYSICAL_PAGE_MASK;
                xen_free_ro_pages(pa, PUD_SIZE);
                return;
        }

        pmd_tbl = pmd_offset(pud, 0);
        for (i = 0; i < PTRS_PER_PMD; i++) {
                if (pmd_none(pmd_tbl[i]))
                        continue;
                xen_cleanmfnmap_pmd(pmd_tbl + i, unpin);
        }
        set_pud(pud, __pud(0));
        xen_cleanmfnmap_free_pgtbl(pmd_tbl, unpin);
}

static void __init xen_cleanmfnmap_p4d(p4d_t *p4d, bool unpin)
{
        unsigned long pa;
        pud_t *pud_tbl;
        int i;

        if (p4d_large(*p4d)) {
                pa = p4d_val(*p4d) & PHYSICAL_PAGE_MASK;
                xen_free_ro_pages(pa, P4D_SIZE);
                return;
        }

        pud_tbl = pud_offset(p4d, 0);
        for (i = 0; i < PTRS_PER_PUD; i++) {
                if (pud_none(pud_tbl[i]))
                        continue;
                xen_cleanmfnmap_pud(pud_tbl + i, unpin);
        }
        set_p4d(p4d, __p4d(0));
        xen_cleanmfnmap_free_pgtbl(pud_tbl, unpin);
}

/*
 * Since it is well isolated we can (and since it is perhaps large we should)
 * also free the page tables mapping the initial P->M table.
 */
static void __init xen_cleanmfnmap(unsigned long vaddr)
{
        pgd_t *pgd;
        p4d_t *p4d;
        unsigned int i;
        bool unpin;

        unpin = (vaddr == 2 * PGDIR_SIZE);
        vaddr &= PMD_MASK;
        pgd = pgd_offset_k(vaddr);
        p4d = p4d_offset(pgd, 0);
        for (i = 0; i < PTRS_PER_P4D; i++) {
                if (p4d_none(p4d[i]))
                        continue;
                xen_cleanmfnmap_p4d(p4d + i, unpin);
        }
        if (IS_ENABLED(CONFIG_X86_5LEVEL)) {
                set_pgd(pgd, __pgd(0));
                xen_cleanmfnmap_free_pgtbl(p4d, unpin);
        }
}

static void __init xen_pagetable_p2m_free(void)
{
        unsigned long size;
        unsigned long addr;

        size = PAGE_ALIGN(xen_start_info->nr_pages * sizeof(unsigned long));

        /* No memory or already called. */
        if ((unsigned long)xen_p2m_addr == xen_start_info->mfn_list)
                return;

        /* using __ka address and sticking INVALID_P2M_ENTRY! */
        memset((void *)xen_start_info->mfn_list, 0xff, size);

        addr = xen_start_info->mfn_list;
        /*
         * We could be in __ka space.
         * We roundup to the PMD, which means that if anybody at this stage is
         * using the __ka address of xen_start_info or
         * xen_start_info->shared_info they are going to crash. Fortunately
         * we have already revectored in xen_setup_kernel_pagetable and in
         * xen_setup_shared_info.
         */
        size = roundup(size, PMD_SIZE);

        if (addr >= __START_KERNEL_map) {
                xen_cleanhighmap(addr, addr + size);
                size = PAGE_ALIGN(xen_start_info->nr_pages *
                                  sizeof(unsigned long));
                memblock_free(__pa(addr), size);
        } else {
                xen_cleanmfnmap(addr);
        }
}

static void __init xen_pagetable_cleanhighmap(void)
{
        unsigned long size;
        unsigned long addr;

        /* At this stage, cleanup_highmap has already cleaned __ka space
         * from _brk_limit way up to the max_pfn_mapped (which is the end of
         * the ramdisk). We continue on, erasing PMD entries that point to page
         * tables - do note that they are accessible at this stage via __va.
         * For good measure we also round up to the PMD - which means that if
         * anybody is using __ka address to the initial boot-stack - and try
         * to use it - they are going to crash. The xen_start_info has been
         * taken care of already in xen_setup_kernel_pagetable. */
        addr = xen_start_info->pt_base;
        size = roundup(xen_start_info->nr_pt_frames * PAGE_SIZE, PMD_SIZE);

        xen_cleanhighmap(addr, addr + size);
        xen_start_info->pt_base = (unsigned long)__va(__pa(xen_start_info->pt_base));
#ifdef DEBUG
        /* This is superfluous and is not necessary, but you know what
         * let's do it. The MODULES_VADDR -> MODULES_END should be clear of
         * anything at this stage. */
        xen_cleanhighmap(MODULES_VADDR, roundup(MODULES_VADDR, PUD_SIZE) - 1);
#endif
}
#endif

static void __init xen_pagetable_p2m_setup(void)
{
        xen_vmalloc_p2m_tree();

#ifdef CONFIG_X86_64
        xen_pagetable_p2m_free();

        xen_pagetable_cleanhighmap();
#endif
        /* And revector! Bye bye old array */
        xen_start_info->mfn_list = (unsigned long)xen_p2m_addr;
}

static void __init xen_pagetable_init(void)
{
        paging_init();
        xen_post_allocator_init();

        xen_pagetable_p2m_setup();

        /* Allocate and initialize top and mid mfn levels for p2m structure */
        xen_build_mfn_list_list();

        /* Remap memory freed due to conflicts with E820 map */
        xen_remap_memory();

        xen_setup_shared_info();
}

static void xen_write_cr2(unsigned long cr2)
{
        this_cpu_read(xen_vcpu)->arch.cr2 = cr2;
}

static unsigned long xen_read_cr2(void)
{
        return this_cpu_read(xen_vcpu)->arch.cr2;
}

unsigned long xen_read_cr2_direct(void)
{
        return this_cpu_read(xen_vcpu_info.arch.cr2);
}

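/*
 * TLB flushes are issued as MMUEXT operations via the multicall interface so
 * that they can be coalesced with other pending updates while in lazy MMU
 * mode.
 */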
static void xen_flush_tlb(void)
{
        struct mmuext_op *op;
        struct multicall_space mcs;

        trace_xen_mmu_flush_tlb(0);

        preempt_disable();

        mcs = xen_mc_entry(sizeof(*op));

        op = mcs.args;
        op->cmd = MMUEXT_TLB_FLUSH_LOCAL;
        MULTI_mmuext_op(mcs.mc, op, 1, NULL, DOMID_SELF);

        xen_mc_issue(PARAVIRT_LAZY_MMU);

        preempt_enable();
}

static void xen_flush_tlb_single(unsigned long addr)
{
        struct mmuext_op *op;
        struct multicall_space mcs;

        trace_xen_mmu_flush_tlb_single(addr);

        preempt_disable();

        mcs = xen_mc_entry(sizeof(*op));
        op = mcs.args;
        op->cmd = MMUEXT_INVLPG_LOCAL;
        op->arg1.linear_addr = addr & PAGE_MASK;
        MULTI_mmuext_op(mcs.mc, op, 1, NULL, DOMID_SELF);

        xen_mc_issue(PARAVIRT_LAZY_MMU);

        preempt_enable();
}

static void xen_flush_tlb_others(const struct cpumask *cpus,
                                 struct mm_struct *mm, unsigned long start,
                                 unsigned long end)
{
        struct {
                struct mmuext_op op;
#ifdef CONFIG_SMP
                DECLARE_BITMAP(mask, num_processors);
#else
                DECLARE_BITMAP(mask, NR_CPUS);
#endif
        } *args;
        struct multicall_space mcs;

        trace_xen_mmu_flush_tlb_others(cpus, mm, start, end);

        if (cpumask_empty(cpus))
                return;		/* nothing to do */

        mcs = xen_mc_entry(sizeof(*args));
        args = mcs.args;
        args->op.arg2.vcpumask = to_cpumask(args->mask);

        /* Remove us, and any offline CPUS. */
        cpumask_and(to_cpumask(args->mask), cpus, cpu_online_mask);
        cpumask_clear_cpu(smp_processor_id(), to_cpumask(args->mask));

        args->op.cmd = MMUEXT_TLB_FLUSH_MULTI;
        if (end != TLB_FLUSH_ALL && (end - start) <= PAGE_SIZE) {
                args->op.cmd = MMUEXT_INVLPG_MULTI;
                args->op.arg1.linear_addr = start;
        }

        MULTI_mmuext_op(mcs.mc, &args->op, 1, NULL, DOMID_SELF);

        xen_mc_issue(PARAVIRT_LAZY_MMU);
}

static unsigned long xen_read_cr3(void)
{
        return this_cpu_read(xen_cr3);
}

static void set_current_cr3(void *v)
{
        this_cpu_write(xen_current_cr3, (unsigned long)v);
}

static void __xen_write_cr3(bool kernel, unsigned long cr3)
{
        struct mmuext_op op;
        unsigned long mfn;

        trace_xen_mmu_write_cr3(kernel, cr3);

        if (cr3)
                mfn = pfn_to_mfn(PFN_DOWN(cr3));
        else
                mfn = 0;

        WARN_ON(mfn == 0 && kernel);

        op.cmd = kernel ? MMUEXT_NEW_BASEPTR : MMUEXT_NEW_USER_BASEPTR;
        op.arg1.mfn = mfn;

        xen_extend_mmuext_op(&op);

        if (kernel) {
                this_cpu_write(xen_cr3, cr3);

                /* Update xen_current_cr3 once the batch has actually
                   been submitted. */
                xen_mc_callback(set_current_cr3, (void *)cr3);
        }
}
static void xen_write_cr3(unsigned long cr3)
{
        BUG_ON(preemptible());

        xen_mc_batch();		/* disables interrupts */

        /* Update while interrupts are disabled, so it's atomic with
           respect to IPIs */
        this_cpu_write(xen_cr3, cr3);

        __xen_write_cr3(true, cr3);

#ifdef CONFIG_X86_64
        {
                pgd_t *user_pgd = xen_get_user_pgd(__va(cr3));
                if (user_pgd)
                        __xen_write_cr3(false, __pa(user_pgd));
                else
                        __xen_write_cr3(false, 0);
        }
#endif

        xen_mc_issue(PARAVIRT_LAZY_CPU);	/* interrupts restored */
}

#ifdef CONFIG_X86_64
/*
 * At the start of the day - when Xen launches a guest, it has already
 * built pagetables for the guest. We diligently look over them
 * in xen_setup_kernel_pagetable and graft them as appropriate into the
 * init_level4_pgt and its friends. Then when we are happy we load
 * the new init_level4_pgt - and continue on.
 *
 * The generic code starts (start_kernel) and 'init_mem_mapping' sets
 * up the rest of the pagetables. When it has completed it loads the cr3.
 * N.B. that baremetal would start at 'start_kernel' (and the early
 * #PF handler would create bootstrap pagetables) - so we are running
 * with the same assumptions as what to do when write_cr3 is executed
 * at this point.
 *
 * Since there are no user-page tables at all, we have two variants
 * of xen_write_cr3 - the early bootup (this one), and the late one
 * (xen_write_cr3). The reason we have to do that is that in 64-bit
 * the Linux kernel and user-space are both in ring 3 while the
 * hypervisor is in ring 0.
 */
static void __init xen_write_cr3_init(unsigned long cr3)
{
        BUG_ON(preemptible());

        xen_mc_batch();		/* disables interrupts */

        /* Update while interrupts are disabled, so it's atomic with
           respect to IPIs */
        this_cpu_write(xen_cr3, cr3);

        __xen_write_cr3(true, cr3);

        xen_mc_issue(PARAVIRT_LAZY_CPU);	/* interrupts restored */
}
#endif

static int xen_pgd_alloc(struct mm_struct *mm)
{
        pgd_t *pgd = mm->pgd;
        int ret = 0;

        BUG_ON(PagePinned(virt_to_page(pgd)));

#ifdef CONFIG_X86_64
        {
                struct page *page = virt_to_page(pgd);
                pgd_t *user_pgd;

                BUG_ON(page->private != 0);

                ret = -ENOMEM;

                user_pgd = (pgd_t *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
                page->private = (unsigned long)user_pgd;

                if (user_pgd != NULL) {
#ifdef CONFIG_X86_VSYSCALL_EMULATION
                        user_pgd[pgd_index(VSYSCALL_ADDR)] =
                                __pgd(__pa(level3_user_vsyscall) | _PAGE_TABLE);
#endif
                        ret = 0;
                }

                BUG_ON(PagePinned(virt_to_page(xen_get_user_pgd(pgd))));
        }
#endif
        return ret;
}

static void xen_pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
#ifdef CONFIG_X86_64
        pgd_t *user_pgd = xen_get_user_pgd(pgd);

        if (user_pgd)
                free_page((unsigned long)user_pgd);
#endif
}

/*
 * Init-time set_pte while constructing initial pagetables, which
 * doesn't allow RO page table pages to be remapped RW.
 *
 * If there is no MFN for this PFN then this page is initially
 * ballooned out so clear the PTE (as in decrease_reservation() in
 * drivers/xen/balloon.c).
 *
 * Many of these PTE updates are done on unpinned and writable pages
 * and doing a hypercall for these is unnecessary and expensive. At
 * this point it is not possible to tell if a page is pinned or not,
 * so always write the PTE directly and rely on Xen trapping and
 * emulating any updates as necessary.
 */
__visible pte_t xen_make_pte_init(pteval_t pte)
{
#ifdef CONFIG_X86_64
        unsigned long pfn;

        /*
         * Pages belonging to the initial p2m list mapped outside the default
         * address range must be mapped read-only. This region contains the
         * page tables for mapping the p2m list, too, and page tables MUST be
         * mapped read-only.
         */
        pfn = (pte & PTE_PFN_MASK) >> PAGE_SHIFT;
        if (xen_start_info->mfn_list < __START_KERNEL_map &&
            pfn >= xen_start_info->first_p2m_pfn &&
            pfn < xen_start_info->first_p2m_pfn + xen_start_info->nr_p2m_frames)
                pte &= ~_PAGE_RW;
#endif
        pte = pte_pfn_to_mfn(pte);
        return native_make_pte(pte);
}
PV_CALLEE_SAVE_REGS_THUNK(xen_make_pte_init);

static void __init xen_set_pte_init(pte_t *ptep, pte_t pte)
{
#ifdef CONFIG_X86_32
        /* If there's an existing pte, then don't allow _PAGE_RW to be set */
        if (pte_mfn(pte) != INVALID_P2M_ENTRY
            && pte_val_ma(*ptep) & _PAGE_PRESENT)
                pte = __pte_ma(((pte_val_ma(*ptep) & _PAGE_RW) | ~_PAGE_RW) &
                               pte_val_ma(pte));
#endif
        native_set_pte(ptep, pte);
}

/* Early in boot, while setting up the initial pagetable, assume
   everything is pinned. */
static void __init xen_alloc_pte_init(struct mm_struct *mm, unsigned long pfn)
{
#ifdef CONFIG_FLATMEM
        BUG_ON(mem_map);	/* should only be used early */
#endif
        make_lowmem_page_readonly(__va(PFN_PHYS(pfn)));
        pin_pagetable_pfn(MMUEXT_PIN_L1_TABLE, pfn);
}

/* Used for pmd and pud */
static void __init xen_alloc_pmd_init(struct mm_struct *mm, unsigned long pfn)
{
#ifdef CONFIG_FLATMEM
        BUG_ON(mem_map);	/* should only be used early */
#endif
        make_lowmem_page_readonly(__va(PFN_PHYS(pfn)));
}

/* Early release_pte assumes that all pts are pinned, since there's
   only init_mm and anything attached to that is pinned. */
static void __init xen_release_pte_init(unsigned long pfn)
{
        pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, pfn);
        make_lowmem_page_readwrite(__va(PFN_PHYS(pfn)));
}

static void __init xen_release_pmd_init(unsigned long pfn)
{
        make_lowmem_page_readwrite(__va(PFN_PHYS(pfn)));
}

static inline void __pin_pagetable_pfn(unsigned cmd, unsigned long pfn)
{
        struct multicall_space mcs;
        struct mmuext_op *op;

        mcs = __xen_mc_entry(sizeof(*op));
        op = mcs.args;
        op->cmd = cmd;
        op->arg1.mfn = pfn_to_mfn(pfn);

        MULTI_mmuext_op(mcs.mc, mcs.args, 1, NULL, DOMID_SELF);
}

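/*
 * Queue an update of the kernel linear mapping of the page at @pfn to the
 * given protection on the current multicall batch; used to flip page table
 * pages between PAGE_KERNEL_RO and PAGE_KERNEL.
 */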
1642 | static inline void __set_pfn_prot(unsigned long pfn, pgprot_t prot) | |
1643 | { | |
1644 | struct multicall_space mcs; | |
1645 | unsigned long addr = (unsigned long)__va(pfn << PAGE_SHIFT); | |
1646 | ||
1647 | mcs = __xen_mc_entry(0); | |
1648 | MULTI_update_va_mapping(mcs.mc, (unsigned long)addr, | |
1649 | pfn_pte(pfn, prot), 0); | |
1650 | } | |
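/*
 * Both helpers above only queue work: __xen_mc_entry() reserves a slot in
 * the per-CPU multicall buffer and MULTI_mmuext_op()/MULTI_update_va_mapping()
 * fill it in. Normally nothing is submitted to Xen until the caller issues
 * the batch, as xen_alloc_ptpage()/xen_release_ptpage() below do with
 * xen_mc_batch() and xen_mc_issue().
 */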
1651 | ||
1652 | /* This needs to make sure the new pte page is pinned iff it's being |
1653 | attached to a pinned pagetable. */ | |
1654 | static inline void xen_alloc_ptpage(struct mm_struct *mm, unsigned long pfn, | |
1655 | unsigned level) | |
1656 | { | |
1657 | bool pinned = PagePinned(virt_to_page(mm->pgd)); | |
1658 | ||
1659 | trace_xen_mmu_alloc_ptpage(mm, pfn, level, pinned); | |
1660 | ||
1661 | if (pinned) { | |
1662 | struct page *page = pfn_to_page(pfn); | |
1663 | ||
1664 | SetPagePinned(page); | |
1665 | ||
1666 | if (!PageHighMem(page)) { | |
1667 | xen_mc_batch(); | |
1668 | ||
1669 | __set_pfn_prot(pfn, PAGE_KERNEL_RO); | |
1670 | ||
1671 | if (level == PT_PTE && USE_SPLIT_PTE_PTLOCKS) | |
1672 | __pin_pagetable_pfn(MMUEXT_PIN_L1_TABLE, pfn); | |
1673 | ||
1674 | xen_mc_issue(PARAVIRT_LAZY_MMU); | |
1675 | } else { | |
1676 | /* make sure there are no stray mappings of | |
1677 | this page */ | |
1678 | kmap_flush_unused(); | |
1679 | } | |
1680 | } | |
1681 | } | |
1682 | ||
1683 | static void xen_alloc_pte(struct mm_struct *mm, unsigned long pfn) | |
1684 | { | |
1685 | xen_alloc_ptpage(mm, pfn, PT_PTE); | |
1686 | } | |
1687 | ||
1688 | static void xen_alloc_pmd(struct mm_struct *mm, unsigned long pfn) | |
1689 | { | |
1690 | xen_alloc_ptpage(mm, pfn, PT_PMD); | |
1691 | } | |
1692 | ||
1693 | /* This should never happen until we're OK to use struct page */ | |
1694 | static inline void xen_release_ptpage(unsigned long pfn, unsigned level) | |
1695 | { | |
1696 | struct page *page = pfn_to_page(pfn); | |
1697 | bool pinned = PagePinned(page); | |
1698 | ||
1699 | trace_xen_mmu_release_ptpage(pfn, level, pinned); | |
1700 | ||
1701 | if (pinned) { | |
1702 | if (!PageHighMem(page)) { | |
1703 | xen_mc_batch(); | |
1704 | ||
1705 | if (level == PT_PTE && USE_SPLIT_PTE_PTLOCKS) | |
1706 | __pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, pfn); | |
1707 | ||
1708 | __set_pfn_prot(pfn, PAGE_KERNEL); | |
1709 | ||
1710 | xen_mc_issue(PARAVIRT_LAZY_MMU); | |
1711 | } | |
1712 | ClearPagePinned(page); | |
1713 | } | |
1714 | } | |
1715 | ||
1716 | static void xen_release_pte(unsigned long pfn) | |
1717 | { | |
1718 | xen_release_ptpage(pfn, PT_PTE); | |
1719 | } | |
1720 | ||
1721 | static void xen_release_pmd(unsigned long pfn) | |
1722 | { | |
1723 | xen_release_ptpage(pfn, PT_PMD); | |
1724 | } | |
1725 | ||
1726 | #if CONFIG_PGTABLE_LEVELS >= 4 | |
1727 | static void xen_alloc_pud(struct mm_struct *mm, unsigned long pfn) | |
1728 | { | |
1729 | xen_alloc_ptpage(mm, pfn, PT_PUD); | |
1730 | } | |
1731 | ||
1732 | static void xen_release_pud(unsigned long pfn) | |
1733 | { | |
1734 | xen_release_ptpage(pfn, PT_PUD); | |
1735 | } | |
1736 | #endif | |
1737 | ||
1738 | void __init xen_reserve_top(void) | |
1739 | { | |
1740 | #ifdef CONFIG_X86_32 | |
1741 | unsigned long top = HYPERVISOR_VIRT_START; | |
1742 | struct xen_platform_parameters pp; | |
1743 | ||
1744 | if (HYPERVISOR_xen_version(XENVER_platform_parameters, &pp) == 0) | |
1745 | top = pp.virt_start; | |
1746 | ||
1747 | reserve_top_address(-top); | |
1748 | #endif /* CONFIG_X86_32 */ | |
1749 | } | |
1750 | ||
1751 | /* | |
1752 | * Like __va(), but returns the address in the kernel mapping (which is |
1753 | * all we have until the physical memory mapping has been set up). |
1754 | */ | |
1755 | static void * __init __ka(phys_addr_t paddr) | |
1756 | { | |
1757 | #ifdef CONFIG_X86_64 | |
1758 | return (void *)(paddr + __START_KERNEL_map); | |
1759 | #else | |
1760 | return __va(paddr); | |
1761 | #endif | |
1762 | } | |
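/*
 * For illustration, assuming the usual x86_64 layout where
 * __START_KERNEL_map is 0xffffffff80000000: __ka(0x1000) yields
 * (void *)0xffffffff80001000, i.e. the kernel-text alias of the page
 * rather than its direct (__va) mapping.
 */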
1763 | ||
1764 | /* Convert a machine address to physical address */ | |
1765 | static unsigned long __init m2p(phys_addr_t maddr) | |
1766 | { | |
1767 | phys_addr_t paddr; | |
1768 | ||
1769 | maddr &= PTE_PFN_MASK; | |
1770 | paddr = mfn_to_pfn(maddr >> PAGE_SHIFT) << PAGE_SHIFT; | |
1771 | ||
1772 | return paddr; | |
1773 | } | |
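/*
 * Example with made-up numbers: for a pmd entry of value 0x1abcde067,
 * PTE_PFN_MASK strips the low flag bits (0x067), mfn 0x1abcde is looked
 * up in the machine-to-physical table, and the resulting pfn is shifted
 * back up to give the guest-physical address of that frame.
 */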
1774 | ||
1775 | /* Convert a machine address to kernel virtual */ | |
1776 | static void * __init m2v(phys_addr_t maddr) | |
1777 | { | |
1778 | return __ka(m2p(maddr)); | |
1779 | } | |
1780 | ||
1781 | /* Set the page permissions on identity-mapped pages */ |
1782 | static void __init set_page_prot_flags(void *addr, pgprot_t prot, | |
1783 | unsigned long flags) | |
1784 | { | |
1785 | unsigned long pfn = __pa(addr) >> PAGE_SHIFT; | |
1786 | pte_t pte = pfn_pte(pfn, prot); | |
1787 | ||
1788 | if (HYPERVISOR_update_va_mapping((unsigned long)addr, pte, flags)) | |
1789 | BUG(); | |
1790 | } | |
1791 | static void __init set_page_prot(void *addr, pgprot_t prot) | |
1792 | { | |
1793 | return set_page_prot_flags(addr, prot, UVMF_NONE); | |
1794 | } | |
1795 | #ifdef CONFIG_X86_32 | |
1796 | static void __init xen_map_identity_early(pmd_t *pmd, unsigned long max_pfn) | |
1797 | { | |
1798 | unsigned pmdidx, pteidx; | |
1799 | unsigned ident_pte; | |
1800 | unsigned long pfn; | |
1801 | ||
1802 | level1_ident_pgt = extend_brk(sizeof(pte_t) * LEVEL1_IDENT_ENTRIES, | |
1803 | PAGE_SIZE); | |
1804 | ||
1805 | ident_pte = 0; | |
1806 | pfn = 0; | |
1807 | for (pmdidx = 0; pmdidx < PTRS_PER_PMD && pfn < max_pfn; pmdidx++) { | |
1808 | pte_t *pte_page; | |
1809 | ||
1810 | /* Reuse or allocate a page of ptes */ | |
1811 | if (pmd_present(pmd[pmdidx])) | |
1812 | pte_page = m2v(pmd[pmdidx].pmd); | |
1813 | else { | |
1814 | /* Check for free pte pages */ | |
1815 | if (ident_pte == LEVEL1_IDENT_ENTRIES) | |
1816 | break; | |
1817 | ||
1818 | pte_page = &level1_ident_pgt[ident_pte]; | |
1819 | ident_pte += PTRS_PER_PTE; | |
1820 | ||
1821 | pmd[pmdidx] = __pmd(__pa(pte_page) | _PAGE_TABLE); | |
1822 | } | |
1823 | ||
1824 | /* Install mappings */ | |
1825 | for (pteidx = 0; pteidx < PTRS_PER_PTE; pteidx++, pfn++) { | |
1826 | pte_t pte; | |
1827 | ||
1828 | if (pfn > max_pfn_mapped) | |
1829 | max_pfn_mapped = pfn; | |
1830 | ||
1831 | if (!pte_none(pte_page[pteidx])) | |
1832 | continue; | |
1833 | ||
1834 | pte = pfn_pte(pfn, PAGE_KERNEL_EXEC); | |
1835 | pte_page[pteidx] = pte; | |
1836 | } | |
1837 | } | |
1838 | ||
1839 | for (pteidx = 0; pteidx < ident_pte; pteidx += PTRS_PER_PTE) | |
1840 | set_page_prot(&level1_ident_pgt[pteidx], PAGE_KERNEL_RO); | |
1841 | ||
1842 | set_page_prot(pmd, PAGE_KERNEL_RO); | |
1843 | } | |
1844 | #endif | |
1845 | void __init xen_setup_machphys_mapping(void) | |
1846 | { | |
1847 | struct xen_machphys_mapping mapping; | |
1848 | ||
1849 | if (HYPERVISOR_memory_op(XENMEM_machphys_mapping, &mapping) == 0) { | |
1850 | machine_to_phys_mapping = (unsigned long *)mapping.v_start; | |
1851 | machine_to_phys_nr = mapping.max_mfn + 1; | |
1852 | } else { | |
1853 | machine_to_phys_nr = MACH2PHYS_NR_ENTRIES; | |
1854 | } | |
1855 | #ifdef CONFIG_X86_32 | |
1856 | WARN_ON((machine_to_phys_mapping + (machine_to_phys_nr - 1)) | |
1857 | < machine_to_phys_mapping); | |
1858 | #endif | |
1859 | } | |
1860 | ||
1861 | #ifdef CONFIG_X86_64 | |
1862 | static void __init convert_pfn_mfn(void *v) | |
1863 | { | |
1864 | pte_t *pte = v; | |
1865 | int i; | |
1866 | ||
1867 | /* All levels are converted the same way, so just treat them | |
1868 | as ptes. */ | |
1869 | for (i = 0; i < PTRS_PER_PTE; i++) | |
1870 | pte[i] = xen_make_pte(pte[i].pte); | |
1871 | } | |
1872 | static void __init check_pt_base(unsigned long *pt_base, unsigned long *pt_end, | |
1873 | unsigned long addr) | |
1874 | { | |
1875 | if (*pt_base == PFN_DOWN(__pa(addr))) { | |
1876 | set_page_prot_flags((void *)addr, PAGE_KERNEL, UVMF_INVLPG); | |
1877 | clear_page((void *)addr); | |
1878 | (*pt_base)++; | |
1879 | } | |
1880 | if (*pt_end == PFN_DOWN(__pa(addr))) { | |
1881 | set_page_prot_flags((void *)addr, PAGE_KERNEL, UVMF_INVLPG); | |
1882 | clear_page((void *)addr); | |
1883 | (*pt_end)--; | |
1884 | } | |
1885 | } | |
1886 | /* | |
1887 | * Set up the initial kernel pagetable. | |
1888 | * | |
1889 | * We can construct this by grafting the Xen provided pagetable into | |
1890 | * head_64.S's preconstructed pagetables. We copy the Xen L2's into | |
1891 | * level2_ident_pgt, and level2_kernel_pgt. This means that only the | |
1892 | * kernel has a physical mapping to start with - but that's enough to | |
1893 | * get __va working. We need to fill in the rest of the physical | |
1894 | * mapping once some sort of allocator has been set up. | |
1895 | */ | |
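/*
 * For reference, assuming the classic non-randomized x86_64 layout
 * (PAGE_OFFSET == 0xffff880000000000, __START_KERNEL_map ==
 * 0xffffffff80000000), the bracketed indices used in the comments below
 * come out as pgd_index(PAGE_OFFSET) == 272,
 * pgd_index(__START_KERNEL_map) == 511 and
 * pud_index(__START_KERNEL_map) == 510.
 */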
1896 | void __init xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn) | |
1897 | { | |
1898 | pud_t *l3; | |
1899 | pmd_t *l2; | |
1900 | unsigned long addr[3]; | |
1901 | unsigned long pt_base, pt_end; | |
1902 | unsigned i; | |
1903 | ||
1904 | /* max_pfn_mapped is the last pfn mapped in the initial memory |
1905 | * mappings. Since on Xen the mappings after the kernel image also |
1906 | * cover pages that don't exist in pfn space, set max_pfn_mapped to |
1907 | * the last real pfn mapped. */ |
1908 | if (xen_start_info->mfn_list < __START_KERNEL_map) | |
1909 | max_pfn_mapped = xen_start_info->first_p2m_pfn; | |
1910 | else | |
1911 | max_pfn_mapped = PFN_DOWN(__pa(xen_start_info->mfn_list)); | |
1912 | ||
1913 | pt_base = PFN_DOWN(__pa(xen_start_info->pt_base)); | |
1914 | pt_end = pt_base + xen_start_info->nr_pt_frames; | |
1915 | ||
1916 | /* Zap identity mapping */ | |
1917 | init_level4_pgt[0] = __pgd(0); | |
1918 | ||
989513a7 JG |
1919 | /* Pre-constructed entries are in pfn, so convert to mfn */ |
1920 | /* L4[272] -> level3_ident_pgt */ | |
1921 | /* L4[511] -> level3_kernel_pgt */ | |
1922 | convert_pfn_mfn(init_level4_pgt); | |
7e0563de | 1923 | |
989513a7 JG |
1924 | /* L3_i[0] -> level2_ident_pgt */ |
1925 | convert_pfn_mfn(level3_ident_pgt); | |
1926 | /* L3_k[510] -> level2_kernel_pgt */ | |
1927 | /* L3_k[511] -> level2_fixmap_pgt */ | |
1928 | convert_pfn_mfn(level3_kernel_pgt); | |
1929 | ||
1930 | /* L3_k[511][506] -> level1_fixmap_pgt */ | |
1931 | convert_pfn_mfn(level2_fixmap_pgt); | |
7e0563de | 1932 | |
7e0563de VK |
1933 | /* We get [511][511] and have Xen's version of level2_kernel_pgt */ |
1934 | l3 = m2v(pgd[pgd_index(__START_KERNEL_map)].pgd); | |
1935 | l2 = m2v(l3[pud_index(__START_KERNEL_map)].pud); | |
1936 | ||
1937 | addr[0] = (unsigned long)pgd; | |
1938 | addr[1] = (unsigned long)l3; | |
1939 | addr[2] = (unsigned long)l2; | |
1940 | /* Graft it onto L4[272][0]. Note that we are creating an aliasing problem: |
1941 | * both L4[272][0] and L4[511][510] have entries that point to the same |
1942 | * L2 (PMD) tables, meaning that if you modify a mapping in __va space |
1943 | * it will also be modified in the __ka space! (But if you just |
1944 | * modify the PMD table to point to other PTEs or to none, then you |
1945 | * are OK - which is what cleanup_highmap does.) */ |
1946 | copy_page(level2_ident_pgt, l2); | |
1947 | /* Graft it onto L4[511][510] */ | |
1948 | copy_page(level2_kernel_pgt, l2); | |
1949 | ||
1950 | /* Copy the initial P->M table mappings if necessary. */ | |
1951 | i = pgd_index(xen_start_info->mfn_list); | |
1952 | if (i && i < pgd_index(__START_KERNEL_map)) | |
1953 | init_level4_pgt[i] = ((pgd_t *)xen_start_info->pt_base)[i]; | |
1954 | ||
989513a7 JG |
1955 | /* Make pagetable pieces RO */ |
1956 | set_page_prot(init_level4_pgt, PAGE_KERNEL_RO); | |
1957 | set_page_prot(level3_ident_pgt, PAGE_KERNEL_RO); | |
1958 | set_page_prot(level3_kernel_pgt, PAGE_KERNEL_RO); | |
1959 | set_page_prot(level3_user_vsyscall, PAGE_KERNEL_RO); | |
1960 | set_page_prot(level2_ident_pgt, PAGE_KERNEL_RO); | |
1961 | set_page_prot(level2_kernel_pgt, PAGE_KERNEL_RO); | |
1962 | set_page_prot(level2_fixmap_pgt, PAGE_KERNEL_RO); | |
1963 | set_page_prot(level1_fixmap_pgt, PAGE_KERNEL_RO); | |
1964 | ||
1965 | /* Pin down new L4 */ | |
1966 | pin_pagetable_pfn(MMUEXT_PIN_L4_TABLE, | |
1967 | PFN_DOWN(__pa_symbol(init_level4_pgt))); | |
1968 | ||
1969 | /* Unpin Xen-provided one */ | |
1970 | pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, PFN_DOWN(__pa(pgd))); | |
7e0563de | 1971 | |
989513a7 JG |
1972 | /* |
1973 | * At this stage there can be no user pgd, and no page structure to | |
1974 | * attach it to, so make sure we just set kernel pgd. | |
1975 | */ | |
1976 | xen_mc_batch(); | |
1977 | __xen_write_cr3(true, __pa(init_level4_pgt)); | |
1978 | xen_mc_issue(PARAVIRT_LAZY_CPU); | |
7e0563de VK |
1979 | |
1980 | /* We can't rip out the L3 and L2 quite so easily, as the Xen pagetables |
1981 | * are laid out as [L4], [L1], [L2], [L3], [L1], [L1] ... for the |
1982 | * initial domain, while for guests started by the toolstack they are in |
1983 | * [L4], [L3], [L2], [L1], [L1] order. So for dom0 we can only rip out |
1984 | * the [L4] (pgd), but for guests we shave off three pages. |
1985 | */ | |
1986 | for (i = 0; i < ARRAY_SIZE(addr); i++) | |
1987 | check_pt_base(&pt_base, &pt_end, addr[i]); | |
1988 | ||
1989 | /* The Xen-provided pagetable we are still using, now smaller by up to three pages */ |
1990 | xen_pt_base = PFN_PHYS(pt_base); | |
1991 | xen_pt_size = (pt_end - pt_base) * PAGE_SIZE; | |
1992 | memblock_reserve(xen_pt_base, xen_pt_size); | |
1993 | ||
1994 | /* Revector the xen_start_info */ | |
1995 | xen_start_info = (struct start_info *)__va(__pa(xen_start_info)); | |
1996 | } | |
1997 | ||
1998 | /* | |
1999 | * Read a value from a physical address. | |
2000 | */ | |
2001 | static unsigned long __init xen_read_phys_ulong(phys_addr_t addr) | |
2002 | { | |
2003 | unsigned long *vaddr; | |
2004 | unsigned long val; | |
2005 | ||
2006 | vaddr = early_memremap_ro(addr, sizeof(val)); | |
2007 | val = *vaddr; | |
2008 | early_memunmap(vaddr, sizeof(val)); | |
2009 | return val; | |
2010 | } | |
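/*
 * This is only a building block: xen_early_virt_to_phys() below uses it to
 * read individual page-table entries by physical address, e.g.
 * xen_read_phys_ulong(pa + pgd_index(vaddr) * sizeof(pgd_t)), at a point
 * where the direct mapping cannot be trusted yet.
 */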
2011 | ||
2012 | /* | |
2013 | * Translate a virtual address to a physical one without relying on mapped | |
69861e0a JG |
2014 | * page tables. Don't rely on big pages being aligned in (guest) physical |
2015 | * space! | |
7e0563de VK |
2016 | */ |
2017 | static phys_addr_t __init xen_early_virt_to_phys(unsigned long vaddr) | |
2018 | { | |
2019 | phys_addr_t pa; | |
2020 | pgd_t pgd; | |
2021 | pud_t pud; | |
2022 | pmd_t pmd; | |
2023 | pte_t pte; | |
2024 | ||
2025 | pa = read_cr3(); | |
2026 | pgd = native_make_pgd(xen_read_phys_ulong(pa + pgd_index(vaddr) * | |
2027 | sizeof(pgd))); | |
2028 | if (!pgd_present(pgd)) | |
2029 | return 0; | |
2030 | ||
2031 | pa = pgd_val(pgd) & PTE_PFN_MASK; | |
2032 | pud = native_make_pud(xen_read_phys_ulong(pa + pud_index(vaddr) * | |
2033 | sizeof(pud))); | |
2034 | if (!pud_present(pud)) | |
2035 | return 0; | |
69861e0a | 2036 | pa = pud_val(pud) & PTE_PFN_MASK; |
7e0563de VK |
2037 | if (pud_large(pud)) |
2038 | return pa + (vaddr & ~PUD_MASK); | |
2039 | ||
2040 | pmd = native_make_pmd(xen_read_phys_ulong(pa + pmd_index(vaddr) * | |
2041 | sizeof(pmd))); | |
2042 | if (!pmd_present(pmd)) | |
2043 | return 0; | |
69861e0a | 2044 | pa = pmd_val(pmd) & PTE_PFN_MASK; |
7e0563de VK |
2045 | if (pmd_large(pmd)) |
2046 | return pa + (vaddr & ~PMD_MASK); | |
2047 | ||
2048 | pte = native_make_pte(xen_read_phys_ulong(pa + pte_index(vaddr) * | |
2049 | sizeof(pte))); | |
2050 | if (!pte_present(pte)) | |
2051 | return 0; | |
2052 | pa = pte_pfn(pte) << PAGE_SHIFT; | |
2053 | ||
2054 | return pa | (vaddr & ~PAGE_MASK); | |
2055 | } | |
2056 | ||
2057 | /* | |
2058 | * Find a new area for the hypervisor supplied p2m list and relocate the p2m to | |
2059 | * this area. | |
2060 | */ | |
2061 | void __init xen_relocate_p2m(void) | |
2062 | { | |
2063 | phys_addr_t size, new_area, pt_phys, pmd_phys, pud_phys, p4d_phys; | |
2064 | unsigned long p2m_pfn, p2m_pfn_end, n_frames, pfn, pfn_end; | |
2065 | int n_pte, n_pt, n_pmd, n_pud, n_p4d, idx_pte, idx_pt, idx_pmd, idx_pud, idx_p4d; | |
2066 | pte_t *pt; | |
2067 | pmd_t *pmd; | |
2068 | pud_t *pud; | |
2069 | p4d_t *p4d = NULL; | |
2070 | pgd_t *pgd; | |
2071 | unsigned long *new_p2m; | |
2072 | int save_pud; | |
2073 | ||
2074 | size = PAGE_ALIGN(xen_start_info->nr_pages * sizeof(unsigned long)); | |
2075 | n_pte = roundup(size, PAGE_SIZE) >> PAGE_SHIFT; | |
2076 | n_pt = roundup(size, PMD_SIZE) >> PMD_SHIFT; | |
2077 | n_pmd = roundup(size, PUD_SIZE) >> PUD_SHIFT; | |
2078 | n_pud = roundup(size, P4D_SIZE) >> P4D_SHIFT; | |
2079 | if (PTRS_PER_P4D > 1) | |
2080 | n_p4d = roundup(size, PGDIR_SIZE) >> PGDIR_SHIFT; | |
2081 | else | |
2082 | n_p4d = 0; | |
2083 | n_frames = n_pte + n_pt + n_pmd + n_pud + n_p4d; | |
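/*
 * Worked example (illustrative only): for a 4GB domain with nr_pages = 1M,
 * size is 8MB of p2m data, so n_pte = 2048 data frames, mapped by
 * n_pt = 4 page tables, n_pmd = 1 PMD page and n_pud = 1 PUD page
 * (n_p4d = 0 without 5-level paging), giving n_frames = 2054 frames
 * for the new area.
 */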
2084 | ||
2085 | new_area = xen_find_free_area(PFN_PHYS(n_frames)); | |
2086 | if (!new_area) { | |
2087 | xen_raw_console_write("Can't find new memory area for p2m needed due to E820 map conflict\n"); | |
2088 | BUG(); | |
2089 | } | |
2090 | ||
2091 | /* | |
2092 | * Setup the page tables for addressing the new p2m list. | |
2093 | * We have asked the hypervisor to map the p2m list at the user address | |
2094 | * PUD_SIZE. It may have done so, or it may have used a kernel space | |
2095 | * address depending on the Xen version. | |
2096 | * To avoid any possible virtual address collision, just use | |
2097 | * 2 * PUD_SIZE for the new area. | |
2098 | */ | |
2099 | p4d_phys = new_area; | |
2100 | pud_phys = p4d_phys + PFN_PHYS(n_p4d); | |
2101 | pmd_phys = pud_phys + PFN_PHYS(n_pud); | |
2102 | pt_phys = pmd_phys + PFN_PHYS(n_pmd); | |
2103 | p2m_pfn = PFN_DOWN(pt_phys) + n_pt; | |
2104 | ||
2105 | pgd = __va(read_cr3()); | |
2106 | new_p2m = (unsigned long *)(2 * PGDIR_SIZE); | |
2107 | idx_p4d = 0; | |
2108 | save_pud = n_pud; | |
2109 | do { | |
2110 | if (n_p4d > 0) { | |
2111 | p4d = early_memremap(p4d_phys, PAGE_SIZE); | |
2112 | clear_page(p4d); | |
2113 | n_pud = min(save_pud, PTRS_PER_P4D); | |
2114 | } | |
2115 | for (idx_pud = 0; idx_pud < n_pud; idx_pud++) { | |
2116 | pud = early_memremap(pud_phys, PAGE_SIZE); | |
2117 | clear_page(pud); | |
2118 | for (idx_pmd = 0; idx_pmd < min(n_pmd, PTRS_PER_PUD); | |
2119 | idx_pmd++) { | |
2120 | pmd = early_memremap(pmd_phys, PAGE_SIZE); | |
2121 | clear_page(pmd); | |
2122 | for (idx_pt = 0; idx_pt < min(n_pt, PTRS_PER_PMD); | |
2123 | idx_pt++) { | |
2124 | pt = early_memremap(pt_phys, PAGE_SIZE); | |
2125 | clear_page(pt); | |
2126 | for (idx_pte = 0; | |
2127 | idx_pte < min(n_pte, PTRS_PER_PTE); | |
2128 | idx_pte++) { | |
2129 | set_pte(pt + idx_pte, | |
2130 | pfn_pte(p2m_pfn, PAGE_KERNEL)); | |
2131 | p2m_pfn++; | |
2132 | } | |
2133 | n_pte -= PTRS_PER_PTE; | |
2134 | early_memunmap(pt, PAGE_SIZE); | |
2135 | make_lowmem_page_readonly(__va(pt_phys)); | |
2136 | pin_pagetable_pfn(MMUEXT_PIN_L1_TABLE, | |
2137 | PFN_DOWN(pt_phys)); | |
2138 | set_pmd(pmd + idx_pt, | |
2139 | __pmd(_PAGE_TABLE | pt_phys)); | |
2140 | pt_phys += PAGE_SIZE; | |
2141 | } | |
2142 | n_pt -= PTRS_PER_PMD; | |
2143 | early_memunmap(pmd, PAGE_SIZE); | |
2144 | make_lowmem_page_readonly(__va(pmd_phys)); | |
2145 | pin_pagetable_pfn(MMUEXT_PIN_L2_TABLE, | |
2146 | PFN_DOWN(pmd_phys)); | |
2147 | set_pud(pud + idx_pmd, __pud(_PAGE_TABLE | pmd_phys)); | |
2148 | pmd_phys += PAGE_SIZE; | |
2149 | } | |
2150 | n_pmd -= PTRS_PER_PUD; | |
2151 | early_memunmap(pud, PAGE_SIZE); | |
2152 | make_lowmem_page_readonly(__va(pud_phys)); | |
2153 | pin_pagetable_pfn(MMUEXT_PIN_L3_TABLE, PFN_DOWN(pud_phys)); | |
2154 | if (n_p4d > 0) | |
2155 | set_p4d(p4d + idx_pud, __p4d(_PAGE_TABLE | pud_phys)); | |
2156 | else | |
2157 | set_pgd(pgd + 2 + idx_pud, __pgd(_PAGE_TABLE | pud_phys)); | |
2158 | pud_phys += PAGE_SIZE; | |
2159 | } | |
2160 | if (n_p4d > 0) { | |
2161 | save_pud -= PTRS_PER_P4D; | |
2162 | early_memunmap(p4d, PAGE_SIZE); | |
2163 | make_lowmem_page_readonly(__va(p4d_phys)); | |
2164 | pin_pagetable_pfn(MMUEXT_PIN_L4_TABLE, PFN_DOWN(p4d_phys)); | |
2165 | set_pgd(pgd + 2 + idx_p4d, __pgd(_PAGE_TABLE | p4d_phys)); | |
2166 | p4d_phys += PAGE_SIZE; | |
2167 | } | |
2168 | } while (++idx_p4d < n_p4d); | |
2169 | ||
2170 | /* Now copy the old p2m info to the new area. */ | |
2171 | memcpy(new_p2m, xen_p2m_addr, size); | |
2172 | xen_p2m_addr = new_p2m; | |
2173 | ||
2174 | /* Release the old p2m list and set new list info. */ | |
2175 | p2m_pfn = PFN_DOWN(xen_early_virt_to_phys(xen_start_info->mfn_list)); | |
2176 | BUG_ON(!p2m_pfn); | |
2177 | p2m_pfn_end = p2m_pfn + PFN_DOWN(size); | |
2178 | ||
2179 | if (xen_start_info->mfn_list < __START_KERNEL_map) { | |
2180 | pfn = xen_start_info->first_p2m_pfn; | |
2181 | pfn_end = xen_start_info->first_p2m_pfn + | |
2182 | xen_start_info->nr_p2m_frames; | |
2183 | set_pgd(pgd + 1, __pgd(0)); | |
2184 | } else { | |
2185 | pfn = p2m_pfn; | |
2186 | pfn_end = p2m_pfn_end; | |
2187 | } | |
2188 | ||
2189 | memblock_free(PFN_PHYS(pfn), PAGE_SIZE * (pfn_end - pfn)); | |
2190 | while (pfn < pfn_end) { | |
2191 | if (pfn == p2m_pfn) { | |
2192 | pfn = p2m_pfn_end; | |
2193 | continue; | |
2194 | } | |
2195 | make_lowmem_page_readwrite(__va(PFN_PHYS(pfn))); | |
2196 | pfn++; | |
2197 | } | |
2198 | ||
2199 | xen_start_info->mfn_list = (unsigned long)xen_p2m_addr; | |
2200 | xen_start_info->first_p2m_pfn = PFN_DOWN(new_area); | |
2201 | xen_start_info->nr_p2m_frames = n_frames; | |
2202 | } | |
2203 | ||
2204 | #else /* !CONFIG_X86_64 */ | |
2205 | static RESERVE_BRK_ARRAY(pmd_t, initial_kernel_pmd, PTRS_PER_PMD); | |
2206 | static RESERVE_BRK_ARRAY(pmd_t, swapper_kernel_pmd, PTRS_PER_PMD); | |
2207 | ||
2208 | static void __init xen_write_cr3_init(unsigned long cr3) | |
2209 | { | |
2210 | unsigned long pfn = PFN_DOWN(__pa(swapper_pg_dir)); | |
2211 | ||
2212 | BUG_ON(read_cr3() != __pa(initial_page_table)); | |
2213 | BUG_ON(cr3 != __pa(swapper_pg_dir)); | |
2214 | ||
2215 | /* | |
2216 | * We are switching to swapper_pg_dir for the first time (from | |
2217 | * initial_page_table) and therefore need to mark that page | |
2218 | * read-only and then pin it. | |
2219 | * | |
2220 | * Xen disallows sharing of kernel PMDs for PAE | |
2221 | * guests. Therefore we must copy the kernel PMD from | |
2222 | * initial_page_table into a new kernel PMD to be used in | |
2223 | * swapper_pg_dir. | |
2224 | */ | |
2225 | swapper_kernel_pmd = | |
2226 | extend_brk(sizeof(pmd_t) * PTRS_PER_PMD, PAGE_SIZE); | |
2227 | copy_page(swapper_kernel_pmd, initial_kernel_pmd); | |
2228 | swapper_pg_dir[KERNEL_PGD_BOUNDARY] = | |
2229 | __pgd(__pa(swapper_kernel_pmd) | _PAGE_PRESENT); | |
2230 | set_page_prot(swapper_kernel_pmd, PAGE_KERNEL_RO); | |
2231 | ||
2232 | set_page_prot(swapper_pg_dir, PAGE_KERNEL_RO); | |
2233 | xen_write_cr3(cr3); | |
2234 | pin_pagetable_pfn(MMUEXT_PIN_L3_TABLE, pfn); | |
2235 | ||
2236 | pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, | |
2237 | PFN_DOWN(__pa(initial_page_table))); | |
2238 | set_page_prot(initial_page_table, PAGE_KERNEL); | |
2239 | set_page_prot(initial_kernel_pmd, PAGE_KERNEL); | |
2240 | ||
2241 | pv_mmu_ops.write_cr3 = &xen_write_cr3; | |
2242 | } | |
2243 | ||
2244 | /* | |
2245 | * For 32 bit domains xen_start_info->pt_base is the pgd address, which might |
2246 | * not be the first page table in the page table pool. |
2247 | * Iterate through the initial page tables to find the real page table base. | |
2248 | */ | |
2249 | static phys_addr_t xen_find_pt_base(pmd_t *pmd) | |
2250 | { | |
2251 | phys_addr_t pt_base, paddr; | |
2252 | unsigned pmdidx; | |
2253 | ||
2254 | pt_base = min(__pa(xen_start_info->pt_base), __pa(pmd)); | |
2255 | ||
2256 | for (pmdidx = 0; pmdidx < PTRS_PER_PMD; pmdidx++) | |
2257 | if (pmd_present(pmd[pmdidx]) && !pmd_large(pmd[pmdidx])) { | |
2258 | paddr = m2p(pmd[pmdidx].pmd); | |
2259 | pt_base = min(pt_base, paddr); | |
2260 | } | |
2261 | ||
2262 | return pt_base; | |
2263 | } | |
2264 | ||
2265 | void __init xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn) | |
2266 | { | |
2267 | pmd_t *kernel_pmd; | |
2268 | ||
2269 | kernel_pmd = m2v(pgd[KERNEL_PGD_BOUNDARY].pgd); | |
2270 | ||
2271 | xen_pt_base = xen_find_pt_base(kernel_pmd); | |
2272 | xen_pt_size = xen_start_info->nr_pt_frames * PAGE_SIZE; | |
2273 | ||
2274 | initial_kernel_pmd = | |
2275 | extend_brk(sizeof(pmd_t) * PTRS_PER_PMD, PAGE_SIZE); | |
2276 | ||
2277 | max_pfn_mapped = PFN_DOWN(xen_pt_base + xen_pt_size + 512 * 1024); | |
2278 | ||
2279 | copy_page(initial_kernel_pmd, kernel_pmd); | |
2280 | ||
2281 | xen_map_identity_early(initial_kernel_pmd, max_pfn); | |
2282 | ||
2283 | copy_page(initial_page_table, pgd); | |
2284 | initial_page_table[KERNEL_PGD_BOUNDARY] = | |
2285 | __pgd(__pa(initial_kernel_pmd) | _PAGE_PRESENT); | |
2286 | ||
2287 | set_page_prot(initial_kernel_pmd, PAGE_KERNEL_RO); | |
2288 | set_page_prot(initial_page_table, PAGE_KERNEL_RO); | |
2289 | set_page_prot(empty_zero_page, PAGE_KERNEL_RO); | |
2290 | ||
2291 | pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, PFN_DOWN(__pa(pgd))); | |
2292 | ||
2293 | pin_pagetable_pfn(MMUEXT_PIN_L3_TABLE, | |
2294 | PFN_DOWN(__pa(initial_page_table))); | |
2295 | xen_write_cr3(__pa(initial_page_table)); | |
2296 | ||
2297 | memblock_reserve(xen_pt_base, xen_pt_size); | |
2298 | } | |
2299 | #endif /* CONFIG_X86_64 */ | |
2300 | ||
2301 | void __init xen_reserve_special_pages(void) | |
2302 | { | |
2303 | phys_addr_t paddr; | |
2304 | ||
2305 | memblock_reserve(__pa(xen_start_info), PAGE_SIZE); | |
2306 | if (xen_start_info->store_mfn) { | |
2307 | paddr = PFN_PHYS(mfn_to_pfn(xen_start_info->store_mfn)); | |
2308 | memblock_reserve(paddr, PAGE_SIZE); | |
2309 | } | |
2310 | if (!xen_initial_domain()) { | |
2311 | paddr = PFN_PHYS(mfn_to_pfn(xen_start_info->console.domU.mfn)); | |
2312 | memblock_reserve(paddr, PAGE_SIZE); | |
2313 | } | |
2314 | } | |
2315 | ||
2316 | void __init xen_pt_check_e820(void) | |
2317 | { | |
2318 | if (xen_is_e820_reserved(xen_pt_base, xen_pt_size)) { | |
2319 | xen_raw_console_write("Xen hypervisor allocated page table memory conflicts with E820 map\n"); | |
2320 | BUG(); | |
2321 | } | |
2322 | } | |
2323 | ||
2324 | static unsigned char dummy_mapping[PAGE_SIZE] __page_aligned_bss; | |
2325 | ||
2326 | static void xen_set_fixmap(unsigned idx, phys_addr_t phys, pgprot_t prot) | |
2327 | { | |
2328 | pte_t pte; | |
2329 | ||
2330 | phys >>= PAGE_SHIFT; | |
2331 | ||
2332 | switch (idx) { | |
2333 | case FIX_BTMAP_END ... FIX_BTMAP_BEGIN: | |
2334 | case FIX_RO_IDT: | |
2335 | #ifdef CONFIG_X86_32 | |
2336 | case FIX_WP_TEST: | |
2337 | # ifdef CONFIG_HIGHMEM | |
2338 | case FIX_KMAP_BEGIN ... FIX_KMAP_END: | |
2339 | # endif | |
2340 | #elif defined(CONFIG_X86_VSYSCALL_EMULATION) | |
2341 | case VSYSCALL_PAGE: | |
2342 | #endif | |
2343 | case FIX_TEXT_POKE0: | |
2344 | case FIX_TEXT_POKE1: | |
2345 | case FIX_GDT_REMAP_BEGIN ... FIX_GDT_REMAP_END: | |
2346 | /* All local page mappings */ | |
2347 | pte = pfn_pte(phys, prot); | |
2348 | break; | |
2349 | ||
2350 | #ifdef CONFIG_X86_LOCAL_APIC | |
2351 | case FIX_APIC_BASE: /* maps dummy local APIC */ | |
2352 | pte = pfn_pte(PFN_DOWN(__pa(dummy_mapping)), PAGE_KERNEL); | |
2353 | break; | |
2354 | #endif | |
2355 | ||
2356 | #ifdef CONFIG_X86_IO_APIC | |
2357 | case FIX_IO_APIC_BASE_0 ... FIX_IO_APIC_BASE_END: | |
2358 | /* | |
2359 | * We just don't map the IO APIC - all access is via | |
2360 | * hypercalls. Keep the address in the pte for reference. | |
2361 | */ | |
2362 | pte = pfn_pte(PFN_DOWN(__pa(dummy_mapping)), PAGE_KERNEL); | |
2363 | break; | |
2364 | #endif | |
2365 | ||
2366 | case FIX_PARAVIRT_BOOTMAP: | |
2367 | /* This is an MFN, but it isn't an IO mapping from the | |
2368 | IO domain */ | |
2369 | pte = mfn_pte(phys, prot); | |
2370 | break; | |
2371 | ||
2372 | default: | |
2373 | /* By default, set_fixmap is used for hardware mappings */ | |
2374 | pte = mfn_pte(phys, prot); | |
2375 | break; | |
2376 | } | |
2377 | ||
2378 | __native_set_fixmap(idx, pte); | |
2379 | ||
2380 | #ifdef CONFIG_X86_VSYSCALL_EMULATION | |
2381 | /* Replicate changes to map the vsyscall page into the user | |
2382 | pagetable vsyscall mapping. */ | |
2383 | if (idx == VSYSCALL_PAGE) { | |
2384 | unsigned long vaddr = __fix_to_virt(idx); | |
2385 | set_pte_vaddr_pud(level3_user_vsyscall, vaddr, pte); | |
2386 | } | |
2387 | #endif | |
2388 | } | |
2389 | ||
2390 | static void __init xen_post_allocator_init(void) | |
2391 | { | |
7e0563de VK |
2392 | pv_mmu_ops.set_pte = xen_set_pte; |
2393 | pv_mmu_ops.set_pmd = xen_set_pmd; | |
2394 | pv_mmu_ops.set_pud = xen_set_pud; | |
2395 | #if CONFIG_PGTABLE_LEVELS >= 4 | |
2396 | pv_mmu_ops.set_p4d = xen_set_p4d; | |
2397 | #endif | |
2398 | ||
2399 | /* This will work as long as patching hasn't happened yet | |
2400 | (which it hasn't) */ | |
2401 | pv_mmu_ops.alloc_pte = xen_alloc_pte; | |
2402 | pv_mmu_ops.alloc_pmd = xen_alloc_pmd; | |
2403 | pv_mmu_ops.release_pte = xen_release_pte; | |
2404 | pv_mmu_ops.release_pmd = xen_release_pmd; | |
2405 | #if CONFIG_PGTABLE_LEVELS >= 4 | |
2406 | pv_mmu_ops.alloc_pud = xen_alloc_pud; | |
2407 | pv_mmu_ops.release_pud = xen_release_pud; | |
2408 | #endif | |
2409 | pv_mmu_ops.make_pte = PV_CALLEE_SAVE(xen_make_pte); | |
2410 | ||
2411 | #ifdef CONFIG_X86_64 | |
2412 | pv_mmu_ops.write_cr3 = &xen_write_cr3; | |
2413 | SetPagePinned(virt_to_page(level3_user_vsyscall)); | |
2414 | #endif | |
2415 | xen_mark_init_mm_pinned(); | |
2416 | } | |
2417 | ||
2418 | static void xen_leave_lazy_mmu(void) | |
2419 | { | |
2420 | preempt_disable(); | |
2421 | xen_mc_flush(); | |
2422 | paravirt_leave_lazy_mmu(); | |
2423 | preempt_enable(); | |
2424 | } | |
2425 | ||
2426 | static const struct pv_mmu_ops xen_mmu_ops __initconst = { | |
2427 | .read_cr2 = xen_read_cr2, | |
2428 | .write_cr2 = xen_write_cr2, | |
2429 | ||
2430 | .read_cr3 = xen_read_cr3, | |
2431 | .write_cr3 = xen_write_cr3_init, | |
2432 | ||
2433 | .flush_tlb_user = xen_flush_tlb, | |
2434 | .flush_tlb_kernel = xen_flush_tlb, | |
2435 | .flush_tlb_single = xen_flush_tlb_single, | |
2436 | .flush_tlb_others = xen_flush_tlb_others, | |
2437 | ||
2438 | .pte_update = paravirt_nop, | |
2439 | ||
2440 | .pgd_alloc = xen_pgd_alloc, | |
2441 | .pgd_free = xen_pgd_free, | |
2442 | ||
2443 | .alloc_pte = xen_alloc_pte_init, | |
2444 | .release_pte = xen_release_pte_init, | |
2445 | .alloc_pmd = xen_alloc_pmd_init, | |
2446 | .release_pmd = xen_release_pmd_init, | |
2447 | ||
2448 | .set_pte = xen_set_pte_init, | |
2449 | .set_pte_at = xen_set_pte_at, | |
2450 | .set_pmd = xen_set_pmd_hyper, | |
2451 | ||
2452 | .ptep_modify_prot_start = __ptep_modify_prot_start, | |
2453 | .ptep_modify_prot_commit = __ptep_modify_prot_commit, | |
2454 | ||
2455 | .pte_val = PV_CALLEE_SAVE(xen_pte_val), | |
2456 | .pgd_val = PV_CALLEE_SAVE(xen_pgd_val), | |
2457 | ||
2458 | .make_pte = PV_CALLEE_SAVE(xen_make_pte_init), | |
2459 | .make_pgd = PV_CALLEE_SAVE(xen_make_pgd), | |
2460 | ||
2461 | #ifdef CONFIG_X86_PAE | |
2462 | .set_pte_atomic = xen_set_pte_atomic, | |
2463 | .pte_clear = xen_pte_clear, | |
2464 | .pmd_clear = xen_pmd_clear, | |
2465 | #endif /* CONFIG_X86_PAE */ | |
2466 | .set_pud = xen_set_pud_hyper, | |
2467 | ||
2468 | .make_pmd = PV_CALLEE_SAVE(xen_make_pmd), | |
2469 | .pmd_val = PV_CALLEE_SAVE(xen_pmd_val), | |
2470 | ||
2471 | #if CONFIG_PGTABLE_LEVELS >= 4 | |
2472 | .pud_val = PV_CALLEE_SAVE(xen_pud_val), | |
2473 | .make_pud = PV_CALLEE_SAVE(xen_make_pud), | |
2474 | .set_p4d = xen_set_p4d_hyper, | |
2475 | ||
2476 | .alloc_pud = xen_alloc_pmd_init, | |
2477 | .release_pud = xen_release_pmd_init, | |
2478 | #endif /* CONFIG_PGTABLE_LEVELS >= 4 */ |
2479 | ||
2480 | .activate_mm = xen_activate_mm, | |
2481 | .dup_mmap = xen_dup_mmap, | |
2482 | .exit_mmap = xen_exit_mmap, | |
2483 | ||
2484 | .lazy_mode = { | |
2485 | .enter = paravirt_enter_lazy_mmu, | |
2486 | .leave = xen_leave_lazy_mmu, | |
2487 | .flush = paravirt_flush_lazy_mmu, | |
2488 | }, | |
2489 | ||
2490 | .set_fixmap = xen_set_fixmap, | |
2491 | }; | |
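/*
 * Note that .write_cr3 starts out as xen_write_cr3_init and is switched to
 * the normal xen_write_cr3 once the initial pagetable switch is complete:
 * in xen_write_cr3_init() itself on 32-bit and in xen_post_allocator_init()
 * on 64-bit.
 */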
2492 | ||
2493 | void __init xen_init_mmu_ops(void) | |
2494 | { | |
2495 | x86_init.paging.pagetable_init = xen_pagetable_init; | |
2496 | ||
7e0563de VK |
2497 | pv_mmu_ops = xen_mmu_ops; |
2498 | ||
2499 | memset(dummy_mapping, 0xff, PAGE_SIZE); | |
2500 | } | |
2501 | ||
2502 | /* Protected by xen_reservation_lock. */ | |
2503 | #define MAX_CONTIG_ORDER 9 /* 2MB */ | |
2504 | static unsigned long discontig_frames[1<<MAX_CONTIG_ORDER]; | |
2505 | ||
2506 | #define VOID_PTE (mfn_pte(0, __pgprot(0))) | |
2507 | static void xen_zap_pfn_range(unsigned long vaddr, unsigned int order, | |
2508 | unsigned long *in_frames, | |
2509 | unsigned long *out_frames) | |
2510 | { | |
2511 | int i; | |
2512 | struct multicall_space mcs; | |
2513 | ||
2514 | xen_mc_batch(); | |
2515 | for (i = 0; i < (1UL<<order); i++, vaddr += PAGE_SIZE) { | |
2516 | mcs = __xen_mc_entry(0); | |
2517 | ||
2518 | if (in_frames) | |
2519 | in_frames[i] = virt_to_mfn(vaddr); | |
2520 | ||
2521 | MULTI_update_va_mapping(mcs.mc, vaddr, VOID_PTE, 0); | |
2522 | __set_phys_to_machine(virt_to_pfn(vaddr), INVALID_P2M_ENTRY); | |
2523 | ||
2524 | if (out_frames) | |
2525 | out_frames[i] = virt_to_pfn(vaddr); | |
2526 | } | |
2527 | xen_mc_issue(0); | |
2528 | } | |
2529 | ||
2530 | /* | |
2531 | * Update the pfn-to-mfn mappings for a virtual address range, either to | |
2532 | * point to an array of mfns, or contiguously from a single starting | |
2533 | * mfn. | |
2534 | */ | |
2535 | static void xen_remap_exchanged_ptes(unsigned long vaddr, int order, | |
2536 | unsigned long *mfns, | |
2537 | unsigned long first_mfn) | |
2538 | { | |
2539 | unsigned i, limit; | |
2540 | unsigned long mfn; | |
2541 | ||
2542 | xen_mc_batch(); | |
2543 | ||
2544 | limit = 1u << order; | |
2545 | for (i = 0; i < limit; i++, vaddr += PAGE_SIZE) { | |
2546 | struct multicall_space mcs; | |
2547 | unsigned flags; | |
2548 | ||
2549 | mcs = __xen_mc_entry(0); | |
2550 | if (mfns) | |
2551 | mfn = mfns[i]; | |
2552 | else | |
2553 | mfn = first_mfn + i; | |
2554 | ||
2555 | if (i < (limit - 1)) | |
2556 | flags = 0; | |
2557 | else { | |
2558 | if (order == 0) | |
2559 | flags = UVMF_INVLPG | UVMF_ALL; | |
2560 | else | |
2561 | flags = UVMF_TLB_FLUSH | UVMF_ALL; | |
2562 | } | |
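/*
 * Only the final update of the batch asks for a flush: a single-page
 * invalidation when the range is one page (order 0), a full TLB flush
 * otherwise, in both cases on all vCPUs (UVMF_ALL). The earlier
 * updates go in with no flush at all.
 */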
2563 | ||
2564 | MULTI_update_va_mapping(mcs.mc, vaddr, | |
2565 | mfn_pte(mfn, PAGE_KERNEL), flags); | |
2566 | ||
2567 | set_phys_to_machine(virt_to_pfn(vaddr), mfn); | |
2568 | } | |
2569 | ||
2570 | xen_mc_issue(0); | |
2571 | } | |
2572 | ||
2573 | /* | |
2574 | * Perform the hypercall to exchange a region of our pfns to point to | |
2575 | * memory with the required contiguous alignment. Takes the pfns as | |
2576 | * input, and populates mfns as output. | |
2577 | * | |
2578 | * Returns a success code indicating whether the hypervisor was able to | |
2579 | * satisfy the request or not. | |
2580 | */ | |
2581 | static int xen_exchange_memory(unsigned long extents_in, unsigned int order_in, | |
2582 | unsigned long *pfns_in, | |
2583 | unsigned long extents_out, | |
2584 | unsigned int order_out, | |
2585 | unsigned long *mfns_out, | |
2586 | unsigned int address_bits) | |
2587 | { | |
2588 | long rc; | |
2589 | int success; | |
2590 | ||
2591 | struct xen_memory_exchange exchange = { | |
2592 | .in = { | |
2593 | .nr_extents = extents_in, | |
2594 | .extent_order = order_in, | |
2595 | .extent_start = pfns_in, | |
2596 | .domid = DOMID_SELF | |
2597 | }, | |
2598 | .out = { | |
2599 | .nr_extents = extents_out, | |
2600 | .extent_order = order_out, | |
2601 | .extent_start = mfns_out, | |
2602 | .address_bits = address_bits, | |
2603 | .domid = DOMID_SELF | |
2604 | } | |
2605 | }; | |
2606 | ||
2607 | BUG_ON(extents_in << order_in != extents_out << order_out); | |
2608 | ||
2609 | rc = HYPERVISOR_memory_op(XENMEM_exchange, &exchange); | |
2610 | success = (exchange.nr_exchanged == extents_in); | |
2611 | ||
2612 | BUG_ON(!success && ((exchange.nr_exchanged != 0) || (rc == 0))); | |
2613 | BUG_ON(success && (rc != 0)); | |
2614 | ||
2615 | return success; | |
2616 | } | |
2617 | ||
2618 | int xen_create_contiguous_region(phys_addr_t pstart, unsigned int order, | |
2619 | unsigned int address_bits, | |
2620 | dma_addr_t *dma_handle) | |
2621 | { | |
2622 | unsigned long *in_frames = discontig_frames, out_frame; | |
2623 | unsigned long flags; | |
2624 | int success; | |
2625 | unsigned long vstart = (unsigned long)phys_to_virt(pstart); | |
2626 | ||
2627 | /* | |
2628 | * Currently an auto-translated guest will not perform I/O, nor will | |
2629 | * it require PAE page directories below 4GB. Therefore any calls to | |
2630 | * this function are redundant and can be ignored. | |
2631 | */ | |
2632 | ||
7e0563de VK |
2633 | if (unlikely(order > MAX_CONTIG_ORDER)) |
2634 | return -ENOMEM; | |
2635 | ||
2636 | memset((void *) vstart, 0, PAGE_SIZE << order); | |
2637 | ||
2638 | spin_lock_irqsave(&xen_reservation_lock, flags); | |
2639 | ||
2640 | /* 1. Zap current PTEs, remembering MFNs. */ | |
2641 | xen_zap_pfn_range(vstart, order, in_frames, NULL); | |
2642 | ||
2643 | /* 2. Get a new contiguous memory extent. */ | |
2644 | out_frame = virt_to_pfn(vstart); | |
2645 | success = xen_exchange_memory(1UL << order, 0, in_frames, | |
2646 | 1, order, &out_frame, | |
2647 | address_bits); | |
2648 | ||
2649 | /* 3. Map the new extent in place of old pages. */ | |
2650 | if (success) | |
2651 | xen_remap_exchanged_ptes(vstart, order, NULL, out_frame); | |
2652 | else | |
2653 | xen_remap_exchanged_ptes(vstart, order, in_frames, 0); | |
2654 | ||
2655 | spin_unlock_irqrestore(&xen_reservation_lock, flags); | |
2656 | ||
2657 | *dma_handle = virt_to_machine(vstart).maddr; | |
2658 | return success ? 0 : -ENOMEM; | |
2659 | } | |
2660 | EXPORT_SYMBOL_GPL(xen_create_contiguous_region); | |
2661 | ||
2662 | void xen_destroy_contiguous_region(phys_addr_t pstart, unsigned int order) | |
2663 | { | |
2664 | unsigned long *out_frames = discontig_frames, in_frame; | |
2665 | unsigned long flags; | |
2666 | int success; | |
2667 | unsigned long vstart; | |
2668 | ||
7e0563de VK |
2669 | if (unlikely(order > MAX_CONTIG_ORDER)) |
2670 | return; | |
2671 | ||
2672 | vstart = (unsigned long)phys_to_virt(pstart); | |
2673 | memset((void *) vstart, 0, PAGE_SIZE << order); | |
2674 | ||
2675 | spin_lock_irqsave(&xen_reservation_lock, flags); | |
2676 | ||
2677 | /* 1. Find start MFN of contiguous extent. */ | |
2678 | in_frame = virt_to_mfn(vstart); | |
2679 | ||
2680 | /* 2. Zap current PTEs. */ | |
2681 | xen_zap_pfn_range(vstart, order, NULL, out_frames); | |
2682 | ||
2683 | /* 3. Do the exchange for non-contiguous MFNs. */ | |
2684 | success = xen_exchange_memory(1, order, &in_frame, 1UL << order, | |
2685 | 0, out_frames, 0); | |
2686 | ||
2687 | /* 4. Map new pages in place of old pages. */ | |
2688 | if (success) | |
2689 | xen_remap_exchanged_ptes(vstart, order, out_frames, 0); | |
2690 | else | |
2691 | xen_remap_exchanged_ptes(vstart, order, NULL, in_frame); | |
2692 | ||
2693 | spin_unlock_irqrestore(&xen_reservation_lock, flags); | |
2694 | } | |
2695 | EXPORT_SYMBOL_GPL(xen_destroy_contiguous_region); | |
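/*
 * Usage sketch (not taken from this file; the allocation and error handling
 * are assumptions made purely for illustration): a caller needing a
 * machine-contiguous buffer addressable with 32 bits might do
 *
 *	void *buf = (void *)__get_free_pages(GFP_KERNEL, 2);
 *	dma_addr_t handle;
 *
 *	if (xen_create_contiguous_region(virt_to_phys(buf), 2, 32, &handle))
 *		goto fail;
 *	...
 *	xen_destroy_contiguous_region(virt_to_phys(buf), 2);
 */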
29985b09 JG |
2696 | |
2697 | #ifdef CONFIG_KEXEC_CORE | |
2698 | phys_addr_t paddr_vmcoreinfo_note(void) | |
2699 | { | |
2700 | if (xen_pv_domain()) | |
2701 | return virt_to_machine(&vmcoreinfo_note).maddr; | |
2702 | else | |
2703 | return __pa_symbol(&vmcoreinfo_note); | |
2704 | } | |
2705 | #endif /* CONFIG_KEXEC_CORE */ |