Commit | Line | Data |
---|---|---|
3b827c1b JF |
1 | /* |
2 | * Xen mmu operations | |
3 | * | |
4 | * This file contains the various mmu fetch and update operations. | |
5 | * The most important job they must perform is the mapping between the | |
6 | * domain's pfn and the overall machine mfns. | |
7 | * | |
8 | * Xen allows guests to directly update the pagetable, in a controlled | |
9 | * fashion. In other words, the guest modifies the same pagetable | |
10 | * that the CPU actually uses, which eliminates the overhead of having | |
11 | * a separate shadow pagetable. | |
12 | * | |
13 | * In order to allow this, it falls on the guest domain to map its | |
14 | * notion of a "physical" pfn - which is just a domain-local linear | |
15 | * address - into a real "machine address" which the CPU's MMU can | |
16 | * use. | |
17 | * | |
18 | * A pgd_t/pmd_t/pte_t will typically contain an mfn, and so can be | |
19 | * inserted directly into the pagetable. When creating a new | |
20 | * pte/pmd/pgd, it converts the passed pfn into an mfn. Conversely, | |
21 | * when reading the content back with __(pgd|pmd|pte)_val, it converts | |
22 | * the mfn back into a pfn. | |
23 | * | |
24 | * The other constraint is that all pages which make up a pagetable | |
25 | * must be mapped read-only in the guest. This prevents uncontrolled | |
26 | * guest updates to the pagetable. Xen strictly enforces this, and | |
27 | * will disallow any pagetable update which will end up mapping a | |
28 | * pagetable page RW, and will disallow using any writable page as a | |
29 | * pagetable. | |
30 | * | |
31 | * Naively, when loading %cr3 with the base of a new pagetable, Xen | |
32 | * would need to validate the whole pagetable before going on. | |
33 | * Naturally, this is quite slow. The solution is to "pin" a | |
34 | * pagetable, which enforces all the constraints on the pagetable even | |
35 | * when it is not actively in use. This means that Xen can be assured | |
36 | * that it is still valid when you do load it into %cr3, and doesn't | |
37 | * need to revalidate it. | |
38 | * | |
39 | * Jeremy Fitzhardinge <jeremy@xensource.com>, XenSource Inc, 2007 | |
40 | */ | |
f120f13e | 41 | #include <linux/sched.h> |
f4f97b3e | 42 | #include <linux/highmem.h> |
994025ca | 43 | #include <linux/debugfs.h> |
3b827c1b | 44 | #include <linux/bug.h> |
d2cb2145 | 45 | #include <linux/vmalloc.h> |
44408ad7 | 46 | #include <linux/module.h> |
5a0e3ad6 | 47 | #include <linux/gfp.h> |
a9ce6bc1 | 48 | #include <linux/memblock.h> |
2222e71b | 49 | #include <linux/seq_file.h> |
3b827c1b | 50 | |
84708807 JF |
51 | #include <trace/events/xen.h> |
52 | ||
3b827c1b JF |
53 | #include <asm/pgtable.h> |
54 | #include <asm/tlbflush.h> | |
5deb30d1 | 55 | #include <asm/fixmap.h> |
3b827c1b | 56 | #include <asm/mmu_context.h> |
319f3ba5 | 57 | #include <asm/setup.h> |
f4f97b3e | 58 | #include <asm/paravirt.h> |
7347b408 | 59 | #include <asm/e820.h> |
cbcd79c2 | 60 | #include <asm/linkage.h> |
08bbc9da | 61 | #include <asm/page.h> |
fef5ba79 | 62 | #include <asm/init.h> |
41f2e477 | 63 | #include <asm/pat.h> |
900cba88 | 64 | #include <asm/smp.h> |
3b827c1b JF |
65 | |
66 | #include <asm/xen/hypercall.h> | |
f4f97b3e | 67 | #include <asm/xen/hypervisor.h> |
3b827c1b | 68 | |
c0011dbf | 69 | #include <xen/xen.h> |
3b827c1b JF |
70 | #include <xen/page.h> |
71 | #include <xen/interface/xen.h> | |
59151001 | 72 | #include <xen/interface/hvm/hvm_op.h> |
319f3ba5 | 73 | #include <xen/interface/version.h> |
c0011dbf | 74 | #include <xen/interface/memory.h> |
319f3ba5 | 75 | #include <xen/hvc-console.h> |
3b827c1b | 76 | |
f4f97b3e | 77 | #include "multicalls.h" |
3b827c1b | 78 | #include "mmu.h" |
994025ca JF |
79 | #include "debugfs.h" |
80 | ||
19001c8c AN |
81 | /* |
82 | * Protects atomic reservation decrease/increase against concurrent increases. | |
06f521d5 | 83 | * Also protects non-atomic updates of current_pages and balloon lists. |
19001c8c AN |
84 | */ |
85 | DEFINE_SPINLOCK(xen_reservation_lock); | |
86 | ||
caaf9ecf | 87 | #ifdef CONFIG_X86_32 |
319f3ba5 JF |
88 | /* |
89 | * Identity map, in addition to plain kernel map. This needs to be | |
90 | * large enough to allocate page table pages to allocate the rest. | |
91 | * Each page can map 2MB. | |
92 | */ | |
764f0138 JF |
93 | #define LEVEL1_IDENT_ENTRIES (PTRS_PER_PTE * 4) |
94 | static RESERVE_BRK_ARRAY(pte_t, level1_ident_pgt, LEVEL1_IDENT_ENTRIES); | |
caaf9ecf | 95 | #endif |
319f3ba5 JF |
96 | #ifdef CONFIG_X86_64 |
97 | /* l3 pud for userspace vsyscall mapping */ | |
98 | static pud_t level3_user_vsyscall[PTRS_PER_PUD] __page_aligned_bss; | |
99 | #endif /* CONFIG_X86_64 */ | |
100 | ||
101 | /* | |
102 | * Note about cr3 (pagetable base) values: | |
103 | * | |
104 | * xen_cr3 contains the current logical cr3 value; it contains the | |
105 | * last set cr3. This may not be the current effective cr3, because | |
106 | * its update may be being lazily deferred. However, a vcpu looking | |
107 | * at its own cr3 can use this value knowing that everything will | |
108 | * be self-consistent. | |
109 | * | |
110 | * xen_current_cr3 contains the actual vcpu cr3; it is set once the | |
111 | * hypercall to set the vcpu cr3 is complete (so it may be a little | |
112 | * out of date, but it will never be set early). If one vcpu is | |
113 | * looking at another vcpu's cr3 value, it should use this variable. | |
114 | */ | |
115 | DEFINE_PER_CPU(unsigned long, xen_cr3); /* cr3 stored as physaddr */ | |
116 | DEFINE_PER_CPU(unsigned long, xen_current_cr3); /* actual vcpu cr3 */ | |
117 | ||
118 | ||
d6182fbf JF |
119 | /* |
120 | * Just beyond the highest usermode address. STACK_TOP_MAX has a | |
121 | * redzone above it, so round it up to a PGD boundary. | |
122 | */ | |
123 | #define USER_LIMIT ((STACK_TOP_MAX + PGDIR_SIZE - 1) & PGDIR_MASK) | |
124 | ||
9976b39b JF |
125 | unsigned long arbitrary_virt_to_mfn(void *vaddr) |
126 | { | |
127 | xmaddr_t maddr = arbitrary_virt_to_machine(vaddr); | |
128 | ||
129 | return PFN_DOWN(maddr.maddr); | |
130 | } | |
131 | ||
ce803e70 | 132 | xmaddr_t arbitrary_virt_to_machine(void *vaddr) |
3b827c1b | 133 | { |
ce803e70 | 134 | unsigned long address = (unsigned long)vaddr; |
da7bfc50 | 135 | unsigned int level; |
9f32d21c CL |
136 | pte_t *pte; |
137 | unsigned offset; | |
3b827c1b | 138 | |
9f32d21c CL |
139 | /* |
140 | * if the PFN is in the linear mapped vaddr range, we can just use | |
141 | * the (quick) virt_to_machine() p2m lookup | |
142 | */ | |
143 | if (virt_addr_valid(vaddr)) | |
144 | return virt_to_machine(vaddr); | |
145 | ||
146 | /* otherwise we have to do a (slower) full page-table walk */ | |
3b827c1b | 147 | |
9f32d21c CL |
148 | pte = lookup_address(address, &level); |
149 | BUG_ON(pte == NULL); | |
150 | offset = address & ~PAGE_MASK; | |
ebd879e3 | 151 | return XMADDR(((phys_addr_t)pte_mfn(*pte) << PAGE_SHIFT) + offset); |
3b827c1b | 152 | } |
de23be5f | 153 | EXPORT_SYMBOL_GPL(arbitrary_virt_to_machine); |
3b827c1b JF |
154 | |
155 | void make_lowmem_page_readonly(void *vaddr) | |
156 | { | |
157 | pte_t *pte, ptev; | |
158 | unsigned long address = (unsigned long)vaddr; | |
da7bfc50 | 159 | unsigned int level; |
3b827c1b | 160 | |
f0646e43 | 161 | pte = lookup_address(address, &level); |
fef5ba79 JF |
162 | if (pte == NULL) |
163 | return; /* vaddr missing */ | |
3b827c1b JF |
164 | |
165 | ptev = pte_wrprotect(*pte); | |
166 | ||
167 | if (HYPERVISOR_update_va_mapping(address, ptev, 0)) | |
168 | BUG(); | |
169 | } | |
170 | ||
171 | void make_lowmem_page_readwrite(void *vaddr) | |
172 | { | |
173 | pte_t *pte, ptev; | |
174 | unsigned long address = (unsigned long)vaddr; | |
da7bfc50 | 175 | unsigned int level; |
3b827c1b | 176 | |
f0646e43 | 177 | pte = lookup_address(address, &level); |
fef5ba79 JF |
178 | if (pte == NULL) |
179 | return; /* vaddr missing */ | |
3b827c1b JF |
180 | |
181 | ptev = pte_mkwrite(*pte); | |
182 | ||
183 | if (HYPERVISOR_update_va_mapping(address, ptev, 0)) | |
184 | BUG(); | |
185 | } | |
186 | ||
187 | ||
7708ad64 | 188 | static bool xen_page_pinned(void *ptr) |
e2426cf8 JF |
189 | { |
190 | struct page *page = virt_to_page(ptr); | |
191 | ||
192 | return PagePinned(page); | |
193 | } | |
194 | ||
eba3ff8b | 195 | void xen_set_domain_pte(pte_t *ptep, pte_t pteval, unsigned domid) |
c0011dbf JF |
196 | { |
197 | struct multicall_space mcs; | |
198 | struct mmu_update *u; | |
199 | ||
84708807 JF |
200 | trace_xen_mmu_set_domain_pte(ptep, pteval, domid); |
201 | ||
c0011dbf JF |
202 | mcs = xen_mc_entry(sizeof(*u)); |
203 | u = mcs.args; | |
204 | ||
205 | /* ptep might be kmapped when using 32-bit HIGHPTE */ | |
d5108316 | 206 | u->ptr = virt_to_machine(ptep).maddr; |
c0011dbf JF |
207 | u->val = pte_val_ma(pteval); |
208 | ||
eba3ff8b | 209 | MULTI_mmu_update(mcs.mc, mcs.args, 1, NULL, domid); |
c0011dbf JF |
210 | |
211 | xen_mc_issue(PARAVIRT_LAZY_MMU); | |
212 | } | |
eba3ff8b JF |
213 | EXPORT_SYMBOL_GPL(xen_set_domain_pte); |
214 | ||
7708ad64 | 215 | static void xen_extend_mmu_update(const struct mmu_update *update) |
3b827c1b | 216 | { |
d66bf8fc JF |
217 | struct multicall_space mcs; |
218 | struct mmu_update *u; | |
3b827c1b | 219 | |
400d3494 JF |
220 | mcs = xen_mc_extend_args(__HYPERVISOR_mmu_update, sizeof(*u)); |
221 | ||
994025ca | 222 | if (mcs.mc != NULL) { |
400d3494 | 223 | mcs.mc->args[1]++; |
994025ca | 224 | } else { |
400d3494 JF |
225 | mcs = __xen_mc_entry(sizeof(*u)); |
226 | MULTI_mmu_update(mcs.mc, mcs.args, 1, NULL, DOMID_SELF); | |
227 | } | |
d66bf8fc | 228 | |
d66bf8fc | 229 | u = mcs.args; |
400d3494 JF |
230 | *u = *update; |
231 | } | |
232 | ||
dcf7435c JF |
233 | static void xen_extend_mmuext_op(const struct mmuext_op *op) |
234 | { | |
235 | struct multicall_space mcs; | |
236 | struct mmuext_op *u; | |
237 | ||
238 | mcs = xen_mc_extend_args(__HYPERVISOR_mmuext_op, sizeof(*u)); | |
239 | ||
240 | if (mcs.mc != NULL) { | |
241 | mcs.mc->args[1]++; | |
242 | } else { | |
243 | mcs = __xen_mc_entry(sizeof(*u)); | |
244 | MULTI_mmuext_op(mcs.mc, mcs.args, 1, NULL, DOMID_SELF); | |
245 | } | |
246 | ||
247 | u = mcs.args; | |
248 | *u = *op; | |
249 | } | |
250 | ||
4c13629f | 251 | static void xen_set_pmd_hyper(pmd_t *ptr, pmd_t val) |
400d3494 JF |
252 | { |
253 | struct mmu_update u; | |
254 | ||
255 | preempt_disable(); | |
256 | ||
257 | xen_mc_batch(); | |
258 | ||
ce803e70 JF |
259 | /* ptr may be ioremapped for 64-bit pagetable setup */ |
260 | u.ptr = arbitrary_virt_to_machine(ptr).maddr; | |
400d3494 | 261 | u.val = pmd_val_ma(val); |
7708ad64 | 262 | xen_extend_mmu_update(&u); |
d66bf8fc JF |
263 | |
264 | xen_mc_issue(PARAVIRT_LAZY_MMU); | |
265 | ||
266 | preempt_enable(); | |
3b827c1b JF |
267 | } |
268 | ||
4c13629f | 269 | static void xen_set_pmd(pmd_t *ptr, pmd_t val) |
e2426cf8 | 270 | { |
84708807 JF |
271 | trace_xen_mmu_set_pmd(ptr, val); |
272 | ||
e2426cf8 JF |
273 | /* If page is not pinned, we can just update the entry |
274 | directly */ | |
7708ad64 | 275 | if (!xen_page_pinned(ptr)) { |
e2426cf8 JF |
276 | *ptr = val; |
277 | return; | |
278 | } | |
279 | ||
280 | xen_set_pmd_hyper(ptr, val); | |
281 | } | |
282 | ||
3b827c1b JF |
283 | /* |
284 | * Associate a virtual page frame with a given physical page frame | |
285 | * and protection flags for that frame. | |
286 | */ | |
287 | void set_pte_mfn(unsigned long vaddr, unsigned long mfn, pgprot_t flags) | |
288 | { | |
836fe2f2 | 289 | set_pte_vaddr(vaddr, mfn_pte(mfn, flags)); |
3b827c1b JF |
290 | } |
291 | ||
4a35c13c | 292 | static bool xen_batched_set_pte(pte_t *ptep, pte_t pteval) |
3b827c1b | 293 | { |
4a35c13c | 294 | struct mmu_update u; |
c0011dbf | 295 | |
4a35c13c JF |
296 | if (paravirt_get_lazy_mode() != PARAVIRT_LAZY_MMU) |
297 | return false; | |
994025ca | 298 | |
4a35c13c | 299 | xen_mc_batch(); |
d66bf8fc | 300 | |
4a35c13c JF |
301 | u.ptr = virt_to_machine(ptep).maddr | MMU_NORMAL_PT_UPDATE; |
302 | u.val = pte_val_ma(pteval); | |
303 | xen_extend_mmu_update(&u); | |
a99ac5e8 | 304 | |
4a35c13c | 305 | xen_mc_issue(PARAVIRT_LAZY_MMU); |
2bd50036 | 306 | |
4a35c13c JF |
307 | return true; |
308 | } | |
309 | ||
84708807 | 310 | static inline void __xen_set_pte(pte_t *ptep, pte_t pteval) |
4a35c13c | 311 | { |
4a35c13c | 312 | if (!xen_batched_set_pte(ptep, pteval)) |
a99ac5e8 | 313 | native_set_pte(ptep, pteval); |
3b827c1b JF |
314 | } |
315 | ||
84708807 JF |
316 | static void xen_set_pte(pte_t *ptep, pte_t pteval) |
317 | { | |
318 | trace_xen_mmu_set_pte(ptep, pteval); | |
319 | __xen_set_pte(ptep, pteval); | |
320 | } | |
321 | ||
4c13629f | 322 | static void xen_set_pte_at(struct mm_struct *mm, unsigned long addr, |
4a35c13c JF |
323 | pte_t *ptep, pte_t pteval) |
324 | { | |
84708807 JF |
325 | trace_xen_mmu_set_pte_at(mm, addr, ptep, pteval); |
326 | __xen_set_pte(ptep, pteval); | |
3b827c1b JF |
327 | } |
328 | ||
f63c2f24 T |
329 | pte_t xen_ptep_modify_prot_start(struct mm_struct *mm, |
330 | unsigned long addr, pte_t *ptep) | |
947a69c9 | 331 | { |
e57778a1 | 332 | /* Just return the pte as-is. We preserve the bits on commit */ |
84708807 | 333 | trace_xen_mmu_ptep_modify_prot_start(mm, addr, ptep, *ptep); |
e57778a1 JF |
334 | return *ptep; |
335 | } | |
336 | ||
337 | void xen_ptep_modify_prot_commit(struct mm_struct *mm, unsigned long addr, | |
338 | pte_t *ptep, pte_t pte) | |
339 | { | |
400d3494 | 340 | struct mmu_update u; |
e57778a1 | 341 | |
84708807 | 342 | trace_xen_mmu_ptep_modify_prot_commit(mm, addr, ptep, pte); |
400d3494 | 343 | xen_mc_batch(); |
947a69c9 | 344 | |
d5108316 | 345 | u.ptr = virt_to_machine(ptep).maddr | MMU_PT_UPDATE_PRESERVE_AD; |
400d3494 | 346 | u.val = pte_val_ma(pte); |
7708ad64 | 347 | xen_extend_mmu_update(&u); |
947a69c9 | 348 | |
e57778a1 | 349 | xen_mc_issue(PARAVIRT_LAZY_MMU); |
947a69c9 JF |
350 | } |
351 | ||
ebb9cfe2 JF |
352 | /* Assume pteval_t is equivalent to all the other *val_t types. */ |
353 | static pteval_t pte_mfn_to_pfn(pteval_t val) | |
947a69c9 | 354 | { |
ebb9cfe2 | 355 | if (val & _PAGE_PRESENT) { |
59438c9f | 356 | unsigned long mfn = (val & PTE_PFN_MASK) >> PAGE_SHIFT; |
b7e5ffe5 KRW |
357 | unsigned long pfn = mfn_to_pfn(mfn); |
358 | ||
77be1fab | 359 | pteval_t flags = val & PTE_FLAGS_MASK; |
b7e5ffe5 KRW |
360 | if (unlikely(pfn == ~0)) |
361 | val = flags & ~_PAGE_PRESENT; | |
362 | else | |
363 | val = ((pteval_t)pfn << PAGE_SHIFT) | flags; | |
ebb9cfe2 | 364 | } |
947a69c9 | 365 | |
ebb9cfe2 | 366 | return val; |
947a69c9 JF |
367 | } |
368 | ||
ebb9cfe2 | 369 | static pteval_t pte_pfn_to_mfn(pteval_t val) |
947a69c9 | 370 | { |
ebb9cfe2 | 371 | if (val & _PAGE_PRESENT) { |
59438c9f | 372 | unsigned long pfn = (val & PTE_PFN_MASK) >> PAGE_SHIFT; |
77be1fab | 373 | pteval_t flags = val & PTE_FLAGS_MASK; |
fb38923e | 374 | unsigned long mfn; |
cfd8951e | 375 | |
fb38923e KRW |
376 | if (!xen_feature(XENFEAT_auto_translated_physmap)) |
377 | mfn = get_phys_to_machine(pfn); | |
378 | else | |
379 | mfn = pfn; | |
cfd8951e JF |
380 | /* |
381 | * If there's no mfn for the pfn, then just create an | |
382 | * empty non-present pte. Unfortunately this loses | |
383 | * information about the original pfn, so | |
384 | * pte_mfn_to_pfn is asymmetric. | |
385 | */ | |
386 | if (unlikely(mfn == INVALID_P2M_ENTRY)) { | |
387 | mfn = 0; | |
388 | flags = 0; | |
fb38923e KRW |
389 | } else { |
390 | /* | |
391 | * Paramount to do this test _after_ the | |
392 | * INVALID_P2M_ENTRY as INVALID_P2M_ENTRY & | |
393 | * IDENTITY_FRAME_BIT resolves to true. | |
394 | */ | |
395 | mfn &= ~FOREIGN_FRAME_BIT; | |
396 | if (mfn & IDENTITY_FRAME_BIT) { | |
397 | mfn &= ~IDENTITY_FRAME_BIT; | |
398 | flags |= _PAGE_IOMAP; | |
399 | } | |
cfd8951e | 400 | } |
cfd8951e | 401 | val = ((pteval_t)mfn << PAGE_SHIFT) | flags; |
947a69c9 JF |
402 | } |
403 | ||
ebb9cfe2 | 404 | return val; |
947a69c9 JF |
405 | } |
406 | ||
c0011dbf JF |
407 | static pteval_t iomap_pte(pteval_t val) |
408 | { | |
409 | if (val & _PAGE_PRESENT) { | |
410 | unsigned long pfn = (val & PTE_PFN_MASK) >> PAGE_SHIFT; | |
411 | pteval_t flags = val & PTE_FLAGS_MASK; | |
412 | ||
413 | /* We assume the pte frame number is an MFN, so | |
414 | just use it as-is. */ | |
415 | val = ((pteval_t)pfn << PAGE_SHIFT) | flags; | |
416 | } | |
417 | ||
418 | return val; | |
419 | } | |
420 | ||
4c13629f | 421 | static pteval_t xen_pte_val(pte_t pte) |
947a69c9 | 422 | { |
41f2e477 | 423 | pteval_t pteval = pte.pte; |
8eaffa67 | 424 | #if 0 |
41f2e477 JF |
425 | /* If this is a WC pte, convert back from Xen WC to Linux WC */ |
426 | if ((pteval & (_PAGE_PAT | _PAGE_PCD | _PAGE_PWT)) == _PAGE_PAT) { | |
427 | WARN_ON(!pat_enabled); | |
428 | pteval = (pteval & ~_PAGE_PAT) | _PAGE_PWT; | |
429 | } | |
8eaffa67 | 430 | #endif |
41f2e477 JF |
431 | if (xen_initial_domain() && (pteval & _PAGE_IOMAP)) |
432 | return pteval; | |
433 | ||
434 | return pte_mfn_to_pfn(pteval); | |
947a69c9 | 435 | } |
da5de7c2 | 436 | PV_CALLEE_SAVE_REGS_THUNK(xen_pte_val); |
947a69c9 | 437 | |
4c13629f | 438 | static pgdval_t xen_pgd_val(pgd_t pgd) |
947a69c9 | 439 | { |
ebb9cfe2 | 440 | return pte_mfn_to_pfn(pgd.pgd); |
947a69c9 | 441 | } |
da5de7c2 | 442 | PV_CALLEE_SAVE_REGS_THUNK(xen_pgd_val); |
947a69c9 | 443 | |
41f2e477 JF |
444 | /* |
445 | * Xen's PAT setup is part of its ABI, though I assume entries 6 & 7 | |
446 | * are reserved for now, to correspond to the Intel-reserved PAT | |
447 | * types. | |
448 | * | |
449 | * We expect Linux's PAT set as follows: | |
450 | * | |
451 | * Idx PTE flags Linux Xen Default | |
452 | * 0 WB WB WB | |
453 | * 1 PWT WC WT WT | |
454 | * 2 PCD UC- UC- UC- | |
455 | * 3 PCD PWT UC UC UC | |
456 | * 4 PAT WB WC WB | |
457 | * 5 PAT PWT WC WP WT | |
458 | * 6 PAT PCD UC- UC UC- | |
459 | * 7 PAT PCD PWT UC UC UC | |
460 | */ | |
461 | ||
462 | void xen_set_pat(u64 pat) | |
463 | { | |
464 | /* We expect Linux to use a PAT setting of | |
465 | * UC UC- WC WB (ignoring the PAT flag) */ | |
466 | WARN_ON(pat != 0x0007010600070106ull); | |
467 | } | |
468 | ||
4c13629f | 469 | static pte_t xen_make_pte(pteval_t pte) |
947a69c9 | 470 | { |
7347b408 | 471 | phys_addr_t addr = (pte & PTE_PFN_MASK); |
8eaffa67 | 472 | #if 0 |
41f2e477 JF |
473 | /* If Linux is trying to set a WC pte, then map to the Xen WC. |
474 | * If _PAGE_PAT is set, then it probably means it is really | |
475 | * _PAGE_PSE, so avoid fiddling with the PAT mapping and hope | |
476 | * things work out OK... | |
477 | * | |
478 | * (We should never see kernel mappings with _PAGE_PSE set, | |
479 | * but we could see hugetlbfs mappings, I think.). | |
480 | */ | |
481 | if (pat_enabled && !WARN_ON(pte & _PAGE_PAT)) { | |
482 | if ((pte & (_PAGE_PCD | _PAGE_PWT)) == _PAGE_PWT) | |
483 | pte = (pte & ~(_PAGE_PCD | _PAGE_PWT)) | _PAGE_PAT; | |
484 | } | |
8eaffa67 | 485 | #endif |
7347b408 AN |
486 | /* |
487 | * Unprivileged domains are allowed to do IOMAPpings for | |
488 | * PCI passthrough, but not map ISA space. The ISA | |
489 | * mappings are just dummy local mappings to keep other | |
490 | * parts of the kernel happy. | |
491 | */ | |
492 | if (unlikely(pte & _PAGE_IOMAP) && | |
493 | (xen_initial_domain() || addr >= ISA_END_ADDRESS)) { | |
c0011dbf | 494 | pte = iomap_pte(pte); |
7347b408 AN |
495 | } else { |
496 | pte &= ~_PAGE_IOMAP; | |
c0011dbf | 497 | pte = pte_pfn_to_mfn(pte); |
7347b408 | 498 | } |
c0011dbf | 499 | |
ebb9cfe2 | 500 | return native_make_pte(pte); |
947a69c9 | 501 | } |
da5de7c2 | 502 | PV_CALLEE_SAVE_REGS_THUNK(xen_make_pte); |
947a69c9 | 503 | |
4c13629f | 504 | static pgd_t xen_make_pgd(pgdval_t pgd) |
947a69c9 | 505 | { |
ebb9cfe2 JF |
506 | pgd = pte_pfn_to_mfn(pgd); |
507 | return native_make_pgd(pgd); | |
947a69c9 | 508 | } |
da5de7c2 | 509 | PV_CALLEE_SAVE_REGS_THUNK(xen_make_pgd); |
947a69c9 | 510 | |
4c13629f | 511 | static pmdval_t xen_pmd_val(pmd_t pmd) |
947a69c9 | 512 | { |
ebb9cfe2 | 513 | return pte_mfn_to_pfn(pmd.pmd); |
947a69c9 | 514 | } |
da5de7c2 | 515 | PV_CALLEE_SAVE_REGS_THUNK(xen_pmd_val); |
28499143 | 516 | |
4c13629f | 517 | static void xen_set_pud_hyper(pud_t *ptr, pud_t val) |
f4f97b3e | 518 | { |
400d3494 | 519 | struct mmu_update u; |
f4f97b3e | 520 | |
d66bf8fc JF |
521 | preempt_disable(); |
522 | ||
400d3494 JF |
523 | xen_mc_batch(); |
524 | ||
ce803e70 JF |
525 | /* ptr may be ioremapped for 64-bit pagetable setup */ |
526 | u.ptr = arbitrary_virt_to_machine(ptr).maddr; | |
400d3494 | 527 | u.val = pud_val_ma(val); |
7708ad64 | 528 | xen_extend_mmu_update(&u); |
d66bf8fc JF |
529 | |
530 | xen_mc_issue(PARAVIRT_LAZY_MMU); | |
531 | ||
532 | preempt_enable(); | |
f4f97b3e JF |
533 | } |
534 | ||
4c13629f | 535 | static void xen_set_pud(pud_t *ptr, pud_t val) |
e2426cf8 | 536 | { |
84708807 JF |
537 | trace_xen_mmu_set_pud(ptr, val); |
538 | ||
e2426cf8 JF |
539 | /* If page is not pinned, we can just update the entry |
540 | directly */ | |
7708ad64 | 541 | if (!xen_page_pinned(ptr)) { |
e2426cf8 JF |
542 | *ptr = val; |
543 | return; | |
544 | } | |
545 | ||
546 | xen_set_pud_hyper(ptr, val); | |
547 | } | |
548 | ||
f6e58732 | 549 | #ifdef CONFIG_X86_PAE |
4c13629f | 550 | static void xen_set_pte_atomic(pte_t *ptep, pte_t pte) |
3b827c1b | 551 | { |
84708807 | 552 | trace_xen_mmu_set_pte_atomic(ptep, pte); |
f6e58732 | 553 | set_64bit((u64 *)ptep, native_pte_val(pte)); |
3b827c1b JF |
554 | } |
555 | ||
4c13629f | 556 | static void xen_pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep) |
3b827c1b | 557 | { |
84708807 | 558 | trace_xen_mmu_pte_clear(mm, addr, ptep); |
4a35c13c JF |
559 | if (!xen_batched_set_pte(ptep, native_make_pte(0))) |
560 | native_pte_clear(mm, addr, ptep); | |
3b827c1b JF |
561 | } |
562 | ||
4c13629f | 563 | static void xen_pmd_clear(pmd_t *pmdp) |
3b827c1b | 564 | { |
84708807 | 565 | trace_xen_mmu_pmd_clear(pmdp); |
e2426cf8 | 566 | set_pmd(pmdp, __pmd(0)); |
3b827c1b | 567 | } |
f6e58732 | 568 | #endif /* CONFIG_X86_PAE */ |
3b827c1b | 569 | |
4c13629f | 570 | static pmd_t xen_make_pmd(pmdval_t pmd) |
3b827c1b | 571 | { |
ebb9cfe2 | 572 | pmd = pte_pfn_to_mfn(pmd); |
947a69c9 | 573 | return native_make_pmd(pmd); |
3b827c1b | 574 | } |
da5de7c2 | 575 | PV_CALLEE_SAVE_REGS_THUNK(xen_make_pmd); |
3b827c1b | 576 | |
f6e58732 | 577 | #if PAGETABLE_LEVELS == 4 |
4c13629f | 578 | static pudval_t xen_pud_val(pud_t pud) |
f6e58732 JF |
579 | { |
580 | return pte_mfn_to_pfn(pud.pud); | |
581 | } | |
da5de7c2 | 582 | PV_CALLEE_SAVE_REGS_THUNK(xen_pud_val); |
f6e58732 | 583 | |
4c13629f | 584 | static pud_t xen_make_pud(pudval_t pud) |
f6e58732 JF |
585 | { |
586 | pud = pte_pfn_to_mfn(pud); | |
587 | ||
588 | return native_make_pud(pud); | |
589 | } | |
da5de7c2 | 590 | PV_CALLEE_SAVE_REGS_THUNK(xen_make_pud); |
f6e58732 | 591 | |
4c13629f | 592 | static pgd_t *xen_get_user_pgd(pgd_t *pgd) |
f6e58732 | 593 | { |
d6182fbf JF |
594 | pgd_t *pgd_page = (pgd_t *)(((unsigned long)pgd) & PAGE_MASK); |
595 | unsigned offset = pgd - pgd_page; | |
596 | pgd_t *user_ptr = NULL; | |
f6e58732 | 597 | |
d6182fbf JF |
598 | if (offset < pgd_index(USER_LIMIT)) { |
599 | struct page *page = virt_to_page(pgd_page); | |
600 | user_ptr = (pgd_t *)page->private; | |
601 | if (user_ptr) | |
602 | user_ptr += offset; | |
603 | } | |
f6e58732 | 604 | |
d6182fbf JF |
605 | return user_ptr; |
606 | } | |
607 | ||
608 | static void __xen_set_pgd_hyper(pgd_t *ptr, pgd_t val) | |
609 | { | |
610 | struct mmu_update u; | |
f6e58732 JF |
611 | |
612 | u.ptr = virt_to_machine(ptr).maddr; | |
613 | u.val = pgd_val_ma(val); | |
7708ad64 | 614 | xen_extend_mmu_update(&u); |
d6182fbf JF |
615 | } |
616 | ||
617 | /* | |
618 | * Raw hypercall-based set_pgd, intended for use in early boot before | |
619 | * there's a page structure. This implies: | |
620 | * 1. The only existing pagetable is the kernel's | |
621 | * 2. It is always pinned | |
622 | * 3. It has no user pagetable attached to it | |
623 | */ | |
4c13629f | 624 | static void __init xen_set_pgd_hyper(pgd_t *ptr, pgd_t val) |
d6182fbf JF |
625 | { |
626 | preempt_disable(); | |
627 | ||
628 | xen_mc_batch(); | |
629 | ||
630 | __xen_set_pgd_hyper(ptr, val); | |
f6e58732 JF |
631 | |
632 | xen_mc_issue(PARAVIRT_LAZY_MMU); | |
633 | ||
634 | preempt_enable(); | |
635 | } | |
636 | ||
4c13629f | 637 | static void xen_set_pgd(pgd_t *ptr, pgd_t val) |
f6e58732 | 638 | { |
d6182fbf JF |
639 | pgd_t *user_ptr = xen_get_user_pgd(ptr); |
640 | ||
84708807 JF |
641 | trace_xen_mmu_set_pgd(ptr, user_ptr, val); |
642 | ||
f6e58732 JF |
643 | /* If page is not pinned, we can just update the entry |
644 | directly */ | |
7708ad64 | 645 | if (!xen_page_pinned(ptr)) { |
f6e58732 | 646 | *ptr = val; |
d6182fbf | 647 | if (user_ptr) { |
7708ad64 | 648 | WARN_ON(xen_page_pinned(user_ptr)); |
d6182fbf JF |
649 | *user_ptr = val; |
650 | } | |
f6e58732 JF |
651 | return; |
652 | } | |
653 | ||
d6182fbf JF |
654 | /* If it's pinned, then we can at least batch the kernel and |
655 | user updates together. */ | |
656 | xen_mc_batch(); | |
657 | ||
658 | __xen_set_pgd_hyper(ptr, val); | |
659 | if (user_ptr) | |
660 | __xen_set_pgd_hyper(user_ptr, val); | |
661 | ||
662 | xen_mc_issue(PARAVIRT_LAZY_MMU); | |
f6e58732 JF |
663 | } |
664 | #endif /* PAGETABLE_LEVELS == 4 */ | |
665 | ||
f4f97b3e | 666 | /* |
5deb30d1 JF |
667 | * (Yet another) pagetable walker. This one is intended for pinning a |
668 | * pagetable. This means that it walks a pagetable and calls the | |
669 | * callback function on each page it finds making up the page table, | |
670 | * at every level. It walks the entire pagetable, but it only bothers | |
671 | * pinning pte pages which are below limit. In the normal case this | |
672 | * will be STACK_TOP_MAX, but at boot we need to pin up to | |
673 | * FIXADDR_TOP. | |
674 | * | |
675 | * For 32-bit the important bit is that we don't pin beyond there, | |
676 | * because then we start getting into Xen's ptes. | |
677 | * | |
678 | * For 64-bit, we must skip the Xen hole in the middle of the address | |
679 | * space, just after the big x86-64 virtual hole. | |
680 | */ | |
86bbc2c2 IC |
681 | static int __xen_pgd_walk(struct mm_struct *mm, pgd_t *pgd, |
682 | int (*func)(struct mm_struct *mm, struct page *, | |
683 | enum pt_level), | |
684 | unsigned long limit) | |
3b827c1b | 685 | { |
f4f97b3e | 686 | int flush = 0; |
5deb30d1 JF |
687 | unsigned hole_low, hole_high; |
688 | unsigned pgdidx_limit, pudidx_limit, pmdidx_limit; | |
689 | unsigned pgdidx, pudidx, pmdidx; | |
f4f97b3e | 690 | |
5deb30d1 JF |
691 | /* The limit is the last byte to be touched */ |
692 | limit--; | |
693 | BUG_ON(limit >= FIXADDR_TOP); | |
3b827c1b JF |
694 | |
695 | if (xen_feature(XENFEAT_auto_translated_physmap)) | |
f4f97b3e JF |
696 | return 0; |
697 | ||
5deb30d1 JF |
698 | /* |
699 | * 64-bit has a great big hole in the middle of the address | |
700 | * space, which contains the Xen mappings. On 32-bit these | |
701 | * will end up making a zero-sized hole and so is a no-op. | |
702 | */ | |
d6182fbf | 703 | hole_low = pgd_index(USER_LIMIT); |
5deb30d1 JF |
704 | hole_high = pgd_index(PAGE_OFFSET); |
705 | ||
706 | pgdidx_limit = pgd_index(limit); | |
707 | #if PTRS_PER_PUD > 1 | |
708 | pudidx_limit = pud_index(limit); | |
709 | #else | |
710 | pudidx_limit = 0; | |
711 | #endif | |
712 | #if PTRS_PER_PMD > 1 | |
713 | pmdidx_limit = pmd_index(limit); | |
714 | #else | |
715 | pmdidx_limit = 0; | |
716 | #endif | |
717 | ||
5deb30d1 | 718 | for (pgdidx = 0; pgdidx <= pgdidx_limit; pgdidx++) { |
f4f97b3e | 719 | pud_t *pud; |
3b827c1b | 720 | |
5deb30d1 JF |
721 | if (pgdidx >= hole_low && pgdidx < hole_high) |
722 | continue; | |
f4f97b3e | 723 | |
5deb30d1 | 724 | if (!pgd_val(pgd[pgdidx])) |
3b827c1b | 725 | continue; |
f4f97b3e | 726 | |
5deb30d1 | 727 | pud = pud_offset(&pgd[pgdidx], 0); |
3b827c1b JF |
728 | |
729 | if (PTRS_PER_PUD > 1) /* not folded */ | |
eefb47f6 | 730 | flush |= (*func)(mm, virt_to_page(pud), PT_PUD); |
f4f97b3e | 731 | |
5deb30d1 | 732 | for (pudidx = 0; pudidx < PTRS_PER_PUD; pudidx++) { |
f4f97b3e | 733 | pmd_t *pmd; |
f4f97b3e | 734 | |
5deb30d1 JF |
735 | if (pgdidx == pgdidx_limit && |
736 | pudidx > pudidx_limit) | |
737 | goto out; | |
3b827c1b | 738 | |
5deb30d1 | 739 | if (pud_none(pud[pudidx])) |
3b827c1b | 740 | continue; |
f4f97b3e | 741 | |
5deb30d1 | 742 | pmd = pmd_offset(&pud[pudidx], 0); |
3b827c1b JF |
743 | |
744 | if (PTRS_PER_PMD > 1) /* not folded */ | |
eefb47f6 | 745 | flush |= (*func)(mm, virt_to_page(pmd), PT_PMD); |
f4f97b3e | 746 | |
5deb30d1 JF |
747 | for (pmdidx = 0; pmdidx < PTRS_PER_PMD; pmdidx++) { |
748 | struct page *pte; | |
749 | ||
750 | if (pgdidx == pgdidx_limit && | |
751 | pudidx == pudidx_limit && | |
752 | pmdidx > pmdidx_limit) | |
753 | goto out; | |
3b827c1b | 754 | |
5deb30d1 | 755 | if (pmd_none(pmd[pmdidx])) |
3b827c1b JF |
756 | continue; |
757 | ||
5deb30d1 | 758 | pte = pmd_page(pmd[pmdidx]); |
eefb47f6 | 759 | flush |= (*func)(mm, pte, PT_PTE); |
3b827c1b JF |
760 | } |
761 | } | |
762 | } | |
11ad93e5 | 763 | |
5deb30d1 | 764 | out: |
11ad93e5 JF |
765 | /* Do the top level last, so that the callbacks can use it as |
766 | a cue to do final things like tlb flushes. */ | |
eefb47f6 | 767 | flush |= (*func)(mm, virt_to_page(pgd), PT_PGD); |
f4f97b3e JF |
768 | |
769 | return flush; | |
3b827c1b JF |
770 | } |
771 | ||
86bbc2c2 IC |
772 | static int xen_pgd_walk(struct mm_struct *mm, |
773 | int (*func)(struct mm_struct *mm, struct page *, | |
774 | enum pt_level), | |
775 | unsigned long limit) | |
776 | { | |
777 | return __xen_pgd_walk(mm, mm->pgd, func, limit); | |
778 | } | |
779 | ||
7708ad64 JF |
780 | /* If we're using split pte locks, then take the page's lock and |
781 | return a pointer to it. Otherwise return NULL. */ | |
eefb47f6 | 782 | static spinlock_t *xen_pte_lock(struct page *page, struct mm_struct *mm) |
74260714 JF |
783 | { |
784 | spinlock_t *ptl = NULL; | |
785 | ||
f7d0b926 | 786 | #if USE_SPLIT_PTLOCKS |
74260714 | 787 | ptl = __pte_lockptr(page); |
eefb47f6 | 788 | spin_lock_nest_lock(ptl, &mm->page_table_lock); |
74260714 JF |
789 | #endif |
790 | ||
791 | return ptl; | |
792 | } | |
793 | ||
7708ad64 | 794 | static void xen_pte_unlock(void *v) |
74260714 JF |
795 | { |
796 | spinlock_t *ptl = v; | |
797 | spin_unlock(ptl); | |
798 | } | |
799 | ||
800 | static void xen_do_pin(unsigned level, unsigned long pfn) | |
801 | { | |
dcf7435c | 802 | struct mmuext_op op; |
74260714 | 803 | |
dcf7435c JF |
804 | op.cmd = level; |
805 | op.arg1.mfn = pfn_to_mfn(pfn); | |
806 | ||
807 | xen_extend_mmuext_op(&op); | |
74260714 JF |
808 | } |
809 | ||
eefb47f6 JF |
810 | static int xen_pin_page(struct mm_struct *mm, struct page *page, |
811 | enum pt_level level) | |
f4f97b3e | 812 | { |
d60cd46b | 813 | unsigned pgfl = TestSetPagePinned(page); |
f4f97b3e JF |
814 | int flush; |
815 | ||
816 | if (pgfl) | |
817 | flush = 0; /* already pinned */ | |
818 | else if (PageHighMem(page)) | |
819 | /* kmaps need flushing if we found an unpinned | |
820 | highpage */ | |
821 | flush = 1; | |
822 | else { | |
823 | void *pt = lowmem_page_address(page); | |
824 | unsigned long pfn = page_to_pfn(page); | |
825 | struct multicall_space mcs = __xen_mc_entry(0); | |
74260714 | 826 | spinlock_t *ptl; |
f4f97b3e JF |
827 | |
828 | flush = 0; | |
829 | ||
11ad93e5 JF |
830 | /* |
831 | * We need to hold the pagetable lock between the time | |
832 | * we make the pagetable RO and when we actually pin | |
833 | * it. If we don't, then other users may come in and | |
834 | * attempt to update the pagetable by writing it, | |
835 | * which will fail because the memory is RO but not | |
836 | * pinned, so Xen won't do the trap'n'emulate. | |
837 | * | |
838 | * If we're using split pte locks, we can't hold the | |
839 | * entire pagetable's worth of locks during the | |
840 | * traverse, because we may wrap the preempt count (8 | |
841 | * bits). The solution is to mark RO and pin each PTE | |
842 | * page while holding the lock. This means the number | |
843 | * of locks we end up holding is never more than a | |
844 | * batch size (~32 entries, at present). | |
845 | * | |
846 | * If we're not using split pte locks, we needn't pin | |
847 | * the PTE pages independently, because we're | |
848 | * protected by the overall pagetable lock. | |
849 | */ | |
74260714 JF |
850 | ptl = NULL; |
851 | if (level == PT_PTE) | |
eefb47f6 | 852 | ptl = xen_pte_lock(page, mm); |
74260714 | 853 | |
f4f97b3e JF |
854 | MULTI_update_va_mapping(mcs.mc, (unsigned long)pt, |
855 | pfn_pte(pfn, PAGE_KERNEL_RO), | |
74260714 JF |
856 | level == PT_PGD ? UVMF_TLB_FLUSH : 0); |
857 | ||
11ad93e5 | 858 | if (ptl) { |
74260714 JF |
859 | xen_do_pin(MMUEXT_PIN_L1_TABLE, pfn); |
860 | ||
74260714 JF |
861 | /* Queue a deferred unlock for when this batch |
862 | is completed. */ | |
7708ad64 | 863 | xen_mc_callback(xen_pte_unlock, ptl); |
74260714 | 864 | } |
f4f97b3e JF |
865 | } |
866 | ||
867 | return flush; | |
868 | } | |
3b827c1b | 869 | |
f4f97b3e JF |
870 | /* This is called just after a mm has been created, but it has not |
871 | been used yet. We need to make sure that its pagetable is all | |
872 | read-only, and can be pinned. */ | |
eefb47f6 | 873 | static void __xen_pgd_pin(struct mm_struct *mm, pgd_t *pgd) |
3b827c1b | 874 | { |
5f94fb5b JF |
875 | trace_xen_mmu_pgd_pin(mm, pgd); |
876 | ||
f4f97b3e | 877 | xen_mc_batch(); |
3b827c1b | 878 | |
86bbc2c2 | 879 | if (__xen_pgd_walk(mm, pgd, xen_pin_page, USER_LIMIT)) { |
d05fdf31 | 880 | /* re-enable interrupts for flushing */ |
f87e4cac | 881 | xen_mc_issue(0); |
d05fdf31 | 882 | |
f4f97b3e | 883 | kmap_flush_unused(); |
d05fdf31 | 884 | |
f87e4cac JF |
885 | xen_mc_batch(); |
886 | } | |
f4f97b3e | 887 | |
d6182fbf JF |
888 | #ifdef CONFIG_X86_64 |
889 | { | |
890 | pgd_t *user_pgd = xen_get_user_pgd(pgd); | |
891 | ||
892 | xen_do_pin(MMUEXT_PIN_L4_TABLE, PFN_DOWN(__pa(pgd))); | |
893 | ||
894 | if (user_pgd) { | |
eefb47f6 | 895 | xen_pin_page(mm, virt_to_page(user_pgd), PT_PGD); |
f63c2f24 T |
896 | xen_do_pin(MMUEXT_PIN_L4_TABLE, |
897 | PFN_DOWN(__pa(user_pgd))); | |
d6182fbf JF |
898 | } |
899 | } | |
900 | #else /* CONFIG_X86_32 */ | |
5deb30d1 JF |
901 | #ifdef CONFIG_X86_PAE |
902 | /* Need to make sure unshared kernel PMD is pinnable */ | |
47cb2ed9 | 903 | xen_pin_page(mm, pgd_page(pgd[pgd_index(TASK_SIZE)]), |
eefb47f6 | 904 | PT_PMD); |
5deb30d1 | 905 | #endif |
28499143 | 906 | xen_do_pin(MMUEXT_PIN_L3_TABLE, PFN_DOWN(__pa(pgd))); |
d6182fbf | 907 | #endif /* CONFIG_X86_64 */ |
f4f97b3e | 908 | xen_mc_issue(0); |
3b827c1b JF |
909 | } |
910 | ||
eefb47f6 JF |
911 | static void xen_pgd_pin(struct mm_struct *mm) |
912 | { | |
913 | __xen_pgd_pin(mm, mm->pgd); | |
914 | } | |
915 | ||
0e91398f JF |
916 | /* |
917 | * On save, we need to pin all pagetables to make sure they get their | |
918 | * mfns turned into pfns. Search the list for any unpinned pgds and pin | |
919 | * them (unpinned pgds are not currently in use, probably because the | |
920 | * process is under construction or destruction). | |
eefb47f6 JF |
921 | * |
922 | * Expected to be called in stop_machine() ("equivalent to taking | |
923 | * every spinlock in the system"), so the locking doesn't really | |
924 | * matter all that much. | |
0e91398f JF |
925 | */ |
926 | void xen_mm_pin_all(void) | |
927 | { | |
0e91398f | 928 | struct page *page; |
74260714 | 929 | |
a79e53d8 | 930 | spin_lock(&pgd_lock); |
f4f97b3e | 931 | |
0e91398f JF |
932 | list_for_each_entry(page, &pgd_list, lru) { |
933 | if (!PagePinned(page)) { | |
eefb47f6 | 934 | __xen_pgd_pin(&init_mm, (pgd_t *)page_address(page)); |
0e91398f JF |
935 | SetPageSavePinned(page); |
936 | } | |
937 | } | |
938 | ||
a79e53d8 | 939 | spin_unlock(&pgd_lock); |
3b827c1b JF |
940 | } |
941 | ||
c1f2f09e EH |
942 | /* |
943 | * The init_mm pagetable is really pinned as soon as it's created, but | |
944 | * that's before we have page structures to store the bits. So do all | |
945 | * the book-keeping now. | |
946 | */ | |
3f508953 | 947 | static int __init xen_mark_pinned(struct mm_struct *mm, struct page *page, |
eefb47f6 | 948 | enum pt_level level) |
3b827c1b | 949 | { |
f4f97b3e JF |
950 | SetPagePinned(page); |
951 | return 0; | |
952 | } | |
3b827c1b | 953 | |
b96229b5 | 954 | static void __init xen_mark_init_mm_pinned(void) |
f4f97b3e | 955 | { |
eefb47f6 | 956 | xen_pgd_walk(&init_mm, xen_mark_pinned, FIXADDR_TOP); |
f4f97b3e | 957 | } |
3b827c1b | 958 | |
eefb47f6 JF |
959 | static int xen_unpin_page(struct mm_struct *mm, struct page *page, |
960 | enum pt_level level) | |
f4f97b3e | 961 | { |
d60cd46b | 962 | unsigned pgfl = TestClearPagePinned(page); |
3b827c1b | 963 | |
f4f97b3e JF |
964 | if (pgfl && !PageHighMem(page)) { |
965 | void *pt = lowmem_page_address(page); | |
966 | unsigned long pfn = page_to_pfn(page); | |
74260714 JF |
967 | spinlock_t *ptl = NULL; |
968 | struct multicall_space mcs; | |
969 | ||
11ad93e5 JF |
970 | /* |
971 | * Do the converse to pin_page. If we're using split | |
972 | * pte locks, we must be holding the lock while | |
973 | * the pte page is unpinned but still RO to prevent | |
974 | * concurrent updates from seeing it in this | |
975 | * partially-pinned state. | |
976 | */ | |
74260714 | 977 | if (level == PT_PTE) { |
eefb47f6 | 978 | ptl = xen_pte_lock(page, mm); |
74260714 | 979 | |
11ad93e5 JF |
980 | if (ptl) |
981 | xen_do_pin(MMUEXT_UNPIN_TABLE, pfn); | |
74260714 JF |
982 | } |
983 | ||
984 | mcs = __xen_mc_entry(0); | |
f4f97b3e JF |
985 | |
986 | MULTI_update_va_mapping(mcs.mc, (unsigned long)pt, | |
987 | pfn_pte(pfn, PAGE_KERNEL), | |
74260714 JF |
988 | level == PT_PGD ? UVMF_TLB_FLUSH : 0); |
989 | ||
990 | if (ptl) { | |
991 | /* unlock when batch completed */ | |
7708ad64 | 992 | xen_mc_callback(xen_pte_unlock, ptl); |
74260714 | 993 | } |
f4f97b3e JF |
994 | } |
995 | ||
996 | return 0; /* never need to flush on unpin */ | |
3b827c1b JF |
997 | } |
998 | ||
f4f97b3e | 999 | /* Release a pagetable's pages back as normal RW */
eefb47f6 | 1000 | static void __xen_pgd_unpin(struct mm_struct *mm, pgd_t *pgd) |
f4f97b3e | 1001 | { |
5f94fb5b JF |
1002 | trace_xen_mmu_pgd_unpin(mm, pgd); |
1003 | ||
f4f97b3e JF |
1004 | xen_mc_batch(); |
1005 | ||
74260714 | 1006 | xen_do_pin(MMUEXT_UNPIN_TABLE, PFN_DOWN(__pa(pgd))); |
f4f97b3e | 1007 | |
d6182fbf JF |
1008 | #ifdef CONFIG_X86_64 |
1009 | { | |
1010 | pgd_t *user_pgd = xen_get_user_pgd(pgd); | |
1011 | ||
1012 | if (user_pgd) { | |
f63c2f24 T |
1013 | xen_do_pin(MMUEXT_UNPIN_TABLE, |
1014 | PFN_DOWN(__pa(user_pgd))); | |
eefb47f6 | 1015 | xen_unpin_page(mm, virt_to_page(user_pgd), PT_PGD); |
d6182fbf JF |
1016 | } |
1017 | } | |
1018 | #endif | |
1019 | ||
5deb30d1 JF |
1020 | #ifdef CONFIG_X86_PAE |
1021 | /* Need to make sure unshared kernel PMD is unpinned */ | |
47cb2ed9 | 1022 | xen_unpin_page(mm, pgd_page(pgd[pgd_index(TASK_SIZE)]), |
eefb47f6 | 1023 | PT_PMD); |
5deb30d1 | 1024 | #endif |
d6182fbf | 1025 | |
86bbc2c2 | 1026 | __xen_pgd_walk(mm, pgd, xen_unpin_page, USER_LIMIT); |
f4f97b3e JF |
1027 | |
1028 | xen_mc_issue(0); | |
1029 | } | |
3b827c1b | 1030 | |
eefb47f6 JF |
1031 | static void xen_pgd_unpin(struct mm_struct *mm) |
1032 | { | |
1033 | __xen_pgd_unpin(mm, mm->pgd); | |
1034 | } | |
1035 | ||
0e91398f JF |
1036 | /* |
1037 | * On resume, undo any pinning done at save, so that the rest of the | |
1038 | * kernel doesn't see any unexpected pinned pagetables. | |
1039 | */ | |
1040 | void xen_mm_unpin_all(void) | |
1041 | { | |
0e91398f JF |
1042 | struct page *page; |
1043 | ||
a79e53d8 | 1044 | spin_lock(&pgd_lock); |
0e91398f JF |
1045 | |
1046 | list_for_each_entry(page, &pgd_list, lru) { | |
1047 | if (PageSavePinned(page)) { | |
1048 | BUG_ON(!PagePinned(page)); | |
eefb47f6 | 1049 | __xen_pgd_unpin(&init_mm, (pgd_t *)page_address(page)); |
0e91398f JF |
1050 | ClearPageSavePinned(page); |
1051 | } | |
1052 | } | |
1053 | ||
a79e53d8 | 1054 | spin_unlock(&pgd_lock); |
0e91398f JF |
1055 | } |
1056 | ||
4c13629f | 1057 | static void xen_activate_mm(struct mm_struct *prev, struct mm_struct *next) |
3b827c1b | 1058 | { |
f4f97b3e | 1059 | spin_lock(&next->page_table_lock); |
eefb47f6 | 1060 | xen_pgd_pin(next); |
f4f97b3e | 1061 | spin_unlock(&next->page_table_lock); |
3b827c1b JF |
1062 | } |
1063 | ||
4c13629f | 1064 | static void xen_dup_mmap(struct mm_struct *oldmm, struct mm_struct *mm) |
3b827c1b | 1065 | { |
f4f97b3e | 1066 | spin_lock(&mm->page_table_lock); |
eefb47f6 | 1067 | xen_pgd_pin(mm); |
f4f97b3e | 1068 | spin_unlock(&mm->page_table_lock); |
3b827c1b JF |
1069 | } |
1070 | ||
3b827c1b | 1071 | |
f87e4cac JF |
1072 | #ifdef CONFIG_SMP |
1073 | /* Another cpu may still have its %cr3 pointing at the pagetable, so | |
1074 | we need to repoint it somewhere else before we can unpin it. */ | |
1075 | static void drop_other_mm_ref(void *info) | |
1076 | { | |
1077 | struct mm_struct *mm = info; | |
ce87b3d3 | 1078 | struct mm_struct *active_mm; |
3b827c1b | 1079 | |
2113f469 | 1080 | active_mm = this_cpu_read(cpu_tlbstate.active_mm); |
ce87b3d3 | 1081 | |
2113f469 | 1082 | if (active_mm == mm && this_cpu_read(cpu_tlbstate.state) != TLBSTATE_OK) |
f87e4cac | 1083 | leave_mm(smp_processor_id()); |
9f79991d JF |
1084 | |
1085 | /* If this cpu still has a stale cr3 reference, then make sure | |
1086 | it has been flushed. */ | |
2113f469 | 1087 | if (this_cpu_read(xen_current_cr3) == __pa(mm->pgd)) |
9f79991d | 1088 | load_cr3(swapper_pg_dir); |
f87e4cac | 1089 | } |
3b827c1b | 1090 | |
7708ad64 | 1091 | static void xen_drop_mm_ref(struct mm_struct *mm) |
f87e4cac | 1092 | { |
e4d98207 | 1093 | cpumask_var_t mask; |
9f79991d JF |
1094 | unsigned cpu; |
1095 | ||
f87e4cac JF |
1096 | if (current->active_mm == mm) { |
1097 | if (current->mm == mm) | |
1098 | load_cr3(swapper_pg_dir); | |
1099 | else | |
1100 | leave_mm(smp_processor_id()); | |
9f79991d JF |
1101 | } |
1102 | ||
1103 | /* Get the "official" set of cpus referring to our pagetable. */ | |
e4d98207 MT |
1104 | if (!alloc_cpumask_var(&mask, GFP_ATOMIC)) { |
1105 | for_each_online_cpu(cpu) { | |
78f1c4d6 | 1106 | if (!cpumask_test_cpu(cpu, mm_cpumask(mm)) |
e4d98207 MT |
1107 | && per_cpu(xen_current_cr3, cpu) != __pa(mm->pgd)) |
1108 | continue; | |
1109 | smp_call_function_single(cpu, drop_other_mm_ref, mm, 1); | |
1110 | } | |
1111 | return; | |
1112 | } | |
78f1c4d6 | 1113 | cpumask_copy(mask, mm_cpumask(mm)); |
9f79991d JF |
1114 | |
1115 | /* It's possible that a vcpu may have a stale reference to our | |
1116 | cr3, because it's in lazy mode, and it hasn't yet flushed | |
1117 | its set of pending hypercalls. In this case, we can | |
1118 | look at its actual current cr3 value, and force it to flush | |
1119 | if needed. */ | |
1120 | for_each_online_cpu(cpu) { | |
1121 | if (per_cpu(xen_current_cr3, cpu) == __pa(mm->pgd)) | |
e4d98207 | 1122 | cpumask_set_cpu(cpu, mask); |
3b827c1b JF |
1123 | } |
1124 | ||
e4d98207 MT |
1125 | if (!cpumask_empty(mask)) |
1126 | smp_call_function_many(mask, drop_other_mm_ref, mm, 1); | |
1127 | free_cpumask_var(mask); | |
f87e4cac JF |
1128 | } |
1129 | #else | |
7708ad64 | 1130 | static void xen_drop_mm_ref(struct mm_struct *mm) |
f87e4cac JF |
1131 | { |
1132 | if (current->active_mm == mm) | |
1133 | load_cr3(swapper_pg_dir); | |
1134 | } | |
1135 | #endif | |
1136 | ||
1137 | /* | |
1138 | * While a process runs, Xen pins its pagetables, which means that the | |
1139 | * hypervisor forces it to be read-only, and it controls all updates | |
1140 | * to it. This means that all pagetable updates have to go via the | |
1141 | * hypervisor, which is moderately expensive. | |
1142 | * | |
1143 | * Since we're pulling the pagetable down, we switch to use init_mm, | |
1144 | * unpin old process pagetable and mark it all read-write, which | |
1145 | * allows further operations on it to be simple memory accesses. | |
1146 | * | |
1147 | * The only subtle point is that another CPU may still be using the | |
1148 | * pagetable because of lazy tlb flushing. This means we need to | |
1149 | * switch all CPUs off this pagetable before we can unpin it. | |
1150 | */ | |
4c13629f | 1151 | static void xen_exit_mmap(struct mm_struct *mm) |
f87e4cac JF |
1152 | { |
1153 | get_cpu(); /* make sure we don't move around */ | |
7708ad64 | 1154 | xen_drop_mm_ref(mm); |
f87e4cac | 1155 | put_cpu(); |
3b827c1b | 1156 | |
f120f13e | 1157 | spin_lock(&mm->page_table_lock); |
df912ea4 JF |
1158 | |
1159 | /* pgd may not be pinned in the error exit path of execve */ | |
7708ad64 | 1160 | if (xen_page_pinned(mm->pgd)) |
eefb47f6 | 1161 | xen_pgd_unpin(mm); |
74260714 | 1162 | |
f120f13e | 1163 | spin_unlock(&mm->page_table_lock); |
3b827c1b | 1164 | } |
994025ca | 1165 | |
3f508953 | 1166 | static void __init xen_pagetable_setup_start(pgd_t *base) |
319f3ba5 JF |
1167 | { |
1168 | } | |
1169 | ||
279b706b SS |
1170 | static __init void xen_mapping_pagetable_reserve(u64 start, u64 end) |
1171 | { | |
1172 | /* reserve the range used */ | |
1173 | native_pagetable_reserve(start, end); | |
1174 | ||
1175 | /* set as RW the rest */ | |
1176 | printk(KERN_DEBUG "xen: setting RW the range %llx - %llx\n", end, | |
1177 | PFN_PHYS(pgt_buf_top)); | |
1178 | while (end < PFN_PHYS(pgt_buf_top)) { | |
1179 | make_lowmem_page_readwrite(__va(end)); | |
1180 | end += PAGE_SIZE; | |
1181 | } | |
1182 | } | |
1183 | ||
f1d7062a TG |
1184 | static void xen_post_allocator_init(void); |
1185 | ||
3f508953 | 1186 | static void __init xen_pagetable_setup_done(pgd_t *base) |
319f3ba5 JF |
1187 | { |
1188 | xen_setup_shared_info(); | |
f1d7062a | 1189 | xen_post_allocator_init(); |
319f3ba5 JF |
1190 | } |
1191 | ||
1192 | static void xen_write_cr2(unsigned long cr2) | |
1193 | { | |
2113f469 | 1194 | this_cpu_read(xen_vcpu)->arch.cr2 = cr2; |
319f3ba5 JF |
1195 | } |
1196 | ||
1197 | static unsigned long xen_read_cr2(void) | |
1198 | { | |
2113f469 | 1199 | return this_cpu_read(xen_vcpu)->arch.cr2; |
319f3ba5 JF |
1200 | } |
1201 | ||
1202 | unsigned long xen_read_cr2_direct(void) | |
1203 | { | |
2113f469 | 1204 | return this_cpu_read(xen_vcpu_info.arch.cr2); |
319f3ba5 JF |
1205 | } |
1206 | ||
1207 | static void xen_flush_tlb(void) | |
1208 | { | |
1209 | struct mmuext_op *op; | |
1210 | struct multicall_space mcs; | |
1211 | ||
c8eed171 JF |
1212 | trace_xen_mmu_flush_tlb(0); |
1213 | ||
319f3ba5 JF |
1214 | preempt_disable(); |
1215 | ||
1216 | mcs = xen_mc_entry(sizeof(*op)); | |
1217 | ||
1218 | op = mcs.args; | |
1219 | op->cmd = MMUEXT_TLB_FLUSH_LOCAL; | |
1220 | MULTI_mmuext_op(mcs.mc, op, 1, NULL, DOMID_SELF); | |
1221 | ||
1222 | xen_mc_issue(PARAVIRT_LAZY_MMU); | |
1223 | ||
1224 | preempt_enable(); | |
1225 | } | |
1226 | ||
1227 | static void xen_flush_tlb_single(unsigned long addr) | |
1228 | { | |
1229 | struct mmuext_op *op; | |
1230 | struct multicall_space mcs; | |
1231 | ||
c8eed171 JF |
1232 | trace_xen_mmu_flush_tlb_single(addr); |
1233 | ||
319f3ba5 JF |
1234 | preempt_disable(); |
1235 | ||
1236 | mcs = xen_mc_entry(sizeof(*op)); | |
1237 | op = mcs.args; | |
1238 | op->cmd = MMUEXT_INVLPG_LOCAL; | |
1239 | op->arg1.linear_addr = addr & PAGE_MASK; | |
1240 | MULTI_mmuext_op(mcs.mc, op, 1, NULL, DOMID_SELF); | |
1241 | ||
1242 | xen_mc_issue(PARAVIRT_LAZY_MMU); | |
1243 | ||
1244 | preempt_enable(); | |
1245 | } | |
1246 | ||
1247 | static void xen_flush_tlb_others(const struct cpumask *cpus, | |
1248 | struct mm_struct *mm, unsigned long va) | |
1249 | { | |
1250 | struct { | |
1251 | struct mmuext_op op; | |
32dd1194 | 1252 | #ifdef CONFIG_SMP |
900cba88 | 1253 | DECLARE_BITMAP(mask, num_processors); |
32dd1194 KRW |
1254 | #else |
1255 | DECLARE_BITMAP(mask, NR_CPUS); | |
1256 | #endif | |
319f3ba5 JF |
1257 | } *args; |
1258 | struct multicall_space mcs; | |
1259 | ||
c8eed171 JF |
1260 | trace_xen_mmu_flush_tlb_others(cpus, mm, va); |
1261 | ||
e3f8a74e JF |
1262 | if (cpumask_empty(cpus)) |
1263 | return; /* nothing to do */ | |
319f3ba5 JF |
1264 | |
1265 | mcs = xen_mc_entry(sizeof(*args)); | |
1266 | args = mcs.args; | |
1267 | args->op.arg2.vcpumask = to_cpumask(args->mask); | |
1268 | ||
1269 | /* Remove us, and any offline CPUS. */ | |
1270 | cpumask_and(to_cpumask(args->mask), cpus, cpu_online_mask); | |
1271 | cpumask_clear_cpu(smp_processor_id(), to_cpumask(args->mask)); | |
319f3ba5 JF |
1272 | |
1273 | if (va == TLB_FLUSH_ALL) { | |
1274 | args->op.cmd = MMUEXT_TLB_FLUSH_MULTI; | |
1275 | } else { | |
1276 | args->op.cmd = MMUEXT_INVLPG_MULTI; | |
1277 | args->op.arg1.linear_addr = va; | |
1278 | } | |
1279 | ||
1280 | MULTI_mmuext_op(mcs.mc, &args->op, 1, NULL, DOMID_SELF); | |
1281 | ||
319f3ba5 JF |
1282 | xen_mc_issue(PARAVIRT_LAZY_MMU); |
1283 | } | |
1284 | ||
1285 | static unsigned long xen_read_cr3(void) | |
1286 | { | |
2113f469 | 1287 | return this_cpu_read(xen_cr3); |
319f3ba5 JF |
1288 | } |
1289 | ||
1290 | static void set_current_cr3(void *v) | |
1291 | { | |
2113f469 | 1292 | this_cpu_write(xen_current_cr3, (unsigned long)v); |
319f3ba5 JF |
1293 | } |
1294 | ||
1295 | static void __xen_write_cr3(bool kernel, unsigned long cr3) | |
1296 | { | |
dcf7435c | 1297 | struct mmuext_op op; |
319f3ba5 JF |
1298 | unsigned long mfn; |
1299 | ||
c8eed171 JF |
1300 | trace_xen_mmu_write_cr3(kernel, cr3); |
1301 | ||
319f3ba5 JF |
1302 | if (cr3) |
1303 | mfn = pfn_to_mfn(PFN_DOWN(cr3)); | |
1304 | else | |
1305 | mfn = 0; | |
1306 | ||
1307 | WARN_ON(mfn == 0 && kernel); | |
1308 | ||
dcf7435c JF |
1309 | op.cmd = kernel ? MMUEXT_NEW_BASEPTR : MMUEXT_NEW_USER_BASEPTR; |
1310 | op.arg1.mfn = mfn; | |
319f3ba5 | 1311 | |
dcf7435c | 1312 | xen_extend_mmuext_op(&op); |
319f3ba5 JF |
1313 | |
1314 | if (kernel) { | |
2113f469 | 1315 | this_cpu_write(xen_cr3, cr3); |
319f3ba5 JF |
1316 | |
1317 | /* Update xen_current_cr3 once the batch has actually | |
1318 | been submitted. */ | |
1319 | xen_mc_callback(set_current_cr3, (void *)cr3); | |
1320 | } | |
1321 | } | |
1322 | ||
1323 | static void xen_write_cr3(unsigned long cr3) | |
1324 | { | |
1325 | BUG_ON(preemptible()); | |
1326 | ||
1327 | xen_mc_batch(); /* disables interrupts */ | |
1328 | ||
1329 | /* Update while interrupts are disabled, so its atomic with | |
1330 | respect to ipis */ | |
2113f469 | 1331 | this_cpu_write(xen_cr3, cr3); |
319f3ba5 JF |
1332 | |
1333 | __xen_write_cr3(true, cr3); | |
1334 | ||
1335 | #ifdef CONFIG_X86_64 | |
1336 | { | |
1337 | pgd_t *user_pgd = xen_get_user_pgd(__va(cr3)); | |
1338 | if (user_pgd) | |
1339 | __xen_write_cr3(false, __pa(user_pgd)); | |
1340 | else | |
1341 | __xen_write_cr3(false, 0); | |
1342 | } | |
1343 | #endif | |
1344 | ||
1345 | xen_mc_issue(PARAVIRT_LAZY_CPU); /* interrupts restored */ | |
1346 | } | |
1347 | ||
1348 | static int xen_pgd_alloc(struct mm_struct *mm) | |
1349 | { | |
1350 | pgd_t *pgd = mm->pgd; | |
1351 | int ret = 0; | |
1352 | ||
1353 | BUG_ON(PagePinned(virt_to_page(pgd))); | |
1354 | ||
1355 | #ifdef CONFIG_X86_64 | |
1356 | { | |
1357 | struct page *page = virt_to_page(pgd); | |
1358 | pgd_t *user_pgd; | |
1359 | ||
1360 | BUG_ON(page->private != 0); | |
1361 | ||
1362 | ret = -ENOMEM; | |
1363 | ||
1364 | user_pgd = (pgd_t *)__get_free_page(GFP_KERNEL | __GFP_ZERO); | |
1365 | page->private = (unsigned long)user_pgd; | |
1366 | ||
1367 | if (user_pgd != NULL) { | |
1368 | user_pgd[pgd_index(VSYSCALL_START)] = | |
1369 | __pgd(__pa(level3_user_vsyscall) | _PAGE_TABLE); | |
1370 | ret = 0; | |
1371 | } | |
1372 | ||
1373 | BUG_ON(PagePinned(virt_to_page(xen_get_user_pgd(pgd)))); | |
1374 | } | |
1375 | #endif | |
1376 | ||
1377 | return ret; | |
1378 | } | |
1379 | ||
1380 | static void xen_pgd_free(struct mm_struct *mm, pgd_t *pgd) | |
1381 | { | |
1382 | #ifdef CONFIG_X86_64 | |
1383 | pgd_t *user_pgd = xen_get_user_pgd(pgd); | |
1384 | ||
1385 | if (user_pgd) | |
1386 | free_page((unsigned long)user_pgd); | |
1387 | #endif | |
1388 | } | |
1389 | ||
ee176455 | 1390 | #ifdef CONFIG_X86_32 |
3f508953 | 1391 | static pte_t __init mask_rw_pte(pte_t *ptep, pte_t pte) |
1f4f9315 JF |
1392 | { |
1393 | /* If there's an existing pte, then don't allow _PAGE_RW to be set */ | |
1394 | if (pte_val_ma(*ptep) & _PAGE_PRESENT) | |
1395 | pte = __pte_ma(((pte_val_ma(*ptep) & _PAGE_RW) | ~_PAGE_RW) & | |
1396 | pte_val_ma(pte)); | |
ee176455 SS |
1397 | |
1398 | return pte; | |
1399 | } | |
1400 | #else /* CONFIG_X86_64 */ | |
3f508953 | 1401 | static pte_t __init mask_rw_pte(pte_t *ptep, pte_t pte) |
ee176455 SS |
1402 | { |
1403 | unsigned long pfn = pte_pfn(pte); | |
fef5ba79 JF |
1404 | |
1405 | /* | |
1406 | * If the new pfn is within the range of the newly allocated | |
1407 | * kernel pagetable, and it isn't being mapped into an | |
d8aa5ec3 SS |
1408 | * early_ioremap fixmap slot as a freshly allocated page, make sure |
1409 | * it is RO. | |
fef5ba79 | 1410 | */ |
d8aa5ec3 | 1411 | if (((!is_early_ioremap_ptep(ptep) && |
b9269dc7 | 1412 | pfn >= pgt_buf_start && pfn < pgt_buf_top)) || |
d8aa5ec3 | 1413 | (is_early_ioremap_ptep(ptep) && pfn != (pgt_buf_end - 1))) |
fef5ba79 | 1414 | pte = pte_wrprotect(pte); |
1f4f9315 JF |
1415 | |
1416 | return pte; | |
1417 | } | |
ee176455 | 1418 | #endif /* CONFIG_X86_64 */ |
1f4f9315 JF |
1419 | |
1420 | /* Init-time set_pte while constructing initial pagetables, which | |
1421 | doesn't allow RO pagetable pages to be remapped RW */ | |
3f508953 | 1422 | static void __init xen_set_pte_init(pte_t *ptep, pte_t pte) |
1f4f9315 JF |
1423 | { |
1424 | pte = mask_rw_pte(ptep, pte); | |
1425 | ||
1426 | xen_set_pte(ptep, pte); | |
1427 | } | |
319f3ba5 | 1428 | |
b96229b5 JF |
1429 | static void pin_pagetable_pfn(unsigned cmd, unsigned long pfn) |
1430 | { | |
1431 | struct mmuext_op op; | |
1432 | op.cmd = cmd; | |
1433 | op.arg1.mfn = pfn_to_mfn(pfn); | |
1434 | if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF)) | |
1435 | BUG(); | |
1436 | } | |
1437 | ||
319f3ba5 JF |
1438 | /* Early in boot, while setting up the initial pagetable, assume |
1439 | everything is pinned. */ | |
3f508953 | 1440 | static void __init xen_alloc_pte_init(struct mm_struct *mm, unsigned long pfn) |
319f3ba5 | 1441 | { |
b96229b5 JF |
1442 | #ifdef CONFIG_FLATMEM |
1443 | BUG_ON(mem_map); /* should only be used early */ | |
1444 | #endif | |
1445 | make_lowmem_page_readonly(__va(PFN_PHYS(pfn))); | |
1446 | pin_pagetable_pfn(MMUEXT_PIN_L1_TABLE, pfn); | |
1447 | } | |
1448 | ||
1449 | /* Used for pmd and pud */ | |
3f508953 | 1450 | static void __init xen_alloc_pmd_init(struct mm_struct *mm, unsigned long pfn) |
b96229b5 | 1451 | { |
319f3ba5 JF |
1452 | #ifdef CONFIG_FLATMEM |
1453 | BUG_ON(mem_map); /* should only be used early */ | |
1454 | #endif | |
1455 | make_lowmem_page_readonly(__va(PFN_PHYS(pfn))); | |
1456 | } | |
1457 | ||
1458 | /* Early release_pte assumes that all pts are pinned, since there's | |
1459 | only init_mm and anything attached to that is pinned. */ | |
3f508953 | 1460 | static void __init xen_release_pte_init(unsigned long pfn) |
319f3ba5 | 1461 | { |
b96229b5 | 1462 | pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, pfn); |
319f3ba5 JF |
1463 | make_lowmem_page_readwrite(__va(PFN_PHYS(pfn))); |
1464 | } | |
1465 | ||
3f508953 | 1466 | static void __init xen_release_pmd_init(unsigned long pfn) |
319f3ba5 | 1467 | { |
b96229b5 | 1468 | make_lowmem_page_readwrite(__va(PFN_PHYS(pfn))); |
319f3ba5 JF |
1469 | } |
1470 | ||
bc7fe1d9 JF |
1471 | static inline void __pin_pagetable_pfn(unsigned cmd, unsigned long pfn) |
1472 | { | |
1473 | struct multicall_space mcs; | |
1474 | struct mmuext_op *op; | |
1475 | ||
1476 | mcs = __xen_mc_entry(sizeof(*op)); | |
1477 | op = mcs.args; | |
1478 | op->cmd = cmd; | |
1479 | op->arg1.mfn = pfn_to_mfn(pfn); | |
1480 | ||
1481 | MULTI_mmuext_op(mcs.mc, mcs.args, 1, NULL, DOMID_SELF); | |
1482 | } | |
1483 | ||
1484 | static inline void __set_pfn_prot(unsigned long pfn, pgprot_t prot) | |
1485 | { | |
1486 | struct multicall_space mcs; | |
1487 | unsigned long addr = (unsigned long)__va(pfn << PAGE_SHIFT); | |
1488 | ||
1489 | mcs = __xen_mc_entry(0); | |
1490 | MULTI_update_va_mapping(mcs.mc, (unsigned long)addr, | |
1491 | pfn_pte(pfn, prot), 0); | |
1492 | } | |
1493 | ||
319f3ba5 JF |
1494 | /* This needs to make sure the new pte page is pinned iff it's being | |
1495 | attached to a pinned pagetable. */ | |
bc7fe1d9 JF |
1496 | static inline void xen_alloc_ptpage(struct mm_struct *mm, unsigned long pfn, |
1497 | unsigned level) | |
319f3ba5 | 1498 | { |
bc7fe1d9 JF |
1499 | bool pinned = PagePinned(virt_to_page(mm->pgd)); |
1500 | ||
c2ba050d | 1501 | trace_xen_mmu_alloc_ptpage(mm, pfn, level, pinned); |
319f3ba5 | 1502 | |
c2ba050d | 1503 | if (pinned) { |
bc7fe1d9 | 1504 | struct page *page = pfn_to_page(pfn); |
319f3ba5 | 1505 | |
319f3ba5 JF |
1506 | SetPagePinned(page); |
1507 | ||
319f3ba5 | 1508 | if (!PageHighMem(page)) { |
bc7fe1d9 JF |
1509 | xen_mc_batch(); |
1510 | ||
1511 | __set_pfn_prot(pfn, PAGE_KERNEL_RO); | |
1512 | ||
319f3ba5 | 1513 | if (level == PT_PTE && USE_SPLIT_PTLOCKS) |
bc7fe1d9 JF |
1514 | __pin_pagetable_pfn(MMUEXT_PIN_L1_TABLE, pfn); |
1515 | ||
1516 | xen_mc_issue(PARAVIRT_LAZY_MMU); | |
319f3ba5 JF |
1517 | } else { |
1518 | /* make sure there are no stray mappings of | |
1519 | this page */ | |
1520 | kmap_flush_unused(); | |
1521 | } | |
1522 | } | |
1523 | } | |
1524 | ||
1525 | static void xen_alloc_pte(struct mm_struct *mm, unsigned long pfn) | |
1526 | { | |
1527 | xen_alloc_ptpage(mm, pfn, PT_PTE); | |
1528 | } | |
1529 | ||
1530 | static void xen_alloc_pmd(struct mm_struct *mm, unsigned long pfn) | |
1531 | { | |
1532 | xen_alloc_ptpage(mm, pfn, PT_PMD); | |
1533 | } | |
1534 | ||
1535 | /* This should never happen until we're OK to use struct page */ | |
bc7fe1d9 | 1536 | static inline void xen_release_ptpage(unsigned long pfn, unsigned level) |
319f3ba5 JF |
1537 | { |
1538 | struct page *page = pfn_to_page(pfn); | |
c2ba050d | 1539 | bool pinned = PagePinned(page); |
319f3ba5 | 1540 | |
c2ba050d | 1541 | trace_xen_mmu_release_ptpage(pfn, level, pinned); |
319f3ba5 | 1542 | |
c2ba050d | 1543 | if (pinned) { |
319f3ba5 | 1544 | if (!PageHighMem(page)) { |
bc7fe1d9 JF |
1545 | xen_mc_batch(); |
1546 | ||
319f3ba5 | 1547 | if (level == PT_PTE && USE_SPLIT_PTLOCKS) |
bc7fe1d9 JF |
1548 | __pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, pfn); |
1549 | ||
1550 | __set_pfn_prot(pfn, PAGE_KERNEL); | |
1551 | ||
1552 | xen_mc_issue(PARAVIRT_LAZY_MMU); | |
319f3ba5 JF |
1553 | } |
1554 | ClearPagePinned(page); | |
1555 | } | |
1556 | } | |
1557 | ||
1558 | static void xen_release_pte(unsigned long pfn) | |
1559 | { | |
1560 | xen_release_ptpage(pfn, PT_PTE); | |
1561 | } | |
1562 | ||
1563 | static void xen_release_pmd(unsigned long pfn) | |
1564 | { | |
1565 | xen_release_ptpage(pfn, PT_PMD); | |
1566 | } | |
1567 | ||
1568 | #if PAGETABLE_LEVELS == 4 | |
1569 | static void xen_alloc_pud(struct mm_struct *mm, unsigned long pfn) | |
1570 | { | |
1571 | xen_alloc_ptpage(mm, pfn, PT_PUD); | |
1572 | } | |
1573 | ||
1574 | static void xen_release_pud(unsigned long pfn) | |
1575 | { | |
1576 | xen_release_ptpage(pfn, PT_PUD); | |
1577 | } | |
1578 | #endif | |
1579 | ||
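/*
 * On 32-bit, Xen itself lives at the top of the virtual address space
 * (from HYPERVISOR_VIRT_START upwards), so ask the kernel to keep its
 * fixmap and friends below that boundary.
 */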
1580 | void __init xen_reserve_top(void) | |
1581 | { | |
1582 | #ifdef CONFIG_X86_32 | |
1583 | unsigned long top = HYPERVISOR_VIRT_START; | |
1584 | struct xen_platform_parameters pp; | |
1585 | ||
1586 | if (HYPERVISOR_xen_version(XENVER_platform_parameters, &pp) == 0) | |
1587 | top = pp.virt_start; | |
1588 | ||
1589 | reserve_top_address(-top); | |
1590 | #endif /* CONFIG_X86_32 */ | |
1591 | } | |
1592 | ||
1593 | /* | |
1594 | * Like __va(), but returns address in the kernel mapping (which is | |
1595 | * all we have until the physical memory mapping has been set up). | |
1596 | */ | |
1597 | static void *__ka(phys_addr_t paddr) | |
1598 | { | |
1599 | #ifdef CONFIG_X86_64 | |
1600 | return (void *)(paddr + __START_KERNEL_map); | |
1601 | #else | |
1602 | return __va(paddr); | |
1603 | #endif | |
1604 | } | |
1605 | ||
1606 | /* Convert a machine address to physical address */ | |
1607 | static unsigned long m2p(phys_addr_t maddr) | |
1608 | { | |
1609 | phys_addr_t paddr; | |
1610 | ||
1611 | maddr &= PTE_PFN_MASK; | |
1612 | paddr = mfn_to_pfn(maddr >> PAGE_SHIFT) << PAGE_SHIFT; | |
1613 | ||
1614 | return paddr; | |
1615 | } | |
1616 | ||
1617 | /* Convert a machine address to kernel virtual */ | |
1618 | static void *m2v(phys_addr_t maddr) | |
1619 | { | |
1620 | return __ka(m2p(maddr)); | |
1621 | } | |
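/*
 * Example (for illustration): pagetable links handed to us by Xen hold
 * machine addresses, so following them means converting each link back
 * to a kernel virtual address first, e.g.
 *
 *	pud_t *l3 = m2v(pgd[pgd_index(__START_KERNEL_map)].pgd);
 *	pmd_t *l2 = m2v(l3[pud_index(__START_KERNEL_map)].pud);
 *
 * which is exactly how xen_setup_kernel_pagetable() below walks the
 * Xen-provided pagetable.
 */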
1622 | ||
4ec5387c | 1623 | /* Set the page permissions on an identity-mapped page */
319f3ba5 JF |
1624 | static void set_page_prot(void *addr, pgprot_t prot) |
1625 | { | |
1626 | unsigned long pfn = __pa(addr) >> PAGE_SHIFT; | |
1627 | pte_t pte = pfn_pte(pfn, prot); | |
1628 | ||
1629 | if (HYPERVISOR_update_va_mapping((unsigned long)addr, pte, 0)) | |
1630 | BUG(); | |
1631 | } | |
caaf9ecf | 1632 | #ifdef CONFIG_X86_32 |
3f508953 | 1633 | static void __init xen_map_identity_early(pmd_t *pmd, unsigned long max_pfn) |
319f3ba5 JF |
1634 | { |
1635 | unsigned pmdidx, pteidx; | |
1636 | unsigned ident_pte; | |
1637 | unsigned long pfn; | |
1638 | ||
764f0138 JF |
1639 | level1_ident_pgt = extend_brk(sizeof(pte_t) * LEVEL1_IDENT_ENTRIES, |
1640 | PAGE_SIZE); | |
1641 | ||
319f3ba5 JF |
1642 | ident_pte = 0; |
1643 | pfn = 0; | |
1644 | for (pmdidx = 0; pmdidx < PTRS_PER_PMD && pfn < max_pfn; pmdidx++) { | |
1645 | pte_t *pte_page; | |
1646 | ||
1647 | /* Reuse or allocate a page of ptes */ | |
1648 | if (pmd_present(pmd[pmdidx])) | |
1649 | pte_page = m2v(pmd[pmdidx].pmd); | |
1650 | else { | |
1651 | /* Check for free pte pages */ | |
764f0138 | 1652 | if (ident_pte == LEVEL1_IDENT_ENTRIES) |
319f3ba5 JF |
1653 | break; |
1654 | ||
1655 | pte_page = &level1_ident_pgt[ident_pte]; | |
1656 | ident_pte += PTRS_PER_PTE; | |
1657 | ||
1658 | pmd[pmdidx] = __pmd(__pa(pte_page) | _PAGE_TABLE); | |
1659 | } | |
1660 | ||
1661 | /* Install mappings */ | |
1662 | for (pteidx = 0; pteidx < PTRS_PER_PTE; pteidx++, pfn++) { | |
1663 | pte_t pte; | |
1664 | ||
a91d9287 SS |
1665 | #ifdef CONFIG_X86_32 |
1666 | if (pfn > max_pfn_mapped) | |
1667 | max_pfn_mapped = pfn; | |
1668 | #endif | |
1669 | ||
319f3ba5 JF |
1670 | if (!pte_none(pte_page[pteidx])) |
1671 | continue; | |
1672 | ||
1673 | pte = pfn_pte(pfn, PAGE_KERNEL_EXEC); | |
1674 | pte_page[pteidx] = pte; | |
1675 | } | |
1676 | } | |
1677 | ||
1678 | for (pteidx = 0; pteidx < ident_pte; pteidx += PTRS_PER_PTE) | |
1679 | set_page_prot(&level1_ident_pgt[pteidx], PAGE_KERNEL_RO); | |
1680 | ||
1681 | set_page_prot(pmd, PAGE_KERNEL_RO); | |
1682 | } | |
caaf9ecf | 1683 | #endif |
7e77506a IC |
1684 | void __init xen_setup_machphys_mapping(void) |
1685 | { | |
1686 | struct xen_machphys_mapping mapping; | |
7e77506a IC |
1687 | |
1688 | if (HYPERVISOR_memory_op(XENMEM_machphys_mapping, &mapping) == 0) { | |
1689 | machine_to_phys_mapping = (unsigned long *)mapping.v_start; | |
ccbcdf7c | 1690 | machine_to_phys_nr = mapping.max_mfn + 1; |
7e77506a | 1691 | } else { |
ccbcdf7c | 1692 | machine_to_phys_nr = MACH2PHYS_NR_ENTRIES; |
7e77506a | 1693 | } |
ccbcdf7c | 1694 | #ifdef CONFIG_X86_32 |
61cca2fa JB |
1695 | WARN_ON((machine_to_phys_mapping + (machine_to_phys_nr - 1)) |
1696 | < machine_to_phys_mapping); | |
ccbcdf7c | 1697 | #endif |
7e77506a IC |
1698 | } |
1699 | ||
319f3ba5 JF |
1700 | #ifdef CONFIG_X86_64 |
1701 | static void convert_pfn_mfn(void *v) | |
1702 | { | |
1703 | pte_t *pte = v; | |
1704 | int i; | |
1705 | ||
1706 | /* All levels are converted the same way, so just treat them | |
1707 | as ptes. */ | |
1708 | for (i = 0; i < PTRS_PER_PTE; i++) | |
1709 | pte[i] = xen_make_pte(pte[i].pte); | |
1710 | } | |
488f046d KRW |
1711 | static void __init check_pt_base(unsigned long *pt_base, unsigned long *pt_end, |
1712 | unsigned long addr) | |
1713 | { | |
1714 | if (*pt_base == PFN_DOWN(__pa(addr))) { | |
1715 | set_page_prot((void *)addr, PAGE_KERNEL); | |
1716 | clear_page((void *)addr); | |
1717 | (*pt_base)++; | |
1718 | } | |
1719 | if (*pt_end == PFN_DOWN(__pa(addr))) { | |
1720 | set_page_prot((void *)addr, PAGE_KERNEL); | |
1721 | clear_page((void *)addr); | |
1722 | (*pt_end)--; | |
1723 | } | |
1724 | } | |
319f3ba5 | 1725 | /* |
0d2eb44f | 1726 | * Set up the initial kernel pagetable. |
319f3ba5 JF |
1727 | * |
1728 | * We can construct this by grafting the Xen provided pagetable into | |
1729 | * head_64.S's preconstructed pagetables. We copy the Xen L2's into | |
1730 | * level2_ident_pgt, level2_kernel_pgt and level2_fixmap_pgt. This | |
1731 | * means that only the kernel has a physical mapping to start with - | |
1732 | * but that's enough to get __va working. We need to fill in the rest | |
1733 | * of the physical mapping once some sort of allocator has been set | |
1734 | * up. | |
1735 | */ | |
3699aad0 | 1736 | void __init xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn) |
319f3ba5 JF |
1737 | { |
1738 | pud_t *l3; | |
1739 | pmd_t *l2; | |
488f046d KRW |
1740 | unsigned long addr[3]; |
1741 | unsigned long pt_base, pt_end; | |
1742 | unsigned i; | |
319f3ba5 | 1743 | |
14988a4d SS |
1744 | /* max_pfn_mapped is the last pfn mapped in the initial memory |
1745 | * mappings. On Xen, the mappings that follow the kernel cover some
1746 | * pages that don't exist in pfn space, so we set max_pfn_mapped to
1747 | * the last real pfn mapped. */
1748 | max_pfn_mapped = PFN_DOWN(__pa(xen_start_info->mfn_list)); | |
1749 | ||
488f046d KRW |
1750 | pt_base = PFN_DOWN(__pa(xen_start_info->pt_base)); |
1751 | pt_end = pt_base + xen_start_info->nr_pt_frames; | |
1752 | ||
319f3ba5 JF |
1753 | /* Zap identity mapping */ |
1754 | init_level4_pgt[0] = __pgd(0); | |
1755 | ||
1756 | /* Pre-constructed entries are in pfn, so convert to mfn */ | |
4fac153a KRW |
1757 | /* L4[272] -> level3_ident_pgt |
1758 | * L4[511] -> level3_kernel_pgt */ | |
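	/* (The indices follow from the x86-64 virtual layout:
	 * 272 == pgd_index(PAGE_OFFSET) for the direct map and
	 * 511 == pgd_index(__START_KERNEL_map) for the kernel mapping.) */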
319f3ba5 | 1759 | convert_pfn_mfn(init_level4_pgt); |
4fac153a KRW |
1760 | |
1761 | /* L3_i[0] -> level2_ident_pgt */ | |
319f3ba5 | 1762 | convert_pfn_mfn(level3_ident_pgt); |
4fac153a KRW |
1763 | /* L3_k[510] -> level2_kernel_pgt |
1764 | * L3_k[511] -> level2_fixmap_pgt */ | |
319f3ba5 JF |
1765 | convert_pfn_mfn(level3_kernel_pgt); |
1766 | ||
4fac153a | 1767 | /* We get [511][511] and have Xen's version of level2_kernel_pgt */ |
319f3ba5 JF |
1768 | l3 = m2v(pgd[pgd_index(__START_KERNEL_map)].pgd); |
1769 | l2 = m2v(l3[pud_index(__START_KERNEL_map)].pud); | |
1770 | ||
488f046d KRW |
1771 | addr[0] = (unsigned long)pgd; |
1772 | addr[1] = (unsigned long)l3; | |
1773 | addr[2] = (unsigned long)l2; | |
4fac153a KRW |
1774 | /* Graft it onto L4[272][0]. Note that we are creating an aliasing problem:
1775 | * Both L4[272][0] and L4[511][511] have entries that point to the same | |
1776 | * L2 (PMD) tables. Meaning that if you modify it in __va space | |
1777 | * it will also be modified in the __ka space! (But if you just
1778 | * modify the PMD table to point to other PTEs or none, then you
1779 | * are OK - which is what cleanup_highmap does) */ | |
ae895ed7 | 1780 | copy_page(level2_ident_pgt, l2); |
4fac153a | 1781 | /* Graft it onto L4[511][511] */ |
ae895ed7 | 1782 | copy_page(level2_kernel_pgt, l2); |
319f3ba5 | 1783 | |
4fac153a | 1784 | /* Get [511][510] and graft that in level2_fixmap_pgt */ |
319f3ba5 JF |
1785 | l3 = m2v(pgd[pgd_index(__START_KERNEL_map + PMD_SIZE)].pgd); |
1786 | l2 = m2v(l3[pud_index(__START_KERNEL_map + PMD_SIZE)].pud); | |
ae895ed7 | 1787 | copy_page(level2_fixmap_pgt, l2); |
4fac153a KRW |
1788 | /* Note that we don't do anything with level1_fixmap_pgt which |
1789 | * we don't need. */ | |
319f3ba5 | 1790 | |
319f3ba5 JF |
1791 | /* Make pagetable pieces RO */ |
1792 | set_page_prot(init_level4_pgt, PAGE_KERNEL_RO); | |
1793 | set_page_prot(level3_ident_pgt, PAGE_KERNEL_RO); | |
1794 | set_page_prot(level3_kernel_pgt, PAGE_KERNEL_RO); | |
1795 | set_page_prot(level3_user_vsyscall, PAGE_KERNEL_RO); | |
caaf9ecf | 1796 | set_page_prot(level2_ident_pgt, PAGE_KERNEL_RO); |
319f3ba5 JF |
1797 | set_page_prot(level2_kernel_pgt, PAGE_KERNEL_RO); |
1798 | set_page_prot(level2_fixmap_pgt, PAGE_KERNEL_RO); | |
1799 | ||
1800 | /* Pin down new L4 */ | |
1801 | pin_pagetable_pfn(MMUEXT_PIN_L4_TABLE, | |
1802 | PFN_DOWN(__pa_symbol(init_level4_pgt))); | |
1803 | ||
1804 | /* Unpin Xen-provided one */ | |
1805 | pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, PFN_DOWN(__pa(pgd))); | |
1806 | ||
319f3ba5 JF |
1807 | /* |
1808 | * At this stage there can be no user pgd, and no page | |
1809 | * structure to attach it to, so make sure we just set kernel | |
1810 | * pgd. | |
1811 | */ | |
1812 | xen_mc_batch(); | |
488f046d | 1813 | __xen_write_cr3(true, __pa(init_level4_pgt)); |
319f3ba5 JF |
1814 | xen_mc_issue(PARAVIRT_LAZY_CPU); |
1815 | ||
488f046d KRW |
1816 | /* We can't easily rip out the L3 and L2, as the Xen pagetables are
1817 | * set out this way: [L4], [L1], [L2], [L3], [L1], [L1] ... for | |
1818 | * the initial domain. For guests using the toolstack, they are in: | |
1819 | * [L4], [L3], [L2], [L1], [L1], in that order. So for dom0 we can only
1820 | * rip out the [L4] (pgd), but for guests we shave off three pages. | |
1821 | */ | |
1822 | for (i = 0; i < ARRAY_SIZE(addr); i++) | |
1823 | check_pt_base(&pt_base, &pt_end, addr[i]); | |
1824 | ||
1825 | /* Reserve what remains of the Xen-provided pagetable (now up to three pages smaller) */
1826 | memblock_reserve(PFN_PHYS(pt_base), (pt_end - pt_base) * PAGE_SIZE); | |
319f3ba5 JF |
1827 | } |
1828 | #else /* !CONFIG_X86_64 */ | |
5b5c1af1 IC |
1829 | static RESERVE_BRK_ARRAY(pmd_t, initial_kernel_pmd, PTRS_PER_PMD); |
1830 | static RESERVE_BRK_ARRAY(pmd_t, swapper_kernel_pmd, PTRS_PER_PMD); | |
1831 | ||
3f508953 | 1832 | static void __init xen_write_cr3_init(unsigned long cr3) |
5b5c1af1 IC |
1833 | { |
1834 | unsigned long pfn = PFN_DOWN(__pa(swapper_pg_dir)); | |
1835 | ||
1836 | BUG_ON(read_cr3() != __pa(initial_page_table)); | |
1837 | BUG_ON(cr3 != __pa(swapper_pg_dir)); | |
1838 | ||
1839 | /* | |
1840 | * We are switching to swapper_pg_dir for the first time (from | |
1841 | * initial_page_table) and therefore need to mark that page | |
1842 | * read-only and then pin it. | |
1843 | * | |
1844 | * Xen disallows sharing of kernel PMDs for PAE | |
1845 | * guests. Therefore we must copy the kernel PMD from | |
1846 | * initial_page_table into a new kernel PMD to be used in | |
1847 | * swapper_pg_dir. | |
1848 | */ | |
1849 | swapper_kernel_pmd = | |
1850 | extend_brk(sizeof(pmd_t) * PTRS_PER_PMD, PAGE_SIZE); | |
ae895ed7 | 1851 | copy_page(swapper_kernel_pmd, initial_kernel_pmd); |
5b5c1af1 IC |
1852 | swapper_pg_dir[KERNEL_PGD_BOUNDARY] = |
1853 | __pgd(__pa(swapper_kernel_pmd) | _PAGE_PRESENT); | |
1854 | set_page_prot(swapper_kernel_pmd, PAGE_KERNEL_RO); | |
1855 | ||
1856 | set_page_prot(swapper_pg_dir, PAGE_KERNEL_RO); | |
1857 | xen_write_cr3(cr3); | |
1858 | pin_pagetable_pfn(MMUEXT_PIN_L3_TABLE, pfn); | |
1859 | ||
1860 | pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, | |
1861 | PFN_DOWN(__pa(initial_page_table))); | |
1862 | set_page_prot(initial_page_table, PAGE_KERNEL); | |
1863 | set_page_prot(initial_kernel_pmd, PAGE_KERNEL); | |
1864 | ||
1865 | pv_mmu_ops.write_cr3 = &xen_write_cr3; | |
1866 | } | |
319f3ba5 | 1867 | |
3699aad0 | 1868 | void __init xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn) |
319f3ba5 JF |
1869 | { |
1870 | pmd_t *kernel_pmd; | |
1871 | ||
5b5c1af1 IC |
1872 | initial_kernel_pmd = |
1873 | extend_brk(sizeof(pmd_t) * PTRS_PER_PMD, PAGE_SIZE); | |
f0991802 | 1874 | |
a91d9287 SS |
1875 | max_pfn_mapped = PFN_DOWN(__pa(xen_start_info->pt_base) + |
1876 | xen_start_info->nr_pt_frames * PAGE_SIZE + | |
1877 | 512*1024); | |
319f3ba5 JF |
1878 | |
1879 | kernel_pmd = m2v(pgd[KERNEL_PGD_BOUNDARY].pgd); | |
ae895ed7 | 1880 | copy_page(initial_kernel_pmd, kernel_pmd); |
319f3ba5 | 1881 | |
5b5c1af1 | 1882 | xen_map_identity_early(initial_kernel_pmd, max_pfn); |
319f3ba5 | 1883 | |
ae895ed7 | 1884 | copy_page(initial_page_table, pgd); |
5b5c1af1 IC |
1885 | initial_page_table[KERNEL_PGD_BOUNDARY] = |
1886 | __pgd(__pa(initial_kernel_pmd) | _PAGE_PRESENT); | |
319f3ba5 | 1887 | |
5b5c1af1 IC |
1888 | set_page_prot(initial_kernel_pmd, PAGE_KERNEL_RO); |
1889 | set_page_prot(initial_page_table, PAGE_KERNEL_RO); | |
319f3ba5 JF |
1890 | set_page_prot(empty_zero_page, PAGE_KERNEL_RO); |
1891 | ||
1892 | pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, PFN_DOWN(__pa(pgd))); | |
1893 | ||
5b5c1af1 IC |
1894 | pin_pagetable_pfn(MMUEXT_PIN_L3_TABLE, |
1895 | PFN_DOWN(__pa(initial_page_table))); | |
1896 | xen_write_cr3(__pa(initial_page_table)); | |
319f3ba5 | 1897 | |
24aa0788 | 1898 | memblock_reserve(__pa(xen_start_info->pt_base), |
dc6821e0 | 1899 | xen_start_info->nr_pt_frames * PAGE_SIZE); |
319f3ba5 JF |
1900 | } |
1901 | #endif /* CONFIG_X86_64 */ | |
1902 | ||
98511f35 JF |
1903 | static unsigned char dummy_mapping[PAGE_SIZE] __page_aligned_bss; |
1904 | ||
3b3809ac | 1905 | static void xen_set_fixmap(unsigned idx, phys_addr_t phys, pgprot_t prot) |
319f3ba5 JF |
1906 | { |
1907 | pte_t pte; | |
1908 | ||
1909 | phys >>= PAGE_SHIFT; | |
1910 | ||
1911 | switch (idx) { | |
1912 | case FIX_BTMAP_END ... FIX_BTMAP_BEGIN: | |
1913 | #ifdef CONFIG_X86_F00F_BUG | |
1914 | case FIX_F00F_IDT: | |
1915 | #endif | |
1916 | #ifdef CONFIG_X86_32 | |
1917 | case FIX_WP_TEST: | |
1918 | case FIX_VDSO: | |
1919 | # ifdef CONFIG_HIGHMEM | |
1920 | case FIX_KMAP_BEGIN ... FIX_KMAP_END: | |
1921 | # endif | |
1922 | #else | |
1923 | case VSYSCALL_LAST_PAGE ... VSYSCALL_FIRST_PAGE: | |
5d5791af | 1924 | case VVAR_PAGE: |
319f3ba5 | 1925 | #endif |
3ecb1b7d JF |
1926 | case FIX_TEXT_POKE0: |
1927 | case FIX_TEXT_POKE1: | |
1928 | /* All local page mappings */ | |
319f3ba5 JF |
1929 | pte = pfn_pte(phys, prot); |
1930 | break; | |
1931 | ||
98511f35 JF |
1932 | #ifdef CONFIG_X86_LOCAL_APIC |
1933 | case FIX_APIC_BASE: /* maps dummy local APIC */ | |
1934 | pte = pfn_pte(PFN_DOWN(__pa(dummy_mapping)), PAGE_KERNEL); | |
1935 | break; | |
1936 | #endif | |
1937 | ||
1938 | #ifdef CONFIG_X86_IO_APIC | |
1939 | case FIX_IO_APIC_BASE_0 ... FIX_IO_APIC_BASE_END: | |
1940 | /* | |
1941 | * We just don't map the IO APIC - all access is via | |
1942 | * hypercalls. Map it to the same dummy page as the local APIC above.
1943 | */ | |
27abd14b | 1944 | pte = pfn_pte(PFN_DOWN(__pa(dummy_mapping)), PAGE_KERNEL); |
98511f35 JF |
1945 | break; |
1946 | #endif | |
1947 | ||
c0011dbf JF |
1948 | case FIX_PARAVIRT_BOOTMAP: |
1949 | /* This is an MFN, but it isn't an IO mapping from the | |
1950 | IO domain */ | |
319f3ba5 JF |
1951 | pte = mfn_pte(phys, prot); |
1952 | break; | |
c0011dbf JF |
1953 | |
1954 | default: | |
1955 | /* By default, set_fixmap is used for hardware mappings */ | |
1956 | pte = mfn_pte(phys, __pgprot(pgprot_val(prot) | _PAGE_IOMAP)); | |
1957 | break; | |
319f3ba5 JF |
1958 | } |
1959 | ||
1960 | __native_set_fixmap(idx, pte); | |
1961 | ||
1962 | #ifdef CONFIG_X86_64 | |
1963 | /* Replicate changes to map the vsyscall page into the user | |
1964 | pagetable vsyscall mapping. */ | |
5d5791af AL |
1965 | if ((idx >= VSYSCALL_LAST_PAGE && idx <= VSYSCALL_FIRST_PAGE) || |
1966 | idx == VVAR_PAGE) { | |
319f3ba5 JF |
1967 | unsigned long vaddr = __fix_to_virt(idx); |
1968 | set_pte_vaddr_pud(level3_user_vsyscall, vaddr, pte); | |
1969 | } | |
1970 | #endif | |
1971 | } | |
1972 | ||
3f508953 | 1973 | static void __init xen_post_allocator_init(void) |
319f3ba5 JF |
1974 | { |
1975 | pv_mmu_ops.set_pte = xen_set_pte; | |
1976 | pv_mmu_ops.set_pmd = xen_set_pmd; | |
1977 | pv_mmu_ops.set_pud = xen_set_pud; | |
1978 | #if PAGETABLE_LEVELS == 4 | |
1979 | pv_mmu_ops.set_pgd = xen_set_pgd; | |
1980 | #endif | |
1981 | ||
1982 | /* This will work as long as patching hasn't happened yet | |
1983 | (which it hasn't) */ | |
1984 | pv_mmu_ops.alloc_pte = xen_alloc_pte; | |
1985 | pv_mmu_ops.alloc_pmd = xen_alloc_pmd; | |
1986 | pv_mmu_ops.release_pte = xen_release_pte; | |
1987 | pv_mmu_ops.release_pmd = xen_release_pmd; | |
1988 | #if PAGETABLE_LEVELS == 4 | |
1989 | pv_mmu_ops.alloc_pud = xen_alloc_pud; | |
1990 | pv_mmu_ops.release_pud = xen_release_pud; | |
1991 | #endif | |
1992 | ||
1993 | #ifdef CONFIG_X86_64 | |
1994 | SetPagePinned(virt_to_page(level3_user_vsyscall)); | |
1995 | #endif | |
1996 | xen_mark_init_mm_pinned(); | |
1997 | } | |
1998 | ||
b407fc57 JF |
1999 | static void xen_leave_lazy_mmu(void) |
2000 | { | |
5caecb94 | 2001 | preempt_disable(); |
b407fc57 JF |
2002 | xen_mc_flush(); |
2003 | paravirt_leave_lazy_mmu(); | |
5caecb94 | 2004 | preempt_enable(); |
b407fc57 | 2005 | } |
319f3ba5 | 2006 | |
3f508953 | 2007 | static const struct pv_mmu_ops xen_mmu_ops __initconst = { |
319f3ba5 JF |
2008 | .read_cr2 = xen_read_cr2, |
2009 | .write_cr2 = xen_write_cr2, | |
2010 | ||
2011 | .read_cr3 = xen_read_cr3, | |
5b5c1af1 IC |
2012 | #ifdef CONFIG_X86_32 |
2013 | .write_cr3 = xen_write_cr3_init, | |
2014 | #else | |
319f3ba5 | 2015 | .write_cr3 = xen_write_cr3, |
5b5c1af1 | 2016 | #endif |
319f3ba5 JF |
2017 | |
2018 | .flush_tlb_user = xen_flush_tlb, | |
2019 | .flush_tlb_kernel = xen_flush_tlb, | |
2020 | .flush_tlb_single = xen_flush_tlb_single, | |
2021 | .flush_tlb_others = xen_flush_tlb_others, | |
2022 | ||
2023 | .pte_update = paravirt_nop, | |
2024 | .pte_update_defer = paravirt_nop, | |
2025 | ||
2026 | .pgd_alloc = xen_pgd_alloc, | |
2027 | .pgd_free = xen_pgd_free, | |
2028 | ||
2029 | .alloc_pte = xen_alloc_pte_init, | |
2030 | .release_pte = xen_release_pte_init, | |
b96229b5 | 2031 | .alloc_pmd = xen_alloc_pmd_init, |
b96229b5 | 2032 | .release_pmd = xen_release_pmd_init, |
319f3ba5 | 2033 | |
319f3ba5 | 2034 | .set_pte = xen_set_pte_init, |
319f3ba5 JF |
2035 | .set_pte_at = xen_set_pte_at, |
2036 | .set_pmd = xen_set_pmd_hyper, | |
2037 | ||
2038 | .ptep_modify_prot_start = __ptep_modify_prot_start, | |
2039 | .ptep_modify_prot_commit = __ptep_modify_prot_commit, | |
2040 | ||
da5de7c2 JF |
2041 | .pte_val = PV_CALLEE_SAVE(xen_pte_val), |
2042 | .pgd_val = PV_CALLEE_SAVE(xen_pgd_val), | |
319f3ba5 | 2043 | |
da5de7c2 JF |
2044 | .make_pte = PV_CALLEE_SAVE(xen_make_pte), |
2045 | .make_pgd = PV_CALLEE_SAVE(xen_make_pgd), | |
319f3ba5 JF |
2046 | |
2047 | #ifdef CONFIG_X86_PAE | |
2048 | .set_pte_atomic = xen_set_pte_atomic, | |
319f3ba5 JF |
2049 | .pte_clear = xen_pte_clear, |
2050 | .pmd_clear = xen_pmd_clear, | |
2051 | #endif /* CONFIG_X86_PAE */ | |
2052 | .set_pud = xen_set_pud_hyper, | |
2053 | ||
da5de7c2 JF |
2054 | .make_pmd = PV_CALLEE_SAVE(xen_make_pmd), |
2055 | .pmd_val = PV_CALLEE_SAVE(xen_pmd_val), | |
319f3ba5 JF |
2056 | |
2057 | #if PAGETABLE_LEVELS == 4 | |
da5de7c2 JF |
2058 | .pud_val = PV_CALLEE_SAVE(xen_pud_val), |
2059 | .make_pud = PV_CALLEE_SAVE(xen_make_pud), | |
319f3ba5 JF |
2060 | .set_pgd = xen_set_pgd_hyper, |
2061 | ||
b96229b5 JF |
2062 | .alloc_pud = xen_alloc_pmd_init, |
2063 | .release_pud = xen_release_pmd_init, | |
319f3ba5 JF |
2064 | #endif /* PAGETABLE_LEVELS == 4 */ |
2065 | ||
2066 | .activate_mm = xen_activate_mm, | |
2067 | .dup_mmap = xen_dup_mmap, | |
2068 | .exit_mmap = xen_exit_mmap, | |
2069 | ||
2070 | .lazy_mode = { | |
2071 | .enter = paravirt_enter_lazy_mmu, | |
b407fc57 | 2072 | .leave = xen_leave_lazy_mmu, |
319f3ba5 JF |
2073 | }, |
2074 | ||
2075 | .set_fixmap = xen_set_fixmap, | |
2076 | }; | |
2077 | ||
030cb6c0 TG |
2078 | void __init xen_init_mmu_ops(void) |
2079 | { | |
279b706b | 2080 | x86_init.mapping.pagetable_reserve = xen_mapping_pagetable_reserve; |
030cb6c0 TG |
2081 | x86_init.paging.pagetable_setup_start = xen_pagetable_setup_start; |
2082 | x86_init.paging.pagetable_setup_done = xen_pagetable_setup_done; | |
2083 | pv_mmu_ops = xen_mmu_ops; | |
d2cb2145 | 2084 | |
98511f35 | 2085 | memset(dummy_mapping, 0xff, PAGE_SIZE); |
030cb6c0 | 2086 | } |
319f3ba5 | 2087 | |
08bbc9da AN |
2088 | /* Protected by xen_reservation_lock. */ |
2089 | #define MAX_CONTIG_ORDER 9 /* 2MB */ | |
2090 | static unsigned long discontig_frames[1<<MAX_CONTIG_ORDER]; | |
2091 | ||
2092 | #define VOID_PTE (mfn_pte(0, __pgprot(0))) | |
2093 | static void xen_zap_pfn_range(unsigned long vaddr, unsigned int order, | |
2094 | unsigned long *in_frames, | |
2095 | unsigned long *out_frames) | |
2096 | { | |
2097 | int i; | |
2098 | struct multicall_space mcs; | |
2099 | ||
2100 | xen_mc_batch(); | |
2101 | for (i = 0; i < (1UL<<order); i++, vaddr += PAGE_SIZE) { | |
2102 | mcs = __xen_mc_entry(0); | |
2103 | ||
2104 | if (in_frames) | |
2105 | in_frames[i] = virt_to_mfn(vaddr); | |
2106 | ||
2107 | MULTI_update_va_mapping(mcs.mc, vaddr, VOID_PTE, 0); | |
6eaa412f | 2108 | __set_phys_to_machine(virt_to_pfn(vaddr), INVALID_P2M_ENTRY); |
08bbc9da AN |
2109 | |
2110 | if (out_frames) | |
2111 | out_frames[i] = virt_to_pfn(vaddr); | |
2112 | } | |
2113 | xen_mc_issue(0); | |
2114 | } | |
2115 | ||
2116 | /* | |
2117 | * Update the pfn-to-mfn mappings for a virtual address range, either to | |
2118 | * point to an array of mfns, or contiguously from a single starting | |
2119 | * mfn. | |
2120 | */ | |
2121 | static void xen_remap_exchanged_ptes(unsigned long vaddr, int order, | |
2122 | unsigned long *mfns, | |
2123 | unsigned long first_mfn) | |
2124 | { | |
2125 | unsigned i, limit; | |
2126 | unsigned long mfn; | |
2127 | ||
2128 | xen_mc_batch(); | |
2129 | ||
2130 | limit = 1u << order; | |
2131 | for (i = 0; i < limit; i++, vaddr += PAGE_SIZE) { | |
2132 | struct multicall_space mcs; | |
2133 | unsigned flags; | |
2134 | ||
2135 | mcs = __xen_mc_entry(0); | |
2136 | if (mfns) | |
2137 | mfn = mfns[i]; | |
2138 | else | |
2139 | mfn = first_mfn + i; | |
2140 | ||
2141 | if (i < (limit - 1)) | |
2142 | flags = 0; | |
2143 | else { | |
2144 | if (order == 0) | |
2145 | flags = UVMF_INVLPG | UVMF_ALL; | |
2146 | else | |
2147 | flags = UVMF_TLB_FLUSH | UVMF_ALL; | |
2148 | } | |
2149 | ||
2150 | MULTI_update_va_mapping(mcs.mc, vaddr, | |
2151 | mfn_pte(mfn, PAGE_KERNEL), flags); | |
2152 | ||
2153 | set_phys_to_machine(virt_to_pfn(vaddr), mfn); | |
2154 | } | |
2155 | ||
2156 | xen_mc_issue(0); | |
2157 | } | |
2158 | ||
2159 | /* | |
2160 | * Perform the hypercall to exchange a region of our pfns to point to | |
2161 | * memory with the required contiguous alignment. Takes the pfns as | |
2162 | * input, and populates mfns as output. | |
2163 | * | |
2164 | * Returns a success code indicating whether the hypervisor was able to | |
2165 | * satisfy the request or not. | |
2166 | */ | |
2167 | static int xen_exchange_memory(unsigned long extents_in, unsigned int order_in, | |
2168 | unsigned long *pfns_in, | |
2169 | unsigned long extents_out, | |
2170 | unsigned int order_out, | |
2171 | unsigned long *mfns_out, | |
2172 | unsigned int address_bits) | |
2173 | { | |
2174 | long rc; | |
2175 | int success; | |
2176 | ||
2177 | struct xen_memory_exchange exchange = { | |
2178 | .in = { | |
2179 | .nr_extents = extents_in, | |
2180 | .extent_order = order_in, | |
2181 | .extent_start = pfns_in, | |
2182 | .domid = DOMID_SELF | |
2183 | }, | |
2184 | .out = { | |
2185 | .nr_extents = extents_out, | |
2186 | .extent_order = order_out, | |
2187 | .extent_start = mfns_out, | |
2188 | .address_bits = address_bits, | |
2189 | .domid = DOMID_SELF | |
2190 | } | |
2191 | }; | |
2192 | ||
2193 | BUG_ON(extents_in << order_in != extents_out << order_out); | |
2194 | ||
2195 | rc = HYPERVISOR_memory_op(XENMEM_exchange, &exchange); | |
2196 | success = (exchange.nr_exchanged == extents_in); | |
2197 | ||
2198 | BUG_ON(!success && ((exchange.nr_exchanged != 0) || (rc == 0))); | |
2199 | BUG_ON(success && (rc != 0)); | |
2200 | ||
2201 | return success; | |
2202 | } | |
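/*
 * Worked example (numbers for illustration): making an order-9 (2MB)
 * region contiguous trades 512 single frames for one order-9 extent,
 * i.e. extents_in = 512, order_in = 0, extents_out = 1, order_out = 9,
 * so both sides of the BUG_ON above cover the same 512 frames.
 */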
2203 | ||
2204 | int xen_create_contiguous_region(unsigned long vstart, unsigned int order, | |
2205 | unsigned int address_bits) | |
2206 | { | |
2207 | unsigned long *in_frames = discontig_frames, out_frame; | |
2208 | unsigned long flags; | |
2209 | int success; | |
2210 | ||
2211 | /* | |
2212 | * Currently an auto-translated guest will not perform I/O, nor will | |
2213 | * it require PAE page directories below 4GB. Therefore any calls to | |
2214 | * this function are redundant and can be ignored. | |
2215 | */ | |
2216 | ||
2217 | if (xen_feature(XENFEAT_auto_translated_physmap)) | |
2218 | return 0; | |
2219 | ||
2220 | if (unlikely(order > MAX_CONTIG_ORDER)) | |
2221 | return -ENOMEM; | |
2222 | ||
2223 | memset((void *) vstart, 0, PAGE_SIZE << order); | |
2224 | ||
08bbc9da AN |
2225 | spin_lock_irqsave(&xen_reservation_lock, flags); |
2226 | ||
2227 | /* 1. Zap current PTEs, remembering MFNs. */ | |
2228 | xen_zap_pfn_range(vstart, order, in_frames, NULL); | |
2229 | ||
2230 | /* 2. Get a new contiguous memory extent. */ | |
2231 | out_frame = virt_to_pfn(vstart); | |
2232 | success = xen_exchange_memory(1UL << order, 0, in_frames, | |
2233 | 1, order, &out_frame, | |
2234 | address_bits); | |
2235 | ||
2236 | /* 3. Map the new extent in place of old pages. */ | |
2237 | if (success) | |
2238 | xen_remap_exchanged_ptes(vstart, order, NULL, out_frame); | |
2239 | else | |
2240 | xen_remap_exchanged_ptes(vstart, order, in_frames, 0); | |
2241 | ||
2242 | spin_unlock_irqrestore(&xen_reservation_lock, flags); | |
2243 | ||
2244 | return success ? 0 : -ENOMEM; | |
2245 | } | |
2246 | EXPORT_SYMBOL_GPL(xen_create_contiguous_region); | |
2247 | ||
2248 | void xen_destroy_contiguous_region(unsigned long vstart, unsigned int order) | |
2249 | { | |
2250 | unsigned long *out_frames = discontig_frames, in_frame; | |
2251 | unsigned long flags; | |
2252 | int success; | |
2253 | ||
2254 | if (xen_feature(XENFEAT_auto_translated_physmap)) | |
2255 | return; | |
2256 | ||
2257 | if (unlikely(order > MAX_CONTIG_ORDER)) | |
2258 | return; | |
2259 | ||
2260 | memset((void *) vstart, 0, PAGE_SIZE << order); | |
2261 | ||
08bbc9da AN |
2262 | spin_lock_irqsave(&xen_reservation_lock, flags); |
2263 | ||
2264 | /* 1. Find start MFN of contiguous extent. */ | |
2265 | in_frame = virt_to_mfn(vstart); | |
2266 | ||
2267 | /* 2. Zap current PTEs. */ | |
2268 | xen_zap_pfn_range(vstart, order, NULL, out_frames); | |
2269 | ||
2270 | /* 3. Do the exchange for non-contiguous MFNs. */ | |
2271 | success = xen_exchange_memory(1, order, &in_frame, 1UL << order, | |
2272 | 0, out_frames, 0); | |
2273 | ||
2274 | /* 4. Map new pages in place of old pages. */ | |
2275 | if (success) | |
2276 | xen_remap_exchanged_ptes(vstart, order, out_frames, 0); | |
2277 | else | |
2278 | xen_remap_exchanged_ptes(vstart, order, NULL, in_frame); | |
2279 | ||
2280 | spin_unlock_irqrestore(&xen_reservation_lock, flags); | |
030cb6c0 | 2281 | } |
08bbc9da | 2282 | EXPORT_SYMBOL_GPL(xen_destroy_contiguous_region); |
319f3ba5 | 2283 | |
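/*
 * Illustrative sketch (not part of this file): a driver that needs a
 * machine-contiguous, DMA-addressable buffer could pair an ordinary page
 * allocation with the two helpers above.  The demo_* names are
 * hypothetical and error handling is minimal.
 */
static void *demo_alloc_contig(unsigned int order, unsigned int address_bits)
{
	unsigned long vstart = __get_free_pages(GFP_KERNEL, order);

	if (!vstart)
		return NULL;

	/* Swap the backing frames for one machine-contiguous extent
	   addressable within address_bits. */
	if (xen_create_contiguous_region(vstart, order, address_bits)) {
		free_pages(vstart, order);
		return NULL;
	}
	return (void *)vstart;
}

static void demo_free_contig(void *buf, unsigned int order)
{
	/* Hand the contiguous extent back before freeing the pages. */
	xen_destroy_contiguous_region((unsigned long)buf, order);
	free_pages((unsigned long)buf, order);
}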
ca65f9fc | 2284 | #ifdef CONFIG_XEN_PVHVM |
59151001 SS |
2285 | static void xen_hvm_exit_mmap(struct mm_struct *mm) |
2286 | { | |
2287 | struct xen_hvm_pagetable_dying a; | |
2288 | int rc; | |
2289 | ||
2290 | a.domid = DOMID_SELF; | |
2291 | a.gpa = __pa(mm->pgd); | |
2292 | rc = HYPERVISOR_hvm_op(HVMOP_pagetable_dying, &a); | |
2293 | WARN_ON_ONCE(rc < 0); | |
2294 | } | |
2295 | ||
2296 | static int is_pagetable_dying_supported(void) | |
2297 | { | |
2298 | struct xen_hvm_pagetable_dying a; | |
2299 | int rc = 0; | |
2300 | ||
2301 | a.domid = DOMID_SELF; | |
2302 | a.gpa = 0x00; | |
2303 | rc = HYPERVISOR_hvm_op(HVMOP_pagetable_dying, &a); | |
2304 | if (rc < 0) { | |
2305 | printk(KERN_DEBUG "HVMOP_pagetable_dying not supported\n"); | |
2306 | return 0; | |
2307 | } | |
2308 | return 1; | |
2309 | } | |
2310 | ||
2311 | void __init xen_hvm_init_mmu_ops(void) | |
2312 | { | |
2313 | if (is_pagetable_dying_supported()) | |
2314 | pv_mmu_ops.exit_mmap = xen_hvm_exit_mmap; | |
2315 | } | |
ca65f9fc | 2316 | #endif |
59151001 | 2317 | |
de1ef206 IC |
2318 | #define REMAP_BATCH_SIZE 16 |
2319 | ||
2320 | struct remap_data { | |
2321 | unsigned long mfn; | |
2322 | pgprot_t prot; | |
2323 | struct mmu_update *mmu_update; | |
2324 | }; | |
2325 | ||
2326 | static int remap_area_mfn_pte_fn(pte_t *ptep, pgtable_t token, | |
2327 | unsigned long addr, void *data) | |
2328 | { | |
2329 | struct remap_data *rmd = data; | |
2330 | pte_t pte = pte_mkspecial(pfn_pte(rmd->mfn++, rmd->prot)); | |
2331 | ||
d5108316 | 2332 | rmd->mmu_update->ptr = virt_to_machine(ptep).maddr; |
de1ef206 IC |
2333 | rmd->mmu_update->val = pte_val_ma(pte); |
2334 | rmd->mmu_update++; | |
2335 | ||
2336 | return 0; | |
2337 | } | |
2338 | ||
2339 | int xen_remap_domain_mfn_range(struct vm_area_struct *vma, | |
2340 | unsigned long addr, | |
2341 | unsigned long mfn, int nr, | |
2342 | pgprot_t prot, unsigned domid) | |
2343 | { | |
2344 | struct remap_data rmd; | |
2345 | struct mmu_update mmu_update[REMAP_BATCH_SIZE]; | |
2346 | int batch; | |
2347 | unsigned long range; | |
2348 | int err = 0; | |
2349 | ||
2350 | prot = __pgprot(pgprot_val(prot) | _PAGE_IOMAP); | |
2351 | ||
e060e7af SS |
2352 | BUG_ON(!((vma->vm_flags & (VM_PFNMAP | VM_RESERVED | VM_IO)) == |
2353 | (VM_PFNMAP | VM_RESERVED | VM_IO))); | |
de1ef206 IC |
2354 | |
2355 | rmd.mfn = mfn; | |
2356 | rmd.prot = prot; | |
2357 | ||
2358 | while (nr) { | |
2359 | batch = min(REMAP_BATCH_SIZE, nr); | |
2360 | range = (unsigned long)batch << PAGE_SHIFT; | |
2361 | ||
2362 | rmd.mmu_update = mmu_update; | |
2363 | err = apply_to_page_range(vma->vm_mm, addr, range, | |
2364 | remap_area_mfn_pte_fn, &rmd); | |
2365 | if (err) | |
2366 | goto out; | |
2367 | ||
2368 | err = -EFAULT; | |
2369 | if (HYPERVISOR_mmu_update(mmu_update, batch, NULL, domid) < 0) | |
2370 | goto out; | |
2371 | ||
2372 | nr -= batch; | |
2373 | addr += range; | |
2374 | } | |
2375 | ||
2376 | err = 0; | |
2377 | out: | |
2378 | ||
2379 | flush_tlb_all(); | |
2380 | ||
2381 | return err; | |
2382 | } | |
2383 | EXPORT_SYMBOL_GPL(xen_remap_domain_mfn_range); |
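/*
 * Illustrative sketch (not part of this file): a privileged driver
 * (privcmd is the in-tree user) could map a run of another domain's
 * machine frames into a userspace VMA like this.  demo_map_foreign and
 * its arguments are hypothetical.
 */
static int demo_map_foreign(struct vm_area_struct *vma,
			    unsigned long first_mfn, unsigned domid)
{
	int nr = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;

	/* xen_remap_domain_mfn_range() insists on these VMA flags. */
	vma->vm_flags |= VM_IO | VM_RESERVED | VM_PFNMAP;

	return xen_remap_domain_mfn_range(vma, vma->vm_start, first_mfn,
					  nr, vma->vm_page_prot, domid);
}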