/*
 * Core of Xen paravirt_ops implementation.
 *
 * This file contains the xen_paravirt_ops structure itself, and the
 * implementations for:
 * - privileged instructions
 * - interrupt flags
 * - segment operations
 * - booting and setup
 *
 * Jeremy Fitzhardinge <jeremy@xensource.com>, XenSource Inc, 2007
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/smp.h>
#include <linux/preempt.h>
#include <linux/hardirq.h>
#include <linux/percpu.h>
#include <linux/delay.h>
#include <linux/start_kernel.h>
#include <linux/sched.h>
#include <linux/bootmem.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/page-flags.h>
#include <linux/highmem.h>

#include <xen/interface/xen.h>
#include <xen/interface/physdev.h>
#include <xen/interface/vcpu.h>
#include <xen/interface/sched.h>
#include <xen/features.h>
#include <xen/page.h>

#include <asm/paravirt.h>
#include <asm/page.h>
#include <asm/xen/hypercall.h>
#include <asm/xen/hypervisor.h>
#include <asm/fixmap.h>
#include <asm/processor.h>
#include <asm/setup.h>
#include <asm/desc.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/reboot.h>

#include "xen-ops.h"
#include "mmu.h"
#include "multicalls.h"

EXPORT_SYMBOL_GPL(hypercall_page);

DEFINE_PER_CPU(enum paravirt_lazy_mode, xen_lazy_mode);

DEFINE_PER_CPU(struct vcpu_info *, xen_vcpu);
DEFINE_PER_CPU(struct vcpu_info, xen_vcpu_info);
DEFINE_PER_CPU(unsigned long, xen_cr3);

struct start_info *xen_start_info;
EXPORT_SYMBOL_GPL(xen_start_info);

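/* Point this cpu's xen_vcpu at its vcpu_info slot in the shared info
   page, so the rest of the code can reach it directly. */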
void xen_vcpu_setup(int cpu)
{
	per_cpu(xen_vcpu, cpu) = &HYPERVISOR_shared_info->vcpu_info[cpu];
}

static void __init xen_banner(void)
{
	printk(KERN_INFO "Booting paravirtualized kernel on %s\n",
	       paravirt_ops.name);
	printk(KERN_INFO "Hypervisor signature: %s\n", xen_start_info->magic);
}

static void xen_cpuid(unsigned int *eax, unsigned int *ebx,
		      unsigned int *ecx, unsigned int *edx)
{
	unsigned maskedx = ~0;

	/*
	 * Mask out inconvenient features, to try and disable as many
	 * unsupported kernel subsystems as possible.
	 */
	if (*eax == 1)
		maskedx = ~((1 << X86_FEATURE_APIC) |  /* disable APIC */
			    (1 << X86_FEATURE_ACPI) |  /* disable ACPI */
			    (1 << X86_FEATURE_ACC));   /* thermal monitoring */

	asm(XEN_EMULATE_PREFIX "cpuid"
		: "=a" (*eax),
		  "=b" (*ebx),
		  "=c" (*ecx),
		  "=d" (*edx)
		: "0" (*eax), "2" (*ecx));
	*edx &= maskedx;
}

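/* Debug registers are privileged state, so reads and writes go
   through hypercalls instead of mov to/from %db. */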
static void xen_set_debugreg(int reg, unsigned long val)
{
	HYPERVISOR_set_debugreg(reg, val);
}

static unsigned long xen_get_debugreg(int reg)
{
	return HYPERVISOR_get_debugreg(reg);
}

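/* Interrupt-flag emulation: event delivery is gated by
   evtchn_upcall_mask in the per-vcpu info, not by EFLAGS.IF. */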
static unsigned long xen_save_fl(void)
{
	struct vcpu_info *vcpu;
	unsigned long flags;

	vcpu = x86_read_percpu(xen_vcpu);

	/* flag has opposite sense of mask */
	flags = !vcpu->evtchn_upcall_mask;

	/* convert to IF type flag
	   -0 -> 0x00000000
	   -1 -> 0xffffffff
	*/
	return (-flags) & X86_EFLAGS_IF;
}

static void xen_restore_fl(unsigned long flags)
{
	struct vcpu_info *vcpu;

	/* convert from IF type flag */
	flags = !(flags & X86_EFLAGS_IF);

	/* There's a one instruction preempt window here.  We need to
	   make sure we don't switch CPUs between getting the vcpu
	   pointer and updating the mask. */
	preempt_disable();
	vcpu = x86_read_percpu(xen_vcpu);
	vcpu->evtchn_upcall_mask = flags;
	preempt_enable_no_resched();

	/* Doesn't matter if we get preempted here, because any
	   pending event will get dealt with anyway. */

	if (flags == 0) {
		preempt_check_resched();
		barrier(); /* unmask then check (avoid races) */
		if (unlikely(vcpu->evtchn_upcall_pending))
			force_evtchn_callback();
	}
}

static void xen_irq_disable(void)
{
	/* There's a one instruction preempt window here.  We need to
	   make sure we don't switch CPUs between getting the vcpu
	   pointer and updating the mask. */
	preempt_disable();
	x86_read_percpu(xen_vcpu)->evtchn_upcall_mask = 1;
	preempt_enable_no_resched();
}

static void xen_irq_enable(void)
{
	struct vcpu_info *vcpu;

	/* There's a one instruction preempt window here.  We need to
	   make sure we don't switch CPUs between getting the vcpu
	   pointer and updating the mask. */
	preempt_disable();
	vcpu = x86_read_percpu(xen_vcpu);
	vcpu->evtchn_upcall_mask = 0;
	preempt_enable_no_resched();

	/* Doesn't matter if we get preempted here, because any
	   pending event will get dealt with anyway. */

	barrier(); /* unmask then check (avoid races) */
	if (unlikely(vcpu->evtchn_upcall_pending))
		force_evtchn_callback();
}

static void xen_safe_halt(void)
{
	/* Blocking includes an implicit local_irq_enable(). */
	if (HYPERVISOR_sched_op(SCHEDOP_block, 0) != 0)
		BUG();
}

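/* If events are disabled, nothing can wake a blocked vcpu, so take
   it offline instead; otherwise block until the next event. */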
static void xen_halt(void)
{
	if (irqs_disabled())
		HYPERVISOR_vcpu_op(VCPUOP_down, smp_processor_id(), NULL);
	else
		xen_safe_halt();
}

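/* Lazy modes batch pagetable/context-switch updates in the multicall
   buffer; mode transitions flush whatever is pending. */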
static void xen_set_lazy_mode(enum paravirt_lazy_mode mode)
{
	BUG_ON(preemptible());

	switch (mode) {
	case PARAVIRT_LAZY_NONE:
		BUG_ON(x86_read_percpu(xen_lazy_mode) == PARAVIRT_LAZY_NONE);
		break;

	case PARAVIRT_LAZY_MMU:
	case PARAVIRT_LAZY_CPU:
		BUG_ON(x86_read_percpu(xen_lazy_mode) != PARAVIRT_LAZY_NONE);
		break;

	case PARAVIRT_LAZY_FLUSH:
		/* flush if necessary, but don't change state */
		if (x86_read_percpu(xen_lazy_mode) != PARAVIRT_LAZY_NONE)
			xen_mc_flush();
		return;
	}

	xen_mc_flush();
	x86_write_percpu(xen_lazy_mode, mode);
}

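/* Xen manages the TSS/task register on the guest's behalf; there is
   no meaningful value to read back, so report 0. */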
static unsigned long xen_store_tr(void)
{
	return 0;
}

static void xen_set_ldt(const void *addr, unsigned entries)
{
	unsigned long linear_addr = (unsigned long)addr;
	struct mmuext_op *op;
	struct multicall_space mcs = xen_mc_entry(sizeof(*op));

	op = mcs.args;
	op->cmd = MMUEXT_SET_LDT;
	if (linear_addr) {
		/* ldt may be vmalloced, use arbitrary_virt_to_machine */
		xmaddr_t maddr;
		maddr = arbitrary_virt_to_machine((unsigned long)addr);
		linear_addr = (unsigned long)maddr.maddr;
	}
	op->arg1.linear_addr = linear_addr;
	op->arg2.nr_ents = entries;

	MULTI_mmuext_op(mcs.mc, op, 1, NULL, DOMID_SELF);

	xen_mc_issue(PARAVIRT_LAZY_CPU);
}

static void xen_load_gdt(const struct Xgt_desc_struct *dtr)
{
	unsigned long *frames;
	unsigned long va = dtr->address;
	unsigned int size = dtr->size + 1;
	unsigned pages = (size + PAGE_SIZE - 1) / PAGE_SIZE;
	int f;
	struct multicall_space mcs;

	/* A GDT can be up to 64k in size, which corresponds to 8192
	   8-byte entries, or 16 4k pages. */

	BUG_ON(size > 65536);
	BUG_ON(va & ~PAGE_MASK);

	mcs = xen_mc_entry(sizeof(*frames) * pages);
	frames = mcs.args;

	for (f = 0; va < dtr->address + size; va += PAGE_SIZE, f++) {
		frames[f] = virt_to_mfn(va);
		make_lowmem_page_readonly((void *)va);
	}

	MULTI_set_gdt(mcs.mc, frames, size / sizeof(struct desc_struct));

	xen_mc_issue(PARAVIRT_LAZY_CPU);
}

static void load_TLS_descriptor(struct thread_struct *t,
				unsigned int cpu, unsigned int i)
{
	struct desc_struct *gdt = get_cpu_gdt_table(cpu);
	xmaddr_t maddr = virt_to_machine(&gdt[GDT_ENTRY_TLS_MIN+i]);
	struct multicall_space mc = __xen_mc_entry(0);

	MULTI_update_descriptor(mc.mc, maddr.maddr, t->tls_array[i]);
}

static void xen_load_tls(struct thread_struct *t, unsigned int cpu)
{
	xen_mc_batch();

	load_TLS_descriptor(t, cpu, 0);
	load_TLS_descriptor(t, cpu, 1);
	load_TLS_descriptor(t, cpu, 2);

	xen_mc_issue(PARAVIRT_LAZY_CPU);

	/*
	 * XXX sleazy hack: If we're being called in a lazy-cpu zone,
	 * it means we're in a context switch, and %gs has just been
	 * saved.  This means we can zero it out to prevent faults on
	 * exit from the hypervisor if the next process has no %gs.
	 * Either way, it has been saved, and the new value will get
	 * loaded properly.  This will go away as soon as Xen has been
	 * modified to not save/restore %gs for normal hypercalls.
	 */
	if (xen_get_lazy_mode() == PARAVIRT_LAZY_CPU)
		loadsegment(gs, 0);
}

static void xen_write_ldt_entry(struct desc_struct *dt, int entrynum,
				u32 low, u32 high)
{
	unsigned long lp = (unsigned long)&dt[entrynum];
	xmaddr_t mach_lp = virt_to_machine(lp);
	u64 entry = (u64)high << 32 | low;

	preempt_disable();

	xen_mc_flush();
	if (HYPERVISOR_update_descriptor(mach_lp.maddr, entry))
		BUG();

	preempt_enable();
}

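/* Convert an IDT gate into Xen's trap_info format.  Only trap gates
   (0xf) and interrupt gates (0xe) can be expressed; anything else is
   skipped, and the return value says whether *info was filled in. */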
static int cvt_gate_to_trap(int vector, u32 low, u32 high,
			    struct trap_info *info)
{
	u8 type, dpl;

	type = (high >> 8) & 0x1f;
	dpl = (high >> 13) & 3;

	if (type != 0xf && type != 0xe)
		return 0;

	info->vector = vector;
	info->address = (high & 0xffff0000) | (low & 0x0000ffff);
	info->cs = low >> 16;
	info->flags = dpl;
	/* interrupt gates clear IF */
	if (type == 0xe)
		info->flags |= 4;

	return 1;
}

/* Locations of each CPU's IDT */
static DEFINE_PER_CPU(struct Xgt_desc_struct, idt_desc);

/* Set an IDT entry.  If the entry is part of the current IDT, then
   also update Xen. */
static void xen_write_idt_entry(struct desc_struct *dt, int entrynum,
				u32 low, u32 high)
{
	unsigned long p = (unsigned long)&dt[entrynum];
	unsigned long start, end;

	preempt_disable();

	start = __get_cpu_var(idt_desc).address;
	end = start + __get_cpu_var(idt_desc).size + 1;

	xen_mc_flush();

	write_dt_entry(dt, entrynum, low, high);

	if (p >= start && (p + 8) <= end) {
		struct trap_info info[2];

		info[1].address = 0;

		if (cvt_gate_to_trap(entrynum, low, high, &info[0]))
			if (HYPERVISOR_set_trap_table(info))
				BUG();
	}

	preempt_enable();
}

static void xen_convert_trap_info(const struct Xgt_desc_struct *desc,
				  struct trap_info *traps)
{
	unsigned in, out, count;

	count = (desc->size+1) / 8;
	BUG_ON(count > 256);

	for (in = out = 0; in < count; in++) {
		const u32 *entry = (u32 *)(desc->address + in * 8);

		if (cvt_gate_to_trap(in, entry[0], entry[1], &traps[out]))
			out++;
	}
	traps[out].address = 0;
}

void xen_copy_trap_info(struct trap_info *traps)
{
	const struct Xgt_desc_struct *desc = &__get_cpu_var(idt_desc);

	xen_convert_trap_info(desc, traps);
}

/* Load a new IDT into Xen.  In principle this can be per-CPU, so we
   hold a spinlock to protect the static traps[] array (static because
   it avoids allocation, and saves stack space). */
static void xen_load_idt(const struct Xgt_desc_struct *desc)
{
	static DEFINE_SPINLOCK(lock);
	static struct trap_info traps[257];

	spin_lock(&lock);

	__get_cpu_var(idt_desc) = *desc;

	xen_convert_trap_info(desc, traps);

	xen_mc_flush();
	if (HYPERVISOR_set_trap_table(traps))
		BUG();

	spin_unlock(&lock);
}

/* Write a GDT descriptor entry.  Ignore LDT descriptors, since
   they're handled differently. */
static void xen_write_gdt_entry(struct desc_struct *dt, int entry,
				u32 low, u32 high)
{
	preempt_disable();

	switch ((high >> 8) & 0xff) {
	case DESCTYPE_LDT:
	case DESCTYPE_TSS:
		/* ignore */
		break;

	default: {
		xmaddr_t maddr = virt_to_machine(&dt[entry]);
		u64 desc = (u64)high << 32 | low;

		xen_mc_flush();
		if (HYPERVISOR_update_descriptor(maddr.maddr, desc))
			BUG();
	}

	}

	preempt_enable();
}

static void xen_load_esp0(struct tss_struct *tss,
			  struct thread_struct *thread)
{
	struct multicall_space mcs = xen_mc_entry(0);
	MULTI_stack_switch(mcs.mc, __KERNEL_DS, thread->esp0);
	xen_mc_issue(PARAVIRT_LAZY_CPU);
}

static void xen_set_iopl_mask(unsigned mask)
{
	struct physdev_set_iopl set_iopl;

	/* Force the change at ring 0. */
	set_iopl.iopl = (mask == 0) ? 1 : (mask >> 12) & 3;
	HYPERVISOR_physdev_op(PHYSDEVOP_set_iopl, &set_iopl);
}

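/* A paravirtualized guest has no need for the traditional port-0x80
   I/O delay, so this hook is deliberately empty. */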
static void xen_io_delay(void)
{
}

#ifdef CONFIG_X86_LOCAL_APIC
static unsigned long xen_apic_read(unsigned long reg)
{
	return 0;
}

static void xen_apic_write(unsigned long reg, unsigned long val)
{
	/* Warn to see if there's any stray references */
	WARN_ON(1);
}
#endif

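/* TLB flushes are queued as mmuext operations, so they can be batched
   with other updates while in lazy MMU mode. */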
static void xen_flush_tlb(void)
{
	struct mmuext_op *op;
	struct multicall_space mcs = xen_mc_entry(sizeof(*op));

	op = mcs.args;
	op->cmd = MMUEXT_TLB_FLUSH_LOCAL;
	MULTI_mmuext_op(mcs.mc, op, 1, NULL, DOMID_SELF);

	xen_mc_issue(PARAVIRT_LAZY_MMU);
}

static void xen_flush_tlb_single(unsigned long addr)
{
	struct mmuext_op *op;
	struct multicall_space mcs = xen_mc_entry(sizeof(*op));

	op = mcs.args;
	op->cmd = MMUEXT_INVLPG_LOCAL;
	op->arg1.linear_addr = addr & PAGE_MASK;
	MULTI_mmuext_op(mcs.mc, op, 1, NULL, DOMID_SELF);

	xen_mc_issue(PARAVIRT_LAZY_MMU);
}

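/* Remote flushes are a single MMUEXT_*_MULTI operation carrying a
   vcpu mask; the hypervisor flushes the other vcpus, so the guest
   sends no IPIs itself. */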
static void xen_flush_tlb_others(const cpumask_t *cpus, struct mm_struct *mm,
				 unsigned long va)
{
	struct {
		struct mmuext_op op;
		cpumask_t mask;
	} *args;
	cpumask_t cpumask = *cpus;
	struct multicall_space mcs;

	/*
	 * A couple of (to be removed) sanity checks:
	 *
	 * - current CPU must not be in mask
	 * - mask must exist :)
	 */
	BUG_ON(cpus_empty(cpumask));
	BUG_ON(cpu_isset(smp_processor_id(), cpumask));
	BUG_ON(!mm);

	/* If a CPU which we ran on has gone down, OK. */
	cpus_and(cpumask, cpumask, cpu_online_map);
	if (cpus_empty(cpumask))
		return;

	mcs = xen_mc_entry(sizeof(*args));
	args = mcs.args;
	args->mask = cpumask;
	args->op.arg2.vcpumask = &args->mask;

	if (va == TLB_FLUSH_ALL) {
		args->op.cmd = MMUEXT_TLB_FLUSH_MULTI;
	} else {
		args->op.cmd = MMUEXT_INVLPG_MULTI;
		args->op.arg1.linear_addr = va;
	}

	MULTI_mmuext_op(mcs.mc, &args->op, 1, NULL, DOMID_SELF);

	xen_mc_issue(PARAVIRT_LAZY_MMU);
}

static unsigned long xen_read_cr2(void)
{
	return x86_read_percpu(xen_vcpu)->arch.cr2;
}

static void xen_write_cr4(unsigned long cr4)
{
	/* never allow TSC to be disabled */
	native_write_cr4(cr4 & ~X86_CR4_TSD);
}

static unsigned long xen_read_cr3(void)
{
	return x86_read_percpu(xen_cr3);
}

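/* cr3 loads become MMUEXT_NEW_BASEPTR operations; xen_cr3 caches the
   guest's idea of the current pagetable base so reads stay local. */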
static void xen_write_cr3(unsigned long cr3)
{
	BUG_ON(preemptible());

	if (cr3 == x86_read_percpu(xen_cr3)) {
		/* just a simple tlb flush */
		xen_flush_tlb();
		return;
	}

	x86_write_percpu(xen_cr3, cr3);

	{
		struct mmuext_op *op;
		struct multicall_space mcs = xen_mc_entry(sizeof(*op));
		unsigned long mfn = pfn_to_mfn(PFN_DOWN(cr3));

		op = mcs.args;
		op->cmd = MMUEXT_NEW_BASEPTR;
		op->arg1.mfn = mfn;

		MULTI_mmuext_op(mcs.mc, op, 1, NULL, DOMID_SELF);

		xen_mc_issue(PARAVIRT_LAZY_CPU);
	}
}

/* Early in boot, while setting up the initial pagetable, assume
   everything is pinned. */
static __init void xen_alloc_pt_init(struct mm_struct *mm, u32 pfn)
{
	BUG_ON(mem_map);	/* should only be used early */
	make_lowmem_page_readonly(__va(PFN_PHYS(pfn)));
}

/* This needs to make sure the new pte page is pinned iff it's being
   attached to a pinned pagetable. */
static void xen_alloc_pt(struct mm_struct *mm, u32 pfn)
{
	struct page *page = pfn_to_page(pfn);

	if (PagePinned(virt_to_page(mm->pgd))) {
		SetPagePinned(page);

		if (!PageHighMem(page))
			make_lowmem_page_readonly(__va(PFN_PHYS(pfn)));
		else
			/* make sure there are no stray mappings of
			   this page */
			kmap_flush_unused();
	}
}

/* This should never happen until we're OK to use struct page */
static void xen_release_pt(u32 pfn)
{
	struct page *page = pfn_to_page(pfn);

	if (PagePinned(page)) {
		if (!PageHighMem(page))
			make_lowmem_page_readwrite(__va(PFN_PHYS(pfn)));
	}
}

#ifdef CONFIG_HIGHPTE
static void *xen_kmap_atomic_pte(struct page *page, enum km_type type)
{
	pgprot_t prot = PAGE_KERNEL;

	if (PagePinned(page))
		prot = PAGE_KERNEL_RO;

	if (0 && PageHighMem(page))	/* disabled debugging output */
		printk("mapping highpte %lx type %d prot %s\n",
		       page_to_pfn(page), type,
		       (unsigned long)pgprot_val(prot) & _PAGE_RW ? "WRITE" : "READ");

	return kmap_atomic_prot(page, type, prot);
}
#endif

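/* During early pagetable construction, a slot that is already mapped
   read-only (e.g. a pagetable page Xen has validated) must stay that
   way: keep _PAGE_RW clear in the new value unless the existing
   mapping had it set. */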
static __init pte_t mask_rw_pte(pte_t *ptep, pte_t pte)
{
	/* If there's an existing pte, then don't allow _PAGE_RW to be set */
	if (pte_val_ma(*ptep) & _PAGE_PRESENT)
		pte = __pte_ma(((pte_val_ma(*ptep) & _PAGE_RW) | ~_PAGE_RW) &
			       pte_val_ma(pte));

	return pte;
}

/* Init-time set_pte while constructing initial pagetables, which
   doesn't allow RO pagetable pages to be remapped RW */
static __init void xen_set_pte_init(pte_t *ptep, pte_t pte)
{
	pte = mask_rw_pte(ptep, pte);

	xen_set_pte(ptep, pte);
}

static __init void xen_pagetable_setup_start(pgd_t *base)
{
	pgd_t *xen_pgd = (pgd_t *)xen_start_info->pt_base;

	/* special set_pte for pagetable initialization */
	paravirt_ops.set_pte = xen_set_pte_init;

	init_mm.pgd = base;
	/*
	 * copy top-level of Xen-supplied pagetable into place.  For
	 * !PAE we can use this as-is, but for PAE it is a stand-in
	 * while we copy the pmd pages.
	 */
	memcpy(base, xen_pgd, PTRS_PER_PGD * sizeof(pgd_t));

	if (PTRS_PER_PMD > 1) {
		int i;
		/*
		 * For PAE, need to allocate new pmds, rather than
		 * share Xen's, since Xen doesn't like pmd's being
		 * shared between address spaces.
		 */
		for (i = 0; i < PTRS_PER_PGD; i++) {
			if (pgd_val_ma(xen_pgd[i]) & _PAGE_PRESENT) {
				pmd_t *pmd = (pmd_t *)alloc_bootmem_low_pages(PAGE_SIZE);

				memcpy(pmd, (void *)pgd_page_vaddr(xen_pgd[i]),
				       PAGE_SIZE);

				make_lowmem_page_readonly(pmd);

				set_pgd(&base[i], __pgd(1 + __pa(pmd)));
			} else
				pgd_clear(&base[i]);
		}
	}

	/* make sure zero_page is mapped RO so we can use it in pagetables */
	make_lowmem_page_readonly(empty_zero_page);
	make_lowmem_page_readonly(base);
	/*
	 * Switch to new pagetable.  This is done before
	 * pagetable_init has done anything so that the new pages
	 * added to the table can be prepared properly for Xen.
	 */
	xen_write_cr3(__pa(base));
}

static __init void xen_pagetable_setup_done(pgd_t *base)
{
	/* This will work as long as patching hasn't happened yet
	   (which it hasn't) */
	paravirt_ops.alloc_pt = xen_alloc_pt;
	paravirt_ops.set_pte = xen_set_pte;

	if (!xen_feature(XENFEAT_auto_translated_physmap)) {
		/*
		 * Create a mapping for the shared info page.
		 * Should be set_fixmap(), but shared_info is a machine
		 * address with no corresponding pseudo-phys address.
		 */
		set_pte_mfn(fix_to_virt(FIX_PARAVIRT_BOOTMAP),
			    PFN_DOWN(xen_start_info->shared_info),
			    PAGE_KERNEL);

		HYPERVISOR_shared_info =
			(struct shared_info *)fix_to_virt(FIX_PARAVIRT_BOOTMAP);

	} else
		HYPERVISOR_shared_info =
			(struct shared_info *)__va(xen_start_info->shared_info);

	/* Actually pin the pagetable down, but we can't set PG_pinned
	   yet because the page structures don't exist yet. */
	{
		struct mmuext_op op;
#ifdef CONFIG_X86_PAE
		op.cmd = MMUEXT_PIN_L3_TABLE;
#else
		op.cmd = MMUEXT_PIN_L2_TABLE;
#endif
		op.arg1.mfn = pfn_to_mfn(PFN_DOWN(__pa(base)));
		if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF))
			BUG();
	}

	xen_vcpu_setup(smp_processor_id());
}

static const struct paravirt_ops xen_paravirt_ops __initdata = {
	.paravirt_enabled = 1,
	.shared_kernel_pmd = 0,

	.name = "Xen",
	.banner = xen_banner,

	.patch = paravirt_patch_default,

	.memory_setup = xen_memory_setup,
	.arch_setup = xen_arch_setup,
	.init_IRQ = xen_init_IRQ,
	.post_allocator_init = xen_mark_init_mm_pinned,

	.time_init = xen_time_init,
	.set_wallclock = xen_set_wallclock,
	.get_wallclock = xen_get_wallclock,
	.get_cpu_khz = xen_cpu_khz,
	.sched_clock = xen_sched_clock,

	.cpuid = xen_cpuid,

	.set_debugreg = xen_set_debugreg,
	.get_debugreg = xen_get_debugreg,

	.clts = native_clts,

	.read_cr0 = native_read_cr0,
	.write_cr0 = native_write_cr0,

	.read_cr2 = xen_read_cr2,
	.write_cr2 = native_write_cr2,

	.read_cr3 = xen_read_cr3,
	.write_cr3 = xen_write_cr3,

	.read_cr4 = native_read_cr4,
	.read_cr4_safe = native_read_cr4_safe,
	.write_cr4 = xen_write_cr4,

	.save_fl = xen_save_fl,
	.restore_fl = xen_restore_fl,
	.irq_disable = xen_irq_disable,
	.irq_enable = xen_irq_enable,
	.safe_halt = xen_safe_halt,
	.halt = xen_halt,
	.wbinvd = native_wbinvd,

	.read_msr = native_read_msr_safe,
	.write_msr = native_write_msr_safe,
	.read_tsc = native_read_tsc,
	.read_pmc = native_read_pmc,

	.iret = (void *)&hypercall_page[__HYPERVISOR_iret],
	.irq_enable_sysexit = NULL,  /* never called */

	.load_tr_desc = paravirt_nop,
	.set_ldt = xen_set_ldt,
	.load_gdt = xen_load_gdt,
	.load_idt = xen_load_idt,
	.load_tls = xen_load_tls,

	.store_gdt = native_store_gdt,
	.store_idt = native_store_idt,
	.store_tr = xen_store_tr,

	.write_ldt_entry = xen_write_ldt_entry,
	.write_gdt_entry = xen_write_gdt_entry,
	.write_idt_entry = xen_write_idt_entry,
	.load_esp0 = xen_load_esp0,

	.set_iopl_mask = xen_set_iopl_mask,
	.io_delay = xen_io_delay,

#ifdef CONFIG_X86_LOCAL_APIC
	.apic_write = xen_apic_write,
	.apic_write_atomic = xen_apic_write,
	.apic_read = xen_apic_read,
	.setup_boot_clock = paravirt_nop,
	.setup_secondary_clock = paravirt_nop,
	.startup_ipi_hook = paravirt_nop,
#endif

	.flush_tlb_user = xen_flush_tlb,
	.flush_tlb_kernel = xen_flush_tlb,
	.flush_tlb_single = xen_flush_tlb_single,
	.flush_tlb_others = xen_flush_tlb_others,

	.pte_update = paravirt_nop,
	.pte_update_defer = paravirt_nop,

	.pagetable_setup_start = xen_pagetable_setup_start,
	.pagetable_setup_done = xen_pagetable_setup_done,

	.alloc_pt = xen_alloc_pt_init,
	.release_pt = xen_release_pt,
	.alloc_pd = paravirt_nop,
	.alloc_pd_clone = paravirt_nop,
	.release_pd = paravirt_nop,

#ifdef CONFIG_HIGHPTE
	.kmap_atomic_pte = xen_kmap_atomic_pte,
#endif

	.set_pte = NULL,	/* see xen_pagetable_setup_* */
	.set_pte_at = xen_set_pte_at,
	.set_pmd = xen_set_pmd,

	.pte_val = xen_pte_val,
	.pgd_val = xen_pgd_val,

	.make_pte = xen_make_pte,
	.make_pgd = xen_make_pgd,

#ifdef CONFIG_X86_PAE
	.set_pte_atomic = xen_set_pte_atomic,
	.set_pte_present = xen_set_pte_at,
	.set_pud = xen_set_pud,
	.pte_clear = xen_pte_clear,
	.pmd_clear = xen_pmd_clear,

	.make_pmd = xen_make_pmd,
	.pmd_val = xen_pmd_val,
#endif	/* PAE */

	.activate_mm = xen_activate_mm,
	.dup_mmap = xen_dup_mmap,
	.exit_mmap = xen_exit_mmap,

	.set_lazy_mode = xen_set_lazy_mode,
};

#ifdef CONFIG_SMP
static const struct smp_ops xen_smp_ops __initdata = {
	.smp_prepare_boot_cpu = xen_smp_prepare_boot_cpu,
	.smp_prepare_cpus = xen_smp_prepare_cpus,
	.cpu_up = xen_cpu_up,
	.smp_cpus_done = xen_smp_cpus_done,

	.smp_send_stop = xen_smp_send_stop,
	.smp_send_reschedule = xen_smp_send_reschedule,
	.smp_call_function_mask = xen_smp_call_function_mask,
};
#endif	/* CONFIG_SMP */

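/* All shutdown paths funnel through a SCHEDOP_shutdown hypercall; the
   reason code tells the toolstack whether to reboot, power off, or
   record a crash. */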
static void xen_reboot(int reason)
{
#ifdef CONFIG_SMP
	smp_send_stop();
#endif

	if (HYPERVISOR_sched_op(SCHEDOP_shutdown, reason))
		BUG();
}

static void xen_restart(char *msg)
{
	xen_reboot(SHUTDOWN_reboot);
}

static void xen_emergency_restart(void)
{
	xen_reboot(SHUTDOWN_reboot);
}

static void xen_machine_halt(void)
{
	xen_reboot(SHUTDOWN_poweroff);
}

static void xen_crash_shutdown(struct pt_regs *regs)
{
	xen_reboot(SHUTDOWN_crash);
}

static const struct machine_ops __initdata xen_machine_ops = {
	.restart = xen_restart,
	.halt = xen_machine_halt,
	.power_off = xen_machine_halt,
	.shutdown = xen_machine_halt,
	.crash_shutdown = xen_crash_shutdown,
	.emergency_restart = xen_emergency_restart,
};

/* First C function to be called on Xen boot */
asmlinkage void __init xen_start_kernel(void)
{
	pgd_t *pgd;

	if (!xen_start_info)
		return;

	BUG_ON(memcmp(xen_start_info->magic, "xen-3.0", 7) != 0);

	/* Install Xen paravirt ops */
	paravirt_ops = xen_paravirt_ops;
	machine_ops = xen_machine_ops;

#ifdef CONFIG_SMP
	smp_ops = xen_smp_ops;
#endif

	xen_setup_features();

	/* Get mfn list */
	if (!xen_feature(XENFEAT_auto_translated_physmap))
		phys_to_machine_mapping = (unsigned long *)xen_start_info->mfn_list;

	pgd = (pgd_t *)xen_start_info->pt_base;

	init_pg_tables_end = __pa(pgd) + xen_start_info->nr_pt_frames*PAGE_SIZE;

	init_mm.pgd = pgd; /* use the Xen pagetables to start */

	/* keep using Xen gdt for now; no urgent need to change it */

	x86_write_percpu(xen_cr3, __pa(pgd));
	xen_vcpu_setup(0);

	paravirt_ops.kernel_rpl = 1;
	if (xen_feature(XENFEAT_supervisor_mode_kernel))
		paravirt_ops.kernel_rpl = 0;

	/* set the limit of our address space */
	reserve_top_address(-HYPERVISOR_VIRT_START + 2 * PAGE_SIZE);

	/* set up basic CPUID stuff */
	cpu_detect(&new_cpu_data);
	new_cpu_data.hard_math = 1;
	new_cpu_data.x86_capability[0] = cpuid_edx(1);

	/* Poke various useful things into boot_params */
	LOADER_TYPE = (9 << 4) | 0;
	INITRD_START = xen_start_info->mod_start ? __pa(xen_start_info->mod_start) : 0;
	INITRD_SIZE = xen_start_info->mod_len;

	/* Start the world */
	start_kernel();
}