/*
 * Copyright (C) 2009. SUSE Linux Products GmbH. All rights reserved.
 *
 * Authors:
 *    Alexander Graf <agraf@suse.de>
 *    Kevin Wolf <mail@kevin-wolf.de>
 *
 * Description:
 * This file is derived from arch/powerpc/kvm/44x.c,
 * by Hollis Blanchard <hollisb@us.ibm.com>.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 */

#include <linux/kvm_host.h>
#include <linux/err.h>
#include <linux/sched.h>
#include <linux/vmalloc.h>

#include <asm/reg.h>
#include <asm/cputable.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
#include <asm/mmu_context.h>

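/*
 * VCPU_STAT(x) expands to the (offset, type) pair that the generic KVM
 * debugfs code expects for a per-vcpu counter: the offset of stat.x within
 * struct kvm_vcpu plus the KVM_STAT_VCPU kind tag.
 */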
#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU

/* #define EXIT_DEBUG */
/* #define EXIT_DEBUG_SIMPLE */

struct kvm_stats_debugfs_item debugfs_entries[] = {
	{ "exits",       VCPU_STAT(sum_exits) },
	{ "mmio",        VCPU_STAT(mmio_exits) },
	{ "sig",         VCPU_STAT(signal_exits) },
	{ "sysc",        VCPU_STAT(syscall_exits) },
	{ "inst_emu",    VCPU_STAT(emulated_inst_exits) },
	{ "dec",         VCPU_STAT(dec_exits) },
	{ "ext_intr",    VCPU_STAT(ext_intr_exits) },
	{ "queue_intr",  VCPU_STAT(queue_intr) },
	{ "halt_wakeup", VCPU_STAT(halt_wakeup) },
	{ "pf_storage",  VCPU_STAT(pf_storage) },
	{ "sp_storage",  VCPU_STAT(sp_storage) },
	{ "pf_instruc",  VCPU_STAT(pf_instruc) },
	{ "sp_instruc",  VCPU_STAT(sp_instruc) },
	{ "ld",          VCPU_STAT(ld) },
	{ "ld_slow",     VCPU_STAT(ld_slow) },
	{ "st",          VCPU_STAT(st) },
	{ "st_slow",     VCPU_STAT(st_slow) },
	{ NULL }
};

void kvmppc_core_load_host_debugstate(struct kvm_vcpu *vcpu)
{
}

void kvmppc_core_load_guest_debugstate(struct kvm_vcpu *vcpu)
{
}

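/*
 * vcpu_load/vcpu_put copy the shadow SLB and shadow vcpu state into the
 * PACA and back out again, so the real-mode world switch code can reach
 * this state without having to touch the vcpu struct itself.
 */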
void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	memcpy(get_paca()->kvm_slb, to_book3s(vcpu)->slb_shadow,
	       sizeof(get_paca()->kvm_slb));
	memcpy(&get_paca()->shadow_vcpu, &to_book3s(vcpu)->shadow_vcpu,
	       sizeof(get_paca()->shadow_vcpu));
	get_paca()->kvm_slb_max = to_book3s(vcpu)->slb_shadow_max;
}

void kvmppc_core_vcpu_put(struct kvm_vcpu *vcpu)
{
	memcpy(to_book3s(vcpu)->slb_shadow, get_paca()->kvm_slb,
	       sizeof(get_paca()->kvm_slb));
	memcpy(&to_book3s(vcpu)->shadow_vcpu, &get_paca()->shadow_vcpu,
	       sizeof(get_paca()->shadow_vcpu));
	to_book3s(vcpu)->slb_shadow_max = get_paca()->kvm_slb_max;
}

#if defined(EXIT_DEBUG)
static u32 kvmppc_get_dec(struct kvm_vcpu *vcpu)
{
	u64 jd = mftb() - vcpu->arch.dec_jiffies;
	return vcpu->arch.dec - jd;
}
#endif

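/*
 * The shadow MSR is the MSR the guest actually runs under: the MSR_USER32
 * problem-state bits are forced on and everything outside the FP/vector,
 * debug and user-mode bits is masked away, so privileged MSR bits of the
 * guest never reach the real MSR. A guest that sets MSR_WE/MSR_POW with
 * nothing pending is simply asking to idle, so we block the vcpu.
 */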
void kvmppc_set_msr(struct kvm_vcpu *vcpu, u64 msr)
{
	ulong old_msr = vcpu->arch.msr;

#ifdef EXIT_DEBUG
	printk(KERN_INFO "KVM: Set MSR to 0x%llx\n", msr);
#endif
	msr &= to_book3s(vcpu)->msr_mask;
	vcpu->arch.msr = msr;
	vcpu->arch.shadow_msr = msr | MSR_USER32;
	vcpu->arch.shadow_msr &= (MSR_VEC | MSR_VSX | MSR_FP | MSR_FE0 |
				  MSR_USER64 | MSR_SE | MSR_BE | MSR_DE |
				  MSR_FE1);

	if (msr & (MSR_WE|MSR_POW)) {
		if (!vcpu->arch.pending_exceptions) {
			kvm_vcpu_block(vcpu);
			vcpu->stat.halt_wakeup++;
		}
	}

	if (((vcpu->arch.msr & (MSR_IR|MSR_DR)) != (old_msr & (MSR_IR|MSR_DR))) ||
	    (vcpu->arch.msr & MSR_PR) != (old_msr & MSR_PR)) {
		kvmppc_mmu_flush_segments(vcpu);
		kvmppc_mmu_map_segment(vcpu, vcpu->arch.pc);
	}
}

void kvmppc_inject_interrupt(struct kvm_vcpu *vcpu, int vec, u64 flags)
{
	vcpu->arch.srr0 = vcpu->arch.pc;
	vcpu->arch.srr1 = vcpu->arch.msr | flags;
	vcpu->arch.pc = to_book3s(vcpu)->hior + vec;
	vcpu->arch.mmu.reset_msr(vcpu);
}

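/*
 * Map an architected exception vector offset (0x100, 0x200, ...) onto the
 * BOOK3S_IRQPRIO_* bit number it is tracked under in pending_exceptions.
 * Unknown vectors fall into the catch-all BOOK3S_IRQPRIO_MAX slot.
 */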
static int kvmppc_book3s_vec2irqprio(unsigned int vec)
{
	unsigned int prio;

	switch (vec) {
	case 0x100: prio = BOOK3S_IRQPRIO_SYSTEM_RESET;  break;
	case 0x200: prio = BOOK3S_IRQPRIO_MACHINE_CHECK; break;
	case 0x300: prio = BOOK3S_IRQPRIO_DATA_STORAGE;  break;
	case 0x380: prio = BOOK3S_IRQPRIO_DATA_SEGMENT;  break;
	case 0x400: prio = BOOK3S_IRQPRIO_INST_STORAGE;  break;
	case 0x480: prio = BOOK3S_IRQPRIO_INST_SEGMENT;  break;
	case 0x500: prio = BOOK3S_IRQPRIO_EXTERNAL;      break;
	case 0x600: prio = BOOK3S_IRQPRIO_ALIGNMENT;     break;
	case 0x700: prio = BOOK3S_IRQPRIO_PROGRAM;       break;
	case 0x800: prio = BOOK3S_IRQPRIO_FP_UNAVAIL;    break;
	case 0x900: prio = BOOK3S_IRQPRIO_DECREMENTER;   break;
	case 0xc00: prio = BOOK3S_IRQPRIO_SYSCALL;       break;
	case 0xd00: prio = BOOK3S_IRQPRIO_DEBUG;         break;
	case 0xf20: prio = BOOK3S_IRQPRIO_ALTIVEC;       break;
	case 0xf40: prio = BOOK3S_IRQPRIO_VSX;           break;
	default:    prio = BOOK3S_IRQPRIO_MAX;           break;
	}

	return prio;
}

static void kvmppc_book3s_dequeue_irqprio(struct kvm_vcpu *vcpu,
					  unsigned int vec)
{
	clear_bit(kvmppc_book3s_vec2irqprio(vec),
		  &vcpu->arch.pending_exceptions);
}

void kvmppc_book3s_queue_irqprio(struct kvm_vcpu *vcpu, unsigned int vec)
{
	vcpu->stat.queue_intr++;

	set_bit(kvmppc_book3s_vec2irqprio(vec),
		&vcpu->arch.pending_exceptions);
#ifdef EXIT_DEBUG
	printk(KERN_INFO "Queueing interrupt %x\n", vec);
#endif
}

void kvmppc_core_queue_program(struct kvm_vcpu *vcpu, ulong flags)
{
	to_book3s(vcpu)->prog_flags = flags;
	kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_PROGRAM);
}

void kvmppc_core_queue_dec(struct kvm_vcpu *vcpu)
{
	kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_DECREMENTER);
}

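/*
 * The pending decrementer is tracked like any other exception: under its
 * irqprio bit in pending_exceptions, the same bit that
 * kvmppc_book3s_queue_irqprio() sets above.
 */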
int kvmppc_core_pending_dec(struct kvm_vcpu *vcpu)
{
	return test_bit(BOOK3S_IRQPRIO_DECREMENTER,
			&vcpu->arch.pending_exceptions);
}

void kvmppc_core_dequeue_dec(struct kvm_vcpu *vcpu)
{
	kvmppc_book3s_dequeue_irqprio(vcpu, BOOK3S_INTERRUPT_DECREMENTER);
}

void kvmppc_core_queue_external(struct kvm_vcpu *vcpu,
				struct kvm_interrupt *irq)
{
	kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_EXTERNAL);
}

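/*
 * Try to deliver a single queued priority: external and decrementer
 * interrupts are gated on MSR_EE in the guest MSR, everything else is
 * delivered unconditionally. Returns nonzero if the interrupt was
 * actually injected.
 */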
int kvmppc_book3s_irqprio_deliver(struct kvm_vcpu *vcpu, unsigned int priority)
{
	int deliver = 1;
	int vec = 0;
	ulong flags = 0ULL;

	switch (priority) {
	case BOOK3S_IRQPRIO_DECREMENTER:
		deliver = vcpu->arch.msr & MSR_EE;
		vec = BOOK3S_INTERRUPT_DECREMENTER;
		break;
	case BOOK3S_IRQPRIO_EXTERNAL:
		deliver = vcpu->arch.msr & MSR_EE;
		vec = BOOK3S_INTERRUPT_EXTERNAL;
		break;
	case BOOK3S_IRQPRIO_SYSTEM_RESET:
		vec = BOOK3S_INTERRUPT_SYSTEM_RESET;
		break;
	case BOOK3S_IRQPRIO_MACHINE_CHECK:
		vec = BOOK3S_INTERRUPT_MACHINE_CHECK;
		break;
	case BOOK3S_IRQPRIO_DATA_STORAGE:
		vec = BOOK3S_INTERRUPT_DATA_STORAGE;
		break;
	case BOOK3S_IRQPRIO_INST_STORAGE:
		vec = BOOK3S_INTERRUPT_INST_STORAGE;
		break;
	case BOOK3S_IRQPRIO_DATA_SEGMENT:
		vec = BOOK3S_INTERRUPT_DATA_SEGMENT;
		break;
	case BOOK3S_IRQPRIO_INST_SEGMENT:
		vec = BOOK3S_INTERRUPT_INST_SEGMENT;
		break;
	case BOOK3S_IRQPRIO_ALIGNMENT:
		vec = BOOK3S_INTERRUPT_ALIGNMENT;
		break;
	case BOOK3S_IRQPRIO_PROGRAM:
		vec = BOOK3S_INTERRUPT_PROGRAM;
		flags = to_book3s(vcpu)->prog_flags;
		break;
	case BOOK3S_IRQPRIO_VSX:
		vec = BOOK3S_INTERRUPT_VSX;
		break;
	case BOOK3S_IRQPRIO_ALTIVEC:
		vec = BOOK3S_INTERRUPT_ALTIVEC;
		break;
	case BOOK3S_IRQPRIO_FP_UNAVAIL:
		vec = BOOK3S_INTERRUPT_FP_UNAVAIL;
		break;
	case BOOK3S_IRQPRIO_SYSCALL:
		vec = BOOK3S_INTERRUPT_SYSCALL;
		break;
	case BOOK3S_IRQPRIO_DEBUG:
		vec = BOOK3S_INTERRUPT_TRACE;
		break;
	case BOOK3S_IRQPRIO_PERFORMANCE_MONITOR:
		vec = BOOK3S_INTERRUPT_PERFMON;
		break;
	default:
		deliver = 0;
		printk(KERN_ERR "KVM: Unknown interrupt: 0x%x\n", priority);
		break;
	}

#if 0
	printk(KERN_INFO "Deliver interrupt 0x%x? %x\n", vec, deliver);
#endif

	if (deliver)
		kvmppc_inject_interrupt(vcpu, vec, flags);

	return deliver;
}

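/*
 * Walk pending_exceptions from the lowest set bit upwards and inject the
 * first deliverable priority. Delivered interrupts are dequeued, with the
 * decrementer as the exception: its bit stays set until the guest reloads
 * the decrementer with mtdec.
 */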
void kvmppc_core_deliver_interrupts(struct kvm_vcpu *vcpu)
{
	unsigned long *pending = &vcpu->arch.pending_exceptions;
	unsigned int priority;

#ifdef EXIT_DEBUG
	if (vcpu->arch.pending_exceptions)
		printk(KERN_EMERG "KVM: Check pending: %lx\n",
		       vcpu->arch.pending_exceptions);
#endif
	priority = __ffs(*pending);
	while (priority <= (sizeof(unsigned int) * 8)) {
		if (kvmppc_book3s_irqprio_deliver(vcpu, priority) &&
		    (priority != BOOK3S_IRQPRIO_DECREMENTER)) {
			/* DEC interrupts get cleared by mtdec */
			clear_bit(priority, &vcpu->arch.pending_exceptions);
			break;
		}

		priority = find_next_bit(pending,
					 BITS_PER_BYTE * sizeof(*pending),
					 priority + 1);
	}
}

void kvmppc_set_pvr(struct kvm_vcpu *vcpu, u32 pvr)
{
	vcpu->arch.hflags &= ~BOOK3S_HFLAG_SLB;
	vcpu->arch.pvr = pvr;
	if ((pvr >= 0x330000) && (pvr < 0x70330000)) {
		kvmppc_mmu_book3s_64_init(vcpu);
		to_book3s(vcpu)->hior = 0xfff00000;
		to_book3s(vcpu)->msr_mask = 0xffffffffffffffffULL;
	} else {
		kvmppc_mmu_book3s_32_init(vcpu);
		to_book3s(vcpu)->hior = 0;
		to_book3s(vcpu)->msr_mask = 0xffffffffULL;
	}

	/* If we are in hypervisor level on 970, we can tell the CPU to
	 * treat DCBZ as a 32 byte store */
	vcpu->arch.hflags &= ~BOOK3S_HFLAG_DCBZ32;
	if (vcpu->arch.mmu.is_dcbz32(vcpu) && (mfmsr() & MSR_HV) &&
	    !strcmp(cur_cpu_spec->platform, "ppc970"))
		vcpu->arch.hflags |= BOOK3S_HFLAG_DCBZ32;
}

/* Book3s_32 CPUs always have 32 byte cache lines, which Linux assumes. To
 * make Book3s_32 Linux work on Book3s_64, we have to make sure we trap dcbz
 * to emulate a 32 byte dcbz length.
 *
 * The Book3s_64 designers also realized this case and implemented a special
 * bit in the HID5 register, which is a hypervisor resource. Thus we can't
 * use it.
 *
 * My approach here is to patch the dcbz instruction on executing pages.
 */
static void kvmppc_patch_dcbz(struct kvm_vcpu *vcpu, struct kvmppc_pte *pte)
{
	bool touched = false;
	hva_t hpage;
	u32 *page;
	int i;

	hpage = gfn_to_hva(vcpu->kvm, pte->raddr >> PAGE_SHIFT);
	if (kvm_is_error_hva(hpage))
		return;

	hpage |= pte->raddr & ~PAGE_MASK;
	hpage &= ~0xFFFULL;

	page = vmalloc(HW_PAGE_SIZE);
	if (!page)
		return;

	if (copy_from_user(page, (void __user *)hpage, HW_PAGE_SIZE))
		goto out;

	for (i = 0; i < HW_PAGE_SIZE / 4; i++)
		if ((page[i] & 0xff0007ff) == INS_DCBZ) {
			/* Clear a reserved bit, making dcbz an illegal
			 * instruction, so we trap on it */
			page[i] &= 0xfffffff7;
			touched = true;
		}

	if (touched)
		copy_to_user((void __user *)hpage, page, HW_PAGE_SIZE);

out:
	vfree(page);
}

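/*
 * Translate an effective address through the guest MMU if the relevant
 * relocation bit (MSR_DR for data, MSR_IR for instruction accesses) is
 * set. Otherwise fake up an identity mapping into the low 32 bits of the
 * address space and tag the virtual page with a VSID_REAL* marker, so
 * real-mode and translated mappings never alias in the shadow MMU.
 */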
static int kvmppc_xlate(struct kvm_vcpu *vcpu, ulong eaddr, bool data,
			struct kvmppc_pte *pte)
{
	int relocated = (vcpu->arch.msr & (data ? MSR_DR : MSR_IR));
	int r;

	if (relocated) {
		r = vcpu->arch.mmu.xlate(vcpu, eaddr, pte, data);
	} else {
		pte->eaddr = eaddr;
		pte->raddr = eaddr & 0xffffffff;
		pte->vpage = eaddr >> 12;
		switch (vcpu->arch.msr & (MSR_DR|MSR_IR)) {
		case 0:
			pte->vpage |= VSID_REAL;
			break;
		case MSR_DR:
			pte->vpage |= VSID_REAL_DR;
			break;
		case MSR_IR:
			pte->vpage |= VSID_REAL_IR;
			break;
		}
		pte->may_read = true;
		pte->may_write = true;
		pte->may_execute = true;
		r = 0;
	}

	return r;
}

static hva_t kvmppc_bad_hva(void)
{
	return PAGE_OFFSET;
}

static hva_t kvmppc_pte_to_hva(struct kvm_vcpu *vcpu, struct kvmppc_pte *pte,
			       bool read)
{
	hva_t hpage;

	if (read && !pte->may_read)
		goto err;

	if (!read && !pte->may_write)
		goto err;

	hpage = gfn_to_hva(vcpu->kvm, pte->raddr >> PAGE_SHIFT);
	if (kvm_is_error_hva(hpage))
		goto err;

	return hpage | (pte->raddr & ~PAGE_MASK);
err:
	return kvmppc_bad_hva();
}

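/*
 * Loads and stores done on behalf of the guest (e.g. by the instruction
 * emulator): translate the guest effective address, turn it into a host
 * virtual address and access it with copy_{to,from}_user(). Both helpers
 * return -ENOENT if the address cannot be translated or accessed.
 */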
int kvmppc_st(struct kvm_vcpu *vcpu, ulong eaddr, int size, void *ptr)
{
	struct kvmppc_pte pte;
	hva_t hva = eaddr;

	vcpu->stat.st++;

	/* A store is always a data access */
	if (kvmppc_xlate(vcpu, eaddr, true, &pte))
		goto err;

	hva = kvmppc_pte_to_hva(vcpu, &pte, false);
	if (kvm_is_error_hva(hva))
		goto err;

	if (copy_to_user((void __user *)hva, ptr, size)) {
		printk(KERN_INFO "kvmppc_st at 0x%lx failed\n", hva);
		goto err;
	}

	return 0;

err:
	return -ENOENT;
}

int kvmppc_ld(struct kvm_vcpu *vcpu, ulong eaddr, int size, void *ptr,
	      bool data)
{
	struct kvmppc_pte pte;
	hva_t hva = eaddr;

	vcpu->stat.ld++;

	if (kvmppc_xlate(vcpu, eaddr, data, &pte))
		goto err;

	hva = kvmppc_pte_to_hva(vcpu, &pte, true);
	if (kvm_is_error_hva(hva))
		goto err;

	if (copy_from_user(ptr, (void __user *)hva, size)) {
		printk(KERN_INFO "kvmppc_ld at 0x%lx failed\n", hva);
		goto err;
	}

	return 0;

err:
	return -ENOENT;
}

static int kvmppc_visible_gfn(struct kvm_vcpu *vcpu, gfn_t gfn)
{
	return kvm_is_visible_gfn(vcpu->kvm, gfn);
}

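/*
 * Resolve a guest page fault: depending on what the guest MMU lookup
 * returns, we either reflect the fault back into the guest (missing PTE,
 * protection fault or missing segment), map the page into the shadow MMU,
 * or treat the access as MMIO and hand it to the emulator.
 */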
int kvmppc_handle_pagefault(struct kvm_run *run, struct kvm_vcpu *vcpu,
			    ulong eaddr, int vec)
{
	bool data = (vec == BOOK3S_INTERRUPT_DATA_STORAGE);
	int r = RESUME_GUEST;
	int relocated;
	int page_found = 0;
	struct kvmppc_pte pte;
	bool is_mmio = false;

	if (vec == BOOK3S_INTERRUPT_DATA_STORAGE)
		relocated = (vcpu->arch.msr & MSR_DR);
	else
		relocated = (vcpu->arch.msr & MSR_IR);

	/* Resolve real address if translation turned on */
	if (relocated) {
		page_found = vcpu->arch.mmu.xlate(vcpu, eaddr, &pte, data);
	} else {
		pte.may_execute = true;
		pte.may_read = true;
		pte.may_write = true;
		pte.raddr = eaddr & 0xffffffff;
		pte.eaddr = eaddr;
		pte.vpage = eaddr >> 12;
		switch (vcpu->arch.msr & (MSR_DR|MSR_IR)) {
		case 0:
			pte.vpage |= VSID_REAL;
			break;
		case MSR_DR:
			pte.vpage |= VSID_REAL_DR;
			break;
		case MSR_IR:
			pte.vpage |= VSID_REAL_IR;
			break;
		}
	}

	if (vcpu->arch.mmu.is_dcbz32(vcpu) &&
	    (!(vcpu->arch.hflags & BOOK3S_HFLAG_DCBZ32))) {
		/*
		 * If we do the dcbz hack, we have to NX on every execution,
		 * so we can patch the executing code. This renders our guest
		 * NX-less.
		 */
		pte.may_execute = !data;
	}

	if (page_found == -ENOENT) {
		/* Page not found in guest PTE entries */
		vcpu->arch.dear = vcpu->arch.fault_dear;
		to_book3s(vcpu)->dsisr = vcpu->arch.fault_dsisr;
		vcpu->arch.msr |= (vcpu->arch.shadow_msr & 0x00000000f8000000ULL);
		kvmppc_book3s_queue_irqprio(vcpu, vec);
	} else if (page_found == -EPERM) {
		/* Storage protection */
		vcpu->arch.dear = vcpu->arch.fault_dear;
		to_book3s(vcpu)->dsisr = vcpu->arch.fault_dsisr & ~DSISR_NOHPTE;
		to_book3s(vcpu)->dsisr |= DSISR_PROTFAULT;
		vcpu->arch.msr |= (vcpu->arch.shadow_msr & 0x00000000f8000000ULL);
		kvmppc_book3s_queue_irqprio(vcpu, vec);
	} else if (page_found == -EINVAL) {
		/* Page not found in guest SLB */
		vcpu->arch.dear = vcpu->arch.fault_dear;
		kvmppc_book3s_queue_irqprio(vcpu, vec + 0x80);
	} else if (!is_mmio &&
		   kvmppc_visible_gfn(vcpu, pte.raddr >> PAGE_SHIFT)) {
		/* The guest's PTE is not mapped yet. Map on the host */
		kvmppc_mmu_map_page(vcpu, &pte);
		if (data)
			vcpu->stat.sp_storage++;
		else if (vcpu->arch.mmu.is_dcbz32(vcpu) &&
			 (!(vcpu->arch.hflags & BOOK3S_HFLAG_DCBZ32)))
			kvmppc_patch_dcbz(vcpu, &pte);
	} else {
		/* MMIO */
		vcpu->stat.mmio_exits++;
		vcpu->arch.paddr_accessed = pte.raddr;
		r = kvmppc_emulate_mmio(run, vcpu);
		if (r == RESUME_HOST_NV)
			r = RESUME_HOST;
	}

	return r;
}

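/*
 * Main exit dispatcher: look at the interrupt vector we exited on and
 * either fix things up behind the guest's back (shadow page faults,
 * segment misses) or reflect the interrupt into the guest, then decide
 * whether to reenter the guest or go back to userspace.
 */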
int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
		       unsigned int exit_nr)
{
	int r = RESUME_HOST;

	vcpu->stat.sum_exits++;

	run->exit_reason = KVM_EXIT_UNKNOWN;
	run->ready_for_interrupt_injection = 1;
#ifdef EXIT_DEBUG
	printk(KERN_EMERG "exit_nr=0x%x | pc=0x%lx | dar=0x%lx | dec=0x%x | msr=0x%lx\n",
	       exit_nr, vcpu->arch.pc, vcpu->arch.fault_dear,
	       kvmppc_get_dec(vcpu), vcpu->arch.msr);
#elif defined(EXIT_DEBUG_SIMPLE)
	if ((exit_nr != 0x900) && (exit_nr != 0x500))
		printk(KERN_EMERG "exit_nr=0x%x | pc=0x%lx | dar=0x%lx | msr=0x%lx\n",
		       exit_nr, vcpu->arch.pc, vcpu->arch.fault_dear,
		       vcpu->arch.msr);
#endif
	kvm_resched(vcpu);
	switch (exit_nr) {
	case BOOK3S_INTERRUPT_INST_STORAGE:
		vcpu->stat.pf_instruc++;
		/* only care about PTEG not found errors, but leave NX alone */
		if (vcpu->arch.shadow_msr & 0x40000000) {
			r = kvmppc_handle_pagefault(run, vcpu, vcpu->arch.pc, exit_nr);
			vcpu->stat.sp_instruc++;
		} else if (vcpu->arch.mmu.is_dcbz32(vcpu) &&
			   (!(vcpu->arch.hflags & BOOK3S_HFLAG_DCBZ32))) {
			/*
			 * XXX If we do the dcbz hack we use the NX bit to flush
			 * and patch the page, so we can't use the NX bit inside
			 * the guest. Let's cross our fingers that no guest that
			 * needs the dcbz hack also uses NX.
			 */
			kvmppc_mmu_pte_flush(vcpu, vcpu->arch.pc, ~0xFFFULL);
		} else {
			vcpu->arch.msr |= (vcpu->arch.shadow_msr & 0x58000000);
			kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
			kvmppc_mmu_pte_flush(vcpu, vcpu->arch.pc, ~0xFFFULL);
			r = RESUME_GUEST;
		}
		break;
	case BOOK3S_INTERRUPT_DATA_STORAGE:
		vcpu->stat.pf_storage++;
		/* The only case we need to handle is missing shadow PTEs */
		if (vcpu->arch.fault_dsisr & DSISR_NOHPTE) {
			r = kvmppc_handle_pagefault(run, vcpu, vcpu->arch.fault_dear, exit_nr);
		} else {
			vcpu->arch.dear = vcpu->arch.fault_dear;
			to_book3s(vcpu)->dsisr = vcpu->arch.fault_dsisr;
			kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
			kvmppc_mmu_pte_flush(vcpu, vcpu->arch.dear, ~0xFFFULL);
			r = RESUME_GUEST;
		}
		break;
	case BOOK3S_INTERRUPT_DATA_SEGMENT:
		if (kvmppc_mmu_map_segment(vcpu, vcpu->arch.fault_dear) < 0) {
			vcpu->arch.dear = vcpu->arch.fault_dear;
			kvmppc_book3s_queue_irqprio(vcpu,
				BOOK3S_INTERRUPT_DATA_SEGMENT);
		}
		r = RESUME_GUEST;
		break;
	case BOOK3S_INTERRUPT_INST_SEGMENT:
		if (kvmppc_mmu_map_segment(vcpu, vcpu->arch.pc) < 0) {
			kvmppc_book3s_queue_irqprio(vcpu,
				BOOK3S_INTERRUPT_INST_SEGMENT);
		}
		r = RESUME_GUEST;
		break;
	/* We're good on these - the host merely wanted to get our attention */
	case BOOK3S_INTERRUPT_DECREMENTER:
		vcpu->stat.dec_exits++;
		r = RESUME_GUEST;
		break;
	case BOOK3S_INTERRUPT_EXTERNAL:
		vcpu->stat.ext_intr_exits++;
		r = RESUME_GUEST;
		break;
	case BOOK3S_INTERRUPT_PROGRAM:
	{
		enum emulation_result er;
		ulong flags;

		flags = (vcpu->arch.shadow_msr & 0x1f0000ull);

		if (vcpu->arch.msr & MSR_PR) {
#ifdef EXIT_DEBUG
			printk(KERN_INFO "Userspace triggered 0x700 exception at 0x%lx (0x%x)\n",
			       vcpu->arch.pc, vcpu->arch.last_inst);
#endif
			if ((vcpu->arch.last_inst & 0xff0007ff) !=
			    (INS_DCBZ & 0xfffffff7)) {
				kvmppc_core_queue_program(vcpu, flags);
				r = RESUME_GUEST;
				break;
			}
		}

		vcpu->stat.emulated_inst_exits++;
		er = kvmppc_emulate_instruction(run, vcpu);
		switch (er) {
		case EMULATE_DONE:
			r = RESUME_GUEST_NV;
			break;
		case EMULATE_FAIL:
			printk(KERN_CRIT "%s: emulation at %lx failed (%08x)\n",
			       __func__, vcpu->arch.pc, vcpu->arch.last_inst);
			kvmppc_core_queue_program(vcpu, flags);
			r = RESUME_GUEST;
			break;
		default:
			BUG();
		}
		break;
	}
	case BOOK3S_INTERRUPT_SYSCALL:
#ifdef EXIT_DEBUG
		printk(KERN_INFO "Syscall Nr %d\n", (int)kvmppc_get_gpr(vcpu, 0));
#endif
		vcpu->stat.syscall_exits++;
		kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
		r = RESUME_GUEST;
		break;
	case BOOK3S_INTERRUPT_MACHINE_CHECK:
	case BOOK3S_INTERRUPT_FP_UNAVAIL:
	case BOOK3S_INTERRUPT_TRACE:
	case BOOK3S_INTERRUPT_ALTIVEC:
	case BOOK3S_INTERRUPT_VSX:
		kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
		r = RESUME_GUEST;
		break;
	default:
		/* Ugh - bork here! What did we get? */
		printk(KERN_EMERG "exit_nr=0x%x | pc=0x%lx | msr=0x%lx\n",
		       exit_nr, vcpu->arch.pc, vcpu->arch.shadow_msr);
		r = RESUME_HOST;
		BUG();
		break;
	}

	if (!(r & RESUME_HOST)) {
		/* To avoid clobbering exit_reason, only check for signals if
		 * we aren't already exiting to userspace for some other
		 * reason. */
		if (signal_pending(current)) {
#ifdef EXIT_DEBUG
			printk(KERN_EMERG "KVM: Going back to host\n");
#endif
			vcpu->stat.signal_exits++;
			run->exit_reason = KVM_EXIT_INTR;
			r = -EINTR;
		} else {
			/* In case an interrupt came in that was triggered
			 * from userspace (like DEC), we need to check what
			 * to inject now! */
			kvmppc_core_deliver_interrupts(vcpu);
		}
	}

#ifdef EXIT_DEBUG
	printk(KERN_EMERG "KVM exit: vcpu=0x%p pc=0x%lx r=0x%x\n",
	       vcpu, vcpu->arch.pc, r);
#endif

	return r;
}

int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
{
	return 0;
}

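/*
 * Copy the guest-visible register file to and from the kvm_regs layout
 * used by the KVM_GET_REGS/KVM_SET_REGS ioctls.
 */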
int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	int i;

	regs->pc = vcpu->arch.pc;
	regs->cr = kvmppc_get_cr(vcpu);
	regs->ctr = vcpu->arch.ctr;
	regs->lr = vcpu->arch.lr;
	regs->xer = kvmppc_get_xer(vcpu);
	regs->msr = vcpu->arch.msr;
	regs->srr0 = vcpu->arch.srr0;
	regs->srr1 = vcpu->arch.srr1;
	regs->pid = vcpu->arch.pid;
	regs->sprg0 = vcpu->arch.sprg0;
	regs->sprg1 = vcpu->arch.sprg1;
	regs->sprg2 = vcpu->arch.sprg2;
	regs->sprg3 = vcpu->arch.sprg3;
	regs->sprg4 = vcpu->arch.sprg4;
	regs->sprg5 = vcpu->arch.sprg5;
	regs->sprg6 = vcpu->arch.sprg6;
	regs->sprg7 = vcpu->arch.sprg7;

	for (i = 0; i < ARRAY_SIZE(regs->gpr); i++)
		regs->gpr[i] = kvmppc_get_gpr(vcpu, i);

	return 0;
}

int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	int i;

	vcpu->arch.pc = regs->pc;
	kvmppc_set_cr(vcpu, regs->cr);
	vcpu->arch.ctr = regs->ctr;
	vcpu->arch.lr = regs->lr;
	kvmppc_set_xer(vcpu, regs->xer);
	kvmppc_set_msr(vcpu, regs->msr);
	vcpu->arch.srr0 = regs->srr0;
	vcpu->arch.srr1 = regs->srr1;
	vcpu->arch.sprg0 = regs->sprg0;
	vcpu->arch.sprg1 = regs->sprg1;
	vcpu->arch.sprg2 = regs->sprg2;
	vcpu->arch.sprg3 = regs->sprg3;
	vcpu->arch.sprg4 = regs->sprg4;
	vcpu->arch.sprg5 = regs->sprg5;
	vcpu->arch.sprg6 = regs->sprg6;
	vcpu->arch.sprg7 = regs->sprg7;

	for (i = 0; i < ARRAY_SIZE(regs->gpr); i++)
		kvmppc_set_gpr(vcpu, i, regs->gpr[i]);

	return 0;
}

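/*
 * The sregs ioctls carry the guest MMU state: the SLB entries on
 * SLB-capable (64-bit) guests, or the 16 segment registers and the
 * 8 IBAT/DBAT pairs otherwise.
 */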
int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
	int i;

	sregs->pvr = vcpu->arch.pvr;

	sregs->u.s.sdr1 = to_book3s(vcpu)->sdr1;
	if (vcpu->arch.hflags & BOOK3S_HFLAG_SLB) {
		for (i = 0; i < 64; i++) {
			sregs->u.s.ppc64.slb[i].slbe = vcpu3s->slb[i].orige | i;
			sregs->u.s.ppc64.slb[i].slbv = vcpu3s->slb[i].origv;
		}
	} else {
		for (i = 0; i < 16; i++)
			sregs->u.s.ppc32.sr[i] = vcpu3s->sr[i].raw;
		for (i = 0; i < 8; i++) {
			sregs->u.s.ppc32.ibat[i] = vcpu3s->ibat[i].raw;
			sregs->u.s.ppc32.dbat[i] = vcpu3s->dbat[i].raw;
		}
	}
	return 0;
}

int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
	int i;

	kvmppc_set_pvr(vcpu, sregs->pvr);

	vcpu3s->sdr1 = sregs->u.s.sdr1;
	if (vcpu->arch.hflags & BOOK3S_HFLAG_SLB) {
		for (i = 0; i < 64; i++) {
			vcpu->arch.mmu.slbmte(vcpu, sregs->u.s.ppc64.slb[i].slbv,
					      sregs->u.s.ppc64.slb[i].slbe);
		}
	} else {
		for (i = 0; i < 16; i++) {
			vcpu->arch.mmu.mtsrin(vcpu, i, sregs->u.s.ppc32.sr[i]);
		}
		for (i = 0; i < 8; i++) {
			kvmppc_set_bat(vcpu, &(vcpu3s->ibat[i]), false,
				       (u32)sregs->u.s.ppc32.ibat[i]);
			kvmppc_set_bat(vcpu, &(vcpu3s->ibat[i]), true,
				       (u32)(sregs->u.s.ppc32.ibat[i] >> 32));
			kvmppc_set_bat(vcpu, &(vcpu3s->dbat[i]), false,
				       (u32)sregs->u.s.ppc32.dbat[i]);
			kvmppc_set_bat(vcpu, &(vcpu3s->dbat[i]), true,
				       (u32)(sregs->u.s.ppc32.dbat[i] >> 32));
		}
	}

	/* Flush the MMU after messing with the segments */
	kvmppc_mmu_pte_flush(vcpu, 0, 0);
	return 0;
}

int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	return -ENOTSUPP;
}

int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	return -ENOTSUPP;
}

int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
				  struct kvm_translation *tr)
{
	return 0;
}

/*
 * Get (and clear) the dirty memory log for a memory slot.
 */
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
			       struct kvm_dirty_log *log)
{
	struct kvm_memory_slot *memslot;
	struct kvm_vcpu *vcpu;
	ulong ga, ga_end;
	int is_dirty = 0;
	int r, n;

	mutex_lock(&kvm->slots_lock);

	r = kvm_get_dirty_log(kvm, log, &is_dirty);
	if (r)
		goto out;

	/* If nothing is dirty, don't bother messing with page tables. */
	if (is_dirty) {
		memslot = &kvm->memslots->memslots[log->slot];

		ga = memslot->base_gfn << PAGE_SHIFT;
		ga_end = ga + (memslot->npages << PAGE_SHIFT);

		kvm_for_each_vcpu(n, vcpu, kvm)
			kvmppc_mmu_pte_pflush(vcpu, ga, ga_end);

		n = ALIGN(memslot->npages, BITS_PER_LONG) / 8;
		memset(memslot->dirty_bitmap, 0, n);
	}

	r = 0;
out:
	mutex_unlock(&kvm->slots_lock);
	return r;
}

int kvmppc_core_check_processor_compat(void)
{
	return 0;
}

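/*
 * Allocate and initialize a vcpu. Every vcpu gets its own MMU context id,
 * from which a private range of host VSIDs (vsid_first..vsid_max) is
 * carved, so the shadow translations of different vcpus cannot collide.
 */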
struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm, unsigned int id)
{
	struct kvmppc_vcpu_book3s *vcpu_book3s;
	struct kvm_vcpu *vcpu;
	int err;

	vcpu_book3s = (struct kvmppc_vcpu_book3s *)
		__get_free_pages(GFP_KERNEL | __GFP_ZERO,
				 get_order(sizeof(struct kvmppc_vcpu_book3s)));
	if (!vcpu_book3s) {
		err = -ENOMEM;
		goto out;
	}

	vcpu = &vcpu_book3s->vcpu;
	err = kvm_vcpu_init(vcpu, kvm, id);
	if (err)
		goto free_vcpu;

	vcpu->arch.host_retip = kvm_return_point;
	vcpu->arch.host_msr = mfmsr();
	/* default to book3s_64 (970fx) */
	vcpu->arch.pvr = 0x3C0301;
	kvmppc_set_pvr(vcpu, vcpu->arch.pvr);
	vcpu_book3s->slb_nr = 64;

	/* remember where some real-mode handlers are */
	vcpu->arch.trampoline_lowmem = kvmppc_trampoline_lowmem;
	vcpu->arch.trampoline_enter = kvmppc_trampoline_enter;
	vcpu->arch.highmem_handler = (ulong)kvmppc_handler_highmem;
	vcpu->arch.rmcall = *(ulong *)kvmppc_rmcall;

	vcpu->arch.shadow_msr = MSR_USER64;

	err = __init_new_context();
	if (err < 0)
		goto free_vcpu;
	vcpu_book3s->context_id = err;

	vcpu_book3s->vsid_max = ((vcpu_book3s->context_id + 1) << USER_ESID_BITS) - 1;
	vcpu_book3s->vsid_first = vcpu_book3s->context_id << USER_ESID_BITS;
	vcpu_book3s->vsid_next = vcpu_book3s->vsid_first;

	return vcpu;

free_vcpu:
	free_pages((long)vcpu_book3s, get_order(sizeof(struct kvmppc_vcpu_book3s)));
out:
	return ERR_PTR(err);
}

void kvmppc_core_vcpu_free(struct kvm_vcpu *vcpu)
{
	struct kvmppc_vcpu_book3s *vcpu_book3s = to_book3s(vcpu);

	__destroy_context(vcpu_book3s->context_id);
	kvm_vcpu_uninit(vcpu);
	free_pages((long)vcpu_book3s, get_order(sizeof(struct kvmppc_vcpu_book3s)));
}

extern int __kvmppc_vcpu_entry(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu);
int __kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
{
	int ret;

	/* No need to go into the guest when all we do is going out */
	if (signal_pending(current)) {
		kvm_run->exit_reason = KVM_EXIT_INTR;
		return -EINTR;
	}

	/* XXX we get called with irq disabled - change that! */
	local_irq_enable();

	ret = __kvmppc_vcpu_entry(kvm_run, vcpu);

	local_irq_disable();

	return ret;
}

static int kvmppc_book3s_init(void)
{
	return kvm_init(NULL, sizeof(struct kvmppc_vcpu_book3s), THIS_MODULE);
}

static void kvmppc_book3s_exit(void)
{
	kvm_exit();
}

module_init(kvmppc_book3s_init);
module_exit(kvmppc_book3s_exit);