/*
 * Copyright (C) 2009. SUSE Linux Products GmbH. All rights reserved.
 *
 * Authors:
 *    Alexander Graf <agraf@suse.de>
 *    Kevin Wolf <mail@kevin-wolf.de>
 *
 * Description:
 * This file is derived from arch/powerpc/kvm/44x.c,
 * by Hollis Blanchard <hollisb@us.ibm.com>.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 */

#include <linux/kvm_host.h>
#include <linux/err.h>

#include <asm/reg.h>
#include <asm/cputable.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
#include <asm/mmu_context.h>
#include <linux/sched.h>
#include <linux/vmalloc.h>

#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU

/* #define EXIT_DEBUG */
/* #define EXIT_DEBUG_SIMPLE */

struct kvm_stats_debugfs_item debugfs_entries[] = {
	{ "exits",       VCPU_STAT(sum_exits) },
	{ "mmio",        VCPU_STAT(mmio_exits) },
	{ "sig",         VCPU_STAT(signal_exits) },
	{ "sysc",        VCPU_STAT(syscall_exits) },
	{ "inst_emu",    VCPU_STAT(emulated_inst_exits) },
	{ "dec",         VCPU_STAT(dec_exits) },
	{ "ext_intr",    VCPU_STAT(ext_intr_exits) },
	{ "queue_intr",  VCPU_STAT(queue_intr) },
	{ "halt_wakeup", VCPU_STAT(halt_wakeup) },
	{ "pf_storage",  VCPU_STAT(pf_storage) },
	{ "sp_storage",  VCPU_STAT(sp_storage) },
	{ "pf_instruc",  VCPU_STAT(pf_instruc) },
	{ "sp_instruc",  VCPU_STAT(sp_instruc) },
	{ "ld",          VCPU_STAT(ld) },
	{ "ld_slow",     VCPU_STAT(ld_slow) },
	{ "st",          VCPU_STAT(st) },
	{ "st_slow",     VCPU_STAT(st_slow) },
	{ NULL }
};

void kvmppc_core_load_host_debugstate(struct kvm_vcpu *vcpu)
{
}

void kvmppc_core_load_guest_debugstate(struct kvm_vcpu *vcpu)
{
}

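/*
 * On vcpu load/put we shuttle the shadow SLB between the vcpu's save
 * area and the PACA, where the real-mode world switch code expects to
 * find it.
 */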
void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	memcpy(get_paca()->kvm_slb, to_book3s(vcpu)->slb_shadow,
	       sizeof(get_paca()->kvm_slb));
	get_paca()->kvm_slb_max = to_book3s(vcpu)->slb_shadow_max;
}

void kvmppc_core_vcpu_put(struct kvm_vcpu *vcpu)
{
	memcpy(to_book3s(vcpu)->slb_shadow, get_paca()->kvm_slb,
	       sizeof(get_paca()->kvm_slb));
	to_book3s(vcpu)->slb_shadow_max = get_paca()->kvm_slb_max;
}

#if defined(EXIT_DEBUG)
static u32 kvmppc_get_dec(struct kvm_vcpu *vcpu)
{
	u64 jd = mftb() - vcpu->arch.dec_jiffies;
	return vcpu->arch.dec - jd;
}
#endif

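/*
 * Update the guest MSR. The shadow MSR only keeps the bits the guest
 * may safely control while running. Setting MSR_WE/MSR_POW with no
 * exception pending puts the vcpu to sleep, and flipping the
 * translation or privilege bits forces the segment mappings to be
 * rebuilt.
 */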
void kvmppc_set_msr(struct kvm_vcpu *vcpu, u64 msr)
{
	ulong old_msr = vcpu->arch.msr;

#ifdef EXIT_DEBUG
	printk(KERN_INFO "KVM: Set MSR to 0x%llx\n", msr);
#endif
	msr &= to_book3s(vcpu)->msr_mask;
	vcpu->arch.msr = msr;
	vcpu->arch.shadow_msr = msr | MSR_USER32;
	vcpu->arch.shadow_msr &= (MSR_VEC | MSR_VSX | MSR_FP | MSR_FE0 |
				  MSR_USER64 | MSR_SE | MSR_BE | MSR_DE |
				  MSR_FE1);

	if (msr & (MSR_WE|MSR_POW)) {
		if (!vcpu->arch.pending_exceptions) {
			kvm_vcpu_block(vcpu);
			vcpu->stat.halt_wakeup++;
		}
	}

	if (((vcpu->arch.msr & (MSR_IR|MSR_DR)) != (old_msr & (MSR_IR|MSR_DR))) ||
	    (vcpu->arch.msr & MSR_PR) != (old_msr & MSR_PR)) {
		kvmppc_mmu_flush_segments(vcpu);
		kvmppc_mmu_map_segment(vcpu, vcpu->arch.pc);
	}
}

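/*
 * Inject an interrupt into the guest: save the return context in
 * srr0/srr1 and redirect the PC to the requested vector, relative to
 * the guest's interrupt prefix (hior).
 */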
void kvmppc_inject_interrupt(struct kvm_vcpu *vcpu, int vec, u64 flags)
{
	vcpu->arch.srr0 = vcpu->arch.pc;
	vcpu->arch.srr1 = vcpu->arch.msr | flags;
	vcpu->arch.pc = to_book3s(vcpu)->hior + vec;
	vcpu->arch.mmu.reset_msr(vcpu);
}

static int kvmppc_book3s_vec2irqprio(unsigned int vec)
{
	unsigned int prio;

	switch (vec) {
	case 0x100: prio = BOOK3S_IRQPRIO_SYSTEM_RESET;  break;
	case 0x200: prio = BOOK3S_IRQPRIO_MACHINE_CHECK; break;
	case 0x300: prio = BOOK3S_IRQPRIO_DATA_STORAGE;  break;
	case 0x380: prio = BOOK3S_IRQPRIO_DATA_SEGMENT;  break;
	case 0x400: prio = BOOK3S_IRQPRIO_INST_STORAGE;  break;
	case 0x480: prio = BOOK3S_IRQPRIO_INST_SEGMENT;  break;
	case 0x500: prio = BOOK3S_IRQPRIO_EXTERNAL;      break;
	case 0x600: prio = BOOK3S_IRQPRIO_ALIGNMENT;     break;
	case 0x700: prio = BOOK3S_IRQPRIO_PROGRAM;       break;
	case 0x800: prio = BOOK3S_IRQPRIO_FP_UNAVAIL;    break;
	case 0x900: prio = BOOK3S_IRQPRIO_DECREMENTER;   break;
	case 0xc00: prio = BOOK3S_IRQPRIO_SYSCALL;       break;
	case 0xd00: prio = BOOK3S_IRQPRIO_DEBUG;         break;
	case 0xf20: prio = BOOK3S_IRQPRIO_ALTIVEC;       break;
	case 0xf40: prio = BOOK3S_IRQPRIO_VSX;           break;
	default:    prio = BOOK3S_IRQPRIO_MAX;           break;
	}

	return prio;
}

static void kvmppc_book3s_dequeue_irqprio(struct kvm_vcpu *vcpu,
					  unsigned int vec)
{
	clear_bit(kvmppc_book3s_vec2irqprio(vec),
		  &vcpu->arch.pending_exceptions);
}

void kvmppc_book3s_queue_irqprio(struct kvm_vcpu *vcpu, unsigned int vec)
{
	vcpu->stat.queue_intr++;

	set_bit(kvmppc_book3s_vec2irqprio(vec),
		&vcpu->arch.pending_exceptions);
#ifdef EXIT_DEBUG
	printk(KERN_INFO "Queueing interrupt %x\n", vec);
#endif
}

void kvmppc_core_queue_program(struct kvm_vcpu *vcpu)
{
	kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_PROGRAM);
}

void kvmppc_core_queue_dec(struct kvm_vcpu *vcpu)
{
	kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_DECREMENTER);
}

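/*
 * Note: this open-codes the vector->priority mapping; the shift below
 * must yield the same bit that kvmppc_book3s_vec2irqprio() sets for the
 * decrementer vector.
 */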
int kvmppc_core_pending_dec(struct kvm_vcpu *vcpu)
{
	return test_bit(BOOK3S_INTERRUPT_DECREMENTER >> 7,
			&vcpu->arch.pending_exceptions);
}

void kvmppc_core_dequeue_dec(struct kvm_vcpu *vcpu)
{
	kvmppc_book3s_dequeue_irqprio(vcpu, BOOK3S_INTERRUPT_DECREMENTER);
}

void kvmppc_core_queue_external(struct kvm_vcpu *vcpu,
				struct kvm_interrupt *irq)
{
	kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_EXTERNAL);
}

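/*
 * Try to deliver the interrupt of the given priority. External and
 * decrementer interrupts are gated on MSR_EE; everything else is
 * delivered unconditionally. Returns 1 if the interrupt was injected.
 */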
int kvmppc_book3s_irqprio_deliver(struct kvm_vcpu *vcpu, unsigned int priority)
{
	int deliver = 1;
	int vec = 0;

	switch (priority) {
	case BOOK3S_IRQPRIO_DECREMENTER:
		deliver = vcpu->arch.msr & MSR_EE;
		vec = BOOK3S_INTERRUPT_DECREMENTER;
		break;
	case BOOK3S_IRQPRIO_EXTERNAL:
		deliver = vcpu->arch.msr & MSR_EE;
		vec = BOOK3S_INTERRUPT_EXTERNAL;
		break;
	case BOOK3S_IRQPRIO_SYSTEM_RESET:
		vec = BOOK3S_INTERRUPT_SYSTEM_RESET;
		break;
	case BOOK3S_IRQPRIO_MACHINE_CHECK:
		vec = BOOK3S_INTERRUPT_MACHINE_CHECK;
		break;
	case BOOK3S_IRQPRIO_DATA_STORAGE:
		vec = BOOK3S_INTERRUPT_DATA_STORAGE;
		break;
	case BOOK3S_IRQPRIO_INST_STORAGE:
		vec = BOOK3S_INTERRUPT_INST_STORAGE;
		break;
	case BOOK3S_IRQPRIO_DATA_SEGMENT:
		vec = BOOK3S_INTERRUPT_DATA_SEGMENT;
		break;
	case BOOK3S_IRQPRIO_INST_SEGMENT:
		vec = BOOK3S_INTERRUPT_INST_SEGMENT;
		break;
	case BOOK3S_IRQPRIO_ALIGNMENT:
		vec = BOOK3S_INTERRUPT_ALIGNMENT;
		break;
	case BOOK3S_IRQPRIO_PROGRAM:
		vec = BOOK3S_INTERRUPT_PROGRAM;
		break;
	case BOOK3S_IRQPRIO_VSX:
		vec = BOOK3S_INTERRUPT_VSX;
		break;
	case BOOK3S_IRQPRIO_ALTIVEC:
		vec = BOOK3S_INTERRUPT_ALTIVEC;
		break;
	case BOOK3S_IRQPRIO_FP_UNAVAIL:
		vec = BOOK3S_INTERRUPT_FP_UNAVAIL;
		break;
	case BOOK3S_IRQPRIO_SYSCALL:
		vec = BOOK3S_INTERRUPT_SYSCALL;
		break;
	case BOOK3S_IRQPRIO_DEBUG:
		vec = BOOK3S_INTERRUPT_TRACE;
		break;
	case BOOK3S_IRQPRIO_PERFORMANCE_MONITOR:
		vec = BOOK3S_INTERRUPT_PERFMON;
		break;
	default:
		deliver = 0;
		printk(KERN_ERR "KVM: Unknown interrupt: 0x%x\n", priority);
		break;
	}

#if 0
	printk(KERN_INFO "Deliver interrupt 0x%x? %x\n", vec, deliver);
#endif

	if (deliver)
		kvmppc_inject_interrupt(vcpu, vec, 0ULL);

	return deliver;
}

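/*
 * Work through the pending exception bitmap in priority order. A
 * delivered interrupt other than the decrementer is cleared from the
 * bitmap and ends the scan; DEC bits stay set because the guest clears
 * DEC interrupts itself via mtdec.
 */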
void kvmppc_core_deliver_interrupts(struct kvm_vcpu *vcpu)
{
	unsigned long *pending = &vcpu->arch.pending_exceptions;
	unsigned int priority;

#ifdef EXIT_DEBUG
	if (vcpu->arch.pending_exceptions)
		printk(KERN_EMERG "KVM: Check pending: %lx\n",
		       vcpu->arch.pending_exceptions);
#endif
	priority = __ffs(*pending);
	while (priority <= (sizeof(unsigned int) * 8)) {
		if (kvmppc_book3s_irqprio_deliver(vcpu, priority) &&
		    (priority != BOOK3S_IRQPRIO_DECREMENTER)) {
			/* DEC interrupts get cleared by mtdec */
			clear_bit(priority, &vcpu->arch.pending_exceptions);
			break;
		}

		priority = find_next_bit(pending,
					 BITS_PER_BYTE * sizeof(*pending),
					 priority + 1);
	}
}

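/*
 * Select the MMU backend and interrupt prefix for the CPU the PVR
 * describes: PVRs in the 64-bit window get the Book3s_64 MMU,
 * everything else the Book3s_32 MMU.
 */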
void kvmppc_set_pvr(struct kvm_vcpu *vcpu, u32 pvr)
{
	vcpu->arch.hflags &= ~BOOK3S_HFLAG_SLB;
	vcpu->arch.pvr = pvr;
	if ((pvr >= 0x330000) && (pvr < 0x70330000)) {
		kvmppc_mmu_book3s_64_init(vcpu);
		to_book3s(vcpu)->hior = 0xfff00000;
		to_book3s(vcpu)->msr_mask = 0xffffffffffffffffULL;
	} else {
		kvmppc_mmu_book3s_32_init(vcpu);
		to_book3s(vcpu)->hior = 0;
		to_book3s(vcpu)->msr_mask = 0xffffffffULL;
	}

	/* If we are in hypervisor level on 970, we can tell the CPU to
	 * treat DCBZ as a 32 byte store */
	vcpu->arch.hflags &= ~BOOK3S_HFLAG_DCBZ32;
	if (vcpu->arch.mmu.is_dcbz32(vcpu) && (mfmsr() & MSR_HV) &&
	    !strcmp(cur_cpu_spec->platform, "ppc970"))
		vcpu->arch.hflags |= BOOK3S_HFLAG_DCBZ32;
}

/* Book3s_32 CPUs always have a 32 byte cache line size, which Linux assumes.
 * To make Book3s_32 Linux work on Book3s_64, we have to make sure we trap dcbz
 * to emulate a 32 byte dcbz length.
 *
 * The Book3s_64 designers also realized this case and implemented a special
 * bit in the HID5 register, which is a hypervisor resource. Thus we can't use
 * it.
 *
 * My approach here is to patch the dcbz instruction on executing pages.
 */
static void kvmppc_patch_dcbz(struct kvm_vcpu *vcpu, struct kvmppc_pte *pte)
{
	bool touched = false;
	hva_t hpage;
	u32 *page;
	int i;

	hpage = gfn_to_hva(vcpu->kvm, pte->raddr >> PAGE_SHIFT);
	if (kvm_is_error_hva(hpage))
		return;

	hpage |= pte->raddr & ~PAGE_MASK;
	hpage &= ~0xFFFULL;

	page = vmalloc(HW_PAGE_SIZE);
	if (!page)
		return;

	if (copy_from_user(page, (void __user *)hpage, HW_PAGE_SIZE))
		goto out;

	for (i = 0; i < HW_PAGE_SIZE / 4; i++)
		if ((page[i] & 0xff0007ff) == INS_DCBZ) {
			page[i] &= 0xfffffff7; /* reserved instruction, so we trap */
			touched = true;
		}

	if (touched)
		copy_to_user((void __user *)hpage, page, HW_PAGE_SIZE);

out:
	vfree(page);
}

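/*
 * Translate an effective address into a guest PTE. With translation
 * disabled we fake up an identity mapping and tag the virtual page with
 * a real-mode VSID, so real-mode and translated mappings stay distinct
 * in the shadow MMU.
 */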
static int kvmppc_xlate(struct kvm_vcpu *vcpu, ulong eaddr, bool data,
			struct kvmppc_pte *pte)
{
	int relocated = (vcpu->arch.msr & (data ? MSR_DR : MSR_IR));
	int r;

	if (relocated) {
		r = vcpu->arch.mmu.xlate(vcpu, eaddr, pte, data);
	} else {
		pte->eaddr = eaddr;
		pte->raddr = eaddr & 0xffffffff;
		pte->vpage = eaddr >> 12;
		switch (vcpu->arch.msr & (MSR_DR|MSR_IR)) {
		case 0:
			pte->vpage |= VSID_REAL;
			break;
		case MSR_DR:
			pte->vpage |= VSID_REAL_DR;
			break;
		case MSR_IR:
			pte->vpage |= VSID_REAL_IR;
			break;
		}
		pte->may_read = true;
		pte->may_write = true;
		pte->may_execute = true;
		r = 0;
	}

	return r;
}

static hva_t kvmppc_bad_hva(void)
{
	return PAGE_OFFSET;
}

static hva_t kvmppc_pte_to_hva(struct kvm_vcpu *vcpu, struct kvmppc_pte *pte,
			       bool read)
{
	hva_t hpage;

	if (read && !pte->may_read)
		goto err;

	if (!read && !pte->may_write)
		goto err;

	hpage = gfn_to_hva(vcpu->kvm, pte->raddr >> PAGE_SHIFT);
	if (kvm_is_error_hva(hpage))
		goto err;

	return hpage | (pte->raddr & ~PAGE_MASK);
err:
	return kvmppc_bad_hva();
}

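/*
 * Store to / load from a guest effective address: translate it, resolve
 * the host virtual address and copy the bytes across. Returns 0 on
 * success and -ENOENT if any step fails.
 */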
int kvmppc_st(struct kvm_vcpu *vcpu, ulong eaddr, int size, void *ptr)
{
	struct kvmppc_pte pte;
	hva_t hva = eaddr;

	vcpu->stat.st++;

	if (kvmppc_xlate(vcpu, eaddr, false, &pte))
		goto err;

	hva = kvmppc_pte_to_hva(vcpu, &pte, false);
	if (kvm_is_error_hva(hva))
		goto err;

	if (copy_to_user((void __user *)hva, ptr, size)) {
		printk(KERN_INFO "kvmppc_st at 0x%lx failed\n", hva);
		goto err;
	}

	return 0;

err:
	return -ENOENT;
}

int kvmppc_ld(struct kvm_vcpu *vcpu, ulong eaddr, int size, void *ptr,
	      bool data)
{
	struct kvmppc_pte pte;
	hva_t hva = eaddr;

	vcpu->stat.ld++;

	if (kvmppc_xlate(vcpu, eaddr, data, &pte))
		goto err;

	hva = kvmppc_pte_to_hva(vcpu, &pte, true);
	if (kvm_is_error_hva(hva))
		goto err;

	if (copy_from_user(ptr, (void __user *)hva, size)) {
		printk(KERN_INFO "kvmppc_ld at 0x%lx failed\n", hva);
		goto err;
	}

	return 0;

err:
	return -ENOENT;
}

static int kvmppc_visible_gfn(struct kvm_vcpu *vcpu, gfn_t gfn)
{
	return kvm_is_visible_gfn(vcpu->kvm, gfn);
}

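/*
 * Handle a guest page fault: translate the faulting address, reflect
 * genuine guest faults back into the guest, map resolvable pages on the
 * host and route accesses with no backing memslot to MMIO emulation.
 */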
int kvmppc_handle_pagefault(struct kvm_run *run, struct kvm_vcpu *vcpu,
			    ulong eaddr, int vec)
{
	bool data = (vec == BOOK3S_INTERRUPT_DATA_STORAGE);
	int r = RESUME_GUEST;
	int relocated;
	int page_found = 0;
	struct kvmppc_pte pte;
	bool is_mmio = false;

	if (vec == BOOK3S_INTERRUPT_DATA_STORAGE) {
		relocated = (vcpu->arch.msr & MSR_DR);
	} else {
		relocated = (vcpu->arch.msr & MSR_IR);
	}

	/* Resolve real address if translation turned on */
	if (relocated) {
		page_found = vcpu->arch.mmu.xlate(vcpu, eaddr, &pte, data);
	} else {
		pte.may_execute = true;
		pte.may_read = true;
		pte.may_write = true;
		pte.raddr = eaddr & 0xffffffff;
		pte.eaddr = eaddr;
		pte.vpage = eaddr >> 12;
		switch (vcpu->arch.msr & (MSR_DR|MSR_IR)) {
		case 0:
			pte.vpage |= VSID_REAL;
			break;
		case MSR_DR:
			pte.vpage |= VSID_REAL_DR;
			break;
		case MSR_IR:
			pte.vpage |= VSID_REAL_IR;
			break;
		}
	}

	if (vcpu->arch.mmu.is_dcbz32(vcpu) &&
	   (!(vcpu->arch.hflags & BOOK3S_HFLAG_DCBZ32))) {
		/*
		 * If we do the dcbz hack, we have to NX on every execution,
		 * so we can patch the executing code. This renders our guest
		 * NX-less.
		 */
		pte.may_execute = !data;
	}

	if (page_found == -ENOENT) {
		/* Page not found in guest PTE entries */
		vcpu->arch.dear = vcpu->arch.fault_dear;
		to_book3s(vcpu)->dsisr = vcpu->arch.fault_dsisr;
		vcpu->arch.msr |= (vcpu->arch.shadow_msr & 0x00000000f8000000ULL);
		kvmppc_book3s_queue_irqprio(vcpu, vec);
	} else if (page_found == -EPERM) {
		/* Storage protection */
		vcpu->arch.dear = vcpu->arch.fault_dear;
		to_book3s(vcpu)->dsisr = vcpu->arch.fault_dsisr & ~DSISR_NOHPTE;
		to_book3s(vcpu)->dsisr |= DSISR_PROTFAULT;
		vcpu->arch.msr |= (vcpu->arch.shadow_msr & 0x00000000f8000000ULL);
		kvmppc_book3s_queue_irqprio(vcpu, vec);
	} else if (page_found == -EINVAL) {
		/* Page not found in guest SLB */
		vcpu->arch.dear = vcpu->arch.fault_dear;
		kvmppc_book3s_queue_irqprio(vcpu, vec + 0x80);
	} else if (!is_mmio &&
		   kvmppc_visible_gfn(vcpu, pte.raddr >> PAGE_SHIFT)) {
		/* The guest's PTE is not mapped yet. Map on the host */
		kvmppc_mmu_map_page(vcpu, &pte);
		if (data)
			vcpu->stat.sp_storage++;
		else if (vcpu->arch.mmu.is_dcbz32(vcpu) &&
			 (!(vcpu->arch.hflags & BOOK3S_HFLAG_DCBZ32)))
			kvmppc_patch_dcbz(vcpu, &pte);
	} else {
		/* MMIO */
		vcpu->stat.mmio_exits++;
		vcpu->arch.paddr_accessed = pte.raddr;
		r = kvmppc_emulate_mmio(run, vcpu);
		if (r == RESUME_HOST_NV)
			r = RESUME_HOST;
	}

	return r;
}

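/*
 * Dispatch a guest exit: resolve page faults, map segments, emulate
 * privileged instructions and reflect everything else back into the
 * guest as an interrupt. Returns a RESUME_* code telling the caller
 * whether to reenter the guest or go back to userspace.
 */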
int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
		       unsigned int exit_nr)
{
	int r = RESUME_HOST;

	vcpu->stat.sum_exits++;

	run->exit_reason = KVM_EXIT_UNKNOWN;
	run->ready_for_interrupt_injection = 1;
#ifdef EXIT_DEBUG
	printk(KERN_EMERG "exit_nr=0x%x | pc=0x%lx | dar=0x%lx | dec=0x%x | msr=0x%lx\n",
		exit_nr, vcpu->arch.pc, vcpu->arch.fault_dear,
		kvmppc_get_dec(vcpu), vcpu->arch.msr);
#elif defined(EXIT_DEBUG_SIMPLE)
	if ((exit_nr != 0x900) && (exit_nr != 0x500))
		printk(KERN_EMERG "exit_nr=0x%x | pc=0x%lx | dar=0x%lx | msr=0x%lx\n",
			exit_nr, vcpu->arch.pc, vcpu->arch.fault_dear,
			vcpu->arch.msr);
#endif
	kvm_resched(vcpu);
	switch (exit_nr) {
	case BOOK3S_INTERRUPT_INST_STORAGE:
		vcpu->stat.pf_instruc++;
		/* only care about PTEG not found errors, but leave NX alone */
		if (vcpu->arch.shadow_msr & 0x40000000) {
			r = kvmppc_handle_pagefault(run, vcpu, vcpu->arch.pc, exit_nr);
			vcpu->stat.sp_instruc++;
		} else if (vcpu->arch.mmu.is_dcbz32(vcpu) &&
			  (!(vcpu->arch.hflags & BOOK3S_HFLAG_DCBZ32))) {
			/*
			 * XXX If we do the dcbz hack we use the NX bit to flush&patch the page,
			 *     so we can't use the NX bit inside the guest. Let's cross our fingers,
			 *     that no guest that needs the dcbz hack does NX.
			 */
			kvmppc_mmu_pte_flush(vcpu, vcpu->arch.pc, ~0xFFFULL);
		} else {
			vcpu->arch.msr |= (vcpu->arch.shadow_msr & 0x58000000);
			kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
			kvmppc_mmu_pte_flush(vcpu, vcpu->arch.pc, ~0xFFFULL);
			r = RESUME_GUEST;
		}
		break;
	case BOOK3S_INTERRUPT_DATA_STORAGE:
		vcpu->stat.pf_storage++;
		/* The only case we need to handle is missing shadow PTEs */
		if (vcpu->arch.fault_dsisr & DSISR_NOHPTE) {
			r = kvmppc_handle_pagefault(run, vcpu, vcpu->arch.fault_dear, exit_nr);
		} else {
			vcpu->arch.dear = vcpu->arch.fault_dear;
			to_book3s(vcpu)->dsisr = vcpu->arch.fault_dsisr;
			kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
			kvmppc_mmu_pte_flush(vcpu, vcpu->arch.dear, ~0xFFFULL);
			r = RESUME_GUEST;
		}
		break;
	case BOOK3S_INTERRUPT_DATA_SEGMENT:
		if (kvmppc_mmu_map_segment(vcpu, vcpu->arch.fault_dear) < 0) {
			vcpu->arch.dear = vcpu->arch.fault_dear;
			kvmppc_book3s_queue_irqprio(vcpu,
				BOOK3S_INTERRUPT_DATA_SEGMENT);
		}
		r = RESUME_GUEST;
		break;
	case BOOK3S_INTERRUPT_INST_SEGMENT:
		if (kvmppc_mmu_map_segment(vcpu, vcpu->arch.pc) < 0) {
			kvmppc_book3s_queue_irqprio(vcpu,
				BOOK3S_INTERRUPT_INST_SEGMENT);
		}
		r = RESUME_GUEST;
		break;
	/* We're good on these - the host merely wanted to get our attention */
	case BOOK3S_INTERRUPT_DECREMENTER:
		vcpu->stat.dec_exits++;
		r = RESUME_GUEST;
		break;
	case BOOK3S_INTERRUPT_EXTERNAL:
		vcpu->stat.ext_intr_exits++;
		r = RESUME_GUEST;
		break;
	case BOOK3S_INTERRUPT_PROGRAM:
	{
		enum emulation_result er;

		if (vcpu->arch.msr & MSR_PR) {
#ifdef EXIT_DEBUG
			printk(KERN_INFO "Userspace triggered 0x700 exception at 0x%lx (0x%x)\n",
			       vcpu->arch.pc, vcpu->arch.last_inst);
#endif
			if ((vcpu->arch.last_inst & 0xff0007ff) !=
			    (INS_DCBZ & 0xfffffff7)) {
				kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
				r = RESUME_GUEST;
				break;
			}
		}

		vcpu->stat.emulated_inst_exits++;
		er = kvmppc_emulate_instruction(run, vcpu);
		switch (er) {
		case EMULATE_DONE:
			r = RESUME_GUEST_NV;
			break;
		case EMULATE_FAIL:
			printk(KERN_CRIT "%s: emulation at %lx failed (%08x)\n",
			       __func__, vcpu->arch.pc, vcpu->arch.last_inst);
			kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
			r = RESUME_GUEST;
			break;
		default:
			BUG();
		}
		break;
	}
	case BOOK3S_INTERRUPT_SYSCALL:
#ifdef EXIT_DEBUG
		printk(KERN_INFO "Syscall Nr %d\n", (int)vcpu->arch.gpr[0]);
#endif
		vcpu->stat.syscall_exits++;
		kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
		r = RESUME_GUEST;
		break;
	case BOOK3S_INTERRUPT_MACHINE_CHECK:
	case BOOK3S_INTERRUPT_FP_UNAVAIL:
	case BOOK3S_INTERRUPT_TRACE:
	case BOOK3S_INTERRUPT_ALTIVEC:
	case BOOK3S_INTERRUPT_VSX:
		kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
		r = RESUME_GUEST;
		break;
	default:
		/* Ugh - bork here! What did we get? */
		printk(KERN_EMERG "exit_nr=0x%x | pc=0x%lx | msr=0x%lx\n",
		       exit_nr, vcpu->arch.pc, vcpu->arch.shadow_msr);
		r = RESUME_HOST;
		BUG();
		break;
	}

	if (!(r & RESUME_HOST)) {
		/* To avoid clobbering exit_reason, only check for signals if
		 * we aren't already exiting to userspace for some other
		 * reason. */
		if (signal_pending(current)) {
#ifdef EXIT_DEBUG
			printk(KERN_EMERG "KVM: Going back to host\n");
#endif
			vcpu->stat.signal_exits++;
			run->exit_reason = KVM_EXIT_INTR;
			r = -EINTR;
		} else {
			/* In case an interrupt came in that was triggered
			 * from userspace (like DEC), we need to check what
			 * to inject now! */
			kvmppc_core_deliver_interrupts(vcpu);
		}
	}

#ifdef EXIT_DEBUG
	printk(KERN_EMERG "KVM exit: vcpu=0x%p pc=0x%lx r=0x%x\n",
	       vcpu, vcpu->arch.pc, r);
#endif

	return r;
}

int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
{
	return 0;
}

int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	int i;

	regs->pc = vcpu->arch.pc;
	regs->cr = vcpu->arch.cr;
	regs->ctr = vcpu->arch.ctr;
	regs->lr = vcpu->arch.lr;
	regs->xer = vcpu->arch.xer;
	regs->msr = vcpu->arch.msr;
	regs->srr0 = vcpu->arch.srr0;
	regs->srr1 = vcpu->arch.srr1;
	regs->pid = vcpu->arch.pid;
	regs->sprg0 = vcpu->arch.sprg0;
	regs->sprg1 = vcpu->arch.sprg1;
	regs->sprg2 = vcpu->arch.sprg2;
	regs->sprg3 = vcpu->arch.sprg3;
	regs->sprg4 = vcpu->arch.sprg4;
	regs->sprg5 = vcpu->arch.sprg5;
	regs->sprg6 = vcpu->arch.sprg6;
	regs->sprg7 = vcpu->arch.sprg7;

	for (i = 0; i < ARRAY_SIZE(regs->gpr); i++)
		regs->gpr[i] = vcpu->arch.gpr[i];

	return 0;
}

int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	int i;

	vcpu->arch.pc = regs->pc;
	vcpu->arch.cr = regs->cr;
	vcpu->arch.ctr = regs->ctr;
	vcpu->arch.lr = regs->lr;
	vcpu->arch.xer = regs->xer;
	kvmppc_set_msr(vcpu, regs->msr);
	vcpu->arch.srr0 = regs->srr0;
	vcpu->arch.srr1 = regs->srr1;
	vcpu->arch.sprg0 = regs->sprg0;
	vcpu->arch.sprg1 = regs->sprg1;
	vcpu->arch.sprg2 = regs->sprg2;
	vcpu->arch.sprg3 = regs->sprg3;
	vcpu->arch.sprg4 = regs->sprg4;
	vcpu->arch.sprg5 = regs->sprg5;
	vcpu->arch.sprg6 = regs->sprg6;
	vcpu->arch.sprg7 = regs->sprg7;

	for (i = 0; i < ARRAY_SIZE(vcpu->arch.gpr); i++)
		vcpu->arch.gpr[i] = regs->gpr[i];

	return 0;
}

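/*
 * Export/import the guest MMU state: SLB entries for SLB-capable
 * (64-bit) guests, segment registers and BATs otherwise.
 */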
int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
	int i;

	sregs->pvr = vcpu->arch.pvr;

	sregs->u.s.sdr1 = to_book3s(vcpu)->sdr1;
	if (vcpu->arch.hflags & BOOK3S_HFLAG_SLB) {
		for (i = 0; i < 64; i++) {
			sregs->u.s.ppc64.slb[i].slbe = vcpu3s->slb[i].orige | i;
			sregs->u.s.ppc64.slb[i].slbv = vcpu3s->slb[i].origv;
		}
	} else {
		for (i = 0; i < 16; i++)
			sregs->u.s.ppc32.sr[i] = vcpu3s->sr[i].raw;
		for (i = 0; i < 8; i++) {
			sregs->u.s.ppc32.ibat[i] = vcpu3s->ibat[i].raw;
			sregs->u.s.ppc32.dbat[i] = vcpu3s->dbat[i].raw;
		}
	}
	return 0;
}

int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
	int i;

	kvmppc_set_pvr(vcpu, sregs->pvr);

	vcpu3s->sdr1 = sregs->u.s.sdr1;
	if (vcpu->arch.hflags & BOOK3S_HFLAG_SLB) {
		for (i = 0; i < 64; i++) {
			vcpu->arch.mmu.slbmte(vcpu, sregs->u.s.ppc64.slb[i].slbv,
					      sregs->u.s.ppc64.slb[i].slbe);
		}
	} else {
		for (i = 0; i < 16; i++) {
			vcpu->arch.mmu.mtsrin(vcpu, i, sregs->u.s.ppc32.sr[i]);
		}
		for (i = 0; i < 8; i++) {
			kvmppc_set_bat(vcpu, &(vcpu3s->ibat[i]), false,
				       (u32)sregs->u.s.ppc32.ibat[i]);
			kvmppc_set_bat(vcpu, &(vcpu3s->ibat[i]), true,
				       (u32)(sregs->u.s.ppc32.ibat[i] >> 32));
			kvmppc_set_bat(vcpu, &(vcpu3s->dbat[i]), false,
				       (u32)sregs->u.s.ppc32.dbat[i]);
			kvmppc_set_bat(vcpu, &(vcpu3s->dbat[i]), true,
				       (u32)(sregs->u.s.ppc32.dbat[i] >> 32));
		}
	}

	/* Flush the MMU after messing with the segments */
	kvmppc_mmu_pte_flush(vcpu, 0, 0);
	return 0;
}

int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	return -ENOTSUPP;
}

int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	return -ENOTSUPP;
}

int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
				  struct kvm_translation *tr)
{
	return 0;
}

/*
 * Get (and clear) the dirty memory log for a memory slot.
 */
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
			       struct kvm_dirty_log *log)
{
	struct kvm_memory_slot *memslot;
	struct kvm_vcpu *vcpu;
	ulong ga, ga_end;
	int is_dirty = 0;
	int r, n;

	mutex_lock(&kvm->slots_lock);

	r = kvm_get_dirty_log(kvm, log, &is_dirty);
	if (r)
		goto out;

	/* If nothing is dirty, don't bother messing with page tables. */
	if (is_dirty) {
		memslot = &kvm->memslots->memslots[log->slot];

		ga = memslot->base_gfn << PAGE_SHIFT;
		ga_end = ga + (memslot->npages << PAGE_SHIFT);

		kvm_for_each_vcpu(n, vcpu, kvm)
			kvmppc_mmu_pte_pflush(vcpu, ga, ga_end);

		n = ALIGN(memslot->npages, BITS_PER_LONG) / 8;
		memset(memslot->dirty_bitmap, 0, n);
	}

	r = 0;
out:
	mutex_unlock(&kvm->slots_lock);
	return r;
}

int kvmppc_core_check_processor_compat(void)
{
	return 0;
}

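/*
 * Allocate and set up a new vcpu. We default the PVR to a Book3s_64
 * 970FX and reserve a fresh host MMU context whose VSID range backs the
 * guest's shadow translations.
 */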
struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm, unsigned int id)
{
	struct kvmppc_vcpu_book3s *vcpu_book3s;
	struct kvm_vcpu *vcpu;
	int err;

	vcpu_book3s = (struct kvmppc_vcpu_book3s *)__get_free_pages(
		GFP_KERNEL | __GFP_ZERO,
		get_order(sizeof(struct kvmppc_vcpu_book3s)));
	if (!vcpu_book3s) {
		err = -ENOMEM;
		goto out;
	}

	vcpu = &vcpu_book3s->vcpu;
	err = kvm_vcpu_init(vcpu, kvm, id);
	if (err)
		goto free_vcpu;

	vcpu->arch.host_retip = kvm_return_point;
	vcpu->arch.host_msr = mfmsr();
	/* default to book3s_64 (970fx) */
	vcpu->arch.pvr = 0x3C0301;
	kvmppc_set_pvr(vcpu, vcpu->arch.pvr);
	vcpu_book3s->slb_nr = 64;

	/* remember where some real-mode handlers are */
	vcpu->arch.trampoline_lowmem = kvmppc_trampoline_lowmem;
	vcpu->arch.trampoline_enter = kvmppc_trampoline_enter;
	vcpu->arch.highmem_handler = (ulong)kvmppc_handler_highmem;

	vcpu->arch.shadow_msr = MSR_USER64;

	err = __init_new_context();
	if (err < 0)
		goto uninit_vcpu;
	vcpu_book3s->context_id = err;

	vcpu_book3s->vsid_max = ((vcpu_book3s->context_id + 1) << USER_ESID_BITS) - 1;
	vcpu_book3s->vsid_first = vcpu_book3s->context_id << USER_ESID_BITS;
	vcpu_book3s->vsid_next = vcpu_book3s->vsid_first;

	return vcpu;

uninit_vcpu:
	kvm_vcpu_uninit(vcpu);
free_vcpu:
	free_pages((long)vcpu_book3s, get_order(sizeof(struct kvmppc_vcpu_book3s)));
out:
	return ERR_PTR(err);
}

void kvmppc_core_vcpu_free(struct kvm_vcpu *vcpu)
{
	struct kvmppc_vcpu_book3s *vcpu_book3s = to_book3s(vcpu);

	__destroy_context(vcpu_book3s->context_id);
	kvm_vcpu_uninit(vcpu);
	free_pages((long)vcpu_book3s, get_order(sizeof(struct kvmppc_vcpu_book3s)));
}

extern int __kvmppc_vcpu_entry(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu);
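/*
 * Outer guest entry path: bail out early if a signal is already
 * pending, then drop into the assembly world switch with interrupts
 * enabled.
 */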
int __kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
{
	int ret;

	/* No need to go into the guest when all we'd do is come right back out */
	if (signal_pending(current)) {
		kvm_run->exit_reason = KVM_EXIT_INTR;
		return -EINTR;
	}

	/* XXX we get called with irq disabled - change that! */
	local_irq_enable();

	ret = __kvmppc_vcpu_entry(kvm_run, vcpu);

	local_irq_disable();

	return ret;
}

static int kvmppc_book3s_init(void)
{
	return kvm_init(NULL, sizeof(struct kvmppc_vcpu_book3s), THIS_MODULE);
}

static void kvmppc_book3s_exit(void)
{
	kvm_exit();
}

module_init(kvmppc_book3s_init);
module_exit(kvmppc_book3s_exit);