/*
 * Copyright (C) 2009. SUSE Linux Products GmbH. All rights reserved.
 *
 * Authors:
 *    Alexander Graf <agraf@suse.de>
 *    Kevin Wolf <mail@kevin-wolf.de>
 *
 * Description:
 * This file is derived from arch/powerpc/kvm/44x.c,
 * by Hollis Blanchard <hollisb@us.ibm.com>.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 */

#include <linux/kvm_host.h>
#include <linux/err.h>

#include <asm/reg.h>
#include <asm/cputable.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
#include <asm/mmu_context.h>
#include <linux/sched.h>
#include <linux/vmalloc.h>

#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU
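/* Each debugfs entry below expands to the { name, offset, type } triple the
 * generic KVM debugfs code consumes, e.g. { "exits", VCPU_STAT(sum_exits) }
 * becomes { "exits", offsetof(struct kvm_vcpu, stat.sum_exits), KVM_STAT_VCPU }. */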

/* #define EXIT_DEBUG */
/* #define EXIT_DEBUG_SIMPLE */

/* Without AGGRESSIVE_DEC we only fire off a DEC interrupt when DEC turns 0.
 * When set, we retrigger a DEC interrupt after that if DEC <= 0.
 * PPC32 Linux runs faster without AGGRESSIVE_DEC, PPC64 Linux requires it. */

/* #define AGGRESSIVE_DEC */

struct kvm_stats_debugfs_item debugfs_entries[] = {
	{ "exits",       VCPU_STAT(sum_exits) },
	{ "mmio",        VCPU_STAT(mmio_exits) },
	{ "sig",         VCPU_STAT(signal_exits) },
	{ "sysc",        VCPU_STAT(syscall_exits) },
	{ "inst_emu",    VCPU_STAT(emulated_inst_exits) },
	{ "dec",         VCPU_STAT(dec_exits) },
	{ "ext_intr",    VCPU_STAT(ext_intr_exits) },
	{ "queue_intr",  VCPU_STAT(queue_intr) },
	{ "halt_wakeup", VCPU_STAT(halt_wakeup) },
	{ "pf_storage",  VCPU_STAT(pf_storage) },
	{ "sp_storage",  VCPU_STAT(sp_storage) },
	{ "pf_instruc",  VCPU_STAT(pf_instruc) },
	{ "sp_instruc",  VCPU_STAT(sp_instruc) },
	{ "ld",          VCPU_STAT(ld) },
	{ "ld_slow",     VCPU_STAT(ld_slow) },
	{ "st",          VCPU_STAT(st) },
	{ "st_slow",     VCPU_STAT(st_slow) },
	{ NULL }
};

void kvmppc_core_load_host_debugstate(struct kvm_vcpu *vcpu)
{
}

void kvmppc_core_load_guest_debugstate(struct kvm_vcpu *vcpu)
{
}

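/* The shadow SLB lives in the PACA so the assembly entry code can reach it
 * without touching vcpu state; sync it on every vcpu load/put. */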
void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	memcpy(get_paca()->kvm_slb, to_book3s(vcpu)->slb_shadow,
	       sizeof(get_paca()->kvm_slb));
	get_paca()->kvm_slb_max = to_book3s(vcpu)->slb_shadow_max;
}

void kvmppc_core_vcpu_put(struct kvm_vcpu *vcpu)
{
	memcpy(to_book3s(vcpu)->slb_shadow, get_paca()->kvm_slb,
	       sizeof(get_paca()->kvm_slb));
	to_book3s(vcpu)->slb_shadow_max = get_paca()->kvm_slb_max;
}

#if defined(AGGRESSIVE_DEC) || defined(EXIT_DEBUG)
static u32 kvmppc_get_dec(struct kvm_vcpu *vcpu)
{
	u64 jd = mftb() - vcpu->arch.dec_jiffies;
	return vcpu->arch.dec - jd;
}
#endif

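/* Update the guest-visible MSR, recompute the shadow MSR the guest really
 * runs with (always user mode, with selected status bits passed through),
 * block the vcpu on WE/POW with nothing pending, and remap segments if the
 * translation mode (IR/DR) or privilege level changed. */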
void kvmppc_set_msr(struct kvm_vcpu *vcpu, u64 msr)
{
	ulong old_msr = vcpu->arch.msr;

#ifdef EXIT_DEBUG
	printk(KERN_INFO "KVM: Set MSR to 0x%llx\n", msr);
#endif
	msr &= to_book3s(vcpu)->msr_mask;
	vcpu->arch.msr = msr;
	vcpu->arch.shadow_msr = msr | MSR_USER32;
	vcpu->arch.shadow_msr &= (MSR_VEC | MSR_VSX | MSR_FP | MSR_FE0 |
				  MSR_USER64 | MSR_SE | MSR_BE | MSR_DE |
				  MSR_FE1);

	if (msr & (MSR_WE|MSR_POW)) {
		if (!vcpu->arch.pending_exceptions) {
			kvm_vcpu_block(vcpu);
			vcpu->stat.halt_wakeup++;
		}
	}

	if (((vcpu->arch.msr & (MSR_IR|MSR_DR)) != (old_msr & (MSR_IR|MSR_DR))) ||
	    (vcpu->arch.msr & MSR_PR) != (old_msr & MSR_PR)) {
		kvmppc_mmu_flush_segments(vcpu);
		kvmppc_mmu_map_segment(vcpu, vcpu->arch.pc);
	}
}

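/* Deliver an interrupt the way the hardware would: stash PC and MSR (plus
 * flags) in SRR0/SRR1, jump to the vector relative to HIOR, and let the MMU
 * callback reset the MSR for interrupt context. */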
void kvmppc_inject_interrupt(struct kvm_vcpu *vcpu, int vec, u64 flags)
{
	vcpu->arch.srr0 = vcpu->arch.pc;
	vcpu->arch.srr1 = vcpu->arch.msr | flags;
	vcpu->arch.pc = to_book3s(vcpu)->hior + vec;
	vcpu->arch.mmu.reset_msr(vcpu);
}

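/* Translate an interrupt vector into its delivery priority and mark it
 * pending; actual injection happens in kvmppc_core_deliver_interrupts(). */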
void kvmppc_book3s_queue_irqprio(struct kvm_vcpu *vcpu, unsigned int vec)
{
	unsigned int prio;

	vcpu->stat.queue_intr++;
	switch (vec) {
	case 0x100: prio = BOOK3S_IRQPRIO_SYSTEM_RESET;  break;
	case 0x200: prio = BOOK3S_IRQPRIO_MACHINE_CHECK; break;
	case 0x300: prio = BOOK3S_IRQPRIO_DATA_STORAGE;  break;
	case 0x380: prio = BOOK3S_IRQPRIO_DATA_SEGMENT;  break;
	case 0x400: prio = BOOK3S_IRQPRIO_INST_STORAGE;  break;
	case 0x480: prio = BOOK3S_IRQPRIO_INST_SEGMENT;  break;
	case 0x500: prio = BOOK3S_IRQPRIO_EXTERNAL;      break;
	case 0x600: prio = BOOK3S_IRQPRIO_ALIGNMENT;     break;
	case 0x700: prio = BOOK3S_IRQPRIO_PROGRAM;       break;
	case 0x800: prio = BOOK3S_IRQPRIO_FP_UNAVAIL;    break;
	case 0x900: prio = BOOK3S_IRQPRIO_DECREMENTER;   break;
	case 0xc00: prio = BOOK3S_IRQPRIO_SYSCALL;       break;
	case 0xd00: prio = BOOK3S_IRQPRIO_DEBUG;         break;
	case 0xf20: prio = BOOK3S_IRQPRIO_ALTIVEC;       break;
	case 0xf40: prio = BOOK3S_IRQPRIO_VSX;           break;
	default:    prio = BOOK3S_IRQPRIO_MAX;           break;
	}

	set_bit(prio, &vcpu->arch.pending_exceptions);
#ifdef EXIT_DEBUG
	printk(KERN_INFO "Queueing interrupt %x\n", vec);
#endif
}

void kvmppc_core_queue_program(struct kvm_vcpu *vcpu)
{
	kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_PROGRAM);
}

void kvmppc_core_queue_dec(struct kvm_vcpu *vcpu)
{
	kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_DECREMENTER);
}

int kvmppc_core_pending_dec(struct kvm_vcpu *vcpu)
{
	/* pending_exceptions is indexed by priority, not by vector */
	return test_bit(BOOK3S_IRQPRIO_DECREMENTER, &vcpu->arch.pending_exceptions);
}

void kvmppc_core_queue_external(struct kvm_vcpu *vcpu,
				struct kvm_interrupt *irq)
{
	kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_EXTERNAL);
}

int kvmppc_book3s_irqprio_deliver(struct kvm_vcpu *vcpu, unsigned int priority)
{
	int deliver = 1;
	int vec = 0;

	switch (priority) {
	case BOOK3S_IRQPRIO_DECREMENTER:
		deliver = vcpu->arch.msr & MSR_EE;
		vec = BOOK3S_INTERRUPT_DECREMENTER;
		break;
	case BOOK3S_IRQPRIO_EXTERNAL:
		deliver = vcpu->arch.msr & MSR_EE;
		vec = BOOK3S_INTERRUPT_EXTERNAL;
		break;
	case BOOK3S_IRQPRIO_SYSTEM_RESET:
		vec = BOOK3S_INTERRUPT_SYSTEM_RESET;
		break;
	case BOOK3S_IRQPRIO_MACHINE_CHECK:
		vec = BOOK3S_INTERRUPT_MACHINE_CHECK;
		break;
	case BOOK3S_IRQPRIO_DATA_STORAGE:
		vec = BOOK3S_INTERRUPT_DATA_STORAGE;
		break;
	case BOOK3S_IRQPRIO_INST_STORAGE:
		vec = BOOK3S_INTERRUPT_INST_STORAGE;
		break;
	case BOOK3S_IRQPRIO_DATA_SEGMENT:
		vec = BOOK3S_INTERRUPT_DATA_SEGMENT;
		break;
	case BOOK3S_IRQPRIO_INST_SEGMENT:
		vec = BOOK3S_INTERRUPT_INST_SEGMENT;
		break;
	case BOOK3S_IRQPRIO_ALIGNMENT:
		vec = BOOK3S_INTERRUPT_ALIGNMENT;
		break;
	case BOOK3S_IRQPRIO_PROGRAM:
		vec = BOOK3S_INTERRUPT_PROGRAM;
		break;
	case BOOK3S_IRQPRIO_VSX:
		vec = BOOK3S_INTERRUPT_VSX;
		break;
	case BOOK3S_IRQPRIO_ALTIVEC:
		vec = BOOK3S_INTERRUPT_ALTIVEC;
		break;
	case BOOK3S_IRQPRIO_FP_UNAVAIL:
		vec = BOOK3S_INTERRUPT_FP_UNAVAIL;
		break;
	case BOOK3S_IRQPRIO_SYSCALL:
		vec = BOOK3S_INTERRUPT_SYSCALL;
		break;
	case BOOK3S_IRQPRIO_DEBUG:
		vec = BOOK3S_INTERRUPT_TRACE;
		break;
	case BOOK3S_IRQPRIO_PERFORMANCE_MONITOR:
		vec = BOOK3S_INTERRUPT_PERFMON;
		break;
	default:
		deliver = 0;
		printk(KERN_ERR "KVM: Unknown interrupt: 0x%x\n", priority);
		break;
	}

#if 0
	printk(KERN_INFO "Deliver interrupt 0x%x? %x\n", vec, deliver);
#endif

	if (deliver)
		kvmppc_inject_interrupt(vcpu, vec, 0ULL);

	return deliver;
}

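/* Walk the pending-exception bitmap in priority order and inject the first
 * interrupt that is currently deliverable. */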
void kvmppc_core_deliver_interrupts(struct kvm_vcpu *vcpu)
{
	unsigned long *pending = &vcpu->arch.pending_exceptions;
	unsigned int priority;

	/* XXX be more clever here - no need to mftb() on every entry */
	/* Issue DEC again if it's still active */
#ifdef AGGRESSIVE_DEC
	if (vcpu->arch.msr & MSR_EE)
		if (kvmppc_get_dec(vcpu) & 0x80000000)
			kvmppc_core_queue_dec(vcpu);
#endif

#ifdef EXIT_DEBUG
	if (vcpu->arch.pending_exceptions)
		printk(KERN_EMERG "KVM: Check pending: %lx\n",
		       vcpu->arch.pending_exceptions);
#endif

	/* __ffs() is undefined for 0, so bail early if nothing is pending */
	if (!*pending)
		return;

	priority = __ffs(*pending);
	while (priority <= (sizeof(unsigned int) * 8)) {
		if (kvmppc_book3s_irqprio_deliver(vcpu, priority)) {
			clear_bit(priority, &vcpu->arch.pending_exceptions);
			break;
		}

		priority = find_next_bit(pending,
					 BITS_PER_BYTE * sizeof(*pending),
					 priority + 1);
	}
}

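/* Pick the MMU model and interrupt vector base from the PVR: CPUs in the
 * 64-bit Book3S PVR range get the segmented 64-bit MMU emulation and high
 * interrupt vectors, everything else is treated as 32-bit Book3S. */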
void kvmppc_set_pvr(struct kvm_vcpu *vcpu, u32 pvr)
{
	vcpu->arch.hflags &= ~BOOK3S_HFLAG_SLB;

	vcpu->arch.pvr = pvr;
	if ((pvr >= 0x330000) && (pvr < 0x70330000)) {
		kvmppc_mmu_book3s_64_init(vcpu);
		to_book3s(vcpu)->hior = 0xfff00000;
		to_book3s(vcpu)->msr_mask = 0xffffffffffffffffULL;
	} else {
		kvmppc_mmu_book3s_32_init(vcpu);
		to_book3s(vcpu)->hior = 0;
		to_book3s(vcpu)->msr_mask = 0xffffffffULL;
	}

	/* If we are in hypervisor mode on a 970, we can tell the CPU to
	 * treat DCBZ as a 32-byte store */
	vcpu->arch.hflags &= ~BOOK3S_HFLAG_DCBZ32;
	if (vcpu->arch.mmu.is_dcbz32(vcpu) && (mfmsr() & MSR_HV) &&
	    !strcmp(cur_cpu_spec->platform, "ppc970"))
		vcpu->arch.hflags |= BOOK3S_HFLAG_DCBZ32;
}

/* Book3s_32 CPUs always have a 32-byte cache line size, which Linux assumes.
 * To make Book3s_32 Linux work on Book3s_64, we have to make sure we trap
 * dcbz to emulate a 32-byte dcbz length.
 *
 * The Book3s_64 inventors also realized this case and implemented a special
 * bit in the HID5 register, which is a hypervisor resource. Thus we can't
 * use it.
 *
 * My approach here is to patch the dcbz instruction on executing pages.
 */
static void kvmppc_patch_dcbz(struct kvm_vcpu *vcpu, struct kvmppc_pte *pte)
{
	bool touched = false;
	hva_t hpage;
	u32 *page;
	int i;

	hpage = gfn_to_hva(vcpu->kvm, pte->raddr >> PAGE_SHIFT);
	if (kvm_is_error_hva(hpage))
		return;

	hpage |= pte->raddr & ~PAGE_MASK;
	hpage &= ~0xFFFULL;

	page = vmalloc(HW_PAGE_SIZE);
	if (!page)
		return;

	if (copy_from_user(page, (void __user *)hpage, HW_PAGE_SIZE))
		goto out;

	for (i = 0; i < HW_PAGE_SIZE / 4; i++)
		if ((page[i] & 0xff0007ff) == INS_DCBZ) {
			page[i] &= 0xfffffff7; /* reserved instruction, so we trap */
			touched = true;
		}

	if (touched)
		copy_to_user((void __user *)hpage, page, HW_PAGE_SIZE);

out:
	vfree(page);
}

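/* Resolve an effective address to a guest physical address, either through
 * the guest MMU (when the relevant relocation bit is on) or via the identity
 * mapping real mode uses; the VSID_REAL* markers keep the real-mode variants
 * in distinct shadow address spaces. */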
static int kvmppc_xlate(struct kvm_vcpu *vcpu, ulong eaddr, bool data,
			struct kvmppc_pte *pte)
{
	int relocated = (vcpu->arch.msr & (data ? MSR_DR : MSR_IR));
	int r;

	if (relocated) {
		r = vcpu->arch.mmu.xlate(vcpu, eaddr, pte, data);
	} else {
		pte->eaddr = eaddr;
		pte->raddr = eaddr & 0xffffffff;
		pte->vpage = eaddr >> 12;
		switch (vcpu->arch.msr & (MSR_DR|MSR_IR)) {
		case 0:
			pte->vpage |= VSID_REAL;
			break;
		case MSR_DR:
			pte->vpage |= VSID_REAL_DR;
			break;
		case MSR_IR:
			pte->vpage |= VSID_REAL_IR;
			break;
		}
		pte->may_read = true;
		pte->may_write = true;
		pte->may_execute = true;
		r = 0;
	}

	return r;
}

static hva_t kvmppc_bad_hva(void)
{
	return PAGE_OFFSET;
}

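/* Convert a translated PTE into a host virtual address, checking the access
 * permission we were asked for; returns a bad HVA on failure. */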
static hva_t kvmppc_pte_to_hva(struct kvm_vcpu *vcpu, struct kvmppc_pte *pte,
			       bool read)
{
	hva_t hpage;

	if (read && !pte->may_read)
		goto err;

	if (!read && !pte->may_write)
		goto err;

	hpage = gfn_to_hva(vcpu->kvm, pte->raddr >> PAGE_SHIFT);
	if (kvm_is_error_hva(hpage))
		goto err;

	return hpage | (pte->raddr & ~PAGE_MASK);
err:
	return kvmppc_bad_hva();
}

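/* Store to / load from a guest effective address: translate it, map the
 * result to a host virtual address, and copy through the user mapping. */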
int kvmppc_st(struct kvm_vcpu *vcpu, ulong eaddr, int size, void *ptr)
{
	struct kvmppc_pte pte;
	hva_t hva = eaddr;

	vcpu->stat.st++;

	/* a store is always a data access */
	if (kvmppc_xlate(vcpu, eaddr, true, &pte))
		goto err;

	hva = kvmppc_pte_to_hva(vcpu, &pte, false);
	if (kvm_is_error_hva(hva))
		goto err;

	if (copy_to_user((void __user *)hva, ptr, size)) {
		printk(KERN_INFO "kvmppc_st at 0x%lx failed\n", hva);
		goto err;
	}

	return 0;

err:
	return -ENOENT;
}

int kvmppc_ld(struct kvm_vcpu *vcpu, ulong eaddr, int size, void *ptr,
	      bool data)
{
	struct kvmppc_pte pte;
	hva_t hva = eaddr;

	vcpu->stat.ld++;

	if (kvmppc_xlate(vcpu, eaddr, data, &pte))
		goto err;

	hva = kvmppc_pte_to_hva(vcpu, &pte, true);
	if (kvm_is_error_hva(hva))
		goto err;

	if (copy_from_user(ptr, (void __user *)hva, size)) {
		printk(KERN_INFO "kvmppc_ld at 0x%lx failed\n", hva);
		goto err;
	}

	return 0;

err:
	return -ENOENT;
}

static int kvmppc_visible_gfn(struct kvm_vcpu *vcpu, gfn_t gfn)
{
	return kvm_is_visible_gfn(vcpu->kvm, gfn);
}

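/* Common page fault path for ISI and DSI exits: translate the faulting
 * address, then either reflect the fault into the guest, map the page on
 * the host, or forward the access to the MMIO emulation in userspace. */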
int kvmppc_handle_pagefault(struct kvm_run *run, struct kvm_vcpu *vcpu,
			    ulong eaddr, int vec)
{
	bool data = (vec == BOOK3S_INTERRUPT_DATA_STORAGE);
	int r = RESUME_GUEST;
	int relocated;
	int page_found = 0;
	struct kvmppc_pte pte;
	bool is_mmio = false;

	if (vec == BOOK3S_INTERRUPT_DATA_STORAGE) {
		relocated = (vcpu->arch.msr & MSR_DR);
	} else {
		relocated = (vcpu->arch.msr & MSR_IR);
	}

	/* Resolve real address if translation turned on */
	if (relocated) {
		page_found = vcpu->arch.mmu.xlate(vcpu, eaddr, &pte, data);
	} else {
		pte.may_execute = true;
		pte.may_read = true;
		pte.may_write = true;
		pte.raddr = eaddr & 0xffffffff;
		pte.eaddr = eaddr;
		pte.vpage = eaddr >> 12;
		switch (vcpu->arch.msr & (MSR_DR|MSR_IR)) {
		case 0:
			pte.vpage |= VSID_REAL;
			break;
		case MSR_DR:
			pte.vpage |= VSID_REAL_DR;
			break;
		case MSR_IR:
			pte.vpage |= VSID_REAL_IR;
			break;
		}
	}

	if (vcpu->arch.mmu.is_dcbz32(vcpu) &&
	    (!(vcpu->arch.hflags & BOOK3S_HFLAG_DCBZ32))) {
		/*
		 * If we do the dcbz hack, we have to NX on every execution,
		 * so we can patch the executing code. This renders our guest
		 * NX-less.
		 */
		pte.may_execute = !data;
	}

	if (page_found == -ENOENT) {
		/* Page not found in guest PTE entries */
		vcpu->arch.dear = vcpu->arch.fault_dear;
		to_book3s(vcpu)->dsisr = vcpu->arch.fault_dsisr;
		vcpu->arch.msr |= (vcpu->arch.shadow_msr & 0x00000000f8000000ULL);
		kvmppc_book3s_queue_irqprio(vcpu, vec);
	} else if (page_found == -EPERM) {
		/* Storage protection */
		vcpu->arch.dear = vcpu->arch.fault_dear;
		to_book3s(vcpu)->dsisr = vcpu->arch.fault_dsisr & ~DSISR_NOHPTE;
		to_book3s(vcpu)->dsisr |= DSISR_PROTFAULT;
		vcpu->arch.msr |= (vcpu->arch.shadow_msr & 0x00000000f8000000ULL);
		kvmppc_book3s_queue_irqprio(vcpu, vec);
	} else if (page_found == -EINVAL) {
		/* Page not found in guest SLB */
		vcpu->arch.dear = vcpu->arch.fault_dear;
		kvmppc_book3s_queue_irqprio(vcpu, vec + 0x80);
	} else if (!is_mmio &&
		   kvmppc_visible_gfn(vcpu, pte.raddr >> PAGE_SHIFT)) {
		/* The guest's PTE is not mapped yet. Map on the host */
		kvmppc_mmu_map_page(vcpu, &pte);
		if (data)
			vcpu->stat.sp_storage++;
		else if (vcpu->arch.mmu.is_dcbz32(vcpu) &&
			 (!(vcpu->arch.hflags & BOOK3S_HFLAG_DCBZ32)))
			kvmppc_patch_dcbz(vcpu, &pte);
	} else {
		/* MMIO */
		vcpu->stat.mmio_exits++;
		vcpu->arch.paddr_accessed = pte.raddr;
		r = kvmppc_emulate_mmio(run, vcpu);
		if (r == RESUME_HOST_NV)
			r = RESUME_HOST;
		if (r == RESUME_GUEST_NV)
			r = RESUME_GUEST;
	}

	return r;
}

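/* Top-level exit handler, called whenever the guest traps into the
 * hypervisor. Returns a RESUME_* code telling the caller whether to reenter
 * the guest or drop back to userspace. */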
int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
		       unsigned int exit_nr)
{
	int r = RESUME_HOST;

	vcpu->stat.sum_exits++;

	run->exit_reason = KVM_EXIT_UNKNOWN;
	run->ready_for_interrupt_injection = 1;
#ifdef EXIT_DEBUG
	printk(KERN_EMERG "exit_nr=0x%x | pc=0x%lx | dar=0x%lx | dec=0x%x | msr=0x%lx\n",
		exit_nr, vcpu->arch.pc, vcpu->arch.fault_dear,
		kvmppc_get_dec(vcpu), vcpu->arch.msr);
#elif defined(EXIT_DEBUG_SIMPLE)
	if ((exit_nr != 0x900) && (exit_nr != 0x500))
		printk(KERN_EMERG "exit_nr=0x%x | pc=0x%lx | dar=0x%lx | msr=0x%lx\n",
			exit_nr, vcpu->arch.pc, vcpu->arch.fault_dear,
			vcpu->arch.msr);
#endif
	kvm_resched(vcpu);
	switch (exit_nr) {
	case BOOK3S_INTERRUPT_INST_STORAGE:
		vcpu->stat.pf_instruc++;
		/* only care about PTEG not found errors, but leave NX alone */
		if (vcpu->arch.shadow_msr & 0x40000000) {
			r = kvmppc_handle_pagefault(run, vcpu, vcpu->arch.pc, exit_nr);
			vcpu->stat.sp_instruc++;
		} else if (vcpu->arch.mmu.is_dcbz32(vcpu) &&
			   (!(vcpu->arch.hflags & BOOK3S_HFLAG_DCBZ32))) {
			/*
			 * XXX If we do the dcbz hack, we use the NX bit to
			 * flush and patch the page, so we can't use NX inside
			 * the guest. Let's cross our fingers that no guest
			 * that needs the dcbz hack also uses NX.
			 */
			kvmppc_mmu_pte_flush(vcpu, vcpu->arch.pc, ~0xFFFULL);
		} else {
			vcpu->arch.msr |= (vcpu->arch.shadow_msr & 0x58000000);
			kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
			kvmppc_mmu_pte_flush(vcpu, vcpu->arch.pc, ~0xFFFULL);
			r = RESUME_GUEST;
		}
		break;
	case BOOK3S_INTERRUPT_DATA_STORAGE:
		vcpu->stat.pf_storage++;
		/* The only case we need to handle is missing shadow PTEs */
		if (vcpu->arch.fault_dsisr & DSISR_NOHPTE) {
			r = kvmppc_handle_pagefault(run, vcpu, vcpu->arch.fault_dear, exit_nr);
		} else {
			vcpu->arch.dear = vcpu->arch.fault_dear;
			to_book3s(vcpu)->dsisr = vcpu->arch.fault_dsisr;
			kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
			kvmppc_mmu_pte_flush(vcpu, vcpu->arch.dear, ~0xFFFULL);
			r = RESUME_GUEST;
		}
		break;
	case BOOK3S_INTERRUPT_DATA_SEGMENT:
		if (kvmppc_mmu_map_segment(vcpu, vcpu->arch.fault_dear) < 0) {
			vcpu->arch.dear = vcpu->arch.fault_dear;
			kvmppc_book3s_queue_irqprio(vcpu,
				BOOK3S_INTERRUPT_DATA_SEGMENT);
		}
		r = RESUME_GUEST;
		break;
	case BOOK3S_INTERRUPT_INST_SEGMENT:
		if (kvmppc_mmu_map_segment(vcpu, vcpu->arch.pc) < 0) {
			kvmppc_book3s_queue_irqprio(vcpu,
				BOOK3S_INTERRUPT_INST_SEGMENT);
		}
		r = RESUME_GUEST;
		break;
	/* We're good on these - the host merely wanted to get our attention */
	case BOOK3S_INTERRUPT_DECREMENTER:
		vcpu->stat.dec_exits++;
		r = RESUME_GUEST;
		break;
	case BOOK3S_INTERRUPT_EXTERNAL:
		vcpu->stat.ext_intr_exits++;
		r = RESUME_GUEST;
		break;
	case BOOK3S_INTERRUPT_PROGRAM:
	{
		enum emulation_result er;

		if (vcpu->arch.msr & MSR_PR) {
#ifdef EXIT_DEBUG
			printk(KERN_INFO "Userspace triggered 0x700 exception at 0x%lx (0x%x)\n",
			       vcpu->arch.pc, vcpu->arch.last_inst);
#endif
			if ((vcpu->arch.last_inst & 0xff0007ff) !=
			    (INS_DCBZ & 0xfffffff7)) {
				kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
				r = RESUME_GUEST;
				break;
			}
		}

		vcpu->stat.emulated_inst_exits++;
		er = kvmppc_emulate_instruction(run, vcpu);
		switch (er) {
		case EMULATE_DONE:
			r = RESUME_GUEST;
			break;
		case EMULATE_FAIL:
			printk(KERN_CRIT "%s: emulation at %lx failed (%08x)\n",
			       __func__, vcpu->arch.pc, vcpu->arch.last_inst);
			kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
			r = RESUME_GUEST;
			break;
		default:
			BUG();
		}
		break;
	}
	case BOOK3S_INTERRUPT_SYSCALL:
#ifdef EXIT_DEBUG
		printk(KERN_INFO "Syscall Nr %d\n", (int)vcpu->arch.gpr[0]);
#endif
		vcpu->stat.syscall_exits++;
		kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
		r = RESUME_GUEST;
		break;
	case BOOK3S_INTERRUPT_MACHINE_CHECK:
	case BOOK3S_INTERRUPT_FP_UNAVAIL:
	case BOOK3S_INTERRUPT_TRACE:
	case BOOK3S_INTERRUPT_ALTIVEC:
	case BOOK3S_INTERRUPT_VSX:
		kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
		r = RESUME_GUEST;
		break;
	default:
		/* Ugh - bork here! What did we get? */
		printk(KERN_EMERG "exit_nr=0x%x | pc=0x%lx | msr=0x%lx\n",
		       exit_nr, vcpu->arch.pc, vcpu->arch.shadow_msr);
		r = RESUME_HOST;
		BUG();
		break;
	}

	if (!(r & RESUME_HOST)) {
		/* To avoid clobbering exit_reason, only check for signals if
		 * we aren't already exiting to userspace for some other
		 * reason. */
		if (signal_pending(current)) {
#ifdef EXIT_DEBUG
			printk(KERN_EMERG "KVM: Going back to host\n");
#endif
			vcpu->stat.signal_exits++;
			run->exit_reason = KVM_EXIT_INTR;
			r = -EINTR;
		} else {
			/* In case an interrupt came in that was triggered
			 * from userspace (like DEC), we need to check what
			 * to inject now! */
			kvmppc_core_deliver_interrupts(vcpu);
		}
	}

#ifdef EXIT_DEBUG
	printk(KERN_EMERG "KVM exit: vcpu=0x%p pc=0x%lx r=0x%x\n",
	       vcpu, vcpu->arch.pc, r);
#endif

	return r;
}

int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
{
	return 0;
}

int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	int i;

	regs->pc = vcpu->arch.pc;
	regs->cr = vcpu->arch.cr;
	regs->ctr = vcpu->arch.ctr;
	regs->lr = vcpu->arch.lr;
	regs->xer = vcpu->arch.xer;
	regs->msr = vcpu->arch.msr;
	regs->srr0 = vcpu->arch.srr0;
	regs->srr1 = vcpu->arch.srr1;
	regs->pid = vcpu->arch.pid;
	regs->sprg0 = vcpu->arch.sprg0;
	regs->sprg1 = vcpu->arch.sprg1;
	regs->sprg2 = vcpu->arch.sprg2;
	regs->sprg3 = vcpu->arch.sprg3;
	regs->sprg4 = vcpu->arch.sprg4;
	regs->sprg5 = vcpu->arch.sprg5;
	regs->sprg6 = vcpu->arch.sprg6;
	regs->sprg7 = vcpu->arch.sprg7;

	for (i = 0; i < ARRAY_SIZE(regs->gpr); i++)
		regs->gpr[i] = vcpu->arch.gpr[i];

	return 0;
}

int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	int i;

	vcpu->arch.pc = regs->pc;
	vcpu->arch.cr = regs->cr;
	vcpu->arch.ctr = regs->ctr;
	vcpu->arch.lr = regs->lr;
	vcpu->arch.xer = regs->xer;
	kvmppc_set_msr(vcpu, regs->msr);
	vcpu->arch.srr0 = regs->srr0;
	vcpu->arch.srr1 = regs->srr1;
	vcpu->arch.sprg0 = regs->sprg0;
	vcpu->arch.sprg1 = regs->sprg1;
	vcpu->arch.sprg2 = regs->sprg2;
	vcpu->arch.sprg3 = regs->sprg3;
	vcpu->arch.sprg4 = regs->sprg4;
	vcpu->arch.sprg5 = regs->sprg5;
	vcpu->arch.sprg6 = regs->sprg6;
	vcpu->arch.sprg7 = regs->sprg7;

	for (i = 0; i < ARRAY_SIZE(vcpu->arch.gpr); i++)
		vcpu->arch.gpr[i] = regs->gpr[i];

	return 0;
}

int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
	int i;

	sregs->pvr = vcpu->arch.pvr;

	sregs->u.s.sdr1 = to_book3s(vcpu)->sdr1;
	if (vcpu->arch.hflags & BOOK3S_HFLAG_SLB) {
		for (i = 0; i < 64; i++) {
			sregs->u.s.ppc64.slb[i].slbe = vcpu3s->slb[i].orige | i;
			sregs->u.s.ppc64.slb[i].slbv = vcpu3s->slb[i].origv;
		}
	} else {
		for (i = 0; i < 16; i++) {
			sregs->u.s.ppc32.sr[i] = vcpu3s->sr[i].raw;
		}
		for (i = 0; i < 8; i++) {
			sregs->u.s.ppc32.ibat[i] = vcpu3s->ibat[i].raw;
			sregs->u.s.ppc32.dbat[i] = vcpu3s->dbat[i].raw;
		}
	}

	return 0;
}

int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
	int i;

	kvmppc_set_pvr(vcpu, sregs->pvr);

	vcpu3s->sdr1 = sregs->u.s.sdr1;
	if (vcpu->arch.hflags & BOOK3S_HFLAG_SLB) {
		for (i = 0; i < 64; i++) {
			vcpu->arch.mmu.slbmte(vcpu, sregs->u.s.ppc64.slb[i].slbv,
					      sregs->u.s.ppc64.slb[i].slbe);
		}
	} else {
		for (i = 0; i < 16; i++) {
			vcpu->arch.mmu.mtsrin(vcpu, i, sregs->u.s.ppc32.sr[i]);
		}
		for (i = 0; i < 8; i++) {
			kvmppc_set_bat(vcpu, &(vcpu3s->ibat[i]), false,
				       (u32)sregs->u.s.ppc32.ibat[i]);
			kvmppc_set_bat(vcpu, &(vcpu3s->ibat[i]), true,
				       (u32)(sregs->u.s.ppc32.ibat[i] >> 32));
			kvmppc_set_bat(vcpu, &(vcpu3s->dbat[i]), false,
				       (u32)sregs->u.s.ppc32.dbat[i]);
			kvmppc_set_bat(vcpu, &(vcpu3s->dbat[i]), true,
				       (u32)(sregs->u.s.ppc32.dbat[i] >> 32));
		}
	}

	/* Flush the MMU after messing with the segments */
	kvmppc_mmu_pte_flush(vcpu, 0, 0);

	return 0;
}

int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	return -ENOTSUPP;
}

int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	return -ENOTSUPP;
}

int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
				  struct kvm_translation *tr)
{
	return 0;
}

/*
 * Get (and clear) the dirty memory log for a memory slot.
 */
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
			       struct kvm_dirty_log *log)
{
	struct kvm_memory_slot *memslot;
	struct kvm_vcpu *vcpu;
	ulong ga, ga_end;
	int is_dirty = 0;
	int r, n;

	down_write(&kvm->slots_lock);

	r = kvm_get_dirty_log(kvm, log, &is_dirty);
	if (r)
		goto out;

	/* If nothing is dirty, don't bother messing with page tables. */
	if (is_dirty) {
		memslot = &kvm->memslots[log->slot];

		ga = memslot->base_gfn << PAGE_SHIFT;
		ga_end = ga + (memslot->npages << PAGE_SHIFT);

		kvm_for_each_vcpu(n, vcpu, kvm)
			kvmppc_mmu_pte_pflush(vcpu, ga, ga_end);

		n = ALIGN(memslot->npages, BITS_PER_LONG) / 8;
		memset(memslot->dirty_bitmap, 0, n);
	}

	r = 0;
out:
	up_write(&kvm->slots_lock);
	return r;
}

int kvmppc_core_check_processor_compat(void)
{
	return 0;
}

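/* Allocate and initialize a vcpu. The vcpu is embedded in the Book3S
 * struct, defaults to a 970FX PVR, and gets its own MMU context, from which
 * the VSID range used for shadow mappings is carved. */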
struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm, unsigned int id)
{
	struct kvmppc_vcpu_book3s *vcpu_book3s;
	struct kvm_vcpu *vcpu;
	int err;

	vcpu_book3s = (struct kvmppc_vcpu_book3s *)__get_free_pages(
			GFP_KERNEL | __GFP_ZERO,
			get_order(sizeof(struct kvmppc_vcpu_book3s)));
	if (!vcpu_book3s) {
		err = -ENOMEM;
		goto out;
	}

	vcpu = &vcpu_book3s->vcpu;
	err = kvm_vcpu_init(vcpu, kvm, id);
	if (err)
		goto free_vcpu;

	vcpu->arch.host_retip = kvm_return_point;
	vcpu->arch.host_msr = mfmsr();
	/* default to book3s_64 (970fx) */
	vcpu->arch.pvr = 0x3C0301;
	kvmppc_set_pvr(vcpu, vcpu->arch.pvr);
	vcpu_book3s->slb_nr = 64;

	/* remember where some real-mode handlers are */
	vcpu->arch.trampoline_lowmem = kvmppc_trampoline_lowmem;
	vcpu->arch.trampoline_enter = kvmppc_trampoline_enter;
	vcpu->arch.highmem_handler = (ulong)kvmppc_handler_highmem;

	vcpu->arch.shadow_msr = MSR_USER64;

	err = __init_new_context();
	if (err < 0)
		goto free_vcpu;
	vcpu_book3s->context_id = err;

	vcpu_book3s->vsid_max = ((vcpu_book3s->context_id + 1) << USER_ESID_BITS) - 1;
	vcpu_book3s->vsid_first = vcpu_book3s->context_id << USER_ESID_BITS;
	vcpu_book3s->vsid_next = vcpu_book3s->vsid_first;

	return vcpu;

free_vcpu:
	free_pages((long)vcpu_book3s, get_order(sizeof(struct kvmppc_vcpu_book3s)));
out:
	return ERR_PTR(err);
}

void kvmppc_core_vcpu_free(struct kvm_vcpu *vcpu)
{
	struct kvmppc_vcpu_book3s *vcpu_book3s = to_book3s(vcpu);

	__destroy_context(vcpu_book3s->context_id);
	kvm_vcpu_uninit(vcpu);
	free_pages((long)vcpu_book3s, get_order(sizeof(struct kvmppc_vcpu_book3s)));
}

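/* Enter the guest. We arrive here with interrupts disabled; they stay on
 * for the duration of guest execution and the actual context switch is done
 * by the assembly entry helper. */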
extern int __kvmppc_vcpu_entry(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu);
int __kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
{
	int ret;

	/* No need to go into the guest when all we'd do is come right back */
	if (signal_pending(current)) {
		kvm_run->exit_reason = KVM_EXIT_INTR;
		return -EINTR;
	}

	/* XXX we get called with irq disabled - change that! */
	local_irq_enable();

	ret = __kvmppc_vcpu_entry(kvm_run, vcpu);

	local_irq_disable();

	return ret;
}

static int kvmppc_book3s_init(void)
{
	return kvm_init(NULL, sizeof(struct kvmppc_vcpu_book3s), THIS_MODULE);
}

static void kvmppc_book3s_exit(void)
{
	kvm_exit();
}

module_init(kvmppc_book3s_init);
module_exit(kvmppc_book3s_exit);