/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 *
 * Copyright IBM Corp. 2007
 *
 * Authors: Hollis Blanchard <hollisb@us.ibm.com>
 *          Christian Ehrhardt <ehrhardt@linux.vnet.ibm.com>
 */

#include <linux/errno.h>
#include <linux/err.h>
#include <linux/kvm_host.h>
#include <linux/module.h>
#include <linux/vmalloc.h>
#include <linux/fs.h>
#include <asm/cputable.h>
#include <asm/uaccess.h>
#include <asm/kvm_ppc.h>
#include <asm/tlbflush.h>
#include "../mm/mmu_decl.h"

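/*
 * Guest frame numbers are not aliased on PowerPC, so translating a gfn
 * is the identity operation.
 */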
gfn_t unalias_gfn(struct kvm *kvm, gfn_t gfn)
{
	return gfn;
}

int kvm_cpu_has_interrupt(struct kvm_vcpu *v)
{
	return !!(v->arch.pending_exceptions);
}

int kvm_arch_vcpu_runnable(struct kvm_vcpu *v)
{
	return !(v->arch.msr & MSR_WE);
}

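/*
 * A vcpu that has set MSR[WE] (wait state enable) is idle: it is not
 * runnable again until an exception is pending for it.
 *
 * kvmppc_emulate_mmio() dispatches on the emulation result: EMULATE_DONE
 * resumes the guest, EMULATE_DO_MMIO exits to userland with KVM_EXIT_MMIO,
 * and EMULATE_FAIL logs the offending instruction and returns to the host.
 */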
int kvmppc_emulate_mmio(struct kvm_run *run, struct kvm_vcpu *vcpu)
{
	enum emulation_result er;
	int r;

	er = kvmppc_emulate_instruction(run, vcpu);
	switch (er) {
	case EMULATE_DONE:
		/* Future optimization: only reload non-volatiles if they were
		 * actually modified. */
		r = RESUME_GUEST_NV;
		break;
	case EMULATE_DO_MMIO:
		run->exit_reason = KVM_EXIT_MMIO;
		/* We must reload nonvolatiles because "update" load/store
		 * instructions modify register state. */
		/* Future optimization: only reload non-volatiles if they were
		 * actually modified. */
		r = RESUME_HOST_NV;
		break;
	case EMULATE_FAIL:
		/* XXX Deliver Program interrupt to guest. */
		printk(KERN_EMERG "%s: emulation failed (%08x)\n", __func__,
		       vcpu->arch.last_inst);
		r = RESUME_HOST;
		break;
	default:
		BUG();
	}

	return r;
}

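/*
 * This port runs guests without any hardware virtualization support, so
 * the hardware enable/disable/setup hooks below have nothing to do.
 */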
void kvm_arch_hardware_enable(void *garbage)
{
}

void kvm_arch_hardware_disable(void *garbage)
{
}

int kvm_arch_hardware_setup(void)
{
	return 0;
}

void kvm_arch_hardware_unsetup(void)
{
}

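/* Only the PPC 440 core is supported as a host. */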
void kvm_arch_check_processor_compat(void *rtn)
{
	int r;

	if (strcmp(cur_cpu_spec->platform, "ppc440") == 0)
		r = 0;
	else
		r = -ENOTSUPP;

	*(int *)rtn = r;
}

struct kvm *kvm_arch_create_vm(void)
{
	struct kvm *kvm;

	kvm = kzalloc(sizeof(struct kvm), GFP_KERNEL);
	if (!kvm)
		return ERR_PTR(-ENOMEM);

	return kvm;
}

static void kvmppc_free_vcpus(struct kvm *kvm)
{
	unsigned int i;

	for (i = 0; i < KVM_MAX_VCPUS; ++i) {
		if (kvm->vcpus[i]) {
			kvm_arch_vcpu_free(kvm->vcpus[i]);
			kvm->vcpus[i] = NULL;
		}
	}
}

void kvm_arch_destroy_vm(struct kvm *kvm)
{
	kvmppc_free_vcpus(kvm);
	kvm_free_physmem(kvm);
	kfree(kvm);
}

int kvm_dev_ioctl_check_extension(long ext)
{
	int r;

	switch (ext) {
	case KVM_CAP_USER_MEMORY:
		r = 1;
		break;
	case KVM_CAP_COALESCED_MMIO:
		r = KVM_COALESCED_MMIO_PAGE_OFFSET;
		break;
	default:
		r = 0;
		break;
	}

	return r;
}

long kvm_arch_dev_ioctl(struct file *filp,
                        unsigned int ioctl, unsigned long arg)
{
	return -EINVAL;
}

int kvm_arch_set_memory_region(struct kvm *kvm,
                               struct kvm_userspace_memory_region *mem,
                               struct kvm_memory_slot old,
                               int user_alloc)
{
	return 0;
}

void kvm_arch_flush_shadow(struct kvm *kvm)
{
}

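/*
 * vcpus come from the shared kvm_vcpu_cache slab; kvm_vcpu_init() attaches
 * the vcpu to the VM, and any failure must free the slab object again so
 * the caller only ever sees a valid vcpu or an ERR_PTR value.
 */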
struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id)
{
	struct kvm_vcpu *vcpu;
	int err;

	vcpu = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
	if (!vcpu) {
		err = -ENOMEM;
		goto out;
	}

	err = kvm_vcpu_init(vcpu, kvm, id);
	if (err)
		goto free_vcpu;

	return vcpu;

free_vcpu:
	kmem_cache_free(kvm_vcpu_cache, vcpu);
out:
	return ERR_PTR(err);
}

void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu)
{
	kvm_vcpu_uninit(vcpu);
	kmem_cache_free(kvm_vcpu_cache, vcpu);
}

void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{
	kvm_arch_vcpu_free(vcpu);
}

int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
{
	unsigned int priority = exception_priority[BOOKE_INTERRUPT_DECREMENTER];

	return test_bit(priority, &vcpu->arch.pending_exceptions);
}

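/*
 * The guest decrementer is emulated with a host timer (arch.dec_timer,
 * set up by kvm_arch_vcpu_init() below). When it fires we queue a Book E
 * decrementer exception and, if the vcpu is sleeping in the host, wake it
 * so the interrupt can actually be delivered.
 */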
static void kvmppc_decrementer_func(unsigned long data)
{
	struct kvm_vcpu *vcpu = (struct kvm_vcpu *)data;

	kvmppc_queue_exception(vcpu, BOOKE_INTERRUPT_DECREMENTER);

	if (waitqueue_active(&vcpu->wq)) {
		wake_up_interruptible(&vcpu->wq);
		vcpu->stat.halt_wakeup++;
	}
}

int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
{
	setup_timer(&vcpu->arch.dec_timer, kvmppc_decrementer_func,
	            (unsigned long)vcpu);

	return 0;
}

void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
{
	kvmppc_core_destroy_mmu(vcpu);
}

/* Note: clearing MSR[DE] just means that the debug interrupt will not be
 * delivered *immediately*. Instead, it simply sets the appropriate DBSR bits.
 * If those DBSR bits are still set when MSR[DE] is re-enabled, the interrupt
 * will be delivered as an "imprecise debug event" (which is indicated by
 * DBSR[IDE]).
 */
static void kvmppc_disable_debug_interrupts(void)
{
	mtmsr(mfmsr() & ~MSR_DE);
}

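/*
 * Host and guest debug state are swapped with debug interrupts masked:
 * MSR[DE] is cleared first so that writing the IACn/DBCRn registers cannot
 * raise a debug interrupt halfway through, and the saved MSR (with its
 * original DE setting) is only restored once everything is back in place.
 */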
static void kvmppc_restore_host_debug_state(struct kvm_vcpu *vcpu)
{
	kvmppc_disable_debug_interrupts();

	mtspr(SPRN_IAC1, vcpu->arch.host_iac[0]);
	mtspr(SPRN_IAC2, vcpu->arch.host_iac[1]);
	mtspr(SPRN_IAC3, vcpu->arch.host_iac[2]);
	mtspr(SPRN_IAC4, vcpu->arch.host_iac[3]);
	mtspr(SPRN_DBCR1, vcpu->arch.host_dbcr1);
	mtspr(SPRN_DBCR2, vcpu->arch.host_dbcr2);
	mtspr(SPRN_DBCR0, vcpu->arch.host_dbcr0);
	mtmsr(vcpu->arch.host_msr);
}

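/*
 * dbcr0 is rebuilt from scratch for the guest: each armed breakpoint
 * contributes its DBCR0_IACn enable bit plus DBCR0_IDM (internal debug
 * mode), so a guest with no breakpoints leaves all debug events off.
 */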
static void kvmppc_load_guest_debug_registers(struct kvm_vcpu *vcpu)
{
	struct kvm_guest_debug *dbg = &vcpu->guest_debug;
	u32 dbcr0 = 0;

	vcpu->arch.host_msr = mfmsr();
	kvmppc_disable_debug_interrupts();

	/* Save host debug register state. */
	vcpu->arch.host_iac[0] = mfspr(SPRN_IAC1);
	vcpu->arch.host_iac[1] = mfspr(SPRN_IAC2);
	vcpu->arch.host_iac[2] = mfspr(SPRN_IAC3);
	vcpu->arch.host_iac[3] = mfspr(SPRN_IAC4);
	vcpu->arch.host_dbcr0 = mfspr(SPRN_DBCR0);
	vcpu->arch.host_dbcr1 = mfspr(SPRN_DBCR1);
	vcpu->arch.host_dbcr2 = mfspr(SPRN_DBCR2);

	/* set registers up for guest */
	if (dbg->bp[0]) {
		mtspr(SPRN_IAC1, dbg->bp[0]);
		dbcr0 |= DBCR0_IAC1 | DBCR0_IDM;
	}
	if (dbg->bp[1]) {
		mtspr(SPRN_IAC2, dbg->bp[1]);
		dbcr0 |= DBCR0_IAC2 | DBCR0_IDM;
	}
	if (dbg->bp[2]) {
		mtspr(SPRN_IAC3, dbg->bp[2]);
		dbcr0 |= DBCR0_IAC3 | DBCR0_IDM;
	}
	if (dbg->bp[3]) {
		mtspr(SPRN_IAC4, dbg->bp[3]);
		dbcr0 |= DBCR0_IAC4 | DBCR0_IDM;
	}

	mtspr(SPRN_DBCR0, dbcr0);
	mtspr(SPRN_DBCR1, 0);
	mtspr(SPRN_DBCR2, 0);
}

void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	int i;

	if (vcpu->guest_debug.enabled)
		kvmppc_load_guest_debug_registers(vcpu);

	/* Mark every guest entry in the shadow TLB as modified, so that they
	 * will all be reloaded on the next vcpu run (instead of being
	 * demand-faulted). */
	for (i = 0; i <= tlb_44x_hwater; i++)
		kvmppc_tlbe_set_modified(vcpu, i);
}

void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
	if (vcpu->guest_debug.enabled)
		kvmppc_restore_host_debug_state(vcpu);

	/* Don't leave guest TLB entries resident when being de-scheduled. */
	/* XXX It would be nice to differentiate between heavyweight exit and
	 * sched_out here, since we could avoid the TLB flush for heavyweight
	 * exits. */
	_tlbia();
}

int kvm_arch_vcpu_ioctl_debug_guest(struct kvm_vcpu *vcpu,
                                    struct kvm_debug_guest *dbg)
{
	int i;

	vcpu->guest_debug.enabled = dbg->enabled;
	if (vcpu->guest_debug.enabled) {
		for (i = 0; i < ARRAY_SIZE(vcpu->guest_debug.bp); i++) {
			if (dbg->breakpoints[i].enabled)
				vcpu->guest_debug.bp[i] = dbg->breakpoints[i].address;
			else
				vcpu->guest_debug.bp[i] = 0;
		}
	}

	return 0;
}

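/*
 * DCRs (device control registers) are emulated like MMIO: userland
 * performs the access and, for a read, returns the value in run->dcr.data,
 * which lands in the GPR recorded in arch.io_gpr at emulation time.
 */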
static void kvmppc_complete_dcr_load(struct kvm_vcpu *vcpu,
                                     struct kvm_run *run)
{
	u32 *gpr = &vcpu->arch.gpr[vcpu->arch.io_gpr];

	*gpr = run->dcr.data;
}

static void kvmppc_complete_mmio_load(struct kvm_vcpu *vcpu,
                                      struct kvm_run *run)
{
	u32 *gpr = &vcpu->arch.gpr[vcpu->arch.io_gpr];

	if (run->mmio.len > sizeof(*gpr)) {
		printk(KERN_ERR "bad MMIO length: %d\n", run->mmio.len);
		return;
	}

	if (vcpu->arch.mmio_is_bigendian) {
		switch (run->mmio.len) {
		case 4: *gpr = *(u32 *)run->mmio.data; break;
		case 2: *gpr = *(u16 *)run->mmio.data; break;
		case 1: *gpr = *(u8 *)run->mmio.data; break;
		}
	} else {
		/* Convert BE data from userland back to LE. */
		switch (run->mmio.len) {
		case 4: *gpr = ld_le32((u32 *)run->mmio.data); break;
		case 2: *gpr = ld_le16((u16 *)run->mmio.data); break;
		case 1: *gpr = *(u8 *)run->mmio.data; break;
		}
	}
}

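/*
 * MMIO emulation protocol: kvmppc_handle_load()/kvmppc_handle_store()
 * describe the faulting access in the shared kvm_run area and return
 * EMULATE_DO_MMIO, forcing an exit to userland. Userland performs the
 * access and, for loads, deposits the result in run->mmio.data, which
 * kvmppc_complete_mmio_load() above consumes on the next KVM_RUN.
 */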
int kvmppc_handle_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
                       unsigned int rt, unsigned int bytes, int is_bigendian)
{
	if (bytes > sizeof(run->mmio.data)) {
		printk(KERN_ERR "%s: bad MMIO length: %d\n", __func__,
		       run->mmio.len);
	}

	run->mmio.phys_addr = vcpu->arch.paddr_accessed;
	run->mmio.len = bytes;
	run->mmio.is_write = 0;

	vcpu->arch.io_gpr = rt;
	vcpu->arch.mmio_is_bigendian = is_bigendian;
	vcpu->mmio_needed = 1;
	vcpu->mmio_is_write = 0;

	return EMULATE_DO_MMIO;
}

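/*
 * Stores stage the value into run->mmio.data in the guest's byte order:
 * natively for a big-endian access, byte-swapped via st_le32()/st_le16()
 * for a little-endian one.
 */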
int kvmppc_handle_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
                        u32 val, unsigned int bytes, int is_bigendian)
{
	void *data = run->mmio.data;

	if (bytes > sizeof(run->mmio.data)) {
		printk(KERN_ERR "%s: bad MMIO length: %d\n", __func__,
		       run->mmio.len);
	}

	run->mmio.phys_addr = vcpu->arch.paddr_accessed;
	run->mmio.len = bytes;
	run->mmio.is_write = 1;
	vcpu->mmio_needed = 1;
	vcpu->mmio_is_write = 1;

	/* Store the value at the lowest bytes in 'data'. */
	if (is_bigendian) {
		switch (bytes) {
		case 4: *(u32 *)data = val; break;
		case 2: *(u16 *)data = val; break;
		case 1: *(u8 *)data = val; break;
		}
	} else {
		/* Store LE value into 'data'. */
		switch (bytes) {
		case 4: st_le32(data, val); break;
		case 2: st_le16(data, val); break;
		case 1: *(u8 *)data = val; break;
		}
	}

	return EMULATE_DO_MMIO;
}

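/*
 * KVM_RUN entry point: complete any MMIO or DCR load left pending by the
 * previous exit, deliver pending interrupts, then drop into the low-level
 * __kvmppc_vcpu_run() context switch.
 */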
int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	int r;
	sigset_t sigsaved;

	vcpu_load(vcpu);

	if (vcpu->sigset_active)
		sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);

	if (vcpu->mmio_needed) {
		if (!vcpu->mmio_is_write)
			kvmppc_complete_mmio_load(vcpu, run);
		vcpu->mmio_needed = 0;
	} else if (vcpu->arch.dcr_needed) {
		if (!vcpu->arch.dcr_is_write)
			kvmppc_complete_dcr_load(vcpu, run);
		vcpu->arch.dcr_needed = 0;
	}

	kvmppc_check_and_deliver_interrupts(vcpu);

	local_irq_disable();
	kvm_guest_enter();
	r = __kvmppc_vcpu_run(run, vcpu);
	kvm_guest_exit();
	local_irq_enable();

	if (vcpu->sigset_active)
		sigprocmask(SIG_SETMASK, &sigsaved, NULL);

	vcpu_put(vcpu);

	return r;
}

int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu, struct kvm_interrupt *irq)
{
	kvmppc_queue_exception(vcpu, BOOKE_INTERRUPT_EXTERNAL);

	if (waitqueue_active(&vcpu->wq)) {
		wake_up_interruptible(&vcpu->wq);
		vcpu->stat.halt_wakeup++;
	}

	return 0;
}

int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
                                    struct kvm_mp_state *mp_state)
{
	return -EINVAL;
}

int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
                                    struct kvm_mp_state *mp_state)
{
	return -EINVAL;
}

long kvm_arch_vcpu_ioctl(struct file *filp,
                         unsigned int ioctl, unsigned long arg)
{
	struct kvm_vcpu *vcpu = filp->private_data;
	void __user *argp = (void __user *)arg;
	long r;

	switch (ioctl) {
	case KVM_INTERRUPT: {
		struct kvm_interrupt irq;
		r = -EFAULT;
		if (copy_from_user(&irq, argp, sizeof(irq)))
			goto out;
		r = kvm_vcpu_ioctl_interrupt(vcpu, &irq);
		break;
	}
	default:
		r = -EINVAL;
	}

out:
	return r;
}

int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log)
{
	return -ENOTSUPP;
}

long kvm_arch_vm_ioctl(struct file *filp,
                       unsigned int ioctl, unsigned long arg)
{
	long r;

	switch (ioctl) {
	default:
		r = -EINVAL;
	}

	return r;
}

int kvm_arch_init(void *opaque)
{
	return 0;
}

void kvm_arch_exit(void)
{
}