/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
 *
 * Copyright IBM Corp. 2007
 *
 * Authors: Hollis Blanchard <hollisb@us.ibm.com>
 *          Christian Ehrhardt <ehrhardt@linux.vnet.ibm.com>
 */
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/kvm_host.h>
#include <linux/vmalloc.h>
#include <linux/hrtimer.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <asm/cputable.h>
#include <asm/uaccess.h>
#include <asm/kvm_ppc.h>
#include <asm/tlbflush.h>
#include <asm/cputhreads.h>
#include "timing.h"
#include "../mm/mmu_decl.h"
#define CREATE_TRACE_POINTS
#include "trace.h"
int kvm_arch_vcpu_runnable(struct kvm_vcpu *v)
{
	return !(v->arch.shared->msr & MSR_WE) ||
	       !!(v->arch.pending_exceptions) ||
	       v->requests;
}
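
/*
 * Handle a KVM paravirtual hypercall from the guest.  As the register
 * accesses below show, the hypercall number arrives in r11 and up to
 * four arguments in r3..r6; the second return value is placed in r4
 * here, while the primary status code is handed back to the caller.
 */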
int kvmppc_kvm_pv(struct kvm_vcpu *vcpu)
{
	int nr = kvmppc_get_gpr(vcpu, 11);
	int r;
	unsigned long __maybe_unused param1 = kvmppc_get_gpr(vcpu, 3);
	unsigned long __maybe_unused param2 = kvmppc_get_gpr(vcpu, 4);
	unsigned long __maybe_unused param3 = kvmppc_get_gpr(vcpu, 5);
	unsigned long __maybe_unused param4 = kvmppc_get_gpr(vcpu, 6);
	unsigned long r2 = 0;

	if (!(vcpu->arch.shared->msr & MSR_SF)) {
		/* 32 bit mode */
		param1 &= 0xffffffff;
		param2 &= 0xffffffff;
		param3 &= 0xffffffff;
		param4 &= 0xffffffff;
	}

	switch (nr) {
	case HC_VENDOR_KVM | KVM_HC_PPC_MAP_MAGIC_PAGE:
	{
		vcpu->arch.magic_page_pa = param1;
		vcpu->arch.magic_page_ea = param2;

		r2 = KVM_MAGIC_FEAT_SR | KVM_MAGIC_FEAT_MAS0_TO_SPRG7;

		r = HC_EV_SUCCESS;
		break;
	}
	case HC_VENDOR_KVM | KVM_HC_FEATURES:
		r = HC_EV_SUCCESS;
#if defined(CONFIG_PPC_BOOK3S) || defined(CONFIG_KVM_E500)
		/* XXX Missing magic page on 44x */
		r2 |= (1 << KVM_FEATURE_MAGIC_PAGE);
#endif

		/* Second return value is in r4 */
		break;
	default:
		r = HC_EV_UNIMPLEMENTED;
		break;
	}

	kvmppc_set_gpr(vcpu, 4, r2);

	return r;
}
int kvmppc_sanity_check(struct kvm_vcpu *vcpu)
{
	int r = false;

	/* We have to know what CPU to virtualize */
	if (!vcpu->arch.pvr)
		goto out;

	/* PAPR only works with book3s_64 */
	if ((vcpu->arch.cpu_type != KVM_CPU_3S_64) && vcpu->arch.papr_enabled)
		goto out;

#ifdef CONFIG_KVM_BOOK3S_64_HV
	/* HV KVM can only do PAPR mode for now */
	if (!vcpu->arch.papr_enabled)
		goto out;
#endif

	r = true;

out:
	vcpu->arch.sane = r;
	return r ? 0 : -EINVAL;
}
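
/*
 * Emulate the current instruction.  EMULATE_DO_MMIO means the access
 * cannot be completed in the kernel: run->exit_reason is set and the
 * heavyweight exit hands the MMIO request to userspace.
 */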
int kvmppc_emulate_mmio(struct kvm_run *run, struct kvm_vcpu *vcpu)
{
	enum emulation_result er;
	int r;

	er = kvmppc_emulate_instruction(run, vcpu);
	switch (er) {
	case EMULATE_DONE:
		/* Future optimization: only reload non-volatiles if they were
		 * actually modified. */
		r = RESUME_GUEST_NV;
		break;
	case EMULATE_DO_MMIO:
		run->exit_reason = KVM_EXIT_MMIO;
		/* We must reload nonvolatiles because "update" load/store
		 * instructions modify register state. */
		/* Future optimization: only reload non-volatiles if they were
		 * actually modified. */
		r = RESUME_HOST_NV;
		break;
	case EMULATE_FAIL:
		/* XXX Deliver Program interrupt to guest. */
		printk(KERN_EMERG "%s: emulation failed (%08x)\n", __func__,
		       kvmppc_get_last_inst(vcpu));
		r = RESUME_HOST;
		break;
	default:
		BUG();
	}

	return r;
}
int kvm_arch_hardware_enable(void *garbage)
{
	return 0;
}

void kvm_arch_hardware_disable(void *garbage)
{
}

int kvm_arch_hardware_setup(void)
{
	return 0;
}

void kvm_arch_hardware_unsetup(void)
{
}

void kvm_arch_check_processor_compat(void *rtn)
{
	*(int *)rtn = kvmppc_core_check_processor_compat();
}
int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
{
	if (type)
		return -EINVAL;

	return kvmppc_core_init_vm(kvm);
}
void kvm_arch_destroy_vm(struct kvm *kvm)
{
	unsigned int i;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(i, vcpu, kvm)
		kvm_arch_vcpu_free(vcpu);

	mutex_lock(&kvm->lock);
	for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
		kvm->vcpus[i] = NULL;

	atomic_set(&kvm->online_vcpus, 0);

	kvmppc_core_destroy_vm(kvm);

	mutex_unlock(&kvm->lock);
}
void kvm_arch_sync_events(struct kvm *kvm)
{
}
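
/*
 * Report which optional capabilities are available.  Several of the
 * answers depend on which flavour of KVM (Book3S HV, Book3S PR,
 * BookE/e500) this kernel was built for, hence the ifdef maze below.
 */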
int kvm_dev_ioctl_check_extension(long ext)
{
	int r;

	switch (ext) {
#ifdef CONFIG_BOOKE
	case KVM_CAP_PPC_BOOKE_SREGS:
#else
	case KVM_CAP_PPC_SEGSTATE:
	case KVM_CAP_PPC_HIOR:
	case KVM_CAP_PPC_PAPR:
#endif
	case KVM_CAP_PPC_UNSET_IRQ:
	case KVM_CAP_PPC_IRQ_LEVEL:
	case KVM_CAP_ENABLE_CAP:
	case KVM_CAP_ONE_REG:
		r = 1;
		break;
#ifndef CONFIG_KVM_BOOK3S_64_HV
	case KVM_CAP_PPC_PAIRED_SINGLES:
	case KVM_CAP_PPC_OSI:
	case KVM_CAP_PPC_GET_PVINFO:
#ifdef CONFIG_KVM_E500
	case KVM_CAP_SW_TLB:
#endif
		r = 1;
		break;
	case KVM_CAP_COALESCED_MMIO:
		r = KVM_COALESCED_MMIO_PAGE_OFFSET;
		break;
#endif
#ifdef CONFIG_KVM_BOOK3S_64_HV
	case KVM_CAP_SPAPR_TCE:
		r = 1;
		break;
	case KVM_CAP_PPC_SMT:
		r = threads_per_core;
		break;
	case KVM_CAP_PPC_RMA:
		r = 1;
		/* PPC970 requires an RMA */
		if (cpu_has_feature(CPU_FTR_ARCH_201))
			r = 2;
		break;
	case KVM_CAP_SYNC_MMU:
		r = cpu_has_feature(CPU_FTR_ARCH_206) ? 1 : 0;
		break;
#endif
	case KVM_CAP_NR_VCPUS:
		/*
		 * Recommending a number of CPUs is somewhat arbitrary; we
		 * return the number of present CPUs for -HV (since a host
		 * will have secondary threads "offline"), and for other KVM
		 * implementations just count online CPUs.
		 */
#ifdef CONFIG_KVM_BOOK3S_64_HV
		r = num_present_cpus();
#else
		r = num_online_cpus();
#endif
		break;
	case KVM_CAP_MAX_VCPUS:
		r = KVM_MAX_VCPUS;
		break;
	default:
		r = 0;
		break;
	}

	return r;
}
long kvm_arch_dev_ioctl(struct file *filp,
                        unsigned int ioctl, unsigned long arg)
{
	return -EINVAL;
}
void kvm_arch_free_memslot(struct kvm_memory_slot *free,
			   struct kvm_memory_slot *dont)
{
}

int kvm_arch_create_memslot(struct kvm_memory_slot *slot, unsigned long npages)
{
	return 0;
}
int kvm_arch_prepare_memory_region(struct kvm *kvm,
				   struct kvm_memory_slot *memslot,
				   struct kvm_memory_slot old,
				   struct kvm_userspace_memory_region *mem,
				   int user_alloc)
{
	return kvmppc_core_prepare_memory_region(kvm, mem);
}
void kvm_arch_commit_memory_region(struct kvm *kvm,
				   struct kvm_userspace_memory_region *mem,
				   struct kvm_memory_slot old,
				   int user_alloc)
{
	kvmppc_core_commit_memory_region(kvm, mem);
}
void kvm_arch_flush_shadow(struct kvm *kvm)
{
}
struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id)
{
	struct kvm_vcpu *vcpu;
	vcpu = kvmppc_core_vcpu_create(kvm, id);
	if (!IS_ERR(vcpu)) {
		vcpu->arch.wqp = &vcpu->wq;
		kvmppc_create_vcpu_debugfs(vcpu, id);
	}
	return vcpu;
}
void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu)
{
	/* Make sure we're not using the vcpu anymore */
	hrtimer_cancel(&vcpu->arch.dec_timer);
	tasklet_kill(&vcpu->arch.tasklet);

	kvmppc_remove_vcpu_debugfs(vcpu);
	kvmppc_core_vcpu_free(vcpu);
}
void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{
	kvm_arch_vcpu_free(vcpu);
}
int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
{
	return kvmppc_core_pending_dec(vcpu);
}
/*
 * low level hrtimer wake routine. Because this runs in hardirq context
 * we schedule a tasklet to do the real work.
 */
enum hrtimer_restart kvmppc_decrementer_wakeup(struct hrtimer *timer)
{
	struct kvm_vcpu *vcpu;

	vcpu = container_of(timer, struct kvm_vcpu, arch.dec_timer);
	tasklet_schedule(&vcpu->arch.tasklet);

	return HRTIMER_NORESTART;
}
int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
{
	hrtimer_init(&vcpu->arch.dec_timer, CLOCK_REALTIME, HRTIMER_MODE_ABS);
	tasklet_init(&vcpu->arch.tasklet, kvmppc_decrementer_func, (ulong)vcpu);
	vcpu->arch.dec_timer.function = kvmppc_decrementer_wakeup;
	vcpu->arch.dec_expires = ~(u64)0;

#ifdef CONFIG_KVM_EXIT_TIMING
	mutex_init(&vcpu->arch.exit_timing_lock);
#endif

	return 0;
}
void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
{
	kvmppc_mmu_destroy(vcpu);
}
void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
#ifdef CONFIG_BOOKE
	/*
	 * vrsave (formerly usprg0) isn't used by Linux, but may
	 * be used by the guest.
	 *
	 * On non-booke this is associated with Altivec and
	 * is handled by code in book3s.c.
	 */
	mtspr(SPRN_VRSAVE, vcpu->arch.vrsave);
#endif
	kvmppc_core_vcpu_load(vcpu, cpu);
	vcpu->cpu = smp_processor_id();
}
void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
	kvmppc_core_vcpu_put(vcpu);
#ifdef CONFIG_BOOKE
	vcpu->arch.vrsave = mfspr(SPRN_VRSAVE);
#endif
}
int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
                                        struct kvm_guest_debug *dbg)
{
	return -EINVAL;
}
static void kvmppc_complete_dcr_load(struct kvm_vcpu *vcpu,
                                     struct kvm_run *run)
{
	kvmppc_set_gpr(vcpu, vcpu->arch.io_gpr, run->dcr.data);
}
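
/*
 * Finish an MMIO load after userspace has performed the actual access:
 * pick the bytes out of run->mmio.data according to the access length
 * and guest endianness, optionally sign extend, and write the result
 * into the register recorded in vcpu->arch.io_gpr (GPR, FPR or QPR).
 */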
static void kvmppc_complete_mmio_load(struct kvm_vcpu *vcpu,
                                      struct kvm_run *run)
{
	u64 uninitialized_var(gpr);

	if (run->mmio.len > sizeof(gpr)) {
		printk(KERN_ERR "bad MMIO length: %d\n", run->mmio.len);
		return;
	}

	if (vcpu->arch.mmio_is_bigendian) {
		switch (run->mmio.len) {
		case 8: gpr = *(u64 *)run->mmio.data; break;
		case 4: gpr = *(u32 *)run->mmio.data; break;
		case 2: gpr = *(u16 *)run->mmio.data; break;
		case 1: gpr = *(u8 *)run->mmio.data; break;
		}
	} else {
		/* Convert BE data from userland back to LE. */
		switch (run->mmio.len) {
		case 4: gpr = ld_le32((u32 *)run->mmio.data); break;
		case 2: gpr = ld_le16((u16 *)run->mmio.data); break;
		case 1: gpr = *(u8 *)run->mmio.data; break;
		}
	}

	if (vcpu->arch.mmio_sign_extend) {
		switch (run->mmio.len) {
#ifdef CONFIG_PPC64
		case 4:
			gpr = (s64)(s32)gpr;
			break;
#endif
		case 2:
			gpr = (s64)(s16)gpr;
			break;
		case 1:
			gpr = (s64)(s8)gpr;
			break;
		}
	}

	kvmppc_set_gpr(vcpu, vcpu->arch.io_gpr, gpr);

	switch (vcpu->arch.io_gpr & KVM_MMIO_REG_EXT_MASK) {
	case KVM_MMIO_REG_GPR:
		kvmppc_set_gpr(vcpu, vcpu->arch.io_gpr, gpr);
		break;
	case KVM_MMIO_REG_FPR:
		vcpu->arch.fpr[vcpu->arch.io_gpr & KVM_MMIO_REG_MASK] = gpr;
		break;
#ifdef CONFIG_PPC_BOOK3S
	case KVM_MMIO_REG_QPR:
		vcpu->arch.qpr[vcpu->arch.io_gpr & KVM_MMIO_REG_MASK] = gpr;
		break;
	case KVM_MMIO_REG_FQPR:
		vcpu->arch.fpr[vcpu->arch.io_gpr & KVM_MMIO_REG_MASK] = gpr;
		vcpu->arch.qpr[vcpu->arch.io_gpr & KVM_MMIO_REG_MASK] = gpr;
		break;
#endif
	default:
		BUG();
	}
}
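
/*
 * Set up a kvm_run MMIO request for userspace.  The guest physical
 * address was recorded in vcpu->arch.paddr_accessed by the exit
 * handler; the target register and access attributes are remembered
 * so that kvmppc_complete_mmio_load() can finish the access on the
 * next KVM_RUN.
 */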
int kvmppc_handle_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
                       unsigned int rt, unsigned int bytes, int is_bigendian)
{
	if (bytes > sizeof(run->mmio.data)) {
		printk(KERN_ERR "%s: bad MMIO length: %d\n", __func__,
		       run->mmio.len);
	}

	run->mmio.phys_addr = vcpu->arch.paddr_accessed;
	run->mmio.len = bytes;
	run->mmio.is_write = 0;

	vcpu->arch.io_gpr = rt;
	vcpu->arch.mmio_is_bigendian = is_bigendian;
	vcpu->mmio_needed = 1;
	vcpu->mmio_is_write = 0;
	vcpu->arch.mmio_sign_extend = 0;

	return EMULATE_DO_MMIO;
}
/* Same as above, but sign extends */
int kvmppc_handle_loads(struct kvm_run *run, struct kvm_vcpu *vcpu,
                        unsigned int rt, unsigned int bytes, int is_bigendian)
{
	int r;

	r = kvmppc_handle_load(run, vcpu, rt, bytes, is_bigendian);
	vcpu->arch.mmio_sign_extend = 1;

	return r;
}
int kvmppc_handle_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
                        u64 val, unsigned int bytes, int is_bigendian)
{
	void *data = run->mmio.data;

	if (bytes > sizeof(run->mmio.data)) {
		printk(KERN_ERR "%s: bad MMIO length: %d\n", __func__,
		       run->mmio.len);
	}

	run->mmio.phys_addr = vcpu->arch.paddr_accessed;
	run->mmio.len = bytes;
	run->mmio.is_write = 1;
	vcpu->mmio_needed = 1;
	vcpu->mmio_is_write = 1;

	/* Store the value at the lowest bytes in 'data'. */
	if (is_bigendian) {
		switch (bytes) {
		case 8: *(u64 *)data = val; break;
		case 4: *(u32 *)data = val; break;
		case 2: *(u16 *)data = val; break;
		case 1: *(u8  *)data = val; break;
		}
	} else {
		/* Store LE value into 'data'. */
		switch (bytes) {
		case 4: st_le32(data, val); break;
		case 2: st_le16(data, val); break;
		case 1: *(u8 *)data = val; break;
		}
	}

	return EMULATE_DO_MMIO;
}
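
/*
 * Main dispatch for the KVM_RUN ioctl.  If the previous exit handed an
 * MMIO, DCR, OSI or PAPR hypercall request to userspace, the reply in
 * kvm_run is folded back into guest register state before the guest
 * is entered again.
 */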
int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	int r;
	sigset_t sigsaved;

	if (vcpu->sigset_active)
		sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);

	if (vcpu->mmio_needed) {
		if (!vcpu->mmio_is_write)
			kvmppc_complete_mmio_load(vcpu, run);
		vcpu->mmio_needed = 0;
	} else if (vcpu->arch.dcr_needed) {
		if (!vcpu->arch.dcr_is_write)
			kvmppc_complete_dcr_load(vcpu, run);
		vcpu->arch.dcr_needed = 0;
	} else if (vcpu->arch.osi_needed) {
		u64 *gprs = run->osi.gprs;
		int i;

		for (i = 0; i < 32; i++)
			kvmppc_set_gpr(vcpu, i, gprs[i]);
		vcpu->arch.osi_needed = 0;
	} else if (vcpu->arch.hcall_needed) {
		int i;

		kvmppc_set_gpr(vcpu, 3, run->papr_hcall.ret);
		for (i = 0; i < 9; ++i)
			kvmppc_set_gpr(vcpu, 4 + i, run->papr_hcall.args[i]);
		vcpu->arch.hcall_needed = 0;
	}

	r = kvmppc_vcpu_run(run, vcpu);

	if (vcpu->sigset_active)
		sigprocmask(SIG_SETMASK, &sigsaved, NULL);

	return r;
}
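
/*
 * Kick a vcpu out of the guest: wake it if it is sleeping on its wait
 * queue, otherwise send a reschedule IPI to the physical CPU it is
 * running on (a cpu value of -1 means it is not currently loaded on
 * any CPU).
 */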
void kvm_vcpu_kick(struct kvm_vcpu *vcpu)
{
	int me;
	int cpu = vcpu->cpu;

	me = get_cpu();
	if (waitqueue_active(vcpu->arch.wqp)) {
		wake_up_interruptible(vcpu->arch.wqp);
		vcpu->stat.halt_wakeup++;
	} else if (cpu != me && cpu != -1) {
		smp_send_reschedule(vcpu->cpu);
	}
	put_cpu();
}
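
/*
 * KVM_INTERRUPT ioctl: irq->irq == KVM_INTERRUPT_UNSET retracts a
 * previously raised external interrupt; any other value raises one
 * and kicks the vcpu so it notices.
 */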
int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu, struct kvm_interrupt *irq)
{
	if (irq->irq == KVM_INTERRUPT_UNSET) {
		kvmppc_core_dequeue_external(vcpu, irq);
		return 0;
	}

	kvmppc_core_queue_external(vcpu, irq);
	kvm_vcpu_kick(vcpu);

	return 0;
}
static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
                                     struct kvm_enable_cap *cap)
{
	int r;

	if (cap->flags)
		return -EINVAL;

	switch (cap->cap) {
	case KVM_CAP_PPC_OSI:
		r = 0;
		vcpu->arch.osi_enabled = true;
		break;
	case KVM_CAP_PPC_PAPR:
		r = 0;
		vcpu->arch.papr_enabled = true;
		break;
#ifdef CONFIG_KVM_E500
	case KVM_CAP_SW_TLB: {
		struct kvm_config_tlb cfg;
		void __user *user_ptr = (void __user *)(uintptr_t)cap->args[0];

		r = -EFAULT;
		if (copy_from_user(&cfg, user_ptr, sizeof(cfg)))
			break;

		r = kvm_vcpu_ioctl_config_tlb(vcpu, &cfg);
		break;
	}
#endif
	default:
		r = -EINVAL;
		break;
	}

	if (!r)
		r = kvmppc_sanity_check(vcpu);

	return r;
}
int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
                                    struct kvm_mp_state *mp_state)
{
	return -EINVAL;
}

int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
                                    struct kvm_mp_state *mp_state)
{
	return -EINVAL;
}
long kvm_arch_vcpu_ioctl(struct file *filp,
                         unsigned int ioctl, unsigned long arg)
{
	struct kvm_vcpu *vcpu = filp->private_data;
	void __user *argp = (void __user *)arg;
	long r;

	switch (ioctl) {
	case KVM_INTERRUPT: {
		struct kvm_interrupt irq;
		r = -EFAULT;
		if (copy_from_user(&irq, argp, sizeof(irq)))
			goto out;
		r = kvm_vcpu_ioctl_interrupt(vcpu, &irq);
		goto out;
	}

	case KVM_ENABLE_CAP:
	{
		struct kvm_enable_cap cap;
		r = -EFAULT;
		if (copy_from_user(&cap, argp, sizeof(cap)))
			goto out;
		r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap);
		break;
	}

	case KVM_SET_ONE_REG:
	case KVM_GET_ONE_REG:
	{
		struct kvm_one_reg reg;
		r = -EFAULT;
		if (copy_from_user(&reg, argp, sizeof(reg)))
			goto out;
		if (ioctl == KVM_SET_ONE_REG)
			r = kvm_vcpu_ioctl_set_one_reg(vcpu, &reg);
		else
			r = kvm_vcpu_ioctl_get_one_reg(vcpu, &reg);
		break;
	}

#ifdef CONFIG_KVM_E500
	case KVM_DIRTY_TLB: {
		struct kvm_dirty_tlb dirty;
		r = -EFAULT;
		if (copy_from_user(&dirty, argp, sizeof(dirty)))
			goto out;
		r = kvm_vcpu_ioctl_dirty_tlb(vcpu, &dirty);
		break;
	}
#endif
	default:
		r = -EINVAL;
	}

out:
	return r;
}
int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
{
	return VM_FAULT_SIGBUS;
}
static int kvm_vm_ioctl_get_pvinfo(struct kvm_ppc_pvinfo *pvinfo)
{
	u32 inst_lis = 0x3c000000;
	u32 inst_ori = 0x60000000;
	u32 inst_nop = 0x60000000;
	u32 inst_sc = 0x44000002;
	u32 inst_imm_mask = 0xffff;

	/*
	 * The hypercall to get into KVM from within guest context is as
	 * follows:
	 *
	 *    lis r0, r0, KVM_SC_MAGIC_R0@h
	 *    ori r0, KVM_SC_MAGIC_R0@l
	 *    sc
	 *    nop
	 */
	pvinfo->hcall[0] = inst_lis | ((KVM_SC_MAGIC_R0 >> 16) & inst_imm_mask);
	pvinfo->hcall[1] = inst_ori | (KVM_SC_MAGIC_R0 & inst_imm_mask);
	pvinfo->hcall[2] = inst_sc;
	pvinfo->hcall[3] = inst_nop;

	return 0;
}
long kvm_arch_vm_ioctl(struct file *filp,
                       unsigned int ioctl, unsigned long arg)
{
	void __user *argp = (void __user *)arg;
	long r;

	switch (ioctl) {
	case KVM_PPC_GET_PVINFO: {
		struct kvm_ppc_pvinfo pvinfo;
		memset(&pvinfo, 0, sizeof(pvinfo));
		r = kvm_vm_ioctl_get_pvinfo(&pvinfo);
		if (copy_to_user(argp, &pvinfo, sizeof(pvinfo))) {
			r = -EFAULT;
			goto out;
		}

		break;
	}
#ifdef CONFIG_KVM_BOOK3S_64_HV
	case KVM_CREATE_SPAPR_TCE: {
		struct kvm_create_spapr_tce create_tce;
		struct kvm *kvm = filp->private_data;

		r = -EFAULT;
		if (copy_from_user(&create_tce, argp, sizeof(create_tce)))
			goto out;
		r = kvm_vm_ioctl_create_spapr_tce(kvm, &create_tce);
		goto out;
	}

	case KVM_ALLOCATE_RMA: {
		struct kvm *kvm = filp->private_data;
		struct kvm_allocate_rma rma;

		r = kvm_vm_ioctl_allocate_rma(kvm, &rma);
		if (r >= 0 && copy_to_user(argp, &rma, sizeof(rma)))
			r = -EFAULT;
		break;
	}
#endif /* CONFIG_KVM_BOOK3S_64_HV */
	default:
		r = -ENOTTY;
	}

out:
	return r;
}
int kvm_arch_init(void *opaque)
{
	return 0;
}

void kvm_arch_exit(void)
{
}