/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
 *
 * Copyright IBM Corp. 2007
 *
 * Authors: Hollis Blanchard <hollisb@us.ibm.com>
 *          Christian Ehrhardt <ehrhardt@linux.vnet.ibm.com>
 */
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/kvm_host.h>
#include <linux/vmalloc.h>
#include <linux/hrtimer.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/file.h>
#include <linux/module.h>
#include <asm/cputable.h>
#include <asm/uaccess.h>
#include <asm/kvm_ppc.h>
#include <asm/tlbflush.h>
#include <asm/cputhreads.h>
#include <asm/irqflags.h>
#include "timing.h"
#include "irq.h"
#include "../mm/mmu_decl.h"

#define CREATE_TRACE_POINTS
#include "trace.h"
struct kvmppc_ops *kvmppc_hv_ops;
EXPORT_SYMBOL_GPL(kvmppc_hv_ops);
struct kvmppc_ops *kvmppc_pr_ops;
EXPORT_SYMBOL_GPL(kvmppc_pr_ops);
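
/*
 * These ops pointers are filled in when the HV and/or PR backends
 * load; kvm_arch_init_vm() below picks one of them based on the VM
 * type requested by userspace.
 */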
int kvm_arch_vcpu_runnable(struct kvm_vcpu *v)
{
	return !!(v->arch.pending_exceptions) ||
	       v->requests;
}
int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
{
	return 1;
}
/*
 * Common checks before entering the guest world.  Call with interrupts
 * enabled.
 *
 * returns:
 *
 * == 1 if we're ready to go into guest state
 * <= 0 if we need to go back to the host with return value
 */
int kvmppc_prepare_to_enter(struct kvm_vcpu *vcpu)
{
	int r;

	WARN_ON(irqs_disabled());
	hard_irq_disable();

	while (true) {
		if (signal_pending(current)) {
			kvmppc_account_exit(vcpu, SIGNAL_EXITS);
			vcpu->run->exit_reason = KVM_EXIT_INTR;
			r = -EINTR;
			break;
		}

		vcpu->mode = IN_GUEST_MODE;

		/*
		 * Reading vcpu->requests must happen after setting vcpu->mode,
		 * so we don't miss a request because the requester sees
		 * OUTSIDE_GUEST_MODE and assumes we'll be checking requests
		 * before next entering the guest (and thus doesn't IPI).
		 */
		smp_mb();

		if (vcpu->requests) {
			/* Make sure we process requests preemptable */
			local_irq_enable();
			trace_kvm_check_requests(vcpu);
			r = kvmppc_core_check_requests(vcpu);
			hard_irq_disable();
			if (r > 0)
				continue;
			break;
		}

		if (kvmppc_core_prepare_to_enter(vcpu)) {
			/* interrupts got enabled in between, so we
			   are back at square 1 */
			continue;
		}

		return 1;
	}

	/* return to host */
	local_irq_enable();
	return r;
}
EXPORT_SYMBOL_GPL(kvmppc_prepare_to_enter);
#if defined(CONFIG_PPC_BOOK3S_64) && defined(CONFIG_KVM_BOOK3S_PR_POSSIBLE)
static void kvmppc_swab_shared(struct kvm_vcpu *vcpu)
{
	struct kvm_vcpu_arch_shared *shared = vcpu->arch.shared;
	int i;

	shared->sprg0 = swab64(shared->sprg0);
	shared->sprg1 = swab64(shared->sprg1);
	shared->sprg2 = swab64(shared->sprg2);
	shared->sprg3 = swab64(shared->sprg3);
	shared->srr0 = swab64(shared->srr0);
	shared->srr1 = swab64(shared->srr1);
	shared->dar = swab64(shared->dar);
	shared->msr = swab64(shared->msr);
	shared->dsisr = swab32(shared->dsisr);
	shared->int_pending = swab32(shared->int_pending);
	for (i = 0; i < ARRAY_SIZE(shared->sr); i++)
		shared->sr[i] = swab32(shared->sr[i]);
}
#endif
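
/*
 * Note: the shared page is byteswapped in place whenever the guest
 * remaps the magic page with the opposite endianness (see the
 * KVM_HC_PPC_MAP_MAGIC_PAGE handling below), so the guest always
 * reads these fields in its own byte order.
 */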
int kvmppc_kvm_pv(struct kvm_vcpu *vcpu)
{
	int nr = kvmppc_get_gpr(vcpu, 11);
	int r;
	unsigned long __maybe_unused param1 = kvmppc_get_gpr(vcpu, 3);
	unsigned long __maybe_unused param2 = kvmppc_get_gpr(vcpu, 4);
	unsigned long __maybe_unused param3 = kvmppc_get_gpr(vcpu, 5);
	unsigned long __maybe_unused param4 = kvmppc_get_gpr(vcpu, 6);
	unsigned long r2 = 0;

	if (!(kvmppc_get_msr(vcpu) & MSR_SF)) {
		/* 32 bit mode */
		param1 &= 0xffffffff;
		param2 &= 0xffffffff;
		param3 &= 0xffffffff;
		param4 &= 0xffffffff;
	}

	switch (nr) {
	case KVM_HCALL_TOKEN(KVM_HC_PPC_MAP_MAGIC_PAGE):
	{
#if defined(CONFIG_PPC_BOOK3S_64) && defined(CONFIG_KVM_BOOK3S_PR_POSSIBLE)
		/* Book3S can be little endian, find it out here */
		int shared_big_endian = true;
		if (vcpu->arch.intr_msr & MSR_LE)
			shared_big_endian = false;
		if (shared_big_endian != vcpu->arch.shared_big_endian)
			kvmppc_swab_shared(vcpu);
		vcpu->arch.shared_big_endian = shared_big_endian;
#endif

		if (!(param2 & MAGIC_PAGE_FLAG_NOT_MAPPED_NX)) {
			/*
			 * Older versions of the Linux magic page code had
			 * a bug where they would map their trampoline code
			 * NX. If that's the case, remove !PR NX capability.
			 */
			vcpu->arch.disable_kernel_nx = true;
			kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
		}

		vcpu->arch.magic_page_pa = param1 & ~0xfffULL;
		vcpu->arch.magic_page_ea = param2 & ~0xfffULL;

#ifdef CONFIG_PPC_64K_PAGES
		/*
		 * Make sure our 4k magic page is in the same window of a 64k
		 * page within the guest and within the host's page.
		 */
		if ((vcpu->arch.magic_page_pa & 0xf000) !=
		    ((ulong)vcpu->arch.shared & 0xf000)) {
			void *old_shared = vcpu->arch.shared;
			ulong shared = (ulong)vcpu->arch.shared;
			void *new_shared;

			shared &= PAGE_MASK;
			shared |= vcpu->arch.magic_page_pa & 0xf000;
			new_shared = (void *)shared;
			memcpy(new_shared, old_shared, 0x1000);
			vcpu->arch.shared = new_shared;
		}
#endif

		r2 = KVM_MAGIC_FEAT_SR | KVM_MAGIC_FEAT_MAS0_TO_SPRG7;

		r = EV_SUCCESS;
		break;
	}
	case KVM_HCALL_TOKEN(KVM_HC_FEATURES):
		r = EV_SUCCESS;
#if defined(CONFIG_PPC_BOOK3S) || defined(CONFIG_KVM_E500V2)
		r2 |= (1 << KVM_FEATURE_MAGIC_PAGE);
#endif

		/* Second return value is in r4 */
		break;
	case EV_HCALL_TOKEN(EV_IDLE):
		r = EV_SUCCESS;
		kvm_vcpu_block(vcpu);
		clear_bit(KVM_REQ_UNHALT, &vcpu->requests);
		break;
	default:
		r = EV_UNIMPLEMENTED;
		break;
	}

	kvmppc_set_gpr(vcpu, 4, r2);

	return r;
}
EXPORT_SYMBOL_GPL(kvmppc_kvm_pv);
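
/*
 * Guest-side calling convention, as consumed above: the hypercall
 * number arrives in r11 and up to four parameters in r3..r6; the
 * status is handed back through this function's return value while
 * the second return value is placed in r4 directly.  A guest running
 * in 32-bit mode (no MSR_SF) has its parameters truncated to 32 bits
 * first.
 */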
int kvmppc_sanity_check(struct kvm_vcpu *vcpu)
{
	int r = false;

	/* We have to know what CPU to virtualize */
	if (!vcpu->arch.pvr)
		goto out;

	/* PAPR only works with book3s_64 */
	if ((vcpu->arch.cpu_type != KVM_CPU_3S_64) && vcpu->arch.papr_enabled)
		goto out;

	/* HV KVM can only do PAPR mode for now */
	if (!vcpu->arch.papr_enabled && is_kvmppc_hv_enabled(vcpu->kvm))
		goto out;

#ifdef CONFIG_KVM_BOOKE_HV
	if (!cpu_has_feature(CPU_FTR_EMB_HV))
		goto out;
#endif

	r = true;

out:
	vcpu->arch.sane = r;
	return r ? 0 : -EINVAL;
}
EXPORT_SYMBOL_GPL(kvmppc_sanity_check);
int kvmppc_emulate_mmio(struct kvm_run *run, struct kvm_vcpu *vcpu)
{
	enum emulation_result er;
	int r;

	er = kvmppc_emulate_instruction(run, vcpu);
	switch (er) {
	case EMULATE_DONE:
		/* Future optimization: only reload non-volatiles if they were
		 * actually modified. */
		r = RESUME_GUEST_NV;
		break;
	case EMULATE_AGAIN:
		r = RESUME_GUEST;
		break;
	case EMULATE_DO_MMIO:
		run->exit_reason = KVM_EXIT_MMIO;
		/* We must reload nonvolatiles because "update" load/store
		 * instructions modify register state. */
		/* Future optimization: only reload non-volatiles if they were
		 * actually modified. */
		r = RESUME_HOST_NV;
		break;
	case EMULATE_FAIL:
	{
		u32 last_inst;

		kvmppc_get_last_inst(vcpu, false, &last_inst);
		/* XXX Deliver Program interrupt to guest. */
		pr_emerg("%s: emulation failed (%08x)\n", __func__, last_inst);
		r = RESUME_HOST;
		break;
	}
	default:
		WARN_ON(1);
		r = RESUME_GUEST;
	}

	return r;
}
EXPORT_SYMBOL_GPL(kvmppc_emulate_mmio);
static hva_t kvmppc_pte_to_hva(struct kvm_vcpu *vcpu, struct kvmppc_pte *pte)
{
	hva_t hpage;

	hpage = gfn_to_hva(vcpu->kvm, pte->raddr >> PAGE_SHIFT);
	if (kvm_is_error_hva(hpage))
		goto err;

	return hpage | (pte->raddr & ~PAGE_MASK);

err:
	return KVM_HVA_ERR_BAD;
}
int kvmppc_st(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr,
	      bool data)
{
	struct kvmppc_pte pte;
	int r;

	r = kvmppc_xlate(vcpu, *eaddr, data ? XLATE_DATA : XLATE_INST,
			 XLATE_WRITE, &pte);
	if (r < 0)
		return r;

	*eaddr = pte.raddr;

	if (kvm_write_guest(vcpu->kvm, pte.raddr, ptr, size))
		return EMULATE_DO_MMIO;

	return EMULATE_DONE;
}
EXPORT_SYMBOL_GPL(kvmppc_st);
int kvmppc_ld(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr,
	      bool data)
{
	struct kvmppc_pte pte;
	hva_t hva;
	int rc;

	rc = kvmppc_xlate(vcpu, *eaddr, data ? XLATE_DATA : XLATE_INST,
			  XLATE_READ, &pte);
	if (rc)
		return rc;

	*eaddr = pte.raddr;

	if (!data && !pte.may_execute)
		return -ENOEXEC;

	hva = kvmppc_pte_to_hva(vcpu, &pte);
	if (kvm_is_error_hva(hva))
		goto mmio;

	if (copy_from_user(ptr, (void __user *)hva, size)) {
		printk(KERN_INFO "kvmppc_ld at 0x%lx failed\n", hva);
		goto mmio;
	}

	return EMULATE_DONE;

mmio:
	return EMULATE_DO_MMIO;
}
EXPORT_SYMBOL_GPL(kvmppc_ld);
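
/*
 * Both kvmppc_st() and kvmppc_ld() hand back EMULATE_DO_MMIO when the
 * translated address is not backed by guest memory, letting the
 * caller fall back to full MMIO emulation instead.
 */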
int kvm_arch_hardware_enable(void *garbage)
{
	return 0;
}

void kvm_arch_hardware_disable(void *garbage)
{
}

int kvm_arch_hardware_setup(void)
{
	return 0;
}

void kvm_arch_hardware_unsetup(void)
{
}

void kvm_arch_check_processor_compat(void *rtn)
{
	*(int *)rtn = kvmppc_core_check_processor_compat();
}
int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
{
	struct kvmppc_ops *kvm_ops = NULL;
	/*
	 * if we have both HV and PR enabled, default is HV
	 */
	if (type == 0) {
		if (kvmppc_hv_ops)
			kvm_ops = kvmppc_hv_ops;
		else
			kvm_ops = kvmppc_pr_ops;
		if (!kvm_ops)
			goto err_out;
	} else if (type == KVM_VM_PPC_HV) {
		if (!kvmppc_hv_ops)
			goto err_out;
		kvm_ops = kvmppc_hv_ops;
	} else if (type == KVM_VM_PPC_PR) {
		if (!kvmppc_pr_ops)
			goto err_out;
		kvm_ops = kvmppc_pr_ops;
	} else
		goto err_out;

	if (kvm_ops->owner && !try_module_get(kvm_ops->owner))
		return -ENOENT;

	kvm->arch.kvm_ops = kvm_ops;
	return kvmppc_core_init_vm(kvm);
err_out:
	return -EINVAL;
}
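
/*
 * Userspace selects the backend by passing KVM_VM_PPC_HV or
 * KVM_VM_PPC_PR as the machine type to KVM_CREATE_VM; a type of 0
 * takes whichever backend is available, preferring HV.
 */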
void kvm_arch_destroy_vm(struct kvm *kvm)
{
	unsigned int i;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(i, vcpu, kvm)
		kvm_arch_vcpu_free(vcpu);

	mutex_lock(&kvm->lock);
	for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
		kvm->vcpus[i] = NULL;

	atomic_set(&kvm->online_vcpus, 0);

	kvmppc_core_destroy_vm(kvm);

	mutex_unlock(&kvm->lock);

	/* drop the module reference */
	module_put(kvm->arch.kvm_ops->owner);
}
void kvm_arch_sync_events(struct kvm *kvm)
{
}
int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
{
	int r;
	/* Assume we're using HV mode when the HV module is loaded */
	int hv_enabled = kvmppc_hv_ops ? 1 : 0;

	if (kvm) {
		/*
		 * Hooray - we know which VM type we're running on. Depend on
		 * that rather than the guess above.
		 */
		hv_enabled = is_kvmppc_hv_enabled(kvm);
	}

	switch (ext) {
#ifdef CONFIG_BOOKE
	case KVM_CAP_PPC_BOOKE_SREGS:
	case KVM_CAP_PPC_BOOKE_WATCHDOG:
	case KVM_CAP_PPC_EPR:
#else
	case KVM_CAP_PPC_SEGSTATE:
	case KVM_CAP_PPC_HIOR:
	case KVM_CAP_PPC_PAPR:
#endif
	case KVM_CAP_PPC_UNSET_IRQ:
	case KVM_CAP_PPC_IRQ_LEVEL:
	case KVM_CAP_ENABLE_CAP:
	case KVM_CAP_ENABLE_CAP_VM:
	case KVM_CAP_ONE_REG:
	case KVM_CAP_IOEVENTFD:
	case KVM_CAP_DEVICE_CTRL:
		r = 1;
		break;
	case KVM_CAP_PPC_PAIRED_SINGLES:
	case KVM_CAP_PPC_OSI:
	case KVM_CAP_PPC_GET_PVINFO:
#if defined(CONFIG_KVM_E500V2) || defined(CONFIG_KVM_E500MC)
	case KVM_CAP_SW_TLB:
#endif
		/* We support this only for PR */
		r = !hv_enabled;
		break;
#ifdef CONFIG_KVM_MMIO
	case KVM_CAP_COALESCED_MMIO:
		r = KVM_COALESCED_MMIO_PAGE_OFFSET;
		break;
#endif
#ifdef CONFIG_KVM_MPIC
	case KVM_CAP_IRQ_MPIC:
		r = 1;
		break;
#endif

#ifdef CONFIG_PPC_BOOK3S_64
	case KVM_CAP_SPAPR_TCE:
	case KVM_CAP_PPC_ALLOC_HTAB:
	case KVM_CAP_PPC_RTAS:
	case KVM_CAP_PPC_FIXUP_HCALL:
	case KVM_CAP_PPC_ENABLE_HCALL:
#ifdef CONFIG_KVM_XICS
	case KVM_CAP_IRQ_XICS:
#endif
		r = 1;
		break;
#endif /* CONFIG_PPC_BOOK3S_64 */
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
	case KVM_CAP_PPC_SMT:
		if (hv_enabled)
			r = threads_per_subcore;
		else
			r = 0;
		break;
	case KVM_CAP_PPC_RMA:
		r = hv_enabled;
		/* PPC970 requires an RMA */
		if (r && cpu_has_feature(CPU_FTR_ARCH_201))
			r = 2;
		break;
#endif
	case KVM_CAP_SYNC_MMU:
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
		if (hv_enabled)
			r = cpu_has_feature(CPU_FTR_ARCH_206) ? 1 : 0;
		else
			r = 0;
#elif defined(KVM_ARCH_WANT_MMU_NOTIFIER)
		r = 1;
#else
		r = 0;
#endif
		break;
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
	case KVM_CAP_PPC_HTAB_FD:
		r = hv_enabled;
		break;
#endif
	case KVM_CAP_NR_VCPUS:
		/*
		 * Recommending a number of CPUs is somewhat arbitrary; we
		 * return the number of present CPUs for -HV (since a host
		 * will have secondary threads "offline"), and for other KVM
		 * implementations just count online CPUs.
		 */
		if (hv_enabled)
			r = num_present_cpus();
		else
			r = num_online_cpus();
		break;
	case KVM_CAP_MAX_VCPUS:
		r = KVM_MAX_VCPUS;
		break;
#ifdef CONFIG_PPC_BOOK3S_64
	case KVM_CAP_PPC_GET_SMMU_INFO:
		r = 1;
		break;
#endif
	default:
		r = 0;
		break;
	}
	return r;
}
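
/*
 * Userspace probes the capabilities above with the KVM_CHECK_EXTENSION
 * ioctl; a return of 0 means the capability is not available for this
 * VM type.
 */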
long kvm_arch_dev_ioctl(struct file *filp,
                        unsigned int ioctl, unsigned long arg)
{
	return -EINVAL;
}
void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free,
			   struct kvm_memory_slot *dont)
{
	kvmppc_core_free_memslot(kvm, free, dont);
}

int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
			    unsigned long npages)
{
	return kvmppc_core_create_memslot(kvm, slot, npages);
}

void kvm_arch_memslots_updated(struct kvm *kvm)
{
}
int kvm_arch_prepare_memory_region(struct kvm *kvm,
				   struct kvm_memory_slot *memslot,
				   struct kvm_userspace_memory_region *mem,
				   enum kvm_mr_change change)
{
	return kvmppc_core_prepare_memory_region(kvm, memslot, mem);
}

void kvm_arch_commit_memory_region(struct kvm *kvm,
				   struct kvm_userspace_memory_region *mem,
				   const struct kvm_memory_slot *old,
				   enum kvm_mr_change change)
{
	kvmppc_core_commit_memory_region(kvm, mem, old);
}

void kvm_arch_flush_shadow_all(struct kvm *kvm)
{
}

void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
				   struct kvm_memory_slot *slot)
{
	kvmppc_core_flush_memslot(kvm, slot);
}
struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id)
{
	struct kvm_vcpu *vcpu;
	vcpu = kvmppc_core_vcpu_create(kvm, id);
	if (!IS_ERR(vcpu)) {
		vcpu->arch.wqp = &vcpu->wq;
		kvmppc_create_vcpu_debugfs(vcpu, id);
	}
	return vcpu;
}

int kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
{
	return 0;
}
void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu)
{
	/* Make sure we're not using the vcpu anymore */
	hrtimer_cancel(&vcpu->arch.dec_timer);
	tasklet_kill(&vcpu->arch.tasklet);

	kvmppc_remove_vcpu_debugfs(vcpu);

	switch (vcpu->arch.irq_type) {
	case KVMPPC_IRQ_MPIC:
		kvmppc_mpic_disconnect_vcpu(vcpu->arch.mpic, vcpu);
		break;
	case KVMPPC_IRQ_XICS:
		kvmppc_xics_free_icp(vcpu);
		break;
	}

	kvmppc_core_vcpu_free(vcpu);
}

void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{
	kvm_arch_vcpu_free(vcpu);
}
int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
{
	return kvmppc_core_pending_dec(vcpu);
}
/*
 * low level hrtimer wake routine. Because this runs in hardirq context
 * we schedule a tasklet to do the real work.
 */
enum hrtimer_restart kvmppc_decrementer_wakeup(struct hrtimer *timer)
{
	struct kvm_vcpu *vcpu;

	vcpu = container_of(timer, struct kvm_vcpu, arch.dec_timer);
	tasklet_schedule(&vcpu->arch.tasklet);

	return HRTIMER_NORESTART;
}
int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
{
	int ret;

	hrtimer_init(&vcpu->arch.dec_timer, CLOCK_REALTIME, HRTIMER_MODE_ABS);
	tasklet_init(&vcpu->arch.tasklet, kvmppc_decrementer_func, (ulong)vcpu);
	vcpu->arch.dec_timer.function = kvmppc_decrementer_wakeup;
	vcpu->arch.dec_expires = ~(u64)0;

#ifdef CONFIG_KVM_EXIT_TIMING
	mutex_init(&vcpu->arch.exit_timing_lock);
#endif
	ret = kvmppc_subarch_vcpu_init(vcpu);
	return ret;
}
void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
{
	kvmppc_mmu_destroy(vcpu);
	kvmppc_subarch_vcpu_uninit(vcpu);
}
void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
#ifdef CONFIG_BOOKE
	/*
	 * vrsave (formerly usprg0) isn't used by Linux, but may
	 * be used by the guest.
	 *
	 * On non-booke this is associated with Altivec and
	 * is handled by code in book3s.c.
	 */
	mtspr(SPRN_VRSAVE, vcpu->arch.vrsave);
#endif
	kvmppc_core_vcpu_load(vcpu, cpu);
}

void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
	kvmppc_core_vcpu_put(vcpu);
#ifdef CONFIG_BOOKE
	vcpu->arch.vrsave = mfspr(SPRN_VRSAVE);
#endif
}
static void kvmppc_complete_dcr_load(struct kvm_vcpu *vcpu,
                                     struct kvm_run *run)
{
	kvmppc_set_gpr(vcpu, vcpu->arch.io_gpr, run->dcr.data);
}
static void kvmppc_complete_mmio_load(struct kvm_vcpu *vcpu,
                                      struct kvm_run *run)
{
	u64 uninitialized_var(gpr);

	if (run->mmio.len > sizeof(gpr)) {
		printk(KERN_ERR "bad MMIO length: %d\n", run->mmio.len);
		return;
	}

	if (vcpu->arch.mmio_is_bigendian) {
		switch (run->mmio.len) {
		case 8: gpr = *(u64 *)run->mmio.data; break;
		case 4: gpr = *(u32 *)run->mmio.data; break;
		case 2: gpr = *(u16 *)run->mmio.data; break;
		case 1: gpr = *(u8 *)run->mmio.data; break;
		}
	} else {
		/* Convert BE data from userland back to LE. */
		switch (run->mmio.len) {
		case 4: gpr = ld_le32((u32 *)run->mmio.data); break;
		case 2: gpr = ld_le16((u16 *)run->mmio.data); break;
		case 1: gpr = *(u8 *)run->mmio.data; break;
		}
	}

	if (vcpu->arch.mmio_sign_extend) {
		switch (run->mmio.len) {
#ifdef CONFIG_PPC64
		case 4:
			gpr = (s64)(s32)gpr;
			break;
#endif
		case 2:
			gpr = (s64)(s16)gpr;
			break;
		case 1:
			gpr = (s64)(s8)gpr;
			break;
		}
	}

	kvmppc_set_gpr(vcpu, vcpu->arch.io_gpr, gpr);

	switch (vcpu->arch.io_gpr & KVM_MMIO_REG_EXT_MASK) {
	case KVM_MMIO_REG_GPR:
		kvmppc_set_gpr(vcpu, vcpu->arch.io_gpr, gpr);
		break;
	case KVM_MMIO_REG_FPR:
		VCPU_FPR(vcpu, vcpu->arch.io_gpr & KVM_MMIO_REG_MASK) = gpr;
		break;
#ifdef CONFIG_PPC_BOOK3S
	case KVM_MMIO_REG_QPR:
		vcpu->arch.qpr[vcpu->arch.io_gpr & KVM_MMIO_REG_MASK] = gpr;
		break;
	case KVM_MMIO_REG_FQPR:
		VCPU_FPR(vcpu, vcpu->arch.io_gpr & KVM_MMIO_REG_MASK) = gpr;
		vcpu->arch.qpr[vcpu->arch.io_gpr & KVM_MMIO_REG_MASK] = gpr;
		break;
#endif
	default:
		BUG();
	}
}
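
/*
 * The bytes in run->mmio.data are interpreted according to the
 * mmio_is_bigendian flag recorded when the load was started, so a
 * byteswapped guest still sees the value it expects.
 */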
int kvmppc_handle_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
		       unsigned int rt, unsigned int bytes,
		       int is_default_endian)
{
	int idx, ret;
	int is_bigendian;

	if (kvmppc_need_byteswap(vcpu)) {
		/* Default endianness is "little endian". */
		is_bigendian = !is_default_endian;
	} else {
		/* Default endianness is "big endian". */
		is_bigendian = is_default_endian;
	}

	if (bytes > sizeof(run->mmio.data)) {
		printk(KERN_ERR "%s: bad MMIO length: %d\n", __func__,
		       run->mmio.len);
	}

	run->mmio.phys_addr = vcpu->arch.paddr_accessed;
	run->mmio.len = bytes;
	run->mmio.is_write = 0;

	vcpu->arch.io_gpr = rt;
	vcpu->arch.mmio_is_bigendian = is_bigendian;
	vcpu->mmio_needed = 1;
	vcpu->mmio_is_write = 0;
	vcpu->arch.mmio_sign_extend = 0;

	idx = srcu_read_lock(&vcpu->kvm->srcu);

	ret = kvm_io_bus_read(vcpu->kvm, KVM_MMIO_BUS, run->mmio.phys_addr,
			      bytes, &run->mmio.data);

	srcu_read_unlock(&vcpu->kvm->srcu, idx);

	if (!ret) {
		kvmppc_complete_mmio_load(vcpu, run);
		vcpu->mmio_needed = 0;
		return EMULATE_DONE;
	}

	return EMULATE_DO_MMIO;
}
EXPORT_SYMBOL_GPL(kvmppc_handle_load);
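
/*
 * If kvm_io_bus_read() cannot satisfy the access in the kernel,
 * vcpu->mmio_needed stays set and EMULATE_DO_MMIO is returned; the
 * run loop then exits to userspace with KVM_EXIT_MMIO and the result
 * is completed in kvm_arch_vcpu_ioctl_run() on the next entry.
 */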
/* Same as above, but sign extends */
int kvmppc_handle_loads(struct kvm_run *run, struct kvm_vcpu *vcpu,
			unsigned int rt, unsigned int bytes,
			int is_default_endian)
{
	int r;

	vcpu->arch.mmio_sign_extend = 1;
	r = kvmppc_handle_load(run, vcpu, rt, bytes, is_default_endian);

	return r;
}
int kvmppc_handle_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
			u64 val, unsigned int bytes, int is_default_endian)
{
	void *data = run->mmio.data;
	int idx, ret;
	int is_bigendian;

	if (kvmppc_need_byteswap(vcpu)) {
		/* Default endianness is "little endian". */
		is_bigendian = !is_default_endian;
	} else {
		/* Default endianness is "big endian". */
		is_bigendian = is_default_endian;
	}

	if (bytes > sizeof(run->mmio.data)) {
		printk(KERN_ERR "%s: bad MMIO length: %d\n", __func__,
		       run->mmio.len);
	}

	run->mmio.phys_addr = vcpu->arch.paddr_accessed;
	run->mmio.len = bytes;
	run->mmio.is_write = 1;
	vcpu->mmio_needed = 1;
	vcpu->mmio_is_write = 1;

	/* Store the value at the lowest bytes in 'data'. */
	if (is_bigendian) {
		switch (bytes) {
		case 8: *(u64 *)data = val; break;
		case 4: *(u32 *)data = val; break;
		case 2: *(u16 *)data = val; break;
		case 1: *(u8  *)data = val; break;
		}
	} else {
		/* Store LE value into 'data'. */
		switch (bytes) {
		case 4: st_le32(data, val); break;
		case 2: st_le16(data, val); break;
		case 1: *(u8 *)data = val; break;
		}
	}

	idx = srcu_read_lock(&vcpu->kvm->srcu);

	ret = kvm_io_bus_write(vcpu->kvm, KVM_MMIO_BUS, run->mmio.phys_addr,
			       bytes, &run->mmio.data);

	srcu_read_unlock(&vcpu->kvm->srcu, idx);

	if (!ret) {
		vcpu->mmio_needed = 0;
		return EMULATE_DONE;
	}

	return EMULATE_DO_MMIO;
}
EXPORT_SYMBOL_GPL(kvmppc_handle_store);
int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	int r;
	sigset_t sigsaved;

	if (vcpu->sigset_active)
		sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);

	if (vcpu->mmio_needed) {
		if (!vcpu->mmio_is_write)
			kvmppc_complete_mmio_load(vcpu, run);
		vcpu->mmio_needed = 0;
	} else if (vcpu->arch.dcr_needed) {
		if (!vcpu->arch.dcr_is_write)
			kvmppc_complete_dcr_load(vcpu, run);
		vcpu->arch.dcr_needed = 0;
	} else if (vcpu->arch.osi_needed) {
		u64 *gprs = run->osi.gprs;
		int i;

		for (i = 0; i < 32; i++)
			kvmppc_set_gpr(vcpu, i, gprs[i]);
		vcpu->arch.osi_needed = 0;
	} else if (vcpu->arch.hcall_needed) {
		int i;

		kvmppc_set_gpr(vcpu, 3, run->papr_hcall.ret);
		for (i = 0; i < 9; ++i)
			kvmppc_set_gpr(vcpu, 4 + i, run->papr_hcall.args[i]);
		vcpu->arch.hcall_needed = 0;
#ifdef CONFIG_BOOKE
	} else if (vcpu->arch.epr_needed) {
		kvmppc_set_epr(vcpu, run->epr.epr);
		vcpu->arch.epr_needed = 0;
#endif
	}

	r = kvmppc_vcpu_run(run, vcpu);

	if (vcpu->sigset_active)
		sigprocmask(SIG_SETMASK, &sigsaved, NULL);

	return r;
}
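
/*
 * Note the ordering above: any MMIO, DCR, OSI, hypercall or EPR
 * completion left over from the previous exit is folded back into
 * guest register state before kvmppc_vcpu_run() re-enters the guest.
 */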
int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu, struct kvm_interrupt *irq)
{
	if (irq->irq == KVM_INTERRUPT_UNSET) {
		kvmppc_core_dequeue_external(vcpu);
		return 0;
	}

	kvmppc_core_queue_external(vcpu, irq);

	kvm_vcpu_kick(vcpu);

	return 0;
}
static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
				     struct kvm_enable_cap *cap)
{
	int r;

	if (cap->flags)
		return -EINVAL;

	switch (cap->cap) {
	case KVM_CAP_PPC_OSI:
		r = 0;
		vcpu->arch.osi_enabled = true;
		break;
	case KVM_CAP_PPC_PAPR:
		r = 0;
		vcpu->arch.papr_enabled = true;
		break;
	case KVM_CAP_PPC_EPR:
		r = 0;
		if (cap->args[0])
			vcpu->arch.epr_flags |= KVMPPC_EPR_USER;
		else
			vcpu->arch.epr_flags &= ~KVMPPC_EPR_USER;
		break;
#ifdef CONFIG_BOOKE
	case KVM_CAP_PPC_BOOKE_WATCHDOG:
		r = 0;
		vcpu->arch.watchdog_enabled = true;
		break;
#endif
#if defined(CONFIG_KVM_E500V2) || defined(CONFIG_KVM_E500MC)
	case KVM_CAP_SW_TLB: {
		struct kvm_config_tlb cfg;
		void __user *user_ptr = (void __user *)(uintptr_t)cap->args[0];

		r = -EFAULT;
		if (copy_from_user(&cfg, user_ptr, sizeof(cfg)))
			break;

		r = kvm_vcpu_ioctl_config_tlb(vcpu, &cfg);
		break;
	}
#endif
#ifdef CONFIG_KVM_MPIC
	case KVM_CAP_IRQ_MPIC: {
		struct fd f;
		struct kvm_device *dev;

		r = -EBADF;
		f = fdget(cap->args[0]);
		if (!f.file)
			break;

		r = -EPERM;
		dev = kvm_device_from_filp(f.file);
		if (dev)
			r = kvmppc_mpic_connect_vcpu(dev, vcpu, cap->args[1]);

		fdput(f);
		break;
	}
#endif
#ifdef CONFIG_KVM_XICS
	case KVM_CAP_IRQ_XICS: {
		struct fd f;
		struct kvm_device *dev;

		r = -EBADF;
		f = fdget(cap->args[0]);
		if (!f.file)
			break;

		r = -EPERM;
		dev = kvm_device_from_filp(f.file);
		if (dev)
			r = kvmppc_xics_connect_vcpu(dev, vcpu, cap->args[1]);

		fdput(f);
		break;
	}
#endif /* CONFIG_KVM_XICS */
	default:
		r = -EINVAL;
		break;
	}

	if (!r)
		r = kvmppc_sanity_check(vcpu);

	return r;
}
int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
                                    struct kvm_mp_state *mp_state)
{
	return -EINVAL;
}

int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
                                    struct kvm_mp_state *mp_state)
{
	return -EINVAL;
}
long kvm_arch_vcpu_ioctl(struct file *filp,
                         unsigned int ioctl, unsigned long arg)
{
	struct kvm_vcpu *vcpu = filp->private_data;
	void __user *argp = (void __user *)arg;
	long r;

	switch (ioctl) {
	case KVM_INTERRUPT: {
		struct kvm_interrupt irq;
		r = -EFAULT;
		if (copy_from_user(&irq, argp, sizeof(irq)))
			goto out;
		r = kvm_vcpu_ioctl_interrupt(vcpu, &irq);
		goto out;
	}

	case KVM_ENABLE_CAP:
	{
		struct kvm_enable_cap cap;
		r = -EFAULT;
		if (copy_from_user(&cap, argp, sizeof(cap)))
			goto out;
		r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap);
		break;
	}

	case KVM_SET_ONE_REG:
	case KVM_GET_ONE_REG:
	{
		struct kvm_one_reg reg;
		r = -EFAULT;
		if (copy_from_user(&reg, argp, sizeof(reg)))
			goto out;
		if (ioctl == KVM_SET_ONE_REG)
			r = kvm_vcpu_ioctl_set_one_reg(vcpu, &reg);
		else
			r = kvm_vcpu_ioctl_get_one_reg(vcpu, &reg);
		break;
	}

#if defined(CONFIG_KVM_E500V2) || defined(CONFIG_KVM_E500MC)
	case KVM_DIRTY_TLB: {
		struct kvm_dirty_tlb dirty;
		r = -EFAULT;
		if (copy_from_user(&dirty, argp, sizeof(dirty)))
			goto out;
		r = kvm_vcpu_ioctl_dirty_tlb(vcpu, &dirty);
		break;
	}
#endif
	default:
		r = -EINVAL;
	}

out:
	return r;
}
int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
{
	return VM_FAULT_SIGBUS;
}
static int kvm_vm_ioctl_get_pvinfo(struct kvm_ppc_pvinfo *pvinfo)
{
	u32 inst_nop = 0x60000000;
#ifdef CONFIG_KVM_BOOKE_HV
	u32 inst_sc1 = 0x44000022;
	pvinfo->hcall[0] = cpu_to_be32(inst_sc1);
	pvinfo->hcall[1] = cpu_to_be32(inst_nop);
	pvinfo->hcall[2] = cpu_to_be32(inst_nop);
	pvinfo->hcall[3] = cpu_to_be32(inst_nop);
#else
	u32 inst_lis = 0x3c000000;
	u32 inst_ori = 0x60000000;
	u32 inst_sc = 0x44000002;
	u32 inst_imm_mask = 0xffff;

	/*
	 * The hypercall to get into KVM from within guest context is as
	 * follows:
	 *
	 *    lis r0, KVM_SC_MAGIC_R0@h
	 *    ori r0, r0, KVM_SC_MAGIC_R0@l
	 *    sc
	 *    nop
	 */
	pvinfo->hcall[0] = cpu_to_be32(inst_lis | ((KVM_SC_MAGIC_R0 >> 16) & inst_imm_mask));
	pvinfo->hcall[1] = cpu_to_be32(inst_ori | (KVM_SC_MAGIC_R0 & inst_imm_mask));
	pvinfo->hcall[2] = cpu_to_be32(inst_sc);
	pvinfo->hcall[3] = cpu_to_be32(inst_nop);
#endif

	pvinfo->flags = KVM_PPC_PVINFO_FLAGS_EV_IDLE;

	return 0;
}
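
/*
 * The guest copies these four instructions into its hypercall page:
 * on BOOKE_HV a single "sc 1" is enough, while everywhere else the
 * magic value loaded into r0 marks the sc as a KVM hypercall.
 */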
int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_event,
			  bool line_status)
{
	if (!irqchip_in_kernel(kvm))
		return -ENXIO;

	irq_event->status = kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID,
					irq_event->irq, irq_event->level,
					line_status);
	return 0;
}
static int kvm_vm_ioctl_enable_cap(struct kvm *kvm,
				   struct kvm_enable_cap *cap)
{
	int r;

	if (cap->flags)
		return -EINVAL;

	switch (cap->cap) {
#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
	case KVM_CAP_PPC_ENABLE_HCALL: {
		unsigned long hcall = cap->args[0];

		r = -EINVAL;
		if (hcall > MAX_HCALL_OPCODE || (hcall & 3) ||
		    cap->args[1] > 1)
			break;
		if (!kvmppc_book3s_hcall_implemented(kvm, hcall))
			break;
		if (cap->args[1])
			set_bit(hcall / 4, kvm->arch.enabled_hcalls);
		else
			clear_bit(hcall / 4, kvm->arch.enabled_hcalls);
		r = 0;
		break;
	}
#endif
	default:
		r = -EINVAL;
		break;
	}

	return r;
}
long kvm_arch_vm_ioctl(struct file *filp,
                       unsigned int ioctl, unsigned long arg)
{
	struct kvm *kvm __maybe_unused = filp->private_data;
	void __user *argp = (void __user *)arg;
	long r;

	switch (ioctl) {
	case KVM_PPC_GET_PVINFO: {
		struct kvm_ppc_pvinfo pvinfo;
		memset(&pvinfo, 0, sizeof(pvinfo));
		r = kvm_vm_ioctl_get_pvinfo(&pvinfo);
		if (copy_to_user(argp, &pvinfo, sizeof(pvinfo))) {
			r = -EFAULT;
			goto out;
		}

		break;
	}
	case KVM_ENABLE_CAP:
	{
		struct kvm_enable_cap cap;
		r = -EFAULT;
		if (copy_from_user(&cap, argp, sizeof(cap)))
			goto out;
		r = kvm_vm_ioctl_enable_cap(kvm, &cap);
		break;
	}
#ifdef CONFIG_PPC_BOOK3S_64
	case KVM_CREATE_SPAPR_TCE: {
		struct kvm_create_spapr_tce create_tce;

		r = -EFAULT;
		if (copy_from_user(&create_tce, argp, sizeof(create_tce)))
			goto out;
		r = kvm_vm_ioctl_create_spapr_tce(kvm, &create_tce);
		goto out;
	}
	case KVM_PPC_GET_SMMU_INFO: {
		struct kvm_ppc_smmu_info info;
		struct kvm *kvm = filp->private_data;

		memset(&info, 0, sizeof(info));
		r = kvm->arch.kvm_ops->get_smmu_info(kvm, &info);
		if (r >= 0 && copy_to_user(argp, &info, sizeof(info)))
			r = -EFAULT;
		break;
	}
	case KVM_PPC_RTAS_DEFINE_TOKEN: {
		struct kvm *kvm = filp->private_data;

		r = kvm_vm_ioctl_rtas_define_token(kvm, argp);
		break;
	}
	default: {
		struct kvm *kvm = filp->private_data;
		r = kvm->arch.kvm_ops->arch_vm_ioctl(filp, ioctl, arg);
		break;
	}
#else /* CONFIG_PPC_BOOK3S_64 */
	default:
		r = -ENOTTY;
#endif
	}
out:
	return r;
}
static unsigned long lpid_inuse[BITS_TO_LONGS(KVMPPC_NR_LPIDS)];
static unsigned long nr_lpids;

long kvmppc_alloc_lpid(void)
{
	long lpid;

	do {
		lpid = find_first_zero_bit(lpid_inuse, KVMPPC_NR_LPIDS);
		if (lpid >= nr_lpids) {
			pr_err("%s: No LPIDs free\n", __func__);
			return -ENOMEM;
		}
	} while (test_and_set_bit(lpid, lpid_inuse));

	return lpid;
}
EXPORT_SYMBOL_GPL(kvmppc_alloc_lpid);

void kvmppc_claim_lpid(long lpid)
{
	set_bit(lpid, lpid_inuse);
}
EXPORT_SYMBOL_GPL(kvmppc_claim_lpid);

void kvmppc_free_lpid(long lpid)
{
	clear_bit(lpid, lpid_inuse);
}
EXPORT_SYMBOL_GPL(kvmppc_free_lpid);

void kvmppc_init_lpid(unsigned long nr_lpids_param)
{
	nr_lpids = min_t(unsigned long, KVMPPC_NR_LPIDS, nr_lpids_param);
	memset(lpid_inuse, 0, sizeof(lpid_inuse));
}
EXPORT_SYMBOL_GPL(kvmppc_init_lpid);
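
/*
 * The LPID allocator is a simple bitmap: kvmppc_init_lpid() caps the
 * pool at what the platform supports, kvmppc_alloc_lpid() hands out a
 * free partition ID for a new guest, and kvmppc_claim_lpid() marks an
 * ID that is already in use (such as the host's own LPID).
 */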
int kvm_arch_init(void *opaque)
{
	return 0;
}

void kvm_arch_exit(void)
{
}