/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 *
 * Copyright IBM Corp. 2007
 *
 * Authors: Hollis Blanchard <hollisb@us.ibm.com>
 *          Christian Ehrhardt <ehrhardt@linux.vnet.ibm.com>
 */
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/kvm_host.h>
#include <linux/vmalloc.h>
#include <linux/hrtimer.h>
#include <linux/sched/signal.h>
#include <linux/slab.h>
#include <linux/file.h>
#include <linux/module.h>
#include <linux/irqbypass.h>
#include <linux/kvm_irqfd.h>
#include <asm/cputable.h>
#include <linux/uaccess.h>
#include <asm/kvm_ppc.h>
#include <asm/tlbflush.h>
#include <asm/cputhreads.h>
#include <asm/irqflags.h>
#include <asm/iommu.h>
#include <asm/switch_to.h>
#include <asm/xive.h>
#ifdef CONFIG_PPC_PSERIES
#include <asm/hvcall.h>
#include <asm/plpar_wrappers.h>
#endif

#include "../mm/mmu_decl.h"

#define CREATE_TRACE_POINTS
#include "trace.h"
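/*
 * kvmppc_hv_ops and kvmppc_pr_ops below are the per-flavour operation
 * tables registered by the HV and PR implementations; kvm_arch_init_vm()
 * later picks one of them for each VM, stores it in kvm->arch.kvm_ops and
 * takes a reference on the owning module (dropped in kvm_arch_destroy_vm).
 */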
struct kvmppc_ops *kvmppc_hv_ops;
EXPORT_SYMBOL_GPL(kvmppc_hv_ops);
struct kvmppc_ops *kvmppc_pr_ops;
EXPORT_SYMBOL_GPL(kvmppc_pr_ops);
int kvm_arch_vcpu_runnable(struct kvm_vcpu *v)
{
	return !!(v->arch.pending_exceptions) || kvm_request_pending(v);
}

bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu)
{
	return false;
}

int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
{
	return 1;
}
/*
 * Common checks before entering the guest world.  Call with interrupts
 * enabled.
 *
 * returns:
 *
 * == 1 if we're ready to go into guest state
 * <= 0 if we need to go back to the host with return value
 */
int kvmppc_prepare_to_enter(struct kvm_vcpu *vcpu)
{
	int r;

	WARN_ON(irqs_disabled());
	hard_irq_disable();

	while (true) {
		if (need_resched()) {
			local_irq_enable();
			cond_resched();
			hard_irq_disable();
			continue;
		}

		if (signal_pending(current)) {
			kvmppc_account_exit(vcpu, SIGNAL_EXITS);
			vcpu->run->exit_reason = KVM_EXIT_INTR;
			r = -EINTR;
			break;
		}

		vcpu->mode = IN_GUEST_MODE;

		/*
		 * Reading vcpu->requests must happen after setting vcpu->mode,
		 * so we don't miss a request because the requester sees
		 * OUTSIDE_GUEST_MODE and assumes we'll be checking requests
		 * before next entering the guest (and thus doesn't IPI).
		 * This also orders the write to mode from any reads
		 * to the page tables done while the VCPU is running.
		 * Please see the comment in kvm_flush_remote_tlbs.
		 */
		smp_mb();

		if (kvm_request_pending(vcpu)) {
			/* Make sure we process requests preemptable */
			local_irq_enable();
			trace_kvm_check_requests(vcpu);
			r = kvmppc_core_check_requests(vcpu);
			hard_irq_disable();
			if (r > 0)
				continue;
			break;
		}

		if (kvmppc_core_prepare_to_enter(vcpu)) {
			/* interrupts got enabled in between, so we
			   are back at square 1 */
			continue;
		}

		guest_enter_irqoff();
		return 1;
	}

	/* return to host */
	local_irq_enable();
	return r;
}
EXPORT_SYMBOL_GPL(kvmppc_prepare_to_enter);
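/*
 * A sketch of the typical caller pattern (not taken verbatim from this
 * file): a subarch run loop calls kvmppc_prepare_to_enter() right before
 * switching to guest context and bails out to the host on r <= 0, e.g.:
 *
 *	r = kvmppc_prepare_to_enter(vcpu);
 *	if (r <= 0)
 *		return r;	(back to the host, e.g. KVM_EXIT_INTR set above)
 *	... r == 1: safe to enter the guest ...
 */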
#if defined(CONFIG_PPC_BOOK3S_64) && defined(CONFIG_KVM_BOOK3S_PR_POSSIBLE)
static void kvmppc_swab_shared(struct kvm_vcpu *vcpu)
{
	struct kvm_vcpu_arch_shared *shared = vcpu->arch.shared;
	int i;

	shared->sprg0 = swab64(shared->sprg0);
	shared->sprg1 = swab64(shared->sprg1);
	shared->sprg2 = swab64(shared->sprg2);
	shared->sprg3 = swab64(shared->sprg3);
	shared->srr0 = swab64(shared->srr0);
	shared->srr1 = swab64(shared->srr1);
	shared->dar = swab64(shared->dar);
	shared->msr = swab64(shared->msr);
	shared->dsisr = swab32(shared->dsisr);
	shared->int_pending = swab32(shared->int_pending);
	for (i = 0; i < ARRAY_SIZE(shared->sr); i++)
		shared->sr[i] = swab32(shared->sr[i]);
}
#endif
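/*
 * kvmppc_kvm_pv() handles the KVM paravirtual hypercalls made by a guest:
 * the hypercall number arrives in r11 and the arguments in r3-r6 (masked
 * to 32 bits when the guest is not in 64-bit mode); the return code is
 * handed back to the caller and the second return value is placed in r4.
 */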
int kvmppc_kvm_pv(struct kvm_vcpu *vcpu)
{
	int nr = kvmppc_get_gpr(vcpu, 11);
	int r;
	unsigned long __maybe_unused param1 = kvmppc_get_gpr(vcpu, 3);
	unsigned long __maybe_unused param2 = kvmppc_get_gpr(vcpu, 4);
	unsigned long __maybe_unused param3 = kvmppc_get_gpr(vcpu, 5);
	unsigned long __maybe_unused param4 = kvmppc_get_gpr(vcpu, 6);
	unsigned long r2 = 0;

	if (!(kvmppc_get_msr(vcpu) & MSR_SF)) {
		/* 32 bit mode */
		param1 &= 0xffffffff;
		param2 &= 0xffffffff;
		param3 &= 0xffffffff;
		param4 &= 0xffffffff;
	}

	switch (nr) {
	case KVM_HCALL_TOKEN(KVM_HC_PPC_MAP_MAGIC_PAGE):
	{
#if defined(CONFIG_PPC_BOOK3S_64) && defined(CONFIG_KVM_BOOK3S_PR_POSSIBLE)
		/* Book3S can be little endian, find it out here */
		int shared_big_endian = true;
		if (vcpu->arch.intr_msr & MSR_LE)
			shared_big_endian = false;
		if (shared_big_endian != vcpu->arch.shared_big_endian)
			kvmppc_swab_shared(vcpu);
		vcpu->arch.shared_big_endian = shared_big_endian;
#endif

		if (!(param2 & MAGIC_PAGE_FLAG_NOT_MAPPED_NX)) {
			/*
			 * Older versions of the Linux magic page code had
			 * a bug where they would map their trampoline code
			 * NX. If that's the case, remove !PR NX capability.
			 */
			vcpu->arch.disable_kernel_nx = true;
			kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
		}

		vcpu->arch.magic_page_pa = param1 & ~0xfffULL;
		vcpu->arch.magic_page_ea = param2 & ~0xfffULL;

#ifdef CONFIG_PPC_64K_PAGES
		/*
		 * Make sure our 4k magic page is in the same window of a 64k
		 * page within the guest and within the host's page.
		 */
		if ((vcpu->arch.magic_page_pa & 0xf000) !=
		    ((ulong)vcpu->arch.shared & 0xf000)) {
			void *old_shared = vcpu->arch.shared;
			ulong shared = (ulong)vcpu->arch.shared;
			void *new_shared;

			shared |= vcpu->arch.magic_page_pa & 0xf000;
			new_shared = (void*)shared;
			memcpy(new_shared, old_shared, 0x1000);
			vcpu->arch.shared = new_shared;
		}
#endif

		r2 = KVM_MAGIC_FEAT_SR | KVM_MAGIC_FEAT_MAS0_TO_SPRG7;

		r = EV_SUCCESS;
		break;
	}
	case KVM_HCALL_TOKEN(KVM_HC_FEATURES):
		r = EV_SUCCESS;
#if defined(CONFIG_PPC_BOOK3S) || defined(CONFIG_KVM_E500V2)
		r2 |= (1 << KVM_FEATURE_MAGIC_PAGE);
#endif
		/* Second return value is in r4 */
		break;
	case EV_HCALL_TOKEN(EV_IDLE):
		r = EV_SUCCESS;
		kvm_vcpu_block(vcpu);
		kvm_clear_request(KVM_REQ_UNHALT, vcpu);
		break;
	default:
		r = EV_UNIMPLEMENTED;
		break;
	}

	kvmppc_set_gpr(vcpu, 4, r2);

	return r;
}
EXPORT_SYMBOL_GPL(kvmppc_kvm_pv);
int kvmppc_sanity_check(struct kvm_vcpu *vcpu)
{
	int r = false;

	/* We have to know what CPU to virtualize */
	if (!vcpu->arch.pvr)
		goto out;

	/* PAPR only works with book3s_64 */
	if ((vcpu->arch.cpu_type != KVM_CPU_3S_64) && vcpu->arch.papr_enabled)
		goto out;

	/* HV KVM can only do PAPR mode for now */
	if (!vcpu->arch.papr_enabled && is_kvmppc_hv_enabled(vcpu->kvm))
		goto out;

#ifdef CONFIG_KVM_BOOKE_HV
	if (!cpu_has_feature(CPU_FTR_EMB_HV))
		goto out;
#endif

	r = true;

out:
	vcpu->arch.sane = r;
	return r ? 0 : -EINVAL;
}
EXPORT_SYMBOL_GPL(kvmppc_sanity_check);
int kvmppc_emulate_mmio(struct kvm_run *run, struct kvm_vcpu *vcpu)
{
	enum emulation_result er;
	int r;

	er = kvmppc_emulate_loadstore(vcpu);
	switch (er) {
	case EMULATE_DONE:
		/* Future optimization: only reload non-volatiles if they were
		 * actually modified. */
		r = RESUME_GUEST_NV;
		break;
	case EMULATE_AGAIN:
		r = RESUME_GUEST;
		break;
	case EMULATE_DO_MMIO:
		run->exit_reason = KVM_EXIT_MMIO;
		/* We must reload nonvolatiles because "update" load/store
		 * instructions modify register state. */
		/* Future optimization: only reload non-volatiles if they were
		 * actually modified. */
		r = RESUME_HOST_NV;
		break;
	case EMULATE_FAIL:
	{
		u32 last_inst;

		kvmppc_get_last_inst(vcpu, INST_GENERIC, &last_inst);
		/* XXX Deliver Program interrupt to guest. */
		pr_emerg("%s: emulation failed (%08x)\n", __func__, last_inst);
		r = RESUME_HOST;
		break;
	}
	default:
		WARN_ON(1);
		r = RESUME_GUEST;
	}

	return r;
}
EXPORT_SYMBOL_GPL(kvmppc_emulate_mmio);
int kvmppc_st(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr,
	      bool data)
{
	ulong mp_pa = vcpu->arch.magic_page_pa & KVM_PAM & PAGE_MASK;
	struct kvmppc_pte pte;
	int r;

	r = kvmppc_xlate(vcpu, *eaddr, data ? XLATE_DATA : XLATE_INST,
			 XLATE_WRITE, &pte);
	if (r < 0)
		return r;

	*eaddr = pte.raddr;

	/* Magic page override */
	if (kvmppc_supports_magic_page(vcpu) && mp_pa &&
	    ((pte.raddr & KVM_PAM & PAGE_MASK) == mp_pa) &&
	    !(kvmppc_get_msr(vcpu) & MSR_PR)) {
		void *magic = vcpu->arch.shared;
		magic += pte.eaddr & 0xfff;
		memcpy(magic, ptr, size);
		return EMULATE_DONE;
	}

	if (kvm_write_guest(vcpu->kvm, pte.raddr, ptr, size))
		return EMULATE_DO_MMIO;

	return EMULATE_DONE;
}
EXPORT_SYMBOL_GPL(kvmppc_st);
int kvmppc_ld(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr,
	      bool data)
{
	ulong mp_pa = vcpu->arch.magic_page_pa & KVM_PAM & PAGE_MASK;
	struct kvmppc_pte pte;
	int rc;

	rc = kvmppc_xlate(vcpu, *eaddr, data ? XLATE_DATA : XLATE_INST,
			  XLATE_READ, &pte);
	if (rc)
		return rc;

	*eaddr = pte.raddr;

	if (!data && !pte.may_execute)
		return -ENOEXEC;

	/* Magic page override */
	if (kvmppc_supports_magic_page(vcpu) && mp_pa &&
	    ((pte.raddr & KVM_PAM & PAGE_MASK) == mp_pa) &&
	    !(kvmppc_get_msr(vcpu) & MSR_PR)) {
		void *magic = vcpu->arch.shared;
		magic += pte.eaddr & 0xfff;
		memcpy(ptr, magic, size);
		return EMULATE_DONE;
	}

	if (kvm_read_guest(vcpu->kvm, pte.raddr, ptr, size))
		return EMULATE_DO_MMIO;

	return EMULATE_DONE;
}
EXPORT_SYMBOL_GPL(kvmppc_ld);
int kvm_arch_hardware_enable(void)
{
	return 0;
}

int kvm_arch_hardware_setup(void)
{
	return 0;
}

void kvm_arch_check_processor_compat(void *rtn)
{
	*(int *)rtn = kvmppc_core_check_processor_compat();
}
int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
{
	struct kvmppc_ops *kvm_ops = NULL;
	/*
	 * if we have both HV and PR enabled, default is HV
	 */
	if (type == 0) {
		if (kvmppc_hv_ops)
			kvm_ops = kvmppc_hv_ops;
		else
			kvm_ops = kvmppc_pr_ops;
		if (!kvm_ops)
			goto err_out;
	} else if (type == KVM_VM_PPC_HV) {
		if (!kvmppc_hv_ops)
			goto err_out;
		kvm_ops = kvmppc_hv_ops;
	} else if (type == KVM_VM_PPC_PR) {
		if (!kvmppc_pr_ops)
			goto err_out;
		kvm_ops = kvmppc_pr_ops;
	} else
		goto err_out;

	if (kvm_ops->owner && !try_module_get(kvm_ops->owner))
		return -ENOENT;

	kvm->arch.kvm_ops = kvm_ops;
	return kvmppc_core_init_vm(kvm);
err_out:
	return -EINVAL;
}
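/*
 * Note on the type argument above: userspace can pass KVM_VM_PPC_HV or
 * KVM_VM_PPC_PR as the KVM_CREATE_VM type to force one flavour; with
 * type 0 the default is HV whenever both implementations are available.
 */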
bool kvm_arch_has_vcpu_debugfs(void)
{
	return false;
}

int kvm_arch_create_vcpu_debugfs(struct kvm_vcpu *vcpu)
{
	return 0;
}
void kvm_arch_destroy_vm(struct kvm *kvm)
{
	unsigned int i;
	struct kvm_vcpu *vcpu;

#ifdef CONFIG_KVM_XICS
	/*
	 * We call kick_all_cpus_sync() to ensure that all
	 * CPUs have executed any pending IPIs before we
	 * continue and free VCPUs structures below.
	 */
	if (is_kvmppc_hv_enabled(kvm))
		kick_all_cpus_sync();
#endif

	kvm_for_each_vcpu(i, vcpu, kvm)
		kvm_arch_vcpu_free(vcpu);

	mutex_lock(&kvm->lock);
	for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
		kvm->vcpus[i] = NULL;

	atomic_set(&kvm->online_vcpus, 0);

	kvmppc_core_destroy_vm(kvm);

	mutex_unlock(&kvm->lock);

	/* drop the module reference */
	module_put(kvm->arch.kvm_ops->owner);
}
int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
{
	int r;
	/* Assume we're using HV mode when the HV module is loaded */
	int hv_enabled = kvmppc_hv_ops ? 1 : 0;

	if (kvm) {
		/*
		 * Hooray - we know which VM type we're running on. Depend on
		 * that rather than the guess above.
		 */
		hv_enabled = is_kvmppc_hv_enabled(kvm);
	}

	switch (ext) {
#ifdef CONFIG_BOOKE
	case KVM_CAP_PPC_BOOKE_SREGS:
	case KVM_CAP_PPC_BOOKE_WATCHDOG:
	case KVM_CAP_PPC_EPR:
#else
	case KVM_CAP_PPC_SEGSTATE:
	case KVM_CAP_PPC_HIOR:
	case KVM_CAP_PPC_PAPR:
#endif
	case KVM_CAP_PPC_UNSET_IRQ:
	case KVM_CAP_PPC_IRQ_LEVEL:
	case KVM_CAP_ENABLE_CAP:
	case KVM_CAP_ENABLE_CAP_VM:
	case KVM_CAP_ONE_REG:
	case KVM_CAP_IOEVENTFD:
	case KVM_CAP_DEVICE_CTRL:
	case KVM_CAP_IMMEDIATE_EXIT:
		r = 1;
		break;
	case KVM_CAP_PPC_PAIRED_SINGLES:
	case KVM_CAP_PPC_OSI:
	case KVM_CAP_PPC_GET_PVINFO:
#if defined(CONFIG_KVM_E500V2) || defined(CONFIG_KVM_E500MC)
	case KVM_CAP_SW_TLB:
#endif
		/* We support this only for PR */
		r = !hv_enabled;
		break;
#ifdef CONFIG_KVM_MPIC
	case KVM_CAP_IRQ_MPIC:
		r = 1;
		break;
#endif

#ifdef CONFIG_PPC_BOOK3S_64
	case KVM_CAP_SPAPR_TCE:
	case KVM_CAP_SPAPR_TCE_64:
		/* fallthrough */
	case KVM_CAP_SPAPR_TCE_VFIO:
	case KVM_CAP_PPC_RTAS:
	case KVM_CAP_PPC_FIXUP_HCALL:
	case KVM_CAP_PPC_ENABLE_HCALL:
#ifdef CONFIG_KVM_XICS
	case KVM_CAP_IRQ_XICS:
#endif
	case KVM_CAP_PPC_GET_CPU_CHAR:
		r = 1;
		break;

	case KVM_CAP_PPC_ALLOC_HTAB:
		r = hv_enabled;
		break;
#endif /* CONFIG_PPC_BOOK3S_64 */
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
	case KVM_CAP_PPC_SMT:
		r = 0;
		if (kvm) {
			if (kvm->arch.emul_smt_mode > 1)
				r = kvm->arch.emul_smt_mode;
			else
				r = kvm->arch.smt_mode;
		} else if (hv_enabled) {
			if (cpu_has_feature(CPU_FTR_ARCH_300))
				r = 1;
			else
				r = threads_per_subcore;
		}
		break;
	case KVM_CAP_PPC_SMT_POSSIBLE:
		r = 1;
		if (hv_enabled) {
			if (!cpu_has_feature(CPU_FTR_ARCH_300))
				r = ((threads_per_subcore << 1) - 1);
			else
				/* P9 can emulate dbells, so allow any mode */
				r = 8 | 4 | 2 | 1;
		}
		break;
	case KVM_CAP_PPC_RMA:
		r = 0;
		break;
	case KVM_CAP_PPC_HWRNG:
		r = kvmppc_hwrng_present();
		break;
	case KVM_CAP_PPC_MMU_RADIX:
		r = !!(hv_enabled && radix_enabled());
		break;
	case KVM_CAP_PPC_MMU_HASH_V3:
		r = !!(hv_enabled && cpu_has_feature(CPU_FTR_ARCH_300));
		break;
#endif
	case KVM_CAP_SYNC_MMU:
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
		r = hv_enabled;
#elif defined(KVM_ARCH_WANT_MMU_NOTIFIER)
		r = 1;
#else
		r = 0;
#endif
		break;
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
	case KVM_CAP_PPC_HTAB_FD:
		r = hv_enabled;
		break;
#endif
	case KVM_CAP_NR_VCPUS:
		/*
		 * Recommending a number of CPUs is somewhat arbitrary; we
		 * return the number of present CPUs for -HV (since a host
		 * will have secondary threads "offline"), and for other KVM
		 * implementations just count online CPUs.
		 */
		if (hv_enabled)
			r = num_present_cpus();
		else
			r = num_online_cpus();
		break;
	case KVM_CAP_NR_MEMSLOTS:
		r = KVM_USER_MEM_SLOTS;
		break;
	case KVM_CAP_MAX_VCPUS:
		r = KVM_MAX_VCPUS;
		break;
#ifdef CONFIG_PPC_BOOK3S_64
	case KVM_CAP_PPC_GET_SMMU_INFO:
		r = 1;
		break;
	case KVM_CAP_SPAPR_MULTITCE:
		r = 1;
		break;
	case KVM_CAP_SPAPR_RESIZE_HPT:
		r = !!hv_enabled;
		break;
#endif
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
	case KVM_CAP_PPC_FWNMI:
		r = hv_enabled;
		break;
#endif
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	case KVM_CAP_PPC_HTM:
		r = hv_enabled &&
		    (!!(cur_cpu_spec->cpu_user_features2 & PPC_FEATURE2_HTM) ||
		     cpu_has_feature(CPU_FTR_P9_TM_HV_ASSIST));
		break;
#endif
	default:
		r = 0;
		break;
	}
	return r;
}
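/*
 * Userspace queries these capabilities through the KVM_CHECK_EXTENSION
 * ioctl; for example (a sketch, not from this file):
 *
 *	nr_slots = ioctl(vm_fd, KVM_CHECK_EXTENSION, KVM_CAP_NR_MEMSLOTS);
 */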
long kvm_arch_dev_ioctl(struct file *filp,
			unsigned int ioctl, unsigned long arg)
{
	return -EINVAL;
}

void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free,
			   struct kvm_memory_slot *dont)
{
	kvmppc_core_free_memslot(kvm, free, dont);
}

int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
			    unsigned long npages)
{
	return kvmppc_core_create_memslot(kvm, slot, npages);
}

int kvm_arch_prepare_memory_region(struct kvm *kvm,
				   struct kvm_memory_slot *memslot,
				   const struct kvm_userspace_memory_region *mem,
				   enum kvm_mr_change change)
{
	return kvmppc_core_prepare_memory_region(kvm, memslot, mem);
}

void kvm_arch_commit_memory_region(struct kvm *kvm,
				   const struct kvm_userspace_memory_region *mem,
				   const struct kvm_memory_slot *old,
				   const struct kvm_memory_slot *new,
				   enum kvm_mr_change change)
{
	kvmppc_core_commit_memory_region(kvm, mem, old, new);
}

void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
				   struct kvm_memory_slot *slot)
{
	kvmppc_core_flush_memslot(kvm, slot);
}
struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id)
{
	struct kvm_vcpu *vcpu;

	vcpu = kvmppc_core_vcpu_create(kvm, id);
	if (!IS_ERR(vcpu)) {
		vcpu->arch.wqp = &vcpu->wq;
		kvmppc_create_vcpu_debugfs(vcpu, id);
	}
	return vcpu;
}

void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
{
}

void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu)
{
	/* Make sure we're not using the vcpu anymore */
	hrtimer_cancel(&vcpu->arch.dec_timer);

	kvmppc_remove_vcpu_debugfs(vcpu);

	switch (vcpu->arch.irq_type) {
	case KVMPPC_IRQ_MPIC:
		kvmppc_mpic_disconnect_vcpu(vcpu->arch.mpic, vcpu);
		break;
	case KVMPPC_IRQ_XICS:
		if (xive_enabled())
			kvmppc_xive_cleanup_vcpu(vcpu);
		else
			kvmppc_xics_free_icp(vcpu);
		break;
	}

	kvmppc_core_vcpu_free(vcpu);
}

void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{
	kvm_arch_vcpu_free(vcpu);
}

int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
{
	return kvmppc_core_pending_dec(vcpu);
}
static enum hrtimer_restart kvmppc_decrementer_wakeup(struct hrtimer *timer)
{
	struct kvm_vcpu *vcpu;

	vcpu = container_of(timer, struct kvm_vcpu, arch.dec_timer);
	kvmppc_decrementer_func(vcpu);

	return HRTIMER_NORESTART;
}

int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
{
	int ret;

	hrtimer_init(&vcpu->arch.dec_timer, CLOCK_REALTIME, HRTIMER_MODE_ABS);
	vcpu->arch.dec_timer.function = kvmppc_decrementer_wakeup;
	vcpu->arch.dec_expires = get_tb();

#ifdef CONFIG_KVM_EXIT_TIMING
	mutex_init(&vcpu->arch.exit_timing_lock);
#endif
	ret = kvmppc_subarch_vcpu_init(vcpu);
	return ret;
}

void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
{
	kvmppc_mmu_destroy(vcpu);
	kvmppc_subarch_vcpu_uninit(vcpu);
}
void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
#ifdef CONFIG_BOOKE
	/*
	 * vrsave (formerly usprg0) isn't used by Linux, but may
	 * be used by the guest.
	 *
	 * On non-booke this is associated with Altivec and
	 * is handled by code in book3s.c.
	 */
	mtspr(SPRN_VRSAVE, vcpu->arch.vrsave);
#endif
	kvmppc_core_vcpu_load(vcpu, cpu);
}

void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
	kvmppc_core_vcpu_put(vcpu);
#ifdef CONFIG_BOOKE
	vcpu->arch.vrsave = mfspr(SPRN_VRSAVE);
#endif
}
/*
 * irq_bypass_add_producer and irq_bypass_del_producer are only
 * useful if the architecture supports PCI passthrough.
 * irq_bypass_stop and irq_bypass_start are not needed and so
 * kvm_ops are not defined for them.
 */
bool kvm_arch_has_irq_bypass(void)
{
	return ((kvmppc_hv_ops && kvmppc_hv_ops->irq_bypass_add_producer) ||
		(kvmppc_pr_ops && kvmppc_pr_ops->irq_bypass_add_producer));
}

int kvm_arch_irq_bypass_add_producer(struct irq_bypass_consumer *cons,
				     struct irq_bypass_producer *prod)
{
	struct kvm_kernel_irqfd *irqfd =
		container_of(cons, struct kvm_kernel_irqfd, consumer);
	struct kvm *kvm = irqfd->kvm;

	if (kvm->arch.kvm_ops->irq_bypass_add_producer)
		return kvm->arch.kvm_ops->irq_bypass_add_producer(cons, prod);

	return 0;
}

void kvm_arch_irq_bypass_del_producer(struct irq_bypass_consumer *cons,
				      struct irq_bypass_producer *prod)
{
	struct kvm_kernel_irqfd *irqfd =
		container_of(cons, struct kvm_kernel_irqfd, consumer);
	struct kvm *kvm = irqfd->kvm;

	if (kvm->arch.kvm_ops->irq_bypass_del_producer)
		kvm->arch.kvm_ops->irq_bypass_del_producer(cons, prod);
}
#ifdef CONFIG_VSX
static inline int kvmppc_get_vsr_dword_offset(int index)
{
	int offset;

	if ((index != 0) && (index != 1))
		return -1;

#ifdef __BIG_ENDIAN
	offset = index;
#else
	offset = 1 - index;
#endif

	return offset;
}

static inline int kvmppc_get_vsr_word_offset(int index)
{
	int offset;

	if ((index > 3) || (index < 0))
		return -1;

#ifdef __BIG_ENDIAN
	offset = index;
#else
	offset = 3 - index;
#endif
	return offset;
}

static inline void kvmppc_set_vsr_dword(struct kvm_vcpu *vcpu,
	u64 gpr)
{
	union kvmppc_one_reg val;
	int offset = kvmppc_get_vsr_dword_offset(vcpu->arch.mmio_vsx_offset);
	int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK;

	if (offset == -1)
		return;

	if (vcpu->arch.mmio_vsx_tx_sx_enabled) {
		val.vval = VCPU_VSX_VR(vcpu, index);
		val.vsxval[offset] = gpr;
		VCPU_VSX_VR(vcpu, index) = val.vval;
	} else {
		VCPU_VSX_FPR(vcpu, index, offset) = gpr;
	}
}

static inline void kvmppc_set_vsr_dword_dump(struct kvm_vcpu *vcpu,
	u64 gpr)
{
	union kvmppc_one_reg val;
	int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK;

	if (vcpu->arch.mmio_vsx_tx_sx_enabled) {
		val.vval = VCPU_VSX_VR(vcpu, index);
		val.vsxval[0] = gpr;
		val.vsxval[1] = gpr;
		VCPU_VSX_VR(vcpu, index) = val.vval;
	} else {
		VCPU_VSX_FPR(vcpu, index, 0) = gpr;
		VCPU_VSX_FPR(vcpu, index, 1) = gpr;
	}
}

static inline void kvmppc_set_vsr_word(struct kvm_vcpu *vcpu,
	u32 gpr32)
{
	union kvmppc_one_reg val;
	int offset = kvmppc_get_vsr_word_offset(vcpu->arch.mmio_vsx_offset);
	int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK;
	int dword_offset, word_offset;

	if (offset == -1)
		return;

	if (vcpu->arch.mmio_vsx_tx_sx_enabled) {
		val.vval = VCPU_VSX_VR(vcpu, index);
		val.vsx32val[offset] = gpr32;
		VCPU_VSX_VR(vcpu, index) = val.vval;
	} else {
		dword_offset = offset / 2;
		word_offset = offset % 2;
		val.vsxval[0] = VCPU_VSX_FPR(vcpu, index, dword_offset);
		val.vsx32val[word_offset] = gpr32;
		VCPU_VSX_FPR(vcpu, index, dword_offset) = val.vsxval[0];
	}
}
#endif /* CONFIG_VSX */
#ifdef CONFIG_ALTIVEC
static inline void kvmppc_set_vmx_dword(struct kvm_vcpu *vcpu,
	u64 gpr)
{
	int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK;
	u32 hi, lo;
	u32 di;

#ifdef __BIG_ENDIAN
	hi = gpr >> 32;
	lo = gpr & 0xffffffff;
#else
	lo = gpr >> 32;
	hi = gpr & 0xffffffff;
#endif

	di = 2 - vcpu->arch.mmio_vmx_copy_nums;		/* doubleword index */
	if (di > 1)
		return;

	if (vcpu->arch.mmio_host_swabbed)
		di = 1 - di;

	VCPU_VSX_VR(vcpu, index).u[di * 2] = hi;
	VCPU_VSX_VR(vcpu, index).u[di * 2 + 1] = lo;
}
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_PPC_FPU
static inline u64 sp_to_dp(u32 fprs)
{
	u64 fprd;

	preempt_disable();
	enable_kernel_fp();
	asm ("lfs%U1%X1 0,%1; stfd%U0%X0 0,%0" : "=m" (fprd) : "m" (fprs)
	     : "fr0");
	preempt_enable();
	return fprd;
}

static inline u32 dp_to_sp(u64 fprd)
{
	u32 fprs;

	preempt_disable();
	enable_kernel_fp();
	asm ("lfd%U1%X1 0,%1; stfs%U0%X0 0,%0" : "=m" (fprs) : "m" (fprd)
	     : "fr0");
	preempt_enable();
	return fprs;
}

#else
#define sp_to_dp(x)	(x)
#define dp_to_sp(x)	(x)
#endif /* CONFIG_PPC_FPU */
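/*
 * sp_to_dp()/dp_to_sp() above convert between single- and double-precision
 * bit patterns by bouncing the value through a floating-point register
 * (lfs/stfd and lfd/stfs), letting the FPU do the format conversion.  The
 * MMIO paths below use them when mmio_sp64_extend is set on 4-byte
 * accesses.
 */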
static void kvmppc_complete_mmio_load(struct kvm_vcpu *vcpu,
				      struct kvm_run *run)
{
	u64 uninitialized_var(gpr);

	if (run->mmio.len > sizeof(gpr)) {
		printk(KERN_ERR "bad MMIO length: %d\n", run->mmio.len);
		return;
	}

	if (!vcpu->arch.mmio_host_swabbed) {
		switch (run->mmio.len) {
		case 8: gpr = *(u64 *)run->mmio.data; break;
		case 4: gpr = *(u32 *)run->mmio.data; break;
		case 2: gpr = *(u16 *)run->mmio.data; break;
		case 1: gpr = *(u8 *)run->mmio.data; break;
		}
	} else {
		switch (run->mmio.len) {
		case 8: gpr = swab64(*(u64 *)run->mmio.data); break;
		case 4: gpr = swab32(*(u32 *)run->mmio.data); break;
		case 2: gpr = swab16(*(u16 *)run->mmio.data); break;
		case 1: gpr = *(u8 *)run->mmio.data; break;
		}
	}

	/* conversion between single and double precision */
	if ((vcpu->arch.mmio_sp64_extend) && (run->mmio.len == 4))
		gpr = sp_to_dp(gpr);

	if (vcpu->arch.mmio_sign_extend) {
		switch (run->mmio.len) {
#ifdef CONFIG_PPC64
		case 4:
			gpr = (s64)(s32)gpr;
			break;
#endif
		case 2:
			gpr = (s64)(s16)gpr;
			break;
		case 1:
			gpr = (s64)(s8)gpr;
			break;
		}
	}

	switch (vcpu->arch.io_gpr & KVM_MMIO_REG_EXT_MASK) {
	case KVM_MMIO_REG_GPR:
		kvmppc_set_gpr(vcpu, vcpu->arch.io_gpr, gpr);
		break;
	case KVM_MMIO_REG_FPR:
		VCPU_FPR(vcpu, vcpu->arch.io_gpr & KVM_MMIO_REG_MASK) = gpr;
		break;
#ifdef CONFIG_PPC_BOOK3S
	case KVM_MMIO_REG_QPR:
		vcpu->arch.qpr[vcpu->arch.io_gpr & KVM_MMIO_REG_MASK] = gpr;
		break;
	case KVM_MMIO_REG_FQPR:
		VCPU_FPR(vcpu, vcpu->arch.io_gpr & KVM_MMIO_REG_MASK) = gpr;
		vcpu->arch.qpr[vcpu->arch.io_gpr & KVM_MMIO_REG_MASK] = gpr;
		break;
#endif
#ifdef CONFIG_VSX
	case KVM_MMIO_REG_VSX:
		if (vcpu->arch.mmio_vsx_copy_type == KVMPPC_VSX_COPY_DWORD)
			kvmppc_set_vsr_dword(vcpu, gpr);
		else if (vcpu->arch.mmio_vsx_copy_type == KVMPPC_VSX_COPY_WORD)
			kvmppc_set_vsr_word(vcpu, gpr);
		else if (vcpu->arch.mmio_vsx_copy_type ==
				KVMPPC_VSX_COPY_DWORD_LOAD_DUMP)
			kvmppc_set_vsr_dword_dump(vcpu, gpr);
		break;
#endif
#ifdef CONFIG_ALTIVEC
	case KVM_MMIO_REG_VMX:
		kvmppc_set_vmx_dword(vcpu, gpr);
		break;
#endif
	default:
		BUG();
	}
}
static int __kvmppc_handle_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
				unsigned int rt, unsigned int bytes,
				int is_default_endian, int sign_extend)
{
	int idx, ret;
	bool host_swabbed;

	/* Pity C doesn't have a logical XOR operator */
	if (kvmppc_need_byteswap(vcpu)) {
		host_swabbed = is_default_endian;
	} else {
		host_swabbed = !is_default_endian;
	}

	if (bytes > sizeof(run->mmio.data)) {
		printk(KERN_ERR "%s: bad MMIO length: %d\n", __func__,
		       run->mmio.len);
	}

	run->mmio.phys_addr = vcpu->arch.paddr_accessed;
	run->mmio.len = bytes;
	run->mmio.is_write = 0;

	vcpu->arch.io_gpr = rt;
	vcpu->arch.mmio_host_swabbed = host_swabbed;
	vcpu->mmio_needed = 1;
	vcpu->mmio_is_write = 0;
	vcpu->arch.mmio_sign_extend = sign_extend;

	idx = srcu_read_lock(&vcpu->kvm->srcu);

	ret = kvm_io_bus_read(vcpu, KVM_MMIO_BUS, run->mmio.phys_addr,
			      bytes, &run->mmio.data);

	srcu_read_unlock(&vcpu->kvm->srcu, idx);

	if (!ret) {
		kvmppc_complete_mmio_load(vcpu, run);
		vcpu->mmio_needed = 0;
		return EMULATE_DONE;
	}

	return EMULATE_DO_MMIO;
}

int kvmppc_handle_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
		       unsigned int rt, unsigned int bytes,
		       int is_default_endian)
{
	return __kvmppc_handle_load(run, vcpu, rt, bytes, is_default_endian, 0);
}
EXPORT_SYMBOL_GPL(kvmppc_handle_load);

/* Same as above, but sign extends */
int kvmppc_handle_loads(struct kvm_run *run, struct kvm_vcpu *vcpu,
			unsigned int rt, unsigned int bytes,
			int is_default_endian)
{
	return __kvmppc_handle_load(run, vcpu, rt, bytes, is_default_endian, 1);
}
#ifdef CONFIG_VSX
int kvmppc_handle_vsx_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
			unsigned int rt, unsigned int bytes,
			int is_default_endian, int mmio_sign_extend)
{
	enum emulation_result emulated = EMULATE_DONE;

	/* Currently, mmio_vsx_copy_nums only allowed to be 4 or less */
	if (vcpu->arch.mmio_vsx_copy_nums > 4)
		return EMULATE_FAIL;

	while (vcpu->arch.mmio_vsx_copy_nums) {
		emulated = __kvmppc_handle_load(run, vcpu, rt, bytes,
			is_default_endian, mmio_sign_extend);

		if (emulated != EMULATE_DONE)
			break;

		vcpu->arch.paddr_accessed += run->mmio.len;

		vcpu->arch.mmio_vsx_copy_nums--;
		vcpu->arch.mmio_vsx_offset++;
	}
	return emulated;
}
#endif /* CONFIG_VSX */
int kvmppc_handle_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
			u64 val, unsigned int bytes, int is_default_endian)
{
	void *data = run->mmio.data;
	int idx, ret;
	bool host_swabbed;

	/* Pity C doesn't have a logical XOR operator */
	if (kvmppc_need_byteswap(vcpu)) {
		host_swabbed = is_default_endian;
	} else {
		host_swabbed = !is_default_endian;
	}

	if (bytes > sizeof(run->mmio.data)) {
		printk(KERN_ERR "%s: bad MMIO length: %d\n", __func__,
		       run->mmio.len);
	}

	run->mmio.phys_addr = vcpu->arch.paddr_accessed;
	run->mmio.len = bytes;
	run->mmio.is_write = 1;
	vcpu->mmio_needed = 1;
	vcpu->mmio_is_write = 1;

	if ((vcpu->arch.mmio_sp64_extend) && (bytes == 4))
		val = dp_to_sp(val);

	/* Store the value at the lowest bytes in 'data'. */
	if (!host_swabbed) {
		switch (bytes) {
		case 8: *(u64 *)data = val; break;
		case 4: *(u32 *)data = val; break;
		case 2: *(u16 *)data = val; break;
		case 1: *(u8  *)data = val; break;
		}
	} else {
		switch (bytes) {
		case 8: *(u64 *)data = swab64(val); break;
		case 4: *(u32 *)data = swab32(val); break;
		case 2: *(u16 *)data = swab16(val); break;
		case 1: *(u8  *)data = val; break;
		}
	}

	idx = srcu_read_lock(&vcpu->kvm->srcu);

	ret = kvm_io_bus_write(vcpu, KVM_MMIO_BUS, run->mmio.phys_addr,
			       bytes, &run->mmio.data);

	srcu_read_unlock(&vcpu->kvm->srcu, idx);

	if (!ret) {
		vcpu->mmio_needed = 0;
		return EMULATE_DONE;
	}

	return EMULATE_DO_MMIO;
}
EXPORT_SYMBOL_GPL(kvmppc_handle_store);
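/*
 * MMIO flow in brief: the load/store handlers above first try to satisfy
 * the access against an in-kernel device via kvm_io_bus_read/write.  If
 * that fails they fill in run->mmio and return EMULATE_DO_MMIO; the exit
 * reason is then set to KVM_EXIT_MMIO and userspace completes the access.
 * On the next KVM_RUN a pending load is finished in
 * kvm_arch_vcpu_ioctl_run() through kvmppc_complete_mmio_load().
 */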
#ifdef CONFIG_VSX
static inline int kvmppc_get_vsr_data(struct kvm_vcpu *vcpu, int rs, u64 *val)
{
	u32 dword_offset, word_offset;
	union kvmppc_one_reg reg;
	int vsx_offset = 0;
	int copy_type = vcpu->arch.mmio_vsx_copy_type;
	int result = 0;

	switch (copy_type) {
	case KVMPPC_VSX_COPY_DWORD:
		vsx_offset =
			kvmppc_get_vsr_dword_offset(vcpu->arch.mmio_vsx_offset);

		if (vsx_offset == -1) {
			result = -1;
			break;
		}

		if (!vcpu->arch.mmio_vsx_tx_sx_enabled) {
			*val = VCPU_VSX_FPR(vcpu, rs, vsx_offset);
		} else {
			reg.vval = VCPU_VSX_VR(vcpu, rs);
			*val = reg.vsxval[vsx_offset];
		}
		break;

	case KVMPPC_VSX_COPY_WORD:
		vsx_offset =
			kvmppc_get_vsr_word_offset(vcpu->arch.mmio_vsx_offset);

		if (vsx_offset == -1) {
			result = -1;
			break;
		}

		if (!vcpu->arch.mmio_vsx_tx_sx_enabled) {
			dword_offset = vsx_offset / 2;
			word_offset = vsx_offset % 2;
			reg.vsxval[0] = VCPU_VSX_FPR(vcpu, rs, dword_offset);
			*val = reg.vsx32val[word_offset];
		} else {
			reg.vval = VCPU_VSX_VR(vcpu, rs);
			*val = reg.vsx32val[vsx_offset];
		}
		break;

	default:
		result = -1;
		break;
	}

	return result;
}
int kvmppc_handle_vsx_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
			int rs, unsigned int bytes, int is_default_endian)
{
	u64 val;
	enum emulation_result emulated = EMULATE_DONE;

	vcpu->arch.io_gpr = rs;

	/* Currently, mmio_vsx_copy_nums only allowed to be 4 or less */
	if (vcpu->arch.mmio_vsx_copy_nums > 4)
		return EMULATE_FAIL;

	while (vcpu->arch.mmio_vsx_copy_nums) {
		if (kvmppc_get_vsr_data(vcpu, rs, &val) == -1)
			return EMULATE_FAIL;

		emulated = kvmppc_handle_store(run, vcpu,
			 val, bytes, is_default_endian);

		if (emulated != EMULATE_DONE)
			break;

		vcpu->arch.paddr_accessed += run->mmio.len;

		vcpu->arch.mmio_vsx_copy_nums--;
		vcpu->arch.mmio_vsx_offset++;
	}

	return emulated;
}
static int kvmppc_emulate_mmio_vsx_loadstore(struct kvm_vcpu *vcpu,
			struct kvm_run *run)
{
	enum emulation_result emulated = EMULATE_FAIL;
	int r;

	vcpu->arch.paddr_accessed += run->mmio.len;

	if (!vcpu->mmio_is_write) {
		emulated = kvmppc_handle_vsx_load(run, vcpu, vcpu->arch.io_gpr,
			 run->mmio.len, 1, vcpu->arch.mmio_sign_extend);
	} else {
		emulated = kvmppc_handle_vsx_store(run, vcpu,
			 vcpu->arch.io_gpr, run->mmio.len, 1);
	}

	switch (emulated) {
	case EMULATE_DO_MMIO:
		run->exit_reason = KVM_EXIT_MMIO;
		r = RESUME_HOST;
		break;
	case EMULATE_FAIL:
		pr_info("KVM: MMIO emulation failed (VSX repeat)\n");
		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION;
		r = RESUME_HOST;
		break;
	default:
		r = RESUME_GUEST;
		break;
	}
	return r;
}
#endif /* CONFIG_VSX */
#ifdef CONFIG_ALTIVEC
/* handle quadword load access in two halves */
int kvmppc_handle_load128_by2x64(struct kvm_run *run, struct kvm_vcpu *vcpu,
		unsigned int rt, int is_default_endian)
{
	enum emulation_result emulated = EMULATE_DONE;

	while (vcpu->arch.mmio_vmx_copy_nums) {
		emulated = __kvmppc_handle_load(run, vcpu, rt, 8,
				is_default_endian, 0);

		if (emulated != EMULATE_DONE)
			break;

		vcpu->arch.paddr_accessed += run->mmio.len;
		vcpu->arch.mmio_vmx_copy_nums--;
	}

	return emulated;
}

static inline int kvmppc_get_vmx_data(struct kvm_vcpu *vcpu, int rs, u64 *val)
{
	vector128 vrs = VCPU_VSX_VR(vcpu, rs);
	u32 di;
	u64 w0, w1;

	di = 2 - vcpu->arch.mmio_vmx_copy_nums;	/* doubleword index */
	if (di > 1)
		return -1;

	if (vcpu->arch.mmio_host_swabbed)
		di = 1 - di;

	w0 = vrs.u[di * 2];
	w1 = vrs.u[di * 2 + 1];

#ifdef __BIG_ENDIAN
	*val = (w0 << 32) | w1;
#else
	*val = (w1 << 32) | w0;
#endif
	return 0;
}

/* handle quadword store in two halves */
int kvmppc_handle_store128_by2x64(struct kvm_run *run, struct kvm_vcpu *vcpu,
		unsigned int rs, int is_default_endian)
{
	u64 val = 0;
	enum emulation_result emulated = EMULATE_DONE;

	vcpu->arch.io_gpr = rs;

	while (vcpu->arch.mmio_vmx_copy_nums) {
		if (kvmppc_get_vmx_data(vcpu, rs, &val) == -1)
			return EMULATE_FAIL;

		emulated = kvmppc_handle_store(run, vcpu, val, 8,
				is_default_endian);
		if (emulated != EMULATE_DONE)
			break;

		vcpu->arch.paddr_accessed += run->mmio.len;
		vcpu->arch.mmio_vmx_copy_nums--;
	}

	return emulated;
}

static int kvmppc_emulate_mmio_vmx_loadstore(struct kvm_vcpu *vcpu,
		struct kvm_run *run)
{
	enum emulation_result emulated = EMULATE_FAIL;
	int r;

	vcpu->arch.paddr_accessed += run->mmio.len;

	if (!vcpu->mmio_is_write) {
		emulated = kvmppc_handle_load128_by2x64(run, vcpu,
				vcpu->arch.io_gpr, 1);
	} else {
		emulated = kvmppc_handle_store128_by2x64(run, vcpu,
				vcpu->arch.io_gpr, 1);
	}

	switch (emulated) {
	case EMULATE_DO_MMIO:
		run->exit_reason = KVM_EXIT_MMIO;
		r = RESUME_HOST;
		break;
	case EMULATE_FAIL:
		pr_info("KVM: MMIO emulation failed (VMX repeat)\n");
		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION;
		r = RESUME_HOST;
		break;
	default:
		r = RESUME_GUEST;
		break;
	}
	return r;
}
#endif /* CONFIG_ALTIVEC */
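/*
 * run->mmio.data can carry at most 8 bytes per exit (see the length checks
 * above), so 16-byte VMX/Altivec accesses are split into two 8-byte halves
 * here, with mmio_vmx_copy_nums counting the halves still outstanding.
 */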
int kvm_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
{
	int r = 0;
	union kvmppc_one_reg val;
	int size;

	size = one_reg_size(reg->id);
	if (size > sizeof(val))
		return -EINVAL;

	r = kvmppc_get_one_reg(vcpu, reg->id, &val);
	if (r == -EINVAL) {
		r = 0;
		switch (reg->id) {
#ifdef CONFIG_ALTIVEC
		case KVM_REG_PPC_VR0 ... KVM_REG_PPC_VR31:
			if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
				r = -ENXIO;
				break;
			}
			val.vval = vcpu->arch.vr.vr[reg->id - KVM_REG_PPC_VR0];
			break;
		case KVM_REG_PPC_VSCR:
			if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
				r = -ENXIO;
				break;
			}
			val = get_reg_val(reg->id, vcpu->arch.vr.vscr.u[3]);
			break;
		case KVM_REG_PPC_VRSAVE:
			val = get_reg_val(reg->id, vcpu->arch.vrsave);
			break;
#endif /* CONFIG_ALTIVEC */
		default:
			r = -EINVAL;
			break;
		}
	}

	if (r)
		return r;

	if (copy_to_user((char __user *)(unsigned long)reg->addr, &val, size))
		r = -EFAULT;

	return r;
}
int kvm_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
{
	int r;
	union kvmppc_one_reg val;
	int size;

	size = one_reg_size(reg->id);
	if (size > sizeof(val))
		return -EINVAL;

	if (copy_from_user(&val, (char __user *)(unsigned long)reg->addr, size))
		return -EFAULT;

	r = kvmppc_set_one_reg(vcpu, reg->id, &val);
	if (r == -EINVAL) {
		r = 0;
		switch (reg->id) {
#ifdef CONFIG_ALTIVEC
		case KVM_REG_PPC_VR0 ... KVM_REG_PPC_VR31:
			if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
				r = -ENXIO;
				break;
			}
			vcpu->arch.vr.vr[reg->id - KVM_REG_PPC_VR0] = val.vval;
			break;
		case KVM_REG_PPC_VSCR:
			if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
				r = -ENXIO;
				break;
			}
			vcpu->arch.vr.vscr.u[3] = set_reg_val(reg->id, val);
			break;
		case KVM_REG_PPC_VRSAVE:
			if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
				r = -ENXIO;
				break;
			}
			vcpu->arch.vrsave = set_reg_val(reg->id, val);
			break;
#endif /* CONFIG_ALTIVEC */
		default:
			r = -EINVAL;
			break;
		}
	}

	return r;
}
int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	int r;
	int i;

	vcpu_load(vcpu);

	if (vcpu->mmio_needed) {
		vcpu->mmio_needed = 0;
		if (!vcpu->mmio_is_write)
			kvmppc_complete_mmio_load(vcpu, run);
#ifdef CONFIG_VSX
		if (vcpu->arch.mmio_vsx_copy_nums > 0) {
			vcpu->arch.mmio_vsx_copy_nums--;
			vcpu->arch.mmio_vsx_offset++;
		}

		if (vcpu->arch.mmio_vsx_copy_nums > 0) {
			r = kvmppc_emulate_mmio_vsx_loadstore(vcpu, run);
			if (r == RESUME_HOST) {
				vcpu->mmio_needed = 1;
				goto out;
			}
		}
#endif
#ifdef CONFIG_ALTIVEC
		if (vcpu->arch.mmio_vmx_copy_nums > 0)
			vcpu->arch.mmio_vmx_copy_nums--;

		if (vcpu->arch.mmio_vmx_copy_nums > 0) {
			r = kvmppc_emulate_mmio_vmx_loadstore(vcpu, run);
			if (r == RESUME_HOST) {
				vcpu->mmio_needed = 1;
				goto out;
			}
		}
#endif
	} else if (vcpu->arch.osi_needed) {
		u64 *gprs = run->osi.gprs;

		for (i = 0; i < 32; i++)
			kvmppc_set_gpr(vcpu, i, gprs[i]);
		vcpu->arch.osi_needed = 0;
	} else if (vcpu->arch.hcall_needed) {
		kvmppc_set_gpr(vcpu, 3, run->papr_hcall.ret);
		for (i = 0; i < 9; ++i)
			kvmppc_set_gpr(vcpu, 4 + i, run->papr_hcall.args[i]);
		vcpu->arch.hcall_needed = 0;
	} else if (vcpu->arch.epr_needed) {
		kvmppc_set_epr(vcpu, run->epr.epr);
		vcpu->arch.epr_needed = 0;
	}

	kvm_sigset_activate(vcpu);

	if (run->immediate_exit)
		r = -EINTR;
	else
		r = kvmppc_vcpu_run(run, vcpu);

	kvm_sigset_deactivate(vcpu);

#ifdef CONFIG_ALTIVEC
out:
#endif
	vcpu_put(vcpu);
	return r;
}
int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu, struct kvm_interrupt *irq)
{
	if (irq->irq == KVM_INTERRUPT_UNSET) {
		kvmppc_core_dequeue_external(vcpu);
		return 0;
	}

	kvmppc_core_queue_external(vcpu, irq);

	kvm_vcpu_kick(vcpu);

	return 0;
}
static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
				     struct kvm_enable_cap *cap)
{
	int r;

	if (cap->flags)
		return -EINVAL;

	switch (cap->cap) {
	case KVM_CAP_PPC_OSI:
		r = 0;
		vcpu->arch.osi_enabled = true;
		break;
	case KVM_CAP_PPC_PAPR:
		r = 0;
		vcpu->arch.papr_enabled = true;
		break;
	case KVM_CAP_PPC_EPR:
		r = 0;
		if (cap->args[0])
			vcpu->arch.epr_flags |= KVMPPC_EPR_USER;
		else
			vcpu->arch.epr_flags &= ~KVMPPC_EPR_USER;
		break;
#ifdef CONFIG_BOOKE
	case KVM_CAP_PPC_BOOKE_WATCHDOG:
		r = 0;
		vcpu->arch.watchdog_enabled = true;
		break;
#endif
#if defined(CONFIG_KVM_E500V2) || defined(CONFIG_KVM_E500MC)
	case KVM_CAP_SW_TLB: {
		struct kvm_config_tlb cfg;
		void __user *user_ptr = (void __user *)(uintptr_t)cap->args[0];

		r = -EFAULT;
		if (copy_from_user(&cfg, user_ptr, sizeof(cfg)))
			break;

		r = kvm_vcpu_ioctl_config_tlb(vcpu, &cfg);
		break;
	}
#endif
#ifdef CONFIG_KVM_MPIC
	case KVM_CAP_IRQ_MPIC: {
		struct fd f;
		struct kvm_device *dev;

		r = -EBADF;
		f = fdget(cap->args[0]);
		if (!f.file)
			break;

		r = -EPERM;
		dev = kvm_device_from_filp(f.file);
		if (dev)
			r = kvmppc_mpic_connect_vcpu(dev, vcpu, cap->args[1]);

		fdput(f);
		break;
	}
#endif
#ifdef CONFIG_KVM_XICS
	case KVM_CAP_IRQ_XICS: {
		struct fd f;
		struct kvm_device *dev;

		r = -EBADF;
		f = fdget(cap->args[0]);
		if (!f.file)
			break;

		r = -EPERM;
		dev = kvm_device_from_filp(f.file);
		if (dev) {
			if (xive_enabled())
				r = kvmppc_xive_connect_vcpu(dev, vcpu, cap->args[1]);
			else
				r = kvmppc_xics_connect_vcpu(dev, vcpu, cap->args[1]);
		}

		fdput(f);
		break;
	}
#endif /* CONFIG_KVM_XICS */
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
	case KVM_CAP_PPC_FWNMI:
		r = -EINVAL;
		if (!is_kvmppc_hv_enabled(vcpu->kvm))
			break;

		r = 0;
		vcpu->kvm->arch.fwnmi_enabled = true;
		break;
#endif /* CONFIG_KVM_BOOK3S_HV_POSSIBLE */
	default:
		r = -EINVAL;
		break;
	}

	if (!r)
		r = kvmppc_sanity_check(vcpu);

	return r;
}
bool kvm_arch_intc_initialized(struct kvm *kvm)
{
#ifdef CONFIG_KVM_MPIC
	if (kvm->arch.mpic)
		return true;
#endif
#ifdef CONFIG_KVM_XICS
	if (kvm->arch.xics || kvm->arch.xive)
		return true;
#endif
	return false;
}

int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
                                    struct kvm_mp_state *mp_state)
{
	return -EINVAL;
}

int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
                                    struct kvm_mp_state *mp_state)
{
	return -EINVAL;
}

long kvm_arch_vcpu_async_ioctl(struct file *filp,
			       unsigned int ioctl, unsigned long arg)
{
	struct kvm_vcpu *vcpu = filp->private_data;
	void __user *argp = (void __user *)arg;

	if (ioctl == KVM_INTERRUPT) {
		struct kvm_interrupt irq;

		if (copy_from_user(&irq, argp, sizeof(irq)))
			return -EFAULT;
		return kvm_vcpu_ioctl_interrupt(vcpu, &irq);
	}
	return -ENOIOCTLCMD;
}
long kvm_arch_vcpu_ioctl(struct file *filp,
                         unsigned int ioctl, unsigned long arg)
{
	struct kvm_vcpu *vcpu = filp->private_data;
	void __user *argp = (void __user *)arg;
	long r;

	vcpu_load(vcpu);

	switch (ioctl) {
	case KVM_ENABLE_CAP:
	{
		struct kvm_enable_cap cap;
		r = -EFAULT;
		if (copy_from_user(&cap, argp, sizeof(cap)))
			goto out;
		r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap);
		break;
	}

	case KVM_SET_ONE_REG:
	case KVM_GET_ONE_REG:
	{
		struct kvm_one_reg reg;
		r = -EFAULT;
		if (copy_from_user(&reg, argp, sizeof(reg)))
			goto out;
		if (ioctl == KVM_SET_ONE_REG)
			r = kvm_vcpu_ioctl_set_one_reg(vcpu, &reg);
		else
			r = kvm_vcpu_ioctl_get_one_reg(vcpu, &reg);
		break;
	}

#if defined(CONFIG_KVM_E500V2) || defined(CONFIG_KVM_E500MC)
	case KVM_DIRTY_TLB: {
		struct kvm_dirty_tlb dirty;
		r = -EFAULT;
		if (copy_from_user(&dirty, argp, sizeof(dirty)))
			goto out;
		r = kvm_vcpu_ioctl_dirty_tlb(vcpu, &dirty);
		break;
	}
#endif
	default:
		r = -EINVAL;
	}

out:
	vcpu_put(vcpu);
	return r;
}

vm_fault_t kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
{
	return VM_FAULT_SIGBUS;
}
static int kvm_vm_ioctl_get_pvinfo(struct kvm_ppc_pvinfo *pvinfo)
{
	u32 inst_nop = 0x60000000;
#ifdef CONFIG_KVM_BOOKE_HV
	u32 inst_sc1 = 0x44000022;
	pvinfo->hcall[0] = cpu_to_be32(inst_sc1);
	pvinfo->hcall[1] = cpu_to_be32(inst_nop);
	pvinfo->hcall[2] = cpu_to_be32(inst_nop);
	pvinfo->hcall[3] = cpu_to_be32(inst_nop);
#else
	u32 inst_lis = 0x3c000000;
	u32 inst_ori = 0x60000000;
	u32 inst_sc = 0x44000002;
	u32 inst_imm_mask = 0xffff;

	/*
	 * The hypercall to get into KVM from within guest context is as
	 * follows:
	 *
	 *    lis r0, r0, KVM_SC_MAGIC_R0@h
	 *    ori r0, KVM_SC_MAGIC_R0@l
	 *    sc
	 *    nop
	 */
	pvinfo->hcall[0] = cpu_to_be32(inst_lis | ((KVM_SC_MAGIC_R0 >> 16) & inst_imm_mask));
	pvinfo->hcall[1] = cpu_to_be32(inst_ori | (KVM_SC_MAGIC_R0 & inst_imm_mask));
	pvinfo->hcall[2] = cpu_to_be32(inst_sc);
	pvinfo->hcall[3] = cpu_to_be32(inst_nop);
#endif

	pvinfo->flags = KVM_PPC_PVINFO_FLAGS_EV_IDLE;

	return 0;
}
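/*
 * The four instructions filled in above (sc 1 on BOOKE-HV, otherwise a
 * lis/ori of KVM_SC_MAGIC_R0 followed by sc and a nop) are what the
 * KVM_PPC_GET_PVINFO ioctl below hands to userspace, so a guest can be
 * told how to enter KVM from guest context.
 */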
int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_event,
			  bool line_status)
{
	if (!irqchip_in_kernel(kvm))
		return -ENXIO;

	irq_event->status = kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID,
					irq_event->irq, irq_event->level,
					line_status);
	return 0;
}
static int kvm_vm_ioctl_enable_cap(struct kvm *kvm,
				   struct kvm_enable_cap *cap)
{
	int r;

	if (cap->flags)
		return -EINVAL;

	switch (cap->cap) {
#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
	case KVM_CAP_PPC_ENABLE_HCALL: {
		unsigned long hcall = cap->args[0];

		r = -EINVAL;
		if (hcall > MAX_HCALL_OPCODE || (hcall & 3) ||
		    cap->args[1] > 1)
			break;
		if (!kvmppc_book3s_hcall_implemented(kvm, hcall))
			break;
		if (cap->args[1])
			set_bit(hcall / 4, kvm->arch.enabled_hcalls);
		else
			clear_bit(hcall / 4, kvm->arch.enabled_hcalls);
		r = 0;
		break;
	}
	case KVM_CAP_PPC_SMT: {
		unsigned long mode = cap->args[0];
		unsigned long flags = cap->args[1];

		r = -EINVAL;
		if (kvm->arch.kvm_ops->set_smt_mode)
			r = kvm->arch.kvm_ops->set_smt_mode(kvm, mode, flags);
		break;
	}
#endif
	default:
		r = -EINVAL;
		break;
	}

	return r;
}
#ifdef CONFIG_PPC_BOOK3S_64
/*
 * These functions check whether the underlying hardware is safe
 * against attacks based on observing the effects of speculatively
 * executed instructions, and whether it supplies instructions for
 * use in workarounds.  The information comes from firmware, either
 * via the device tree on powernv platforms or from an hcall on
 * pseries platforms.
 */
#ifdef CONFIG_PPC_PSERIES
static int pseries_get_cpu_char(struct kvm_ppc_cpu_char *cp)
{
	struct h_cpu_char_result c;
	unsigned long rc;

	if (!machine_is(pseries))
		return -ENOTTY;

	rc = plpar_get_cpu_characteristics(&c);
	if (rc == H_SUCCESS) {
		cp->character = c.character;
		cp->behaviour = c.behaviour;
		cp->character_mask = KVM_PPC_CPU_CHAR_SPEC_BAR_ORI31 |
			KVM_PPC_CPU_CHAR_BCCTRL_SERIALISED |
			KVM_PPC_CPU_CHAR_L1D_FLUSH_ORI30 |
			KVM_PPC_CPU_CHAR_L1D_FLUSH_TRIG2 |
			KVM_PPC_CPU_CHAR_L1D_THREAD_PRIV |
			KVM_PPC_CPU_CHAR_BR_HINT_HONOURED |
			KVM_PPC_CPU_CHAR_MTTRIG_THR_RECONF |
			KVM_PPC_CPU_CHAR_COUNT_CACHE_DIS;
		cp->behaviour_mask = KVM_PPC_CPU_BEHAV_FAVOUR_SECURITY |
			KVM_PPC_CPU_BEHAV_L1D_FLUSH_PR |
			KVM_PPC_CPU_BEHAV_BNDS_CHK_SPEC_BAR;
	}
	return 0;
}
#else
static int pseries_get_cpu_char(struct kvm_ppc_cpu_char *cp)
{
	return -ENOTTY;
}
#endif

static inline bool have_fw_feat(struct device_node *fw_features,
				const char *state, const char *name)
{
	struct device_node *np;
	bool r = false;

	np = of_get_child_by_name(fw_features, name);
	if (np) {
		r = of_property_read_bool(np, state);
		of_node_put(np);
	}
	return r;
}

static int kvmppc_get_cpu_char(struct kvm_ppc_cpu_char *cp)
{
	struct device_node *np, *fw_features;
	int r;

	memset(cp, 0, sizeof(*cp));
	r = pseries_get_cpu_char(cp);
	if (r != -ENOTTY)
		return r;

	np = of_find_node_by_name(NULL, "ibm,opal");
	if (np) {
		fw_features = of_get_child_by_name(np, "fw-features");
		of_node_put(np);
		if (!fw_features)
			return 0;
		if (have_fw_feat(fw_features, "enabled",
				 "inst-spec-barrier-ori31,31,0"))
			cp->character |= KVM_PPC_CPU_CHAR_SPEC_BAR_ORI31;
		if (have_fw_feat(fw_features, "enabled",
				 "fw-bcctrl-serialized"))
			cp->character |= KVM_PPC_CPU_CHAR_BCCTRL_SERIALISED;
		if (have_fw_feat(fw_features, "enabled",
				 "inst-l1d-flush-ori30,30,0"))
			cp->character |= KVM_PPC_CPU_CHAR_L1D_FLUSH_ORI30;
		if (have_fw_feat(fw_features, "enabled",
				 "inst-l1d-flush-trig2"))
			cp->character |= KVM_PPC_CPU_CHAR_L1D_FLUSH_TRIG2;
		if (have_fw_feat(fw_features, "enabled",
				 "fw-l1d-thread-split"))
			cp->character |= KVM_PPC_CPU_CHAR_L1D_THREAD_PRIV;
		if (have_fw_feat(fw_features, "enabled",
				 "fw-count-cache-disabled"))
			cp->character |= KVM_PPC_CPU_CHAR_COUNT_CACHE_DIS;
		cp->character_mask = KVM_PPC_CPU_CHAR_SPEC_BAR_ORI31 |
			KVM_PPC_CPU_CHAR_BCCTRL_SERIALISED |
			KVM_PPC_CPU_CHAR_L1D_FLUSH_ORI30 |
			KVM_PPC_CPU_CHAR_L1D_FLUSH_TRIG2 |
			KVM_PPC_CPU_CHAR_L1D_THREAD_PRIV |
			KVM_PPC_CPU_CHAR_COUNT_CACHE_DIS;

		if (have_fw_feat(fw_features, "enabled",
				 "speculation-policy-favor-security"))
			cp->behaviour |= KVM_PPC_CPU_BEHAV_FAVOUR_SECURITY;
		if (!have_fw_feat(fw_features, "disabled",
				  "needs-l1d-flush-msr-pr-0-to-1"))
			cp->behaviour |= KVM_PPC_CPU_BEHAV_L1D_FLUSH_PR;
		if (!have_fw_feat(fw_features, "disabled",
				  "needs-spec-barrier-for-bound-checks"))
			cp->behaviour |= KVM_PPC_CPU_BEHAV_BNDS_CHK_SPEC_BAR;
		cp->behaviour_mask = KVM_PPC_CPU_BEHAV_FAVOUR_SECURITY |
			KVM_PPC_CPU_BEHAV_L1D_FLUSH_PR |
			KVM_PPC_CPU_BEHAV_BNDS_CHK_SPEC_BAR;

		of_node_put(fw_features);
	}

	return 0;
}
#endif /* CONFIG_PPC_BOOK3S_64 */
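/*
 * The character/behaviour bits gathered above are reported to userspace by
 * the KVM_PPC_GET_CPU_CHAR ioctl handled below, so a VM manager can
 * describe the host's speculative-execution mitigations to its guests.
 */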
long kvm_arch_vm_ioctl(struct file *filp,
                       unsigned int ioctl, unsigned long arg)
{
	struct kvm *kvm __maybe_unused = filp->private_data;
	void __user *argp = (void __user *)arg;
	long r;

	switch (ioctl) {
	case KVM_PPC_GET_PVINFO: {
		struct kvm_ppc_pvinfo pvinfo;
		memset(&pvinfo, 0, sizeof(pvinfo));
		r = kvm_vm_ioctl_get_pvinfo(&pvinfo);
		if (copy_to_user(argp, &pvinfo, sizeof(pvinfo))) {
			r = -EFAULT;
			goto out;
		}

		break;
	}
	case KVM_ENABLE_CAP:
	{
		struct kvm_enable_cap cap;
		r = -EFAULT;
		if (copy_from_user(&cap, argp, sizeof(cap)))
			goto out;
		r = kvm_vm_ioctl_enable_cap(kvm, &cap);
		break;
	}
#ifdef CONFIG_SPAPR_TCE_IOMMU
	case KVM_CREATE_SPAPR_TCE_64: {
		struct kvm_create_spapr_tce_64 create_tce_64;

		r = -EFAULT;
		if (copy_from_user(&create_tce_64, argp, sizeof(create_tce_64)))
			goto out;
		if (create_tce_64.flags) {
			r = -EINVAL;
			goto out;
		}
		r = kvm_vm_ioctl_create_spapr_tce(kvm, &create_tce_64);
		goto out;
	}
	case KVM_CREATE_SPAPR_TCE: {
		struct kvm_create_spapr_tce create_tce;
		struct kvm_create_spapr_tce_64 create_tce_64;

		r = -EFAULT;
		if (copy_from_user(&create_tce, argp, sizeof(create_tce)))
			goto out;

		create_tce_64.liobn = create_tce.liobn;
		create_tce_64.page_shift = IOMMU_PAGE_SHIFT_4K;
		create_tce_64.offset = 0;
		create_tce_64.size = create_tce.window_size >>
				IOMMU_PAGE_SHIFT_4K;
		create_tce_64.flags = 0;
		r = kvm_vm_ioctl_create_spapr_tce(kvm, &create_tce_64);
		goto out;
	}
#endif
#ifdef CONFIG_PPC_BOOK3S_64
	case KVM_PPC_GET_SMMU_INFO: {
		struct kvm_ppc_smmu_info info;
		struct kvm *kvm = filp->private_data;

		memset(&info, 0, sizeof(info));
		r = kvm->arch.kvm_ops->get_smmu_info(kvm, &info);
		if (r >= 0 && copy_to_user(argp, &info, sizeof(info)))
			r = -EFAULT;
		break;
	}
	case KVM_PPC_RTAS_DEFINE_TOKEN: {
		struct kvm *kvm = filp->private_data;

		r = kvm_vm_ioctl_rtas_define_token(kvm, argp);
		break;
	}
	case KVM_PPC_CONFIGURE_V3_MMU: {
		struct kvm *kvm = filp->private_data;
		struct kvm_ppc_mmuv3_cfg cfg;

		r = -EINVAL;
		if (!kvm->arch.kvm_ops->configure_mmu)
			break;
		r = -EFAULT;
		if (copy_from_user(&cfg, argp, sizeof(cfg)))
			break;
		r = kvm->arch.kvm_ops->configure_mmu(kvm, &cfg);
		break;
	}
	case KVM_PPC_GET_RMMU_INFO: {
		struct kvm *kvm = filp->private_data;
		struct kvm_ppc_rmmu_info info;

		r = -EINVAL;
		if (!kvm->arch.kvm_ops->get_rmmu_info)
			break;
		r = kvm->arch.kvm_ops->get_rmmu_info(kvm, &info);
		if (r >= 0 && copy_to_user(argp, &info, sizeof(info)))
			r = -EFAULT;
		break;
	}
	case KVM_PPC_GET_CPU_CHAR: {
		struct kvm_ppc_cpu_char cpuchar;

		r = kvmppc_get_cpu_char(&cpuchar);
		if (r >= 0 && copy_to_user(argp, &cpuchar, sizeof(cpuchar)))
			r = -EFAULT;
		break;
	}
	default: {
		struct kvm *kvm = filp->private_data;
		r = kvm->arch.kvm_ops->arch_vm_ioctl(filp, ioctl, arg);
	}
#else /* CONFIG_PPC_BOOK3S_64 */
	default:
		r = -ENOTTY;
#endif
	}
out:
	return r;
}
static unsigned long lpid_inuse[BITS_TO_LONGS(KVMPPC_NR_LPIDS)];
static unsigned long nr_lpids;

long kvmppc_alloc_lpid(void)
{
	long lpid;

	do {
		lpid = find_first_zero_bit(lpid_inuse, KVMPPC_NR_LPIDS);
		if (lpid >= nr_lpids) {
			pr_err("%s: No LPIDs free\n", __func__);
			return -ENOMEM;
		}
	} while (test_and_set_bit(lpid, lpid_inuse));

	return lpid;
}
EXPORT_SYMBOL_GPL(kvmppc_alloc_lpid);

void kvmppc_claim_lpid(long lpid)
{
	set_bit(lpid, lpid_inuse);
}
EXPORT_SYMBOL_GPL(kvmppc_claim_lpid);

void kvmppc_free_lpid(long lpid)
{
	clear_bit(lpid, lpid_inuse);
}
EXPORT_SYMBOL_GPL(kvmppc_free_lpid);

void kvmppc_init_lpid(unsigned long nr_lpids_param)
{
	nr_lpids = min_t(unsigned long, KVMPPC_NR_LPIDS, nr_lpids_param);
	memset(lpid_inuse, 0, sizeof(lpid_inuse));
}
EXPORT_SYMBOL_GPL(kvmppc_init_lpid);

int kvm_arch_init(void *opaque)
{
	return 0;
}

EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_ppc_instr);