/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 *
 * Copyright IBM Corp. 2007
 *
 * Authors: Hollis Blanchard <hollisb@us.ibm.com>
 *          Christian Ehrhardt <ehrhardt@linux.vnet.ibm.com>
 */
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/kvm_host.h>
#include <linux/vmalloc.h>
#include <linux/hrtimer.h>
#include <linux/sched/signal.h>
#include <linux/slab.h>
#include <linux/file.h>
#include <linux/module.h>
#include <linux/irqbypass.h>
#include <linux/kvm_irqfd.h>
#include <asm/cputable.h>
#include <linux/uaccess.h>
#include <asm/kvm_ppc.h>
#include <asm/cputhreads.h>
#include <asm/irqflags.h>
#include <asm/iommu.h>
#include <asm/switch_to.h>

#ifdef CONFIG_PPC_PSERIES
#include <asm/hvcall.h>
#include <asm/plpar_wrappers.h>
#endif

#include "../mm/mmu_decl.h"

#define CREATE_TRACE_POINTS
struct kvmppc_ops *kvmppc_hv_ops;
EXPORT_SYMBOL_GPL(kvmppc_hv_ops);
struct kvmppc_ops *kvmppc_pr_ops;
EXPORT_SYMBOL_GPL(kvmppc_pr_ops);
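
/*
 * kvmppc_hv_ops and kvmppc_pr_ops are filled in when the HV and PR
 * implementations register themselves; kvm_arch_init_vm() below picks
 * one of the two for each new VM.
 */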
int kvm_arch_vcpu_runnable(struct kvm_vcpu *v)
{
	return !!(v->arch.pending_exceptions) || kvm_request_pending(v);
}

bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu)

int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
/*
 * Common checks before entering the guest world.  Call with interrupts
 * disabled.
 *
 * returns:
 * == 1 if we're ready to go into guest state
 * <= 0 if we need to go back to the host with return value
 */
int kvmppc_prepare_to_enter(struct kvm_vcpu *vcpu)
	WARN_ON(irqs_disabled());

	if (signal_pending(current)) {
		kvmppc_account_exit(vcpu, SIGNAL_EXITS);
		vcpu->run->exit_reason = KVM_EXIT_INTR;

	vcpu->mode = IN_GUEST_MODE;

	/*
	 * Reading vcpu->requests must happen after setting vcpu->mode,
	 * so we don't miss a request because the requester sees
	 * OUTSIDE_GUEST_MODE and assumes we'll be checking requests
	 * before next entering the guest (and thus doesn't IPI).
	 * This also orders the write to mode from any reads
	 * to the page tables done while the VCPU is running.
	 * Please see the comment in kvm_flush_remote_tlbs.
	 */

	if (kvm_request_pending(vcpu)) {
		/* Make sure we process requests preemptibly */
		trace_kvm_check_requests(vcpu);
		r = kvmppc_core_check_requests(vcpu);

	if (kvmppc_core_prepare_to_enter(vcpu)) {
		/* interrupts got enabled in between, so we
		   are back at square 1 */

	guest_enter_irqoff();

EXPORT_SYMBOL_GPL(kvmppc_prepare_to_enter);
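
/*
 * The shared (magic) page is kept in guest byte order; when the guest
 * changes endianness, kvmppc_swab_shared() byte-swaps its fields in
 * place so both sides keep reading consistent values.
 */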
#if defined(CONFIG_PPC_BOOK3S_64) && defined(CONFIG_KVM_BOOK3S_PR_POSSIBLE)
static void kvmppc_swab_shared(struct kvm_vcpu *vcpu)
{
	struct kvm_vcpu_arch_shared *shared = vcpu->arch.shared;

	shared->sprg0 = swab64(shared->sprg0);
	shared->sprg1 = swab64(shared->sprg1);
	shared->sprg2 = swab64(shared->sprg2);
	shared->sprg3 = swab64(shared->sprg3);
	shared->srr0 = swab64(shared->srr0);
	shared->srr1 = swab64(shared->srr1);
	shared->dar = swab64(shared->dar);
	shared->msr = swab64(shared->msr);
	shared->dsisr = swab32(shared->dsisr);
	shared->int_pending = swab32(shared->int_pending);
	for (i = 0; i < ARRAY_SIZE(shared->sr); i++)
		shared->sr[i] = swab32(shared->sr[i]);
}
#endif
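
/*
 * kvmppc_kvm_pv() handles KVM's paravirtual hypercalls: the hypercall
 * number arrives in r11 and up to four arguments in r3-r6; the status
 * goes back in r3 with a second return value in r4.
 */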
int kvmppc_kvm_pv(struct kvm_vcpu *vcpu)
	int nr = kvmppc_get_gpr(vcpu, 11);
	unsigned long __maybe_unused param1 = kvmppc_get_gpr(vcpu, 3);
	unsigned long __maybe_unused param2 = kvmppc_get_gpr(vcpu, 4);
	unsigned long __maybe_unused param3 = kvmppc_get_gpr(vcpu, 5);
	unsigned long __maybe_unused param4 = kvmppc_get_gpr(vcpu, 6);
	unsigned long r2 = 0;

	if (!(kvmppc_get_msr(vcpu) & MSR_SF)) {
		param1 &= 0xffffffff;
		param2 &= 0xffffffff;
		param3 &= 0xffffffff;
		param4 &= 0xffffffff;
	}

	case KVM_HCALL_TOKEN(KVM_HC_PPC_MAP_MAGIC_PAGE):
#if defined(CONFIG_PPC_BOOK3S_64) && defined(CONFIG_KVM_BOOK3S_PR_POSSIBLE)
		/* Book3S can be little endian, find it out here */
		int shared_big_endian = true;
		if (vcpu->arch.intr_msr & MSR_LE)
			shared_big_endian = false;
		if (shared_big_endian != vcpu->arch.shared_big_endian)
			kvmppc_swab_shared(vcpu);
		vcpu->arch.shared_big_endian = shared_big_endian;
#endif

		if (!(param2 & MAGIC_PAGE_FLAG_NOT_MAPPED_NX)) {
			/*
			 * Older versions of the Linux magic page code had
			 * a bug where they would map their trampoline code
			 * NX. If that's the case, remove !PR NX capability.
			 */
			vcpu->arch.disable_kernel_nx = true;
			kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
		}

		vcpu->arch.magic_page_pa = param1 & ~0xfffULL;
		vcpu->arch.magic_page_ea = param2 & ~0xfffULL;

#ifdef CONFIG_PPC_64K_PAGES
		/*
		 * Make sure our 4k magic page is in the same window of a 64k
		 * page within the guest and within the host's page.
		 */
		if ((vcpu->arch.magic_page_pa & 0xf000) !=
		    ((ulong)vcpu->arch.shared & 0xf000)) {
			void *old_shared = vcpu->arch.shared;
			ulong shared = (ulong)vcpu->arch.shared;

			shared |= vcpu->arch.magic_page_pa & 0xf000;
			new_shared = (void *)shared;
			memcpy(new_shared, old_shared, 0x1000);
			vcpu->arch.shared = new_shared;
		}
#endif

		r2 = KVM_MAGIC_FEAT_SR | KVM_MAGIC_FEAT_MAS0_TO_SPRG7;

	case KVM_HCALL_TOKEN(KVM_HC_FEATURES):
#if defined(CONFIG_PPC_BOOK3S) || defined(CONFIG_KVM_E500V2)
		r2 |= (1 << KVM_FEATURE_MAGIC_PAGE);
#endif

		/* Second return value is in r4 */
	case EV_HCALL_TOKEN(EV_IDLE):
		kvm_vcpu_block(vcpu);
		kvm_clear_request(KVM_REQ_UNHALT, vcpu);

		r = EV_UNIMPLEMENTED;

	kvmppc_set_gpr(vcpu, 4, r2);

EXPORT_SYMBOL_GPL(kvmppc_kvm_pv);
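
/*
 * kvmppc_sanity_check() verifies that the vcpu configuration is one the
 * host can actually run: a known CPU type, PAPR only on Book3S-64, HV
 * implying PAPR mode, and embedded HV support where it is required.
 */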
int kvmppc_sanity_check(struct kvm_vcpu *vcpu)
	/* We have to know what CPU to virtualize */

	/* PAPR only works with book3s_64 */
	if ((vcpu->arch.cpu_type != KVM_CPU_3S_64) && vcpu->arch.papr_enabled)

	/* HV KVM can only do PAPR mode for now */
	if (!vcpu->arch.papr_enabled && is_kvmppc_hv_enabled(vcpu->kvm))

#ifdef CONFIG_KVM_BOOKE_HV
	if (!cpu_has_feature(CPU_FTR_EMB_HV))
#endif

	return r ? 0 : -EINVAL;
EXPORT_SYMBOL_GPL(kvmppc_sanity_check);
int kvmppc_emulate_mmio(struct kvm_run *run, struct kvm_vcpu *vcpu)
	enum emulation_result er;

	er = kvmppc_emulate_loadstore(vcpu);

	/* Future optimization: only reload non-volatiles if they were
	 * actually modified. */

	case EMULATE_DO_MMIO:
		run->exit_reason = KVM_EXIT_MMIO;
		/* We must reload nonvolatiles because "update" load/store
		 * instructions modify register state. */
		/* Future optimization: only reload non-volatiles if they were
		 * actually modified. */

		kvmppc_get_last_inst(vcpu, INST_GENERIC, &last_inst);
		/* XXX Deliver Program interrupt to guest. */
		pr_emerg("%s: emulation failed (%08x)\n", __func__, last_inst);

EXPORT_SYMBOL_GPL(kvmppc_emulate_mmio);
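
/*
 * kvmppc_st() and kvmppc_ld() copy data to or from a guest effective
 * address: the address is translated with kvmppc_xlate() (or a kvm_ops
 * hook when one is provided), accesses to the magic page are
 * short-circuited to the shared page, and everything else goes through
 * kvm_write_guest()/kvm_read_guest(); addresses not backed by a memslot
 * are punted back as MMIO.
 */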
int kvmppc_st(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr,
	ulong mp_pa = vcpu->arch.magic_page_pa & KVM_PAM & PAGE_MASK;
	struct kvmppc_pte pte;

	if (vcpu->kvm->arch.kvm_ops && vcpu->kvm->arch.kvm_ops->store_to_eaddr)
		r = vcpu->kvm->arch.kvm_ops->store_to_eaddr(vcpu, eaddr, ptr,

	if ((!r) || (r == -EAGAIN))

	r = kvmppc_xlate(vcpu, *eaddr, data ? XLATE_DATA : XLATE_INST,

	/* Magic page override */
	if (kvmppc_supports_magic_page(vcpu) && mp_pa &&
	    ((pte.raddr & KVM_PAM & PAGE_MASK) == mp_pa) &&
	    !(kvmppc_get_msr(vcpu) & MSR_PR)) {
		void *magic = vcpu->arch.shared;
		magic += pte.eaddr & 0xfff;
		memcpy(magic, ptr, size);
	}

	if (kvm_write_guest(vcpu->kvm, pte.raddr, ptr, size))
		return EMULATE_DO_MMIO;

EXPORT_SYMBOL_GPL(kvmppc_st);
int kvmppc_ld(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr,
	ulong mp_pa = vcpu->arch.magic_page_pa & KVM_PAM & PAGE_MASK;
	struct kvmppc_pte pte;

	if (vcpu->kvm->arch.kvm_ops && vcpu->kvm->arch.kvm_ops->load_from_eaddr)
		rc = vcpu->kvm->arch.kvm_ops->load_from_eaddr(vcpu, eaddr, ptr,

	if ((!rc) || (rc == -EAGAIN))

	rc = kvmppc_xlate(vcpu, *eaddr, data ? XLATE_DATA : XLATE_INST,

	if (!data && !pte.may_execute)

	/* Magic page override */
	if (kvmppc_supports_magic_page(vcpu) && mp_pa &&
	    ((pte.raddr & KVM_PAM & PAGE_MASK) == mp_pa) &&
	    !(kvmppc_get_msr(vcpu) & MSR_PR)) {
		void *magic = vcpu->arch.shared;
		magic += pte.eaddr & 0xfff;
		memcpy(ptr, magic, size);
	}

	if (kvm_read_guest(vcpu->kvm, pte.raddr, ptr, size))
		return EMULATE_DO_MMIO;

EXPORT_SYMBOL_GPL(kvmppc_ld);
int kvm_arch_hardware_enable(void)

int kvm_arch_hardware_setup(void)

void kvm_arch_check_processor_compat(void *rtn)
	*(int *)rtn = kvmppc_core_check_processor_compat();
int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
	struct kvmppc_ops *kvm_ops = NULL;

	/* if we have both HV and PR enabled, default is HV */
		kvm_ops = kvmppc_hv_ops;
		kvm_ops = kvmppc_pr_ops;
	} else if (type == KVM_VM_PPC_HV) {
		kvm_ops = kvmppc_hv_ops;
	} else if (type == KVM_VM_PPC_PR) {
		kvm_ops = kvmppc_pr_ops;

	if (kvm_ops->owner && !try_module_get(kvm_ops->owner))

	kvm->arch.kvm_ops = kvm_ops;
	return kvmppc_core_init_vm(kvm);
bool kvm_arch_has_vcpu_debugfs(void)

int kvm_arch_create_vcpu_debugfs(struct kvm_vcpu *vcpu)
void kvm_arch_destroy_vm(struct kvm *kvm)
	struct kvm_vcpu *vcpu;

#ifdef CONFIG_KVM_XICS
	/*
	 * We call kick_all_cpus_sync() to ensure that all
	 * CPUs have executed any pending IPIs before we
	 * continue and free VCPUs structures below.
	 */
	if (is_kvmppc_hv_enabled(kvm))
		kick_all_cpus_sync();
#endif

	kvm_for_each_vcpu(i, vcpu, kvm)
		kvm_arch_vcpu_free(vcpu);

	mutex_lock(&kvm->lock);
	for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
		kvm->vcpus[i] = NULL;

	atomic_set(&kvm->online_vcpus, 0);

	kvmppc_core_destroy_vm(kvm);

	mutex_unlock(&kvm->lock);

	/* drop the module reference */
	module_put(kvm->arch.kvm_ops->owner);
int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
	/* Assume we're using HV mode when the HV module is loaded */
	int hv_enabled = kvmppc_hv_ops ? 1 : 0;

		/*
		 * Hooray - we know which VM type we're running on.  Depend on
		 * that rather than the guess above.
		 */
		hv_enabled = is_kvmppc_hv_enabled(kvm);

	case KVM_CAP_PPC_BOOKE_SREGS:
	case KVM_CAP_PPC_BOOKE_WATCHDOG:
	case KVM_CAP_PPC_EPR:
	case KVM_CAP_PPC_SEGSTATE:
	case KVM_CAP_PPC_HIOR:
	case KVM_CAP_PPC_PAPR:
	case KVM_CAP_PPC_UNSET_IRQ:
	case KVM_CAP_PPC_IRQ_LEVEL:
	case KVM_CAP_ENABLE_CAP:
	case KVM_CAP_ONE_REG:
	case KVM_CAP_IOEVENTFD:
	case KVM_CAP_DEVICE_CTRL:
	case KVM_CAP_IMMEDIATE_EXIT:
	case KVM_CAP_PPC_PAIRED_SINGLES:
	case KVM_CAP_PPC_OSI:
	case KVM_CAP_PPC_GET_PVINFO:
#if defined(CONFIG_KVM_E500V2) || defined(CONFIG_KVM_E500MC)
#endif
		/* We support this only for PR */
#ifdef CONFIG_KVM_MPIC
	case KVM_CAP_IRQ_MPIC:
#endif

#ifdef CONFIG_PPC_BOOK3S_64
	case KVM_CAP_SPAPR_TCE:
	case KVM_CAP_SPAPR_TCE_64:
	case KVM_CAP_SPAPR_TCE_VFIO:
		r = !!cpu_has_feature(CPU_FTR_HVMODE);
	case KVM_CAP_PPC_RTAS:
	case KVM_CAP_PPC_FIXUP_HCALL:
	case KVM_CAP_PPC_ENABLE_HCALL:
#ifdef CONFIG_KVM_XICS
	case KVM_CAP_IRQ_XICS:
#endif
	case KVM_CAP_PPC_GET_CPU_CHAR:
	case KVM_CAP_PPC_ALLOC_HTAB:
#endif /* CONFIG_PPC_BOOK3S_64 */
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
	case KVM_CAP_PPC_SMT:
		if (kvm->arch.emul_smt_mode > 1)
			r = kvm->arch.emul_smt_mode;
			r = kvm->arch.smt_mode;
		} else if (hv_enabled) {
			if (cpu_has_feature(CPU_FTR_ARCH_300))
				r = threads_per_subcore;
	case KVM_CAP_PPC_SMT_POSSIBLE:
		if (!cpu_has_feature(CPU_FTR_ARCH_300))
			r = ((threads_per_subcore << 1) - 1);
		/* P9 can emulate dbells, so allow any mode */
	case KVM_CAP_PPC_RMA:
	case KVM_CAP_PPC_HWRNG:
		r = kvmppc_hwrng_present();
	case KVM_CAP_PPC_MMU_RADIX:
		r = !!(hv_enabled && radix_enabled());
	case KVM_CAP_PPC_MMU_HASH_V3:
		r = !!(hv_enabled && cpu_has_feature(CPU_FTR_ARCH_300) &&
		       cpu_has_feature(CPU_FTR_HVMODE));
	case KVM_CAP_PPC_NESTED_HV:
		r = !!(hv_enabled && kvmppc_hv_ops->enable_nested &&
		       !kvmppc_hv_ops->enable_nested(NULL));
#endif
	case KVM_CAP_SYNC_MMU:
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
#elif defined(KVM_ARCH_WANT_MMU_NOTIFIER)
#endif
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
	case KVM_CAP_PPC_HTAB_FD:
#endif
	case KVM_CAP_NR_VCPUS:
		/*
		 * Recommending a number of CPUs is somewhat arbitrary; we
		 * return the number of present CPUs for -HV (since a host
		 * will have secondary threads "offline"), and for other KVM
		 * implementations just count online CPUs.
		 */
			r = num_present_cpus();
			r = num_online_cpus();
	case KVM_CAP_NR_MEMSLOTS:
		r = KVM_USER_MEM_SLOTS;
	case KVM_CAP_MAX_VCPUS:
#ifdef CONFIG_PPC_BOOK3S_64
	case KVM_CAP_PPC_GET_SMMU_INFO:
	case KVM_CAP_SPAPR_MULTITCE:
	case KVM_CAP_SPAPR_RESIZE_HPT:
#endif
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
	case KVM_CAP_PPC_FWNMI:
#endif
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	case KVM_CAP_PPC_HTM:
		r = !!(cur_cpu_spec->cpu_user_features2 & PPC_FEATURE2_HTM) ||
		     (hv_enabled && cpu_has_feature(CPU_FTR_P9_TM_HV_ASSIST));
#endif
long kvm_arch_dev_ioctl(struct file *filp,
			unsigned int ioctl, unsigned long arg)

void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free,
			   struct kvm_memory_slot *dont)
	kvmppc_core_free_memslot(kvm, free, dont);

int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
			    unsigned long npages)
	return kvmppc_core_create_memslot(kvm, slot, npages);

int kvm_arch_prepare_memory_region(struct kvm *kvm,
				   struct kvm_memory_slot *memslot,
				   const struct kvm_userspace_memory_region *mem,
				   enum kvm_mr_change change)
	return kvmppc_core_prepare_memory_region(kvm, memslot, mem);

void kvm_arch_commit_memory_region(struct kvm *kvm,
				   const struct kvm_userspace_memory_region *mem,
				   const struct kvm_memory_slot *old,
				   const struct kvm_memory_slot *new,
				   enum kvm_mr_change change)
	kvmppc_core_commit_memory_region(kvm, mem, old, new, change);

void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
				   struct kvm_memory_slot *slot)
	kvmppc_core_flush_memslot(kvm, slot);
struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id)
	struct kvm_vcpu *vcpu;
	vcpu = kvmppc_core_vcpu_create(kvm, id);
	vcpu->arch.wqp = &vcpu->wq;
	kvmppc_create_vcpu_debugfs(vcpu, id);

void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu)
	/* Make sure we're not using the vcpu anymore */
	hrtimer_cancel(&vcpu->arch.dec_timer);

	kvmppc_remove_vcpu_debugfs(vcpu);

	switch (vcpu->arch.irq_type) {
	case KVMPPC_IRQ_MPIC:
		kvmppc_mpic_disconnect_vcpu(vcpu->arch.mpic, vcpu);
	case KVMPPC_IRQ_XICS:
		kvmppc_xive_cleanup_vcpu(vcpu);
		kvmppc_xics_free_icp(vcpu);
	}

	kvmppc_core_vcpu_free(vcpu);

void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
	kvm_arch_vcpu_free(vcpu);
int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
	return kvmppc_core_pending_dec(vcpu);
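
/*
 * The guest decrementer is emulated with a host hrtimer; when it fires,
 * kvmppc_decrementer_wakeup() below raises the decrementer interrupt
 * via kvmppc_decrementer_func().
 */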
static enum hrtimer_restart kvmppc_decrementer_wakeup(struct hrtimer *timer)
	struct kvm_vcpu *vcpu;

	vcpu = container_of(timer, struct kvm_vcpu, arch.dec_timer);
	kvmppc_decrementer_func(vcpu);

	return HRTIMER_NORESTART;
int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
	hrtimer_init(&vcpu->arch.dec_timer, CLOCK_REALTIME, HRTIMER_MODE_ABS);
	vcpu->arch.dec_timer.function = kvmppc_decrementer_wakeup;
	vcpu->arch.dec_expires = get_tb();

#ifdef CONFIG_KVM_EXIT_TIMING
	mutex_init(&vcpu->arch.exit_timing_lock);
#endif

	ret = kvmppc_subarch_vcpu_init(vcpu);

void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
	kvmppc_mmu_destroy(vcpu);
	kvmppc_subarch_vcpu_uninit(vcpu);
void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
	/*
	 * vrsave (formerly usprg0) isn't used by Linux, but may
	 * be used by the guest.
	 *
	 * On non-booke this is associated with Altivec and
	 * is handled by code in book3s.c.
	 */
	mtspr(SPRN_VRSAVE, vcpu->arch.vrsave);
	kvmppc_core_vcpu_load(vcpu, cpu);

void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
	kvmppc_core_vcpu_put(vcpu);
	vcpu->arch.vrsave = mfspr(SPRN_VRSAVE);
/*
 * irq_bypass_add_producer and irq_bypass_del_producer are only
 * useful if the architecture supports PCI passthrough.
 * irq_bypass_stop and irq_bypass_start are not needed and so
 * kvm_ops are not defined for them.
 */
bool kvm_arch_has_irq_bypass(void)
	return ((kvmppc_hv_ops && kvmppc_hv_ops->irq_bypass_add_producer) ||
		(kvmppc_pr_ops && kvmppc_pr_ops->irq_bypass_add_producer));

int kvm_arch_irq_bypass_add_producer(struct irq_bypass_consumer *cons,
				     struct irq_bypass_producer *prod)
	struct kvm_kernel_irqfd *irqfd =
		container_of(cons, struct kvm_kernel_irqfd, consumer);
	struct kvm *kvm = irqfd->kvm;

	if (kvm->arch.kvm_ops->irq_bypass_add_producer)
		return kvm->arch.kvm_ops->irq_bypass_add_producer(cons, prod);

void kvm_arch_irq_bypass_del_producer(struct irq_bypass_consumer *cons,
				      struct irq_bypass_producer *prod)
	struct kvm_kernel_irqfd *irqfd =
		container_of(cons, struct kvm_kernel_irqfd, consumer);
	struct kvm *kvm = irqfd->kvm;

	if (kvm->arch.kvm_ops->irq_bypass_del_producer)
		kvm->arch.kvm_ops->irq_bypass_del_producer(cons, prod);
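
/*
 * VSX loads and stores that land on MMIO are emulated element by
 * element; the helpers below work out which doubleword or word of the
 * target floating-point or vector register a given element maps to.
 */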
static inline int kvmppc_get_vsr_dword_offset(int index)
	if ((index != 0) && (index != 1))

static inline int kvmppc_get_vsr_word_offset(int index)
	if ((index > 3) || (index < 0))

static inline void kvmppc_set_vsr_dword(struct kvm_vcpu *vcpu,
	union kvmppc_one_reg val;
	int offset = kvmppc_get_vsr_dword_offset(vcpu->arch.mmio_vsx_offset);
	int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK;

		val.vval = VCPU_VSX_VR(vcpu, index - 32);
		val.vsxval[offset] = gpr;
		VCPU_VSX_VR(vcpu, index - 32) = val.vval;
		VCPU_VSX_FPR(vcpu, index, offset) = gpr;

static inline void kvmppc_set_vsr_dword_dump(struct kvm_vcpu *vcpu,
	union kvmppc_one_reg val;
	int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK;

		val.vval = VCPU_VSX_VR(vcpu, index - 32);
		VCPU_VSX_VR(vcpu, index - 32) = val.vval;
		VCPU_VSX_FPR(vcpu, index, 0) = gpr;
		VCPU_VSX_FPR(vcpu, index, 1) = gpr;

static inline void kvmppc_set_vsr_word_dump(struct kvm_vcpu *vcpu,
	union kvmppc_one_reg val;
	int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK;

		val.vsx32val[0] = gpr;
		val.vsx32val[1] = gpr;
		val.vsx32val[2] = gpr;
		val.vsx32val[3] = gpr;
		VCPU_VSX_VR(vcpu, index - 32) = val.vval;

		val.vsx32val[0] = gpr;
		val.vsx32val[1] = gpr;
		VCPU_VSX_FPR(vcpu, index, 0) = val.vsxval[0];
		VCPU_VSX_FPR(vcpu, index, 1) = val.vsxval[0];

static inline void kvmppc_set_vsr_word(struct kvm_vcpu *vcpu,
	union kvmppc_one_reg val;
	int offset = kvmppc_get_vsr_word_offset(vcpu->arch.mmio_vsx_offset);
	int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK;
	int dword_offset, word_offset;

		val.vval = VCPU_VSX_VR(vcpu, index - 32);
		val.vsx32val[offset] = gpr32;
		VCPU_VSX_VR(vcpu, index - 32) = val.vval;

		dword_offset = offset / 2;
		word_offset = offset % 2;
		val.vsxval[0] = VCPU_VSX_FPR(vcpu, index, dword_offset);
		val.vsx32val[word_offset] = gpr32;
		VCPU_VSX_FPR(vcpu, index, dword_offset) = val.vsxval[0];

#endif /* CONFIG_VSX */
#ifdef CONFIG_ALTIVEC
static inline int kvmppc_get_vmx_offset_generic(struct kvm_vcpu *vcpu,
						int index, int element_size)
	int elts = sizeof(vector128)/element_size;

	if ((index < 0) || (index >= elts))

	if (kvmppc_need_byteswap(vcpu))
		offset = elts - index - 1;

static inline int kvmppc_get_vmx_dword_offset(struct kvm_vcpu *vcpu,
	return kvmppc_get_vmx_offset_generic(vcpu, index, 8);

static inline int kvmppc_get_vmx_word_offset(struct kvm_vcpu *vcpu,
	return kvmppc_get_vmx_offset_generic(vcpu, index, 4);

static inline int kvmppc_get_vmx_hword_offset(struct kvm_vcpu *vcpu,
	return kvmppc_get_vmx_offset_generic(vcpu, index, 2);

static inline int kvmppc_get_vmx_byte_offset(struct kvm_vcpu *vcpu,
	return kvmppc_get_vmx_offset_generic(vcpu, index, 1);
static inline void kvmppc_set_vmx_dword(struct kvm_vcpu *vcpu,
	union kvmppc_one_reg val;
	int offset = kvmppc_get_vmx_dword_offset(vcpu,
						 vcpu->arch.mmio_vmx_offset);
	int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK;

	val.vval = VCPU_VSX_VR(vcpu, index);
	val.vsxval[offset] = gpr;
	VCPU_VSX_VR(vcpu, index) = val.vval;

static inline void kvmppc_set_vmx_word(struct kvm_vcpu *vcpu,
	union kvmppc_one_reg val;
	int offset = kvmppc_get_vmx_word_offset(vcpu,
						vcpu->arch.mmio_vmx_offset);
	int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK;

	val.vval = VCPU_VSX_VR(vcpu, index);
	val.vsx32val[offset] = gpr32;
	VCPU_VSX_VR(vcpu, index) = val.vval;

static inline void kvmppc_set_vmx_hword(struct kvm_vcpu *vcpu,
	union kvmppc_one_reg val;
	int offset = kvmppc_get_vmx_hword_offset(vcpu,
						 vcpu->arch.mmio_vmx_offset);
	int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK;

	val.vval = VCPU_VSX_VR(vcpu, index);
	val.vsx16val[offset] = gpr16;
	VCPU_VSX_VR(vcpu, index) = val.vval;

static inline void kvmppc_set_vmx_byte(struct kvm_vcpu *vcpu,
	union kvmppc_one_reg val;
	int offset = kvmppc_get_vmx_byte_offset(vcpu,
						vcpu->arch.mmio_vmx_offset);
	int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK;

	val.vval = VCPU_VSX_VR(vcpu, index);
	val.vsx8val[offset] = gpr8;
	VCPU_VSX_VR(vcpu, index) = val.vval;

#endif /* CONFIG_ALTIVEC */
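
/*
 * sp_to_dp() and dp_to_sp() below let the FPU do the conversion between
 * single and double precision for MMIO floating-point accesses: the
 * value is bounced through FPR0 with a load of one width and a store of
 * the other.
 */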
#ifdef CONFIG_PPC_FPU
static inline u64 sp_to_dp(u32 fprs)
	asm ("lfs%U1%X1 0,%1; stfd%U0%X0 0,%0" : "=m" (fprd) : "m" (fprs)

static inline u32 dp_to_sp(u64 fprd)
	asm ("lfd%U1%X1 0,%1; stfs%U0%X0 0,%0" : "=m" (fprs) : "m" (fprd)

#else
#define sp_to_dp(x)	(x)
#define dp_to_sp(x)	(x)
#endif /* CONFIG_PPC_FPU */
static void kvmppc_complete_mmio_load(struct kvm_vcpu *vcpu,
				      struct kvm_run *run)
	u64 uninitialized_var(gpr);

	if (run->mmio.len > sizeof(gpr)) {
		printk(KERN_ERR "bad MMIO length: %d\n", run->mmio.len);
	}

	if (!vcpu->arch.mmio_host_swabbed) {
		switch (run->mmio.len) {
		case 8: gpr = *(u64 *)run->mmio.data; break;
		case 4: gpr = *(u32 *)run->mmio.data; break;
		case 2: gpr = *(u16 *)run->mmio.data; break;
		case 1: gpr = *(u8 *)run->mmio.data; break;
		}
	} else {
		switch (run->mmio.len) {
		case 8: gpr = swab64(*(u64 *)run->mmio.data); break;
		case 4: gpr = swab32(*(u32 *)run->mmio.data); break;
		case 2: gpr = swab16(*(u16 *)run->mmio.data); break;
		case 1: gpr = *(u8 *)run->mmio.data; break;
		}
	}

	/* conversion between single and double precision */
	if ((vcpu->arch.mmio_sp64_extend) && (run->mmio.len == 4))
		gpr = sp_to_dp(gpr);

	if (vcpu->arch.mmio_sign_extend) {
		switch (run->mmio.len) {
			gpr = (s64)(s32)gpr;
			gpr = (s64)(s16)gpr;
		}
	}

	switch (vcpu->arch.io_gpr & KVM_MMIO_REG_EXT_MASK) {
	case KVM_MMIO_REG_GPR:
		kvmppc_set_gpr(vcpu, vcpu->arch.io_gpr, gpr);
	case KVM_MMIO_REG_FPR:
		if (vcpu->kvm->arch.kvm_ops->giveup_ext)
			vcpu->kvm->arch.kvm_ops->giveup_ext(vcpu, MSR_FP);
		VCPU_FPR(vcpu, vcpu->arch.io_gpr & KVM_MMIO_REG_MASK) = gpr;
#ifdef CONFIG_PPC_BOOK3S
	case KVM_MMIO_REG_QPR:
		vcpu->arch.qpr[vcpu->arch.io_gpr & KVM_MMIO_REG_MASK] = gpr;
	case KVM_MMIO_REG_FQPR:
		VCPU_FPR(vcpu, vcpu->arch.io_gpr & KVM_MMIO_REG_MASK) = gpr;
		vcpu->arch.qpr[vcpu->arch.io_gpr & KVM_MMIO_REG_MASK] = gpr;
#endif
	case KVM_MMIO_REG_VSX:
		if (vcpu->kvm->arch.kvm_ops->giveup_ext)
			vcpu->kvm->arch.kvm_ops->giveup_ext(vcpu, MSR_VSX);

		if (vcpu->arch.mmio_copy_type == KVMPPC_VSX_COPY_DWORD)
			kvmppc_set_vsr_dword(vcpu, gpr);
		else if (vcpu->arch.mmio_copy_type == KVMPPC_VSX_COPY_WORD)
			kvmppc_set_vsr_word(vcpu, gpr);
		else if (vcpu->arch.mmio_copy_type ==
			 KVMPPC_VSX_COPY_DWORD_LOAD_DUMP)
			kvmppc_set_vsr_dword_dump(vcpu, gpr);
		else if (vcpu->arch.mmio_copy_type ==
			 KVMPPC_VSX_COPY_WORD_LOAD_DUMP)
			kvmppc_set_vsr_word_dump(vcpu, gpr);
#ifdef CONFIG_ALTIVEC
	case KVM_MMIO_REG_VMX:
		if (vcpu->kvm->arch.kvm_ops->giveup_ext)
			vcpu->kvm->arch.kvm_ops->giveup_ext(vcpu, MSR_VEC);

		if (vcpu->arch.mmio_copy_type == KVMPPC_VMX_COPY_DWORD)
			kvmppc_set_vmx_dword(vcpu, gpr);
		else if (vcpu->arch.mmio_copy_type == KVMPPC_VMX_COPY_WORD)
			kvmppc_set_vmx_word(vcpu, gpr);
		else if (vcpu->arch.mmio_copy_type ==
			 KVMPPC_VMX_COPY_HWORD)
			kvmppc_set_vmx_hword(vcpu, gpr);
		else if (vcpu->arch.mmio_copy_type ==
			 KVMPPC_VMX_COPY_BYTE)
			kvmppc_set_vmx_byte(vcpu, gpr);
#endif
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
	case KVM_MMIO_REG_NESTED_GPR:
		if (kvmppc_need_byteswap(vcpu))
		kvm_vcpu_write_guest(vcpu, vcpu->arch.nested_io_gpr, &gpr,
static int __kvmppc_handle_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
				unsigned int rt, unsigned int bytes,
				int is_default_endian, int sign_extend)
	/* Pity C doesn't have a logical XOR operator */
	if (kvmppc_need_byteswap(vcpu)) {
		host_swabbed = is_default_endian;
	} else {
		host_swabbed = !is_default_endian;
	}

	if (bytes > sizeof(run->mmio.data)) {
		printk(KERN_ERR "%s: bad MMIO length: %d\n", __func__,
	}

	run->mmio.phys_addr = vcpu->arch.paddr_accessed;
	run->mmio.len = bytes;
	run->mmio.is_write = 0;

	vcpu->arch.io_gpr = rt;
	vcpu->arch.mmio_host_swabbed = host_swabbed;
	vcpu->mmio_needed = 1;
	vcpu->mmio_is_write = 0;
	vcpu->arch.mmio_sign_extend = sign_extend;

	idx = srcu_read_lock(&vcpu->kvm->srcu);

	ret = kvm_io_bus_read(vcpu, KVM_MMIO_BUS, run->mmio.phys_addr,
			      bytes, &run->mmio.data);

	srcu_read_unlock(&vcpu->kvm->srcu, idx);

		kvmppc_complete_mmio_load(vcpu, run);
		vcpu->mmio_needed = 0;
		return EMULATE_DONE;

	return EMULATE_DO_MMIO;

int kvmppc_handle_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
		       unsigned int rt, unsigned int bytes,
		       int is_default_endian)
	return __kvmppc_handle_load(run, vcpu, rt, bytes, is_default_endian, 0);
EXPORT_SYMBOL_GPL(kvmppc_handle_load);
/* Same as above, but sign extends */
int kvmppc_handle_loads(struct kvm_run *run, struct kvm_vcpu *vcpu,
			unsigned int rt, unsigned int bytes,
			int is_default_endian)
	return __kvmppc_handle_load(run, vcpu, rt, bytes, is_default_endian, 1);

int kvmppc_handle_vsx_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
			   unsigned int rt, unsigned int bytes,
			   int is_default_endian, int mmio_sign_extend)
	enum emulation_result emulated = EMULATE_DONE;

	/* Currently, mmio_vsx_copy_nums only allowed to be 4 or less */
	if (vcpu->arch.mmio_vsx_copy_nums > 4)
		return EMULATE_FAIL;

	while (vcpu->arch.mmio_vsx_copy_nums) {
		emulated = __kvmppc_handle_load(run, vcpu, rt, bytes,
						is_default_endian, mmio_sign_extend);

		if (emulated != EMULATE_DONE)

		vcpu->arch.paddr_accessed += run->mmio.len;

		vcpu->arch.mmio_vsx_copy_nums--;
		vcpu->arch.mmio_vsx_offset++;
	}
#endif /* CONFIG_VSX */
int kvmppc_handle_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
			u64 val, unsigned int bytes, int is_default_endian)
	void *data = run->mmio.data;

	/* Pity C doesn't have a logical XOR operator */
	if (kvmppc_need_byteswap(vcpu)) {
		host_swabbed = is_default_endian;
	} else {
		host_swabbed = !is_default_endian;
	}

	if (bytes > sizeof(run->mmio.data)) {
		printk(KERN_ERR "%s: bad MMIO length: %d\n", __func__,
	}

	run->mmio.phys_addr = vcpu->arch.paddr_accessed;
	run->mmio.len = bytes;
	run->mmio.is_write = 1;
	vcpu->mmio_needed = 1;
	vcpu->mmio_is_write = 1;

	if ((vcpu->arch.mmio_sp64_extend) && (bytes == 4))
		val = dp_to_sp(val);

	/* Store the value at the lowest bytes in 'data'. */
	if (!host_swabbed) {
		case 8: *(u64 *)data = val; break;
		case 4: *(u32 *)data = val; break;
		case 2: *(u16 *)data = val; break;
		case 1: *(u8 *)data = val; break;
	} else {
		case 8: *(u64 *)data = swab64(val); break;
		case 4: *(u32 *)data = swab32(val); break;
		case 2: *(u16 *)data = swab16(val); break;
		case 1: *(u8 *)data = val; break;
	}

	idx = srcu_read_lock(&vcpu->kvm->srcu);

	ret = kvm_io_bus_write(vcpu, KVM_MMIO_BUS, run->mmio.phys_addr,
			       bytes, &run->mmio.data);

	srcu_read_unlock(&vcpu->kvm->srcu, idx);

		vcpu->mmio_needed = 0;
		return EMULATE_DONE;

	return EMULATE_DO_MMIO;
EXPORT_SYMBOL_GPL(kvmppc_handle_store);
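
/*
 * kvmppc_get_vsr_data() pulls the next element to be stored out of the
 * source VSX register, using mmio_copy_type and the current
 * mmio_vsx_offset to select the doubleword or word.
 */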
static inline int kvmppc_get_vsr_data(struct kvm_vcpu *vcpu, int rs, u64 *val)
	u32 dword_offset, word_offset;
	union kvmppc_one_reg reg;
	int copy_type = vcpu->arch.mmio_copy_type;

	switch (copy_type) {
	case KVMPPC_VSX_COPY_DWORD:
			kvmppc_get_vsr_dword_offset(vcpu->arch.mmio_vsx_offset);

		if (vsx_offset == -1) {

			*val = VCPU_VSX_FPR(vcpu, rs, vsx_offset);
			reg.vval = VCPU_VSX_VR(vcpu, rs - 32);
			*val = reg.vsxval[vsx_offset];

	case KVMPPC_VSX_COPY_WORD:
			kvmppc_get_vsr_word_offset(vcpu->arch.mmio_vsx_offset);

		if (vsx_offset == -1) {

			dword_offset = vsx_offset / 2;
			word_offset = vsx_offset % 2;
			reg.vsxval[0] = VCPU_VSX_FPR(vcpu, rs, dword_offset);
			*val = reg.vsx32val[word_offset];
			reg.vval = VCPU_VSX_VR(vcpu, rs - 32);
			*val = reg.vsx32val[vsx_offset];
int kvmppc_handle_vsx_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
			    int rs, unsigned int bytes, int is_default_endian)
	enum emulation_result emulated = EMULATE_DONE;

	vcpu->arch.io_gpr = rs;

	/* Currently, mmio_vsx_copy_nums only allowed to be 4 or less */
	if (vcpu->arch.mmio_vsx_copy_nums > 4)
		return EMULATE_FAIL;

	while (vcpu->arch.mmio_vsx_copy_nums) {
		if (kvmppc_get_vsr_data(vcpu, rs, &val) == -1)
			return EMULATE_FAIL;

		emulated = kvmppc_handle_store(run, vcpu,
					       val, bytes, is_default_endian);

		if (emulated != EMULATE_DONE)

		vcpu->arch.paddr_accessed += run->mmio.len;

		vcpu->arch.mmio_vsx_copy_nums--;
		vcpu->arch.mmio_vsx_offset++;
	}
static int kvmppc_emulate_mmio_vsx_loadstore(struct kvm_vcpu *vcpu,
					     struct kvm_run *run)
	enum emulation_result emulated = EMULATE_FAIL;

	vcpu->arch.paddr_accessed += run->mmio.len;

	if (!vcpu->mmio_is_write) {
		emulated = kvmppc_handle_vsx_load(run, vcpu, vcpu->arch.io_gpr,
						  run->mmio.len, 1, vcpu->arch.mmio_sign_extend);
	} else {
		emulated = kvmppc_handle_vsx_store(run, vcpu,
						   vcpu->arch.io_gpr, run->mmio.len, 1);
	}

	case EMULATE_DO_MMIO:
		run->exit_reason = KVM_EXIT_MMIO;

		pr_info("KVM: MMIO emulation failed (VSX repeat)\n");
		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION;

#endif /* CONFIG_VSX */
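
/*
 * VMX (Altivec) MMIO loads and stores are handled the same way as VSX:
 * the access is replayed element by element, with the element size
 * (dword/word/hword/byte) chosen by mmio_copy_type.
 */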
#ifdef CONFIG_ALTIVEC
int kvmppc_handle_vmx_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
			   unsigned int rt, unsigned int bytes, int is_default_endian)
	enum emulation_result emulated = EMULATE_DONE;

	if (vcpu->arch.mmio_vsx_copy_nums > 2)
		return EMULATE_FAIL;

	while (vcpu->arch.mmio_vmx_copy_nums) {
		emulated = __kvmppc_handle_load(run, vcpu, rt, bytes,
						is_default_endian, 0);

		if (emulated != EMULATE_DONE)

		vcpu->arch.paddr_accessed += run->mmio.len;
		vcpu->arch.mmio_vmx_copy_nums--;
		vcpu->arch.mmio_vmx_offset++;
	}
int kvmppc_get_vmx_dword(struct kvm_vcpu *vcpu, int index, u64 *val)
	union kvmppc_one_reg reg;

		kvmppc_get_vmx_dword_offset(vcpu, vcpu->arch.mmio_vmx_offset);

	if (vmx_offset == -1)

	reg.vval = VCPU_VSX_VR(vcpu, index);
	*val = reg.vsxval[vmx_offset];

int kvmppc_get_vmx_word(struct kvm_vcpu *vcpu, int index, u64 *val)
	union kvmppc_one_reg reg;

		kvmppc_get_vmx_word_offset(vcpu, vcpu->arch.mmio_vmx_offset);

	if (vmx_offset == -1)

	reg.vval = VCPU_VSX_VR(vcpu, index);
	*val = reg.vsx32val[vmx_offset];

int kvmppc_get_vmx_hword(struct kvm_vcpu *vcpu, int index, u64 *val)
	union kvmppc_one_reg reg;

		kvmppc_get_vmx_hword_offset(vcpu, vcpu->arch.mmio_vmx_offset);

	if (vmx_offset == -1)

	reg.vval = VCPU_VSX_VR(vcpu, index);
	*val = reg.vsx16val[vmx_offset];

int kvmppc_get_vmx_byte(struct kvm_vcpu *vcpu, int index, u64 *val)
	union kvmppc_one_reg reg;

		kvmppc_get_vmx_byte_offset(vcpu, vcpu->arch.mmio_vmx_offset);

	if (vmx_offset == -1)

	reg.vval = VCPU_VSX_VR(vcpu, index);
	*val = reg.vsx8val[vmx_offset];
int kvmppc_handle_vmx_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
			    unsigned int rs, unsigned int bytes, int is_default_endian)
	unsigned int index = rs & KVM_MMIO_REG_MASK;
	enum emulation_result emulated = EMULATE_DONE;

	if (vcpu->arch.mmio_vsx_copy_nums > 2)
		return EMULATE_FAIL;

	vcpu->arch.io_gpr = rs;

	while (vcpu->arch.mmio_vmx_copy_nums) {
		switch (vcpu->arch.mmio_copy_type) {
		case KVMPPC_VMX_COPY_DWORD:
			if (kvmppc_get_vmx_dword(vcpu, index, &val) == -1)
				return EMULATE_FAIL;
		case KVMPPC_VMX_COPY_WORD:
			if (kvmppc_get_vmx_word(vcpu, index, &val) == -1)
				return EMULATE_FAIL;
		case KVMPPC_VMX_COPY_HWORD:
			if (kvmppc_get_vmx_hword(vcpu, index, &val) == -1)
				return EMULATE_FAIL;
		case KVMPPC_VMX_COPY_BYTE:
			if (kvmppc_get_vmx_byte(vcpu, index, &val) == -1)
				return EMULATE_FAIL;
			return EMULATE_FAIL;
		}

		emulated = kvmppc_handle_store(run, vcpu, val, bytes,

		if (emulated != EMULATE_DONE)

		vcpu->arch.paddr_accessed += run->mmio.len;
		vcpu->arch.mmio_vmx_copy_nums--;
		vcpu->arch.mmio_vmx_offset++;
	}
static int kvmppc_emulate_mmio_vmx_loadstore(struct kvm_vcpu *vcpu,
					     struct kvm_run *run)
	enum emulation_result emulated = EMULATE_FAIL;

	vcpu->arch.paddr_accessed += run->mmio.len;

	if (!vcpu->mmio_is_write) {
		emulated = kvmppc_handle_vmx_load(run, vcpu,
						  vcpu->arch.io_gpr, run->mmio.len, 1);
	} else {
		emulated = kvmppc_handle_vmx_store(run, vcpu,
						   vcpu->arch.io_gpr, run->mmio.len, 1);
	}

	case EMULATE_DO_MMIO:
		run->exit_reason = KVM_EXIT_MMIO;

		pr_info("KVM: MMIO emulation failed (VMX repeat)\n");
		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION;

#endif /* CONFIG_ALTIVEC */
int kvm_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
	union kvmppc_one_reg val;

	size = one_reg_size(reg->id);
	if (size > sizeof(val))

	r = kvmppc_get_one_reg(vcpu, reg->id, &val);

#ifdef CONFIG_ALTIVEC
	case KVM_REG_PPC_VR0 ... KVM_REG_PPC_VR31:
		if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
		val.vval = vcpu->arch.vr.vr[reg->id - KVM_REG_PPC_VR0];
	case KVM_REG_PPC_VSCR:
		if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
		val = get_reg_val(reg->id, vcpu->arch.vr.vscr.u[3]);
	case KVM_REG_PPC_VRSAVE:
		val = get_reg_val(reg->id, vcpu->arch.vrsave);
#endif /* CONFIG_ALTIVEC */

	if (copy_to_user((char __user *)(unsigned long)reg->addr, &val, size))
*vcpu
, struct kvm_one_reg
*reg
)
1724 union kvmppc_one_reg val
;
1727 size
= one_reg_size(reg
->id
);
1728 if (size
> sizeof(val
))
1731 if (copy_from_user(&val
, (char __user
*)(unsigned long)reg
->addr
, size
))
1734 r
= kvmppc_set_one_reg(vcpu
, reg
->id
, &val
);
1738 #ifdef CONFIG_ALTIVEC
1739 case KVM_REG_PPC_VR0
... KVM_REG_PPC_VR31
:
1740 if (!cpu_has_feature(CPU_FTR_ALTIVEC
)) {
1744 vcpu
->arch
.vr
.vr
[reg
->id
- KVM_REG_PPC_VR0
] = val
.vval
;
1746 case KVM_REG_PPC_VSCR
:
1747 if (!cpu_has_feature(CPU_FTR_ALTIVEC
)) {
1751 vcpu
->arch
.vr
.vscr
.u
[3] = set_reg_val(reg
->id
, val
);
1753 case KVM_REG_PPC_VRSAVE
:
1754 if (!cpu_has_feature(CPU_FTR_ALTIVEC
)) {
1758 vcpu
->arch
.vrsave
= set_reg_val(reg
->id
, val
);
1760 #endif /* CONFIG_ALTIVEC */
int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
	if (vcpu->mmio_needed) {
		vcpu->mmio_needed = 0;
		if (!vcpu->mmio_is_write)
			kvmppc_complete_mmio_load(vcpu, run);

		if (vcpu->arch.mmio_vsx_copy_nums > 0) {
			vcpu->arch.mmio_vsx_copy_nums--;
			vcpu->arch.mmio_vsx_offset++;

		if (vcpu->arch.mmio_vsx_copy_nums > 0) {
			r = kvmppc_emulate_mmio_vsx_loadstore(vcpu, run);
			if (r == RESUME_HOST) {
				vcpu->mmio_needed = 1;

#ifdef CONFIG_ALTIVEC
		if (vcpu->arch.mmio_vmx_copy_nums > 0) {
			vcpu->arch.mmio_vmx_copy_nums--;
			vcpu->arch.mmio_vmx_offset++;

		if (vcpu->arch.mmio_vmx_copy_nums > 0) {
			r = kvmppc_emulate_mmio_vmx_loadstore(vcpu, run);
			if (r == RESUME_HOST) {
				vcpu->mmio_needed = 1;

	} else if (vcpu->arch.osi_needed) {
		u64 *gprs = run->osi.gprs;

		for (i = 0; i < 32; i++)
			kvmppc_set_gpr(vcpu, i, gprs[i]);
		vcpu->arch.osi_needed = 0;
	} else if (vcpu->arch.hcall_needed) {
		kvmppc_set_gpr(vcpu, 3, run->papr_hcall.ret);
		for (i = 0; i < 9; ++i)
			kvmppc_set_gpr(vcpu, 4 + i, run->papr_hcall.args[i]);
		vcpu->arch.hcall_needed = 0;
	} else if (vcpu->arch.epr_needed) {
		kvmppc_set_epr(vcpu, run->epr.epr);
		vcpu->arch.epr_needed = 0;
	}

	kvm_sigset_activate(vcpu);

	if (run->immediate_exit)

	r = kvmppc_vcpu_run(run, vcpu);

	kvm_sigset_deactivate(vcpu);

#ifdef CONFIG_ALTIVEC
int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu, struct kvm_interrupt *irq)
	if (irq->irq == KVM_INTERRUPT_UNSET) {
		kvmppc_core_dequeue_external(vcpu);

	kvmppc_core_queue_external(vcpu, irq);

	kvm_vcpu_kick(vcpu);
static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
				     struct kvm_enable_cap *cap)
	case KVM_CAP_PPC_OSI:
		vcpu->arch.osi_enabled = true;
	case KVM_CAP_PPC_PAPR:
		vcpu->arch.papr_enabled = true;
	case KVM_CAP_PPC_EPR:
			vcpu->arch.epr_flags |= KVMPPC_EPR_USER;
			vcpu->arch.epr_flags &= ~KVMPPC_EPR_USER;
	case KVM_CAP_PPC_BOOKE_WATCHDOG:
		vcpu->arch.watchdog_enabled = true;
#if defined(CONFIG_KVM_E500V2) || defined(CONFIG_KVM_E500MC)
	case KVM_CAP_SW_TLB: {
		struct kvm_config_tlb cfg;
		void __user *user_ptr = (void __user *)(uintptr_t)cap->args[0];

		if (copy_from_user(&cfg, user_ptr, sizeof(cfg)))

		r = kvm_vcpu_ioctl_config_tlb(vcpu, &cfg);
	}
#endif
#ifdef CONFIG_KVM_MPIC
	case KVM_CAP_IRQ_MPIC: {
		struct kvm_device *dev;

		f = fdget(cap->args[0]);

		dev = kvm_device_from_filp(f.file);
			r = kvmppc_mpic_connect_vcpu(dev, vcpu, cap->args[1]);
	}
#endif
#ifdef CONFIG_KVM_XICS
	case KVM_CAP_IRQ_XICS: {
		struct kvm_device *dev;

		f = fdget(cap->args[0]);

		dev = kvm_device_from_filp(f.file);
			r = kvmppc_xive_connect_vcpu(dev, vcpu, cap->args[1]);
			r = kvmppc_xics_connect_vcpu(dev, vcpu, cap->args[1]);
	}
#endif /* CONFIG_KVM_XICS */
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
	case KVM_CAP_PPC_FWNMI:
		if (!is_kvmppc_hv_enabled(vcpu->kvm))

		vcpu->kvm->arch.fwnmi_enabled = true;
#endif /* CONFIG_KVM_BOOK3S_HV_POSSIBLE */

	r = kvmppc_sanity_check(vcpu);
bool kvm_arch_intc_initialized(struct kvm *kvm)
#ifdef CONFIG_KVM_MPIC
#ifdef CONFIG_KVM_XICS
	if (kvm->arch.xics || kvm->arch.xive)

int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)

int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
long kvm_arch_vcpu_async_ioctl(struct file *filp,
			       unsigned int ioctl, unsigned long arg)
	struct kvm_vcpu *vcpu = filp->private_data;
	void __user *argp = (void __user *)arg;

	if (ioctl == KVM_INTERRUPT) {
		struct kvm_interrupt irq;
		if (copy_from_user(&irq, argp, sizeof(irq)))
		return kvm_vcpu_ioctl_interrupt(vcpu, &irq);
	}
	return -ENOIOCTLCMD;
long kvm_arch_vcpu_ioctl(struct file *filp,
			 unsigned int ioctl, unsigned long arg)
	struct kvm_vcpu *vcpu = filp->private_data;
	void __user *argp = (void __user *)arg;

	case KVM_ENABLE_CAP:
		struct kvm_enable_cap cap;

		if (copy_from_user(&cap, argp, sizeof(cap)))
		r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap);

	case KVM_SET_ONE_REG:
	case KVM_GET_ONE_REG:
		struct kvm_one_reg reg;

		if (copy_from_user(&reg, argp, sizeof(reg)))
		if (ioctl == KVM_SET_ONE_REG)
			r = kvm_vcpu_ioctl_set_one_reg(vcpu, &reg);
			r = kvm_vcpu_ioctl_get_one_reg(vcpu, &reg);

#if defined(CONFIG_KVM_E500V2) || defined(CONFIG_KVM_E500MC)
	case KVM_DIRTY_TLB: {
		struct kvm_dirty_tlb dirty;

		if (copy_from_user(&dirty, argp, sizeof(dirty)))
		r = kvm_vcpu_ioctl_dirty_tlb(vcpu, &dirty);
vm_fault_t kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
	return VM_FAULT_SIGBUS;
static int kvm_vm_ioctl_get_pvinfo(struct kvm_ppc_pvinfo *pvinfo)
	u32 inst_nop = 0x60000000;
#ifdef CONFIG_KVM_BOOKE_HV
	u32 inst_sc1 = 0x44000022;
	pvinfo->hcall[0] = cpu_to_be32(inst_sc1);
	pvinfo->hcall[1] = cpu_to_be32(inst_nop);
	pvinfo->hcall[2] = cpu_to_be32(inst_nop);
	pvinfo->hcall[3] = cpu_to_be32(inst_nop);
#else
	u32 inst_lis = 0x3c000000;
	u32 inst_ori = 0x60000000;
	u32 inst_sc = 0x44000002;
	u32 inst_imm_mask = 0xffff;

	/*
	 * The hypercall to get into KVM from within guest context is as
	 * follows:
	 *
	 *    lis r0, r0, KVM_SC_MAGIC_R0@h
	 *    ori r0, KVM_SC_MAGIC_R0@l
	 */
	pvinfo->hcall[0] = cpu_to_be32(inst_lis | ((KVM_SC_MAGIC_R0 >> 16) & inst_imm_mask));
	pvinfo->hcall[1] = cpu_to_be32(inst_ori | (KVM_SC_MAGIC_R0 & inst_imm_mask));
	pvinfo->hcall[2] = cpu_to_be32(inst_sc);
	pvinfo->hcall[3] = cpu_to_be32(inst_nop);
#endif

	pvinfo->flags = KVM_PPC_PVINFO_FLAGS_EV_IDLE;
int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_event,
	if (!irqchip_in_kernel(kvm))

	irq_event->status = kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID,
					irq_event->irq, irq_event->level,
int kvm_vm_ioctl_enable_cap(struct kvm *kvm,
			    struct kvm_enable_cap *cap)
#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
	case KVM_CAP_PPC_ENABLE_HCALL: {
		unsigned long hcall = cap->args[0];

		if (hcall > MAX_HCALL_OPCODE || (hcall & 3) ||

		if (!kvmppc_book3s_hcall_implemented(kvm, hcall))

			set_bit(hcall / 4, kvm->arch.enabled_hcalls);
			clear_bit(hcall / 4, kvm->arch.enabled_hcalls);
	}
	case KVM_CAP_PPC_SMT: {
		unsigned long mode = cap->args[0];
		unsigned long flags = cap->args[1];

		if (kvm->arch.kvm_ops->set_smt_mode)
			r = kvm->arch.kvm_ops->set_smt_mode(kvm, mode, flags);
	}
	case KVM_CAP_PPC_NESTED_HV:
		if (!is_kvmppc_hv_enabled(kvm) ||
		    !kvm->arch.kvm_ops->enable_nested)
		r = kvm->arch.kvm_ops->enable_nested(kvm);
#ifdef CONFIG_PPC_BOOK3S_64
/*
 * These functions check whether the underlying hardware is safe
 * against attacks based on observing the effects of speculatively
 * executed instructions, and whether it supplies instructions for
 * use in workarounds.  The information comes from firmware, either
 * via the device tree on powernv platforms or from an hcall on
 * pseries platforms.
 */
#ifdef CONFIG_PPC_PSERIES
static int pseries_get_cpu_char(struct kvm_ppc_cpu_char *cp)
	struct h_cpu_char_result c;

	if (!machine_is(pseries))

	rc = plpar_get_cpu_characteristics(&c);
	if (rc == H_SUCCESS) {
		cp->character = c.character;
		cp->behaviour = c.behaviour;
		cp->character_mask = KVM_PPC_CPU_CHAR_SPEC_BAR_ORI31 |
			KVM_PPC_CPU_CHAR_BCCTRL_SERIALISED |
			KVM_PPC_CPU_CHAR_L1D_FLUSH_ORI30 |
			KVM_PPC_CPU_CHAR_L1D_FLUSH_TRIG2 |
			KVM_PPC_CPU_CHAR_L1D_THREAD_PRIV |
			KVM_PPC_CPU_CHAR_BR_HINT_HONOURED |
			KVM_PPC_CPU_CHAR_MTTRIG_THR_RECONF |
			KVM_PPC_CPU_CHAR_COUNT_CACHE_DIS;
		cp->behaviour_mask = KVM_PPC_CPU_BEHAV_FAVOUR_SECURITY |
			KVM_PPC_CPU_BEHAV_L1D_FLUSH_PR |
			KVM_PPC_CPU_BEHAV_BNDS_CHK_SPEC_BAR;
	}
#else
static int pseries_get_cpu_char(struct kvm_ppc_cpu_char *cp)
#endif

static inline bool have_fw_feat(struct device_node *fw_features,
				const char *state, const char *name)
	struct device_node *np;

	np = of_get_child_by_name(fw_features, name);
		r = of_property_read_bool(np, state);

static int kvmppc_get_cpu_char(struct kvm_ppc_cpu_char *cp)
	struct device_node *np, *fw_features;

	memset(cp, 0, sizeof(*cp));
	r = pseries_get_cpu_char(cp);

	np = of_find_node_by_name(NULL, "ibm,opal");
		fw_features = of_get_child_by_name(np, "fw-features");

		if (have_fw_feat(fw_features, "enabled",
				 "inst-spec-barrier-ori31,31,0"))
			cp->character |= KVM_PPC_CPU_CHAR_SPEC_BAR_ORI31;
		if (have_fw_feat(fw_features, "enabled",
				 "fw-bcctrl-serialized"))
			cp->character |= KVM_PPC_CPU_CHAR_BCCTRL_SERIALISED;
		if (have_fw_feat(fw_features, "enabled",
				 "inst-l1d-flush-ori30,30,0"))
			cp->character |= KVM_PPC_CPU_CHAR_L1D_FLUSH_ORI30;
		if (have_fw_feat(fw_features, "enabled",
				 "inst-l1d-flush-trig2"))
			cp->character |= KVM_PPC_CPU_CHAR_L1D_FLUSH_TRIG2;
		if (have_fw_feat(fw_features, "enabled",
				 "fw-l1d-thread-split"))
			cp->character |= KVM_PPC_CPU_CHAR_L1D_THREAD_PRIV;
		if (have_fw_feat(fw_features, "enabled",
				 "fw-count-cache-disabled"))
			cp->character |= KVM_PPC_CPU_CHAR_COUNT_CACHE_DIS;
		cp->character_mask = KVM_PPC_CPU_CHAR_SPEC_BAR_ORI31 |
			KVM_PPC_CPU_CHAR_BCCTRL_SERIALISED |
			KVM_PPC_CPU_CHAR_L1D_FLUSH_ORI30 |
			KVM_PPC_CPU_CHAR_L1D_FLUSH_TRIG2 |
			KVM_PPC_CPU_CHAR_L1D_THREAD_PRIV |
			KVM_PPC_CPU_CHAR_COUNT_CACHE_DIS;

		if (have_fw_feat(fw_features, "enabled",
				 "speculation-policy-favor-security"))
			cp->behaviour |= KVM_PPC_CPU_BEHAV_FAVOUR_SECURITY;
		if (!have_fw_feat(fw_features, "disabled",
				  "needs-l1d-flush-msr-pr-0-to-1"))
			cp->behaviour |= KVM_PPC_CPU_BEHAV_L1D_FLUSH_PR;
		if (!have_fw_feat(fw_features, "disabled",
				  "needs-spec-barrier-for-bound-checks"))
			cp->behaviour |= KVM_PPC_CPU_BEHAV_BNDS_CHK_SPEC_BAR;
		cp->behaviour_mask = KVM_PPC_CPU_BEHAV_FAVOUR_SECURITY |
			KVM_PPC_CPU_BEHAV_L1D_FLUSH_PR |
			KVM_PPC_CPU_BEHAV_BNDS_CHK_SPEC_BAR;

		of_node_put(fw_features);
long kvm_arch_vm_ioctl(struct file *filp,
		       unsigned int ioctl, unsigned long arg)
	struct kvm *kvm __maybe_unused = filp->private_data;
	void __user *argp = (void __user *)arg;

	case KVM_PPC_GET_PVINFO: {
		struct kvm_ppc_pvinfo pvinfo;
		memset(&pvinfo, 0, sizeof(pvinfo));
		r = kvm_vm_ioctl_get_pvinfo(&pvinfo);
		if (copy_to_user(argp, &pvinfo, sizeof(pvinfo))) {
		}
	}

#ifdef CONFIG_SPAPR_TCE_IOMMU
	case KVM_CREATE_SPAPR_TCE_64: {
		struct kvm_create_spapr_tce_64 create_tce_64;

		if (copy_from_user(&create_tce_64, argp, sizeof(create_tce_64)))
		if (create_tce_64.flags) {
		}
		r = kvm_vm_ioctl_create_spapr_tce(kvm, &create_tce_64);
	}
	case KVM_CREATE_SPAPR_TCE: {
		struct kvm_create_spapr_tce create_tce;
		struct kvm_create_spapr_tce_64 create_tce_64;

		if (copy_from_user(&create_tce, argp, sizeof(create_tce)))

		create_tce_64.liobn = create_tce.liobn;
		create_tce_64.page_shift = IOMMU_PAGE_SHIFT_4K;
		create_tce_64.offset = 0;
		create_tce_64.size = create_tce.window_size >>
				IOMMU_PAGE_SHIFT_4K;
		create_tce_64.flags = 0;
		r = kvm_vm_ioctl_create_spapr_tce(kvm, &create_tce_64);
	}
#endif
#ifdef CONFIG_PPC_BOOK3S_64
	case KVM_PPC_GET_SMMU_INFO: {
		struct kvm_ppc_smmu_info info;
		struct kvm *kvm = filp->private_data;

		memset(&info, 0, sizeof(info));
		r = kvm->arch.kvm_ops->get_smmu_info(kvm, &info);
		if (r >= 0 && copy_to_user(argp, &info, sizeof(info)))
	}
	case KVM_PPC_RTAS_DEFINE_TOKEN: {
		struct kvm *kvm = filp->private_data;

		r = kvm_vm_ioctl_rtas_define_token(kvm, argp);
	}
	case KVM_PPC_CONFIGURE_V3_MMU: {
		struct kvm *kvm = filp->private_data;
		struct kvm_ppc_mmuv3_cfg cfg;

		if (!kvm->arch.kvm_ops->configure_mmu)
		if (copy_from_user(&cfg, argp, sizeof(cfg)))
		r = kvm->arch.kvm_ops->configure_mmu(kvm, &cfg);
	}
	case KVM_PPC_GET_RMMU_INFO: {
		struct kvm *kvm = filp->private_data;
		struct kvm_ppc_rmmu_info info;

		if (!kvm->arch.kvm_ops->get_rmmu_info)
		r = kvm->arch.kvm_ops->get_rmmu_info(kvm, &info);
		if (r >= 0 && copy_to_user(argp, &info, sizeof(info)))
	}
	case KVM_PPC_GET_CPU_CHAR: {
		struct kvm_ppc_cpu_char cpuchar;

		r = kvmppc_get_cpu_char(&cpuchar);
		if (r >= 0 && copy_to_user(argp, &cpuchar, sizeof(cpuchar)))
	}

		struct kvm *kvm = filp->private_data;
		r = kvm->arch.kvm_ops->arch_vm_ioctl(filp, ioctl, arg);

#else /* CONFIG_PPC_BOOK3S_64 */
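
/*
 * LPIDs (logical partition IDs) identify a guest to the hardware MMU;
 * they are handed out from a small bitmap, so the number of concurrent
 * guests is bounded by KVMPPC_NR_LPIDS.
 */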
static unsigned long lpid_inuse[BITS_TO_LONGS(KVMPPC_NR_LPIDS)];
static unsigned long nr_lpids;

long kvmppc_alloc_lpid(void)
		lpid = find_first_zero_bit(lpid_inuse, KVMPPC_NR_LPIDS);
		if (lpid >= nr_lpids) {
			pr_err("%s: No LPIDs free\n", __func__);
	} while (test_and_set_bit(lpid, lpid_inuse));
EXPORT_SYMBOL_GPL(kvmppc_alloc_lpid);

void kvmppc_claim_lpid(long lpid)
	set_bit(lpid, lpid_inuse);
EXPORT_SYMBOL_GPL(kvmppc_claim_lpid);

void kvmppc_free_lpid(long lpid)
	clear_bit(lpid, lpid_inuse);
EXPORT_SYMBOL_GPL(kvmppc_free_lpid);

void kvmppc_init_lpid(unsigned long nr_lpids_param)
	nr_lpids = min_t(unsigned long, KVMPPC_NR_LPIDS, nr_lpids_param);
	memset(lpid_inuse, 0, sizeof(lpid_inuse));
EXPORT_SYMBOL_GPL(kvmppc_init_lpid);

int kvm_arch_init(void *opaque)

EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_ppc_instr);