/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
 *
 * Copyright IBM Corp. 2007
 *
 * Authors: Hollis Blanchard <hollisb@us.ibm.com>
 *          Christian Ehrhardt <ehrhardt@linux.vnet.ibm.com>
 */
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/kvm_host.h>
#include <linux/vmalloc.h>
#include <linux/hrtimer.h>
#include <linux/sched/signal.h>
#include <linux/slab.h>
#include <linux/file.h>
#include <linux/module.h>
#include <linux/irqbypass.h>
#include <linux/kvm_irqfd.h>
#include <asm/cputable.h>
#include <linux/uaccess.h>
#include <asm/kvm_ppc.h>
#include <asm/tlbflush.h>
#include <asm/cputhreads.h>
#include <asm/irqflags.h>
#include <asm/iommu.h>
#include "../mm/mmu_decl.h"

#define CREATE_TRACE_POINTS
#include "trace.h"
struct kvmppc_ops *kvmppc_hv_ops;
EXPORT_SYMBOL_GPL(kvmppc_hv_ops);
struct kvmppc_ops *kvmppc_pr_ops;
EXPORT_SYMBOL_GPL(kvmppc_pr_ops);
int kvm_arch_vcpu_runnable(struct kvm_vcpu *v)
{
	return !!(v->arch.pending_exceptions) ||
	       v->requests;
}

int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
{
	return 1;
}
/*
 * Common checks before entering the guest world.  Call with interrupts
 * enabled.
 *
 * returns:
 *
 * == 1 if we're ready to go into guest state
 * <= 0 if we need to go back to the host with return value
 */
int kvmppc_prepare_to_enter(struct kvm_vcpu *vcpu)
{
	int r;

	WARN_ON(irqs_disabled());
	hard_irq_disable();

	while (true) {
		if (need_resched()) {
			local_irq_enable();
			cond_resched();
			hard_irq_disable();
			continue;
		}

		if (signal_pending(current)) {
			kvmppc_account_exit(vcpu, SIGNAL_EXITS);
			vcpu->run->exit_reason = KVM_EXIT_INTR;
			r = -EINTR;
			break;
		}

		vcpu->mode = IN_GUEST_MODE;

		/*
		 * Reading vcpu->requests must happen after setting vcpu->mode,
		 * so we don't miss a request because the requester sees
		 * OUTSIDE_GUEST_MODE and assumes we'll be checking requests
		 * before next entering the guest (and thus doesn't IPI).
		 * This also orders the write to mode from any reads
		 * to the page tables done while the VCPU is running.
		 * Please see the comment in kvm_flush_remote_tlbs.
		 */
		smp_mb();

		if (vcpu->requests) {
			/* Make sure we process requests preemptibly */
			local_irq_enable();
			trace_kvm_check_requests(vcpu);
			r = kvmppc_core_check_requests(vcpu);
			hard_irq_disable();
			if (r > 0)
				continue;
			break;
		}

		if (kvmppc_core_prepare_to_enter(vcpu)) {
			/* interrupts got enabled in between, so we
			   are back at square 1 */
			continue;
		}

		guest_enter_irqoff();
		return 1;
	}

	/* return to host */
	local_irq_enable();
	return r;
}
EXPORT_SYMBOL_GPL(kvmppc_prepare_to_enter);
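/*
 * Illustrative sketch (not part of the original file): the ordering
 * contract described above is two-sided.  A requester is expected to do
 * roughly the following (names follow the generic KVM pattern and are an
 * assumption here, not taken from this file):
 *
 *	kvm_make_request(KVM_REQ_FOO, vcpu);	// publish the request bit
 *	smp_mb();				// pairs with the barrier above
 *	if (vcpu->mode == IN_GUEST_MODE)
 *		kvm_vcpu_kick(vcpu);		// IPI only if already in guest
 *
 * Because kvmppc_prepare_to_enter() writes vcpu->mode before reading
 * vcpu->requests, at least one side observes the other's update, so a
 * request cannot be lost between the check here and guest entry.
 */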
#if defined(CONFIG_PPC_BOOK3S_64) && defined(CONFIG_KVM_BOOK3S_PR_POSSIBLE)
static void kvmppc_swab_shared(struct kvm_vcpu *vcpu)
{
	struct kvm_vcpu_arch_shared *shared = vcpu->arch.shared;
	int i;

	shared->sprg0 = swab64(shared->sprg0);
	shared->sprg1 = swab64(shared->sprg1);
	shared->sprg2 = swab64(shared->sprg2);
	shared->sprg3 = swab64(shared->sprg3);
	shared->srr0 = swab64(shared->srr0);
	shared->srr1 = swab64(shared->srr1);
	shared->dar = swab64(shared->dar);
	shared->msr = swab64(shared->msr);
	shared->dsisr = swab32(shared->dsisr);
	shared->int_pending = swab32(shared->int_pending);
	for (i = 0; i < ARRAY_SIZE(shared->sr); i++)
		shared->sr[i] = swab32(shared->sr[i]);
}
#endif
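/*
 * Illustrative note (not part of the original file): the shared page is
 * kept in the guest's current endianness, so when the guest flips
 * endianness every multi-byte field must be byte-swapped in place; for
 * example a big-endian sprg0 of 0x0000000012345678 reads back as
 * 0x7856341200000000 after swab64().  Only the 32/64-bit members appear
 * above because single-byte fields need no conversion.
 */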
int kvmppc_kvm_pv(struct kvm_vcpu *vcpu)
{
	int nr = kvmppc_get_gpr(vcpu, 11);
	int r;
	unsigned long __maybe_unused param1 = kvmppc_get_gpr(vcpu, 3);
	unsigned long __maybe_unused param2 = kvmppc_get_gpr(vcpu, 4);
	unsigned long __maybe_unused param3 = kvmppc_get_gpr(vcpu, 5);
	unsigned long __maybe_unused param4 = kvmppc_get_gpr(vcpu, 6);
	unsigned long r2 = 0;

	if (!(kvmppc_get_msr(vcpu) & MSR_SF)) {
		/* 32 bit mode */
		param1 &= 0xffffffff;
		param2 &= 0xffffffff;
		param3 &= 0xffffffff;
		param4 &= 0xffffffff;
	}

	switch (nr) {
	case KVM_HCALL_TOKEN(KVM_HC_PPC_MAP_MAGIC_PAGE):
	{
#if defined(CONFIG_PPC_BOOK3S_64) && defined(CONFIG_KVM_BOOK3S_PR_POSSIBLE)
		/* Book3S can be little endian, find it out here */
		int shared_big_endian = true;
		if (vcpu->arch.intr_msr & MSR_LE)
			shared_big_endian = false;
		if (shared_big_endian != vcpu->arch.shared_big_endian)
			kvmppc_swab_shared(vcpu);
		vcpu->arch.shared_big_endian = shared_big_endian;
#endif

		if (!(param2 & MAGIC_PAGE_FLAG_NOT_MAPPED_NX)) {
			/*
			 * Older versions of the Linux magic page code had
			 * a bug where they would map their trampoline code
			 * NX. If that's the case, remove !PR NX capability.
			 */
			vcpu->arch.disable_kernel_nx = true;
			kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
		}

		vcpu->arch.magic_page_pa = param1 & ~0xfffULL;
		vcpu->arch.magic_page_ea = param2 & ~0xfffULL;

#ifdef CONFIG_PPC_64K_PAGES
		/*
		 * Make sure our 4k magic page is in the same window of a 64k
		 * page within the guest and within the host's page.
		 */
		if ((vcpu->arch.magic_page_pa & 0xf000) !=
		    ((ulong)vcpu->arch.shared & 0xf000)) {
			void *old_shared = vcpu->arch.shared;
			ulong shared = (ulong)vcpu->arch.shared;
			void *new_shared;

			shared &= PAGE_MASK;
			shared |= vcpu->arch.magic_page_pa & 0xf000;
			new_shared = (void*)shared;
			memcpy(new_shared, old_shared, 0x1000);
			vcpu->arch.shared = new_shared;
		}
#endif

		r2 = KVM_MAGIC_FEAT_SR | KVM_MAGIC_FEAT_MAS0_TO_SPRG7;

		r = EV_SUCCESS;
		break;
	}
	case KVM_HCALL_TOKEN(KVM_HC_FEATURES):
		r = EV_SUCCESS;
#if defined(CONFIG_PPC_BOOK3S) || defined(CONFIG_KVM_E500V2)
		r2 |= (1 << KVM_FEATURE_MAGIC_PAGE);
#endif

		/* Second return value is in r4 */
		break;
	case EV_HCALL_TOKEN(EV_IDLE):
		r = EV_SUCCESS;
		kvm_vcpu_block(vcpu);
		clear_bit(KVM_REQ_UNHALT, &vcpu->requests);
		break;
	default:
		r = EV_UNIMPLEMENTED;
		break;
	}

	kvmppc_set_gpr(vcpu, 4, r2);

	return r;
}
EXPORT_SYMBOL_GPL(kvmppc_kvm_pv);
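/*
 * Illustrative sketch (not part of the original file): from the guest
 * side, the convention handled above puts the hypercall number in r11
 * and up to four parameters in r3-r6; status comes back in r3 and the
 * second return value (r2 here) in r4.  A guest mapping the magic page
 * therefore does roughly:
 *
 *	r11 = KVM_HCALL_TOKEN(KVM_HC_PPC_MAP_MAGIC_PAGE);
 *	r3  = magic_page_pa;	// param1: guest physical address
 *	r4  = magic_page_ea;	// param2: guest effective address
 *	sc			// trap to the host (see pvinfo below)
 *	// on return: r3 == EV_SUCCESS, r4 == feature bits
 *
 * The register assignments are a sketch of the ABI, not code from this
 * file.
 */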
int kvmppc_sanity_check(struct kvm_vcpu *vcpu)
{
	int r = false;

	/* We have to know what CPU to virtualize */
	if (!vcpu->arch.pvr)
		goto out;

	/* PAPR only works with book3s_64 */
	if ((vcpu->arch.cpu_type != KVM_CPU_3S_64) && vcpu->arch.papr_enabled)
		goto out;

	/* HV KVM can only do PAPR mode for now */
	if (!vcpu->arch.papr_enabled && is_kvmppc_hv_enabled(vcpu->kvm))
		goto out;

#ifdef CONFIG_KVM_BOOKE_HV
	if (!cpu_has_feature(CPU_FTR_EMB_HV))
		goto out;
#endif

	r = true;

out:
	vcpu->arch.sane = r;
	return r ? 0 : -EINVAL;
}
EXPORT_SYMBOL_GPL(kvmppc_sanity_check);
int kvmppc_emulate_mmio(struct kvm_run *run, struct kvm_vcpu *vcpu)
{
	enum emulation_result er;
	int r;

	er = kvmppc_emulate_loadstore(vcpu);
	switch (er) {
	case EMULATE_DONE:
		/* Future optimization: only reload non-volatiles if they were
		 * actually modified. */
		r = RESUME_GUEST_NV;
		break;
	case EMULATE_AGAIN:
		r = RESUME_GUEST;
		break;
	case EMULATE_DO_MMIO:
		run->exit_reason = KVM_EXIT_MMIO;
		/* We must reload nonvolatiles because "update" load/store
		 * instructions modify register state. */
		/* Future optimization: only reload non-volatiles if they were
		 * actually modified. */
		r = RESUME_HOST_NV;
		break;
	case EMULATE_FAIL:
	{
		u32 last_inst;

		kvmppc_get_last_inst(vcpu, INST_GENERIC, &last_inst);
		/* XXX Deliver Program interrupt to guest. */
		pr_emerg("%s: emulation failed (%08x)\n", __func__, last_inst);
		r = RESUME_HOST;
		break;
	}
	default:
		WARN_ON(1);
		r = RESUME_GUEST;
	}

	return r;
}
EXPORT_SYMBOL_GPL(kvmppc_emulate_mmio);
int kvmppc_st(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr,
	      bool data)
{
	ulong mp_pa = vcpu->arch.magic_page_pa & KVM_PAM & PAGE_MASK;
	struct kvmppc_pte pte;
	int r;

	vcpu->stat.st++;

	r = kvmppc_xlate(vcpu, *eaddr, data ? XLATE_DATA : XLATE_INST,
			 XLATE_WRITE, &pte);
	if (r < 0)
		return r;

	*eaddr = pte.raddr;

	if (!pte.may_write)
		return -EPERM;

	/* Magic page override */
	if (kvmppc_supports_magic_page(vcpu) && mp_pa &&
	    ((pte.raddr & KVM_PAM & PAGE_MASK) == mp_pa) &&
	    !(kvmppc_get_msr(vcpu) & MSR_PR)) {
		void *magic = vcpu->arch.shared;
		magic += pte.eaddr & 0xfff;
		memcpy(magic, ptr, size);
		return EMULATE_DONE;
	}

	if (kvm_write_guest(vcpu->kvm, pte.raddr, ptr, size))
		return EMULATE_DO_MMIO;

	return EMULATE_DONE;
}
EXPORT_SYMBOL_GPL(kvmppc_st);
int kvmppc_ld(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr,
	      bool data)
{
	ulong mp_pa = vcpu->arch.magic_page_pa & KVM_PAM & PAGE_MASK;
	struct kvmppc_pte pte;
	int rc;

	vcpu->stat.ld++;

	rc = kvmppc_xlate(vcpu, *eaddr, data ? XLATE_DATA : XLATE_INST,
			  XLATE_READ, &pte);
	if (rc)
		return rc;

	*eaddr = pte.raddr;

	if (!pte.may_read)
		return -EPERM;

	if (!data && !pte.may_execute)
		return -ENOEXEC;

	/* Magic page override */
	if (kvmppc_supports_magic_page(vcpu) && mp_pa &&
	    ((pte.raddr & KVM_PAM & PAGE_MASK) == mp_pa) &&
	    !(kvmppc_get_msr(vcpu) & MSR_PR)) {
		void *magic = vcpu->arch.shared;
		magic += pte.eaddr & 0xfff;
		memcpy(ptr, magic, size);
		return EMULATE_DONE;
	}

	if (kvm_read_guest(vcpu->kvm, pte.raddr, ptr, size))
		return EMULATE_DO_MMIO;

	return EMULATE_DONE;
}
EXPORT_SYMBOL_GPL(kvmppc_ld);
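/*
 * Illustrative note (not part of the original file): the "magic page
 * override" in kvmppc_st()/kvmppc_ld() short-circuits accesses that hit
 * the paravirt magic page.  Since that page is backed by
 * vcpu->arch.shared in host memory, the access is satisfied with a plain
 * memcpy() at offset (pte.eaddr & 0xfff) instead of going through
 * kvm_read_guest()/kvm_write_guest(), and the caller sees EMULATE_DONE
 * rather than EMULATE_DO_MMIO.
 */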
int kvm_arch_hardware_enable(void)
{
	return 0;
}

int kvm_arch_hardware_setup(void)
{
	return 0;
}

void kvm_arch_check_processor_compat(void *rtn)
{
	*(int *)rtn = kvmppc_core_check_processor_compat();
}
int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
{
	struct kvmppc_ops *kvm_ops = NULL;
	/*
	 * if we have both HV and PR enabled, default is HV
	 */
	if (type == 0) {
		if (kvmppc_hv_ops)
			kvm_ops = kvmppc_hv_ops;
		else
			kvm_ops = kvmppc_pr_ops;
		if (!kvm_ops)
			goto err_out;
	} else if (type == KVM_VM_PPC_HV) {
		if (!kvmppc_hv_ops)
			goto err_out;
		kvm_ops = kvmppc_hv_ops;
	} else if (type == KVM_VM_PPC_PR) {
		if (!kvmppc_pr_ops)
			goto err_out;
		kvm_ops = kvmppc_pr_ops;
	} else
		goto err_out;

	if (kvm_ops->owner && !try_module_get(kvm_ops->owner))
		return -ENOENT;

	kvm->arch.kvm_ops = kvm_ops;
	return kvmppc_core_init_vm(kvm);
err_out:
	return -EINVAL;
}
bool kvm_arch_has_vcpu_debugfs(void)
{
	return false;
}

int kvm_arch_create_vcpu_debugfs(struct kvm_vcpu *vcpu)
{
	return 0;
}
void kvm_arch_destroy_vm(struct kvm *kvm)
{
	unsigned int i;
	struct kvm_vcpu *vcpu;

#ifdef CONFIG_KVM_XICS
	/*
	 * We call kick_all_cpus_sync() to ensure that all
	 * CPUs have executed any pending IPIs before we
	 * continue and free VCPUs structures below.
	 */
	if (is_kvmppc_hv_enabled(kvm))
		kick_all_cpus_sync();
#endif

	kvm_for_each_vcpu(i, vcpu, kvm)
		kvm_arch_vcpu_free(vcpu);

	mutex_lock(&kvm->lock);
	for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
		kvm->vcpus[i] = NULL;

	atomic_set(&kvm->online_vcpus, 0);

	kvmppc_core_destroy_vm(kvm);

	mutex_unlock(&kvm->lock);

	/* drop the module reference */
	module_put(kvm->arch.kvm_ops->owner);
}
int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
{
	int r;
	/* Assume we're using HV mode when the HV module is loaded */
	int hv_enabled = kvmppc_hv_ops ? 1 : 0;

	if (kvm) {
		/*
		 * Hooray - we know which VM type we're running on. Depend on
		 * that rather than the guess above.
		 */
		hv_enabled = is_kvmppc_hv_enabled(kvm);
	}

	switch (ext) {
#ifdef CONFIG_BOOKE
	case KVM_CAP_PPC_BOOKE_SREGS:
	case KVM_CAP_PPC_BOOKE_WATCHDOG:
	case KVM_CAP_PPC_EPR:
#else
	case KVM_CAP_PPC_SEGSTATE:
	case KVM_CAP_PPC_HIOR:
	case KVM_CAP_PPC_PAPR:
#endif
	case KVM_CAP_PPC_UNSET_IRQ:
	case KVM_CAP_PPC_IRQ_LEVEL:
	case KVM_CAP_ENABLE_CAP:
	case KVM_CAP_ENABLE_CAP_VM:
	case KVM_CAP_ONE_REG:
	case KVM_CAP_IOEVENTFD:
	case KVM_CAP_DEVICE_CTRL:
	case KVM_CAP_IMMEDIATE_EXIT:
		r = 1;
		break;
	case KVM_CAP_PPC_PAIRED_SINGLES:
	case KVM_CAP_PPC_OSI:
	case KVM_CAP_PPC_GET_PVINFO:
#if defined(CONFIG_KVM_E500V2) || defined(CONFIG_KVM_E500MC)
	case KVM_CAP_SW_TLB:
#endif
		/* We support this only for PR */
		r = !hv_enabled;
		break;
#ifdef CONFIG_KVM_MMIO
	case KVM_CAP_COALESCED_MMIO:
		r = KVM_COALESCED_MMIO_PAGE_OFFSET;
		break;
#endif
#ifdef CONFIG_KVM_MPIC
	case KVM_CAP_IRQ_MPIC:
		r = 1;
		break;
#endif

#ifdef CONFIG_PPC_BOOK3S_64
	case KVM_CAP_SPAPR_TCE:
	case KVM_CAP_SPAPR_TCE_64:
	case KVM_CAP_PPC_RTAS:
	case KVM_CAP_PPC_FIXUP_HCALL:
	case KVM_CAP_PPC_ENABLE_HCALL:
#ifdef CONFIG_KVM_XICS
	case KVM_CAP_IRQ_XICS:
#endif
		r = 1;
		break;

	case KVM_CAP_PPC_ALLOC_HTAB:
		r = hv_enabled;
		break;
#endif /* CONFIG_PPC_BOOK3S_64 */
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
	case KVM_CAP_PPC_SMT:
		r = 0;
		if (hv_enabled) {
			if (cpu_has_feature(CPU_FTR_ARCH_300))
				r = 1;
			else
				r = threads_per_subcore;
		}
		break;
	case KVM_CAP_PPC_RMA:
		r = 0;
		break;
	case KVM_CAP_PPC_HWRNG:
		r = kvmppc_hwrng_present();
		break;
	case KVM_CAP_PPC_MMU_RADIX:
		r = !!(hv_enabled && radix_enabled());
		break;
	case KVM_CAP_PPC_MMU_HASH_V3:
		r = !!(hv_enabled && !radix_enabled() &&
		       cpu_has_feature(CPU_FTR_ARCH_300));
		break;
#endif
	case KVM_CAP_SYNC_MMU:
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
		r = hv_enabled;
#elif defined(KVM_ARCH_WANT_MMU_NOTIFIER)
		r = 1;
#else
		r = 0;
#endif
		break;
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
	case KVM_CAP_PPC_HTAB_FD:
		r = hv_enabled;
		break;
#endif
	case KVM_CAP_NR_VCPUS:
		/*
		 * Recommending a number of CPUs is somewhat arbitrary; we
		 * return the number of present CPUs for -HV (since a host
		 * will have secondary threads "offline"), and for other KVM
		 * implementations just count online CPUs.
		 */
		if (hv_enabled)
			r = num_present_cpus();
		else
			r = num_online_cpus();
		break;
	case KVM_CAP_NR_MEMSLOTS:
		r = KVM_USER_MEM_SLOTS;
		break;
	case KVM_CAP_MAX_VCPUS:
		r = KVM_MAX_VCPUS;
		break;
#ifdef CONFIG_PPC_BOOK3S_64
	case KVM_CAP_PPC_GET_SMMU_INFO:
		r = 1;
		break;
	case KVM_CAP_SPAPR_MULTITCE:
		r = 1;
		break;
	case KVM_CAP_SPAPR_RESIZE_HPT:
		/* Disable this on POWER9 until code handles new HPTE format */
		r = !!hv_enabled && !cpu_has_feature(CPU_FTR_ARCH_300);
		break;
#endif
	case KVM_CAP_PPC_HTM:
		r = cpu_has_feature(CPU_FTR_TM_COMP) &&
		    is_kvmppc_hv_enabled(kvm);
		break;
	default:
		r = 0;
		break;
	}

	return r;
}
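/*
 * Illustrative usage sketch (not part of the original file): userspace
 * queries these capabilities with the KVM_CHECK_EXTENSION ioctl, either
 * on /dev/kvm or, as handled here, on a VM fd so that the answer can
 * depend on the VM type (HV vs. PR):
 *
 *	int has_htab_fd = ioctl(vm_fd, KVM_CHECK_EXTENSION,
 *				KVM_CAP_PPC_HTAB_FD);
 *	// a positive return value means the capability is available
 */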
long kvm_arch_dev_ioctl(struct file *filp,
			unsigned int ioctl, unsigned long arg)
{
	return -EINVAL;
}
void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free,
			   struct kvm_memory_slot *dont)
{
	kvmppc_core_free_memslot(kvm, free, dont);
}

int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
			    unsigned long npages)
{
	return kvmppc_core_create_memslot(kvm, slot, npages);
}

int kvm_arch_prepare_memory_region(struct kvm *kvm,
				   struct kvm_memory_slot *memslot,
				   const struct kvm_userspace_memory_region *mem,
				   enum kvm_mr_change change)
{
	return kvmppc_core_prepare_memory_region(kvm, memslot, mem);
}

void kvm_arch_commit_memory_region(struct kvm *kvm,
				   const struct kvm_userspace_memory_region *mem,
				   const struct kvm_memory_slot *old,
				   const struct kvm_memory_slot *new,
				   enum kvm_mr_change change)
{
	kvmppc_core_commit_memory_region(kvm, mem, old, new);
}

void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
				   struct kvm_memory_slot *slot)
{
	kvmppc_core_flush_memslot(kvm, slot);
}
struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id)
{
	struct kvm_vcpu *vcpu;
	vcpu = kvmppc_core_vcpu_create(kvm, id);
	if (!IS_ERR(vcpu)) {
		vcpu->arch.wqp = &vcpu->wq;
		kvmppc_create_vcpu_debugfs(vcpu, id);
	}
	return vcpu;
}

void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
{
}
void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu)
{
	/* Make sure we're not using the vcpu anymore */
	hrtimer_cancel(&vcpu->arch.dec_timer);

	kvmppc_remove_vcpu_debugfs(vcpu);

	switch (vcpu->arch.irq_type) {
	case KVMPPC_IRQ_MPIC:
		kvmppc_mpic_disconnect_vcpu(vcpu->arch.mpic, vcpu);
		break;
	case KVMPPC_IRQ_XICS:
		kvmppc_xics_free_icp(vcpu);
		break;
	}

	kvmppc_core_vcpu_free(vcpu);
}

void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{
	kvm_arch_vcpu_free(vcpu);
}
int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
{
	return kvmppc_core_pending_dec(vcpu);
}

static enum hrtimer_restart kvmppc_decrementer_wakeup(struct hrtimer *timer)
{
	struct kvm_vcpu *vcpu;

	vcpu = container_of(timer, struct kvm_vcpu, arch.dec_timer);
	kvmppc_decrementer_func(vcpu);

	return HRTIMER_NORESTART;
}
int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
{
	int ret;

	hrtimer_init(&vcpu->arch.dec_timer, CLOCK_REALTIME, HRTIMER_MODE_ABS);
	vcpu->arch.dec_timer.function = kvmppc_decrementer_wakeup;
	vcpu->arch.dec_expires = ~(u64)0;

#ifdef CONFIG_KVM_EXIT_TIMING
	mutex_init(&vcpu->arch.exit_timing_lock);
#endif
	ret = kvmppc_subarch_vcpu_init(vcpu);
	return ret;
}

void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
{
	kvmppc_mmu_destroy(vcpu);
	kvmppc_subarch_vcpu_uninit(vcpu);
}
void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
#ifdef CONFIG_BOOKE
	/*
	 * vrsave (formerly usprg0) isn't used by Linux, but may
	 * be used by the guest.
	 *
	 * On non-booke this is associated with Altivec and
	 * is handled by code in book3s.c.
	 */
	mtspr(SPRN_VRSAVE, vcpu->arch.vrsave);
#endif
	kvmppc_core_vcpu_load(vcpu, cpu);
}

void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
	kvmppc_core_vcpu_put(vcpu);
#ifdef CONFIG_BOOKE
	vcpu->arch.vrsave = mfspr(SPRN_VRSAVE);
#endif
}
/*
 * irq_bypass_add_producer and irq_bypass_del_producer are only
 * useful if the architecture supports PCI passthrough.
 * irq_bypass_stop and irq_bypass_start are not needed and so
 * kvm_ops are not defined for them.
 */
bool kvm_arch_has_irq_bypass(void)
{
	return ((kvmppc_hv_ops && kvmppc_hv_ops->irq_bypass_add_producer) ||
		(kvmppc_pr_ops && kvmppc_pr_ops->irq_bypass_add_producer));
}

int kvm_arch_irq_bypass_add_producer(struct irq_bypass_consumer *cons,
				     struct irq_bypass_producer *prod)
{
	struct kvm_kernel_irqfd *irqfd =
		container_of(cons, struct kvm_kernel_irqfd, consumer);
	struct kvm *kvm = irqfd->kvm;

	if (kvm->arch.kvm_ops->irq_bypass_add_producer)
		return kvm->arch.kvm_ops->irq_bypass_add_producer(cons, prod);

	return 0;
}

void kvm_arch_irq_bypass_del_producer(struct irq_bypass_consumer *cons,
				      struct irq_bypass_producer *prod)
{
	struct kvm_kernel_irqfd *irqfd =
		container_of(cons, struct kvm_kernel_irqfd, consumer);
	struct kvm *kvm = irqfd->kvm;

	if (kvm->arch.kvm_ops->irq_bypass_del_producer)
		kvm->arch.kvm_ops->irq_bypass_del_producer(cons, prod);
}
static void kvmppc_complete_mmio_load(struct kvm_vcpu *vcpu,
				      struct kvm_run *run)
{
	u64 uninitialized_var(gpr);

	if (run->mmio.len > sizeof(gpr)) {
		printk(KERN_ERR "bad MMIO length: %d\n", run->mmio.len);
		return;
	}

	if (!vcpu->arch.mmio_host_swabbed) {
		switch (run->mmio.len) {
		case 8: gpr = *(u64 *)run->mmio.data; break;
		case 4: gpr = *(u32 *)run->mmio.data; break;
		case 2: gpr = *(u16 *)run->mmio.data; break;
		case 1: gpr = *(u8 *)run->mmio.data; break;
		}
	} else {
		switch (run->mmio.len) {
		case 8: gpr = swab64(*(u64 *)run->mmio.data); break;
		case 4: gpr = swab32(*(u32 *)run->mmio.data); break;
		case 2: gpr = swab16(*(u16 *)run->mmio.data); break;
		case 1: gpr = *(u8 *)run->mmio.data; break;
		}
	}

	if (vcpu->arch.mmio_sign_extend) {
		switch (run->mmio.len) {
#ifdef CONFIG_PPC64
		case 4:
			gpr = (s64)(s32)gpr;
			break;
#endif
		case 2:
			gpr = (s64)(s16)gpr;
			break;
		case 1:
			gpr = (s64)(s8)gpr;
			break;
		}
	}

	kvmppc_set_gpr(vcpu, vcpu->arch.io_gpr, gpr);

	switch (vcpu->arch.io_gpr & KVM_MMIO_REG_EXT_MASK) {
	case KVM_MMIO_REG_GPR:
		kvmppc_set_gpr(vcpu, vcpu->arch.io_gpr, gpr);
		break;
	case KVM_MMIO_REG_FPR:
		VCPU_FPR(vcpu, vcpu->arch.io_gpr & KVM_MMIO_REG_MASK) = gpr;
		break;
#ifdef CONFIG_PPC_BOOK3S
	case KVM_MMIO_REG_QPR:
		vcpu->arch.qpr[vcpu->arch.io_gpr & KVM_MMIO_REG_MASK] = gpr;
		break;
	case KVM_MMIO_REG_FQPR:
		VCPU_FPR(vcpu, vcpu->arch.io_gpr & KVM_MMIO_REG_MASK) = gpr;
		vcpu->arch.qpr[vcpu->arch.io_gpr & KVM_MMIO_REG_MASK] = gpr;
		break;
#endif
	default:
		BUG();
	}
}
static int __kvmppc_handle_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
				unsigned int rt, unsigned int bytes,
				int is_default_endian, int sign_extend)
{
	int idx, ret;
	bool host_swabbed;

	/* Pity C doesn't have a logical XOR operator */
	if (kvmppc_need_byteswap(vcpu)) {
		host_swabbed = is_default_endian;
	} else {
		host_swabbed = !is_default_endian;
	}

	if (bytes > sizeof(run->mmio.data)) {
		printk(KERN_ERR "%s: bad MMIO length: %d\n", __func__,
		       run->mmio.len);
	}

	run->mmio.phys_addr = vcpu->arch.paddr_accessed;
	run->mmio.len = bytes;
	run->mmio.is_write = 0;

	vcpu->arch.io_gpr = rt;
	vcpu->arch.mmio_host_swabbed = host_swabbed;
	vcpu->mmio_needed = 1;
	vcpu->mmio_is_write = 0;
	vcpu->arch.mmio_sign_extend = sign_extend;

	idx = srcu_read_lock(&vcpu->kvm->srcu);

	ret = kvm_io_bus_read(vcpu, KVM_MMIO_BUS, run->mmio.phys_addr,
			      bytes, &run->mmio.data);

	srcu_read_unlock(&vcpu->kvm->srcu, idx);

	if (!ret) {
		kvmppc_complete_mmio_load(vcpu, run);
		vcpu->mmio_needed = 0;
		return EMULATE_DONE;
	}

	return EMULATE_DO_MMIO;
}

int kvmppc_handle_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
		       unsigned int rt, unsigned int bytes,
		       int is_default_endian)
{
	return __kvmppc_handle_load(run, vcpu, rt, bytes, is_default_endian, 0);
}
EXPORT_SYMBOL_GPL(kvmppc_handle_load);

/* Same as above, but sign extends */
int kvmppc_handle_loads(struct kvm_run *run, struct kvm_vcpu *vcpu,
			unsigned int rt, unsigned int bytes,
			int is_default_endian)
{
	return __kvmppc_handle_load(run, vcpu, rt, bytes, is_default_endian, 1);
}
int kvmppc_handle_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
			u64 val, unsigned int bytes, int is_default_endian)
{
	void *data = run->mmio.data;
	int idx, ret;
	bool host_swabbed;

	/* Pity C doesn't have a logical XOR operator */
	if (kvmppc_need_byteswap(vcpu)) {
		host_swabbed = is_default_endian;
	} else {
		host_swabbed = !is_default_endian;
	}

	if (bytes > sizeof(run->mmio.data)) {
		printk(KERN_ERR "%s: bad MMIO length: %d\n", __func__,
		       run->mmio.len);
	}

	run->mmio.phys_addr = vcpu->arch.paddr_accessed;
	run->mmio.len = bytes;
	run->mmio.is_write = 1;
	vcpu->mmio_needed = 1;
	vcpu->mmio_is_write = 1;

	/* Store the value at the lowest bytes in 'data'. */
	if (!host_swabbed) {
		switch (bytes) {
		case 8: *(u64 *)data = val; break;
		case 4: *(u32 *)data = val; break;
		case 2: *(u16 *)data = val; break;
		case 1: *(u8  *)data = val; break;
		}
	} else {
		switch (bytes) {
		case 8: *(u64 *)data = swab64(val); break;
		case 4: *(u32 *)data = swab32(val); break;
		case 2: *(u16 *)data = swab16(val); break;
		case 1: *(u8  *)data = val; break;
		}
	}

	idx = srcu_read_lock(&vcpu->kvm->srcu);

	ret = kvm_io_bus_write(vcpu, KVM_MMIO_BUS, run->mmio.phys_addr,
			       bytes, &run->mmio.data);

	srcu_read_unlock(&vcpu->kvm->srcu, idx);

	if (!ret) {
		vcpu->mmio_needed = 0;
		return EMULATE_DONE;
	}

	return EMULATE_DO_MMIO;
}
EXPORT_SYMBOL_GPL(kvmppc_handle_store);
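/*
 * Illustrative note (not part of the original file): host_swabbed in the
 * load/store paths above combines "does the guest's byte order differ
 * from the host's" with "is this access in the guest's default
 * endianness"; the if/else stands in for the logical XOR that C lacks.
 * When host_swabbed is set, the bytes placed in (or taken from)
 * run->mmio.data are swab16/32/64'd so that the guest register always
 * sees the value in the guest's current byte order.
 */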
int kvm_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
{
	int r;
	union kvmppc_one_reg val;
	int size;

	size = one_reg_size(reg->id);
	if (size > sizeof(val))
		return -EINVAL;

	r = kvmppc_get_one_reg(vcpu, reg->id, &val);
	if (r == -EINVAL) {
		r = 0;
		switch (reg->id) {
#ifdef CONFIG_ALTIVEC
		case KVM_REG_PPC_VR0 ... KVM_REG_PPC_VR31:
			if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
				r = -ENXIO;
				break;
			}
			val.vval = vcpu->arch.vr.vr[reg->id - KVM_REG_PPC_VR0];
			break;
		case KVM_REG_PPC_VSCR:
			if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
				r = -ENXIO;
				break;
			}
			val = get_reg_val(reg->id, vcpu->arch.vr.vscr.u[3]);
			break;
		case KVM_REG_PPC_VRSAVE:
			val = get_reg_val(reg->id, vcpu->arch.vrsave);
			break;
#endif /* CONFIG_ALTIVEC */
		default:
			r = -EINVAL;
			break;
		}
	}

	if (r)
		return r;

	if (copy_to_user((char __user *)(unsigned long)reg->addr, &val, size))
		r = -EFAULT;

	return r;
}
int kvm_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
{
	int r;
	union kvmppc_one_reg val;
	int size;

	size = one_reg_size(reg->id);
	if (size > sizeof(val))
		return -EINVAL;

	if (copy_from_user(&val, (char __user *)(unsigned long)reg->addr, size))
		return -EFAULT;

	r = kvmppc_set_one_reg(vcpu, reg->id, &val);
	if (r == -EINVAL) {
		r = 0;
		switch (reg->id) {
#ifdef CONFIG_ALTIVEC
		case KVM_REG_PPC_VR0 ... KVM_REG_PPC_VR31:
			if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
				r = -ENXIO;
				break;
			}
			vcpu->arch.vr.vr[reg->id - KVM_REG_PPC_VR0] = val.vval;
			break;
		case KVM_REG_PPC_VSCR:
			if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
				r = -ENXIO;
				break;
			}
			vcpu->arch.vr.vscr.u[3] = set_reg_val(reg->id, val);
			break;
		case KVM_REG_PPC_VRSAVE:
			if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
				r = -ENXIO;
				break;
			}
			vcpu->arch.vrsave = set_reg_val(reg->id, val);
			break;
#endif /* CONFIG_ALTIVEC */
		default:
			r = -EINVAL;
			break;
		}
	}

	return r;
}
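/*
 * Illustrative usage sketch (not part of the original file): userspace
 * drives the two handlers above through the KVM_GET_ONE_REG and
 * KVM_SET_ONE_REG vcpu ioctls, passing a struct kvm_one_reg whose id
 * encodes the register and size and whose addr points at the buffer:
 *
 *	u64 vrsave;
 *	struct kvm_one_reg reg = {
 *		.id   = KVM_REG_PPC_VRSAVE,
 *		.addr = (uintptr_t)&vrsave,
 *	};
 *	ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);	// fills 'vrsave'
 */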
int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	int r;
	sigset_t sigsaved;

	if (vcpu->sigset_active)
		sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);

	if (vcpu->mmio_needed) {
		if (!vcpu->mmio_is_write)
			kvmppc_complete_mmio_load(vcpu, run);
		vcpu->mmio_needed = 0;
	} else if (vcpu->arch.osi_needed) {
		u64 *gprs = run->osi.gprs;
		int i;

		for (i = 0; i < 32; i++)
			kvmppc_set_gpr(vcpu, i, gprs[i]);
		vcpu->arch.osi_needed = 0;
	} else if (vcpu->arch.hcall_needed) {
		int i;

		kvmppc_set_gpr(vcpu, 3, run->papr_hcall.ret);
		for (i = 0; i < 9; ++i)
			kvmppc_set_gpr(vcpu, 4 + i, run->papr_hcall.args[i]);
		vcpu->arch.hcall_needed = 0;
#ifdef CONFIG_BOOKE
	} else if (vcpu->arch.epr_needed) {
		kvmppc_set_epr(vcpu, run->epr.epr);
		vcpu->arch.epr_needed = 0;
#endif
	}

	if (run->immediate_exit)
		r = -EINTR;
	else
		r = kvmppc_vcpu_run(run, vcpu);

	if (vcpu->sigset_active)
		sigprocmask(SIG_SETMASK, &sigsaved, NULL);

	return r;
}
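/*
 * Illustrative note (not part of the original file): the blocks at the
 * top of kvm_arch_vcpu_ioctl_run() finish work that caused the previous
 * KVM_RUN to exit to userspace.  For an MMIO read, for instance, the
 * earlier exit filled run->mmio with the address and length, userspace
 * emulated the device and wrote the result into run->mmio.data, and on
 * re-entry kvmppc_complete_mmio_load() copies that result into the
 * guest register recorded in vcpu->arch.io_gpr before the guest runs
 * again.
 */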
int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu, struct kvm_interrupt *irq)
{
	if (irq->irq == KVM_INTERRUPT_UNSET) {
		kvmppc_core_dequeue_external(vcpu);
		return 0;
	}

	kvmppc_core_queue_external(vcpu, irq);

	kvm_vcpu_kick(vcpu);

	return 0;
}
static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
				     struct kvm_enable_cap *cap)
{
	int r;

	if (cap->flags)
		return -EINVAL;

	switch (cap->cap) {
	case KVM_CAP_PPC_OSI:
		r = 0;
		vcpu->arch.osi_enabled = true;
		break;
	case KVM_CAP_PPC_PAPR:
		r = 0;
		vcpu->arch.papr_enabled = true;
		break;
	case KVM_CAP_PPC_EPR:
		r = 0;
		if (cap->args[0])
			vcpu->arch.epr_flags |= KVMPPC_EPR_USER;
		else
			vcpu->arch.epr_flags &= ~KVMPPC_EPR_USER;
		break;
#ifdef CONFIG_BOOKE
	case KVM_CAP_PPC_BOOKE_WATCHDOG:
		r = 0;
		vcpu->arch.watchdog_enabled = true;
		break;
#endif
#if defined(CONFIG_KVM_E500V2) || defined(CONFIG_KVM_E500MC)
	case KVM_CAP_SW_TLB: {
		struct kvm_config_tlb cfg;
		void __user *user_ptr = (void __user *)(uintptr_t)cap->args[0];

		r = -EFAULT;
		if (copy_from_user(&cfg, user_ptr, sizeof(cfg)))
			break;

		r = kvm_vcpu_ioctl_config_tlb(vcpu, &cfg);
		break;
	}
#endif
#ifdef CONFIG_KVM_MPIC
	case KVM_CAP_IRQ_MPIC: {
		struct fd f;
		struct kvm_device *dev;

		r = -EBADF;
		f = fdget(cap->args[0]);
		if (!f.file)
			break;

		r = -EPERM;
		dev = kvm_device_from_filp(f.file);
		if (dev)
			r = kvmppc_mpic_connect_vcpu(dev, vcpu, cap->args[1]);

		fdput(f);
		break;
	}
#endif
#ifdef CONFIG_KVM_XICS
	case KVM_CAP_IRQ_XICS: {
		struct fd f;
		struct kvm_device *dev;

		r = -EBADF;
		f = fdget(cap->args[0]);
		if (!f.file)
			break;

		r = -EPERM;
		dev = kvm_device_from_filp(f.file);
		if (dev)
			r = kvmppc_xics_connect_vcpu(dev, vcpu, cap->args[1]);

		fdput(f);
		break;
	}
#endif /* CONFIG_KVM_XICS */
	default:
		r = -EINVAL;
		break;
	}

	if (!r)
		r = kvmppc_sanity_check(vcpu);

	return r;
}
bool kvm_arch_intc_initialized(struct kvm *kvm)
{
#ifdef CONFIG_KVM_MPIC
	if (kvm->arch.mpic)
		return true;
#endif
#ifdef CONFIG_KVM_XICS
	if (kvm->arch.xics)
		return true;
#endif
	return false;
}

int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	return -EINVAL;
}

int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	return -EINVAL;
}
long kvm_arch_vcpu_ioctl(struct file *filp,
			 unsigned int ioctl, unsigned long arg)
{
	struct kvm_vcpu *vcpu = filp->private_data;
	void __user *argp = (void __user *)arg;
	long r;

	switch (ioctl) {
	case KVM_INTERRUPT: {
		struct kvm_interrupt irq;
		r = -EFAULT;
		if (copy_from_user(&irq, argp, sizeof(irq)))
			goto out;
		r = kvm_vcpu_ioctl_interrupt(vcpu, &irq);
		goto out;
	}

	case KVM_ENABLE_CAP:
	{
		struct kvm_enable_cap cap;
		r = -EFAULT;
		if (copy_from_user(&cap, argp, sizeof(cap)))
			goto out;
		r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap);
		break;
	}

	case KVM_SET_ONE_REG:
	case KVM_GET_ONE_REG:
	{
		struct kvm_one_reg reg;
		r = -EFAULT;
		if (copy_from_user(&reg, argp, sizeof(reg)))
			goto out;
		if (ioctl == KVM_SET_ONE_REG)
			r = kvm_vcpu_ioctl_set_one_reg(vcpu, &reg);
		else
			r = kvm_vcpu_ioctl_get_one_reg(vcpu, &reg);
		break;
	}

#if defined(CONFIG_KVM_E500V2) || defined(CONFIG_KVM_E500MC)
	case KVM_DIRTY_TLB: {
		struct kvm_dirty_tlb dirty;
		r = -EFAULT;
		if (copy_from_user(&dirty, argp, sizeof(dirty)))
			goto out;
		r = kvm_vcpu_ioctl_dirty_tlb(vcpu, &dirty);
		break;
	}
#endif
	default:
		r = -EINVAL;
	}

out:
	return r;
}

int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
{
	return VM_FAULT_SIGBUS;
}
static int kvm_vm_ioctl_get_pvinfo(struct kvm_ppc_pvinfo *pvinfo)
{
	u32 inst_nop = 0x60000000;
#ifdef CONFIG_KVM_BOOKE_HV
	u32 inst_sc1 = 0x44000022;
	pvinfo->hcall[0] = cpu_to_be32(inst_sc1);
	pvinfo->hcall[1] = cpu_to_be32(inst_nop);
	pvinfo->hcall[2] = cpu_to_be32(inst_nop);
	pvinfo->hcall[3] = cpu_to_be32(inst_nop);
#else
	u32 inst_lis = 0x3c000000;
	u32 inst_ori = 0x60000000;
	u32 inst_sc = 0x44000002;
	u32 inst_imm_mask = 0xffff;

	/*
	 * The hypercall to get into KVM from within guest context is as
	 * follows:
	 *
	 *    lis r0, r0, KVM_SC_MAGIC_R0@h
	 *    ori r0, KVM_SC_MAGIC_R0@l
	 *    sc
	 *    nop
	 */
	pvinfo->hcall[0] = cpu_to_be32(inst_lis | ((KVM_SC_MAGIC_R0 >> 16) & inst_imm_mask));
	pvinfo->hcall[1] = cpu_to_be32(inst_ori | (KVM_SC_MAGIC_R0 & inst_imm_mask));
	pvinfo->hcall[2] = cpu_to_be32(inst_sc);
	pvinfo->hcall[3] = cpu_to_be32(inst_nop);
#endif

	pvinfo->flags = KVM_PPC_PVINFO_FLAGS_EV_IDLE;

	return 0;
}
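/*
 * Illustrative note (not part of the original file): the constants above
 * are raw PowerPC instruction encodings.  0x3c000000 is "lis r0, 0" and
 * 0x60000000 is "ori r0, r0, 0" (which doubles as the canonical nop), so
 * OR-ing in the high and low halves of KVM_SC_MAGIC_R0 produces a
 * two-instruction sequence that loads the magic value into r0 before the
 * "sc" trap (0x44000002).  Guests copy this four-word template from
 * pvinfo->hcall[] and branch to it to issue hypercalls.
 */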
int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_event,
			  bool line_status)
{
	if (!irqchip_in_kernel(kvm))
		return -ENXIO;

	irq_event->status = kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID,
					irq_event->irq, irq_event->level,
					line_status);
	return 0;
}
static int kvm_vm_ioctl_enable_cap(struct kvm *kvm,
				   struct kvm_enable_cap *cap)
{
	int r;

	if (cap->flags)
		return -EINVAL;

	switch (cap->cap) {
#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
	case KVM_CAP_PPC_ENABLE_HCALL: {
		unsigned long hcall = cap->args[0];

		r = -EINVAL;
		if (hcall > MAX_HCALL_OPCODE || (hcall & 3) ||
		    cap->args[1] > 1)
			break;
		if (!kvmppc_book3s_hcall_implemented(kvm, hcall))
			break;
		if (cap->args[1])
			set_bit(hcall / 4, kvm->arch.enabled_hcalls);
		else
			clear_bit(hcall / 4, kvm->arch.enabled_hcalls);
		r = 0;
		break;
	}
#endif
	default:
		r = -EINVAL;
		break;
	}

	return r;
}
long kvm_arch_vm_ioctl(struct file *filp,
		       unsigned int ioctl, unsigned long arg)
{
	struct kvm *kvm __maybe_unused = filp->private_data;
	void __user *argp = (void __user *)arg;
	long r;

	switch (ioctl) {
	case KVM_PPC_GET_PVINFO: {
		struct kvm_ppc_pvinfo pvinfo;
		memset(&pvinfo, 0, sizeof(pvinfo));
		r = kvm_vm_ioctl_get_pvinfo(&pvinfo);
		if (copy_to_user(argp, &pvinfo, sizeof(pvinfo))) {
			r = -EFAULT;
			goto out;
		}

		break;
	}
	case KVM_ENABLE_CAP:
	{
		struct kvm_enable_cap cap;
		r = -EFAULT;
		if (copy_from_user(&cap, argp, sizeof(cap)))
			goto out;
		r = kvm_vm_ioctl_enable_cap(kvm, &cap);
		break;
	}
#ifdef CONFIG_PPC_BOOK3S_64
	case KVM_CREATE_SPAPR_TCE_64: {
		struct kvm_create_spapr_tce_64 create_tce_64;

		r = -EFAULT;
		if (copy_from_user(&create_tce_64, argp, sizeof(create_tce_64)))
			goto out;
		if (create_tce_64.flags) {
			r = -EINVAL;
			goto out;
		}
		r = kvm_vm_ioctl_create_spapr_tce(kvm, &create_tce_64);
		goto out;
	}
	case KVM_CREATE_SPAPR_TCE: {
		struct kvm_create_spapr_tce create_tce;
		struct kvm_create_spapr_tce_64 create_tce_64;

		r = -EFAULT;
		if (copy_from_user(&create_tce, argp, sizeof(create_tce)))
			goto out;

		create_tce_64.liobn = create_tce.liobn;
		create_tce_64.page_shift = IOMMU_PAGE_SHIFT_4K;
		create_tce_64.offset = 0;
		create_tce_64.size = create_tce.window_size >>
				IOMMU_PAGE_SHIFT_4K;
		create_tce_64.flags = 0;
		r = kvm_vm_ioctl_create_spapr_tce(kvm, &create_tce_64);
		goto out;
	}
	case KVM_PPC_GET_SMMU_INFO: {
		struct kvm_ppc_smmu_info info;
		struct kvm *kvm = filp->private_data;

		memset(&info, 0, sizeof(info));
		r = kvm->arch.kvm_ops->get_smmu_info(kvm, &info);
		if (r >= 0 && copy_to_user(argp, &info, sizeof(info)))
			r = -EFAULT;
		break;
	}
	case KVM_PPC_RTAS_DEFINE_TOKEN: {
		struct kvm *kvm = filp->private_data;

		r = kvm_vm_ioctl_rtas_define_token(kvm, argp);
		break;
	}
	case KVM_PPC_CONFIGURE_V3_MMU: {
		struct kvm *kvm = filp->private_data;
		struct kvm_ppc_mmuv3_cfg cfg;

		r = -EINVAL;
		if (!kvm->arch.kvm_ops->configure_mmu)
			break;
		r = -EFAULT;
		if (copy_from_user(&cfg, argp, sizeof(cfg)))
			break;
		r = kvm->arch.kvm_ops->configure_mmu(kvm, &cfg);
		break;
	}
	case KVM_PPC_GET_RMMU_INFO: {
		struct kvm *kvm = filp->private_data;
		struct kvm_ppc_rmmu_info info;

		r = -EINVAL;
		if (!kvm->arch.kvm_ops->get_rmmu_info)
			break;
		r = kvm->arch.kvm_ops->get_rmmu_info(kvm, &info);
		if (r >= 0 && copy_to_user(argp, &info, sizeof(info)))
			r = -EFAULT;
		break;
	}
	default: {
		struct kvm *kvm = filp->private_data;
		r = kvm->arch.kvm_ops->arch_vm_ioctl(filp, ioctl, arg);
	}
#else /* CONFIG_PPC_BOOK3S_64 */
	default:
		r = -ENOTTY;
#endif
	}
out:
	return r;
}
static unsigned long lpid_inuse[BITS_TO_LONGS(KVMPPC_NR_LPIDS)];
static unsigned long nr_lpids;

long kvmppc_alloc_lpid(void)
{
	long lpid;

	do {
		lpid = find_first_zero_bit(lpid_inuse, KVMPPC_NR_LPIDS);
		if (lpid >= nr_lpids) {
			pr_err("%s: No LPIDs free\n", __func__);
			return -ENOMEM;
		}
	} while (test_and_set_bit(lpid, lpid_inuse));

	return lpid;
}
EXPORT_SYMBOL_GPL(kvmppc_alloc_lpid);

void kvmppc_claim_lpid(long lpid)
{
	set_bit(lpid, lpid_inuse);
}
EXPORT_SYMBOL_GPL(kvmppc_claim_lpid);

void kvmppc_free_lpid(long lpid)
{
	clear_bit(lpid, lpid_inuse);
}
EXPORT_SYMBOL_GPL(kvmppc_free_lpid);

void kvmppc_init_lpid(unsigned long nr_lpids_param)
{
	nr_lpids = min_t(unsigned long, KVMPPC_NR_LPIDS, nr_lpids_param);
	memset(lpid_inuse, 0, sizeof(lpid_inuse));
}
EXPORT_SYMBOL_GPL(kvmppc_init_lpid);
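/*
 * Illustrative note (not part of the original file): the LPID allocator
 * above is a plain bitmap.  find_first_zero_bit() proposes a candidate
 * and test_and_set_bit() claims it atomically; if another caller raced
 * in and took the same bit first, the do/while loop simply retries with
 * the next free bit, so no extra locking is needed around allocation.
 */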
int kvm_arch_init(void *opaque)
{
	return 0;
}

EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_ppc_instr);