/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
 *
 * Copyright IBM Corp. 2007
 *
 * Authors: Hollis Blanchard <hollisb@us.ibm.com>
 *          Christian Ehrhardt <ehrhardt@linux.vnet.ibm.com>
 */

#include <linux/errno.h>
#include <linux/err.h>
#include <linux/kvm_host.h>
#include <linux/vmalloc.h>
#include <linux/hrtimer.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/file.h>
#include <asm/cputable.h>
#include <asm/uaccess.h>
#include <asm/kvm_ppc.h>
#include <asm/tlbflush.h>
#include <asm/cputhreads.h>
#include <asm/irqflags.h>
#include "timing.h"
#include "irq.h"
#include "../mm/mmu_decl.h"

#define CREATE_TRACE_POINTS
#include "trace.h"

struct kvmppc_ops *kvmppc_ops;

int kvm_arch_vcpu_runnable(struct kvm_vcpu *v)
{
	return !!(v->arch.pending_exceptions) ||
	       v->requests;
}

int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
{
	return 1;
}

#ifndef CONFIG_KVM_BOOK3S_64_HV
/*
 * Common checks before entering the guest world.  Call with interrupts
 * disabled.
 *
 * returns:
 *
 * == 1 if we're ready to go into guest state
 * <= 0 if we need to go back to the host with return value
 */
int kvmppc_prepare_to_enter(struct kvm_vcpu *vcpu)
{
	int r = 1;

	WARN_ON_ONCE(!irqs_disabled());
	while (true) {
		if (need_resched()) {
			local_irq_enable();
			cond_resched();
			local_irq_disable();
			continue;
		}

		if (signal_pending(current)) {
			kvmppc_account_exit(vcpu, SIGNAL_EXITS);
			vcpu->run->exit_reason = KVM_EXIT_INTR;
			r = -EINTR;
			break;
		}

		vcpu->mode = IN_GUEST_MODE;

		/*
		 * Reading vcpu->requests must happen after setting vcpu->mode,
		 * so we don't miss a request because the requester sees
		 * OUTSIDE_GUEST_MODE and assumes we'll be checking requests
		 * before next entering the guest (and thus doesn't IPI).
		 */
		smp_mb();

		if (vcpu->requests) {
			/* Make sure we process requests preemptable */
			local_irq_enable();
			trace_kvm_check_requests(vcpu);
			r = kvmppc_core_check_requests(vcpu);
			local_irq_disable();
			if (r > 0)
				continue;
			break;
		}

		if (kvmppc_core_prepare_to_enter(vcpu)) {
			/* interrupts got enabled in between, so we
			   are back at square 1 */
			continue;
		}

#ifdef CONFIG_PPC64
		/* lazy EE magic */
		hard_irq_disable();
		if (lazy_irq_pending()) {
			/* Got an interrupt in between, try again */
			local_irq_enable();
			local_irq_disable();
			continue;
		}
#endif

		kvm_guest_enter();
		break;
	}

	return r;
}
#endif /* CONFIG_KVM_BOOK3S_64_HV */
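
/*
 * Illustration only (not part of the original flow): the barrier comment
 * in kvmppc_prepare_to_enter() pairs with a requester that stores its
 * request *before* testing vcpu->mode to decide whether to IPI. A minimal
 * sketch of such a requester, assuming only the generic
 * kvm_make_request()/kvm_vcpu_kick() helpers; the particular request
 * chosen here is arbitrary:
 */
static void __maybe_unused kvmppc_sketch_request_side(struct kvm_vcpu *vcpu)
{
	/* Store the request first ... */
	kvm_make_request(KVM_REQ_WATCHDOG, vcpu);
	/* ... then read vcpu->mode and IPI only if the vcpu is in guest. */
	kvm_vcpu_kick(vcpu);
}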

int kvmppc_kvm_pv(struct kvm_vcpu *vcpu)
{
	int nr = kvmppc_get_gpr(vcpu, 11);
	int r;
	unsigned long __maybe_unused param1 = kvmppc_get_gpr(vcpu, 3);
	unsigned long __maybe_unused param2 = kvmppc_get_gpr(vcpu, 4);
	unsigned long __maybe_unused param3 = kvmppc_get_gpr(vcpu, 5);
	unsigned long __maybe_unused param4 = kvmppc_get_gpr(vcpu, 6);
	unsigned long r2 = 0;

	if (!(vcpu->arch.shared->msr & MSR_SF)) {
		/* 32 bit mode */
		param1 &= 0xffffffff;
		param2 &= 0xffffffff;
		param3 &= 0xffffffff;
		param4 &= 0xffffffff;
	}

	switch (nr) {
	case KVM_HCALL_TOKEN(KVM_HC_PPC_MAP_MAGIC_PAGE):
	{
		vcpu->arch.magic_page_pa = param1;
		vcpu->arch.magic_page_ea = param2;

		r2 = KVM_MAGIC_FEAT_SR | KVM_MAGIC_FEAT_MAS0_TO_SPRG7;

		r = EV_SUCCESS;
		break;
	}
	case KVM_HCALL_TOKEN(KVM_HC_FEATURES):
		r = EV_SUCCESS;
#if defined(CONFIG_PPC_BOOK3S) || defined(CONFIG_KVM_E500V2)
		/* XXX Missing magic page on 44x */
		r2 |= (1 << KVM_FEATURE_MAGIC_PAGE);
#endif

		/* Second return value is in r4 */
		break;
	case EV_HCALL_TOKEN(EV_IDLE):
		r = EV_SUCCESS;
		kvm_vcpu_block(vcpu);
		clear_bit(KVM_REQ_UNHALT, &vcpu->requests);
		break;
	default:
		r = EV_UNIMPLEMENTED;
		break;
	}

	kvmppc_set_gpr(vcpu, 4, r2);

	return r;
}
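
/*
 * Convention note (derived from the handler above, not a normative ABI
 * statement): the guest passes the hypercall token in r11 and parameters
 * in r3-r6; the second return value goes back in r4 here, and the caller
 * of kvmppc_kvm_pv() is expected to place the returned status code in r3.
 */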

int kvmppc_sanity_check(struct kvm_vcpu *vcpu)
{
	int r = false;

	/* We have to know what CPU to virtualize */
	if (!vcpu->arch.pvr)
		goto out;

	/* PAPR only works with book3s_64 */
	if ((vcpu->arch.cpu_type != KVM_CPU_3S_64) && vcpu->arch.papr_enabled)
		goto out;

#ifdef CONFIG_KVM_BOOK3S_64_HV
	/* HV KVM can only do PAPR mode for now */
	if (!vcpu->arch.papr_enabled)
		goto out;
#endif

#ifdef CONFIG_KVM_BOOKE_HV
	if (!cpu_has_feature(CPU_FTR_EMB_HV))
		goto out;
#endif

	r = true;

out:
	vcpu->arch.sane = r;
	return r ? 0 : -EINVAL;
}

int kvmppc_emulate_mmio(struct kvm_run *run, struct kvm_vcpu *vcpu)
{
	enum emulation_result er;
	int r;

	er = kvmppc_emulate_instruction(run, vcpu);
	switch (er) {
	case EMULATE_DONE:
		/* Future optimization: only reload non-volatiles if they were
		 * actually modified. */
		r = RESUME_GUEST_NV;
		break;
	case EMULATE_DO_MMIO:
		run->exit_reason = KVM_EXIT_MMIO;
		/* We must reload nonvolatiles because "update" load/store
		 * instructions modify register state. */
		/* Future optimization: only reload non-volatiles if they were
		 * actually modified. */
		r = RESUME_HOST_NV;
		break;
	case EMULATE_FAIL:
		/* XXX Deliver Program interrupt to guest. */
		printk(KERN_EMERG "%s: emulation failed (%08x)\n", __func__,
		       kvmppc_get_last_inst(vcpu));
		r = RESUME_HOST;
		break;
	default:
		WARN_ON(1);
		r = RESUME_GUEST;
	}

	return r;
}

int kvm_arch_hardware_enable(void *garbage)
{
	return 0;
}

void kvm_arch_hardware_disable(void *garbage)
{
}

int kvm_arch_hardware_setup(void)
{
	return 0;
}

void kvm_arch_hardware_unsetup(void)
{
}

void kvm_arch_check_processor_compat(void *rtn)
{
	*(int *)rtn = kvmppc_core_check_processor_compat();
}

int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
{
	if (type)
		return -EINVAL;

	return kvmppc_core_init_vm(kvm);
}

void kvm_arch_destroy_vm(struct kvm *kvm)
{
	unsigned int i;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(i, vcpu, kvm)
		kvm_arch_vcpu_free(vcpu);

	mutex_lock(&kvm->lock);
	for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
		kvm->vcpus[i] = NULL;

	atomic_set(&kvm->online_vcpus, 0);

	kvmppc_core_destroy_vm(kvm);

	mutex_unlock(&kvm->lock);
}

void kvm_arch_sync_events(struct kvm *kvm)
{
}

int kvm_dev_ioctl_check_extension(long ext)
{
	int r;

	switch (ext) {
#ifdef CONFIG_BOOKE
	case KVM_CAP_PPC_BOOKE_SREGS:
	case KVM_CAP_PPC_BOOKE_WATCHDOG:
	case KVM_CAP_PPC_EPR:
#else
	case KVM_CAP_PPC_SEGSTATE:
	case KVM_CAP_PPC_HIOR:
	case KVM_CAP_PPC_PAPR:
#endif
	case KVM_CAP_PPC_UNSET_IRQ:
	case KVM_CAP_PPC_IRQ_LEVEL:
	case KVM_CAP_ENABLE_CAP:
	case KVM_CAP_ONE_REG:
	case KVM_CAP_IOEVENTFD:
	case KVM_CAP_DEVICE_CTRL:
		r = 1;
		break;
#ifndef CONFIG_KVM_BOOK3S_64_HV
	case KVM_CAP_PPC_PAIRED_SINGLES:
	case KVM_CAP_PPC_OSI:
	case KVM_CAP_PPC_GET_PVINFO:
#if defined(CONFIG_KVM_E500V2) || defined(CONFIG_KVM_E500MC)
	case KVM_CAP_SW_TLB:
#endif
#ifdef CONFIG_KVM_MPIC
	case KVM_CAP_IRQ_MPIC:
#endif
		r = 1;
		break;
	case KVM_CAP_COALESCED_MMIO:
		r = KVM_COALESCED_MMIO_PAGE_OFFSET;
		break;
#endif
#ifdef CONFIG_PPC_BOOK3S_64
	case KVM_CAP_SPAPR_TCE:
	case KVM_CAP_PPC_ALLOC_HTAB:
	case KVM_CAP_PPC_RTAS:
#ifdef CONFIG_KVM_XICS
	case KVM_CAP_IRQ_XICS:
#endif
		r = 1;
		break;
#endif /* CONFIG_PPC_BOOK3S_64 */
#ifdef CONFIG_KVM_BOOK3S_64_HV
	case KVM_CAP_PPC_SMT:
		r = threads_per_core;
		break;
	case KVM_CAP_PPC_RMA:
		r = 1;
		/* PPC970 requires an RMA */
		if (cpu_has_feature(CPU_FTR_ARCH_201))
			r = 2;
		break;
#endif
	case KVM_CAP_SYNC_MMU:
#ifdef CONFIG_KVM_BOOK3S_64_HV
		r = cpu_has_feature(CPU_FTR_ARCH_206) ? 1 : 0;
#elif defined(KVM_ARCH_WANT_MMU_NOTIFIER)
		r = 1;
#else
		r = 0;
#endif
		break;
#ifdef CONFIG_KVM_BOOK3S_64_HV
	case KVM_CAP_PPC_HTAB_FD:
		r = 1;
		break;
#endif
	case KVM_CAP_NR_VCPUS:
		/*
		 * Recommending a number of CPUs is somewhat arbitrary; we
		 * return the number of present CPUs for -HV (since a host
		 * will have secondary threads "offline"), and for other KVM
		 * implementations just count online CPUs.
		 */
#ifdef CONFIG_KVM_BOOK3S_64_HV
		r = num_present_cpus();
#else
		r = num_online_cpus();
#endif
		break;
	case KVM_CAP_MAX_VCPUS:
		r = KVM_MAX_VCPUS;
		break;
#ifdef CONFIG_PPC_BOOK3S_64
	case KVM_CAP_PPC_GET_SMMU_INFO:
		r = 1;
		break;
#endif
	default:
		r = 0;
		break;
	}
	return r;
}
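
/*
 * Userspace view (illustrative): each value above surfaces through the
 * KVM_CHECK_EXTENSION ioctl on the /dev/kvm file descriptor, e.g. a VMM
 * might size its vcpu array with:
 *
 *	int nr = ioctl(kvm_fd, KVM_CHECK_EXTENSION, KVM_CAP_NR_VCPUS);
 */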

long kvm_arch_dev_ioctl(struct file *filp,
			unsigned int ioctl, unsigned long arg)
{
	return -EINVAL;
}

void kvm_arch_free_memslot(struct kvm_memory_slot *free,
			   struct kvm_memory_slot *dont)
{
	kvmppc_core_free_memslot(free, dont);
}

int kvm_arch_create_memslot(struct kvm_memory_slot *slot, unsigned long npages)
{
	return kvmppc_core_create_memslot(slot, npages);
}

void kvm_arch_memslots_updated(struct kvm *kvm)
{
}

int kvm_arch_prepare_memory_region(struct kvm *kvm,
				   struct kvm_memory_slot *memslot,
				   struct kvm_userspace_memory_region *mem,
				   enum kvm_mr_change change)
{
	return kvmppc_core_prepare_memory_region(kvm, memslot, mem);
}

void kvm_arch_commit_memory_region(struct kvm *kvm,
				   struct kvm_userspace_memory_region *mem,
				   const struct kvm_memory_slot *old,
				   enum kvm_mr_change change)
{
	kvmppc_core_commit_memory_region(kvm, mem, old);
}

void kvm_arch_flush_shadow_all(struct kvm *kvm)
{
}

void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
				   struct kvm_memory_slot *slot)
{
	kvmppc_core_flush_memslot(kvm, slot);
}

struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id)
{
	struct kvm_vcpu *vcpu;
	vcpu = kvmppc_core_vcpu_create(kvm, id);
	if (!IS_ERR(vcpu)) {
		vcpu->arch.wqp = &vcpu->wq;
		kvmppc_create_vcpu_debugfs(vcpu, id);
	}
	return vcpu;
}

int kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
{
	return 0;
}

void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu)
{
	/* Make sure we're not using the vcpu anymore */
	hrtimer_cancel(&vcpu->arch.dec_timer);
	tasklet_kill(&vcpu->arch.tasklet);

	kvmppc_remove_vcpu_debugfs(vcpu);

	switch (vcpu->arch.irq_type) {
	case KVMPPC_IRQ_MPIC:
		kvmppc_mpic_disconnect_vcpu(vcpu->arch.mpic, vcpu);
		break;
	case KVMPPC_IRQ_XICS:
		kvmppc_xics_free_icp(vcpu);
		break;
	}

	kvmppc_core_vcpu_free(vcpu);
}

void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{
	kvm_arch_vcpu_free(vcpu);
}

int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
{
	return kvmppc_core_pending_dec(vcpu);
}

/*
 * low level hrtimer wake routine. Because this runs in hardirq context
 * we schedule a tasklet to do the real work.
 */
enum hrtimer_restart kvmppc_decrementer_wakeup(struct hrtimer *timer)
{
	struct kvm_vcpu *vcpu;

	vcpu = container_of(timer, struct kvm_vcpu, arch.dec_timer);
	tasklet_schedule(&vcpu->arch.tasklet);

	return HRTIMER_NORESTART;
}

int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
{
	int ret;

	hrtimer_init(&vcpu->arch.dec_timer, CLOCK_REALTIME, HRTIMER_MODE_ABS);
	tasklet_init(&vcpu->arch.tasklet, kvmppc_decrementer_func, (ulong)vcpu);
	vcpu->arch.dec_timer.function = kvmppc_decrementer_wakeup;
	vcpu->arch.dec_expires = ~(u64)0;

#ifdef CONFIG_KVM_EXIT_TIMING
	mutex_init(&vcpu->arch.exit_timing_lock);
#endif
	ret = kvmppc_subarch_vcpu_init(vcpu);
	return ret;
}

void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
{
	kvmppc_mmu_destroy(vcpu);
	kvmppc_subarch_vcpu_uninit(vcpu);
}

void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
#ifdef CONFIG_BOOKE
	/*
	 * vrsave (formerly usprg0) isn't used by Linux, but may
	 * be used by the guest.
	 *
	 * On non-booke this is associated with Altivec and
	 * is handled by code in book3s.c.
	 */
	mtspr(SPRN_VRSAVE, vcpu->arch.vrsave);
#endif
	kvmppc_core_vcpu_load(vcpu, cpu);
}

void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
	kvmppc_core_vcpu_put(vcpu);
#ifdef CONFIG_BOOKE
	vcpu->arch.vrsave = mfspr(SPRN_VRSAVE);
#endif
}

static void kvmppc_complete_dcr_load(struct kvm_vcpu *vcpu,
				     struct kvm_run *run)
{
	kvmppc_set_gpr(vcpu, vcpu->arch.io_gpr, run->dcr.data);
}

static void kvmppc_complete_mmio_load(struct kvm_vcpu *vcpu,
				      struct kvm_run *run)
{
	u64 uninitialized_var(gpr);

	if (run->mmio.len > sizeof(gpr)) {
		printk(KERN_ERR "bad MMIO length: %d\n", run->mmio.len);
		return;
	}

	if (vcpu->arch.mmio_is_bigendian) {
		switch (run->mmio.len) {
		case 8: gpr = *(u64 *)run->mmio.data; break;
		case 4: gpr = *(u32 *)run->mmio.data; break;
		case 2: gpr = *(u16 *)run->mmio.data; break;
		case 1: gpr = *(u8 *)run->mmio.data; break;
		}
	} else {
		/* Convert BE data from userland back to LE. */
		switch (run->mmio.len) {
		case 4: gpr = ld_le32((u32 *)run->mmio.data); break;
		case 2: gpr = ld_le16((u16 *)run->mmio.data); break;
		case 1: gpr = *(u8 *)run->mmio.data; break;
		}
	}

	if (vcpu->arch.mmio_sign_extend) {
		switch (run->mmio.len) {
#ifdef CONFIG_PPC64
		case 4:
			gpr = (s64)(s32)gpr;
			break;
#endif
		case 2:
			gpr = (s64)(s16)gpr;
			break;
		case 1:
			gpr = (s64)(s8)gpr;
			break;
		}
	}

	kvmppc_set_gpr(vcpu, vcpu->arch.io_gpr, gpr);

	switch (vcpu->arch.io_gpr & KVM_MMIO_REG_EXT_MASK) {
	case KVM_MMIO_REG_GPR:
		kvmppc_set_gpr(vcpu, vcpu->arch.io_gpr, gpr);
		break;
	case KVM_MMIO_REG_FPR:
		vcpu->arch.fpr[vcpu->arch.io_gpr & KVM_MMIO_REG_MASK] = gpr;
		break;
#ifdef CONFIG_PPC_BOOK3S
	case KVM_MMIO_REG_QPR:
		vcpu->arch.qpr[vcpu->arch.io_gpr & KVM_MMIO_REG_MASK] = gpr;
		break;
	case KVM_MMIO_REG_FQPR:
		vcpu->arch.fpr[vcpu->arch.io_gpr & KVM_MMIO_REG_MASK] = gpr;
		vcpu->arch.qpr[vcpu->arch.io_gpr & KVM_MMIO_REG_MASK] = gpr;
		break;
#endif
	default:
		BUG();
	}
}

int kvmppc_handle_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
		       unsigned int rt, unsigned int bytes, int is_bigendian)
{
	int idx, ret;

	if (bytes > sizeof(run->mmio.data)) {
		printk(KERN_ERR "%s: bad MMIO length: %d\n", __func__,
		       run->mmio.len);
	}

	run->mmio.phys_addr = vcpu->arch.paddr_accessed;
	run->mmio.len = bytes;
	run->mmio.is_write = 0;

	vcpu->arch.io_gpr = rt;
	vcpu->arch.mmio_is_bigendian = is_bigendian;
	vcpu->mmio_needed = 1;
	vcpu->mmio_is_write = 0;
	vcpu->arch.mmio_sign_extend = 0;

	idx = srcu_read_lock(&vcpu->kvm->srcu);

	ret = kvm_io_bus_read(vcpu->kvm, KVM_MMIO_BUS, run->mmio.phys_addr,
			      bytes, &run->mmio.data);

	srcu_read_unlock(&vcpu->kvm->srcu, idx);

	if (!ret) {
		kvmppc_complete_mmio_load(vcpu, run);
		vcpu->mmio_needed = 0;
		return EMULATE_DONE;
	}

	return EMULATE_DO_MMIO;
}
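
/*
 * Usage sketch (hypothetical caller, not from this file): an instruction
 * emulator that cannot satisfy a 4-byte big-endian guest load in-kernel
 * would do roughly:
 *
 *	emulated = kvmppc_handle_load(run, vcpu, rt, 4, 1);
 *	if (emulated == EMULATE_DO_MMIO)
 *		return RESUME_HOST;
 *
 * i.e. exit to userspace; on reentry kvmppc_complete_mmio_load() fills rt.
 */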

/* Same as above, but sign extends */
int kvmppc_handle_loads(struct kvm_run *run, struct kvm_vcpu *vcpu,
			unsigned int rt, unsigned int bytes, int is_bigendian)
{
	int r;

	vcpu->arch.mmio_sign_extend = 1;
	r = kvmppc_handle_load(run, vcpu, rt, bytes, is_bigendian);

	return r;
}

int kvmppc_handle_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
			u64 val, unsigned int bytes, int is_bigendian)
{
	void *data = run->mmio.data;
	int idx, ret;

	if (bytes > sizeof(run->mmio.data)) {
		printk(KERN_ERR "%s: bad MMIO length: %d\n", __func__,
		       run->mmio.len);
	}

	run->mmio.phys_addr = vcpu->arch.paddr_accessed;
	run->mmio.len = bytes;
	run->mmio.is_write = 1;
	vcpu->mmio_needed = 1;
	vcpu->mmio_is_write = 1;

	/* Store the value at the lowest bytes in 'data'. */
	if (is_bigendian) {
		switch (bytes) {
		case 8: *(u64 *)data = val; break;
		case 4: *(u32 *)data = val; break;
		case 2: *(u16 *)data = val; break;
		case 1: *(u8  *)data = val; break;
		}
	} else {
		/* Store LE value into 'data'. */
		switch (bytes) {
		case 4: st_le32(data, val); break;
		case 2: st_le16(data, val); break;
		case 1: *(u8 *)data = val; break;
		}
	}

	idx = srcu_read_lock(&vcpu->kvm->srcu);

	ret = kvm_io_bus_write(vcpu->kvm, KVM_MMIO_BUS, run->mmio.phys_addr,
			       bytes, &run->mmio.data);

	srcu_read_unlock(&vcpu->kvm->srcu, idx);

	if (!ret) {
		vcpu->mmio_needed = 0;
		return EMULATE_DONE;
	}

	return EMULATE_DO_MMIO;
}

int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	int r;
	sigset_t sigsaved;

	if (vcpu->sigset_active)
		sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);

	if (vcpu->mmio_needed) {
		if (!vcpu->mmio_is_write)
			kvmppc_complete_mmio_load(vcpu, run);
		vcpu->mmio_needed = 0;
	} else if (vcpu->arch.dcr_needed) {
		if (!vcpu->arch.dcr_is_write)
			kvmppc_complete_dcr_load(vcpu, run);
		vcpu->arch.dcr_needed = 0;
	} else if (vcpu->arch.osi_needed) {
		u64 *gprs = run->osi.gprs;
		int i;

		for (i = 0; i < 32; i++)
			kvmppc_set_gpr(vcpu, i, gprs[i]);
		vcpu->arch.osi_needed = 0;
	} else if (vcpu->arch.hcall_needed) {
		int i;

		kvmppc_set_gpr(vcpu, 3, run->papr_hcall.ret);
		for (i = 0; i < 9; ++i)
			kvmppc_set_gpr(vcpu, 4 + i, run->papr_hcall.args[i]);
		vcpu->arch.hcall_needed = 0;
#ifdef CONFIG_BOOKE
	} else if (vcpu->arch.epr_needed) {
		kvmppc_set_epr(vcpu, run->epr.epr);
		vcpu->arch.epr_needed = 0;
#endif
	}

	r = kvmppc_vcpu_run(run, vcpu);

	if (vcpu->sigset_active)
		sigprocmask(SIG_SETMASK, &sigsaved, NULL);

	return r;
}
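
/*
 * Userspace counterpart (illustrative): the run loop that feeds the
 * completion paths above looks roughly like:
 *
 *	while (ioctl(vcpu_fd, KVM_RUN, 0) == 0) {
 *		if (run->exit_reason == KVM_EXIT_MMIO) {
 *			... satisfy the access described by run->mmio ...
 *		}
 *	}
 *
 * The next KVM_RUN reenters with mmio_needed still set, so the pending
 * load is completed before the vcpu runs again.
 */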

int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu, struct kvm_interrupt *irq)
{
	if (irq->irq == KVM_INTERRUPT_UNSET) {
		kvmppc_core_dequeue_external(vcpu);
		return 0;
	}

	kvmppc_core_queue_external(vcpu, irq);

	kvm_vcpu_kick(vcpu);

	return 0;
}

static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
				     struct kvm_enable_cap *cap)
{
	int r;

	if (cap->flags)
		return -EINVAL;

	switch (cap->cap) {
	case KVM_CAP_PPC_OSI:
		r = 0;
		vcpu->arch.osi_enabled = true;
		break;
	case KVM_CAP_PPC_PAPR:
		r = 0;
		vcpu->arch.papr_enabled = true;
		break;
	case KVM_CAP_PPC_EPR:
		r = 0;
		if (cap->args[0])
			vcpu->arch.epr_flags |= KVMPPC_EPR_USER;
		else
			vcpu->arch.epr_flags &= ~KVMPPC_EPR_USER;
		break;
#ifdef CONFIG_BOOKE
	case KVM_CAP_PPC_BOOKE_WATCHDOG:
		r = 0;
		vcpu->arch.watchdog_enabled = true;
		break;
#endif
#if defined(CONFIG_KVM_E500V2) || defined(CONFIG_KVM_E500MC)
	case KVM_CAP_SW_TLB: {
		struct kvm_config_tlb cfg;
		void __user *user_ptr = (void __user *)(uintptr_t)cap->args[0];

		r = -EFAULT;
		if (copy_from_user(&cfg, user_ptr, sizeof(cfg)))
			break;

		r = kvm_vcpu_ioctl_config_tlb(vcpu, &cfg);
		break;
	}
#endif
#ifdef CONFIG_KVM_MPIC
	case KVM_CAP_IRQ_MPIC: {
		struct fd f;
		struct kvm_device *dev;

		r = -EBADF;
		f = fdget(cap->args[0]);
		if (!f.file)
			break;

		r = -EPERM;
		dev = kvm_device_from_filp(f.file);
		if (dev)
			r = kvmppc_mpic_connect_vcpu(dev, vcpu, cap->args[1]);

		fdput(f);
		break;
	}
#endif
#ifdef CONFIG_KVM_XICS
	case KVM_CAP_IRQ_XICS: {
		struct fd f;
		struct kvm_device *dev;

		r = -EBADF;
		f = fdget(cap->args[0]);
		if (!f.file)
			break;

		r = -EPERM;
		dev = kvm_device_from_filp(f.file);
		if (dev)
			r = kvmppc_xics_connect_vcpu(dev, vcpu, cap->args[1]);

		fdput(f);
		break;
	}
#endif /* CONFIG_KVM_XICS */
	default:
		r = -EINVAL;
		break;
	}

	if (!r)
		r = kvmppc_sanity_check(vcpu);

	return r;
}

int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	return -EINVAL;
}

int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	return -EINVAL;
}

long kvm_arch_vcpu_ioctl(struct file *filp,
			 unsigned int ioctl, unsigned long arg)
{
	struct kvm_vcpu *vcpu = filp->private_data;
	void __user *argp = (void __user *)arg;
	long r;

	switch (ioctl) {
	case KVM_INTERRUPT: {
		struct kvm_interrupt irq;
		r = -EFAULT;
		if (copy_from_user(&irq, argp, sizeof(irq)))
			goto out;
		r = kvm_vcpu_ioctl_interrupt(vcpu, &irq);
		goto out;
	}

	case KVM_ENABLE_CAP:
	{
		struct kvm_enable_cap cap;
		r = -EFAULT;
		if (copy_from_user(&cap, argp, sizeof(cap)))
			goto out;
		r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap);
		break;
	}

	case KVM_SET_ONE_REG:
	case KVM_GET_ONE_REG:
	{
		struct kvm_one_reg reg;
		r = -EFAULT;
		if (copy_from_user(&reg, argp, sizeof(reg)))
			goto out;
		if (ioctl == KVM_SET_ONE_REG)
			r = kvm_vcpu_ioctl_set_one_reg(vcpu, &reg);
		else
			r = kvm_vcpu_ioctl_get_one_reg(vcpu, &reg);
		break;
	}

#if defined(CONFIG_KVM_E500V2) || defined(CONFIG_KVM_E500MC)
	case KVM_DIRTY_TLB: {
		struct kvm_dirty_tlb dirty;
		r = -EFAULT;
		if (copy_from_user(&dirty, argp, sizeof(dirty)))
			goto out;
		r = kvm_vcpu_ioctl_dirty_tlb(vcpu, &dirty);
		break;
	}
#endif
	default:
		r = -EINVAL;
	}

out:
	return r;
}

int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
{
	return VM_FAULT_SIGBUS;
}

static int kvm_vm_ioctl_get_pvinfo(struct kvm_ppc_pvinfo *pvinfo)
{
	u32 inst_nop = 0x60000000;
#ifdef CONFIG_KVM_BOOKE_HV
	u32 inst_sc1 = 0x44000022;
	pvinfo->hcall[0] = inst_sc1;
	pvinfo->hcall[1] = inst_nop;
	pvinfo->hcall[2] = inst_nop;
	pvinfo->hcall[3] = inst_nop;
#else
	u32 inst_lis = 0x3c000000;
	u32 inst_ori = 0x60000000;
	u32 inst_sc = 0x44000002;
	u32 inst_imm_mask = 0xffff;

	/*
	 * The hypercall to get into KVM from within guest context is as
	 * follows:
	 *
	 *    lis r0, KVM_SC_MAGIC_R0@h
	 *    ori r0, r0, KVM_SC_MAGIC_R0@l
	 *    sc
	 *    nop
	 */
	pvinfo->hcall[0] = inst_lis | ((KVM_SC_MAGIC_R0 >> 16) & inst_imm_mask);
	pvinfo->hcall[1] = inst_ori | (KVM_SC_MAGIC_R0 & inst_imm_mask);
	pvinfo->hcall[2] = inst_sc;
	pvinfo->hcall[3] = inst_nop;
#endif

	pvinfo->flags = KVM_PPC_PVINFO_FLAGS_EV_IDLE;

	return 0;
}
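
/*
 * Worked example (assuming KVM_SC_MAGIC_R0 is the "KVM!" magic,
 * 0x4b564d21, from asm/kvm_para.h): the non-BOOKE-HV sequence above
 * assembles to
 *
 *	hcall[0] = 0x3c004b56	(lis r0, 0x4b56)
 *	hcall[1] = 0x60004d21	(ori r0, r0, 0x4d21)
 *	hcall[2] = 0x44000002	(sc)
 *	hcall[3] = 0x60000000	(nop)
 */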

int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_event,
			  bool line_status)
{
	if (!irqchip_in_kernel(kvm))
		return -ENXIO;

	irq_event->status = kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID,
					irq_event->irq, irq_event->level,
					line_status);
	return 0;
}

long kvm_arch_vm_ioctl(struct file *filp,
		       unsigned int ioctl, unsigned long arg)
{
	struct kvm *kvm __maybe_unused = filp->private_data;
	void __user *argp = (void __user *)arg;
	long r;

	switch (ioctl) {
	case KVM_PPC_GET_PVINFO: {
		struct kvm_ppc_pvinfo pvinfo;
		memset(&pvinfo, 0, sizeof(pvinfo));
		r = kvm_vm_ioctl_get_pvinfo(&pvinfo);
		if (copy_to_user(argp, &pvinfo, sizeof(pvinfo))) {
			r = -EFAULT;
			goto out;
		}

		break;
	}
#ifdef CONFIG_PPC_BOOK3S_64
	case KVM_CREATE_SPAPR_TCE: {
		struct kvm_create_spapr_tce create_tce;

		r = -EFAULT;
		if (copy_from_user(&create_tce, argp, sizeof(create_tce)))
			goto out;
		r = kvm_vm_ioctl_create_spapr_tce(kvm, &create_tce);
		goto out;
	}
	case KVM_PPC_GET_SMMU_INFO: {
		struct kvm_ppc_smmu_info info;

		memset(&info, 0, sizeof(info));
		r = kvmppc_ops->get_smmu_info(kvm, &info);
		if (r >= 0 && copy_to_user(argp, &info, sizeof(info)))
			r = -EFAULT;
		break;
	}
	case KVM_PPC_RTAS_DEFINE_TOKEN: {
		struct kvm *kvm = filp->private_data;

		r = kvm_vm_ioctl_rtas_define_token(kvm, argp);
		break;
	}
	default:
		r = kvmppc_ops->arch_vm_ioctl(filp, ioctl, arg);
#else /* CONFIG_PPC_BOOK3S_64 */
	default:
		r = -ENOTTY;
#endif
	}

out:
	return r;
}

static unsigned long lpid_inuse[BITS_TO_LONGS(KVMPPC_NR_LPIDS)];
static unsigned long nr_lpids;

long kvmppc_alloc_lpid(void)
{
	long lpid;

	do {
		lpid = find_first_zero_bit(lpid_inuse, KVMPPC_NR_LPIDS);
		if (lpid >= nr_lpids) {
			pr_err("%s: No LPIDs free\n", __func__);
			return -ENOMEM;
		}
	} while (test_and_set_bit(lpid, lpid_inuse));

	return lpid;
}

void kvmppc_claim_lpid(long lpid)
{
	set_bit(lpid, lpid_inuse);
}

void kvmppc_free_lpid(long lpid)
{
	clear_bit(lpid, lpid_inuse);
}

void kvmppc_init_lpid(unsigned long nr_lpids_param)
{
	nr_lpids = min_t(unsigned long, KVMPPC_NR_LPIDS, nr_lpids_param);
	memset(lpid_inuse, 0, sizeof(lpid_inuse));
}
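
/*
 * Usage sketch (hypothetical): a VM-creation path pairs the allocator
 * with the matching free, e.g.:
 *
 *	long lpid = kvmppc_alloc_lpid();
 *	if (lpid < 0)
 *		return lpid;
 *	...use lpid...
 *	kvmppc_free_lpid(lpid);
 */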

int kvm_arch_init(void *opaque)
{
	if (kvmppc_ops) {
		printk(KERN_ERR "kvm: already loaded the other module\n");
		return -EEXIST;
	}
	kvmppc_ops = (struct kvmppc_ops *)opaque;
	return 0;
}

void kvm_arch_exit(void)
{
	kvmppc_ops = NULL;
}