/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 *
 * Copyright IBM Corp. 2007
 *
 * Authors: Hollis Blanchard <hollisb@us.ibm.com>
 *          Christian Ehrhardt <ehrhardt@linux.vnet.ibm.com>
 */
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/kvm_host.h>
#include <linux/vmalloc.h>
#include <linux/hrtimer.h>
#include <linux/slab.h>
#include <asm/cputable.h>
#include <asm/uaccess.h>
#include <asm/kvm_ppc.h>
#include <asm/tlbflush.h>
#include <asm/cputhreads.h>
#include <asm/irqflags.h>
#include "../mm/mmu_decl.h"

#define CREATE_TRACE_POINTS
#include "trace.h"
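
/*
 * A vcpu is considered runnable if it has exceptions pending for
 * delivery or outstanding requests to process.
 */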
int kvm_arch_vcpu_runnable(struct kvm_vcpu *v)
{
        return !!(v->arch.pending_exceptions) ||
               v->requests;
}

int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
{
        return 1;
}

#ifndef CONFIG_KVM_BOOK3S_64_HV
/*
 * Common checks before entering the guest world.  Call with interrupts
 * disabled.
 *
 * returns:
 *
 * == 1 if we're ready to go into guest state
 * <= 0 if we need to go back to the host with return value
 */
int kvmppc_prepare_to_enter(struct kvm_vcpu *vcpu)
{
        int r = 1;

        WARN_ON_ONCE(!irqs_disabled());
        while (true) {
                if (need_resched()) {
                        local_irq_enable();
                        cond_resched();
                        local_irq_disable();
                        continue;
                }

                if (signal_pending(current)) {
                        kvmppc_account_exit(vcpu, SIGNAL_EXITS);
                        vcpu->run->exit_reason = KVM_EXIT_INTR;
                        r = -EINTR;
                        break;
                }

                vcpu->mode = IN_GUEST_MODE;

                /*
                 * Reading vcpu->requests must happen after setting vcpu->mode,
                 * so we don't miss a request because the requester sees
                 * OUTSIDE_GUEST_MODE and assumes we'll be checking requests
                 * before next entering the guest (and thus doesn't IPI).
                 */
                smp_mb();

                if (vcpu->requests) {
                        /* Make sure we process requests preemptable */
                        local_irq_enable();
                        trace_kvm_check_requests(vcpu);
                        r = kvmppc_core_check_requests(vcpu);
                        local_irq_disable();
                        if (r > 0)
                                continue;
                        break;
                }

                if (kvmppc_core_prepare_to_enter(vcpu)) {
                        /* interrupts got enabled in between, so we
                           are back at square 1 */
                        continue;
                }

#ifdef CONFIG_PPC64
                hard_irq_disable();
                if (lazy_irq_pending()) {
                        /* Got an interrupt in between, try again */
                        local_irq_enable();
                        local_irq_disable();
                        continue;
                }

                trace_hardirqs_on();
#endif

                kvm_guest_enter();
                break;
        }

        return r;
}
#endif /* CONFIG_KVM_BOOK3S_64_HV */
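
/*
 * Handle a guest hypercall (the ePAPR-style convention used by KVM's
 * PV interface): the hcall number arrives in r11 and up to four
 * parameters in r3-r6; the status is returned to the caller and a
 * second return value is placed in r4.
 */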
int kvmppc_kvm_pv(struct kvm_vcpu *vcpu)
{
        int nr = kvmppc_get_gpr(vcpu, 11);
        int r;
        unsigned long __maybe_unused param1 = kvmppc_get_gpr(vcpu, 3);
        unsigned long __maybe_unused param2 = kvmppc_get_gpr(vcpu, 4);
        unsigned long __maybe_unused param3 = kvmppc_get_gpr(vcpu, 5);
        unsigned long __maybe_unused param4 = kvmppc_get_gpr(vcpu, 6);
        unsigned long r2 = 0;

        if (!(vcpu->arch.shared->msr & MSR_SF)) {
                /* 32 bit mode */
                param1 &= 0xffffffff;
                param2 &= 0xffffffff;
                param3 &= 0xffffffff;
                param4 &= 0xffffffff;
        }

        switch (nr) {
        case KVM_HCALL_TOKEN(KVM_HC_PPC_MAP_MAGIC_PAGE):
        {
                vcpu->arch.magic_page_pa = param1;
                vcpu->arch.magic_page_ea = param2;

                r2 = KVM_MAGIC_FEAT_SR | KVM_MAGIC_FEAT_MAS0_TO_SPRG7;

                r = EV_SUCCESS;
                break;
        }
        case KVM_HCALL_TOKEN(KVM_HC_FEATURES):
                r = EV_SUCCESS;
#if defined(CONFIG_PPC_BOOK3S) || defined(CONFIG_KVM_E500V2)
                /* XXX Missing magic page on 44x */
                r2 |= (1 << KVM_FEATURE_MAGIC_PAGE);
#endif

                /* Second return value is in r4 */
                break;
        case EV_HCALL_TOKEN(EV_IDLE):
                r = EV_SUCCESS;
                kvm_vcpu_block(vcpu);
                clear_bit(KVM_REQ_UNHALT, &vcpu->requests);
                break;
        default:
                r = EV_UNIMPLEMENTED;
                break;
        }

        kvmppc_set_gpr(vcpu, 4, r2);

        return r;
}
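
/*
 * Verify that the combination of CPU type and requested features can
 * actually be virtualized by this build; the verdict is remembered in
 * vcpu->arch.sane.
 */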
int kvmppc_sanity_check(struct kvm_vcpu *vcpu)
{
        int r = false;

        /* We have to know what CPU to virtualize */
        if (!vcpu->arch.pvr)
                goto out;

        /* PAPR only works with book3s_64 */
        if ((vcpu->arch.cpu_type != KVM_CPU_3S_64) && vcpu->arch.papr_enabled)
                goto out;

#ifdef CONFIG_KVM_BOOK3S_64_HV
        /* HV KVM can only do PAPR mode for now */
        if (!vcpu->arch.papr_enabled)
                goto out;
#endif

#ifdef CONFIG_KVM_BOOKE_HV
        if (!cpu_has_feature(CPU_FTR_EMB_HV))
                goto out;
#endif

        r = true;

out:
        vcpu->arch.sane = r;
        return r ? 0 : -EINVAL;
}
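
/*
 * Emulate the instruction that faulted.  EMULATE_DO_MMIO means the
 * access has to be completed by userspace, so we exit with
 * KVM_EXIT_MMIO; EMULATE_FAIL is currently only logged.
 */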
int kvmppc_emulate_mmio(struct kvm_run *run, struct kvm_vcpu *vcpu)
{
        enum emulation_result er;
        int r;

        er = kvmppc_emulate_instruction(run, vcpu);
        switch (er) {
        case EMULATE_DONE:
                /* Future optimization: only reload non-volatiles if they were
                 * actually modified. */
                r = RESUME_GUEST_NV;
                break;
        case EMULATE_DO_MMIO:
                run->exit_reason = KVM_EXIT_MMIO;
                /* We must reload nonvolatiles because "update" load/store
                 * instructions modify register state. */
                /* Future optimization: only reload non-volatiles if they were
                 * actually modified. */
                r = RESUME_HOST_NV;
                break;
        case EMULATE_FAIL:
                /* XXX Deliver Program interrupt to guest. */
                printk(KERN_EMERG "%s: emulation failed (%08x)\n", __func__,
                       kvmppc_get_last_inst(vcpu));
                r = RESUME_HOST;
                break;
        default:
                WARN_ON(1);
                r = RESUME_GUEST;
        }

        return r;
}
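
/*
 * The generic KVM hardware enable/setup hooks are essentially no-ops
 * on PPC; the core backends do their own setup at module init time.
 */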
int kvm_arch_hardware_enable(void *garbage)
{
        return 0;
}

void kvm_arch_hardware_disable(void *garbage)
{
}

int kvm_arch_hardware_setup(void)
{
        return 0;
}

void kvm_arch_hardware_unsetup(void)
{
}

void kvm_arch_check_processor_compat(void *rtn)
{
        *(int *)rtn = kvmppc_core_check_processor_compat();
}

int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
{
        if (type)
                return -EINVAL;

        return kvmppc_core_init_vm(kvm);
}

void kvm_arch_destroy_vm(struct kvm *kvm)
{
        unsigned int i;
        struct kvm_vcpu *vcpu;

        kvm_for_each_vcpu(i, vcpu, kvm)
                kvm_arch_vcpu_free(vcpu);

        mutex_lock(&kvm->lock);
        for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
                kvm->vcpus[i] = NULL;

        atomic_set(&kvm->online_vcpus, 0);

        kvmppc_core_destroy_vm(kvm);

        mutex_unlock(&kvm->lock);
}

void kvm_arch_sync_events(struct kvm *kvm)
{
}
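
/*
 * Report which optional capabilities are available.  The answer
 * depends on which backend (Book3S PR/HV, BookE, e500) this kernel
 * was built for.
 */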
int kvm_dev_ioctl_check_extension(long ext)
{
        int r;

        switch (ext) {
#ifdef CONFIG_BOOKE
        case KVM_CAP_PPC_BOOKE_SREGS:
        case KVM_CAP_PPC_BOOKE_WATCHDOG:
        case KVM_CAP_PPC_EPR:
#else
        case KVM_CAP_PPC_SEGSTATE:
        case KVM_CAP_PPC_HIOR:
        case KVM_CAP_PPC_PAPR:
#endif
        case KVM_CAP_PPC_UNSET_IRQ:
        case KVM_CAP_PPC_IRQ_LEVEL:
        case KVM_CAP_ENABLE_CAP:
        case KVM_CAP_ONE_REG:
        case KVM_CAP_IOEVENTFD:
        case KVM_CAP_DEVICE_CTRL:
                r = 1;
                break;
#ifndef CONFIG_KVM_BOOK3S_64_HV
        case KVM_CAP_PPC_PAIRED_SINGLES:
        case KVM_CAP_PPC_OSI:
        case KVM_CAP_PPC_GET_PVINFO:
#if defined(CONFIG_KVM_E500V2) || defined(CONFIG_KVM_E500MC)
        case KVM_CAP_SW_TLB:
#endif
                r = 1;
                break;
        case KVM_CAP_COALESCED_MMIO:
                r = KVM_COALESCED_MMIO_PAGE_OFFSET;
                break;
#endif
#ifdef CONFIG_PPC_BOOK3S_64
        case KVM_CAP_SPAPR_TCE:
        case KVM_CAP_PPC_ALLOC_HTAB:
                r = 1;
                break;
#endif /* CONFIG_PPC_BOOK3S_64 */
#ifdef CONFIG_KVM_BOOK3S_64_HV
        case KVM_CAP_PPC_SMT:
                r = threads_per_core;
                break;
        case KVM_CAP_PPC_RMA:
                r = 1;
                /* PPC970 requires an RMA */
                if (cpu_has_feature(CPU_FTR_ARCH_201))
                        r = 2;
                break;
#endif
        case KVM_CAP_SYNC_MMU:
#ifdef CONFIG_KVM_BOOK3S_64_HV
                r = cpu_has_feature(CPU_FTR_ARCH_206) ? 1 : 0;
#elif defined(KVM_ARCH_WANT_MMU_NOTIFIER)
                r = 1;
#else
                r = 0;
#endif
                break;
#ifdef CONFIG_KVM_BOOK3S_64_HV
        case KVM_CAP_PPC_HTAB_FD:
                r = 1;
                break;
#endif
        case KVM_CAP_NR_VCPUS:
                /*
                 * Recommending a number of CPUs is somewhat arbitrary; we
                 * return the number of present CPUs for -HV (since a host
                 * will have secondary threads "offline"), and for other KVM
                 * implementations just count online CPUs.
                 */
#ifdef CONFIG_KVM_BOOK3S_64_HV
                r = num_present_cpus();
#else
                r = num_online_cpus();
#endif
                break;
        case KVM_CAP_MAX_VCPUS:
                r = KVM_MAX_VCPUS;
                break;
#ifdef CONFIG_PPC_BOOK3S_64
        case KVM_CAP_PPC_GET_SMMU_INFO:
                r = 1;
                break;
#endif
        default:
                r = 0;
                break;
        }
        return r;
}

long kvm_arch_dev_ioctl(struct file *filp,
                        unsigned int ioctl, unsigned long arg)
{
        return -EINVAL;
}

void kvm_arch_free_memslot(struct kvm_memory_slot *free,
                           struct kvm_memory_slot *dont)
{
        kvmppc_core_free_memslot(free, dont);
}

int kvm_arch_create_memslot(struct kvm_memory_slot *slot, unsigned long npages)
{
        return kvmppc_core_create_memslot(slot, npages);
}

int kvm_arch_prepare_memory_region(struct kvm *kvm,
                                   struct kvm_memory_slot *memslot,
                                   struct kvm_userspace_memory_region *mem,
                                   enum kvm_mr_change change)
{
        return kvmppc_core_prepare_memory_region(kvm, memslot, mem);
}

void kvm_arch_commit_memory_region(struct kvm *kvm,
                                   struct kvm_userspace_memory_region *mem,
                                   const struct kvm_memory_slot *old,
                                   enum kvm_mr_change change)
{
        kvmppc_core_commit_memory_region(kvm, mem, old);
}

void kvm_arch_flush_shadow_all(struct kvm *kvm)
{
}

void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
                                   struct kvm_memory_slot *slot)
{
        kvmppc_core_flush_memslot(kvm, slot);
}
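
/*
 * vcpu creation and teardown are almost entirely delegated to the
 * core backend; here we only hook up the wait queue and debugfs.
 */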
struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id)
{
        struct kvm_vcpu *vcpu;
        vcpu = kvmppc_core_vcpu_create(kvm, id);
        if (!IS_ERR(vcpu)) {
                vcpu->arch.wqp = &vcpu->wq;
                kvmppc_create_vcpu_debugfs(vcpu, id);
        }
        return vcpu;
}

int kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
{
        return 0;
}

void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu)
{
        /* Make sure we're not using the vcpu anymore */
        hrtimer_cancel(&vcpu->arch.dec_timer);
        tasklet_kill(&vcpu->arch.tasklet);

        kvmppc_remove_vcpu_debugfs(vcpu);
        kvmppc_core_vcpu_free(vcpu);
}

void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{
        kvm_arch_vcpu_free(vcpu);
}

int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
{
        return kvmppc_core_pending_dec(vcpu);
}

/*
 * low level hrtimer wake routine. Because this runs in hardirq context
 * we schedule a tasklet to do the real work.
 */
enum hrtimer_restart kvmppc_decrementer_wakeup(struct hrtimer *timer)
{
        struct kvm_vcpu *vcpu;

        vcpu = container_of(timer, struct kvm_vcpu, arch.dec_timer);
        tasklet_schedule(&vcpu->arch.tasklet);

        return HRTIMER_NORESTART;
}

int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
{
        int ret;

        hrtimer_init(&vcpu->arch.dec_timer, CLOCK_REALTIME, HRTIMER_MODE_ABS);
        tasklet_init(&vcpu->arch.tasklet, kvmppc_decrementer_func, (ulong)vcpu);
        vcpu->arch.dec_timer.function = kvmppc_decrementer_wakeup;
        vcpu->arch.dec_expires = ~(u64)0;

#ifdef CONFIG_KVM_EXIT_TIMING
        mutex_init(&vcpu->arch.exit_timing_lock);
#endif
        ret = kvmppc_subarch_vcpu_init(vcpu);
        return ret;
}

void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
{
        kvmppc_mmu_destroy(vcpu);
        kvmppc_subarch_vcpu_uninit(vcpu);
}

void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
#ifdef CONFIG_BOOKE
        /*
         * vrsave (formerly usprg0) isn't used by Linux, but may
         * be used by the guest.
         *
         * On non-booke this is associated with Altivec and
         * is handled by code in book3s.c.
         */
        mtspr(SPRN_VRSAVE, vcpu->arch.vrsave);
#endif
        kvmppc_core_vcpu_load(vcpu, cpu);
}

void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
        kvmppc_core_vcpu_put(vcpu);
#ifdef CONFIG_BOOKE
        vcpu->arch.vrsave = mfspr(SPRN_VRSAVE);
#endif
}
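
/*
 * Complete a DCR (device control register) read that was emulated in
 * userspace, placing the returned data in the recorded GPR.
 */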
static void kvmppc_complete_dcr_load(struct kvm_vcpu *vcpu,
                                     struct kvm_run *run)
{
        kvmppc_set_gpr(vcpu, vcpu->arch.io_gpr, run->dcr.data);
}
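
/*
 * Copy the data userspace placed in run->mmio into the destination
 * register, applying endianness conversion and optional sign
 * extension as recorded when the load was started.
 */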
static void kvmppc_complete_mmio_load(struct kvm_vcpu *vcpu,
                                      struct kvm_run *run)
{
        u64 uninitialized_var(gpr);

        if (run->mmio.len > sizeof(gpr)) {
                printk(KERN_ERR "bad MMIO length: %d\n", run->mmio.len);
                return;
        }

        if (vcpu->arch.mmio_is_bigendian) {
                switch (run->mmio.len) {
                case 8: gpr = *(u64 *)run->mmio.data; break;
                case 4: gpr = *(u32 *)run->mmio.data; break;
                case 2: gpr = *(u16 *)run->mmio.data; break;
                case 1: gpr = *(u8 *)run->mmio.data; break;
                }
        } else {
                /* Convert BE data from userland back to LE. */
                switch (run->mmio.len) {
                case 4: gpr = ld_le32((u32 *)run->mmio.data); break;
                case 2: gpr = ld_le16((u16 *)run->mmio.data); break;
                case 1: gpr = *(u8 *)run->mmio.data; break;
                }
        }

        if (vcpu->arch.mmio_sign_extend) {
                switch (run->mmio.len) {
#ifdef CONFIG_PPC64
                case 4:
                        gpr = (s64)(s32)gpr;
                        break;
#endif
                case 2:
                        gpr = (s64)(s16)gpr;
                        break;
                case 1:
                        gpr = (s64)(s8)gpr;
                        break;
                }
        }

        kvmppc_set_gpr(vcpu, vcpu->arch.io_gpr, gpr);

        switch (vcpu->arch.io_gpr & KVM_MMIO_REG_EXT_MASK) {
        case KVM_MMIO_REG_GPR:
                kvmppc_set_gpr(vcpu, vcpu->arch.io_gpr, gpr);
                break;
        case KVM_MMIO_REG_FPR:
                vcpu->arch.fpr[vcpu->arch.io_gpr & KVM_MMIO_REG_MASK] = gpr;
                break;
#ifdef CONFIG_PPC_BOOK3S
        case KVM_MMIO_REG_QPR:
                vcpu->arch.qpr[vcpu->arch.io_gpr & KVM_MMIO_REG_MASK] = gpr;
                break;
        case KVM_MMIO_REG_FQPR:
                vcpu->arch.fpr[vcpu->arch.io_gpr & KVM_MMIO_REG_MASK] = gpr;
                vcpu->arch.qpr[vcpu->arch.io_gpr & KVM_MMIO_REG_MASK] = gpr;
                break;
#endif
        default:
                BUG();
        }
}
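
/*
 * Start an MMIO load: record everything needed to complete it, then
 * try the in-kernel I/O bus first.  If no in-kernel device claims the
 * access, EMULATE_DO_MMIO sends it out to userspace.
 */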
int kvmppc_handle_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
                       unsigned int rt, unsigned int bytes, int is_bigendian)
{
        if (bytes > sizeof(run->mmio.data)) {
                printk(KERN_ERR "%s: bad MMIO length: %d\n", __func__,
                       run->mmio.len);
        }

        run->mmio.phys_addr = vcpu->arch.paddr_accessed;
        run->mmio.len = bytes;
        run->mmio.is_write = 0;

        vcpu->arch.io_gpr = rt;
        vcpu->arch.mmio_is_bigendian = is_bigendian;
        vcpu->mmio_needed = 1;
        vcpu->mmio_is_write = 0;
        vcpu->arch.mmio_sign_extend = 0;

        if (!kvm_io_bus_read(vcpu->kvm, KVM_MMIO_BUS, run->mmio.phys_addr,
                             bytes, &run->mmio.data)) {
                kvmppc_complete_mmio_load(vcpu, run);
                vcpu->mmio_needed = 0;
                return EMULATE_DONE;
        }

        return EMULATE_DO_MMIO;
}

/* Same as above, but sign extends */
int kvmppc_handle_loads(struct kvm_run *run, struct kvm_vcpu *vcpu,
                        unsigned int rt, unsigned int bytes, int is_bigendian)
{
        int r;

        vcpu->arch.mmio_sign_extend = 1;
        r = kvmppc_handle_load(run, vcpu, rt, bytes, is_bigendian);

        return r;
}
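
/*
 * Start an MMIO store: the value is byteswapped into run->mmio.data
 * as needed before being offered to the in-kernel I/O bus or, failing
 * that, to userspace.
 */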
int kvmppc_handle_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
                        u64 val, unsigned int bytes, int is_bigendian)
{
        void *data = run->mmio.data;

        if (bytes > sizeof(run->mmio.data)) {
                printk(KERN_ERR "%s: bad MMIO length: %d\n", __func__,
                       run->mmio.len);
        }

        run->mmio.phys_addr = vcpu->arch.paddr_accessed;
        run->mmio.len = bytes;
        run->mmio.is_write = 1;
        vcpu->mmio_needed = 1;
        vcpu->mmio_is_write = 1;

        /* Store the value at the lowest bytes in 'data'. */
        if (is_bigendian) {
                switch (bytes) {
                case 8: *(u64 *)data = val; break;
                case 4: *(u32 *)data = val; break;
                case 2: *(u16 *)data = val; break;
                case 1: *(u8  *)data = val; break;
                }
        } else {
                /* Store LE value into 'data'. */
                switch (bytes) {
                case 4: st_le32(data, val); break;
                case 2: st_le16(data, val); break;
                case 1: *(u8 *)data = val; break;
                }
        }

        if (!kvm_io_bus_write(vcpu->kvm, KVM_MMIO_BUS, run->mmio.phys_addr,
                              bytes, &run->mmio.data)) {
                vcpu->mmio_needed = 0;
                return EMULATE_DONE;
        }

        return EMULATE_DO_MMIO;
}
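
/*
 * Main vcpu run entry point.  Before (re)entering the guest, finish
 * whatever operation userspace just completed on our behalf: an MMIO
 * or DCR access, an OSI or PAPR hypercall, or an EPR update.
 */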
int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
        int r;
        sigset_t sigsaved;

        if (vcpu->sigset_active)
                sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);

        if (vcpu->mmio_needed) {
                if (!vcpu->mmio_is_write)
                        kvmppc_complete_mmio_load(vcpu, run);
                vcpu->mmio_needed = 0;
        } else if (vcpu->arch.dcr_needed) {
                if (!vcpu->arch.dcr_is_write)
                        kvmppc_complete_dcr_load(vcpu, run);
                vcpu->arch.dcr_needed = 0;
        } else if (vcpu->arch.osi_needed) {
                u64 *gprs = run->osi.gprs;
                int i;

                for (i = 0; i < 32; i++)
                        kvmppc_set_gpr(vcpu, i, gprs[i]);
                vcpu->arch.osi_needed = 0;
        } else if (vcpu->arch.hcall_needed) {
                int i;

                kvmppc_set_gpr(vcpu, 3, run->papr_hcall.ret);
                for (i = 0; i < 9; ++i)
                        kvmppc_set_gpr(vcpu, 4 + i, run->papr_hcall.args[i]);
                vcpu->arch.hcall_needed = 0;
#ifdef CONFIG_BOOKE
        } else if (vcpu->arch.epr_needed) {
                kvmppc_set_epr(vcpu, run->epr.epr);
                vcpu->arch.epr_needed = 0;
#endif
        }

        r = kvmppc_vcpu_run(run, vcpu);

        if (vcpu->sigset_active)
                sigprocmask(SIG_SETMASK, &sigsaved, NULL);

        return r;
}

int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu, struct kvm_interrupt *irq)
{
        if (irq->irq == KVM_INTERRUPT_UNSET) {
                kvmppc_core_dequeue_external(vcpu);
                return 0;
        }

        kvmppc_core_queue_external(vcpu, irq);

        kvm_vcpu_kick(vcpu);

        return 0;
}
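
/*
 * Per-vcpu capability toggles.  Anything enabled here is re-checked by
 * kvmppc_sanity_check() before the vcpu is declared usable.
 */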
static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
                                     struct kvm_enable_cap *cap)
{
        int r;

        if (cap->flags)
                return -EINVAL;

        switch (cap->cap) {
        case KVM_CAP_PPC_OSI:
                r = 0;
                vcpu->arch.osi_enabled = true;
                break;
        case KVM_CAP_PPC_PAPR:
                r = 0;
                vcpu->arch.papr_enabled = true;
                break;
        case KVM_CAP_PPC_EPR:
                r = 0;
                if (cap->args[0])
                        vcpu->arch.epr_flags |= KVMPPC_EPR_USER;
                else
                        vcpu->arch.epr_flags &= ~KVMPPC_EPR_USER;
                break;
#ifdef CONFIG_BOOKE
        case KVM_CAP_PPC_BOOKE_WATCHDOG:
                r = 0;
                vcpu->arch.watchdog_enabled = true;
                break;
#endif
#if defined(CONFIG_KVM_E500V2) || defined(CONFIG_KVM_E500MC)
        case KVM_CAP_SW_TLB: {
                struct kvm_config_tlb cfg;
                void __user *user_ptr = (void __user *)(uintptr_t)cap->args[0];

                r = -EFAULT;
                if (copy_from_user(&cfg, user_ptr, sizeof(cfg)))
                        break;

                r = kvm_vcpu_ioctl_config_tlb(vcpu, &cfg);
                break;
        }
#endif
        default:
                r = -EINVAL;
                break;
        }

        if (!r)
                r = kvmppc_sanity_check(vcpu);

        return r;
}

int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
                                    struct kvm_mp_state *mp_state)
{
        return -EINVAL;
}

int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
                                    struct kvm_mp_state *mp_state)
{
        return -EINVAL;
}

long kvm_arch_vcpu_ioctl(struct file *filp,
                         unsigned int ioctl, unsigned long arg)
{
        struct kvm_vcpu *vcpu = filp->private_data;
        void __user *argp = (void __user *)arg;
        long r;

        switch (ioctl) {
        case KVM_INTERRUPT: {
                struct kvm_interrupt irq;
                r = -EFAULT;
                if (copy_from_user(&irq, argp, sizeof(irq)))
                        goto out;
                r = kvm_vcpu_ioctl_interrupt(vcpu, &irq);
                goto out;
        }

        case KVM_ENABLE_CAP:
        {
                struct kvm_enable_cap cap;
                r = -EFAULT;
                if (copy_from_user(&cap, argp, sizeof(cap)))
                        goto out;
                r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap);
                break;
        }

        case KVM_SET_ONE_REG:
        case KVM_GET_ONE_REG:
        {
                struct kvm_one_reg reg;
                r = -EFAULT;
                if (copy_from_user(&reg, argp, sizeof(reg)))
                        goto out;
                if (ioctl == KVM_SET_ONE_REG)
                        r = kvm_vcpu_ioctl_set_one_reg(vcpu, &reg);
                else
                        r = kvm_vcpu_ioctl_get_one_reg(vcpu, &reg);
                break;
        }

#if defined(CONFIG_KVM_E500V2) || defined(CONFIG_KVM_E500MC)
        case KVM_DIRTY_TLB: {
                struct kvm_dirty_tlb dirty;
                r = -EFAULT;
                if (copy_from_user(&dirty, argp, sizeof(dirty)))
                        goto out;
                r = kvm_vcpu_ioctl_dirty_tlb(vcpu, &dirty);
                break;
        }
#endif
        default:
                r = -EINVAL;
        }

out:
        return r;
}

int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
{
        return VM_FAULT_SIGBUS;
}
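
/*
 * Tell the guest which instruction sequence to use for hypercalls;
 * BookE HV guests can use "sc 1" directly, everyone else traps into
 * the host via the magic-r0 "sc" path.
 */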
static int kvm_vm_ioctl_get_pvinfo(struct kvm_ppc_pvinfo *pvinfo)
{
        u32 inst_nop = 0x60000000;
#ifdef CONFIG_KVM_BOOKE_HV
        u32 inst_sc1 = 0x44000022;
        pvinfo->hcall[0] = inst_sc1;
        pvinfo->hcall[1] = inst_nop;
        pvinfo->hcall[2] = inst_nop;
        pvinfo->hcall[3] = inst_nop;
#else
        u32 inst_lis = 0x3c000000;
        u32 inst_ori = 0x60000000;
        u32 inst_sc = 0x44000002;
        u32 inst_imm_mask = 0xffff;

        /*
         * The hypercall to get into KVM from within guest context is as
         * follows:
         *
         *    lis r0, r0, KVM_SC_MAGIC_R0@h
         *    ori r0, KVM_SC_MAGIC_R0@l
         *    sc
         *    nop
         */
        pvinfo->hcall[0] = inst_lis | ((KVM_SC_MAGIC_R0 >> 16) & inst_imm_mask);
        pvinfo->hcall[1] = inst_ori | (KVM_SC_MAGIC_R0 & inst_imm_mask);
        pvinfo->hcall[2] = inst_sc;
        pvinfo->hcall[3] = inst_nop;
#endif

        pvinfo->flags = KVM_PPC_PVINFO_FLAGS_EV_IDLE;

        return 0;
}

long kvm_arch_vm_ioctl(struct file *filp,
                       unsigned int ioctl, unsigned long arg)
{
        struct kvm *kvm __maybe_unused = filp->private_data;
        void __user *argp = (void __user *)arg;
        long r;

        switch (ioctl) {
        case KVM_PPC_GET_PVINFO: {
                struct kvm_ppc_pvinfo pvinfo;
                memset(&pvinfo, 0, sizeof(pvinfo));
                r = kvm_vm_ioctl_get_pvinfo(&pvinfo);
                if (copy_to_user(argp, &pvinfo, sizeof(pvinfo))) {
                        r = -EFAULT;
                        goto out;
                }

                break;
        }
#ifdef CONFIG_PPC_BOOK3S_64
        case KVM_CREATE_SPAPR_TCE: {
                struct kvm_create_spapr_tce create_tce;

                r = -EFAULT;
                if (copy_from_user(&create_tce, argp, sizeof(create_tce)))
                        goto out;
                r = kvm_vm_ioctl_create_spapr_tce(kvm, &create_tce);
                goto out;
        }
#endif /* CONFIG_PPC_BOOK3S_64 */

#ifdef CONFIG_KVM_BOOK3S_64_HV
        case KVM_ALLOCATE_RMA: {
                struct kvm_allocate_rma rma;

                r = kvm_vm_ioctl_allocate_rma(kvm, &rma);
                if (r >= 0 && copy_to_user(argp, &rma, sizeof(rma)))
                        r = -EFAULT;
                break;
        }

        case KVM_PPC_ALLOCATE_HTAB: {
                u32 htab_order;

                r = -EFAULT;
                if (get_user(htab_order, (u32 __user *)argp))
                        break;
                r = kvmppc_alloc_reset_hpt(kvm, &htab_order);
                if (r)
                        break;
                r = -EFAULT;
                if (put_user(htab_order, (u32 __user *)argp))
                        break;
                r = 0;
                break;
        }

        case KVM_PPC_GET_HTAB_FD: {
                struct kvm_get_htab_fd ghf;

                r = -EFAULT;
                if (copy_from_user(&ghf, argp, sizeof(ghf)))
                        break;
                r = kvm_vm_ioctl_get_htab_fd(kvm, &ghf);
                break;
        }
#endif /* CONFIG_KVM_BOOK3S_64_HV */

#ifdef CONFIG_PPC_BOOK3S_64
        case KVM_PPC_GET_SMMU_INFO: {
                struct kvm_ppc_smmu_info info;

                memset(&info, 0, sizeof(info));
                r = kvm_vm_ioctl_get_smmu_info(kvm, &info);
                if (r >= 0 && copy_to_user(argp, &info, sizeof(info)))
                        r = -EFAULT;
                break;
        }
#endif /* CONFIG_PPC_BOOK3S_64 */
        default:
                r = -ENOTTY;
        }

out:
        return r;
}
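
/*
 * Simple bitmap allocator for LPIDs (logical partition IDs), shared
 * by the backends that tag guests with an LPID in hardware.
 */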
static unsigned long lpid_inuse[BITS_TO_LONGS(KVMPPC_NR_LPIDS)];
static unsigned long nr_lpids;

long kvmppc_alloc_lpid(void)
{
        long lpid;

        do {
                lpid = find_first_zero_bit(lpid_inuse, KVMPPC_NR_LPIDS);
                if (lpid >= nr_lpids) {
                        pr_err("%s: No LPIDs free\n", __func__);
                        return -ENOMEM;
                }
        } while (test_and_set_bit(lpid, lpid_inuse));

        return lpid;
}

void kvmppc_claim_lpid(long lpid)
{
        set_bit(lpid, lpid_inuse);
}

void kvmppc_free_lpid(long lpid)
{
        clear_bit(lpid, lpid_inuse);
}

void kvmppc_init_lpid(unsigned long nr_lpids_param)
{
        nr_lpids = min_t(unsigned long, KVMPPC_NR_LPIDS, nr_lpids_param);
        memset(lpid_inuse, 0, sizeof(lpid_inuse));
}

int kvm_arch_init(void *opaque)
{
        return 0;
}

void kvm_arch_exit(void)
{
}