/*
 * hosting zSeries kernel virtual machines
 *
 * Copyright IBM Corp. 2008, 2009
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (version 2 only)
 * as published by the Free Software Foundation.
 *
 *    Author(s): Carsten Otte <cotte@de.ibm.com>
 *               Christian Borntraeger <borntraeger@de.ibm.com>
 *               Heiko Carstens <heiko.carstens@de.ibm.com>
 *               Christian Ehrhardt <ehrhardt@de.ibm.com>
 *               Jason J. Herne <jjherne@us.ibm.com>
 */

#include <linux/compiler.h>
#include <linux/err.h>
#include <linux/hrtimer.h>
#include <linux/init.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/timer.h>
#include <asm/asm-offsets.h>
#include <asm/lowcore.h>
#include <asm/pgtable.h>
#include <asm/switch_to.h>

#define CREATE_TRACE_POINTS
#include "trace-s390.h"
#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU

struct kvm_stats_debugfs_item debugfs_entries[] = {
	{ "userspace_handled", VCPU_STAT(exit_userspace) },
	{ "exit_null", VCPU_STAT(exit_null) },
	{ "exit_validity", VCPU_STAT(exit_validity) },
	{ "exit_stop_request", VCPU_STAT(exit_stop_request) },
	{ "exit_external_request", VCPU_STAT(exit_external_request) },
	{ "exit_external_interrupt", VCPU_STAT(exit_external_interrupt) },
	{ "exit_instruction", VCPU_STAT(exit_instruction) },
	{ "exit_program_interruption", VCPU_STAT(exit_program_interruption) },
	{ "exit_instr_and_program_int", VCPU_STAT(exit_instr_and_program) },
	{ "halt_successful_poll", VCPU_STAT(halt_successful_poll) },
	{ "halt_wakeup", VCPU_STAT(halt_wakeup) },
	{ "instruction_lctlg", VCPU_STAT(instruction_lctlg) },
	{ "instruction_lctl", VCPU_STAT(instruction_lctl) },
	{ "instruction_stctl", VCPU_STAT(instruction_stctl) },
	{ "instruction_stctg", VCPU_STAT(instruction_stctg) },
	{ "deliver_emergency_signal", VCPU_STAT(deliver_emergency_signal) },
	{ "deliver_external_call", VCPU_STAT(deliver_external_call) },
	{ "deliver_service_signal", VCPU_STAT(deliver_service_signal) },
	{ "deliver_virtio_interrupt", VCPU_STAT(deliver_virtio_interrupt) },
	{ "deliver_stop_signal", VCPU_STAT(deliver_stop_signal) },
	{ "deliver_prefix_signal", VCPU_STAT(deliver_prefix_signal) },
	{ "deliver_restart_signal", VCPU_STAT(deliver_restart_signal) },
	{ "deliver_program_interruption", VCPU_STAT(deliver_program_int) },
	{ "exit_wait_state", VCPU_STAT(exit_wait_state) },
	{ "instruction_pfmf", VCPU_STAT(instruction_pfmf) },
	{ "instruction_stidp", VCPU_STAT(instruction_stidp) },
	{ "instruction_spx", VCPU_STAT(instruction_spx) },
	{ "instruction_stpx", VCPU_STAT(instruction_stpx) },
	{ "instruction_stap", VCPU_STAT(instruction_stap) },
	{ "instruction_storage_key", VCPU_STAT(instruction_storage_key) },
	{ "instruction_ipte_interlock", VCPU_STAT(instruction_ipte_interlock) },
	{ "instruction_stsch", VCPU_STAT(instruction_stsch) },
	{ "instruction_chsc", VCPU_STAT(instruction_chsc) },
	{ "instruction_essa", VCPU_STAT(instruction_essa) },
	{ "instruction_stsi", VCPU_STAT(instruction_stsi) },
	{ "instruction_stfl", VCPU_STAT(instruction_stfl) },
	{ "instruction_tprot", VCPU_STAT(instruction_tprot) },
	{ "instruction_sigp_sense", VCPU_STAT(instruction_sigp_sense) },
	{ "instruction_sigp_sense_running", VCPU_STAT(instruction_sigp_sense_running) },
	{ "instruction_sigp_external_call", VCPU_STAT(instruction_sigp_external_call) },
	{ "instruction_sigp_emergency", VCPU_STAT(instruction_sigp_emergency) },
	{ "instruction_sigp_cond_emergency", VCPU_STAT(instruction_sigp_cond_emergency) },
	{ "instruction_sigp_start", VCPU_STAT(instruction_sigp_start) },
	{ "instruction_sigp_stop", VCPU_STAT(instruction_sigp_stop) },
	{ "instruction_sigp_stop_store_status", VCPU_STAT(instruction_sigp_stop_store_status) },
	{ "instruction_sigp_store_status", VCPU_STAT(instruction_sigp_store_status) },
	{ "instruction_sigp_set_arch", VCPU_STAT(instruction_sigp_arch) },
	{ "instruction_sigp_set_prefix", VCPU_STAT(instruction_sigp_prefix) },
	{ "instruction_sigp_restart", VCPU_STAT(instruction_sigp_restart) },
	{ "instruction_sigp_cpu_reset", VCPU_STAT(instruction_sigp_cpu_reset) },
	{ "instruction_sigp_init_cpu_reset", VCPU_STAT(instruction_sigp_init_cpu_reset) },
	{ "instruction_sigp_unknown", VCPU_STAT(instruction_sigp_unknown) },
	{ "diagnose_10", VCPU_STAT(diagnose_10) },
	{ "diagnose_44", VCPU_STAT(diagnose_44) },
	{ "diagnose_9c", VCPU_STAT(diagnose_9c) },
	{ NULL }
};
/* upper facilities limit for kvm */
unsigned long kvm_s390_fac_list_mask[] = {
	0xff82fffbf4fc2000UL,
	0x005c000000000000UL,
};

unsigned long kvm_s390_fac_list_mask_size(void)
{
	BUILD_BUG_ON(ARRAY_SIZE(kvm_s390_fac_list_mask) > S390_ARCH_FAC_MASK_SIZE_U64);
	return ARRAY_SIZE(kvm_s390_fac_list_mask);
}
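
/*
 * Sizing note (added for clarity): STFLE can report at most 16 kbit of
 * facility bits, i.e. 16384 bits / 64 = 256 u64 words = 2 kbyte per
 * array. The two-word mask above therefore only passes through
 * facilities 0-127; everything beyond the mask size is hidden from
 * guests by the masking loop in kvm_arch_init_vm().
 */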
static struct gmap_notifier gmap_notifier;

/* Section: not file related */
int kvm_arch_hardware_enable(void)
{
	/* every s390 is virtualization enabled ;-) */
	return 0;
}

static void kvm_gmap_notifier(struct gmap *gmap, unsigned long address);

int kvm_arch_hardware_setup(void)
{
	gmap_notifier.notifier_call = kvm_gmap_notifier;
	gmap_register_ipte_notifier(&gmap_notifier);
	return 0;
}

void kvm_arch_hardware_unsetup(void)
{
	gmap_unregister_ipte_notifier(&gmap_notifier);
}

int kvm_arch_init(void *opaque)
{
	/* Register floating interrupt controller interface. */
	return kvm_register_device_ops(&kvm_flic_ops, KVM_DEV_TYPE_FLIC);
}

/* Section: device related */
long kvm_arch_dev_ioctl(struct file *filp,
			unsigned int ioctl, unsigned long arg)
{
	if (ioctl == KVM_S390_ENABLE_SIE)
		return s390_enable_sie();
	return -EINVAL;
}

int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
{
	int r;

	switch (ext) {
	case KVM_CAP_S390_PSW:
	case KVM_CAP_S390_GMAP:
	case KVM_CAP_SYNC_MMU:
#ifdef CONFIG_KVM_S390_UCONTROL
	case KVM_CAP_S390_UCONTROL:
#endif
	case KVM_CAP_ASYNC_PF:
	case KVM_CAP_SYNC_REGS:
	case KVM_CAP_ONE_REG:
	case KVM_CAP_ENABLE_CAP:
	case KVM_CAP_S390_CSS_SUPPORT:
	case KVM_CAP_IOEVENTFD:
	case KVM_CAP_DEVICE_CTRL:
	case KVM_CAP_ENABLE_CAP_VM:
	case KVM_CAP_S390_IRQCHIP:
	case KVM_CAP_VM_ATTRIBUTES:
	case KVM_CAP_MP_STATE:
	case KVM_CAP_S390_USER_SIGP:
		r = 1;
		break;
	case KVM_CAP_NR_VCPUS:
	case KVM_CAP_MAX_VCPUS:
		r = KVM_MAX_VCPUS;
		break;
	case KVM_CAP_NR_MEMSLOTS:
		r = KVM_USER_MEM_SLOTS;
		break;
	case KVM_CAP_S390_COW:
		r = MACHINE_HAS_ESOP;
		break;
	default:
		r = 0;
	}
	return r;
}
static void kvm_s390_sync_dirty_log(struct kvm *kvm,
				    struct kvm_memory_slot *memslot)
{
	gfn_t cur_gfn, last_gfn;
	unsigned long address;
	struct gmap *gmap = kvm->arch.gmap;

	down_read(&gmap->mm->mmap_sem);
	/* Loop over all guest pages */
	last_gfn = memslot->base_gfn + memslot->npages;
	for (cur_gfn = memslot->base_gfn; cur_gfn <= last_gfn; cur_gfn++) {
		address = gfn_to_hva_memslot(memslot, cur_gfn);

		if (gmap_test_and_clear_dirty(address, gmap))
			mark_page_dirty(kvm, cur_gfn);
	}
	up_read(&gmap->mm->mmap_sem);
}
/* Section: vm related */
/*
 * Get (and clear) the dirty memory log for a memory slot.
 */
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
			       struct kvm_dirty_log *log)
{
	int r;
	unsigned long n;
	struct kvm_memory_slot *memslot;
	int is_dirty = 0;

	mutex_lock(&kvm->slots_lock);

	r = -EINVAL;
	if (log->slot >= KVM_USER_MEM_SLOTS)
		goto out;

	memslot = id_to_memslot(kvm->memslots, log->slot);
	r = -ENOENT;
	if (!memslot->dirty_bitmap)
		goto out;

	kvm_s390_sync_dirty_log(kvm, memslot);
	r = kvm_get_dirty_log(kvm, log, &is_dirty);
	if (r)
		goto out;

	/* Clear the dirty log */
	if (is_dirty) {
		n = kvm_dirty_bitmap_bytes(memslot);
		memset(memslot->dirty_bitmap, 0, n);
	}
	r = 0;
out:
	mutex_unlock(&kvm->slots_lock);
	return r;
}
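
/*
 * A minimal userspace sketch of how the ioctl above is typically
 * consumed; it is not part of this file, and "vm_fd" and "slot_size"
 * are assumptions made purely for illustration:
 *
 *	struct kvm_dirty_log log = { .slot = 0 };
 *
 *	log.dirty_bitmap = calloc(slot_size / 4096 / 8, 1);
 *	if (ioctl(vm_fd, KVM_GET_DIRTY_LOG, &log) == 0)
 *		;	each set bit marks one dirty 4k guest page
 */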
static int kvm_vm_ioctl_enable_cap(struct kvm *kvm, struct kvm_enable_cap *cap)
{
	int r;

	if (cap->flags)
		return -EINVAL;

	switch (cap->cap) {
	case KVM_CAP_S390_IRQCHIP:
		kvm->arch.use_irqchip = 1;
		r = 0;
		break;
	case KVM_CAP_S390_USER_SIGP:
		kvm->arch.user_sigp = 1;
		r = 0;
		break;
	default:
		r = -EINVAL;
		break;
	}
	return r;
}

static int kvm_s390_get_mem_control(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	switch (attr->attr) {
	case KVM_S390_VM_MEM_LIMIT_SIZE:
		ret = 0;
		if (put_user(kvm->arch.gmap->asce_end, (u64 __user *)attr->addr))
			ret = -EFAULT;
		break;
	default:
		ret = -ENXIO;
		break;
	}
	return ret;
}

static int kvm_s390_set_mem_control(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;
	unsigned int idx;

	switch (attr->attr) {
	case KVM_S390_VM_MEM_ENABLE_CMMA:
		ret = -EBUSY;
		mutex_lock(&kvm->lock);
		if (atomic_read(&kvm->online_vcpus) == 0) {
			kvm->arch.use_cmma = 1;
			ret = 0;
		}
		mutex_unlock(&kvm->lock);
		break;
	case KVM_S390_VM_MEM_CLR_CMMA:
		mutex_lock(&kvm->lock);
		idx = srcu_read_lock(&kvm->srcu);
		s390_reset_cmma(kvm->arch.gmap->mm);
		srcu_read_unlock(&kvm->srcu, idx);
		mutex_unlock(&kvm->lock);
		ret = 0;
		break;
	case KVM_S390_VM_MEM_LIMIT_SIZE: {
		unsigned long new_limit;

		if (kvm_is_ucontrol(kvm))
			return -EINVAL;

		if (get_user(new_limit, (u64 __user *)attr->addr))
			return -EFAULT;

		if (new_limit > kvm->arch.gmap->asce_end)
			return -E2BIG;

		ret = -EBUSY;
		mutex_lock(&kvm->lock);
		if (atomic_read(&kvm->online_vcpus) == 0) {
			/* gmap_alloc will round the limit up */
			struct gmap *new = gmap_alloc(current->mm, new_limit);

			if (!new) {
				ret = -ENOMEM;
			} else {
				gmap_free(kvm->arch.gmap);
				new->private = kvm;
				kvm->arch.gmap = new;
				ret = 0;
			}
		}
		mutex_unlock(&kvm->lock);
		break;
	}
	default:
		ret = -ENXIO;
		break;
	}
	return ret;
}
static void kvm_s390_vcpu_crypto_setup(struct kvm_vcpu *vcpu);

static int kvm_s390_vm_set_crypto(struct kvm *kvm, struct kvm_device_attr *attr)
{
	struct kvm_vcpu *vcpu;
	int i;

	if (!test_kvm_facility(kvm, 76))
		return -EINVAL;

	mutex_lock(&kvm->lock);
	switch (attr->attr) {
	case KVM_S390_VM_CRYPTO_ENABLE_AES_KW:
		get_random_bytes(
			kvm->arch.crypto.crycb->aes_wrapping_key_mask,
			sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
		kvm->arch.crypto.aes_kw = 1;
		break;
	case KVM_S390_VM_CRYPTO_ENABLE_DEA_KW:
		get_random_bytes(
			kvm->arch.crypto.crycb->dea_wrapping_key_mask,
			sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));
		kvm->arch.crypto.dea_kw = 1;
		break;
	case KVM_S390_VM_CRYPTO_DISABLE_AES_KW:
		kvm->arch.crypto.aes_kw = 0;
		memset(kvm->arch.crypto.crycb->aes_wrapping_key_mask, 0,
			sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
		break;
	case KVM_S390_VM_CRYPTO_DISABLE_DEA_KW:
		kvm->arch.crypto.dea_kw = 0;
		memset(kvm->arch.crypto.crycb->dea_wrapping_key_mask, 0,
			sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));
		break;
	default:
		mutex_unlock(&kvm->lock);
		return -ENXIO;
	}

	kvm_for_each_vcpu(i, vcpu, kvm) {
		kvm_s390_vcpu_crypto_setup(vcpu);
		exit_sie(vcpu);
	}
	mutex_unlock(&kvm->lock);
	return 0;
}
static int kvm_s390_set_tod_high(struct kvm *kvm, struct kvm_device_attr *attr)
{
	u8 gtod_high;

	if (copy_from_user(&gtod_high, (void __user *)attr->addr,
			   sizeof(gtod_high)))
		return -EFAULT;

	if (gtod_high != 0)
		return -EINVAL;

	return 0;
}

static int kvm_s390_set_tod_low(struct kvm *kvm, struct kvm_device_attr *attr)
{
	struct kvm_vcpu *cur_vcpu;
	unsigned int vcpu_idx;
	u64 host_tod, gtod;
	int r;

	if (copy_from_user(&gtod, (void __user *)attr->addr, sizeof(gtod)))
		return -EFAULT;

	r = store_tod_clock(&host_tod);
	if (r)
		return r;

	mutex_lock(&kvm->lock);
	kvm->arch.epoch = gtod - host_tod;
	kvm_for_each_vcpu(vcpu_idx, cur_vcpu, kvm) {
		cur_vcpu->arch.sie_block->epoch = kvm->arch.epoch;
		exit_sie(cur_vcpu);
	}
	mutex_unlock(&kvm->lock);
	return 0;
}

static int kvm_s390_set_tod(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	if (attr->flags)
		return -EINVAL;

	switch (attr->attr) {
	case KVM_S390_VM_TOD_HIGH:
		ret = kvm_s390_set_tod_high(kvm, attr);
		break;
	case KVM_S390_VM_TOD_LOW:
		ret = kvm_s390_set_tod_low(kvm, attr);
		break;
	default:
		ret = -ENXIO;
		break;
	}
	return ret;
}

static int kvm_s390_get_tod_high(struct kvm *kvm, struct kvm_device_attr *attr)
{
	u8 gtod_high = 0;

	if (copy_to_user((void __user *)attr->addr, &gtod_high,
			 sizeof(gtod_high)))
		return -EFAULT;

	return 0;
}

static int kvm_s390_get_tod_low(struct kvm *kvm, struct kvm_device_attr *attr)
{
	u64 host_tod, gtod;
	int r;

	r = store_tod_clock(&host_tod);
	if (r)
		return r;

	gtod = host_tod + kvm->arch.epoch;
	if (copy_to_user((void __user *)attr->addr, &gtod, sizeof(gtod)))
		return -EFAULT;

	return 0;
}

static int kvm_s390_get_tod(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	if (attr->flags)
		return -EINVAL;

	switch (attr->attr) {
	case KVM_S390_VM_TOD_HIGH:
		ret = kvm_s390_get_tod_high(kvm, attr);
		break;
	case KVM_S390_VM_TOD_LOW:
		ret = kvm_s390_get_tod_low(kvm, attr);
		break;
	default:
		ret = -ENXIO;
		break;
	}
	return ret;
}
static int kvm_s390_set_processor(struct kvm *kvm, struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_cpu_processor *proc;
	int ret = 0;

	mutex_lock(&kvm->lock);
	if (atomic_read(&kvm->online_vcpus)) {
		ret = -EBUSY;
		goto out;
	}
	proc = kzalloc(sizeof(*proc), GFP_KERNEL);
	if (!proc) {
		ret = -ENOMEM;
		goto out;
	}
	if (!copy_from_user(proc, (void __user *)attr->addr,
			    sizeof(*proc))) {
		memcpy(&kvm->arch.model.cpu_id, &proc->cpuid,
		       sizeof(struct cpuid));
		kvm->arch.model.ibc = proc->ibc;
		memcpy(kvm->arch.model.fac->list, proc->fac_list,
		       S390_ARCH_FAC_LIST_SIZE_BYTE);
	} else
		ret = -EFAULT;
	kfree(proc);
out:
	mutex_unlock(&kvm->lock);
	return ret;
}

static int kvm_s390_set_cpu_model(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret = -ENXIO;

	switch (attr->attr) {
	case KVM_S390_VM_CPU_PROCESSOR:
		ret = kvm_s390_set_processor(kvm, attr);
		break;
	}
	return ret;
}

static int kvm_s390_get_processor(struct kvm *kvm, struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_cpu_processor *proc;
	int ret = 0;

	proc = kzalloc(sizeof(*proc), GFP_KERNEL);
	if (!proc) {
		ret = -ENOMEM;
		goto out;
	}
	memcpy(&proc->cpuid, &kvm->arch.model.cpu_id, sizeof(struct cpuid));
	proc->ibc = kvm->arch.model.ibc;
	memcpy(&proc->fac_list, kvm->arch.model.fac->list, S390_ARCH_FAC_LIST_SIZE_BYTE);
	if (copy_to_user((void __user *)attr->addr, proc, sizeof(*proc)))
		ret = -EFAULT;
	kfree(proc);
out:
	return ret;
}

static int kvm_s390_get_machine(struct kvm *kvm, struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_cpu_machine *mach;
	int ret = 0;

	mach = kzalloc(sizeof(*mach), GFP_KERNEL);
	if (!mach) {
		ret = -ENOMEM;
		goto out;
	}
	get_cpu_id((struct cpuid *) &mach->cpuid);
	mach->ibc = sclp_get_ibc();
	memcpy(&mach->fac_mask, kvm->arch.model.fac->mask,
	       S390_ARCH_FAC_LIST_SIZE_BYTE);
	memcpy((unsigned long *)&mach->fac_list, S390_lowcore.stfle_fac_list,
	       S390_ARCH_FAC_LIST_SIZE_BYTE);
	if (copy_to_user((void __user *)attr->addr, mach, sizeof(*mach)))
		ret = -EFAULT;
	kfree(mach);
out:
	return ret;
}

static int kvm_s390_get_cpu_model(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret = -ENXIO;

	switch (attr->attr) {
	case KVM_S390_VM_CPU_PROCESSOR:
		ret = kvm_s390_get_processor(kvm, attr);
		break;
	case KVM_S390_VM_CPU_MACHINE:
		ret = kvm_s390_get_machine(kvm, attr);
		break;
	}
	return ret;
}
static int kvm_s390_vm_set_attr(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	switch (attr->group) {
	case KVM_S390_VM_MEM_CTRL:
		ret = kvm_s390_set_mem_control(kvm, attr);
		break;
	case KVM_S390_VM_TOD:
		ret = kvm_s390_set_tod(kvm, attr);
		break;
	case KVM_S390_VM_CPU_MODEL:
		ret = kvm_s390_set_cpu_model(kvm, attr);
		break;
	case KVM_S390_VM_CRYPTO:
		ret = kvm_s390_vm_set_crypto(kvm, attr);
		break;
	default:
		ret = -ENXIO;
		break;
	}
	return ret;
}

static int kvm_s390_vm_get_attr(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	switch (attr->group) {
	case KVM_S390_VM_MEM_CTRL:
		ret = kvm_s390_get_mem_control(kvm, attr);
		break;
	case KVM_S390_VM_TOD:
		ret = kvm_s390_get_tod(kvm, attr);
		break;
	case KVM_S390_VM_CPU_MODEL:
		ret = kvm_s390_get_cpu_model(kvm, attr);
		break;
	default:
		ret = -ENXIO;
		break;
	}
	return ret;
}

static int kvm_s390_vm_has_attr(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	switch (attr->group) {
	case KVM_S390_VM_MEM_CTRL:
		switch (attr->attr) {
		case KVM_S390_VM_MEM_ENABLE_CMMA:
		case KVM_S390_VM_MEM_CLR_CMMA:
		case KVM_S390_VM_MEM_LIMIT_SIZE:
			ret = 0;
			break;
		default:
			ret = -ENXIO;
			break;
		}
		break;
	case KVM_S390_VM_TOD:
		switch (attr->attr) {
		case KVM_S390_VM_TOD_LOW:
		case KVM_S390_VM_TOD_HIGH:
			ret = 0;
			break;
		default:
			ret = -ENXIO;
			break;
		}
		break;
	case KVM_S390_VM_CPU_MODEL:
		switch (attr->attr) {
		case KVM_S390_VM_CPU_PROCESSOR:
		case KVM_S390_VM_CPU_MACHINE:
			ret = 0;
			break;
		default:
			ret = -ENXIO;
			break;
		}
		break;
	case KVM_S390_VM_CRYPTO:
		switch (attr->attr) {
		case KVM_S390_VM_CRYPTO_ENABLE_AES_KW:
		case KVM_S390_VM_CRYPTO_ENABLE_DEA_KW:
		case KVM_S390_VM_CRYPTO_DISABLE_AES_KW:
		case KVM_S390_VM_CRYPTO_DISABLE_DEA_KW:
			ret = 0;
			break;
		default:
			ret = -ENXIO;
			break;
		}
		break;
	default:
		ret = -ENXIO;
		break;
	}
	return ret;
}
long kvm_arch_vm_ioctl(struct file *filp,
		       unsigned int ioctl, unsigned long arg)
{
	struct kvm *kvm = filp->private_data;
	void __user *argp = (void __user *)arg;
	struct kvm_device_attr attr;
	int r;

	switch (ioctl) {
	case KVM_S390_INTERRUPT: {
		struct kvm_s390_interrupt s390int;

		r = -EFAULT;
		if (copy_from_user(&s390int, argp, sizeof(s390int)))
			break;
		r = kvm_s390_inject_vm(kvm, &s390int);
		break;
	}
	case KVM_ENABLE_CAP: {
		struct kvm_enable_cap cap;
		r = -EFAULT;
		if (copy_from_user(&cap, argp, sizeof(cap)))
			break;
		r = kvm_vm_ioctl_enable_cap(kvm, &cap);
		break;
	}
	case KVM_CREATE_IRQCHIP: {
		struct kvm_irq_routing_entry routing;

		r = -EINVAL;
		if (kvm->arch.use_irqchip) {
			/* Set up dummy routing. */
			memset(&routing, 0, sizeof(routing));
			kvm_set_irq_routing(kvm, &routing, 0, 0);
			r = 0;
		}
		break;
	}
	case KVM_SET_DEVICE_ATTR: {
		r = -EFAULT;
		if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
			break;
		r = kvm_s390_vm_set_attr(kvm, &attr);
		break;
	}
	case KVM_GET_DEVICE_ATTR: {
		r = -EFAULT;
		if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
			break;
		r = kvm_s390_vm_get_attr(kvm, &attr);
		break;
	}
	case KVM_HAS_DEVICE_ATTR: {
		r = -EFAULT;
		if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
			break;
		r = kvm_s390_vm_has_attr(kvm, &attr);
		break;
	}
	default:
		r = -ENOTTY;
	}

	return r;
}
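
/*
 * Hedged usage sketch, not part of this file: the device-attribute
 * ioctls above are driven from userspace through struct kvm_device_attr;
 * "vm_fd" is an assumption made for illustration. Enabling CMMA, for
 * example:
 *
 *	struct kvm_device_attr attr = {
 *		.group = KVM_S390_VM_MEM_CTRL,
 *		.attr  = KVM_S390_VM_MEM_ENABLE_CMMA,
 *	};
 *
 *	ioctl(vm_fd, KVM_SET_DEVICE_ATTR, &attr);
 */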
static int kvm_s390_query_ap_config(u8 *config)
{
	u32 fcn_code = 0x04000000UL;
	u32 cc = 0;

	memset(config, 0, 128);
	asm volatile(
		"lgr 0,%1\n"
		"lgr 2,%2\n"
		".long 0xb2af0000\n"		/* PQAP(QCI) */
		"0: ipm %0\n"
		"srl %0,28\n"
		"1:\n"
		EX_TABLE(0b, 1b)
		: "+r" (cc)
		: "r" (fcn_code), "r" (config)
		: "cc", "0", "2", "memory"
	);

	return cc;
}
static int kvm_s390_apxa_installed(void)
{
	u8 config[128];
	int cc;

	if (test_facility(2) && test_facility(12)) {
		cc = kvm_s390_query_ap_config(config);

		if (cc)
			pr_err("PQAP(QCI) failed with cc=%d", cc);
		else
			return config[0] & 0x40;
	}

	return 0;
}
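
/*
 * Note (added for clarity): PQAP(QCI) fills the 128-byte buffer with the
 * installed crypto facility bits; testing config[0] & 0x40 is how this
 * code detects APXA, which in turn selects the extended CRYCB format in
 * kvm_s390_set_crycb_format() below.
 */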
static void kvm_s390_set_crycb_format(struct kvm *kvm)
{
	kvm->arch.crypto.crycbd = (__u32)(unsigned long) kvm->arch.crypto.crycb;

	if (kvm_s390_apxa_installed())
		kvm->arch.crypto.crycbd |= CRYCB_FORMAT2;
	else
		kvm->arch.crypto.crycbd |= CRYCB_FORMAT1;
}
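
/*
 * Note: crycbd packs the 31-bit CRYCB origin and the format indication
 * into one 32-bit SIE field, which is why kvm_s390_crypto_init() below
 * allocates the CRYCB with GFP_DMA (i.e. below 2 GB).
 */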
static void kvm_s390_get_cpu_id(struct cpuid *cpu_id)
{
	get_cpu_id(cpu_id);
	cpu_id->version = 0xff;
}

static int kvm_s390_crypto_init(struct kvm *kvm)
{
	if (!test_kvm_facility(kvm, 76))
		return 0;

	kvm->arch.crypto.crycb = kzalloc(sizeof(*kvm->arch.crypto.crycb),
					 GFP_KERNEL | GFP_DMA);
	if (!kvm->arch.crypto.crycb)
		return -ENOMEM;

	kvm_s390_set_crycb_format(kvm);

	/* Enable AES/DEA protected key functions by default */
	kvm->arch.crypto.aes_kw = 1;
	kvm->arch.crypto.dea_kw = 1;
	get_random_bytes(kvm->arch.crypto.crycb->aes_wrapping_key_mask,
			 sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
	get_random_bytes(kvm->arch.crypto.crycb->dea_wrapping_key_mask,
			 sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));

	return 0;
}
int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
{
	int i, rc;
	char debug_name[16];
	static unsigned long sca_offset;

	rc = -EINVAL;
#ifdef CONFIG_KVM_S390_UCONTROL
	if (type & ~KVM_VM_S390_UCONTROL)
		goto out_err;
	if ((type & KVM_VM_S390_UCONTROL) && (!capable(CAP_SYS_ADMIN)))
		goto out_err;
#else
	if (type)
		goto out_err;
#endif

	rc = s390_enable_sie();
	if (rc)
		goto out_err;

	rc = -ENOMEM;

	kvm->arch.sca = (struct sca_block *) get_zeroed_page(GFP_KERNEL);
	if (!kvm->arch.sca)
		goto out_err;
	spin_lock(&kvm_lock);
	sca_offset = (sca_offset + 16) & 0x7f0;
	kvm->arch.sca = (struct sca_block *) ((char *) kvm->arch.sca + sca_offset);
	spin_unlock(&kvm_lock);
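
	/*
	 * Staggering note (added for clarity): the SCA uses only part of
	 * the zeroed page allocated above. Advancing sca_offset by 16
	 * bytes per VM, wrapping within 0x7f0, places the SCAs of
	 * different VMs on different cache lines; the precise performance
	 * rationale is an assumption, as it is not documented here.
	 */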
	sprintf(debug_name, "kvm-%u", current->pid);

	kvm->arch.dbf = debug_register(debug_name, 8, 2, 8 * sizeof(long));
	if (!kvm->arch.dbf)
		goto out_nodbf;

	/*
	 * The architectural maximum amount of facilities is 16 kbit. To store
	 * this amount, 2 kbyte of memory is required. Thus we need a full
	 * page to hold the guest facility list (arch.model.fac->list) and the
	 * facility mask (arch.model.fac->mask). Its address size has to be
	 * 31 bits and word aligned.
	 */
	kvm->arch.model.fac =
		(struct kvm_s390_fac *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!kvm->arch.model.fac)
		goto out_nofac;

	/* Populate the facility mask initially. */
	memcpy(kvm->arch.model.fac->mask, S390_lowcore.stfle_fac_list,
	       S390_ARCH_FAC_LIST_SIZE_BYTE);
	for (i = 0; i < S390_ARCH_FAC_LIST_SIZE_U64; i++) {
		if (i < kvm_s390_fac_list_mask_size())
			kvm->arch.model.fac->mask[i] &= kvm_s390_fac_list_mask[i];
		else
			kvm->arch.model.fac->mask[i] = 0UL;
	}

	/* Populate the facility list initially. */
	memcpy(kvm->arch.model.fac->list, kvm->arch.model.fac->mask,
	       S390_ARCH_FAC_LIST_SIZE_BYTE);

	kvm_s390_get_cpu_id(&kvm->arch.model.cpu_id);
	kvm->arch.model.ibc = sclp_get_ibc() & 0x0fff;

	if (kvm_s390_crypto_init(kvm) < 0)
		goto out_crypto;

	spin_lock_init(&kvm->arch.float_int.lock);
	INIT_LIST_HEAD(&kvm->arch.float_int.list);
	init_waitqueue_head(&kvm->arch.ipte_wq);
	mutex_init(&kvm->arch.ipte_mutex);

	debug_register_view(kvm->arch.dbf, &debug_sprintf_view);
	VM_EVENT(kvm, 3, "%s", "vm created");

	if (type & KVM_VM_S390_UCONTROL) {
		kvm->arch.gmap = NULL;
	} else {
		kvm->arch.gmap = gmap_alloc(current->mm, (1UL << 44) - 1);
		if (!kvm->arch.gmap)
			goto out_nogmap;
		kvm->arch.gmap->private = kvm;
		kvm->arch.gmap->pfault_enabled = 0;
	}

	kvm->arch.css_support = 0;
	kvm->arch.use_irqchip = 0;

	spin_lock_init(&kvm->arch.start_stop_lock);

	return 0;
out_nogmap:
	kfree(kvm->arch.crypto.crycb);
out_crypto:
	free_page((unsigned long)kvm->arch.model.fac);
out_nofac:
	debug_unregister(kvm->arch.dbf);
out_nodbf:
	free_page((unsigned long)(kvm->arch.sca));
out_err:
	return rc;
}
void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{
	VCPU_EVENT(vcpu, 3, "%s", "free cpu");
	trace_kvm_s390_destroy_vcpu(vcpu->vcpu_id);
	kvm_s390_clear_local_irqs(vcpu);
	kvm_clear_async_pf_completion_queue(vcpu);
	if (!kvm_is_ucontrol(vcpu->kvm)) {
		clear_bit(63 - vcpu->vcpu_id,
			  (unsigned long *) &vcpu->kvm->arch.sca->mcn);
		if (vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda ==
		    (__u64) vcpu->arch.sie_block)
			vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda = 0;
	}

	if (kvm_is_ucontrol(vcpu->kvm))
		gmap_free(vcpu->arch.gmap);

	if (kvm_s390_cmma_enabled(vcpu->kvm))
		kvm_s390_vcpu_unsetup_cmma(vcpu);
	free_page((unsigned long)(vcpu->arch.sie_block));

	kvm_vcpu_uninit(vcpu);
	kmem_cache_free(kvm_vcpu_cache, vcpu);
}

static void kvm_free_vcpus(struct kvm *kvm)
{
	unsigned int i;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(i, vcpu, kvm)
		kvm_arch_vcpu_destroy(vcpu);

	mutex_lock(&kvm->lock);
	for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
		kvm->vcpus[i] = NULL;

	atomic_set(&kvm->online_vcpus, 0);
	mutex_unlock(&kvm->lock);
}

void kvm_arch_destroy_vm(struct kvm *kvm)
{
	kvm_free_vcpus(kvm);
	free_page((unsigned long)kvm->arch.model.fac);
	free_page((unsigned long)(kvm->arch.sca));
	debug_unregister(kvm->arch.dbf);
	kfree(kvm->arch.crypto.crycb);
	if (!kvm_is_ucontrol(kvm))
		gmap_free(kvm->arch.gmap);
	kvm_s390_destroy_adapters(kvm);
	kvm_s390_clear_float_irqs(kvm);
}
/* Section: vcpu related */
static int __kvm_ucontrol_vcpu_init(struct kvm_vcpu *vcpu)
{
	vcpu->arch.gmap = gmap_alloc(current->mm, -1UL);
	if (!vcpu->arch.gmap)
		return -ENOMEM;
	vcpu->arch.gmap->private = vcpu->kvm;

	return 0;
}

int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
{
	vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
	kvm_clear_async_pf_completion_queue(vcpu);
	vcpu->run->kvm_valid_regs = KVM_SYNC_PREFIX |
				    KVM_SYNC_GPRS |
				    KVM_SYNC_ACRS |
				    KVM_SYNC_CRS |
				    KVM_SYNC_ARCH0 |
				    KVM_SYNC_PFAULT;

	if (kvm_is_ucontrol(vcpu->kvm))
		return __kvm_ucontrol_vcpu_init(vcpu);

	return 0;
}
void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	save_fp_ctl(&vcpu->arch.host_fpregs.fpc);
	save_fp_regs(vcpu->arch.host_fpregs.fprs);
	save_access_regs(vcpu->arch.host_acrs);
	restore_fp_ctl(&vcpu->arch.guest_fpregs.fpc);
	restore_fp_regs(vcpu->arch.guest_fpregs.fprs);
	restore_access_regs(vcpu->run->s.regs.acrs);
	gmap_enable(vcpu->arch.gmap);
	atomic_set_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
}

void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
	atomic_clear_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
	gmap_disable(vcpu->arch.gmap);
	save_fp_ctl(&vcpu->arch.guest_fpregs.fpc);
	save_fp_regs(vcpu->arch.guest_fpregs.fprs);
	save_access_regs(vcpu->run->s.regs.acrs);
	restore_fp_ctl(&vcpu->arch.host_fpregs.fpc);
	restore_fp_regs(vcpu->arch.host_fpregs.fprs);
	restore_access_regs(vcpu->arch.host_acrs);
}
static void kvm_s390_vcpu_initial_reset(struct kvm_vcpu *vcpu)
{
	/* this equals initial cpu reset in pop, but we don't switch to ESA */
	vcpu->arch.sie_block->gpsw.mask = 0UL;
	vcpu->arch.sie_block->gpsw.addr = 0UL;
	kvm_s390_set_prefix(vcpu, 0);
	vcpu->arch.sie_block->cputm = 0UL;
	vcpu->arch.sie_block->ckc = 0UL;
	vcpu->arch.sie_block->todpr = 0;
	memset(vcpu->arch.sie_block->gcr, 0, 16 * sizeof(__u64));
	vcpu->arch.sie_block->gcr[0] = 0xE0UL;
	vcpu->arch.sie_block->gcr[14] = 0xC2000000UL;
	vcpu->arch.guest_fpregs.fpc = 0;
	asm volatile("lfpc %0" : : "Q" (vcpu->arch.guest_fpregs.fpc));
	vcpu->arch.sie_block->gbea = 1;
	vcpu->arch.sie_block->pp = 0;
	vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
	kvm_clear_async_pf_completion_queue(vcpu);
	if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm))
		kvm_s390_vcpu_stop(vcpu);
	kvm_s390_clear_local_irqs(vcpu);
}
void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
{
	mutex_lock(&vcpu->kvm->lock);
	vcpu->arch.sie_block->epoch = vcpu->kvm->arch.epoch;
	mutex_unlock(&vcpu->kvm->lock);
	if (!kvm_is_ucontrol(vcpu->kvm))
		vcpu->arch.gmap = vcpu->kvm->arch.gmap;
}

static void kvm_s390_vcpu_crypto_setup(struct kvm_vcpu *vcpu)
{
	if (!test_kvm_facility(vcpu->kvm, 76))
		return;

	vcpu->arch.sie_block->ecb3 &= ~(ECB3_AES | ECB3_DEA);

	if (vcpu->kvm->arch.crypto.aes_kw)
		vcpu->arch.sie_block->ecb3 |= ECB3_AES;
	if (vcpu->kvm->arch.crypto.dea_kw)
		vcpu->arch.sie_block->ecb3 |= ECB3_DEA;

	vcpu->arch.sie_block->crycbd = vcpu->kvm->arch.crypto.crycbd;
}
void kvm_s390_vcpu_unsetup_cmma(struct kvm_vcpu *vcpu)
{
	free_page(vcpu->arch.sie_block->cbrlo);
	vcpu->arch.sie_block->cbrlo = 0;
}

int kvm_s390_vcpu_setup_cmma(struct kvm_vcpu *vcpu)
{
	vcpu->arch.sie_block->cbrlo = get_zeroed_page(GFP_KERNEL);
	if (!vcpu->arch.sie_block->cbrlo)
		return -ENOMEM;

	vcpu->arch.sie_block->ecb2 |= 0x80;
	vcpu->arch.sie_block->ecb2 &= ~0x08;
	return 0;
}
int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
{
	int rc = 0;

	atomic_set(&vcpu->arch.sie_block->cpuflags, CPUSTAT_ZARCH |
						    CPUSTAT_SM |
						    CPUSTAT_STOPPED |
						    CPUSTAT_GED);
	vcpu->arch.sie_block->ecb = 6;
	if (test_kvm_facility(vcpu->kvm, 50) && test_kvm_facility(vcpu->kvm, 73))
		vcpu->arch.sie_block->ecb |= 0x10;

	vcpu->arch.sie_block->ecb2 = 8;
	vcpu->arch.sie_block->eca = 0xC1002000U;
	if (sclp_has_siif())
		vcpu->arch.sie_block->eca |= 1;
	if (sclp_has_sigpif())
		vcpu->arch.sie_block->eca |= 0x10000000U;
	vcpu->arch.sie_block->ictl |= ICTL_ISKE | ICTL_SSKE | ICTL_RRBE |
				      ICTL_TPROT;

	if (kvm_s390_cmma_enabled(vcpu->kvm)) {
		rc = kvm_s390_vcpu_setup_cmma(vcpu);
		if (rc)
			return rc;
	}
	hrtimer_init(&vcpu->arch.ckc_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	vcpu->arch.ckc_timer.function = kvm_s390_idle_wakeup;

	mutex_lock(&vcpu->kvm->lock);
	vcpu->arch.cpu_id = vcpu->kvm->arch.model.cpu_id;
	vcpu->arch.sie_block->ibc = vcpu->kvm->arch.model.ibc;
	mutex_unlock(&vcpu->kvm->lock);

	kvm_s390_vcpu_crypto_setup(vcpu);

	return rc;
}
struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
				      unsigned int id)
{
	struct kvm_vcpu *vcpu;
	struct sie_page *sie_page;
	int rc = -EINVAL;

	if (id >= KVM_MAX_VCPUS)
		goto out;

	rc = -ENOMEM;

	vcpu = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
	if (!vcpu)
		goto out;

	sie_page = (struct sie_page *) get_zeroed_page(GFP_KERNEL);
	if (!sie_page)
		goto out_free_cpu;

	vcpu->arch.sie_block = &sie_page->sie_block;
	vcpu->arch.sie_block->itdba = (unsigned long) &sie_page->itdb;

	vcpu->arch.sie_block->icpua = id;
	if (!kvm_is_ucontrol(kvm)) {
		if (!kvm->arch.sca) {
			WARN_ON_ONCE(1);
			goto out_free_cpu;
		}
		if (!kvm->arch.sca->cpu[id].sda)
			kvm->arch.sca->cpu[id].sda =
				(__u64) vcpu->arch.sie_block;
		vcpu->arch.sie_block->scaoh =
			(__u32)(((__u64)kvm->arch.sca) >> 32);
		vcpu->arch.sie_block->scaol = (__u32)(__u64)kvm->arch.sca;
		set_bit(63 - id, (unsigned long *) &kvm->arch.sca->mcn);
	}
	vcpu->arch.sie_block->fac = (int) (long) kvm->arch.model.fac->list;

	spin_lock_init(&vcpu->arch.local_int.lock);
	vcpu->arch.local_int.float_int = &kvm->arch.float_int;
	vcpu->arch.local_int.wq = &vcpu->wq;
	vcpu->arch.local_int.cpuflags = &vcpu->arch.sie_block->cpuflags;

	rc = kvm_vcpu_init(vcpu, kvm, id);
	if (rc)
		goto out_free_sie_block;
	VM_EVENT(kvm, 3, "create cpu %d at %p, sie block at %p", id, vcpu,
		 vcpu->arch.sie_block);
	trace_kvm_s390_create_vcpu(id, vcpu, vcpu->arch.sie_block);

	return vcpu;
out_free_sie_block:
	free_page((unsigned long)(vcpu->arch.sie_block));
out_free_cpu:
	kmem_cache_free(kvm_vcpu_cache, vcpu);
out:
	return ERR_PTR(rc);
}
int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
{
	return kvm_s390_vcpu_has_irq(vcpu, 0);
}

void s390_vcpu_block(struct kvm_vcpu *vcpu)
{
	atomic_set_mask(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
}

void s390_vcpu_unblock(struct kvm_vcpu *vcpu)
{
	atomic_clear_mask(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
}

/*
 * Kick a guest cpu out of SIE and wait until SIE is not running.
 * If the CPU is not running (e.g. waiting as idle) the function will
 * return immediately.
 */
void exit_sie(struct kvm_vcpu *vcpu)
{
	atomic_set_mask(CPUSTAT_STOP_INT, &vcpu->arch.sie_block->cpuflags);
	while (vcpu->arch.sie_block->prog0c & PROG_IN_SIE)
		cpu_relax();
}

/* Kick a guest cpu out of SIE and prevent SIE-reentry */
void exit_sie_sync(struct kvm_vcpu *vcpu)
{
	s390_vcpu_block(vcpu);
	exit_sie(vcpu);
}
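
/*
 * Typical kick pattern (added for clarity), as used by the gmap notifier
 * below and by the IBS request paths:
 *
 *	kvm_make_request(KVM_REQ_..., vcpu);
 *	exit_sie_sync(vcpu);
 *
 * Blocking first via s390_vcpu_block() guarantees the vcpu cannot
 * re-enter SIE before kvm_s390_handle_requests() has processed the
 * request and unblocked it again.
 */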
static void kvm_gmap_notifier(struct gmap *gmap, unsigned long address)
{
	int i;
	struct kvm *kvm = gmap->private;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(i, vcpu, kvm) {
		/* match against both prefix pages */
		if (kvm_s390_get_prefix(vcpu) == (address & ~0x1000UL)) {
			VCPU_EVENT(vcpu, 2, "gmap notifier for %lx", address);
			kvm_make_request(KVM_REQ_MMU_RELOAD, vcpu);
			exit_sie_sync(vcpu);
		}
	}
}

int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
{
	/* kvm common code refers to this, but never calls it */
	BUG();
	return 0;
}
static int kvm_arch_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu,
					   struct kvm_one_reg *reg)
{
	int r = -EINVAL;

	switch (reg->id) {
	case KVM_REG_S390_TODPR:
		r = put_user(vcpu->arch.sie_block->todpr,
			     (u32 __user *)reg->addr);
		break;
	case KVM_REG_S390_EPOCHDIFF:
		r = put_user(vcpu->arch.sie_block->epoch,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_CPU_TIMER:
		r = put_user(vcpu->arch.sie_block->cputm,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_CLOCK_COMP:
		r = put_user(vcpu->arch.sie_block->ckc,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PFTOKEN:
		r = put_user(vcpu->arch.pfault_token,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PFCOMPARE:
		r = put_user(vcpu->arch.pfault_compare,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PFSELECT:
		r = put_user(vcpu->arch.pfault_select,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PP:
		r = put_user(vcpu->arch.sie_block->pp,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_GBEA:
		r = put_user(vcpu->arch.sie_block->gbea,
			     (u64 __user *)reg->addr);
		break;
	default:
		break;
	}

	return r;
}

static int kvm_arch_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu,
					   struct kvm_one_reg *reg)
{
	int r = -EINVAL;

	switch (reg->id) {
	case KVM_REG_S390_TODPR:
		r = get_user(vcpu->arch.sie_block->todpr,
			     (u32 __user *)reg->addr);
		break;
	case KVM_REG_S390_EPOCHDIFF:
		r = get_user(vcpu->arch.sie_block->epoch,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_CPU_TIMER:
		r = get_user(vcpu->arch.sie_block->cputm,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_CLOCK_COMP:
		r = get_user(vcpu->arch.sie_block->ckc,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PFTOKEN:
		r = get_user(vcpu->arch.pfault_token,
			     (u64 __user *)reg->addr);
		if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
			kvm_clear_async_pf_completion_queue(vcpu);
		break;
	case KVM_REG_S390_PFCOMPARE:
		r = get_user(vcpu->arch.pfault_compare,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PFSELECT:
		r = get_user(vcpu->arch.pfault_select,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PP:
		r = get_user(vcpu->arch.sie_block->pp,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_GBEA:
		r = get_user(vcpu->arch.sie_block->gbea,
			     (u64 __user *)reg->addr);
		break;
	default:
		break;
	}

	return r;
}
static int kvm_arch_vcpu_ioctl_initial_reset(struct kvm_vcpu *vcpu)
{
	kvm_s390_vcpu_initial_reset(vcpu);
	return 0;
}

int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	memcpy(&vcpu->run->s.regs.gprs, &regs->gprs, sizeof(regs->gprs));
	return 0;
}

int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	memcpy(&regs->gprs, &vcpu->run->s.regs.gprs, sizeof(regs->gprs));
	return 0;
}
int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	memcpy(&vcpu->run->s.regs.acrs, &sregs->acrs, sizeof(sregs->acrs));
	memcpy(&vcpu->arch.sie_block->gcr, &sregs->crs, sizeof(sregs->crs));
	restore_access_regs(vcpu->run->s.regs.acrs);
	return 0;
}

int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	memcpy(&sregs->acrs, &vcpu->run->s.regs.acrs, sizeof(sregs->acrs));
	memcpy(&sregs->crs, &vcpu->arch.sie_block->gcr, sizeof(sregs->crs));
	return 0;
}
int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	if (test_fp_ctl(fpu->fpc))
		return -EINVAL;
	memcpy(&vcpu->arch.guest_fpregs.fprs, &fpu->fprs, sizeof(fpu->fprs));
	vcpu->arch.guest_fpregs.fpc = fpu->fpc;
	restore_fp_ctl(&vcpu->arch.guest_fpregs.fpc);
	restore_fp_regs(vcpu->arch.guest_fpregs.fprs);
	return 0;
}

int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	memcpy(&fpu->fprs, &vcpu->arch.guest_fpregs.fprs, sizeof(fpu->fprs));
	fpu->fpc = vcpu->arch.guest_fpregs.fpc;
	return 0;
}
static int kvm_arch_vcpu_ioctl_set_initial_psw(struct kvm_vcpu *vcpu, psw_t psw)
{
	int rc = 0;

	if (!is_vcpu_stopped(vcpu))
		rc = -EBUSY;
	else {
		vcpu->run->psw_mask = psw.mask;
		vcpu->run->psw_addr = psw.addr;
	}
	return rc;
}

int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
				  struct kvm_translation *tr)
{
	return -EINVAL; /* not implemented yet */
}
#define VALID_GUESTDBG_FLAGS (KVM_GUESTDBG_SINGLESTEP | \
			      KVM_GUESTDBG_USE_HW_BP | \
			      KVM_GUESTDBG_ENABLE)

int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
					struct kvm_guest_debug *dbg)
{
	int rc = 0;

	vcpu->guest_debug = 0;
	kvm_s390_clear_bp_data(vcpu);

	if (dbg->control & ~VALID_GUESTDBG_FLAGS)
		return -EINVAL;

	if (dbg->control & KVM_GUESTDBG_ENABLE) {
		vcpu->guest_debug = dbg->control;
		/* enforce guest PER */
		atomic_set_mask(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags);

		if (dbg->control & KVM_GUESTDBG_USE_HW_BP)
			rc = kvm_s390_import_bp_data(vcpu, dbg);
	} else {
		atomic_clear_mask(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags);
		vcpu->arch.guestdbg.last_bp = 0;
	}

	if (rc) {
		vcpu->guest_debug = 0;
		kvm_s390_clear_bp_data(vcpu);
		atomic_clear_mask(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags);
	}

	return rc;
}
int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	/* CHECK_STOP and LOAD are not supported yet */
	return is_vcpu_stopped(vcpu) ? KVM_MP_STATE_STOPPED :
				       KVM_MP_STATE_OPERATING;
}

int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	int rc = 0;

	/* user space knows about this interface - let it control the state */
	vcpu->kvm->arch.user_cpu_state_ctrl = 1;

	switch (mp_state->mp_state) {
	case KVM_MP_STATE_STOPPED:
		kvm_s390_vcpu_stop(vcpu);
		break;
	case KVM_MP_STATE_OPERATING:
		kvm_s390_vcpu_start(vcpu);
		break;
	case KVM_MP_STATE_LOAD:
	case KVM_MP_STATE_CHECK_STOP:
		/* fall through - CHECK_STOP and LOAD are not supported yet */
	default:
		rc = -ENXIO;
	}

	return rc;
}

bool kvm_s390_cmma_enabled(struct kvm *kvm)
{
	if (!MACHINE_IS_LPAR)
		return false;
	/* only enable for z10 and later */
	if (!MACHINE_HAS_EDAT1)
		return false;
	if (!kvm->arch.use_cmma)
		return false;
	return true;
}

static bool ibs_enabled(struct kvm_vcpu *vcpu)
{
	return atomic_read(&vcpu->arch.sie_block->cpuflags) & CPUSTAT_IBS;
}
static int kvm_s390_handle_requests(struct kvm_vcpu *vcpu)
{
retry:
	s390_vcpu_unblock(vcpu);
	/*
	 * We use MMU_RELOAD just to re-arm the ipte notifier for the
	 * guest prefix page. gmap_ipte_notify will wait on the ptl lock.
	 * This ensures that the ipte instruction for this request has
	 * already finished. We might race against a second unmapper that
	 * wants to set the blocking bit. Let's just retry the request loop.
	 */
	if (kvm_check_request(KVM_REQ_MMU_RELOAD, vcpu)) {
		int rc;
		rc = gmap_ipte_notify(vcpu->arch.gmap,
				      kvm_s390_get_prefix(vcpu),
				      PAGE_SIZE * 2);
		if (rc)
			return rc;
		goto retry;
	}

	if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu)) {
		vcpu->arch.sie_block->ihcpu = 0xffff;
		goto retry;
	}

	if (kvm_check_request(KVM_REQ_ENABLE_IBS, vcpu)) {
		if (!ibs_enabled(vcpu)) {
			trace_kvm_s390_enable_disable_ibs(vcpu->vcpu_id, 1);
			atomic_set_mask(CPUSTAT_IBS,
					&vcpu->arch.sie_block->cpuflags);
		}
		goto retry;
	}

	if (kvm_check_request(KVM_REQ_DISABLE_IBS, vcpu)) {
		if (ibs_enabled(vcpu)) {
			trace_kvm_s390_enable_disable_ibs(vcpu->vcpu_id, 0);
			atomic_clear_mask(CPUSTAT_IBS,
					  &vcpu->arch.sie_block->cpuflags);
		}
		goto retry;
	}

	/* nothing to do, just clear the request */
	clear_bit(KVM_REQ_UNHALT, &vcpu->requests);

	return 0;
}
/**
 * kvm_arch_fault_in_page - fault-in guest page if necessary
 * @vcpu: The corresponding virtual cpu
 * @gpa: Guest physical address
 * @writable: Whether the page should be writable or not
 *
 * Make sure that a guest page has been faulted-in on the host.
 *
 * Return: Zero on success, negative error code otherwise.
 */
long kvm_arch_fault_in_page(struct kvm_vcpu *vcpu, gpa_t gpa, int writable)
{
	return gmap_fault(vcpu->arch.gmap, gpa,
			  writable ? FAULT_FLAG_WRITE : 0);
}
static void __kvm_inject_pfault_token(struct kvm_vcpu *vcpu, bool start_token,
				      unsigned long token)
{
	struct kvm_s390_interrupt inti;
	struct kvm_s390_irq irq;

	if (start_token) {
		irq.u.ext.ext_params2 = token;
		irq.type = KVM_S390_INT_PFAULT_INIT;
		WARN_ON_ONCE(kvm_s390_inject_vcpu(vcpu, &irq));
	} else {
		inti.type = KVM_S390_INT_PFAULT_DONE;
		inti.parm64 = token;
		WARN_ON_ONCE(kvm_s390_inject_vm(vcpu->kvm, &inti));
	}
}

void kvm_arch_async_page_not_present(struct kvm_vcpu *vcpu,
				     struct kvm_async_pf *work)
{
	trace_kvm_s390_pfault_init(vcpu, work->arch.pfault_token);
	__kvm_inject_pfault_token(vcpu, true, work->arch.pfault_token);
}

void kvm_arch_async_page_present(struct kvm_vcpu *vcpu,
				 struct kvm_async_pf *work)
{
	trace_kvm_s390_pfault_done(vcpu, work->arch.pfault_token);
	__kvm_inject_pfault_token(vcpu, false, work->arch.pfault_token);
}

void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu,
			       struct kvm_async_pf *work)
{
	/* s390 will always inject the page directly */
}

bool kvm_arch_can_inject_async_page_present(struct kvm_vcpu *vcpu)
{
	/*
	 * s390 will always inject the page directly,
	 * but we still want check_async_completion to cleanup
	 */
	return true;
}
static int kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu)
{
	hva_t hva;
	struct kvm_arch_async_pf arch;
	int rc;

	if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
		return 0;
	if ((vcpu->arch.sie_block->gpsw.mask & vcpu->arch.pfault_select) !=
	    vcpu->arch.pfault_compare)
		return 0;
	if (psw_extint_disabled(vcpu))
		return 0;
	if (kvm_s390_vcpu_has_irq(vcpu, 0))
		return 0;
	if (!(vcpu->arch.sie_block->gcr[0] & 0x200ul))
		return 0;
	if (!vcpu->arch.gmap->pfault_enabled)
		return 0;

	hva = gfn_to_hva(vcpu->kvm, gpa_to_gfn(current->thread.gmap_addr));
	hva += current->thread.gmap_addr & ~PAGE_MASK;
	if (read_guest_real(vcpu, vcpu->arch.pfault_token, &arch.pfault_token, 8))
		return 0;

	rc = kvm_setup_async_pf(vcpu, current->thread.gmap_addr, hva, &arch);
	return rc;
}
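
/*
 * Pfault protocol summary (added for clarity; the guest-side setup via
 * DIAG 0x258 lives outside this file and is stated here as an
 * assumption): once the guest has enabled pfault handling, a host page
 * fault injects PFAULT_INIT with the guest's token so it can schedule
 * other work, and PFAULT_DONE is injected when the page is resident; see
 * __kvm_inject_pfault_token() above.
 */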
static int vcpu_pre_run(struct kvm_vcpu *vcpu)
{
	int rc, cpuflags;

	/*
	 * On s390 notifications for arriving pages will be delivered directly
	 * to the guest but the housekeeping for completed pfaults is
	 * handled outside the worker.
	 */
	kvm_check_async_pf_completion(vcpu);

	memcpy(&vcpu->arch.sie_block->gg14, &vcpu->run->s.regs.gprs[14], 16);

	if (need_resched())
		schedule();

	if (test_cpu_flag(CIF_MCCK_PENDING))
		s390_handle_mcck();

	if (!kvm_is_ucontrol(vcpu->kvm)) {
		rc = kvm_s390_deliver_pending_interrupts(vcpu);
		if (rc)
			return rc;
	}

	rc = kvm_s390_handle_requests(vcpu);
	if (rc)
		return rc;

	if (guestdbg_enabled(vcpu)) {
		kvm_s390_backup_guest_per_regs(vcpu);
		kvm_s390_patch_guest_per_regs(vcpu);
	}

	vcpu->arch.sie_block->icptcode = 0;
	cpuflags = atomic_read(&vcpu->arch.sie_block->cpuflags);
	VCPU_EVENT(vcpu, 6, "entering sie flags %x", cpuflags);
	trace_kvm_s390_sie_enter(vcpu, cpuflags);

	return 0;
}
static int vcpu_post_run(struct kvm_vcpu *vcpu, int exit_reason)
{
	int rc = -1;

	VCPU_EVENT(vcpu, 6, "exit sie icptcode %d",
		   vcpu->arch.sie_block->icptcode);
	trace_kvm_s390_sie_exit(vcpu, vcpu->arch.sie_block->icptcode);

	if (guestdbg_enabled(vcpu))
		kvm_s390_restore_guest_per_regs(vcpu);

	if (exit_reason >= 0) {
		rc = 0;
	} else if (kvm_is_ucontrol(vcpu->kvm)) {
		vcpu->run->exit_reason = KVM_EXIT_S390_UCONTROL;
		vcpu->run->s390_ucontrol.trans_exc_code =
						current->thread.gmap_addr;
		vcpu->run->s390_ucontrol.pgm_code = 0x10;
		rc = -EREMOTE;

	} else if (current->thread.gmap_pfault) {
		trace_kvm_s390_major_guest_pfault(vcpu);
		current->thread.gmap_pfault = 0;
		if (kvm_arch_setup_async_pf(vcpu)) {
			rc = 0;
		} else {
			gpa_t gpa = current->thread.gmap_addr;
			rc = kvm_arch_fault_in_page(vcpu, gpa, 1);
		}
	}

	if (rc == -1) {
		VCPU_EVENT(vcpu, 3, "%s", "fault in sie instruction");
		trace_kvm_s390_sie_fault(vcpu);
		rc = kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
	}

	memcpy(&vcpu->run->s.regs.gprs[14], &vcpu->arch.sie_block->gg14, 16);

	if (rc == 0) {
		if (kvm_is_ucontrol(vcpu->kvm))
			/* Don't exit for host interrupts. */
			rc = vcpu->arch.sie_block->icptcode ? -EOPNOTSUPP : 0;
		else
			rc = kvm_handle_sie_intercept(vcpu);
	}

	return rc;
}
static int __vcpu_run(struct kvm_vcpu *vcpu)
{
	int rc, exit_reason;

	/*
	 * We try to hold kvm->srcu during most of vcpu_run (except when run-
	 * ning the guest), so that memslots (and other stuff) are protected
	 */
	vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);

	do {
		rc = vcpu_pre_run(vcpu);
		if (rc)
			break;

		srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
		/*
		 * As PF_VCPU will be used in fault handler, between
		 * guest_enter and guest_exit should be no uaccess.
		 */
		preempt_disable();
		kvm_guest_enter();
		preempt_enable();
		exit_reason = sie64a(vcpu->arch.sie_block,
				     vcpu->run->s.regs.gprs);
		kvm_guest_exit();
		vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);

		rc = vcpu_post_run(vcpu, exit_reason);
	} while (!signal_pending(current) && !guestdbg_exit_pending(vcpu) && !rc);

	srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
	return rc;
}
static void sync_regs(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	vcpu->arch.sie_block->gpsw.mask = kvm_run->psw_mask;
	vcpu->arch.sie_block->gpsw.addr = kvm_run->psw_addr;
	if (kvm_run->kvm_dirty_regs & KVM_SYNC_PREFIX)
		kvm_s390_set_prefix(vcpu, kvm_run->s.regs.prefix);
	if (kvm_run->kvm_dirty_regs & KVM_SYNC_CRS) {
		memcpy(&vcpu->arch.sie_block->gcr, &kvm_run->s.regs.crs, 128);
		/* some control register changes require a tlb flush */
		kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
	}
	if (kvm_run->kvm_dirty_regs & KVM_SYNC_ARCH0) {
		vcpu->arch.sie_block->cputm = kvm_run->s.regs.cputm;
		vcpu->arch.sie_block->ckc = kvm_run->s.regs.ckc;
		vcpu->arch.sie_block->todpr = kvm_run->s.regs.todpr;
		vcpu->arch.sie_block->pp = kvm_run->s.regs.pp;
		vcpu->arch.sie_block->gbea = kvm_run->s.regs.gbea;
	}
	if (kvm_run->kvm_dirty_regs & KVM_SYNC_PFAULT) {
		vcpu->arch.pfault_token = kvm_run->s.regs.pft;
		vcpu->arch.pfault_select = kvm_run->s.regs.pfs;
		vcpu->arch.pfault_compare = kvm_run->s.regs.pfc;
		if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
			kvm_clear_async_pf_completion_queue(vcpu);
	}
	kvm_run->kvm_dirty_regs = 0;
}

static void store_regs(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	kvm_run->psw_mask = vcpu->arch.sie_block->gpsw.mask;
	kvm_run->psw_addr = vcpu->arch.sie_block->gpsw.addr;
	kvm_run->s.regs.prefix = kvm_s390_get_prefix(vcpu);
	memcpy(&kvm_run->s.regs.crs, &vcpu->arch.sie_block->gcr, 128);
	kvm_run->s.regs.cputm = vcpu->arch.sie_block->cputm;
	kvm_run->s.regs.ckc = vcpu->arch.sie_block->ckc;
	kvm_run->s.regs.todpr = vcpu->arch.sie_block->todpr;
	kvm_run->s.regs.pp = vcpu->arch.sie_block->pp;
	kvm_run->s.regs.gbea = vcpu->arch.sie_block->gbea;
	kvm_run->s.regs.pft = vcpu->arch.pfault_token;
	kvm_run->s.regs.pfs = vcpu->arch.pfault_select;
	kvm_run->s.regs.pfc = vcpu->arch.pfault_compare;
}
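
/*
 * Note on the sync/store pair above: these registers travel in the
 * kvm_run structure, which is shared with userspace, so the usual
 * KVM_GET/SET_REGS round trips can be skipped around every exit. Which
 * fields are valid in each direction is negotiated through
 * kvm_valid_regs/kvm_dirty_regs (see kvm_arch_vcpu_init()).
 */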
int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	int rc;
	sigset_t sigsaved;

	if (guestdbg_exit_pending(vcpu)) {
		kvm_s390_prepare_debug_exit(vcpu);
		return 0;
	}

	if (vcpu->sigset_active)
		sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);

	if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm)) {
		kvm_s390_vcpu_start(vcpu);
	} else if (is_vcpu_stopped(vcpu)) {
		pr_err_ratelimited("kvm-s390: can't run stopped vcpu %d\n",
				   vcpu->vcpu_id);
		return -EINVAL;
	}

	sync_regs(vcpu, kvm_run);

	might_fault();
	rc = __vcpu_run(vcpu);

	if (signal_pending(current) && !rc) {
		kvm_run->exit_reason = KVM_EXIT_INTR;
		rc = -EINTR;
	}

	if (guestdbg_exit_pending(vcpu) && !rc) {
		kvm_s390_prepare_debug_exit(vcpu);
		rc = 0;
	}

	if (rc == -EOPNOTSUPP) {
		/* intercept cannot be handled in-kernel, prepare kvm-run */
		kvm_run->exit_reason  = KVM_EXIT_S390_SIEIC;
		kvm_run->s390_sieic.icptcode = vcpu->arch.sie_block->icptcode;
		kvm_run->s390_sieic.ipa      = vcpu->arch.sie_block->ipa;
		kvm_run->s390_sieic.ipb      = vcpu->arch.sie_block->ipb;
		rc = 0;
	}

	if (rc == -EREMOTE) {
		/* intercept was handled, but userspace support is needed
		 * kvm_run has been prepared by the handler */
		rc = 0;
	}

	store_regs(vcpu, kvm_run);

	if (vcpu->sigset_active)
		sigprocmask(SIG_SETMASK, &sigsaved, NULL);

	vcpu->stat.exit_userspace++;
	return rc;
}
/*
 * store status at address
 * we have two special cases:
 * KVM_S390_STORE_STATUS_NOADDR: -> 0x1200 on 64 bit
 * KVM_S390_STORE_STATUS_PREFIXED: -> prefix
 */
int kvm_s390_store_status_unloaded(struct kvm_vcpu *vcpu, unsigned long gpa)
{
	unsigned char archmode = 1;
	unsigned int px;
	u64 clkcomp;
	int rc;

	if (gpa == KVM_S390_STORE_STATUS_NOADDR) {
		if (write_guest_abs(vcpu, 163, &archmode, 1))
			return -EFAULT;
		gpa = SAVE_AREA_BASE;
	} else if (gpa == KVM_S390_STORE_STATUS_PREFIXED) {
		if (write_guest_real(vcpu, 163, &archmode, 1))
			return -EFAULT;
		gpa = kvm_s390_real_to_abs(vcpu, SAVE_AREA_BASE);
	}
	rc = write_guest_abs(vcpu, gpa + offsetof(struct save_area, fp_regs),
			     vcpu->arch.guest_fpregs.fprs, 128);
	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, gp_regs),
			      vcpu->run->s.regs.gprs, 128);
	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, psw),
			      &vcpu->arch.sie_block->gpsw, 16);
	px = kvm_s390_get_prefix(vcpu);
	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, pref_reg),
			      &px, 4);
	rc |= write_guest_abs(vcpu,
			      gpa + offsetof(struct save_area, fp_ctrl_reg),
			      &vcpu->arch.guest_fpregs.fpc, 4);
	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, tod_reg),
			      &vcpu->arch.sie_block->todpr, 4);
	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, timer),
			      &vcpu->arch.sie_block->cputm, 8);
	clkcomp = vcpu->arch.sie_block->ckc >> 8;
	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, clk_cmp),
			      &clkcomp, 8);
	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, acc_regs),
			      &vcpu->run->s.regs.acrs, 64);
	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, ctrl_regs),
			      &vcpu->arch.sie_block->gcr, 128);
	return rc ? -EFAULT : 0;
}

int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
{
	/*
	 * The guest FPRS and ACRS are in the host FPRS/ACRS due to the lazy
	 * copying in vcpu load/put. Let's update our copies before we save
	 * it into the save area
	 */
	save_fp_ctl(&vcpu->arch.guest_fpregs.fpc);
	save_fp_regs(vcpu->arch.guest_fpregs.fprs);
	save_access_regs(vcpu->run->s.regs.acrs);

	return kvm_s390_store_status_unloaded(vcpu, addr);
}
static void __disable_ibs_on_vcpu(struct kvm_vcpu *vcpu)
{
	kvm_check_request(KVM_REQ_ENABLE_IBS, vcpu);
	kvm_make_request(KVM_REQ_DISABLE_IBS, vcpu);
	exit_sie_sync(vcpu);
}

static void __disable_ibs_on_all_vcpus(struct kvm *kvm)
{
	unsigned int i;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(i, vcpu, kvm) {
		__disable_ibs_on_vcpu(vcpu);
	}
}

static void __enable_ibs_on_vcpu(struct kvm_vcpu *vcpu)
{
	kvm_check_request(KVM_REQ_DISABLE_IBS, vcpu);
	kvm_make_request(KVM_REQ_ENABLE_IBS, vcpu);
	exit_sie_sync(vcpu);
}

void kvm_s390_vcpu_start(struct kvm_vcpu *vcpu)
{
	int i, online_vcpus, started_vcpus = 0;

	if (!is_vcpu_stopped(vcpu))
		return;

	trace_kvm_s390_vcpu_start_stop(vcpu->vcpu_id, 1);
	/* Only one cpu at a time may enter/leave the STOPPED state. */
	spin_lock(&vcpu->kvm->arch.start_stop_lock);
	online_vcpus = atomic_read(&vcpu->kvm->online_vcpus);

	for (i = 0; i < online_vcpus; i++) {
		if (!is_vcpu_stopped(vcpu->kvm->vcpus[i]))
			started_vcpus++;
	}

	if (started_vcpus == 0) {
		/* we're the only active VCPU -> speed it up */
		__enable_ibs_on_vcpu(vcpu);
	} else if (started_vcpus == 1) {
		/*
		 * As we are starting a second VCPU, we have to disable
		 * the IBS facility on all VCPUs to remove potentially
		 * outstanding ENABLE requests.
		 */
		__disable_ibs_on_all_vcpus(vcpu->kvm);
	}

	atomic_clear_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);
	/*
	 * Another VCPU might have used IBS while we were offline.
	 * Let's play safe and flush the VCPU at startup.
	 */
	kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
	spin_unlock(&vcpu->kvm->arch.start_stop_lock);
	return;
}
void kvm_s390_vcpu_stop(struct kvm_vcpu *vcpu)
{
	int i, online_vcpus, started_vcpus = 0;
	struct kvm_vcpu *started_vcpu = NULL;

	if (is_vcpu_stopped(vcpu))
		return;

	trace_kvm_s390_vcpu_start_stop(vcpu->vcpu_id, 0);
	/* Only one cpu at a time may enter/leave the STOPPED state. */
	spin_lock(&vcpu->kvm->arch.start_stop_lock);
	online_vcpus = atomic_read(&vcpu->kvm->online_vcpus);

	/* SIGP STOP and SIGP STOP AND STORE STATUS has been fully processed */
	kvm_s390_clear_stop_irq(vcpu);

	atomic_set_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);
	__disable_ibs_on_vcpu(vcpu);

	for (i = 0; i < online_vcpus; i++) {
		if (!is_vcpu_stopped(vcpu->kvm->vcpus[i])) {
			started_vcpus++;
			started_vcpu = vcpu->kvm->vcpus[i];
		}
	}

	if (started_vcpus == 1) {
		/*
		 * As we only have one VCPU left, we want to enable the
		 * IBS facility for that VCPU to speed it up.
		 */
		__enable_ibs_on_vcpu(started_vcpu);
	}

	spin_unlock(&vcpu->kvm->arch.start_stop_lock);
	return;
}

static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
				     struct kvm_enable_cap *cap)
{
	int r;

	if (cap->flags)
		return -EINVAL;

	switch (cap->cap) {
	case KVM_CAP_S390_CSS_SUPPORT:
		if (!vcpu->kvm->arch.css_support) {
			vcpu->kvm->arch.css_support = 1;
			trace_kvm_s390_enable_css(vcpu->kvm);
		}
		r = 0;
		break;
	default:
		r = -EINVAL;
		break;
	}
	return r;
}
long kvm_arch_vcpu_ioctl(struct file *filp,
			 unsigned int ioctl, unsigned long arg)
{
	struct kvm_vcpu *vcpu = filp->private_data;
	void __user *argp = (void __user *)arg;
	int idx;
	long r;

	switch (ioctl) {
	case KVM_S390_INTERRUPT: {
		struct kvm_s390_interrupt s390int;
		struct kvm_s390_irq s390irq;

		r = -EFAULT;
		if (copy_from_user(&s390int, argp, sizeof(s390int)))
			break;
		if (s390int_to_s390irq(&s390int, &s390irq))
			return -EINVAL;
		r = kvm_s390_inject_vcpu(vcpu, &s390irq);
		break;
	}
	case KVM_S390_STORE_STATUS:
		idx = srcu_read_lock(&vcpu->kvm->srcu);
		r = kvm_s390_vcpu_store_status(vcpu, arg);
		srcu_read_unlock(&vcpu->kvm->srcu, idx);
		break;
	case KVM_S390_SET_INITIAL_PSW: {
		psw_t psw;

		r = -EFAULT;
		if (copy_from_user(&psw, argp, sizeof(psw)))
			break;
		r = kvm_arch_vcpu_ioctl_set_initial_psw(vcpu, psw);
		break;
	}
	case KVM_S390_INITIAL_RESET:
		r = kvm_arch_vcpu_ioctl_initial_reset(vcpu);
		break;
	case KVM_SET_ONE_REG:
	case KVM_GET_ONE_REG: {
		struct kvm_one_reg reg;
		r = -EFAULT;
		if (copy_from_user(&reg, argp, sizeof(reg)))
			break;
		if (ioctl == KVM_SET_ONE_REG)
			r = kvm_arch_vcpu_ioctl_set_one_reg(vcpu, &reg);
		else
			r = kvm_arch_vcpu_ioctl_get_one_reg(vcpu, &reg);
		break;
	}
#ifdef CONFIG_KVM_S390_UCONTROL
	case KVM_S390_UCAS_MAP: {
		struct kvm_s390_ucas_mapping ucasmap;

		if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
			r = -EFAULT;
			break;
		}

		if (!kvm_is_ucontrol(vcpu->kvm)) {
			r = -EINVAL;
			break;
		}

		r = gmap_map_segment(vcpu->arch.gmap, ucasmap.user_addr,
				     ucasmap.vcpu_addr, ucasmap.length);
		break;
	}
	case KVM_S390_UCAS_UNMAP: {
		struct kvm_s390_ucas_mapping ucasmap;

		if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
			r = -EFAULT;
			break;
		}

		if (!kvm_is_ucontrol(vcpu->kvm)) {
			r = -EINVAL;
			break;
		}

		r = gmap_unmap_segment(vcpu->arch.gmap, ucasmap.vcpu_addr,
				       ucasmap.length);
		break;
	}
#endif
	case KVM_S390_VCPU_FAULT: {
		r = gmap_fault(vcpu->arch.gmap, arg, 0);
		break;
	}
	case KVM_ENABLE_CAP:
	{
		struct kvm_enable_cap cap;
		r = -EFAULT;
		if (copy_from_user(&cap, argp, sizeof(cap)))
			break;
		r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap);
		break;
	}
	default:
		r = -ENOTTY;
	}
	return r;
}
int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
{
#ifdef CONFIG_KVM_S390_UCONTROL
	if ((vmf->pgoff == KVM_S390_SIE_PAGE_OFFSET)
		 && (kvm_is_ucontrol(vcpu->kvm))) {
		vmf->page = virt_to_page(vcpu->arch.sie_block);
		get_page(vmf->page);
		return 0;
	}
#endif
	return VM_FAULT_SIGBUS;
}

int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
			    unsigned long npages)
{
	return 0;
}

/* Section: memory related */
int kvm_arch_prepare_memory_region(struct kvm *kvm,
				   struct kvm_memory_slot *memslot,
				   struct kvm_userspace_memory_region *mem,
				   enum kvm_mr_change change)
{
	/* A few sanity checks. We can have memory slots which have to be
	   located/ended at a segment boundary (1MB). The memory in userland is
	   ok to be fragmented into various different vmas. It is okay to mmap()
	   and munmap() stuff in this slot after doing this call at any time */

	if (mem->userspace_addr & 0xffffful)
		return -EINVAL;

	if (mem->memory_size & 0xffffful)
		return -EINVAL;

	return 0;
}
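
/*
 * Example (added for clarity): a slot with userspace_addr = 0x80000000
 * and memory_size = 0x100000 passes both checks above, while anything
 * not 1 MB aligned (0xfffff covers the low 20 bits) is rejected with
 * -EINVAL.
 */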
void kvm_arch_commit_memory_region(struct kvm *kvm,
				   struct kvm_userspace_memory_region *mem,
				   const struct kvm_memory_slot *old,
				   enum kvm_mr_change change)
{
	int rc;

	/* If the basics of the memslot do not change, we do not want
	 * to update the gmap. Every update causes several unnecessary
	 * segment translation exceptions. This is usually handled just
	 * fine by the normal fault handler + gmap, but it will also
	 * cause faults on the prefix page of running guest CPUs.
	 */
	if (old->userspace_addr == mem->userspace_addr &&
	    old->base_gfn * PAGE_SIZE == mem->guest_phys_addr &&
	    old->npages * PAGE_SIZE == mem->memory_size)
		return;

	rc = gmap_map_segment(kvm->arch.gmap, mem->userspace_addr,
			      mem->guest_phys_addr, mem->memory_size);
	if (rc)
		printk(KERN_WARNING "kvm-s390: failed to commit memory region\n");
	return;
}

static int __init kvm_s390_init(void)
{
	return kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
}

static void __exit kvm_s390_exit(void)
{
	kvm_exit();
}

module_init(kvm_s390_init);
module_exit(kvm_s390_exit);

/*
 * Enable autoloading of the kvm module.
 * Note that we add the module alias here instead of virt/kvm/kvm_main.c
 * since x86 takes a different approach.
 */
#include <linux/miscdevice.h>
MODULE_ALIAS_MISCDEV(KVM_MINOR);
MODULE_ALIAS("devname:kvm");