/*
 * hosting zSeries kernel virtual machines
 *
 * Copyright IBM Corp. 2008, 2009
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (version 2 only)
 * as published by the Free Software Foundation.
 *
 *    Author(s): Carsten Otte <cotte@de.ibm.com>
 *               Christian Borntraeger <borntraeger@de.ibm.com>
 *               Heiko Carstens <heiko.carstens@de.ibm.com>
 *               Christian Ehrhardt <ehrhardt@de.ibm.com>
 *               Jason J. Herne <jjherne@us.ibm.com>
 */

#include <linux/compiler.h>
#include <linux/err.h>
#include <linux/fs.h>
#include <linux/hrtimer.h>
#include <linux/init.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/timer.h>
#include <asm/asm-offsets.h>
#include <asm/lowcore.h>
#include <asm/pgtable.h>
#include <asm/nmi.h>
#include <asm/switch_to.h>
#include <asm/facility.h>
#include <asm/sclp.h>
#include "kvm-s390.h"
#include "gaccess.h"

#define CREATE_TRACE_POINTS
#include "trace.h"
#include "trace-s390.h"
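
/*
 * VCPU_STAT() expands to the offset of a counter in struct kvm_vcpu plus
 * the KVM_STAT_VCPU type tag, which is the pair the generic KVM debugfs
 * code expects for each entry of debugfs_entries[] below.
 */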
#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU

struct kvm_stats_debugfs_item debugfs_entries[] = {
	{ "userspace_handled", VCPU_STAT(exit_userspace) },
	{ "exit_null", VCPU_STAT(exit_null) },
	{ "exit_validity", VCPU_STAT(exit_validity) },
	{ "exit_stop_request", VCPU_STAT(exit_stop_request) },
	{ "exit_external_request", VCPU_STAT(exit_external_request) },
	{ "exit_external_interrupt", VCPU_STAT(exit_external_interrupt) },
	{ "exit_instruction", VCPU_STAT(exit_instruction) },
	{ "exit_program_interruption", VCPU_STAT(exit_program_interruption) },
	{ "exit_instr_and_program_int", VCPU_STAT(exit_instr_and_program) },
	{ "halt_successful_poll", VCPU_STAT(halt_successful_poll) },
	{ "halt_wakeup", VCPU_STAT(halt_wakeup) },
	{ "instruction_lctlg", VCPU_STAT(instruction_lctlg) },
	{ "instruction_lctl", VCPU_STAT(instruction_lctl) },
	{ "instruction_stctl", VCPU_STAT(instruction_stctl) },
	{ "instruction_stctg", VCPU_STAT(instruction_stctg) },
	{ "deliver_emergency_signal", VCPU_STAT(deliver_emergency_signal) },
	{ "deliver_external_call", VCPU_STAT(deliver_external_call) },
	{ "deliver_service_signal", VCPU_STAT(deliver_service_signal) },
	{ "deliver_virtio_interrupt", VCPU_STAT(deliver_virtio_interrupt) },
	{ "deliver_stop_signal", VCPU_STAT(deliver_stop_signal) },
	{ "deliver_prefix_signal", VCPU_STAT(deliver_prefix_signal) },
	{ "deliver_restart_signal", VCPU_STAT(deliver_restart_signal) },
	{ "deliver_program_interruption", VCPU_STAT(deliver_program_int) },
	{ "exit_wait_state", VCPU_STAT(exit_wait_state) },
	{ "instruction_pfmf", VCPU_STAT(instruction_pfmf) },
	{ "instruction_stidp", VCPU_STAT(instruction_stidp) },
	{ "instruction_spx", VCPU_STAT(instruction_spx) },
	{ "instruction_stpx", VCPU_STAT(instruction_stpx) },
	{ "instruction_stap", VCPU_STAT(instruction_stap) },
	{ "instruction_storage_key", VCPU_STAT(instruction_storage_key) },
	{ "instruction_ipte_interlock", VCPU_STAT(instruction_ipte_interlock) },
	{ "instruction_stsch", VCPU_STAT(instruction_stsch) },
	{ "instruction_chsc", VCPU_STAT(instruction_chsc) },
	{ "instruction_essa", VCPU_STAT(instruction_essa) },
	{ "instruction_stsi", VCPU_STAT(instruction_stsi) },
	{ "instruction_stfl", VCPU_STAT(instruction_stfl) },
	{ "instruction_tprot", VCPU_STAT(instruction_tprot) },
	{ "instruction_sigp_sense", VCPU_STAT(instruction_sigp_sense) },
	{ "instruction_sigp_sense_running", VCPU_STAT(instruction_sigp_sense_running) },
	{ "instruction_sigp_external_call", VCPU_STAT(instruction_sigp_external_call) },
	{ "instruction_sigp_emergency", VCPU_STAT(instruction_sigp_emergency) },
	{ "instruction_sigp_cond_emergency", VCPU_STAT(instruction_sigp_cond_emergency) },
	{ "instruction_sigp_start", VCPU_STAT(instruction_sigp_start) },
	{ "instruction_sigp_stop", VCPU_STAT(instruction_sigp_stop) },
	{ "instruction_sigp_stop_store_status", VCPU_STAT(instruction_sigp_stop_store_status) },
	{ "instruction_sigp_store_status", VCPU_STAT(instruction_sigp_store_status) },
	{ "instruction_sigp_set_arch", VCPU_STAT(instruction_sigp_arch) },
	{ "instruction_sigp_set_prefix", VCPU_STAT(instruction_sigp_prefix) },
	{ "instruction_sigp_restart", VCPU_STAT(instruction_sigp_restart) },
	{ "instruction_sigp_cpu_reset", VCPU_STAT(instruction_sigp_cpu_reset) },
	{ "instruction_sigp_init_cpu_reset", VCPU_STAT(instruction_sigp_init_cpu_reset) },
	{ "instruction_sigp_unknown", VCPU_STAT(instruction_sigp_unknown) },
	{ "diagnose_10", VCPU_STAT(diagnose_10) },
	{ "diagnose_44", VCPU_STAT(diagnose_44) },
	{ "diagnose_9c", VCPU_STAT(diagnose_9c) },
	{ NULL }
};
/* upper facilities limit for kvm */
unsigned long kvm_s390_fac_list_mask[] = {
	0xff82fffbf4fc2000UL,
	0x005c000000000000UL,
};

unsigned long kvm_s390_fac_list_mask_size(void)
{
	BUILD_BUG_ON(ARRAY_SIZE(kvm_s390_fac_list_mask) > S390_ARCH_FAC_MASK_SIZE_U64);
	return ARRAY_SIZE(kvm_s390_fac_list_mask);
}
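
/*
 * The gmap (guest mapping) notifier below is registered with the ipte
 * notifier framework; kvm_gmap_notifier() further down uses it to kick
 * VCPUs out of SIE whenever a notified address matches one of their
 * prefix pages.
 */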
static struct gmap_notifier gmap_notifier;

/* Section: not file related */
int kvm_arch_hardware_enable(void)
{
	/* every s390 is virtualization enabled ;-) */
	return 0;
}

static void kvm_gmap_notifier(struct gmap *gmap, unsigned long address);

int kvm_arch_hardware_setup(void)
{
	gmap_notifier.notifier_call = kvm_gmap_notifier;
	gmap_register_ipte_notifier(&gmap_notifier);
	return 0;
}

void kvm_arch_hardware_unsetup(void)
{
	gmap_unregister_ipte_notifier(&gmap_notifier);
}

int kvm_arch_init(void *opaque)
{
	/* Register floating interrupt controller interface. */
	return kvm_register_device_ops(&kvm_flic_ops, KVM_DEV_TYPE_FLIC);
}

/* Section: device related */
long kvm_arch_dev_ioctl(struct file *filp,
			unsigned int ioctl, unsigned long arg)
{
	if (ioctl == KVM_S390_ENABLE_SIE)
		return s390_enable_sie();
	return -EINVAL;
}
int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
{
	int r;

	switch (ext) {
	case KVM_CAP_S390_PSW:
	case KVM_CAP_S390_GMAP:
	case KVM_CAP_SYNC_MMU:
#ifdef CONFIG_KVM_S390_UCONTROL
	case KVM_CAP_S390_UCONTROL:
#endif
	case KVM_CAP_ASYNC_PF:
	case KVM_CAP_SYNC_REGS:
	case KVM_CAP_ONE_REG:
	case KVM_CAP_ENABLE_CAP:
	case KVM_CAP_S390_CSS_SUPPORT:
	case KVM_CAP_IOEVENTFD:
	case KVM_CAP_DEVICE_CTRL:
	case KVM_CAP_ENABLE_CAP_VM:
	case KVM_CAP_S390_IRQCHIP:
	case KVM_CAP_VM_ATTRIBUTES:
	case KVM_CAP_MP_STATE:
	case KVM_CAP_S390_USER_SIGP:
		r = 1;
		break;
	case KVM_CAP_NR_VCPUS:
	case KVM_CAP_MAX_VCPUS:
		r = KVM_MAX_VCPUS;
		break;
	case KVM_CAP_NR_MEMSLOTS:
		r = KVM_USER_MEM_SLOTS;
		break;
	case KVM_CAP_S390_COW:
		r = MACHINE_HAS_ESOP;
		break;
	default:
		r = 0;
	}
	return r;
}
static void kvm_s390_sync_dirty_log(struct kvm *kvm,
				    struct kvm_memory_slot *memslot)
{
	gfn_t cur_gfn, last_gfn;
	unsigned long address;
	struct gmap *gmap = kvm->arch.gmap;

	down_read(&gmap->mm->mmap_sem);
	/* Loop over all guest pages */
	last_gfn = memslot->base_gfn + memslot->npages;
	for (cur_gfn = memslot->base_gfn; cur_gfn <= last_gfn; cur_gfn++) {
		address = gfn_to_hva_memslot(memslot, cur_gfn);

		if (gmap_test_and_clear_dirty(address, gmap))
			mark_page_dirty(kvm, cur_gfn);
	}
	up_read(&gmap->mm->mmap_sem);
}

/* Section: vm related */
/*
 * Get (and clear) the dirty memory log for a memory slot.
 */
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
			       struct kvm_dirty_log *log)
{
	int r;
	unsigned long n;
	struct kvm_memory_slot *memslot;
	int is_dirty = 0;

	mutex_lock(&kvm->slots_lock);

	r = -EINVAL;
	if (log->slot >= KVM_USER_MEM_SLOTS)
		goto out;

	memslot = id_to_memslot(kvm->memslots, log->slot);
	r = -ENOENT;
	if (!memslot->dirty_bitmap)
		goto out;

	kvm_s390_sync_dirty_log(kvm, memslot);
	r = kvm_get_dirty_log(kvm, log, &is_dirty);
	if (r)
		goto out;

	/* Clear the dirty log */
	if (is_dirty) {
		n = kvm_dirty_bitmap_bytes(memslot);
		memset(memslot->dirty_bitmap, 0, n);
	}
	r = 0;
out:
	mutex_unlock(&kvm->slots_lock);
	return r;
}
static int kvm_vm_ioctl_enable_cap(struct kvm *kvm, struct kvm_enable_cap *cap)
{
	int r;

	if (cap->flags)
		return -EINVAL;

	switch (cap->cap) {
	case KVM_CAP_S390_IRQCHIP:
		kvm->arch.use_irqchip = 1;
		r = 0;
		break;
	case KVM_CAP_S390_USER_SIGP:
		kvm->arch.user_sigp = 1;
		r = 0;
		break;
	default:
		r = -EINVAL;
		break;
	}
	return r;
}

static int kvm_s390_get_mem_control(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	switch (attr->attr) {
	case KVM_S390_VM_MEM_LIMIT_SIZE:
		ret = 0;
		if (put_user(kvm->arch.gmap->asce_end, (u64 __user *)attr->addr))
			ret = -EFAULT;
		break;
	default:
		ret = -ENXIO;
		break;
	}
	return ret;
}

static int kvm_s390_set_mem_control(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;
	unsigned int idx;

	switch (attr->attr) {
	case KVM_S390_VM_MEM_ENABLE_CMMA:
		ret = -EBUSY;
		mutex_lock(&kvm->lock);
		if (atomic_read(&kvm->online_vcpus) == 0) {
			kvm->arch.use_cmma = 1;
			ret = 0;
		}
		mutex_unlock(&kvm->lock);
		break;
	case KVM_S390_VM_MEM_CLR_CMMA:
		mutex_lock(&kvm->lock);
		idx = srcu_read_lock(&kvm->srcu);
		s390_reset_cmma(kvm->arch.gmap->mm);
		srcu_read_unlock(&kvm->srcu, idx);
		mutex_unlock(&kvm->lock);
		ret = 0;
		break;
	case KVM_S390_VM_MEM_LIMIT_SIZE: {
		unsigned long new_limit;

		if (kvm_is_ucontrol(kvm))
			return -EINVAL;

		if (get_user(new_limit, (u64 __user *)attr->addr))
			return -EFAULT;

		if (new_limit > kvm->arch.gmap->asce_end)
			return -E2BIG;

		ret = -EBUSY;
		mutex_lock(&kvm->lock);
		if (atomic_read(&kvm->online_vcpus) == 0) {
			/* gmap_alloc will round the limit up */
			struct gmap *new = gmap_alloc(current->mm, new_limit);

			if (!new) {
				ret = -ENOMEM;
			} else {
				gmap_free(kvm->arch.gmap);
				new->private = kvm;
				kvm->arch.gmap = new;
				ret = 0;
			}
		}
		mutex_unlock(&kvm->lock);
		break;
	}
	default:
		ret = -ENXIO;
		break;
	}
	return ret;
}
static void kvm_s390_vcpu_crypto_setup(struct kvm_vcpu *vcpu);

static int kvm_s390_vm_set_crypto(struct kvm *kvm, struct kvm_device_attr *attr)
{
	struct kvm_vcpu *vcpu;
	int i;

	if (!test_kvm_facility(kvm, 76))
		return -EINVAL;

	mutex_lock(&kvm->lock);
	switch (attr->attr) {
	case KVM_S390_VM_CRYPTO_ENABLE_AES_KW:
		get_random_bytes(
			kvm->arch.crypto.crycb->aes_wrapping_key_mask,
			sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
		kvm->arch.crypto.aes_kw = 1;
		break;
	case KVM_S390_VM_CRYPTO_ENABLE_DEA_KW:
		get_random_bytes(
			kvm->arch.crypto.crycb->dea_wrapping_key_mask,
			sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));
		kvm->arch.crypto.dea_kw = 1;
		break;
	case KVM_S390_VM_CRYPTO_DISABLE_AES_KW:
		kvm->arch.crypto.aes_kw = 0;
		memset(kvm->arch.crypto.crycb->aes_wrapping_key_mask, 0,
			sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
		break;
	case KVM_S390_VM_CRYPTO_DISABLE_DEA_KW:
		kvm->arch.crypto.dea_kw = 0;
		memset(kvm->arch.crypto.crycb->dea_wrapping_key_mask, 0,
			sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));
		break;
	default:
		mutex_unlock(&kvm->lock);
		return -ENXIO;
	}

	kvm_for_each_vcpu(i, vcpu, kvm) {
		kvm_s390_vcpu_crypto_setup(vcpu);
		exit_sie(vcpu);
	}
	mutex_unlock(&kvm->lock);
	return 0;
}
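
/*
 * TOD handling: the guest TOD clock is modeled as the host TOD plus a
 * per-VM epoch difference, so setting the guest TOD stores
 * gtod - host_tod and reading it returns host_tod + epoch.
 */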
static int kvm_s390_set_tod_high(struct kvm *kvm, struct kvm_device_attr *attr)
{
	u8 gtod_high;

	if (copy_from_user(&gtod_high, (void __user *)attr->addr,
			   sizeof(gtod_high)))
		return -EFAULT;

	if (gtod_high != 0)
		return -EINVAL;

	return 0;
}

static int kvm_s390_set_tod_low(struct kvm *kvm, struct kvm_device_attr *attr)
{
	struct kvm_vcpu *cur_vcpu;
	unsigned int vcpu_idx;
	u64 host_tod, gtod;
	int r;

	if (copy_from_user(&gtod, (void __user *)attr->addr, sizeof(gtod)))
		return -EFAULT;

	r = store_tod_clock(&host_tod);
	if (r)
		return r;

	mutex_lock(&kvm->lock);
	kvm->arch.epoch = gtod - host_tod;
	kvm_for_each_vcpu(vcpu_idx, cur_vcpu, kvm) {
		cur_vcpu->arch.sie_block->epoch = kvm->arch.epoch;
		exit_sie(cur_vcpu);
	}
	mutex_unlock(&kvm->lock);
	return 0;
}

static int kvm_s390_set_tod(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	if (attr->flags)
		return -EINVAL;

	switch (attr->attr) {
	case KVM_S390_VM_TOD_HIGH:
		ret = kvm_s390_set_tod_high(kvm, attr);
		break;
	case KVM_S390_VM_TOD_LOW:
		ret = kvm_s390_set_tod_low(kvm, attr);
		break;
	default:
		ret = -ENXIO;
		break;
	}
	return ret;
}

static int kvm_s390_get_tod_high(struct kvm *kvm, struct kvm_device_attr *attr)
{
	u8 gtod_high = 0;

	if (copy_to_user((void __user *)attr->addr, &gtod_high,
			 sizeof(gtod_high)))
		return -EFAULT;

	return 0;
}

static int kvm_s390_get_tod_low(struct kvm *kvm, struct kvm_device_attr *attr)
{
	u64 host_tod, gtod;
	int r;

	r = store_tod_clock(&host_tod);
	if (r)
		return r;

	gtod = host_tod + kvm->arch.epoch;
	if (copy_to_user((void __user *)attr->addr, &gtod, sizeof(gtod)))
		return -EFAULT;

	return 0;
}

static int kvm_s390_get_tod(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	if (attr->flags)
		return -EINVAL;

	switch (attr->attr) {
	case KVM_S390_VM_TOD_HIGH:
		ret = kvm_s390_get_tod_high(kvm, attr);
		break;
	case KVM_S390_VM_TOD_LOW:
		ret = kvm_s390_get_tod_low(kvm, attr);
		break;
	default:
		ret = -ENXIO;
		break;
	}
	return ret;
}
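
/*
 * CPU model attributes: userspace may override the CPU id, IBC and the
 * guest facility list via KVM_S390_VM_CPU_PROCESSOR, but only before the
 * first VCPU has been created (the setter bails out with -EBUSY
 * otherwise). KVM_S390_VM_CPU_MACHINE is read-only host information.
 */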
static int kvm_s390_set_processor(struct kvm *kvm, struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_cpu_processor *proc;
	int ret = 0;

	mutex_lock(&kvm->lock);
	if (atomic_read(&kvm->online_vcpus)) {
		ret = -EBUSY;
		goto out;
	}
	proc = kzalloc(sizeof(*proc), GFP_KERNEL);
	if (!proc) {
		ret = -ENOMEM;
		goto out;
	}
	if (!copy_from_user(proc, (void __user *)attr->addr,
			    sizeof(*proc))) {
		memcpy(&kvm->arch.model.cpu_id, &proc->cpuid,
		       sizeof(struct cpuid));
		kvm->arch.model.ibc = proc->ibc;
		memcpy(kvm->arch.model.fac->kvm, proc->fac_list,
		       S390_ARCH_FAC_LIST_SIZE_BYTE);
	} else
		ret = -EFAULT;
	kfree(proc);
out:
	mutex_unlock(&kvm->lock);
	return ret;
}

static int kvm_s390_set_cpu_model(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret = -ENXIO;

	switch (attr->attr) {
	case KVM_S390_VM_CPU_PROCESSOR:
		ret = kvm_s390_set_processor(kvm, attr);
		break;
	}
	return ret;
}

static int kvm_s390_get_processor(struct kvm *kvm, struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_cpu_processor *proc;
	int ret = 0;

	proc = kzalloc(sizeof(*proc), GFP_KERNEL);
	if (!proc) {
		ret = -ENOMEM;
		goto out;
	}
	memcpy(&proc->cpuid, &kvm->arch.model.cpu_id, sizeof(struct cpuid));
	proc->ibc = kvm->arch.model.ibc;
	memcpy(&proc->fac_list, kvm->arch.model.fac->kvm, S390_ARCH_FAC_LIST_SIZE_BYTE);
	if (copy_to_user((void __user *)attr->addr, proc, sizeof(*proc)))
		ret = -EFAULT;
	kfree(proc);
out:
	return ret;
}

static int kvm_s390_get_machine(struct kvm *kvm, struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_cpu_machine *mach;
	int ret = 0;

	mach = kzalloc(sizeof(*mach), GFP_KERNEL);
	if (!mach) {
		ret = -ENOMEM;
		goto out;
	}
	get_cpu_id((struct cpuid *) &mach->cpuid);
	mach->ibc = sclp_get_ibc();
	memcpy(&mach->fac_mask, kvm_s390_fac_list_mask,
	       kvm_s390_fac_list_mask_size() * sizeof(u64));
	memcpy((unsigned long *)&mach->fac_list, S390_lowcore.stfle_fac_list,
	       S390_ARCH_FAC_LIST_SIZE_U64);
	if (copy_to_user((void __user *)attr->addr, mach, sizeof(*mach)))
		ret = -EFAULT;
	kfree(mach);
out:
	return ret;
}

static int kvm_s390_get_cpu_model(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret = -ENXIO;

	switch (attr->attr) {
	case KVM_S390_VM_CPU_PROCESSOR:
		ret = kvm_s390_get_processor(kvm, attr);
		break;
	case KVM_S390_VM_CPU_MACHINE:
		ret = kvm_s390_get_machine(kvm, attr);
		break;
	}
	return ret;
}
static int kvm_s390_vm_set_attr(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	switch (attr->group) {
	case KVM_S390_VM_MEM_CTRL:
		ret = kvm_s390_set_mem_control(kvm, attr);
		break;
	case KVM_S390_VM_TOD:
		ret = kvm_s390_set_tod(kvm, attr);
		break;
	case KVM_S390_VM_CPU_MODEL:
		ret = kvm_s390_set_cpu_model(kvm, attr);
		break;
	case KVM_S390_VM_CRYPTO:
		ret = kvm_s390_vm_set_crypto(kvm, attr);
		break;
	default:
		ret = -ENXIO;
		break;
	}

	return ret;
}

static int kvm_s390_vm_get_attr(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	switch (attr->group) {
	case KVM_S390_VM_MEM_CTRL:
		ret = kvm_s390_get_mem_control(kvm, attr);
		break;
	case KVM_S390_VM_TOD:
		ret = kvm_s390_get_tod(kvm, attr);
		break;
	case KVM_S390_VM_CPU_MODEL:
		ret = kvm_s390_get_cpu_model(kvm, attr);
		break;
	default:
		ret = -ENXIO;
		break;
	}

	return ret;
}

static int kvm_s390_vm_has_attr(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	switch (attr->group) {
	case KVM_S390_VM_MEM_CTRL:
		switch (attr->attr) {
		case KVM_S390_VM_MEM_ENABLE_CMMA:
		case KVM_S390_VM_MEM_CLR_CMMA:
		case KVM_S390_VM_MEM_LIMIT_SIZE:
			ret = 0;
			break;
		default:
			ret = -ENXIO;
			break;
		}
		break;
	case KVM_S390_VM_TOD:
		switch (attr->attr) {
		case KVM_S390_VM_TOD_LOW:
		case KVM_S390_VM_TOD_HIGH:
			ret = 0;
			break;
		default:
			ret = -ENXIO;
			break;
		}
		break;
	case KVM_S390_VM_CPU_MODEL:
		switch (attr->attr) {
		case KVM_S390_VM_CPU_PROCESSOR:
		case KVM_S390_VM_CPU_MACHINE:
			ret = 0;
			break;
		default:
			ret = -ENXIO;
			break;
		}
		break;
	case KVM_S390_VM_CRYPTO:
		switch (attr->attr) {
		case KVM_S390_VM_CRYPTO_ENABLE_AES_KW:
		case KVM_S390_VM_CRYPTO_ENABLE_DEA_KW:
		case KVM_S390_VM_CRYPTO_DISABLE_AES_KW:
		case KVM_S390_VM_CRYPTO_DISABLE_DEA_KW:
			ret = 0;
			break;
		default:
			ret = -ENXIO;
			break;
		}
		break;
	default:
		ret = -ENXIO;
		break;
	}

	return ret;
}
long kvm_arch_vm_ioctl(struct file *filp,
		       unsigned int ioctl, unsigned long arg)
{
	struct kvm *kvm = filp->private_data;
	void __user *argp = (void __user *)arg;
	struct kvm_device_attr attr;
	int r;

	switch (ioctl) {
	case KVM_S390_INTERRUPT: {
		struct kvm_s390_interrupt s390int;

		r = -EFAULT;
		if (copy_from_user(&s390int, argp, sizeof(s390int)))
			break;
		r = kvm_s390_inject_vm(kvm, &s390int);
		break;
	}
	case KVM_ENABLE_CAP: {
		struct kvm_enable_cap cap;

		r = -EFAULT;
		if (copy_from_user(&cap, argp, sizeof(cap)))
			break;
		r = kvm_vm_ioctl_enable_cap(kvm, &cap);
		break;
	}
	case KVM_CREATE_IRQCHIP: {
		struct kvm_irq_routing_entry routing;

		r = -EINVAL;
		if (kvm->arch.use_irqchip) {
			/* Set up dummy routing. */
			memset(&routing, 0, sizeof(routing));
			kvm_set_irq_routing(kvm, &routing, 0, 0);
			r = 0;
		}
		break;
	}
	case KVM_SET_DEVICE_ATTR: {
		r = -EFAULT;
		if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
			break;
		r = kvm_s390_vm_set_attr(kvm, &attr);
		break;
	}
	case KVM_GET_DEVICE_ATTR: {
		r = -EFAULT;
		if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
			break;
		r = kvm_s390_vm_get_attr(kvm, &attr);
		break;
	}
	case KVM_HAS_DEVICE_ATTR: {
		r = -EFAULT;
		if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
			break;
		r = kvm_s390_vm_has_attr(kvm, &attr);
		break;
	}
	default:
		r = -ENOTTY;
	}

	return r;
}
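
/*
 * PQAP(QCI) is issued below via a hard-coded .long opcode so that the file
 * also builds with assemblers that do not know the mnemonic. The query
 * stores the AP configuration into a 128 byte buffer; bit 0x40 of byte 0
 * indicates that the APXA facility is installed.
 */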
static int kvm_s390_query_ap_config(u8 *config)
{
	u32 fcn_code = 0x04000000UL;
	u32 cc;

	asm volatile(
		"lgr 0,%1\n"
		"lgr 2,%2\n"
		".long 0xb2af0000\n"		/* PQAP(QCI) */
		"ipm %0\n"
		"srl %0,28\n"
		: "=r" (cc)
		: "r" (fcn_code), "r" (config)
		: "cc", "0", "2", "memory"
	);

	return cc;
}

static int kvm_s390_apxa_installed(void)
{
	u8 config[128];
	int cc;

	if (test_facility(2) && test_facility(12)) {
		cc = kvm_s390_query_ap_config(config);

		if (cc)
			pr_err("PQAP(QCI) failed with cc=%d", cc);
		else
			return config[0] & 0x40;
	}

	return 0;
}

static void kvm_s390_set_crycb_format(struct kvm *kvm)
{
	kvm->arch.crypto.crycbd = (__u32)(unsigned long) kvm->arch.crypto.crycb;

	if (kvm_s390_apxa_installed())
		kvm->arch.crypto.crycbd |= CRYCB_FORMAT2;
	else
		kvm->arch.crypto.crycbd |= CRYCB_FORMAT1;
}

static void kvm_s390_get_cpu_id(struct cpuid *cpu_id)
{
	get_cpu_id(cpu_id);
	cpu_id->version = 0xff;
}

static int kvm_s390_crypto_init(struct kvm *kvm)
{
	if (!test_kvm_facility(kvm, 76))
		return 0;

	kvm->arch.crypto.crycb = kzalloc(sizeof(*kvm->arch.crypto.crycb),
					 GFP_KERNEL | GFP_DMA);
	if (!kvm->arch.crypto.crycb)
		return -ENOMEM;

	kvm_s390_set_crycb_format(kvm);

	/* Disable AES/DEA protected key functions by default */
	kvm->arch.crypto.aes_kw = 0;
	kvm->arch.crypto.dea_kw = 0;

	return 0;
}
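
/*
 * Note on the SCA placement in kvm_arch_init_vm(): each VM gets a full
 * zeroed page, but the SCA origin is staggered in 16 byte steps (wrapping
 * at 0x7f0), presumably so that the SCAs of different VMs do not all
 * start on the same cache lines.
 */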
int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
{
	int i, rc;
	char debug_name[16];
	static unsigned long sca_offset;

	rc = -EINVAL;
#ifdef CONFIG_KVM_S390_UCONTROL
	if (type & ~KVM_VM_S390_UCONTROL)
		goto out_err;
	if ((type & KVM_VM_S390_UCONTROL) && (!capable(CAP_SYS_ADMIN)))
		goto out_err;
#else
	if (type)
		goto out_err;
#endif

	rc = s390_enable_sie();
	if (rc)
		goto out_err;

	rc = -ENOMEM;

	kvm->arch.sca = (struct sca_block *) get_zeroed_page(GFP_KERNEL);
	if (!kvm->arch.sca)
		goto out_err;
	spin_lock(&kvm_lock);
	sca_offset = (sca_offset + 16) & 0x7f0;
	kvm->arch.sca = (struct sca_block *) ((char *) kvm->arch.sca + sca_offset);
	spin_unlock(&kvm_lock);

	sprintf(debug_name, "kvm-%u", current->pid);

	kvm->arch.dbf = debug_register(debug_name, 8, 2, 8 * sizeof(long));
	if (!kvm->arch.dbf)
		goto out_nodbf;

	/*
	 * The architectural maximum amount of facilities is 16 kbit. To store
	 * this amount, 2 kbyte of memory is required. Thus we need a full
	 * page to hold the active copy (arch.model.fac->sie) and the current
	 * facilities set (arch.model.fac->kvm). Its address size has to be
	 * 31 bits and word aligned.
	 */
	kvm->arch.model.fac =
		(struct s390_model_fac *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!kvm->arch.model.fac)
		goto out_nofac;

	memcpy(kvm->arch.model.fac->kvm, S390_lowcore.stfle_fac_list,
	       S390_ARCH_FAC_LIST_SIZE_U64);

	/*
	 * If this KVM host runs *not* in a LPAR, relax the facility bits
	 * of the kvm facility mask by all missing facilities. This will allow
	 * to determine the right CPU model by means of the remaining facilities.
	 * Live guest migration must prohibit the migration of KVMs running in
	 * a LPAR to non LPAR hosts.
	 */
	if (!MACHINE_IS_LPAR)
		for (i = 0; i < kvm_s390_fac_list_mask_size(); i++)
			kvm_s390_fac_list_mask[i] &= kvm->arch.model.fac->kvm[i];

	/*
	 * Apply the kvm facility mask to limit the kvm supported/tolerated
	 * facility list.
	 */
	for (i = 0; i < S390_ARCH_FAC_LIST_SIZE_U64; i++) {
		if (i < kvm_s390_fac_list_mask_size())
			kvm->arch.model.fac->kvm[i] &= kvm_s390_fac_list_mask[i];
		else
			kvm->arch.model.fac->kvm[i] = 0UL;
	}

	kvm_s390_get_cpu_id(&kvm->arch.model.cpu_id);
	kvm->arch.model.ibc = sclp_get_ibc() & 0x0fff;

	if (kvm_s390_crypto_init(kvm) < 0)
		goto out_crypto;

	spin_lock_init(&kvm->arch.float_int.lock);
	INIT_LIST_HEAD(&kvm->arch.float_int.list);
	init_waitqueue_head(&kvm->arch.ipte_wq);
	mutex_init(&kvm->arch.ipte_mutex);

	debug_register_view(kvm->arch.dbf, &debug_sprintf_view);
	VM_EVENT(kvm, 3, "%s", "vm created");

	if (type & KVM_VM_S390_UCONTROL) {
		kvm->arch.gmap = NULL;
	} else {
		kvm->arch.gmap = gmap_alloc(current->mm, (1UL << 44) - 1);
		if (!kvm->arch.gmap)
			goto out_nogmap;
		kvm->arch.gmap->private = kvm;
		kvm->arch.gmap->pfault_enabled = 0;
	}

	kvm->arch.css_support = 0;
	kvm->arch.use_irqchip = 0;

	spin_lock_init(&kvm->arch.start_stop_lock);

	return 0;
out_nogmap:
	kfree(kvm->arch.crypto.crycb);
out_crypto:
	free_page((unsigned long)kvm->arch.model.fac);
out_nofac:
	debug_unregister(kvm->arch.dbf);
out_nodbf:
	free_page((unsigned long)(kvm->arch.sca));
out_err:
	return rc;
}
void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{
	VCPU_EVENT(vcpu, 3, "%s", "free cpu");
	trace_kvm_s390_destroy_vcpu(vcpu->vcpu_id);
	kvm_s390_clear_local_irqs(vcpu);
	kvm_clear_async_pf_completion_queue(vcpu);
	if (!kvm_is_ucontrol(vcpu->kvm)) {
		clear_bit(63 - vcpu->vcpu_id,
			  (unsigned long *) &vcpu->kvm->arch.sca->mcn);
		if (vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda ==
		    (__u64) vcpu->arch.sie_block)
			vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda = 0;
	}
	smp_mb();

	if (kvm_is_ucontrol(vcpu->kvm))
		gmap_free(vcpu->arch.gmap);

	if (kvm_s390_cmma_enabled(vcpu->kvm))
		kvm_s390_vcpu_unsetup_cmma(vcpu);
	free_page((unsigned long)(vcpu->arch.sie_block));

	kvm_vcpu_uninit(vcpu);
	kmem_cache_free(kvm_vcpu_cache, vcpu);
}

static void kvm_free_vcpus(struct kvm *kvm)
{
	unsigned int i;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(i, vcpu, kvm)
		kvm_arch_vcpu_destroy(vcpu);

	mutex_lock(&kvm->lock);
	for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
		kvm->vcpus[i] = NULL;

	atomic_set(&kvm->online_vcpus, 0);
	mutex_unlock(&kvm->lock);
}

void kvm_arch_destroy_vm(struct kvm *kvm)
{
	kvm_free_vcpus(kvm);
	free_page((unsigned long)kvm->arch.model.fac);
	free_page((unsigned long)(kvm->arch.sca));
	debug_unregister(kvm->arch.dbf);
	kfree(kvm->arch.crypto.crycb);
	if (!kvm_is_ucontrol(kvm))
		gmap_free(kvm->arch.gmap);
	kvm_s390_destroy_adapters(kvm);
	kvm_s390_clear_float_irqs(kvm);
}
/* Section: vcpu related */
static int __kvm_ucontrol_vcpu_init(struct kvm_vcpu *vcpu)
{
	vcpu->arch.gmap = gmap_alloc(current->mm, -1UL);
	if (!vcpu->arch.gmap)
		return -ENOMEM;
	vcpu->arch.gmap->private = vcpu->kvm;

	return 0;
}

int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
{
	vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
	kvm_clear_async_pf_completion_queue(vcpu);
	vcpu->run->kvm_valid_regs = KVM_SYNC_PREFIX |
				    KVM_SYNC_GPRS |
				    KVM_SYNC_ACRS |
				    KVM_SYNC_CRS |
				    KVM_SYNC_ARCH0 |
				    KVM_SYNC_PFAULT;

	if (kvm_is_ucontrol(vcpu->kvm))
		return __kvm_ucontrol_vcpu_init(vcpu);

	return 0;
}

void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	save_fp_ctl(&vcpu->arch.host_fpregs.fpc);
	save_fp_regs(vcpu->arch.host_fpregs.fprs);
	save_access_regs(vcpu->arch.host_acrs);
	restore_fp_ctl(&vcpu->arch.guest_fpregs.fpc);
	restore_fp_regs(vcpu->arch.guest_fpregs.fprs);
	restore_access_regs(vcpu->run->s.regs.acrs);
	gmap_enable(vcpu->arch.gmap);
	atomic_set_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
}

void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
	atomic_clear_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
	gmap_disable(vcpu->arch.gmap);
	save_fp_ctl(&vcpu->arch.guest_fpregs.fpc);
	save_fp_regs(vcpu->arch.guest_fpregs.fprs);
	save_access_regs(vcpu->run->s.regs.acrs);
	restore_fp_ctl(&vcpu->arch.host_fpregs.fpc);
	restore_fp_regs(vcpu->arch.host_fpregs.fprs);
	restore_access_regs(vcpu->arch.host_acrs);
}
static void kvm_s390_vcpu_initial_reset(struct kvm_vcpu *vcpu)
{
	/* this equals initial cpu reset in pop, but we don't switch to ESA */
	vcpu->arch.sie_block->gpsw.mask = 0UL;
	vcpu->arch.sie_block->gpsw.addr = 0UL;
	kvm_s390_set_prefix(vcpu, 0);
	vcpu->arch.sie_block->cputm     = 0UL;
	vcpu->arch.sie_block->ckc       = 0UL;
	vcpu->arch.sie_block->todpr     = 0;
	memset(vcpu->arch.sie_block->gcr, 0, 16 * sizeof(__u64));
	vcpu->arch.sie_block->gcr[0]  = 0xE0UL;
	vcpu->arch.sie_block->gcr[14] = 0xC2000000UL;
	vcpu->arch.guest_fpregs.fpc = 0;
	asm volatile("lfpc %0" : : "Q" (vcpu->arch.guest_fpregs.fpc));
	vcpu->arch.sie_block->gbea = 1;
	vcpu->arch.sie_block->pp = 0;
	vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
	kvm_clear_async_pf_completion_queue(vcpu);
	if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm))
		kvm_s390_vcpu_stop(vcpu);
	kvm_s390_clear_local_irqs(vcpu);
}

void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
{
	mutex_lock(&vcpu->kvm->lock);
	vcpu->arch.sie_block->epoch = vcpu->kvm->arch.epoch;
	mutex_unlock(&vcpu->kvm->lock);
	if (!kvm_is_ucontrol(vcpu->kvm))
		vcpu->arch.gmap = vcpu->kvm->arch.gmap;
}
static void kvm_s390_vcpu_crypto_setup(struct kvm_vcpu *vcpu)
{
	if (!test_kvm_facility(vcpu->kvm, 76))
		return;

	vcpu->arch.sie_block->ecb3 &= ~(ECB3_AES | ECB3_DEA);

	if (vcpu->kvm->arch.crypto.aes_kw)
		vcpu->arch.sie_block->ecb3 |= ECB3_AES;
	if (vcpu->kvm->arch.crypto.dea_kw)
		vcpu->arch.sie_block->ecb3 |= ECB3_DEA;

	vcpu->arch.sie_block->crycbd = vcpu->kvm->arch.crypto.crycbd;
}

void kvm_s390_vcpu_unsetup_cmma(struct kvm_vcpu *vcpu)
{
	free_page(vcpu->arch.sie_block->cbrlo);
	vcpu->arch.sie_block->cbrlo = 0;
}

int kvm_s390_vcpu_setup_cmma(struct kvm_vcpu *vcpu)
{
	vcpu->arch.sie_block->cbrlo = get_zeroed_page(GFP_KERNEL);
	if (!vcpu->arch.sie_block->cbrlo)
		return -ENOMEM;

	vcpu->arch.sie_block->ecb2 |= 0x80;
	vcpu->arch.sie_block->ecb2 &= ~0x08;
	return 0;
}
int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
{
	int rc = 0;

	atomic_set(&vcpu->arch.sie_block->cpuflags, CPUSTAT_ZARCH |
						    CPUSTAT_SM |
						    CPUSTAT_STOPPED |
						    CPUSTAT_GED);
	vcpu->arch.sie_block->ecb   = 6;
	if (test_kvm_facility(vcpu->kvm, 50) && test_kvm_facility(vcpu->kvm, 73))
		vcpu->arch.sie_block->ecb |= 0x10;

	vcpu->arch.sie_block->ecb2  = 8;
	vcpu->arch.sie_block->eca   = 0xC1002000U;
	if (sclp_has_siif())
		vcpu->arch.sie_block->eca |= 1;
	if (sclp_has_sigpif())
		vcpu->arch.sie_block->eca |= 0x10000000U;
	vcpu->arch.sie_block->ictl |= ICTL_ISKE | ICTL_SSKE | ICTL_RRBE |
				      ICTL_TPROT;

	if (kvm_s390_cmma_enabled(vcpu->kvm)) {
		rc = kvm_s390_vcpu_setup_cmma(vcpu);
		if (rc)
			return rc;
	}
	hrtimer_init(&vcpu->arch.ckc_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	vcpu->arch.ckc_timer.function = kvm_s390_idle_wakeup;

	mutex_lock(&vcpu->kvm->lock);
	vcpu->arch.cpu_id = vcpu->kvm->arch.model.cpu_id;
	memcpy(vcpu->kvm->arch.model.fac->sie, vcpu->kvm->arch.model.fac->kvm,
	       S390_ARCH_FAC_LIST_SIZE_BYTE);
	vcpu->arch.sie_block->ibc = vcpu->kvm->arch.model.ibc;
	mutex_unlock(&vcpu->kvm->lock);

	kvm_s390_vcpu_crypto_setup(vcpu);

	return rc;
}
struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
				      unsigned int id)
{
	struct kvm_vcpu *vcpu;
	struct sie_page *sie_page;
	int rc = -EINVAL;

	if (id >= KVM_MAX_VCPUS)
		goto out;

	rc = -ENOMEM;

	vcpu = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
	if (!vcpu)
		goto out;

	sie_page = (struct sie_page *) get_zeroed_page(GFP_KERNEL);
	if (!sie_page)
		goto out_free_cpu;

	vcpu->arch.sie_block = &sie_page->sie_block;
	vcpu->arch.sie_block->itdba = (unsigned long) &sie_page->itdb;

	vcpu->arch.sie_block->icpua = id;
	if (!kvm_is_ucontrol(kvm)) {
		if (!kvm->arch.sca) {
			WARN_ON_ONCE(1);
			goto out_free_cpu;
		}
		if (!kvm->arch.sca->cpu[id].sda)
			kvm->arch.sca->cpu[id].sda =
				(__u64) vcpu->arch.sie_block;
		vcpu->arch.sie_block->scaoh =
			(__u32)(((__u64)kvm->arch.sca) >> 32);
		vcpu->arch.sie_block->scaol = (__u32)(__u64)kvm->arch.sca;
		set_bit(63 - id, (unsigned long *) &kvm->arch.sca->mcn);
	}
	vcpu->arch.sie_block->fac = (int) (long) kvm->arch.model.fac->sie;

	spin_lock_init(&vcpu->arch.local_int.lock);
	vcpu->arch.local_int.float_int = &kvm->arch.float_int;
	vcpu->arch.local_int.wq = &vcpu->wq;
	vcpu->arch.local_int.cpuflags = &vcpu->arch.sie_block->cpuflags;

	rc = kvm_vcpu_init(vcpu, kvm, id);
	if (rc)
		goto out_free_sie_block;
	VM_EVENT(kvm, 3, "create cpu %d at %p, sie block at %p", id, vcpu,
		 vcpu->arch.sie_block);
	trace_kvm_s390_create_vcpu(id, vcpu, vcpu->arch.sie_block);

	return vcpu;
out_free_sie_block:
	free_page((unsigned long)(vcpu->arch.sie_block));
out_free_cpu:
	kmem_cache_free(kvm_vcpu_cache, vcpu);
out:
	return ERR_PTR(rc);
}

int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
{
	return kvm_s390_vcpu_has_irq(vcpu, 0);
}

void s390_vcpu_block(struct kvm_vcpu *vcpu)
{
	atomic_set_mask(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
}

void s390_vcpu_unblock(struct kvm_vcpu *vcpu)
{
	atomic_clear_mask(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
}

/*
 * Kick a guest cpu out of SIE and wait until SIE is not running.
 * If the CPU is not running (e.g. waiting as idle) the function will
 * return immediately. */
void exit_sie(struct kvm_vcpu *vcpu)
{
	atomic_set_mask(CPUSTAT_STOP_INT, &vcpu->arch.sie_block->cpuflags);
	while (vcpu->arch.sie_block->prog0c & PROG_IN_SIE)
		cpu_relax();
}

/* Kick a guest cpu out of SIE and prevent SIE-reentry */
void exit_sie_sync(struct kvm_vcpu *vcpu)
{
	s390_vcpu_block(vcpu);
	exit_sie(vcpu);
}

static void kvm_gmap_notifier(struct gmap *gmap, unsigned long address)
{
	int i;
	struct kvm *kvm = gmap->private;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(i, vcpu, kvm) {
		/* match against both prefix pages */
		if (kvm_s390_get_prefix(vcpu) == (address & ~0x1000UL)) {
			VCPU_EVENT(vcpu, 2, "gmap notifier for %lx", address);
			kvm_make_request(KVM_REQ_MMU_RELOAD, vcpu);
			exit_sie_sync(vcpu);
		}
	}
}

int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
{
	/* kvm common code refers to this, but never calls it */
	BUG();
	return 0;
}
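
/*
 * The ONE_REG interface below exposes selected SIE block fields and the
 * pfault parameters as individual registers: the getters put_user() the
 * value to reg->addr, the setters get_user() it from there.
 */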
static int kvm_arch_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu,
					   struct kvm_one_reg *reg)
{
	int r = -EINVAL;

	switch (reg->id) {
	case KVM_REG_S390_TODPR:
		r = put_user(vcpu->arch.sie_block->todpr,
			     (u32 __user *)reg->addr);
		break;
	case KVM_REG_S390_EPOCHDIFF:
		r = put_user(vcpu->arch.sie_block->epoch,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_CPU_TIMER:
		r = put_user(vcpu->arch.sie_block->cputm,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_CLOCK_COMP:
		r = put_user(vcpu->arch.sie_block->ckc,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PFTOKEN:
		r = put_user(vcpu->arch.pfault_token,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PFCOMPARE:
		r = put_user(vcpu->arch.pfault_compare,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PFSELECT:
		r = put_user(vcpu->arch.pfault_select,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PP:
		r = put_user(vcpu->arch.sie_block->pp,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_GBEA:
		r = put_user(vcpu->arch.sie_block->gbea,
			     (u64 __user *)reg->addr);
		break;
	default:
		break;
	}

	return r;
}

static int kvm_arch_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu,
					   struct kvm_one_reg *reg)
{
	int r = -EINVAL;

	switch (reg->id) {
	case KVM_REG_S390_TODPR:
		r = get_user(vcpu->arch.sie_block->todpr,
			     (u32 __user *)reg->addr);
		break;
	case KVM_REG_S390_EPOCHDIFF:
		r = get_user(vcpu->arch.sie_block->epoch,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_CPU_TIMER:
		r = get_user(vcpu->arch.sie_block->cputm,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_CLOCK_COMP:
		r = get_user(vcpu->arch.sie_block->ckc,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PFTOKEN:
		r = get_user(vcpu->arch.pfault_token,
			     (u64 __user *)reg->addr);
		if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
			kvm_clear_async_pf_completion_queue(vcpu);
		break;
	case KVM_REG_S390_PFCOMPARE:
		r = get_user(vcpu->arch.pfault_compare,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PFSELECT:
		r = get_user(vcpu->arch.pfault_select,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PP:
		r = get_user(vcpu->arch.sie_block->pp,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_GBEA:
		r = get_user(vcpu->arch.sie_block->gbea,
			     (u64 __user *)reg->addr);
		break;
	default:
		break;
	}

	return r;
}
static int kvm_arch_vcpu_ioctl_initial_reset(struct kvm_vcpu *vcpu)
{
	kvm_s390_vcpu_initial_reset(vcpu);
	return 0;
}

int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	memcpy(&vcpu->run->s.regs.gprs, &regs->gprs, sizeof(regs->gprs));
	return 0;
}

int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	memcpy(&regs->gprs, &vcpu->run->s.regs.gprs, sizeof(regs->gprs));
	return 0;
}

int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	memcpy(&vcpu->run->s.regs.acrs, &sregs->acrs, sizeof(sregs->acrs));
	memcpy(&vcpu->arch.sie_block->gcr, &sregs->crs, sizeof(sregs->crs));
	restore_access_regs(vcpu->run->s.regs.acrs);
	return 0;
}

int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	memcpy(&sregs->acrs, &vcpu->run->s.regs.acrs, sizeof(sregs->acrs));
	memcpy(&sregs->crs, &vcpu->arch.sie_block->gcr, sizeof(sregs->crs));
	return 0;
}

int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	if (test_fp_ctl(fpu->fpc))
		return -EINVAL;
	memcpy(&vcpu->arch.guest_fpregs.fprs, &fpu->fprs, sizeof(fpu->fprs));
	vcpu->arch.guest_fpregs.fpc = fpu->fpc;
	restore_fp_ctl(&vcpu->arch.guest_fpregs.fpc);
	restore_fp_regs(vcpu->arch.guest_fpregs.fprs);
	return 0;
}

int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	memcpy(&fpu->fprs, &vcpu->arch.guest_fpregs.fprs, sizeof(fpu->fprs));
	fpu->fpc = vcpu->arch.guest_fpregs.fpc;
	return 0;
}

static int kvm_arch_vcpu_ioctl_set_initial_psw(struct kvm_vcpu *vcpu, psw_t psw)
{
	int rc = 0;

	if (!is_vcpu_stopped(vcpu))
		rc = -EBUSY;
	else {
		vcpu->run->psw_mask = psw.mask;
		vcpu->run->psw_addr = psw.addr;
	}
	return rc;
}

int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
				  struct kvm_translation *tr)
{
	return -EINVAL; /* not implemented yet */
}
#define VALID_GUESTDBG_FLAGS (KVM_GUESTDBG_SINGLESTEP | \
			      KVM_GUESTDBG_USE_HW_BP | \
			      KVM_GUESTDBG_ENABLE)

int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
					struct kvm_guest_debug *dbg)
{
	int rc = 0;

	vcpu->guest_debug = 0;
	kvm_s390_clear_bp_data(vcpu);

	if (dbg->control & ~VALID_GUESTDBG_FLAGS)
		return -EINVAL;

	if (dbg->control & KVM_GUESTDBG_ENABLE) {
		vcpu->guest_debug = dbg->control;
		/* enforce guest PER */
		atomic_set_mask(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags);

		if (dbg->control & KVM_GUESTDBG_USE_HW_BP)
			rc = kvm_s390_import_bp_data(vcpu, dbg);
	} else {
		atomic_clear_mask(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags);
		vcpu->arch.guestdbg.last_bp = 0;
	}

	if (rc) {
		vcpu->guest_debug = 0;
		kvm_s390_clear_bp_data(vcpu);
		atomic_clear_mask(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags);
	}

	return rc;
}
int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	/* CHECK_STOP and LOAD are not supported yet */
	return is_vcpu_stopped(vcpu) ? KVM_MP_STATE_STOPPED :
				       KVM_MP_STATE_OPERATING;
}

int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	int rc = 0;

	/* user space knows about this interface - let it control the state */
	vcpu->kvm->arch.user_cpu_state_ctrl = 1;

	switch (mp_state->mp_state) {
	case KVM_MP_STATE_STOPPED:
		kvm_s390_vcpu_stop(vcpu);
		break;
	case KVM_MP_STATE_OPERATING:
		kvm_s390_vcpu_start(vcpu);
		break;
	case KVM_MP_STATE_LOAD:
	case KVM_MP_STATE_CHECK_STOP:
		/* fall through - CHECK_STOP and LOAD are not supported yet */
	default:
		rc = -ENXIO;
	}

	return rc;
}

bool kvm_s390_cmma_enabled(struct kvm *kvm)
{
	if (!MACHINE_IS_LPAR)
		return false;
	/* only enable for z10 and later */
	if (!MACHINE_HAS_EDAT1)
		return false;
	if (!kvm->arch.use_cmma)
		return false;
	return true;
}

static bool ibs_enabled(struct kvm_vcpu *vcpu)
{
	return atomic_read(&vcpu->arch.sie_block->cpuflags) & CPUSTAT_IBS;
}
static int kvm_s390_handle_requests(struct kvm_vcpu *vcpu)
{
retry:
	s390_vcpu_unblock(vcpu);
	/*
	 * We use MMU_RELOAD just to re-arm the ipte notifier for the
	 * guest prefix page. gmap_ipte_notify will wait on the ptl lock.
	 * This ensures that the ipte instruction for this request has
	 * already finished. We might race against a second unmapper that
	 * wants to set the blocking bit. Lets just retry the request loop.
	 */
	if (kvm_check_request(KVM_REQ_MMU_RELOAD, vcpu)) {
		int rc;
		rc = gmap_ipte_notify(vcpu->arch.gmap,
				      kvm_s390_get_prefix(vcpu),
				      PAGE_SIZE * 2);
		if (rc)
			return rc;
		goto retry;
	}

	if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu)) {
		vcpu->arch.sie_block->ihcpu = 0xffff;
		goto retry;
	}

	if (kvm_check_request(KVM_REQ_ENABLE_IBS, vcpu)) {
		if (!ibs_enabled(vcpu)) {
			trace_kvm_s390_enable_disable_ibs(vcpu->vcpu_id, 1);
			atomic_set_mask(CPUSTAT_IBS,
					&vcpu->arch.sie_block->cpuflags);
		}
		goto retry;
	}

	if (kvm_check_request(KVM_REQ_DISABLE_IBS, vcpu)) {
		if (ibs_enabled(vcpu)) {
			trace_kvm_s390_enable_disable_ibs(vcpu->vcpu_id, 0);
			atomic_clear_mask(CPUSTAT_IBS,
					  &vcpu->arch.sie_block->cpuflags);
		}
		goto retry;
	}

	/* nothing to do, just clear the request */
	clear_bit(KVM_REQ_UNHALT, &vcpu->requests);

	return 0;
}
/**
 * kvm_arch_fault_in_page - fault-in guest page if necessary
 * @vcpu: The corresponding virtual cpu
 * @gpa: Guest physical address
 * @writable: Whether the page should be writable or not
 *
 * Make sure that a guest page has been faulted-in on the host.
 *
 * Return: Zero on success, negative error code otherwise.
 */
long kvm_arch_fault_in_page(struct kvm_vcpu *vcpu, gpa_t gpa, int writable)
{
	return gmap_fault(vcpu->arch.gmap, gpa,
			  writable ? FAULT_FLAG_WRITE : 0);
}

static void __kvm_inject_pfault_token(struct kvm_vcpu *vcpu, bool start_token,
				      unsigned long token)
{
	struct kvm_s390_interrupt inti;
	struct kvm_s390_irq irq;

	if (start_token) {
		irq.u.ext.ext_params2 = token;
		irq.type = KVM_S390_INT_PFAULT_INIT;
		WARN_ON_ONCE(kvm_s390_inject_vcpu(vcpu, &irq));
	} else {
		inti.type = KVM_S390_INT_PFAULT_DONE;
		inti.parm64 = token;
		WARN_ON_ONCE(kvm_s390_inject_vm(vcpu->kvm, &inti));
	}
}
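
/*
 * Pfault protocol: when an async page is not yet present, a PFAULT_INIT
 * local interrupt carrying the token is injected into the VCPU; once the
 * page has been made available, a PFAULT_DONE floating interrupt with the
 * same token completes the handshake.
 */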
void kvm_arch_async_page_not_present(struct kvm_vcpu *vcpu,
				     struct kvm_async_pf *work)
{
	trace_kvm_s390_pfault_init(vcpu, work->arch.pfault_token);
	__kvm_inject_pfault_token(vcpu, true, work->arch.pfault_token);
}

void kvm_arch_async_page_present(struct kvm_vcpu *vcpu,
				 struct kvm_async_pf *work)
{
	trace_kvm_s390_pfault_done(vcpu, work->arch.pfault_token);
	__kvm_inject_pfault_token(vcpu, false, work->arch.pfault_token);
}

void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu,
			       struct kvm_async_pf *work)
{
	/* s390 will always inject the page directly */
}

bool kvm_arch_can_inject_async_page_present(struct kvm_vcpu *vcpu)
{
	/*
	 * s390 will always inject the page directly,
	 * but we still want check_async_completion to cleanup
	 */
	return true;
}
static int kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu)
{
	hva_t hva;
	struct kvm_arch_async_pf arch;
	int rc;

	if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
		return 0;
	if ((vcpu->arch.sie_block->gpsw.mask & vcpu->arch.pfault_select) !=
	    vcpu->arch.pfault_compare)
		return 0;
	if (psw_extint_disabled(vcpu))
		return 0;
	if (kvm_s390_vcpu_has_irq(vcpu, 0))
		return 0;
	if (!(vcpu->arch.sie_block->gcr[0] & 0x200ul))
		return 0;
	if (!vcpu->arch.gmap->pfault_enabled)
		return 0;

	hva = gfn_to_hva(vcpu->kvm, gpa_to_gfn(current->thread.gmap_addr));
	hva += current->thread.gmap_addr & ~PAGE_MASK;
	if (read_guest_real(vcpu, vcpu->arch.pfault_token, &arch.pfault_token, 8))
		return 0;

	rc = kvm_setup_async_pf(vcpu, current->thread.gmap_addr, hva, &arch);
	return rc;
}
static int vcpu_pre_run(struct kvm_vcpu *vcpu)
{
	int rc, cpuflags;

	/*
	 * On s390 notifications for arriving pages will be delivered directly
	 * to the guest but the house keeping for completed pfaults is
	 * handled outside the worker.
	 */
	kvm_check_async_pf_completion(vcpu);

	memcpy(&vcpu->arch.sie_block->gg14, &vcpu->run->s.regs.gprs[14], 16);

	if (need_resched())
		schedule();

	if (test_cpu_flag(CIF_MCCK_PENDING))
		s390_handle_mcck();

	if (!kvm_is_ucontrol(vcpu->kvm)) {
		rc = kvm_s390_deliver_pending_interrupts(vcpu);
		if (rc)
			return rc;
	}

	rc = kvm_s390_handle_requests(vcpu);
	if (rc)
		return rc;

	if (guestdbg_enabled(vcpu)) {
		kvm_s390_backup_guest_per_regs(vcpu);
		kvm_s390_patch_guest_per_regs(vcpu);
	}

	vcpu->arch.sie_block->icptcode = 0;
	cpuflags = atomic_read(&vcpu->arch.sie_block->cpuflags);
	VCPU_EVENT(vcpu, 6, "entering sie flags %x", cpuflags);
	trace_kvm_s390_sie_enter(vcpu, cpuflags);

	return 0;
}
static int vcpu_post_run(struct kvm_vcpu *vcpu, int exit_reason)
{
	int rc = -1;

	VCPU_EVENT(vcpu, 6, "exit sie icptcode %d",
		   vcpu->arch.sie_block->icptcode);
	trace_kvm_s390_sie_exit(vcpu, vcpu->arch.sie_block->icptcode);

	if (guestdbg_enabled(vcpu))
		kvm_s390_restore_guest_per_regs(vcpu);

	if (exit_reason >= 0) {
		rc = 0;
	} else if (kvm_is_ucontrol(vcpu->kvm)) {
		vcpu->run->exit_reason = KVM_EXIT_S390_UCONTROL;
		vcpu->run->s390_ucontrol.trans_exc_code =
						current->thread.gmap_addr;
		vcpu->run->s390_ucontrol.pgm_code = 0x10;
		rc = -EREMOTE;

	} else if (current->thread.gmap_pfault) {
		trace_kvm_s390_major_guest_pfault(vcpu);
		current->thread.gmap_pfault = 0;
		if (kvm_arch_setup_async_pf(vcpu)) {
			rc = 0;
		} else {
			gpa_t gpa = current->thread.gmap_addr;
			rc = kvm_arch_fault_in_page(vcpu, gpa, 1);
		}
	}

	if (rc == -1) {
		VCPU_EVENT(vcpu, 3, "%s", "fault in sie instruction");
		trace_kvm_s390_sie_fault(vcpu);
		rc = kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
	}

	memcpy(&vcpu->run->s.regs.gprs[14], &vcpu->arch.sie_block->gg14, 16);

	if (rc == 0) {
		if (kvm_is_ucontrol(vcpu->kvm))
			/* Don't exit for host interrupts. */
			rc = vcpu->arch.sie_block->icptcode ? -EOPNOTSUPP : 0;
		else
			rc = kvm_handle_sie_intercept(vcpu);
	}

	return rc;
}
static int __vcpu_run(struct kvm_vcpu *vcpu)
{
	int rc, exit_reason;

	/*
	 * We try to hold kvm->srcu during most of vcpu_run (except when run-
	 * ning the guest), so that memslots (and other stuff) are protected
	 */
	vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);

	do {
		rc = vcpu_pre_run(vcpu);
		if (rc)
			break;

		srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
		/*
		 * As PF_VCPU will be used in fault handler, between
		 * guest_enter and guest_exit should be no uaccess.
		 */
		preempt_disable();
		kvm_guest_enter();
		preempt_enable();
		exit_reason = sie64a(vcpu->arch.sie_block,
				     vcpu->run->s.regs.gprs);
		kvm_guest_exit();
		vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);

		rc = vcpu_post_run(vcpu, exit_reason);
	} while (!signal_pending(current) && !guestdbg_exit_pending(vcpu) && !rc);

	srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
	return rc;
}
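
/*
 * sync_regs()/store_regs() mirror the kvm_run synced register fields into
 * and out of the SIE block around guest execution; only the pieces flagged
 * in kvm_dirty_regs are copied in, while everything is copied back out.
 */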
static void sync_regs(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	vcpu->arch.sie_block->gpsw.mask = kvm_run->psw_mask;
	vcpu->arch.sie_block->gpsw.addr = kvm_run->psw_addr;
	if (kvm_run->kvm_dirty_regs & KVM_SYNC_PREFIX)
		kvm_s390_set_prefix(vcpu, kvm_run->s.regs.prefix);
	if (kvm_run->kvm_dirty_regs & KVM_SYNC_CRS) {
		memcpy(&vcpu->arch.sie_block->gcr, &kvm_run->s.regs.crs, 128);
		/* some control register changes require a tlb flush */
		kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
	}
	if (kvm_run->kvm_dirty_regs & KVM_SYNC_ARCH0) {
		vcpu->arch.sie_block->cputm = kvm_run->s.regs.cputm;
		vcpu->arch.sie_block->ckc = kvm_run->s.regs.ckc;
		vcpu->arch.sie_block->todpr = kvm_run->s.regs.todpr;
		vcpu->arch.sie_block->pp = kvm_run->s.regs.pp;
		vcpu->arch.sie_block->gbea = kvm_run->s.regs.gbea;
	}
	if (kvm_run->kvm_dirty_regs & KVM_SYNC_PFAULT) {
		vcpu->arch.pfault_token = kvm_run->s.regs.pft;
		vcpu->arch.pfault_select = kvm_run->s.regs.pfs;
		vcpu->arch.pfault_compare = kvm_run->s.regs.pfc;
		if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
			kvm_clear_async_pf_completion_queue(vcpu);
	}
	kvm_run->kvm_dirty_regs = 0;
}
static void store_regs(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	kvm_run->psw_mask = vcpu->arch.sie_block->gpsw.mask;
	kvm_run->psw_addr = vcpu->arch.sie_block->gpsw.addr;
	kvm_run->s.regs.prefix = kvm_s390_get_prefix(vcpu);
	memcpy(&kvm_run->s.regs.crs, &vcpu->arch.sie_block->gcr, 128);
	kvm_run->s.regs.cputm = vcpu->arch.sie_block->cputm;
	kvm_run->s.regs.ckc = vcpu->arch.sie_block->ckc;
	kvm_run->s.regs.todpr = vcpu->arch.sie_block->todpr;
	kvm_run->s.regs.pp = vcpu->arch.sie_block->pp;
	kvm_run->s.regs.gbea = vcpu->arch.sie_block->gbea;
	kvm_run->s.regs.pft = vcpu->arch.pfault_token;
	kvm_run->s.regs.pfs = vcpu->arch.pfault_select;
	kvm_run->s.regs.pfc = vcpu->arch.pfault_compare;
}
int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	int rc;
	sigset_t sigsaved;

	if (guestdbg_exit_pending(vcpu)) {
		kvm_s390_prepare_debug_exit(vcpu);
		return 0;
	}

	if (vcpu->sigset_active)
		sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);

	if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm)) {
		kvm_s390_vcpu_start(vcpu);
	} else if (is_vcpu_stopped(vcpu)) {
		pr_err_ratelimited("kvm-s390: can't run stopped vcpu %d\n",
				   vcpu->vcpu_id);
		return -EINVAL;
	}

	sync_regs(vcpu, kvm_run);

	might_fault();
	rc = __vcpu_run(vcpu);

	if (signal_pending(current) && !rc) {
		kvm_run->exit_reason = KVM_EXIT_INTR;
		rc = -EINTR;
	}

	if (guestdbg_exit_pending(vcpu) && !rc) {
		kvm_s390_prepare_debug_exit(vcpu);
		rc = 0;
	}

	if (rc == -EOPNOTSUPP) {
		/* intercept cannot be handled in-kernel, prepare kvm-run */
		kvm_run->exit_reason         = KVM_EXIT_S390_SIEIC;
		kvm_run->s390_sieic.icptcode = vcpu->arch.sie_block->icptcode;
		kvm_run->s390_sieic.ipa      = vcpu->arch.sie_block->ipa;
		kvm_run->s390_sieic.ipb      = vcpu->arch.sie_block->ipb;
		rc = 0;
	}

	if (rc == -EREMOTE) {
		/* intercept was handled, but userspace support is needed
		 * kvm_run has been prepared by the handler */
		rc = 0;
	}

	store_regs(vcpu, kvm_run);

	if (vcpu->sigset_active)
		sigprocmask(SIG_SETMASK, &sigsaved, NULL);

	vcpu->stat.exit_userspace++;
	return rc;
}
/*
 * store status at address
 * we use have two special cases:
 * KVM_S390_STORE_STATUS_NOADDR: -> 0x1200 on 64 bit
 * KVM_S390_STORE_STATUS_PREFIXED: -> prefix
 */
int kvm_s390_store_status_unloaded(struct kvm_vcpu *vcpu, unsigned long gpa)
{
	unsigned char archmode = 1;
	unsigned int px;
	u64 clkcomp;
	int rc;

	if (gpa == KVM_S390_STORE_STATUS_NOADDR) {
		if (write_guest_abs(vcpu, 163, &archmode, 1))
			return -EFAULT;
		gpa = SAVE_AREA_BASE;
	} else if (gpa == KVM_S390_STORE_STATUS_PREFIXED) {
		if (write_guest_real(vcpu, 163, &archmode, 1))
			return -EFAULT;
		gpa = kvm_s390_real_to_abs(vcpu, SAVE_AREA_BASE);
	}
	rc = write_guest_abs(vcpu, gpa + offsetof(struct save_area, fp_regs),
			     vcpu->arch.guest_fpregs.fprs, 128);
	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, gp_regs),
			      vcpu->run->s.regs.gprs, 128);
	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, psw),
			      &vcpu->arch.sie_block->gpsw, 16);
	px = kvm_s390_get_prefix(vcpu);
	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, pref_reg),
			      &px, 4);
	rc |= write_guest_abs(vcpu,
			      gpa + offsetof(struct save_area, fp_ctrl_reg),
			      &vcpu->arch.guest_fpregs.fpc, 4);
	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, tod_reg),
			      &vcpu->arch.sie_block->todpr, 4);
	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, timer),
			      &vcpu->arch.sie_block->cputm, 8);
	clkcomp = vcpu->arch.sie_block->ckc >> 8;
	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, clk_cmp),
			      &clkcomp, 8);
	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, acc_regs),
			      &vcpu->run->s.regs.acrs, 64);
	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, ctrl_regs),
			      &vcpu->arch.sie_block->gcr, 128);
	return rc ? -EFAULT : 0;
}
int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
{
	/*
	 * The guest FPRS and ACRS are in the host FPRS/ACRS due to the lazy
	 * copying in vcpu load/put. Lets update our copies before we save
	 * it into the save area
	 */
	save_fp_ctl(&vcpu->arch.guest_fpregs.fpc);
	save_fp_regs(vcpu->arch.guest_fpregs.fprs);
	save_access_regs(vcpu->run->s.regs.acrs);

	return kvm_s390_store_status_unloaded(vcpu, addr);
}

static void __disable_ibs_on_vcpu(struct kvm_vcpu *vcpu)
{
	kvm_check_request(KVM_REQ_ENABLE_IBS, vcpu);
	kvm_make_request(KVM_REQ_DISABLE_IBS, vcpu);
	exit_sie_sync(vcpu);
}

static void __disable_ibs_on_all_vcpus(struct kvm *kvm)
{
	unsigned int i;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(i, vcpu, kvm) {
		__disable_ibs_on_vcpu(vcpu);
	}
}

static void __enable_ibs_on_vcpu(struct kvm_vcpu *vcpu)
{
	kvm_check_request(KVM_REQ_DISABLE_IBS, vcpu);
	kvm_make_request(KVM_REQ_ENABLE_IBS, vcpu);
	exit_sie_sync(vcpu);
}
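
/*
 * The IBS facility only pays off while a single VCPU is running, so
 * starting a second VCPU revokes it everywhere and stopping the
 * second-to-last one re-enables it on the sole survivor.
 */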
void kvm_s390_vcpu_start(struct kvm_vcpu *vcpu)
{
	int i, online_vcpus, started_vcpus = 0;

	if (!is_vcpu_stopped(vcpu))
		return;

	trace_kvm_s390_vcpu_start_stop(vcpu->vcpu_id, 1);
	/* Only one cpu at a time may enter/leave the STOPPED state. */
	spin_lock(&vcpu->kvm->arch.start_stop_lock);
	online_vcpus = atomic_read(&vcpu->kvm->online_vcpus);

	for (i = 0; i < online_vcpus; i++) {
		if (!is_vcpu_stopped(vcpu->kvm->vcpus[i]))
			started_vcpus++;
	}

	if (started_vcpus == 0) {
		/* we're the only active VCPU -> speed it up */
		__enable_ibs_on_vcpu(vcpu);
	} else if (started_vcpus == 1) {
		/*
		 * As we are starting a second VCPU, we have to disable
		 * the IBS facility on all VCPUs to remove potentially
		 * oustanding ENABLE requests.
		 */
		__disable_ibs_on_all_vcpus(vcpu->kvm);
	}

	atomic_clear_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);
	/*
	 * Another VCPU might have used IBS while we were offline.
	 * Let's play safe and flush the VCPU at startup.
	 */
	kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
	spin_unlock(&vcpu->kvm->arch.start_stop_lock);
	return;
}

void kvm_s390_vcpu_stop(struct kvm_vcpu *vcpu)
{
	int i, online_vcpus, started_vcpus = 0;
	struct kvm_vcpu *started_vcpu = NULL;

	if (is_vcpu_stopped(vcpu))
		return;

	trace_kvm_s390_vcpu_start_stop(vcpu->vcpu_id, 0);
	/* Only one cpu at a time may enter/leave the STOPPED state. */
	spin_lock(&vcpu->kvm->arch.start_stop_lock);
	online_vcpus = atomic_read(&vcpu->kvm->online_vcpus);

	/* SIGP STOP and SIGP STOP AND STORE STATUS has been fully processed */
	kvm_s390_clear_stop_irq(vcpu);

	atomic_set_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);
	__disable_ibs_on_vcpu(vcpu);

	for (i = 0; i < online_vcpus; i++) {
		if (!is_vcpu_stopped(vcpu->kvm->vcpus[i])) {
			started_vcpus++;
			started_vcpu = vcpu->kvm->vcpus[i];
		}
	}

	if (started_vcpus == 1) {
		/*
		 * As we only have one VCPU left, we want to enable the
		 * IBS facility for that VCPU to speed it up.
		 */
		__enable_ibs_on_vcpu(started_vcpu);
	}

	spin_unlock(&vcpu->kvm->arch.start_stop_lock);
	return;
}
static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
				     struct kvm_enable_cap *cap)
{
	int r;

	if (cap->flags)
		return -EINVAL;

	switch (cap->cap) {
	case KVM_CAP_S390_CSS_SUPPORT:
		if (!vcpu->kvm->arch.css_support) {
			vcpu->kvm->arch.css_support = 1;
			trace_kvm_s390_enable_css(vcpu->kvm);
		}
		r = 0;
		break;
	default:
		r = -EINVAL;
		break;
	}
	return r;
}

long kvm_arch_vcpu_ioctl(struct file *filp,
			 unsigned int ioctl, unsigned long arg)
{
	struct kvm_vcpu *vcpu = filp->private_data;
	void __user *argp = (void __user *)arg;
	int idx;
	long r;

	switch (ioctl) {
	case KVM_S390_INTERRUPT: {
		struct kvm_s390_interrupt s390int;
		struct kvm_s390_irq s390irq;

		r = -EFAULT;
		if (copy_from_user(&s390int, argp, sizeof(s390int)))
			break;
		if (s390int_to_s390irq(&s390int, &s390irq))
			return -EINVAL;
		r = kvm_s390_inject_vcpu(vcpu, &s390irq);
		break;
	}
	case KVM_S390_STORE_STATUS:
		idx = srcu_read_lock(&vcpu->kvm->srcu);
		r = kvm_s390_vcpu_store_status(vcpu, arg);
		srcu_read_unlock(&vcpu->kvm->srcu, idx);
		break;
	case KVM_S390_SET_INITIAL_PSW: {
		psw_t psw;

		r = -EFAULT;
		if (copy_from_user(&psw, argp, sizeof(psw)))
			break;
		r = kvm_arch_vcpu_ioctl_set_initial_psw(vcpu, psw);
		break;
	}
	case KVM_S390_INITIAL_RESET:
		r = kvm_arch_vcpu_ioctl_initial_reset(vcpu);
		break;
	case KVM_SET_ONE_REG:
	case KVM_GET_ONE_REG: {
		struct kvm_one_reg reg;

		r = -EFAULT;
		if (copy_from_user(&reg, argp, sizeof(reg)))
			break;
		if (ioctl == KVM_SET_ONE_REG)
			r = kvm_arch_vcpu_ioctl_set_one_reg(vcpu, &reg);
		else
			r = kvm_arch_vcpu_ioctl_get_one_reg(vcpu, &reg);
		break;
	}
#ifdef CONFIG_KVM_S390_UCONTROL
	case KVM_S390_UCAS_MAP: {
		struct kvm_s390_ucas_mapping ucasmap;

		if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
			r = -EFAULT;
			break;
		}

		if (!kvm_is_ucontrol(vcpu->kvm)) {
			r = -EINVAL;
			break;
		}

		r = gmap_map_segment(vcpu->arch.gmap, ucasmap.user_addr,
				     ucasmap.vcpu_addr, ucasmap.length);
		break;
	}
	case KVM_S390_UCAS_UNMAP: {
		struct kvm_s390_ucas_mapping ucasmap;

		if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
			r = -EFAULT;
			break;
		}

		if (!kvm_is_ucontrol(vcpu->kvm)) {
			r = -EINVAL;
			break;
		}

		r = gmap_unmap_segment(vcpu->arch.gmap, ucasmap.vcpu_addr,
			ucasmap.length);
		break;
	}
#endif
	case KVM_S390_VCPU_FAULT: {
		r = gmap_fault(vcpu->arch.gmap, arg, 0);
		break;
	}
	case KVM_ENABLE_CAP:
	{
		struct kvm_enable_cap cap;

		r = -EFAULT;
		if (copy_from_user(&cap, argp, sizeof(cap)))
			break;
		r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap);
		break;
	}
	default:
		r = -ENOTTY;
	}
	return r;
}
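
/*
 * For user-controlled (ucontrol) VMs the SIE control block itself is
 * exposed to userspace: mmap()ing the vcpu fd at KVM_S390_SIE_PAGE_OFFSET
 * faults in the page backing vcpu->arch.sie_block.
 */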
int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
{
#ifdef CONFIG_KVM_S390_UCONTROL
	if ((vmf->pgoff == KVM_S390_SIE_PAGE_OFFSET)
		 && (kvm_is_ucontrol(vcpu->kvm))) {
		vmf->page = virt_to_page(vcpu->arch.sie_block);
		get_page(vmf->page);
		return 0;
	}
#endif
	return VM_FAULT_SIGBUS;
}

int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
			    unsigned long npages)
{
	return 0;
}

/* Section: memory related */
int kvm_arch_prepare_memory_region(struct kvm *kvm,
				   struct kvm_memory_slot *memslot,
				   struct kvm_userspace_memory_region *mem,
				   enum kvm_mr_change change)
{
	/* A few sanity checks. We can have memory slots which have to be
	   located/ended at a segment boundary (1MB). The memory in userland is
	   ok to be fragmented into various different vmas. It is okay to mmap()
	   and munmap() stuff in this slot after doing this call at any time */

	if (mem->userspace_addr & 0xffffful)
		return -EINVAL;

	if (mem->memory_size & 0xffffful)
		return -EINVAL;

	return 0;
}

void kvm_arch_commit_memory_region(struct kvm *kvm,
				struct kvm_userspace_memory_region *mem,
				const struct kvm_memory_slot *old,
				enum kvm_mr_change change)
{
	int rc;

	/* If the basics of the memslot do not change, we do not want
	 * to update the gmap. Every update causes several unnecessary
	 * segment translation exceptions. This is usually handled just
	 * fine by the normal fault handler + gmap, but it will also
	 * cause faults on the prefix page of running guest CPUs.
	 */
	if (old->userspace_addr == mem->userspace_addr &&
	    old->base_gfn * PAGE_SIZE == mem->guest_phys_addr &&
	    old->npages * PAGE_SIZE == mem->memory_size)
		return;

	rc = gmap_map_segment(kvm->arch.gmap, mem->userspace_addr,
		mem->guest_phys_addr, mem->memory_size);
	if (rc)
		printk(KERN_WARNING "kvm-s390: failed to commit memory region\n");
	return;
}
static int __init kvm_s390_init(void)
{
	return kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
}

static void __exit kvm_s390_exit(void)
{
	kvm_exit();
}

module_init(kvm_s390_init);
module_exit(kvm_s390_exit);

/*
 * Enable autoloading of the kvm module.
 * Note that we add the module alias here instead of virt/kvm/kvm_main.c
 * since x86 takes a different approach.
 */
#include <linux/miscdevice.h>
MODULE_ALIAS_MISCDEV(KVM_MINOR);
MODULE_ALIAS("devname:kvm");