/*
 * hosting zSeries kernel virtual machines
 *
 * Copyright IBM Corp. 2008, 2009
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (version 2 only)
 * as published by the Free Software Foundation.
 *
 *    Author(s): Carsten Otte <cotte@de.ibm.com>
 *               Christian Borntraeger <borntraeger@de.ibm.com>
 *               Heiko Carstens <heiko.carstens@de.ibm.com>
 *               Christian Ehrhardt <ehrhardt@de.ibm.com>
 *               Jason J. Herne <jjherne@us.ibm.com>
 */
#include <linux/compiler.h>
#include <linux/err.h>
#include <linux/fs.h>
#include <linux/hrtimer.h>
#include <linux/init.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/timer.h>
#include <asm/asm-offsets.h>
#include <asm/lowcore.h>
#include <asm/pgtable.h>
#include <asm/nmi.h>
#include <asm/switch_to.h>
#include <asm/facility.h>
#include <asm/sclp.h>
#include "kvm-s390.h"
#include "gaccess.h"

#define CREATE_TRACE_POINTS
#include "trace.h"
#include "trace-s390.h"
#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU

struct kvm_stats_debugfs_item debugfs_entries[] = {
	{ "userspace_handled", VCPU_STAT(exit_userspace) },
	{ "exit_null", VCPU_STAT(exit_null) },
	{ "exit_validity", VCPU_STAT(exit_validity) },
	{ "exit_stop_request", VCPU_STAT(exit_stop_request) },
	{ "exit_external_request", VCPU_STAT(exit_external_request) },
	{ "exit_external_interrupt", VCPU_STAT(exit_external_interrupt) },
	{ "exit_instruction", VCPU_STAT(exit_instruction) },
	{ "exit_program_interruption", VCPU_STAT(exit_program_interruption) },
	{ "exit_instr_and_program_int", VCPU_STAT(exit_instr_and_program) },
	{ "halt_successful_poll", VCPU_STAT(halt_successful_poll) },
	{ "halt_wakeup", VCPU_STAT(halt_wakeup) },
	{ "instruction_lctlg", VCPU_STAT(instruction_lctlg) },
	{ "instruction_lctl", VCPU_STAT(instruction_lctl) },
	{ "instruction_stctl", VCPU_STAT(instruction_stctl) },
	{ "instruction_stctg", VCPU_STAT(instruction_stctg) },
	{ "deliver_emergency_signal", VCPU_STAT(deliver_emergency_signal) },
	{ "deliver_external_call", VCPU_STAT(deliver_external_call) },
	{ "deliver_service_signal", VCPU_STAT(deliver_service_signal) },
	{ "deliver_virtio_interrupt", VCPU_STAT(deliver_virtio_interrupt) },
	{ "deliver_stop_signal", VCPU_STAT(deliver_stop_signal) },
	{ "deliver_prefix_signal", VCPU_STAT(deliver_prefix_signal) },
	{ "deliver_restart_signal", VCPU_STAT(deliver_restart_signal) },
	{ "deliver_program_interruption", VCPU_STAT(deliver_program_int) },
	{ "exit_wait_state", VCPU_STAT(exit_wait_state) },
	{ "instruction_pfmf", VCPU_STAT(instruction_pfmf) },
	{ "instruction_stidp", VCPU_STAT(instruction_stidp) },
	{ "instruction_spx", VCPU_STAT(instruction_spx) },
	{ "instruction_stpx", VCPU_STAT(instruction_stpx) },
	{ "instruction_stap", VCPU_STAT(instruction_stap) },
	{ "instruction_storage_key", VCPU_STAT(instruction_storage_key) },
	{ "instruction_ipte_interlock", VCPU_STAT(instruction_ipte_interlock) },
	{ "instruction_stsch", VCPU_STAT(instruction_stsch) },
	{ "instruction_chsc", VCPU_STAT(instruction_chsc) },
	{ "instruction_essa", VCPU_STAT(instruction_essa) },
	{ "instruction_stsi", VCPU_STAT(instruction_stsi) },
	{ "instruction_stfl", VCPU_STAT(instruction_stfl) },
	{ "instruction_tprot", VCPU_STAT(instruction_tprot) },
	{ "instruction_sigp_sense", VCPU_STAT(instruction_sigp_sense) },
	{ "instruction_sigp_sense_running", VCPU_STAT(instruction_sigp_sense_running) },
	{ "instruction_sigp_external_call", VCPU_STAT(instruction_sigp_external_call) },
	{ "instruction_sigp_emergency", VCPU_STAT(instruction_sigp_emergency) },
	{ "instruction_sigp_cond_emergency", VCPU_STAT(instruction_sigp_cond_emergency) },
	{ "instruction_sigp_start", VCPU_STAT(instruction_sigp_start) },
	{ "instruction_sigp_stop", VCPU_STAT(instruction_sigp_stop) },
	{ "instruction_sigp_stop_store_status", VCPU_STAT(instruction_sigp_stop_store_status) },
	{ "instruction_sigp_store_status", VCPU_STAT(instruction_sigp_store_status) },
	{ "instruction_sigp_set_arch", VCPU_STAT(instruction_sigp_arch) },
	{ "instruction_sigp_set_prefix", VCPU_STAT(instruction_sigp_prefix) },
	{ "instruction_sigp_restart", VCPU_STAT(instruction_sigp_restart) },
	{ "instruction_sigp_cpu_reset", VCPU_STAT(instruction_sigp_cpu_reset) },
	{ "instruction_sigp_init_cpu_reset", VCPU_STAT(instruction_sigp_init_cpu_reset) },
	{ "instruction_sigp_unknown", VCPU_STAT(instruction_sigp_unknown) },
	{ "diagnose_10", VCPU_STAT(diagnose_10) },
	{ "diagnose_44", VCPU_STAT(diagnose_44) },
	{ "diagnose_9c", VCPU_STAT(diagnose_9c) },
	{ NULL }
};
/* upper facilities limit for kvm */
unsigned long kvm_s390_fac_list_mask[] = {
	0xff82fffbf4fc2000UL,
	0x005c000000000000UL,
};

unsigned long kvm_s390_fac_list_mask_size(void)
{
	BUILD_BUG_ON(ARRAY_SIZE(kvm_s390_fac_list_mask) > S390_ARCH_FAC_MASK_SIZE_U64);
	return ARRAY_SIZE(kvm_s390_fac_list_mask);
}
static struct gmap_notifier gmap_notifier;

/* Section: not file related */
int kvm_arch_hardware_enable(void)
{
	/* every s390 is virtualization enabled ;-) */
	return 0;
}

static void kvm_gmap_notifier(struct gmap *gmap, unsigned long address);

int kvm_arch_hardware_setup(void)
{
	gmap_notifier.notifier_call = kvm_gmap_notifier;
	gmap_register_ipte_notifier(&gmap_notifier);
	return 0;
}

void kvm_arch_hardware_unsetup(void)
{
	gmap_unregister_ipte_notifier(&gmap_notifier);
}

int kvm_arch_init(void *opaque)
{
	/* Register floating interrupt controller interface. */
	return kvm_register_device_ops(&kvm_flic_ops, KVM_DEV_TYPE_FLIC);
}
/* Section: device related */
long kvm_arch_dev_ioctl(struct file *filp,
			unsigned int ioctl, unsigned long arg)
{
	if (ioctl == KVM_S390_ENABLE_SIE)
		return s390_enable_sie();
	return -EINVAL;
}
int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
{
	int r;

	switch (ext) {
	case KVM_CAP_S390_PSW:
	case KVM_CAP_S390_GMAP:
	case KVM_CAP_SYNC_MMU:
#ifdef CONFIG_KVM_S390_UCONTROL
	case KVM_CAP_S390_UCONTROL:
#endif
	case KVM_CAP_ASYNC_PF:
	case KVM_CAP_SYNC_REGS:
	case KVM_CAP_ONE_REG:
	case KVM_CAP_ENABLE_CAP:
	case KVM_CAP_S390_CSS_SUPPORT:
	case KVM_CAP_IOEVENTFD:
	case KVM_CAP_DEVICE_CTRL:
	case KVM_CAP_ENABLE_CAP_VM:
	case KVM_CAP_S390_IRQCHIP:
	case KVM_CAP_VM_ATTRIBUTES:
	case KVM_CAP_MP_STATE:
	case KVM_CAP_S390_USER_SIGP:
		r = 1;
		break;
	case KVM_CAP_NR_VCPUS:
	case KVM_CAP_MAX_VCPUS:
		r = KVM_MAX_VCPUS;
		break;
	case KVM_CAP_NR_MEMSLOTS:
		r = KVM_USER_MEM_SLOTS;
		break;
	case KVM_CAP_S390_COW:
		r = MACHINE_HAS_ESOP;
		break;
	default:
		r = 0;
	}
	return r;
}
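
/*
 * Walk all pages of a memslot and propagate the dirty state tracked by
 * the gmap (guest address space) into KVM's generic dirty bitmap, so
 * that KVM_GET_DIRTY_LOG below sees them.
 */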
static void kvm_s390_sync_dirty_log(struct kvm *kvm,
				    struct kvm_memory_slot *memslot)
{
	gfn_t cur_gfn, last_gfn;
	unsigned long address;
	struct gmap *gmap = kvm->arch.gmap;

	down_read(&gmap->mm->mmap_sem);
	/* Loop over all guest pages */
	last_gfn = memslot->base_gfn + memslot->npages;
	for (cur_gfn = memslot->base_gfn; cur_gfn <= last_gfn; cur_gfn++) {
		address = gfn_to_hva_memslot(memslot, cur_gfn);

		if (gmap_test_and_clear_dirty(address, gmap))
			mark_page_dirty(kvm, cur_gfn);
	}
	up_read(&gmap->mm->mmap_sem);
}
/* Section: vm related */
/*
 * Get (and clear) the dirty memory log for a memory slot.
 */
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
			       struct kvm_dirty_log *log)
{
	int r;
	unsigned long n;
	struct kvm_memory_slot *memslot;
	int is_dirty = 0;

	mutex_lock(&kvm->slots_lock);

	r = -EINVAL;
	if (log->slot >= KVM_USER_MEM_SLOTS)
		goto out;

	memslot = id_to_memslot(kvm->memslots, log->slot);
	r = -ENOENT;
	if (!memslot->dirty_bitmap)
		goto out;

	kvm_s390_sync_dirty_log(kvm, memslot);
	r = kvm_get_dirty_log(kvm, log, &is_dirty);
	if (r)
		goto out;

	/* Clear the dirty log */
	if (is_dirty) {
		n = kvm_dirty_bitmap_bytes(memslot);
		memset(memslot->dirty_bitmap, 0, n);
	}
	r = 0;
out:
	mutex_unlock(&kvm->slots_lock);
	return r;
}
static int kvm_vm_ioctl_enable_cap(struct kvm *kvm, struct kvm_enable_cap *cap)
{
	int r;

	if (cap->flags)
		return -EINVAL;

	switch (cap->cap) {
	case KVM_CAP_S390_IRQCHIP:
		kvm->arch.use_irqchip = 1;
		r = 0;
		break;
	case KVM_CAP_S390_USER_SIGP:
		kvm->arch.user_sigp = 1;
		r = 0;
		break;
	default:
		r = -EINVAL;
		break;
	}
	return r;
}
static int kvm_s390_get_mem_control(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	switch (attr->attr) {
	case KVM_S390_VM_MEM_LIMIT_SIZE:
		ret = 0;
		if (put_user(kvm->arch.gmap->asce_end, (u64 __user *)attr->addr))
			ret = -EFAULT;
		break;
	default:
		ret = -ENXIO;
		break;
	}
	return ret;
}
static int kvm_s390_set_mem_control(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;
	unsigned int idx;

	switch (attr->attr) {
	case KVM_S390_VM_MEM_ENABLE_CMMA:
		ret = -EBUSY;
		mutex_lock(&kvm->lock);
		if (atomic_read(&kvm->online_vcpus) == 0) {
			kvm->arch.use_cmma = 1;
			ret = 0;
		}
		mutex_unlock(&kvm->lock);
		break;
	case KVM_S390_VM_MEM_CLR_CMMA:
		mutex_lock(&kvm->lock);
		idx = srcu_read_lock(&kvm->srcu);
		s390_reset_cmma(kvm->arch.gmap->mm);
		srcu_read_unlock(&kvm->srcu, idx);
		mutex_unlock(&kvm->lock);
		ret = 0;
		break;
	case KVM_S390_VM_MEM_LIMIT_SIZE: {
		unsigned long new_limit;

		if (kvm_is_ucontrol(kvm))
			return -EINVAL;

		if (get_user(new_limit, (u64 __user *)attr->addr))
			return -EFAULT;

		if (new_limit > kvm->arch.gmap->asce_end)
			return -E2BIG;

		ret = -EBUSY;
		mutex_lock(&kvm->lock);
		if (atomic_read(&kvm->online_vcpus) == 0) {
			/* gmap_alloc will round the limit up */
			struct gmap *new = gmap_alloc(current->mm, new_limit);

			if (!new) {
				ret = -ENOMEM;
			} else {
				gmap_free(kvm->arch.gmap);
				new->private = kvm;
				kvm->arch.gmap = new;
				ret = 0;
			}
		}
		mutex_unlock(&kvm->lock);
		break;
	}
	default:
		ret = -ENXIO;
		break;
	}
	return ret;
}
static void kvm_s390_vcpu_crypto_setup(struct kvm_vcpu *vcpu);

static int kvm_s390_vm_set_crypto(struct kvm *kvm, struct kvm_device_attr *attr)
{
	struct kvm_vcpu *vcpu;
	int i;

	if (!test_kvm_facility(kvm, 76))
		return -EINVAL;

	mutex_lock(&kvm->lock);
	switch (attr->attr) {
	case KVM_S390_VM_CRYPTO_ENABLE_AES_KW:
		get_random_bytes(
			kvm->arch.crypto.crycb->aes_wrapping_key_mask,
			sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
		kvm->arch.crypto.aes_kw = 1;
		break;
	case KVM_S390_VM_CRYPTO_ENABLE_DEA_KW:
		get_random_bytes(
			kvm->arch.crypto.crycb->dea_wrapping_key_mask,
			sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));
		kvm->arch.crypto.dea_kw = 1;
		break;
	case KVM_S390_VM_CRYPTO_DISABLE_AES_KW:
		kvm->arch.crypto.aes_kw = 0;
		memset(kvm->arch.crypto.crycb->aes_wrapping_key_mask, 0,
			sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
		break;
	case KVM_S390_VM_CRYPTO_DISABLE_DEA_KW:
		kvm->arch.crypto.dea_kw = 0;
		memset(kvm->arch.crypto.crycb->dea_wrapping_key_mask, 0,
			sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));
		break;
	default:
		mutex_unlock(&kvm->lock);
		return -ENXIO;
	}

	kvm_for_each_vcpu(i, vcpu, kvm) {
		kvm_s390_vcpu_crypto_setup(vcpu);
		exit_sie(vcpu);
	}
	mutex_unlock(&kvm->lock);
	return 0;
}
static int kvm_s390_set_tod_high(struct kvm *kvm, struct kvm_device_attr *attr)
{
	u8 gtod_high;

	if (copy_from_user(&gtod_high, (void __user *)attr->addr,
			   sizeof(gtod_high)))
		return -EFAULT;

	if (gtod_high != 0)
		return -EINVAL;

	return 0;
}
static int kvm_s390_set_tod_low(struct kvm *kvm, struct kvm_device_attr *attr)
{
	struct kvm_vcpu *cur_vcpu;
	unsigned int vcpu_idx;
	u64 host_tod, gtod;
	int r;

	if (copy_from_user(&gtod, (void __user *)attr->addr, sizeof(gtod)))
		return -EFAULT;

	r = store_tod_clock(&host_tod);
	if (r)
		return r;

	mutex_lock(&kvm->lock);
	kvm->arch.epoch = gtod - host_tod;
	kvm_for_each_vcpu(vcpu_idx, cur_vcpu, kvm) {
		cur_vcpu->arch.sie_block->epoch = kvm->arch.epoch;
		exit_sie(cur_vcpu);
	}
	mutex_unlock(&kvm->lock);
	return 0;
}
static int kvm_s390_set_tod(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	if (attr->flags)
		return -EINVAL;

	switch (attr->attr) {
	case KVM_S390_VM_TOD_HIGH:
		ret = kvm_s390_set_tod_high(kvm, attr);
		break;
	case KVM_S390_VM_TOD_LOW:
		ret = kvm_s390_set_tod_low(kvm, attr);
		break;
	default:
		ret = -ENXIO;
		break;
	}
	return ret;
}
static int kvm_s390_get_tod_high(struct kvm *kvm, struct kvm_device_attr *attr)
{
	u8 gtod_high = 0;

	if (copy_to_user((void __user *)attr->addr, &gtod_high,
			 sizeof(gtod_high)))
		return -EFAULT;

	return 0;
}
static int kvm_s390_get_tod_low(struct kvm *kvm, struct kvm_device_attr *attr)
{
	u64 host_tod, gtod;
	int r;

	r = store_tod_clock(&host_tod);
	if (r)
		return r;

	gtod = host_tod + kvm->arch.epoch;
	if (copy_to_user((void __user *)attr->addr, &gtod, sizeof(gtod)))
		return -EFAULT;

	return 0;
}
static int kvm_s390_get_tod(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	if (attr->flags)
		return -EINVAL;

	switch (attr->attr) {
	case KVM_S390_VM_TOD_HIGH:
		ret = kvm_s390_get_tod_high(kvm, attr);
		break;
	case KVM_S390_VM_TOD_LOW:
		ret = kvm_s390_get_tod_low(kvm, attr);
		break;
	default:
		ret = -ENXIO;
		break;
	}
	return ret;
}
static int kvm_s390_set_processor(struct kvm *kvm, struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_cpu_processor *proc;
	int ret = 0;

	mutex_lock(&kvm->lock);
	if (atomic_read(&kvm->online_vcpus)) {
		ret = -EBUSY;
		goto out;
	}
	proc = kzalloc(sizeof(*proc), GFP_KERNEL);
	if (!proc) {
		ret = -ENOMEM;
		goto out;
	}
	if (!copy_from_user(proc, (void __user *)attr->addr,
			    sizeof(*proc))) {
		memcpy(&kvm->arch.model.cpu_id, &proc->cpuid,
		       sizeof(struct cpuid));
		kvm->arch.model.ibc = proc->ibc;
		memcpy(kvm->arch.model.fac->list, proc->fac_list,
		       S390_ARCH_FAC_LIST_SIZE_BYTE);
	} else
		ret = -EFAULT;
	kfree(proc);
out:
	mutex_unlock(&kvm->lock);
	return ret;
}
static int kvm_s390_set_cpu_model(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret = -ENXIO;

	switch (attr->attr) {
	case KVM_S390_VM_CPU_PROCESSOR:
		ret = kvm_s390_set_processor(kvm, attr);
		break;
	}
	return ret;
}
static int kvm_s390_get_processor(struct kvm *kvm, struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_cpu_processor *proc;
	int ret = 0;

	proc = kzalloc(sizeof(*proc), GFP_KERNEL);
	if (!proc) {
		ret = -ENOMEM;
		goto out;
	}
	memcpy(&proc->cpuid, &kvm->arch.model.cpu_id, sizeof(struct cpuid));
	proc->ibc = kvm->arch.model.ibc;
	memcpy(&proc->fac_list, kvm->arch.model.fac->list, S390_ARCH_FAC_LIST_SIZE_BYTE);
	if (copy_to_user((void __user *)attr->addr, proc, sizeof(*proc)))
		ret = -EFAULT;
	kfree(proc);
out:
	return ret;
}
static int kvm_s390_get_machine(struct kvm *kvm, struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_cpu_machine *mach;
	int ret = 0;

	mach = kzalloc(sizeof(*mach), GFP_KERNEL);
	if (!mach) {
		ret = -ENOMEM;
		goto out;
	}
	get_cpu_id((struct cpuid *) &mach->cpuid);
	mach->ibc = sclp_get_ibc();
	memcpy(&mach->fac_mask, kvm->arch.model.fac->mask,
	       S390_ARCH_FAC_LIST_SIZE_BYTE);
	memcpy((unsigned long *)&mach->fac_list, S390_lowcore.stfle_fac_list,
	       S390_ARCH_FAC_LIST_SIZE_BYTE);
	if (copy_to_user((void __user *)attr->addr, mach, sizeof(*mach)))
		ret = -EFAULT;
	kfree(mach);
out:
	return ret;
}
static int kvm_s390_get_cpu_model(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret = -ENXIO;

	switch (attr->attr) {
	case KVM_S390_VM_CPU_PROCESSOR:
		ret = kvm_s390_get_processor(kvm, attr);
		break;
	case KVM_S390_VM_CPU_MACHINE:
		ret = kvm_s390_get_machine(kvm, attr);
		break;
	}
	return ret;
}
static int kvm_s390_vm_set_attr(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	switch (attr->group) {
	case KVM_S390_VM_MEM_CTRL:
		ret = kvm_s390_set_mem_control(kvm, attr);
		break;
	case KVM_S390_VM_TOD:
		ret = kvm_s390_set_tod(kvm, attr);
		break;
	case KVM_S390_VM_CPU_MODEL:
		ret = kvm_s390_set_cpu_model(kvm, attr);
		break;
	case KVM_S390_VM_CRYPTO:
		ret = kvm_s390_vm_set_crypto(kvm, attr);
		break;
	default:
		ret = -ENXIO;
		break;
	}

	return ret;
}
static int kvm_s390_vm_get_attr(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	switch (attr->group) {
	case KVM_S390_VM_MEM_CTRL:
		ret = kvm_s390_get_mem_control(kvm, attr);
		break;
	case KVM_S390_VM_TOD:
		ret = kvm_s390_get_tod(kvm, attr);
		break;
	case KVM_S390_VM_CPU_MODEL:
		ret = kvm_s390_get_cpu_model(kvm, attr);
		break;
	default:
		ret = -ENXIO;
		break;
	}

	return ret;
}
static int kvm_s390_vm_has_attr(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	switch (attr->group) {
	case KVM_S390_VM_MEM_CTRL:
		switch (attr->attr) {
		case KVM_S390_VM_MEM_ENABLE_CMMA:
		case KVM_S390_VM_MEM_CLR_CMMA:
		case KVM_S390_VM_MEM_LIMIT_SIZE:
			ret = 0;
			break;
		default:
			ret = -ENXIO;
			break;
		}
		break;
	case KVM_S390_VM_TOD:
		switch (attr->attr) {
		case KVM_S390_VM_TOD_LOW:
		case KVM_S390_VM_TOD_HIGH:
			ret = 0;
			break;
		default:
			ret = -ENXIO;
			break;
		}
		break;
	case KVM_S390_VM_CPU_MODEL:
		switch (attr->attr) {
		case KVM_S390_VM_CPU_PROCESSOR:
		case KVM_S390_VM_CPU_MACHINE:
			ret = 0;
			break;
		default:
			ret = -ENXIO;
			break;
		}
		break;
	case KVM_S390_VM_CRYPTO:
		switch (attr->attr) {
		case KVM_S390_VM_CRYPTO_ENABLE_AES_KW:
		case KVM_S390_VM_CRYPTO_ENABLE_DEA_KW:
		case KVM_S390_VM_CRYPTO_DISABLE_AES_KW:
		case KVM_S390_VM_CRYPTO_DISABLE_DEA_KW:
			ret = 0;
			break;
		default:
			ret = -ENXIO;
			break;
		}
		break;
	default:
		ret = -ENXIO;
		break;
	}

	return ret;
}
long kvm_arch_vm_ioctl(struct file *filp,
		       unsigned int ioctl, unsigned long arg)
{
	struct kvm *kvm = filp->private_data;
	void __user *argp = (void __user *)arg;
	struct kvm_device_attr attr;
	int r;

	switch (ioctl) {
	case KVM_S390_INTERRUPT: {
		struct kvm_s390_interrupt s390int;

		r = -EFAULT;
		if (copy_from_user(&s390int, argp, sizeof(s390int)))
			break;
		r = kvm_s390_inject_vm(kvm, &s390int);
		break;
	}
	case KVM_ENABLE_CAP: {
		struct kvm_enable_cap cap;
		r = -EFAULT;
		if (copy_from_user(&cap, argp, sizeof(cap)))
			break;
		r = kvm_vm_ioctl_enable_cap(kvm, &cap);
		break;
	}
	case KVM_CREATE_IRQCHIP: {
		struct kvm_irq_routing_entry routing;

		r = -EINVAL;
		if (kvm->arch.use_irqchip) {
			/* Set up dummy routing. */
			memset(&routing, 0, sizeof(routing));
			kvm_set_irq_routing(kvm, &routing, 0, 0);
			r = 0;
		}
		break;
	}
	case KVM_SET_DEVICE_ATTR: {
		r = -EFAULT;
		if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
			break;
		r = kvm_s390_vm_set_attr(kvm, &attr);
		break;
	}
	case KVM_GET_DEVICE_ATTR: {
		r = -EFAULT;
		if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
			break;
		r = kvm_s390_vm_get_attr(kvm, &attr);
		break;
	}
	case KVM_HAS_DEVICE_ATTR: {
		r = -EFAULT;
		if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
			break;
		r = kvm_s390_vm_has_attr(kvm, &attr);
		break;
	}
	default:
		r = -ENOTTY;
	}

	return r;
}
static int kvm_s390_query_ap_config(u8 *config)
{
	u32 fcn_code = 0x04000000UL;
	u32 cc = 0;

	memset(config, 0, 128);
	asm volatile(
		"lgr 0,%1\n"
		"lgr 2,%2\n"
		".long 0xb2af0000\n"		/* PQAP(QCI) */
		"0: ipm %0\n"
		"srl %0,28\n"
		"1:\n"
		EX_TABLE(0b, 1b)
		: "+r" (cc)
		: "r" (fcn_code), "r" (config)
		: "cc", "0", "2", "memory"
	);

	return cc;
}
static int kvm_s390_apxa_installed(void)
{
	u8 config[128];
	int cc;

	if (test_facility(2) && test_facility(12)) {
		cc = kvm_s390_query_ap_config(config);

		if (cc)
			pr_err("PQAP(QCI) failed with cc=%d", cc);
		else
			return config[0] & 0x40;
	}

	return 0;
}
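
/*
 * The CRYCB descriptor carries the crycb address together with a
 * format field: format 2 is used when the APXA facility is installed,
 * format 1 otherwise.
 */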
static void kvm_s390_set_crycb_format(struct kvm *kvm)
{
	kvm->arch.crypto.crycbd = (__u32)(unsigned long) kvm->arch.crypto.crycb;

	if (kvm_s390_apxa_installed())
		kvm->arch.crypto.crycbd |= CRYCB_FORMAT2;
	else
		kvm->arch.crypto.crycbd |= CRYCB_FORMAT1;
}
static void kvm_s390_get_cpu_id(struct cpuid *cpu_id)
{
	get_cpu_id(cpu_id);
	cpu_id->version = 0xff;
}
static int kvm_s390_crypto_init(struct kvm *kvm)
{
	if (!test_kvm_facility(kvm, 76))
		return 0;

	kvm->arch.crypto.crycb = kzalloc(sizeof(*kvm->arch.crypto.crycb),
					 GFP_KERNEL | GFP_DMA);
	if (!kvm->arch.crypto.crycb)
		return -ENOMEM;

	kvm_s390_set_crycb_format(kvm);

	/* Enable AES/DEA protected key functions by default */
	kvm->arch.crypto.aes_kw = 1;
	kvm->arch.crypto.dea_kw = 1;
	get_random_bytes(kvm->arch.crypto.crycb->aes_wrapping_key_mask,
			 sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
	get_random_bytes(kvm->arch.crypto.crycb->dea_wrapping_key_mask,
			 sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));

	return 0;
}
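
/*
 * Set up all per-VM state: the system control area (SCA), the s390
 * debug feature, the facility mask/list and CPU model data, the crypto
 * control block, floating interrupt state and - unless this is a
 * ucontrol VM - the guest address space (gmap).
 */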
int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
{
	int i, rc;
	char debug_name[16];
	static unsigned long sca_offset;

	rc = -EINVAL;
#ifdef CONFIG_KVM_S390_UCONTROL
	if (type & ~KVM_VM_S390_UCONTROL)
		goto out_err;
	if ((type & KVM_VM_S390_UCONTROL) && (!capable(CAP_SYS_ADMIN)))
		goto out_err;
#else
	if (type)
		goto out_err;
#endif

	rc = s390_enable_sie();
	if (rc)
		goto out_err;

	rc = -ENOMEM;

	kvm->arch.sca = (struct sca_block *) get_zeroed_page(GFP_KERNEL);
	if (!kvm->arch.sca)
		goto out_err;
	spin_lock(&kvm_lock);
	sca_offset = (sca_offset + 16) & 0x7f0;
	kvm->arch.sca = (struct sca_block *) ((char *) kvm->arch.sca + sca_offset);
	spin_unlock(&kvm_lock);

	sprintf(debug_name, "kvm-%u", current->pid);

	kvm->arch.dbf = debug_register(debug_name, 8, 2, 8 * sizeof(long));
	if (!kvm->arch.dbf)
		goto out_nodbf;

	/*
	 * The architectural maximum amount of facilities is 16 kbit. To store
	 * this amount, 2 kbyte of memory is required. Thus we need a full
	 * page to hold the guest facility list (arch.model.fac->list) and the
	 * facility mask (arch.model.fac->mask). Its address size has to be
	 * 31 bits and word aligned.
	 */
	kvm->arch.model.fac =
		(struct kvm_s390_fac *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!kvm->arch.model.fac)
		goto out_nofac;

	/* Populate the facility mask initially. */
	memcpy(kvm->arch.model.fac->mask, S390_lowcore.stfle_fac_list,
	       S390_ARCH_FAC_LIST_SIZE_BYTE);
	for (i = 0; i < S390_ARCH_FAC_LIST_SIZE_U64; i++) {
		if (i < kvm_s390_fac_list_mask_size())
			kvm->arch.model.fac->mask[i] &= kvm_s390_fac_list_mask[i];
		else
			kvm->arch.model.fac->mask[i] = 0UL;
	}

	/* Populate the facility list initially. */
	memcpy(kvm->arch.model.fac->list, kvm->arch.model.fac->mask,
	       S390_ARCH_FAC_LIST_SIZE_BYTE);

	kvm_s390_get_cpu_id(&kvm->arch.model.cpu_id);
	kvm->arch.model.ibc = sclp_get_ibc() & 0x0fff;

	if (kvm_s390_crypto_init(kvm) < 0)
		goto out_crypto;

	spin_lock_init(&kvm->arch.float_int.lock);
	INIT_LIST_HEAD(&kvm->arch.float_int.list);
	init_waitqueue_head(&kvm->arch.ipte_wq);
	mutex_init(&kvm->arch.ipte_mutex);

	debug_register_view(kvm->arch.dbf, &debug_sprintf_view);
	VM_EVENT(kvm, 3, "%s", "vm created");

	if (type & KVM_VM_S390_UCONTROL) {
		kvm->arch.gmap = NULL;
	} else {
		kvm->arch.gmap = gmap_alloc(current->mm, (1UL << 44) - 1);
		if (!kvm->arch.gmap)
			goto out_nogmap;
		kvm->arch.gmap->private = kvm;
		kvm->arch.gmap->pfault_enabled = 0;
	}

	kvm->arch.css_support = 0;
	kvm->arch.use_irqchip = 0;

	spin_lock_init(&kvm->arch.start_stop_lock);

	return 0;
out_nogmap:
	kfree(kvm->arch.crypto.crycb);
out_crypto:
	free_page((unsigned long)kvm->arch.model.fac);
out_nofac:
	debug_unregister(kvm->arch.dbf);
out_nodbf:
	free_page((unsigned long)(kvm->arch.sca));
out_err:
	return rc;
}
void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{
	VCPU_EVENT(vcpu, 3, "%s", "free cpu");
	trace_kvm_s390_destroy_vcpu(vcpu->vcpu_id);
	kvm_s390_clear_local_irqs(vcpu);
	kvm_clear_async_pf_completion_queue(vcpu);
	if (!kvm_is_ucontrol(vcpu->kvm)) {
		clear_bit(63 - vcpu->vcpu_id,
			  (unsigned long *) &vcpu->kvm->arch.sca->mcn);
		if (vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda ==
		    (__u64) vcpu->arch.sie_block)
			vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda = 0;
	}
	smp_mb();

	if (kvm_is_ucontrol(vcpu->kvm))
		gmap_free(vcpu->arch.gmap);

	if (kvm_s390_cmma_enabled(vcpu->kvm))
		kvm_s390_vcpu_unsetup_cmma(vcpu);
	free_page((unsigned long)(vcpu->arch.sie_block));

	kvm_vcpu_uninit(vcpu);
	kmem_cache_free(kvm_vcpu_cache, vcpu);
}
static void kvm_free_vcpus(struct kvm *kvm)
{
	unsigned int i;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(i, vcpu, kvm)
		kvm_arch_vcpu_destroy(vcpu);

	mutex_lock(&kvm->lock);
	for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
		kvm->vcpus[i] = NULL;

	atomic_set(&kvm->online_vcpus, 0);
	mutex_unlock(&kvm->lock);
}
void kvm_arch_destroy_vm(struct kvm *kvm)
{
	kvm_free_vcpus(kvm);
	free_page((unsigned long)kvm->arch.model.fac);
	free_page((unsigned long)(kvm->arch.sca));
	debug_unregister(kvm->arch.dbf);
	kfree(kvm->arch.crypto.crycb);
	if (!kvm_is_ucontrol(kvm))
		gmap_free(kvm->arch.gmap);
	kvm_s390_destroy_adapters(kvm);
	kvm_s390_clear_float_irqs(kvm);
}
/* Section: vcpu related */
static int __kvm_ucontrol_vcpu_init(struct kvm_vcpu *vcpu)
{
	vcpu->arch.gmap = gmap_alloc(current->mm, -1UL);
	if (!vcpu->arch.gmap)
		return -ENOMEM;
	vcpu->arch.gmap->private = vcpu->kvm;

	return 0;
}
int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
{
	vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
	kvm_clear_async_pf_completion_queue(vcpu);
	vcpu->run->kvm_valid_regs = KVM_SYNC_PREFIX |
				    KVM_SYNC_GPRS |
				    KVM_SYNC_ACRS |
				    KVM_SYNC_CRS |
				    KVM_SYNC_ARCH0 |
				    KVM_SYNC_PFAULT;

	if (kvm_is_ucontrol(vcpu->kvm))
		return __kvm_ucontrol_vcpu_init(vcpu);

	return 0;
}
void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	save_fp_ctl(&vcpu->arch.host_fpregs.fpc);
	save_fp_regs(vcpu->arch.host_fpregs.fprs);
	save_access_regs(vcpu->arch.host_acrs);
	restore_fp_ctl(&vcpu->arch.guest_fpregs.fpc);
	restore_fp_regs(vcpu->arch.guest_fpregs.fprs);
	restore_access_regs(vcpu->run->s.regs.acrs);
	gmap_enable(vcpu->arch.gmap);
	atomic_set_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
}

void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
	atomic_clear_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
	gmap_disable(vcpu->arch.gmap);
	save_fp_ctl(&vcpu->arch.guest_fpregs.fpc);
	save_fp_regs(vcpu->arch.guest_fpregs.fprs);
	save_access_regs(vcpu->run->s.regs.acrs);
	restore_fp_ctl(&vcpu->arch.host_fpregs.fpc);
	restore_fp_regs(vcpu->arch.host_fpregs.fprs);
	restore_access_regs(vcpu->arch.host_acrs);
}
static void kvm_s390_vcpu_initial_reset(struct kvm_vcpu *vcpu)
{
	/* this equals initial cpu reset in pop, but we don't switch to ESA */
	vcpu->arch.sie_block->gpsw.mask = 0UL;
	vcpu->arch.sie_block->gpsw.addr = 0UL;
	kvm_s390_set_prefix(vcpu, 0);
	vcpu->arch.sie_block->cputm     = 0UL;
	vcpu->arch.sie_block->ckc       = 0UL;
	vcpu->arch.sie_block->todpr     = 0;
	memset(vcpu->arch.sie_block->gcr, 0, 16 * sizeof(__u64));
	vcpu->arch.sie_block->gcr[0]  = 0xE0UL;
	vcpu->arch.sie_block->gcr[14] = 0xC2000000UL;
	vcpu->arch.guest_fpregs.fpc = 0;
	asm volatile("lfpc %0" : : "Q" (vcpu->arch.guest_fpregs.fpc));
	vcpu->arch.sie_block->gbea = 1;
	vcpu->arch.sie_block->pp = 0;
	vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
	kvm_clear_async_pf_completion_queue(vcpu);
	if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm))
		kvm_s390_vcpu_stop(vcpu);
	kvm_s390_clear_local_irqs(vcpu);
}
void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
{
	mutex_lock(&vcpu->kvm->lock);
	vcpu->arch.sie_block->epoch = vcpu->kvm->arch.epoch;
	mutex_unlock(&vcpu->kvm->lock);
	if (!kvm_is_ucontrol(vcpu->kvm))
		vcpu->arch.gmap = vcpu->kvm->arch.gmap;
}
static void kvm_s390_vcpu_crypto_setup(struct kvm_vcpu *vcpu)
{
	if (!test_kvm_facility(vcpu->kvm, 76))
		return;

	vcpu->arch.sie_block->ecb3 &= ~(ECB3_AES | ECB3_DEA);

	if (vcpu->kvm->arch.crypto.aes_kw)
		vcpu->arch.sie_block->ecb3 |= ECB3_AES;
	if (vcpu->kvm->arch.crypto.dea_kw)
		vcpu->arch.sie_block->ecb3 |= ECB3_DEA;

	vcpu->arch.sie_block->crycbd = vcpu->kvm->arch.crypto.crycbd;
}
void kvm_s390_vcpu_unsetup_cmma(struct kvm_vcpu *vcpu)
{
	free_page(vcpu->arch.sie_block->cbrlo);
	vcpu->arch.sie_block->cbrlo = 0;
}

int kvm_s390_vcpu_setup_cmma(struct kvm_vcpu *vcpu)
{
	vcpu->arch.sie_block->cbrlo = get_zeroed_page(GFP_KERNEL);
	if (!vcpu->arch.sie_block->cbrlo)
		return -ENOMEM;

	vcpu->arch.sie_block->ecb2 |= 0x80;
	vcpu->arch.sie_block->ecb2 &= ~0x08;
	return 0;
}
int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
{
	int rc = 0;

	atomic_set(&vcpu->arch.sie_block->cpuflags, CPUSTAT_ZARCH |
						    CPUSTAT_SM |
						    CPUSTAT_STOPPED |
						    CPUSTAT_GED);
	vcpu->arch.sie_block->ecb   = 6;
	if (test_kvm_facility(vcpu->kvm, 50) && test_kvm_facility(vcpu->kvm, 73))
		vcpu->arch.sie_block->ecb |= 0x10;

	vcpu->arch.sie_block->ecb2  = 8;
	vcpu->arch.sie_block->eca   = 0xC1002000U;
	if (sclp_has_siif())
		vcpu->arch.sie_block->eca |= 1;
	if (sclp_has_sigpif())
		vcpu->arch.sie_block->eca |= 0x10000000U;
	vcpu->arch.sie_block->ictl |= ICTL_ISKE | ICTL_SSKE | ICTL_RRBE |
				      ICTL_TPROT;

	if (kvm_s390_cmma_enabled(vcpu->kvm)) {
		rc = kvm_s390_vcpu_setup_cmma(vcpu);
		if (rc)
			return rc;
	}
	hrtimer_init(&vcpu->arch.ckc_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	vcpu->arch.ckc_timer.function = kvm_s390_idle_wakeup;

	mutex_lock(&vcpu->kvm->lock);
	vcpu->arch.cpu_id = vcpu->kvm->arch.model.cpu_id;
	vcpu->arch.sie_block->ibc = vcpu->kvm->arch.model.ibc;
	mutex_unlock(&vcpu->kvm->lock);

	kvm_s390_vcpu_crypto_setup(vcpu);

	return rc;
}
struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
				      unsigned int id)
{
	struct kvm_vcpu *vcpu;
	struct sie_page *sie_page;
	int rc = -EINVAL;

	if (id >= KVM_MAX_VCPUS)
		goto out;

	rc = -ENOMEM;

	vcpu = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
	if (!vcpu)
		goto out;

	sie_page = (struct sie_page *) get_zeroed_page(GFP_KERNEL);
	if (!sie_page)
		goto out_free_cpu;

	vcpu->arch.sie_block = &sie_page->sie_block;
	vcpu->arch.sie_block->itdba = (unsigned long) &sie_page->itdb;

	vcpu->arch.sie_block->icpua = id;
	if (!kvm_is_ucontrol(kvm)) {
		if (!kvm->arch.sca) {
			WARN_ON_ONCE(1);
			goto out_free_cpu;
		}
		if (!kvm->arch.sca->cpu[id].sda)
			kvm->arch.sca->cpu[id].sda =
				(__u64) vcpu->arch.sie_block;
		vcpu->arch.sie_block->scaoh =
			(__u32)(((__u64)kvm->arch.sca) >> 32);
		vcpu->arch.sie_block->scaol = (__u32)(__u64)kvm->arch.sca;
		set_bit(63 - id, (unsigned long *) &kvm->arch.sca->mcn);
	}

	vcpu->arch.sie_block->fac = (int) (long) kvm->arch.model.fac->list;

	spin_lock_init(&vcpu->arch.local_int.lock);
	vcpu->arch.local_int.float_int = &kvm->arch.float_int;
	vcpu->arch.local_int.wq = &vcpu->wq;
	vcpu->arch.local_int.cpuflags = &vcpu->arch.sie_block->cpuflags;

	rc = kvm_vcpu_init(vcpu, kvm, id);
	if (rc)
		goto out_free_sie_block;
	VM_EVENT(kvm, 3, "create cpu %d at %p, sie block at %p", id, vcpu,
		 vcpu->arch.sie_block);
	trace_kvm_s390_create_vcpu(id, vcpu, vcpu->arch.sie_block);

	return vcpu;
out_free_sie_block:
	free_page((unsigned long)(vcpu->arch.sie_block));
out_free_cpu:
	kmem_cache_free(kvm_vcpu_cache, vcpu);
out:
	return ERR_PTR(rc);
}
int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
{
	return kvm_s390_vcpu_has_irq(vcpu, 0);
}

void s390_vcpu_block(struct kvm_vcpu *vcpu)
{
	atomic_set_mask(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
}

void s390_vcpu_unblock(struct kvm_vcpu *vcpu)
{
	atomic_clear_mask(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
}

/*
 * Kick a guest cpu out of SIE and wait until SIE is not running.
 * If the CPU is not running (e.g. waiting as idle) the function will
 * return immediately.
 */
void exit_sie(struct kvm_vcpu *vcpu)
{
	atomic_set_mask(CPUSTAT_STOP_INT, &vcpu->arch.sie_block->cpuflags);
	while (vcpu->arch.sie_block->prog0c & PROG_IN_SIE)
		cpu_relax();
}

/* Kick a guest cpu out of SIE and prevent SIE-reentry */
void exit_sie_sync(struct kvm_vcpu *vcpu)
{
	s390_vcpu_block(vcpu);
	exit_sie(vcpu);
}
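
/*
 * ipte notifier callback, invoked by the gmap code when a mapping with
 * the notifier bit set is invalidated. If the invalidated address
 * belongs to a VCPU's prefix area, that VCPU must remap its prefix
 * pages before re-entering SIE.
 */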
static void kvm_gmap_notifier(struct gmap *gmap, unsigned long address)
{
	int i;
	struct kvm *kvm = gmap->private;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(i, vcpu, kvm) {
		/* match against both prefix pages */
		if (kvm_s390_get_prefix(vcpu) == (address & ~0x1000UL)) {
			VCPU_EVENT(vcpu, 2, "gmap notifier for %lx", address);
			kvm_make_request(KVM_REQ_MMU_RELOAD, vcpu);
			exit_sie_sync(vcpu);
		}
	}
}
int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
{
	/* kvm common code refers to this, but never calls it */
	BUG();
	return 0;
}
static int kvm_arch_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu,
					   struct kvm_one_reg *reg)
{
	int r = -EINVAL;

	switch (reg->id) {
	case KVM_REG_S390_TODPR:
		r = put_user(vcpu->arch.sie_block->todpr,
			     (u32 __user *)reg->addr);
		break;
	case KVM_REG_S390_EPOCHDIFF:
		r = put_user(vcpu->arch.sie_block->epoch,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_CPU_TIMER:
		r = put_user(vcpu->arch.sie_block->cputm,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_CLOCK_COMP:
		r = put_user(vcpu->arch.sie_block->ckc,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PFTOKEN:
		r = put_user(vcpu->arch.pfault_token,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PFCOMPARE:
		r = put_user(vcpu->arch.pfault_compare,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PFSELECT:
		r = put_user(vcpu->arch.pfault_select,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PP:
		r = put_user(vcpu->arch.sie_block->pp,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_GBEA:
		r = put_user(vcpu->arch.sie_block->gbea,
			     (u64 __user *)reg->addr);
		break;
	default:
		break;
	}

	return r;
}
static int kvm_arch_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu,
					   struct kvm_one_reg *reg)
{
	int r = -EINVAL;

	switch (reg->id) {
	case KVM_REG_S390_TODPR:
		r = get_user(vcpu->arch.sie_block->todpr,
			     (u32 __user *)reg->addr);
		break;
	case KVM_REG_S390_EPOCHDIFF:
		r = get_user(vcpu->arch.sie_block->epoch,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_CPU_TIMER:
		r = get_user(vcpu->arch.sie_block->cputm,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_CLOCK_COMP:
		r = get_user(vcpu->arch.sie_block->ckc,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PFTOKEN:
		r = get_user(vcpu->arch.pfault_token,
			     (u64 __user *)reg->addr);
		if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
			kvm_clear_async_pf_completion_queue(vcpu);
		break;
	case KVM_REG_S390_PFCOMPARE:
		r = get_user(vcpu->arch.pfault_compare,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PFSELECT:
		r = get_user(vcpu->arch.pfault_select,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PP:
		r = get_user(vcpu->arch.sie_block->pp,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_GBEA:
		r = get_user(vcpu->arch.sie_block->gbea,
			     (u64 __user *)reg->addr);
		break;
	default:
		break;
	}

	return r;
}
static int kvm_arch_vcpu_ioctl_initial_reset(struct kvm_vcpu *vcpu)
{
	kvm_s390_vcpu_initial_reset(vcpu);
	return 0;
}

int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	memcpy(&vcpu->run->s.regs.gprs, &regs->gprs, sizeof(regs->gprs));
	return 0;
}

int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	memcpy(&regs->gprs, &vcpu->run->s.regs.gprs, sizeof(regs->gprs));
	return 0;
}

int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	memcpy(&vcpu->run->s.regs.acrs, &sregs->acrs, sizeof(sregs->acrs));
	memcpy(&vcpu->arch.sie_block->gcr, &sregs->crs, sizeof(sregs->crs));
	restore_access_regs(vcpu->run->s.regs.acrs);
	return 0;
}

int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	memcpy(&sregs->acrs, &vcpu->run->s.regs.acrs, sizeof(sregs->acrs));
	memcpy(&sregs->crs, &vcpu->arch.sie_block->gcr, sizeof(sregs->crs));
	return 0;
}

int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	if (test_fp_ctl(fpu->fpc))
		return -EINVAL;
	memcpy(&vcpu->arch.guest_fpregs.fprs, &fpu->fprs, sizeof(fpu->fprs));
	vcpu->arch.guest_fpregs.fpc = fpu->fpc;
	restore_fp_ctl(&vcpu->arch.guest_fpregs.fpc);
	restore_fp_regs(vcpu->arch.guest_fpregs.fprs);
	return 0;
}

int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	memcpy(&fpu->fprs, &vcpu->arch.guest_fpregs.fprs, sizeof(fpu->fprs));
	fpu->fpc = vcpu->arch.guest_fpregs.fpc;
	return 0;
}
static int kvm_arch_vcpu_ioctl_set_initial_psw(struct kvm_vcpu *vcpu, psw_t psw)
{
	int rc = 0;

	if (!is_vcpu_stopped(vcpu))
		rc = -EBUSY;
	else {
		vcpu->run->psw_mask = psw.mask;
		vcpu->run->psw_addr = psw.addr;
	}
	return rc;
}
int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
				  struct kvm_translation *tr)
{
	return -EINVAL; /* not implemented yet */
}
#define VALID_GUESTDBG_FLAGS (KVM_GUESTDBG_SINGLESTEP | \
			      KVM_GUESTDBG_USE_HW_BP | \
			      KVM_GUESTDBG_ENABLE)

int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
					struct kvm_guest_debug *dbg)
{
	int rc = 0;

	vcpu->guest_debug = 0;
	kvm_s390_clear_bp_data(vcpu);

	if (dbg->control & ~VALID_GUESTDBG_FLAGS)
		return -EINVAL;

	if (dbg->control & KVM_GUESTDBG_ENABLE) {
		vcpu->guest_debug = dbg->control;
		/* enforce guest PER */
		atomic_set_mask(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags);

		if (dbg->control & KVM_GUESTDBG_USE_HW_BP)
			rc = kvm_s390_import_bp_data(vcpu, dbg);
	} else {
		atomic_clear_mask(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags);
		vcpu->arch.guestdbg.last_bp = 0;
	}

	if (rc) {
		vcpu->guest_debug = 0;
		kvm_s390_clear_bp_data(vcpu);
		atomic_clear_mask(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags);
	}

	return rc;
}
int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	/* CHECK_STOP and LOAD are not supported yet */
	return is_vcpu_stopped(vcpu) ? KVM_MP_STATE_STOPPED :
				       KVM_MP_STATE_OPERATING;
}

int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	int rc = 0;

	/* user space knows about this interface - let it control the state */
	vcpu->kvm->arch.user_cpu_state_ctrl = 1;

	switch (mp_state->mp_state) {
	case KVM_MP_STATE_STOPPED:
		kvm_s390_vcpu_stop(vcpu);
		break;
	case KVM_MP_STATE_OPERATING:
		kvm_s390_vcpu_start(vcpu);
		break;
	case KVM_MP_STATE_LOAD:
	case KVM_MP_STATE_CHECK_STOP:
		/* fall through - CHECK_STOP and LOAD are not supported yet */
	default:
		rc = -ENXIO;
	}

	return rc;
}
bool kvm_s390_cmma_enabled(struct kvm *kvm)
{
	if (!MACHINE_IS_LPAR)
		return false;
	/* only enable for z10 and later */
	if (!MACHINE_HAS_EDAT1)
		return false;
	if (!kvm->arch.use_cmma)
		return false;
	return true;
}

static bool ibs_enabled(struct kvm_vcpu *vcpu)
{
	return atomic_read(&vcpu->arch.sie_block->cpuflags) & CPUSTAT_IBS;
}
static int kvm_s390_handle_requests(struct kvm_vcpu *vcpu)
{
retry:
	s390_vcpu_unblock(vcpu);
	/*
	 * We use MMU_RELOAD just to re-arm the ipte notifier for the
	 * guest prefix page. gmap_ipte_notify will wait on the ptl lock.
	 * This ensures that the ipte instruction for this request has
	 * already finished. We might race against a second unmapper that
	 * wants to set the blocking bit. Let's just retry the request loop.
	 */
	if (kvm_check_request(KVM_REQ_MMU_RELOAD, vcpu)) {
		int rc;
		rc = gmap_ipte_notify(vcpu->arch.gmap,
				      kvm_s390_get_prefix(vcpu),
				      PAGE_SIZE * 2);
		if (rc)
			return rc;
		goto retry;
	}

	if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu)) {
		vcpu->arch.sie_block->ihcpu = 0xffff;
		goto retry;
	}

	if (kvm_check_request(KVM_REQ_ENABLE_IBS, vcpu)) {
		if (!ibs_enabled(vcpu)) {
			trace_kvm_s390_enable_disable_ibs(vcpu->vcpu_id, 1);
			atomic_set_mask(CPUSTAT_IBS,
					&vcpu->arch.sie_block->cpuflags);
		}
		goto retry;
	}

	if (kvm_check_request(KVM_REQ_DISABLE_IBS, vcpu)) {
		if (ibs_enabled(vcpu)) {
			trace_kvm_s390_enable_disable_ibs(vcpu->vcpu_id, 0);
			atomic_clear_mask(CPUSTAT_IBS,
					  &vcpu->arch.sie_block->cpuflags);
		}
		goto retry;
	}

	/* nothing to do, just clear the request */
	clear_bit(KVM_REQ_UNHALT, &vcpu->requests);

	return 0;
}
/**
 * kvm_arch_fault_in_page - fault-in guest page if necessary
 * @vcpu: The corresponding virtual cpu
 * @gpa: Guest physical address
 * @writable: Whether the page should be writable or not
 *
 * Make sure that a guest page has been faulted-in on the host.
 *
 * Return: Zero on success, negative error code otherwise.
 */
long kvm_arch_fault_in_page(struct kvm_vcpu *vcpu, gpa_t gpa, int writable)
{
	return gmap_fault(vcpu->arch.gmap, gpa,
			  writable ? FAULT_FLAG_WRITE : 0);
}
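
/*
 * Tell the guest about an async page fault: a PFAULT_INIT interrupt is
 * injected into the VCPU when the fault is first encountered, and a
 * PFAULT_DONE floating interrupt once the page has been brought in.
 */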
static void __kvm_inject_pfault_token(struct kvm_vcpu *vcpu, bool start_token,
				      unsigned long token)
{
	struct kvm_s390_interrupt inti;
	struct kvm_s390_irq irq;

	if (start_token) {
		irq.u.ext.ext_params2 = token;
		irq.type = KVM_S390_INT_PFAULT_INIT;
		WARN_ON_ONCE(kvm_s390_inject_vcpu(vcpu, &irq));
	} else {
		inti.type = KVM_S390_INT_PFAULT_DONE;
		inti.parm64 = token;
		WARN_ON_ONCE(kvm_s390_inject_vm(vcpu->kvm, &inti));
	}
}
void kvm_arch_async_page_not_present(struct kvm_vcpu *vcpu,
				     struct kvm_async_pf *work)
{
	trace_kvm_s390_pfault_init(vcpu, work->arch.pfault_token);
	__kvm_inject_pfault_token(vcpu, true, work->arch.pfault_token);
}

void kvm_arch_async_page_present(struct kvm_vcpu *vcpu,
				 struct kvm_async_pf *work)
{
	trace_kvm_s390_pfault_done(vcpu, work->arch.pfault_token);
	__kvm_inject_pfault_token(vcpu, false, work->arch.pfault_token);
}

void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu,
			       struct kvm_async_pf *work)
{
	/* s390 will always inject the page directly */
}

bool kvm_arch_can_inject_async_page_present(struct kvm_vcpu *vcpu)
{
	/*
	 * s390 will always inject the page directly,
	 * but we still want check_async_completion to cleanup
	 */
	return true;
}
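
/*
 * Decide whether a host page fault may be handled asynchronously: the
 * pfault handshake must be armed by the guest, external interrupts
 * must be enabled, and no other interrupt may be pending. If so, queue
 * async pfault work for the faulting guest address.
 */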
static int kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu)
{
	hva_t hva;
	struct kvm_arch_async_pf arch;
	int rc;

	if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
		return 0;
	if ((vcpu->arch.sie_block->gpsw.mask & vcpu->arch.pfault_select) !=
	    vcpu->arch.pfault_compare)
		return 0;
	if (psw_extint_disabled(vcpu))
		return 0;
	if (kvm_s390_vcpu_has_irq(vcpu, 0))
		return 0;
	if (!(vcpu->arch.sie_block->gcr[0] & 0x200ul))
		return 0;
	if (!vcpu->arch.gmap->pfault_enabled)
		return 0;

	hva = gfn_to_hva(vcpu->kvm, gpa_to_gfn(current->thread.gmap_addr));
	hva += current->thread.gmap_addr & ~PAGE_MASK;
	if (read_guest_real(vcpu, vcpu->arch.pfault_token, &arch.pfault_token, 8))
		return 0;

	rc = kvm_setup_async_pf(vcpu, current->thread.gmap_addr, hva, &arch);
	return rc;
}
static int vcpu_pre_run(struct kvm_vcpu *vcpu)
{
	int rc, cpuflags;

	/*
	 * On s390 notifications for arriving pages will be delivered directly
	 * to the guest but the house keeping for completed pfaults is
	 * handled outside the worker.
	 */
	kvm_check_async_pf_completion(vcpu);

	memcpy(&vcpu->arch.sie_block->gg14, &vcpu->run->s.regs.gprs[14], 16);

	if (need_resched())
		schedule();

	if (test_cpu_flag(CIF_MCCK_PENDING))
		s390_handle_mcck();

	if (!kvm_is_ucontrol(vcpu->kvm)) {
		rc = kvm_s390_deliver_pending_interrupts(vcpu);
		if (rc)
			return rc;
	}

	rc = kvm_s390_handle_requests(vcpu);
	if (rc)
		return rc;

	if (guestdbg_enabled(vcpu)) {
		kvm_s390_backup_guest_per_regs(vcpu);
		kvm_s390_patch_guest_per_regs(vcpu);
	}

	vcpu->arch.sie_block->icptcode = 0;
	cpuflags = atomic_read(&vcpu->arch.sie_block->cpuflags);
	VCPU_EVENT(vcpu, 6, "entering sie flags %x", cpuflags);
	trace_kvm_s390_sie_enter(vcpu, cpuflags);

	return 0;
}
static int vcpu_post_run(struct kvm_vcpu *vcpu, int exit_reason)
{
	int rc = -1;

	VCPU_EVENT(vcpu, 6, "exit sie icptcode %d",
		   vcpu->arch.sie_block->icptcode);
	trace_kvm_s390_sie_exit(vcpu, vcpu->arch.sie_block->icptcode);

	if (guestdbg_enabled(vcpu))
		kvm_s390_restore_guest_per_regs(vcpu);

	if (exit_reason >= 0) {
		rc = 0;
	} else if (kvm_is_ucontrol(vcpu->kvm)) {
		vcpu->run->exit_reason = KVM_EXIT_S390_UCONTROL;
		vcpu->run->s390_ucontrol.trans_exc_code =
						current->thread.gmap_addr;
		vcpu->run->s390_ucontrol.pgm_code = 0x10;
		rc = -EREMOTE;

	} else if (current->thread.gmap_pfault) {
		trace_kvm_s390_major_guest_pfault(vcpu);
		current->thread.gmap_pfault = 0;
		if (kvm_arch_setup_async_pf(vcpu)) {
			rc = 0;
		} else {
			gpa_t gpa = current->thread.gmap_addr;
			rc = kvm_arch_fault_in_page(vcpu, gpa, 1);
		}
	}

	if (rc == -1) {
		VCPU_EVENT(vcpu, 3, "%s", "fault in sie instruction");
		trace_kvm_s390_sie_fault(vcpu);
		rc = kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
	}

	memcpy(&vcpu->run->s.regs.gprs[14], &vcpu->arch.sie_block->gg14, 16);

	if (rc == 0) {
		if (kvm_is_ucontrol(vcpu->kvm))
			/* Don't exit for host interrupts. */
			rc = vcpu->arch.sie_block->icptcode ? -EOPNOTSUPP : 0;
		else
			rc = kvm_handle_sie_intercept(vcpu);
	}

	return rc;
}
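
/*
 * The main run loop: deliver pending interrupts and handle requests,
 * drop the SRCU lock, enter SIE, then evaluate the exit, until either
 * userspace intervention is required, a signal arrives or a debug
 * exit is pending.
 */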
static int __vcpu_run(struct kvm_vcpu *vcpu)
{
	int rc, exit_reason;

	/*
	 * We try to hold kvm->srcu during most of vcpu_run (except when run-
	 * ning the guest), so that memslots (and other stuff) are protected
	 */
	vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);

	do {
		rc = vcpu_pre_run(vcpu);
		if (rc)
			break;

		srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
		/*
		 * As PF_VCPU will be used in fault handler, between
		 * guest_enter and guest_exit should be no uaccess.
		 */
		preempt_disable();
		kvm_guest_enter();
		preempt_enable();
		exit_reason = sie64a(vcpu->arch.sie_block,
				     vcpu->run->s.regs.gprs);
		kvm_guest_exit();
		vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);

		rc = vcpu_post_run(vcpu, exit_reason);
	} while (!signal_pending(current) && !guestdbg_exit_pending(vcpu) && !rc);

	srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
	return rc;
}
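
/*
 * sync_regs/store_regs shuttle the register state that is shared with
 * userspace in kvm_run into and out of the SIE control block around
 * each invocation of KVM_RUN.
 */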
static void sync_regs(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	vcpu->arch.sie_block->gpsw.mask = kvm_run->psw_mask;
	vcpu->arch.sie_block->gpsw.addr = kvm_run->psw_addr;
	if (kvm_run->kvm_dirty_regs & KVM_SYNC_PREFIX)
		kvm_s390_set_prefix(vcpu, kvm_run->s.regs.prefix);
	if (kvm_run->kvm_dirty_regs & KVM_SYNC_CRS) {
		memcpy(&vcpu->arch.sie_block->gcr, &kvm_run->s.regs.crs, 128);
		/* some control register changes require a tlb flush */
		kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
	}
	if (kvm_run->kvm_dirty_regs & KVM_SYNC_ARCH0) {
		vcpu->arch.sie_block->cputm = kvm_run->s.regs.cputm;
		vcpu->arch.sie_block->ckc = kvm_run->s.regs.ckc;
		vcpu->arch.sie_block->todpr = kvm_run->s.regs.todpr;
		vcpu->arch.sie_block->pp = kvm_run->s.regs.pp;
		vcpu->arch.sie_block->gbea = kvm_run->s.regs.gbea;
	}
	if (kvm_run->kvm_dirty_regs & KVM_SYNC_PFAULT) {
		vcpu->arch.pfault_token = kvm_run->s.regs.pft;
		vcpu->arch.pfault_select = kvm_run->s.regs.pfs;
		vcpu->arch.pfault_compare = kvm_run->s.regs.pfc;
		if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
			kvm_clear_async_pf_completion_queue(vcpu);
	}
	kvm_run->kvm_dirty_regs = 0;
}
static void store_regs(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	kvm_run->psw_mask = vcpu->arch.sie_block->gpsw.mask;
	kvm_run->psw_addr = vcpu->arch.sie_block->gpsw.addr;
	kvm_run->s.regs.prefix = kvm_s390_get_prefix(vcpu);
	memcpy(&kvm_run->s.regs.crs, &vcpu->arch.sie_block->gcr, 128);
	kvm_run->s.regs.cputm = vcpu->arch.sie_block->cputm;
	kvm_run->s.regs.ckc = vcpu->arch.sie_block->ckc;
	kvm_run->s.regs.todpr = vcpu->arch.sie_block->todpr;
	kvm_run->s.regs.pp = vcpu->arch.sie_block->pp;
	kvm_run->s.regs.gbea = vcpu->arch.sie_block->gbea;
	kvm_run->s.regs.pft = vcpu->arch.pfault_token;
	kvm_run->s.regs.pfs = vcpu->arch.pfault_select;
	kvm_run->s.regs.pfc = vcpu->arch.pfault_compare;
}
int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	int rc;
	sigset_t sigsaved;

	if (guestdbg_exit_pending(vcpu)) {
		kvm_s390_prepare_debug_exit(vcpu);
		return 0;
	}

	if (vcpu->sigset_active)
		sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);

	if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm)) {
		kvm_s390_vcpu_start(vcpu);
	} else if (is_vcpu_stopped(vcpu)) {
		pr_err_ratelimited("kvm-s390: can't run stopped vcpu %d\n",
				   vcpu->vcpu_id);
		return -EINVAL;
	}

	sync_regs(vcpu, kvm_run);

	might_fault();
	rc = __vcpu_run(vcpu);

	if (signal_pending(current) && !rc) {
		kvm_run->exit_reason = KVM_EXIT_INTR;
		rc = -EINTR;
	}

	if (guestdbg_exit_pending(vcpu) && !rc) {
		kvm_s390_prepare_debug_exit(vcpu);
		rc = 0;
	}

	if (rc == -EOPNOTSUPP) {
		/* intercept cannot be handled in-kernel, prepare kvm-run */
		kvm_run->exit_reason         = KVM_EXIT_S390_SIEIC;
		kvm_run->s390_sieic.icptcode = vcpu->arch.sie_block->icptcode;
		kvm_run->s390_sieic.ipa      = vcpu->arch.sie_block->ipa;
		kvm_run->s390_sieic.ipb      = vcpu->arch.sie_block->ipb;
		rc = 0;
	}

	if (rc == -EREMOTE) {
		/* intercept was handled, but userspace support is needed
		 * kvm_run has been prepared by the handler */
		rc = 0;
	}

	store_regs(vcpu, kvm_run);

	if (vcpu->sigset_active)
		sigprocmask(SIG_SETMASK, &sigsaved, NULL);

	vcpu->stat.exit_userspace++;
	return rc;
}
/*
 * store status at address
 * we have two special cases:
 * KVM_S390_STORE_STATUS_NOADDR: -> 0x1200 on 64 bit
 * KVM_S390_STORE_STATUS_PREFIXED: -> prefix
 */
int kvm_s390_store_status_unloaded(struct kvm_vcpu *vcpu, unsigned long gpa)
{
	unsigned char archmode = 1;
	unsigned int px;
	u64 clkcomp;
	int rc;

	if (gpa == KVM_S390_STORE_STATUS_NOADDR) {
		if (write_guest_abs(vcpu, 163, &archmode, 1))
			return -EFAULT;
		gpa = SAVE_AREA_BASE;
	} else if (gpa == KVM_S390_STORE_STATUS_PREFIXED) {
		if (write_guest_real(vcpu, 163, &archmode, 1))
			return -EFAULT;
		gpa = kvm_s390_real_to_abs(vcpu, SAVE_AREA_BASE);
	}
	rc = write_guest_abs(vcpu, gpa + offsetof(struct save_area, fp_regs),
			     vcpu->arch.guest_fpregs.fprs, 128);
	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, gp_regs),
			      vcpu->run->s.regs.gprs, 128);
	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, psw),
			      &vcpu->arch.sie_block->gpsw, 16);
	px = kvm_s390_get_prefix(vcpu);
	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, pref_reg),
			      &px, 4);
	rc |= write_guest_abs(vcpu,
			      gpa + offsetof(struct save_area, fp_ctrl_reg),
			      &vcpu->arch.guest_fpregs.fpc, 4);
	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, tod_reg),
			      &vcpu->arch.sie_block->todpr, 4);
	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, timer),
			      &vcpu->arch.sie_block->cputm, 8);
	clkcomp = vcpu->arch.sie_block->ckc >> 8;
	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, clk_cmp),
			      &clkcomp, 8);
	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, acc_regs),
			      &vcpu->run->s.regs.acrs, 64);
	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, ctrl_regs),
			      &vcpu->arch.sie_block->gcr, 128);
	return rc ? -EFAULT : 0;
}
int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
{
	/*
	 * The guest FPRS and ACRS are in the host FPRS/ACRS due to the lazy
	 * copying in vcpu load/put. Let's update our copies before we save
	 * it into the save area.
	 */
	save_fp_ctl(&vcpu->arch.guest_fpregs.fpc);
	save_fp_regs(vcpu->arch.guest_fpregs.fprs);
	save_access_regs(vcpu->run->s.regs.acrs);

	return kvm_s390_store_status_unloaded(vcpu, addr);
}
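
/*
 * IBS may only be in effect while at most one VCPU is started. The
 * helpers below raise the matching ENABLE/DISABLE request (cancelling
 * any opposite request still pending) and kick the VCPU out of SIE so
 * that the request is processed before the next SIE entry.
 */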
static void __disable_ibs_on_vcpu(struct kvm_vcpu *vcpu)
{
	kvm_check_request(KVM_REQ_ENABLE_IBS, vcpu);
	kvm_make_request(KVM_REQ_DISABLE_IBS, vcpu);
	exit_sie_sync(vcpu);
}

static void __disable_ibs_on_all_vcpus(struct kvm *kvm)
{
	unsigned int i;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(i, vcpu, kvm) {
		__disable_ibs_on_vcpu(vcpu);
	}
}

static void __enable_ibs_on_vcpu(struct kvm_vcpu *vcpu)
{
	kvm_check_request(KVM_REQ_DISABLE_IBS, vcpu);
	kvm_make_request(KVM_REQ_ENABLE_IBS, vcpu);
	exit_sie_sync(vcpu);
}
void kvm_s390_vcpu_start(struct kvm_vcpu *vcpu)
{
	int i, online_vcpus, started_vcpus = 0;

	if (!is_vcpu_stopped(vcpu))
		return;

	trace_kvm_s390_vcpu_start_stop(vcpu->vcpu_id, 1);
	/* Only one cpu at a time may enter/leave the STOPPED state. */
	spin_lock(&vcpu->kvm->arch.start_stop_lock);
	online_vcpus = atomic_read(&vcpu->kvm->online_vcpus);

	for (i = 0; i < online_vcpus; i++) {
		if (!is_vcpu_stopped(vcpu->kvm->vcpus[i]))
			started_vcpus++;
	}

	if (started_vcpus == 0) {
		/* we're the only active VCPU -> speed it up */
		__enable_ibs_on_vcpu(vcpu);
	} else if (started_vcpus == 1) {
		/*
		 * As we are starting a second VCPU, we have to disable
		 * the IBS facility on all VCPUs to remove potentially
		 * outstanding ENABLE requests.
		 */
		__disable_ibs_on_all_vcpus(vcpu->kvm);
	}

	atomic_clear_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);
	/*
	 * Another VCPU might have used IBS while we were offline.
	 * Let's play safe and flush the VCPU at startup.
	 */
	kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
	spin_unlock(&vcpu->kvm->arch.start_stop_lock);
	return;
}
void kvm_s390_vcpu_stop(struct kvm_vcpu *vcpu)
{
	int i, online_vcpus, started_vcpus = 0;
	struct kvm_vcpu *started_vcpu = NULL;

	if (is_vcpu_stopped(vcpu))
		return;

	trace_kvm_s390_vcpu_start_stop(vcpu->vcpu_id, 0);
	/* Only one cpu at a time may enter/leave the STOPPED state. */
	spin_lock(&vcpu->kvm->arch.start_stop_lock);
	online_vcpus = atomic_read(&vcpu->kvm->online_vcpus);

	/* SIGP STOP and SIGP STOP AND STORE STATUS has been fully processed */
	kvm_s390_clear_stop_irq(vcpu);

	atomic_set_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);
	__disable_ibs_on_vcpu(vcpu);

	for (i = 0; i < online_vcpus; i++) {
		if (!is_vcpu_stopped(vcpu->kvm->vcpus[i])) {
			started_vcpus++;
			started_vcpu = vcpu->kvm->vcpus[i];
		}
	}

	if (started_vcpus == 1) {
		/*
		 * As we only have one VCPU left, we want to enable the
		 * IBS facility for that VCPU to speed it up.
		 */
		__enable_ibs_on_vcpu(started_vcpu);
	}

	spin_unlock(&vcpu->kvm->arch.start_stop_lock);
	return;
}
static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
				     struct kvm_enable_cap *cap)
{
	int r;

	if (cap->flags)
		return -EINVAL;

	switch (cap->cap) {
	case KVM_CAP_S390_CSS_SUPPORT:
		if (!vcpu->kvm->arch.css_support) {
			vcpu->kvm->arch.css_support = 1;
			trace_kvm_s390_enable_css(vcpu->kvm);
		}
		r = 0;
		break;
	default:
		r = -EINVAL;
		break;
	}
	return r;
}
long kvm_arch_vcpu_ioctl(struct file *filp,
			 unsigned int ioctl, unsigned long arg)
{
	struct kvm_vcpu *vcpu = filp->private_data;
	void __user *argp = (void __user *)arg;
	int idx;
	long r;

	switch (ioctl) {
	case KVM_S390_INTERRUPT: {
		struct kvm_s390_interrupt s390int;
		struct kvm_s390_irq s390irq;

		r = -EFAULT;
		if (copy_from_user(&s390int, argp, sizeof(s390int)))
			break;
		if (s390int_to_s390irq(&s390int, &s390irq))
			return -EINVAL;
		r = kvm_s390_inject_vcpu(vcpu, &s390irq);
		break;
	}
	case KVM_S390_STORE_STATUS:
		idx = srcu_read_lock(&vcpu->kvm->srcu);
		r = kvm_s390_vcpu_store_status(vcpu, arg);
		srcu_read_unlock(&vcpu->kvm->srcu, idx);
		break;
	case KVM_S390_SET_INITIAL_PSW: {
		psw_t psw;

		r = -EFAULT;
		if (copy_from_user(&psw, argp, sizeof(psw)))
			break;
		r = kvm_arch_vcpu_ioctl_set_initial_psw(vcpu, psw);
		break;
	}
	case KVM_S390_INITIAL_RESET:
		r = kvm_arch_vcpu_ioctl_initial_reset(vcpu);
		break;
	case KVM_SET_ONE_REG:
	case KVM_GET_ONE_REG: {
		struct kvm_one_reg reg;
		r = -EFAULT;
		if (copy_from_user(&reg, argp, sizeof(reg)))
			break;
		if (ioctl == KVM_SET_ONE_REG)
			r = kvm_arch_vcpu_ioctl_set_one_reg(vcpu, &reg);
		else
			r = kvm_arch_vcpu_ioctl_get_one_reg(vcpu, &reg);
		break;
	}
#ifdef CONFIG_KVM_S390_UCONTROL
	case KVM_S390_UCAS_MAP: {
		struct kvm_s390_ucas_mapping ucasmap;

		if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
			r = -EFAULT;
			break;
		}

		if (!kvm_is_ucontrol(vcpu->kvm)) {
			r = -EINVAL;
			break;
		}

		r = gmap_map_segment(vcpu->arch.gmap, ucasmap.user_addr,
				     ucasmap.vcpu_addr, ucasmap.length);
		break;
	}
	case KVM_S390_UCAS_UNMAP: {
		struct kvm_s390_ucas_mapping ucasmap;

		if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
			r = -EFAULT;
			break;
		}

		if (!kvm_is_ucontrol(vcpu->kvm)) {
			r = -EINVAL;
			break;
		}

		r = gmap_unmap_segment(vcpu->arch.gmap, ucasmap.vcpu_addr,
				       ucasmap.length);
		break;
	}
#endif
	case KVM_S390_VCPU_FAULT: {
		r = gmap_fault(vcpu->arch.gmap, arg, 0);
		break;
	}
	case KVM_ENABLE_CAP:
	{
		struct kvm_enable_cap cap;
		r = -EFAULT;
		if (copy_from_user(&cap, argp, sizeof(cap)))
			break;
		r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap);
		break;
	}
	default:
		r = -ENOTTY;
	}
	return r;
}
int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
{
#ifdef CONFIG_KVM_S390_UCONTROL
	if ((vmf->pgoff == KVM_S390_SIE_PAGE_OFFSET)
		 && (kvm_is_ucontrol(vcpu->kvm))) {
		vmf->page = virt_to_page(vcpu->arch.sie_block);
		get_page(vmf->page);
		return 0;
	}
#endif
	return VM_FAULT_SIGBUS;
}
int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
			    unsigned long npages)
{
	return 0;
}
/* Section: memory related */
int kvm_arch_prepare_memory_region(struct kvm *kvm,
				   struct kvm_memory_slot *memslot,
				   struct kvm_userspace_memory_region *mem,
				   enum kvm_mr_change change)
{
	/* A few sanity checks. We can have memory slots which have to be
	   located/ended at a segment boundary (1MB). The memory in userland is
	   ok to be fragmented into various different vmas. It is okay to mmap()
	   and munmap() stuff in this slot after doing this call at any time */

	if (mem->userspace_addr & 0xffffful)
		return -EINVAL;

	if (mem->memory_size & 0xffffful)
		return -EINVAL;

	return 0;
}
void kvm_arch_commit_memory_region(struct kvm *kvm,
				   struct kvm_userspace_memory_region *mem,
				   const struct kvm_memory_slot *old,
				   enum kvm_mr_change change)
{
	int rc;

	/*
	 * If the basics of the memslot do not change, we do not want
	 * to update the gmap. Every update causes several unnecessary
	 * segment translation exceptions. This is usually handled just
	 * fine by the normal fault handler + gmap, but it will also
	 * cause faults on the prefix page of running guest CPUs.
	 */
	if (old->userspace_addr == mem->userspace_addr &&
	    old->base_gfn * PAGE_SIZE == mem->guest_phys_addr &&
	    old->npages * PAGE_SIZE == mem->memory_size)
		return;

	rc = gmap_map_segment(kvm->arch.gmap, mem->userspace_addr,
			      mem->guest_phys_addr, mem->memory_size);
	if (rc)
		printk(KERN_WARNING "kvm-s390: failed to commit memory region\n");
	return;
}
static int __init kvm_s390_init(void)
{
	return kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
}

static void __exit kvm_s390_exit(void)
{
	kvm_exit();
}

module_init(kvm_s390_init);
module_exit(kvm_s390_exit);

/*
 * Enable autoloading of the kvm module.
 * Note that we add the module alias here instead of virt/kvm/kvm_main.c
 * since x86 takes a different approach.
 */
#include <linux/miscdevice.h>
MODULE_ALIAS_MISCDEV(KVM_MINOR);
MODULE_ALIAS("devname:kvm");