/*
 * hosting zSeries kernel virtual machines
 *
 * Copyright IBM Corp. 2008, 2009
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (version 2 only)
 * as published by the Free Software Foundation.
 *
 *    Author(s): Carsten Otte <cotte@de.ibm.com>
 *               Christian Borntraeger <borntraeger@de.ibm.com>
 *               Heiko Carstens <heiko.carstens@de.ibm.com>
 *               Christian Ehrhardt <ehrhardt@de.ibm.com>
 *               Jason J. Herne <jjherne@us.ibm.com>
 */

#include <linux/compiler.h>
#include <linux/err.h>
#include <linux/fs.h>
#include <linux/hrtimer.h>
#include <linux/init.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/timer.h>
#include <linux/vmalloc.h>
#include <linux/bitmap.h>
#include <asm/asm-offsets.h>
#include <asm/lowcore.h>
#include <asm/etr.h>
#include <asm/pgtable.h>
#include <asm/gmap.h>
#include <asm/nmi.h>
#include <asm/switch_to.h>
#include <asm/isc.h>
#include <asm/sclp.h>
#include "kvm-s390.h"
#include "gaccess.h"

#define KMSG_COMPONENT "kvm-s390"
#undef pr_fmt
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#define CREATE_TRACE_POINTS
#include "trace.h"
#include "trace-s390.h"

#define MEM_OP_MAX_SIZE 65536	/* Maximum transfer size for KVM_S390_MEM_OP */
#define LOCAL_IRQS 32
#define VCPU_IRQS_MAX_BUF (sizeof(struct kvm_s390_irq) * \
			   (KVM_MAX_VCPUS + LOCAL_IRQS))

#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU

struct kvm_stats_debugfs_item debugfs_entries[] = {
	{ "userspace_handled", VCPU_STAT(exit_userspace) },
	{ "exit_null", VCPU_STAT(exit_null) },
	{ "exit_validity", VCPU_STAT(exit_validity) },
	{ "exit_stop_request", VCPU_STAT(exit_stop_request) },
	{ "exit_external_request", VCPU_STAT(exit_external_request) },
	{ "exit_external_interrupt", VCPU_STAT(exit_external_interrupt) },
	{ "exit_instruction", VCPU_STAT(exit_instruction) },
	{ "exit_program_interruption", VCPU_STAT(exit_program_interruption) },
	{ "exit_instr_and_program_int", VCPU_STAT(exit_instr_and_program) },
	{ "exit_operation_exception", VCPU_STAT(exit_operation_exception) },
	{ "halt_successful_poll", VCPU_STAT(halt_successful_poll) },
	{ "halt_attempted_poll", VCPU_STAT(halt_attempted_poll) },
	{ "halt_poll_invalid", VCPU_STAT(halt_poll_invalid) },
	{ "halt_wakeup", VCPU_STAT(halt_wakeup) },
	{ "instruction_lctlg", VCPU_STAT(instruction_lctlg) },
	{ "instruction_lctl", VCPU_STAT(instruction_lctl) },
	{ "instruction_stctl", VCPU_STAT(instruction_stctl) },
	{ "instruction_stctg", VCPU_STAT(instruction_stctg) },
	{ "deliver_emergency_signal", VCPU_STAT(deliver_emergency_signal) },
	{ "deliver_external_call", VCPU_STAT(deliver_external_call) },
	{ "deliver_service_signal", VCPU_STAT(deliver_service_signal) },
	{ "deliver_virtio_interrupt", VCPU_STAT(deliver_virtio_interrupt) },
	{ "deliver_stop_signal", VCPU_STAT(deliver_stop_signal) },
	{ "deliver_prefix_signal", VCPU_STAT(deliver_prefix_signal) },
	{ "deliver_restart_signal", VCPU_STAT(deliver_restart_signal) },
	{ "deliver_program_interruption", VCPU_STAT(deliver_program_int) },
	{ "exit_wait_state", VCPU_STAT(exit_wait_state) },
	{ "instruction_pfmf", VCPU_STAT(instruction_pfmf) },
	{ "instruction_stidp", VCPU_STAT(instruction_stidp) },
	{ "instruction_spx", VCPU_STAT(instruction_spx) },
	{ "instruction_stpx", VCPU_STAT(instruction_stpx) },
	{ "instruction_stap", VCPU_STAT(instruction_stap) },
	{ "instruction_storage_key", VCPU_STAT(instruction_storage_key) },
	{ "instruction_ipte_interlock", VCPU_STAT(instruction_ipte_interlock) },
	{ "instruction_stsch", VCPU_STAT(instruction_stsch) },
	{ "instruction_chsc", VCPU_STAT(instruction_chsc) },
	{ "instruction_essa", VCPU_STAT(instruction_essa) },
	{ "instruction_stsi", VCPU_STAT(instruction_stsi) },
	{ "instruction_stfl", VCPU_STAT(instruction_stfl) },
	{ "instruction_tprot", VCPU_STAT(instruction_tprot) },
	{ "instruction_sthyi", VCPU_STAT(instruction_sthyi) },
	{ "instruction_sigp_sense", VCPU_STAT(instruction_sigp_sense) },
	{ "instruction_sigp_sense_running", VCPU_STAT(instruction_sigp_sense_running) },
	{ "instruction_sigp_external_call", VCPU_STAT(instruction_sigp_external_call) },
	{ "instruction_sigp_emergency", VCPU_STAT(instruction_sigp_emergency) },
	{ "instruction_sigp_cond_emergency", VCPU_STAT(instruction_sigp_cond_emergency) },
	{ "instruction_sigp_start", VCPU_STAT(instruction_sigp_start) },
	{ "instruction_sigp_stop", VCPU_STAT(instruction_sigp_stop) },
	{ "instruction_sigp_stop_store_status", VCPU_STAT(instruction_sigp_stop_store_status) },
	{ "instruction_sigp_store_status", VCPU_STAT(instruction_sigp_store_status) },
	{ "instruction_sigp_store_adtl_status", VCPU_STAT(instruction_sigp_store_adtl_status) },
	{ "instruction_sigp_set_arch", VCPU_STAT(instruction_sigp_arch) },
	{ "instruction_sigp_set_prefix", VCPU_STAT(instruction_sigp_prefix) },
	{ "instruction_sigp_restart", VCPU_STAT(instruction_sigp_restart) },
	{ "instruction_sigp_cpu_reset", VCPU_STAT(instruction_sigp_cpu_reset) },
	{ "instruction_sigp_init_cpu_reset", VCPU_STAT(instruction_sigp_init_cpu_reset) },
	{ "instruction_sigp_unknown", VCPU_STAT(instruction_sigp_unknown) },
	{ "diagnose_10", VCPU_STAT(diagnose_10) },
	{ "diagnose_44", VCPU_STAT(diagnose_44) },
	{ "diagnose_9c", VCPU_STAT(diagnose_9c) },
	{ "diagnose_258", VCPU_STAT(diagnose_258) },
	{ "diagnose_308", VCPU_STAT(diagnose_308) },
	{ "diagnose_500", VCPU_STAT(diagnose_500) },
	{ NULL }
};

/* upper facilities limit for kvm */
unsigned long kvm_s390_fac_list_mask[16] = {
	0xffe6000000000000UL,
	0x005e000000000000UL,
};

unsigned long kvm_s390_fac_list_mask_size(void)
{
	BUILD_BUG_ON(ARRAY_SIZE(kvm_s390_fac_list_mask) > S390_ARCH_FAC_MASK_SIZE_U64);
	return ARRAY_SIZE(kvm_s390_fac_list_mask);
}

/* available cpu features supported by kvm */
static DECLARE_BITMAP(kvm_s390_available_cpu_feat, KVM_S390_VM_CPU_FEAT_NR_BITS);

static struct gmap_notifier gmap_notifier;
debug_info_t *kvm_s390_dbf;

/* Section: not file related */
int kvm_arch_hardware_enable(void)
{
	/* every s390 is virtualization enabled ;-) */
	return 0;
}

static void kvm_gmap_notifier(struct gmap *gmap, unsigned long address);

/*
 * This callback is executed during stop_machine(). All CPUs are therefore
 * temporarily stopped. In order not to change guest behavior, we have to
 * disable preemption whenever we touch the epoch of kvm and the VCPUs,
 * so a CPU won't be stopped while calculating with the epoch.
 */
static int kvm_clock_sync(struct notifier_block *notifier, unsigned long val,
			  void *v)
{
	struct kvm *kvm;
	struct kvm_vcpu *vcpu;
	int i;
	unsigned long long *delta = v;

	list_for_each_entry(kvm, &vm_list, vm_list) {
		kvm->arch.epoch -= *delta;
		kvm_for_each_vcpu(i, vcpu, kvm) {
			vcpu->arch.sie_block->epoch -= *delta;
			if (vcpu->arch.cputm_enabled)
				vcpu->arch.cputm_start += *delta;
		}
	}
	return NOTIFY_OK;
}

static struct notifier_block kvm_clock_notifier = {
	.notifier_call = kvm_clock_sync,
};

int kvm_arch_hardware_setup(void)
{
	gmap_notifier.notifier_call = kvm_gmap_notifier;
	gmap_register_ipte_notifier(&gmap_notifier);
	atomic_notifier_chain_register(&s390_epoch_delta_notifier,
				       &kvm_clock_notifier);
	return 0;
}

void kvm_arch_hardware_unsetup(void)
{
	gmap_unregister_ipte_notifier(&gmap_notifier);
	atomic_notifier_chain_unregister(&s390_epoch_delta_notifier,
					 &kvm_clock_notifier);
}

int kvm_arch_init(void *opaque)
{
	kvm_s390_dbf = debug_register("kvm-trace", 32, 1, 7 * sizeof(long));
	if (!kvm_s390_dbf)
		return -ENOMEM;

	if (debug_register_view(kvm_s390_dbf, &debug_sprintf_view)) {
		debug_unregister(kvm_s390_dbf);
		return -ENOMEM;
	}

	/* Register floating interrupt controller interface. */
	return kvm_register_device_ops(&kvm_flic_ops, KVM_DEV_TYPE_FLIC);
}

void kvm_arch_exit(void)
{
	debug_unregister(kvm_s390_dbf);
}

/* Section: device related */
long kvm_arch_dev_ioctl(struct file *filp,
			unsigned int ioctl, unsigned long arg)
{
	if (ioctl == KVM_S390_ENABLE_SIE)
		return s390_enable_sie();
	return -EINVAL;
}

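/*
 * Report whether an optional capability is supported; for some
 * capabilities the returned value also carries a limit, e.g. the
 * maximum memory-op size or the number of VCPU slots in the SCA.
 */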
int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
{
	int r;

	switch (ext) {
	case KVM_CAP_S390_PSW:
	case KVM_CAP_S390_GMAP:
	case KVM_CAP_SYNC_MMU:
#ifdef CONFIG_KVM_S390_UCONTROL
	case KVM_CAP_S390_UCONTROL:
#endif
	case KVM_CAP_ASYNC_PF:
	case KVM_CAP_SYNC_REGS:
	case KVM_CAP_ONE_REG:
	case KVM_CAP_ENABLE_CAP:
	case KVM_CAP_S390_CSS_SUPPORT:
	case KVM_CAP_IOEVENTFD:
	case KVM_CAP_DEVICE_CTRL:
	case KVM_CAP_ENABLE_CAP_VM:
	case KVM_CAP_S390_IRQCHIP:
	case KVM_CAP_VM_ATTRIBUTES:
	case KVM_CAP_MP_STATE:
	case KVM_CAP_S390_INJECT_IRQ:
	case KVM_CAP_S390_USER_SIGP:
	case KVM_CAP_S390_USER_STSI:
	case KVM_CAP_S390_SKEYS:
	case KVM_CAP_S390_IRQ_STATE:
		r = 1;
		break;
	case KVM_CAP_S390_MEM_OP:
		r = MEM_OP_MAX_SIZE;
		break;
	case KVM_CAP_NR_VCPUS:
	case KVM_CAP_MAX_VCPUS:
		r = sclp.has_esca ? KVM_S390_ESCA_CPU_SLOTS
				  : KVM_S390_BSCA_CPU_SLOTS;
		break;
	case KVM_CAP_NR_MEMSLOTS:
		r = KVM_USER_MEM_SLOTS;
		break;
	case KVM_CAP_S390_COW:
		r = MACHINE_HAS_ESOP;
		break;
	case KVM_CAP_S390_VECTOR_REGISTERS:
		r = MACHINE_HAS_VX;
		break;
	case KVM_CAP_S390_RI:
		r = test_facility(64);
		break;
	default:
		r = 0;
	}
	return r;
}

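/*
 * Walk every page of a memslot and transfer the dirty state tracked
 * by the gmap into the memslot's dirty bitmap.
 */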
static void kvm_s390_sync_dirty_log(struct kvm *kvm,
				    struct kvm_memory_slot *memslot)
{
	gfn_t cur_gfn, last_gfn;
	unsigned long address;
	struct gmap *gmap = kvm->arch.gmap;

	/* Loop over all guest pages */
	last_gfn = memslot->base_gfn + memslot->npages;
	for (cur_gfn = memslot->base_gfn; cur_gfn <= last_gfn; cur_gfn++) {
		address = gfn_to_hva_memslot(memslot, cur_gfn);

		if (test_and_clear_guest_dirty(gmap->mm, address))
			mark_page_dirty(kvm, cur_gfn);
		if (fatal_signal_pending(current))
			return;
		cond_resched();
	}
}

/* Section: vm related */
static void sca_del_vcpu(struct kvm_vcpu *vcpu);

/*
 * Get (and clear) the dirty memory log for a memory slot.
 */
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
			       struct kvm_dirty_log *log)
{
	int r;
	unsigned long n;
	struct kvm_memslots *slots;
	struct kvm_memory_slot *memslot;
	int is_dirty = 0;

	mutex_lock(&kvm->slots_lock);

	r = -EINVAL;
	if (log->slot >= KVM_USER_MEM_SLOTS)
		goto out;

	slots = kvm_memslots(kvm);
	memslot = id_to_memslot(slots, log->slot);
	r = -ENOENT;
	if (!memslot->dirty_bitmap)
		goto out;

	kvm_s390_sync_dirty_log(kvm, memslot);
	r = kvm_get_dirty_log(kvm, log, &is_dirty);
	if (r)
		goto out;

	/* Clear the dirty log */
	if (is_dirty) {
		n = kvm_dirty_bitmap_bytes(memslot);
		memset(memslot->dirty_bitmap, 0, n);
	}
	r = 0;
out:
	mutex_unlock(&kvm->slots_lock);
	return r;
}

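/*
 * Handler for KVM_ENABLE_CAP on the VM file descriptor: switches on
 * optional VM-wide features such as the in-kernel irqchip, user space
 * SIGP/STSI handling, vector registers or runtime instrumentation.
 */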
static int kvm_vm_ioctl_enable_cap(struct kvm *kvm, struct kvm_enable_cap *cap)
{
	int r;

	if (cap->flags)
		return -EINVAL;

	switch (cap->cap) {
	case KVM_CAP_S390_IRQCHIP:
		VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_IRQCHIP");
		kvm->arch.use_irqchip = 1;
		r = 0;
		break;
	case KVM_CAP_S390_USER_SIGP:
		VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_USER_SIGP");
		kvm->arch.user_sigp = 1;
		r = 0;
		break;
	case KVM_CAP_S390_VECTOR_REGISTERS:
		mutex_lock(&kvm->lock);
		if (atomic_read(&kvm->online_vcpus)) {
			r = -EBUSY;
		} else if (MACHINE_HAS_VX) {
			set_kvm_facility(kvm->arch.model.fac_mask, 129);
			set_kvm_facility(kvm->arch.model.fac_list, 129);
			r = 0;
		} else
			r = -EINVAL;
		mutex_unlock(&kvm->lock);
		VM_EVENT(kvm, 3, "ENABLE: CAP_S390_VECTOR_REGISTERS %s",
			 r ? "(not available)" : "(success)");
		break;
	case KVM_CAP_S390_RI:
		r = -EINVAL;
		mutex_lock(&kvm->lock);
		if (atomic_read(&kvm->online_vcpus)) {
			r = -EBUSY;
		} else if (test_facility(64)) {
			set_kvm_facility(kvm->arch.model.fac_mask, 64);
			set_kvm_facility(kvm->arch.model.fac_list, 64);
			r = 0;
		}
		mutex_unlock(&kvm->lock);
		VM_EVENT(kvm, 3, "ENABLE: CAP_S390_RI %s",
			 r ? "(not available)" : "(success)");
		break;
	case KVM_CAP_S390_USER_STSI:
		VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_USER_STSI");
		kvm->arch.user_stsi = 1;
		r = 0;
		break;
	default:
		r = -EINVAL;
		break;
	}
	return r;
}

static int kvm_s390_get_mem_control(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	switch (attr->attr) {
	case KVM_S390_VM_MEM_LIMIT_SIZE:
		ret = 0;
		VM_EVENT(kvm, 3, "QUERY: max guest memory: %lu bytes",
			 kvm->arch.mem_limit);
		if (put_user(kvm->arch.mem_limit, (u64 __user *)attr->addr))
			ret = -EFAULT;
		break;
	default:
		ret = -ENXIO;
		break;
	}
	return ret;
}

static int kvm_s390_set_mem_control(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;
	unsigned int idx;
	switch (attr->attr) {
	case KVM_S390_VM_MEM_ENABLE_CMMA:
		/* enable CMMA only for z10 and later (EDAT_1) */
		ret = -EINVAL;
		if (!MACHINE_IS_LPAR || !MACHINE_HAS_EDAT1)
			break;

		ret = -EBUSY;
		VM_EVENT(kvm, 3, "%s", "ENABLE: CMMA support");
		mutex_lock(&kvm->lock);
		if (atomic_read(&kvm->online_vcpus) == 0) {
			kvm->arch.use_cmma = 1;
			ret = 0;
		}
		mutex_unlock(&kvm->lock);
		break;
	case KVM_S390_VM_MEM_CLR_CMMA:
		ret = -EINVAL;
		if (!kvm->arch.use_cmma)
			break;

		VM_EVENT(kvm, 3, "%s", "RESET: CMMA states");
		mutex_lock(&kvm->lock);
		idx = srcu_read_lock(&kvm->srcu);
		s390_reset_cmma(kvm->arch.gmap->mm);
		srcu_read_unlock(&kvm->srcu, idx);
		mutex_unlock(&kvm->lock);
		ret = 0;
		break;
	case KVM_S390_VM_MEM_LIMIT_SIZE: {
		unsigned long new_limit;

		if (kvm_is_ucontrol(kvm))
			return -EINVAL;

		if (get_user(new_limit, (u64 __user *)attr->addr))
			return -EFAULT;

		if (kvm->arch.mem_limit != KVM_S390_NO_MEM_LIMIT &&
		    new_limit > kvm->arch.mem_limit)
			return -E2BIG;

		if (!new_limit)
			return -EINVAL;

		/* gmap_alloc takes last usable address */
		if (new_limit != KVM_S390_NO_MEM_LIMIT)
			new_limit -= 1;

		ret = -EBUSY;
		mutex_lock(&kvm->lock);
		if (atomic_read(&kvm->online_vcpus) == 0) {
			/* gmap_alloc will round the limit up */
			struct gmap *new = gmap_alloc(current->mm, new_limit);

			if (!new) {
				ret = -ENOMEM;
			} else {
				gmap_free(kvm->arch.gmap);
				new->private = kvm;
				kvm->arch.gmap = new;
				ret = 0;
			}
		}
		mutex_unlock(&kvm->lock);
		VM_EVENT(kvm, 3, "SET: max guest address: %lu", new_limit);
		VM_EVENT(kvm, 3, "New guest asce: 0x%pK",
			 (void *) kvm->arch.gmap->asce);
		break;
	}
	default:
		ret = -ENXIO;
		break;
	}
	return ret;
}

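/*
 * Configure guest key wrapping: generate fresh wrapping key masks in
 * the CRYCB when AES/DEA key wrapping is enabled and clear them when it
 * is disabled, then kick every VCPU out of SIE so the change takes
 * effect.
 */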
static void kvm_s390_vcpu_crypto_setup(struct kvm_vcpu *vcpu);

static int kvm_s390_vm_set_crypto(struct kvm *kvm, struct kvm_device_attr *attr)
{
	struct kvm_vcpu *vcpu;
	int i;

	if (!test_kvm_facility(kvm, 76))
		return -EINVAL;

	mutex_lock(&kvm->lock);
	switch (attr->attr) {
	case KVM_S390_VM_CRYPTO_ENABLE_AES_KW:
		get_random_bytes(
			kvm->arch.crypto.crycb->aes_wrapping_key_mask,
			sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
		kvm->arch.crypto.aes_kw = 1;
		VM_EVENT(kvm, 3, "%s", "ENABLE: AES keywrapping support");
		break;
	case KVM_S390_VM_CRYPTO_ENABLE_DEA_KW:
		get_random_bytes(
			kvm->arch.crypto.crycb->dea_wrapping_key_mask,
			sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));
		kvm->arch.crypto.dea_kw = 1;
		VM_EVENT(kvm, 3, "%s", "ENABLE: DEA keywrapping support");
		break;
	case KVM_S390_VM_CRYPTO_DISABLE_AES_KW:
		kvm->arch.crypto.aes_kw = 0;
		memset(kvm->arch.crypto.crycb->aes_wrapping_key_mask, 0,
			sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
		VM_EVENT(kvm, 3, "%s", "DISABLE: AES keywrapping support");
		break;
	case KVM_S390_VM_CRYPTO_DISABLE_DEA_KW:
		kvm->arch.crypto.dea_kw = 0;
		memset(kvm->arch.crypto.crycb->dea_wrapping_key_mask, 0,
			sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));
		VM_EVENT(kvm, 3, "%s", "DISABLE: DEA keywrapping support");
		break;
	default:
		mutex_unlock(&kvm->lock);
		return -ENXIO;
	}

	kvm_for_each_vcpu(i, vcpu, kvm) {
		kvm_s390_vcpu_crypto_setup(vcpu);
		exit_sie(vcpu);
	}
	mutex_unlock(&kvm->lock);
	return 0;
}

static int kvm_s390_set_tod_high(struct kvm *kvm, struct kvm_device_attr *attr)
{
	u8 gtod_high;

	if (copy_from_user(&gtod_high, (void __user *)attr->addr,
			   sizeof(gtod_high)))
		return -EFAULT;

	if (gtod_high != 0)
		return -EINVAL;
	VM_EVENT(kvm, 3, "SET: TOD extension: 0x%x", gtod_high);

	return 0;
}

static int kvm_s390_set_tod_low(struct kvm *kvm, struct kvm_device_attr *attr)
{
	u64 gtod;

	if (copy_from_user(&gtod, (void __user *)attr->addr, sizeof(gtod)))
		return -EFAULT;

	kvm_s390_set_tod_clock(kvm, gtod);
	VM_EVENT(kvm, 3, "SET: TOD base: 0x%llx", gtod);
	return 0;
}

static int kvm_s390_set_tod(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	if (attr->flags)
		return -EINVAL;

	switch (attr->attr) {
	case KVM_S390_VM_TOD_HIGH:
		ret = kvm_s390_set_tod_high(kvm, attr);
		break;
	case KVM_S390_VM_TOD_LOW:
		ret = kvm_s390_set_tod_low(kvm, attr);
		break;
	default:
		ret = -ENXIO;
		break;
	}
	return ret;
}

static int kvm_s390_get_tod_high(struct kvm *kvm, struct kvm_device_attr *attr)
{
	u8 gtod_high = 0;

	if (copy_to_user((void __user *)attr->addr, &gtod_high,
			 sizeof(gtod_high)))
		return -EFAULT;
	VM_EVENT(kvm, 3, "QUERY: TOD extension: 0x%x", gtod_high);

	return 0;
}

static int kvm_s390_get_tod_low(struct kvm *kvm, struct kvm_device_attr *attr)
{
	u64 gtod;

	gtod = kvm_s390_get_tod_clock_fast(kvm);
	if (copy_to_user((void __user *)attr->addr, &gtod, sizeof(gtod)))
		return -EFAULT;
	VM_EVENT(kvm, 3, "QUERY: TOD base: 0x%llx", gtod);

	return 0;
}

static int kvm_s390_get_tod(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	if (attr->flags)
		return -EINVAL;

	switch (attr->attr) {
	case KVM_S390_VM_TOD_HIGH:
		ret = kvm_s390_get_tod_high(kvm, attr);
		break;
	case KVM_S390_VM_TOD_LOW:
		ret = kvm_s390_get_tod_low(kvm, attr);
		break;
	default:
		ret = -ENXIO;
		break;
	}
	return ret;
}

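/*
 * Set the guest CPU model: the user supplied CPUID, IBC and facility
 * list are adopted, with the IBC clamped to the range the machine
 * actually supports. Only allowed before the first VCPU is created.
 */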
static int kvm_s390_set_processor(struct kvm *kvm, struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_cpu_processor *proc;
	u16 lowest_ibc, unblocked_ibc;
	int ret = 0;

	mutex_lock(&kvm->lock);
	if (atomic_read(&kvm->online_vcpus)) {
		ret = -EBUSY;
		goto out;
	}
	proc = kzalloc(sizeof(*proc), GFP_KERNEL);
	if (!proc) {
		ret = -ENOMEM;
		goto out;
	}
	if (!copy_from_user(proc, (void __user *)attr->addr,
			    sizeof(*proc))) {
		kvm->arch.model.cpuid = proc->cpuid;
		lowest_ibc = sclp.ibc >> 16 & 0xfff;
		unblocked_ibc = sclp.ibc & 0xfff;
		if (lowest_ibc) {
			if (proc->ibc > unblocked_ibc)
				kvm->arch.model.ibc = unblocked_ibc;
			else if (proc->ibc < lowest_ibc)
				kvm->arch.model.ibc = lowest_ibc;
			else
				kvm->arch.model.ibc = proc->ibc;
		}
		memcpy(kvm->arch.model.fac_list, proc->fac_list,
		       S390_ARCH_FAC_LIST_SIZE_BYTE);
	} else
		ret = -EFAULT;
	kfree(proc);
out:
	mutex_unlock(&kvm->lock);
	return ret;
}

static int kvm_s390_set_processor_feat(struct kvm *kvm,
				       struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_cpu_feat data;
	int ret = -EBUSY;

	if (copy_from_user(&data, (void __user *)attr->addr, sizeof(data)))
		return -EFAULT;
	if (!bitmap_subset((unsigned long *) data.feat,
			   kvm_s390_available_cpu_feat,
			   KVM_S390_VM_CPU_FEAT_NR_BITS))
		return -EINVAL;

	mutex_lock(&kvm->lock);
	if (!atomic_read(&kvm->online_vcpus)) {
		bitmap_copy(kvm->arch.cpu_feat, (unsigned long *) data.feat,
			    KVM_S390_VM_CPU_FEAT_NR_BITS);
		ret = 0;
	}
	mutex_unlock(&kvm->lock);
	return ret;
}

static int kvm_s390_set_cpu_model(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret = -ENXIO;

	switch (attr->attr) {
	case KVM_S390_VM_CPU_PROCESSOR:
		ret = kvm_s390_set_processor(kvm, attr);
		break;
	case KVM_S390_VM_CPU_PROCESSOR_FEAT:
		ret = kvm_s390_set_processor_feat(kvm, attr);
		break;
	}
	return ret;
}

static int kvm_s390_get_processor(struct kvm *kvm, struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_cpu_processor *proc;
	int ret = 0;

	proc = kzalloc(sizeof(*proc), GFP_KERNEL);
	if (!proc) {
		ret = -ENOMEM;
		goto out;
	}
	proc->cpuid = kvm->arch.model.cpuid;
	proc->ibc = kvm->arch.model.ibc;
	memcpy(&proc->fac_list, kvm->arch.model.fac_list,
	       S390_ARCH_FAC_LIST_SIZE_BYTE);
	if (copy_to_user((void __user *)attr->addr, proc, sizeof(*proc)))
		ret = -EFAULT;
	kfree(proc);
out:
	return ret;
}

static int kvm_s390_get_machine(struct kvm *kvm, struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_cpu_machine *mach;
	int ret = 0;

	mach = kzalloc(sizeof(*mach), GFP_KERNEL);
	if (!mach) {
		ret = -ENOMEM;
		goto out;
	}
	get_cpu_id((struct cpuid *) &mach->cpuid);
	mach->ibc = sclp.ibc;
	memcpy(&mach->fac_mask, kvm->arch.model.fac_mask,
	       S390_ARCH_FAC_LIST_SIZE_BYTE);
	memcpy((unsigned long *)&mach->fac_list, S390_lowcore.stfle_fac_list,
	       S390_ARCH_FAC_LIST_SIZE_BYTE);
	if (copy_to_user((void __user *)attr->addr, mach, sizeof(*mach)))
		ret = -EFAULT;
	kfree(mach);
out:
	return ret;
}

static int kvm_s390_get_processor_feat(struct kvm *kvm,
				       struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_cpu_feat data;

	bitmap_copy((unsigned long *) data.feat, kvm->arch.cpu_feat,
		    KVM_S390_VM_CPU_FEAT_NR_BITS);
	if (copy_to_user((void __user *)attr->addr, &data, sizeof(data)))
		return -EFAULT;
	return 0;
}

static int kvm_s390_get_machine_feat(struct kvm *kvm,
				     struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_cpu_feat data;

	bitmap_copy((unsigned long *) data.feat,
		    kvm_s390_available_cpu_feat,
		    KVM_S390_VM_CPU_FEAT_NR_BITS);
	if (copy_to_user((void __user *)attr->addr, &data, sizeof(data)))
		return -EFAULT;
	return 0;
}

static int kvm_s390_get_cpu_model(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret = -ENXIO;

	switch (attr->attr) {
	case KVM_S390_VM_CPU_PROCESSOR:
		ret = kvm_s390_get_processor(kvm, attr);
		break;
	case KVM_S390_VM_CPU_MACHINE:
		ret = kvm_s390_get_machine(kvm, attr);
		break;
	case KVM_S390_VM_CPU_PROCESSOR_FEAT:
		ret = kvm_s390_get_processor_feat(kvm, attr);
		break;
	case KVM_S390_VM_CPU_MACHINE_FEAT:
		ret = kvm_s390_get_machine_feat(kvm, attr);
		break;
	}
	return ret;
}

static int kvm_s390_vm_set_attr(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	switch (attr->group) {
	case KVM_S390_VM_MEM_CTRL:
		ret = kvm_s390_set_mem_control(kvm, attr);
		break;
	case KVM_S390_VM_TOD:
		ret = kvm_s390_set_tod(kvm, attr);
		break;
	case KVM_S390_VM_CPU_MODEL:
		ret = kvm_s390_set_cpu_model(kvm, attr);
		break;
	case KVM_S390_VM_CRYPTO:
		ret = kvm_s390_vm_set_crypto(kvm, attr);
		break;
	default:
		ret = -ENXIO;
		break;
	}

	return ret;
}

static int kvm_s390_vm_get_attr(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	switch (attr->group) {
	case KVM_S390_VM_MEM_CTRL:
		ret = kvm_s390_get_mem_control(kvm, attr);
		break;
	case KVM_S390_VM_TOD:
		ret = kvm_s390_get_tod(kvm, attr);
		break;
	case KVM_S390_VM_CPU_MODEL:
		ret = kvm_s390_get_cpu_model(kvm, attr);
		break;
	default:
		ret = -ENXIO;
		break;
	}

	return ret;
}

static int kvm_s390_vm_has_attr(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	switch (attr->group) {
	case KVM_S390_VM_MEM_CTRL:
		switch (attr->attr) {
		case KVM_S390_VM_MEM_ENABLE_CMMA:
		case KVM_S390_VM_MEM_CLR_CMMA:
		case KVM_S390_VM_MEM_LIMIT_SIZE:
			ret = 0;
			break;
		default:
			ret = -ENXIO;
			break;
		}
		break;
	case KVM_S390_VM_TOD:
		switch (attr->attr) {
		case KVM_S390_VM_TOD_LOW:
		case KVM_S390_VM_TOD_HIGH:
			ret = 0;
			break;
		default:
			ret = -ENXIO;
			break;
		}
		break;
	case KVM_S390_VM_CPU_MODEL:
		switch (attr->attr) {
		case KVM_S390_VM_CPU_PROCESSOR:
		case KVM_S390_VM_CPU_MACHINE:
		case KVM_S390_VM_CPU_PROCESSOR_FEAT:
		case KVM_S390_VM_CPU_MACHINE_FEAT:
			ret = 0;
			break;
		default:
			ret = -ENXIO;
			break;
		}
		break;
	case KVM_S390_VM_CRYPTO:
		switch (attr->attr) {
		case KVM_S390_VM_CRYPTO_ENABLE_AES_KW:
		case KVM_S390_VM_CRYPTO_ENABLE_DEA_KW:
		case KVM_S390_VM_CRYPTO_DISABLE_AES_KW:
		case KVM_S390_VM_CRYPTO_DISABLE_DEA_KW:
			ret = 0;
			break;
		default:
			ret = -ENXIO;
			break;
		}
		break;
	default:
		ret = -ENXIO;
		break;
	}

	return ret;
}

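/*
 * Read the storage keys for a range of guest frames into a kernel
 * buffer and copy the result to user space. Returns
 * KVM_S390_GET_SKEYS_NONE if the guest is not using storage keys.
 */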
static long kvm_s390_get_skeys(struct kvm *kvm, struct kvm_s390_skeys *args)
{
	uint8_t *keys;
	uint64_t hva;
	unsigned long curkey;
	int i, r = 0;

	if (args->flags != 0)
		return -EINVAL;

	/* Is this guest using storage keys? */
	if (!mm_use_skey(current->mm))
		return KVM_S390_GET_SKEYS_NONE;

	/* Enforce sane limit on memory allocation */
	if (args->count < 1 || args->count > KVM_S390_SKEYS_MAX)
		return -EINVAL;

	keys = kmalloc_array(args->count, sizeof(uint8_t),
			     GFP_KERNEL | __GFP_NOWARN);
	if (!keys)
		keys = vmalloc(sizeof(uint8_t) * args->count);
	if (!keys)
		return -ENOMEM;

	for (i = 0; i < args->count; i++) {
		hva = gfn_to_hva(kvm, args->start_gfn + i);
		if (kvm_is_error_hva(hva)) {
			r = -EFAULT;
			goto out;
		}

		curkey = get_guest_storage_key(current->mm, hva);
		if (IS_ERR_VALUE(curkey)) {
			r = curkey;
			goto out;
		}
		keys[i] = curkey;
	}

	r = copy_to_user((uint8_t __user *)args->skeydata_addr, keys,
			 sizeof(uint8_t) * args->count);
	if (r)
		r = -EFAULT;
out:
	kvfree(keys);
	return r;
}

static long kvm_s390_set_skeys(struct kvm *kvm, struct kvm_s390_skeys *args)
{
	uint8_t *keys;
	uint64_t hva;
	int i, r = 0;

	if (args->flags != 0)
		return -EINVAL;

	/* Enforce sane limit on memory allocation */
	if (args->count < 1 || args->count > KVM_S390_SKEYS_MAX)
		return -EINVAL;

	keys = kmalloc_array(args->count, sizeof(uint8_t),
			     GFP_KERNEL | __GFP_NOWARN);
	if (!keys)
		keys = vmalloc(sizeof(uint8_t) * args->count);
	if (!keys)
		return -ENOMEM;

	r = copy_from_user(keys, (uint8_t __user *)args->skeydata_addr,
			   sizeof(uint8_t) * args->count);
	if (r) {
		r = -EFAULT;
		goto out;
	}

	/* Enable storage key handling for the guest */
	r = s390_enable_skey();
	if (r)
		goto out;

	for (i = 0; i < args->count; i++) {
		hva = gfn_to_hva(kvm, args->start_gfn + i);
		if (kvm_is_error_hva(hva)) {
			r = -EFAULT;
			goto out;
		}

		/* Lowest order bit is reserved */
		if (keys[i] & 0x01) {
			r = -EINVAL;
			goto out;
		}

		r = set_guest_storage_key(current->mm, hva,
					  (unsigned long)keys[i], 0);
		if (r)
			goto out;
	}
out:
	kvfree(keys);
	return r;
}

long kvm_arch_vm_ioctl(struct file *filp,
		       unsigned int ioctl, unsigned long arg)
{
	struct kvm *kvm = filp->private_data;
	void __user *argp = (void __user *)arg;
	struct kvm_device_attr attr;
	int r;

	switch (ioctl) {
	case KVM_S390_INTERRUPT: {
		struct kvm_s390_interrupt s390int;

		r = -EFAULT;
		if (copy_from_user(&s390int, argp, sizeof(s390int)))
			break;
		r = kvm_s390_inject_vm(kvm, &s390int);
		break;
	}
	case KVM_ENABLE_CAP: {
		struct kvm_enable_cap cap;
		r = -EFAULT;
		if (copy_from_user(&cap, argp, sizeof(cap)))
			break;
		r = kvm_vm_ioctl_enable_cap(kvm, &cap);
		break;
	}
	case KVM_CREATE_IRQCHIP: {
		struct kvm_irq_routing_entry routing;

		r = -EINVAL;
		if (kvm->arch.use_irqchip) {
			/* Set up dummy routing. */
			memset(&routing, 0, sizeof(routing));
			r = kvm_set_irq_routing(kvm, &routing, 0, 0);
		}
		break;
	}
	case KVM_SET_DEVICE_ATTR: {
		r = -EFAULT;
		if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
			break;
		r = kvm_s390_vm_set_attr(kvm, &attr);
		break;
	}
	case KVM_GET_DEVICE_ATTR: {
		r = -EFAULT;
		if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
			break;
		r = kvm_s390_vm_get_attr(kvm, &attr);
		break;
	}
	case KVM_HAS_DEVICE_ATTR: {
		r = -EFAULT;
		if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
			break;
		r = kvm_s390_vm_has_attr(kvm, &attr);
		break;
	}
	case KVM_S390_GET_SKEYS: {
		struct kvm_s390_skeys args;

		r = -EFAULT;
		if (copy_from_user(&args, argp,
				   sizeof(struct kvm_s390_skeys)))
			break;
		r = kvm_s390_get_skeys(kvm, &args);
		break;
	}
	case KVM_S390_SET_SKEYS: {
		struct kvm_s390_skeys args;

		r = -EFAULT;
		if (copy_from_user(&args, argp,
				   sizeof(struct kvm_s390_skeys)))
			break;
		r = kvm_s390_set_skeys(kvm, &args);
		break;
	}
	default:
		r = -ENOTTY;
	}

	return r;
}

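/*
 * Query the AP configuration via the PQAP(QCI) instruction: the inline
 * assembly stores the 128 byte config block and returns the condition
 * code, with an exception table entry in case the instruction faults.
 */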
static int kvm_s390_query_ap_config(u8 *config)
{
	u32 fcn_code = 0x04000000UL;
	u32 cc = 0;

	memset(config, 0, 128);
	asm volatile(
		"lgr 0,%1\n"
		"lgr 2,%2\n"
		".long 0xb2af0000\n"		/* PQAP(QCI) */
		"0: ipm %0\n"
		"srl %0,28\n"
		"1:\n"
		EX_TABLE(0b, 1b)
		: "+r" (cc)
		: "r" (fcn_code), "r" (config)
		: "cc", "0", "2", "memory"
	);

	return cc;
}

static int kvm_s390_apxa_installed(void)
{
	u8 config[128];
	int cc;

	if (test_facility(12)) {
		cc = kvm_s390_query_ap_config(config);

		if (cc)
			pr_err("PQAP(QCI) failed with cc=%d", cc);
		else
			return config[0] & 0x40;
	}

	return 0;
}

static void kvm_s390_set_crycb_format(struct kvm *kvm)
{
	kvm->arch.crypto.crycbd = (__u32)(unsigned long) kvm->arch.crypto.crycb;

	if (kvm_s390_apxa_installed())
		kvm->arch.crypto.crycbd |= CRYCB_FORMAT2;
	else
		kvm->arch.crypto.crycbd |= CRYCB_FORMAT1;
}

static u64 kvm_s390_get_initial_cpuid(void)
{
	struct cpuid cpuid;

	get_cpu_id(&cpuid);
	cpuid.version = 0xff;
	return *((u64 *) &cpuid);
}

static void kvm_s390_crypto_init(struct kvm *kvm)
{
	if (!test_kvm_facility(kvm, 76))
		return;

	kvm->arch.crypto.crycb = &kvm->arch.sie_page2->crycb;
	kvm_s390_set_crycb_format(kvm);

	/* Enable AES/DEA protected key functions by default */
	kvm->arch.crypto.aes_kw = 1;
	kvm->arch.crypto.dea_kw = 1;
	get_random_bytes(kvm->arch.crypto.crycb->aes_wrapping_key_mask,
			 sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
	get_random_bytes(kvm->arch.crypto.crycb->dea_wrapping_key_mask,
			 sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));
}

static void sca_dispose(struct kvm *kvm)
{
	if (kvm->arch.use_esca)
		free_pages_exact(kvm->arch.sca, sizeof(struct esca_block));
	else
		free_page((unsigned long)(kvm->arch.sca));
	kvm->arch.sca = NULL;
}

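/*
 * VM creation: allocates and initializes the SCA, the debug feature,
 * the facility mask/list, the crypto control block and the gmap that
 * backs guest memory.
 */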
int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
{
	int i, rc;
	char debug_name[16];
	static unsigned long sca_offset;

	rc = -EINVAL;
#ifdef CONFIG_KVM_S390_UCONTROL
	if (type & ~KVM_VM_S390_UCONTROL)
		goto out_err;
	if ((type & KVM_VM_S390_UCONTROL) && (!capable(CAP_SYS_ADMIN)))
		goto out_err;
#else
	if (type)
		goto out_err;
#endif

	rc = s390_enable_sie();
	if (rc)
		goto out_err;

	rc = -ENOMEM;

	ratelimit_state_init(&kvm->arch.sthyi_limit, 5 * HZ, 500);

	kvm->arch.use_esca = 0; /* start with basic SCA */
	rwlock_init(&kvm->arch.sca_lock);
	kvm->arch.sca = (struct bsca_block *) get_zeroed_page(GFP_KERNEL);
	if (!kvm->arch.sca)
		goto out_err;
	spin_lock(&kvm_lock);
	sca_offset += 16;
	if (sca_offset + sizeof(struct bsca_block) > PAGE_SIZE)
		sca_offset = 0;
	kvm->arch.sca = (struct bsca_block *)
			((char *) kvm->arch.sca + sca_offset);
	spin_unlock(&kvm_lock);

	sprintf(debug_name, "kvm-%u", current->pid);

	kvm->arch.dbf = debug_register(debug_name, 32, 1, 7 * sizeof(long));
	if (!kvm->arch.dbf)
		goto out_err;

	kvm->arch.sie_page2 =
	     (struct sie_page2 *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!kvm->arch.sie_page2)
		goto out_err;

	/* Populate the facility mask initially. */
	memcpy(kvm->arch.model.fac_mask, S390_lowcore.stfle_fac_list,
	       S390_ARCH_FAC_LIST_SIZE_BYTE);
	for (i = 0; i < S390_ARCH_FAC_LIST_SIZE_U64; i++) {
		if (i < kvm_s390_fac_list_mask_size())
			kvm->arch.model.fac_mask[i] &= kvm_s390_fac_list_mask[i];
		else
			kvm->arch.model.fac_mask[i] = 0UL;
	}

	/* Populate the facility list initially. */
	kvm->arch.model.fac_list = kvm->arch.sie_page2->fac_list;
	memcpy(kvm->arch.model.fac_list, kvm->arch.model.fac_mask,
	       S390_ARCH_FAC_LIST_SIZE_BYTE);

	set_kvm_facility(kvm->arch.model.fac_mask, 74);
	set_kvm_facility(kvm->arch.model.fac_list, 74);

	kvm->arch.model.cpuid = kvm_s390_get_initial_cpuid();
	kvm->arch.model.ibc = sclp.ibc & 0x0fff;

	kvm_s390_crypto_init(kvm);

	spin_lock_init(&kvm->arch.float_int.lock);
	for (i = 0; i < FIRQ_LIST_COUNT; i++)
		INIT_LIST_HEAD(&kvm->arch.float_int.lists[i]);
	init_waitqueue_head(&kvm->arch.ipte_wq);
	mutex_init(&kvm->arch.ipte_mutex);

	debug_register_view(kvm->arch.dbf, &debug_sprintf_view);
	VM_EVENT(kvm, 3, "vm created with type %lu", type);

	if (type & KVM_VM_S390_UCONTROL) {
		kvm->arch.gmap = NULL;
		kvm->arch.mem_limit = KVM_S390_NO_MEM_LIMIT;
	} else {
		if (sclp.hamax == U64_MAX)
			kvm->arch.mem_limit = TASK_MAX_SIZE;
		else
			kvm->arch.mem_limit = min_t(unsigned long, TASK_MAX_SIZE,
						    sclp.hamax + 1);
		kvm->arch.gmap = gmap_alloc(current->mm, kvm->arch.mem_limit - 1);
		if (!kvm->arch.gmap)
			goto out_err;
		kvm->arch.gmap->private = kvm;
		kvm->arch.gmap->pfault_enabled = 0;
	}

	kvm->arch.css_support = 0;
	kvm->arch.use_irqchip = 0;
	kvm->arch.epoch = 0;

	spin_lock_init(&kvm->arch.start_stop_lock);
	KVM_EVENT(3, "vm 0x%pK created by pid %u", kvm, current->pid);

	return 0;
out_err:
	free_page((unsigned long)kvm->arch.sie_page2);
	debug_unregister(kvm->arch.dbf);
	sca_dispose(kvm);
	KVM_EVENT(3, "creation of vm failed: %d", rc);
	return rc;
}

void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{
	VCPU_EVENT(vcpu, 3, "%s", "free cpu");
	trace_kvm_s390_destroy_vcpu(vcpu->vcpu_id);
	kvm_s390_clear_local_irqs(vcpu);
	kvm_clear_async_pf_completion_queue(vcpu);
	if (!kvm_is_ucontrol(vcpu->kvm))
		sca_del_vcpu(vcpu);

	if (kvm_is_ucontrol(vcpu->kvm))
		gmap_free(vcpu->arch.gmap);

	if (vcpu->kvm->arch.use_cmma)
		kvm_s390_vcpu_unsetup_cmma(vcpu);
	free_page((unsigned long)(vcpu->arch.sie_block));

	kvm_vcpu_uninit(vcpu);
	kmem_cache_free(kvm_vcpu_cache, vcpu);
}

static void kvm_free_vcpus(struct kvm *kvm)
{
	unsigned int i;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(i, vcpu, kvm)
		kvm_arch_vcpu_destroy(vcpu);

	mutex_lock(&kvm->lock);
	for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
		kvm->vcpus[i] = NULL;

	atomic_set(&kvm->online_vcpus, 0);
	mutex_unlock(&kvm->lock);
}

void kvm_arch_destroy_vm(struct kvm *kvm)
{
	kvm_free_vcpus(kvm);
	sca_dispose(kvm);
	debug_unregister(kvm->arch.dbf);
	free_page((unsigned long)kvm->arch.sie_page2);
	if (!kvm_is_ucontrol(kvm))
		gmap_free(kvm->arch.gmap);
	kvm_s390_destroy_adapters(kvm);
	kvm_s390_clear_float_irqs(kvm);
	KVM_EVENT(3, "vm 0x%pK destroyed", kvm);
}

/* Section: vcpu related */
static int __kvm_ucontrol_vcpu_init(struct kvm_vcpu *vcpu)
{
	vcpu->arch.gmap = gmap_alloc(current->mm, -1UL);
	if (!vcpu->arch.gmap)
		return -ENOMEM;
	vcpu->arch.gmap->private = vcpu->kvm;

	return 0;
}

static void sca_del_vcpu(struct kvm_vcpu *vcpu)
{
	read_lock(&vcpu->kvm->arch.sca_lock);
	if (vcpu->kvm->arch.use_esca) {
		struct esca_block *sca = vcpu->kvm->arch.sca;

		clear_bit_inv(vcpu->vcpu_id, (unsigned long *) sca->mcn);
		sca->cpu[vcpu->vcpu_id].sda = 0;
	} else {
		struct bsca_block *sca = vcpu->kvm->arch.sca;

		clear_bit_inv(vcpu->vcpu_id, (unsigned long *) &sca->mcn);
		sca->cpu[vcpu->vcpu_id].sda = 0;
	}
	read_unlock(&vcpu->kvm->arch.sca_lock);
}

static void sca_add_vcpu(struct kvm_vcpu *vcpu)
{
	read_lock(&vcpu->kvm->arch.sca_lock);
	if (vcpu->kvm->arch.use_esca) {
		struct esca_block *sca = vcpu->kvm->arch.sca;

		sca->cpu[vcpu->vcpu_id].sda = (__u64) vcpu->arch.sie_block;
		vcpu->arch.sie_block->scaoh = (__u32)(((__u64)sca) >> 32);
		vcpu->arch.sie_block->scaol = (__u32)(__u64)sca & ~0x3fU;
		vcpu->arch.sie_block->ecb2 |= 0x04U;
		set_bit_inv(vcpu->vcpu_id, (unsigned long *) sca->mcn);
	} else {
		struct bsca_block *sca = vcpu->kvm->arch.sca;

		sca->cpu[vcpu->vcpu_id].sda = (__u64) vcpu->arch.sie_block;
		vcpu->arch.sie_block->scaoh = (__u32)(((__u64)sca) >> 32);
		vcpu->arch.sie_block->scaol = (__u32)(__u64)sca;
		set_bit_inv(vcpu->vcpu_id, (unsigned long *) &sca->mcn);
	}
	read_unlock(&vcpu->kvm->arch.sca_lock);
}

/* Basic SCA to Extended SCA data copy routines */
static inline void sca_copy_entry(struct esca_entry *d, struct bsca_entry *s)
{
	d->sda = s->sda;
	d->sigp_ctrl.c = s->sigp_ctrl.c;
	d->sigp_ctrl.scn = s->sigp_ctrl.scn;
}

static void sca_copy_b_to_e(struct esca_block *d, struct bsca_block *s)
{
	int i;

	d->ipte_control = s->ipte_control;
	d->mcn[0] = s->mcn;
	for (i = 0; i < KVM_S390_BSCA_CPU_SLOTS; i++)
		sca_copy_entry(&d->cpu[i], &s->cpu[i]);
}

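/*
 * Upgrade the VM from a basic to an extended SCA at runtime: all VCPUs
 * are blocked while their SIE control blocks are rewired to the newly
 * allocated ESCA, so none of them can be in SIE during the switch.
 */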
static int sca_switch_to_extended(struct kvm *kvm)
{
	struct bsca_block *old_sca = kvm->arch.sca;
	struct esca_block *new_sca;
	struct kvm_vcpu *vcpu;
	unsigned int vcpu_idx;
	u32 scaol, scaoh;

	new_sca = alloc_pages_exact(sizeof(*new_sca), GFP_KERNEL|__GFP_ZERO);
	if (!new_sca)
		return -ENOMEM;

	scaoh = (u32)((u64)(new_sca) >> 32);
	scaol = (u32)(u64)(new_sca) & ~0x3fU;

	kvm_s390_vcpu_block_all(kvm);
	write_lock(&kvm->arch.sca_lock);

	sca_copy_b_to_e(new_sca, old_sca);

	kvm_for_each_vcpu(vcpu_idx, vcpu, kvm) {
		vcpu->arch.sie_block->scaoh = scaoh;
		vcpu->arch.sie_block->scaol = scaol;
		vcpu->arch.sie_block->ecb2 |= 0x04U;
	}
	kvm->arch.sca = new_sca;
	kvm->arch.use_esca = 1;

	write_unlock(&kvm->arch.sca_lock);
	kvm_s390_vcpu_unblock_all(kvm);

	free_page((unsigned long)old_sca);

	VM_EVENT(kvm, 2, "Switched to ESCA (0x%pK -> 0x%pK)",
		 old_sca, kvm->arch.sca);
	return 0;
}

static int sca_can_add_vcpu(struct kvm *kvm, unsigned int id)
{
	int rc;

	if (id < KVM_S390_BSCA_CPU_SLOTS)
		return true;
	if (!sclp.has_esca)
		return false;

	mutex_lock(&kvm->lock);
	rc = kvm->arch.use_esca ? 0 : sca_switch_to_extended(kvm);
	mutex_unlock(&kvm->lock);

	return rc == 0 && id < KVM_S390_ESCA_CPU_SLOTS;
}

int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
{
	vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
	kvm_clear_async_pf_completion_queue(vcpu);
	vcpu->run->kvm_valid_regs = KVM_SYNC_PREFIX |
				    KVM_SYNC_GPRS |
				    KVM_SYNC_ACRS |
				    KVM_SYNC_CRS |
				    KVM_SYNC_ARCH0 |
				    KVM_SYNC_PFAULT;
	if (test_kvm_facility(vcpu->kvm, 64))
		vcpu->run->kvm_valid_regs |= KVM_SYNC_RICCB;
	/* fprs can be synchronized via vrs, even if the guest has no vx. With
	 * MACHINE_HAS_VX, (load|store)_fpu_regs() will work with vrs format.
	 */
	if (MACHINE_HAS_VX)
		vcpu->run->kvm_valid_regs |= KVM_SYNC_VRS;
	else
		vcpu->run->kvm_valid_regs |= KVM_SYNC_FPRS;

	if (kvm_is_ucontrol(vcpu->kvm))
		return __kvm_ucontrol_vcpu_init(vcpu);

	return 0;
}

/* needs disabled preemption to protect from TOD sync and vcpu_load/put */
static void __start_cpu_timer_accounting(struct kvm_vcpu *vcpu)
{
	WARN_ON_ONCE(vcpu->arch.cputm_start != 0);
	raw_write_seqcount_begin(&vcpu->arch.cputm_seqcount);
	vcpu->arch.cputm_start = get_tod_clock_fast();
	raw_write_seqcount_end(&vcpu->arch.cputm_seqcount);
}

/* needs disabled preemption to protect from TOD sync and vcpu_load/put */
static void __stop_cpu_timer_accounting(struct kvm_vcpu *vcpu)
{
	WARN_ON_ONCE(vcpu->arch.cputm_start == 0);
	raw_write_seqcount_begin(&vcpu->arch.cputm_seqcount);
	vcpu->arch.sie_block->cputm -= get_tod_clock_fast() - vcpu->arch.cputm_start;
	vcpu->arch.cputm_start = 0;
	raw_write_seqcount_end(&vcpu->arch.cputm_seqcount);
}

/* needs disabled preemption to protect from TOD sync and vcpu_load/put */
static void __enable_cpu_timer_accounting(struct kvm_vcpu *vcpu)
{
	WARN_ON_ONCE(vcpu->arch.cputm_enabled);
	vcpu->arch.cputm_enabled = true;
	__start_cpu_timer_accounting(vcpu);
}

/* needs disabled preemption to protect from TOD sync and vcpu_load/put */
static void __disable_cpu_timer_accounting(struct kvm_vcpu *vcpu)
{
	WARN_ON_ONCE(!vcpu->arch.cputm_enabled);
	__stop_cpu_timer_accounting(vcpu);
	vcpu->arch.cputm_enabled = false;
}

static void enable_cpu_timer_accounting(struct kvm_vcpu *vcpu)
{
	preempt_disable(); /* protect from TOD sync and vcpu_load/put */
	__enable_cpu_timer_accounting(vcpu);
	preempt_enable();
}

static void disable_cpu_timer_accounting(struct kvm_vcpu *vcpu)
{
	preempt_disable(); /* protect from TOD sync and vcpu_load/put */
	__disable_cpu_timer_accounting(vcpu);
	preempt_enable();
}

/* set the cpu timer - may only be called from the VCPU thread itself */
void kvm_s390_set_cpu_timer(struct kvm_vcpu *vcpu, __u64 cputm)
{
	preempt_disable(); /* protect from TOD sync and vcpu_load/put */
	raw_write_seqcount_begin(&vcpu->arch.cputm_seqcount);
	if (vcpu->arch.cputm_enabled)
		vcpu->arch.cputm_start = get_tod_clock_fast();
	vcpu->arch.sie_block->cputm = cputm;
	raw_write_seqcount_end(&vcpu->arch.cputm_seqcount);
	preempt_enable();
}

/* update and get the cpu timer - can also be called from other VCPU threads */
__u64 kvm_s390_get_cpu_timer(struct kvm_vcpu *vcpu)
{
	unsigned int seq;
	__u64 value;

	if (unlikely(!vcpu->arch.cputm_enabled))
		return vcpu->arch.sie_block->cputm;

	preempt_disable(); /* protect from TOD sync and vcpu_load/put */
	do {
		seq = raw_read_seqcount(&vcpu->arch.cputm_seqcount);
		/*
		 * If the writer would ever execute a read in the critical
		 * section, e.g. in irq context, we have a deadlock.
		 */
		WARN_ON_ONCE((seq & 1) && smp_processor_id() == vcpu->cpu);
		value = vcpu->arch.sie_block->cputm;
		/* if cputm_start is 0, accounting is being started/stopped */
		if (likely(vcpu->arch.cputm_start))
			value -= get_tod_clock_fast() - vcpu->arch.cputm_start;
	} while (read_seqcount_retry(&vcpu->arch.cputm_seqcount, seq & ~1));
	preempt_enable();
	return value;
}

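/*
 * Context switch hooks: on vcpu_load the host FPU and access registers
 * are saved and the guest's made current; vcpu_put does the reverse and
 * stops CPU timer accounting while the VCPU is scheduled out.
 */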
void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	/* Save host register state */
	save_fpu_regs();
	vcpu->arch.host_fpregs.fpc = current->thread.fpu.fpc;
	vcpu->arch.host_fpregs.regs = current->thread.fpu.regs;

	if (MACHINE_HAS_VX)
		current->thread.fpu.regs = vcpu->run->s.regs.vrs;
	else
		current->thread.fpu.regs = vcpu->run->s.regs.fprs;
	current->thread.fpu.fpc = vcpu->run->s.regs.fpc;
	if (test_fp_ctl(current->thread.fpu.fpc))
		/* User space provided an invalid FPC, let's clear it */
		current->thread.fpu.fpc = 0;

	save_access_regs(vcpu->arch.host_acrs);
	restore_access_regs(vcpu->run->s.regs.acrs);
	gmap_enable(vcpu->arch.gmap);
	atomic_or(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
	if (vcpu->arch.cputm_enabled && !is_vcpu_idle(vcpu))
		__start_cpu_timer_accounting(vcpu);
	vcpu->cpu = cpu;
}

void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
	vcpu->cpu = -1;
	if (vcpu->arch.cputm_enabled && !is_vcpu_idle(vcpu))
		__stop_cpu_timer_accounting(vcpu);
	atomic_andnot(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
	gmap_disable(vcpu->arch.gmap);

	/* Save guest register state */
	save_fpu_regs();
	vcpu->run->s.regs.fpc = current->thread.fpu.fpc;

	/* Restore host register state */
	current->thread.fpu.fpc = vcpu->arch.host_fpregs.fpc;
	current->thread.fpu.regs = vcpu->arch.host_fpregs.regs;

	save_access_regs(vcpu->run->s.regs.acrs);
	restore_access_regs(vcpu->arch.host_acrs);
}

static void kvm_s390_vcpu_initial_reset(struct kvm_vcpu *vcpu)
{
	/* this equals initial cpu reset in pop, but we don't switch to ESA */
	vcpu->arch.sie_block->gpsw.mask = 0UL;
	vcpu->arch.sie_block->gpsw.addr = 0UL;
	kvm_s390_set_prefix(vcpu, 0);
	kvm_s390_set_cpu_timer(vcpu, 0);
	vcpu->arch.sie_block->ckc = 0UL;
	vcpu->arch.sie_block->todpr = 0;
	memset(vcpu->arch.sie_block->gcr, 0, 16 * sizeof(__u64));
	vcpu->arch.sie_block->gcr[0] = 0xE0UL;
	vcpu->arch.sie_block->gcr[14] = 0xC2000000UL;
	/* make sure the new fpc will be lazily loaded */
	save_fpu_regs();
	current->thread.fpu.fpc = 0;
	vcpu->arch.sie_block->gbea = 1;
	vcpu->arch.sie_block->pp = 0;
	vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
	kvm_clear_async_pf_completion_queue(vcpu);
	if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm))
		kvm_s390_vcpu_stop(vcpu);
	kvm_s390_clear_local_irqs(vcpu);
}

void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
{
	mutex_lock(&vcpu->kvm->lock);
	preempt_disable();
	vcpu->arch.sie_block->epoch = vcpu->kvm->arch.epoch;
	preempt_enable();
	mutex_unlock(&vcpu->kvm->lock);
	if (!kvm_is_ucontrol(vcpu->kvm)) {
		vcpu->arch.gmap = vcpu->kvm->arch.gmap;
		sca_add_vcpu(vcpu);
	}
}

static void kvm_s390_vcpu_crypto_setup(struct kvm_vcpu *vcpu)
{
	if (!test_kvm_facility(vcpu->kvm, 76))
		return;

	vcpu->arch.sie_block->ecb3 &= ~(ECB3_AES | ECB3_DEA);

	if (vcpu->kvm->arch.crypto.aes_kw)
		vcpu->arch.sie_block->ecb3 |= ECB3_AES;
	if (vcpu->kvm->arch.crypto.dea_kw)
		vcpu->arch.sie_block->ecb3 |= ECB3_DEA;

	vcpu->arch.sie_block->crycbd = vcpu->kvm->arch.crypto.crycbd;
}

void kvm_s390_vcpu_unsetup_cmma(struct kvm_vcpu *vcpu)
{
	free_page(vcpu->arch.sie_block->cbrlo);
	vcpu->arch.sie_block->cbrlo = 0;
}

int kvm_s390_vcpu_setup_cmma(struct kvm_vcpu *vcpu)
{
	vcpu->arch.sie_block->cbrlo = get_zeroed_page(GFP_KERNEL);
	if (!vcpu->arch.sie_block->cbrlo)
		return -ENOMEM;

	vcpu->arch.sie_block->ecb2 |= 0x80;
	vcpu->arch.sie_block->ecb2 &= ~0x08;
	return 0;
}

static void kvm_s390_vcpu_setup_model(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_cpu_model *model = &vcpu->kvm->arch.model;

	vcpu->arch.sie_block->ibc = model->ibc;
	if (test_kvm_facility(vcpu->kvm, 7))
		vcpu->arch.sie_block->fac = (u32)(u64) model->fac_list;
}

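/*
 * Per-VCPU setup of the SIE control block: the ecb/eca/ecd feature bits
 * are derived from the facilities and SCLP features available to the VM.
 */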
int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
{
	int rc = 0;

	atomic_set(&vcpu->arch.sie_block->cpuflags, CPUSTAT_ZARCH |
						    CPUSTAT_SM |
						    CPUSTAT_STOPPED);

	if (test_kvm_facility(vcpu->kvm, 78))
		atomic_or(CPUSTAT_GED2, &vcpu->arch.sie_block->cpuflags);
	else if (test_kvm_facility(vcpu->kvm, 8))
		atomic_or(CPUSTAT_GED, &vcpu->arch.sie_block->cpuflags);

	kvm_s390_vcpu_setup_model(vcpu);

	vcpu->arch.sie_block->ecb = 0x02;
	if (test_kvm_facility(vcpu->kvm, 9))
		vcpu->arch.sie_block->ecb |= 0x04;
	if (test_kvm_facility(vcpu->kvm, 50) && test_kvm_facility(vcpu->kvm, 73))
		vcpu->arch.sie_block->ecb |= 0x10;

	if (test_kvm_facility(vcpu->kvm, 8))
		vcpu->arch.sie_block->ecb2 |= 0x08;
	vcpu->arch.sie_block->eca = 0xC1002000U;
	if (sclp.has_siif)
		vcpu->arch.sie_block->eca |= 1;
	if (sclp.has_sigpif)
		vcpu->arch.sie_block->eca |= 0x10000000U;
	if (test_kvm_facility(vcpu->kvm, 64))
		vcpu->arch.sie_block->ecb3 |= 0x01;
	if (test_kvm_facility(vcpu->kvm, 129)) {
		vcpu->arch.sie_block->eca |= 0x00020000;
		vcpu->arch.sie_block->ecd |= 0x20000000;
	}
	vcpu->arch.sie_block->riccbd = (unsigned long) &vcpu->run->s.regs.riccb;
	vcpu->arch.sie_block->ictl |= ICTL_ISKE | ICTL_SSKE | ICTL_RRBE;
	if (test_kvm_facility(vcpu->kvm, 74))
		vcpu->arch.sie_block->ictl |= ICTL_OPEREXC;

	if (vcpu->kvm->arch.use_cmma) {
		rc = kvm_s390_vcpu_setup_cmma(vcpu);
		if (rc)
			return rc;
	}
	hrtimer_init(&vcpu->arch.ckc_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	vcpu->arch.ckc_timer.function = kvm_s390_idle_wakeup;

	kvm_s390_vcpu_crypto_setup(vcpu);

	return rc;
}

struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
				      unsigned int id)
{
	struct kvm_vcpu *vcpu;
	struct sie_page *sie_page;
	int rc = -EINVAL;

	if (!kvm_is_ucontrol(kvm) && !sca_can_add_vcpu(kvm, id))
		goto out;

	rc = -ENOMEM;

	vcpu = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
	if (!vcpu)
		goto out;

	sie_page = (struct sie_page *) get_zeroed_page(GFP_KERNEL);
	if (!sie_page)
		goto out_free_cpu;

	vcpu->arch.sie_block = &sie_page->sie_block;
	vcpu->arch.sie_block->itdba = (unsigned long) &sie_page->itdb;

	vcpu->arch.sie_block->icpua = id;
	spin_lock_init(&vcpu->arch.local_int.lock);
	vcpu->arch.local_int.float_int = &kvm->arch.float_int;
	vcpu->arch.local_int.wq = &vcpu->wq;
	vcpu->arch.local_int.cpuflags = &vcpu->arch.sie_block->cpuflags;
	seqcount_init(&vcpu->arch.cputm_seqcount);

	rc = kvm_vcpu_init(vcpu, kvm, id);
	if (rc)
		goto out_free_sie_block;
	VM_EVENT(kvm, 3, "create cpu %d at 0x%pK, sie block at 0x%pK", id, vcpu,
		 vcpu->arch.sie_block);
	trace_kvm_s390_create_vcpu(id, vcpu, vcpu->arch.sie_block);

	return vcpu;
out_free_sie_block:
	free_page((unsigned long)(vcpu->arch.sie_block));
out_free_cpu:
	kmem_cache_free(kvm_vcpu_cache, vcpu);
out:
	return ERR_PTR(rc);
}

b0c632db
HC
1813int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
1814{
9a022067 1815 return kvm_s390_vcpu_has_irq(vcpu, 0);
b0c632db
HC
1816}
1817
27406cd5 1818void kvm_s390_vcpu_block(struct kvm_vcpu *vcpu)
49b99e1e 1819{
805de8f4 1820 atomic_or(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
61a6df54 1821 exit_sie(vcpu);
49b99e1e
CB
1822}
1823
27406cd5 1824void kvm_s390_vcpu_unblock(struct kvm_vcpu *vcpu)
49b99e1e 1825{
805de8f4 1826 atomic_andnot(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
49b99e1e
CB
1827}
1828
8e236546
CB
1829static void kvm_s390_vcpu_request(struct kvm_vcpu *vcpu)
1830{
805de8f4 1831 atomic_or(PROG_REQUEST, &vcpu->arch.sie_block->prog20);
61a6df54 1832 exit_sie(vcpu);
8e236546
CB
1833}
1834
1835static void kvm_s390_vcpu_request_handled(struct kvm_vcpu *vcpu)
1836{
9bf9fde2 1837 atomic_andnot(PROG_REQUEST, &vcpu->arch.sie_block->prog20);
8e236546
CB
1838}
1839
49b99e1e
CB
1840/*
1841 * Kick a guest cpu out of SIE and wait until SIE is not running.
1842 * If the CPU is not running (e.g. waiting as idle) the function will
1843 * return immediately. */
1844void exit_sie(struct kvm_vcpu *vcpu)
1845{
805de8f4 1846 atomic_or(CPUSTAT_STOP_INT, &vcpu->arch.sie_block->cpuflags);
49b99e1e
CB
1847 while (vcpu->arch.sie_block->prog0c & PROG_IN_SIE)
1848 cpu_relax();
1849}
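/*
 * Illustrative note (not part of the original file): PROG_IN_SIE is set in
 * prog0c by the sie64a() entry code while the vcpu executes SIE, so the
 * loop above busy-waits until the CPUSTAT_STOP_INT kick has taken effect
 * and the vcpu has actually left SIE.
 */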
1850
8e236546
CB
1851/* Kick a guest cpu out of SIE to process a request synchronously */
1852void kvm_s390_sync_request(int req, struct kvm_vcpu *vcpu)
49b99e1e 1853{
8e236546
CB
1854 kvm_make_request(req, vcpu);
1855 kvm_s390_vcpu_request(vcpu);
49b99e1e
CB
1856}
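/*
 * Illustrative sketch (not part of the original file): kvm_gmap_notifier()
 * below uses this to re-arm the prefix-page ipte notifier synchronously:
 *
 *	kvm_s390_sync_request(KVM_REQ_MMU_RELOAD, vcpu);
 *
 * The vcpu is kicked out of SIE and the request is consumed in
 * kvm_s390_handle_requests() before the next SIE entry.
 */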
1857
2c70fe44
CB
1858static void kvm_gmap_notifier(struct gmap *gmap, unsigned long address)
1859{
1860 int i;
1861 struct kvm *kvm = gmap->private;
1862 struct kvm_vcpu *vcpu;
1863
1864 kvm_for_each_vcpu(i, vcpu, kvm) {
1865 /* match against both prefix pages */
fda902cb 1866 if (kvm_s390_get_prefix(vcpu) == (address & ~0x1000UL)) {
2c70fe44 1867 VCPU_EVENT(vcpu, 2, "gmap notifier for %lx", address);
8e236546 1868 kvm_s390_sync_request(KVM_REQ_MMU_RELOAD, vcpu);
2c70fe44
CB
1869 }
1870 }
1871}
1872
b6d33834
CD
1873int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
1874{
1875 /* kvm common code refers to this, but never calls it */
1876 BUG();
1877 return 0;
1878}
1879
14eebd91
CO
1880static int kvm_arch_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu,
1881 struct kvm_one_reg *reg)
1882{
1883 int r = -EINVAL;
1884
1885 switch (reg->id) {
29b7c71b
CO
1886 case KVM_REG_S390_TODPR:
1887 r = put_user(vcpu->arch.sie_block->todpr,
1888 (u32 __user *)reg->addr);
1889 break;
1890 case KVM_REG_S390_EPOCHDIFF:
1891 r = put_user(vcpu->arch.sie_block->epoch,
1892 (u64 __user *)reg->addr);
1893 break;
46a6dd1c 1894 case KVM_REG_S390_CPU_TIMER:
4287f247 1895 r = put_user(kvm_s390_get_cpu_timer(vcpu),
46a6dd1c
J
1896 (u64 __user *)reg->addr);
1897 break;
1898 case KVM_REG_S390_CLOCK_COMP:
1899 r = put_user(vcpu->arch.sie_block->ckc,
1900 (u64 __user *)reg->addr);
1901 break;
536336c2
DD
1902 case KVM_REG_S390_PFTOKEN:
1903 r = put_user(vcpu->arch.pfault_token,
1904 (u64 __user *)reg->addr);
1905 break;
1906 case KVM_REG_S390_PFCOMPARE:
1907 r = put_user(vcpu->arch.pfault_compare,
1908 (u64 __user *)reg->addr);
1909 break;
1910 case KVM_REG_S390_PFSELECT:
1911 r = put_user(vcpu->arch.pfault_select,
1912 (u64 __user *)reg->addr);
1913 break;
672550fb
CB
1914 case KVM_REG_S390_PP:
1915 r = put_user(vcpu->arch.sie_block->pp,
1916 (u64 __user *)reg->addr);
1917 break;
afa45ff5
CB
1918 case KVM_REG_S390_GBEA:
1919 r = put_user(vcpu->arch.sie_block->gbea,
1920 (u64 __user *)reg->addr);
1921 break;
14eebd91
CO
1922 default:
1923 break;
1924 }
1925
1926 return r;
1927}
1928
1929static int kvm_arch_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu,
1930 struct kvm_one_reg *reg)
1931{
1932 int r = -EINVAL;
4287f247 1933 __u64 val;
14eebd91
CO
1934
1935 switch (reg->id) {
29b7c71b
CO
1936 case KVM_REG_S390_TODPR:
1937 r = get_user(vcpu->arch.sie_block->todpr,
1938 (u32 __user *)reg->addr);
1939 break;
1940 case KVM_REG_S390_EPOCHDIFF:
1941 r = get_user(vcpu->arch.sie_block->epoch,
1942 (u64 __user *)reg->addr);
1943 break;
46a6dd1c 1944 case KVM_REG_S390_CPU_TIMER:
4287f247
DH
1945 r = get_user(val, (u64 __user *)reg->addr);
1946 if (!r)
1947 kvm_s390_set_cpu_timer(vcpu, val);
46a6dd1c
J
1948 break;
1949 case KVM_REG_S390_CLOCK_COMP:
1950 r = get_user(vcpu->arch.sie_block->ckc,
1951 (u64 __user *)reg->addr);
1952 break;
536336c2
DD
1953 case KVM_REG_S390_PFTOKEN:
1954 r = get_user(vcpu->arch.pfault_token,
1955 (u64 __user *)reg->addr);
9fbd8082
DH
1956 if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
1957 kvm_clear_async_pf_completion_queue(vcpu);
536336c2
DD
1958 break;
1959 case KVM_REG_S390_PFCOMPARE:
1960 r = get_user(vcpu->arch.pfault_compare,
1961 (u64 __user *)reg->addr);
1962 break;
1963 case KVM_REG_S390_PFSELECT:
1964 r = get_user(vcpu->arch.pfault_select,
1965 (u64 __user *)reg->addr);
1966 break;
672550fb
CB
1967 case KVM_REG_S390_PP:
1968 r = get_user(vcpu->arch.sie_block->pp,
1969 (u64 __user *)reg->addr);
1970 break;
afa45ff5
CB
1971 case KVM_REG_S390_GBEA:
1972 r = get_user(vcpu->arch.sie_block->gbea,
1973 (u64 __user *)reg->addr);
1974 break;
14eebd91
CO
1975 default:
1976 break;
1977 }
1978
1979 return r;
1980}
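/*
 * Illustrative sketch (not part of the original file): userspace reaches
 * the two handlers above through the KVM_GET_ONE_REG/KVM_SET_ONE_REG
 * ioctls on a vcpu fd; unknown register ids yield -EINVAL. Reading the
 * CPU timer, with "vcpu_fd" an assumed open vcpu file descriptor:
 *
 *	__u64 cpu_timer;
 *	struct kvm_one_reg reg = {
 *		.id   = KVM_REG_S390_CPU_TIMER,
 *		.addr = (__u64)&cpu_timer,
 *	};
 *	ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);
 */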
b6d33834 1981
b0c632db
HC
1982static int kvm_arch_vcpu_ioctl_initial_reset(struct kvm_vcpu *vcpu)
1983{
b0c632db 1984 kvm_s390_vcpu_initial_reset(vcpu);
b0c632db
HC
1985 return 0;
1986}
1987
1988int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
1989{
5a32c1af 1990 memcpy(&vcpu->run->s.regs.gprs, &regs->gprs, sizeof(regs->gprs));
b0c632db
HC
1991 return 0;
1992}
1993
1994int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
1995{
5a32c1af 1996 memcpy(&regs->gprs, &vcpu->run->s.regs.gprs, sizeof(regs->gprs));
b0c632db
HC
1997 return 0;
1998}
1999
2000int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
2001 struct kvm_sregs *sregs)
2002{
59674c1a 2003 memcpy(&vcpu->run->s.regs.acrs, &sregs->acrs, sizeof(sregs->acrs));
b0c632db 2004 memcpy(&vcpu->arch.sie_block->gcr, &sregs->crs, sizeof(sregs->crs));
59674c1a 2005 restore_access_regs(vcpu->run->s.regs.acrs);
b0c632db
HC
2006 return 0;
2007}
2008
2009int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
2010 struct kvm_sregs *sregs)
2011{
59674c1a 2012 memcpy(&sregs->acrs, &vcpu->run->s.regs.acrs, sizeof(sregs->acrs));
b0c632db 2013 memcpy(&sregs->crs, &vcpu->arch.sie_block->gcr, sizeof(sregs->crs));
b0c632db
HC
2014 return 0;
2015}
2016
2017int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
2018{
9abc2a08
DH
2019 /* make sure the new values will be lazily loaded */
2020 save_fpu_regs();
4725c860
MS
2021 if (test_fp_ctl(fpu->fpc))
2022 return -EINVAL;
9abc2a08
DH
2023 current->thread.fpu.fpc = fpu->fpc;
2024 if (MACHINE_HAS_VX)
2025 convert_fp_to_vx(current->thread.fpu.vxrs, (freg_t *)fpu->fprs);
2026 else
2027 memcpy(current->thread.fpu.fprs, &fpu->fprs, sizeof(fpu->fprs));
b0c632db
HC
2028 return 0;
2029}
2030
2031int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
2032{
9abc2a08
DH
2033 /* make sure we have the latest values */
2034 save_fpu_regs();
2035 if (MACHINE_HAS_VX)
2036 convert_vx_to_fp((freg_t *)fpu->fprs, current->thread.fpu.vxrs);
2037 else
2038 memcpy(fpu->fprs, current->thread.fpu.fprs, sizeof(fpu->fprs));
2039 fpu->fpc = current->thread.fpu.fpc;
b0c632db
HC
2040 return 0;
2041}
2042
2043static int kvm_arch_vcpu_ioctl_set_initial_psw(struct kvm_vcpu *vcpu, psw_t psw)
2044{
2045 int rc = 0;
2046
7a42fdc2 2047 if (!is_vcpu_stopped(vcpu))
b0c632db 2048 rc = -EBUSY;
d7b0b5eb
CO
2049 else {
2050 vcpu->run->psw_mask = psw.mask;
2051 vcpu->run->psw_addr = psw.addr;
2052 }
b0c632db
HC
2053 return rc;
2054}
2055
2056int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
2057 struct kvm_translation *tr)
2058{
2059 return -EINVAL; /* not implemented yet */
2060}
2061
27291e21
DH
2062#define VALID_GUESTDBG_FLAGS (KVM_GUESTDBG_SINGLESTEP | \
2063 KVM_GUESTDBG_USE_HW_BP | \
2064 KVM_GUESTDBG_ENABLE)
2065
d0bfb940
JK
2066int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
2067 struct kvm_guest_debug *dbg)
b0c632db 2068{
27291e21
DH
2069 int rc = 0;
2070
2071 vcpu->guest_debug = 0;
2072 kvm_s390_clear_bp_data(vcpu);
2073
2de3bfc2 2074 if (dbg->control & ~VALID_GUESTDBG_FLAGS)
27291e21
DH
2075 return -EINVAL;
2076
2077 if (dbg->control & KVM_GUESTDBG_ENABLE) {
2078 vcpu->guest_debug = dbg->control;
2079 /* enforce guest PER */
805de8f4 2080 atomic_or(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags);
27291e21
DH
2081
2082 if (dbg->control & KVM_GUESTDBG_USE_HW_BP)
2083 rc = kvm_s390_import_bp_data(vcpu, dbg);
2084 } else {
805de8f4 2085 atomic_andnot(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags);
27291e21
DH
2086 vcpu->arch.guestdbg.last_bp = 0;
2087 }
2088
2089 if (rc) {
2090 vcpu->guest_debug = 0;
2091 kvm_s390_clear_bp_data(vcpu);
805de8f4 2092 atomic_andnot(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags);
27291e21
DH
2093 }
2094
2095 return rc;
b0c632db
HC
2096}
2097
62d9f0db
MT
2098int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
2099 struct kvm_mp_state *mp_state)
2100{
6352e4d2
DH
2101 /* CHECK_STOP and LOAD are not supported yet */
2102 return is_vcpu_stopped(vcpu) ? KVM_MP_STATE_STOPPED :
2103 KVM_MP_STATE_OPERATING;
62d9f0db
MT
2104}
2105
2106int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
2107 struct kvm_mp_state *mp_state)
2108{
6352e4d2
DH
2109 int rc = 0;
2110
2111 /* user space knows about this interface - let it control the state */
2112 vcpu->kvm->arch.user_cpu_state_ctrl = 1;
2113
2114 switch (mp_state->mp_state) {
2115 case KVM_MP_STATE_STOPPED:
2116 kvm_s390_vcpu_stop(vcpu);
2117 break;
2118 case KVM_MP_STATE_OPERATING:
2119 kvm_s390_vcpu_start(vcpu);
2120 break;
2121 case KVM_MP_STATE_LOAD:
2122 case KVM_MP_STATE_CHECK_STOP:
2123 /* fall through - CHECK_STOP and LOAD are not supported yet */
2124 default:
2125 rc = -ENXIO;
2126 }
2127
2128 return rc;
62d9f0db
MT
2129}
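/*
 * Illustrative sketch (not part of the original file): userspace stops a
 * vcpu through the KVM_SET_MP_STATE ioctl, which lands in the handler
 * above ("vcpu_fd" is an assumed open vcpu file descriptor):
 *
 *	struct kvm_mp_state state = { .mp_state = KVM_MP_STATE_STOPPED };
 *	ioctl(vcpu_fd, KVM_SET_MP_STATE, &state);
 *
 * Note that the first such call switches the VM to user controlled cpu
 * state (user_cpu_state_ctrl above) for its whole lifetime.
 */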
2130
8ad35755
DH
2131static bool ibs_enabled(struct kvm_vcpu *vcpu)
2132{
2133 return atomic_read(&vcpu->arch.sie_block->cpuflags) & CPUSTAT_IBS;
2134}
2135
2c70fe44
CB
2136static int kvm_s390_handle_requests(struct kvm_vcpu *vcpu)
2137{
8ad35755 2138retry:
8e236546 2139 kvm_s390_vcpu_request_handled(vcpu);
586b7ccd
CB
2140 if (!vcpu->requests)
2141 return 0;
2c70fe44
CB
2142 /*
2143 * We use MMU_RELOAD just to re-arm the ipte notifier for the
2144 * guest prefix page. gmap_ipte_notify will wait on the ptl lock.
2145 * This ensures that the ipte instruction for this request has
2146 * already finished. We might race against a second unmapper that
2147 * wants to set the blocking bit. Let's just retry the request loop.
2148 */
8ad35755 2149 if (kvm_check_request(KVM_REQ_MMU_RELOAD, vcpu)) {
2c70fe44
CB
2150 int rc;
2151 rc = gmap_ipte_notify(vcpu->arch.gmap,
fda902cb 2152 kvm_s390_get_prefix(vcpu),
2c70fe44
CB
2153 PAGE_SIZE * 2);
2154 if (rc)
2155 return rc;
8ad35755 2156 goto retry;
2c70fe44 2157 }
8ad35755 2158
d3d692c8
DH
2159 if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu)) {
2160 vcpu->arch.sie_block->ihcpu = 0xffff;
2161 goto retry;
2162 }
2163
8ad35755
DH
2164 if (kvm_check_request(KVM_REQ_ENABLE_IBS, vcpu)) {
2165 if (!ibs_enabled(vcpu)) {
2166 trace_kvm_s390_enable_disable_ibs(vcpu->vcpu_id, 1);
805de8f4 2167 atomic_or(CPUSTAT_IBS,
8ad35755
DH
2168 &vcpu->arch.sie_block->cpuflags);
2169 }
2170 goto retry;
2c70fe44 2171 }
8ad35755
DH
2172
2173 if (kvm_check_request(KVM_REQ_DISABLE_IBS, vcpu)) {
2174 if (ibs_enabled(vcpu)) {
2175 trace_kvm_s390_enable_disable_ibs(vcpu->vcpu_id, 0);
805de8f4 2176 atomic_andnot(CPUSTAT_IBS,
8ad35755
DH
2177 &vcpu->arch.sie_block->cpuflags);
2178 }
2179 goto retry;
2180 }
2181
0759d068
DH
2182 /* nothing to do, just clear the request */
2183 clear_bit(KVM_REQ_UNHALT, &vcpu->requests);
2184
2c70fe44
CB
2185 return 0;
2186}
2187
25ed1675
DH
2188void kvm_s390_set_tod_clock(struct kvm *kvm, u64 tod)
2189{
2190 struct kvm_vcpu *vcpu;
2191 int i;
2192
2193 mutex_lock(&kvm->lock);
2194 preempt_disable();
2195 kvm->arch.epoch = tod - get_tod_clock();
2196 kvm_s390_vcpu_block_all(kvm);
2197 kvm_for_each_vcpu(i, vcpu, kvm)
2198 vcpu->arch.sie_block->epoch = kvm->arch.epoch;
2199 kvm_s390_vcpu_unblock_all(kvm);
2200 preempt_enable();
2201 mutex_unlock(&kvm->lock);
2202}
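/*
 * Illustrative note (not part of the original file): the SIE epoch is the
 * delta the hardware adds to the host TOD clock while the guest runs, so
 * after the assignment above a guest store-clock observes roughly
 *
 *	guest_tod = get_tod_clock() + kvm->arch.epoch;
 *
 * i.e. the requested "tod" value plus whatever time has passed since.
 */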
2203
fa576c58
TH
2204/**
2205 * kvm_arch_fault_in_page - fault-in guest page if necessary
2206 * @vcpu: The corresponding virtual cpu
2207 * @gpa: Guest physical address
2208 * @writable: Whether the page should be writable or not
2209 *
2210 * Make sure that a guest page has been faulted-in on the host.
2211 *
2212 * Return: Zero on success, negative error code otherwise.
2213 */
2214long kvm_arch_fault_in_page(struct kvm_vcpu *vcpu, gpa_t gpa, int writable)
24eb3a82 2215{
527e30b4
MS
2216 return gmap_fault(vcpu->arch.gmap, gpa,
2217 writable ? FAULT_FLAG_WRITE : 0);
24eb3a82
DD
2218}
2219
3c038e6b
DD
2220static void __kvm_inject_pfault_token(struct kvm_vcpu *vcpu, bool start_token,
2221 unsigned long token)
2222{
2223 struct kvm_s390_interrupt inti;
383d0b05 2224 struct kvm_s390_irq irq;
3c038e6b
DD
2225
2226 if (start_token) {
383d0b05
JF
2227 irq.u.ext.ext_params2 = token;
2228 irq.type = KVM_S390_INT_PFAULT_INIT;
2229 WARN_ON_ONCE(kvm_s390_inject_vcpu(vcpu, &irq));
3c038e6b
DD
2230 } else {
2231 inti.type = KVM_S390_INT_PFAULT_DONE;
383d0b05 2232 inti.parm64 = token;
3c038e6b
DD
2233 WARN_ON_ONCE(kvm_s390_inject_vm(vcpu->kvm, &inti));
2234 }
2235}
2236
2237void kvm_arch_async_page_not_present(struct kvm_vcpu *vcpu,
2238 struct kvm_async_pf *work)
2239{
2240 trace_kvm_s390_pfault_init(vcpu, work->arch.pfault_token);
2241 __kvm_inject_pfault_token(vcpu, true, work->arch.pfault_token);
2242}
2243
2244void kvm_arch_async_page_present(struct kvm_vcpu *vcpu,
2245 struct kvm_async_pf *work)
2246{
2247 trace_kvm_s390_pfault_done(vcpu, work->arch.pfault_token);
2248 __kvm_inject_pfault_token(vcpu, false, work->arch.pfault_token);
2249}
2250
2251void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu,
2252 struct kvm_async_pf *work)
2253{
2254 /* s390 will always inject the page directly */
2255}
2256
2257bool kvm_arch_can_inject_async_page_present(struct kvm_vcpu *vcpu)
2258{
2259 /*
2260 * s390 will always inject the page directly,
2261 * but we still want check_async_completion to clean up
2262 */
2263 return true;
2264}
2265
2266static int kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu)
2267{
2268 hva_t hva;
2269 struct kvm_arch_async_pf arch;
2270 int rc;
2271
2272 if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
2273 return 0;
2274 if ((vcpu->arch.sie_block->gpsw.mask & vcpu->arch.pfault_select) !=
2275 vcpu->arch.pfault_compare)
2276 return 0;
2277 if (psw_extint_disabled(vcpu))
2278 return 0;
9a022067 2279 if (kvm_s390_vcpu_has_irq(vcpu, 0))
3c038e6b
DD
2280 return 0;
2281 if (!(vcpu->arch.sie_block->gcr[0] & 0x200ul))
2282 return 0;
2283 if (!vcpu->arch.gmap->pfault_enabled)
2284 return 0;
2285
81480cc1
HC
2286 hva = gfn_to_hva(vcpu->kvm, gpa_to_gfn(current->thread.gmap_addr));
2287 hva += current->thread.gmap_addr & ~PAGE_MASK;
2288 if (read_guest_real(vcpu, vcpu->arch.pfault_token, &arch.pfault_token, 8))
3c038e6b
DD
2289 return 0;
2290
2291 rc = kvm_setup_async_pf(vcpu, current->thread.gmap_addr, hva, &arch);
2292 return rc;
2293}
2294
3fb4c40f 2295static int vcpu_pre_run(struct kvm_vcpu *vcpu)
b0c632db 2296{
3fb4c40f 2297 int rc, cpuflags;
e168bf8d 2298
3c038e6b
DD
2299 /*
2300 * On s390 notifications for arriving pages will be delivered directly
2301 * to the guest, but the housekeeping for completed pfaults is
2302 * handled outside the worker.
2303 */
2304 kvm_check_async_pf_completion(vcpu);
2305
7ec7c8c7
CB
2306 vcpu->arch.sie_block->gg14 = vcpu->run->s.regs.gprs[14];
2307 vcpu->arch.sie_block->gg15 = vcpu->run->s.regs.gprs[15];
b0c632db
HC
2308
2309 if (need_resched())
2310 schedule();
2311
d3a73acb 2312 if (test_cpu_flag(CIF_MCCK_PENDING))
71cde587
CB
2313 s390_handle_mcck();
2314
79395031
JF
2315 if (!kvm_is_ucontrol(vcpu->kvm)) {
2316 rc = kvm_s390_deliver_pending_interrupts(vcpu);
2317 if (rc)
2318 return rc;
2319 }
0ff31867 2320
2c70fe44
CB
2321 rc = kvm_s390_handle_requests(vcpu);
2322 if (rc)
2323 return rc;
2324
27291e21
DH
2325 if (guestdbg_enabled(vcpu)) {
2326 kvm_s390_backup_guest_per_regs(vcpu);
2327 kvm_s390_patch_guest_per_regs(vcpu);
2328 }
2329
b0c632db 2330 vcpu->arch.sie_block->icptcode = 0;
3fb4c40f
TH
2331 cpuflags = atomic_read(&vcpu->arch.sie_block->cpuflags);
2332 VCPU_EVENT(vcpu, 6, "entering sie flags %x", cpuflags);
2333 trace_kvm_s390_sie_enter(vcpu, cpuflags);
2b29a9fd 2334
3fb4c40f
TH
2335 return 0;
2336}
2337
492d8642
TH
2338static int vcpu_post_run_fault_in_sie(struct kvm_vcpu *vcpu)
2339{
56317920
DH
2340 struct kvm_s390_pgm_info pgm_info = {
2341 .code = PGM_ADDRESSING,
2342 };
2343 u8 opcode, ilen;
492d8642
TH
2344 int rc;
2345
2346 VCPU_EVENT(vcpu, 3, "%s", "fault in sie instruction");
2347 trace_kvm_s390_sie_fault(vcpu);
2348
2349 /*
2350 * We want to inject an addressing exception, which is defined as a
2351 * suppressing or terminating exception. However, since we came here
2352 * by a DAT access exception, the PSW still points to the faulting
2353 * instruction since DAT exceptions are nullifying. So we've got
2354 * to look up the current opcode to get the length of the instruction
2355 * to be able to forward the PSW.
2356 */
65977322 2357 rc = read_guest_instr(vcpu, &opcode, 1);
56317920 2358 ilen = insn_length(opcode);
9b0d721a
DH
2359 if (rc < 0) {
2360 return rc;
2361 } else if (rc) {
2362 /* Instruction-Fetching Exceptions - we can't detect the ilen.
2363 * Forward the PSW by an arbitrary ilc; injection will take care of
2364 * nullification if necessary.
2365 */
2366 pgm_info = vcpu->arch.pgm;
2367 ilen = 4;
2368 }
56317920
DH
2369 pgm_info.flags = ilen | KVM_S390_PGM_FLAGS_ILC_VALID;
2370 kvm_s390_forward_psw(vcpu, ilen);
2371 return kvm_s390_inject_prog_irq(vcpu, &pgm_info);
492d8642
TH
2372}
2373
3fb4c40f
TH
2374static int vcpu_post_run(struct kvm_vcpu *vcpu, int exit_reason)
2375{
2b29a9fd
DD
2376 VCPU_EVENT(vcpu, 6, "exit sie icptcode %d",
2377 vcpu->arch.sie_block->icptcode);
2378 trace_kvm_s390_sie_exit(vcpu, vcpu->arch.sie_block->icptcode);
2379
27291e21
DH
2380 if (guestdbg_enabled(vcpu))
2381 kvm_s390_restore_guest_per_regs(vcpu);
2382
7ec7c8c7
CB
2383 vcpu->run->s.regs.gprs[14] = vcpu->arch.sie_block->gg14;
2384 vcpu->run->s.regs.gprs[15] = vcpu->arch.sie_block->gg15;
71f116bf
DH
2385
2386 if (vcpu->arch.sie_block->icptcode > 0) {
2387 int rc = kvm_handle_sie_intercept(vcpu);
2388
2389 if (rc != -EOPNOTSUPP)
2390 return rc;
2391 vcpu->run->exit_reason = KVM_EXIT_S390_SIEIC;
2392 vcpu->run->s390_sieic.icptcode = vcpu->arch.sie_block->icptcode;
2393 vcpu->run->s390_sieic.ipa = vcpu->arch.sie_block->ipa;
2394 vcpu->run->s390_sieic.ipb = vcpu->arch.sie_block->ipb;
2395 return -EREMOTE;
2396 } else if (exit_reason != -EFAULT) {
2397 vcpu->stat.exit_null++;
2398 return 0;
210b1607
TH
2399 } else if (kvm_is_ucontrol(vcpu->kvm)) {
2400 vcpu->run->exit_reason = KVM_EXIT_S390_UCONTROL;
2401 vcpu->run->s390_ucontrol.trans_exc_code =
2402 current->thread.gmap_addr;
2403 vcpu->run->s390_ucontrol.pgm_code = 0x10;
71f116bf 2404 return -EREMOTE;
24eb3a82 2405 } else if (current->thread.gmap_pfault) {
3c038e6b 2406 trace_kvm_s390_major_guest_pfault(vcpu);
24eb3a82 2407 current->thread.gmap_pfault = 0;
71f116bf
DH
2408 if (kvm_arch_setup_async_pf(vcpu))
2409 return 0;
2410 return kvm_arch_fault_in_page(vcpu, current->thread.gmap_addr, 1);
a76ccff6 2411 }
71f116bf 2412 return vcpu_post_run_fault_in_sie(vcpu);
3fb4c40f
TH
2413}
2414
2415static int __vcpu_run(struct kvm_vcpu *vcpu)
2416{
2417 int rc, exit_reason;
2418
800c1065
TH
2419 /*
2420 * We try to hold kvm->srcu during most of vcpu_run (except when
2421 * running the guest), so that memslots (and other stuff) are protected.
2422 */
2423 vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
2424
a76ccff6
TH
2425 do {
2426 rc = vcpu_pre_run(vcpu);
2427 if (rc)
2428 break;
3fb4c40f 2429
800c1065 2430 srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
a76ccff6
TH
2431 /*
2432 * As PF_VCPU will be used in the fault handler, there
2433 * must be no uaccess between guest_enter and guest_exit.
2434 */
0097d12e
CB
2435 local_irq_disable();
2436 __kvm_guest_enter();
db0758b2 2437 __disable_cpu_timer_accounting(vcpu);
0097d12e 2438 local_irq_enable();
a76ccff6
TH
2439 exit_reason = sie64a(vcpu->arch.sie_block,
2440 vcpu->run->s.regs.gprs);
0097d12e 2441 local_irq_disable();
db0758b2 2442 __enable_cpu_timer_accounting(vcpu);
0097d12e
CB
2443 __kvm_guest_exit();
2444 local_irq_enable();
800c1065 2445 vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
a76ccff6
TH
2446
2447 rc = vcpu_post_run(vcpu, exit_reason);
27291e21 2448 } while (!signal_pending(current) && !guestdbg_exit_pending(vcpu) && !rc);
3fb4c40f 2449
800c1065 2450 srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
e168bf8d 2451 return rc;
b0c632db
HC
2452}
2453
b028ee3e
DH
2454static void sync_regs(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
2455{
2456 vcpu->arch.sie_block->gpsw.mask = kvm_run->psw_mask;
2457 vcpu->arch.sie_block->gpsw.addr = kvm_run->psw_addr;
2458 if (kvm_run->kvm_dirty_regs & KVM_SYNC_PREFIX)
2459 kvm_s390_set_prefix(vcpu, kvm_run->s.regs.prefix);
2460 if (kvm_run->kvm_dirty_regs & KVM_SYNC_CRS) {
2461 memcpy(&vcpu->arch.sie_block->gcr, &kvm_run->s.regs.crs, 128);
d3d692c8
DH
2462 /* some control register changes require a tlb flush */
2463 kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
b028ee3e
DH
2464 }
2465 if (kvm_run->kvm_dirty_regs & KVM_SYNC_ARCH0) {
4287f247 2466 kvm_s390_set_cpu_timer(vcpu, kvm_run->s.regs.cputm);
b028ee3e
DH
2467 vcpu->arch.sie_block->ckc = kvm_run->s.regs.ckc;
2468 vcpu->arch.sie_block->todpr = kvm_run->s.regs.todpr;
2469 vcpu->arch.sie_block->pp = kvm_run->s.regs.pp;
2470 vcpu->arch.sie_block->gbea = kvm_run->s.regs.gbea;
2471 }
2472 if (kvm_run->kvm_dirty_regs & KVM_SYNC_PFAULT) {
2473 vcpu->arch.pfault_token = kvm_run->s.regs.pft;
2474 vcpu->arch.pfault_select = kvm_run->s.regs.pfs;
2475 vcpu->arch.pfault_compare = kvm_run->s.regs.pfc;
9fbd8082
DH
2476 if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
2477 kvm_clear_async_pf_completion_queue(vcpu);
b028ee3e
DH
2478 }
2479 kvm_run->kvm_dirty_regs = 0;
2480}
2481
2482static void store_regs(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
2483{
2484 kvm_run->psw_mask = vcpu->arch.sie_block->gpsw.mask;
2485 kvm_run->psw_addr = vcpu->arch.sie_block->gpsw.addr;
2486 kvm_run->s.regs.prefix = kvm_s390_get_prefix(vcpu);
2487 memcpy(&kvm_run->s.regs.crs, &vcpu->arch.sie_block->gcr, 128);
4287f247 2488 kvm_run->s.regs.cputm = kvm_s390_get_cpu_timer(vcpu);
b028ee3e
DH
2489 kvm_run->s.regs.ckc = vcpu->arch.sie_block->ckc;
2490 kvm_run->s.regs.todpr = vcpu->arch.sie_block->todpr;
2491 kvm_run->s.regs.pp = vcpu->arch.sie_block->pp;
2492 kvm_run->s.regs.gbea = vcpu->arch.sie_block->gbea;
2493 kvm_run->s.regs.pft = vcpu->arch.pfault_token;
2494 kvm_run->s.regs.pfs = vcpu->arch.pfault_select;
2495 kvm_run->s.regs.pfc = vcpu->arch.pfault_compare;
2496}
2497
b0c632db
HC
2498int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
2499{
8f2abe6a 2500 int rc;
b0c632db
HC
2501 sigset_t sigsaved;
2502
27291e21
DH
2503 if (guestdbg_exit_pending(vcpu)) {
2504 kvm_s390_prepare_debug_exit(vcpu);
2505 return 0;
2506 }
2507
b0c632db
HC
2508 if (vcpu->sigset_active)
2509 sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);
2510
6352e4d2
DH
2511 if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm)) {
2512 kvm_s390_vcpu_start(vcpu);
2513 } else if (is_vcpu_stopped(vcpu)) {
ea2cdd27 2514 pr_err_ratelimited("can't run stopped vcpu %d\n",
6352e4d2
DH
2515 vcpu->vcpu_id);
2516 return -EINVAL;
2517 }
b0c632db 2518
b028ee3e 2519 sync_regs(vcpu, kvm_run);
db0758b2 2520 enable_cpu_timer_accounting(vcpu);
d7b0b5eb 2521
dab4079d 2522 might_fault();
a76ccff6 2523 rc = __vcpu_run(vcpu);
9ace903d 2524
b1d16c49
CE
2525 if (signal_pending(current) && !rc) {
2526 kvm_run->exit_reason = KVM_EXIT_INTR;
8f2abe6a 2527 rc = -EINTR;
b1d16c49 2528 }
8f2abe6a 2529
27291e21
DH
2530 if (guestdbg_exit_pending(vcpu) && !rc) {
2531 kvm_s390_prepare_debug_exit(vcpu);
2532 rc = 0;
2533 }
2534
8f2abe6a 2535 if (rc == -EREMOTE) {
71f116bf 2536 /* userspace support is needed, kvm_run has been prepared */
8f2abe6a
CB
2537 rc = 0;
2538 }
b0c632db 2539
db0758b2 2540 disable_cpu_timer_accounting(vcpu);
b028ee3e 2541 store_regs(vcpu, kvm_run);
d7b0b5eb 2542
b0c632db
HC
2543 if (vcpu->sigset_active)
2544 sigprocmask(SIG_SETMASK, &sigsaved, NULL);
2545
b0c632db 2546 vcpu->stat.exit_userspace++;
7e8e6ab4 2547 return rc;
b0c632db
HC
2548}
2549
b0c632db
HC
2550/*
2551 * store status at address
2552 * we have two special cases:
2553 * KVM_S390_STORE_STATUS_NOADDR: -> 0x1200 on 64 bit
2554 * KVM_S390_STORE_STATUS_PREFIXED: -> prefix
2555 */
d0bce605 2556int kvm_s390_store_status_unloaded(struct kvm_vcpu *vcpu, unsigned long gpa)
b0c632db 2557{
092670cd 2558 unsigned char archmode = 1;
9abc2a08 2559 freg_t fprs[NUM_FPRS];
fda902cb 2560 unsigned int px;
4287f247 2561 u64 clkcomp, cputm;
d0bce605 2562 int rc;
b0c632db 2563
d9a3a09a 2564 px = kvm_s390_get_prefix(vcpu);
d0bce605
HC
2565 if (gpa == KVM_S390_STORE_STATUS_NOADDR) {
2566 if (write_guest_abs(vcpu, 163, &archmode, 1))
b0c632db 2567 return -EFAULT;
d9a3a09a 2568 gpa = 0;
d0bce605
HC
2569 } else if (gpa == KVM_S390_STORE_STATUS_PREFIXED) {
2570 if (write_guest_real(vcpu, 163, &archmode, 1))
b0c632db 2571 return -EFAULT;
d9a3a09a
MS
2572 gpa = px;
2573 } else
2574 gpa -= __LC_FPREGS_SAVE_AREA;
9abc2a08
DH
2575
2576 /* manually convert vector registers if necessary */
2577 if (MACHINE_HAS_VX) {
9522b37f 2578 convert_vx_to_fp(fprs, (__vector128 *) vcpu->run->s.regs.vrs);
9abc2a08
DH
2579 rc = write_guest_abs(vcpu, gpa + __LC_FPREGS_SAVE_AREA,
2580 fprs, 128);
2581 } else {
2582 rc = write_guest_abs(vcpu, gpa + __LC_FPREGS_SAVE_AREA,
6fd8e67d 2583 vcpu->run->s.regs.fprs, 128);
9abc2a08 2584 }
d9a3a09a 2585 rc |= write_guest_abs(vcpu, gpa + __LC_GPREGS_SAVE_AREA,
d0bce605 2586 vcpu->run->s.regs.gprs, 128);
d9a3a09a 2587 rc |= write_guest_abs(vcpu, gpa + __LC_PSW_SAVE_AREA,
d0bce605 2588 &vcpu->arch.sie_block->gpsw, 16);
d9a3a09a 2589 rc |= write_guest_abs(vcpu, gpa + __LC_PREFIX_SAVE_AREA,
fda902cb 2590 &px, 4);
d9a3a09a 2591 rc |= write_guest_abs(vcpu, gpa + __LC_FP_CREG_SAVE_AREA,
9abc2a08 2592 &vcpu->run->s.regs.fpc, 4);
d9a3a09a 2593 rc |= write_guest_abs(vcpu, gpa + __LC_TOD_PROGREG_SAVE_AREA,
d0bce605 2594 &vcpu->arch.sie_block->todpr, 4);
4287f247 2595 cputm = kvm_s390_get_cpu_timer(vcpu);
d9a3a09a 2596 rc |= write_guest_abs(vcpu, gpa + __LC_CPU_TIMER_SAVE_AREA,
4287f247 2597 &cputm, 8);
178bd789 2598 clkcomp = vcpu->arch.sie_block->ckc >> 8;
d9a3a09a 2599 rc |= write_guest_abs(vcpu, gpa + __LC_CLOCK_COMP_SAVE_AREA,
d0bce605 2600 &clkcomp, 8);
d9a3a09a 2601 rc |= write_guest_abs(vcpu, gpa + __LC_AREGS_SAVE_AREA,
d0bce605 2602 &vcpu->run->s.regs.acrs, 64);
d9a3a09a 2603 rc |= write_guest_abs(vcpu, gpa + __LC_CREGS_SAVE_AREA,
d0bce605
HC
2604 &vcpu->arch.sie_block->gcr, 128);
2605 return rc ? -EFAULT : 0;
b0c632db
HC
2606}
2607
e879892c
TH
2608int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
2609{
2610 /*
2611 * The guest FPRS and ACRS are in the host FPRS/ACRS due to the lazy
2612 * copying in vcpu load/put. Let's update our copies before we save
2613 * them into the save area.
2614 */
d0164ee2 2615 save_fpu_regs();
9abc2a08 2616 vcpu->run->s.regs.fpc = current->thread.fpu.fpc;
e879892c
TH
2617 save_access_regs(vcpu->run->s.regs.acrs);
2618
2619 return kvm_s390_store_status_unloaded(vcpu, addr);
2620}
2621
bc17de7c
EF
2622/*
2623 * store additional status at address
2624 */
2625int kvm_s390_store_adtl_status_unloaded(struct kvm_vcpu *vcpu,
2626 unsigned long gpa)
2627{
2628 /* Only bits 0-53 are used for address formation */
2629 if (!(gpa & ~0x3ff))
2630 return 0;
2631
2632 return write_guest_abs(vcpu, gpa & ~0x3ff,
2633 (void *)&vcpu->run->s.regs.vrs, 512);
2634}
2635
2636int kvm_s390_vcpu_store_adtl_status(struct kvm_vcpu *vcpu, unsigned long addr)
2637{
2638 if (!test_kvm_facility(vcpu->kvm, 129))
2639 return 0;
2640
2641 /*
2642 * The guest VXRS are in the host VXRS due to the lazy
9977e886
HB
2643 * copying in vcpu load/put. We can simply call save_fpu_regs()
2644 * to save the current register state because we are in the
2645 * middle of a load/put cycle.
2646 *
2647 * Let's update our copies before we save it into the save area.
bc17de7c 2648 */
d0164ee2 2649 save_fpu_regs();
bc17de7c
EF
2650
2651 return kvm_s390_store_adtl_status_unloaded(vcpu, addr);
2652}
2653
8ad35755
DH
2654static void __disable_ibs_on_vcpu(struct kvm_vcpu *vcpu)
2655{
2656 kvm_check_request(KVM_REQ_ENABLE_IBS, vcpu);
8e236546 2657 kvm_s390_sync_request(KVM_REQ_DISABLE_IBS, vcpu);
8ad35755
DH
2658}
2659
2660static void __disable_ibs_on_all_vcpus(struct kvm *kvm)
2661{
2662 unsigned int i;
2663 struct kvm_vcpu *vcpu;
2664
2665 kvm_for_each_vcpu(i, vcpu, kvm) {
2666 __disable_ibs_on_vcpu(vcpu);
2667 }
2668}
2669
2670static void __enable_ibs_on_vcpu(struct kvm_vcpu *vcpu)
2671{
2672 kvm_check_request(KVM_REQ_DISABLE_IBS, vcpu);
8e236546 2673 kvm_s390_sync_request(KVM_REQ_ENABLE_IBS, vcpu);
8ad35755
DH
2674}
2675
6852d7b6
DH
2676void kvm_s390_vcpu_start(struct kvm_vcpu *vcpu)
2677{
8ad35755
DH
2678 int i, online_vcpus, started_vcpus = 0;
2679
2680 if (!is_vcpu_stopped(vcpu))
2681 return;
2682
6852d7b6 2683 trace_kvm_s390_vcpu_start_stop(vcpu->vcpu_id, 1);
8ad35755 2684 /* Only one cpu at a time may enter/leave the STOPPED state. */
433b9ee4 2685 spin_lock(&vcpu->kvm->arch.start_stop_lock);
8ad35755
DH
2686 online_vcpus = atomic_read(&vcpu->kvm->online_vcpus);
2687
2688 for (i = 0; i < online_vcpus; i++) {
2689 if (!is_vcpu_stopped(vcpu->kvm->vcpus[i]))
2690 started_vcpus++;
2691 }
2692
2693 if (started_vcpus == 0) {
2694 /* we're the only active VCPU -> speed it up */
2695 __enable_ibs_on_vcpu(vcpu);
2696 } else if (started_vcpus == 1) {
2697 /*
2698 * As we are starting a second VCPU, we have to disable
2699 * the IBS facility on all VCPUs to remove potentially
2700 * outstanding ENABLE requests.
2701 */
2702 __disable_ibs_on_all_vcpus(vcpu->kvm);
2703 }
2704
805de8f4 2705 atomic_andnot(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);
8ad35755
DH
2706 /*
2707 * Another VCPU might have used IBS while we were offline.
2708 * Let's play safe and flush the VCPU at startup.
2709 */
d3d692c8 2710 kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
433b9ee4 2711 spin_unlock(&vcpu->kvm->arch.start_stop_lock);
8ad35755 2712 return;
6852d7b6
DH
2713}
2714
2715void kvm_s390_vcpu_stop(struct kvm_vcpu *vcpu)
2716{
8ad35755
DH
2717 int i, online_vcpus, started_vcpus = 0;
2718 struct kvm_vcpu *started_vcpu = NULL;
2719
2720 if (is_vcpu_stopped(vcpu))
2721 return;
2722
6852d7b6 2723 trace_kvm_s390_vcpu_start_stop(vcpu->vcpu_id, 0);
8ad35755 2724 /* Only one cpu at a time may enter/leave the STOPPED state. */
433b9ee4 2725 spin_lock(&vcpu->kvm->arch.start_stop_lock);
8ad35755
DH
2726 online_vcpus = atomic_read(&vcpu->kvm->online_vcpus);
2727
32f5ff63 2728 /* SIGP STOP and SIGP STOP AND STORE STATUS has been fully processed */
6cddd432 2729 kvm_s390_clear_stop_irq(vcpu);
32f5ff63 2730
805de8f4 2731 atomic_or(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);
8ad35755
DH
2732 __disable_ibs_on_vcpu(vcpu);
2733
2734 for (i = 0; i < online_vcpus; i++) {
2735 if (!is_vcpu_stopped(vcpu->kvm->vcpus[i])) {
2736 started_vcpus++;
2737 started_vcpu = vcpu->kvm->vcpus[i];
2738 }
2739 }
2740
2741 if (started_vcpus == 1) {
2742 /*
2743 * As we only have one VCPU left, we want to enable the
2744 * IBS facility for that VCPU to speed it up.
2745 */
2746 __enable_ibs_on_vcpu(started_vcpu);
2747 }
2748
433b9ee4 2749 spin_unlock(&vcpu->kvm->arch.start_stop_lock);
8ad35755 2750 return;
6852d7b6
DH
2751}
2752
d6712df9
CH
2753static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
2754 struct kvm_enable_cap *cap)
2755{
2756 int r;
2757
2758 if (cap->flags)
2759 return -EINVAL;
2760
2761 switch (cap->cap) {
fa6b7fe9
CH
2762 case KVM_CAP_S390_CSS_SUPPORT:
2763 if (!vcpu->kvm->arch.css_support) {
2764 vcpu->kvm->arch.css_support = 1;
c92ea7b9 2765 VM_EVENT(vcpu->kvm, 3, "%s", "ENABLE: CSS support");
fa6b7fe9
CH
2766 trace_kvm_s390_enable_css(vcpu->kvm);
2767 }
2768 r = 0;
2769 break;
d6712df9
CH
2770 default:
2771 r = -EINVAL;
2772 break;
2773 }
2774 return r;
2775}
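/*
 * Illustrative sketch (not part of the original file): enabling the only
 * per-vcpu capability handled above from userspace ("vcpu_fd" is an
 * assumed open vcpu file descriptor):
 *
 *	struct kvm_enable_cap cap = { .cap = KVM_CAP_S390_CSS_SUPPORT };
 *	ioctl(vcpu_fd, KVM_ENABLE_CAP, &cap);
 */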
2776
41408c28
TH
2777static long kvm_s390_guest_mem_op(struct kvm_vcpu *vcpu,
2778 struct kvm_s390_mem_op *mop)
2779{
2780 void __user *uaddr = (void __user *)mop->buf;
2781 void *tmpbuf = NULL;
2782 int r, srcu_idx;
2783 const u64 supported_flags = KVM_S390_MEMOP_F_INJECT_EXCEPTION
2784 | KVM_S390_MEMOP_F_CHECK_ONLY;
2785
2786 if (mop->flags & ~supported_flags)
2787 return -EINVAL;
2788
2789 if (mop->size > MEM_OP_MAX_SIZE)
2790 return -E2BIG;
2791
2792 if (!(mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY)) {
2793 tmpbuf = vmalloc(mop->size);
2794 if (!tmpbuf)
2795 return -ENOMEM;
2796 }
2797
2798 srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
2799
2800 switch (mop->op) {
2801 case KVM_S390_MEMOP_LOGICAL_READ:
2802 if (mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY) {
92c96321
DH
2803 r = check_gva_range(vcpu, mop->gaddr, mop->ar,
2804 mop->size, GACC_FETCH);
41408c28
TH
2805 break;
2806 }
2807 r = read_guest(vcpu, mop->gaddr, mop->ar, tmpbuf, mop->size);
2808 if (r == 0) {
2809 if (copy_to_user(uaddr, tmpbuf, mop->size))
2810 r = -EFAULT;
2811 }
2812 break;
2813 case KVM_S390_MEMOP_LOGICAL_WRITE:
2814 if (mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY) {
92c96321
DH
2815 r = check_gva_range(vcpu, mop->gaddr, mop->ar,
2816 mop->size, GACC_STORE);
41408c28
TH
2817 break;
2818 }
2819 if (copy_from_user(tmpbuf, uaddr, mop->size)) {
2820 r = -EFAULT;
2821 break;
2822 }
2823 r = write_guest(vcpu, mop->gaddr, mop->ar, tmpbuf, mop->size);
2824 break;
2825 default:
2826 r = -EINVAL;
2827 }
2828
2829 srcu_read_unlock(&vcpu->kvm->srcu, srcu_idx);
2830
2831 if (r > 0 && (mop->flags & KVM_S390_MEMOP_F_INJECT_EXCEPTION) != 0)
2832 kvm_s390_inject_prog_irq(vcpu, &vcpu->arch.pgm);
2833
2834 vfree(tmpbuf);
2835 return r;
2836}
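/*
 * Illustrative sketch (not part of the original file): reading 256 bytes
 * of guest logical memory from userspace via KVM_S390_MEM_OP ("vcpu_fd"
 * is an assumed open vcpu file descriptor). A positive return value is
 * the program exception number of a failed guest access (r > 0 above):
 *
 *	__u8 buf[256];
 *	struct kvm_s390_mem_op op = {
 *		.gaddr = 0x1000,
 *		.size  = sizeof(buf),
 *		.op    = KVM_S390_MEMOP_LOGICAL_READ,
 *		.buf   = (__u64)(unsigned long)buf,
 *		.ar    = 0,
 *	};
 *	ioctl(vcpu_fd, KVM_S390_MEM_OP, &op);
 */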
2837
b0c632db
HC
2838long kvm_arch_vcpu_ioctl(struct file *filp,
2839 unsigned int ioctl, unsigned long arg)
2840{
2841 struct kvm_vcpu *vcpu = filp->private_data;
2842 void __user *argp = (void __user *)arg;
800c1065 2843 int idx;
bc923cc9 2844 long r;
b0c632db 2845
93736624 2846 switch (ioctl) {
47b43c52
JF
2847 case KVM_S390_IRQ: {
2848 struct kvm_s390_irq s390irq;
2849
2850 r = -EFAULT;
2851 if (copy_from_user(&s390irq, argp, sizeof(s390irq)))
2852 break;
2853 r = kvm_s390_inject_vcpu(vcpu, &s390irq);
2854 break;
2855 }
93736624 2856 case KVM_S390_INTERRUPT: {
ba5c1e9b 2857 struct kvm_s390_interrupt s390int;
383d0b05 2858 struct kvm_s390_irq s390irq;
ba5c1e9b 2859
93736624 2860 r = -EFAULT;
ba5c1e9b 2861 if (copy_from_user(&s390int, argp, sizeof(s390int)))
93736624 2862 break;
383d0b05
JF
2863 if (s390int_to_s390irq(&s390int, &s390irq))
2864 return -EINVAL;
2865 r = kvm_s390_inject_vcpu(vcpu, &s390irq);
93736624 2866 break;
ba5c1e9b 2867 }
b0c632db 2868 case KVM_S390_STORE_STATUS:
800c1065 2869 idx = srcu_read_lock(&vcpu->kvm->srcu);
bc923cc9 2870 r = kvm_s390_vcpu_store_status(vcpu, arg);
800c1065 2871 srcu_read_unlock(&vcpu->kvm->srcu, idx);
bc923cc9 2872 break;
b0c632db
HC
2873 case KVM_S390_SET_INITIAL_PSW: {
2874 psw_t psw;
2875
bc923cc9 2876 r = -EFAULT;
b0c632db 2877 if (copy_from_user(&psw, argp, sizeof(psw)))
bc923cc9
AK
2878 break;
2879 r = kvm_arch_vcpu_ioctl_set_initial_psw(vcpu, psw);
2880 break;
b0c632db
HC
2881 }
2882 case KVM_S390_INITIAL_RESET:
bc923cc9
AK
2883 r = kvm_arch_vcpu_ioctl_initial_reset(vcpu);
2884 break;
14eebd91
CO
2885 case KVM_SET_ONE_REG:
2886 case KVM_GET_ONE_REG: {
2887 struct kvm_one_reg reg;
2888 r = -EFAULT;
2889 if (copy_from_user(&reg, argp, sizeof(reg)))
2890 break;
2891 if (ioctl == KVM_SET_ONE_REG)
2892 r = kvm_arch_vcpu_ioctl_set_one_reg(vcpu, &reg);
2893 else
2894 r = kvm_arch_vcpu_ioctl_get_one_reg(vcpu, &reg);
2895 break;
2896 }
27e0393f
CO
2897#ifdef CONFIG_KVM_S390_UCONTROL
2898 case KVM_S390_UCAS_MAP: {
2899 struct kvm_s390_ucas_mapping ucasmap;
2900
2901 if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
2902 r = -EFAULT;
2903 break;
2904 }
2905
2906 if (!kvm_is_ucontrol(vcpu->kvm)) {
2907 r = -EINVAL;
2908 break;
2909 }
2910
2911 r = gmap_map_segment(vcpu->arch.gmap, ucasmap.user_addr,
2912 ucasmap.vcpu_addr, ucasmap.length);
2913 break;
2914 }
2915 case KVM_S390_UCAS_UNMAP: {
2916 struct kvm_s390_ucas_mapping ucasmap;
2917
2918 if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
2919 r = -EFAULT;
2920 break;
2921 }
2922
2923 if (!kvm_is_ucontrol(vcpu->kvm)) {
2924 r = -EINVAL;
2925 break;
2926 }
2927
2928 r = gmap_unmap_segment(vcpu->arch.gmap, ucasmap.vcpu_addr,
2929 ucasmap.length);
2930 break;
2931 }
2932#endif
ccc7910f 2933 case KVM_S390_VCPU_FAULT: {
527e30b4 2934 r = gmap_fault(vcpu->arch.gmap, arg, 0);
ccc7910f
CO
2935 break;
2936 }
d6712df9
CH
2937 case KVM_ENABLE_CAP:
2938 {
2939 struct kvm_enable_cap cap;
2940 r = -EFAULT;
2941 if (copy_from_user(&cap, argp, sizeof(cap)))
2942 break;
2943 r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap);
2944 break;
2945 }
41408c28
TH
2946 case KVM_S390_MEM_OP: {
2947 struct kvm_s390_mem_op mem_op;
2948
2949 if (copy_from_user(&mem_op, argp, sizeof(mem_op)) == 0)
2950 r = kvm_s390_guest_mem_op(vcpu, &mem_op);
2951 else
2952 r = -EFAULT;
2953 break;
2954 }
816c7667
JF
2955 case KVM_S390_SET_IRQ_STATE: {
2956 struct kvm_s390_irq_state irq_state;
2957
2958 r = -EFAULT;
2959 if (copy_from_user(&irq_state, argp, sizeof(irq_state)))
2960 break;
2961 if (irq_state.len > VCPU_IRQS_MAX_BUF ||
2962 irq_state.len == 0 ||
2963 irq_state.len % sizeof(struct kvm_s390_irq) > 0) {
2964 r = -EINVAL;
2965 break;
2966 }
2967 r = kvm_s390_set_irq_state(vcpu,
2968 (void __user *) irq_state.buf,
2969 irq_state.len);
2970 break;
2971 }
2972 case KVM_S390_GET_IRQ_STATE: {
2973 struct kvm_s390_irq_state irq_state;
2974
2975 r = -EFAULT;
2976 if (copy_from_user(&irq_state, argp, sizeof(irq_state)))
2977 break;
2978 if (irq_state.len == 0) {
2979 r = -EINVAL;
2980 break;
2981 }
2982 r = kvm_s390_get_irq_state(vcpu,
2983 (__u8 __user *) irq_state.buf,
2984 irq_state.len);
2985 break;
2986 }
b0c632db 2987 default:
3e6afcf1 2988 r = -ENOTTY;
b0c632db 2989 }
bc923cc9 2990 return r;
b0c632db
HC
2991}
2992
5b1c1493
CO
2993int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
2994{
2995#ifdef CONFIG_KVM_S390_UCONTROL
2996 if ((vmf->pgoff == KVM_S390_SIE_PAGE_OFFSET)
2997 && (kvm_is_ucontrol(vcpu->kvm))) {
2998 vmf->page = virt_to_page(vcpu->arch.sie_block);
2999 get_page(vmf->page);
3000 return 0;
3001 }
3002#endif
3003 return VM_FAULT_SIGBUS;
3004}
3005
5587027c
AK
3006int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
3007 unsigned long npages)
db3fe4eb
TY
3008{
3009 return 0;
3010}
3011
b0c632db 3012/* Section: memory related */
f7784b8e
MT
3013int kvm_arch_prepare_memory_region(struct kvm *kvm,
3014 struct kvm_memory_slot *memslot,
09170a49 3015 const struct kvm_userspace_memory_region *mem,
7b6195a9 3016 enum kvm_mr_change change)
b0c632db 3017{
dd2887e7
NW
3018 /* A few sanity checks. Memory slots have to start and end at a
3019 segment boundary (1MB). The memory in userland may be fragmented
3020 into various different vmas. It is okay to mmap() and munmap()
3021 stuff in this slot after doing this call at any time. */
b0c632db 3022
598841ca 3023 if (mem->userspace_addr & 0xffffful)
b0c632db
HC
3024 return -EINVAL;
3025
598841ca 3026 if (mem->memory_size & 0xffffful)
b0c632db
HC
3027 return -EINVAL;
3028
a3a92c31
DD
3029 if (mem->guest_phys_addr + mem->memory_size > kvm->arch.mem_limit)
3030 return -EINVAL;
3031
f7784b8e
MT
3032 return 0;
3033}
3034
3035void kvm_arch_commit_memory_region(struct kvm *kvm,
09170a49 3036 const struct kvm_userspace_memory_region *mem,
8482644a 3037 const struct kvm_memory_slot *old,
f36f3f28 3038 const struct kvm_memory_slot *new,
8482644a 3039 enum kvm_mr_change change)
f7784b8e 3040{
f7850c92 3041 int rc;
f7784b8e 3042
2cef4deb
CB
3043 /* If the basics of the memslot do not change, we do not want
3044 * to update the gmap. Every update causes several unnecessary
3045 * segment translation exceptions. This is usually handled just
3046 * fine by the normal fault handler + gmap, but it will also
3047 * cause faults on the prefix page of running guest CPUs.
3048 */
3049 if (old->userspace_addr == mem->userspace_addr &&
3050 old->base_gfn * PAGE_SIZE == mem->guest_phys_addr &&
3051 old->npages * PAGE_SIZE == mem->memory_size)
3052 return;
598841ca
CO
3053
3054 rc = gmap_map_segment(kvm->arch.gmap, mem->userspace_addr,
3055 mem->guest_phys_addr, mem->memory_size);
3056 if (rc)
ea2cdd27 3057 pr_warn("failed to commit memory region\n");
598841ca 3058 return;
b0c632db
HC
3059}
3060
60a37709
AY
3061static inline unsigned long nonhyp_mask(int i)
3062{
3063 unsigned int nonhyp_fai = (sclp.hmfai << i * 2) >> 30;
3064
3065 return 0x0000ffffffffffffUL >> (nonhyp_fai << 4);
3066}
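/*
 * Illustrative note (not part of the original file): for facility
 * doubleword i, the two corresponding hmfai bits select how much of that
 * word stays visible to guests: nonhyp_fai == 0 keeps the low 48 bits
 * (0x0000ffffffffffff), 1 keeps 32 bits, 2 keeps 16 bits, and 3 masks
 * the doubleword completely (the mask shifts down to zero).
 */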
3067
3491caf2
CB
3068void kvm_arch_vcpu_block_finish(struct kvm_vcpu *vcpu)
3069{
3070 vcpu->valid_wakeup = false;
3071}
3072
b0c632db
HC
3073static int __init kvm_s390_init(void)
3074{
60a37709
AY
3075 int i;
3076
07197fd0
DH
3077 if (!sclp.has_sief2) {
3078 pr_info("SIE not available\n");
3079 return -ENODEV;
3080 }
3081
60a37709
AY
3082 for (i = 0; i < 16; i++)
3083 kvm_s390_fac_list_mask[i] |=
3084 S390_lowcore.stfle_fac_list[i] & nonhyp_mask(i);
3085
9d8d5786 3086 return kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
b0c632db
HC
3087}
3088
3089static void __exit kvm_s390_exit(void)
3090{
3091 kvm_exit();
3092}
3093
3094module_init(kvm_s390_init);
3095module_exit(kvm_s390_exit);
566af940
CH
3096
3097/*
3098 * Enable autoloading of the kvm module.
3099 * Note that we add the module alias here instead of virt/kvm/kvm_main.c
3100 * since x86 takes a different approach.
3101 */
3102#include <linux/miscdevice.h>
3103MODULE_ALIAS_MISCDEV(KVM_MINOR);
3104MODULE_ALIAS("devname:kvm");