/*
 * hosting zSeries kernel virtual machines
 *
 * Copyright IBM Corp. 2008, 2009
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (version 2 only)
 * as published by the Free Software Foundation.
 *
 *    Author(s): Carsten Otte <cotte@de.ibm.com>
 *               Christian Borntraeger <borntraeger@de.ibm.com>
 *               Heiko Carstens <heiko.carstens@de.ibm.com>
 *               Christian Ehrhardt <ehrhardt@de.ibm.com>
 *               Jason J. Herne <jjherne@us.ibm.com>
 */

#include <linux/compiler.h>
#include <linux/err.h>
#include <linux/fs.h>
#include <linux/hrtimer.h>
#include <linux/init.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/timer.h>
#include <linux/vmalloc.h>
#include <asm/asm-offsets.h>
#include <asm/lowcore.h>
#include <asm/pgtable.h>
#include <asm/nmi.h>
#include <asm/switch_to.h>
#include <asm/isc.h>
#include <asm/sclp.h>
#include "kvm-s390.h"
#include "gaccess.h"

#define KMSG_COMPONENT "kvm-s390"
#undef pr_fmt
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#define CREATE_TRACE_POINTS
#include "trace.h"
#include "trace-s390.h"

#define MEM_OP_MAX_SIZE 65536	/* Maximum transfer size for KVM_S390_MEM_OP */
#define LOCAL_IRQS 32
#define VCPU_IRQS_MAX_BUF (sizeof(struct kvm_s390_irq) * \
			   (KVM_MAX_VCPUS + LOCAL_IRQS))

#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU

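/* VCPU event counters, exported to userspace via debugfs */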
struct kvm_stats_debugfs_item debugfs_entries[] = {
	{ "userspace_handled", VCPU_STAT(exit_userspace) },
	{ "exit_null", VCPU_STAT(exit_null) },
	{ "exit_validity", VCPU_STAT(exit_validity) },
	{ "exit_stop_request", VCPU_STAT(exit_stop_request) },
	{ "exit_external_request", VCPU_STAT(exit_external_request) },
	{ "exit_external_interrupt", VCPU_STAT(exit_external_interrupt) },
	{ "exit_instruction", VCPU_STAT(exit_instruction) },
	{ "exit_program_interruption", VCPU_STAT(exit_program_interruption) },
	{ "exit_instr_and_program_int", VCPU_STAT(exit_instr_and_program) },
	{ "halt_successful_poll", VCPU_STAT(halt_successful_poll) },
	{ "halt_wakeup", VCPU_STAT(halt_wakeup) },
	{ "instruction_lctlg", VCPU_STAT(instruction_lctlg) },
	{ "instruction_lctl", VCPU_STAT(instruction_lctl) },
	{ "instruction_stctl", VCPU_STAT(instruction_stctl) },
	{ "instruction_stctg", VCPU_STAT(instruction_stctg) },
	{ "deliver_emergency_signal", VCPU_STAT(deliver_emergency_signal) },
	{ "deliver_external_call", VCPU_STAT(deliver_external_call) },
	{ "deliver_service_signal", VCPU_STAT(deliver_service_signal) },
	{ "deliver_virtio_interrupt", VCPU_STAT(deliver_virtio_interrupt) },
	{ "deliver_stop_signal", VCPU_STAT(deliver_stop_signal) },
	{ "deliver_prefix_signal", VCPU_STAT(deliver_prefix_signal) },
	{ "deliver_restart_signal", VCPU_STAT(deliver_restart_signal) },
	{ "deliver_program_interruption", VCPU_STAT(deliver_program_int) },
	{ "exit_wait_state", VCPU_STAT(exit_wait_state) },
	{ "instruction_pfmf", VCPU_STAT(instruction_pfmf) },
	{ "instruction_stidp", VCPU_STAT(instruction_stidp) },
	{ "instruction_spx", VCPU_STAT(instruction_spx) },
	{ "instruction_stpx", VCPU_STAT(instruction_stpx) },
	{ "instruction_stap", VCPU_STAT(instruction_stap) },
	{ "instruction_storage_key", VCPU_STAT(instruction_storage_key) },
	{ "instruction_ipte_interlock", VCPU_STAT(instruction_ipte_interlock) },
	{ "instruction_stsch", VCPU_STAT(instruction_stsch) },
	{ "instruction_chsc", VCPU_STAT(instruction_chsc) },
	{ "instruction_essa", VCPU_STAT(instruction_essa) },
	{ "instruction_stsi", VCPU_STAT(instruction_stsi) },
	{ "instruction_stfl", VCPU_STAT(instruction_stfl) },
	{ "instruction_tprot", VCPU_STAT(instruction_tprot) },
	{ "instruction_sigp_sense", VCPU_STAT(instruction_sigp_sense) },
	{ "instruction_sigp_sense_running", VCPU_STAT(instruction_sigp_sense_running) },
	{ "instruction_sigp_external_call", VCPU_STAT(instruction_sigp_external_call) },
	{ "instruction_sigp_emergency", VCPU_STAT(instruction_sigp_emergency) },
	{ "instruction_sigp_cond_emergency", VCPU_STAT(instruction_sigp_cond_emergency) },
	{ "instruction_sigp_start", VCPU_STAT(instruction_sigp_start) },
	{ "instruction_sigp_stop", VCPU_STAT(instruction_sigp_stop) },
	{ "instruction_sigp_stop_store_status", VCPU_STAT(instruction_sigp_stop_store_status) },
	{ "instruction_sigp_store_status", VCPU_STAT(instruction_sigp_store_status) },
	{ "instruction_sigp_store_adtl_status", VCPU_STAT(instruction_sigp_store_adtl_status) },
	{ "instruction_sigp_set_arch", VCPU_STAT(instruction_sigp_arch) },
	{ "instruction_sigp_set_prefix", VCPU_STAT(instruction_sigp_prefix) },
	{ "instruction_sigp_restart", VCPU_STAT(instruction_sigp_restart) },
	{ "instruction_sigp_cpu_reset", VCPU_STAT(instruction_sigp_cpu_reset) },
	{ "instruction_sigp_init_cpu_reset", VCPU_STAT(instruction_sigp_init_cpu_reset) },
	{ "instruction_sigp_unknown", VCPU_STAT(instruction_sigp_unknown) },
	{ "diagnose_10", VCPU_STAT(diagnose_10) },
	{ "diagnose_44", VCPU_STAT(diagnose_44) },
	{ "diagnose_9c", VCPU_STAT(diagnose_9c) },
	{ NULL }
};

/* upper facilities limit for kvm */
unsigned long kvm_s390_fac_list_mask[] = {
	0xffe6fffbfcfdfc40UL,
	0x005e800000000000UL,
};

unsigned long kvm_s390_fac_list_mask_size(void)
{
	BUILD_BUG_ON(ARRAY_SIZE(kvm_s390_fac_list_mask) > S390_ARCH_FAC_MASK_SIZE_U64);
	return ARRAY_SIZE(kvm_s390_fac_list_mask);
}

static struct gmap_notifier gmap_notifier;

/* Section: not file related */
int kvm_arch_hardware_enable(void)
{
	/* every s390 is virtualization enabled ;-) */
	return 0;
}

static void kvm_gmap_notifier(struct gmap *gmap, unsigned long address);

int kvm_arch_hardware_setup(void)
{
	gmap_notifier.notifier_call = kvm_gmap_notifier;
	gmap_register_ipte_notifier(&gmap_notifier);
	return 0;
}

void kvm_arch_hardware_unsetup(void)
{
	gmap_unregister_ipte_notifier(&gmap_notifier);
}

int kvm_arch_init(void *opaque)
{
	/* Register floating interrupt controller interface. */
	return kvm_register_device_ops(&kvm_flic_ops, KVM_DEV_TYPE_FLIC);
}

/* Section: device related */
long kvm_arch_dev_ioctl(struct file *filp,
			unsigned int ioctl, unsigned long arg)
{
	if (ioctl == KVM_S390_ENABLE_SIE)
		return s390_enable_sie();
	return -EINVAL;
}

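/*
 * Report which optional KVM capabilities this implementation backs;
 * returns 1 (or an implementation limit) if supported, 0 otherwise.
 */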
int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
{
	int r;

	switch (ext) {
	case KVM_CAP_S390_PSW:
	case KVM_CAP_S390_GMAP:
	case KVM_CAP_SYNC_MMU:
#ifdef CONFIG_KVM_S390_UCONTROL
	case KVM_CAP_S390_UCONTROL:
#endif
	case KVM_CAP_ASYNC_PF:
	case KVM_CAP_SYNC_REGS:
	case KVM_CAP_ONE_REG:
	case KVM_CAP_ENABLE_CAP:
	case KVM_CAP_S390_CSS_SUPPORT:
	case KVM_CAP_IOEVENTFD:
	case KVM_CAP_DEVICE_CTRL:
	case KVM_CAP_ENABLE_CAP_VM:
	case KVM_CAP_S390_IRQCHIP:
	case KVM_CAP_VM_ATTRIBUTES:
	case KVM_CAP_MP_STATE:
	case KVM_CAP_S390_INJECT_IRQ:
	case KVM_CAP_S390_USER_SIGP:
	case KVM_CAP_S390_USER_STSI:
	case KVM_CAP_S390_SKEYS:
	case KVM_CAP_S390_IRQ_STATE:
		r = 1;
		break;
	case KVM_CAP_S390_MEM_OP:
		r = MEM_OP_MAX_SIZE;
		break;
	case KVM_CAP_NR_VCPUS:
	case KVM_CAP_MAX_VCPUS:
		r = KVM_MAX_VCPUS;
		break;
	case KVM_CAP_NR_MEMSLOTS:
		r = KVM_USER_MEM_SLOTS;
		break;
	case KVM_CAP_S390_COW:
		r = MACHINE_HAS_ESOP;
		break;
	case KVM_CAP_S390_VECTOR_REGISTERS:
		r = MACHINE_HAS_VX;
		break;
	default:
		r = 0;
	}
	return r;
}

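/*
 * Walk every page of a memslot and propagate the dirty bits collected
 * in the gmap into the KVM dirty bitmap.
 */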
static void kvm_s390_sync_dirty_log(struct kvm *kvm,
				    struct kvm_memory_slot *memslot)
{
	gfn_t cur_gfn, last_gfn;
	unsigned long address;
	struct gmap *gmap = kvm->arch.gmap;

	down_read(&gmap->mm->mmap_sem);
	/* Loop over all guest pages */
	last_gfn = memslot->base_gfn + memslot->npages;
	for (cur_gfn = memslot->base_gfn; cur_gfn <= last_gfn; cur_gfn++) {
		address = gfn_to_hva_memslot(memslot, cur_gfn);

		if (gmap_test_and_clear_dirty(address, gmap))
			mark_page_dirty(kvm, cur_gfn);
	}
	up_read(&gmap->mm->mmap_sem);
}

/* Section: vm related */
/*
 * Get (and clear) the dirty memory log for a memory slot.
 */
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
			       struct kvm_dirty_log *log)
{
	int r;
	unsigned long n;
	struct kvm_memslots *slots;
	struct kvm_memory_slot *memslot;
	int is_dirty = 0;

	mutex_lock(&kvm->slots_lock);

	r = -EINVAL;
	if (log->slot >= KVM_USER_MEM_SLOTS)
		goto out;

	slots = kvm_memslots(kvm);
	memslot = id_to_memslot(slots, log->slot);
	r = -ENOENT;
	if (!memslot->dirty_bitmap)
		goto out;

	kvm_s390_sync_dirty_log(kvm, memslot);
	r = kvm_get_dirty_log(kvm, log, &is_dirty);
	if (r)
		goto out;

	/* Clear the dirty log */
	if (is_dirty) {
		n = kvm_dirty_bitmap_bytes(memslot);
		memset(memslot->dirty_bitmap, 0, n);
	}
	r = 0;
out:
	mutex_unlock(&kvm->slots_lock);
	return r;
}

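/* Handle VM-wide KVM_ENABLE_CAP requests from userspace */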
static int kvm_vm_ioctl_enable_cap(struct kvm *kvm, struct kvm_enable_cap *cap)
{
	int r;

	if (cap->flags)
		return -EINVAL;

	switch (cap->cap) {
	case KVM_CAP_S390_IRQCHIP:
		kvm->arch.use_irqchip = 1;
		r = 0;
		break;
	case KVM_CAP_S390_USER_SIGP:
		kvm->arch.user_sigp = 1;
		r = 0;
		break;
	case KVM_CAP_S390_VECTOR_REGISTERS:
		if (MACHINE_HAS_VX) {
			set_kvm_facility(kvm->arch.model.fac->mask, 129);
			set_kvm_facility(kvm->arch.model.fac->list, 129);
			r = 0;
		} else
			r = -EINVAL;
		break;
	case KVM_CAP_S390_USER_STSI:
		kvm->arch.user_stsi = 1;
		r = 0;
		break;
	default:
		r = -EINVAL;
		break;
	}
	return r;
}

static int kvm_s390_get_mem_control(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	switch (attr->attr) {
	case KVM_S390_VM_MEM_LIMIT_SIZE:
		ret = 0;
		if (put_user(kvm->arch.gmap->asce_end, (u64 __user *)attr->addr))
			ret = -EFAULT;
		break;
	default:
		ret = -ENXIO;
		break;
	}
	return ret;
}

static int kvm_s390_set_mem_control(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;
	unsigned int idx;

	switch (attr->attr) {
	case KVM_S390_VM_MEM_ENABLE_CMMA:
		ret = -EBUSY;
		mutex_lock(&kvm->lock);
		if (atomic_read(&kvm->online_vcpus) == 0) {
			kvm->arch.use_cmma = 1;
			ret = 0;
		}
		mutex_unlock(&kvm->lock);
		break;
	case KVM_S390_VM_MEM_CLR_CMMA:
		mutex_lock(&kvm->lock);
		idx = srcu_read_lock(&kvm->srcu);
		s390_reset_cmma(kvm->arch.gmap->mm);
		srcu_read_unlock(&kvm->srcu, idx);
		mutex_unlock(&kvm->lock);
		ret = 0;
		break;
	case KVM_S390_VM_MEM_LIMIT_SIZE: {
		unsigned long new_limit;

		if (kvm_is_ucontrol(kvm))
			return -EINVAL;

		if (get_user(new_limit, (u64 __user *)attr->addr))
			return -EFAULT;

		if (new_limit > kvm->arch.gmap->asce_end)
			return -E2BIG;

		ret = -EBUSY;
		mutex_lock(&kvm->lock);
		if (atomic_read(&kvm->online_vcpus) == 0) {
			/* gmap_alloc will round the limit up */
			struct gmap *new = gmap_alloc(current->mm, new_limit);

			if (!new) {
				ret = -ENOMEM;
			} else {
				gmap_free(kvm->arch.gmap);
				new->private = kvm;
				kvm->arch.gmap = new;
				ret = 0;
			}
		}
		mutex_unlock(&kvm->lock);
		break;
	}
	default:
		ret = -ENXIO;
		break;
	}
	return ret;
}

static void kvm_s390_vcpu_crypto_setup(struct kvm_vcpu *vcpu);

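/*
 * Toggle AES/DEA key wrapping for the whole VM. Enabling generates fresh
 * random wrapping key masks; every VCPU is then kicked out of SIE so the
 * updated CRYCB state takes effect on reentry.
 */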
static int kvm_s390_vm_set_crypto(struct kvm *kvm, struct kvm_device_attr *attr)
{
	struct kvm_vcpu *vcpu;
	int i;

	if (!test_kvm_facility(kvm, 76))
		return -EINVAL;

	mutex_lock(&kvm->lock);
	switch (attr->attr) {
	case KVM_S390_VM_CRYPTO_ENABLE_AES_KW:
		get_random_bytes(
			kvm->arch.crypto.crycb->aes_wrapping_key_mask,
			sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
		kvm->arch.crypto.aes_kw = 1;
		break;
	case KVM_S390_VM_CRYPTO_ENABLE_DEA_KW:
		get_random_bytes(
			kvm->arch.crypto.crycb->dea_wrapping_key_mask,
			sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));
		kvm->arch.crypto.dea_kw = 1;
		break;
	case KVM_S390_VM_CRYPTO_DISABLE_AES_KW:
		kvm->arch.crypto.aes_kw = 0;
		memset(kvm->arch.crypto.crycb->aes_wrapping_key_mask, 0,
		       sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
		break;
	case KVM_S390_VM_CRYPTO_DISABLE_DEA_KW:
		kvm->arch.crypto.dea_kw = 0;
		memset(kvm->arch.crypto.crycb->dea_wrapping_key_mask, 0,
		       sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));
		break;
	default:
		mutex_unlock(&kvm->lock);
		return -ENXIO;
	}

	kvm_for_each_vcpu(i, vcpu, kvm) {
		kvm_s390_vcpu_crypto_setup(vcpu);
		exit_sie(vcpu);
	}
	mutex_unlock(&kvm->lock);
	return 0;
}

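/*
 * The guest TOD clock is modelled as a fixed epoch difference that is
 * applied on top of the host TOD clock whenever a VCPU enters SIE.
 */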
static int kvm_s390_set_tod_high(struct kvm *kvm, struct kvm_device_attr *attr)
{
	u8 gtod_high;

	if (copy_from_user(&gtod_high, (void __user *)attr->addr,
			   sizeof(gtod_high)))
		return -EFAULT;

	if (gtod_high != 0)
		return -EINVAL;

	return 0;
}

static int kvm_s390_set_tod_low(struct kvm *kvm, struct kvm_device_attr *attr)
{
	struct kvm_vcpu *cur_vcpu;
	unsigned int vcpu_idx;
	u64 host_tod, gtod;
	int r;

	if (copy_from_user(&gtod, (void __user *)attr->addr, sizeof(gtod)))
		return -EFAULT;

	r = store_tod_clock(&host_tod);
	if (r)
		return r;

	mutex_lock(&kvm->lock);
	kvm->arch.epoch = gtod - host_tod;
	kvm_s390_vcpu_block_all(kvm);
	kvm_for_each_vcpu(vcpu_idx, cur_vcpu, kvm)
		cur_vcpu->arch.sie_block->epoch = kvm->arch.epoch;
	kvm_s390_vcpu_unblock_all(kvm);
	mutex_unlock(&kvm->lock);
	return 0;
}

static int kvm_s390_set_tod(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	if (attr->flags)
		return -EINVAL;

	switch (attr->attr) {
	case KVM_S390_VM_TOD_HIGH:
		ret = kvm_s390_set_tod_high(kvm, attr);
		break;
	case KVM_S390_VM_TOD_LOW:
		ret = kvm_s390_set_tod_low(kvm, attr);
		break;
	default:
		ret = -ENXIO;
		break;
	}
	return ret;
}

static int kvm_s390_get_tod_high(struct kvm *kvm, struct kvm_device_attr *attr)
{
	u8 gtod_high = 0;

	if (copy_to_user((void __user *)attr->addr, &gtod_high,
			 sizeof(gtod_high)))
		return -EFAULT;

	return 0;
}

static int kvm_s390_get_tod_low(struct kvm *kvm, struct kvm_device_attr *attr)
{
	u64 host_tod, gtod;
	int r;

	r = store_tod_clock(&host_tod);
	if (r)
		return r;

	gtod = host_tod + kvm->arch.epoch;
	if (copy_to_user((void __user *)attr->addr, &gtod, sizeof(gtod)))
		return -EFAULT;

	return 0;
}

static int kvm_s390_get_tod(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	if (attr->flags)
		return -EINVAL;

	switch (attr->attr) {
	case KVM_S390_VM_TOD_HIGH:
		ret = kvm_s390_get_tod_high(kvm, attr);
		break;
	case KVM_S390_VM_TOD_LOW:
		ret = kvm_s390_get_tod_low(kvm, attr);
		break;
	default:
		ret = -ENXIO;
		break;
	}
	return ret;
}

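/*
 * Userspace may change the guest CPU model (cpuid, IBC, facility list)
 * only as long as no VCPU has been created yet.
 */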
static int kvm_s390_set_processor(struct kvm *kvm, struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_cpu_processor *proc;
	int ret = 0;

	mutex_lock(&kvm->lock);
	if (atomic_read(&kvm->online_vcpus)) {
		ret = -EBUSY;
		goto out;
	}
	proc = kzalloc(sizeof(*proc), GFP_KERNEL);
	if (!proc) {
		ret = -ENOMEM;
		goto out;
	}
	if (!copy_from_user(proc, (void __user *)attr->addr,
			    sizeof(*proc))) {
		memcpy(&kvm->arch.model.cpu_id, &proc->cpuid,
		       sizeof(struct cpuid));
		kvm->arch.model.ibc = proc->ibc;
		memcpy(kvm->arch.model.fac->list, proc->fac_list,
		       S390_ARCH_FAC_LIST_SIZE_BYTE);
	} else
		ret = -EFAULT;
	kfree(proc);
out:
	mutex_unlock(&kvm->lock);
	return ret;
}

static int kvm_s390_set_cpu_model(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret = -ENXIO;

	switch (attr->attr) {
	case KVM_S390_VM_CPU_PROCESSOR:
		ret = kvm_s390_set_processor(kvm, attr);
		break;
	}
	return ret;
}

static int kvm_s390_get_processor(struct kvm *kvm, struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_cpu_processor *proc;
	int ret = 0;

	proc = kzalloc(sizeof(*proc), GFP_KERNEL);
	if (!proc) {
		ret = -ENOMEM;
		goto out;
	}
	memcpy(&proc->cpuid, &kvm->arch.model.cpu_id, sizeof(struct cpuid));
	proc->ibc = kvm->arch.model.ibc;
	memcpy(&proc->fac_list, kvm->arch.model.fac->list, S390_ARCH_FAC_LIST_SIZE_BYTE);
	if (copy_to_user((void __user *)attr->addr, proc, sizeof(*proc)))
		ret = -EFAULT;
	kfree(proc);
out:
	return ret;
}

static int kvm_s390_get_machine(struct kvm *kvm, struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_cpu_machine *mach;
	int ret = 0;

	mach = kzalloc(sizeof(*mach), GFP_KERNEL);
	if (!mach) {
		ret = -ENOMEM;
		goto out;
	}
	get_cpu_id((struct cpuid *) &mach->cpuid);
	mach->ibc = sclp.ibc;
	memcpy(&mach->fac_mask, kvm->arch.model.fac->mask,
	       S390_ARCH_FAC_LIST_SIZE_BYTE);
	memcpy((unsigned long *)&mach->fac_list, S390_lowcore.stfle_fac_list,
	       S390_ARCH_FAC_LIST_SIZE_BYTE);
	if (copy_to_user((void __user *)attr->addr, mach, sizeof(*mach)))
		ret = -EFAULT;
	kfree(mach);
out:
	return ret;
}

static int kvm_s390_get_cpu_model(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret = -ENXIO;

	switch (attr->attr) {
	case KVM_S390_VM_CPU_PROCESSOR:
		ret = kvm_s390_get_processor(kvm, attr);
		break;
	case KVM_S390_VM_CPU_MACHINE:
		ret = kvm_s390_get_machine(kvm, attr);
		break;
	}
	return ret;
}

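/* Dispatch the KVM_{SET,GET,HAS}_DEVICE_ATTR vm ioctls by attribute group */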
static int kvm_s390_vm_set_attr(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	switch (attr->group) {
	case KVM_S390_VM_MEM_CTRL:
		ret = kvm_s390_set_mem_control(kvm, attr);
		break;
	case KVM_S390_VM_TOD:
		ret = kvm_s390_set_tod(kvm, attr);
		break;
	case KVM_S390_VM_CPU_MODEL:
		ret = kvm_s390_set_cpu_model(kvm, attr);
		break;
	case KVM_S390_VM_CRYPTO:
		ret = kvm_s390_vm_set_crypto(kvm, attr);
		break;
	default:
		ret = -ENXIO;
		break;
	}

	return ret;
}

static int kvm_s390_vm_get_attr(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	switch (attr->group) {
	case KVM_S390_VM_MEM_CTRL:
		ret = kvm_s390_get_mem_control(kvm, attr);
		break;
	case KVM_S390_VM_TOD:
		ret = kvm_s390_get_tod(kvm, attr);
		break;
	case KVM_S390_VM_CPU_MODEL:
		ret = kvm_s390_get_cpu_model(kvm, attr);
		break;
	default:
		ret = -ENXIO;
		break;
	}

	return ret;
}

static int kvm_s390_vm_has_attr(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	switch (attr->group) {
	case KVM_S390_VM_MEM_CTRL:
		switch (attr->attr) {
		case KVM_S390_VM_MEM_ENABLE_CMMA:
		case KVM_S390_VM_MEM_CLR_CMMA:
		case KVM_S390_VM_MEM_LIMIT_SIZE:
			ret = 0;
			break;
		default:
			ret = -ENXIO;
			break;
		}
		break;
	case KVM_S390_VM_TOD:
		switch (attr->attr) {
		case KVM_S390_VM_TOD_LOW:
		case KVM_S390_VM_TOD_HIGH:
			ret = 0;
			break;
		default:
			ret = -ENXIO;
			break;
		}
		break;
	case KVM_S390_VM_CPU_MODEL:
		switch (attr->attr) {
		case KVM_S390_VM_CPU_PROCESSOR:
		case KVM_S390_VM_CPU_MACHINE:
			ret = 0;
			break;
		default:
			ret = -ENXIO;
			break;
		}
		break;
	case KVM_S390_VM_CRYPTO:
		switch (attr->attr) {
		case KVM_S390_VM_CRYPTO_ENABLE_AES_KW:
		case KVM_S390_VM_CRYPTO_ENABLE_DEA_KW:
		case KVM_S390_VM_CRYPTO_DISABLE_AES_KW:
		case KVM_S390_VM_CRYPTO_DISABLE_DEA_KW:
			ret = 0;
			break;
		default:
			ret = -ENXIO;
			break;
		}
		break;
	default:
		ret = -ENXIO;
		break;
	}

	return ret;
}

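/*
 * Copy guest storage keys into a userspace buffer; returns
 * KVM_S390_GET_SKEYS_NONE if the guest does not use storage keys.
 */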
static long kvm_s390_get_skeys(struct kvm *kvm, struct kvm_s390_skeys *args)
{
	uint8_t *keys;
	uint64_t hva;
	unsigned long curkey;
	int i, r = 0;

	if (args->flags != 0)
		return -EINVAL;

	/* Is this guest using storage keys? */
	if (!mm_use_skey(current->mm))
		return KVM_S390_GET_SKEYS_NONE;

	/* Enforce sane limit on memory allocation */
	if (args->count < 1 || args->count > KVM_S390_SKEYS_MAX)
		return -EINVAL;

	keys = kmalloc_array(args->count, sizeof(uint8_t),
			     GFP_KERNEL | __GFP_NOWARN);
	if (!keys)
		keys = vmalloc(sizeof(uint8_t) * args->count);
	if (!keys)
		return -ENOMEM;

	for (i = 0; i < args->count; i++) {
		hva = gfn_to_hva(kvm, args->start_gfn + i);
		if (kvm_is_error_hva(hva)) {
			r = -EFAULT;
			goto out;
		}

		curkey = get_guest_storage_key(current->mm, hva);
		if (IS_ERR_VALUE(curkey)) {
			r = curkey;
			goto out;
		}
		keys[i] = curkey;
	}

	r = copy_to_user((uint8_t __user *)args->skeydata_addr, keys,
			 sizeof(uint8_t) * args->count);
	if (r)
		r = -EFAULT;
out:
	kvfree(keys);
	return r;
}

static long kvm_s390_set_skeys(struct kvm *kvm, struct kvm_s390_skeys *args)
{
	uint8_t *keys;
	uint64_t hva;
	int i, r = 0;

	if (args->flags != 0)
		return -EINVAL;

	/* Enforce sane limit on memory allocation */
	if (args->count < 1 || args->count > KVM_S390_SKEYS_MAX)
		return -EINVAL;

	keys = kmalloc_array(args->count, sizeof(uint8_t),
			     GFP_KERNEL | __GFP_NOWARN);
	if (!keys)
		keys = vmalloc(sizeof(uint8_t) * args->count);
	if (!keys)
		return -ENOMEM;

	r = copy_from_user(keys, (uint8_t __user *)args->skeydata_addr,
			   sizeof(uint8_t) * args->count);
	if (r) {
		r = -EFAULT;
		goto out;
	}

	/* Enable storage key handling for the guest */
	s390_enable_skey();

	for (i = 0; i < args->count; i++) {
		hva = gfn_to_hva(kvm, args->start_gfn + i);
		if (kvm_is_error_hva(hva)) {
			r = -EFAULT;
			goto out;
		}

		/* Lowest order bit is reserved */
		if (keys[i] & 0x01) {
			r = -EINVAL;
			goto out;
		}

		r = set_guest_storage_key(current->mm, hva,
					  (unsigned long)keys[i], 0);
		if (r)
			goto out;
	}
out:
	kvfree(keys);
	return r;
}

long kvm_arch_vm_ioctl(struct file *filp,
		       unsigned int ioctl, unsigned long arg)
{
	struct kvm *kvm = filp->private_data;
	void __user *argp = (void __user *)arg;
	struct kvm_device_attr attr;
	int r;

	switch (ioctl) {
	case KVM_S390_INTERRUPT: {
		struct kvm_s390_interrupt s390int;

		r = -EFAULT;
		if (copy_from_user(&s390int, argp, sizeof(s390int)))
			break;
		r = kvm_s390_inject_vm(kvm, &s390int);
		break;
	}
	case KVM_ENABLE_CAP: {
		struct kvm_enable_cap cap;

		r = -EFAULT;
		if (copy_from_user(&cap, argp, sizeof(cap)))
			break;
		r = kvm_vm_ioctl_enable_cap(kvm, &cap);
		break;
	}
	case KVM_CREATE_IRQCHIP: {
		struct kvm_irq_routing_entry routing;

		r = -EINVAL;
		if (kvm->arch.use_irqchip) {
			/* Set up dummy routing. */
			memset(&routing, 0, sizeof(routing));
			kvm_set_irq_routing(kvm, &routing, 0, 0);
			r = 0;
		}
		break;
	}
	case KVM_SET_DEVICE_ATTR: {
		r = -EFAULT;
		if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
			break;
		r = kvm_s390_vm_set_attr(kvm, &attr);
		break;
	}
	case KVM_GET_DEVICE_ATTR: {
		r = -EFAULT;
		if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
			break;
		r = kvm_s390_vm_get_attr(kvm, &attr);
		break;
	}
	case KVM_HAS_DEVICE_ATTR: {
		r = -EFAULT;
		if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
			break;
		r = kvm_s390_vm_has_attr(kvm, &attr);
		break;
	}
	case KVM_S390_GET_SKEYS: {
		struct kvm_s390_skeys args;

		r = -EFAULT;
		if (copy_from_user(&args, argp,
				   sizeof(struct kvm_s390_skeys)))
			break;
		r = kvm_s390_get_skeys(kvm, &args);
		break;
	}
	case KVM_S390_SET_SKEYS: {
		struct kvm_s390_skeys args;

		r = -EFAULT;
		if (copy_from_user(&args, argp,
				   sizeof(struct kvm_s390_skeys)))
			break;
		r = kvm_s390_set_skeys(kvm, &args);
		break;
	}
	default:
		r = -ENOTTY;
	}

	return r;
}

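/*
 * Query the Adjunct Processor configuration via PQAP(QCI). The exception
 * table entry makes the probe safe on machines without the facility.
 */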
static int kvm_s390_query_ap_config(u8 *config)
{
	u32 fcn_code = 0x04000000UL;
	u32 cc = 0;

	memset(config, 0, 128);
	asm volatile(
		"lgr 0,%1\n"
		"lgr 2,%2\n"
		".long 0xb2af0000\n"		/* PQAP(QCI) */
		"0: ipm %0\n"
		"srl %0,28\n"
		"1:\n"
		EX_TABLE(0b, 1b)
		: "+r" (cc)
		: "r" (fcn_code), "r" (config)
		: "cc", "0", "2", "memory"
	);

	return cc;
}

static int kvm_s390_apxa_installed(void)
{
	u8 config[128];
	int cc;

	if (test_facility(2) && test_facility(12)) {
		cc = kvm_s390_query_ap_config(config);

		if (cc)
			pr_err("PQAP(QCI) failed with cc=%d", cc);
		else
			return config[0] & 0x40;
	}

	return 0;
}

static void kvm_s390_set_crycb_format(struct kvm *kvm)
{
	kvm->arch.crypto.crycbd = (__u32)(unsigned long) kvm->arch.crypto.crycb;

	if (kvm_s390_apxa_installed())
		kvm->arch.crypto.crycbd |= CRYCB_FORMAT2;
	else
		kvm->arch.crypto.crycbd |= CRYCB_FORMAT1;
}

static void kvm_s390_get_cpu_id(struct cpuid *cpu_id)
{
	get_cpu_id(cpu_id);
	cpu_id->version = 0xff;
}

static int kvm_s390_crypto_init(struct kvm *kvm)
{
	if (!test_kvm_facility(kvm, 76))
		return 0;

	kvm->arch.crypto.crycb = kzalloc(sizeof(*kvm->arch.crypto.crycb),
					 GFP_KERNEL | GFP_DMA);
	if (!kvm->arch.crypto.crycb)
		return -ENOMEM;

	kvm_s390_set_crycb_format(kvm);

	/* Enable AES/DEA protected key functions by default */
	kvm->arch.crypto.aes_kw = 1;
	kvm->arch.crypto.dea_kw = 1;
	get_random_bytes(kvm->arch.crypto.crycb->aes_wrapping_key_mask,
			 sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
	get_random_bytes(kvm->arch.crypto.crycb->dea_wrapping_key_mask,
			 sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));

	return 0;
}

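/*
 * Create a VM: set up the SCA, the s390 debug feature, the CPU model and
 * facility lists, the crypto control block, the floating interrupt lists
 * and - unless this is a ucontrol VM - the guest address space (gmap).
 */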
int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
{
	int i, rc;
	char debug_name[16];
	static unsigned long sca_offset;

	rc = -EINVAL;
#ifdef CONFIG_KVM_S390_UCONTROL
	if (type & ~KVM_VM_S390_UCONTROL)
		goto out_err;
	if ((type & KVM_VM_S390_UCONTROL) && (!capable(CAP_SYS_ADMIN)))
		goto out_err;
#else
	if (type)
		goto out_err;
#endif

	rc = s390_enable_sie();
	if (rc)
		goto out_err;

	rc = -ENOMEM;

	kvm->arch.sca = (struct sca_block *) get_zeroed_page(GFP_KERNEL);
	if (!kvm->arch.sca)
		goto out_err;
	spin_lock(&kvm_lock);
	sca_offset = (sca_offset + 16) & 0x7f0;
	kvm->arch.sca = (struct sca_block *) ((char *) kvm->arch.sca + sca_offset);
	spin_unlock(&kvm_lock);

	sprintf(debug_name, "kvm-%u", current->pid);

	kvm->arch.dbf = debug_register(debug_name, 8, 2, 8 * sizeof(long));
	if (!kvm->arch.dbf)
		goto out_err;

	/*
	 * The architectural maximum amount of facilities is 16 kbit. To store
	 * this amount, 2 kbyte of memory is required. Thus we need a full
	 * page to hold the guest facility list (arch.model.fac->list) and the
	 * facility mask (arch.model.fac->mask). Its address size has to be
	 * 31 bits and word aligned.
	 */
	kvm->arch.model.fac =
		(struct kvm_s390_fac *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!kvm->arch.model.fac)
		goto out_err;

	/* Populate the facility mask initially. */
	memcpy(kvm->arch.model.fac->mask, S390_lowcore.stfle_fac_list,
	       S390_ARCH_FAC_LIST_SIZE_BYTE);
	for (i = 0; i < S390_ARCH_FAC_LIST_SIZE_U64; i++) {
		if (i < kvm_s390_fac_list_mask_size())
			kvm->arch.model.fac->mask[i] &= kvm_s390_fac_list_mask[i];
		else
			kvm->arch.model.fac->mask[i] = 0UL;
	}

	/* Populate the facility list initially. */
	memcpy(kvm->arch.model.fac->list, kvm->arch.model.fac->mask,
	       S390_ARCH_FAC_LIST_SIZE_BYTE);

	kvm_s390_get_cpu_id(&kvm->arch.model.cpu_id);
	kvm->arch.model.ibc = sclp.ibc & 0x0fff;

	if (kvm_s390_crypto_init(kvm) < 0)
		goto out_err;

	spin_lock_init(&kvm->arch.float_int.lock);
	for (i = 0; i < FIRQ_LIST_COUNT; i++)
		INIT_LIST_HEAD(&kvm->arch.float_int.lists[i]);
	init_waitqueue_head(&kvm->arch.ipte_wq);
	mutex_init(&kvm->arch.ipte_mutex);

	debug_register_view(kvm->arch.dbf, &debug_sprintf_view);
	VM_EVENT(kvm, 3, "%s", "vm created");

	if (type & KVM_VM_S390_UCONTROL) {
		kvm->arch.gmap = NULL;
	} else {
		kvm->arch.gmap = gmap_alloc(current->mm, (1UL << 44) - 1);
		if (!kvm->arch.gmap)
			goto out_err;
		kvm->arch.gmap->private = kvm;
		kvm->arch.gmap->pfault_enabled = 0;
	}

	kvm->arch.css_support = 0;
	kvm->arch.use_irqchip = 0;
	kvm->arch.epoch = 0;

	spin_lock_init(&kvm->arch.start_stop_lock);

	return 0;
out_err:
	kfree(kvm->arch.crypto.crycb);
	free_page((unsigned long)kvm->arch.model.fac);
	debug_unregister(kvm->arch.dbf);
	free_page((unsigned long)(kvm->arch.sca));
	return rc;
}

void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{
	VCPU_EVENT(vcpu, 3, "%s", "free cpu");
	trace_kvm_s390_destroy_vcpu(vcpu->vcpu_id);
	kvm_s390_clear_local_irqs(vcpu);
	kvm_clear_async_pf_completion_queue(vcpu);
	if (!kvm_is_ucontrol(vcpu->kvm)) {
		clear_bit(63 - vcpu->vcpu_id,
			  (unsigned long *) &vcpu->kvm->arch.sca->mcn);
		if (vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda ==
		    (__u64) vcpu->arch.sie_block)
			vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda = 0;
	}
	smp_mb();

	if (kvm_is_ucontrol(vcpu->kvm))
		gmap_free(vcpu->arch.gmap);

	if (kvm_s390_cmma_enabled(vcpu->kvm))
		kvm_s390_vcpu_unsetup_cmma(vcpu);
	free_page((unsigned long)(vcpu->arch.sie_block));

	kvm_vcpu_uninit(vcpu);
	kmem_cache_free(kvm_vcpu_cache, vcpu);
}

static void kvm_free_vcpus(struct kvm *kvm)
{
	unsigned int i;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(i, vcpu, kvm)
		kvm_arch_vcpu_destroy(vcpu);

	mutex_lock(&kvm->lock);
	for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
		kvm->vcpus[i] = NULL;

	atomic_set(&kvm->online_vcpus, 0);
	mutex_unlock(&kvm->lock);
}

void kvm_arch_destroy_vm(struct kvm *kvm)
{
	kvm_free_vcpus(kvm);
	free_page((unsigned long)kvm->arch.model.fac);
	free_page((unsigned long)(kvm->arch.sca));
	debug_unregister(kvm->arch.dbf);
	kfree(kvm->arch.crypto.crycb);
	if (!kvm_is_ucontrol(kvm))
		gmap_free(kvm->arch.gmap);
	kvm_s390_destroy_adapters(kvm);
	kvm_s390_clear_float_irqs(kvm);
}

/* Section: vcpu related */
static int __kvm_ucontrol_vcpu_init(struct kvm_vcpu *vcpu)
{
	vcpu->arch.gmap = gmap_alloc(current->mm, -1UL);
	if (!vcpu->arch.gmap)
		return -ENOMEM;
	vcpu->arch.gmap->private = vcpu->kvm;

	return 0;
}

int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
{
	vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
	kvm_clear_async_pf_completion_queue(vcpu);
	vcpu->run->kvm_valid_regs = KVM_SYNC_PREFIX |
				    KVM_SYNC_GPRS |
				    KVM_SYNC_ACRS |
				    KVM_SYNC_CRS |
				    KVM_SYNC_ARCH0 |
				    KVM_SYNC_PFAULT;
	if (test_kvm_facility(vcpu->kvm, 129))
		vcpu->run->kvm_valid_regs |= KVM_SYNC_VRS;

	if (kvm_is_ucontrol(vcpu->kvm))
		return __kvm_ucontrol_vcpu_init(vcpu);

	return 0;
}

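/*
 * Swap register state on sched-in: save the host floating point (or
 * vector) and access registers, load the guest's, and enable the gmap.
 * kvm_arch_vcpu_put() below performs the inverse on sched-out.
 */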
void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	save_fp_ctl(&vcpu->arch.host_fpregs.fpc);
	if (test_kvm_facility(vcpu->kvm, 129))
		save_vx_regs((__vector128 *)&vcpu->arch.host_vregs->vrs);
	else
		save_fp_regs(vcpu->arch.host_fpregs.fprs);
	save_access_regs(vcpu->arch.host_acrs);
	if (test_kvm_facility(vcpu->kvm, 129)) {
		restore_fp_ctl(&vcpu->run->s.regs.fpc);
		restore_vx_regs((__vector128 *)&vcpu->run->s.regs.vrs);
	} else {
		restore_fp_ctl(&vcpu->arch.guest_fpregs.fpc);
		restore_fp_regs(vcpu->arch.guest_fpregs.fprs);
	}
	restore_access_regs(vcpu->run->s.regs.acrs);
	gmap_enable(vcpu->arch.gmap);
	atomic_set_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
}

void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
	atomic_clear_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
	gmap_disable(vcpu->arch.gmap);
	if (test_kvm_facility(vcpu->kvm, 129)) {
		save_fp_ctl(&vcpu->run->s.regs.fpc);
		save_vx_regs((__vector128 *)&vcpu->run->s.regs.vrs);
	} else {
		save_fp_ctl(&vcpu->arch.guest_fpregs.fpc);
		save_fp_regs(vcpu->arch.guest_fpregs.fprs);
	}
	save_access_regs(vcpu->run->s.regs.acrs);
	restore_fp_ctl(&vcpu->arch.host_fpregs.fpc);
	if (test_kvm_facility(vcpu->kvm, 129))
		restore_vx_regs((__vector128 *)&vcpu->arch.host_vregs->vrs);
	else
		restore_fp_regs(vcpu->arch.host_fpregs.fprs);
	restore_access_regs(vcpu->arch.host_acrs);
}

static void kvm_s390_vcpu_initial_reset(struct kvm_vcpu *vcpu)
{
	/* this equals initial cpu reset in pop, but we don't switch to ESA */
	vcpu->arch.sie_block->gpsw.mask = 0UL;
	vcpu->arch.sie_block->gpsw.addr = 0UL;
	kvm_s390_set_prefix(vcpu, 0);
	vcpu->arch.sie_block->cputm = 0UL;
	vcpu->arch.sie_block->ckc = 0UL;
	vcpu->arch.sie_block->todpr = 0;
	memset(vcpu->arch.sie_block->gcr, 0, 16 * sizeof(__u64));
	vcpu->arch.sie_block->gcr[0] = 0xE0UL;
	vcpu->arch.sie_block->gcr[14] = 0xC2000000UL;
	vcpu->arch.guest_fpregs.fpc = 0;
	asm volatile("lfpc %0" : : "Q" (vcpu->arch.guest_fpregs.fpc));
	vcpu->arch.sie_block->gbea = 1;
	vcpu->arch.sie_block->pp = 0;
	vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
	kvm_clear_async_pf_completion_queue(vcpu);
	if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm))
		kvm_s390_vcpu_stop(vcpu);
	kvm_s390_clear_local_irqs(vcpu);
}

void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
{
	mutex_lock(&vcpu->kvm->lock);
	vcpu->arch.sie_block->epoch = vcpu->kvm->arch.epoch;
	mutex_unlock(&vcpu->kvm->lock);
	if (!kvm_is_ucontrol(vcpu->kvm))
		vcpu->arch.gmap = vcpu->kvm->arch.gmap;
}

static void kvm_s390_vcpu_crypto_setup(struct kvm_vcpu *vcpu)
{
	if (!test_kvm_facility(vcpu->kvm, 76))
		return;

	vcpu->arch.sie_block->ecb3 &= ~(ECB3_AES | ECB3_DEA);

	if (vcpu->kvm->arch.crypto.aes_kw)
		vcpu->arch.sie_block->ecb3 |= ECB3_AES;
	if (vcpu->kvm->arch.crypto.dea_kw)
		vcpu->arch.sie_block->ecb3 |= ECB3_DEA;

	vcpu->arch.sie_block->crycbd = vcpu->kvm->arch.crypto.crycbd;
}

void kvm_s390_vcpu_unsetup_cmma(struct kvm_vcpu *vcpu)
{
	free_page(vcpu->arch.sie_block->cbrlo);
	vcpu->arch.sie_block->cbrlo = 0;
}

int kvm_s390_vcpu_setup_cmma(struct kvm_vcpu *vcpu)
{
	vcpu->arch.sie_block->cbrlo = get_zeroed_page(GFP_KERNEL);
	if (!vcpu->arch.sie_block->cbrlo)
		return -ENOMEM;

	vcpu->arch.sie_block->ecb2 |= 0x80;
	vcpu->arch.sie_block->ecb2 &= ~0x08;
	return 0;
}

static void kvm_s390_vcpu_setup_model(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_cpu_model *model = &vcpu->kvm->arch.model;

	vcpu->arch.cpu_id = model->cpu_id;
	vcpu->arch.sie_block->ibc = model->ibc;
	vcpu->arch.sie_block->fac = (int) (long) model->fac->list;
}

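/*
 * Configure the SIE control block of a new VCPU: initial CPU state,
 * execution control (ecb) bits, interception controls, CMMA and crypto.
 */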
int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
{
	int rc = 0;

	atomic_set(&vcpu->arch.sie_block->cpuflags, CPUSTAT_ZARCH |
						    CPUSTAT_SM |
						    CPUSTAT_STOPPED);

	if (test_kvm_facility(vcpu->kvm, 78))
		atomic_set_mask(CPUSTAT_GED2, &vcpu->arch.sie_block->cpuflags);
	else if (test_kvm_facility(vcpu->kvm, 8))
		atomic_set_mask(CPUSTAT_GED, &vcpu->arch.sie_block->cpuflags);

	kvm_s390_vcpu_setup_model(vcpu);

	vcpu->arch.sie_block->ecb = 6;
	if (test_kvm_facility(vcpu->kvm, 50) && test_kvm_facility(vcpu->kvm, 73))
		vcpu->arch.sie_block->ecb |= 0x10;

	vcpu->arch.sie_block->ecb2 = 8;
	vcpu->arch.sie_block->eca = 0xC1002000U;
	if (sclp.has_siif)
		vcpu->arch.sie_block->eca |= 1;
	if (sclp.has_sigpif)
		vcpu->arch.sie_block->eca |= 0x10000000U;
	if (test_kvm_facility(vcpu->kvm, 129)) {
		vcpu->arch.sie_block->eca |= 0x00020000;
		vcpu->arch.sie_block->ecd |= 0x20000000;
	}
	vcpu->arch.sie_block->ictl |= ICTL_ISKE | ICTL_SSKE | ICTL_RRBE;

	if (kvm_s390_cmma_enabled(vcpu->kvm)) {
		rc = kvm_s390_vcpu_setup_cmma(vcpu);
		if (rc)
			return rc;
	}
	hrtimer_init(&vcpu->arch.ckc_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	vcpu->arch.ckc_timer.function = kvm_s390_idle_wakeup;

	kvm_s390_vcpu_crypto_setup(vcpu);

	return rc;
}

struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
				      unsigned int id)
{
	struct kvm_vcpu *vcpu;
	struct sie_page *sie_page;
	int rc = -EINVAL;

	if (id >= KVM_MAX_VCPUS)
		goto out;

	rc = -ENOMEM;

	vcpu = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
	if (!vcpu)
		goto out;

	sie_page = (struct sie_page *) get_zeroed_page(GFP_KERNEL);
	if (!sie_page)
		goto out_free_cpu;

	vcpu->arch.sie_block = &sie_page->sie_block;
	vcpu->arch.sie_block->itdba = (unsigned long) &sie_page->itdb;
	vcpu->arch.host_vregs = &sie_page->vregs;

	vcpu->arch.sie_block->icpua = id;
	if (!kvm_is_ucontrol(kvm)) {
		if (!kvm->arch.sca) {
			WARN_ON_ONCE(1);
			goto out_free_cpu;
		}
		if (!kvm->arch.sca->cpu[id].sda)
			kvm->arch.sca->cpu[id].sda =
				(__u64) vcpu->arch.sie_block;
		vcpu->arch.sie_block->scaoh =
			(__u32)(((__u64)kvm->arch.sca) >> 32);
		vcpu->arch.sie_block->scaol = (__u32)(__u64)kvm->arch.sca;
		set_bit(63 - id, (unsigned long *) &kvm->arch.sca->mcn);
	}

	spin_lock_init(&vcpu->arch.local_int.lock);
	vcpu->arch.local_int.float_int = &kvm->arch.float_int;
	vcpu->arch.local_int.wq = &vcpu->wq;
	vcpu->arch.local_int.cpuflags = &vcpu->arch.sie_block->cpuflags;

	rc = kvm_vcpu_init(vcpu, kvm, id);
	if (rc)
		goto out_free_sie_block;
	VM_EVENT(kvm, 3, "create cpu %d at %p, sie block at %p", id, vcpu,
		 vcpu->arch.sie_block);
	trace_kvm_s390_create_vcpu(id, vcpu, vcpu->arch.sie_block);

	return vcpu;
out_free_sie_block:
	free_page((unsigned long)(vcpu->arch.sie_block));
out_free_cpu:
	kmem_cache_free(kvm_vcpu_cache, vcpu);
out:
	return ERR_PTR(rc);
}

int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
{
	return kvm_s390_vcpu_has_irq(vcpu, 0);
}

void kvm_s390_vcpu_block(struct kvm_vcpu *vcpu)
{
	atomic_set_mask(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
	exit_sie(vcpu);
}

void kvm_s390_vcpu_unblock(struct kvm_vcpu *vcpu)
{
	atomic_clear_mask(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
}

static void kvm_s390_vcpu_request(struct kvm_vcpu *vcpu)
{
	atomic_set_mask(PROG_REQUEST, &vcpu->arch.sie_block->prog20);
	exit_sie(vcpu);
}

static void kvm_s390_vcpu_request_handled(struct kvm_vcpu *vcpu)
{
	atomic_clear_mask(PROG_REQUEST, &vcpu->arch.sie_block->prog20);
}

/*
 * Kick a guest cpu out of SIE and wait until SIE is not running.
 * If the CPU is not running (e.g. waiting as idle) the function will
 * return immediately.
 */
void exit_sie(struct kvm_vcpu *vcpu)
{
	atomic_set_mask(CPUSTAT_STOP_INT, &vcpu->arch.sie_block->cpuflags);
	while (vcpu->arch.sie_block->prog0c & PROG_IN_SIE)
		cpu_relax();
}

/* Kick a guest cpu out of SIE to process a request synchronously */
void kvm_s390_sync_request(int req, struct kvm_vcpu *vcpu)
{
	kvm_make_request(req, vcpu);
	kvm_s390_vcpu_request(vcpu);
}

static void kvm_gmap_notifier(struct gmap *gmap, unsigned long address)
{
	int i;
	struct kvm *kvm = gmap->private;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(i, vcpu, kvm) {
		/* match against both prefix pages */
		if (kvm_s390_get_prefix(vcpu) == (address & ~0x1000UL)) {
			VCPU_EVENT(vcpu, 2, "gmap notifier for %lx", address);
			kvm_s390_sync_request(KVM_REQ_MMU_RELOAD, vcpu);
		}
	}
}

int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
{
	/* kvm common code refers to this, but never calls it */
	BUG();
	return 0;
}

static int kvm_arch_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu,
					   struct kvm_one_reg *reg)
{
	int r = -EINVAL;

	switch (reg->id) {
	case KVM_REG_S390_TODPR:
		r = put_user(vcpu->arch.sie_block->todpr,
			     (u32 __user *)reg->addr);
		break;
	case KVM_REG_S390_EPOCHDIFF:
		r = put_user(vcpu->arch.sie_block->epoch,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_CPU_TIMER:
		r = put_user(vcpu->arch.sie_block->cputm,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_CLOCK_COMP:
		r = put_user(vcpu->arch.sie_block->ckc,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PFTOKEN:
		r = put_user(vcpu->arch.pfault_token,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PFCOMPARE:
		r = put_user(vcpu->arch.pfault_compare,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PFSELECT:
		r = put_user(vcpu->arch.pfault_select,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PP:
		r = put_user(vcpu->arch.sie_block->pp,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_GBEA:
		r = put_user(vcpu->arch.sie_block->gbea,
			     (u64 __user *)reg->addr);
		break;
	default:
		break;
	}

	return r;
}

static int kvm_arch_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu,
					   struct kvm_one_reg *reg)
{
	int r = -EINVAL;

	switch (reg->id) {
	case KVM_REG_S390_TODPR:
		r = get_user(vcpu->arch.sie_block->todpr,
			     (u32 __user *)reg->addr);
		break;
	case KVM_REG_S390_EPOCHDIFF:
		r = get_user(vcpu->arch.sie_block->epoch,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_CPU_TIMER:
		r = get_user(vcpu->arch.sie_block->cputm,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_CLOCK_COMP:
		r = get_user(vcpu->arch.sie_block->ckc,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PFTOKEN:
		r = get_user(vcpu->arch.pfault_token,
			     (u64 __user *)reg->addr);
		if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
			kvm_clear_async_pf_completion_queue(vcpu);
		break;
	case KVM_REG_S390_PFCOMPARE:
		r = get_user(vcpu->arch.pfault_compare,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PFSELECT:
		r = get_user(vcpu->arch.pfault_select,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PP:
		r = get_user(vcpu->arch.sie_block->pp,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_GBEA:
		r = get_user(vcpu->arch.sie_block->gbea,
			     (u64 __user *)reg->addr);
		break;
	default:
		break;
	}

	return r;
}

static int kvm_arch_vcpu_ioctl_initial_reset(struct kvm_vcpu *vcpu)
{
	kvm_s390_vcpu_initial_reset(vcpu);
	return 0;
}

int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	memcpy(&vcpu->run->s.regs.gprs, &regs->gprs, sizeof(regs->gprs));
	return 0;
}

int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	memcpy(&regs->gprs, &vcpu->run->s.regs.gprs, sizeof(regs->gprs));
	return 0;
}

int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	memcpy(&vcpu->run->s.regs.acrs, &sregs->acrs, sizeof(sregs->acrs));
	memcpy(&vcpu->arch.sie_block->gcr, &sregs->crs, sizeof(sregs->crs));
	restore_access_regs(vcpu->run->s.regs.acrs);
	return 0;
}

int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	memcpy(&sregs->acrs, &vcpu->run->s.regs.acrs, sizeof(sregs->acrs));
	memcpy(&sregs->crs, &vcpu->arch.sie_block->gcr, sizeof(sregs->crs));
	return 0;
}

int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	if (test_fp_ctl(fpu->fpc))
		return -EINVAL;
	memcpy(&vcpu->arch.guest_fpregs.fprs, &fpu->fprs, sizeof(fpu->fprs));
	vcpu->arch.guest_fpregs.fpc = fpu->fpc;
	restore_fp_ctl(&vcpu->arch.guest_fpregs.fpc);
	restore_fp_regs(vcpu->arch.guest_fpregs.fprs);
	return 0;
}

int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	memcpy(&fpu->fprs, &vcpu->arch.guest_fpregs.fprs, sizeof(fpu->fprs));
	fpu->fpc = vcpu->arch.guest_fpregs.fpc;
	return 0;
}

static int kvm_arch_vcpu_ioctl_set_initial_psw(struct kvm_vcpu *vcpu, psw_t psw)
{
	int rc = 0;

	if (!is_vcpu_stopped(vcpu))
		rc = -EBUSY;
	else {
		vcpu->run->psw_mask = psw.mask;
		vcpu->run->psw_addr = psw.addr;
	}
	return rc;
}

int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
				  struct kvm_translation *tr)
{
	return -EINVAL; /* not implemented yet */
}

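/* Enable or disable guest debugging (single-step, hardware breakpoints) */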
#define VALID_GUESTDBG_FLAGS (KVM_GUESTDBG_SINGLESTEP | \
			      KVM_GUESTDBG_USE_HW_BP | \
			      KVM_GUESTDBG_ENABLE)

int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
					struct kvm_guest_debug *dbg)
{
	int rc = 0;

	vcpu->guest_debug = 0;
	kvm_s390_clear_bp_data(vcpu);

	if (dbg->control & ~VALID_GUESTDBG_FLAGS)
		return -EINVAL;

	if (dbg->control & KVM_GUESTDBG_ENABLE) {
		vcpu->guest_debug = dbg->control;
		/* enforce guest PER */
		atomic_set_mask(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags);

		if (dbg->control & KVM_GUESTDBG_USE_HW_BP)
			rc = kvm_s390_import_bp_data(vcpu, dbg);
	} else {
		atomic_clear_mask(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags);
		vcpu->arch.guestdbg.last_bp = 0;
	}

	if (rc) {
		vcpu->guest_debug = 0;
		kvm_s390_clear_bp_data(vcpu);
		atomic_clear_mask(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags);
	}

	return rc;
}

int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	/* CHECK_STOP and LOAD are not supported yet */
	return is_vcpu_stopped(vcpu) ? KVM_MP_STATE_STOPPED :
				       KVM_MP_STATE_OPERATING;
}

int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	int rc = 0;

	/* user space knows about this interface - let it control the state */
	vcpu->kvm->arch.user_cpu_state_ctrl = 1;

	switch (mp_state->mp_state) {
	case KVM_MP_STATE_STOPPED:
		kvm_s390_vcpu_stop(vcpu);
		break;
	case KVM_MP_STATE_OPERATING:
		kvm_s390_vcpu_start(vcpu);
		break;
	case KVM_MP_STATE_LOAD:
	case KVM_MP_STATE_CHECK_STOP:
		/* fall through - CHECK_STOP and LOAD are not supported yet */
	default:
		rc = -ENXIO;
	}

	return rc;
}

bool kvm_s390_cmma_enabled(struct kvm *kvm)
{
	if (!MACHINE_IS_LPAR)
		return false;
	/* only enable for z10 and later */
	if (!MACHINE_HAS_EDAT1)
		return false;
	if (!kvm->arch.use_cmma)
		return false;
	return true;
}

static bool ibs_enabled(struct kvm_vcpu *vcpu)
{
	return atomic_read(&vcpu->arch.sie_block->cpuflags) & CPUSTAT_IBS;
}

static int kvm_s390_handle_requests(struct kvm_vcpu *vcpu)
{
	if (!vcpu->requests)
		return 0;
retry:
	kvm_s390_vcpu_request_handled(vcpu);
	/*
	 * We use MMU_RELOAD just to re-arm the ipte notifier for the
	 * guest prefix page. gmap_ipte_notify will wait on the ptl lock.
	 * This ensures that the ipte instruction for this request has
	 * already finished. We might race against a second unmapper that
	 * wants to set the blocking bit. Lets just retry the request loop.
	 */
	if (kvm_check_request(KVM_REQ_MMU_RELOAD, vcpu)) {
		int rc;
		rc = gmap_ipte_notify(vcpu->arch.gmap,
				      kvm_s390_get_prefix(vcpu),
				      PAGE_SIZE * 2);
		if (rc)
			return rc;
		goto retry;
	}

	if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu)) {
		vcpu->arch.sie_block->ihcpu = 0xffff;
		goto retry;
	}

	if (kvm_check_request(KVM_REQ_ENABLE_IBS, vcpu)) {
		if (!ibs_enabled(vcpu)) {
			trace_kvm_s390_enable_disable_ibs(vcpu->vcpu_id, 1);
			atomic_set_mask(CPUSTAT_IBS,
					&vcpu->arch.sie_block->cpuflags);
		}
		goto retry;
	}

	if (kvm_check_request(KVM_REQ_DISABLE_IBS, vcpu)) {
		if (ibs_enabled(vcpu)) {
			trace_kvm_s390_enable_disable_ibs(vcpu->vcpu_id, 0);
			atomic_clear_mask(CPUSTAT_IBS,
					  &vcpu->arch.sie_block->cpuflags);
		}
		goto retry;
	}

	/* nothing to do, just clear the request */
	clear_bit(KVM_REQ_UNHALT, &vcpu->requests);

	return 0;
}

1795/**
1796 * kvm_arch_fault_in_page - fault-in guest page if necessary
1797 * @vcpu: The corresponding virtual cpu
1798 * @gpa: Guest physical address
1799 * @writable: Whether the page should be writable or not
1800 *
1801 * Make sure that a guest page has been faulted-in on the host.
1802 *
1803 * Return: Zero on success, negative error code otherwise.
1804 */
1805long kvm_arch_fault_in_page(struct kvm_vcpu *vcpu, gpa_t gpa, int writable)
24eb3a82 1806{
527e30b4
MS
1807 return gmap_fault(vcpu->arch.gmap, gpa,
1808 writable ? FAULT_FLAG_WRITE : 0);
24eb3a82
DD
1809}
1810
3c038e6b
DD
1811static void __kvm_inject_pfault_token(struct kvm_vcpu *vcpu, bool start_token,
1812 unsigned long token)
1813{
1814 struct kvm_s390_interrupt inti;
383d0b05 1815 struct kvm_s390_irq irq;
3c038e6b
DD
1816
1817 if (start_token) {
383d0b05
JF
1818 irq.u.ext.ext_params2 = token;
1819 irq.type = KVM_S390_INT_PFAULT_INIT;
1820 WARN_ON_ONCE(kvm_s390_inject_vcpu(vcpu, &irq));
3c038e6b
DD
1821 } else {
1822 inti.type = KVM_S390_INT_PFAULT_DONE;
383d0b05 1823 inti.parm64 = token;
3c038e6b
DD
1824 WARN_ON_ONCE(kvm_s390_inject_vm(vcpu->kvm, &inti));
1825 }
1826}
1827
1828void kvm_arch_async_page_not_present(struct kvm_vcpu *vcpu,
1829 struct kvm_async_pf *work)
1830{
1831 trace_kvm_s390_pfault_init(vcpu, work->arch.pfault_token);
1832 __kvm_inject_pfault_token(vcpu, true, work->arch.pfault_token);
1833}
1834
1835void kvm_arch_async_page_present(struct kvm_vcpu *vcpu,
1836 struct kvm_async_pf *work)
1837{
1838 trace_kvm_s390_pfault_done(vcpu, work->arch.pfault_token);
1839 __kvm_inject_pfault_token(vcpu, false, work->arch.pfault_token);
1840}
1841
1842void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu,
1843 struct kvm_async_pf *work)
1844{
1845 /* s390 will always inject the page directly */
1846}
1847
1848bool kvm_arch_can_inject_async_page_present(struct kvm_vcpu *vcpu)
1849{
1850 /*
1851 * s390 will always inject the page directly,
1852 * but we still want check_async_completion to clean up
1853 */
1854 return true;
1855}
1856
1857static int kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu)
1858{
1859 hva_t hva;
1860 struct kvm_arch_async_pf arch;
1861 int rc;
1862
1863 if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
1864 return 0;
1865 if ((vcpu->arch.sie_block->gpsw.mask & vcpu->arch.pfault_select) !=
1866 vcpu->arch.pfault_compare)
1867 return 0;
1868 if (psw_extint_disabled(vcpu))
1869 return 0;
9a022067 1870 if (kvm_s390_vcpu_has_irq(vcpu, 0))
3c038e6b
DD
1871 return 0;
1872 if (!(vcpu->arch.sie_block->gcr[0] & 0x200ul))
1873 return 0;
1874 if (!vcpu->arch.gmap->pfault_enabled)
1875 return 0;
1876
81480cc1
HC
1877 hva = gfn_to_hva(vcpu->kvm, gpa_to_gfn(current->thread.gmap_addr));
1878 hva += current->thread.gmap_addr & ~PAGE_MASK;
1879 if (read_guest_real(vcpu, vcpu->arch.pfault_token, &arch.pfault_token, 8))
3c038e6b
DD
1880 return 0;
1881
1882 rc = kvm_setup_async_pf(vcpu, current->thread.gmap_addr, hva, &arch);
1883 return rc;
1884}
1885
3fb4c40f 1886static int vcpu_pre_run(struct kvm_vcpu *vcpu)
b0c632db 1887{
3fb4c40f 1888 int rc, cpuflags;
e168bf8d 1889
3c038e6b
DD
1890 /*
1891 * On s390 notifications for arriving pages will be delivered directly
1892 * to the guest but the housekeeping for completed pfaults is
1893 * handled outside the worker.
1894 */
1895 kvm_check_async_pf_completion(vcpu);
1896
5a32c1af 1897 memcpy(&vcpu->arch.sie_block->gg14, &vcpu->run->s.regs.gprs[14], 16);
b0c632db
HC
1898
1899 if (need_resched())
1900 schedule();
1901
d3a73acb 1902 if (test_cpu_flag(CIF_MCCK_PENDING))
71cde587
CB
1903 s390_handle_mcck();
1904
79395031
JF
1905 if (!kvm_is_ucontrol(vcpu->kvm)) {
1906 rc = kvm_s390_deliver_pending_interrupts(vcpu);
1907 if (rc)
1908 return rc;
1909 }
0ff31867 1910
2c70fe44
CB
1911 rc = kvm_s390_handle_requests(vcpu);
1912 if (rc)
1913 return rc;
1914
27291e21
DH
1915 if (guestdbg_enabled(vcpu)) {
1916 kvm_s390_backup_guest_per_regs(vcpu);
1917 kvm_s390_patch_guest_per_regs(vcpu);
1918 }
1919
b0c632db 1920 vcpu->arch.sie_block->icptcode = 0;
3fb4c40f
TH
1921 cpuflags = atomic_read(&vcpu->arch.sie_block->cpuflags);
1922 VCPU_EVENT(vcpu, 6, "entering sie flags %x", cpuflags);
1923 trace_kvm_s390_sie_enter(vcpu, cpuflags);
2b29a9fd 1924
3fb4c40f
TH
1925 return 0;
1926}
1927
492d8642
TH
1928static int vcpu_post_run_fault_in_sie(struct kvm_vcpu *vcpu)
1929{
1930 psw_t *psw = &vcpu->arch.sie_block->gpsw;
1931 u8 opcode;
1932 int rc;
1933
1934 VCPU_EVENT(vcpu, 3, "%s", "fault in sie instruction");
1935 trace_kvm_s390_sie_fault(vcpu);
1936
1937 /*
1938 * We want to inject an addressing exception, which is defined as a
1939 * suppressing or terminating exception. However, since we came here
1940 * by a DAT access exception, the PSW still points to the faulting
1941 * instruction, because DAT exceptions are nullifying. So we have
1942 * to look up the current opcode to get the length of the instruction
1943 * to be able to forward the PSW.
1944 */
8ae04b8f 1945 rc = read_guest(vcpu, psw->addr, 0, &opcode, 1);
492d8642
TH
1946 if (rc)
1947 return kvm_s390_inject_prog_cond(vcpu, rc);
1948 psw->addr = __rewind_psw(*psw, -insn_length(opcode));
1949
1950 return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
1951}
1952
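/*
 * For reference, a sketch mirroring the insn_length() helper used above:
 * s390 encodes the instruction length in the two leftmost bits of the
 * first opcode byte -- 00 -> 2 bytes, 01/10 -> 4 bytes, 11 -> 6 bytes.
 */
static inline int sketch_insn_length(unsigned char code)
{
	return ((((int) code + 64) >> 7) + 1) << 1;
}
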
3fb4c40f
TH
1953static int vcpu_post_run(struct kvm_vcpu *vcpu, int exit_reason)
1954{
24eb3a82 1955 int rc = -1;
2b29a9fd
DD
1956
1957 VCPU_EVENT(vcpu, 6, "exit sie icptcode %d",
1958 vcpu->arch.sie_block->icptcode);
1959 trace_kvm_s390_sie_exit(vcpu, vcpu->arch.sie_block->icptcode);
1960
27291e21
DH
1961 if (guestdbg_enabled(vcpu))
1962 kvm_s390_restore_guest_per_regs(vcpu);
1963
3fb4c40f 1964 if (exit_reason >= 0) {
7c470539 1965 rc = 0;
210b1607
TH
1966 } else if (kvm_is_ucontrol(vcpu->kvm)) {
1967 vcpu->run->exit_reason = KVM_EXIT_S390_UCONTROL;
1968 vcpu->run->s390_ucontrol.trans_exc_code =
1969 current->thread.gmap_addr;
1970 vcpu->run->s390_ucontrol.pgm_code = 0x10;
1971 rc = -EREMOTE;
24eb3a82
DD
1972
1973 } else if (current->thread.gmap_pfault) {
3c038e6b 1974 trace_kvm_s390_major_guest_pfault(vcpu);
24eb3a82 1975 current->thread.gmap_pfault = 0;
fa576c58 1976 if (kvm_arch_setup_async_pf(vcpu)) {
24eb3a82 1977 rc = 0;
fa576c58
TH
1978 } else {
1979 gpa_t gpa = current->thread.gmap_addr;
1980 rc = kvm_arch_fault_in_page(vcpu, gpa, 1);
1981 }
24eb3a82
DD
1982 }
1983
492d8642
TH
1984 if (rc == -1)
1985 rc = vcpu_post_run_fault_in_sie(vcpu);
b0c632db 1986
5a32c1af 1987 memcpy(&vcpu->run->s.regs.gprs[14], &vcpu->arch.sie_block->gg14, 16);
3fb4c40f 1988
a76ccff6
TH
1989 if (rc == 0) {
1990 if (kvm_is_ucontrol(vcpu->kvm))
2955c83f
CB
1991 /* Don't exit for host interrupts. */
1992 rc = vcpu->arch.sie_block->icptcode ? -EOPNOTSUPP : 0;
a76ccff6
TH
1993 else
1994 rc = kvm_handle_sie_intercept(vcpu);
1995 }
1996
3fb4c40f
TH
1997 return rc;
1998}
1999
2000static int __vcpu_run(struct kvm_vcpu *vcpu)
2001{
2002 int rc, exit_reason;
2003
800c1065
TH
2004 /*
2005 * We try to hold kvm->srcu during most of vcpu_run (except when run-
2006 * ning the guest), so that memslots (and other stuff) are protected
2007 */
2008 vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
2009
a76ccff6
TH
2010 do {
2011 rc = vcpu_pre_run(vcpu);
2012 if (rc)
2013 break;
3fb4c40f 2014
800c1065 2015 srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
a76ccff6
TH
2016 /*
2017 * As PF_VCPU will be used in the fault handler, there must be
2018 * no uaccess between guest_enter and guest_exit.
2019 */
0097d12e
CB
2020 local_irq_disable();
2021 __kvm_guest_enter();
2022 local_irq_enable();
a76ccff6
TH
2023 exit_reason = sie64a(vcpu->arch.sie_block,
2024 vcpu->run->s.regs.gprs);
0097d12e
CB
2025 local_irq_disable();
2026 __kvm_guest_exit();
2027 local_irq_enable();
800c1065 2028 vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
a76ccff6
TH
2029
2030 rc = vcpu_post_run(vcpu, exit_reason);
27291e21 2031 } while (!signal_pending(current) && !guestdbg_exit_pending(vcpu) && !rc);
3fb4c40f 2032
800c1065 2033 srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
e168bf8d 2034 return rc;
b0c632db
HC
2035}
2036
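/*
 * The SRCU discipline of __vcpu_run() in miniature (illustrative only):
 * hold the read lock while memslots may be touched, drop it across
 * sie64a() so that memslot updates can complete while the guest runs.
 */
static void sketch_srcu_around_sie(struct kvm_vcpu *vcpu)
{
	int idx = srcu_read_lock(&vcpu->kvm->srcu);

	/* pre-run work that may dereference memslots goes here */
	srcu_read_unlock(&vcpu->kvm->srcu, idx);
	/* enter SIE without the lock held */
	idx = srcu_read_lock(&vcpu->kvm->srcu);
	/* post-run work that may dereference memslots goes here */
	srcu_read_unlock(&vcpu->kvm->srcu, idx);
}
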
b028ee3e
DH
2037static void sync_regs(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
2038{
2039 vcpu->arch.sie_block->gpsw.mask = kvm_run->psw_mask;
2040 vcpu->arch.sie_block->gpsw.addr = kvm_run->psw_addr;
2041 if (kvm_run->kvm_dirty_regs & KVM_SYNC_PREFIX)
2042 kvm_s390_set_prefix(vcpu, kvm_run->s.regs.prefix);
2043 if (kvm_run->kvm_dirty_regs & KVM_SYNC_CRS) {
2044 memcpy(&vcpu->arch.sie_block->gcr, &kvm_run->s.regs.crs, 128);
d3d692c8
DH
2045 /* some control register changes require a tlb flush */
2046 kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
b028ee3e
DH
2047 }
2048 if (kvm_run->kvm_dirty_regs & KVM_SYNC_ARCH0) {
2049 vcpu->arch.sie_block->cputm = kvm_run->s.regs.cputm;
2050 vcpu->arch.sie_block->ckc = kvm_run->s.regs.ckc;
2051 vcpu->arch.sie_block->todpr = kvm_run->s.regs.todpr;
2052 vcpu->arch.sie_block->pp = kvm_run->s.regs.pp;
2053 vcpu->arch.sie_block->gbea = kvm_run->s.regs.gbea;
2054 }
2055 if (kvm_run->kvm_dirty_regs & KVM_SYNC_PFAULT) {
2056 vcpu->arch.pfault_token = kvm_run->s.regs.pft;
2057 vcpu->arch.pfault_select = kvm_run->s.regs.pfs;
2058 vcpu->arch.pfault_compare = kvm_run->s.regs.pfc;
9fbd8082
DH
2059 if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
2060 kvm_clear_async_pf_completion_queue(vcpu);
b028ee3e
DH
2061 }
2062 kvm_run->kvm_dirty_regs = 0;
2063}
2064
2065static void store_regs(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
2066{
2067 kvm_run->psw_mask = vcpu->arch.sie_block->gpsw.mask;
2068 kvm_run->psw_addr = vcpu->arch.sie_block->gpsw.addr;
2069 kvm_run->s.regs.prefix = kvm_s390_get_prefix(vcpu);
2070 memcpy(&kvm_run->s.regs.crs, &vcpu->arch.sie_block->gcr, 128);
2071 kvm_run->s.regs.cputm = vcpu->arch.sie_block->cputm;
2072 kvm_run->s.regs.ckc = vcpu->arch.sie_block->ckc;
2073 kvm_run->s.regs.todpr = vcpu->arch.sie_block->todpr;
2074 kvm_run->s.regs.pp = vcpu->arch.sie_block->pp;
2075 kvm_run->s.regs.gbea = vcpu->arch.sie_block->gbea;
2076 kvm_run->s.regs.pft = vcpu->arch.pfault_token;
2077 kvm_run->s.regs.pfs = vcpu->arch.pfault_select;
2078 kvm_run->s.regs.pfc = vcpu->arch.pfault_compare;
2079}
2080
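/*
 * Userspace side of the register-sync protocol above (illustrative; this
 * would live in a VMM, not in this file, and assumes <sys/ioctl.h> and
 * <linux/kvm.h> plus an open vcpu fd and the mmap'ed kvm_run area).
 * Dirty bits in the shared kvm_run tell sync_regs() which blocks to pull
 * in; store_regs() writes everything back after the run.
 */
static int sketch_run_with_new_prefix(int vcpu_fd, struct kvm_run *run,
				      __u32 new_prefix)
{
	run->s.regs.prefix = new_prefix;
	run->kvm_dirty_regs |= KVM_SYNC_PREFIX;
	return ioctl(vcpu_fd, KVM_RUN, 0);
}
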
b0c632db
HC
2081int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
2082{
8f2abe6a 2083 int rc;
b0c632db
HC
2084 sigset_t sigsaved;
2085
27291e21
DH
2086 if (guestdbg_exit_pending(vcpu)) {
2087 kvm_s390_prepare_debug_exit(vcpu);
2088 return 0;
2089 }
2090
b0c632db
HC
2091 if (vcpu->sigset_active)
2092 sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);
2093
6352e4d2
DH
2094 if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm)) {
2095 kvm_s390_vcpu_start(vcpu);
2096 } else if (is_vcpu_stopped(vcpu)) {
ea2cdd27 2097 pr_err_ratelimited("can't run stopped vcpu %d\n",
6352e4d2
DH
2098 vcpu->vcpu_id);
2099 return -EINVAL;
2100 }
b0c632db 2101
b028ee3e 2102 sync_regs(vcpu, kvm_run);
d7b0b5eb 2103
dab4079d 2104 might_fault();
a76ccff6 2105 rc = __vcpu_run(vcpu);
9ace903d 2106
b1d16c49
CE
2107 if (signal_pending(current) && !rc) {
2108 kvm_run->exit_reason = KVM_EXIT_INTR;
8f2abe6a 2109 rc = -EINTR;
b1d16c49 2110 }
8f2abe6a 2111
27291e21
DH
2112 if (guestdbg_exit_pending(vcpu) && !rc) {
2113 kvm_s390_prepare_debug_exit(vcpu);
2114 rc = 0;
2115 }
2116
b8e660b8 2117 if (rc == -EOPNOTSUPP) {
8f2abe6a
CB
2118 /* intercept cannot be handled in-kernel, prepare kvm-run */
2119 kvm_run->exit_reason = KVM_EXIT_S390_SIEIC;
2120 kvm_run->s390_sieic.icptcode = vcpu->arch.sie_block->icptcode;
8f2abe6a
CB
2121 kvm_run->s390_sieic.ipa = vcpu->arch.sie_block->ipa;
2122 kvm_run->s390_sieic.ipb = vcpu->arch.sie_block->ipb;
2123 rc = 0;
2124 }
2125
2126 if (rc == -EREMOTE) {
2127 /* intercept was handled, but userspace support is needed;
2128 * kvm_run has been prepared by the handler */
2129 rc = 0;
2130 }
b0c632db 2131
b028ee3e 2132 store_regs(vcpu, kvm_run);
d7b0b5eb 2133
b0c632db
HC
2134 if (vcpu->sigset_active)
2135 sigprocmask(SIG_SETMASK, &sigsaved, NULL);
2136
b0c632db 2137 vcpu->stat.exit_userspace++;
7e8e6ab4 2138 return rc;
b0c632db
HC
2139}
2140
b0c632db
HC
2141/*
2142 * store status at address
2143 * we have two special cases:
2144 * KVM_S390_STORE_STATUS_NOADDR: -> 0x1200 on 64 bit
2145 * KVM_S390_STORE_STATUS_PREFIXED: -> prefix
2146 */
d0bce605 2147int kvm_s390_store_status_unloaded(struct kvm_vcpu *vcpu, unsigned long gpa)
b0c632db 2148{
092670cd 2149 unsigned char archmode = 1;
fda902cb 2150 unsigned int px;
178bd789 2151 u64 clkcomp;
d0bce605 2152 int rc;
b0c632db 2153
d0bce605
HC
2154 if (gpa == KVM_S390_STORE_STATUS_NOADDR) {
2155 if (write_guest_abs(vcpu, 163, &archmode, 1))
b0c632db 2156 return -EFAULT;
d0bce605
HC
2157 gpa = SAVE_AREA_BASE;
2158 } else if (gpa == KVM_S390_STORE_STATUS_PREFIXED) {
2159 if (write_guest_real(vcpu, 163, &archmode, 1))
b0c632db 2160 return -EFAULT;
d0bce605
HC
2161 gpa = kvm_s390_real_to_abs(vcpu, SAVE_AREA_BASE);
2162 }
2163 rc = write_guest_abs(vcpu, gpa + offsetof(struct save_area, fp_regs),
2164 vcpu->arch.guest_fpregs.fprs, 128);
2165 rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, gp_regs),
2166 vcpu->run->s.regs.gprs, 128);
2167 rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, psw),
2168 &vcpu->arch.sie_block->gpsw, 16);
fda902cb 2169 px = kvm_s390_get_prefix(vcpu);
d0bce605 2170 rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, pref_reg),
fda902cb 2171 &px, 4);
d0bce605
HC
2172 rc |= write_guest_abs(vcpu,
2173 gpa + offsetof(struct save_area, fp_ctrl_reg),
2174 &vcpu->arch.guest_fpregs.fpc, 4);
2175 rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, tod_reg),
2176 &vcpu->arch.sie_block->todpr, 4);
2177 rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, timer),
2178 &vcpu->arch.sie_block->cputm, 8);
178bd789 2179 clkcomp = vcpu->arch.sie_block->ckc >> 8;
d0bce605
HC
2180 rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, clk_cmp),
2181 &clkcomp, 8);
2182 rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, acc_regs),
2183 &vcpu->run->s.regs.acrs, 64);
2184 rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, ctrl_regs),
2185 &vcpu->arch.sie_block->gcr, 128);
2186 return rc ? -EFAULT : 0;
b0c632db
HC
2187}
2188
e879892c
TH
2189int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
2190{
2191 /*
2192 * The guest FPRS and ACRS are in the host FPRS/ACRS due to the lazy
2193 * copying in vcpu load/put. Let's update our copies before we save
2194 * them into the save area.
2195 */
2196 save_fp_ctl(&vcpu->arch.guest_fpregs.fpc);
2197 save_fp_regs(vcpu->arch.guest_fpregs.fprs);
2198 save_access_regs(vcpu->run->s.regs.acrs);
2199
2200 return kvm_s390_store_status_unloaded(vcpu, addr);
2201}
2202
bc17de7c
EF
2203/*
2204 * store additional status at address
2205 */
2206int kvm_s390_store_adtl_status_unloaded(struct kvm_vcpu *vcpu,
2207 unsigned long gpa)
2208{
2209 /* Only bits 0-53 are used for address formation */
2210 if (!(gpa & ~0x3ff))
2211 return 0;
2212
2213 return write_guest_abs(vcpu, gpa & ~0x3ff,
2214 (void *)&vcpu->run->s.regs.vrs, 512);
2215}
2216
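/*
 * Worked example of the address formation above (illustrative): with
 * gpa = 0x12345 only bits 0-53 are used, so the additional status is
 * written to 0x12345 & ~0x3ff = 0x12000, the enclosing 1K-aligned block;
 * a gpa of 0x200 fails the check above and nothing is stored.
 */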
2217int kvm_s390_vcpu_store_adtl_status(struct kvm_vcpu *vcpu, unsigned long addr)
2218{
2219 if (!test_kvm_facility(vcpu->kvm, 129))
2220 return 0;
2221
2222 /*
2223 * The guest VXRS are in the host VXRS due to the lazy
2224 * copying in vcpu load/put. Let's update our copies before we save
2225 * it into the save area.
2226 */
2227 save_vx_regs((__vector128 *)&vcpu->run->s.regs.vrs);
2228
2229 return kvm_s390_store_adtl_status_unloaded(vcpu, addr);
2230}
2231
8ad35755
DH
2232static void __disable_ibs_on_vcpu(struct kvm_vcpu *vcpu)
2233{
2234 kvm_check_request(KVM_REQ_ENABLE_IBS, vcpu);
8e236546 2235 kvm_s390_sync_request(KVM_REQ_DISABLE_IBS, vcpu);
8ad35755
DH
2236}
2237
2238static void __disable_ibs_on_all_vcpus(struct kvm *kvm)
2239{
2240 unsigned int i;
2241 struct kvm_vcpu *vcpu;
2242
2243 kvm_for_each_vcpu(i, vcpu, kvm) {
2244 __disable_ibs_on_vcpu(vcpu);
2245 }
2246}
2247
2248static void __enable_ibs_on_vcpu(struct kvm_vcpu *vcpu)
2249{
2250 kvm_check_request(KVM_REQ_DISABLE_IBS, vcpu);
8e236546 2251 kvm_s390_sync_request(KVM_REQ_ENABLE_IBS, vcpu);
8ad35755
DH
2252}
2253
6852d7b6
DH
2254void kvm_s390_vcpu_start(struct kvm_vcpu *vcpu)
2255{
8ad35755
DH
2256 int i, online_vcpus, started_vcpus = 0;
2257
2258 if (!is_vcpu_stopped(vcpu))
2259 return;
2260
6852d7b6 2261 trace_kvm_s390_vcpu_start_stop(vcpu->vcpu_id, 1);
8ad35755 2262 /* Only one cpu at a time may enter/leave the STOPPED state. */
433b9ee4 2263 spin_lock(&vcpu->kvm->arch.start_stop_lock);
8ad35755
DH
2264 online_vcpus = atomic_read(&vcpu->kvm->online_vcpus);
2265
2266 for (i = 0; i < online_vcpus; i++) {
2267 if (!is_vcpu_stopped(vcpu->kvm->vcpus[i]))
2268 started_vcpus++;
2269 }
2270
2271 if (started_vcpus == 0) {
2272 /* we're the only active VCPU -> speed it up */
2273 __enable_ibs_on_vcpu(vcpu);
2274 } else if (started_vcpus == 1) {
2275 /*
2276 * As we are starting a second VCPU, we have to disable
2277 * the IBS facility on all VCPUs to remove potentially
2278 * outstanding ENABLE requests.
2279 */
2280 __disable_ibs_on_all_vcpus(vcpu->kvm);
2281 }
2282
6852d7b6 2283 atomic_clear_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);
8ad35755
DH
2284 /*
2285 * Another VCPU might have used IBS while we were offline.
2286 * Let's play safe and flush the VCPU at startup.
2287 */
d3d692c8 2288 kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
433b9ee4 2289 spin_unlock(&vcpu->kvm->arch.start_stop_lock);
8ad35755 2290 return;
6852d7b6
DH
2291}
2292
2293void kvm_s390_vcpu_stop(struct kvm_vcpu *vcpu)
2294{
8ad35755
DH
2295 int i, online_vcpus, started_vcpus = 0;
2296 struct kvm_vcpu *started_vcpu = NULL;
2297
2298 if (is_vcpu_stopped(vcpu))
2299 return;
2300
6852d7b6 2301 trace_kvm_s390_vcpu_start_stop(vcpu->vcpu_id, 0);
8ad35755 2302 /* Only one cpu at a time may enter/leave the STOPPED state. */
433b9ee4 2303 spin_lock(&vcpu->kvm->arch.start_stop_lock);
8ad35755
DH
2304 online_vcpus = atomic_read(&vcpu->kvm->online_vcpus);
2305
32f5ff63 2306 /* SIGP STOP and SIGP STOP AND STORE STATUS has been fully processed */
6cddd432 2307 kvm_s390_clear_stop_irq(vcpu);
32f5ff63 2308
6cddd432 2309 atomic_set_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);
8ad35755
DH
2310 __disable_ibs_on_vcpu(vcpu);
2311
2312 for (i = 0; i < online_vcpus; i++) {
2313 if (!is_vcpu_stopped(vcpu->kvm->vcpus[i])) {
2314 started_vcpus++;
2315 started_vcpu = vcpu->kvm->vcpus[i];
2316 }
2317 }
2318
2319 if (started_vcpus == 1) {
2320 /*
2321 * As we only have one VCPU left, we want to enable the
2322 * IBS facility for that VCPU to speed it up.
2323 */
2324 __enable_ibs_on_vcpu(started_vcpu);
2325 }
2326
433b9ee4 2327 spin_unlock(&vcpu->kvm->arch.start_stop_lock);
8ad35755 2328 return;
6852d7b6
DH
2329}
2330
d6712df9
CH
2331static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
2332 struct kvm_enable_cap *cap)
2333{
2334 int r;
2335
2336 if (cap->flags)
2337 return -EINVAL;
2338
2339 switch (cap->cap) {
fa6b7fe9
CH
2340 case KVM_CAP_S390_CSS_SUPPORT:
2341 if (!vcpu->kvm->arch.css_support) {
2342 vcpu->kvm->arch.css_support = 1;
2343 trace_kvm_s390_enable_css(vcpu->kvm);
2344 }
2345 r = 0;
2346 break;
d6712df9
CH
2347 default:
2348 r = -EINVAL;
2349 break;
2350 }
2351 return r;
2352}
2353
41408c28
TH
2354static long kvm_s390_guest_mem_op(struct kvm_vcpu *vcpu,
2355 struct kvm_s390_mem_op *mop)
2356{
2357 void __user *uaddr = (void __user *)mop->buf;
2358 void *tmpbuf = NULL;
2359 int r, srcu_idx;
2360 const u64 supported_flags = KVM_S390_MEMOP_F_INJECT_EXCEPTION
2361 | KVM_S390_MEMOP_F_CHECK_ONLY;
2362
2363 if (mop->flags & ~supported_flags)
2364 return -EINVAL;
2365
2366 if (mop->size > MEM_OP_MAX_SIZE)
2367 return -E2BIG;
2368
2369 if (!(mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY)) {
2370 tmpbuf = vmalloc(mop->size);
2371 if (!tmpbuf)
2372 return -ENOMEM;
2373 }
2374
2375 srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
2376
2377 switch (mop->op) {
2378 case KVM_S390_MEMOP_LOGICAL_READ:
2379 if (mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY) {
2380 r = check_gva_range(vcpu, mop->gaddr, mop->ar, mop->size, false);
2381 break;
2382 }
2383 r = read_guest(vcpu, mop->gaddr, mop->ar, tmpbuf, mop->size);
2384 if (r == 0) {
2385 if (copy_to_user(uaddr, tmpbuf, mop->size))
2386 r = -EFAULT;
2387 }
2388 break;
2389 case KVM_S390_MEMOP_LOGICAL_WRITE:
2390 if (mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY) {
2391 r = check_gva_range(vcpu, mop->gaddr, mop->ar, mop->size, true);
2392 break;
2393 }
2394 if (copy_from_user(tmpbuf, uaddr, mop->size)) {
2395 r = -EFAULT;
2396 break;
2397 }
2398 r = write_guest(vcpu, mop->gaddr, mop->ar, tmpbuf, mop->size);
2399 break;
2400 default:
2401 r = -EINVAL;
2402 }
2403
2404 srcu_read_unlock(&vcpu->kvm->srcu, srcu_idx);
2405
2406 if (r > 0 && (mop->flags & KVM_S390_MEMOP_F_INJECT_EXCEPTION) != 0)
2407 kvm_s390_inject_prog_irq(vcpu, &vcpu->arch.pgm);
2408
2409 vfree(tmpbuf);
2410 return r;
2411}
2412
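/*
 * Userspace usage of the handler above (illustrative; VMM-side code that
 * assumes <sys/ioctl.h> and <linux/kvm.h> plus an open vcpu fd). Reads
 * "size" bytes from a guest logical address into "buf".
 */
static int sketch_read_guest_mem(int vcpu_fd, __u64 gaddr, void *buf,
				 __u32 size)
{
	struct kvm_s390_mem_op op = {
		.gaddr	= gaddr,
		.size	= size,
		.op	= KVM_S390_MEMOP_LOGICAL_READ,
		.buf	= (__u64)(unsigned long)buf,
		.ar	= 0,	/* access register 0 */
	};

	return ioctl(vcpu_fd, KVM_S390_MEM_OP, &op);
}
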
b0c632db
HC
2413long kvm_arch_vcpu_ioctl(struct file *filp,
2414 unsigned int ioctl, unsigned long arg)
2415{
2416 struct kvm_vcpu *vcpu = filp->private_data;
2417 void __user *argp = (void __user *)arg;
800c1065 2418 int idx;
bc923cc9 2419 long r;
b0c632db 2420
93736624 2421 switch (ioctl) {
47b43c52
JF
2422 case KVM_S390_IRQ: {
2423 struct kvm_s390_irq s390irq;
2424
2425 r = -EFAULT;
2426 if (copy_from_user(&s390irq, argp, sizeof(s390irq)))
2427 break;
2428 r = kvm_s390_inject_vcpu(vcpu, &s390irq);
2429 break;
2430 }
93736624 2431 case KVM_S390_INTERRUPT: {
ba5c1e9b 2432 struct kvm_s390_interrupt s390int;
383d0b05 2433 struct kvm_s390_irq s390irq;
ba5c1e9b 2434
93736624 2435 r = -EFAULT;
ba5c1e9b 2436 if (copy_from_user(&s390int, argp, sizeof(s390int)))
93736624 2437 break;
383d0b05
JF
2438 if (s390int_to_s390irq(&s390int, &s390irq))
2439 return -EINVAL;
2440 r = kvm_s390_inject_vcpu(vcpu, &s390irq);
93736624 2441 break;
ba5c1e9b 2442 }
b0c632db 2443 case KVM_S390_STORE_STATUS:
800c1065 2444 idx = srcu_read_lock(&vcpu->kvm->srcu);
bc923cc9 2445 r = kvm_s390_vcpu_store_status(vcpu, arg);
800c1065 2446 srcu_read_unlock(&vcpu->kvm->srcu, idx);
bc923cc9 2447 break;
b0c632db
HC
2448 case KVM_S390_SET_INITIAL_PSW: {
2449 psw_t psw;
2450
bc923cc9 2451 r = -EFAULT;
b0c632db 2452 if (copy_from_user(&psw, argp, sizeof(psw)))
bc923cc9
AK
2453 break;
2454 r = kvm_arch_vcpu_ioctl_set_initial_psw(vcpu, psw);
2455 break;
b0c632db
HC
2456 }
2457 case KVM_S390_INITIAL_RESET:
bc923cc9
AK
2458 r = kvm_arch_vcpu_ioctl_initial_reset(vcpu);
2459 break;
14eebd91
CO
2460 case KVM_SET_ONE_REG:
2461 case KVM_GET_ONE_REG: {
2462 struct kvm_one_reg reg;
2463 r = -EFAULT;
2464 if (copy_from_user(&reg, argp, sizeof(reg)))
2465 break;
2466 if (ioctl == KVM_SET_ONE_REG)
2467 r = kvm_arch_vcpu_ioctl_set_one_reg(vcpu, &reg);
2468 else
2469 r = kvm_arch_vcpu_ioctl_get_one_reg(vcpu, &reg);
2470 break;
2471 }
27e0393f
CO
2472#ifdef CONFIG_KVM_S390_UCONTROL
2473 case KVM_S390_UCAS_MAP: {
2474 struct kvm_s390_ucas_mapping ucasmap;
2475
2476 if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
2477 r = -EFAULT;
2478 break;
2479 }
2480
2481 if (!kvm_is_ucontrol(vcpu->kvm)) {
2482 r = -EINVAL;
2483 break;
2484 }
2485
2486 r = gmap_map_segment(vcpu->arch.gmap, ucasmap.user_addr,
2487 ucasmap.vcpu_addr, ucasmap.length);
2488 break;
2489 }
2490 case KVM_S390_UCAS_UNMAP: {
2491 struct kvm_s390_ucas_mapping ucasmap;
2492
2493 if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
2494 r = -EFAULT;
2495 break;
2496 }
2497
2498 if (!kvm_is_ucontrol(vcpu->kvm)) {
2499 r = -EINVAL;
2500 break;
2501 }
2502
2503 r = gmap_unmap_segment(vcpu->arch.gmap, ucasmap.vcpu_addr,
2504 ucasmap.length);
2505 break;
2506 }
2507#endif
ccc7910f 2508 case KVM_S390_VCPU_FAULT: {
527e30b4 2509 r = gmap_fault(vcpu->arch.gmap, arg, 0);
ccc7910f
CO
2510 break;
2511 }
d6712df9
CH
2512 case KVM_ENABLE_CAP:
2513 {
2514 struct kvm_enable_cap cap;
2515 r = -EFAULT;
2516 if (copy_from_user(&cap, argp, sizeof(cap)))
2517 break;
2518 r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap);
2519 break;
2520 }
41408c28
TH
2521 case KVM_S390_MEM_OP: {
2522 struct kvm_s390_mem_op mem_op;
2523
2524 if (copy_from_user(&mem_op, argp, sizeof(mem_op)) == 0)
2525 r = kvm_s390_guest_mem_op(vcpu, &mem_op);
2526 else
2527 r = -EFAULT;
2528 break;
2529 }
816c7667
JF
2530 case KVM_S390_SET_IRQ_STATE: {
2531 struct kvm_s390_irq_state irq_state;
2532
2533 r = -EFAULT;
2534 if (copy_from_user(&irq_state, argp, sizeof(irq_state)))
2535 break;
2536 if (irq_state.len > VCPU_IRQS_MAX_BUF ||
2537 irq_state.len == 0 ||
2538 irq_state.len % sizeof(struct kvm_s390_irq) > 0) {
2539 r = -EINVAL;
2540 break;
2541 }
2542 r = kvm_s390_set_irq_state(vcpu,
2543 (void __user *) irq_state.buf,
2544 irq_state.len);
2545 break;
2546 }
2547 case KVM_S390_GET_IRQ_STATE: {
2548 struct kvm_s390_irq_state irq_state;
2549
2550 r = -EFAULT;
2551 if (copy_from_user(&irq_state, argp, sizeof(irq_state)))
2552 break;
2553 if (irq_state.len == 0) {
2554 r = -EINVAL;
2555 break;
2556 }
2557 r = kvm_s390_get_irq_state(vcpu,
2558 (__u8 __user *) irq_state.buf,
2559 irq_state.len);
2560 break;
2561 }
b0c632db 2562 default:
3e6afcf1 2563 r = -ENOTTY;
b0c632db 2564 }
bc923cc9 2565 return r;
b0c632db
HC
2566}
2567
5b1c1493
CO
2568int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
2569{
2570#ifdef CONFIG_KVM_S390_UCONTROL
2571 if ((vmf->pgoff == KVM_S390_SIE_PAGE_OFFSET)
2572 && (kvm_is_ucontrol(vcpu->kvm))) {
2573 vmf->page = virt_to_page(vcpu->arch.sie_block);
2574 get_page(vmf->page);
2575 return 0;
2576 }
2577#endif
2578 return VM_FAULT_SIGBUS;
2579}
2580
5587027c
AK
2581int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
2582 unsigned long npages)
db3fe4eb
TY
2583{
2584 return 0;
2585}
2586
b0c632db 2587/* Section: memory related */
f7784b8e
MT
2588int kvm_arch_prepare_memory_region(struct kvm *kvm,
2589 struct kvm_memory_slot *memslot,
09170a49 2590 const struct kvm_userspace_memory_region *mem,
7b6195a9 2591 enum kvm_mr_change change)
b0c632db 2592{
dd2887e7
NW
2593 /* A few sanity checks. Memory slots must start and end on a segment
2594 boundary (1 MB). The memory in userland may be fragmented into
2595 various different vmas. It is okay to mmap() and munmap() memory
2596 in this slot at any time after this call */
b0c632db 2597
598841ca 2598 if (mem->userspace_addr & 0xffffful)
b0c632db
HC
2599 return -EINVAL;
2600
598841ca 2601 if (mem->memory_size & 0xffffful)
b0c632db
HC
2602 return -EINVAL;
2603
f7784b8e
MT
2604 return 0;
2605}
2606
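/*
 * Example for the checks above (illustrative): a slot with
 * userspace_addr = 0x80100000 and memory_size = 0x00300000 is accepted,
 * since both are 1 MB aligned; userspace_addr = 0x80100800 would be
 * rejected with -EINVAL.
 */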
2607void kvm_arch_commit_memory_region(struct kvm *kvm,
09170a49 2608 const struct kvm_userspace_memory_region *mem,
8482644a 2609 const struct kvm_memory_slot *old,
f36f3f28 2610 const struct kvm_memory_slot *new,
8482644a 2611 enum kvm_mr_change change)
f7784b8e 2612{
f7850c92 2613 int rc;
f7784b8e 2614
2cef4deb
CB
2615 /* If the basics of the memslot do not change, we do not want
2616 * to update the gmap. Every update causes several unnecessary
2617 * segment translation exceptions. This is usually handled just
2618 * fine by the normal fault handler + gmap, but it will also
2619 * cause faults on the prefix page of running guest CPUs.
2620 */
2621 if (old->userspace_addr == mem->userspace_addr &&
2622 old->base_gfn * PAGE_SIZE == mem->guest_phys_addr &&
2623 old->npages * PAGE_SIZE == mem->memory_size)
2624 return;
598841ca
CO
2625
2626 rc = gmap_map_segment(kvm->arch.gmap, mem->userspace_addr,
2627 mem->guest_phys_addr, mem->memory_size);
2628 if (rc)
ea2cdd27 2629 pr_warn("failed to commit memory region\n");
598841ca 2630 return;
b0c632db
HC
2631}
2632
b0c632db
HC
2633static int __init kvm_s390_init(void)
2634{
9d8d5786 2635 return kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
b0c632db
HC
2636}
2637
2638static void __exit kvm_s390_exit(void)
2639{
2640 kvm_exit();
2641}
2642
2643module_init(kvm_s390_init);
2644module_exit(kvm_s390_exit);
566af940
CH
2645
2646/*
2647 * Enable autoloading of the kvm module.
2648 * Note that we add the module alias here instead of virt/kvm/kvm_main.c
2649 * since x86 takes a different approach.
2650 */
2651#include <linux/miscdevice.h>
2652MODULE_ALIAS_MISCDEV(KVM_MINOR);
2653MODULE_ALIAS("devname:kvm");