/*
 * hosting zSeries kernel virtual machines
 *
 * Copyright IBM Corp. 2008, 2009
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (version 2 only)
 * as published by the Free Software Foundation.
 *
 * Author(s): Carsten Otte <cotte@de.ibm.com>
 *            Christian Borntraeger <borntraeger@de.ibm.com>
 *            Heiko Carstens <heiko.carstens@de.ibm.com>
 *            Christian Ehrhardt <ehrhardt@de.ibm.com>
 *            Jason J. Herne <jjherne@us.ibm.com>
 */

#include <linux/compiler.h>
#include <linux/err.h>
#include <linux/fs.h>
#include <linux/hrtimer.h>
#include <linux/init.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/timer.h>
#include <linux/vmalloc.h>
#include <asm/asm-offsets.h>
#include <asm/lowcore.h>
#include <asm/pgtable.h>
#include <asm/nmi.h>
#include <asm/switch_to.h>
#include <asm/isc.h>
#include <asm/sclp.h>
#include "kvm-s390.h"
#include "gaccess.h"

#define CREATE_TRACE_POINTS
#include "trace.h"
#include "trace-s390.h"

#define MEM_OP_MAX_SIZE 65536	/* Maximum transfer size for KVM_S390_MEM_OP */
#define LOCAL_IRQS 32
#define VCPU_IRQS_MAX_BUF (sizeof(struct kvm_s390_irq) * \
			   (KVM_MAX_VCPUS + LOCAL_IRQS))

#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU

struct kvm_stats_debugfs_item debugfs_entries[] = {
	{ "userspace_handled", VCPU_STAT(exit_userspace) },
	{ "exit_null", VCPU_STAT(exit_null) },
	{ "exit_validity", VCPU_STAT(exit_validity) },
	{ "exit_stop_request", VCPU_STAT(exit_stop_request) },
	{ "exit_external_request", VCPU_STAT(exit_external_request) },
	{ "exit_external_interrupt", VCPU_STAT(exit_external_interrupt) },
	{ "exit_instruction", VCPU_STAT(exit_instruction) },
	{ "exit_program_interruption", VCPU_STAT(exit_program_interruption) },
	{ "exit_instr_and_program_int", VCPU_STAT(exit_instr_and_program) },
	{ "halt_successful_poll", VCPU_STAT(halt_successful_poll) },
	{ "halt_wakeup", VCPU_STAT(halt_wakeup) },
	{ "instruction_lctlg", VCPU_STAT(instruction_lctlg) },
	{ "instruction_lctl", VCPU_STAT(instruction_lctl) },
	{ "instruction_stctl", VCPU_STAT(instruction_stctl) },
	{ "instruction_stctg", VCPU_STAT(instruction_stctg) },
	{ "deliver_emergency_signal", VCPU_STAT(deliver_emergency_signal) },
	{ "deliver_external_call", VCPU_STAT(deliver_external_call) },
	{ "deliver_service_signal", VCPU_STAT(deliver_service_signal) },
	{ "deliver_virtio_interrupt", VCPU_STAT(deliver_virtio_interrupt) },
	{ "deliver_stop_signal", VCPU_STAT(deliver_stop_signal) },
	{ "deliver_prefix_signal", VCPU_STAT(deliver_prefix_signal) },
	{ "deliver_restart_signal", VCPU_STAT(deliver_restart_signal) },
	{ "deliver_program_interruption", VCPU_STAT(deliver_program_int) },
	{ "exit_wait_state", VCPU_STAT(exit_wait_state) },
	{ "instruction_pfmf", VCPU_STAT(instruction_pfmf) },
	{ "instruction_stidp", VCPU_STAT(instruction_stidp) },
	{ "instruction_spx", VCPU_STAT(instruction_spx) },
	{ "instruction_stpx", VCPU_STAT(instruction_stpx) },
	{ "instruction_stap", VCPU_STAT(instruction_stap) },
	{ "instruction_storage_key", VCPU_STAT(instruction_storage_key) },
	{ "instruction_ipte_interlock", VCPU_STAT(instruction_ipte_interlock) },
	{ "instruction_stsch", VCPU_STAT(instruction_stsch) },
	{ "instruction_chsc", VCPU_STAT(instruction_chsc) },
	{ "instruction_essa", VCPU_STAT(instruction_essa) },
	{ "instruction_stsi", VCPU_STAT(instruction_stsi) },
	{ "instruction_stfl", VCPU_STAT(instruction_stfl) },
	{ "instruction_tprot", VCPU_STAT(instruction_tprot) },
	{ "instruction_sigp_sense", VCPU_STAT(instruction_sigp_sense) },
	{ "instruction_sigp_sense_running", VCPU_STAT(instruction_sigp_sense_running) },
	{ "instruction_sigp_external_call", VCPU_STAT(instruction_sigp_external_call) },
	{ "instruction_sigp_emergency", VCPU_STAT(instruction_sigp_emergency) },
	{ "instruction_sigp_cond_emergency", VCPU_STAT(instruction_sigp_cond_emergency) },
	{ "instruction_sigp_start", VCPU_STAT(instruction_sigp_start) },
	{ "instruction_sigp_stop", VCPU_STAT(instruction_sigp_stop) },
	{ "instruction_sigp_stop_store_status", VCPU_STAT(instruction_sigp_stop_store_status) },
	{ "instruction_sigp_store_status", VCPU_STAT(instruction_sigp_store_status) },
	{ "instruction_sigp_store_adtl_status", VCPU_STAT(instruction_sigp_store_adtl_status) },
	{ "instruction_sigp_set_arch", VCPU_STAT(instruction_sigp_arch) },
	{ "instruction_sigp_set_prefix", VCPU_STAT(instruction_sigp_prefix) },
	{ "instruction_sigp_restart", VCPU_STAT(instruction_sigp_restart) },
	{ "instruction_sigp_cpu_reset", VCPU_STAT(instruction_sigp_cpu_reset) },
	{ "instruction_sigp_init_cpu_reset", VCPU_STAT(instruction_sigp_init_cpu_reset) },
	{ "instruction_sigp_unknown", VCPU_STAT(instruction_sigp_unknown) },
	{ "diagnose_10", VCPU_STAT(diagnose_10) },
	{ "diagnose_44", VCPU_STAT(diagnose_44) },
	{ "diagnose_9c", VCPU_STAT(diagnose_9c) },
	{ NULL }
};

/* upper facilities limit for kvm */
unsigned long kvm_s390_fac_list_mask[] = {
	0xffe6fffbfcfdfc40UL,
	0x005e800000000000UL,
};

unsigned long kvm_s390_fac_list_mask_size(void)
{
	BUILD_BUG_ON(ARRAY_SIZE(kvm_s390_fac_list_mask) > S390_ARCH_FAC_MASK_SIZE_U64);
	return ARRAY_SIZE(kvm_s390_fac_list_mask);
}

static struct gmap_notifier gmap_notifier;

/* Section: not file related */
int kvm_arch_hardware_enable(void)
{
	/* every s390 is virtualization enabled ;-) */
	return 0;
}

static void kvm_gmap_notifier(struct gmap *gmap, unsigned long address);

int kvm_arch_hardware_setup(void)
{
	gmap_notifier.notifier_call = kvm_gmap_notifier;
	gmap_register_ipte_notifier(&gmap_notifier);
	return 0;
}

void kvm_arch_hardware_unsetup(void)
{
	gmap_unregister_ipte_notifier(&gmap_notifier);
}

int kvm_arch_init(void *opaque)
{
	/* Register floating interrupt controller interface. */
	return kvm_register_device_ops(&kvm_flic_ops, KVM_DEV_TYPE_FLIC);
}

/* Section: device related */
long kvm_arch_dev_ioctl(struct file *filp,
			unsigned int ioctl, unsigned long arg)
{
	if (ioctl == KVM_S390_ENABLE_SIE)
		return s390_enable_sie();
	return -EINVAL;
}

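/*
 * Report which KVM capabilities are available for this VM; scalar
 * capabilities return their limit, e.g. KVM_CAP_S390_MEM_OP reports
 * the maximum transfer size for memory operations.
 */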
int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
{
	int r;

	switch (ext) {
	case KVM_CAP_S390_PSW:
	case KVM_CAP_S390_GMAP:
	case KVM_CAP_SYNC_MMU:
#ifdef CONFIG_KVM_S390_UCONTROL
	case KVM_CAP_S390_UCONTROL:
#endif
	case KVM_CAP_ASYNC_PF:
	case KVM_CAP_SYNC_REGS:
	case KVM_CAP_ONE_REG:
	case KVM_CAP_ENABLE_CAP:
	case KVM_CAP_S390_CSS_SUPPORT:
	case KVM_CAP_IOEVENTFD:
	case KVM_CAP_DEVICE_CTRL:
	case KVM_CAP_ENABLE_CAP_VM:
	case KVM_CAP_S390_IRQCHIP:
	case KVM_CAP_VM_ATTRIBUTES:
	case KVM_CAP_MP_STATE:
	case KVM_CAP_S390_INJECT_IRQ:
	case KVM_CAP_S390_USER_SIGP:
	case KVM_CAP_S390_USER_STSI:
	case KVM_CAP_S390_SKEYS:
	case KVM_CAP_S390_IRQ_STATE:
		r = 1;
		break;
	case KVM_CAP_S390_MEM_OP:
		r = MEM_OP_MAX_SIZE;
		break;
	case KVM_CAP_NR_VCPUS:
	case KVM_CAP_MAX_VCPUS:
		r = KVM_MAX_VCPUS;
		break;
	case KVM_CAP_NR_MEMSLOTS:
		r = KVM_USER_MEM_SLOTS;
		break;
	case KVM_CAP_S390_COW:
		r = MACHINE_HAS_ESOP;
		break;
	case KVM_CAP_S390_VECTOR_REGISTERS:
		r = MACHINE_HAS_VX;
		break;
	default:
		r = 0;
	}
	return r;
}

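/*
 * Propagate the dirty state that the gmap tracked in the host page
 * tables into the KVM dirty bitmap of a memory slot.
 */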
static void kvm_s390_sync_dirty_log(struct kvm *kvm,
				    struct kvm_memory_slot *memslot)
{
	gfn_t cur_gfn, last_gfn;
	unsigned long address;
	struct gmap *gmap = kvm->arch.gmap;

	down_read(&gmap->mm->mmap_sem);
	/* Loop over all guest pages */
	last_gfn = memslot->base_gfn + memslot->npages;
	for (cur_gfn = memslot->base_gfn; cur_gfn <= last_gfn; cur_gfn++) {
		address = gfn_to_hva_memslot(memslot, cur_gfn);

		if (gmap_test_and_clear_dirty(address, gmap))
			mark_page_dirty(kvm, cur_gfn);
	}
	up_read(&gmap->mm->mmap_sem);
}

/* Section: vm related */
/*
 * Get (and clear) the dirty memory log for a memory slot.
 */
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
			       struct kvm_dirty_log *log)
{
	int r;
	unsigned long n;
	struct kvm_memory_slot *memslot;
	int is_dirty = 0;

	mutex_lock(&kvm->slots_lock);

	r = -EINVAL;
	if (log->slot >= KVM_USER_MEM_SLOTS)
		goto out;

	memslot = id_to_memslot(kvm->memslots, log->slot);
	r = -ENOENT;
	if (!memslot->dirty_bitmap)
		goto out;

	kvm_s390_sync_dirty_log(kvm, memslot);
	r = kvm_get_dirty_log(kvm, log, &is_dirty);
	if (r)
		goto out;

	/* Clear the dirty log */
	if (is_dirty) {
		n = kvm_dirty_bitmap_bytes(memslot);
		memset(memslot->dirty_bitmap, 0, n);
	}
	r = 0;
out:
	mutex_unlock(&kvm->slots_lock);
	return r;
}

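/*
 * Enable an optional VM capability at the request of user space.  Most
 * capabilities just set a flag in kvm->arch; vector register support
 * additionally requires the machine to have the vector facility (129).
 */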
static int kvm_vm_ioctl_enable_cap(struct kvm *kvm, struct kvm_enable_cap *cap)
{
	int r;

	if (cap->flags)
		return -EINVAL;

	switch (cap->cap) {
	case KVM_CAP_S390_IRQCHIP:
		kvm->arch.use_irqchip = 1;
		r = 0;
		break;
	case KVM_CAP_S390_USER_SIGP:
		kvm->arch.user_sigp = 1;
		r = 0;
		break;
	case KVM_CAP_S390_VECTOR_REGISTERS:
		if (MACHINE_HAS_VX) {
			set_kvm_facility(kvm->arch.model.fac->mask, 129);
			set_kvm_facility(kvm->arch.model.fac->list, 129);
			r = 0;
		} else
			r = -EINVAL;
		break;
	case KVM_CAP_S390_USER_STSI:
		kvm->arch.user_stsi = 1;
		r = 0;
		break;
	default:
		r = -EINVAL;
		break;
	}
	return r;
}

static int kvm_s390_get_mem_control(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	switch (attr->attr) {
	case KVM_S390_VM_MEM_LIMIT_SIZE:
		ret = 0;
		if (put_user(kvm->arch.gmap->asce_end, (u64 __user *)attr->addr))
			ret = -EFAULT;
		break;
	default:
		ret = -ENXIO;
		break;
	}
	return ret;
}

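/*
 * Handle writes to the KVM_S390_VM_MEM_CTRL attribute group: enable or
 * reset CMMA, or replace the gmap with one enforcing a new memory
 * limit.  Enabling CMMA and changing the limit are only possible while
 * no VCPUs have been created.
 */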
static int kvm_s390_set_mem_control(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;
	unsigned int idx;

	switch (attr->attr) {
	case KVM_S390_VM_MEM_ENABLE_CMMA:
		ret = -EBUSY;
		mutex_lock(&kvm->lock);
		if (atomic_read(&kvm->online_vcpus) == 0) {
			kvm->arch.use_cmma = 1;
			ret = 0;
		}
		mutex_unlock(&kvm->lock);
		break;
	case KVM_S390_VM_MEM_CLR_CMMA:
		mutex_lock(&kvm->lock);
		idx = srcu_read_lock(&kvm->srcu);
		s390_reset_cmma(kvm->arch.gmap->mm);
		srcu_read_unlock(&kvm->srcu, idx);
		mutex_unlock(&kvm->lock);
		ret = 0;
		break;
	case KVM_S390_VM_MEM_LIMIT_SIZE: {
		unsigned long new_limit;

		if (kvm_is_ucontrol(kvm))
			return -EINVAL;

		if (get_user(new_limit, (u64 __user *)attr->addr))
			return -EFAULT;

		if (new_limit > kvm->arch.gmap->asce_end)
			return -E2BIG;

		ret = -EBUSY;
		mutex_lock(&kvm->lock);
		if (atomic_read(&kvm->online_vcpus) == 0) {
			/* gmap_alloc will round the limit up */
			struct gmap *new = gmap_alloc(current->mm, new_limit);

			if (!new) {
				ret = -ENOMEM;
			} else {
				gmap_free(kvm->arch.gmap);
				new->private = kvm;
				kvm->arch.gmap = new;
				ret = 0;
			}
		}
		mutex_unlock(&kvm->lock);
		break;
	}
	default:
		ret = -ENXIO;
		break;
	}
	return ret;
}

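/*
 * Update the guest's key wrapping setup: generate fresh wrapping key
 * masks when AES/DEA key wrapping is enabled, clear them on disable,
 * then kick every VCPU out of SIE so the changed crypto control block
 * is picked up.
 */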
static void kvm_s390_vcpu_crypto_setup(struct kvm_vcpu *vcpu);

static int kvm_s390_vm_set_crypto(struct kvm *kvm, struct kvm_device_attr *attr)
{
	struct kvm_vcpu *vcpu;
	int i;

	if (!test_kvm_facility(kvm, 76))
		return -EINVAL;

	mutex_lock(&kvm->lock);
	switch (attr->attr) {
	case KVM_S390_VM_CRYPTO_ENABLE_AES_KW:
		get_random_bytes(
			kvm->arch.crypto.crycb->aes_wrapping_key_mask,
			sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
		kvm->arch.crypto.aes_kw = 1;
		break;
	case KVM_S390_VM_CRYPTO_ENABLE_DEA_KW:
		get_random_bytes(
			kvm->arch.crypto.crycb->dea_wrapping_key_mask,
			sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));
		kvm->arch.crypto.dea_kw = 1;
		break;
	case KVM_S390_VM_CRYPTO_DISABLE_AES_KW:
		kvm->arch.crypto.aes_kw = 0;
		memset(kvm->arch.crypto.crycb->aes_wrapping_key_mask, 0,
			sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
		break;
	case KVM_S390_VM_CRYPTO_DISABLE_DEA_KW:
		kvm->arch.crypto.dea_kw = 0;
		memset(kvm->arch.crypto.crycb->dea_wrapping_key_mask, 0,
			sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));
		break;
	default:
		mutex_unlock(&kvm->lock);
		return -ENXIO;
	}

	kvm_for_each_vcpu(i, vcpu, kvm) {
		kvm_s390_vcpu_crypto_setup(vcpu);
		exit_sie(vcpu);
	}
	mutex_unlock(&kvm->lock);
	return 0;
}

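/*
 * The guest TOD clock is maintained as an offset (kvm->arch.epoch)
 * from the host TOD clock.  Setting it recomputes the epoch and
 * propagates it to the SIE block of every VCPU while all VCPUs are
 * blocked.
 */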
static int kvm_s390_set_tod_high(struct kvm *kvm, struct kvm_device_attr *attr)
{
	u8 gtod_high;

	if (copy_from_user(&gtod_high, (void __user *)attr->addr,
			   sizeof(gtod_high)))
		return -EFAULT;

	if (gtod_high != 0)
		return -EINVAL;

	return 0;
}

static int kvm_s390_set_tod_low(struct kvm *kvm, struct kvm_device_attr *attr)
{
	struct kvm_vcpu *cur_vcpu;
	unsigned int vcpu_idx;
	u64 host_tod, gtod;
	int r;

	if (copy_from_user(&gtod, (void __user *)attr->addr, sizeof(gtod)))
		return -EFAULT;

	r = store_tod_clock(&host_tod);
	if (r)
		return r;

	mutex_lock(&kvm->lock);
	kvm->arch.epoch = gtod - host_tod;
	kvm_s390_vcpu_block_all(kvm);
	kvm_for_each_vcpu(vcpu_idx, cur_vcpu, kvm)
		cur_vcpu->arch.sie_block->epoch = kvm->arch.epoch;
	kvm_s390_vcpu_unblock_all(kvm);
	mutex_unlock(&kvm->lock);
	return 0;
}

static int kvm_s390_set_tod(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	if (attr->flags)
		return -EINVAL;

	switch (attr->attr) {
	case KVM_S390_VM_TOD_HIGH:
		ret = kvm_s390_set_tod_high(kvm, attr);
		break;
	case KVM_S390_VM_TOD_LOW:
		ret = kvm_s390_set_tod_low(kvm, attr);
		break;
	default:
		ret = -ENXIO;
		break;
	}
	return ret;
}

static int kvm_s390_get_tod_high(struct kvm *kvm, struct kvm_device_attr *attr)
{
	u8 gtod_high = 0;

	if (copy_to_user((void __user *)attr->addr, &gtod_high,
			 sizeof(gtod_high)))
		return -EFAULT;

	return 0;
}

static int kvm_s390_get_tod_low(struct kvm *kvm, struct kvm_device_attr *attr)
{
	u64 host_tod, gtod;
	int r;

	r = store_tod_clock(&host_tod);
	if (r)
		return r;

	gtod = host_tod + kvm->arch.epoch;
	if (copy_to_user((void __user *)attr->addr, &gtod, sizeof(gtod)))
		return -EFAULT;

	return 0;
}

static int kvm_s390_get_tod(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	if (attr->flags)
		return -EINVAL;

	switch (attr->attr) {
	case KVM_S390_VM_TOD_HIGH:
		ret = kvm_s390_get_tod_high(kvm, attr);
		break;
	case KVM_S390_VM_TOD_LOW:
		ret = kvm_s390_get_tod_low(kvm, attr);
		break;
	default:
		ret = -ENXIO;
		break;
	}
	return ret;
}

static int kvm_s390_set_processor(struct kvm *kvm, struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_cpu_processor *proc;
	int ret = 0;

	mutex_lock(&kvm->lock);
	if (atomic_read(&kvm->online_vcpus)) {
		ret = -EBUSY;
		goto out;
	}
	proc = kzalloc(sizeof(*proc), GFP_KERNEL);
	if (!proc) {
		ret = -ENOMEM;
		goto out;
	}
	if (!copy_from_user(proc, (void __user *)attr->addr,
			    sizeof(*proc))) {
		memcpy(&kvm->arch.model.cpu_id, &proc->cpuid,
		       sizeof(struct cpuid));
		kvm->arch.model.ibc = proc->ibc;
		memcpy(kvm->arch.model.fac->list, proc->fac_list,
		       S390_ARCH_FAC_LIST_SIZE_BYTE);
	} else
		ret = -EFAULT;
	kfree(proc);
out:
	mutex_unlock(&kvm->lock);
	return ret;
}

static int kvm_s390_set_cpu_model(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret = -ENXIO;

	switch (attr->attr) {
	case KVM_S390_VM_CPU_PROCESSOR:
		ret = kvm_s390_set_processor(kvm, attr);
		break;
	}
	return ret;
}

static int kvm_s390_get_processor(struct kvm *kvm, struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_cpu_processor *proc;
	int ret = 0;

	proc = kzalloc(sizeof(*proc), GFP_KERNEL);
	if (!proc) {
		ret = -ENOMEM;
		goto out;
	}
	memcpy(&proc->cpuid, &kvm->arch.model.cpu_id, sizeof(struct cpuid));
	proc->ibc = kvm->arch.model.ibc;
	memcpy(&proc->fac_list, kvm->arch.model.fac->list, S390_ARCH_FAC_LIST_SIZE_BYTE);
	if (copy_to_user((void __user *)attr->addr, proc, sizeof(*proc)))
		ret = -EFAULT;
	kfree(proc);
out:
	return ret;
}

static int kvm_s390_get_machine(struct kvm *kvm, struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_cpu_machine *mach;
	int ret = 0;

	mach = kzalloc(sizeof(*mach), GFP_KERNEL);
	if (!mach) {
		ret = -ENOMEM;
		goto out;
	}
	get_cpu_id((struct cpuid *) &mach->cpuid);
	mach->ibc = sclp_get_ibc();
	memcpy(&mach->fac_mask, kvm->arch.model.fac->mask,
	       S390_ARCH_FAC_LIST_SIZE_BYTE);
	memcpy((unsigned long *)&mach->fac_list, S390_lowcore.stfle_fac_list,
	       S390_ARCH_FAC_LIST_SIZE_BYTE);
	if (copy_to_user((void __user *)attr->addr, mach, sizeof(*mach)))
		ret = -EFAULT;
	kfree(mach);
out:
	return ret;
}

static int kvm_s390_get_cpu_model(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret = -ENXIO;

	switch (attr->attr) {
	case KVM_S390_VM_CPU_PROCESSOR:
		ret = kvm_s390_get_processor(kvm, attr);
		break;
	case KVM_S390_VM_CPU_MACHINE:
		ret = kvm_s390_get_machine(kvm, attr);
		break;
	}
	return ret;
}

static int kvm_s390_vm_set_attr(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	switch (attr->group) {
	case KVM_S390_VM_MEM_CTRL:
		ret = kvm_s390_set_mem_control(kvm, attr);
		break;
	case KVM_S390_VM_TOD:
		ret = kvm_s390_set_tod(kvm, attr);
		break;
	case KVM_S390_VM_CPU_MODEL:
		ret = kvm_s390_set_cpu_model(kvm, attr);
		break;
	case KVM_S390_VM_CRYPTO:
		ret = kvm_s390_vm_set_crypto(kvm, attr);
		break;
	default:
		ret = -ENXIO;
		break;
	}

	return ret;
}

static int kvm_s390_vm_get_attr(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	switch (attr->group) {
	case KVM_S390_VM_MEM_CTRL:
		ret = kvm_s390_get_mem_control(kvm, attr);
		break;
	case KVM_S390_VM_TOD:
		ret = kvm_s390_get_tod(kvm, attr);
		break;
	case KVM_S390_VM_CPU_MODEL:
		ret = kvm_s390_get_cpu_model(kvm, attr);
		break;
	default:
		ret = -ENXIO;
		break;
	}

	return ret;
}

static int kvm_s390_vm_has_attr(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	switch (attr->group) {
	case KVM_S390_VM_MEM_CTRL:
		switch (attr->attr) {
		case KVM_S390_VM_MEM_ENABLE_CMMA:
		case KVM_S390_VM_MEM_CLR_CMMA:
		case KVM_S390_VM_MEM_LIMIT_SIZE:
			ret = 0;
			break;
		default:
			ret = -ENXIO;
			break;
		}
		break;
	case KVM_S390_VM_TOD:
		switch (attr->attr) {
		case KVM_S390_VM_TOD_LOW:
		case KVM_S390_VM_TOD_HIGH:
			ret = 0;
			break;
		default:
			ret = -ENXIO;
			break;
		}
		break;
	case KVM_S390_VM_CPU_MODEL:
		switch (attr->attr) {
		case KVM_S390_VM_CPU_PROCESSOR:
		case KVM_S390_VM_CPU_MACHINE:
			ret = 0;
			break;
		default:
			ret = -ENXIO;
			break;
		}
		break;
	case KVM_S390_VM_CRYPTO:
		switch (attr->attr) {
		case KVM_S390_VM_CRYPTO_ENABLE_AES_KW:
		case KVM_S390_VM_CRYPTO_ENABLE_DEA_KW:
		case KVM_S390_VM_CRYPTO_DISABLE_AES_KW:
		case KVM_S390_VM_CRYPTO_DISABLE_DEA_KW:
			ret = 0;
			break;
		default:
			ret = -ENXIO;
			break;
		}
		break;
	default:
		ret = -ENXIO;
		break;
	}

	return ret;
}

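/*
 * Read guest storage keys into a user supplied buffer.  Returns
 * KVM_S390_GET_SKEYS_NONE when the guest is not using storage keys,
 * so user space can skip them entirely.
 */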
static long kvm_s390_get_skeys(struct kvm *kvm, struct kvm_s390_skeys *args)
{
	uint8_t *keys;
	uint64_t hva;
	unsigned long curkey;
	int i, r = 0;

	if (args->flags != 0)
		return -EINVAL;

	/* Is this guest using storage keys? */
	if (!mm_use_skey(current->mm))
		return KVM_S390_GET_SKEYS_NONE;

	/* Enforce sane limit on memory allocation */
	if (args->count < 1 || args->count > KVM_S390_SKEYS_MAX)
		return -EINVAL;

	keys = kmalloc_array(args->count, sizeof(uint8_t),
			     GFP_KERNEL | __GFP_NOWARN);
	if (!keys)
		keys = vmalloc(sizeof(uint8_t) * args->count);
	if (!keys)
		return -ENOMEM;

	for (i = 0; i < args->count; i++) {
		hva = gfn_to_hva(kvm, args->start_gfn + i);
		if (kvm_is_error_hva(hva)) {
			r = -EFAULT;
			goto out;
		}

		curkey = get_guest_storage_key(current->mm, hva);
		if (IS_ERR_VALUE(curkey)) {
			r = curkey;
			goto out;
		}
		keys[i] = curkey;
	}

	r = copy_to_user((uint8_t __user *)args->skeydata_addr, keys,
			 sizeof(uint8_t) * args->count);
	if (r)
		r = -EFAULT;
out:
	kvfree(keys);
	return r;
}

static long kvm_s390_set_skeys(struct kvm *kvm, struct kvm_s390_skeys *args)
{
	uint8_t *keys;
	uint64_t hva;
	int i, r = 0;

	if (args->flags != 0)
		return -EINVAL;

	/* Enforce sane limit on memory allocation */
	if (args->count < 1 || args->count > KVM_S390_SKEYS_MAX)
		return -EINVAL;

	keys = kmalloc_array(args->count, sizeof(uint8_t),
			     GFP_KERNEL | __GFP_NOWARN);
	if (!keys)
		keys = vmalloc(sizeof(uint8_t) * args->count);
	if (!keys)
		return -ENOMEM;

	r = copy_from_user(keys, (uint8_t __user *)args->skeydata_addr,
			   sizeof(uint8_t) * args->count);
	if (r) {
		r = -EFAULT;
		goto out;
	}

	/* Enable storage key handling for the guest */
	s390_enable_skey();

	for (i = 0; i < args->count; i++) {
		hva = gfn_to_hva(kvm, args->start_gfn + i);
		if (kvm_is_error_hva(hva)) {
			r = -EFAULT;
			goto out;
		}

		/* Lowest order bit is reserved */
		if (keys[i] & 0x01) {
			r = -EINVAL;
			goto out;
		}

		r = set_guest_storage_key(current->mm, hva,
					  (unsigned long)keys[i], 0);
		if (r)
			goto out;
	}
out:
	kvfree(keys);
	return r;
}

long kvm_arch_vm_ioctl(struct file *filp,
		       unsigned int ioctl, unsigned long arg)
{
	struct kvm *kvm = filp->private_data;
	void __user *argp = (void __user *)arg;
	struct kvm_device_attr attr;
	int r;

	switch (ioctl) {
	case KVM_S390_INTERRUPT: {
		struct kvm_s390_interrupt s390int;

		r = -EFAULT;
		if (copy_from_user(&s390int, argp, sizeof(s390int)))
			break;
		r = kvm_s390_inject_vm(kvm, &s390int);
		break;
	}
	case KVM_ENABLE_CAP: {
		struct kvm_enable_cap cap;
		r = -EFAULT;
		if (copy_from_user(&cap, argp, sizeof(cap)))
			break;
		r = kvm_vm_ioctl_enable_cap(kvm, &cap);
		break;
	}
	case KVM_CREATE_IRQCHIP: {
		struct kvm_irq_routing_entry routing;

		r = -EINVAL;
		if (kvm->arch.use_irqchip) {
			/* Set up dummy routing. */
			memset(&routing, 0, sizeof(routing));
			kvm_set_irq_routing(kvm, &routing, 0, 0);
			r = 0;
		}
		break;
	}
	case KVM_SET_DEVICE_ATTR: {
		r = -EFAULT;
		if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
			break;
		r = kvm_s390_vm_set_attr(kvm, &attr);
		break;
	}
	case KVM_GET_DEVICE_ATTR: {
		r = -EFAULT;
		if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
			break;
		r = kvm_s390_vm_get_attr(kvm, &attr);
		break;
	}
	case KVM_HAS_DEVICE_ATTR: {
		r = -EFAULT;
		if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
			break;
		r = kvm_s390_vm_has_attr(kvm, &attr);
		break;
	}
	case KVM_S390_GET_SKEYS: {
		struct kvm_s390_skeys args;

		r = -EFAULT;
		if (copy_from_user(&args, argp,
				   sizeof(struct kvm_s390_skeys)))
			break;
		r = kvm_s390_get_skeys(kvm, &args);
		break;
	}
	case KVM_S390_SET_SKEYS: {
		struct kvm_s390_skeys args;

		r = -EFAULT;
		if (copy_from_user(&args, argp,
				   sizeof(struct kvm_s390_skeys)))
			break;
		r = kvm_s390_set_skeys(kvm, &args);
		break;
	}
	default:
		r = -ENOTTY;
	}

	return r;
}

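/*
 * Query the adjunct processor configuration with the PQAP(QCI)
 * instruction, open-coded as ".long 0xb2af0000".  The EX_TABLE entry
 * lets the query fail gracefully on machines without the instruction,
 * leaving the 128-byte buffer zeroed.
 */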
static int kvm_s390_query_ap_config(u8 *config)
{
	u32 fcn_code = 0x04000000UL;
	u32 cc = 0;

	memset(config, 0, 128);
	asm volatile(
		"lgr 0,%1\n"
		"lgr 2,%2\n"
		".long 0xb2af0000\n"		/* PQAP(QCI) */
		"0: ipm %0\n"
		"srl %0,28\n"
		"1:\n"
		EX_TABLE(0b, 1b)
		: "+r" (cc)
		: "r" (fcn_code), "r" (config)
		: "cc", "0", "2", "memory"
	);

	return cc;
}

static int kvm_s390_apxa_installed(void)
{
	u8 config[128];
	int cc;

	if (test_facility(2) && test_facility(12)) {
		cc = kvm_s390_query_ap_config(config);

		if (cc)
			pr_err("PQAP(QCI) failed with cc=%d", cc);
		else
			return config[0] & 0x40;
	}

	return 0;
}

static void kvm_s390_set_crycb_format(struct kvm *kvm)
{
	kvm->arch.crypto.crycbd = (__u32)(unsigned long) kvm->arch.crypto.crycb;

	if (kvm_s390_apxa_installed())
		kvm->arch.crypto.crycbd |= CRYCB_FORMAT2;
	else
		kvm->arch.crypto.crycbd |= CRYCB_FORMAT1;
}

static void kvm_s390_get_cpu_id(struct cpuid *cpu_id)
{
	get_cpu_id(cpu_id);
	cpu_id->version = 0xff;
}

static int kvm_s390_crypto_init(struct kvm *kvm)
{
	if (!test_kvm_facility(kvm, 76))
		return 0;

	kvm->arch.crypto.crycb = kzalloc(sizeof(*kvm->arch.crypto.crycb),
					 GFP_KERNEL | GFP_DMA);
	if (!kvm->arch.crypto.crycb)
		return -ENOMEM;

	kvm_s390_set_crycb_format(kvm);

	/* Enable AES/DEA protected key functions by default */
	kvm->arch.crypto.aes_kw = 1;
	kvm->arch.crypto.dea_kw = 1;
	get_random_bytes(kvm->arch.crypto.crycb->aes_wrapping_key_mask,
			 sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
	get_random_bytes(kvm->arch.crypto.crycb->dea_wrapping_key_mask,
			 sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));

	return 0;
}

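/*
 * Create a new virtual machine: allocate the SCA, the s390 debug
 * feature, the facility mask/list page and the crypto control block,
 * and set up the gmap (guest address space) unless this is a
 * user-controlled (ucontrol) VM.
 */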
int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
{
	int i, rc;
	char debug_name[16];
	static unsigned long sca_offset;

	rc = -EINVAL;
#ifdef CONFIG_KVM_S390_UCONTROL
	if (type & ~KVM_VM_S390_UCONTROL)
		goto out_err;
	if ((type & KVM_VM_S390_UCONTROL) && (!capable(CAP_SYS_ADMIN)))
		goto out_err;
#else
	if (type)
		goto out_err;
#endif

	rc = s390_enable_sie();
	if (rc)
		goto out_err;

	rc = -ENOMEM;

	kvm->arch.sca = (struct sca_block *) get_zeroed_page(GFP_KERNEL);
	if (!kvm->arch.sca)
		goto out_err;
	spin_lock(&kvm_lock);
	sca_offset = (sca_offset + 16) & 0x7f0;
	kvm->arch.sca = (struct sca_block *) ((char *) kvm->arch.sca + sca_offset);
	spin_unlock(&kvm_lock);

	sprintf(debug_name, "kvm-%u", current->pid);

	kvm->arch.dbf = debug_register(debug_name, 8, 2, 8 * sizeof(long));
	if (!kvm->arch.dbf)
		goto out_err;

	/*
	 * The architectural maximum amount of facilities is 16 kbit. To store
	 * this amount, 2 kbyte of memory is required. Thus we need a full
	 * page to hold the guest facility list (arch.model.fac->list) and the
	 * facility mask (arch.model.fac->mask). Its address size has to be
	 * 31 bits and word aligned.
	 */
	kvm->arch.model.fac =
		(struct kvm_s390_fac *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!kvm->arch.model.fac)
		goto out_err;

	/* Populate the facility mask initially. */
	memcpy(kvm->arch.model.fac->mask, S390_lowcore.stfle_fac_list,
	       S390_ARCH_FAC_LIST_SIZE_BYTE);
	for (i = 0; i < S390_ARCH_FAC_LIST_SIZE_U64; i++) {
		if (i < kvm_s390_fac_list_mask_size())
			kvm->arch.model.fac->mask[i] &= kvm_s390_fac_list_mask[i];
		else
			kvm->arch.model.fac->mask[i] = 0UL;
	}

	/* Populate the facility list initially. */
	memcpy(kvm->arch.model.fac->list, kvm->arch.model.fac->mask,
	       S390_ARCH_FAC_LIST_SIZE_BYTE);

	kvm_s390_get_cpu_id(&kvm->arch.model.cpu_id);
	kvm->arch.model.ibc = sclp_get_ibc() & 0x0fff;

	if (kvm_s390_crypto_init(kvm) < 0)
		goto out_err;

	spin_lock_init(&kvm->arch.float_int.lock);
	for (i = 0; i < FIRQ_LIST_COUNT; i++)
		INIT_LIST_HEAD(&kvm->arch.float_int.lists[i]);
	init_waitqueue_head(&kvm->arch.ipte_wq);
	mutex_init(&kvm->arch.ipte_mutex);

	debug_register_view(kvm->arch.dbf, &debug_sprintf_view);
	VM_EVENT(kvm, 3, "%s", "vm created");

	if (type & KVM_VM_S390_UCONTROL) {
		kvm->arch.gmap = NULL;
	} else {
		kvm->arch.gmap = gmap_alloc(current->mm, (1UL << 44) - 1);
		if (!kvm->arch.gmap)
			goto out_err;
		kvm->arch.gmap->private = kvm;
		kvm->arch.gmap->pfault_enabled = 0;
	}

	kvm->arch.css_support = 0;
	kvm->arch.use_irqchip = 0;
	kvm->arch.epoch = 0;

	spin_lock_init(&kvm->arch.start_stop_lock);

	return 0;
out_err:
	kfree(kvm->arch.crypto.crycb);
	free_page((unsigned long)kvm->arch.model.fac);
	debug_unregister(kvm->arch.dbf);
	free_page((unsigned long)(kvm->arch.sca));
	return rc;
}

void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{
	VCPU_EVENT(vcpu, 3, "%s", "free cpu");
	trace_kvm_s390_destroy_vcpu(vcpu->vcpu_id);
	kvm_s390_clear_local_irqs(vcpu);
	kvm_clear_async_pf_completion_queue(vcpu);
	if (!kvm_is_ucontrol(vcpu->kvm)) {
		clear_bit(63 - vcpu->vcpu_id,
			  (unsigned long *) &vcpu->kvm->arch.sca->mcn);
		if (vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda ==
		    (__u64) vcpu->arch.sie_block)
			vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda = 0;
	}
	smp_mb();

	if (kvm_is_ucontrol(vcpu->kvm))
		gmap_free(vcpu->arch.gmap);

	if (kvm_s390_cmma_enabled(vcpu->kvm))
		kvm_s390_vcpu_unsetup_cmma(vcpu);
	free_page((unsigned long)(vcpu->arch.sie_block));

	kvm_vcpu_uninit(vcpu);
	kmem_cache_free(kvm_vcpu_cache, vcpu);
}

static void kvm_free_vcpus(struct kvm *kvm)
{
	unsigned int i;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(i, vcpu, kvm)
		kvm_arch_vcpu_destroy(vcpu);

	mutex_lock(&kvm->lock);
	for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
		kvm->vcpus[i] = NULL;

	atomic_set(&kvm->online_vcpus, 0);
	mutex_unlock(&kvm->lock);
}

void kvm_arch_destroy_vm(struct kvm *kvm)
{
	kvm_free_vcpus(kvm);
	free_page((unsigned long)kvm->arch.model.fac);
	free_page((unsigned long)(kvm->arch.sca));
	debug_unregister(kvm->arch.dbf);
	kfree(kvm->arch.crypto.crycb);
	if (!kvm_is_ucontrol(kvm))
		gmap_free(kvm->arch.gmap);
	kvm_s390_destroy_adapters(kvm);
	kvm_s390_clear_float_irqs(kvm);
}

/* Section: vcpu related */
static int __kvm_ucontrol_vcpu_init(struct kvm_vcpu *vcpu)
{
	vcpu->arch.gmap = gmap_alloc(current->mm, -1UL);
	if (!vcpu->arch.gmap)
		return -ENOMEM;
	vcpu->arch.gmap->private = vcpu->kvm;

	return 0;
}

int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
{
	vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
	kvm_clear_async_pf_completion_queue(vcpu);
	vcpu->run->kvm_valid_regs = KVM_SYNC_PREFIX |
				    KVM_SYNC_GPRS |
				    KVM_SYNC_ACRS |
				    KVM_SYNC_CRS |
				    KVM_SYNC_ARCH0 |
				    KVM_SYNC_PFAULT;
	if (test_kvm_facility(vcpu->kvm, 129))
		vcpu->run->kvm_valid_regs |= KVM_SYNC_VRS;

	if (kvm_is_ucontrol(vcpu->kvm))
		return __kvm_ucontrol_vcpu_init(vcpu);

	return 0;
}

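/*
 * Switch register context between host and guest.  On machines with
 * the vector facility (129) the full vector register set is saved and
 * restored, otherwise only the floating point registers.
 */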
void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	save_fp_ctl(&vcpu->arch.host_fpregs.fpc);
	if (test_kvm_facility(vcpu->kvm, 129))
		save_vx_regs((__vector128 *)&vcpu->arch.host_vregs->vrs);
	else
		save_fp_regs(vcpu->arch.host_fpregs.fprs);
	save_access_regs(vcpu->arch.host_acrs);
	if (test_kvm_facility(vcpu->kvm, 129)) {
		restore_fp_ctl(&vcpu->run->s.regs.fpc);
		restore_vx_regs((__vector128 *)&vcpu->run->s.regs.vrs);
	} else {
		restore_fp_ctl(&vcpu->arch.guest_fpregs.fpc);
		restore_fp_regs(vcpu->arch.guest_fpregs.fprs);
	}
	restore_access_regs(vcpu->run->s.regs.acrs);
	gmap_enable(vcpu->arch.gmap);
	atomic_set_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
}

void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
	atomic_clear_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
	gmap_disable(vcpu->arch.gmap);
	if (test_kvm_facility(vcpu->kvm, 129)) {
		save_fp_ctl(&vcpu->run->s.regs.fpc);
		save_vx_regs((__vector128 *)&vcpu->run->s.regs.vrs);
	} else {
		save_fp_ctl(&vcpu->arch.guest_fpregs.fpc);
		save_fp_regs(vcpu->arch.guest_fpregs.fprs);
	}
	save_access_regs(vcpu->run->s.regs.acrs);
	restore_fp_ctl(&vcpu->arch.host_fpregs.fpc);
	if (test_kvm_facility(vcpu->kvm, 129))
		restore_vx_regs((__vector128 *)&vcpu->arch.host_vregs->vrs);
	else
		restore_fp_regs(vcpu->arch.host_fpregs.fprs);
	restore_access_regs(vcpu->arch.host_acrs);
}

static void kvm_s390_vcpu_initial_reset(struct kvm_vcpu *vcpu)
{
	/* this equals initial cpu reset in pop, but we don't switch to ESA */
	vcpu->arch.sie_block->gpsw.mask = 0UL;
	vcpu->arch.sie_block->gpsw.addr = 0UL;
	kvm_s390_set_prefix(vcpu, 0);
	vcpu->arch.sie_block->cputm = 0UL;
	vcpu->arch.sie_block->ckc = 0UL;
	vcpu->arch.sie_block->todpr = 0;
	memset(vcpu->arch.sie_block->gcr, 0, 16 * sizeof(__u64));
	vcpu->arch.sie_block->gcr[0] = 0xE0UL;
	vcpu->arch.sie_block->gcr[14] = 0xC2000000UL;
	vcpu->arch.guest_fpregs.fpc = 0;
	asm volatile("lfpc %0" : : "Q" (vcpu->arch.guest_fpregs.fpc));
	vcpu->arch.sie_block->gbea = 1;
	vcpu->arch.sie_block->pp = 0;
	vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
	kvm_clear_async_pf_completion_queue(vcpu);
	if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm))
		kvm_s390_vcpu_stop(vcpu);
	kvm_s390_clear_local_irqs(vcpu);
}

void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
{
	mutex_lock(&vcpu->kvm->lock);
	vcpu->arch.sie_block->epoch = vcpu->kvm->arch.epoch;
	mutex_unlock(&vcpu->kvm->lock);
	if (!kvm_is_ucontrol(vcpu->kvm))
		vcpu->arch.gmap = vcpu->kvm->arch.gmap;
}

static void kvm_s390_vcpu_crypto_setup(struct kvm_vcpu *vcpu)
{
	if (!test_kvm_facility(vcpu->kvm, 76))
		return;

	vcpu->arch.sie_block->ecb3 &= ~(ECB3_AES | ECB3_DEA);

	if (vcpu->kvm->arch.crypto.aes_kw)
		vcpu->arch.sie_block->ecb3 |= ECB3_AES;
	if (vcpu->kvm->arch.crypto.dea_kw)
		vcpu->arch.sie_block->ecb3 |= ECB3_DEA;

	vcpu->arch.sie_block->crycbd = vcpu->kvm->arch.crypto.crycbd;
}

void kvm_s390_vcpu_unsetup_cmma(struct kvm_vcpu *vcpu)
{
	free_page(vcpu->arch.sie_block->cbrlo);
	vcpu->arch.sie_block->cbrlo = 0;
}

int kvm_s390_vcpu_setup_cmma(struct kvm_vcpu *vcpu)
{
	vcpu->arch.sie_block->cbrlo = get_zeroed_page(GFP_KERNEL);
	if (!vcpu->arch.sie_block->cbrlo)
		return -ENOMEM;

	vcpu->arch.sie_block->ecb2 |= 0x80;
	vcpu->arch.sie_block->ecb2 &= ~0x08;
	return 0;
}

static void kvm_s390_vcpu_setup_model(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_cpu_model *model = &vcpu->kvm->arch.model;

	vcpu->arch.cpu_id = model->cpu_id;
	vcpu->arch.sie_block->ibc = model->ibc;
	vcpu->arch.sie_block->fac = (int) (long) model->fac->list;
}

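/*
 * Initialize the SIE control block of a new VCPU: CPU state flags,
 * interpretation controls (ecb bits), CMMA if enabled for the VM, the
 * clock comparator wakeup timer and the crypto setup.
 */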
int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
{
	int rc = 0;

	atomic_set(&vcpu->arch.sie_block->cpuflags, CPUSTAT_ZARCH |
						    CPUSTAT_SM |
						    CPUSTAT_STOPPED);

	if (test_kvm_facility(vcpu->kvm, 78))
		atomic_set_mask(CPUSTAT_GED2, &vcpu->arch.sie_block->cpuflags);
	else if (test_kvm_facility(vcpu->kvm, 8))
		atomic_set_mask(CPUSTAT_GED, &vcpu->arch.sie_block->cpuflags);

	kvm_s390_vcpu_setup_model(vcpu);

	vcpu->arch.sie_block->ecb = 6;
	if (test_kvm_facility(vcpu->kvm, 50) && test_kvm_facility(vcpu->kvm, 73))
		vcpu->arch.sie_block->ecb |= 0x10;

	vcpu->arch.sie_block->ecb2 = 8;
	vcpu->arch.sie_block->eca = 0xC1002000U;
	if (sclp_has_siif())
		vcpu->arch.sie_block->eca |= 1;
	if (sclp_has_sigpif())
		vcpu->arch.sie_block->eca |= 0x10000000U;
	if (test_kvm_facility(vcpu->kvm, 129)) {
		vcpu->arch.sie_block->eca |= 0x00020000;
		vcpu->arch.sie_block->ecd |= 0x20000000;
	}
	vcpu->arch.sie_block->ictl |= ICTL_ISKE | ICTL_SSKE | ICTL_RRBE;

	if (kvm_s390_cmma_enabled(vcpu->kvm)) {
		rc = kvm_s390_vcpu_setup_cmma(vcpu);
		if (rc)
			return rc;
	}
	hrtimer_init(&vcpu->arch.ckc_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	vcpu->arch.ckc_timer.function = kvm_s390_idle_wakeup;

	kvm_s390_vcpu_crypto_setup(vcpu);

	return rc;
}

struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
				      unsigned int id)
{
	struct kvm_vcpu *vcpu;
	struct sie_page *sie_page;
	int rc = -EINVAL;

	if (id >= KVM_MAX_VCPUS)
		goto out;

	rc = -ENOMEM;

	vcpu = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
	if (!vcpu)
		goto out;

	sie_page = (struct sie_page *) get_zeroed_page(GFP_KERNEL);
	if (!sie_page)
		goto out_free_cpu;

	vcpu->arch.sie_block = &sie_page->sie_block;
	vcpu->arch.sie_block->itdba = (unsigned long) &sie_page->itdb;
	vcpu->arch.host_vregs = &sie_page->vregs;

	vcpu->arch.sie_block->icpua = id;
	if (!kvm_is_ucontrol(kvm)) {
		if (!kvm->arch.sca) {
			WARN_ON_ONCE(1);
			goto out_free_cpu;
		}
		if (!kvm->arch.sca->cpu[id].sda)
			kvm->arch.sca->cpu[id].sda =
				(__u64) vcpu->arch.sie_block;
		vcpu->arch.sie_block->scaoh =
			(__u32)(((__u64)kvm->arch.sca) >> 32);
		vcpu->arch.sie_block->scaol = (__u32)(__u64)kvm->arch.sca;
		set_bit(63 - id, (unsigned long *) &kvm->arch.sca->mcn);
	}

	spin_lock_init(&vcpu->arch.local_int.lock);
	vcpu->arch.local_int.float_int = &kvm->arch.float_int;
	vcpu->arch.local_int.wq = &vcpu->wq;
	vcpu->arch.local_int.cpuflags = &vcpu->arch.sie_block->cpuflags;

	rc = kvm_vcpu_init(vcpu, kvm, id);
	if (rc)
		goto out_free_sie_block;
	VM_EVENT(kvm, 3, "create cpu %d at %p, sie block at %p", id, vcpu,
		 vcpu->arch.sie_block);
	trace_kvm_s390_create_vcpu(id, vcpu, vcpu->arch.sie_block);

	return vcpu;
out_free_sie_block:
	free_page((unsigned long)(vcpu->arch.sie_block));
out_free_cpu:
	kmem_cache_free(kvm_vcpu_cache, vcpu);
out:
	return ERR_PTR(rc);
}

int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
{
	return kvm_s390_vcpu_has_irq(vcpu, 0);
}

void kvm_s390_vcpu_block(struct kvm_vcpu *vcpu)
{
	atomic_set_mask(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
}

void kvm_s390_vcpu_unblock(struct kvm_vcpu *vcpu)
{
	atomic_clear_mask(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
}

static void kvm_s390_vcpu_request(struct kvm_vcpu *vcpu)
{
	atomic_set_mask(PROG_REQUEST, &vcpu->arch.sie_block->prog20);
}

static void kvm_s390_vcpu_request_handled(struct kvm_vcpu *vcpu)
{
	atomic_clear_mask(PROG_REQUEST, &vcpu->arch.sie_block->prog20);
}

/*
 * Kick a guest cpu out of SIE and wait until SIE is not running.
 * If the CPU is not running (e.g. waiting as idle) the function will
 * return immediately.
 */
void exit_sie(struct kvm_vcpu *vcpu)
{
	atomic_set_mask(CPUSTAT_STOP_INT, &vcpu->arch.sie_block->cpuflags);
	while (vcpu->arch.sie_block->prog0c & PROG_IN_SIE)
		cpu_relax();
}

/* Kick a guest cpu out of SIE to process a request synchronously */
void kvm_s390_sync_request(int req, struct kvm_vcpu *vcpu)
{
	kvm_make_request(req, vcpu);
	kvm_s390_vcpu_request(vcpu);
	exit_sie(vcpu);
}

static void kvm_gmap_notifier(struct gmap *gmap, unsigned long address)
{
	int i;
	struct kvm *kvm = gmap->private;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(i, vcpu, kvm) {
		/* match against both prefix pages */
		if (kvm_s390_get_prefix(vcpu) == (address & ~0x1000UL)) {
			VCPU_EVENT(vcpu, 2, "gmap notifier for %lx", address);
			kvm_s390_sync_request(KVM_REQ_MMU_RELOAD, vcpu);
		}
	}
}

int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
{
	/* kvm common code refers to this, but never calls it */
	BUG();
	return 0;
}

static int kvm_arch_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu,
					   struct kvm_one_reg *reg)
{
	int r = -EINVAL;

	switch (reg->id) {
	case KVM_REG_S390_TODPR:
		r = put_user(vcpu->arch.sie_block->todpr,
			     (u32 __user *)reg->addr);
		break;
	case KVM_REG_S390_EPOCHDIFF:
		r = put_user(vcpu->arch.sie_block->epoch,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_CPU_TIMER:
		r = put_user(vcpu->arch.sie_block->cputm,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_CLOCK_COMP:
		r = put_user(vcpu->arch.sie_block->ckc,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PFTOKEN:
		r = put_user(vcpu->arch.pfault_token,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PFCOMPARE:
		r = put_user(vcpu->arch.pfault_compare,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PFSELECT:
		r = put_user(vcpu->arch.pfault_select,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PP:
		r = put_user(vcpu->arch.sie_block->pp,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_GBEA:
		r = put_user(vcpu->arch.sie_block->gbea,
			     (u64 __user *)reg->addr);
		break;
	default:
		break;
	}

	return r;
}

static int kvm_arch_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu,
					   struct kvm_one_reg *reg)
{
	int r = -EINVAL;

	switch (reg->id) {
	case KVM_REG_S390_TODPR:
		r = get_user(vcpu->arch.sie_block->todpr,
			     (u32 __user *)reg->addr);
		break;
	case KVM_REG_S390_EPOCHDIFF:
		r = get_user(vcpu->arch.sie_block->epoch,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_CPU_TIMER:
		r = get_user(vcpu->arch.sie_block->cputm,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_CLOCK_COMP:
		r = get_user(vcpu->arch.sie_block->ckc,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PFTOKEN:
		r = get_user(vcpu->arch.pfault_token,
			     (u64 __user *)reg->addr);
		if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
			kvm_clear_async_pf_completion_queue(vcpu);
		break;
	case KVM_REG_S390_PFCOMPARE:
		r = get_user(vcpu->arch.pfault_compare,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PFSELECT:
		r = get_user(vcpu->arch.pfault_select,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PP:
		r = get_user(vcpu->arch.sie_block->pp,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_GBEA:
		r = get_user(vcpu->arch.sie_block->gbea,
			     (u64 __user *)reg->addr);
		break;
	default:
		break;
	}

	return r;
}

static int kvm_arch_vcpu_ioctl_initial_reset(struct kvm_vcpu *vcpu)
{
	kvm_s390_vcpu_initial_reset(vcpu);
	return 0;
}

int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	memcpy(&vcpu->run->s.regs.gprs, &regs->gprs, sizeof(regs->gprs));
	return 0;
}

int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	memcpy(&regs->gprs, &vcpu->run->s.regs.gprs, sizeof(regs->gprs));
	return 0;
}

int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	memcpy(&vcpu->run->s.regs.acrs, &sregs->acrs, sizeof(sregs->acrs));
	memcpy(&vcpu->arch.sie_block->gcr, &sregs->crs, sizeof(sregs->crs));
	restore_access_regs(vcpu->run->s.regs.acrs);
	return 0;
}

int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	memcpy(&sregs->acrs, &vcpu->run->s.regs.acrs, sizeof(sregs->acrs));
	memcpy(&sregs->crs, &vcpu->arch.sie_block->gcr, sizeof(sregs->crs));
	return 0;
}

int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	if (test_fp_ctl(fpu->fpc))
		return -EINVAL;
	memcpy(&vcpu->arch.guest_fpregs.fprs, &fpu->fprs, sizeof(fpu->fprs));
	vcpu->arch.guest_fpregs.fpc = fpu->fpc;
	restore_fp_ctl(&vcpu->arch.guest_fpregs.fpc);
	restore_fp_regs(vcpu->arch.guest_fpregs.fprs);
	return 0;
}

int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	memcpy(&fpu->fprs, &vcpu->arch.guest_fpregs.fprs, sizeof(fpu->fprs));
	fpu->fpc = vcpu->arch.guest_fpregs.fpc;
	return 0;
}

static int kvm_arch_vcpu_ioctl_set_initial_psw(struct kvm_vcpu *vcpu, psw_t psw)
{
	int rc = 0;

	if (!is_vcpu_stopped(vcpu))
		rc = -EBUSY;
	else {
		vcpu->run->psw_mask = psw.mask;
		vcpu->run->psw_addr = psw.addr;
	}
	return rc;
}

int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
				  struct kvm_translation *tr)
{
	return -EINVAL; /* not implemented yet */
}

#define VALID_GUESTDBG_FLAGS (KVM_GUESTDBG_SINGLESTEP | \
			      KVM_GUESTDBG_USE_HW_BP | \
			      KVM_GUESTDBG_ENABLE)

int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
					struct kvm_guest_debug *dbg)
{
	int rc = 0;

	vcpu->guest_debug = 0;
	kvm_s390_clear_bp_data(vcpu);

	if (dbg->control & ~VALID_GUESTDBG_FLAGS)
		return -EINVAL;

	if (dbg->control & KVM_GUESTDBG_ENABLE) {
		vcpu->guest_debug = dbg->control;
		/* enforce guest PER */
		atomic_set_mask(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags);

		if (dbg->control & KVM_GUESTDBG_USE_HW_BP)
			rc = kvm_s390_import_bp_data(vcpu, dbg);
	} else {
		atomic_clear_mask(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags);
		vcpu->arch.guestdbg.last_bp = 0;
	}

	if (rc) {
		vcpu->guest_debug = 0;
		kvm_s390_clear_bp_data(vcpu);
		atomic_clear_mask(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags);
	}

	return rc;
}

int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	/* CHECK_STOP and LOAD are not supported yet */
	return is_vcpu_stopped(vcpu) ? KVM_MP_STATE_STOPPED :
				       KVM_MP_STATE_OPERATING;
}

int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	int rc = 0;

	/* user space knows about this interface - let it control the state */
	vcpu->kvm->arch.user_cpu_state_ctrl = 1;

	switch (mp_state->mp_state) {
	case KVM_MP_STATE_STOPPED:
		kvm_s390_vcpu_stop(vcpu);
		break;
	case KVM_MP_STATE_OPERATING:
		kvm_s390_vcpu_start(vcpu);
		break;
	case KVM_MP_STATE_LOAD:
	case KVM_MP_STATE_CHECK_STOP:
		/* fall through - CHECK_STOP and LOAD are not supported yet */
	default:
		rc = -ENXIO;
	}

	return rc;
}

bool kvm_s390_cmma_enabled(struct kvm *kvm)
{
	if (!MACHINE_IS_LPAR)
		return false;
	/* only enable for z10 and later */
	if (!MACHINE_HAS_EDAT1)
		return false;
	if (!kvm->arch.use_cmma)
		return false;
	return true;
}

static bool ibs_enabled(struct kvm_vcpu *vcpu)
{
	return atomic_read(&vcpu->arch.sie_block->cpuflags) & CPUSTAT_IBS;
}

static int kvm_s390_handle_requests(struct kvm_vcpu *vcpu)
{
	if (!vcpu->requests)
		return 0;
retry:
	kvm_s390_vcpu_request_handled(vcpu);
	/*
	 * We use MMU_RELOAD just to re-arm the ipte notifier for the
	 * guest prefix page. gmap_ipte_notify will wait on the ptl lock.
	 * This ensures that the ipte instruction for this request has
	 * already finished. We might race against a second unmapper that
	 * wants to set the blocking bit. Lets just retry the request loop.
	 */
	if (kvm_check_request(KVM_REQ_MMU_RELOAD, vcpu)) {
		int rc;
		rc = gmap_ipte_notify(vcpu->arch.gmap,
				      kvm_s390_get_prefix(vcpu),
				      PAGE_SIZE * 2);
		if (rc)
			return rc;
		goto retry;
	}

	if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu)) {
		vcpu->arch.sie_block->ihcpu = 0xffff;
		goto retry;
	}

	if (kvm_check_request(KVM_REQ_ENABLE_IBS, vcpu)) {
		if (!ibs_enabled(vcpu)) {
			trace_kvm_s390_enable_disable_ibs(vcpu->vcpu_id, 1);
			atomic_set_mask(CPUSTAT_IBS,
					&vcpu->arch.sie_block->cpuflags);
		}
		goto retry;
	}

	if (kvm_check_request(KVM_REQ_DISABLE_IBS, vcpu)) {
		if (ibs_enabled(vcpu)) {
			trace_kvm_s390_enable_disable_ibs(vcpu->vcpu_id, 0);
			atomic_clear_mask(CPUSTAT_IBS,
					  &vcpu->arch.sie_block->cpuflags);
		}
		goto retry;
	}

	/* nothing to do, just clear the request */
	clear_bit(KVM_REQ_UNHALT, &vcpu->requests);

	return 0;
}

fa576c58
TH
1788/**
1789 * kvm_arch_fault_in_page - fault-in guest page if necessary
1790 * @vcpu: The corresponding virtual cpu
1791 * @gpa: Guest physical address
1792 * @writable: Whether the page should be writable or not
1793 *
1794 * Make sure that a guest page has been faulted-in on the host.
1795 *
1796 * Return: Zero on success, negative error code otherwise.
1797 */
1798long kvm_arch_fault_in_page(struct kvm_vcpu *vcpu, gpa_t gpa, int writable)
24eb3a82 1799{
527e30b4
MS
1800 return gmap_fault(vcpu->arch.gmap, gpa,
1801 writable ? FAULT_FLAG_WRITE : 0);
24eb3a82
DD
1802}
1803
3c038e6b
DD
1804static void __kvm_inject_pfault_token(struct kvm_vcpu *vcpu, bool start_token,
1805 unsigned long token)
1806{
1807 struct kvm_s390_interrupt inti;
383d0b05 1808 struct kvm_s390_irq irq;
3c038e6b
DD
1809
1810 if (start_token) {
383d0b05
JF
1811 irq.u.ext.ext_params2 = token;
1812 irq.type = KVM_S390_INT_PFAULT_INIT;
1813 WARN_ON_ONCE(kvm_s390_inject_vcpu(vcpu, &irq));
3c038e6b
DD
1814 } else {
1815 inti.type = KVM_S390_INT_PFAULT_DONE;
383d0b05 1816 inti.parm64 = token;
3c038e6b
DD
1817 WARN_ON_ONCE(kvm_s390_inject_vm(vcpu->kvm, &inti));
1818 }
1819}
1820
1821void kvm_arch_async_page_not_present(struct kvm_vcpu *vcpu,
1822 struct kvm_async_pf *work)
1823{
1824 trace_kvm_s390_pfault_init(vcpu, work->arch.pfault_token);
1825 __kvm_inject_pfault_token(vcpu, true, work->arch.pfault_token);
1826}
1827
1828void kvm_arch_async_page_present(struct kvm_vcpu *vcpu,
1829 struct kvm_async_pf *work)
1830{
1831 trace_kvm_s390_pfault_done(vcpu, work->arch.pfault_token);
1832 __kvm_inject_pfault_token(vcpu, false, work->arch.pfault_token);
1833}
1834
1835void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu,
1836 struct kvm_async_pf *work)
1837{
1838 /* s390 will always inject the page directly */
1839}
1840
1841bool kvm_arch_can_inject_async_page_present(struct kvm_vcpu *vcpu)
1842{
1843 /*
1844 * s390 will always inject the page directly,
1845 * but we still want check_async_completion to clean up
1846 */
1847 return true;
1848}
1849
1850static int kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu)
1851{
1852 hva_t hva;
1853 struct kvm_arch_async_pf arch;
1854 int rc;
1855
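/*
 * Only set up a pseudo page fault if the guest has enabled the
 * facility: the token must be valid, the current PSW must match the
 * guest-specified select/compare values, external interrupts must be
 * enabled with no interrupt already pending, the corresponding
 * subclass mask bit in CR0 must be set, and the gmap must allow
 * pfaults.
 */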
1856 if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
1857 return 0;
1858 if ((vcpu->arch.sie_block->gpsw.mask & vcpu->arch.pfault_select) !=
1859 vcpu->arch.pfault_compare)
1860 return 0;
1861 if (psw_extint_disabled(vcpu))
1862 return 0;
9a022067 1863 if (kvm_s390_vcpu_has_irq(vcpu, 0))
3c038e6b
DD
1864 return 0;
1865 if (!(vcpu->arch.sie_block->gcr[0] & 0x200ul))
1866 return 0;
1867 if (!vcpu->arch.gmap->pfault_enabled)
1868 return 0;
1869
81480cc1
HC
1870 hva = gfn_to_hva(vcpu->kvm, gpa_to_gfn(current->thread.gmap_addr));
1871 hva += current->thread.gmap_addr & ~PAGE_MASK;
1872 if (read_guest_real(vcpu, vcpu->arch.pfault_token, &arch.pfault_token, 8))
3c038e6b
DD
1873 return 0;
1874
1875 rc = kvm_setup_async_pf(vcpu, current->thread.gmap_addr, hva, &arch);
1876 return rc;
1877}
1878
3fb4c40f 1879static int vcpu_pre_run(struct kvm_vcpu *vcpu)
b0c632db 1880{
3fb4c40f 1881 int rc, cpuflags;
e168bf8d 1882
3c038e6b
DD
1883 /*
1884 * On s390, notifications for arriving pages are delivered directly
1885 * to the guest, but the housekeeping for completed pfaults is
1886 * handled outside the worker.
1887 */
1888 kvm_check_async_pf_completion(vcpu);
1889
5a32c1af 1890 memcpy(&vcpu->arch.sie_block->gg14, &vcpu->run->s.regs.gprs[14], 16);
b0c632db
HC
1891
1892 if (need_resched())
1893 schedule();
1894
d3a73acb 1895 if (test_cpu_flag(CIF_MCCK_PENDING))
71cde587
CB
1896 s390_handle_mcck();
1897
79395031
JF
1898 if (!kvm_is_ucontrol(vcpu->kvm)) {
1899 rc = kvm_s390_deliver_pending_interrupts(vcpu);
1900 if (rc)
1901 return rc;
1902 }
0ff31867 1903
2c70fe44
CB
1904 rc = kvm_s390_handle_requests(vcpu);
1905 if (rc)
1906 return rc;
1907
27291e21
DH
1908 if (guestdbg_enabled(vcpu)) {
1909 kvm_s390_backup_guest_per_regs(vcpu);
1910 kvm_s390_patch_guest_per_regs(vcpu);
1911 }
1912
b0c632db 1913 vcpu->arch.sie_block->icptcode = 0;
3fb4c40f
TH
1914 cpuflags = atomic_read(&vcpu->arch.sie_block->cpuflags);
1915 VCPU_EVENT(vcpu, 6, "entering sie flags %x", cpuflags);
1916 trace_kvm_s390_sie_enter(vcpu, cpuflags);
2b29a9fd 1917
3fb4c40f
TH
1918 return 0;
1919}
1920
492d8642
TH
1921static int vcpu_post_run_fault_in_sie(struct kvm_vcpu *vcpu)
1922{
1923 psw_t *psw = &vcpu->arch.sie_block->gpsw;
1924 u8 opcode;
1925 int rc;
1926
1927 VCPU_EVENT(vcpu, 3, "%s", "fault in sie instruction");
1928 trace_kvm_s390_sie_fault(vcpu);
1929
1930 /*
1931 * We want to inject an addressing exception, which is defined as a
1932 * suppressing or terminating exception. However, since we came here
1933 * by a DAT access exception, the PSW still points to the faulting
1934 * instruction since DAT exceptions are nullifying. So we've got
1935 * to look up the current opcode to get the length of the instruction
1936 * to be able to forward the PSW.
1937 */
8ae04b8f 1938 rc = read_guest(vcpu, psw->addr, 0, &opcode, 1);
492d8642
TH
1939 if (rc)
1940 return kvm_s390_inject_prog_cond(vcpu, rc);
1941 psw->addr = __rewind_psw(*psw, -insn_length(opcode));
1942
1943 return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
1944}
1945
3fb4c40f
TH
1946static int vcpu_post_run(struct kvm_vcpu *vcpu, int exit_reason)
1947{
24eb3a82 1948 int rc = -1;
2b29a9fd
DD
1949
1950 VCPU_EVENT(vcpu, 6, "exit sie icptcode %d",
1951 vcpu->arch.sie_block->icptcode);
1952 trace_kvm_s390_sie_exit(vcpu, vcpu->arch.sie_block->icptcode);
1953
27291e21
DH
1954 if (guestdbg_enabled(vcpu))
1955 kvm_s390_restore_guest_per_regs(vcpu);
1956
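/*
 * A non-negative exit_reason means SIE exited with a regular
 * intercept; a negative value indicates a host-side fault while the
 * guest was running, which is resolved below.
 */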
3fb4c40f 1957 if (exit_reason >= 0) {
7c470539 1958 rc = 0;
210b1607
TH
1959 } else if (kvm_is_ucontrol(vcpu->kvm)) {
1960 vcpu->run->exit_reason = KVM_EXIT_S390_UCONTROL;
1961 vcpu->run->s390_ucontrol.trans_exc_code =
1962 current->thread.gmap_addr;
1963 vcpu->run->s390_ucontrol.pgm_code = 0x10;
1964 rc = -EREMOTE;
24eb3a82
DD
1965
1966 } else if (current->thread.gmap_pfault) {
3c038e6b 1967 trace_kvm_s390_major_guest_pfault(vcpu);
24eb3a82 1968 current->thread.gmap_pfault = 0;
fa576c58 1969 if (kvm_arch_setup_async_pf(vcpu)) {
24eb3a82 1970 rc = 0;
fa576c58
TH
1971 } else {
1972 gpa_t gpa = current->thread.gmap_addr;
1973 rc = kvm_arch_fault_in_page(vcpu, gpa, 1);
1974 }
24eb3a82
DD
1975 }
1976
492d8642
TH
1977 if (rc == -1)
1978 rc = vcpu_post_run_fault_in_sie(vcpu);
b0c632db 1979
5a32c1af 1980 memcpy(&vcpu->run->s.regs.gprs[14], &vcpu->arch.sie_block->gg14, 16);
3fb4c40f 1981
a76ccff6
TH
1982 if (rc == 0) {
1983 if (kvm_is_ucontrol(vcpu->kvm))
2955c83f
CB
1984 /* Don't exit for host interrupts. */
1985 rc = vcpu->arch.sie_block->icptcode ? -EOPNOTSUPP : 0;
a76ccff6
TH
1986 else
1987 rc = kvm_handle_sie_intercept(vcpu);
1988 }
1989
3fb4c40f
TH
1990 return rc;
1991}
1992
1993static int __vcpu_run(struct kvm_vcpu *vcpu)
1994{
1995 int rc, exit_reason;
1996
800c1065
TH
1997 /*
1998 * We try to hold kvm->srcu during most of vcpu_run (except when
1999 * running the guest), so that memslots (and other stuff) are protected.
2000 */
2001 vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
2002
a76ccff6
TH
2003 do {
2004 rc = vcpu_pre_run(vcpu);
2005 if (rc)
2006 break;
3fb4c40f 2007
800c1065 2008 srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
a76ccff6
TH
2009 /*
2010 * As PF_VCPU will be used in the fault handler, there must be
2011 * no uaccess between guest_enter and guest_exit.
2012 */
0097d12e
CB
2013 local_irq_disable();
2014 __kvm_guest_enter();
2015 local_irq_enable();
a76ccff6
TH
2016 exit_reason = sie64a(vcpu->arch.sie_block,
2017 vcpu->run->s.regs.gprs);
0097d12e
CB
2018 local_irq_disable();
2019 __kvm_guest_exit();
2020 local_irq_enable();
800c1065 2021 vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
a76ccff6
TH
2022
2023 rc = vcpu_post_run(vcpu, exit_reason);
27291e21 2024 } while (!signal_pending(current) && !guestdbg_exit_pending(vcpu) && !rc);
3fb4c40f 2025
800c1065 2026 srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
e168bf8d 2027 return rc;
b0c632db
HC
2028}
2029
b028ee3e
DH
2030static void sync_regs(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
2031{
2032 vcpu->arch.sie_block->gpsw.mask = kvm_run->psw_mask;
2033 vcpu->arch.sie_block->gpsw.addr = kvm_run->psw_addr;
2034 if (kvm_run->kvm_dirty_regs & KVM_SYNC_PREFIX)
2035 kvm_s390_set_prefix(vcpu, kvm_run->s.regs.prefix);
2036 if (kvm_run->kvm_dirty_regs & KVM_SYNC_CRS) {
2037 memcpy(&vcpu->arch.sie_block->gcr, &kvm_run->s.regs.crs, 128);
d3d692c8
DH
2038 /* some control register changes require a tlb flush */
2039 kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
b028ee3e
DH
2040 }
2041 if (kvm_run->kvm_dirty_regs & KVM_SYNC_ARCH0) {
2042 vcpu->arch.sie_block->cputm = kvm_run->s.regs.cputm;
2043 vcpu->arch.sie_block->ckc = kvm_run->s.regs.ckc;
2044 vcpu->arch.sie_block->todpr = kvm_run->s.regs.todpr;
2045 vcpu->arch.sie_block->pp = kvm_run->s.regs.pp;
2046 vcpu->arch.sie_block->gbea = kvm_run->s.regs.gbea;
2047 }
2048 if (kvm_run->kvm_dirty_regs & KVM_SYNC_PFAULT) {
2049 vcpu->arch.pfault_token = kvm_run->s.regs.pft;
2050 vcpu->arch.pfault_select = kvm_run->s.regs.pfs;
2051 vcpu->arch.pfault_compare = kvm_run->s.regs.pfc;
9fbd8082
DH
2052 if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
2053 kvm_clear_async_pf_completion_queue(vcpu);
b028ee3e
DH
2054 }
2055 kvm_run->kvm_dirty_regs = 0;
2056}
2057
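/* Mirror the current guest state back into kvm_run for userspace. */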
2058static void store_regs(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
2059{
2060 kvm_run->psw_mask = vcpu->arch.sie_block->gpsw.mask;
2061 kvm_run->psw_addr = vcpu->arch.sie_block->gpsw.addr;
2062 kvm_run->s.regs.prefix = kvm_s390_get_prefix(vcpu);
2063 memcpy(&kvm_run->s.regs.crs, &vcpu->arch.sie_block->gcr, 128);
2064 kvm_run->s.regs.cputm = vcpu->arch.sie_block->cputm;
2065 kvm_run->s.regs.ckc = vcpu->arch.sie_block->ckc;
2066 kvm_run->s.regs.todpr = vcpu->arch.sie_block->todpr;
2067 kvm_run->s.regs.pp = vcpu->arch.sie_block->pp;
2068 kvm_run->s.regs.gbea = vcpu->arch.sie_block->gbea;
2069 kvm_run->s.regs.pft = vcpu->arch.pfault_token;
2070 kvm_run->s.regs.pfs = vcpu->arch.pfault_select;
2071 kvm_run->s.regs.pfc = vcpu->arch.pfault_compare;
2072}
2073
b0c632db
HC
2074int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
2075{
8f2abe6a 2076 int rc;
b0c632db
HC
2077 sigset_t sigsaved;
2078
27291e21
DH
2079 if (guestdbg_exit_pending(vcpu)) {
2080 kvm_s390_prepare_debug_exit(vcpu);
2081 return 0;
2082 }
2083
b0c632db
HC
2084 if (vcpu->sigset_active)
2085 sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);
2086
6352e4d2
DH
2087 if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm)) {
2088 kvm_s390_vcpu_start(vcpu);
2089 } else if (is_vcpu_stopped(vcpu)) {
2090 pr_err_ratelimited("kvm-s390: can't run stopped vcpu %d\n",
2091 vcpu->vcpu_id);
2092 return -EINVAL;
2093 }
b0c632db 2094
b028ee3e 2095 sync_regs(vcpu, kvm_run);
d7b0b5eb 2096
dab4079d 2097 might_fault();
a76ccff6 2098 rc = __vcpu_run(vcpu);
9ace903d 2099
b1d16c49
CE
2100 if (signal_pending(current) && !rc) {
2101 kvm_run->exit_reason = KVM_EXIT_INTR;
8f2abe6a 2102 rc = -EINTR;
b1d16c49 2103 }
8f2abe6a 2104
27291e21
DH
2105 if (guestdbg_exit_pending(vcpu) && !rc) {
2106 kvm_s390_prepare_debug_exit(vcpu);
2107 rc = 0;
2108 }
2109
b8e660b8 2110 if (rc == -EOPNOTSUPP) {
8f2abe6a
CB
2111 /* intercept cannot be handled in-kernel, prepare kvm_run */
2112 kvm_run->exit_reason = KVM_EXIT_S390_SIEIC;
2113 kvm_run->s390_sieic.icptcode = vcpu->arch.sie_block->icptcode;
8f2abe6a
CB
2114 kvm_run->s390_sieic.ipa = vcpu->arch.sie_block->ipa;
2115 kvm_run->s390_sieic.ipb = vcpu->arch.sie_block->ipb;
2116 rc = 0;
2117 }
2118
2119 if (rc == -EREMOTE) {
2120 /* intercept was handled, but userspace support is needed;
2121 * kvm_run has been prepared by the handler */
2122 rc = 0;
2123 }
b0c632db 2124
b028ee3e 2125 store_regs(vcpu, kvm_run);
d7b0b5eb 2126
b0c632db
HC
2127 if (vcpu->sigset_active)
2128 sigprocmask(SIG_SETMASK, &sigsaved, NULL);
2129
b0c632db 2130 vcpu->stat.exit_userspace++;
7e8e6ab4 2131 return rc;
b0c632db
HC
2132}
2133
b0c632db
HC
2134/*
2135 * store status at address
2136 * we have two special cases:
2137 * KVM_S390_STORE_STATUS_NOADDR: -> 0x1200 on 64 bit
2138 * KVM_S390_STORE_STATUS_PREFIXED: -> prefix
2139 */
d0bce605 2140int kvm_s390_store_status_unloaded(struct kvm_vcpu *vcpu, unsigned long gpa)
b0c632db 2141{
092670cd 2142 unsigned char archmode = 1;
fda902cb 2143 unsigned int px;
178bd789 2144 u64 clkcomp;
d0bce605 2145 int rc;
b0c632db 2146
d0bce605
HC
2147 if (gpa == KVM_S390_STORE_STATUS_NOADDR) {
2148 if (write_guest_abs(vcpu, 163, &archmode, 1))
b0c632db 2149 return -EFAULT;
d0bce605
HC
2150 gpa = SAVE_AREA_BASE;
2151 } else if (gpa == KVM_S390_STORE_STATUS_PREFIXED) {
2152 if (write_guest_real(vcpu, 163, &archmode, 1))
b0c632db 2153 return -EFAULT;
d0bce605
HC
2154 gpa = kvm_s390_real_to_abs(vcpu, SAVE_AREA_BASE);
2155 }
2156 rc = write_guest_abs(vcpu, gpa + offsetof(struct save_area, fp_regs),
2157 vcpu->arch.guest_fpregs.fprs, 128);
2158 rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, gp_regs),
2159 vcpu->run->s.regs.gprs, 128);
2160 rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, psw),
2161 &vcpu->arch.sie_block->gpsw, 16);
fda902cb 2162 px = kvm_s390_get_prefix(vcpu);
d0bce605 2163 rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, pref_reg),
fda902cb 2164 &px, 4);
d0bce605
HC
2165 rc |= write_guest_abs(vcpu,
2166 gpa + offsetof(struct save_area, fp_ctrl_reg),
2167 &vcpu->arch.guest_fpregs.fpc, 4);
2168 rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, tod_reg),
2169 &vcpu->arch.sie_block->todpr, 4);
2170 rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, timer),
2171 &vcpu->arch.sie_block->cputm, 8);
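/* the save area holds the clock comparator shifted right by 8 bits */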
178bd789 2172 clkcomp = vcpu->arch.sie_block->ckc >> 8;
d0bce605
HC
2173 rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, clk_cmp),
2174 &clkcomp, 8);
2175 rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, acc_regs),
2176 &vcpu->run->s.regs.acrs, 64);
2177 rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, ctrl_regs),
2178 &vcpu->arch.sie_block->gcr, 128);
2179 return rc ? -EFAULT : 0;
b0c632db
HC
2180}
2181
e879892c
TH
2182int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
2183{
2184 /*
2185 * The guest FPRS and ACRS are in the host FPRS/ACRS due to the lazy
2186 * copying in vcpu load/put. Let's update our copies before we save
2187 * them into the save area.
2188 */
2189 save_fp_ctl(&vcpu->arch.guest_fpregs.fpc);
2190 save_fp_regs(vcpu->arch.guest_fpregs.fprs);
2191 save_access_regs(vcpu->run->s.regs.acrs);
2192
2193 return kvm_s390_store_status_unloaded(vcpu, addr);
2194}
2195
bc17de7c
EF
2196/*
2197 * store additional status at address
2198 */
2199int kvm_s390_store_adtl_status_unloaded(struct kvm_vcpu *vcpu,
2200 unsigned long gpa)
2201{
2202 /* Only bits 0-53 are used for address formation */
2203 if (!(gpa & ~0x3ff))
2204 return 0;
2205
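/* 32 vector registers of 16 bytes each, i.e. 512 bytes in total */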
2206 return write_guest_abs(vcpu, gpa & ~0x3ff,
2207 (void *)&vcpu->run->s.regs.vrs, 512);
2208}
2209
2210int kvm_s390_vcpu_store_adtl_status(struct kvm_vcpu *vcpu, unsigned long addr)
2211{
2212 if (!test_kvm_facility(vcpu->kvm, 129))
2213 return 0;
2214
2215 /*
2216 * The guest VXRS are in the host VXRS due to the lazy
2217 * copying in vcpu load/put. Let's update our copies before we save
2218 * them into the save area.
2219 */
2220 save_vx_regs((__vector128 *)&vcpu->run->s.regs.vrs);
2221
2222 return kvm_s390_store_adtl_status_unloaded(vcpu, addr);
2223}
2224
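/*
 * The helpers below first cancel any still-pending request for the
 * opposite IBS state and only then queue and kick the new request,
 * so enable and disable cannot be processed out of order.
 */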
8ad35755
DH
2225static void __disable_ibs_on_vcpu(struct kvm_vcpu *vcpu)
2226{
2227 kvm_check_request(KVM_REQ_ENABLE_IBS, vcpu);
8e236546 2228 kvm_s390_sync_request(KVM_REQ_DISABLE_IBS, vcpu);
8ad35755
DH
2229}
2230
2231static void __disable_ibs_on_all_vcpus(struct kvm *kvm)
2232{
2233 unsigned int i;
2234 struct kvm_vcpu *vcpu;
2235
2236 kvm_for_each_vcpu(i, vcpu, kvm) {
2237 __disable_ibs_on_vcpu(vcpu);
2238 }
2239}
2240
2241static void __enable_ibs_on_vcpu(struct kvm_vcpu *vcpu)
2242{
2243 kvm_check_request(KVM_REQ_DISABLE_IBS, vcpu);
8e236546 2244 kvm_s390_sync_request(KVM_REQ_ENABLE_IBS, vcpu);
8ad35755
DH
2245}
2246
6852d7b6
DH
2247void kvm_s390_vcpu_start(struct kvm_vcpu *vcpu)
2248{
8ad35755
DH
2249 int i, online_vcpus, started_vcpus = 0;
2250
2251 if (!is_vcpu_stopped(vcpu))
2252 return;
2253
6852d7b6 2254 trace_kvm_s390_vcpu_start_stop(vcpu->vcpu_id, 1);
8ad35755 2255 /* Only one cpu at a time may enter/leave the STOPPED state. */
433b9ee4 2256 spin_lock(&vcpu->kvm->arch.start_stop_lock);
8ad35755
DH
2257 online_vcpus = atomic_read(&vcpu->kvm->online_vcpus);
2258
2259 for (i = 0; i < online_vcpus; i++) {
2260 if (!is_vcpu_stopped(vcpu->kvm->vcpus[i]))
2261 started_vcpus++;
2262 }
2263
2264 if (started_vcpus == 0) {
2265 /* we're the only active VCPU -> speed it up */
2266 __enable_ibs_on_vcpu(vcpu);
2267 } else if (started_vcpus == 1) {
2268 /*
2269 * As we are starting a second VCPU, we have to disable
2270 * the IBS facility on all VCPUs to remove potentially
2271 * outstanding ENABLE requests.
2272 */
2273 __disable_ibs_on_all_vcpus(vcpu->kvm);
2274 }
2275
6852d7b6 2276 atomic_clear_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);
8ad35755
DH
2277 /*
2278 * Another VCPU might have used IBS while we were offline.
2279 * Let's play safe and flush the VCPU at startup.
2280 */
d3d692c8 2281 kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
433b9ee4 2282 spin_unlock(&vcpu->kvm->arch.start_stop_lock);
8ad35755 2283 return;
6852d7b6
DH
2284}
2285
2286void kvm_s390_vcpu_stop(struct kvm_vcpu *vcpu)
2287{
8ad35755
DH
2288 int i, online_vcpus, started_vcpus = 0;
2289 struct kvm_vcpu *started_vcpu = NULL;
2290
2291 if (is_vcpu_stopped(vcpu))
2292 return;
2293
6852d7b6 2294 trace_kvm_s390_vcpu_start_stop(vcpu->vcpu_id, 0);
8ad35755 2295 /* Only one cpu at a time may enter/leave the STOPPED state. */
433b9ee4 2296 spin_lock(&vcpu->kvm->arch.start_stop_lock);
8ad35755
DH
2297 online_vcpus = atomic_read(&vcpu->kvm->online_vcpus);
2298
32f5ff63 2299 /* SIGP STOP and SIGP STOP AND STORE STATUS have been fully processed */
6cddd432 2300 kvm_s390_clear_stop_irq(vcpu);
32f5ff63 2301
6cddd432 2302 atomic_set_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);
8ad35755
DH
2303 __disable_ibs_on_vcpu(vcpu);
2304
2305 for (i = 0; i < online_vcpus; i++) {
2306 if (!is_vcpu_stopped(vcpu->kvm->vcpus[i])) {
2307 started_vcpus++;
2308 started_vcpu = vcpu->kvm->vcpus[i];
2309 }
2310 }
2311
2312 if (started_vcpus == 1) {
2313 /*
2314 * As we only have one VCPU left, we want to enable the
2315 * IBS facility for that VCPU to speed it up.
2316 */
2317 __enable_ibs_on_vcpu(started_vcpu);
2318 }
2319
433b9ee4 2320 spin_unlock(&vcpu->kvm->arch.start_stop_lock);
8ad35755 2321 return;
6852d7b6
DH
2322}
2323
d6712df9
CH
2324static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
2325 struct kvm_enable_cap *cap)
2326{
2327 int r;
2328
2329 if (cap->flags)
2330 return -EINVAL;
2331
2332 switch (cap->cap) {
fa6b7fe9
CH
2333 case KVM_CAP_S390_CSS_SUPPORT:
2334 if (!vcpu->kvm->arch.css_support) {
2335 vcpu->kvm->arch.css_support = 1;
2336 trace_kvm_s390_enable_css(vcpu->kvm);
2337 }
2338 r = 0;
2339 break;
d6712df9
CH
2340 default:
2341 r = -EINVAL;
2342 break;
2343 }
2344 return r;
2345}
2346
41408c28
TH
2347static long kvm_s390_guest_mem_op(struct kvm_vcpu *vcpu,
2348 struct kvm_s390_mem_op *mop)
2349{
2350 void __user *uaddr = (void __user *)mop->buf;
2351 void *tmpbuf = NULL;
2352 int r, srcu_idx;
2353 const u64 supported_flags = KVM_S390_MEMOP_F_INJECT_EXCEPTION
2354 | KVM_S390_MEMOP_F_CHECK_ONLY;
2355
2356 if (mop->flags & ~supported_flags)
2357 return -EINVAL;
2358
2359 if (mop->size > MEM_OP_MAX_SIZE)
2360 return -E2BIG;
2361
2362 if (!(mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY)) {
2363 tmpbuf = vmalloc(mop->size);
2364 if (!tmpbuf)
2365 return -ENOMEM;
2366 }
2367
2368 srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
2369
2370 switch (mop->op) {
2371 case KVM_S390_MEMOP_LOGICAL_READ:
2372 if (mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY) {
2373 r = check_gva_range(vcpu, mop->gaddr, mop->ar, mop->size, false);
2374 break;
2375 }
2376 r = read_guest(vcpu, mop->gaddr, mop->ar, tmpbuf, mop->size);
2377 if (r == 0) {
2378 if (copy_to_user(uaddr, tmpbuf, mop->size))
2379 r = -EFAULT;
2380 }
2381 break;
2382 case KVM_S390_MEMOP_LOGICAL_WRITE:
2383 if (mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY) {
2384 r = check_gva_range(vcpu, mop->gaddr, mop->ar, mop->size, true);
2385 break;
2386 }
2387 if (copy_from_user(tmpbuf, uaddr, mop->size)) {
2388 r = -EFAULT;
2389 break;
2390 }
2391 r = write_guest(vcpu, mop->gaddr, mop->ar, tmpbuf, mop->size);
2392 break;
2393 default:
2394 r = -EINVAL;
2395 }
2396
2397 srcu_read_unlock(&vcpu->kvm->srcu, srcu_idx);
2398
2399 if (r > 0 && (mop->flags & KVM_S390_MEMOP_F_INJECT_EXCEPTION) != 0)
2400 kvm_s390_inject_prog_irq(vcpu, &vcpu->arch.pgm);
2401
2402 vfree(tmpbuf);
2403 return r;
2404}
2405
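/*
 * Illustrative sketch only, not part of the kernel source: a userspace
 * caller could drive KVM_S390_MEM_OP roughly like this, assuming
 * vcpu_fd is an open vcpu file descriptor and buf points to a
 * sufficiently large userspace buffer:
 *
 *	struct kvm_s390_mem_op mop = {
 *		.gaddr = guest_addr,
 *		.flags = 0,
 *		.size  = 256,
 *		.op    = KVM_S390_MEMOP_LOGICAL_READ,
 *		.buf   = (__u64)(unsigned long)buf,
 *		.ar    = 0,
 *	};
 *	int rc = ioctl(vcpu_fd, KVM_S390_MEM_OP, &mop);
 *
 * With KVM_S390_MEMOP_F_CHECK_ONLY set, only the access check is
 * performed and buf is never touched; sizes above MEM_OP_MAX_SIZE
 * fail with -E2BIG.
 */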
b0c632db
HC
2406long kvm_arch_vcpu_ioctl(struct file *filp,
2407 unsigned int ioctl, unsigned long arg)
2408{
2409 struct kvm_vcpu *vcpu = filp->private_data;
2410 void __user *argp = (void __user *)arg;
800c1065 2411 int idx;
bc923cc9 2412 long r;
b0c632db 2413
93736624 2414 switch (ioctl) {
47b43c52
JF
2415 case KVM_S390_IRQ: {
2416 struct kvm_s390_irq s390irq;
2417
2418 r = -EFAULT;
2419 if (copy_from_user(&s390irq, argp, sizeof(s390irq)))
2420 break;
2421 r = kvm_s390_inject_vcpu(vcpu, &s390irq);
2422 break;
2423 }
93736624 2424 case KVM_S390_INTERRUPT: {
ba5c1e9b 2425 struct kvm_s390_interrupt s390int;
383d0b05 2426 struct kvm_s390_irq s390irq;
ba5c1e9b 2427
93736624 2428 r = -EFAULT;
ba5c1e9b 2429 if (copy_from_user(&s390int, argp, sizeof(s390int)))
93736624 2430 break;
383d0b05
JF
2431 if (s390int_to_s390irq(&s390int, &s390irq))
2432 return -EINVAL;
2433 r = kvm_s390_inject_vcpu(vcpu, &s390irq);
93736624 2434 break;
ba5c1e9b 2435 }
b0c632db 2436 case KVM_S390_STORE_STATUS:
800c1065 2437 idx = srcu_read_lock(&vcpu->kvm->srcu);
bc923cc9 2438 r = kvm_s390_vcpu_store_status(vcpu, arg);
800c1065 2439 srcu_read_unlock(&vcpu->kvm->srcu, idx);
bc923cc9 2440 break;
b0c632db
HC
2441 case KVM_S390_SET_INITIAL_PSW: {
2442 psw_t psw;
2443
bc923cc9 2444 r = -EFAULT;
b0c632db 2445 if (copy_from_user(&psw, argp, sizeof(psw)))
bc923cc9
AK
2446 break;
2447 r = kvm_arch_vcpu_ioctl_set_initial_psw(vcpu, psw);
2448 break;
b0c632db
HC
2449 }
2450 case KVM_S390_INITIAL_RESET:
bc923cc9
AK
2451 r = kvm_arch_vcpu_ioctl_initial_reset(vcpu);
2452 break;
14eebd91
CO
2453 case KVM_SET_ONE_REG:
2454 case KVM_GET_ONE_REG: {
2455 struct kvm_one_reg reg;
2456 r = -EFAULT;
2457 if (copy_from_user(&reg, argp, sizeof(reg)))
2458 break;
2459 if (ioctl == KVM_SET_ONE_REG)
2460 r = kvm_arch_vcpu_ioctl_set_one_reg(vcpu, &reg);
2461 else
2462 r = kvm_arch_vcpu_ioctl_get_one_reg(vcpu, &reg);
2463 break;
2464 }
27e0393f
CO
2465#ifdef CONFIG_KVM_S390_UCONTROL
2466 case KVM_S390_UCAS_MAP: {
2467 struct kvm_s390_ucas_mapping ucasmap;
2468
2469 if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
2470 r = -EFAULT;
2471 break;
2472 }
2473
2474 if (!kvm_is_ucontrol(vcpu->kvm)) {
2475 r = -EINVAL;
2476 break;
2477 }
2478
2479 r = gmap_map_segment(vcpu->arch.gmap, ucasmap.user_addr,
2480 ucasmap.vcpu_addr, ucasmap.length);
2481 break;
2482 }
2483 case KVM_S390_UCAS_UNMAP: {
2484 struct kvm_s390_ucas_mapping ucasmap;
2485
2486 if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
2487 r = -EFAULT;
2488 break;
2489 }
2490
2491 if (!kvm_is_ucontrol(vcpu->kvm)) {
2492 r = -EINVAL;
2493 break;
2494 }
2495
2496 r = gmap_unmap_segment(vcpu->arch.gmap, ucasmap.vcpu_addr,
2497 ucasmap.length);
2498 break;
2499 }
2500#endif
ccc7910f 2501 case KVM_S390_VCPU_FAULT: {
527e30b4 2502 r = gmap_fault(vcpu->arch.gmap, arg, 0);
ccc7910f
CO
2503 break;
2504 }
d6712df9
CH
2505 case KVM_ENABLE_CAP:
2506 {
2507 struct kvm_enable_cap cap;
2508 r = -EFAULT;
2509 if (copy_from_user(&cap, argp, sizeof(cap)))
2510 break;
2511 r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap);
2512 break;
2513 }
41408c28
TH
2514 case KVM_S390_MEM_OP: {
2515 struct kvm_s390_mem_op mem_op;
2516
2517 if (copy_from_user(&mem_op, argp, sizeof(mem_op)) == 0)
2518 r = kvm_s390_guest_mem_op(vcpu, &mem_op);
2519 else
2520 r = -EFAULT;
2521 break;
2522 }
816c7667
JF
2523 case KVM_S390_SET_IRQ_STATE: {
2524 struct kvm_s390_irq_state irq_state;
2525
2526 r = -EFAULT;
2527 if (copy_from_user(&irq_state, argp, sizeof(irq_state)))
2528 break;
2529 if (irq_state.len > VCPU_IRQS_MAX_BUF ||
2530 irq_state.len == 0 ||
2531 irq_state.len % sizeof(struct kvm_s390_irq) > 0) {
2532 r = -EINVAL;
2533 break;
2534 }
2535 r = kvm_s390_set_irq_state(vcpu,
2536 (void __user *) irq_state.buf,
2537 irq_state.len);
2538 break;
2539 }
2540 case KVM_S390_GET_IRQ_STATE: {
2541 struct kvm_s390_irq_state irq_state;
2542
2543 r = -EFAULT;
2544 if (copy_from_user(&irq_state, argp, sizeof(irq_state)))
2545 break;
2546 if (irq_state.len == 0) {
2547 r = -EINVAL;
2548 break;
2549 }
2550 r = kvm_s390_get_irq_state(vcpu,
2551 (__u8 __user *) irq_state.buf,
2552 irq_state.len);
2553 break;
2554 }
b0c632db 2555 default:
3e6afcf1 2556 r = -ENOTTY;
b0c632db 2557 }
bc923cc9 2558 return r;
b0c632db
HC
2559}
2560
5b1c1493
CO
2561int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
2562{
2563#ifdef CONFIG_KVM_S390_UCONTROL
2564 if ((vmf->pgoff == KVM_S390_SIE_PAGE_OFFSET)
2565 && (kvm_is_ucontrol(vcpu->kvm))) {
2566 vmf->page = virt_to_page(vcpu->arch.sie_block);
2567 get_page(vmf->page);
2568 return 0;
2569 }
2570#endif
2571 return VM_FAULT_SIGBUS;
2572}
2573
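/*
 * s390 keeps no architecture-specific per-memslot state, so there is
 * nothing to set up here.
 */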
5587027c
AK
2574int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
2575 unsigned long npages)
db3fe4eb
TY
2576{
2577 return 0;
2578}
2579
b0c632db 2580/* Section: memory related */
f7784b8e
MT
2581int kvm_arch_prepare_memory_region(struct kvm *kvm,
2582 struct kvm_memory_slot *memslot,
7b6195a9
TY
2583 struct kvm_userspace_memory_region *mem,
2584 enum kvm_mr_change change)
b0c632db 2585{
dd2887e7
NW
2586 /* A few sanity checks. Memory slots have to start and end at a
2587 segment boundary (1MB). The memory in userland may be fragmented
2588 across different vmas. It is okay to mmap() and munmap() memory in
2589 this slot at any time after this call */
b0c632db 2590
598841ca 2591 if (mem->userspace_addr & 0xffffful)
b0c632db
HC
2592 return -EINVAL;
2593
598841ca 2594 if (mem->memory_size & 0xffffful)
b0c632db
HC
2595 return -EINVAL;
2596
f7784b8e
MT
2597 return 0;
2598}
2599
2600void kvm_arch_commit_memory_region(struct kvm *kvm,
2601 struct kvm_userspace_memory_region *mem,
8482644a
TY
2602 const struct kvm_memory_slot *old,
2603 enum kvm_mr_change change)
f7784b8e 2604{
f7850c92 2605 int rc;
f7784b8e 2606
2cef4deb
CB
2607 /* If the basics of the memslot do not change, we do not want
2608 * to update the gmap. Every update causes several unnecessary
2609 * segment translation exceptions. This is usually handled just
2610 * fine by the normal fault handler + gmap, but it will also
2611 * cause faults on the prefix page of running guest CPUs.
2612 */
2613 if (old->userspace_addr == mem->userspace_addr &&
2614 old->base_gfn * PAGE_SIZE == mem->guest_phys_addr &&
2615 old->npages * PAGE_SIZE == mem->memory_size)
2616 return;
598841ca
CO
2617
2618 rc = gmap_map_segment(kvm->arch.gmap, mem->userspace_addr,
2619 mem->guest_phys_addr, mem->memory_size);
2620 if (rc)
f7850c92 2621 printk(KERN_WARNING "kvm-s390: failed to commit memory region\n");
598841ca 2622 return;
b0c632db
HC
2623}
2624
b0c632db
HC
2625static int __init kvm_s390_init(void)
2626{
9d8d5786 2627 return kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
b0c632db
HC
2628}
2629
2630static void __exit kvm_s390_exit(void)
2631{
2632 kvm_exit();
2633}
2634
2635module_init(kvm_s390_init);
2636module_exit(kvm_s390_exit);
566af940
CH
2637
2638/*
2639 * Enable autoloading of the kvm module.
2640 * Note that we add the module alias here instead of virt/kvm/kvm_main.c
2641 * since x86 takes a different approach.
2642 */
2643#include <linux/miscdevice.h>
2644MODULE_ALIAS_MISCDEV(KVM_MINOR);
2645MODULE_ALIAS("devname:kvm");