/*
 * hosting zSeries kernel virtual machines
 *
 * Copyright IBM Corp. 2008, 2009
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (version 2 only)
 * as published by the Free Software Foundation.
 *
 *    Author(s): Carsten Otte <cotte@de.ibm.com>
 *               Christian Borntraeger <borntraeger@de.ibm.com>
 *               Heiko Carstens <heiko.carstens@de.ibm.com>
 *               Christian Ehrhardt <ehrhardt@de.ibm.com>
 *               Jason J. Herne <jjherne@us.ibm.com>
 */

#include <linux/compiler.h>
#include <linux/err.h>
#include <linux/fs.h>
#include <linux/hrtimer.h>
#include <linux/init.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/timer.h>
#include <asm/asm-offsets.h>
#include <asm/lowcore.h>
#include <asm/pgtable.h>
#include <asm/nmi.h>
#include <asm/switch_to.h>
#include <asm/facility.h>
#include <asm/sclp.h>
#include "kvm-s390.h"
#include "gaccess.h"

#define CREATE_TRACE_POINTS
#include "trace.h"
#include "trace-s390.h"

#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU

struct kvm_stats_debugfs_item debugfs_entries[] = {
	{ "userspace_handled", VCPU_STAT(exit_userspace) },
	{ "exit_null", VCPU_STAT(exit_null) },
	{ "exit_validity", VCPU_STAT(exit_validity) },
	{ "exit_stop_request", VCPU_STAT(exit_stop_request) },
	{ "exit_external_request", VCPU_STAT(exit_external_request) },
	{ "exit_external_interrupt", VCPU_STAT(exit_external_interrupt) },
	{ "exit_instruction", VCPU_STAT(exit_instruction) },
	{ "exit_program_interruption", VCPU_STAT(exit_program_interruption) },
	{ "exit_instr_and_program_int", VCPU_STAT(exit_instr_and_program) },
	{ "halt_wakeup", VCPU_STAT(halt_wakeup) },
	{ "instruction_lctlg", VCPU_STAT(instruction_lctlg) },
	{ "instruction_lctl", VCPU_STAT(instruction_lctl) },
	{ "instruction_stctl", VCPU_STAT(instruction_stctl) },
	{ "instruction_stctg", VCPU_STAT(instruction_stctg) },
	{ "deliver_emergency_signal", VCPU_STAT(deliver_emergency_signal) },
	{ "deliver_external_call", VCPU_STAT(deliver_external_call) },
	{ "deliver_service_signal", VCPU_STAT(deliver_service_signal) },
	{ "deliver_virtio_interrupt", VCPU_STAT(deliver_virtio_interrupt) },
	{ "deliver_stop_signal", VCPU_STAT(deliver_stop_signal) },
	{ "deliver_prefix_signal", VCPU_STAT(deliver_prefix_signal) },
	{ "deliver_restart_signal", VCPU_STAT(deliver_restart_signal) },
	{ "deliver_program_interruption", VCPU_STAT(deliver_program_int) },
	{ "exit_wait_state", VCPU_STAT(exit_wait_state) },
	{ "instruction_pfmf", VCPU_STAT(instruction_pfmf) },
	{ "instruction_stidp", VCPU_STAT(instruction_stidp) },
	{ "instruction_spx", VCPU_STAT(instruction_spx) },
	{ "instruction_stpx", VCPU_STAT(instruction_stpx) },
	{ "instruction_stap", VCPU_STAT(instruction_stap) },
	{ "instruction_storage_key", VCPU_STAT(instruction_storage_key) },
	{ "instruction_ipte_interlock", VCPU_STAT(instruction_ipte_interlock) },
	{ "instruction_stsch", VCPU_STAT(instruction_stsch) },
	{ "instruction_chsc", VCPU_STAT(instruction_chsc) },
	{ "instruction_essa", VCPU_STAT(instruction_essa) },
	{ "instruction_stsi", VCPU_STAT(instruction_stsi) },
	{ "instruction_stfl", VCPU_STAT(instruction_stfl) },
	{ "instruction_tprot", VCPU_STAT(instruction_tprot) },
	{ "instruction_sigp_sense", VCPU_STAT(instruction_sigp_sense) },
	{ "instruction_sigp_sense_running", VCPU_STAT(instruction_sigp_sense_running) },
	{ "instruction_sigp_external_call", VCPU_STAT(instruction_sigp_external_call) },
	{ "instruction_sigp_emergency", VCPU_STAT(instruction_sigp_emergency) },
	{ "instruction_sigp_cond_emergency", VCPU_STAT(instruction_sigp_cond_emergency) },
	{ "instruction_sigp_start", VCPU_STAT(instruction_sigp_start) },
	{ "instruction_sigp_stop", VCPU_STAT(instruction_sigp_stop) },
	{ "instruction_sigp_stop_store_status", VCPU_STAT(instruction_sigp_stop_store_status) },
	{ "instruction_sigp_store_status", VCPU_STAT(instruction_sigp_store_status) },
	{ "instruction_sigp_set_arch", VCPU_STAT(instruction_sigp_arch) },
	{ "instruction_sigp_set_prefix", VCPU_STAT(instruction_sigp_prefix) },
	{ "instruction_sigp_restart", VCPU_STAT(instruction_sigp_restart) },
	{ "instruction_sigp_cpu_reset", VCPU_STAT(instruction_sigp_cpu_reset) },
	{ "instruction_sigp_init_cpu_reset", VCPU_STAT(instruction_sigp_init_cpu_reset) },
	{ "instruction_sigp_unknown", VCPU_STAT(instruction_sigp_unknown) },
	{ "diagnose_10", VCPU_STAT(diagnose_10) },
	{ "diagnose_44", VCPU_STAT(diagnose_44) },
	{ "diagnose_9c", VCPU_STAT(diagnose_9c) },
	{ NULL }
};

unsigned long *vfacilities;
static struct gmap_notifier gmap_notifier;

/* test availability of vfacility */
int test_vfacility(unsigned long nr)
{
	return __test_facility(nr, (void *) vfacilities);
}

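/*
 * Illustrative note: vfacilities mirrors the host's STFLE facility-list
 * bits that are forwarded to guests. Callers pass an architected facility
 * number; for example, the crypto code further down gates the protected-key
 * attributes on facility 76:
 *
 *	if (!test_vfacility(76))
 *		return -EINVAL;
 */
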
/* Section: not file related */
int kvm_arch_hardware_enable(void)
{
	/* every s390 is virtualization enabled ;-) */
	return 0;
}

static void kvm_gmap_notifier(struct gmap *gmap, unsigned long address);

int kvm_arch_hardware_setup(void)
{
	gmap_notifier.notifier_call = kvm_gmap_notifier;
	gmap_register_ipte_notifier(&gmap_notifier);
	return 0;
}

void kvm_arch_hardware_unsetup(void)
{
	gmap_unregister_ipte_notifier(&gmap_notifier);
}

int kvm_arch_init(void *opaque)
{
	/* Register floating interrupt controller interface. */
	return kvm_register_device_ops(&kvm_flic_ops, KVM_DEV_TYPE_FLIC);
}

/* Section: device related */
long kvm_arch_dev_ioctl(struct file *filp,
			unsigned int ioctl, unsigned long arg)
{
	if (ioctl == KVM_S390_ENABLE_SIE)
		return s390_enable_sie();
	return -EINVAL;
}

int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
{
	int r;

	switch (ext) {
	case KVM_CAP_S390_PSW:
	case KVM_CAP_S390_GMAP:
	case KVM_CAP_SYNC_MMU:
#ifdef CONFIG_KVM_S390_UCONTROL
	case KVM_CAP_S390_UCONTROL:
#endif
	case KVM_CAP_ASYNC_PF:
	case KVM_CAP_SYNC_REGS:
	case KVM_CAP_ONE_REG:
	case KVM_CAP_ENABLE_CAP:
	case KVM_CAP_S390_CSS_SUPPORT:
	case KVM_CAP_IRQFD:
	case KVM_CAP_IOEVENTFD:
	case KVM_CAP_DEVICE_CTRL:
	case KVM_CAP_ENABLE_CAP_VM:
	case KVM_CAP_S390_IRQCHIP:
	case KVM_CAP_VM_ATTRIBUTES:
	case KVM_CAP_MP_STATE:
	case KVM_CAP_S390_USER_SIGP:
		r = 1;
		break;
	case KVM_CAP_NR_VCPUS:
	case KVM_CAP_MAX_VCPUS:
		r = KVM_MAX_VCPUS;
		break;
	case KVM_CAP_NR_MEMSLOTS:
		r = KVM_USER_MEM_SLOTS;
		break;
	case KVM_CAP_S390_COW:
		r = MACHINE_HAS_ESOP;
		break;
	default:
		r = 0;
	}
	return r;
}

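/*
 * Userspace usage sketch (illustrative; vm_fd and enable_user_sigp() are
 * assumptions, not part of this file). The capabilities reported above are
 * probed with the generic KVM_CHECK_EXTENSION ioctl:
 *
 *	if (ioctl(vm_fd, KVM_CHECK_EXTENSION, KVM_CAP_S390_USER_SIGP) > 0)
 *		enable_user_sigp();
 */
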
static void kvm_s390_sync_dirty_log(struct kvm *kvm,
				    struct kvm_memory_slot *memslot)
{
	gfn_t cur_gfn, last_gfn;
	unsigned long address;
	struct gmap *gmap = kvm->arch.gmap;

	down_read(&gmap->mm->mmap_sem);
	/* Loop over all guest pages */
	last_gfn = memslot->base_gfn + memslot->npages;
	for (cur_gfn = memslot->base_gfn; cur_gfn <= last_gfn; cur_gfn++) {
		address = gfn_to_hva_memslot(memslot, cur_gfn);

		if (gmap_test_and_clear_dirty(address, gmap))
			mark_page_dirty(kvm, cur_gfn);
	}
	up_read(&gmap->mm->mmap_sem);
}

/* Section: vm related */
/*
 * Get (and clear) the dirty memory log for a memory slot.
 */
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
			       struct kvm_dirty_log *log)
{
	int r;
	unsigned long n;
	struct kvm_memory_slot *memslot;
	int is_dirty = 0;

	mutex_lock(&kvm->slots_lock);

	r = -EINVAL;
	if (log->slot >= KVM_USER_MEM_SLOTS)
		goto out;

	memslot = id_to_memslot(kvm->memslots, log->slot);
	r = -ENOENT;
	if (!memslot->dirty_bitmap)
		goto out;

	kvm_s390_sync_dirty_log(kvm, memslot);
	r = kvm_get_dirty_log(kvm, log, &is_dirty);
	if (r)
		goto out;

	/* Clear the dirty log */
	if (is_dirty) {
		n = kvm_dirty_bitmap_bytes(memslot);
		memset(memslot->dirty_bitmap, 0, n);
	}
	r = 0;
out:
	mutex_unlock(&kvm->slots_lock);
	return r;
}

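/*
 * Userspace usage sketch (illustrative; vm_fd, slot_id and bitmap are
 * assumptions, not part of this file). KVM_GET_DIRTY_LOG fills a
 * user-supplied bitmap with one bit per dirtied page of the memslot, and
 * this implementation clears the kernel's log as a side effect:
 *
 *	struct kvm_dirty_log log = { .slot = slot_id };
 *	log.dirty_bitmap = bitmap;
 *	ioctl(vm_fd, KVM_GET_DIRTY_LOG, &log);
 */
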
static int kvm_vm_ioctl_enable_cap(struct kvm *kvm, struct kvm_enable_cap *cap)
{
	int r;

	if (cap->flags)
		return -EINVAL;

	switch (cap->cap) {
	case KVM_CAP_S390_IRQCHIP:
		kvm->arch.use_irqchip = 1;
		r = 0;
		break;
	case KVM_CAP_S390_USER_SIGP:
		kvm->arch.user_sigp = 1;
		r = 0;
		break;
	default:
		r = -EINVAL;
		break;
	}
	return r;
}

static int kvm_s390_get_mem_control(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	switch (attr->attr) {
	case KVM_S390_VM_MEM_LIMIT_SIZE:
		ret = 0;
		if (put_user(kvm->arch.gmap->asce_end, (u64 __user *)attr->addr))
			ret = -EFAULT;
		break;
	default:
		ret = -ENXIO;
		break;
	}
	return ret;
}

static int kvm_s390_set_mem_control(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;
	unsigned int idx;

	switch (attr->attr) {
	case KVM_S390_VM_MEM_ENABLE_CMMA:
		ret = -EBUSY;
		mutex_lock(&kvm->lock);
		if (atomic_read(&kvm->online_vcpus) == 0) {
			kvm->arch.use_cmma = 1;
			ret = 0;
		}
		mutex_unlock(&kvm->lock);
		break;
	case KVM_S390_VM_MEM_CLR_CMMA:
		mutex_lock(&kvm->lock);
		idx = srcu_read_lock(&kvm->srcu);
		s390_reset_cmma(kvm->arch.gmap->mm);
		srcu_read_unlock(&kvm->srcu, idx);
		mutex_unlock(&kvm->lock);
		ret = 0;
		break;
	case KVM_S390_VM_MEM_LIMIT_SIZE: {
		unsigned long new_limit;

		if (kvm_is_ucontrol(kvm))
			return -EINVAL;

		if (get_user(new_limit, (u64 __user *)attr->addr))
			return -EFAULT;

		if (new_limit > kvm->arch.gmap->asce_end)
			return -E2BIG;

		ret = -EBUSY;
		mutex_lock(&kvm->lock);
		if (atomic_read(&kvm->online_vcpus) == 0) {
			/* gmap_alloc will round the limit up */
			struct gmap *new = gmap_alloc(current->mm, new_limit);

			if (!new) {
				ret = -ENOMEM;
			} else {
				gmap_free(kvm->arch.gmap);
				new->private = kvm;
				kvm->arch.gmap = new;
				ret = 0;
			}
		}
		mutex_unlock(&kvm->lock);
		break;
	}
	default:
		ret = -ENXIO;
		break;
	}
	return ret;
}

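/*
 * Userspace usage sketch (illustrative; vm_fd is an assumption). CMMA must
 * be enabled through the VM attribute interface before any VCPU is created,
 * matching the online_vcpus == 0 check above:
 *
 *	struct kvm_device_attr attr = {
 *		.group = KVM_S390_VM_MEM_CTRL,
 *		.attr  = KVM_S390_VM_MEM_ENABLE_CMMA,
 *	};
 *	ioctl(vm_fd, KVM_SET_DEVICE_ATTR, &attr);
 */
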
static void kvm_s390_vcpu_crypto_setup(struct kvm_vcpu *vcpu);

static int kvm_s390_vm_set_crypto(struct kvm *kvm, struct kvm_device_attr *attr)
{
	struct kvm_vcpu *vcpu;
	int i;

	if (!test_vfacility(76))
		return -EINVAL;

	mutex_lock(&kvm->lock);
	switch (attr->attr) {
	case KVM_S390_VM_CRYPTO_ENABLE_AES_KW:
		get_random_bytes(
			kvm->arch.crypto.crycb->aes_wrapping_key_mask,
			sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
		kvm->arch.crypto.aes_kw = 1;
		break;
	case KVM_S390_VM_CRYPTO_ENABLE_DEA_KW:
		get_random_bytes(
			kvm->arch.crypto.crycb->dea_wrapping_key_mask,
			sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));
		kvm->arch.crypto.dea_kw = 1;
		break;
	case KVM_S390_VM_CRYPTO_DISABLE_AES_KW:
		kvm->arch.crypto.aes_kw = 0;
		memset(kvm->arch.crypto.crycb->aes_wrapping_key_mask, 0,
			sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
		break;
	case KVM_S390_VM_CRYPTO_DISABLE_DEA_KW:
		kvm->arch.crypto.dea_kw = 0;
		memset(kvm->arch.crypto.crycb->dea_wrapping_key_mask, 0,
			sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));
		break;
	default:
		mutex_unlock(&kvm->lock);
		return -ENXIO;
	}

	kvm_for_each_vcpu(i, vcpu, kvm) {
		kvm_s390_vcpu_crypto_setup(vcpu);
		exit_sie(vcpu);
	}
	mutex_unlock(&kvm->lock);
	return 0;
}

static int kvm_s390_set_tod_high(struct kvm *kvm, struct kvm_device_attr *attr)
{
	u8 gtod_high;

	if (copy_from_user(&gtod_high, (void __user *)attr->addr,
			   sizeof(gtod_high)))
		return -EFAULT;

	if (gtod_high != 0)
		return -EINVAL;

	return 0;
}

static int kvm_s390_set_tod_low(struct kvm *kvm, struct kvm_device_attr *attr)
{
	struct kvm_vcpu *cur_vcpu;
	unsigned int vcpu_idx;
	u64 host_tod, gtod;
	int r;

	if (copy_from_user(&gtod, (void __user *)attr->addr, sizeof(gtod)))
		return -EFAULT;

	r = store_tod_clock(&host_tod);
	if (r)
		return r;

	mutex_lock(&kvm->lock);
	kvm->arch.epoch = gtod - host_tod;
	kvm_for_each_vcpu(vcpu_idx, cur_vcpu, kvm) {
		cur_vcpu->arch.sie_block->epoch = kvm->arch.epoch;
		exit_sie(cur_vcpu);
	}
	mutex_unlock(&kvm->lock);
	return 0;
}

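/*
 * Worked example (illustrative): the guest TOD is presented as
 * host TOD + epoch (see kvm_s390_get_tod_low() below). Storing
 * epoch = gtod - host_tod above therefore makes the guest observe exactly
 * the requested clock value: if the host TOD reads 1000 and userspace sets
 * gtod = 4000, the epoch becomes 3000 and the guest sees 1000 + 3000 = 4000
 * from that point on.
 */
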
static int kvm_s390_set_tod(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	if (attr->flags)
		return -EINVAL;

	switch (attr->attr) {
	case KVM_S390_VM_TOD_HIGH:
		ret = kvm_s390_set_tod_high(kvm, attr);
		break;
	case KVM_S390_VM_TOD_LOW:
		ret = kvm_s390_set_tod_low(kvm, attr);
		break;
	default:
		ret = -ENXIO;
		break;
	}
	return ret;
}

static int kvm_s390_get_tod_high(struct kvm *kvm, struct kvm_device_attr *attr)
{
	u8 gtod_high = 0;

	if (copy_to_user((void __user *)attr->addr, &gtod_high,
			 sizeof(gtod_high)))
		return -EFAULT;

	return 0;
}

static int kvm_s390_get_tod_low(struct kvm *kvm, struct kvm_device_attr *attr)
{
	u64 host_tod, gtod;
	int r;

	r = store_tod_clock(&host_tod);
	if (r)
		return r;

	gtod = host_tod + kvm->arch.epoch;
	if (copy_to_user((void __user *)attr->addr, &gtod, sizeof(gtod)))
		return -EFAULT;

	return 0;
}

static int kvm_s390_get_tod(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	if (attr->flags)
		return -EINVAL;

	switch (attr->attr) {
	case KVM_S390_VM_TOD_HIGH:
		ret = kvm_s390_get_tod_high(kvm, attr);
		break;
	case KVM_S390_VM_TOD_LOW:
		ret = kvm_s390_get_tod_low(kvm, attr);
		break;
	default:
		ret = -ENXIO;
		break;
	}
	return ret;
}

static int kvm_s390_vm_set_attr(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	switch (attr->group) {
	case KVM_S390_VM_MEM_CTRL:
		ret = kvm_s390_set_mem_control(kvm, attr);
		break;
	case KVM_S390_VM_TOD:
		ret = kvm_s390_set_tod(kvm, attr);
		break;
	case KVM_S390_VM_CRYPTO:
		ret = kvm_s390_vm_set_crypto(kvm, attr);
		break;
	default:
		ret = -ENXIO;
		break;
	}

	return ret;
}

static int kvm_s390_vm_get_attr(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	switch (attr->group) {
	case KVM_S390_VM_MEM_CTRL:
		ret = kvm_s390_get_mem_control(kvm, attr);
		break;
	case KVM_S390_VM_TOD:
		ret = kvm_s390_get_tod(kvm, attr);
		break;
	default:
		ret = -ENXIO;
		break;
	}

	return ret;
}

static int kvm_s390_vm_has_attr(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	switch (attr->group) {
	case KVM_S390_VM_MEM_CTRL:
		switch (attr->attr) {
		case KVM_S390_VM_MEM_ENABLE_CMMA:
		case KVM_S390_VM_MEM_CLR_CMMA:
		case KVM_S390_VM_MEM_LIMIT_SIZE:
			ret = 0;
			break;
		default:
			ret = -ENXIO;
			break;
		}
		break;
	case KVM_S390_VM_TOD:
		switch (attr->attr) {
		case KVM_S390_VM_TOD_LOW:
		case KVM_S390_VM_TOD_HIGH:
			ret = 0;
			break;
		default:
			ret = -ENXIO;
			break;
		}
		break;
	case KVM_S390_VM_CRYPTO:
		switch (attr->attr) {
		case KVM_S390_VM_CRYPTO_ENABLE_AES_KW:
		case KVM_S390_VM_CRYPTO_ENABLE_DEA_KW:
		case KVM_S390_VM_CRYPTO_DISABLE_AES_KW:
		case KVM_S390_VM_CRYPTO_DISABLE_DEA_KW:
			ret = 0;
			break;
		default:
			ret = -ENXIO;
			break;
		}
		break;
	default:
		ret = -ENXIO;
		break;
	}

	return ret;
}

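/*
 * Userspace usage sketch (illustrative; vm_fd and set_guest_tod() are
 * assumptions, not part of this file). The same struct drives all three
 * attribute ioctls below; KVM_HAS_DEVICE_ATTR returns 0 exactly for the
 * group/attr pairs accepted above:
 *
 *	struct kvm_device_attr attr = {
 *		.group = KVM_S390_VM_TOD,
 *		.attr  = KVM_S390_VM_TOD_LOW,
 *	};
 *	if (!ioctl(vm_fd, KVM_HAS_DEVICE_ATTR, &attr))
 *		set_guest_tod(vm_fd);
 */
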
long kvm_arch_vm_ioctl(struct file *filp,
		       unsigned int ioctl, unsigned long arg)
{
	struct kvm *kvm = filp->private_data;
	void __user *argp = (void __user *)arg;
	struct kvm_device_attr attr;
	int r;

	switch (ioctl) {
	case KVM_S390_INTERRUPT: {
		struct kvm_s390_interrupt s390int;

		r = -EFAULT;
		if (copy_from_user(&s390int, argp, sizeof(s390int)))
			break;
		r = kvm_s390_inject_vm(kvm, &s390int);
		break;
	}
	case KVM_ENABLE_CAP: {
		struct kvm_enable_cap cap;

		r = -EFAULT;
		if (copy_from_user(&cap, argp, sizeof(cap)))
			break;
		r = kvm_vm_ioctl_enable_cap(kvm, &cap);
		break;
	}
	case KVM_CREATE_IRQCHIP: {
		struct kvm_irq_routing_entry routing;

		r = -EINVAL;
		if (kvm->arch.use_irqchip) {
			/* Set up dummy routing. */
			memset(&routing, 0, sizeof(routing));
			kvm_set_irq_routing(kvm, &routing, 0, 0);
			r = 0;
		}
		break;
	}
	case KVM_SET_DEVICE_ATTR: {
		r = -EFAULT;
		if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
			break;
		r = kvm_s390_vm_set_attr(kvm, &attr);
		break;
	}
	case KVM_GET_DEVICE_ATTR: {
		r = -EFAULT;
		if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
			break;
		r = kvm_s390_vm_get_attr(kvm, &attr);
		break;
	}
	case KVM_HAS_DEVICE_ATTR: {
		r = -EFAULT;
		if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
			break;
		r = kvm_s390_vm_has_attr(kvm, &attr);
		break;
	}
	default:
		r = -ENOTTY;
	}

	return r;
}

static int kvm_s390_crypto_init(struct kvm *kvm)
{
	if (!test_vfacility(76))
		return 0;

	kvm->arch.crypto.crycb = kzalloc(sizeof(*kvm->arch.crypto.crycb),
					 GFP_KERNEL | GFP_DMA);
	if (!kvm->arch.crypto.crycb)
		return -ENOMEM;

	kvm->arch.crypto.crycbd = (__u32) (unsigned long) kvm->arch.crypto.crycb |
				  CRYCB_FORMAT1;

	/* Disable AES/DEA protected key functions by default */
	kvm->arch.crypto.aes_kw = 0;
	kvm->arch.crypto.dea_kw = 0;

	return 0;
}

int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
{
	int rc;
	char debug_name[16];
	static unsigned long sca_offset;

	rc = -EINVAL;
#ifdef CONFIG_KVM_S390_UCONTROL
	if (type & ~KVM_VM_S390_UCONTROL)
		goto out_err;
	if ((type & KVM_VM_S390_UCONTROL) && (!capable(CAP_SYS_ADMIN)))
		goto out_err;
#else
	if (type)
		goto out_err;
#endif

	rc = s390_enable_sie();
	if (rc)
		goto out_err;

	rc = -ENOMEM;

	kvm->arch.sca = (struct sca_block *) get_zeroed_page(GFP_KERNEL);
	if (!kvm->arch.sca)
		goto out_err;
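	/*
	 * The SCA uses only part of the page; staggering successive VMs in
	 * 16-byte steps within it presumably spreads the hot SCA data of
	 * different VMs across cache lines.
	 */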
	spin_lock(&kvm_lock);
	sca_offset = (sca_offset + 16) & 0x7f0;
	kvm->arch.sca = (struct sca_block *) ((char *) kvm->arch.sca + sca_offset);
	spin_unlock(&kvm_lock);

	sprintf(debug_name, "kvm-%u", current->pid);

	kvm->arch.dbf = debug_register(debug_name, 8, 2, 8 * sizeof(long));
	if (!kvm->arch.dbf)
		goto out_nodbf;

	if (kvm_s390_crypto_init(kvm) < 0)
		goto out_crypto;

	spin_lock_init(&kvm->arch.float_int.lock);
	INIT_LIST_HEAD(&kvm->arch.float_int.list);
	init_waitqueue_head(&kvm->arch.ipte_wq);
	mutex_init(&kvm->arch.ipte_mutex);

	debug_register_view(kvm->arch.dbf, &debug_sprintf_view);
	VM_EVENT(kvm, 3, "%s", "vm created");

	if (type & KVM_VM_S390_UCONTROL) {
		kvm->arch.gmap = NULL;
	} else {
		kvm->arch.gmap = gmap_alloc(current->mm, (1UL << 44) - 1);
		if (!kvm->arch.gmap)
			goto out_nogmap;
		kvm->arch.gmap->private = kvm;
		kvm->arch.gmap->pfault_enabled = 0;
	}

	kvm->arch.css_support = 0;
	kvm->arch.use_irqchip = 0;
	kvm->arch.epoch = 0;

	spin_lock_init(&kvm->arch.start_stop_lock);

	return 0;
out_nogmap:
	kfree(kvm->arch.crypto.crycb);
out_crypto:
	debug_unregister(kvm->arch.dbf);
out_nodbf:
	free_page((unsigned long)(kvm->arch.sca));
out_err:
	return rc;
}

void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{
	VCPU_EVENT(vcpu, 3, "%s", "free cpu");
	trace_kvm_s390_destroy_vcpu(vcpu->vcpu_id);
	kvm_s390_clear_local_irqs(vcpu);
	kvm_clear_async_pf_completion_queue(vcpu);
	if (!kvm_is_ucontrol(vcpu->kvm)) {
		clear_bit(63 - vcpu->vcpu_id,
			  (unsigned long *) &vcpu->kvm->arch.sca->mcn);
		if (vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda ==
		    (__u64) vcpu->arch.sie_block)
			vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda = 0;
	}
	smp_mb();

	if (kvm_is_ucontrol(vcpu->kvm))
		gmap_free(vcpu->arch.gmap);

	if (kvm_s390_cmma_enabled(vcpu->kvm))
		kvm_s390_vcpu_unsetup_cmma(vcpu);
	free_page((unsigned long)(vcpu->arch.sie_block));

	kvm_vcpu_uninit(vcpu);
	kmem_cache_free(kvm_vcpu_cache, vcpu);
}

static void kvm_free_vcpus(struct kvm *kvm)
{
	unsigned int i;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(i, vcpu, kvm)
		kvm_arch_vcpu_destroy(vcpu);

	mutex_lock(&kvm->lock);
	for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
		kvm->vcpus[i] = NULL;

	atomic_set(&kvm->online_vcpus, 0);
	mutex_unlock(&kvm->lock);
}

void kvm_arch_destroy_vm(struct kvm *kvm)
{
	kvm_free_vcpus(kvm);
	free_page((unsigned long)(kvm->arch.sca));
	debug_unregister(kvm->arch.dbf);
	kfree(kvm->arch.crypto.crycb);
	if (!kvm_is_ucontrol(kvm))
		gmap_free(kvm->arch.gmap);
	kvm_s390_destroy_adapters(kvm);
	kvm_s390_clear_float_irqs(kvm);
}

/* Section: vcpu related */
static int __kvm_ucontrol_vcpu_init(struct kvm_vcpu *vcpu)
{
	vcpu->arch.gmap = gmap_alloc(current->mm, -1UL);
	if (!vcpu->arch.gmap)
		return -ENOMEM;
	vcpu->arch.gmap->private = vcpu->kvm;

	return 0;
}

int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
{
	vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
	kvm_clear_async_pf_completion_queue(vcpu);
	vcpu->run->kvm_valid_regs = KVM_SYNC_PREFIX |
				    KVM_SYNC_GPRS |
				    KVM_SYNC_ACRS |
				    KVM_SYNC_CRS |
				    KVM_SYNC_ARCH0 |
				    KVM_SYNC_PFAULT;

	if (kvm_is_ucontrol(vcpu->kvm))
		return __kvm_ucontrol_vcpu_init(vcpu);

	return 0;
}

void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	save_fp_ctl(&vcpu->arch.host_fpregs.fpc);
	save_fp_regs(vcpu->arch.host_fpregs.fprs);
	save_access_regs(vcpu->arch.host_acrs);
	restore_fp_ctl(&vcpu->arch.guest_fpregs.fpc);
	restore_fp_regs(vcpu->arch.guest_fpregs.fprs);
	restore_access_regs(vcpu->run->s.regs.acrs);
	gmap_enable(vcpu->arch.gmap);
	atomic_set_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
}

void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
	atomic_clear_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
	gmap_disable(vcpu->arch.gmap);
	save_fp_ctl(&vcpu->arch.guest_fpregs.fpc);
	save_fp_regs(vcpu->arch.guest_fpregs.fprs);
	save_access_regs(vcpu->run->s.regs.acrs);
	restore_fp_ctl(&vcpu->arch.host_fpregs.fpc);
	restore_fp_regs(vcpu->arch.host_fpregs.fprs);
	restore_access_regs(vcpu->arch.host_acrs);
}

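/*
 * Note on the load/put pairing above: guest FP and access registers live in
 * the real CPU registers while the VCPU is loaded, and are only copied back
 * to vcpu->arch / vcpu->run on put. Code that reads those copies while the
 * VCPU is loaded (e.g. kvm_s390_vcpu_store_status() below) must refresh
 * them first.
 */
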
static void kvm_s390_vcpu_initial_reset(struct kvm_vcpu *vcpu)
{
	/* this equals initial cpu reset in pop, but we don't switch to ESA */
	vcpu->arch.sie_block->gpsw.mask = 0UL;
	vcpu->arch.sie_block->gpsw.addr = 0UL;
	kvm_s390_set_prefix(vcpu, 0);
	vcpu->arch.sie_block->cputm = 0UL;
	vcpu->arch.sie_block->ckc = 0UL;
	vcpu->arch.sie_block->todpr = 0;
	memset(vcpu->arch.sie_block->gcr, 0, 16 * sizeof(__u64));
	vcpu->arch.sie_block->gcr[0] = 0xE0UL;
	vcpu->arch.sie_block->gcr[14] = 0xC2000000UL;
	vcpu->arch.guest_fpregs.fpc = 0;
	asm volatile("lfpc %0" : : "Q" (vcpu->arch.guest_fpregs.fpc));
	vcpu->arch.sie_block->gbea = 1;
	vcpu->arch.sie_block->pp = 0;
	vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
	kvm_clear_async_pf_completion_queue(vcpu);
	if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm))
		kvm_s390_vcpu_stop(vcpu);
	kvm_s390_clear_local_irqs(vcpu);
}

void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
{
	mutex_lock(&vcpu->kvm->lock);
	vcpu->arch.sie_block->epoch = vcpu->kvm->arch.epoch;
	mutex_unlock(&vcpu->kvm->lock);
	if (!kvm_is_ucontrol(vcpu->kvm))
		vcpu->arch.gmap = vcpu->kvm->arch.gmap;
}

static void kvm_s390_vcpu_crypto_setup(struct kvm_vcpu *vcpu)
{
	if (!test_vfacility(76))
		return;

	vcpu->arch.sie_block->ecb3 &= ~(ECB3_AES | ECB3_DEA);

	if (vcpu->kvm->arch.crypto.aes_kw)
		vcpu->arch.sie_block->ecb3 |= ECB3_AES;
	if (vcpu->kvm->arch.crypto.dea_kw)
		vcpu->arch.sie_block->ecb3 |= ECB3_DEA;

	vcpu->arch.sie_block->crycbd = vcpu->kvm->arch.crypto.crycbd;
}

void kvm_s390_vcpu_unsetup_cmma(struct kvm_vcpu *vcpu)
{
	free_page(vcpu->arch.sie_block->cbrlo);
	vcpu->arch.sie_block->cbrlo = 0;
}

int kvm_s390_vcpu_setup_cmma(struct kvm_vcpu *vcpu)
{
	vcpu->arch.sie_block->cbrlo = get_zeroed_page(GFP_KERNEL);
	if (!vcpu->arch.sie_block->cbrlo)
		return -ENOMEM;

	vcpu->arch.sie_block->ecb2 |= 0x80;
	vcpu->arch.sie_block->ecb2 &= ~0x08;
	return 0;
}

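/*
 * Reading of the ecb2 bits above (hedged; based on the SIE control block
 * layout rather than anything in this file): setting 0x80 enables hardware
 * interpretation of the guest's CMMA ESSA requests via the cbrlo page just
 * allocated, while clearing 0x08 keeps the PFMF interpretation facility
 * switched off.
 */
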
int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
{
	int rc = 0;

	atomic_set(&vcpu->arch.sie_block->cpuflags, CPUSTAT_ZARCH |
						    CPUSTAT_SM |
						    CPUSTAT_STOPPED |
						    CPUSTAT_GED);
	vcpu->arch.sie_block->ecb = 6;
	if (test_vfacility(50) && test_vfacility(73))
		vcpu->arch.sie_block->ecb |= 0x10;

	vcpu->arch.sie_block->ecb2 = 8;
	vcpu->arch.sie_block->eca = 0xC1002000U;
	if (sclp_has_siif())
		vcpu->arch.sie_block->eca |= 1;
	if (sclp_has_sigpif())
		vcpu->arch.sie_block->eca |= 0x10000000U;
	vcpu->arch.sie_block->fac = (int) (long) vfacilities;
	vcpu->arch.sie_block->ictl |= ICTL_ISKE | ICTL_SSKE | ICTL_RRBE |
				      ICTL_TPROT;

	if (kvm_s390_cmma_enabled(vcpu->kvm)) {
		rc = kvm_s390_vcpu_setup_cmma(vcpu);
		if (rc)
			return rc;
	}
	hrtimer_init(&vcpu->arch.ckc_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	vcpu->arch.ckc_timer.function = kvm_s390_idle_wakeup;
	get_cpu_id(&vcpu->arch.cpu_id);
	vcpu->arch.cpu_id.version = 0xff;

	kvm_s390_vcpu_crypto_setup(vcpu);

	return rc;
}

struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
				      unsigned int id)
{
	struct kvm_vcpu *vcpu;
	struct sie_page *sie_page;
	int rc = -EINVAL;

	if (id >= KVM_MAX_VCPUS)
		goto out;

	rc = -ENOMEM;

	vcpu = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
	if (!vcpu)
		goto out;

	sie_page = (struct sie_page *) get_zeroed_page(GFP_KERNEL);
	if (!sie_page)
		goto out_free_cpu;

	vcpu->arch.sie_block = &sie_page->sie_block;
	vcpu->arch.sie_block->itdba = (unsigned long) &sie_page->itdb;

	vcpu->arch.sie_block->icpua = id;
	if (!kvm_is_ucontrol(kvm)) {
		if (!kvm->arch.sca) {
			WARN_ON_ONCE(1);
			goto out_free_cpu;
		}
		if (!kvm->arch.sca->cpu[id].sda)
			kvm->arch.sca->cpu[id].sda =
				(__u64) vcpu->arch.sie_block;
		vcpu->arch.sie_block->scaoh =
			(__u32)(((__u64)kvm->arch.sca) >> 32);
		vcpu->arch.sie_block->scaol = (__u32)(__u64)kvm->arch.sca;
		set_bit(63 - id, (unsigned long *) &kvm->arch.sca->mcn);
	}

	spin_lock_init(&vcpu->arch.local_int.lock);
	vcpu->arch.local_int.float_int = &kvm->arch.float_int;
	vcpu->arch.local_int.wq = &vcpu->wq;
	vcpu->arch.local_int.cpuflags = &vcpu->arch.sie_block->cpuflags;

	rc = kvm_vcpu_init(vcpu, kvm, id);
	if (rc)
		goto out_free_sie_block;
	VM_EVENT(kvm, 3, "create cpu %d at %p, sie block at %p", id, vcpu,
		 vcpu->arch.sie_block);
	trace_kvm_s390_create_vcpu(id, vcpu, vcpu->arch.sie_block);

	return vcpu;
out_free_sie_block:
	free_page((unsigned long)(vcpu->arch.sie_block));
out_free_cpu:
	kmem_cache_free(kvm_vcpu_cache, vcpu);
out:
	return ERR_PTR(rc);
}

int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
{
	return kvm_s390_vcpu_has_irq(vcpu, 0);
}

void s390_vcpu_block(struct kvm_vcpu *vcpu)
{
	atomic_set_mask(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
}

void s390_vcpu_unblock(struct kvm_vcpu *vcpu)
{
	atomic_clear_mask(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
}

/*
 * Kick a guest cpu out of SIE and wait until SIE is not running.
 * If the CPU is not running (e.g. waiting as idle) the function will
 * return immediately.
 */
void exit_sie(struct kvm_vcpu *vcpu)
{
	atomic_set_mask(CPUSTAT_STOP_INT, &vcpu->arch.sie_block->cpuflags);
	while (vcpu->arch.sie_block->prog0c & PROG_IN_SIE)
		cpu_relax();
}

/* Kick a guest cpu out of SIE and prevent SIE-reentry */
void exit_sie_sync(struct kvm_vcpu *vcpu)
{
	s390_vcpu_block(vcpu);
	exit_sie(vcpu);
}

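/*
 * The kick protocol above, in short: exit_sie() raises CPUSTAT_STOP_INT so
 * the SIE instruction intercepts, then spins until prog0c shows the CPU has
 * left SIE. exit_sie_sync() additionally sets PROG_BLOCK_SIE first, so the
 * VCPU thread cannot re-enter SIE until s390_vcpu_unblock() is called (done
 * at the top of kvm_s390_handle_requests()).
 */
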
static void kvm_gmap_notifier(struct gmap *gmap, unsigned long address)
{
	int i;
	struct kvm *kvm = gmap->private;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(i, vcpu, kvm) {
		/* match against both prefix pages */
		if (kvm_s390_get_prefix(vcpu) == (address & ~0x1000UL)) {
			VCPU_EVENT(vcpu, 2, "gmap notifier for %lx", address);
			kvm_make_request(KVM_REQ_MMU_RELOAD, vcpu);
			exit_sie_sync(vcpu);
		}
	}
}

int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
{
	/* kvm common code refers to this, but never calls it */
	BUG();
	return 0;
}

static int kvm_arch_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu,
					   struct kvm_one_reg *reg)
{
	int r = -EINVAL;

	switch (reg->id) {
	case KVM_REG_S390_TODPR:
		r = put_user(vcpu->arch.sie_block->todpr,
			     (u32 __user *)reg->addr);
		break;
	case KVM_REG_S390_EPOCHDIFF:
		r = put_user(vcpu->arch.sie_block->epoch,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_CPU_TIMER:
		r = put_user(vcpu->arch.sie_block->cputm,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_CLOCK_COMP:
		r = put_user(vcpu->arch.sie_block->ckc,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PFTOKEN:
		r = put_user(vcpu->arch.pfault_token,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PFCOMPARE:
		r = put_user(vcpu->arch.pfault_compare,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PFSELECT:
		r = put_user(vcpu->arch.pfault_select,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PP:
		r = put_user(vcpu->arch.sie_block->pp,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_GBEA:
		r = put_user(vcpu->arch.sie_block->gbea,
			     (u64 __user *)reg->addr);
		break;
	default:
		break;
	}

	return r;
}

static int kvm_arch_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu,
					   struct kvm_one_reg *reg)
{
	int r = -EINVAL;

	switch (reg->id) {
	case KVM_REG_S390_TODPR:
		r = get_user(vcpu->arch.sie_block->todpr,
			     (u32 __user *)reg->addr);
		break;
	case KVM_REG_S390_EPOCHDIFF:
		r = get_user(vcpu->arch.sie_block->epoch,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_CPU_TIMER:
		r = get_user(vcpu->arch.sie_block->cputm,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_CLOCK_COMP:
		r = get_user(vcpu->arch.sie_block->ckc,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PFTOKEN:
		r = get_user(vcpu->arch.pfault_token,
			     (u64 __user *)reg->addr);
		if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
			kvm_clear_async_pf_completion_queue(vcpu);
		break;
	case KVM_REG_S390_PFCOMPARE:
		r = get_user(vcpu->arch.pfault_compare,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PFSELECT:
		r = get_user(vcpu->arch.pfault_select,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PP:
		r = get_user(vcpu->arch.sie_block->pp,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_GBEA:
		r = get_user(vcpu->arch.sie_block->gbea,
			     (u64 __user *)reg->addr);
		break;
	default:
		break;
	}

	return r;
}

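/*
 * Userspace usage sketch (illustrative; vcpu_fd is an assumption). Each
 * register above is addressed by id through the generic ONE_REG interface:
 *
 *	__u64 token;
 *	struct kvm_one_reg reg = {
 *		.id   = KVM_REG_S390_PFTOKEN,
 *		.addr = (__u64)&token,
 *	};
 *	ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);
 */
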
static int kvm_arch_vcpu_ioctl_initial_reset(struct kvm_vcpu *vcpu)
{
	kvm_s390_vcpu_initial_reset(vcpu);
	return 0;
}

int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	memcpy(&vcpu->run->s.regs.gprs, &regs->gprs, sizeof(regs->gprs));
	return 0;
}

int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	memcpy(&regs->gprs, &vcpu->run->s.regs.gprs, sizeof(regs->gprs));
	return 0;
}

int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	memcpy(&vcpu->run->s.regs.acrs, &sregs->acrs, sizeof(sregs->acrs));
	memcpy(&vcpu->arch.sie_block->gcr, &sregs->crs, sizeof(sregs->crs));
	restore_access_regs(vcpu->run->s.regs.acrs);
	return 0;
}

int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	memcpy(&sregs->acrs, &vcpu->run->s.regs.acrs, sizeof(sregs->acrs));
	memcpy(&sregs->crs, &vcpu->arch.sie_block->gcr, sizeof(sregs->crs));
	return 0;
}

int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	if (test_fp_ctl(fpu->fpc))
		return -EINVAL;
	memcpy(&vcpu->arch.guest_fpregs.fprs, &fpu->fprs, sizeof(fpu->fprs));
	vcpu->arch.guest_fpregs.fpc = fpu->fpc;
	restore_fp_ctl(&vcpu->arch.guest_fpregs.fpc);
	restore_fp_regs(vcpu->arch.guest_fpregs.fprs);
	return 0;
}

int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	memcpy(&fpu->fprs, &vcpu->arch.guest_fpregs.fprs, sizeof(fpu->fprs));
	fpu->fpc = vcpu->arch.guest_fpregs.fpc;
	return 0;
}

static int kvm_arch_vcpu_ioctl_set_initial_psw(struct kvm_vcpu *vcpu, psw_t psw)
{
	int rc = 0;

	if (!is_vcpu_stopped(vcpu))
		rc = -EBUSY;
	else {
		vcpu->run->psw_mask = psw.mask;
		vcpu->run->psw_addr = psw.addr;
	}
	return rc;
}

int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
				  struct kvm_translation *tr)
{
	return -EINVAL; /* not implemented yet */
}

#define VALID_GUESTDBG_FLAGS (KVM_GUESTDBG_SINGLESTEP | \
			      KVM_GUESTDBG_USE_HW_BP | \
			      KVM_GUESTDBG_ENABLE)

int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
					struct kvm_guest_debug *dbg)
{
	int rc = 0;

	vcpu->guest_debug = 0;
	kvm_s390_clear_bp_data(vcpu);

	if (dbg->control & ~VALID_GUESTDBG_FLAGS)
		return -EINVAL;

	if (dbg->control & KVM_GUESTDBG_ENABLE) {
		vcpu->guest_debug = dbg->control;
		/* enforce guest PER */
		atomic_set_mask(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags);

		if (dbg->control & KVM_GUESTDBG_USE_HW_BP)
			rc = kvm_s390_import_bp_data(vcpu, dbg);
	} else {
		atomic_clear_mask(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags);
		vcpu->arch.guestdbg.last_bp = 0;
	}

	if (rc) {
		vcpu->guest_debug = 0;
		kvm_s390_clear_bp_data(vcpu);
		atomic_clear_mask(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags);
	}

	return rc;
}

int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	/* CHECK_STOP and LOAD are not supported yet */
	return is_vcpu_stopped(vcpu) ? KVM_MP_STATE_STOPPED :
				       KVM_MP_STATE_OPERATING;
}

int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	int rc = 0;

	/* user space knows about this interface - let it control the state */
	vcpu->kvm->arch.user_cpu_state_ctrl = 1;

	switch (mp_state->mp_state) {
	case KVM_MP_STATE_STOPPED:
		kvm_s390_vcpu_stop(vcpu);
		break;
	case KVM_MP_STATE_OPERATING:
		kvm_s390_vcpu_start(vcpu);
		break;
	case KVM_MP_STATE_LOAD:
	case KVM_MP_STATE_CHECK_STOP:
		/* fall through - CHECK_STOP and LOAD are not supported yet */
	default:
		rc = -ENXIO;
	}

	return rc;
}

bool kvm_s390_cmma_enabled(struct kvm *kvm)
{
	if (!MACHINE_IS_LPAR)
		return false;
	/* only enable for z10 and later */
	if (!MACHINE_HAS_EDAT1)
		return false;
	if (!kvm->arch.use_cmma)
		return false;
	return true;
}

static bool ibs_enabled(struct kvm_vcpu *vcpu)
{
	return atomic_read(&vcpu->arch.sie_block->cpuflags) & CPUSTAT_IBS;
}

static int kvm_s390_handle_requests(struct kvm_vcpu *vcpu)
{
retry:
	s390_vcpu_unblock(vcpu);
	/*
	 * We use MMU_RELOAD just to re-arm the ipte notifier for the
	 * guest prefix page. gmap_ipte_notify will wait on the ptl lock.
	 * This ensures that the ipte instruction for this request has
	 * already finished. We might race against a second unmapper that
	 * wants to set the blocking bit. Let's just retry the request loop.
	 */
	if (kvm_check_request(KVM_REQ_MMU_RELOAD, vcpu)) {
		int rc;
		rc = gmap_ipte_notify(vcpu->arch.gmap,
				      kvm_s390_get_prefix(vcpu),
				      PAGE_SIZE * 2);
		if (rc)
			return rc;
		goto retry;
	}

	if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu)) {
		vcpu->arch.sie_block->ihcpu = 0xffff;
		goto retry;
	}

	if (kvm_check_request(KVM_REQ_ENABLE_IBS, vcpu)) {
		if (!ibs_enabled(vcpu)) {
			trace_kvm_s390_enable_disable_ibs(vcpu->vcpu_id, 1);
			atomic_set_mask(CPUSTAT_IBS,
					&vcpu->arch.sie_block->cpuflags);
		}
		goto retry;
	}

	if (kvm_check_request(KVM_REQ_DISABLE_IBS, vcpu)) {
		if (ibs_enabled(vcpu)) {
			trace_kvm_s390_enable_disable_ibs(vcpu->vcpu_id, 0);
			atomic_clear_mask(CPUSTAT_IBS,
					  &vcpu->arch.sie_block->cpuflags);
		}
		goto retry;
	}

	/* nothing to do, just clear the request */
	clear_bit(KVM_REQ_UNHALT, &vcpu->requests);

	return 0;
}

/**
 * kvm_arch_fault_in_page - fault-in guest page if necessary
 * @vcpu: The corresponding virtual cpu
 * @gpa: Guest physical address
 * @writable: Whether the page should be writable or not
 *
 * Make sure that a guest page has been faulted-in on the host.
 *
 * Return: Zero on success, negative error code otherwise.
 */
long kvm_arch_fault_in_page(struct kvm_vcpu *vcpu, gpa_t gpa, int writable)
{
	return gmap_fault(vcpu->arch.gmap, gpa,
			  writable ? FAULT_FLAG_WRITE : 0);
}

static void __kvm_inject_pfault_token(struct kvm_vcpu *vcpu, bool start_token,
				      unsigned long token)
{
	struct kvm_s390_interrupt inti;
	struct kvm_s390_irq irq;

	if (start_token) {
		irq.u.ext.ext_params2 = token;
		irq.type = KVM_S390_INT_PFAULT_INIT;
		WARN_ON_ONCE(kvm_s390_inject_vcpu(vcpu, &irq));
	} else {
		inti.type = KVM_S390_INT_PFAULT_DONE;
		inti.parm64 = token;
		WARN_ON_ONCE(kvm_s390_inject_vm(vcpu->kvm, &inti));
	}
}

void kvm_arch_async_page_not_present(struct kvm_vcpu *vcpu,
				     struct kvm_async_pf *work)
{
	trace_kvm_s390_pfault_init(vcpu, work->arch.pfault_token);
	__kvm_inject_pfault_token(vcpu, true, work->arch.pfault_token);
}

void kvm_arch_async_page_present(struct kvm_vcpu *vcpu,
				 struct kvm_async_pf *work)
{
	trace_kvm_s390_pfault_done(vcpu, work->arch.pfault_token);
	__kvm_inject_pfault_token(vcpu, false, work->arch.pfault_token);
}

void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu,
			       struct kvm_async_pf *work)
{
	/* s390 will always inject the page directly */
}

bool kvm_arch_can_inject_async_page_present(struct kvm_vcpu *vcpu)
{
	/*
	 * s390 will always inject the page directly,
	 * but we still want check_async_completion to cleanup
	 */
	return true;
}

static int kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu)
{
	hva_t hva;
	struct kvm_arch_async_pf arch;
	int rc;

	if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
		return 0;
	if ((vcpu->arch.sie_block->gpsw.mask & vcpu->arch.pfault_select) !=
	    vcpu->arch.pfault_compare)
		return 0;
	if (psw_extint_disabled(vcpu))
		return 0;
	if (kvm_s390_vcpu_has_irq(vcpu, 0))
		return 0;
	if (!(vcpu->arch.sie_block->gcr[0] & 0x200ul))
		return 0;
	if (!vcpu->arch.gmap->pfault_enabled)
		return 0;

	hva = gfn_to_hva(vcpu->kvm, gpa_to_gfn(current->thread.gmap_addr));
	hva += current->thread.gmap_addr & ~PAGE_MASK;
	if (read_guest_real(vcpu, vcpu->arch.pfault_token, &arch.pfault_token, 8))
		return 0;

	rc = kvm_setup_async_pf(vcpu, current->thread.gmap_addr, hva, &arch);
	return rc;
}

static int vcpu_pre_run(struct kvm_vcpu *vcpu)
{
	int rc, cpuflags;

	/*
	 * On s390 notifications for arriving pages will be delivered directly
	 * to the guest but the housekeeping for completed pfaults is
	 * handled outside the worker.
	 */
	kvm_check_async_pf_completion(vcpu);

	memcpy(&vcpu->arch.sie_block->gg14, &vcpu->run->s.regs.gprs[14], 16);

	if (need_resched())
		schedule();

	if (test_cpu_flag(CIF_MCCK_PENDING))
		s390_handle_mcck();

	if (!kvm_is_ucontrol(vcpu->kvm)) {
		rc = kvm_s390_deliver_pending_interrupts(vcpu);
		if (rc)
			return rc;
	}

	rc = kvm_s390_handle_requests(vcpu);
	if (rc)
		return rc;

	if (guestdbg_enabled(vcpu)) {
		kvm_s390_backup_guest_per_regs(vcpu);
		kvm_s390_patch_guest_per_regs(vcpu);
	}

	vcpu->arch.sie_block->icptcode = 0;
	cpuflags = atomic_read(&vcpu->arch.sie_block->cpuflags);
	VCPU_EVENT(vcpu, 6, "entering sie flags %x", cpuflags);
	trace_kvm_s390_sie_enter(vcpu, cpuflags);

	return 0;
}

static int vcpu_post_run(struct kvm_vcpu *vcpu, int exit_reason)
{
	int rc = -1;

	VCPU_EVENT(vcpu, 6, "exit sie icptcode %d",
		   vcpu->arch.sie_block->icptcode);
	trace_kvm_s390_sie_exit(vcpu, vcpu->arch.sie_block->icptcode);

	if (guestdbg_enabled(vcpu))
		kvm_s390_restore_guest_per_regs(vcpu);

	if (exit_reason >= 0) {
		rc = 0;
	} else if (kvm_is_ucontrol(vcpu->kvm)) {
		vcpu->run->exit_reason = KVM_EXIT_S390_UCONTROL;
		vcpu->run->s390_ucontrol.trans_exc_code =
						current->thread.gmap_addr;
		vcpu->run->s390_ucontrol.pgm_code = 0x10;
		rc = -EREMOTE;

	} else if (current->thread.gmap_pfault) {
		trace_kvm_s390_major_guest_pfault(vcpu);
		current->thread.gmap_pfault = 0;
		if (kvm_arch_setup_async_pf(vcpu)) {
			rc = 0;
		} else {
			gpa_t gpa = current->thread.gmap_addr;
			rc = kvm_arch_fault_in_page(vcpu, gpa, 1);
		}
	}

	if (rc == -1) {
		VCPU_EVENT(vcpu, 3, "%s", "fault in sie instruction");
		trace_kvm_s390_sie_fault(vcpu);
		rc = kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
	}

	memcpy(&vcpu->run->s.regs.gprs[14], &vcpu->arch.sie_block->gg14, 16);

	if (rc == 0) {
		if (kvm_is_ucontrol(vcpu->kvm))
			/* Don't exit for host interrupts. */
			rc = vcpu->arch.sie_block->icptcode ? -EOPNOTSUPP : 0;
		else
			rc = kvm_handle_sie_intercept(vcpu);
	}

	return rc;
}

static int __vcpu_run(struct kvm_vcpu *vcpu)
{
	int rc, exit_reason;

	/*
	 * We try to hold kvm->srcu during most of vcpu_run (except when
	 * running the guest), so that memslots (and other stuff) are
	 * protected.
	 */
	vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);

	do {
		rc = vcpu_pre_run(vcpu);
		if (rc)
			break;

		srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
		/*
		 * As PF_VCPU will be used in fault handler, between
		 * guest_enter and guest_exit should be no uaccess.
		 */
		preempt_disable();
		kvm_guest_enter();
		preempt_enable();
		exit_reason = sie64a(vcpu->arch.sie_block,
				     vcpu->run->s.regs.gprs);
		kvm_guest_exit();
		vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);

		rc = vcpu_post_run(vcpu, exit_reason);
	} while (!signal_pending(current) && !guestdbg_exit_pending(vcpu) && !rc);

	srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
	return rc;
}

static void sync_regs(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	vcpu->arch.sie_block->gpsw.mask = kvm_run->psw_mask;
	vcpu->arch.sie_block->gpsw.addr = kvm_run->psw_addr;
	if (kvm_run->kvm_dirty_regs & KVM_SYNC_PREFIX)
		kvm_s390_set_prefix(vcpu, kvm_run->s.regs.prefix);
	if (kvm_run->kvm_dirty_regs & KVM_SYNC_CRS) {
		memcpy(&vcpu->arch.sie_block->gcr, &kvm_run->s.regs.crs, 128);
		/* some control register changes require a tlb flush */
		kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
	}
	if (kvm_run->kvm_dirty_regs & KVM_SYNC_ARCH0) {
		vcpu->arch.sie_block->cputm = kvm_run->s.regs.cputm;
		vcpu->arch.sie_block->ckc = kvm_run->s.regs.ckc;
		vcpu->arch.sie_block->todpr = kvm_run->s.regs.todpr;
		vcpu->arch.sie_block->pp = kvm_run->s.regs.pp;
		vcpu->arch.sie_block->gbea = kvm_run->s.regs.gbea;
	}
	if (kvm_run->kvm_dirty_regs & KVM_SYNC_PFAULT) {
		vcpu->arch.pfault_token = kvm_run->s.regs.pft;
		vcpu->arch.pfault_select = kvm_run->s.regs.pfs;
		vcpu->arch.pfault_compare = kvm_run->s.regs.pfc;
		if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
			kvm_clear_async_pf_completion_queue(vcpu);
	}
	kvm_run->kvm_dirty_regs = 0;
}

static void store_regs(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	kvm_run->psw_mask = vcpu->arch.sie_block->gpsw.mask;
	kvm_run->psw_addr = vcpu->arch.sie_block->gpsw.addr;
	kvm_run->s.regs.prefix = kvm_s390_get_prefix(vcpu);
	memcpy(&kvm_run->s.regs.crs, &vcpu->arch.sie_block->gcr, 128);
	kvm_run->s.regs.cputm = vcpu->arch.sie_block->cputm;
	kvm_run->s.regs.ckc = vcpu->arch.sie_block->ckc;
	kvm_run->s.regs.todpr = vcpu->arch.sie_block->todpr;
	kvm_run->s.regs.pp = vcpu->arch.sie_block->pp;
	kvm_run->s.regs.gbea = vcpu->arch.sie_block->gbea;
	kvm_run->s.regs.pft = vcpu->arch.pfault_token;
	kvm_run->s.regs.pfs = vcpu->arch.pfault_select;
	kvm_run->s.regs.pfc = vcpu->arch.pfault_compare;
}

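/*
 * Userspace usage sketch (illustrative; run is the mmap'ed kvm_run area and
 * vcpu_fd is an assumption). Instead of separate ioctls, registers covered
 * by kvm_valid_regs can be updated through the shared kvm_run block; the
 * dirty bits tell sync_regs() what to pull in before entering SIE:
 *
 *	run->s.regs.prefix = new_prefix;
 *	run->kvm_dirty_regs |= KVM_SYNC_PREFIX;
 *	ioctl(vcpu_fd, KVM_RUN, 0);
 */
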
int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	int rc;
	sigset_t sigsaved;

	if (guestdbg_exit_pending(vcpu)) {
		kvm_s390_prepare_debug_exit(vcpu);
		return 0;
	}

	if (vcpu->sigset_active)
		sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);

	if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm)) {
		kvm_s390_vcpu_start(vcpu);
	} else if (is_vcpu_stopped(vcpu)) {
		pr_err_ratelimited("kvm-s390: can't run stopped vcpu %d\n",
				   vcpu->vcpu_id);
		return -EINVAL;
	}

	sync_regs(vcpu, kvm_run);

	might_fault();
	rc = __vcpu_run(vcpu);

	if (signal_pending(current) && !rc) {
		kvm_run->exit_reason = KVM_EXIT_INTR;
		rc = -EINTR;
	}

	if (guestdbg_exit_pending(vcpu) && !rc) {
		kvm_s390_prepare_debug_exit(vcpu);
		rc = 0;
	}

	if (rc == -EOPNOTSUPP) {
		/* intercept cannot be handled in-kernel, prepare kvm-run */
		kvm_run->exit_reason = KVM_EXIT_S390_SIEIC;
		kvm_run->s390_sieic.icptcode = vcpu->arch.sie_block->icptcode;
		kvm_run->s390_sieic.ipa = vcpu->arch.sie_block->ipa;
		kvm_run->s390_sieic.ipb = vcpu->arch.sie_block->ipb;
		rc = 0;
	}

	if (rc == -EREMOTE) {
		/*
		 * intercept was handled, but userspace support is needed;
		 * kvm_run has been prepared by the handler
		 */
		rc = 0;
	}

	store_regs(vcpu, kvm_run);

	if (vcpu->sigset_active)
		sigprocmask(SIG_SETMASK, &sigsaved, NULL);

	vcpu->stat.exit_userspace++;
	return rc;
}

/*
 * store status at address
 * we have two special cases:
 * KVM_S390_STORE_STATUS_NOADDR: -> 0x1200 on 64 bit
 * KVM_S390_STORE_STATUS_PREFIXED: -> prefix
 */
int kvm_s390_store_status_unloaded(struct kvm_vcpu *vcpu, unsigned long gpa)
{
	unsigned char archmode = 1;
	unsigned int px;
	u64 clkcomp;
	int rc;

	if (gpa == KVM_S390_STORE_STATUS_NOADDR) {
		if (write_guest_abs(vcpu, 163, &archmode, 1))
			return -EFAULT;
		gpa = SAVE_AREA_BASE;
	} else if (gpa == KVM_S390_STORE_STATUS_PREFIXED) {
		if (write_guest_real(vcpu, 163, &archmode, 1))
			return -EFAULT;
		gpa = kvm_s390_real_to_abs(vcpu, SAVE_AREA_BASE);
	}
	rc = write_guest_abs(vcpu, gpa + offsetof(struct save_area, fp_regs),
			     vcpu->arch.guest_fpregs.fprs, 128);
	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, gp_regs),
			      vcpu->run->s.regs.gprs, 128);
	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, psw),
			      &vcpu->arch.sie_block->gpsw, 16);
	px = kvm_s390_get_prefix(vcpu);
	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, pref_reg),
			      &px, 4);
	rc |= write_guest_abs(vcpu,
			      gpa + offsetof(struct save_area, fp_ctrl_reg),
			      &vcpu->arch.guest_fpregs.fpc, 4);
	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, tod_reg),
			      &vcpu->arch.sie_block->todpr, 4);
	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, timer),
			      &vcpu->arch.sie_block->cputm, 8);
	clkcomp = vcpu->arch.sie_block->ckc >> 8;
	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, clk_cmp),
			      &clkcomp, 8);
	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, acc_regs),
			      &vcpu->run->s.regs.acrs, 64);
	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, ctrl_regs),
			      &vcpu->arch.sie_block->gcr, 128);
	return rc ? -EFAULT : 0;
}

int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
{
	/*
	 * The guest FPRS and ACRS are in the host FPRS/ACRS due to the lazy
	 * copying in vcpu load/put. Let's update our copies before we save
	 * them into the save area.
	 */
	save_fp_ctl(&vcpu->arch.guest_fpregs.fpc);
	save_fp_regs(vcpu->arch.guest_fpregs.fprs);
	save_access_regs(vcpu->run->s.regs.acrs);

	return kvm_s390_store_status_unloaded(vcpu, addr);
}

static void __disable_ibs_on_vcpu(struct kvm_vcpu *vcpu)
{
	kvm_check_request(KVM_REQ_ENABLE_IBS, vcpu);
	kvm_make_request(KVM_REQ_DISABLE_IBS, vcpu);
	exit_sie_sync(vcpu);
}

static void __disable_ibs_on_all_vcpus(struct kvm *kvm)
{
	unsigned int i;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(i, vcpu, kvm) {
		__disable_ibs_on_vcpu(vcpu);
	}
}

static void __enable_ibs_on_vcpu(struct kvm_vcpu *vcpu)
{
	kvm_check_request(KVM_REQ_DISABLE_IBS, vcpu);
	kvm_make_request(KVM_REQ_ENABLE_IBS, vcpu);
	exit_sie_sync(vcpu);
}

void kvm_s390_vcpu_start(struct kvm_vcpu *vcpu)
{
	int i, online_vcpus, started_vcpus = 0;

	if (!is_vcpu_stopped(vcpu))
		return;

	trace_kvm_s390_vcpu_start_stop(vcpu->vcpu_id, 1);
	/* Only one cpu at a time may enter/leave the STOPPED state. */
	spin_lock(&vcpu->kvm->arch.start_stop_lock);
	online_vcpus = atomic_read(&vcpu->kvm->online_vcpus);

	for (i = 0; i < online_vcpus; i++) {
		if (!is_vcpu_stopped(vcpu->kvm->vcpus[i]))
			started_vcpus++;
	}

	if (started_vcpus == 0) {
		/* we're the only active VCPU -> speed it up */
		__enable_ibs_on_vcpu(vcpu);
	} else if (started_vcpus == 1) {
		/*
		 * As we are starting a second VCPU, we have to disable
		 * the IBS facility on all VCPUs to remove potentially
		 * outstanding ENABLE requests.
		 */
		__disable_ibs_on_all_vcpus(vcpu->kvm);
	}

	atomic_clear_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);
	/*
	 * Another VCPU might have used IBS while we were offline.
	 * Let's play safe and flush the VCPU at startup.
	 */
	kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
	spin_unlock(&vcpu->kvm->arch.start_stop_lock);
	return;
}

void kvm_s390_vcpu_stop(struct kvm_vcpu *vcpu)
{
	int i, online_vcpus, started_vcpus = 0;
	struct kvm_vcpu *started_vcpu = NULL;

	if (is_vcpu_stopped(vcpu))
		return;

	trace_kvm_s390_vcpu_start_stop(vcpu->vcpu_id, 0);
	/* Only one cpu at a time may enter/leave the STOPPED state. */
	spin_lock(&vcpu->kvm->arch.start_stop_lock);
	online_vcpus = atomic_read(&vcpu->kvm->online_vcpus);

	/* SIGP STOP and SIGP STOP AND STORE STATUS have been fully processed */
	kvm_s390_clear_stop_irq(vcpu);

	atomic_set_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);
	__disable_ibs_on_vcpu(vcpu);

	for (i = 0; i < online_vcpus; i++) {
		if (!is_vcpu_stopped(vcpu->kvm->vcpus[i])) {
			started_vcpus++;
			started_vcpu = vcpu->kvm->vcpus[i];
		}
	}

	if (started_vcpus == 1) {
		/*
		 * As we only have one VCPU left, we want to enable the
		 * IBS facility for that VCPU to speed it up.
		 */
		__enable_ibs_on_vcpu(started_vcpu);
	}

	spin_unlock(&vcpu->kvm->arch.start_stop_lock);
}

static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
				     struct kvm_enable_cap *cap)
{
	int r;

	if (cap->flags)
		return -EINVAL;

	switch (cap->cap) {
	case KVM_CAP_S390_CSS_SUPPORT:
		if (!vcpu->kvm->arch.css_support) {
			vcpu->kvm->arch.css_support = 1;
			trace_kvm_s390_enable_css(vcpu->kvm);
		}
		r = 0;
		break;
	default:
		r = -EINVAL;
		break;
	}
	return r;
}

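/*
 * Illustrative userspace sketch (vcpu_fd is a hypothetical VCPU file
 * descriptor): enabling channel subsystem support. cap->flags must be
 * zero, and unknown capabilities are rejected with -EINVAL.
 */
#if 0
	struct kvm_enable_cap cap = {
		.cap = KVM_CAP_S390_CSS_SUPPORT,
	};
	ioctl(vcpu_fd, KVM_ENABLE_CAP, &cap);
#endif
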
long kvm_arch_vcpu_ioctl(struct file *filp,
			 unsigned int ioctl, unsigned long arg)
{
	struct kvm_vcpu *vcpu = filp->private_data;
	void __user *argp = (void __user *)arg;
	int idx;
	long r;

	switch (ioctl) {
	case KVM_S390_INTERRUPT: {
		struct kvm_s390_interrupt s390int;
		struct kvm_s390_irq s390irq;

		r = -EFAULT;
		if (copy_from_user(&s390int, argp, sizeof(s390int)))
			break;
		if (s390int_to_s390irq(&s390int, &s390irq))
			return -EINVAL;
		r = kvm_s390_inject_vcpu(vcpu, &s390irq);
		break;
	}
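	/*
	 * Illustrative userspace sketch of the case above (vcpu_fd is
	 * hypothetical): the legacy kvm_s390_interrupt layout is converted
	 * to the richer struct kvm_s390_irq before injection.
	 */
#if 0
	struct kvm_s390_interrupt s390int = {
		.type = KVM_S390_INT_EMERGENCY,
		.parm = 0,	/* address of the signalling CPU */
	};
	ioctl(vcpu_fd, KVM_S390_INTERRUPT, &s390int);
#endif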
	case KVM_S390_STORE_STATUS:
		idx = srcu_read_lock(&vcpu->kvm->srcu);
		r = kvm_s390_vcpu_store_status(vcpu, arg);
		srcu_read_unlock(&vcpu->kvm->srcu, idx);
		break;
	case KVM_S390_SET_INITIAL_PSW: {
		psw_t psw;

		r = -EFAULT;
		if (copy_from_user(&psw, argp, sizeof(psw)))
			break;
		r = kvm_arch_vcpu_ioctl_set_initial_psw(vcpu, psw);
		break;
	}
	case KVM_S390_INITIAL_RESET:
		r = kvm_arch_vcpu_ioctl_initial_reset(vcpu);
		break;
	case KVM_SET_ONE_REG:
	case KVM_GET_ONE_REG: {
		struct kvm_one_reg reg;

		r = -EFAULT;
		if (copy_from_user(&reg, argp, sizeof(reg)))
			break;
		if (ioctl == KVM_SET_ONE_REG)
			r = kvm_arch_vcpu_ioctl_set_one_reg(vcpu, &reg);
		else
			r = kvm_arch_vcpu_ioctl_get_one_reg(vcpu, &reg);
		break;
	}
#ifdef CONFIG_KVM_S390_UCONTROL
	case KVM_S390_UCAS_MAP: {
		struct kvm_s390_ucas_mapping ucasmap;

		if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
			r = -EFAULT;
			break;
		}

		if (!kvm_is_ucontrol(vcpu->kvm)) {
			r = -EINVAL;
			break;
		}

		r = gmap_map_segment(vcpu->arch.gmap, ucasmap.user_addr,
				     ucasmap.vcpu_addr, ucasmap.length);
		break;
	}
	case KVM_S390_UCAS_UNMAP: {
		struct kvm_s390_ucas_mapping ucasmap;

		if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
			r = -EFAULT;
			break;
		}

		if (!kvm_is_ucontrol(vcpu->kvm)) {
			r = -EINVAL;
			break;
		}

		r = gmap_unmap_segment(vcpu->arch.gmap, ucasmap.vcpu_addr,
				       ucasmap.length);
		break;
	}
#endif
	case KVM_S390_VCPU_FAULT: {
		r = gmap_fault(vcpu->arch.gmap, arg, 0);
		break;
	}
	case KVM_ENABLE_CAP: {
		struct kvm_enable_cap cap;

		r = -EFAULT;
		if (copy_from_user(&cap, argp, sizeof(cap)))
			break;
		r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap);
		break;
	}
	default:
		r = -ENOTTY;
	}
	return r;
}

int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
{
#ifdef CONFIG_KVM_S390_UCONTROL
	if ((vmf->pgoff == KVM_S390_SIE_PAGE_OFFSET)
	    && (kvm_is_ucontrol(vcpu->kvm))) {
		vmf->page = virt_to_page(vcpu->arch.sie_block);
		get_page(vmf->page);
		return 0;
	}
#endif
	return VM_FAULT_SIGBUS;
}

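/*
 * Illustrative userspace sketch (user controlled VMs only; vcpu_fd and
 * the fixed 4k page size are assumptions): the fault handler above backs
 * an mmap() of the VCPU file descriptor at page offset
 * KVM_S390_SIE_PAGE_OFFSET with the SIE control block.
 */
#if 0
	void *sie_block = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED,
			       vcpu_fd, KVM_S390_SIE_PAGE_OFFSET * 4096);
#endif
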
int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
			    unsigned long npages)
{
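	/* s390 keeps no arch specific per-memslot data, nothing to set up */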
	return 0;
}

/* Section: memory related */
int kvm_arch_prepare_memory_region(struct kvm *kvm,
				   struct kvm_memory_slot *memslot,
				   struct kvm_userspace_memory_region *mem,
				   enum kvm_mr_change change)
{
	/* A few sanity checks. Memory slots have to start and end on a
	   segment boundary (1 MB). The memory in userland may be fragmented
	   across several vmas. It is fine to mmap() and munmap() ranges
	   within this slot at any time after this call. */

	if (mem->userspace_addr & 0xffffful)
		return -EINVAL;

	if (mem->memory_size & 0xffffful)
		return -EINVAL;

	return 0;
}

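/*
 * Illustrative userspace sketch (vm_fd and backing are hypothetical):
 * both memory_size and userspace_addr must be 1 MB aligned, or the
 * checks above fail with -EINVAL.
 */
#if 0
	struct kvm_userspace_memory_region region = {
		.slot = 0,
		.guest_phys_addr = 0,
		.memory_size = 256 << 20,		/* 1 MB aligned */
		.userspace_addr = (__u64) backing,	/* 1 MB aligned */
	};
	ioctl(vm_fd, KVM_SET_USER_MEMORY_REGION, &region);
#endif
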
void kvm_arch_commit_memory_region(struct kvm *kvm,
				   struct kvm_userspace_memory_region *mem,
				   const struct kvm_memory_slot *old,
				   enum kvm_mr_change change)
{
	int rc;

	/* If the basics of the memslot do not change, we do not want
	 * to update the gmap. Every update causes several unnecessary
	 * segment translation exceptions. This is usually handled just
	 * fine by the normal fault handler + gmap, but it will also
	 * cause faults on the prefix page of running guest CPUs.
	 */
	if (old->userspace_addr == mem->userspace_addr &&
	    old->base_gfn * PAGE_SIZE == mem->guest_phys_addr &&
	    old->npages * PAGE_SIZE == mem->memory_size)
		return;

	rc = gmap_map_segment(kvm->arch.gmap, mem->userspace_addr,
			      mem->guest_phys_addr, mem->memory_size);
	if (rc)
		printk(KERN_WARNING "kvm-s390: failed to commit memory region\n");
}

static int __init kvm_s390_init(void)
{
	int ret;

	ret = kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
	if (ret)
		return ret;

	/*
	 * guests can ask for up to 255+1 double words, so we need a full
	 * page to hold the maximum number of facility bits. On the other
	 * hand, we only set facilities that are known to work in KVM.
	 */
	vfacilities = (unsigned long *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!vfacilities) {
		kvm_exit();
		return -ENOMEM;
	}
	memcpy(vfacilities, S390_lowcore.stfle_fac_list, 16);
	vfacilities[0] &= 0xff82fffbf47c2000UL;
	vfacilities[1] &= 0x005c000000000000UL;
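	/*
	 * The masks above whitelist individual facility bits (numbered
	 * from the leftmost bit of the STFLE result) that KVM is known
	 * to handle correctly for its guests.
	 */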
	return 0;
}

static void __exit kvm_s390_exit(void)
{
	free_page((unsigned long) vfacilities);
	kvm_exit();
}

module_init(kvm_s390_init);
module_exit(kvm_s390_exit);

/*
 * Enable autoloading of the kvm module.
 * Note that we add the module alias here instead of virt/kvm/kvm_main.c
 * since x86 takes a different approach.
 */
#include <linux/miscdevice.h>
MODULE_ALIAS_MISCDEV(KVM_MINOR);
MODULE_ALIAS("devname:kvm");