/* arch/s390/kvm/kvm-s390.c (mirror_ubuntu-artful-kernel.git, at "KVM: s390: Provide guest TOD Clock Get/Set Controls") */

/*
 * hosting zSeries kernel virtual machines
 *
 * Copyright IBM Corp. 2008, 2009
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (version 2 only)
 * as published by the Free Software Foundation.
 *
 *    Author(s): Carsten Otte <cotte@de.ibm.com>
 *               Christian Borntraeger <borntraeger@de.ibm.com>
 *               Heiko Carstens <heiko.carstens@de.ibm.com>
 *               Christian Ehrhardt <ehrhardt@de.ibm.com>
 *               Jason J. Herne <jjherne@us.ibm.com>
 */

#include <linux/compiler.h>
#include <linux/err.h>
#include <linux/fs.h>
#include <linux/hrtimer.h>
#include <linux/init.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/timer.h>
#include <asm/asm-offsets.h>
#include <asm/lowcore.h>
#include <asm/pgtable.h>
#include <asm/nmi.h>
#include <asm/switch_to.h>
#include <asm/facility.h>
#include <asm/sclp.h>
#include "kvm-s390.h"
#include "gaccess.h"

#define CREATE_TRACE_POINTS
#include "trace.h"
#include "trace-s390.h"

#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU

struct kvm_stats_debugfs_item debugfs_entries[] = {
	{ "userspace_handled", VCPU_STAT(exit_userspace) },
	{ "exit_null", VCPU_STAT(exit_null) },
	{ "exit_validity", VCPU_STAT(exit_validity) },
	{ "exit_stop_request", VCPU_STAT(exit_stop_request) },
	{ "exit_external_request", VCPU_STAT(exit_external_request) },
	{ "exit_external_interrupt", VCPU_STAT(exit_external_interrupt) },
	{ "exit_instruction", VCPU_STAT(exit_instruction) },
	{ "exit_program_interruption", VCPU_STAT(exit_program_interruption) },
	{ "exit_instr_and_program_int", VCPU_STAT(exit_instr_and_program) },
	{ "halt_wakeup", VCPU_STAT(halt_wakeup) },
	{ "instruction_lctlg", VCPU_STAT(instruction_lctlg) },
	{ "instruction_lctl", VCPU_STAT(instruction_lctl) },
	{ "instruction_stctl", VCPU_STAT(instruction_stctl) },
	{ "instruction_stctg", VCPU_STAT(instruction_stctg) },
	{ "deliver_emergency_signal", VCPU_STAT(deliver_emergency_signal) },
	{ "deliver_external_call", VCPU_STAT(deliver_external_call) },
	{ "deliver_service_signal", VCPU_STAT(deliver_service_signal) },
	{ "deliver_virtio_interrupt", VCPU_STAT(deliver_virtio_interrupt) },
	{ "deliver_stop_signal", VCPU_STAT(deliver_stop_signal) },
	{ "deliver_prefix_signal", VCPU_STAT(deliver_prefix_signal) },
	{ "deliver_restart_signal", VCPU_STAT(deliver_restart_signal) },
	{ "deliver_program_interruption", VCPU_STAT(deliver_program_int) },
	{ "exit_wait_state", VCPU_STAT(exit_wait_state) },
	{ "instruction_pfmf", VCPU_STAT(instruction_pfmf) },
	{ "instruction_stidp", VCPU_STAT(instruction_stidp) },
	{ "instruction_spx", VCPU_STAT(instruction_spx) },
	{ "instruction_stpx", VCPU_STAT(instruction_stpx) },
	{ "instruction_stap", VCPU_STAT(instruction_stap) },
	{ "instruction_storage_key", VCPU_STAT(instruction_storage_key) },
	{ "instruction_ipte_interlock", VCPU_STAT(instruction_ipte_interlock) },
	{ "instruction_stsch", VCPU_STAT(instruction_stsch) },
	{ "instruction_chsc", VCPU_STAT(instruction_chsc) },
	{ "instruction_essa", VCPU_STAT(instruction_essa) },
	{ "instruction_stsi", VCPU_STAT(instruction_stsi) },
	{ "instruction_stfl", VCPU_STAT(instruction_stfl) },
	{ "instruction_tprot", VCPU_STAT(instruction_tprot) },
	{ "instruction_sigp_sense", VCPU_STAT(instruction_sigp_sense) },
	{ "instruction_sigp_sense_running", VCPU_STAT(instruction_sigp_sense_running) },
	{ "instruction_sigp_external_call", VCPU_STAT(instruction_sigp_external_call) },
	{ "instruction_sigp_emergency", VCPU_STAT(instruction_sigp_emergency) },
	{ "instruction_sigp_cond_emergency", VCPU_STAT(instruction_sigp_cond_emergency) },
	{ "instruction_sigp_start", VCPU_STAT(instruction_sigp_start) },
	{ "instruction_sigp_stop", VCPU_STAT(instruction_sigp_stop) },
	{ "instruction_sigp_stop_store_status", VCPU_STAT(instruction_sigp_stop_store_status) },
	{ "instruction_sigp_store_status", VCPU_STAT(instruction_sigp_store_status) },
	{ "instruction_sigp_set_arch", VCPU_STAT(instruction_sigp_arch) },
	{ "instruction_sigp_set_prefix", VCPU_STAT(instruction_sigp_prefix) },
	{ "instruction_sigp_restart", VCPU_STAT(instruction_sigp_restart) },
	{ "instruction_sigp_cpu_reset", VCPU_STAT(instruction_sigp_cpu_reset) },
	{ "instruction_sigp_init_cpu_reset", VCPU_STAT(instruction_sigp_init_cpu_reset) },
	{ "instruction_sigp_unknown", VCPU_STAT(instruction_sigp_unknown) },
	{ "diagnose_10", VCPU_STAT(diagnose_10) },
	{ "diagnose_44", VCPU_STAT(diagnose_44) },
	{ "diagnose_9c", VCPU_STAT(diagnose_9c) },
	{ NULL }
};

unsigned long *vfacilities;
static struct gmap_notifier gmap_notifier;

/* test availability of vfacility */
int test_vfacility(unsigned long nr)
{
	return __test_facility(nr, (void *) vfacilities);
}

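/*
 * Illustrative usage note (not part of this file): facility bits gate
 * optional hardware features, so callers probe them before touching the
 * related controls, e.g.
 *
 *	if (test_vfacility(76))
 *		setup_crypto();		// hypothetical caller
 *
 * kvm_s390_crypto_init() further down follows exactly this pattern for
 * facility bit 76.
 */
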
/* Section: not file related */
int kvm_arch_hardware_enable(void)
{
	/* every s390 is virtualization enabled ;-) */
	return 0;
}

static void kvm_gmap_notifier(struct gmap *gmap, unsigned long address);

int kvm_arch_hardware_setup(void)
{
	gmap_notifier.notifier_call = kvm_gmap_notifier;
	gmap_register_ipte_notifier(&gmap_notifier);
	return 0;
}

void kvm_arch_hardware_unsetup(void)
{
	gmap_unregister_ipte_notifier(&gmap_notifier);
}

int kvm_arch_init(void *opaque)
{
	/* Register floating interrupt controller interface. */
	return kvm_register_device_ops(&kvm_flic_ops, KVM_DEV_TYPE_FLIC);
}

/* Section: device related */
long kvm_arch_dev_ioctl(struct file *filp,
			unsigned int ioctl, unsigned long arg)
{
	if (ioctl == KVM_S390_ENABLE_SIE)
		return s390_enable_sie();
	return -EINVAL;
}

int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
{
	int r;

	switch (ext) {
	case KVM_CAP_S390_PSW:
	case KVM_CAP_S390_GMAP:
	case KVM_CAP_SYNC_MMU:
#ifdef CONFIG_KVM_S390_UCONTROL
	case KVM_CAP_S390_UCONTROL:
#endif
	case KVM_CAP_ASYNC_PF:
	case KVM_CAP_SYNC_REGS:
	case KVM_CAP_ONE_REG:
	case KVM_CAP_ENABLE_CAP:
	case KVM_CAP_S390_CSS_SUPPORT:
	case KVM_CAP_IRQFD:
	case KVM_CAP_IOEVENTFD:
	case KVM_CAP_DEVICE_CTRL:
	case KVM_CAP_ENABLE_CAP_VM:
	case KVM_CAP_S390_IRQCHIP:
	case KVM_CAP_VM_ATTRIBUTES:
	case KVM_CAP_MP_STATE:
	case KVM_CAP_S390_USER_SIGP:
		r = 1;
		break;
	case KVM_CAP_NR_VCPUS:
	case KVM_CAP_MAX_VCPUS:
		r = KVM_MAX_VCPUS;
		break;
	case KVM_CAP_NR_MEMSLOTS:
		r = KVM_USER_MEM_SLOTS;
		break;
	case KVM_CAP_S390_COW:
		r = MACHINE_HAS_ESOP;
		break;
	default:
		r = 0;
	}
	return r;
}

static void kvm_s390_sync_dirty_log(struct kvm *kvm,
				    struct kvm_memory_slot *memslot)
{
	gfn_t cur_gfn, last_gfn;
	unsigned long address;
	struct gmap *gmap = kvm->arch.gmap;

	down_read(&gmap->mm->mmap_sem);
	/* Loop over all guest pages */
	last_gfn = memslot->base_gfn + memslot->npages;
	for (cur_gfn = memslot->base_gfn; cur_gfn <= last_gfn; cur_gfn++) {
		address = gfn_to_hva_memslot(memslot, cur_gfn);

		if (gmap_test_and_clear_dirty(address, gmap))
			mark_page_dirty(kvm, cur_gfn);
	}
	up_read(&gmap->mm->mmap_sem);
}

/* Section: vm related */
/*
 * Get (and clear) the dirty memory log for a memory slot.
 */
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
			       struct kvm_dirty_log *log)
{
	int r;
	unsigned long n;
	struct kvm_memory_slot *memslot;
	int is_dirty = 0;

	mutex_lock(&kvm->slots_lock);

	r = -EINVAL;
	if (log->slot >= KVM_USER_MEM_SLOTS)
		goto out;

	memslot = id_to_memslot(kvm->memslots, log->slot);
	r = -ENOENT;
	if (!memslot->dirty_bitmap)
		goto out;

	kvm_s390_sync_dirty_log(kvm, memslot);
	r = kvm_get_dirty_log(kvm, log, &is_dirty);
	if (r)
		goto out;

	/* Clear the dirty log */
	if (is_dirty) {
		n = kvm_dirty_bitmap_bytes(memslot);
		memset(memslot->dirty_bitmap, 0, n);
	}
	r = 0;
out:
	mutex_unlock(&kvm->slots_lock);
	return r;
}

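/*
 * Illustrative userspace sketch (not part of this file; vm_fd, slot 0 and
 * the BITMAP_LONGS sizing are assumptions for the example): the log is
 * fetched with the KVM_GET_DIRTY_LOG vm ioctl, which triggers the
 * sync/clear sequence above.
 *
 *	struct kvm_dirty_log log = { .slot = 0 };
 *	unsigned long bitmap[BITMAP_LONGS];	// one bit per guest page
 *
 *	log.dirty_bitmap = bitmap;
 *	if (ioctl(vm_fd, KVM_GET_DIRTY_LOG, &log) == 0)
 *		;	// set bits mark pages dirtied since the last call
 */
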
static int kvm_vm_ioctl_enable_cap(struct kvm *kvm, struct kvm_enable_cap *cap)
{
	int r;

	if (cap->flags)
		return -EINVAL;

	switch (cap->cap) {
	case KVM_CAP_S390_IRQCHIP:
		kvm->arch.use_irqchip = 1;
		r = 0;
		break;
	case KVM_CAP_S390_USER_SIGP:
		kvm->arch.user_sigp = 1;
		r = 0;
		break;
	default:
		r = -EINVAL;
		break;
	}
	return r;
}

static int kvm_s390_get_mem_control(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	switch (attr->attr) {
	case KVM_S390_VM_MEM_LIMIT_SIZE:
		ret = 0;
		if (put_user(kvm->arch.gmap->asce_end, (u64 __user *)attr->addr))
			ret = -EFAULT;
		break;
	default:
		ret = -ENXIO;
		break;
	}
	return ret;
}

static int kvm_s390_set_mem_control(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;
	unsigned int idx;

	switch (attr->attr) {
	case KVM_S390_VM_MEM_ENABLE_CMMA:
		ret = -EBUSY;
		mutex_lock(&kvm->lock);
		if (atomic_read(&kvm->online_vcpus) == 0) {
			kvm->arch.use_cmma = 1;
			ret = 0;
		}
		mutex_unlock(&kvm->lock);
		break;
	case KVM_S390_VM_MEM_CLR_CMMA:
		mutex_lock(&kvm->lock);
		idx = srcu_read_lock(&kvm->srcu);
		s390_reset_cmma(kvm->arch.gmap->mm);
		srcu_read_unlock(&kvm->srcu, idx);
		mutex_unlock(&kvm->lock);
		ret = 0;
		break;
	case KVM_S390_VM_MEM_LIMIT_SIZE: {
		unsigned long new_limit;

		if (kvm_is_ucontrol(kvm))
			return -EINVAL;

		if (get_user(new_limit, (u64 __user *)attr->addr))
			return -EFAULT;

		if (new_limit > kvm->arch.gmap->asce_end)
			return -E2BIG;

		ret = -EBUSY;
		mutex_lock(&kvm->lock);
		if (atomic_read(&kvm->online_vcpus) == 0) {
			/* gmap_alloc will round the limit up */
			struct gmap *new = gmap_alloc(current->mm, new_limit);

			if (!new) {
				ret = -ENOMEM;
			} else {
				gmap_free(kvm->arch.gmap);
				new->private = kvm;
				kvm->arch.gmap = new;
				ret = 0;
			}
		}
		mutex_unlock(&kvm->lock);
		break;
	}
	default:
		ret = -ENXIO;
		break;
	}
	return ret;
}

static int kvm_s390_set_tod_high(struct kvm *kvm, struct kvm_device_attr *attr)
{
	u8 gtod_high;

	if (copy_from_user(&gtod_high, (void __user *)attr->addr,
			   sizeof(gtod_high)))
		return -EFAULT;

	if (gtod_high != 0)
		return -EINVAL;

	return 0;
}

static int kvm_s390_set_tod_low(struct kvm *kvm, struct kvm_device_attr *attr)
{
	struct kvm_vcpu *cur_vcpu;
	unsigned int vcpu_idx;
	u64 host_tod, gtod;
	int r;

	if (copy_from_user(&gtod, (void __user *)attr->addr, sizeof(gtod)))
		return -EFAULT;

	r = store_tod_clock(&host_tod);
	if (r)
		return r;

	mutex_lock(&kvm->lock);
	kvm->arch.epoch = gtod - host_tod;
	kvm_for_each_vcpu(vcpu_idx, cur_vcpu, kvm) {
		cur_vcpu->arch.sie_block->epoch = kvm->arch.epoch;
		exit_sie(cur_vcpu);
	}
	mutex_unlock(&kvm->lock);
	return 0;
}

static int kvm_s390_set_tod(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	if (attr->flags)
		return -EINVAL;

	switch (attr->attr) {
	case KVM_S390_VM_TOD_HIGH:
		ret = kvm_s390_set_tod_high(kvm, attr);
		break;
	case KVM_S390_VM_TOD_LOW:
		ret = kvm_s390_set_tod_low(kvm, attr);
		break;
	default:
		ret = -ENXIO;
		break;
	}
	return ret;
}

static int kvm_s390_get_tod_high(struct kvm *kvm, struct kvm_device_attr *attr)
{
	u8 gtod_high = 0;

	if (copy_to_user((void __user *)attr->addr, &gtod_high,
			 sizeof(gtod_high)))
		return -EFAULT;

	return 0;
}

static int kvm_s390_get_tod_low(struct kvm *kvm, struct kvm_device_attr *attr)
{
	u64 host_tod, gtod;
	int r;

	r = store_tod_clock(&host_tod);
	if (r)
		return r;

	gtod = host_tod + kvm->arch.epoch;
	if (copy_to_user((void __user *)attr->addr, &gtod, sizeof(gtod)))
		return -EFAULT;

	return 0;
}

static int kvm_s390_get_tod(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	if (attr->flags)
		return -EINVAL;

	switch (attr->attr) {
	case KVM_S390_VM_TOD_HIGH:
		ret = kvm_s390_get_tod_high(kvm, attr);
		break;
	case KVM_S390_VM_TOD_LOW:
		ret = kvm_s390_get_tod_low(kvm, attr);
		break;
	default:
		ret = -ENXIO;
		break;
	}
	return ret;
}

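/*
 * The guest TOD clock is never stored as an absolute value; only the delta
 * to the host TOD clock is kept in kvm->arch.epoch, so guest time keeps
 * advancing with the hardware clock: guest_tod = host_tod + epoch.
 * Illustrative userspace sketch for setting it (vm_fd and desired_tod are
 * assumptions for the example):
 *
 *	u64 desired_tod = 0x1234567890abcdefULL;
 *	struct kvm_device_attr attr = {
 *		.group = KVM_S390_VM_TOD,
 *		.attr  = KVM_S390_VM_TOD_LOW,
 *		.addr  = (u64)&desired_tod,
 *	};
 *	ioctl(vm_fd, KVM_SET_DEVICE_ATTR, &attr);
 */
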
static int kvm_s390_vm_set_attr(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	switch (attr->group) {
	case KVM_S390_VM_MEM_CTRL:
		ret = kvm_s390_set_mem_control(kvm, attr);
		break;
	case KVM_S390_VM_TOD:
		ret = kvm_s390_set_tod(kvm, attr);
		break;
	default:
		ret = -ENXIO;
		break;
	}

	return ret;
}

static int kvm_s390_vm_get_attr(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	switch (attr->group) {
	case KVM_S390_VM_MEM_CTRL:
		ret = kvm_s390_get_mem_control(kvm, attr);
		break;
	case KVM_S390_VM_TOD:
		ret = kvm_s390_get_tod(kvm, attr);
		break;
	default:
		ret = -ENXIO;
		break;
	}

	return ret;
}

static int kvm_s390_vm_has_attr(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	switch (attr->group) {
	case KVM_S390_VM_MEM_CTRL:
		switch (attr->attr) {
		case KVM_S390_VM_MEM_ENABLE_CMMA:
		case KVM_S390_VM_MEM_CLR_CMMA:
		case KVM_S390_VM_MEM_LIMIT_SIZE:
			ret = 0;
			break;
		default:
			ret = -ENXIO;
			break;
		}
		break;
	case KVM_S390_VM_TOD:
		switch (attr->attr) {
		case KVM_S390_VM_TOD_LOW:
		case KVM_S390_VM_TOD_HIGH:
			ret = 0;
			break;
		default:
			ret = -ENXIO;
			break;
		}
		break;
	default:
		ret = -ENXIO;
		break;
	}

	return ret;
}

long kvm_arch_vm_ioctl(struct file *filp,
		       unsigned int ioctl, unsigned long arg)
{
	struct kvm *kvm = filp->private_data;
	void __user *argp = (void __user *)arg;
	struct kvm_device_attr attr;
	int r;

	switch (ioctl) {
	case KVM_S390_INTERRUPT: {
		struct kvm_s390_interrupt s390int;

		r = -EFAULT;
		if (copy_from_user(&s390int, argp, sizeof(s390int)))
			break;
		r = kvm_s390_inject_vm(kvm, &s390int);
		break;
	}
	case KVM_ENABLE_CAP: {
		struct kvm_enable_cap cap;

		r = -EFAULT;
		if (copy_from_user(&cap, argp, sizeof(cap)))
			break;
		r = kvm_vm_ioctl_enable_cap(kvm, &cap);
		break;
	}
	case KVM_CREATE_IRQCHIP: {
		struct kvm_irq_routing_entry routing;

		r = -EINVAL;
		if (kvm->arch.use_irqchip) {
			/* Set up dummy routing. */
			memset(&routing, 0, sizeof(routing));
			kvm_set_irq_routing(kvm, &routing, 0, 0);
			r = 0;
		}
		break;
	}
	case KVM_SET_DEVICE_ATTR: {
		r = -EFAULT;
		if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
			break;
		r = kvm_s390_vm_set_attr(kvm, &attr);
		break;
	}
	case KVM_GET_DEVICE_ATTR: {
		r = -EFAULT;
		if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
			break;
		r = kvm_s390_vm_get_attr(kvm, &attr);
		break;
	}
	case KVM_HAS_DEVICE_ATTR: {
		r = -EFAULT;
		if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
			break;
		r = kvm_s390_vm_has_attr(kvm, &attr);
		break;
	}
	default:
		r = -ENOTTY;
	}

	return r;
}

static int kvm_s390_crypto_init(struct kvm *kvm)
{
	if (!test_vfacility(76))
		return 0;

	kvm->arch.crypto.crycb = kzalloc(sizeof(*kvm->arch.crypto.crycb),
					 GFP_KERNEL | GFP_DMA);
	if (!kvm->arch.crypto.crycb)
		return -ENOMEM;

	kvm->arch.crypto.crycbd = (__u32) (unsigned long) kvm->arch.crypto.crycb |
				  CRYCB_FORMAT1;

	return 0;
}

int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
{
	int rc;
	char debug_name[16];
	static unsigned long sca_offset;

	rc = -EINVAL;
#ifdef CONFIG_KVM_S390_UCONTROL
	if (type & ~KVM_VM_S390_UCONTROL)
		goto out_err;
	if ((type & KVM_VM_S390_UCONTROL) && (!capable(CAP_SYS_ADMIN)))
		goto out_err;
#else
	if (type)
		goto out_err;
#endif

	rc = s390_enable_sie();
	if (rc)
		goto out_err;

	rc = -ENOMEM;

	kvm->arch.sca = (struct sca_block *) get_zeroed_page(GFP_KERNEL);
	if (!kvm->arch.sca)
		goto out_err;
	spin_lock(&kvm_lock);
	sca_offset = (sca_offset + 16) & 0x7f0;
	kvm->arch.sca = (struct sca_block *) ((char *) kvm->arch.sca + sca_offset);
	spin_unlock(&kvm_lock);

	sprintf(debug_name, "kvm-%u", current->pid);

	kvm->arch.dbf = debug_register(debug_name, 8, 2, 8 * sizeof(long));
	if (!kvm->arch.dbf)
		goto out_nodbf;

	if (kvm_s390_crypto_init(kvm) < 0)
		goto out_crypto;

	spin_lock_init(&kvm->arch.float_int.lock);
	INIT_LIST_HEAD(&kvm->arch.float_int.list);
	init_waitqueue_head(&kvm->arch.ipte_wq);
	mutex_init(&kvm->arch.ipte_mutex);

	debug_register_view(kvm->arch.dbf, &debug_sprintf_view);
	VM_EVENT(kvm, 3, "%s", "vm created");

	if (type & KVM_VM_S390_UCONTROL) {
		kvm->arch.gmap = NULL;
	} else {
		kvm->arch.gmap = gmap_alloc(current->mm, (1UL << 44) - 1);
		if (!kvm->arch.gmap)
			goto out_nogmap;
		kvm->arch.gmap->private = kvm;
		kvm->arch.gmap->pfault_enabled = 0;
	}

	kvm->arch.css_support = 0;
	kvm->arch.use_irqchip = 0;
	kvm->arch.epoch = 0;

	spin_lock_init(&kvm->arch.start_stop_lock);

	return 0;
out_nogmap:
	kfree(kvm->arch.crypto.crycb);
out_crypto:
	debug_unregister(kvm->arch.dbf);
out_nodbf:
	free_page((unsigned long)(kvm->arch.sca));
out_err:
	return rc;
}

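/*
 * Note on the SCA setup above: each VM's system control area lives in one
 * zeroed page, and the static sca_offset staggers the start address in
 * 16-byte steps from one VM to the next.  The likely intent is to spread
 * the hardware-accessed SCA across different cache lines between VMs (an
 * inference from the code, not a statement made by this file).
 */
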
void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{
	VCPU_EVENT(vcpu, 3, "%s", "free cpu");
	trace_kvm_s390_destroy_vcpu(vcpu->vcpu_id);
	kvm_s390_clear_local_irqs(vcpu);
	kvm_clear_async_pf_completion_queue(vcpu);
	if (!kvm_is_ucontrol(vcpu->kvm)) {
		clear_bit(63 - vcpu->vcpu_id,
			  (unsigned long *) &vcpu->kvm->arch.sca->mcn);
		if (vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda ==
		    (__u64) vcpu->arch.sie_block)
			vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda = 0;
	}
	smp_mb();

	if (kvm_is_ucontrol(vcpu->kvm))
		gmap_free(vcpu->arch.gmap);

	if (kvm_s390_cmma_enabled(vcpu->kvm))
		kvm_s390_vcpu_unsetup_cmma(vcpu);
	free_page((unsigned long)(vcpu->arch.sie_block));

	kvm_vcpu_uninit(vcpu);
	kmem_cache_free(kvm_vcpu_cache, vcpu);
}

static void kvm_free_vcpus(struct kvm *kvm)
{
	unsigned int i;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(i, vcpu, kvm)
		kvm_arch_vcpu_destroy(vcpu);

	mutex_lock(&kvm->lock);
	for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
		kvm->vcpus[i] = NULL;

	atomic_set(&kvm->online_vcpus, 0);
	mutex_unlock(&kvm->lock);
}

void kvm_arch_destroy_vm(struct kvm *kvm)
{
	kvm_free_vcpus(kvm);
	free_page((unsigned long)(kvm->arch.sca));
	debug_unregister(kvm->arch.dbf);
	kfree(kvm->arch.crypto.crycb);
	if (!kvm_is_ucontrol(kvm))
		gmap_free(kvm->arch.gmap);
	kvm_s390_destroy_adapters(kvm);
	kvm_s390_clear_float_irqs(kvm);
}

/* Section: vcpu related */
static int __kvm_ucontrol_vcpu_init(struct kvm_vcpu *vcpu)
{
	vcpu->arch.gmap = gmap_alloc(current->mm, -1UL);
	if (!vcpu->arch.gmap)
		return -ENOMEM;
	vcpu->arch.gmap->private = vcpu->kvm;

	return 0;
}

int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
{
	vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
	kvm_clear_async_pf_completion_queue(vcpu);
	vcpu->run->kvm_valid_regs = KVM_SYNC_PREFIX |
				    KVM_SYNC_GPRS |
				    KVM_SYNC_ACRS |
				    KVM_SYNC_CRS |
				    KVM_SYNC_ARCH0 |
				    KVM_SYNC_PFAULT;

	if (kvm_is_ucontrol(vcpu->kvm))
		return __kvm_ucontrol_vcpu_init(vcpu);

	return 0;
}

void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	save_fp_ctl(&vcpu->arch.host_fpregs.fpc);
	save_fp_regs(vcpu->arch.host_fpregs.fprs);
	save_access_regs(vcpu->arch.host_acrs);
	restore_fp_ctl(&vcpu->arch.guest_fpregs.fpc);
	restore_fp_regs(vcpu->arch.guest_fpregs.fprs);
	restore_access_regs(vcpu->run->s.regs.acrs);
	gmap_enable(vcpu->arch.gmap);
	atomic_set_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
}

void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
	atomic_clear_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
	gmap_disable(vcpu->arch.gmap);
	save_fp_ctl(&vcpu->arch.guest_fpregs.fpc);
	save_fp_regs(vcpu->arch.guest_fpregs.fprs);
	save_access_regs(vcpu->run->s.regs.acrs);
	restore_fp_ctl(&vcpu->arch.host_fpregs.fpc);
	restore_fp_regs(vcpu->arch.host_fpregs.fprs);
	restore_access_regs(vcpu->arch.host_acrs);
}

static void kvm_s390_vcpu_initial_reset(struct kvm_vcpu *vcpu)
{
	/* this equals initial cpu reset in pop, but we don't switch to ESA */
	vcpu->arch.sie_block->gpsw.mask = 0UL;
	vcpu->arch.sie_block->gpsw.addr = 0UL;
	kvm_s390_set_prefix(vcpu, 0);
	vcpu->arch.sie_block->cputm = 0UL;
	vcpu->arch.sie_block->ckc = 0UL;
	vcpu->arch.sie_block->todpr = 0;
	memset(vcpu->arch.sie_block->gcr, 0, 16 * sizeof(__u64));
	vcpu->arch.sie_block->gcr[0] = 0xE0UL;
	vcpu->arch.sie_block->gcr[14] = 0xC2000000UL;
	vcpu->arch.guest_fpregs.fpc = 0;
	asm volatile("lfpc %0" : : "Q" (vcpu->arch.guest_fpregs.fpc));
	vcpu->arch.sie_block->gbea = 1;
	vcpu->arch.sie_block->pp = 0;
	vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
	kvm_clear_async_pf_completion_queue(vcpu);
	if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm))
		kvm_s390_vcpu_stop(vcpu);
	kvm_s390_clear_local_irqs(vcpu);
}

void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
{
	mutex_lock(&vcpu->kvm->lock);
	vcpu->arch.sie_block->epoch = vcpu->kvm->arch.epoch;
	mutex_unlock(&vcpu->kvm->lock);
	if (!kvm_is_ucontrol(vcpu->kvm))
		vcpu->arch.gmap = vcpu->kvm->arch.gmap;
}

static void kvm_s390_vcpu_crypto_setup(struct kvm_vcpu *vcpu)
{
	if (!test_vfacility(76))
		return;

	vcpu->arch.sie_block->crycbd = vcpu->kvm->arch.crypto.crycbd;
}

void kvm_s390_vcpu_unsetup_cmma(struct kvm_vcpu *vcpu)
{
	free_page(vcpu->arch.sie_block->cbrlo);
	vcpu->arch.sie_block->cbrlo = 0;
}

int kvm_s390_vcpu_setup_cmma(struct kvm_vcpu *vcpu)
{
	vcpu->arch.sie_block->cbrlo = get_zeroed_page(GFP_KERNEL);
	if (!vcpu->arch.sie_block->cbrlo)
		return -ENOMEM;

	vcpu->arch.sie_block->ecb2 |= 0x80;
	vcpu->arch.sie_block->ecb2 &= ~0x08;
	return 0;
}

int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
{
	int rc = 0;

	atomic_set(&vcpu->arch.sie_block->cpuflags, CPUSTAT_ZARCH |
						    CPUSTAT_SM |
						    CPUSTAT_STOPPED |
						    CPUSTAT_GED);
	vcpu->arch.sie_block->ecb = 6;
	if (test_vfacility(50) && test_vfacility(73))
		vcpu->arch.sie_block->ecb |= 0x10;

	vcpu->arch.sie_block->ecb2 = 8;
	vcpu->arch.sie_block->eca = 0xC1002000U;
	if (sclp_has_siif())
		vcpu->arch.sie_block->eca |= 1;
	if (sclp_has_sigpif())
		vcpu->arch.sie_block->eca |= 0x10000000U;
	vcpu->arch.sie_block->fac = (int) (long) vfacilities;
	vcpu->arch.sie_block->ictl |= ICTL_ISKE | ICTL_SSKE | ICTL_RRBE |
				      ICTL_TPROT;

	if (kvm_s390_cmma_enabled(vcpu->kvm)) {
		rc = kvm_s390_vcpu_setup_cmma(vcpu);
		if (rc)
			return rc;
	}
	hrtimer_init(&vcpu->arch.ckc_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	vcpu->arch.ckc_timer.function = kvm_s390_idle_wakeup;
	get_cpu_id(&vcpu->arch.cpu_id);
	vcpu->arch.cpu_id.version = 0xff;

	kvm_s390_vcpu_crypto_setup(vcpu);

	return rc;
}

struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
				      unsigned int id)
{
	struct kvm_vcpu *vcpu;
	struct sie_page *sie_page;
	int rc = -EINVAL;

	if (id >= KVM_MAX_VCPUS)
		goto out;

	rc = -ENOMEM;

	vcpu = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
	if (!vcpu)
		goto out;

	sie_page = (struct sie_page *) get_zeroed_page(GFP_KERNEL);
	if (!sie_page)
		goto out_free_cpu;

	vcpu->arch.sie_block = &sie_page->sie_block;
	vcpu->arch.sie_block->itdba = (unsigned long) &sie_page->itdb;

	vcpu->arch.sie_block->icpua = id;
	if (!kvm_is_ucontrol(kvm)) {
		if (!kvm->arch.sca) {
			WARN_ON_ONCE(1);
			goto out_free_cpu;
		}
		if (!kvm->arch.sca->cpu[id].sda)
			kvm->arch.sca->cpu[id].sda =
				(__u64) vcpu->arch.sie_block;
		vcpu->arch.sie_block->scaoh =
			(__u32)(((__u64)kvm->arch.sca) >> 32);
		vcpu->arch.sie_block->scaol = (__u32)(__u64)kvm->arch.sca;
		set_bit(63 - id, (unsigned long *) &kvm->arch.sca->mcn);
	}

	spin_lock_init(&vcpu->arch.local_int.lock);
	vcpu->arch.local_int.float_int = &kvm->arch.float_int;
	vcpu->arch.local_int.wq = &vcpu->wq;
	vcpu->arch.local_int.cpuflags = &vcpu->arch.sie_block->cpuflags;

	rc = kvm_vcpu_init(vcpu, kvm, id);
	if (rc)
		goto out_free_sie_block;
	VM_EVENT(kvm, 3, "create cpu %d at %p, sie block at %p", id, vcpu,
		 vcpu->arch.sie_block);
	trace_kvm_s390_create_vcpu(id, vcpu, vcpu->arch.sie_block);

	return vcpu;
out_free_sie_block:
	free_page((unsigned long)(vcpu->arch.sie_block));
out_free_cpu:
	kmem_cache_free(kvm_vcpu_cache, vcpu);
out:
	return ERR_PTR(rc);
}

int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
{
	return kvm_s390_vcpu_has_irq(vcpu, 0);
}

void s390_vcpu_block(struct kvm_vcpu *vcpu)
{
	atomic_set_mask(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
}

void s390_vcpu_unblock(struct kvm_vcpu *vcpu)
{
	atomic_clear_mask(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
}

/*
 * Kick a guest cpu out of SIE and wait until SIE is not running.
 * If the CPU is not running (e.g. waiting as idle) the function will
 * return immediately.
 */
void exit_sie(struct kvm_vcpu *vcpu)
{
	atomic_set_mask(CPUSTAT_STOP_INT, &vcpu->arch.sie_block->cpuflags);
	while (vcpu->arch.sie_block->prog0c & PROG_IN_SIE)
		cpu_relax();
}

/* Kick a guest cpu out of SIE and prevent SIE-reentry */
void exit_sie_sync(struct kvm_vcpu *vcpu)
{
	s390_vcpu_block(vcpu);
	exit_sie(vcpu);
}

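/*
 * Note on the kick protocol above: PROG_BLOCK_SIE keeps a VCPU from
 * re-entering SIE, CPUSTAT_STOP_INT forces a running VCPU to exit, and
 * exit_sie() spins on PROG_IN_SIE until the hardware has actually left
 * interpretive execution.  Callers that must observe the VCPU outside of
 * SIE therefore use exit_sie_sync(), which blocks re-entry first.
 */
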
static void kvm_gmap_notifier(struct gmap *gmap, unsigned long address)
{
	int i;
	struct kvm *kvm = gmap->private;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(i, vcpu, kvm) {
		/* match against both prefix pages */
		if (kvm_s390_get_prefix(vcpu) == (address & ~0x1000UL)) {
			VCPU_EVENT(vcpu, 2, "gmap notifier for %lx", address);
			kvm_make_request(KVM_REQ_MMU_RELOAD, vcpu);
			exit_sie_sync(vcpu);
		}
	}
}

int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
{
	/* kvm common code refers to this, but never calls it */
	BUG();
	return 0;
}

static int kvm_arch_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu,
					   struct kvm_one_reg *reg)
{
	int r = -EINVAL;

	switch (reg->id) {
	case KVM_REG_S390_TODPR:
		r = put_user(vcpu->arch.sie_block->todpr,
			     (u32 __user *)reg->addr);
		break;
	case KVM_REG_S390_EPOCHDIFF:
		r = put_user(vcpu->arch.sie_block->epoch,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_CPU_TIMER:
		r = put_user(vcpu->arch.sie_block->cputm,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_CLOCK_COMP:
		r = put_user(vcpu->arch.sie_block->ckc,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PFTOKEN:
		r = put_user(vcpu->arch.pfault_token,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PFCOMPARE:
		r = put_user(vcpu->arch.pfault_compare,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PFSELECT:
		r = put_user(vcpu->arch.pfault_select,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PP:
		r = put_user(vcpu->arch.sie_block->pp,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_GBEA:
		r = put_user(vcpu->arch.sie_block->gbea,
			     (u64 __user *)reg->addr);
		break;
	default:
		break;
	}

	return r;
}

static int kvm_arch_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu,
					   struct kvm_one_reg *reg)
{
	int r = -EINVAL;

	switch (reg->id) {
	case KVM_REG_S390_TODPR:
		r = get_user(vcpu->arch.sie_block->todpr,
			     (u32 __user *)reg->addr);
		break;
	case KVM_REG_S390_EPOCHDIFF:
		r = get_user(vcpu->arch.sie_block->epoch,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_CPU_TIMER:
		r = get_user(vcpu->arch.sie_block->cputm,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_CLOCK_COMP:
		r = get_user(vcpu->arch.sie_block->ckc,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PFTOKEN:
		r = get_user(vcpu->arch.pfault_token,
			     (u64 __user *)reg->addr);
		if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
			kvm_clear_async_pf_completion_queue(vcpu);
		break;
	case KVM_REG_S390_PFCOMPARE:
		r = get_user(vcpu->arch.pfault_compare,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PFSELECT:
		r = get_user(vcpu->arch.pfault_select,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PP:
		r = get_user(vcpu->arch.sie_block->pp,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_GBEA:
		r = get_user(vcpu->arch.sie_block->gbea,
			     (u64 __user *)reg->addr);
		break;
	default:
		break;
	}

	return r;
}

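/*
 * Illustrative userspace sketch (not part of this file; vcpu_fd is an
 * assumption for the example): a single guest register is read through the
 * generic ONE_REG interface handled above.
 *
 *	u64 epochdiff;
 *	struct kvm_one_reg reg = {
 *		.id   = KVM_REG_S390_EPOCHDIFF,
 *		.addr = (u64)&epochdiff,
 *	};
 *	ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);
 */
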
static int kvm_arch_vcpu_ioctl_initial_reset(struct kvm_vcpu *vcpu)
{
	kvm_s390_vcpu_initial_reset(vcpu);
	return 0;
}

int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	memcpy(&vcpu->run->s.regs.gprs, &regs->gprs, sizeof(regs->gprs));
	return 0;
}

int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	memcpy(&regs->gprs, &vcpu->run->s.regs.gprs, sizeof(regs->gprs));
	return 0;
}

int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	memcpy(&vcpu->run->s.regs.acrs, &sregs->acrs, sizeof(sregs->acrs));
	memcpy(&vcpu->arch.sie_block->gcr, &sregs->crs, sizeof(sregs->crs));
	restore_access_regs(vcpu->run->s.regs.acrs);
	return 0;
}

int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	memcpy(&sregs->acrs, &vcpu->run->s.regs.acrs, sizeof(sregs->acrs));
	memcpy(&sregs->crs, &vcpu->arch.sie_block->gcr, sizeof(sregs->crs));
	return 0;
}

int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	if (test_fp_ctl(fpu->fpc))
		return -EINVAL;
	memcpy(&vcpu->arch.guest_fpregs.fprs, &fpu->fprs, sizeof(fpu->fprs));
	vcpu->arch.guest_fpregs.fpc = fpu->fpc;
	restore_fp_ctl(&vcpu->arch.guest_fpregs.fpc);
	restore_fp_regs(vcpu->arch.guest_fpregs.fprs);
	return 0;
}

int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	memcpy(&fpu->fprs, &vcpu->arch.guest_fpregs.fprs, sizeof(fpu->fprs));
	fpu->fpc = vcpu->arch.guest_fpregs.fpc;
	return 0;
}

static int kvm_arch_vcpu_ioctl_set_initial_psw(struct kvm_vcpu *vcpu, psw_t psw)
{
	int rc = 0;

	if (!is_vcpu_stopped(vcpu))
		rc = -EBUSY;
	else {
		vcpu->run->psw_mask = psw.mask;
		vcpu->run->psw_addr = psw.addr;
	}
	return rc;
}

int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
				  struct kvm_translation *tr)
{
	return -EINVAL; /* not implemented yet */
}

#define VALID_GUESTDBG_FLAGS (KVM_GUESTDBG_SINGLESTEP | \
			      KVM_GUESTDBG_USE_HW_BP | \
			      KVM_GUESTDBG_ENABLE)

int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
					struct kvm_guest_debug *dbg)
{
	int rc = 0;

	vcpu->guest_debug = 0;
	kvm_s390_clear_bp_data(vcpu);

	if (dbg->control & ~VALID_GUESTDBG_FLAGS)
		return -EINVAL;

	if (dbg->control & KVM_GUESTDBG_ENABLE) {
		vcpu->guest_debug = dbg->control;
		/* enforce guest PER */
		atomic_set_mask(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags);

		if (dbg->control & KVM_GUESTDBG_USE_HW_BP)
			rc = kvm_s390_import_bp_data(vcpu, dbg);
	} else {
		atomic_clear_mask(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags);
		vcpu->arch.guestdbg.last_bp = 0;
	}

	if (rc) {
		vcpu->guest_debug = 0;
		kvm_s390_clear_bp_data(vcpu);
		atomic_clear_mask(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags);
	}

	return rc;
}

int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	/* CHECK_STOP and LOAD are not supported yet */
	return is_vcpu_stopped(vcpu) ? KVM_MP_STATE_STOPPED :
				       KVM_MP_STATE_OPERATING;
}

int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	int rc = 0;

	/* user space knows about this interface - let it control the state */
	vcpu->kvm->arch.user_cpu_state_ctrl = 1;

	switch (mp_state->mp_state) {
	case KVM_MP_STATE_STOPPED:
		kvm_s390_vcpu_stop(vcpu);
		break;
	case KVM_MP_STATE_OPERATING:
		kvm_s390_vcpu_start(vcpu);
		break;
	case KVM_MP_STATE_LOAD:
	case KVM_MP_STATE_CHECK_STOP:
		/* fall through - CHECK_STOP and LOAD are not supported yet */
	default:
		rc = -ENXIO;
	}

	return rc;
}

bool kvm_s390_cmma_enabled(struct kvm *kvm)
{
	if (!MACHINE_IS_LPAR)
		return false;
	/* only enable for z10 and later */
	if (!MACHINE_HAS_EDAT1)
		return false;
	if (!kvm->arch.use_cmma)
		return false;
	return true;
}

static bool ibs_enabled(struct kvm_vcpu *vcpu)
{
	return atomic_read(&vcpu->arch.sie_block->cpuflags) & CPUSTAT_IBS;
}

static int kvm_s390_handle_requests(struct kvm_vcpu *vcpu)
{
retry:
	s390_vcpu_unblock(vcpu);
	/*
	 * We use MMU_RELOAD just to re-arm the ipte notifier for the
	 * guest prefix page. gmap_ipte_notify will wait on the ptl lock.
	 * This ensures that the ipte instruction for this request has
	 * already finished. We might race against a second unmapper that
	 * wants to set the blocking bit. Lets just retry the request loop.
	 */
	if (kvm_check_request(KVM_REQ_MMU_RELOAD, vcpu)) {
		int rc;
		rc = gmap_ipte_notify(vcpu->arch.gmap,
				      kvm_s390_get_prefix(vcpu),
				      PAGE_SIZE * 2);
		if (rc)
			return rc;
		goto retry;
	}

	if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu)) {
		vcpu->arch.sie_block->ihcpu = 0xffff;
		goto retry;
	}

	if (kvm_check_request(KVM_REQ_ENABLE_IBS, vcpu)) {
		if (!ibs_enabled(vcpu)) {
			trace_kvm_s390_enable_disable_ibs(vcpu->vcpu_id, 1);
			atomic_set_mask(CPUSTAT_IBS,
					&vcpu->arch.sie_block->cpuflags);
		}
		goto retry;
	}

	if (kvm_check_request(KVM_REQ_DISABLE_IBS, vcpu)) {
		if (ibs_enabled(vcpu)) {
			trace_kvm_s390_enable_disable_ibs(vcpu->vcpu_id, 0);
			atomic_clear_mask(CPUSTAT_IBS,
					  &vcpu->arch.sie_block->cpuflags);
		}
		goto retry;
	}

	/* nothing to do, just clear the request */
	clear_bit(KVM_REQ_UNHALT, &vcpu->requests);

	return 0;
}

/**
 * kvm_arch_fault_in_page - fault-in guest page if necessary
 * @vcpu: The corresponding virtual cpu
 * @gpa: Guest physical address
 * @writable: Whether the page should be writable or not
 *
 * Make sure that a guest page has been faulted-in on the host.
 *
 * Return: Zero on success, negative error code otherwise.
 */
long kvm_arch_fault_in_page(struct kvm_vcpu *vcpu, gpa_t gpa, int writable)
{
	return gmap_fault(vcpu->arch.gmap, gpa,
			  writable ? FAULT_FLAG_WRITE : 0);
}

static void __kvm_inject_pfault_token(struct kvm_vcpu *vcpu, bool start_token,
				      unsigned long token)
{
	struct kvm_s390_interrupt inti;
	struct kvm_s390_irq irq;

	if (start_token) {
		irq.u.ext.ext_params2 = token;
		irq.type = KVM_S390_INT_PFAULT_INIT;
		WARN_ON_ONCE(kvm_s390_inject_vcpu(vcpu, &irq));
	} else {
		inti.type = KVM_S390_INT_PFAULT_DONE;
		inti.parm64 = token;
		WARN_ON_ONCE(kvm_s390_inject_vm(vcpu->kvm, &inti));
	}
}

void kvm_arch_async_page_not_present(struct kvm_vcpu *vcpu,
				     struct kvm_async_pf *work)
{
	trace_kvm_s390_pfault_init(vcpu, work->arch.pfault_token);
	__kvm_inject_pfault_token(vcpu, true, work->arch.pfault_token);
}

void kvm_arch_async_page_present(struct kvm_vcpu *vcpu,
				 struct kvm_async_pf *work)
{
	trace_kvm_s390_pfault_done(vcpu, work->arch.pfault_token);
	__kvm_inject_pfault_token(vcpu, false, work->arch.pfault_token);
}

void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu,
			       struct kvm_async_pf *work)
{
	/* s390 will always inject the page directly */
}

bool kvm_arch_can_inject_async_page_present(struct kvm_vcpu *vcpu)
{
	/*
	 * s390 will always inject the page directly,
	 * but we still want check_async_completion to cleanup
	 */
	return true;
}

static int kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu)
{
	hva_t hva;
	struct kvm_arch_async_pf arch;
	int rc;

	if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
		return 0;
	if ((vcpu->arch.sie_block->gpsw.mask & vcpu->arch.pfault_select) !=
	    vcpu->arch.pfault_compare)
		return 0;
	if (psw_extint_disabled(vcpu))
		return 0;
	if (kvm_s390_vcpu_has_irq(vcpu, 0))
		return 0;
	if (!(vcpu->arch.sie_block->gcr[0] & 0x200ul))
		return 0;
	if (!vcpu->arch.gmap->pfault_enabled)
		return 0;

	hva = gfn_to_hva(vcpu->kvm, gpa_to_gfn(current->thread.gmap_addr));
	hva += current->thread.gmap_addr & ~PAGE_MASK;
	if (read_guest_real(vcpu, vcpu->arch.pfault_token, &arch.pfault_token, 8))
		return 0;

	rc = kvm_setup_async_pf(vcpu, current->thread.gmap_addr, hva, &arch);
	return rc;
}

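/*
 * Summary of the pfault handshake implemented above: when a guest access
 * faults on the host side and all conditions in kvm_arch_setup_async_pf()
 * hold (a valid token, pfault enabled, external interrupts allowed), KVM
 * injects a PFAULT_INIT external interrupt carrying the token so the guest
 * can schedule other work, resolves the page asynchronously, and finally
 * injects PFAULT_DONE with the same token.
 */
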
static int vcpu_pre_run(struct kvm_vcpu *vcpu)
{
	int rc, cpuflags;

	/*
	 * On s390 notifications for arriving pages will be delivered directly
	 * to the guest but the house keeping for completed pfaults is
	 * handled outside the worker.
	 */
	kvm_check_async_pf_completion(vcpu);

	memcpy(&vcpu->arch.sie_block->gg14, &vcpu->run->s.regs.gprs[14], 16);

	if (need_resched())
		schedule();

	if (test_cpu_flag(CIF_MCCK_PENDING))
		s390_handle_mcck();

	if (!kvm_is_ucontrol(vcpu->kvm)) {
		rc = kvm_s390_deliver_pending_interrupts(vcpu);
		if (rc)
			return rc;
	}

	rc = kvm_s390_handle_requests(vcpu);
	if (rc)
		return rc;

	if (guestdbg_enabled(vcpu)) {
		kvm_s390_backup_guest_per_regs(vcpu);
		kvm_s390_patch_guest_per_regs(vcpu);
	}

	vcpu->arch.sie_block->icptcode = 0;
	cpuflags = atomic_read(&vcpu->arch.sie_block->cpuflags);
	VCPU_EVENT(vcpu, 6, "entering sie flags %x", cpuflags);
	trace_kvm_s390_sie_enter(vcpu, cpuflags);

	return 0;
}

static int vcpu_post_run(struct kvm_vcpu *vcpu, int exit_reason)
{
	int rc = -1;

	VCPU_EVENT(vcpu, 6, "exit sie icptcode %d",
		   vcpu->arch.sie_block->icptcode);
	trace_kvm_s390_sie_exit(vcpu, vcpu->arch.sie_block->icptcode);

	if (guestdbg_enabled(vcpu))
		kvm_s390_restore_guest_per_regs(vcpu);

	if (exit_reason >= 0) {
		rc = 0;
	} else if (kvm_is_ucontrol(vcpu->kvm)) {
		vcpu->run->exit_reason = KVM_EXIT_S390_UCONTROL;
		vcpu->run->s390_ucontrol.trans_exc_code =
						current->thread.gmap_addr;
		vcpu->run->s390_ucontrol.pgm_code = 0x10;
		rc = -EREMOTE;

	} else if (current->thread.gmap_pfault) {
		trace_kvm_s390_major_guest_pfault(vcpu);
		current->thread.gmap_pfault = 0;
		if (kvm_arch_setup_async_pf(vcpu)) {
			rc = 0;
		} else {
			gpa_t gpa = current->thread.gmap_addr;
			rc = kvm_arch_fault_in_page(vcpu, gpa, 1);
		}
	}

	if (rc == -1) {
		VCPU_EVENT(vcpu, 3, "%s", "fault in sie instruction");
		trace_kvm_s390_sie_fault(vcpu);
		rc = kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
	}

	memcpy(&vcpu->run->s.regs.gprs[14], &vcpu->arch.sie_block->gg14, 16);

	if (rc == 0) {
		if (kvm_is_ucontrol(vcpu->kvm))
			/* Don't exit for host interrupts. */
			rc = vcpu->arch.sie_block->icptcode ? -EOPNOTSUPP : 0;
		else
			rc = kvm_handle_sie_intercept(vcpu);
	}

	return rc;
}

static int __vcpu_run(struct kvm_vcpu *vcpu)
{
	int rc, exit_reason;

	/*
	 * We try to hold kvm->srcu during most of vcpu_run (except when run-
	 * ning the guest), so that memslots (and other stuff) are protected
	 */
	vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);

	do {
		rc = vcpu_pre_run(vcpu);
		if (rc)
			break;

		srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
		/*
		 * As PF_VCPU will be used in fault handler, between
		 * guest_enter and guest_exit should be no uaccess.
		 */
		preempt_disable();
		kvm_guest_enter();
		preempt_enable();
		exit_reason = sie64a(vcpu->arch.sie_block,
				     vcpu->run->s.regs.gprs);
		kvm_guest_exit();
		vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);

		rc = vcpu_post_run(vcpu, exit_reason);
	} while (!signal_pending(current) && !guestdbg_exit_pending(vcpu) && !rc);

	srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
	return rc;
}

static void sync_regs(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	vcpu->arch.sie_block->gpsw.mask = kvm_run->psw_mask;
	vcpu->arch.sie_block->gpsw.addr = kvm_run->psw_addr;
	if (kvm_run->kvm_dirty_regs & KVM_SYNC_PREFIX)
		kvm_s390_set_prefix(vcpu, kvm_run->s.regs.prefix);
	if (kvm_run->kvm_dirty_regs & KVM_SYNC_CRS) {
		memcpy(&vcpu->arch.sie_block->gcr, &kvm_run->s.regs.crs, 128);
		/* some control register changes require a tlb flush */
		kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
	}
	if (kvm_run->kvm_dirty_regs & KVM_SYNC_ARCH0) {
		vcpu->arch.sie_block->cputm = kvm_run->s.regs.cputm;
		vcpu->arch.sie_block->ckc = kvm_run->s.regs.ckc;
		vcpu->arch.sie_block->todpr = kvm_run->s.regs.todpr;
		vcpu->arch.sie_block->pp = kvm_run->s.regs.pp;
		vcpu->arch.sie_block->gbea = kvm_run->s.regs.gbea;
	}
	if (kvm_run->kvm_dirty_regs & KVM_SYNC_PFAULT) {
		vcpu->arch.pfault_token = kvm_run->s.regs.pft;
		vcpu->arch.pfault_select = kvm_run->s.regs.pfs;
		vcpu->arch.pfault_compare = kvm_run->s.regs.pfc;
		if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
			kvm_clear_async_pf_completion_queue(vcpu);
	}
	kvm_run->kvm_dirty_regs = 0;
}

static void store_regs(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	kvm_run->psw_mask = vcpu->arch.sie_block->gpsw.mask;
	kvm_run->psw_addr = vcpu->arch.sie_block->gpsw.addr;
	kvm_run->s.regs.prefix = kvm_s390_get_prefix(vcpu);
	memcpy(&kvm_run->s.regs.crs, &vcpu->arch.sie_block->gcr, 128);
	kvm_run->s.regs.cputm = vcpu->arch.sie_block->cputm;
	kvm_run->s.regs.ckc = vcpu->arch.sie_block->ckc;
	kvm_run->s.regs.todpr = vcpu->arch.sie_block->todpr;
	kvm_run->s.regs.pp = vcpu->arch.sie_block->pp;
	kvm_run->s.regs.gbea = vcpu->arch.sie_block->gbea;
	kvm_run->s.regs.pft = vcpu->arch.pfault_token;
	kvm_run->s.regs.pfs = vcpu->arch.pfault_select;
	kvm_run->s.regs.pfc = vcpu->arch.pfault_compare;
}

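/*
 * sync_regs()/store_regs() implement the "synced registers" scheme: state
 * declared in kvm_valid_regs (see kvm_arch_vcpu_init()) lives in the shared
 * kvm_run page, so userspace can read it after every exit and modify it
 * before the next KVM_RUN by setting the matching kvm_dirty_regs bit,
 * without issuing separate GET/SET ioctls.
 */
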
int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	int rc;
	sigset_t sigsaved;

	if (guestdbg_exit_pending(vcpu)) {
		kvm_s390_prepare_debug_exit(vcpu);
		return 0;
	}

	if (vcpu->sigset_active)
		sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);

	if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm)) {
		kvm_s390_vcpu_start(vcpu);
	} else if (is_vcpu_stopped(vcpu)) {
		pr_err_ratelimited("kvm-s390: can't run stopped vcpu %d\n",
				   vcpu->vcpu_id);
		return -EINVAL;
	}

	sync_regs(vcpu, kvm_run);

	might_fault();
	rc = __vcpu_run(vcpu);

	if (signal_pending(current) && !rc) {
		kvm_run->exit_reason = KVM_EXIT_INTR;
		rc = -EINTR;
	}

	if (guestdbg_exit_pending(vcpu) && !rc) {
		kvm_s390_prepare_debug_exit(vcpu);
		rc = 0;
	}

	if (rc == -EOPNOTSUPP) {
		/* intercept cannot be handled in-kernel, prepare kvm-run */
		kvm_run->exit_reason = KVM_EXIT_S390_SIEIC;
		kvm_run->s390_sieic.icptcode = vcpu->arch.sie_block->icptcode;
		kvm_run->s390_sieic.ipa = vcpu->arch.sie_block->ipa;
		kvm_run->s390_sieic.ipb = vcpu->arch.sie_block->ipb;
		rc = 0;
	}

	if (rc == -EREMOTE) {
		/* intercept was handled, but userspace support is needed
		 * kvm_run has been prepared by the handler */
		rc = 0;
	}

	store_regs(vcpu, kvm_run);

	if (vcpu->sigset_active)
		sigprocmask(SIG_SETMASK, &sigsaved, NULL);

	vcpu->stat.exit_userspace++;
	return rc;
}

/*
 * store status at address
 * we have two special cases:
 * KVM_S390_STORE_STATUS_NOADDR: -> 0x1200 on 64 bit
 * KVM_S390_STORE_STATUS_PREFIXED: -> prefix
 */
int kvm_s390_store_status_unloaded(struct kvm_vcpu *vcpu, unsigned long gpa)
{
	unsigned char archmode = 1;
	unsigned int px;
	u64 clkcomp;
	int rc;

	if (gpa == KVM_S390_STORE_STATUS_NOADDR) {
		if (write_guest_abs(vcpu, 163, &archmode, 1))
			return -EFAULT;
		gpa = SAVE_AREA_BASE;
	} else if (gpa == KVM_S390_STORE_STATUS_PREFIXED) {
		if (write_guest_real(vcpu, 163, &archmode, 1))
			return -EFAULT;
		gpa = kvm_s390_real_to_abs(vcpu, SAVE_AREA_BASE);
	}
	rc = write_guest_abs(vcpu, gpa + offsetof(struct save_area, fp_regs),
			     vcpu->arch.guest_fpregs.fprs, 128);
	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, gp_regs),
			      vcpu->run->s.regs.gprs, 128);
	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, psw),
			      &vcpu->arch.sie_block->gpsw, 16);
	px = kvm_s390_get_prefix(vcpu);
	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, pref_reg),
			      &px, 4);
	rc |= write_guest_abs(vcpu,
			      gpa + offsetof(struct save_area, fp_ctrl_reg),
			      &vcpu->arch.guest_fpregs.fpc, 4);
	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, tod_reg),
			      &vcpu->arch.sie_block->todpr, 4);
	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, timer),
			      &vcpu->arch.sie_block->cputm, 8);
	clkcomp = vcpu->arch.sie_block->ckc >> 8;
	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, clk_cmp),
			      &clkcomp, 8);
	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, acc_regs),
			      &vcpu->run->s.regs.acrs, 64);
	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, ctrl_regs),
			      &vcpu->arch.sie_block->gcr, 128);
	return rc ? -EFAULT : 0;
}

int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
{
	/*
	 * The guest FPRS and ACRS are in the host FPRS/ACRS due to the lazy
	 * copying in vcpu load/put. Lets update our copies before we save
	 * it into the save area
	 */
	save_fp_ctl(&vcpu->arch.guest_fpregs.fpc);
	save_fp_regs(vcpu->arch.guest_fpregs.fprs);
	save_access_regs(vcpu->run->s.regs.acrs);

	return kvm_s390_store_status_unloaded(vcpu, addr);
}

static void __disable_ibs_on_vcpu(struct kvm_vcpu *vcpu)
{
	kvm_check_request(KVM_REQ_ENABLE_IBS, vcpu);
	kvm_make_request(KVM_REQ_DISABLE_IBS, vcpu);
	exit_sie_sync(vcpu);
}

static void __disable_ibs_on_all_vcpus(struct kvm *kvm)
{
	unsigned int i;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(i, vcpu, kvm) {
		__disable_ibs_on_vcpu(vcpu);
	}
}

static void __enable_ibs_on_vcpu(struct kvm_vcpu *vcpu)
{
	kvm_check_request(KVM_REQ_DISABLE_IBS, vcpu);
	kvm_make_request(KVM_REQ_ENABLE_IBS, vcpu);
	exit_sie_sync(vcpu);
}

void kvm_s390_vcpu_start(struct kvm_vcpu *vcpu)
{
	int i, online_vcpus, started_vcpus = 0;

	if (!is_vcpu_stopped(vcpu))
		return;

	trace_kvm_s390_vcpu_start_stop(vcpu->vcpu_id, 1);
	/* Only one cpu at a time may enter/leave the STOPPED state. */
	spin_lock(&vcpu->kvm->arch.start_stop_lock);
	online_vcpus = atomic_read(&vcpu->kvm->online_vcpus);

	for (i = 0; i < online_vcpus; i++) {
		if (!is_vcpu_stopped(vcpu->kvm->vcpus[i]))
			started_vcpus++;
	}

	if (started_vcpus == 0) {
		/* we're the only active VCPU -> speed it up */
		__enable_ibs_on_vcpu(vcpu);
	} else if (started_vcpus == 1) {
		/*
		 * As we are starting a second VCPU, we have to disable
		 * the IBS facility on all VCPUs to remove potentially
		 * outstanding ENABLE requests.
		 */
		__disable_ibs_on_all_vcpus(vcpu->kvm);
	}

	atomic_clear_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);
	/*
	 * Another VCPU might have used IBS while we were offline.
	 * Let's play safe and flush the VCPU at startup.
	 */
	kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
	spin_unlock(&vcpu->kvm->arch.start_stop_lock);
	return;
}

void kvm_s390_vcpu_stop(struct kvm_vcpu *vcpu)
{
	int i, online_vcpus, started_vcpus = 0;
	struct kvm_vcpu *started_vcpu = NULL;

	if (is_vcpu_stopped(vcpu))
		return;

	trace_kvm_s390_vcpu_start_stop(vcpu->vcpu_id, 0);
	/* Only one cpu at a time may enter/leave the STOPPED state. */
	spin_lock(&vcpu->kvm->arch.start_stop_lock);
	online_vcpus = atomic_read(&vcpu->kvm->online_vcpus);

	/* SIGP STOP and SIGP STOP AND STORE STATUS has been fully processed */
	kvm_s390_clear_stop_irq(vcpu);

	atomic_set_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);
	__disable_ibs_on_vcpu(vcpu);

	for (i = 0; i < online_vcpus; i++) {
		if (!is_vcpu_stopped(vcpu->kvm->vcpus[i])) {
			started_vcpus++;
			started_vcpu = vcpu->kvm->vcpus[i];
		}
	}

	if (started_vcpus == 1) {
		/*
		 * As we only have one VCPU left, we want to enable the
		 * IBS facility for that VCPU to speed it up.
		 */
		__enable_ibs_on_vcpu(started_vcpu);
	}

	spin_unlock(&vcpu->kvm->arch.start_stop_lock);
	return;
}

static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
				     struct kvm_enable_cap *cap)
{
	int r;

	if (cap->flags)
		return -EINVAL;

	switch (cap->cap) {
	case KVM_CAP_S390_CSS_SUPPORT:
		if (!vcpu->kvm->arch.css_support) {
			vcpu->kvm->arch.css_support = 1;
			trace_kvm_s390_enable_css(vcpu->kvm);
		}
		r = 0;
		break;
	default:
		r = -EINVAL;
		break;
	}
	return r;
}

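/*
 * Userspace opts in through the KVM_ENABLE_CAP vcpu ioctl. A minimal
 * sketch, illustrative only ("vcpu_fd" is assumed to be an open vcpu
 * file descriptor):
 */
#if 0
	struct kvm_enable_cap cap = {
		.cap = KVM_CAP_S390_CSS_SUPPORT,
	};

	if (ioctl(vcpu_fd, KVM_ENABLE_CAP, &cap) < 0)
		perror("KVM_ENABLE_CAP");
#endif
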
long kvm_arch_vcpu_ioctl(struct file *filp,
			 unsigned int ioctl, unsigned long arg)
{
	struct kvm_vcpu *vcpu = filp->private_data;
	void __user *argp = (void __user *)arg;
	int idx;
	long r;

	switch (ioctl) {
	case KVM_S390_INTERRUPT: {
		struct kvm_s390_interrupt s390int;
		struct kvm_s390_irq s390irq;

		r = -EFAULT;
		if (copy_from_user(&s390int, argp, sizeof(s390int)))
			break;
		if (s390int_to_s390irq(&s390int, &s390irq))
			return -EINVAL;
		r = kvm_s390_inject_vcpu(vcpu, &s390irq);
		break;
	}
	case KVM_S390_STORE_STATUS:
		idx = srcu_read_lock(&vcpu->kvm->srcu);
		r = kvm_s390_vcpu_store_status(vcpu, arg);
		srcu_read_unlock(&vcpu->kvm->srcu, idx);
		break;
	case KVM_S390_SET_INITIAL_PSW: {
		psw_t psw;

		r = -EFAULT;
		if (copy_from_user(&psw, argp, sizeof(psw)))
			break;
		r = kvm_arch_vcpu_ioctl_set_initial_psw(vcpu, psw);
		break;
	}
	case KVM_S390_INITIAL_RESET:
		r = kvm_arch_vcpu_ioctl_initial_reset(vcpu);
		break;
	case KVM_SET_ONE_REG:
	case KVM_GET_ONE_REG: {
		struct kvm_one_reg reg;

		r = -EFAULT;
		if (copy_from_user(&reg, argp, sizeof(reg)))
			break;
		if (ioctl == KVM_SET_ONE_REG)
			r = kvm_arch_vcpu_ioctl_set_one_reg(vcpu, &reg);
		else
			r = kvm_arch_vcpu_ioctl_get_one_reg(vcpu, &reg);
		break;
	}
#ifdef CONFIG_KVM_S390_UCONTROL
	case KVM_S390_UCAS_MAP: {
		struct kvm_s390_ucas_mapping ucasmap;

		if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
			r = -EFAULT;
			break;
		}

		if (!kvm_is_ucontrol(vcpu->kvm)) {
			r = -EINVAL;
			break;
		}

		r = gmap_map_segment(vcpu->arch.gmap, ucasmap.user_addr,
				     ucasmap.vcpu_addr, ucasmap.length);
		break;
	}
	case KVM_S390_UCAS_UNMAP: {
		struct kvm_s390_ucas_mapping ucasmap;

		if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
			r = -EFAULT;
			break;
		}

		if (!kvm_is_ucontrol(vcpu->kvm)) {
			r = -EINVAL;
			break;
		}

		r = gmap_unmap_segment(vcpu->arch.gmap, ucasmap.vcpu_addr,
				       ucasmap.length);
		break;
	}
#endif
	case KVM_S390_VCPU_FAULT: {
		r = gmap_fault(vcpu->arch.gmap, arg, 0);
		break;
	}
	case KVM_ENABLE_CAP: {
		struct kvm_enable_cap cap;

		r = -EFAULT;
		if (copy_from_user(&cap, argp, sizeof(cap)))
			break;
		r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap);
		break;
	}
	default:
		r = -ENOTTY;
	}
	return r;
}

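/*
 * The KVM_S390_INTERRUPT case above converts the legacy kvm_s390_interrupt
 * layout into a kvm_s390_irq before injection. A minimal userspace sketch
 * injecting a restart interrupt, illustrative only ("vcpu_fd" assumed):
 */
#if 0
	struct kvm_s390_interrupt s390int = {
		.type = KVM_S390_RESTART,	/* no parameters needed */
	};

	if (ioctl(vcpu_fd, KVM_S390_INTERRUPT, &s390int) < 0)
		perror("KVM_S390_INTERRUPT");
#endif
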
int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
{
#ifdef CONFIG_KVM_S390_UCONTROL
	if ((vmf->pgoff == KVM_S390_SIE_PAGE_OFFSET)
	    && (kvm_is_ucontrol(vcpu->kvm))) {
		vmf->page = virt_to_page(vcpu->arch.sie_block);
		get_page(vmf->page);
		return 0;
	}
#endif
	return VM_FAULT_SIGBUS;
}

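/*
 * For user-controlled VMs, this fault handler lets userspace map the SIE
 * control block by mmap()ing the vcpu fd at KVM_S390_SIE_PAGE_OFFSET
 * pages. A minimal sketch, illustrative only ("vcpu_fd" assumed;
 * page_size e.g. from sysconf(_SC_PAGESIZE)):
 */
#if 0
	void *sie_block = mmap(NULL, page_size, PROT_READ | PROT_WRITE,
			       MAP_SHARED, vcpu_fd,
			       KVM_S390_SIE_PAGE_OFFSET * page_size);
	if (sie_block == MAP_FAILED)
		perror("mmap sie block");
#endif
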
int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
			    unsigned long npages)
{
	return 0;
}

/* Section: memory related */
int kvm_arch_prepare_memory_region(struct kvm *kvm,
				   struct kvm_memory_slot *memslot,
				   struct kvm_userspace_memory_region *mem,
				   enum kvm_mr_change change)
{
	/*
	 * A few sanity checks. Memory slots have to start and end on a
	 * segment boundary (1 MB). The memory backing the slot in userland
	 * may be fragmented into several different vmas, and it is fine to
	 * mmap() and munmap() within this slot at any time after this call.
	 */
	if (mem->userspace_addr & 0xffffful)
		return -EINVAL;

	if (mem->memory_size & 0xffffful)
		return -EINVAL;

	return 0;
}

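/*
 * The checks above mean a memslot must be 1 MB aligned in both its size
 * and its userspace address. A minimal userspace sketch that satisfies
 * them, illustrative only ("vm_fd" and "mem", a 1 MB-aligned buffer e.g.
 * from mmap(), are assumed):
 */
#if 0
	struct kvm_userspace_memory_region region = {
		.slot = 0,
		.guest_phys_addr = 0,
		.memory_size = 256 << 20,	/* multiple of 1 MB */
		.userspace_addr = (__u64) mem,	/* 1 MB aligned */
	};

	if (ioctl(vm_fd, KVM_SET_USER_MEMORY_REGION, &region) < 0)
		perror("KVM_SET_USER_MEMORY_REGION");
#endif
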
void kvm_arch_commit_memory_region(struct kvm *kvm,
				   struct kvm_userspace_memory_region *mem,
				   const struct kvm_memory_slot *old,
				   enum kvm_mr_change change)
{
	int rc;

	/*
	 * If the basics of the memslot do not change, we do not want
	 * to update the gmap. Every update causes several unnecessary
	 * segment translation exceptions. This is usually handled just
	 * fine by the normal fault handler + gmap, but it will also
	 * cause faults on the prefix page of running guest CPUs.
	 */
	if (old->userspace_addr == mem->userspace_addr &&
	    old->base_gfn * PAGE_SIZE == mem->guest_phys_addr &&
	    old->npages * PAGE_SIZE == mem->memory_size)
		return;

	rc = gmap_map_segment(kvm->arch.gmap, mem->userspace_addr,
			      mem->guest_phys_addr, mem->memory_size);
	if (rc)
		printk(KERN_WARNING "kvm-s390: failed to commit memory region\n");
	return;
}

static int __init kvm_s390_init(void)
{
	int ret;

	ret = kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
	if (ret)
		return ret;

	/*
	 * Guests can ask for up to 255+1 double words, so we need a full
	 * page to hold the maximum number of facilities. On the other hand,
	 * we only set facilities that are known to work in KVM.
	 */
	vfacilities = (unsigned long *) get_zeroed_page(GFP_KERNEL|GFP_DMA);
	if (!vfacilities) {
		kvm_exit();
		return -ENOMEM;
	}
	memcpy(vfacilities, S390_lowcore.stfle_fac_list, 16);
	vfacilities[0] &= 0xff82fffbf47c2000UL;
	vfacilities[1] &= 0x005c000000000000UL;
	return 0;
}

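/*
 * Facility bits are numbered from the most significant bit of each
 * doubleword, so facility nr lives in vfacilities[nr / 64] at bit
 * position (63 - nr % 64). The masks above therefore keep only a
 * whitelist of facility numbers. A minimal query helper mirroring the
 * semantics of test_facility(), illustrative only:
 */
#if 0
static inline int vfacility_is_set(unsigned long *facs, unsigned int nr)
{
	return (facs[nr >> 6] >> (63 - (nr & 63))) & 1;
}
#endif
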
static void __exit kvm_s390_exit(void)
{
	free_page((unsigned long) vfacilities);
	kvm_exit();
}

module_init(kvm_s390_init);
module_exit(kvm_s390_exit);

/*
 * Enable autoloading of the kvm module.
 * Note that we add the module alias here instead of virt/kvm/kvm_main.c
 * since x86 takes a different approach.
 */
#include <linux/miscdevice.h>
MODULE_ALIAS_MISCDEV(KVM_MINOR);
MODULE_ALIAS("devname:kvm");