/*
 * hosting zSeries kernel virtual machines
 *
 * Copyright IBM Corp. 2008, 2009
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (version 2 only)
 * as published by the Free Software Foundation.
 *
 * Author(s): Carsten Otte <cotte@de.ibm.com>
 *            Christian Borntraeger <borntraeger@de.ibm.com>
 *            Heiko Carstens <heiko.carstens@de.ibm.com>
 *            Christian Ehrhardt <ehrhardt@de.ibm.com>
 *            Jason J. Herne <jjherne@us.ibm.com>
 */

#include <linux/compiler.h>
#include <linux/err.h>
#include <linux/fs.h>
#include <linux/hrtimer.h>
#include <linux/init.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/timer.h>
#include <asm/asm-offsets.h>
#include <asm/lowcore.h>
#include <asm/pgtable.h>
#include <asm/nmi.h>
#include <asm/switch_to.h>
#include <asm/facility.h>
#include <asm/sclp.h>
#include "kvm-s390.h"
#include "gaccess.h"

#define CREATE_TRACE_POINTS
#include "trace.h"
#include "trace-s390.h"

#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU

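/*
 * Each VCPU_STAT(x) entry expands to the initializer pair
 * "offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU"; for example,
 * { "exit_null", VCPU_STAT(exit_null) } tells the generic KVM debugfs code
 * where the exit_null counter lives inside struct kvm_vcpu.
 */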
struct kvm_stats_debugfs_item debugfs_entries[] = {
	{ "userspace_handled", VCPU_STAT(exit_userspace) },
	{ "exit_null", VCPU_STAT(exit_null) },
	{ "exit_validity", VCPU_STAT(exit_validity) },
	{ "exit_stop_request", VCPU_STAT(exit_stop_request) },
	{ "exit_external_request", VCPU_STAT(exit_external_request) },
	{ "exit_external_interrupt", VCPU_STAT(exit_external_interrupt) },
	{ "exit_instruction", VCPU_STAT(exit_instruction) },
	{ "exit_program_interruption", VCPU_STAT(exit_program_interruption) },
	{ "exit_instr_and_program_int", VCPU_STAT(exit_instr_and_program) },
	{ "halt_wakeup", VCPU_STAT(halt_wakeup) },
	{ "instruction_lctlg", VCPU_STAT(instruction_lctlg) },
	{ "instruction_lctl", VCPU_STAT(instruction_lctl) },
	{ "instruction_stctl", VCPU_STAT(instruction_stctl) },
	{ "instruction_stctg", VCPU_STAT(instruction_stctg) },
	{ "deliver_emergency_signal", VCPU_STAT(deliver_emergency_signal) },
	{ "deliver_external_call", VCPU_STAT(deliver_external_call) },
	{ "deliver_service_signal", VCPU_STAT(deliver_service_signal) },
	{ "deliver_virtio_interrupt", VCPU_STAT(deliver_virtio_interrupt) },
	{ "deliver_stop_signal", VCPU_STAT(deliver_stop_signal) },
	{ "deliver_prefix_signal", VCPU_STAT(deliver_prefix_signal) },
	{ "deliver_restart_signal", VCPU_STAT(deliver_restart_signal) },
	{ "deliver_program_interruption", VCPU_STAT(deliver_program_int) },
	{ "exit_wait_state", VCPU_STAT(exit_wait_state) },
	{ "instruction_pfmf", VCPU_STAT(instruction_pfmf) },
	{ "instruction_stidp", VCPU_STAT(instruction_stidp) },
	{ "instruction_spx", VCPU_STAT(instruction_spx) },
	{ "instruction_stpx", VCPU_STAT(instruction_stpx) },
	{ "instruction_stap", VCPU_STAT(instruction_stap) },
	{ "instruction_storage_key", VCPU_STAT(instruction_storage_key) },
	{ "instruction_ipte_interlock", VCPU_STAT(instruction_ipte_interlock) },
	{ "instruction_stsch", VCPU_STAT(instruction_stsch) },
	{ "instruction_chsc", VCPU_STAT(instruction_chsc) },
	{ "instruction_essa", VCPU_STAT(instruction_essa) },
	{ "instruction_stsi", VCPU_STAT(instruction_stsi) },
	{ "instruction_stfl", VCPU_STAT(instruction_stfl) },
	{ "instruction_tprot", VCPU_STAT(instruction_tprot) },
	{ "instruction_sigp_sense", VCPU_STAT(instruction_sigp_sense) },
	{ "instruction_sigp_sense_running", VCPU_STAT(instruction_sigp_sense_running) },
	{ "instruction_sigp_external_call", VCPU_STAT(instruction_sigp_external_call) },
	{ "instruction_sigp_emergency", VCPU_STAT(instruction_sigp_emergency) },
	{ "instruction_sigp_cond_emergency", VCPU_STAT(instruction_sigp_cond_emergency) },
	{ "instruction_sigp_start", VCPU_STAT(instruction_sigp_start) },
	{ "instruction_sigp_stop", VCPU_STAT(instruction_sigp_stop) },
	{ "instruction_sigp_stop_store_status", VCPU_STAT(instruction_sigp_stop_store_status) },
	{ "instruction_sigp_store_status", VCPU_STAT(instruction_sigp_store_status) },
	{ "instruction_sigp_set_arch", VCPU_STAT(instruction_sigp_arch) },
	{ "instruction_sigp_set_prefix", VCPU_STAT(instruction_sigp_prefix) },
	{ "instruction_sigp_restart", VCPU_STAT(instruction_sigp_restart) },
	{ "instruction_sigp_cpu_reset", VCPU_STAT(instruction_sigp_cpu_reset) },
	{ "instruction_sigp_init_cpu_reset", VCPU_STAT(instruction_sigp_init_cpu_reset) },
	{ "instruction_sigp_unknown", VCPU_STAT(instruction_sigp_unknown) },
	{ "diagnose_10", VCPU_STAT(diagnose_10) },
	{ "diagnose_44", VCPU_STAT(diagnose_44) },
	{ "diagnose_9c", VCPU_STAT(diagnose_9c) },
	{ NULL }
};

unsigned long *vfacilities;
static struct gmap_notifier gmap_notifier;

/* test availability of vfacility */
int test_vfacility(unsigned long nr)
{
	return __test_facility(nr, (void *) vfacilities);
}

/* Section: not file related */
int kvm_arch_hardware_enable(void)
{
	/* every s390 is virtualization enabled ;-) */
	return 0;
}

static void kvm_gmap_notifier(struct gmap *gmap, unsigned long address);

int kvm_arch_hardware_setup(void)
{
	gmap_notifier.notifier_call = kvm_gmap_notifier;
	gmap_register_ipte_notifier(&gmap_notifier);
	return 0;
}

void kvm_arch_hardware_unsetup(void)
{
	gmap_unregister_ipte_notifier(&gmap_notifier);
}

int kvm_arch_init(void *opaque)
{
	/* Register floating interrupt controller interface. */
	return kvm_register_device_ops(&kvm_flic_ops, KVM_DEV_TYPE_FLIC);
}

/* Section: device related */
long kvm_arch_dev_ioctl(struct file *filp,
			unsigned int ioctl, unsigned long arg)
{
	if (ioctl == KVM_S390_ENABLE_SIE)
		return s390_enable_sie();
	return -EINVAL;
}

int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
{
	int r;

	switch (ext) {
	case KVM_CAP_S390_PSW:
	case KVM_CAP_S390_GMAP:
	case KVM_CAP_SYNC_MMU:
#ifdef CONFIG_KVM_S390_UCONTROL
	case KVM_CAP_S390_UCONTROL:
#endif
	case KVM_CAP_ASYNC_PF:
	case KVM_CAP_SYNC_REGS:
	case KVM_CAP_ONE_REG:
	case KVM_CAP_ENABLE_CAP:
	case KVM_CAP_S390_CSS_SUPPORT:
	case KVM_CAP_IRQFD:
	case KVM_CAP_IOEVENTFD:
	case KVM_CAP_DEVICE_CTRL:
	case KVM_CAP_ENABLE_CAP_VM:
	case KVM_CAP_S390_IRQCHIP:
	case KVM_CAP_VM_ATTRIBUTES:
	case KVM_CAP_MP_STATE:
		r = 1;
		break;
	case KVM_CAP_NR_VCPUS:
	case KVM_CAP_MAX_VCPUS:
		r = KVM_MAX_VCPUS;
		break;
	case KVM_CAP_NR_MEMSLOTS:
		r = KVM_USER_MEM_SLOTS;
		break;
	case KVM_CAP_S390_COW:
		r = MACHINE_HAS_ESOP;
		break;
	default:
		r = 0;
	}
	return r;
}

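/*
 * Minimal userspace sketch (illustrative assumption, not part of this
 * file): capabilities are probed with the generic KVM_CHECK_EXTENSION
 * ioctl, which lands here for VM-scoped queries, e.g.
 *
 *	int has_irqchip = ioctl(vm_fd, KVM_CHECK_EXTENSION,
 *				KVM_CAP_S390_IRQCHIP);
 *
 * A positive return value means the capability is available.
 */
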
static void kvm_s390_sync_dirty_log(struct kvm *kvm,
				    struct kvm_memory_slot *memslot)
{
	gfn_t cur_gfn, last_gfn;
	unsigned long address;
	struct gmap *gmap = kvm->arch.gmap;

	down_read(&gmap->mm->mmap_sem);
	/* Loop over all guest pages */
	last_gfn = memslot->base_gfn + memslot->npages;
	for (cur_gfn = memslot->base_gfn; cur_gfn <= last_gfn; cur_gfn++) {
		address = gfn_to_hva_memslot(memslot, cur_gfn);

		if (gmap_test_and_clear_dirty(address, gmap))
			mark_page_dirty(kvm, cur_gfn);
	}
	up_read(&gmap->mm->mmap_sem);
}

/* Section: vm related */
/*
 * Get (and clear) the dirty memory log for a memory slot.
 */
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
			       struct kvm_dirty_log *log)
{
	int r;
	unsigned long n;
	struct kvm_memory_slot *memslot;
	int is_dirty = 0;

	mutex_lock(&kvm->slots_lock);

	r = -EINVAL;
	if (log->slot >= KVM_USER_MEM_SLOTS)
		goto out;

	memslot = id_to_memslot(kvm->memslots, log->slot);
	r = -ENOENT;
	if (!memslot->dirty_bitmap)
		goto out;

	kvm_s390_sync_dirty_log(kvm, memslot);
	r = kvm_get_dirty_log(kvm, log, &is_dirty);
	if (r)
		goto out;

	/* Clear the dirty log */
	if (is_dirty) {
		n = kvm_dirty_bitmap_bytes(memslot);
		memset(memslot->dirty_bitmap, 0, n);
	}
	r = 0;
out:
	mutex_unlock(&kvm->slots_lock);
	return r;
}

static int kvm_vm_ioctl_enable_cap(struct kvm *kvm, struct kvm_enable_cap *cap)
{
	int r;

	if (cap->flags)
		return -EINVAL;

	switch (cap->cap) {
	case KVM_CAP_S390_IRQCHIP:
		kvm->arch.use_irqchip = 1;
		r = 0;
		break;
	default:
		r = -EINVAL;
		break;
	}
	return r;
}

static int kvm_s390_get_mem_control(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	switch (attr->attr) {
	case KVM_S390_VM_MEM_LIMIT_SIZE:
		ret = 0;
		if (put_user(kvm->arch.gmap->asce_end, (u64 __user *)attr->addr))
			ret = -EFAULT;
		break;
	default:
		ret = -ENXIO;
		break;
	}
	return ret;
}

static int kvm_s390_set_mem_control(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;
	unsigned int idx;

	switch (attr->attr) {
	case KVM_S390_VM_MEM_ENABLE_CMMA:
		ret = -EBUSY;
		mutex_lock(&kvm->lock);
		if (atomic_read(&kvm->online_vcpus) == 0) {
			kvm->arch.use_cmma = 1;
			ret = 0;
		}
		mutex_unlock(&kvm->lock);
		break;
	case KVM_S390_VM_MEM_CLR_CMMA:
		mutex_lock(&kvm->lock);
		idx = srcu_read_lock(&kvm->srcu);
		s390_reset_cmma(kvm->arch.gmap->mm);
		srcu_read_unlock(&kvm->srcu, idx);
		mutex_unlock(&kvm->lock);
		ret = 0;
		break;
	case KVM_S390_VM_MEM_LIMIT_SIZE: {
		unsigned long new_limit;

		if (kvm_is_ucontrol(kvm))
			return -EINVAL;

		if (get_user(new_limit, (u64 __user *)attr->addr))
			return -EFAULT;

		if (new_limit > kvm->arch.gmap->asce_end)
			return -E2BIG;

		ret = -EBUSY;
		mutex_lock(&kvm->lock);
		if (atomic_read(&kvm->online_vcpus) == 0) {
			/* gmap_alloc will round the limit up */
			struct gmap *new = gmap_alloc(current->mm, new_limit);

			if (!new) {
				ret = -ENOMEM;
			} else {
				gmap_free(kvm->arch.gmap);
				new->private = kvm;
				kvm->arch.gmap = new;
				ret = 0;
			}
		}
		mutex_unlock(&kvm->lock);
		break;
	}
	default:
		ret = -ENXIO;
		break;
	}
	return ret;
}

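/*
 * Minimal userspace sketch (illustrative assumption): CMMA must be enabled
 * before any VCPU is created, via the VM device-attribute interface that is
 * dispatched to kvm_s390_vm_set_attr() below, e.g.
 *
 *	struct kvm_device_attr attr = {
 *		.group = KVM_S390_VM_MEM_CTRL,
 *		.attr  = KVM_S390_VM_MEM_ENABLE_CMMA,
 *	};
 *	ioctl(vm_fd, KVM_SET_DEVICE_ATTR, &attr);
 *
 * The -EBUSY path above triggers when VCPUs already exist.
 */
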
static int kvm_s390_vm_set_attr(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	switch (attr->group) {
	case KVM_S390_VM_MEM_CTRL:
		ret = kvm_s390_set_mem_control(kvm, attr);
		break;
	default:
		ret = -ENXIO;
		break;
	}

	return ret;
}

static int kvm_s390_vm_get_attr(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	switch (attr->group) {
	case KVM_S390_VM_MEM_CTRL:
		ret = kvm_s390_get_mem_control(kvm, attr);
		break;
	default:
		ret = -ENXIO;
		break;
	}

	return ret;
}

static int kvm_s390_vm_has_attr(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	switch (attr->group) {
	case KVM_S390_VM_MEM_CTRL:
		switch (attr->attr) {
		case KVM_S390_VM_MEM_ENABLE_CMMA:
		case KVM_S390_VM_MEM_CLR_CMMA:
		case KVM_S390_VM_MEM_LIMIT_SIZE:
			ret = 0;
			break;
		default:
			ret = -ENXIO;
			break;
		}
		break;
	default:
		ret = -ENXIO;
		break;
	}

	return ret;
}

long kvm_arch_vm_ioctl(struct file *filp,
		       unsigned int ioctl, unsigned long arg)
{
	struct kvm *kvm = filp->private_data;
	void __user *argp = (void __user *)arg;
	struct kvm_device_attr attr;
	int r;

	switch (ioctl) {
	case KVM_S390_INTERRUPT: {
		struct kvm_s390_interrupt s390int;

		r = -EFAULT;
		if (copy_from_user(&s390int, argp, sizeof(s390int)))
			break;
		r = kvm_s390_inject_vm(kvm, &s390int);
		break;
	}
	case KVM_ENABLE_CAP: {
		struct kvm_enable_cap cap;

		r = -EFAULT;
		if (copy_from_user(&cap, argp, sizeof(cap)))
			break;
		r = kvm_vm_ioctl_enable_cap(kvm, &cap);
		break;
	}
	case KVM_CREATE_IRQCHIP: {
		struct kvm_irq_routing_entry routing;

		r = -EINVAL;
		if (kvm->arch.use_irqchip) {
			/* Set up dummy routing. */
			memset(&routing, 0, sizeof(routing));
			kvm_set_irq_routing(kvm, &routing, 0, 0);
			r = 0;
		}
		break;
	}
	case KVM_SET_DEVICE_ATTR: {
		r = -EFAULT;
		if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
			break;
		r = kvm_s390_vm_set_attr(kvm, &attr);
		break;
	}
	case KVM_GET_DEVICE_ATTR: {
		r = -EFAULT;
		if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
			break;
		r = kvm_s390_vm_get_attr(kvm, &attr);
		break;
	}
	case KVM_HAS_DEVICE_ATTR: {
		r = -EFAULT;
		if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
			break;
		r = kvm_s390_vm_has_attr(kvm, &attr);
		break;
	}
	default:
		r = -ENOTTY;
	}

	return r;
}

static int kvm_s390_crypto_init(struct kvm *kvm)
{
	if (!test_vfacility(76))
		return 0;

	kvm->arch.crypto.crycb = kzalloc(sizeof(*kvm->arch.crypto.crycb),
					 GFP_KERNEL | GFP_DMA);
	if (!kvm->arch.crypto.crycb)
		return -ENOMEM;

	kvm->arch.crypto.crycbd = (__u32) (unsigned long) kvm->arch.crypto.crycb |
				  CRYCB_FORMAT1;

	return 0;
}

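/*
 * Editorial note (assumption, not from the original source): the crypto
 * control block designation (crycbd) carries the address of the crycb with
 * a format indicator in its low-order bits, which is why the address is
 * simply OR'ed with CRYCB_FORMAT1 above; facility 76 gates whether any of
 * this crypto setup is done at all.
 */
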
int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
{
	int rc;
	char debug_name[16];
	static unsigned long sca_offset;

	rc = -EINVAL;
#ifdef CONFIG_KVM_S390_UCONTROL
	if (type & ~KVM_VM_S390_UCONTROL)
		goto out_err;
	if ((type & KVM_VM_S390_UCONTROL) && (!capable(CAP_SYS_ADMIN)))
		goto out_err;
#else
	if (type)
		goto out_err;
#endif

	rc = s390_enable_sie();
	if (rc)
		goto out_err;

	rc = -ENOMEM;

	kvm->arch.sca = (struct sca_block *) get_zeroed_page(GFP_KERNEL);
	if (!kvm->arch.sca)
		goto out_err;
	spin_lock(&kvm_lock);
	sca_offset = (sca_offset + 16) & 0x7f0;
	kvm->arch.sca = (struct sca_block *) ((char *) kvm->arch.sca + sca_offset);
	spin_unlock(&kvm_lock);

	sprintf(debug_name, "kvm-%u", current->pid);

	kvm->arch.dbf = debug_register(debug_name, 8, 2, 8 * sizeof(long));
	if (!kvm->arch.dbf)
		goto out_nodbf;

	if (kvm_s390_crypto_init(kvm) < 0)
		goto out_crypto;

	spin_lock_init(&kvm->arch.float_int.lock);
	INIT_LIST_HEAD(&kvm->arch.float_int.list);
	init_waitqueue_head(&kvm->arch.ipte_wq);
	mutex_init(&kvm->arch.ipte_mutex);

	debug_register_view(kvm->arch.dbf, &debug_sprintf_view);
	VM_EVENT(kvm, 3, "%s", "vm created");

	if (type & KVM_VM_S390_UCONTROL) {
		kvm->arch.gmap = NULL;
	} else {
		kvm->arch.gmap = gmap_alloc(current->mm, (1UL << 44) - 1);
		if (!kvm->arch.gmap)
			goto out_nogmap;
		kvm->arch.gmap->private = kvm;
		kvm->arch.gmap->pfault_enabled = 0;
	}

	kvm->arch.css_support = 0;
	kvm->arch.use_irqchip = 0;

	spin_lock_init(&kvm->arch.start_stop_lock);

	return 0;
out_nogmap:
	kfree(kvm->arch.crypto.crycb);
out_crypto:
	debug_unregister(kvm->arch.dbf);
out_nodbf:
	free_page((unsigned long)(kvm->arch.sca));
out_err:
	return rc;
}

void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{
	VCPU_EVENT(vcpu, 3, "%s", "free cpu");
	trace_kvm_s390_destroy_vcpu(vcpu->vcpu_id);
	kvm_s390_clear_local_irqs(vcpu);
	kvm_clear_async_pf_completion_queue(vcpu);
	if (!kvm_is_ucontrol(vcpu->kvm)) {
		clear_bit(63 - vcpu->vcpu_id,
			  (unsigned long *) &vcpu->kvm->arch.sca->mcn);
		if (vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda ==
		    (__u64) vcpu->arch.sie_block)
			vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda = 0;
	}
	smp_mb();

	if (kvm_is_ucontrol(vcpu->kvm))
		gmap_free(vcpu->arch.gmap);

	if (kvm_s390_cmma_enabled(vcpu->kvm))
		kvm_s390_vcpu_unsetup_cmma(vcpu);
	free_page((unsigned long)(vcpu->arch.sie_block));

	kvm_vcpu_uninit(vcpu);
	kmem_cache_free(kvm_vcpu_cache, vcpu);
}

static void kvm_free_vcpus(struct kvm *kvm)
{
	unsigned int i;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(i, vcpu, kvm)
		kvm_arch_vcpu_destroy(vcpu);

	mutex_lock(&kvm->lock);
	for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
		kvm->vcpus[i] = NULL;

	atomic_set(&kvm->online_vcpus, 0);
	mutex_unlock(&kvm->lock);
}

void kvm_arch_destroy_vm(struct kvm *kvm)
{
	kvm_free_vcpus(kvm);
	free_page((unsigned long)(kvm->arch.sca));
	debug_unregister(kvm->arch.dbf);
	kfree(kvm->arch.crypto.crycb);
	if (!kvm_is_ucontrol(kvm))
		gmap_free(kvm->arch.gmap);
	kvm_s390_destroy_adapters(kvm);
	kvm_s390_clear_float_irqs(kvm);
}

/* Section: vcpu related */
static int __kvm_ucontrol_vcpu_init(struct kvm_vcpu *vcpu)
{
	vcpu->arch.gmap = gmap_alloc(current->mm, -1UL);
	if (!vcpu->arch.gmap)
		return -ENOMEM;
	vcpu->arch.gmap->private = vcpu->kvm;

	return 0;
}

int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
{
	vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
	kvm_clear_async_pf_completion_queue(vcpu);
	vcpu->run->kvm_valid_regs = KVM_SYNC_PREFIX |
				    KVM_SYNC_GPRS |
				    KVM_SYNC_ACRS |
				    KVM_SYNC_CRS |
				    KVM_SYNC_ARCH0 |
				    KVM_SYNC_PFAULT;

	if (kvm_is_ucontrol(vcpu->kvm))
		return __kvm_ucontrol_vcpu_init(vcpu);

	return 0;
}

void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	save_fp_ctl(&vcpu->arch.host_fpregs.fpc);
	save_fp_regs(vcpu->arch.host_fpregs.fprs);
	save_access_regs(vcpu->arch.host_acrs);
	restore_fp_ctl(&vcpu->arch.guest_fpregs.fpc);
	restore_fp_regs(vcpu->arch.guest_fpregs.fprs);
	restore_access_regs(vcpu->run->s.regs.acrs);
	gmap_enable(vcpu->arch.gmap);
	atomic_set_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
}

void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
	atomic_clear_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
	gmap_disable(vcpu->arch.gmap);
	save_fp_ctl(&vcpu->arch.guest_fpregs.fpc);
	save_fp_regs(vcpu->arch.guest_fpregs.fprs);
	save_access_regs(vcpu->run->s.regs.acrs);
	restore_fp_ctl(&vcpu->arch.host_fpregs.fpc);
	restore_fp_regs(vcpu->arch.host_fpregs.fprs);
	restore_access_regs(vcpu->arch.host_acrs);
}

static void kvm_s390_vcpu_initial_reset(struct kvm_vcpu *vcpu)
{
	/*
	 * This equals initial cpu reset in POP (Principles of Operation),
	 * but we don't switch to ESA.
	 */
	vcpu->arch.sie_block->gpsw.mask = 0UL;
	vcpu->arch.sie_block->gpsw.addr = 0UL;
	kvm_s390_set_prefix(vcpu, 0);
	vcpu->arch.sie_block->cputm = 0UL;
	vcpu->arch.sie_block->ckc = 0UL;
	vcpu->arch.sie_block->todpr = 0;
	memset(vcpu->arch.sie_block->gcr, 0, 16 * sizeof(__u64));
	vcpu->arch.sie_block->gcr[0] = 0xE0UL;
	vcpu->arch.sie_block->gcr[14] = 0xC2000000UL;
	vcpu->arch.guest_fpregs.fpc = 0;
	asm volatile("lfpc %0" : : "Q" (vcpu->arch.guest_fpregs.fpc));
	vcpu->arch.sie_block->gbea = 1;
	vcpu->arch.sie_block->pp = 0;
	vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
	kvm_clear_async_pf_completion_queue(vcpu);
	if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm))
		kvm_s390_vcpu_stop(vcpu);
	kvm_s390_clear_local_irqs(vcpu);
}

void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
{
	if (!kvm_is_ucontrol(vcpu->kvm))
		vcpu->arch.gmap = vcpu->kvm->arch.gmap;
}

static void kvm_s390_vcpu_crypto_setup(struct kvm_vcpu *vcpu)
{
	if (!test_vfacility(76))
		return;

	vcpu->arch.sie_block->crycbd = vcpu->kvm->arch.crypto.crycbd;
}

void kvm_s390_vcpu_unsetup_cmma(struct kvm_vcpu *vcpu)
{
	free_page(vcpu->arch.sie_block->cbrlo);
	vcpu->arch.sie_block->cbrlo = 0;
}

int kvm_s390_vcpu_setup_cmma(struct kvm_vcpu *vcpu)
{
	vcpu->arch.sie_block->cbrlo = get_zeroed_page(GFP_KERNEL);
	if (!vcpu->arch.sie_block->cbrlo)
		return -ENOMEM;

	vcpu->arch.sie_block->ecb2 |= 0x80;
	vcpu->arch.sie_block->ecb2 &= ~0x08;
	return 0;
}

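/*
 * Editorial note (assumption, not from the original source): cbrlo is the
 * collaborative-memory-management block origin handed to SIE. Setting 0x80
 * in ecb2 enables CMMA interpretation for this VCPU, while clearing 0x08
 * is understood to turn off PFMF interpretation (set by default in
 * kvm_arch_vcpu_setup() below) so those operations are intercepted instead.
 */
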
int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
{
	int rc = 0;

	atomic_set(&vcpu->arch.sie_block->cpuflags, CPUSTAT_ZARCH |
						    CPUSTAT_SM |
						    CPUSTAT_STOPPED |
						    CPUSTAT_GED);
	vcpu->arch.sie_block->ecb = 6;
	if (test_vfacility(50) && test_vfacility(73))
		vcpu->arch.sie_block->ecb |= 0x10;

	vcpu->arch.sie_block->ecb2 = 8;
	vcpu->arch.sie_block->eca = 0xD1002000U;
	if (sclp_has_siif())
		vcpu->arch.sie_block->eca |= 1;
	vcpu->arch.sie_block->fac = (int) (long) vfacilities;
	vcpu->arch.sie_block->ictl |= ICTL_ISKE | ICTL_SSKE | ICTL_RRBE |
				      ICTL_TPROT;

	if (kvm_s390_cmma_enabled(vcpu->kvm)) {
		rc = kvm_s390_vcpu_setup_cmma(vcpu);
		if (rc)
			return rc;
	}
	hrtimer_init(&vcpu->arch.ckc_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	vcpu->arch.ckc_timer.function = kvm_s390_idle_wakeup;
	get_cpu_id(&vcpu->arch.cpu_id);
	vcpu->arch.cpu_id.version = 0xff;

	kvm_s390_vcpu_crypto_setup(vcpu);

	return rc;
}

struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
				      unsigned int id)
{
	struct kvm_vcpu *vcpu;
	struct sie_page *sie_page;
	int rc = -EINVAL;

	if (id >= KVM_MAX_VCPUS)
		goto out;

	rc = -ENOMEM;

	vcpu = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
	if (!vcpu)
		goto out;

	sie_page = (struct sie_page *) get_zeroed_page(GFP_KERNEL);
	if (!sie_page)
		goto out_free_cpu;

	vcpu->arch.sie_block = &sie_page->sie_block;
	vcpu->arch.sie_block->itdba = (unsigned long) &sie_page->itdb;

	vcpu->arch.sie_block->icpua = id;
	if (!kvm_is_ucontrol(kvm)) {
		if (!kvm->arch.sca) {
			WARN_ON_ONCE(1);
			goto out_free_cpu;
		}
		if (!kvm->arch.sca->cpu[id].sda)
			kvm->arch.sca->cpu[id].sda =
				(__u64) vcpu->arch.sie_block;
		vcpu->arch.sie_block->scaoh =
			(__u32)(((__u64)kvm->arch.sca) >> 32);
		vcpu->arch.sie_block->scaol = (__u32)(__u64)kvm->arch.sca;
		set_bit(63 - id, (unsigned long *) &kvm->arch.sca->mcn);
	}

	spin_lock_init(&vcpu->arch.local_int.lock);
	vcpu->arch.local_int.float_int = &kvm->arch.float_int;
	vcpu->arch.local_int.wq = &vcpu->wq;
	vcpu->arch.local_int.cpuflags = &vcpu->arch.sie_block->cpuflags;

	rc = kvm_vcpu_init(vcpu, kvm, id);
	if (rc)
		goto out_free_sie_block;
	VM_EVENT(kvm, 3, "create cpu %d at %p, sie block at %p", id, vcpu,
		 vcpu->arch.sie_block);
	trace_kvm_s390_create_vcpu(id, vcpu, vcpu->arch.sie_block);

	return vcpu;
out_free_sie_block:
	free_page((unsigned long)(vcpu->arch.sie_block));
out_free_cpu:
	kmem_cache_free(kvm_vcpu_cache, vcpu);
out:
	return ERR_PTR(rc);
}

int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
{
	return kvm_cpu_has_interrupt(vcpu);
}

void s390_vcpu_block(struct kvm_vcpu *vcpu)
{
	atomic_set_mask(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
}

void s390_vcpu_unblock(struct kvm_vcpu *vcpu)
{
	atomic_clear_mask(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
}

/*
 * Kick a guest cpu out of SIE and wait until SIE is not running.
 * If the CPU is not running (e.g. waiting as idle) the function will
 * return immediately.
 */
void exit_sie(struct kvm_vcpu *vcpu)
{
	atomic_set_mask(CPUSTAT_STOP_INT, &vcpu->arch.sie_block->cpuflags);
	while (vcpu->arch.sie_block->prog0c & PROG_IN_SIE)
		cpu_relax();
}

/* Kick a guest cpu out of SIE and prevent SIE-reentry */
void exit_sie_sync(struct kvm_vcpu *vcpu)
{
	s390_vcpu_block(vcpu);
	exit_sie(vcpu);
}

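/*
 * Editorial note: the ordering in exit_sie_sync() matters - s390_vcpu_block()
 * sets PROG_BLOCK_SIE first so the VCPU cannot re-enter SIE after the kick,
 * and exit_sie() then busy-waits on PROG_IN_SIE until the VCPU has actually
 * left interpretive execution.
 */
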
static void kvm_gmap_notifier(struct gmap *gmap, unsigned long address)
{
	int i;
	struct kvm *kvm = gmap->private;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(i, vcpu, kvm) {
		/* match against both prefix pages */
		if (kvm_s390_get_prefix(vcpu) == (address & ~0x1000UL)) {
			VCPU_EVENT(vcpu, 2, "gmap notifier for %lx", address);
			kvm_make_request(KVM_REQ_MMU_RELOAD, vcpu);
			exit_sie_sync(vcpu);
		}
	}
}

int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
{
	/* kvm common code refers to this, but never calls it */
	BUG();
	return 0;
}

static int kvm_arch_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu,
					   struct kvm_one_reg *reg)
{
	int r = -EINVAL;

	switch (reg->id) {
	case KVM_REG_S390_TODPR:
		r = put_user(vcpu->arch.sie_block->todpr,
			     (u32 __user *)reg->addr);
		break;
	case KVM_REG_S390_EPOCHDIFF:
		r = put_user(vcpu->arch.sie_block->epoch,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_CPU_TIMER:
		r = put_user(vcpu->arch.sie_block->cputm,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_CLOCK_COMP:
		r = put_user(vcpu->arch.sie_block->ckc,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PFTOKEN:
		r = put_user(vcpu->arch.pfault_token,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PFCOMPARE:
		r = put_user(vcpu->arch.pfault_compare,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PFSELECT:
		r = put_user(vcpu->arch.pfault_select,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PP:
		r = put_user(vcpu->arch.sie_block->pp,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_GBEA:
		r = put_user(vcpu->arch.sie_block->gbea,
			     (u64 __user *)reg->addr);
		break;
	default:
		break;
	}

	return r;
}

static int kvm_arch_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu,
					   struct kvm_one_reg *reg)
{
	int r = -EINVAL;

	switch (reg->id) {
	case KVM_REG_S390_TODPR:
		r = get_user(vcpu->arch.sie_block->todpr,
			     (u32 __user *)reg->addr);
		break;
	case KVM_REG_S390_EPOCHDIFF:
		r = get_user(vcpu->arch.sie_block->epoch,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_CPU_TIMER:
		r = get_user(vcpu->arch.sie_block->cputm,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_CLOCK_COMP:
		r = get_user(vcpu->arch.sie_block->ckc,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PFTOKEN:
		r = get_user(vcpu->arch.pfault_token,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PFCOMPARE:
		r = get_user(vcpu->arch.pfault_compare,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PFSELECT:
		r = get_user(vcpu->arch.pfault_select,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PP:
		r = get_user(vcpu->arch.sie_block->pp,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_GBEA:
		r = get_user(vcpu->arch.sie_block->gbea,
			     (u64 __user *)reg->addr);
		break;
	default:
		break;
	}

	return r;
}

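/*
 * Minimal userspace sketch (illustrative assumption): these handlers are
 * reached through the generic KVM_GET_ONE_REG/KVM_SET_ONE_REG VCPU ioctls,
 * e.g. reading the guest TOD programmable register:
 *
 *	__u32 todpr;
 *	struct kvm_one_reg reg = {
 *		.id   = KVM_REG_S390_TODPR,
 *		.addr = (__u64)&todpr,
 *	};
 *	ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);
 */
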
static int kvm_arch_vcpu_ioctl_initial_reset(struct kvm_vcpu *vcpu)
{
	kvm_s390_vcpu_initial_reset(vcpu);
	return 0;
}

int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	memcpy(&vcpu->run->s.regs.gprs, &regs->gprs, sizeof(regs->gprs));
	return 0;
}

int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	memcpy(&regs->gprs, &vcpu->run->s.regs.gprs, sizeof(regs->gprs));
	return 0;
}

int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	memcpy(&vcpu->run->s.regs.acrs, &sregs->acrs, sizeof(sregs->acrs));
	memcpy(&vcpu->arch.sie_block->gcr, &sregs->crs, sizeof(sregs->crs));
	restore_access_regs(vcpu->run->s.regs.acrs);
	return 0;
}

int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	memcpy(&sregs->acrs, &vcpu->run->s.regs.acrs, sizeof(sregs->acrs));
	memcpy(&sregs->crs, &vcpu->arch.sie_block->gcr, sizeof(sregs->crs));
	return 0;
}

int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	if (test_fp_ctl(fpu->fpc))
		return -EINVAL;
	memcpy(&vcpu->arch.guest_fpregs.fprs, &fpu->fprs, sizeof(fpu->fprs));
	vcpu->arch.guest_fpregs.fpc = fpu->fpc;
	restore_fp_ctl(&vcpu->arch.guest_fpregs.fpc);
	restore_fp_regs(vcpu->arch.guest_fpregs.fprs);
	return 0;
}

int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	memcpy(&fpu->fprs, &vcpu->arch.guest_fpregs.fprs, sizeof(fpu->fprs));
	fpu->fpc = vcpu->arch.guest_fpregs.fpc;
	return 0;
}

static int kvm_arch_vcpu_ioctl_set_initial_psw(struct kvm_vcpu *vcpu, psw_t psw)
{
	int rc = 0;

	if (!is_vcpu_stopped(vcpu))
		rc = -EBUSY;
	else {
		vcpu->run->psw_mask = psw.mask;
		vcpu->run->psw_addr = psw.addr;
	}
	return rc;
}

int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
				  struct kvm_translation *tr)
{
	return -EINVAL; /* not implemented yet */
}

#define VALID_GUESTDBG_FLAGS (KVM_GUESTDBG_SINGLESTEP | \
			      KVM_GUESTDBG_USE_HW_BP | \
			      KVM_GUESTDBG_ENABLE)

int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
					struct kvm_guest_debug *dbg)
{
	int rc = 0;

	vcpu->guest_debug = 0;
	kvm_s390_clear_bp_data(vcpu);

	if (dbg->control & ~VALID_GUESTDBG_FLAGS)
		return -EINVAL;

	if (dbg->control & KVM_GUESTDBG_ENABLE) {
		vcpu->guest_debug = dbg->control;
		/* enforce guest PER */
		atomic_set_mask(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags);

		if (dbg->control & KVM_GUESTDBG_USE_HW_BP)
			rc = kvm_s390_import_bp_data(vcpu, dbg);
	} else {
		atomic_clear_mask(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags);
		vcpu->arch.guestdbg.last_bp = 0;
	}

	if (rc) {
		vcpu->guest_debug = 0;
		kvm_s390_clear_bp_data(vcpu);
		atomic_clear_mask(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags);
	}

	return rc;
}

int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	/* CHECK_STOP and LOAD are not supported yet */
	return is_vcpu_stopped(vcpu) ? KVM_MP_STATE_STOPPED :
				       KVM_MP_STATE_OPERATING;
}

int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	int rc = 0;

	/* user space knows about this interface - let it control the state */
	vcpu->kvm->arch.user_cpu_state_ctrl = 1;

	switch (mp_state->mp_state) {
	case KVM_MP_STATE_STOPPED:
		kvm_s390_vcpu_stop(vcpu);
		break;
	case KVM_MP_STATE_OPERATING:
		kvm_s390_vcpu_start(vcpu);
		break;
	case KVM_MP_STATE_LOAD:
	case KVM_MP_STATE_CHECK_STOP:
		/* fall through - CHECK_STOP and LOAD are not supported yet */
	default:
		rc = -ENXIO;
	}

	return rc;
}

bool kvm_s390_cmma_enabled(struct kvm *kvm)
{
	if (!MACHINE_IS_LPAR)
		return false;
	/* only enable for z10 and later */
	if (!MACHINE_HAS_EDAT1)
		return false;
	if (!kvm->arch.use_cmma)
		return false;
	return true;
}

static bool ibs_enabled(struct kvm_vcpu *vcpu)
{
	return atomic_read(&vcpu->arch.sie_block->cpuflags) & CPUSTAT_IBS;
}

static int kvm_s390_handle_requests(struct kvm_vcpu *vcpu)
{
retry:
	s390_vcpu_unblock(vcpu);
	/*
	 * We use MMU_RELOAD just to re-arm the ipte notifier for the
	 * guest prefix page. gmap_ipte_notify will wait on the ptl lock.
	 * This ensures that the ipte instruction for this request has
	 * already finished. We might race against a second unmapper that
	 * wants to set the blocking bit. Let's just retry the request loop.
	 */
	if (kvm_check_request(KVM_REQ_MMU_RELOAD, vcpu)) {
		int rc;

		rc = gmap_ipte_notify(vcpu->arch.gmap,
				      kvm_s390_get_prefix(vcpu),
				      PAGE_SIZE * 2);
		if (rc)
			return rc;
		goto retry;
	}

	if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu)) {
		vcpu->arch.sie_block->ihcpu = 0xffff;
		goto retry;
	}

	if (kvm_check_request(KVM_REQ_ENABLE_IBS, vcpu)) {
		if (!ibs_enabled(vcpu)) {
			trace_kvm_s390_enable_disable_ibs(vcpu->vcpu_id, 1);
			atomic_set_mask(CPUSTAT_IBS,
					&vcpu->arch.sie_block->cpuflags);
		}
		goto retry;
	}

	if (kvm_check_request(KVM_REQ_DISABLE_IBS, vcpu)) {
		if (ibs_enabled(vcpu)) {
			trace_kvm_s390_enable_disable_ibs(vcpu->vcpu_id, 0);
			atomic_clear_mask(CPUSTAT_IBS,
					  &vcpu->arch.sie_block->cpuflags);
		}
		goto retry;
	}

	/* nothing to do, just clear the request */
	clear_bit(KVM_REQ_UNHALT, &vcpu->requests);

	return 0;
}

/**
 * kvm_arch_fault_in_page - fault-in guest page if necessary
 * @vcpu: The corresponding virtual cpu
 * @gpa: Guest physical address
 * @writable: Whether the page should be writable or not
 *
 * Make sure that a guest page has been faulted-in on the host.
 *
 * Return: Zero on success, negative error code otherwise.
 */
long kvm_arch_fault_in_page(struct kvm_vcpu *vcpu, gpa_t gpa, int writable)
{
	return gmap_fault(vcpu->arch.gmap, gpa,
			  writable ? FAULT_FLAG_WRITE : 0);
}

static void __kvm_inject_pfault_token(struct kvm_vcpu *vcpu, bool start_token,
				      unsigned long token)
{
	struct kvm_s390_interrupt inti;
	struct kvm_s390_irq irq;

	if (start_token) {
		irq.u.ext.ext_params2 = token;
		irq.type = KVM_S390_INT_PFAULT_INIT;
		WARN_ON_ONCE(kvm_s390_inject_vcpu(vcpu, &irq));
	} else {
		inti.type = KVM_S390_INT_PFAULT_DONE;
		inti.parm64 = token;
		WARN_ON_ONCE(kvm_s390_inject_vm(vcpu->kvm, &inti));
	}
}

void kvm_arch_async_page_not_present(struct kvm_vcpu *vcpu,
				     struct kvm_async_pf *work)
{
	trace_kvm_s390_pfault_init(vcpu, work->arch.pfault_token);
	__kvm_inject_pfault_token(vcpu, true, work->arch.pfault_token);
}

void kvm_arch_async_page_present(struct kvm_vcpu *vcpu,
				 struct kvm_async_pf *work)
{
	trace_kvm_s390_pfault_done(vcpu, work->arch.pfault_token);
	__kvm_inject_pfault_token(vcpu, false, work->arch.pfault_token);
}

void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu,
			       struct kvm_async_pf *work)
{
	/* s390 will always inject the page directly */
}

bool kvm_arch_can_inject_async_page_present(struct kvm_vcpu *vcpu)
{
	/*
	 * s390 will always inject the page directly,
	 * but we still want check_async_completion to cleanup
	 */
	return true;
}

static int kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu)
{
	hva_t hva;
	struct kvm_arch_async_pf arch;
	int rc;

	if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
		return 0;
	if ((vcpu->arch.sie_block->gpsw.mask & vcpu->arch.pfault_select) !=
	    vcpu->arch.pfault_compare)
		return 0;
	if (psw_extint_disabled(vcpu))
		return 0;
	if (kvm_cpu_has_interrupt(vcpu))
		return 0;
	if (!(vcpu->arch.sie_block->gcr[0] & 0x200ul))
		return 0;
	if (!vcpu->arch.gmap->pfault_enabled)
		return 0;

	hva = gfn_to_hva(vcpu->kvm, gpa_to_gfn(current->thread.gmap_addr));
	hva += current->thread.gmap_addr & ~PAGE_MASK;
	if (read_guest_real(vcpu, vcpu->arch.pfault_token, &arch.pfault_token, 8))
		return 0;

	rc = kvm_setup_async_pf(vcpu, current->thread.gmap_addr, hva, &arch);
	return rc;
}

static int vcpu_pre_run(struct kvm_vcpu *vcpu)
{
	int rc, cpuflags;

	/*
	 * On s390 notifications for arriving pages will be delivered directly
	 * to the guest but the housekeeping for completed pfaults is
	 * handled outside the worker.
	 */
	kvm_check_async_pf_completion(vcpu);

	memcpy(&vcpu->arch.sie_block->gg14, &vcpu->run->s.regs.gprs[14], 16);

	if (need_resched())
		schedule();

	if (test_cpu_flag(CIF_MCCK_PENDING))
		s390_handle_mcck();

	if (!kvm_is_ucontrol(vcpu->kvm)) {
		rc = kvm_s390_deliver_pending_interrupts(vcpu);
		if (rc)
			return rc;
	}

	rc = kvm_s390_handle_requests(vcpu);
	if (rc)
		return rc;

	if (guestdbg_enabled(vcpu)) {
		kvm_s390_backup_guest_per_regs(vcpu);
		kvm_s390_patch_guest_per_regs(vcpu);
	}

	vcpu->arch.sie_block->icptcode = 0;
	cpuflags = atomic_read(&vcpu->arch.sie_block->cpuflags);
	VCPU_EVENT(vcpu, 6, "entering sie flags %x", cpuflags);
	trace_kvm_s390_sie_enter(vcpu, cpuflags);

	return 0;
}

static int vcpu_post_run(struct kvm_vcpu *vcpu, int exit_reason)
{
	int rc = -1;

	VCPU_EVENT(vcpu, 6, "exit sie icptcode %d",
		   vcpu->arch.sie_block->icptcode);
	trace_kvm_s390_sie_exit(vcpu, vcpu->arch.sie_block->icptcode);

	if (guestdbg_enabled(vcpu))
		kvm_s390_restore_guest_per_regs(vcpu);

	if (exit_reason >= 0) {
		rc = 0;
	} else if (kvm_is_ucontrol(vcpu->kvm)) {
		vcpu->run->exit_reason = KVM_EXIT_S390_UCONTROL;
		vcpu->run->s390_ucontrol.trans_exc_code =
			current->thread.gmap_addr;
		vcpu->run->s390_ucontrol.pgm_code = 0x10;
		rc = -EREMOTE;

	} else if (current->thread.gmap_pfault) {
		trace_kvm_s390_major_guest_pfault(vcpu);
		current->thread.gmap_pfault = 0;
		if (kvm_arch_setup_async_pf(vcpu)) {
			rc = 0;
		} else {
			gpa_t gpa = current->thread.gmap_addr;

			rc = kvm_arch_fault_in_page(vcpu, gpa, 1);
		}
	}

	if (rc == -1) {
		VCPU_EVENT(vcpu, 3, "%s", "fault in sie instruction");
		trace_kvm_s390_sie_fault(vcpu);
		rc = kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
	}

	memcpy(&vcpu->run->s.regs.gprs[14], &vcpu->arch.sie_block->gg14, 16);

	if (rc == 0) {
		if (kvm_is_ucontrol(vcpu->kvm))
			/* Don't exit for host interrupts. */
			rc = vcpu->arch.sie_block->icptcode ? -EOPNOTSUPP : 0;
		else
			rc = kvm_handle_sie_intercept(vcpu);
	}

	return rc;
}

static int __vcpu_run(struct kvm_vcpu *vcpu)
{
	int rc, exit_reason;

	/*
	 * We try to hold kvm->srcu during most of vcpu_run (except when run-
	 * ning the guest), so that memslots (and other stuff) are protected
	 */
	vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);

	do {
		rc = vcpu_pre_run(vcpu);
		if (rc)
			break;

		srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
		/*
		 * As PF_VCPU will be used in the fault handler, there should
		 * be no uaccess between guest_enter and guest_exit.
		 */
		preempt_disable();
		kvm_guest_enter();
		preempt_enable();
		exit_reason = sie64a(vcpu->arch.sie_block,
				     vcpu->run->s.regs.gprs);
		kvm_guest_exit();
		vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);

		rc = vcpu_post_run(vcpu, exit_reason);
	} while (!signal_pending(current) && !guestdbg_exit_pending(vcpu) && !rc);

	srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
	return rc;
}

static void sync_regs(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	vcpu->arch.sie_block->gpsw.mask = kvm_run->psw_mask;
	vcpu->arch.sie_block->gpsw.addr = kvm_run->psw_addr;
	if (kvm_run->kvm_dirty_regs & KVM_SYNC_PREFIX)
		kvm_s390_set_prefix(vcpu, kvm_run->s.regs.prefix);
	if (kvm_run->kvm_dirty_regs & KVM_SYNC_CRS) {
		memcpy(&vcpu->arch.sie_block->gcr, &kvm_run->s.regs.crs, 128);
		/* some control register changes require a tlb flush */
		kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
	}
	if (kvm_run->kvm_dirty_regs & KVM_SYNC_ARCH0) {
		vcpu->arch.sie_block->cputm = kvm_run->s.regs.cputm;
		vcpu->arch.sie_block->ckc = kvm_run->s.regs.ckc;
		vcpu->arch.sie_block->todpr = kvm_run->s.regs.todpr;
		vcpu->arch.sie_block->pp = kvm_run->s.regs.pp;
		vcpu->arch.sie_block->gbea = kvm_run->s.regs.gbea;
	}
	if (kvm_run->kvm_dirty_regs & KVM_SYNC_PFAULT) {
		vcpu->arch.pfault_token = kvm_run->s.regs.pft;
		vcpu->arch.pfault_select = kvm_run->s.regs.pfs;
		vcpu->arch.pfault_compare = kvm_run->s.regs.pfc;
	}
	kvm_run->kvm_dirty_regs = 0;
}

static void store_regs(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	kvm_run->psw_mask = vcpu->arch.sie_block->gpsw.mask;
	kvm_run->psw_addr = vcpu->arch.sie_block->gpsw.addr;
	kvm_run->s.regs.prefix = kvm_s390_get_prefix(vcpu);
	memcpy(&kvm_run->s.regs.crs, &vcpu->arch.sie_block->gcr, 128);
	kvm_run->s.regs.cputm = vcpu->arch.sie_block->cputm;
	kvm_run->s.regs.ckc = vcpu->arch.sie_block->ckc;
	kvm_run->s.regs.todpr = vcpu->arch.sie_block->todpr;
	kvm_run->s.regs.pp = vcpu->arch.sie_block->pp;
	kvm_run->s.regs.gbea = vcpu->arch.sie_block->gbea;
	kvm_run->s.regs.pft = vcpu->arch.pfault_token;
	kvm_run->s.regs.pfs = vcpu->arch.pfault_select;
	kvm_run->s.regs.pfc = vcpu->arch.pfault_compare;
}

int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	int rc;
	sigset_t sigsaved;

	if (guestdbg_exit_pending(vcpu)) {
		kvm_s390_prepare_debug_exit(vcpu);
		return 0;
	}

	if (vcpu->sigset_active)
		sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);

	if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm)) {
		kvm_s390_vcpu_start(vcpu);
	} else if (is_vcpu_stopped(vcpu)) {
		pr_err_ratelimited("kvm-s390: can't run stopped vcpu %d\n",
				   vcpu->vcpu_id);
		return -EINVAL;
	}

	sync_regs(vcpu, kvm_run);

	might_fault();
	rc = __vcpu_run(vcpu);

	if (signal_pending(current) && !rc) {
		kvm_run->exit_reason = KVM_EXIT_INTR;
		rc = -EINTR;
	}

	if (guestdbg_exit_pending(vcpu) && !rc) {
		kvm_s390_prepare_debug_exit(vcpu);
		rc = 0;
	}

	if (rc == -EOPNOTSUPP) {
		/* intercept cannot be handled in-kernel, prepare kvm-run */
		kvm_run->exit_reason = KVM_EXIT_S390_SIEIC;
		kvm_run->s390_sieic.icptcode = vcpu->arch.sie_block->icptcode;
		kvm_run->s390_sieic.ipa = vcpu->arch.sie_block->ipa;
		kvm_run->s390_sieic.ipb = vcpu->arch.sie_block->ipb;
		rc = 0;
	}

	if (rc == -EREMOTE) {
		/*
		 * intercept was handled, but userspace support is needed;
		 * kvm_run has been prepared by the handler
		 */
		rc = 0;
	}

	store_regs(vcpu, kvm_run);

	if (vcpu->sigset_active)
		sigprocmask(SIG_SETMASK, &sigsaved, NULL);

	vcpu->stat.exit_userspace++;
	return rc;
}

/*
 * store status at address
 * we have two special cases:
 * KVM_S390_STORE_STATUS_NOADDR: -> 0x1200 on 64 bit
 * KVM_S390_STORE_STATUS_PREFIXED: -> prefix
 */
int kvm_s390_store_status_unloaded(struct kvm_vcpu *vcpu, unsigned long gpa)
{
	unsigned char archmode = 1;
	unsigned int px;
	u64 clkcomp;
	int rc;

	if (gpa == KVM_S390_STORE_STATUS_NOADDR) {
		if (write_guest_abs(vcpu, 163, &archmode, 1))
			return -EFAULT;
		gpa = SAVE_AREA_BASE;
	} else if (gpa == KVM_S390_STORE_STATUS_PREFIXED) {
		if (write_guest_real(vcpu, 163, &archmode, 1))
			return -EFAULT;
		gpa = kvm_s390_real_to_abs(vcpu, SAVE_AREA_BASE);
	}
	rc = write_guest_abs(vcpu, gpa + offsetof(struct save_area, fp_regs),
			     vcpu->arch.guest_fpregs.fprs, 128);
	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, gp_regs),
			      vcpu->run->s.regs.gprs, 128);
	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, psw),
			      &vcpu->arch.sie_block->gpsw, 16);
	px = kvm_s390_get_prefix(vcpu);
	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, pref_reg),
			      &px, 4);
	rc |= write_guest_abs(vcpu,
			      gpa + offsetof(struct save_area, fp_ctrl_reg),
			      &vcpu->arch.guest_fpregs.fpc, 4);
	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, tod_reg),
			      &vcpu->arch.sie_block->todpr, 4);
	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, timer),
			      &vcpu->arch.sie_block->cputm, 8);
	clkcomp = vcpu->arch.sie_block->ckc >> 8;
	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, clk_cmp),
			      &clkcomp, 8);
	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, acc_regs),
			      &vcpu->run->s.regs.acrs, 64);
	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, ctrl_regs),
			      &vcpu->arch.sie_block->gcr, 128);
	return rc ? -EFAULT : 0;
}

int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
{
	/*
	 * The guest FPRS and ACRS are in the host FPRS/ACRS due to the lazy
	 * copying in vcpu load/put. Let's update our copies before we save
	 * them into the save area.
	 */
	save_fp_ctl(&vcpu->arch.guest_fpregs.fpc);
	save_fp_regs(vcpu->arch.guest_fpregs.fprs);
	save_access_regs(vcpu->run->s.regs.acrs);

	return kvm_s390_store_status_unloaded(vcpu, addr);
}

static void __disable_ibs_on_vcpu(struct kvm_vcpu *vcpu)
{
	kvm_check_request(KVM_REQ_ENABLE_IBS, vcpu);
	kvm_make_request(KVM_REQ_DISABLE_IBS, vcpu);
	exit_sie_sync(vcpu);
}

static void __disable_ibs_on_all_vcpus(struct kvm *kvm)
{
	unsigned int i;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(i, vcpu, kvm) {
		__disable_ibs_on_vcpu(vcpu);
	}
}

static void __enable_ibs_on_vcpu(struct kvm_vcpu *vcpu)
{
	kvm_check_request(KVM_REQ_DISABLE_IBS, vcpu);
	kvm_make_request(KVM_REQ_ENABLE_IBS, vcpu);
	exit_sie_sync(vcpu);
}

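/*
 * Editorial note: the kvm_check_request() calls above are intentional even
 * though their result is discarded - kvm_check_request() clears a still
 * pending request for the opposite IBS transition before the new request is
 * queued, so ENABLE and DISABLE requests cannot pile up against each other.
 */
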
void kvm_s390_vcpu_start(struct kvm_vcpu *vcpu)
{
	int i, online_vcpus, started_vcpus = 0;

	if (!is_vcpu_stopped(vcpu))
		return;

	trace_kvm_s390_vcpu_start_stop(vcpu->vcpu_id, 1);
	/* Only one cpu at a time may enter/leave the STOPPED state. */
	spin_lock(&vcpu->kvm->arch.start_stop_lock);
	online_vcpus = atomic_read(&vcpu->kvm->online_vcpus);

	for (i = 0; i < online_vcpus; i++) {
		if (!is_vcpu_stopped(vcpu->kvm->vcpus[i]))
			started_vcpus++;
	}

	if (started_vcpus == 0) {
		/* we're the only active VCPU -> speed it up */
		__enable_ibs_on_vcpu(vcpu);
	} else if (started_vcpus == 1) {
		/*
		 * As we are starting a second VCPU, we have to disable
		 * the IBS facility on all VCPUs to remove potentially
		 * outstanding ENABLE requests.
		 */
		__disable_ibs_on_all_vcpus(vcpu->kvm);
	}

	atomic_clear_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);
	/*
	 * Another VCPU might have used IBS while we were offline.
	 * Let's play safe and flush the VCPU at startup.
	 */
	kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
	spin_unlock(&vcpu->kvm->arch.start_stop_lock);
	return;
}

void kvm_s390_vcpu_stop(struct kvm_vcpu *vcpu)
{
	int i, online_vcpus, started_vcpus = 0;
	struct kvm_vcpu *started_vcpu = NULL;

	if (is_vcpu_stopped(vcpu))
		return;

	trace_kvm_s390_vcpu_start_stop(vcpu->vcpu_id, 0);
	/* Only one cpu at a time may enter/leave the STOPPED state. */
	spin_lock(&vcpu->kvm->arch.start_stop_lock);
	online_vcpus = atomic_read(&vcpu->kvm->online_vcpus);

	/* Need to lock access to action_bits to avoid a SIGP race condition */
	spin_lock(&vcpu->arch.local_int.lock);
	atomic_set_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);

	/* SIGP STOP and SIGP STOP AND STORE STATUS have been fully processed */
	vcpu->arch.local_int.action_bits &=
		~(ACTION_STOP_ON_STOP | ACTION_STORE_ON_STOP);
	spin_unlock(&vcpu->arch.local_int.lock);

	__disable_ibs_on_vcpu(vcpu);

	for (i = 0; i < online_vcpus; i++) {
		if (!is_vcpu_stopped(vcpu->kvm->vcpus[i])) {
			started_vcpus++;
			started_vcpu = vcpu->kvm->vcpus[i];
		}
	}

	if (started_vcpus == 1) {
		/*
		 * As we only have one VCPU left, we want to enable the
		 * IBS facility for that VCPU to speed it up.
		 */
		__enable_ibs_on_vcpu(started_vcpu);
	}

	spin_unlock(&vcpu->kvm->arch.start_stop_lock);
	return;
}

static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
				     struct kvm_enable_cap *cap)
{
	int r;

	if (cap->flags)
		return -EINVAL;

	switch (cap->cap) {
	case KVM_CAP_S390_CSS_SUPPORT:
		if (!vcpu->kvm->arch.css_support) {
			vcpu->kvm->arch.css_support = 1;
			trace_kvm_s390_enable_css(vcpu->kvm);
		}
		r = 0;
		break;
	default:
		r = -EINVAL;
		break;
	}
	return r;
}

long kvm_arch_vcpu_ioctl(struct file *filp,
			 unsigned int ioctl, unsigned long arg)
{
	struct kvm_vcpu *vcpu = filp->private_data;
	void __user *argp = (void __user *)arg;
	int idx;
	long r;

	switch (ioctl) {
	case KVM_S390_INTERRUPT: {
		struct kvm_s390_interrupt s390int;
		struct kvm_s390_irq s390irq;

		r = -EFAULT;
		if (copy_from_user(&s390int, argp, sizeof(s390int)))
			break;
		if (s390int_to_s390irq(&s390int, &s390irq))
			return -EINVAL;
		r = kvm_s390_inject_vcpu(vcpu, &s390irq);
		break;
	}
	case KVM_S390_STORE_STATUS:
		idx = srcu_read_lock(&vcpu->kvm->srcu);
		r = kvm_s390_vcpu_store_status(vcpu, arg);
		srcu_read_unlock(&vcpu->kvm->srcu, idx);
		break;
	case KVM_S390_SET_INITIAL_PSW: {
		psw_t psw;

		r = -EFAULT;
		if (copy_from_user(&psw, argp, sizeof(psw)))
			break;
		r = kvm_arch_vcpu_ioctl_set_initial_psw(vcpu, psw);
		break;
	}
	case KVM_S390_INITIAL_RESET:
		r = kvm_arch_vcpu_ioctl_initial_reset(vcpu);
		break;
	case KVM_SET_ONE_REG:
	case KVM_GET_ONE_REG: {
		struct kvm_one_reg reg;

		r = -EFAULT;
		if (copy_from_user(&reg, argp, sizeof(reg)))
			break;
		if (ioctl == KVM_SET_ONE_REG)
			r = kvm_arch_vcpu_ioctl_set_one_reg(vcpu, &reg);
		else
			r = kvm_arch_vcpu_ioctl_get_one_reg(vcpu, &reg);
		break;
	}
#ifdef CONFIG_KVM_S390_UCONTROL
	case KVM_S390_UCAS_MAP: {
		struct kvm_s390_ucas_mapping ucasmap;

		if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
			r = -EFAULT;
			break;
		}

		if (!kvm_is_ucontrol(vcpu->kvm)) {
			r = -EINVAL;
			break;
		}

		r = gmap_map_segment(vcpu->arch.gmap, ucasmap.user_addr,
				     ucasmap.vcpu_addr, ucasmap.length);
		break;
	}
	case KVM_S390_UCAS_UNMAP: {
		struct kvm_s390_ucas_mapping ucasmap;

		if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
			r = -EFAULT;
			break;
		}

		if (!kvm_is_ucontrol(vcpu->kvm)) {
			r = -EINVAL;
			break;
		}

		r = gmap_unmap_segment(vcpu->arch.gmap, ucasmap.vcpu_addr,
				       ucasmap.length);
		break;
	}
#endif
	case KVM_S390_VCPU_FAULT: {
		r = gmap_fault(vcpu->arch.gmap, arg, 0);
		break;
	}
	case KVM_ENABLE_CAP:
	{
		struct kvm_enable_cap cap;

		r = -EFAULT;
		if (copy_from_user(&cap, argp, sizeof(cap)))
			break;
		r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap);
		break;
	}
	default:
		r = -ENOTTY;
	}
	return r;
}

int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
{
#ifdef CONFIG_KVM_S390_UCONTROL
	if ((vmf->pgoff == KVM_S390_SIE_PAGE_OFFSET)
		 && (kvm_is_ucontrol(vcpu->kvm))) {
		vmf->page = virt_to_page(vcpu->arch.sie_block);
		get_page(vmf->page);
		return 0;
	}
#endif
	return VM_FAULT_SIGBUS;
}

int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
			    unsigned long npages)
{
	return 0;
}

/* Section: memory related */
int kvm_arch_prepare_memory_region(struct kvm *kvm,
				   struct kvm_memory_slot *memslot,
				   struct kvm_userspace_memory_region *mem,
				   enum kvm_mr_change change)
{
	/* A few sanity checks. Memory slots have to start and end on a
	   segment boundary (1MB). The memory in userland may be fragmented
	   into various different vmas. It is okay to mmap() and munmap()
	   stuff in this slot after doing this call at any time */

	if (mem->userspace_addr & 0xffffful)
		return -EINVAL;

	if (mem->memory_size & 0xffffful)
		return -EINVAL;

	return 0;
}

void kvm_arch_commit_memory_region(struct kvm *kvm,
				   struct kvm_userspace_memory_region *mem,
				   const struct kvm_memory_slot *old,
				   enum kvm_mr_change change)
{
	int rc;

	/* If the basics of the memslot do not change, we do not want
	 * to update the gmap. Every update causes several unnecessary
	 * segment translation exceptions. This is usually handled just
	 * fine by the normal fault handler + gmap, but it will also
	 * cause faults on the prefix page of running guest CPUs.
	 */
	if (old->userspace_addr == mem->userspace_addr &&
	    old->base_gfn * PAGE_SIZE == mem->guest_phys_addr &&
	    old->npages * PAGE_SIZE == mem->memory_size)
		return;

	rc = gmap_map_segment(kvm->arch.gmap, mem->userspace_addr,
			      mem->guest_phys_addr, mem->memory_size);
	if (rc)
		printk(KERN_WARNING "kvm-s390: failed to commit memory region\n");
	return;
}

static int __init kvm_s390_init(void)
{
	int ret;

	ret = kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
	if (ret)
		return ret;

	/*
	 * guests can ask for up to 255+1 double words, we need a full page
	 * to hold the maximum amount of facilities. On the other hand, we
	 * only set facilities that are known to work in KVM.
	 */
	vfacilities = (unsigned long *) get_zeroed_page(GFP_KERNEL|GFP_DMA);
	if (!vfacilities) {
		kvm_exit();
		return -ENOMEM;
	}
	memcpy(vfacilities, S390_lowcore.stfle_fac_list, 16);
	vfacilities[0] &= 0xff82fffbf47c2000UL;
	vfacilities[1] &= 0x005c000000000000UL;
	return 0;
}

static void __exit kvm_s390_exit(void)
{
	free_page((unsigned long) vfacilities);
	kvm_exit();
}

module_init(kvm_s390_init);
module_exit(kvm_s390_exit);

/*
 * Enable autoloading of the kvm module.
 * Note that we add the module alias here instead of virt/kvm/kvm_main.c
 * since x86 takes a different approach.
 */
#include <linux/miscdevice.h>
MODULE_ALIAS_MISCDEV(KVM_MINOR);
MODULE_ALIAS("devname:kvm");