/*
 * hosting zSeries kernel virtual machines
 *
 * Copyright IBM Corp. 2008, 2009
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (version 2 only)
 * as published by the Free Software Foundation.
 *
 *    Author(s): Carsten Otte <cotte@de.ibm.com>
 *               Christian Borntraeger <borntraeger@de.ibm.com>
 *               Heiko Carstens <heiko.carstens@de.ibm.com>
 *               Christian Ehrhardt <ehrhardt@de.ibm.com>
 *               Jason J. Herne <jjherne@us.ibm.com>
 */

#include <linux/compiler.h>
#include <linux/err.h>
#include <linux/fs.h>
#include <linux/hrtimer.h>
#include <linux/init.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/timer.h>
#include <asm/asm-offsets.h>
#include <asm/lowcore.h>
#include <asm/pgtable.h>
#include <asm/nmi.h>
#include <asm/switch_to.h>
#include <asm/facility.h>
#include <asm/sclp.h>
#include "kvm-s390.h"
#include "gaccess.h"

#define CREATE_TRACE_POINTS
#include "trace.h"
#include "trace-s390.h"

#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU

struct kvm_stats_debugfs_item debugfs_entries[] = {
	{ "userspace_handled", VCPU_STAT(exit_userspace) },
	{ "exit_null", VCPU_STAT(exit_null) },
	{ "exit_validity", VCPU_STAT(exit_validity) },
	{ "exit_stop_request", VCPU_STAT(exit_stop_request) },
	{ "exit_external_request", VCPU_STAT(exit_external_request) },
	{ "exit_external_interrupt", VCPU_STAT(exit_external_interrupt) },
	{ "exit_instruction", VCPU_STAT(exit_instruction) },
	{ "exit_program_interruption", VCPU_STAT(exit_program_interruption) },
	{ "exit_instr_and_program_int", VCPU_STAT(exit_instr_and_program) },
	{ "halt_wakeup", VCPU_STAT(halt_wakeup) },
	{ "instruction_lctlg", VCPU_STAT(instruction_lctlg) },
	{ "instruction_lctl", VCPU_STAT(instruction_lctl) },
	{ "instruction_stctl", VCPU_STAT(instruction_stctl) },
	{ "instruction_stctg", VCPU_STAT(instruction_stctg) },
	{ "deliver_emergency_signal", VCPU_STAT(deliver_emergency_signal) },
	{ "deliver_external_call", VCPU_STAT(deliver_external_call) },
	{ "deliver_service_signal", VCPU_STAT(deliver_service_signal) },
	{ "deliver_virtio_interrupt", VCPU_STAT(deliver_virtio_interrupt) },
	{ "deliver_stop_signal", VCPU_STAT(deliver_stop_signal) },
	{ "deliver_prefix_signal", VCPU_STAT(deliver_prefix_signal) },
	{ "deliver_restart_signal", VCPU_STAT(deliver_restart_signal) },
	{ "deliver_program_interruption", VCPU_STAT(deliver_program_int) },
	{ "exit_wait_state", VCPU_STAT(exit_wait_state) },
	{ "instruction_pfmf", VCPU_STAT(instruction_pfmf) },
	{ "instruction_stidp", VCPU_STAT(instruction_stidp) },
	{ "instruction_spx", VCPU_STAT(instruction_spx) },
	{ "instruction_stpx", VCPU_STAT(instruction_stpx) },
	{ "instruction_stap", VCPU_STAT(instruction_stap) },
	{ "instruction_storage_key", VCPU_STAT(instruction_storage_key) },
	{ "instruction_ipte_interlock", VCPU_STAT(instruction_ipte_interlock) },
	{ "instruction_stsch", VCPU_STAT(instruction_stsch) },
	{ "instruction_chsc", VCPU_STAT(instruction_chsc) },
	{ "instruction_essa", VCPU_STAT(instruction_essa) },
	{ "instruction_stsi", VCPU_STAT(instruction_stsi) },
	{ "instruction_stfl", VCPU_STAT(instruction_stfl) },
	{ "instruction_tprot", VCPU_STAT(instruction_tprot) },
	{ "instruction_sigp_sense", VCPU_STAT(instruction_sigp_sense) },
	{ "instruction_sigp_sense_running", VCPU_STAT(instruction_sigp_sense_running) },
	{ "instruction_sigp_external_call", VCPU_STAT(instruction_sigp_external_call) },
	{ "instruction_sigp_emergency", VCPU_STAT(instruction_sigp_emergency) },
	{ "instruction_sigp_stop", VCPU_STAT(instruction_sigp_stop) },
	{ "instruction_sigp_set_arch", VCPU_STAT(instruction_sigp_arch) },
	{ "instruction_sigp_set_prefix", VCPU_STAT(instruction_sigp_prefix) },
	{ "instruction_sigp_restart", VCPU_STAT(instruction_sigp_restart) },
	{ "diagnose_10", VCPU_STAT(diagnose_10) },
	{ "diagnose_44", VCPU_STAT(diagnose_44) },
	{ "diagnose_9c", VCPU_STAT(diagnose_9c) },
	{ NULL }
};

unsigned long *vfacilities;
static struct gmap_notifier gmap_notifier;

/* test availability of vfacility */
int test_vfacility(unsigned long nr)
{
	return __test_facility(nr, (void *) vfacilities);
}

/* Section: not file related */
int kvm_arch_hardware_enable(void)
{
	/* every s390 is virtualization enabled ;-) */
	return 0;
}

static void kvm_gmap_notifier(struct gmap *gmap, unsigned long address);

int kvm_arch_hardware_setup(void)
{
	gmap_notifier.notifier_call = kvm_gmap_notifier;
	gmap_register_ipte_notifier(&gmap_notifier);
	return 0;
}

void kvm_arch_hardware_unsetup(void)
{
	gmap_unregister_ipte_notifier(&gmap_notifier);
}

int kvm_arch_init(void *opaque)
{
	/* Register floating interrupt controller interface. */
	return kvm_register_device_ops(&kvm_flic_ops, KVM_DEV_TYPE_FLIC);
}

/* Section: device related */
long kvm_arch_dev_ioctl(struct file *filp,
			unsigned int ioctl, unsigned long arg)
{
	if (ioctl == KVM_S390_ENABLE_SIE)
		return s390_enable_sie();
	return -EINVAL;
}

int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
{
	int r;

	switch (ext) {
	case KVM_CAP_S390_PSW:
	case KVM_CAP_S390_GMAP:
	case KVM_CAP_SYNC_MMU:
#ifdef CONFIG_KVM_S390_UCONTROL
	case KVM_CAP_S390_UCONTROL:
#endif
	case KVM_CAP_ASYNC_PF:
	case KVM_CAP_SYNC_REGS:
	case KVM_CAP_ONE_REG:
	case KVM_CAP_ENABLE_CAP:
	case KVM_CAP_S390_CSS_SUPPORT:
	case KVM_CAP_IRQFD:
	case KVM_CAP_IOEVENTFD:
	case KVM_CAP_DEVICE_CTRL:
	case KVM_CAP_ENABLE_CAP_VM:
	case KVM_CAP_S390_IRQCHIP:
	case KVM_CAP_VM_ATTRIBUTES:
	case KVM_CAP_MP_STATE:
		r = 1;
		break;
	case KVM_CAP_NR_VCPUS:
	case KVM_CAP_MAX_VCPUS:
		r = KVM_MAX_VCPUS;
		break;
	case KVM_CAP_NR_MEMSLOTS:
		r = KVM_USER_MEM_SLOTS;
		break;
	case KVM_CAP_S390_COW:
		r = MACHINE_HAS_ESOP;
		break;
	default:
		r = 0;
	}
	return r;
}

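/*
 * Propagate the dirty state tracked in the gmap page tables into the
 * memslot's dirty bitmap so that kvm_get_dirty_log() can report it.
 */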
static void kvm_s390_sync_dirty_log(struct kvm *kvm,
				    struct kvm_memory_slot *memslot)
{
	gfn_t cur_gfn, last_gfn;
	unsigned long address;
	struct gmap *gmap = kvm->arch.gmap;

	down_read(&gmap->mm->mmap_sem);
	/* Loop over all guest pages */
	last_gfn = memslot->base_gfn + memslot->npages;
	for (cur_gfn = memslot->base_gfn; cur_gfn <= last_gfn; cur_gfn++) {
		address = gfn_to_hva_memslot(memslot, cur_gfn);

		if (gmap_test_and_clear_dirty(address, gmap))
			mark_page_dirty(kvm, cur_gfn);
	}
	up_read(&gmap->mm->mmap_sem);
}

/* Section: vm related */
/*
 * Get (and clear) the dirty memory log for a memory slot.
 */
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
			       struct kvm_dirty_log *log)
{
	int r;
	unsigned long n;
	struct kvm_memory_slot *memslot;
	int is_dirty = 0;

	mutex_lock(&kvm->slots_lock);

	r = -EINVAL;
	if (log->slot >= KVM_USER_MEM_SLOTS)
		goto out;

	memslot = id_to_memslot(kvm->memslots, log->slot);
	r = -ENOENT;
	if (!memslot->dirty_bitmap)
		goto out;

	kvm_s390_sync_dirty_log(kvm, memslot);
	r = kvm_get_dirty_log(kvm, log, &is_dirty);
	if (r)
		goto out;

	/* Clear the dirty log */
	if (is_dirty) {
		n = kvm_dirty_bitmap_bytes(memslot);
		memset(memslot->dirty_bitmap, 0, n);
	}
	r = 0;
out:
	mutex_unlock(&kvm->slots_lock);
	return r;
}

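/* Handler for the KVM_ENABLE_CAP ioctl on the VM file descriptor */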
static int kvm_vm_ioctl_enable_cap(struct kvm *kvm, struct kvm_enable_cap *cap)
{
	int r;

	if (cap->flags)
		return -EINVAL;

	switch (cap->cap) {
	case KVM_CAP_S390_IRQCHIP:
		kvm->arch.use_irqchip = 1;
		r = 0;
		break;
	default:
		r = -EINVAL;
		break;
	}
	return r;
}

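/*
 * Handle the KVM_S390_VM_MEM_CTRL attribute group: enable CMMA while no
 * VCPUs have been created yet, or reset the page states of all guest pages
 * (CLR_CMMA).
 */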
static int kvm_s390_mem_control(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;
	unsigned int idx;
	switch (attr->attr) {
	case KVM_S390_VM_MEM_ENABLE_CMMA:
		ret = -EBUSY;
		mutex_lock(&kvm->lock);
		if (atomic_read(&kvm->online_vcpus) == 0) {
			kvm->arch.use_cmma = 1;
			ret = 0;
		}
		mutex_unlock(&kvm->lock);
		break;
	case KVM_S390_VM_MEM_CLR_CMMA:
		mutex_lock(&kvm->lock);
		idx = srcu_read_lock(&kvm->srcu);
		page_table_reset_pgste(kvm->arch.gmap->mm, 0, TASK_SIZE, false);
		srcu_read_unlock(&kvm->srcu, idx);
		mutex_unlock(&kvm->lock);
		ret = 0;
		break;
	default:
		ret = -ENXIO;
		break;
	}
	return ret;
}

static int kvm_s390_vm_set_attr(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	switch (attr->group) {
	case KVM_S390_VM_MEM_CTRL:
		ret = kvm_s390_mem_control(kvm, attr);
		break;
	default:
		ret = -ENXIO;
		break;
	}

	return ret;
}

static int kvm_s390_vm_get_attr(struct kvm *kvm, struct kvm_device_attr *attr)
{
	return -ENXIO;
}

static int kvm_s390_vm_has_attr(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	switch (attr->group) {
	case KVM_S390_VM_MEM_CTRL:
		switch (attr->attr) {
		case KVM_S390_VM_MEM_ENABLE_CMMA:
		case KVM_S390_VM_MEM_CLR_CMMA:
			ret = 0;
			break;
		default:
			ret = -ENXIO;
			break;
		}
		break;
	default:
		ret = -ENXIO;
		break;
	}

	return ret;
}

long kvm_arch_vm_ioctl(struct file *filp,
		       unsigned int ioctl, unsigned long arg)
{
	struct kvm *kvm = filp->private_data;
	void __user *argp = (void __user *)arg;
	struct kvm_device_attr attr;
	int r;

	switch (ioctl) {
	case KVM_S390_INTERRUPT: {
		struct kvm_s390_interrupt s390int;

		r = -EFAULT;
		if (copy_from_user(&s390int, argp, sizeof(s390int)))
			break;
		r = kvm_s390_inject_vm(kvm, &s390int);
		break;
	}
	case KVM_ENABLE_CAP: {
		struct kvm_enable_cap cap;
		r = -EFAULT;
		if (copy_from_user(&cap, argp, sizeof(cap)))
			break;
		r = kvm_vm_ioctl_enable_cap(kvm, &cap);
		break;
	}
	case KVM_CREATE_IRQCHIP: {
		struct kvm_irq_routing_entry routing;

		r = -EINVAL;
		if (kvm->arch.use_irqchip) {
			/* Set up dummy routing. */
			memset(&routing, 0, sizeof(routing));
			kvm_set_irq_routing(kvm, &routing, 0, 0);
			r = 0;
		}
		break;
	}
	case KVM_SET_DEVICE_ATTR: {
		r = -EFAULT;
		if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
			break;
		r = kvm_s390_vm_set_attr(kvm, &attr);
		break;
	}
	case KVM_GET_DEVICE_ATTR: {
		r = -EFAULT;
		if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
			break;
		r = kvm_s390_vm_get_attr(kvm, &attr);
		break;
	}
	case KVM_HAS_DEVICE_ATTR: {
		r = -EFAULT;
		if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
			break;
		r = kvm_s390_vm_has_attr(kvm, &attr);
		break;
	}
	default:
		r = -ENOTTY;
	}

	return r;
}

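/*
 * Allocate and advertise the crypto control block (crycb) for the VM if
 * facility bit 76 is available on the host.
 */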
static int kvm_s390_crypto_init(struct kvm *kvm)
{
	if (!test_vfacility(76))
		return 0;

	kvm->arch.crypto.crycb = kzalloc(sizeof(*kvm->arch.crypto.crycb),
					 GFP_KERNEL | GFP_DMA);
	if (!kvm->arch.crypto.crycb)
		return -ENOMEM;

	kvm->arch.crypto.crycbd = (__u32) (unsigned long) kvm->arch.crypto.crycb |
				  CRYCB_FORMAT1;

	return 0;
}

int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
{
	int rc;
	char debug_name[16];
	static unsigned long sca_offset;

	rc = -EINVAL;
#ifdef CONFIG_KVM_S390_UCONTROL
	if (type & ~KVM_VM_S390_UCONTROL)
		goto out_err;
	if ((type & KVM_VM_S390_UCONTROL) && (!capable(CAP_SYS_ADMIN)))
		goto out_err;
#else
	if (type)
		goto out_err;
#endif

	rc = s390_enable_sie();
	if (rc)
		goto out_err;

	rc = -ENOMEM;

	kvm->arch.sca = (struct sca_block *) get_zeroed_page(GFP_KERNEL);
	if (!kvm->arch.sca)
		goto out_err;
	spin_lock(&kvm_lock);
	sca_offset = (sca_offset + 16) & 0x7f0;
	kvm->arch.sca = (struct sca_block *) ((char *) kvm->arch.sca + sca_offset);
	spin_unlock(&kvm_lock);

	sprintf(debug_name, "kvm-%u", current->pid);

	kvm->arch.dbf = debug_register(debug_name, 8, 2, 8 * sizeof(long));
	if (!kvm->arch.dbf)
		goto out_nodbf;

	if (kvm_s390_crypto_init(kvm) < 0)
		goto out_crypto;

	spin_lock_init(&kvm->arch.float_int.lock);
	INIT_LIST_HEAD(&kvm->arch.float_int.list);
	init_waitqueue_head(&kvm->arch.ipte_wq);

	debug_register_view(kvm->arch.dbf, &debug_sprintf_view);
	VM_EVENT(kvm, 3, "%s", "vm created");

	if (type & KVM_VM_S390_UCONTROL) {
		kvm->arch.gmap = NULL;
	} else {
		kvm->arch.gmap = gmap_alloc(current->mm, (1UL << 44) - 1);
		if (!kvm->arch.gmap)
			goto out_nogmap;
		kvm->arch.gmap->private = kvm;
		kvm->arch.gmap->pfault_enabled = 0;
	}

	kvm->arch.css_support = 0;
	kvm->arch.use_irqchip = 0;

	spin_lock_init(&kvm->arch.start_stop_lock);

	return 0;
out_nogmap:
	kfree(kvm->arch.crypto.crycb);
out_crypto:
	debug_unregister(kvm->arch.dbf);
out_nodbf:
	free_page((unsigned long)(kvm->arch.sca));
out_err:
	return rc;
}

void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{
	VCPU_EVENT(vcpu, 3, "%s", "free cpu");
	trace_kvm_s390_destroy_vcpu(vcpu->vcpu_id);
	kvm_s390_clear_local_irqs(vcpu);
	kvm_clear_async_pf_completion_queue(vcpu);
	if (!kvm_is_ucontrol(vcpu->kvm)) {
		clear_bit(63 - vcpu->vcpu_id,
			  (unsigned long *) &vcpu->kvm->arch.sca->mcn);
		if (vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda ==
		    (__u64) vcpu->arch.sie_block)
			vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda = 0;
	}
	smp_mb();

	if (kvm_is_ucontrol(vcpu->kvm))
		gmap_free(vcpu->arch.gmap);

	if (kvm_s390_cmma_enabled(vcpu->kvm))
		kvm_s390_vcpu_unsetup_cmma(vcpu);
	free_page((unsigned long)(vcpu->arch.sie_block));

	kvm_vcpu_uninit(vcpu);
	kmem_cache_free(kvm_vcpu_cache, vcpu);
}

static void kvm_free_vcpus(struct kvm *kvm)
{
	unsigned int i;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(i, vcpu, kvm)
		kvm_arch_vcpu_destroy(vcpu);

	mutex_lock(&kvm->lock);
	for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
		kvm->vcpus[i] = NULL;

	atomic_set(&kvm->online_vcpus, 0);
	mutex_unlock(&kvm->lock);
}

void kvm_arch_destroy_vm(struct kvm *kvm)
{
	kvm_free_vcpus(kvm);
	free_page((unsigned long)(kvm->arch.sca));
	debug_unregister(kvm->arch.dbf);
	kfree(kvm->arch.crypto.crycb);
	if (!kvm_is_ucontrol(kvm))
		gmap_free(kvm->arch.gmap);
	kvm_s390_destroy_adapters(kvm);
	kvm_s390_clear_float_irqs(kvm);
}

/* Section: vcpu related */
int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
{
	vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
	kvm_clear_async_pf_completion_queue(vcpu);
	if (kvm_is_ucontrol(vcpu->kvm)) {
		vcpu->arch.gmap = gmap_alloc(current->mm, -1UL);
		if (!vcpu->arch.gmap)
			return -ENOMEM;
		vcpu->arch.gmap->private = vcpu->kvm;
		return 0;
	}

	vcpu->arch.gmap = vcpu->kvm->arch.gmap;
	vcpu->run->kvm_valid_regs = KVM_SYNC_PREFIX |
				    KVM_SYNC_GPRS |
				    KVM_SYNC_ACRS |
				    KVM_SYNC_CRS |
				    KVM_SYNC_ARCH0 |
				    KVM_SYNC_PFAULT;
	return 0;
}

void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	save_fp_ctl(&vcpu->arch.host_fpregs.fpc);
	save_fp_regs(vcpu->arch.host_fpregs.fprs);
	save_access_regs(vcpu->arch.host_acrs);
	restore_fp_ctl(&vcpu->arch.guest_fpregs.fpc);
	restore_fp_regs(vcpu->arch.guest_fpregs.fprs);
	restore_access_regs(vcpu->run->s.regs.acrs);
	gmap_enable(vcpu->arch.gmap);
	atomic_set_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
}

void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
	atomic_clear_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
	gmap_disable(vcpu->arch.gmap);
	save_fp_ctl(&vcpu->arch.guest_fpregs.fpc);
	save_fp_regs(vcpu->arch.guest_fpregs.fprs);
	save_access_regs(vcpu->run->s.regs.acrs);
	restore_fp_ctl(&vcpu->arch.host_fpregs.fpc);
	restore_fp_regs(vcpu->arch.host_fpregs.fprs);
	restore_access_regs(vcpu->arch.host_acrs);
}

static void kvm_s390_vcpu_initial_reset(struct kvm_vcpu *vcpu)
{
	/* this equals initial cpu reset in pop, but we don't switch to ESA */
	vcpu->arch.sie_block->gpsw.mask = 0UL;
	vcpu->arch.sie_block->gpsw.addr = 0UL;
	kvm_s390_set_prefix(vcpu, 0);
	vcpu->arch.sie_block->cputm = 0UL;
	vcpu->arch.sie_block->ckc = 0UL;
	vcpu->arch.sie_block->todpr = 0;
	memset(vcpu->arch.sie_block->gcr, 0, 16 * sizeof(__u64));
	vcpu->arch.sie_block->gcr[0] = 0xE0UL;
	vcpu->arch.sie_block->gcr[14] = 0xC2000000UL;
	vcpu->arch.guest_fpregs.fpc = 0;
	asm volatile("lfpc %0" : : "Q" (vcpu->arch.guest_fpregs.fpc));
	vcpu->arch.sie_block->gbea = 1;
	vcpu->arch.sie_block->pp = 0;
	vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
	kvm_clear_async_pf_completion_queue(vcpu);
	if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm))
		kvm_s390_vcpu_stop(vcpu);
	kvm_s390_clear_local_irqs(vcpu);
}

int kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
{
	return 0;
}

static void kvm_s390_vcpu_crypto_setup(struct kvm_vcpu *vcpu)
{
	if (!test_vfacility(76))
		return;

	vcpu->arch.sie_block->crycbd = vcpu->kvm->arch.crypto.crycbd;
}

void kvm_s390_vcpu_unsetup_cmma(struct kvm_vcpu *vcpu)
{
	free_page(vcpu->arch.sie_block->cbrlo);
	vcpu->arch.sie_block->cbrlo = 0;
}

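/*
 * Allocate the cbrlo page and enable CMMA handling via the ecb2 flags
 * of the SIE control block.
 */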
int kvm_s390_vcpu_setup_cmma(struct kvm_vcpu *vcpu)
{
	vcpu->arch.sie_block->cbrlo = get_zeroed_page(GFP_KERNEL);
	if (!vcpu->arch.sie_block->cbrlo)
		return -ENOMEM;

	vcpu->arch.sie_block->ecb2 |= 0x80;
	vcpu->arch.sie_block->ecb2 &= ~0x08;
	return 0;
}

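/* Initial setup of the SIE control block for a newly created VCPU */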
int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
{
	int rc = 0;

	atomic_set(&vcpu->arch.sie_block->cpuflags, CPUSTAT_ZARCH |
						    CPUSTAT_SM |
						    CPUSTAT_STOPPED |
						    CPUSTAT_GED);
	vcpu->arch.sie_block->ecb = 6;
	if (test_vfacility(50) && test_vfacility(73))
		vcpu->arch.sie_block->ecb |= 0x10;

	vcpu->arch.sie_block->ecb2 = 8;
	vcpu->arch.sie_block->eca = 0xD1002000U;
	if (sclp_has_siif())
		vcpu->arch.sie_block->eca |= 1;
	vcpu->arch.sie_block->fac = (int) (long) vfacilities;
	vcpu->arch.sie_block->ictl |= ICTL_ISKE | ICTL_SSKE | ICTL_RRBE |
				      ICTL_TPROT;

	if (kvm_s390_cmma_enabled(vcpu->kvm)) {
		rc = kvm_s390_vcpu_setup_cmma(vcpu);
		if (rc)
			return rc;
	}
	hrtimer_init(&vcpu->arch.ckc_timer, CLOCK_REALTIME, HRTIMER_MODE_ABS);
	vcpu->arch.ckc_timer.function = kvm_s390_idle_wakeup;
	get_cpu_id(&vcpu->arch.cpu_id);
	vcpu->arch.cpu_id.version = 0xff;

	kvm_s390_vcpu_crypto_setup(vcpu);

	return rc;
}

struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
				      unsigned int id)
{
	struct kvm_vcpu *vcpu;
	struct sie_page *sie_page;
	int rc = -EINVAL;

	if (id >= KVM_MAX_VCPUS)
		goto out;

	rc = -ENOMEM;

	vcpu = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
	if (!vcpu)
		goto out;

	sie_page = (struct sie_page *) get_zeroed_page(GFP_KERNEL);
	if (!sie_page)
		goto out_free_cpu;

	vcpu->arch.sie_block = &sie_page->sie_block;
	vcpu->arch.sie_block->itdba = (unsigned long) &sie_page->itdb;

	vcpu->arch.sie_block->icpua = id;
	if (!kvm_is_ucontrol(kvm)) {
		if (!kvm->arch.sca) {
			WARN_ON_ONCE(1);
			goto out_free_cpu;
		}
		if (!kvm->arch.sca->cpu[id].sda)
			kvm->arch.sca->cpu[id].sda =
				(__u64) vcpu->arch.sie_block;
		vcpu->arch.sie_block->scaoh =
			(__u32)(((__u64)kvm->arch.sca) >> 32);
		vcpu->arch.sie_block->scaol = (__u32)(__u64)kvm->arch.sca;
		set_bit(63 - id, (unsigned long *) &kvm->arch.sca->mcn);
	}

	spin_lock_init(&vcpu->arch.local_int.lock);
	INIT_LIST_HEAD(&vcpu->arch.local_int.list);
	vcpu->arch.local_int.float_int = &kvm->arch.float_int;
	vcpu->arch.local_int.wq = &vcpu->wq;
	vcpu->arch.local_int.cpuflags = &vcpu->arch.sie_block->cpuflags;

	rc = kvm_vcpu_init(vcpu, kvm, id);
	if (rc)
		goto out_free_sie_block;
	VM_EVENT(kvm, 3, "create cpu %d at %p, sie block at %p", id, vcpu,
		 vcpu->arch.sie_block);
	trace_kvm_s390_create_vcpu(id, vcpu, vcpu->arch.sie_block);

	return vcpu;
out_free_sie_block:
	free_page((unsigned long)(vcpu->arch.sie_block));
out_free_cpu:
	kmem_cache_free(kvm_vcpu_cache, vcpu);
out:
	return ERR_PTR(rc);
}

int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
{
	return kvm_cpu_has_interrupt(vcpu);
}

void s390_vcpu_block(struct kvm_vcpu *vcpu)
{
	atomic_set_mask(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
}

void s390_vcpu_unblock(struct kvm_vcpu *vcpu)
{
	atomic_clear_mask(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
}

/*
 * Kick a guest cpu out of SIE and wait until SIE is not running.
 * If the CPU is not running (e.g. waiting as idle) the function will
 * return immediately. */
void exit_sie(struct kvm_vcpu *vcpu)
{
	atomic_set_mask(CPUSTAT_STOP_INT, &vcpu->arch.sie_block->cpuflags);
	while (vcpu->arch.sie_block->prog0c & PROG_IN_SIE)
		cpu_relax();
}

/* Kick a guest cpu out of SIE and prevent SIE-reentry */
void exit_sie_sync(struct kvm_vcpu *vcpu)
{
	s390_vcpu_block(vcpu);
	exit_sie(vcpu);
}

static void kvm_gmap_notifier(struct gmap *gmap, unsigned long address)
{
	int i;
	struct kvm *kvm = gmap->private;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(i, vcpu, kvm) {
		/* match against both prefix pages */
		if (kvm_s390_get_prefix(vcpu) == (address & ~0x1000UL)) {
			VCPU_EVENT(vcpu, 2, "gmap notifier for %lx", address);
			kvm_make_request(KVM_REQ_MMU_RELOAD, vcpu);
			exit_sie_sync(vcpu);
		}
	}
}

int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
{
	/* kvm common code refers to this, but never calls it */
	BUG();
	return 0;
}

static int kvm_arch_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu,
					   struct kvm_one_reg *reg)
{
	int r = -EINVAL;

	switch (reg->id) {
	case KVM_REG_S390_TODPR:
		r = put_user(vcpu->arch.sie_block->todpr,
			     (u32 __user *)reg->addr);
		break;
	case KVM_REG_S390_EPOCHDIFF:
		r = put_user(vcpu->arch.sie_block->epoch,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_CPU_TIMER:
		r = put_user(vcpu->arch.sie_block->cputm,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_CLOCK_COMP:
		r = put_user(vcpu->arch.sie_block->ckc,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PFTOKEN:
		r = put_user(vcpu->arch.pfault_token,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PFCOMPARE:
		r = put_user(vcpu->arch.pfault_compare,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PFSELECT:
		r = put_user(vcpu->arch.pfault_select,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PP:
		r = put_user(vcpu->arch.sie_block->pp,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_GBEA:
		r = put_user(vcpu->arch.sie_block->gbea,
			     (u64 __user *)reg->addr);
		break;
	default:
		break;
	}

	return r;
}

static int kvm_arch_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu,
					   struct kvm_one_reg *reg)
{
	int r = -EINVAL;

	switch (reg->id) {
	case KVM_REG_S390_TODPR:
		r = get_user(vcpu->arch.sie_block->todpr,
			     (u32 __user *)reg->addr);
		break;
	case KVM_REG_S390_EPOCHDIFF:
		r = get_user(vcpu->arch.sie_block->epoch,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_CPU_TIMER:
		r = get_user(vcpu->arch.sie_block->cputm,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_CLOCK_COMP:
		r = get_user(vcpu->arch.sie_block->ckc,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PFTOKEN:
		r = get_user(vcpu->arch.pfault_token,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PFCOMPARE:
		r = get_user(vcpu->arch.pfault_compare,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PFSELECT:
		r = get_user(vcpu->arch.pfault_select,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PP:
		r = get_user(vcpu->arch.sie_block->pp,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_GBEA:
		r = get_user(vcpu->arch.sie_block->gbea,
			     (u64 __user *)reg->addr);
		break;
	default:
		break;
	}

	return r;
}

static int kvm_arch_vcpu_ioctl_initial_reset(struct kvm_vcpu *vcpu)
{
	kvm_s390_vcpu_initial_reset(vcpu);
	return 0;
}

int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	memcpy(&vcpu->run->s.regs.gprs, &regs->gprs, sizeof(regs->gprs));
	return 0;
}

int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	memcpy(&regs->gprs, &vcpu->run->s.regs.gprs, sizeof(regs->gprs));
	return 0;
}

int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	memcpy(&vcpu->run->s.regs.acrs, &sregs->acrs, sizeof(sregs->acrs));
	memcpy(&vcpu->arch.sie_block->gcr, &sregs->crs, sizeof(sregs->crs));
	restore_access_regs(vcpu->run->s.regs.acrs);
	return 0;
}

int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	memcpy(&sregs->acrs, &vcpu->run->s.regs.acrs, sizeof(sregs->acrs));
	memcpy(&sregs->crs, &vcpu->arch.sie_block->gcr, sizeof(sregs->crs));
	return 0;
}

int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	if (test_fp_ctl(fpu->fpc))
		return -EINVAL;
	memcpy(&vcpu->arch.guest_fpregs.fprs, &fpu->fprs, sizeof(fpu->fprs));
	vcpu->arch.guest_fpregs.fpc = fpu->fpc;
	restore_fp_ctl(&vcpu->arch.guest_fpregs.fpc);
	restore_fp_regs(vcpu->arch.guest_fpregs.fprs);
	return 0;
}

int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	memcpy(&fpu->fprs, &vcpu->arch.guest_fpregs.fprs, sizeof(fpu->fprs));
	fpu->fpc = vcpu->arch.guest_fpregs.fpc;
	return 0;
}

static int kvm_arch_vcpu_ioctl_set_initial_psw(struct kvm_vcpu *vcpu, psw_t psw)
{
	int rc = 0;

	if (!is_vcpu_stopped(vcpu))
		rc = -EBUSY;
	else {
		vcpu->run->psw_mask = psw.mask;
		vcpu->run->psw_addr = psw.addr;
	}
	return rc;
}

int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
				  struct kvm_translation *tr)
{
	return -EINVAL; /* not implemented yet */
}

#define VALID_GUESTDBG_FLAGS (KVM_GUESTDBG_SINGLESTEP | \
			      KVM_GUESTDBG_USE_HW_BP | \
			      KVM_GUESTDBG_ENABLE)

int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
					struct kvm_guest_debug *dbg)
{
	int rc = 0;

	vcpu->guest_debug = 0;
	kvm_s390_clear_bp_data(vcpu);

	if (dbg->control & ~VALID_GUESTDBG_FLAGS)
		return -EINVAL;

	if (dbg->control & KVM_GUESTDBG_ENABLE) {
		vcpu->guest_debug = dbg->control;
		/* enforce guest PER */
		atomic_set_mask(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags);

		if (dbg->control & KVM_GUESTDBG_USE_HW_BP)
			rc = kvm_s390_import_bp_data(vcpu, dbg);
	} else {
		atomic_clear_mask(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags);
		vcpu->arch.guestdbg.last_bp = 0;
	}

	if (rc) {
		vcpu->guest_debug = 0;
		kvm_s390_clear_bp_data(vcpu);
		atomic_clear_mask(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags);
	}

	return rc;
}

int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	/* CHECK_STOP and LOAD are not supported yet */
	return is_vcpu_stopped(vcpu) ? KVM_MP_STATE_STOPPED :
				       KVM_MP_STATE_OPERATING;
}

int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	int rc = 0;

	/* user space knows about this interface - let it control the state */
	vcpu->kvm->arch.user_cpu_state_ctrl = 1;

	switch (mp_state->mp_state) {
	case KVM_MP_STATE_STOPPED:
		kvm_s390_vcpu_stop(vcpu);
		break;
	case KVM_MP_STATE_OPERATING:
		kvm_s390_vcpu_start(vcpu);
		break;
	case KVM_MP_STATE_LOAD:
	case KVM_MP_STATE_CHECK_STOP:
		/* fall through - CHECK_STOP and LOAD are not supported yet */
	default:
		rc = -ENXIO;
	}

	return rc;
}

bool kvm_s390_cmma_enabled(struct kvm *kvm)
{
	if (!MACHINE_IS_LPAR)
		return false;
	/* only enable for z10 and later */
	if (!MACHINE_HAS_EDAT1)
		return false;
	if (!kvm->arch.use_cmma)
		return false;
	return true;
}

static bool ibs_enabled(struct kvm_vcpu *vcpu)
{
	return atomic_read(&vcpu->arch.sie_block->cpuflags) & CPUSTAT_IBS;
}

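/*
 * Process pending VCPU requests (prefix notifier re-arm, TLB flush,
 * IBS enable/disable) before (re-)entering SIE.
 */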
static int kvm_s390_handle_requests(struct kvm_vcpu *vcpu)
{
retry:
	s390_vcpu_unblock(vcpu);
	/*
	 * We use MMU_RELOAD just to re-arm the ipte notifier for the
	 * guest prefix page. gmap_ipte_notify will wait on the ptl lock.
	 * This ensures that the ipte instruction for this request has
	 * already finished. We might race against a second unmapper that
	 * wants to set the blocking bit. Let's just retry the request loop.
	 */
	if (kvm_check_request(KVM_REQ_MMU_RELOAD, vcpu)) {
		int rc;
		rc = gmap_ipte_notify(vcpu->arch.gmap,
				      kvm_s390_get_prefix(vcpu),
				      PAGE_SIZE * 2);
		if (rc)
			return rc;
		goto retry;
	}

	if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu)) {
		vcpu->arch.sie_block->ihcpu = 0xffff;
		goto retry;
	}

	if (kvm_check_request(KVM_REQ_ENABLE_IBS, vcpu)) {
		if (!ibs_enabled(vcpu)) {
			trace_kvm_s390_enable_disable_ibs(vcpu->vcpu_id, 1);
			atomic_set_mask(CPUSTAT_IBS,
					&vcpu->arch.sie_block->cpuflags);
		}
		goto retry;
	}

	if (kvm_check_request(KVM_REQ_DISABLE_IBS, vcpu)) {
		if (ibs_enabled(vcpu)) {
			trace_kvm_s390_enable_disable_ibs(vcpu->vcpu_id, 0);
			atomic_clear_mask(CPUSTAT_IBS,
					  &vcpu->arch.sie_block->cpuflags);
		}
		goto retry;
	}

	/* nothing to do, just clear the request */
	clear_bit(KVM_REQ_UNHALT, &vcpu->requests);

	return 0;
}

/**
 * kvm_arch_fault_in_page - fault-in guest page if necessary
 * @vcpu: The corresponding virtual cpu
 * @gpa: Guest physical address
 * @writable: Whether the page should be writable or not
 *
 * Make sure that a guest page has been faulted-in on the host.
 *
 * Return: Zero on success, negative error code otherwise.
 */
long kvm_arch_fault_in_page(struct kvm_vcpu *vcpu, gpa_t gpa, int writable)
{
	return gmap_fault(vcpu->arch.gmap, gpa,
			  writable ? FAULT_FLAG_WRITE : 0);
}

static void __kvm_inject_pfault_token(struct kvm_vcpu *vcpu, bool start_token,
				      unsigned long token)
{
	struct kvm_s390_interrupt inti;
	inti.parm64 = token;

	if (start_token) {
		inti.type = KVM_S390_INT_PFAULT_INIT;
		WARN_ON_ONCE(kvm_s390_inject_vcpu(vcpu, &inti));
	} else {
		inti.type = KVM_S390_INT_PFAULT_DONE;
		WARN_ON_ONCE(kvm_s390_inject_vm(vcpu->kvm, &inti));
	}
}

void kvm_arch_async_page_not_present(struct kvm_vcpu *vcpu,
				     struct kvm_async_pf *work)
{
	trace_kvm_s390_pfault_init(vcpu, work->arch.pfault_token);
	__kvm_inject_pfault_token(vcpu, true, work->arch.pfault_token);
}

void kvm_arch_async_page_present(struct kvm_vcpu *vcpu,
				 struct kvm_async_pf *work)
{
	trace_kvm_s390_pfault_done(vcpu, work->arch.pfault_token);
	__kvm_inject_pfault_token(vcpu, false, work->arch.pfault_token);
}

void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu,
			       struct kvm_async_pf *work)
{
	/* s390 will always inject the page directly */
}

bool kvm_arch_can_inject_async_page_present(struct kvm_vcpu *vcpu)
{
	/*
	 * s390 will always inject the page directly,
	 * but we still want check_async_completion to cleanup
	 */
	return true;
}

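/*
 * Set up an async page fault for the current host fault address, provided
 * the guest has enabled and configured the pfault interface.
 */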
static int kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu)
{
	hva_t hva;
	struct kvm_arch_async_pf arch;
	int rc;

	if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
		return 0;
	if ((vcpu->arch.sie_block->gpsw.mask & vcpu->arch.pfault_select) !=
	    vcpu->arch.pfault_compare)
		return 0;
	if (psw_extint_disabled(vcpu))
		return 0;
	if (kvm_cpu_has_interrupt(vcpu))
		return 0;
	if (!(vcpu->arch.sie_block->gcr[0] & 0x200ul))
		return 0;
	if (!vcpu->arch.gmap->pfault_enabled)
		return 0;

	hva = gfn_to_hva(vcpu->kvm, gpa_to_gfn(current->thread.gmap_addr));
	hva += current->thread.gmap_addr & ~PAGE_MASK;
	if (read_guest_real(vcpu, vcpu->arch.pfault_token, &arch.pfault_token, 8))
		return 0;

	rc = kvm_setup_async_pf(vcpu, current->thread.gmap_addr, hva, &arch);
	return rc;
}

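/* Work that has to be done on every entry into SIE */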
static int vcpu_pre_run(struct kvm_vcpu *vcpu)
{
	int rc, cpuflags;

	/*
	 * On s390 notifications for arriving pages will be delivered directly
	 * to the guest but the housekeeping for completed pfaults is
	 * handled outside the worker.
	 */
	kvm_check_async_pf_completion(vcpu);

	memcpy(&vcpu->arch.sie_block->gg14, &vcpu->run->s.regs.gprs[14], 16);

	if (need_resched())
		schedule();

	if (test_cpu_flag(CIF_MCCK_PENDING))
		s390_handle_mcck();

	if (!kvm_is_ucontrol(vcpu->kvm)) {
		rc = kvm_s390_deliver_pending_interrupts(vcpu);
		if (rc)
			return rc;
	}

	rc = kvm_s390_handle_requests(vcpu);
	if (rc)
		return rc;

	if (guestdbg_enabled(vcpu)) {
		kvm_s390_backup_guest_per_regs(vcpu);
		kvm_s390_patch_guest_per_regs(vcpu);
	}

	vcpu->arch.sie_block->icptcode = 0;
	cpuflags = atomic_read(&vcpu->arch.sie_block->cpuflags);
	VCPU_EVENT(vcpu, 6, "entering sie flags %x", cpuflags);
	trace_kvm_s390_sie_enter(vcpu, cpuflags);

	return 0;
}

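/* Handle the result of a SIE exit: host faults, ucontrol exits and intercepts */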
static int vcpu_post_run(struct kvm_vcpu *vcpu, int exit_reason)
{
	int rc = -1;

	VCPU_EVENT(vcpu, 6, "exit sie icptcode %d",
		   vcpu->arch.sie_block->icptcode);
	trace_kvm_s390_sie_exit(vcpu, vcpu->arch.sie_block->icptcode);

	if (guestdbg_enabled(vcpu))
		kvm_s390_restore_guest_per_regs(vcpu);

	if (exit_reason >= 0) {
		rc = 0;
	} else if (kvm_is_ucontrol(vcpu->kvm)) {
		vcpu->run->exit_reason = KVM_EXIT_S390_UCONTROL;
		vcpu->run->s390_ucontrol.trans_exc_code =
						current->thread.gmap_addr;
		vcpu->run->s390_ucontrol.pgm_code = 0x10;
		rc = -EREMOTE;

	} else if (current->thread.gmap_pfault) {
		trace_kvm_s390_major_guest_pfault(vcpu);
		current->thread.gmap_pfault = 0;
		if (kvm_arch_setup_async_pf(vcpu)) {
			rc = 0;
		} else {
			gpa_t gpa = current->thread.gmap_addr;
			rc = kvm_arch_fault_in_page(vcpu, gpa, 1);
		}
	}

	if (rc == -1) {
		VCPU_EVENT(vcpu, 3, "%s", "fault in sie instruction");
		trace_kvm_s390_sie_fault(vcpu);
		rc = kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
	}

	memcpy(&vcpu->run->s.regs.gprs[14], &vcpu->arch.sie_block->gg14, 16);

	if (rc == 0) {
		if (kvm_is_ucontrol(vcpu->kvm))
			/* Don't exit for host interrupts. */
			rc = vcpu->arch.sie_block->icptcode ? -EOPNOTSUPP : 0;
		else
			rc = kvm_handle_sie_intercept(vcpu);
	}

	return rc;
}

static int __vcpu_run(struct kvm_vcpu *vcpu)
{
	int rc, exit_reason;

	/*
	 * We try to hold kvm->srcu during most of vcpu_run (except when run-
	 * ning the guest), so that memslots (and other stuff) are protected
	 */
	vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);

	do {
		rc = vcpu_pre_run(vcpu);
		if (rc)
			break;

		srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
		/*
		 * As PF_VCPU will be used in fault handler, between
		 * guest_enter and guest_exit should be no uaccess.
		 */
		preempt_disable();
		kvm_guest_enter();
		preempt_enable();
		exit_reason = sie64a(vcpu->arch.sie_block,
				     vcpu->run->s.regs.gprs);
		kvm_guest_exit();
		vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);

		rc = vcpu_post_run(vcpu, exit_reason);
	} while (!signal_pending(current) && !guestdbg_exit_pending(vcpu) && !rc);

	srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
	return rc;
}

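/* Copy the registers that userspace marked dirty from kvm_run into the VCPU */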
static void sync_regs(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	vcpu->arch.sie_block->gpsw.mask = kvm_run->psw_mask;
	vcpu->arch.sie_block->gpsw.addr = kvm_run->psw_addr;
	if (kvm_run->kvm_dirty_regs & KVM_SYNC_PREFIX)
		kvm_s390_set_prefix(vcpu, kvm_run->s.regs.prefix);
	if (kvm_run->kvm_dirty_regs & KVM_SYNC_CRS) {
		memcpy(&vcpu->arch.sie_block->gcr, &kvm_run->s.regs.crs, 128);
		/* some control register changes require a tlb flush */
		kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
	}
	if (kvm_run->kvm_dirty_regs & KVM_SYNC_ARCH0) {
		vcpu->arch.sie_block->cputm = kvm_run->s.regs.cputm;
		vcpu->arch.sie_block->ckc = kvm_run->s.regs.ckc;
		vcpu->arch.sie_block->todpr = kvm_run->s.regs.todpr;
		vcpu->arch.sie_block->pp = kvm_run->s.regs.pp;
		vcpu->arch.sie_block->gbea = kvm_run->s.regs.gbea;
	}
	if (kvm_run->kvm_dirty_regs & KVM_SYNC_PFAULT) {
		vcpu->arch.pfault_token = kvm_run->s.regs.pft;
		vcpu->arch.pfault_select = kvm_run->s.regs.pfs;
		vcpu->arch.pfault_compare = kvm_run->s.regs.pfc;
	}
	kvm_run->kvm_dirty_regs = 0;
}

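/* Copy the current VCPU state back into kvm_run for userspace */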
static void store_regs(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	kvm_run->psw_mask = vcpu->arch.sie_block->gpsw.mask;
	kvm_run->psw_addr = vcpu->arch.sie_block->gpsw.addr;
	kvm_run->s.regs.prefix = kvm_s390_get_prefix(vcpu);
	memcpy(&kvm_run->s.regs.crs, &vcpu->arch.sie_block->gcr, 128);
	kvm_run->s.regs.cputm = vcpu->arch.sie_block->cputm;
	kvm_run->s.regs.ckc = vcpu->arch.sie_block->ckc;
	kvm_run->s.regs.todpr = vcpu->arch.sie_block->todpr;
	kvm_run->s.regs.pp = vcpu->arch.sie_block->pp;
	kvm_run->s.regs.gbea = vcpu->arch.sie_block->gbea;
	kvm_run->s.regs.pft = vcpu->arch.pfault_token;
	kvm_run->s.regs.pfs = vcpu->arch.pfault_select;
	kvm_run->s.regs.pfc = vcpu->arch.pfault_compare;
}

int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	int rc;
	sigset_t sigsaved;

	if (guestdbg_exit_pending(vcpu)) {
		kvm_s390_prepare_debug_exit(vcpu);
		return 0;
	}

	if (vcpu->sigset_active)
		sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);

	if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm)) {
		kvm_s390_vcpu_start(vcpu);
	} else if (is_vcpu_stopped(vcpu)) {
		pr_err_ratelimited("kvm-s390: can't run stopped vcpu %d\n",
				   vcpu->vcpu_id);
		return -EINVAL;
	}

	sync_regs(vcpu, kvm_run);

	might_fault();
	rc = __vcpu_run(vcpu);

	if (signal_pending(current) && !rc) {
		kvm_run->exit_reason = KVM_EXIT_INTR;
		rc = -EINTR;
	}

	if (guestdbg_exit_pending(vcpu) && !rc) {
		kvm_s390_prepare_debug_exit(vcpu);
		rc = 0;
	}

	if (rc == -EOPNOTSUPP) {
		/* intercept cannot be handled in-kernel, prepare kvm-run */
		kvm_run->exit_reason = KVM_EXIT_S390_SIEIC;
		kvm_run->s390_sieic.icptcode = vcpu->arch.sie_block->icptcode;
		kvm_run->s390_sieic.ipa = vcpu->arch.sie_block->ipa;
		kvm_run->s390_sieic.ipb = vcpu->arch.sie_block->ipb;
		rc = 0;
	}

	if (rc == -EREMOTE) {
		/* intercept was handled, but userspace support is needed
		 * kvm_run has been prepared by the handler */
		rc = 0;
	}

	store_regs(vcpu, kvm_run);

	if (vcpu->sigset_active)
		sigprocmask(SIG_SETMASK, &sigsaved, NULL);

	vcpu->stat.exit_userspace++;
	return rc;
}

/*
 * store status at address
 * we have two special cases:
 * KVM_S390_STORE_STATUS_NOADDR: -> 0x1200 on 64 bit
 * KVM_S390_STORE_STATUS_PREFIXED: -> prefix
 */
int kvm_s390_store_status_unloaded(struct kvm_vcpu *vcpu, unsigned long gpa)
{
	unsigned char archmode = 1;
	unsigned int px;
	u64 clkcomp;
	int rc;

	if (gpa == KVM_S390_STORE_STATUS_NOADDR) {
		if (write_guest_abs(vcpu, 163, &archmode, 1))
			return -EFAULT;
		gpa = SAVE_AREA_BASE;
	} else if (gpa == KVM_S390_STORE_STATUS_PREFIXED) {
		if (write_guest_real(vcpu, 163, &archmode, 1))
			return -EFAULT;
		gpa = kvm_s390_real_to_abs(vcpu, SAVE_AREA_BASE);
	}
	rc = write_guest_abs(vcpu, gpa + offsetof(struct save_area, fp_regs),
			     vcpu->arch.guest_fpregs.fprs, 128);
	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, gp_regs),
			      vcpu->run->s.regs.gprs, 128);
	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, psw),
			      &vcpu->arch.sie_block->gpsw, 16);
	px = kvm_s390_get_prefix(vcpu);
	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, pref_reg),
			      &px, 4);
	rc |= write_guest_abs(vcpu,
			      gpa + offsetof(struct save_area, fp_ctrl_reg),
			      &vcpu->arch.guest_fpregs.fpc, 4);
	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, tod_reg),
			      &vcpu->arch.sie_block->todpr, 4);
	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, timer),
			      &vcpu->arch.sie_block->cputm, 8);
	clkcomp = vcpu->arch.sie_block->ckc >> 8;
	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, clk_cmp),
			      &clkcomp, 8);
	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, acc_regs),
			      &vcpu->run->s.regs.acrs, 64);
	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, ctrl_regs),
			      &vcpu->arch.sie_block->gcr, 128);
	return rc ? -EFAULT : 0;
}

int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
{
	/*
	 * The guest FPRS and ACRS are in the host FPRS/ACRS due to the lazy
	 * copying in vcpu load/put. Let's update our copies before we save
	 * it into the save area
	 */
	save_fp_ctl(&vcpu->arch.guest_fpregs.fpc);
	save_fp_regs(vcpu->arch.guest_fpregs.fprs);
	save_access_regs(vcpu->run->s.regs.acrs);

	return kvm_s390_store_status_unloaded(vcpu, addr);
}

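/* Helpers to switch the IBS facility on or off for one or all VCPUs */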
static void __disable_ibs_on_vcpu(struct kvm_vcpu *vcpu)
{
	kvm_check_request(KVM_REQ_ENABLE_IBS, vcpu);
	kvm_make_request(KVM_REQ_DISABLE_IBS, vcpu);
	exit_sie_sync(vcpu);
}

static void __disable_ibs_on_all_vcpus(struct kvm *kvm)
{
	unsigned int i;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(i, vcpu, kvm) {
		__disable_ibs_on_vcpu(vcpu);
	}
}

static void __enable_ibs_on_vcpu(struct kvm_vcpu *vcpu)
{
	kvm_check_request(KVM_REQ_DISABLE_IBS, vcpu);
	kvm_make_request(KVM_REQ_ENABLE_IBS, vcpu);
	exit_sie_sync(vcpu);
}

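/* Move a VCPU out of the STOPPED state and adjust the IBS facility accordingly */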
void kvm_s390_vcpu_start(struct kvm_vcpu *vcpu)
{
	int i, online_vcpus, started_vcpus = 0;

	if (!is_vcpu_stopped(vcpu))
		return;

	trace_kvm_s390_vcpu_start_stop(vcpu->vcpu_id, 1);
	/* Only one cpu at a time may enter/leave the STOPPED state. */
	spin_lock(&vcpu->kvm->arch.start_stop_lock);
	online_vcpus = atomic_read(&vcpu->kvm->online_vcpus);

	for (i = 0; i < online_vcpus; i++) {
		if (!is_vcpu_stopped(vcpu->kvm->vcpus[i]))
			started_vcpus++;
	}

	if (started_vcpus == 0) {
		/* we're the only active VCPU -> speed it up */
		__enable_ibs_on_vcpu(vcpu);
	} else if (started_vcpus == 1) {
		/*
		 * As we are starting a second VCPU, we have to disable
		 * the IBS facility on all VCPUs to remove potentially
		 * outstanding ENABLE requests.
		 */
		__disable_ibs_on_all_vcpus(vcpu->kvm);
	}

	atomic_clear_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);
	/*
	 * Another VCPU might have used IBS while we were offline.
	 * Let's play safe and flush the VCPU at startup.
	 */
	kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
	spin_unlock(&vcpu->kvm->arch.start_stop_lock);
	return;
}

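/* Put a VCPU into the STOPPED state and, if only one VCPU stays running, enable IBS for it */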
void kvm_s390_vcpu_stop(struct kvm_vcpu *vcpu)
{
	int i, online_vcpus, started_vcpus = 0;
	struct kvm_vcpu *started_vcpu = NULL;

	if (is_vcpu_stopped(vcpu))
		return;

	trace_kvm_s390_vcpu_start_stop(vcpu->vcpu_id, 0);
	/* Only one cpu at a time may enter/leave the STOPPED state. */
	spin_lock(&vcpu->kvm->arch.start_stop_lock);
	online_vcpus = atomic_read(&vcpu->kvm->online_vcpus);

	/* Need to lock access to action_bits to avoid a SIGP race condition */
	spin_lock(&vcpu->arch.local_int.lock);
	atomic_set_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);

	/* SIGP STOP and SIGP STOP AND STORE STATUS has been fully processed */
	vcpu->arch.local_int.action_bits &=
				 ~(ACTION_STOP_ON_STOP | ACTION_STORE_ON_STOP);
	spin_unlock(&vcpu->arch.local_int.lock);

	__disable_ibs_on_vcpu(vcpu);

	for (i = 0; i < online_vcpus; i++) {
		if (!is_vcpu_stopped(vcpu->kvm->vcpus[i])) {
			started_vcpus++;
			started_vcpu = vcpu->kvm->vcpus[i];
		}
	}

	if (started_vcpus == 1) {
		/*
		 * As we only have one VCPU left, we want to enable the
		 * IBS facility for that VCPU to speed it up.
		 */
		__enable_ibs_on_vcpu(started_vcpu);
	}

	spin_unlock(&vcpu->kvm->arch.start_stop_lock);
	return;
}

static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
				     struct kvm_enable_cap *cap)
{
	int r;

	if (cap->flags)
		return -EINVAL;

	switch (cap->cap) {
	case KVM_CAP_S390_CSS_SUPPORT:
		if (!vcpu->kvm->arch.css_support) {
			vcpu->kvm->arch.css_support = 1;
			trace_kvm_s390_enable_css(vcpu->kvm);
		}
		r = 0;
		break;
	default:
		r = -EINVAL;
		break;
	}
	return r;
}

long kvm_arch_vcpu_ioctl(struct file *filp,
			 unsigned int ioctl, unsigned long arg)
{
	struct kvm_vcpu *vcpu = filp->private_data;
	void __user *argp = (void __user *)arg;
	int idx;
	long r;

	switch (ioctl) {
	case KVM_S390_INTERRUPT: {
		struct kvm_s390_interrupt s390int;

		r = -EFAULT;
		if (copy_from_user(&s390int, argp, sizeof(s390int)))
			break;
		r = kvm_s390_inject_vcpu(vcpu, &s390int);
		break;
	}
	case KVM_S390_STORE_STATUS:
		idx = srcu_read_lock(&vcpu->kvm->srcu);
		r = kvm_s390_vcpu_store_status(vcpu, arg);
		srcu_read_unlock(&vcpu->kvm->srcu, idx);
		break;
	case KVM_S390_SET_INITIAL_PSW: {
		psw_t psw;

		r = -EFAULT;
		if (copy_from_user(&psw, argp, sizeof(psw)))
			break;
		r = kvm_arch_vcpu_ioctl_set_initial_psw(vcpu, psw);
		break;
	}
	case KVM_S390_INITIAL_RESET:
		r = kvm_arch_vcpu_ioctl_initial_reset(vcpu);
		break;
	case KVM_SET_ONE_REG:
	case KVM_GET_ONE_REG: {
		struct kvm_one_reg reg;
		r = -EFAULT;
		if (copy_from_user(&reg, argp, sizeof(reg)))
			break;
		if (ioctl == KVM_SET_ONE_REG)
			r = kvm_arch_vcpu_ioctl_set_one_reg(vcpu, &reg);
		else
			r = kvm_arch_vcpu_ioctl_get_one_reg(vcpu, &reg);
		break;
	}
#ifdef CONFIG_KVM_S390_UCONTROL
	case KVM_S390_UCAS_MAP: {
		struct kvm_s390_ucas_mapping ucasmap;

		if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
			r = -EFAULT;
			break;
		}

		if (!kvm_is_ucontrol(vcpu->kvm)) {
			r = -EINVAL;
			break;
		}

		r = gmap_map_segment(vcpu->arch.gmap, ucasmap.user_addr,
				     ucasmap.vcpu_addr, ucasmap.length);
		break;
	}
	case KVM_S390_UCAS_UNMAP: {
		struct kvm_s390_ucas_mapping ucasmap;

		if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
			r = -EFAULT;
			break;
		}

		if (!kvm_is_ucontrol(vcpu->kvm)) {
			r = -EINVAL;
			break;
		}

		r = gmap_unmap_segment(vcpu->arch.gmap, ucasmap.vcpu_addr,
				       ucasmap.length);
		break;
	}
#endif
	case KVM_S390_VCPU_FAULT: {
		r = gmap_fault(vcpu->arch.gmap, arg, 0);
		break;
	}
	case KVM_ENABLE_CAP:
	{
		struct kvm_enable_cap cap;
		r = -EFAULT;
		if (copy_from_user(&cap, argp, sizeof(cap)))
			break;
		r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap);
		break;
	}
	default:
		r = -ENOTTY;
	}
	return r;
}

int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
{
#ifdef CONFIG_KVM_S390_UCONTROL
	if ((vmf->pgoff == KVM_S390_SIE_PAGE_OFFSET)
		 && (kvm_is_ucontrol(vcpu->kvm))) {
		vmf->page = virt_to_page(vcpu->arch.sie_block);
		get_page(vmf->page);
		return 0;
	}
#endif
	return VM_FAULT_SIGBUS;
}

int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
			    unsigned long npages)
{
	return 0;
}

/* Section: memory related */
int kvm_arch_prepare_memory_region(struct kvm *kvm,
				   struct kvm_memory_slot *memslot,
				   struct kvm_userspace_memory_region *mem,
				   enum kvm_mr_change change)
{
	/* A few sanity checks. We can have memory slots which have to be
	   located/ended at a segment boundary (1MB). The memory in userland is
	   ok to be fragmented into various different vmas. It is okay to mmap()
	   and munmap() stuff in this slot after doing this call at any time */

	if (mem->userspace_addr & 0xffffful)
		return -EINVAL;

	if (mem->memory_size & 0xffffful)
		return -EINVAL;

	return 0;
}

void kvm_arch_commit_memory_region(struct kvm *kvm,
				struct kvm_userspace_memory_region *mem,
				const struct kvm_memory_slot *old,
				enum kvm_mr_change change)
{
	int rc;

	/* If the basics of the memslot do not change, we do not want
	 * to update the gmap. Every update causes several unnecessary
	 * segment translation exceptions. This is usually handled just
	 * fine by the normal fault handler + gmap, but it will also
	 * cause faults on the prefix page of running guest CPUs.
	 */
	if (old->userspace_addr == mem->userspace_addr &&
	    old->base_gfn * PAGE_SIZE == mem->guest_phys_addr &&
	    old->npages * PAGE_SIZE == mem->memory_size)
		return;

	rc = gmap_map_segment(kvm->arch.gmap, mem->userspace_addr,
			      mem->guest_phys_addr, mem->memory_size);
	if (rc)
		printk(KERN_WARNING "kvm-s390: failed to commit memory region\n");
	return;
}

static int __init kvm_s390_init(void)
{
	int ret;
	ret = kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
	if (ret)
		return ret;

	/*
	 * guests can ask for up to 255+1 double words, we need a full page
	 * to hold the maximum amount of facilities. On the other hand, we
	 * only set facilities that are known to work in KVM.
	 */
	vfacilities = (unsigned long *) get_zeroed_page(GFP_KERNEL|GFP_DMA);
	if (!vfacilities) {
		kvm_exit();
		return -ENOMEM;
	}
	memcpy(vfacilities, S390_lowcore.stfle_fac_list, 16);
	vfacilities[0] &= 0xff82fffbf47c2000UL;
	vfacilities[1] &= 0x005c000000000000UL;
	return 0;
}

static void __exit kvm_s390_exit(void)
{
	free_page((unsigned long) vfacilities);
	kvm_exit();
}

module_init(kvm_s390_init);
module_exit(kvm_s390_exit);

/*
 * Enable autoloading of the kvm module.
 * Note that we add the module alias here instead of virt/kvm/kvm_main.c
 * since x86 takes a different approach.
 */
#include <linux/miscdevice.h>
MODULE_ALIAS_MISCDEV(KVM_MINOR);
MODULE_ALIAS("devname:kvm");