/* arch/s390/kvm/kvm-s390.c */
/*
 * hosting zSeries kernel virtual machines
 *
 * Copyright IBM Corp. 2008, 2009
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (version 2 only)
 * as published by the Free Software Foundation.
 *
 *    Author(s): Carsten Otte <cotte@de.ibm.com>
 *               Christian Borntraeger <borntraeger@de.ibm.com>
 *               Heiko Carstens <heiko.carstens@de.ibm.com>
 *               Christian Ehrhardt <ehrhardt@de.ibm.com>
 *               Jason J. Herne <jjherne@us.ibm.com>
 */

#include <linux/compiler.h>
#include <linux/err.h>
#include <linux/fs.h>
#include <linux/hrtimer.h>
#include <linux/init.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/timer.h>
#include <asm/asm-offsets.h>
#include <asm/lowcore.h>
#include <asm/pgtable.h>
#include <asm/nmi.h>
#include <asm/switch_to.h>
#include <asm/facility.h>
#include <asm/sclp.h>
#include "kvm-s390.h"
#include "gaccess.h"

#define CREATE_TRACE_POINTS
#include "trace.h"
#include "trace-s390.h"

#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU

struct kvm_stats_debugfs_item debugfs_entries[] = {
	{ "userspace_handled", VCPU_STAT(exit_userspace) },
	{ "exit_null", VCPU_STAT(exit_null) },
	{ "exit_validity", VCPU_STAT(exit_validity) },
	{ "exit_stop_request", VCPU_STAT(exit_stop_request) },
	{ "exit_external_request", VCPU_STAT(exit_external_request) },
	{ "exit_external_interrupt", VCPU_STAT(exit_external_interrupt) },
	{ "exit_instruction", VCPU_STAT(exit_instruction) },
	{ "exit_program_interruption", VCPU_STAT(exit_program_interruption) },
	{ "exit_instr_and_program_int", VCPU_STAT(exit_instr_and_program) },
	{ "instruction_lctlg", VCPU_STAT(instruction_lctlg) },
	{ "instruction_lctl", VCPU_STAT(instruction_lctl) },
	{ "instruction_stctl", VCPU_STAT(instruction_stctl) },
	{ "instruction_stctg", VCPU_STAT(instruction_stctg) },
	{ "deliver_emergency_signal", VCPU_STAT(deliver_emergency_signal) },
	{ "deliver_external_call", VCPU_STAT(deliver_external_call) },
	{ "deliver_service_signal", VCPU_STAT(deliver_service_signal) },
	{ "deliver_virtio_interrupt", VCPU_STAT(deliver_virtio_interrupt) },
	{ "deliver_stop_signal", VCPU_STAT(deliver_stop_signal) },
	{ "deliver_prefix_signal", VCPU_STAT(deliver_prefix_signal) },
	{ "deliver_restart_signal", VCPU_STAT(deliver_restart_signal) },
	{ "deliver_program_interruption", VCPU_STAT(deliver_program_int) },
	{ "exit_wait_state", VCPU_STAT(exit_wait_state) },
	{ "instruction_pfmf", VCPU_STAT(instruction_pfmf) },
	{ "instruction_stidp", VCPU_STAT(instruction_stidp) },
	{ "instruction_spx", VCPU_STAT(instruction_spx) },
	{ "instruction_stpx", VCPU_STAT(instruction_stpx) },
	{ "instruction_stap", VCPU_STAT(instruction_stap) },
	{ "instruction_storage_key", VCPU_STAT(instruction_storage_key) },
	{ "instruction_ipte_interlock", VCPU_STAT(instruction_ipte_interlock) },
	{ "instruction_stsch", VCPU_STAT(instruction_stsch) },
	{ "instruction_chsc", VCPU_STAT(instruction_chsc) },
	{ "instruction_essa", VCPU_STAT(instruction_essa) },
	{ "instruction_stsi", VCPU_STAT(instruction_stsi) },
	{ "instruction_stfl", VCPU_STAT(instruction_stfl) },
	{ "instruction_tprot", VCPU_STAT(instruction_tprot) },
	{ "instruction_sigp_sense", VCPU_STAT(instruction_sigp_sense) },
	{ "instruction_sigp_sense_running", VCPU_STAT(instruction_sigp_sense_running) },
	{ "instruction_sigp_external_call", VCPU_STAT(instruction_sigp_external_call) },
	{ "instruction_sigp_emergency", VCPU_STAT(instruction_sigp_emergency) },
	{ "instruction_sigp_stop", VCPU_STAT(instruction_sigp_stop) },
	{ "instruction_sigp_set_arch", VCPU_STAT(instruction_sigp_arch) },
	{ "instruction_sigp_set_prefix", VCPU_STAT(instruction_sigp_prefix) },
	{ "instruction_sigp_restart", VCPU_STAT(instruction_sigp_restart) },
	{ "diagnose_10", VCPU_STAT(diagnose_10) },
	{ "diagnose_44", VCPU_STAT(diagnose_44) },
	{ "diagnose_9c", VCPU_STAT(diagnose_9c) },
	{ NULL }
};

unsigned long *vfacilities;
static struct gmap_notifier gmap_notifier;

/* test availability of vfacility */
int test_vfacility(unsigned long nr)
{
	return __test_facility(nr, (void *) vfacilities);
}

/* Section: not file related */
int kvm_arch_hardware_enable(void *garbage)
{
	/* every s390 is virtualization enabled ;-) */
	return 0;
}

void kvm_arch_hardware_disable(void *garbage)
{
}

static void kvm_gmap_notifier(struct gmap *gmap, unsigned long address);

int kvm_arch_hardware_setup(void)
{
	gmap_notifier.notifier_call = kvm_gmap_notifier;
	gmap_register_ipte_notifier(&gmap_notifier);
	return 0;
}

void kvm_arch_hardware_unsetup(void)
{
	gmap_unregister_ipte_notifier(&gmap_notifier);
}

void kvm_arch_check_processor_compat(void *rtn)
{
}

int kvm_arch_init(void *opaque)
{
	return 0;
}

void kvm_arch_exit(void)
{
}

/* Section: device related */
long kvm_arch_dev_ioctl(struct file *filp,
			unsigned int ioctl, unsigned long arg)
{
	if (ioctl == KVM_S390_ENABLE_SIE)
		return s390_enable_sie();
	return -EINVAL;
}
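
/*
 * Example (illustrative only, error handling omitted): KVM_S390_ENABLE_SIE
 * is a /dev/kvm device ioctl, so a userspace call like the following ends
 * up in kvm_arch_dev_ioctl() above:
 *
 *	int kvm_fd = open("/dev/kvm", O_RDWR);
 *
 *	ioctl(kvm_fd, KVM_S390_ENABLE_SIE, 0);
 */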

int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
{
	int r;

	switch (ext) {
	case KVM_CAP_S390_PSW:
	case KVM_CAP_S390_GMAP:
	case KVM_CAP_SYNC_MMU:
#ifdef CONFIG_KVM_S390_UCONTROL
	case KVM_CAP_S390_UCONTROL:
#endif
	case KVM_CAP_ASYNC_PF:
	case KVM_CAP_SYNC_REGS:
	case KVM_CAP_ONE_REG:
	case KVM_CAP_ENABLE_CAP:
	case KVM_CAP_S390_CSS_SUPPORT:
	case KVM_CAP_IRQFD:
	case KVM_CAP_IOEVENTFD:
	case KVM_CAP_DEVICE_CTRL:
	case KVM_CAP_ENABLE_CAP_VM:
	case KVM_CAP_S390_IRQCHIP:
	case KVM_CAP_VM_ATTRIBUTES:
	case KVM_CAP_MP_STATE:
		r = 1;
		break;
	case KVM_CAP_NR_VCPUS:
	case KVM_CAP_MAX_VCPUS:
		r = KVM_MAX_VCPUS;
		break;
	case KVM_CAP_NR_MEMSLOTS:
		r = KVM_USER_MEM_SLOTS;
		break;
	case KVM_CAP_S390_COW:
		r = MACHINE_HAS_ESOP;
		break;
	default:
		r = 0;
	}
	return r;
}
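
/*
 * Example (illustrative only): querying one of the capabilities answered
 * above. Depending on kernel support, KVM_CHECK_EXTENSION is issued on the
 * /dev/kvm fd or on a VM fd; either path ends up in
 * kvm_vm_ioctl_check_extension():
 *
 *	int nr_slots = ioctl(kvm_fd, KVM_CHECK_EXTENSION, KVM_CAP_NR_MEMSLOTS);
 */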

static void kvm_s390_sync_dirty_log(struct kvm *kvm,
				    struct kvm_memory_slot *memslot)
{
	gfn_t cur_gfn, last_gfn;
	unsigned long address;
	struct gmap *gmap = kvm->arch.gmap;

	down_read(&gmap->mm->mmap_sem);
	/* Loop over all guest pages; last_gfn is one past the final page */
	last_gfn = memslot->base_gfn + memslot->npages;
	for (cur_gfn = memslot->base_gfn; cur_gfn < last_gfn; cur_gfn++) {
		address = gfn_to_hva_memslot(memslot, cur_gfn);

		if (gmap_test_and_clear_dirty(address, gmap))
			mark_page_dirty(kvm, cur_gfn);
	}
	up_read(&gmap->mm->mmap_sem);
}

/* Section: vm related */
/*
 * Get (and clear) the dirty memory log for a memory slot.
 */
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
			       struct kvm_dirty_log *log)
{
	int r;
	unsigned long n;
	struct kvm_memory_slot *memslot;
	int is_dirty = 0;

	mutex_lock(&kvm->slots_lock);

	r = -EINVAL;
	if (log->slot >= KVM_USER_MEM_SLOTS)
		goto out;

	memslot = id_to_memslot(kvm->memslots, log->slot);
	r = -ENOENT;
	if (!memslot->dirty_bitmap)
		goto out;

	kvm_s390_sync_dirty_log(kvm, memslot);
	r = kvm_get_dirty_log(kvm, log, &is_dirty);
	if (r)
		goto out;

	/* Clear the dirty log */
	if (is_dirty) {
		n = kvm_dirty_bitmap_bytes(memslot);
		memset(memslot->dirty_bitmap, 0, n);
	}
	r = 0;
out:
	mutex_unlock(&kvm->slots_lock);
	return r;
}
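
/*
 * Example (illustrative only): fetching the dirty bitmap for slot 0 from
 * userspace. The buffer size is long-aligned, mirroring
 * kvm_dirty_bitmap_bytes(); "npages" is assumed known to the caller.
 *
 *	struct kvm_dirty_log log = { .slot = 0 };
 *
 *	log.dirty_bitmap = calloc(1, ((npages + 63) & ~63UL) / 8);
 *	ioctl(vm_fd, KVM_GET_DIRTY_LOG, &log);
 */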

static int kvm_vm_ioctl_enable_cap(struct kvm *kvm, struct kvm_enable_cap *cap)
{
	int r;

	if (cap->flags)
		return -EINVAL;

	switch (cap->cap) {
	case KVM_CAP_S390_IRQCHIP:
		kvm->arch.use_irqchip = 1;
		r = 0;
		break;
	default:
		r = -EINVAL;
		break;
	}
	return r;
}

static int kvm_s390_mem_control(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;
	unsigned int idx;

	switch (attr->attr) {
	case KVM_S390_VM_MEM_ENABLE_CMMA:
		ret = -EBUSY;
		mutex_lock(&kvm->lock);
		if (atomic_read(&kvm->online_vcpus) == 0) {
			kvm->arch.use_cmma = 1;
			ret = 0;
		}
		mutex_unlock(&kvm->lock);
		break;
	case KVM_S390_VM_MEM_CLR_CMMA:
		mutex_lock(&kvm->lock);
		idx = srcu_read_lock(&kvm->srcu);
		page_table_reset_pgste(kvm->arch.gmap->mm, 0, TASK_SIZE, false);
		srcu_read_unlock(&kvm->srcu, idx);
		mutex_unlock(&kvm->lock);
		ret = 0;
		break;
	default:
		ret = -ENXIO;
		break;
	}
	return ret;
}

static int kvm_s390_vm_set_attr(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	switch (attr->group) {
	case KVM_S390_VM_MEM_CTRL:
		ret = kvm_s390_mem_control(kvm, attr);
		break;
	default:
		ret = -ENXIO;
		break;
	}

	return ret;
}

static int kvm_s390_vm_get_attr(struct kvm *kvm, struct kvm_device_attr *attr)
{
	return -ENXIO;
}

static int kvm_s390_vm_has_attr(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	switch (attr->group) {
	case KVM_S390_VM_MEM_CTRL:
		switch (attr->attr) {
		case KVM_S390_VM_MEM_ENABLE_CMMA:
		case KVM_S390_VM_MEM_CLR_CMMA:
			ret = 0;
			break;
		default:
			ret = -ENXIO;
			break;
		}
		break;
	default:
		ret = -ENXIO;
		break;
	}

	return ret;
}
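
/*
 * Example (illustrative only): enabling CMMA from userspace through the
 * VM attribute interface handled above. This must happen before the first
 * VCPU is created, otherwise kvm_s390_mem_control() returns -EBUSY:
 *
 *	struct kvm_device_attr attr = {
 *		.group = KVM_S390_VM_MEM_CTRL,
 *		.attr  = KVM_S390_VM_MEM_ENABLE_CMMA,
 *	};
 *
 *	if (ioctl(vm_fd, KVM_HAS_DEVICE_ATTR, &attr) == 0)
 *		ioctl(vm_fd, KVM_SET_DEVICE_ATTR, &attr);
 */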

long kvm_arch_vm_ioctl(struct file *filp,
		       unsigned int ioctl, unsigned long arg)
{
	struct kvm *kvm = filp->private_data;
	void __user *argp = (void __user *)arg;
	struct kvm_device_attr attr;
	int r;

	switch (ioctl) {
	case KVM_S390_INTERRUPT: {
		struct kvm_s390_interrupt s390int;

		r = -EFAULT;
		if (copy_from_user(&s390int, argp, sizeof(s390int)))
			break;
		r = kvm_s390_inject_vm(kvm, &s390int);
		break;
	}
	case KVM_ENABLE_CAP: {
		struct kvm_enable_cap cap;

		r = -EFAULT;
		if (copy_from_user(&cap, argp, sizeof(cap)))
			break;
		r = kvm_vm_ioctl_enable_cap(kvm, &cap);
		break;
	}
	case KVM_CREATE_IRQCHIP: {
		struct kvm_irq_routing_entry routing;

		r = -EINVAL;
		if (kvm->arch.use_irqchip) {
			/* Set up dummy routing. */
			memset(&routing, 0, sizeof(routing));
			kvm_set_irq_routing(kvm, &routing, 0, 0);
			r = 0;
		}
		break;
	}
	case KVM_SET_DEVICE_ATTR: {
		r = -EFAULT;
		if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
			break;
		r = kvm_s390_vm_set_attr(kvm, &attr);
		break;
	}
	case KVM_GET_DEVICE_ATTR: {
		r = -EFAULT;
		if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
			break;
		r = kvm_s390_vm_get_attr(kvm, &attr);
		break;
	}
	case KVM_HAS_DEVICE_ATTR: {
		r = -EFAULT;
		if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
			break;
		r = kvm_s390_vm_has_attr(kvm, &attr);
		break;
	}
	default:
		r = -ENOTTY;
	}

	return r;
}

int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
{
	int rc;
	char debug_name[16];
	static unsigned long sca_offset;

	rc = -EINVAL;
#ifdef CONFIG_KVM_S390_UCONTROL
	if (type & ~KVM_VM_S390_UCONTROL)
		goto out_err;
	if ((type & KVM_VM_S390_UCONTROL) && (!capable(CAP_SYS_ADMIN)))
		goto out_err;
#else
	if (type)
		goto out_err;
#endif

	rc = s390_enable_sie();
	if (rc)
		goto out_err;

	rc = -ENOMEM;

	kvm->arch.sca = (struct sca_block *) get_zeroed_page(GFP_KERNEL);
	if (!kvm->arch.sca)
		goto out_err;
	spin_lock(&kvm_lock);
	sca_offset = (sca_offset + 16) & 0x7f0;
	kvm->arch.sca = (struct sca_block *) ((char *) kvm->arch.sca + sca_offset);
	spin_unlock(&kvm_lock);

	sprintf(debug_name, "kvm-%u", current->pid);

	kvm->arch.dbf = debug_register(debug_name, 8, 2, 8 * sizeof(long));
	if (!kvm->arch.dbf)
		goto out_nodbf;

	spin_lock_init(&kvm->arch.float_int.lock);
	INIT_LIST_HEAD(&kvm->arch.float_int.list);
	init_waitqueue_head(&kvm->arch.ipte_wq);

	debug_register_view(kvm->arch.dbf, &debug_sprintf_view);
	VM_EVENT(kvm, 3, "%s", "vm created");

	if (type & KVM_VM_S390_UCONTROL) {
		kvm->arch.gmap = NULL;
	} else {
		kvm->arch.gmap = gmap_alloc(current->mm);
		if (!kvm->arch.gmap)
			goto out_nogmap;
		kvm->arch.gmap->private = kvm;
		kvm->arch.gmap->pfault_enabled = 0;
	}

	kvm->arch.css_support = 0;
	kvm->arch.use_irqchip = 0;

	spin_lock_init(&kvm->arch.start_stop_lock);

	return 0;
out_nogmap:
	debug_unregister(kvm->arch.dbf);
out_nodbf:
	free_page((unsigned long)(kvm->arch.sca));
out_err:
	return rc;
}
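
/*
 * Note on the sca_offset logic above: the SCA occupies only part of its
 * zeroed page, and consecutive VMs get their SCA staggered by 16 bytes
 * (wrapping within 0x7f0). A plausible reading is that this spreads the
 * heavily accessed SCAs of concurrently running VMs across different
 * cache lines instead of letting them all alias to the same page offset.
 */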

void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{
	VCPU_EVENT(vcpu, 3, "%s", "free cpu");
	trace_kvm_s390_destroy_vcpu(vcpu->vcpu_id);
	kvm_s390_clear_local_irqs(vcpu);
	kvm_clear_async_pf_completion_queue(vcpu);
	if (!kvm_is_ucontrol(vcpu->kvm)) {
		clear_bit(63 - vcpu->vcpu_id,
			  (unsigned long *) &vcpu->kvm->arch.sca->mcn);
		if (vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda ==
		    (__u64) vcpu->arch.sie_block)
			vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda = 0;
	}
	smp_mb();

	if (kvm_is_ucontrol(vcpu->kvm))
		gmap_free(vcpu->arch.gmap);

	if (kvm_s390_cmma_enabled(vcpu->kvm))
		kvm_s390_vcpu_unsetup_cmma(vcpu);
	free_page((unsigned long)(vcpu->arch.sie_block));

	kvm_vcpu_uninit(vcpu);
	kmem_cache_free(kvm_vcpu_cache, vcpu);
}

static void kvm_free_vcpus(struct kvm *kvm)
{
	unsigned int i;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(i, vcpu, kvm)
		kvm_arch_vcpu_destroy(vcpu);

	mutex_lock(&kvm->lock);
	for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
		kvm->vcpus[i] = NULL;

	atomic_set(&kvm->online_vcpus, 0);
	mutex_unlock(&kvm->lock);
}

void kvm_arch_sync_events(struct kvm *kvm)
{
}

void kvm_arch_destroy_vm(struct kvm *kvm)
{
	kvm_free_vcpus(kvm);
	free_page((unsigned long)(kvm->arch.sca));
	debug_unregister(kvm->arch.dbf);
	if (!kvm_is_ucontrol(kvm))
		gmap_free(kvm->arch.gmap);
	kvm_s390_destroy_adapters(kvm);
	kvm_s390_clear_float_irqs(kvm);
}

/* Section: vcpu related */
int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
{
	vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
	kvm_clear_async_pf_completion_queue(vcpu);
	if (kvm_is_ucontrol(vcpu->kvm)) {
		vcpu->arch.gmap = gmap_alloc(current->mm);
		if (!vcpu->arch.gmap)
			return -ENOMEM;
		vcpu->arch.gmap->private = vcpu->kvm;
		return 0;
	}

	vcpu->arch.gmap = vcpu->kvm->arch.gmap;
	vcpu->run->kvm_valid_regs = KVM_SYNC_PREFIX |
				    KVM_SYNC_GPRS |
				    KVM_SYNC_ACRS |
				    KVM_SYNC_CRS;
	return 0;
}

void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
{
	/* Nothing to do */
}

void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	save_fp_ctl(&vcpu->arch.host_fpregs.fpc);
	save_fp_regs(vcpu->arch.host_fpregs.fprs);
	save_access_regs(vcpu->arch.host_acrs);
	restore_fp_ctl(&vcpu->arch.guest_fpregs.fpc);
	restore_fp_regs(vcpu->arch.guest_fpregs.fprs);
	restore_access_regs(vcpu->run->s.regs.acrs);
	gmap_enable(vcpu->arch.gmap);
	atomic_set_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
}

void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
	atomic_clear_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
	gmap_disable(vcpu->arch.gmap);
	save_fp_ctl(&vcpu->arch.guest_fpregs.fpc);
	save_fp_regs(vcpu->arch.guest_fpregs.fprs);
	save_access_regs(vcpu->run->s.regs.acrs);
	restore_fp_ctl(&vcpu->arch.host_fpregs.fpc);
	restore_fp_regs(vcpu->arch.host_fpregs.fprs);
	restore_access_regs(vcpu->arch.host_acrs);
}

static void kvm_s390_vcpu_initial_reset(struct kvm_vcpu *vcpu)
{
	/* this equals initial cpu reset in pop, but we don't switch to ESA */
	vcpu->arch.sie_block->gpsw.mask = 0UL;
	vcpu->arch.sie_block->gpsw.addr = 0UL;
	kvm_s390_set_prefix(vcpu, 0);
	vcpu->arch.sie_block->cputm = 0UL;
	vcpu->arch.sie_block->ckc = 0UL;
	vcpu->arch.sie_block->todpr = 0;
	memset(vcpu->arch.sie_block->gcr, 0, 16 * sizeof(__u64));
	vcpu->arch.sie_block->gcr[0] = 0xE0UL;
	vcpu->arch.sie_block->gcr[14] = 0xC2000000UL;
	vcpu->arch.guest_fpregs.fpc = 0;
	asm volatile("lfpc %0" : : "Q" (vcpu->arch.guest_fpregs.fpc));
	vcpu->arch.sie_block->gbea = 1;
	vcpu->arch.sie_block->pp = 0;
	vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
	kvm_clear_async_pf_completion_queue(vcpu);
	if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm))
		kvm_s390_vcpu_stop(vcpu);
	kvm_s390_clear_local_irqs(vcpu);
}

int kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
{
	return 0;
}

void kvm_s390_vcpu_unsetup_cmma(struct kvm_vcpu *vcpu)
{
	free_page(vcpu->arch.sie_block->cbrlo);
	vcpu->arch.sie_block->cbrlo = 0;
}

int kvm_s390_vcpu_setup_cmma(struct kvm_vcpu *vcpu)
{
	vcpu->arch.sie_block->cbrlo = get_zeroed_page(GFP_KERNEL);
	if (!vcpu->arch.sie_block->cbrlo)
		return -ENOMEM;

	vcpu->arch.sie_block->ecb2 |= 0x80;
	vcpu->arch.sie_block->ecb2 &= ~0x08;
	return 0;
}

int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
{
	int rc = 0;

	atomic_set(&vcpu->arch.sie_block->cpuflags, CPUSTAT_ZARCH |
						    CPUSTAT_SM |
						    CPUSTAT_STOPPED |
						    CPUSTAT_GED);
	vcpu->arch.sie_block->ecb = 6;
	if (test_vfacility(50) && test_vfacility(73))
		vcpu->arch.sie_block->ecb |= 0x10;

	vcpu->arch.sie_block->ecb2 = 8;
	vcpu->arch.sie_block->eca = 0xD1002000U;
	if (sclp_has_siif())
		vcpu->arch.sie_block->eca |= 1;
	vcpu->arch.sie_block->fac = (int) (long) vfacilities;
	vcpu->arch.sie_block->ictl |= ICTL_ISKE | ICTL_SSKE | ICTL_RRBE |
				      ICTL_TPROT;

	if (kvm_s390_cmma_enabled(vcpu->kvm)) {
		rc = kvm_s390_vcpu_setup_cmma(vcpu);
		if (rc)
			return rc;
	}
	hrtimer_init(&vcpu->arch.ckc_timer, CLOCK_REALTIME, HRTIMER_MODE_ABS);
	vcpu->arch.ckc_timer.function = kvm_s390_idle_wakeup;
	get_cpu_id(&vcpu->arch.cpu_id);
	vcpu->arch.cpu_id.version = 0xff;
	return rc;
}

struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
				      unsigned int id)
{
	struct kvm_vcpu *vcpu;
	struct sie_page *sie_page;
	int rc = -EINVAL;

	if (id >= KVM_MAX_VCPUS)
		goto out;

	rc = -ENOMEM;

	vcpu = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
	if (!vcpu)
		goto out;

	sie_page = (struct sie_page *) get_zeroed_page(GFP_KERNEL);
	if (!sie_page)
		goto out_free_cpu;

	vcpu->arch.sie_block = &sie_page->sie_block;
	vcpu->arch.sie_block->itdba = (unsigned long) &sie_page->itdb;

	vcpu->arch.sie_block->icpua = id;
	if (!kvm_is_ucontrol(kvm)) {
		if (!kvm->arch.sca) {
			WARN_ON_ONCE(1);
			goto out_free_cpu;
		}
		if (!kvm->arch.sca->cpu[id].sda)
			kvm->arch.sca->cpu[id].sda =
				(__u64) vcpu->arch.sie_block;
		vcpu->arch.sie_block->scaoh =
			(__u32)(((__u64)kvm->arch.sca) >> 32);
		vcpu->arch.sie_block->scaol = (__u32)(__u64)kvm->arch.sca;
		set_bit(63 - id, (unsigned long *) &kvm->arch.sca->mcn);
	}

	spin_lock_init(&vcpu->arch.local_int.lock);
	INIT_LIST_HEAD(&vcpu->arch.local_int.list);
	vcpu->arch.local_int.float_int = &kvm->arch.float_int;
	vcpu->arch.local_int.wq = &vcpu->wq;
	vcpu->arch.local_int.cpuflags = &vcpu->arch.sie_block->cpuflags;

	rc = kvm_vcpu_init(vcpu, kvm, id);
	if (rc)
		goto out_free_sie_block;
	VM_EVENT(kvm, 3, "create cpu %d at %p, sie block at %p", id, vcpu,
		 vcpu->arch.sie_block);
	trace_kvm_s390_create_vcpu(id, vcpu, vcpu->arch.sie_block);

	return vcpu;
out_free_sie_block:
	free_page((unsigned long)(vcpu->arch.sie_block));
out_free_cpu:
	kmem_cache_free(kvm_vcpu_cache, vcpu);
out:
	return ERR_PTR(rc);
}

int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
{
	return kvm_cpu_has_interrupt(vcpu);
}

void s390_vcpu_block(struct kvm_vcpu *vcpu)
{
	atomic_set_mask(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
}

void s390_vcpu_unblock(struct kvm_vcpu *vcpu)
{
	atomic_clear_mask(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
}

/*
 * Kick a guest cpu out of SIE and wait until SIE is not running.
 * If the CPU is not running (e.g. waiting as idle) the function will
 * return immediately.
 */
void exit_sie(struct kvm_vcpu *vcpu)
{
	atomic_set_mask(CPUSTAT_STOP_INT, &vcpu->arch.sie_block->cpuflags);
	while (vcpu->arch.sie_block->prog0c & PROG_IN_SIE)
		cpu_relax();
}

/* Kick a guest cpu out of SIE and prevent SIE-reentry */
void exit_sie_sync(struct kvm_vcpu *vcpu)
{
	s390_vcpu_block(vcpu);
	exit_sie(vcpu);
}

static void kvm_gmap_notifier(struct gmap *gmap, unsigned long address)
{
	int i;
	struct kvm *kvm = gmap->private;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(i, vcpu, kvm) {
		/* match against both prefix pages */
		if (kvm_s390_get_prefix(vcpu) == (address & ~0x1000UL)) {
			VCPU_EVENT(vcpu, 2, "gmap notifier for %lx", address);
			kvm_make_request(KVM_REQ_MMU_RELOAD, vcpu);
			exit_sie_sync(vcpu);
		}
	}
}

int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
{
	/* kvm common code refers to this, but never calls it */
	BUG();
	return 0;
}
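
/*
 * Note on exit_sie_sync(): setting PROG_BLOCK_SIE before kicking the CPU
 * closes the window in which the VCPU thread could re-enter SIE between
 * the kick and the caller's update; with the block bit set, SIE cannot be
 * re-entered until s390_vcpu_unblock() runs from
 * kvm_s390_handle_requests().
 */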

static int kvm_arch_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu,
					   struct kvm_one_reg *reg)
{
	int r = -EINVAL;

	switch (reg->id) {
	case KVM_REG_S390_TODPR:
		r = put_user(vcpu->arch.sie_block->todpr,
			     (u32 __user *)reg->addr);
		break;
	case KVM_REG_S390_EPOCHDIFF:
		r = put_user(vcpu->arch.sie_block->epoch,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_CPU_TIMER:
		r = put_user(vcpu->arch.sie_block->cputm,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_CLOCK_COMP:
		r = put_user(vcpu->arch.sie_block->ckc,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PFTOKEN:
		r = put_user(vcpu->arch.pfault_token,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PFCOMPARE:
		r = put_user(vcpu->arch.pfault_compare,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PFSELECT:
		r = put_user(vcpu->arch.pfault_select,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PP:
		r = put_user(vcpu->arch.sie_block->pp,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_GBEA:
		r = put_user(vcpu->arch.sie_block->gbea,
			     (u64 __user *)reg->addr);
		break;
	default:
		break;
	}

	return r;
}

static int kvm_arch_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu,
					   struct kvm_one_reg *reg)
{
	int r = -EINVAL;

	switch (reg->id) {
	case KVM_REG_S390_TODPR:
		r = get_user(vcpu->arch.sie_block->todpr,
			     (u32 __user *)reg->addr);
		break;
	case KVM_REG_S390_EPOCHDIFF:
		r = get_user(vcpu->arch.sie_block->epoch,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_CPU_TIMER:
		r = get_user(vcpu->arch.sie_block->cputm,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_CLOCK_COMP:
		r = get_user(vcpu->arch.sie_block->ckc,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PFTOKEN:
		r = get_user(vcpu->arch.pfault_token,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PFCOMPARE:
		r = get_user(vcpu->arch.pfault_compare,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PFSELECT:
		r = get_user(vcpu->arch.pfault_select,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PP:
		r = get_user(vcpu->arch.sie_block->pp,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_GBEA:
		r = get_user(vcpu->arch.sie_block->gbea,
			     (u64 __user *)reg->addr);
		break;
	default:
		break;
	}

	return r;
}
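
/*
 * Example (illustrative only): reading the guest clock comparator from
 * userspace via the ONE_REG interface handled above:
 *
 *	__u64 ckc;
 *	struct kvm_one_reg reg = {
 *		.id   = KVM_REG_S390_CLOCK_COMP,
 *		.addr = (__u64)&ckc,
 *	};
 *
 *	ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);
 */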

static int kvm_arch_vcpu_ioctl_initial_reset(struct kvm_vcpu *vcpu)
{
	kvm_s390_vcpu_initial_reset(vcpu);
	return 0;
}

int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	memcpy(&vcpu->run->s.regs.gprs, &regs->gprs, sizeof(regs->gprs));
	return 0;
}

int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	memcpy(&regs->gprs, &vcpu->run->s.regs.gprs, sizeof(regs->gprs));
	return 0;
}

int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	memcpy(&vcpu->run->s.regs.acrs, &sregs->acrs, sizeof(sregs->acrs));
	memcpy(&vcpu->arch.sie_block->gcr, &sregs->crs, sizeof(sregs->crs));
	restore_access_regs(vcpu->run->s.regs.acrs);
	return 0;
}

int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	memcpy(&sregs->acrs, &vcpu->run->s.regs.acrs, sizeof(sregs->acrs));
	memcpy(&sregs->crs, &vcpu->arch.sie_block->gcr, sizeof(sregs->crs));
	return 0;
}

int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	if (test_fp_ctl(fpu->fpc))
		return -EINVAL;
	memcpy(&vcpu->arch.guest_fpregs.fprs, &fpu->fprs, sizeof(fpu->fprs));
	vcpu->arch.guest_fpregs.fpc = fpu->fpc;
	restore_fp_ctl(&vcpu->arch.guest_fpregs.fpc);
	restore_fp_regs(vcpu->arch.guest_fpregs.fprs);
	return 0;
}

int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	memcpy(&fpu->fprs, &vcpu->arch.guest_fpregs.fprs, sizeof(fpu->fprs));
	fpu->fpc = vcpu->arch.guest_fpregs.fpc;
	return 0;
}

static int kvm_arch_vcpu_ioctl_set_initial_psw(struct kvm_vcpu *vcpu, psw_t psw)
{
	int rc = 0;

	if (!is_vcpu_stopped(vcpu))
		rc = -EBUSY;
	else {
		vcpu->run->psw_mask = psw.mask;
		vcpu->run->psw_addr = psw.addr;
	}
	return rc;
}

int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
				  struct kvm_translation *tr)
{
	return -EINVAL; /* not implemented yet */
}

#define VALID_GUESTDBG_FLAGS (KVM_GUESTDBG_SINGLESTEP | \
			      KVM_GUESTDBG_USE_HW_BP | \
			      KVM_GUESTDBG_ENABLE)

int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
					struct kvm_guest_debug *dbg)
{
	int rc = 0;

	vcpu->guest_debug = 0;
	kvm_s390_clear_bp_data(vcpu);

	if (dbg->control & ~VALID_GUESTDBG_FLAGS)
		return -EINVAL;

	if (dbg->control & KVM_GUESTDBG_ENABLE) {
		vcpu->guest_debug = dbg->control;
		/* enforce guest PER */
		atomic_set_mask(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags);

		if (dbg->control & KVM_GUESTDBG_USE_HW_BP)
			rc = kvm_s390_import_bp_data(vcpu, dbg);
	} else {
		atomic_clear_mask(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags);
		vcpu->arch.guestdbg.last_bp = 0;
	}

	if (rc) {
		vcpu->guest_debug = 0;
		kvm_s390_clear_bp_data(vcpu);
		atomic_clear_mask(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags);
	}

	return rc;
}

int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	/* CHECK_STOP and LOAD are not supported yet */
	return is_vcpu_stopped(vcpu) ? KVM_MP_STATE_STOPPED :
				       KVM_MP_STATE_OPERATING;
}

int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	int rc = 0;

	/* user space knows about this interface - let it control the state */
	vcpu->kvm->arch.user_cpu_state_ctrl = 1;

	switch (mp_state->mp_state) {
	case KVM_MP_STATE_STOPPED:
		kvm_s390_vcpu_stop(vcpu);
		break;
	case KVM_MP_STATE_OPERATING:
		kvm_s390_vcpu_start(vcpu);
		break;
	case KVM_MP_STATE_LOAD:
	case KVM_MP_STATE_CHECK_STOP:
		/* fall through - CHECK_STOP and LOAD are not supported yet */
	default:
		rc = -ENXIO;
	}

	return rc;
}

bool kvm_s390_cmma_enabled(struct kvm *kvm)
{
	if (!MACHINE_IS_LPAR)
		return false;
	/* only enable for z10 and later */
	if (!MACHINE_HAS_EDAT1)
		return false;
	if (!kvm->arch.use_cmma)
		return false;
	return true;
}

static bool ibs_enabled(struct kvm_vcpu *vcpu)
{
	return atomic_read(&vcpu->arch.sie_block->cpuflags) & CPUSTAT_IBS;
}

static int kvm_s390_handle_requests(struct kvm_vcpu *vcpu)
{
retry:
	s390_vcpu_unblock(vcpu);
	/*
	 * We use MMU_RELOAD just to re-arm the ipte notifier for the
	 * guest prefix page. gmap_ipte_notify will wait on the ptl lock.
	 * This ensures that the ipte instruction for this request has
	 * already finished. We might race against a second unmapper that
	 * wants to set the blocking bit. Let's just retry the request loop.
	 */
	if (kvm_check_request(KVM_REQ_MMU_RELOAD, vcpu)) {
		int rc;

		rc = gmap_ipte_notify(vcpu->arch.gmap,
				      kvm_s390_get_prefix(vcpu),
				      PAGE_SIZE * 2);
		if (rc)
			return rc;
		goto retry;
	}

	if (kvm_check_request(KVM_REQ_ENABLE_IBS, vcpu)) {
		if (!ibs_enabled(vcpu)) {
			trace_kvm_s390_enable_disable_ibs(vcpu->vcpu_id, 1);
			atomic_set_mask(CPUSTAT_IBS,
					&vcpu->arch.sie_block->cpuflags);
		}
		goto retry;
	}

	if (kvm_check_request(KVM_REQ_DISABLE_IBS, vcpu)) {
		if (ibs_enabled(vcpu)) {
			trace_kvm_s390_enable_disable_ibs(vcpu->vcpu_id, 0);
			atomic_clear_mask(CPUSTAT_IBS,
					  &vcpu->arch.sie_block->cpuflags);
		}
		goto retry;
	}

	/* nothing to do, just clear the request */
	clear_bit(KVM_REQ_UNHALT, &vcpu->requests);

	return 0;
}

/**
 * kvm_arch_fault_in_page - fault-in guest page if necessary
 * @vcpu: The corresponding virtual cpu
 * @gpa: Guest physical address
 * @writable: Whether the page should be writable or not
 *
 * Make sure that a guest page has been faulted-in on the host.
 *
 * Return: Zero on success, negative error code otherwise.
 */
long kvm_arch_fault_in_page(struct kvm_vcpu *vcpu, gpa_t gpa, int writable)
{
	struct mm_struct *mm = current->mm;
	hva_t hva;
	long rc;

	hva = gmap_fault(gpa, vcpu->arch.gmap);
	if (IS_ERR_VALUE(hva))
		return (long)hva;
	down_read(&mm->mmap_sem);
	rc = get_user_pages(current, mm, hva, 1, writable, 0, NULL, NULL);
	up_read(&mm->mmap_sem);

	return rc < 0 ? rc : 0;
}

static void __kvm_inject_pfault_token(struct kvm_vcpu *vcpu, bool start_token,
				      unsigned long token)
{
	struct kvm_s390_interrupt inti;

	inti.parm64 = token;

	if (start_token) {
		inti.type = KVM_S390_INT_PFAULT_INIT;
		WARN_ON_ONCE(kvm_s390_inject_vcpu(vcpu, &inti));
	} else {
		inti.type = KVM_S390_INT_PFAULT_DONE;
		WARN_ON_ONCE(kvm_s390_inject_vm(vcpu->kvm, &inti));
	}
}

void kvm_arch_async_page_not_present(struct kvm_vcpu *vcpu,
				     struct kvm_async_pf *work)
{
	trace_kvm_s390_pfault_init(vcpu, work->arch.pfault_token);
	__kvm_inject_pfault_token(vcpu, true, work->arch.pfault_token);
}

void kvm_arch_async_page_present(struct kvm_vcpu *vcpu,
				 struct kvm_async_pf *work)
{
	trace_kvm_s390_pfault_done(vcpu, work->arch.pfault_token);
	__kvm_inject_pfault_token(vcpu, false, work->arch.pfault_token);
}

void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu,
			       struct kvm_async_pf *work)
{
	/* s390 will always inject the page directly */
}

bool kvm_arch_can_inject_async_page_present(struct kvm_vcpu *vcpu)
{
	/*
	 * s390 will always inject the page directly,
	 * but we still want check_async_completion to cleanup
	 */
	return true;
}

static int kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu)
{
	hva_t hva;
	struct kvm_arch_async_pf arch;
	int rc;

	if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
		return 0;
	if ((vcpu->arch.sie_block->gpsw.mask & vcpu->arch.pfault_select) !=
	    vcpu->arch.pfault_compare)
		return 0;
	if (psw_extint_disabled(vcpu))
		return 0;
	if (kvm_cpu_has_interrupt(vcpu))
		return 0;
	if (!(vcpu->arch.sie_block->gcr[0] & 0x200ul))
		return 0;
	if (!vcpu->arch.gmap->pfault_enabled)
		return 0;

	hva = gfn_to_hva(vcpu->kvm, gpa_to_gfn(current->thread.gmap_addr));
	hva += current->thread.gmap_addr & ~PAGE_MASK;
	if (read_guest_real(vcpu, vcpu->arch.pfault_token, &arch.pfault_token, 8))
		return 0;

	rc = kvm_setup_async_pf(vcpu, current->thread.gmap_addr, hva, &arch);
	return rc;
}

static int vcpu_pre_run(struct kvm_vcpu *vcpu)
{
	int rc, cpuflags;

	/*
	 * On s390 notifications for arriving pages will be delivered directly
	 * to the guest but the house keeping for completed pfaults is
	 * handled outside the worker.
	 */
	kvm_check_async_pf_completion(vcpu);

	memcpy(&vcpu->arch.sie_block->gg14, &vcpu->run->s.regs.gprs[14], 16);

	if (need_resched())
		schedule();

	if (test_cpu_flag(CIF_MCCK_PENDING))
		s390_handle_mcck();

	if (!kvm_is_ucontrol(vcpu->kvm))
		kvm_s390_deliver_pending_interrupts(vcpu);

	rc = kvm_s390_handle_requests(vcpu);
	if (rc)
		return rc;

	if (guestdbg_enabled(vcpu)) {
		kvm_s390_backup_guest_per_regs(vcpu);
		kvm_s390_patch_guest_per_regs(vcpu);
	}

	vcpu->arch.sie_block->icptcode = 0;
	cpuflags = atomic_read(&vcpu->arch.sie_block->cpuflags);
	VCPU_EVENT(vcpu, 6, "entering sie flags %x", cpuflags);
	trace_kvm_s390_sie_enter(vcpu, cpuflags);

	return 0;
}

static int vcpu_post_run(struct kvm_vcpu *vcpu, int exit_reason)
{
	int rc = -1;

	VCPU_EVENT(vcpu, 6, "exit sie icptcode %d",
		   vcpu->arch.sie_block->icptcode);
	trace_kvm_s390_sie_exit(vcpu, vcpu->arch.sie_block->icptcode);

	if (guestdbg_enabled(vcpu))
		kvm_s390_restore_guest_per_regs(vcpu);

	if (exit_reason >= 0) {
		rc = 0;
	} else if (kvm_is_ucontrol(vcpu->kvm)) {
		vcpu->run->exit_reason = KVM_EXIT_S390_UCONTROL;
		vcpu->run->s390_ucontrol.trans_exc_code =
						current->thread.gmap_addr;
		vcpu->run->s390_ucontrol.pgm_code = 0x10;
		rc = -EREMOTE;

	} else if (current->thread.gmap_pfault) {
		trace_kvm_s390_major_guest_pfault(vcpu);
		current->thread.gmap_pfault = 0;
		if (kvm_arch_setup_async_pf(vcpu)) {
			rc = 0;
		} else {
			gpa_t gpa = current->thread.gmap_addr;

			rc = kvm_arch_fault_in_page(vcpu, gpa, 1);
		}
	}

	if (rc == -1) {
		VCPU_EVENT(vcpu, 3, "%s", "fault in sie instruction");
		trace_kvm_s390_sie_fault(vcpu);
		rc = kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
	}

	memcpy(&vcpu->run->s.regs.gprs[14], &vcpu->arch.sie_block->gg14, 16);

	if (rc == 0) {
		if (kvm_is_ucontrol(vcpu->kvm))
			/* Don't exit for host interrupts. */
			rc = vcpu->arch.sie_block->icptcode ? -EOPNOTSUPP : 0;
		else
			rc = kvm_handle_sie_intercept(vcpu);
	}

	return rc;
}

static int __vcpu_run(struct kvm_vcpu *vcpu)
{
	int rc, exit_reason;

	/*
	 * We try to hold kvm->srcu during most of vcpu_run (except when run-
	 * ning the guest), so that memslots (and other stuff) are protected
	 */
	vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);

	do {
		rc = vcpu_pre_run(vcpu);
		if (rc)
			break;

		srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
		/*
		 * As PF_VCPU will be used in fault handler, between
		 * guest_enter and guest_exit should be no uaccess.
		 */
		preempt_disable();
		kvm_guest_enter();
		preempt_enable();
		exit_reason = sie64a(vcpu->arch.sie_block,
				     vcpu->run->s.regs.gprs);
		kvm_guest_exit();
		vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);

		rc = vcpu_post_run(vcpu, exit_reason);
	} while (!signal_pending(current) && !guestdbg_exit_pending(vcpu) && !rc);

	srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
	return rc;
}
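
/*
 * Note on the loop above: kvm->srcu is held for all in-kernel handling so
 * that memslot lookups stay valid, but it is dropped across sie64a()
 * itself, because the guest may run for an unbounded time and would
 * otherwise stall synchronize_srcu() callers.
 */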

int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	int rc;
	sigset_t sigsaved;

	if (guestdbg_exit_pending(vcpu)) {
		kvm_s390_prepare_debug_exit(vcpu);
		return 0;
	}

	if (vcpu->sigset_active)
		sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);

	if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm)) {
		kvm_s390_vcpu_start(vcpu);
	} else if (is_vcpu_stopped(vcpu)) {
		pr_err_ratelimited("kvm-s390: can't run stopped vcpu %d\n",
				   vcpu->vcpu_id);
		return -EINVAL;
	}

	vcpu->arch.sie_block->gpsw.mask = kvm_run->psw_mask;
	vcpu->arch.sie_block->gpsw.addr = kvm_run->psw_addr;
	if (kvm_run->kvm_dirty_regs & KVM_SYNC_PREFIX)
		kvm_s390_set_prefix(vcpu, kvm_run->s.regs.prefix);
	if (kvm_run->kvm_dirty_regs & KVM_SYNC_CRS) {
		memcpy(&vcpu->arch.sie_block->gcr, &kvm_run->s.regs.crs, 128);
		kvm_s390_set_prefix(vcpu, kvm_run->s.regs.prefix);
	}
	kvm_run->kvm_dirty_regs = 0;

	might_fault();
	rc = __vcpu_run(vcpu);

	if (signal_pending(current) && !rc) {
		kvm_run->exit_reason = KVM_EXIT_INTR;
		rc = -EINTR;
	}

	if (guestdbg_exit_pending(vcpu) && !rc) {
		kvm_s390_prepare_debug_exit(vcpu);
		rc = 0;
	}

	if (rc == -EOPNOTSUPP) {
		/* intercept cannot be handled in-kernel, prepare kvm-run */
		kvm_run->exit_reason = KVM_EXIT_S390_SIEIC;
		kvm_run->s390_sieic.icptcode = vcpu->arch.sie_block->icptcode;
		kvm_run->s390_sieic.ipa = vcpu->arch.sie_block->ipa;
		kvm_run->s390_sieic.ipb = vcpu->arch.sie_block->ipb;
		rc = 0;
	}

	if (rc == -EREMOTE) {
		/* intercept was handled, but userspace support is needed
		 * kvm_run has been prepared by the handler */
		rc = 0;
	}

	kvm_run->psw_mask = vcpu->arch.sie_block->gpsw.mask;
	kvm_run->psw_addr = vcpu->arch.sie_block->gpsw.addr;
	kvm_run->s.regs.prefix = kvm_s390_get_prefix(vcpu);
	memcpy(&kvm_run->s.regs.crs, &vcpu->arch.sie_block->gcr, 128);

	if (vcpu->sigset_active)
		sigprocmask(SIG_SETMASK, &sigsaved, NULL);

	vcpu->stat.exit_userspace++;
	return rc;
}

/*
 * store status at address
 * we have two special cases:
 * KVM_S390_STORE_STATUS_NOADDR: -> 0x1200 on 64 bit
 * KVM_S390_STORE_STATUS_PREFIXED: -> prefix
 */
int kvm_s390_store_status_unloaded(struct kvm_vcpu *vcpu, unsigned long gpa)
{
	unsigned char archmode = 1;
	unsigned int px;
	u64 clkcomp;
	int rc;

	if (gpa == KVM_S390_STORE_STATUS_NOADDR) {
		if (write_guest_abs(vcpu, 163, &archmode, 1))
			return -EFAULT;
		gpa = SAVE_AREA_BASE;
	} else if (gpa == KVM_S390_STORE_STATUS_PREFIXED) {
		if (write_guest_real(vcpu, 163, &archmode, 1))
			return -EFAULT;
		gpa = kvm_s390_real_to_abs(vcpu, SAVE_AREA_BASE);
	}
	rc = write_guest_abs(vcpu, gpa + offsetof(struct save_area, fp_regs),
			     vcpu->arch.guest_fpregs.fprs, 128);
	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, gp_regs),
			      vcpu->run->s.regs.gprs, 128);
	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, psw),
			      &vcpu->arch.sie_block->gpsw, 16);
	px = kvm_s390_get_prefix(vcpu);
	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, pref_reg),
			      &px, 4);
	rc |= write_guest_abs(vcpu,
			      gpa + offsetof(struct save_area, fp_ctrl_reg),
			      &vcpu->arch.guest_fpregs.fpc, 4);
	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, tod_reg),
			      &vcpu->arch.sie_block->todpr, 4);
	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, timer),
			      &vcpu->arch.sie_block->cputm, 8);
	clkcomp = vcpu->arch.sie_block->ckc >> 8;
	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, clk_cmp),
			      &clkcomp, 8);
	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, acc_regs),
			      &vcpu->run->s.regs.acrs, 64);
	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, ctrl_regs),
			      &vcpu->arch.sie_block->gcr, 128);
	return rc ? -EFAULT : 0;
}

int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
{
	/*
	 * The guest FPRS and ACRS are in the host FPRS/ACRS due to the lazy
	 * copying in vcpu load/put. Let's update our copies before we save
	 * them into the save area.
	 */
	save_fp_ctl(&vcpu->arch.guest_fpregs.fpc);
	save_fp_regs(vcpu->arch.guest_fpregs.fprs);
	save_access_regs(vcpu->run->s.regs.acrs);

	return kvm_s390_store_status_unloaded(vcpu, addr);
}

static void __disable_ibs_on_vcpu(struct kvm_vcpu *vcpu)
{
	kvm_check_request(KVM_REQ_ENABLE_IBS, vcpu);
	kvm_make_request(KVM_REQ_DISABLE_IBS, vcpu);
	exit_sie_sync(vcpu);
}

static void __disable_ibs_on_all_vcpus(struct kvm *kvm)
{
	unsigned int i;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(i, vcpu, kvm) {
		__disable_ibs_on_vcpu(vcpu);
	}
}

static void __enable_ibs_on_vcpu(struct kvm_vcpu *vcpu)
{
	kvm_check_request(KVM_REQ_DISABLE_IBS, vcpu);
	kvm_make_request(KVM_REQ_ENABLE_IBS, vcpu);
	exit_sie_sync(vcpu);
}

void kvm_s390_vcpu_start(struct kvm_vcpu *vcpu)
{
	int i, online_vcpus, started_vcpus = 0;

	if (!is_vcpu_stopped(vcpu))
		return;

	trace_kvm_s390_vcpu_start_stop(vcpu->vcpu_id, 1);
	/* Only one cpu at a time may enter/leave the STOPPED state. */
	spin_lock(&vcpu->kvm->arch.start_stop_lock);
	online_vcpus = atomic_read(&vcpu->kvm->online_vcpus);

	for (i = 0; i < online_vcpus; i++) {
		if (!is_vcpu_stopped(vcpu->kvm->vcpus[i]))
			started_vcpus++;
	}

	if (started_vcpus == 0) {
		/* we're the only active VCPU -> speed it up */
		__enable_ibs_on_vcpu(vcpu);
	} else if (started_vcpus == 1) {
		/*
		 * As we are starting a second VCPU, we have to disable
		 * the IBS facility on all VCPUs to remove potentially
		 * outstanding ENABLE requests.
		 */
		__disable_ibs_on_all_vcpus(vcpu->kvm);
	}

	atomic_clear_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);
	/*
	 * Another VCPU might have used IBS while we were offline.
	 * Let's play safe and flush the VCPU at startup.
	 */
	vcpu->arch.sie_block->ihcpu = 0xffff;
	spin_unlock(&vcpu->kvm->arch.start_stop_lock);
	return;
}

void kvm_s390_vcpu_stop(struct kvm_vcpu *vcpu)
{
	int i, online_vcpus, started_vcpus = 0;
	struct kvm_vcpu *started_vcpu = NULL;

	if (is_vcpu_stopped(vcpu))
		return;

	trace_kvm_s390_vcpu_start_stop(vcpu->vcpu_id, 0);
	/* Only one cpu at a time may enter/leave the STOPPED state. */
	spin_lock(&vcpu->kvm->arch.start_stop_lock);
	online_vcpus = atomic_read(&vcpu->kvm->online_vcpus);

	/* Need to lock access to action_bits to avoid a SIGP race condition */
	spin_lock(&vcpu->arch.local_int.lock);
	atomic_set_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);

	/* SIGP STOP and SIGP STOP AND STORE STATUS have been fully processed */
	vcpu->arch.local_int.action_bits &=
		~(ACTION_STOP_ON_STOP | ACTION_STORE_ON_STOP);
	spin_unlock(&vcpu->arch.local_int.lock);

	__disable_ibs_on_vcpu(vcpu);

	for (i = 0; i < online_vcpus; i++) {
		if (!is_vcpu_stopped(vcpu->kvm->vcpus[i])) {
			started_vcpus++;
			started_vcpu = vcpu->kvm->vcpus[i];
		}
	}

	if (started_vcpus == 1) {
		/*
		 * As we only have one VCPU left, we want to enable the
		 * IBS facility for that VCPU to speed it up.
		 */
		__enable_ibs_on_vcpu(started_vcpu);
	}

	spin_unlock(&vcpu->kvm->arch.start_stop_lock);
	return;
}
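
/*
 * Note on the IBS handling above: as the inline comments suggest, IBS
 * only pays off while a single VCPU is runnable. Start/stop therefore
 * maintain the invariant "IBS enabled iff exactly one VCPU is started":
 * the last runnable VCPU gets IBS enabled, and it is disabled everywhere
 * as soon as a second VCPU starts.
 */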

static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
				     struct kvm_enable_cap *cap)
{
	int r;

	if (cap->flags)
		return -EINVAL;

	switch (cap->cap) {
	case KVM_CAP_S390_CSS_SUPPORT:
		if (!vcpu->kvm->arch.css_support) {
			vcpu->kvm->arch.css_support = 1;
			trace_kvm_s390_enable_css(vcpu->kvm);
		}
		r = 0;
		break;
	default:
		r = -EINVAL;
		break;
	}
	return r;
}

long kvm_arch_vcpu_ioctl(struct file *filp,
			 unsigned int ioctl, unsigned long arg)
{
	struct kvm_vcpu *vcpu = filp->private_data;
	void __user *argp = (void __user *)arg;
	int idx;
	long r;

	switch (ioctl) {
	case KVM_S390_INTERRUPT: {
		struct kvm_s390_interrupt s390int;

		r = -EFAULT;
		if (copy_from_user(&s390int, argp, sizeof(s390int)))
			break;
		r = kvm_s390_inject_vcpu(vcpu, &s390int);
		break;
	}
	case KVM_S390_STORE_STATUS:
		idx = srcu_read_lock(&vcpu->kvm->srcu);
		r = kvm_s390_vcpu_store_status(vcpu, arg);
		srcu_read_unlock(&vcpu->kvm->srcu, idx);
		break;
	case KVM_S390_SET_INITIAL_PSW: {
		psw_t psw;

		r = -EFAULT;
		if (copy_from_user(&psw, argp, sizeof(psw)))
			break;
		r = kvm_arch_vcpu_ioctl_set_initial_psw(vcpu, psw);
		break;
	}
	case KVM_S390_INITIAL_RESET:
		r = kvm_arch_vcpu_ioctl_initial_reset(vcpu);
		break;
	case KVM_SET_ONE_REG:
	case KVM_GET_ONE_REG: {
		struct kvm_one_reg reg;

		r = -EFAULT;
		if (copy_from_user(&reg, argp, sizeof(reg)))
			break;
		if (ioctl == KVM_SET_ONE_REG)
			r = kvm_arch_vcpu_ioctl_set_one_reg(vcpu, &reg);
		else
			r = kvm_arch_vcpu_ioctl_get_one_reg(vcpu, &reg);
		break;
	}
#ifdef CONFIG_KVM_S390_UCONTROL
	case KVM_S390_UCAS_MAP: {
		struct kvm_s390_ucas_mapping ucasmap;

		if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
			r = -EFAULT;
			break;
		}

		if (!kvm_is_ucontrol(vcpu->kvm)) {
			r = -EINVAL;
			break;
		}

		r = gmap_map_segment(vcpu->arch.gmap, ucasmap.user_addr,
				     ucasmap.vcpu_addr, ucasmap.length);
		break;
	}
	case KVM_S390_UCAS_UNMAP: {
		struct kvm_s390_ucas_mapping ucasmap;

		if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
			r = -EFAULT;
			break;
		}

		if (!kvm_is_ucontrol(vcpu->kvm)) {
			r = -EINVAL;
			break;
		}

		r = gmap_unmap_segment(vcpu->arch.gmap, ucasmap.vcpu_addr,
				       ucasmap.length);
		break;
	}
#endif
	case KVM_S390_VCPU_FAULT: {
		r = gmap_fault(arg, vcpu->arch.gmap);
		if (!IS_ERR_VALUE(r))
			r = 0;
		break;
	}
	case KVM_ENABLE_CAP:
	{
		struct kvm_enable_cap cap;

		r = -EFAULT;
		if (copy_from_user(&cap, argp, sizeof(cap)))
			break;
		r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap);
		break;
	}
	default:
		r = -ENOTTY;
	}
	return r;
}

int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
{
#ifdef CONFIG_KVM_S390_UCONTROL
	if ((vmf->pgoff == KVM_S390_SIE_PAGE_OFFSET)
		 && (kvm_is_ucontrol(vcpu->kvm))) {
		vmf->page = virt_to_page(vcpu->arch.sie_block);
		get_page(vmf->page);
		return 0;
	}
#endif
	return VM_FAULT_SIGBUS;
}

void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free,
			   struct kvm_memory_slot *dont)
{
}

int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
			    unsigned long npages)
{
	return 0;
}

void kvm_arch_memslots_updated(struct kvm *kvm)
{
}

/* Section: memory related */
int kvm_arch_prepare_memory_region(struct kvm *kvm,
				   struct kvm_memory_slot *memslot,
				   struct kvm_userspace_memory_region *mem,
				   enum kvm_mr_change change)
{
	/* A few sanity checks. We can have memory slots which have to be
	   located/ended at a segment boundary (1MB). The memory in userland is
	   ok to be fragmented into various different vmas. It is okay to mmap()
	   and munmap() stuff in this slot after doing this call at any time */

	if (mem->userspace_addr & 0xffffful)
		return -EINVAL;

	if (mem->memory_size & 0xffffful)
		return -EINVAL;

	return 0;
}

void kvm_arch_commit_memory_region(struct kvm *kvm,
				struct kvm_userspace_memory_region *mem,
				const struct kvm_memory_slot *old,
				enum kvm_mr_change change)
{
	int rc;

	/* If the basics of the memslot do not change, we do not want
	 * to update the gmap. Every update causes several unnecessary
	 * segment translation exceptions. This is usually handled just
	 * fine by the normal fault handler + gmap, but it will also
	 * cause faults on the prefix page of running guest CPUs.
	 */
	if (old->userspace_addr == mem->userspace_addr &&
	    old->base_gfn * PAGE_SIZE == mem->guest_phys_addr &&
	    old->npages * PAGE_SIZE == mem->memory_size)
		return;

	rc = gmap_map_segment(kvm->arch.gmap, mem->userspace_addr,
		mem->guest_phys_addr, mem->memory_size);
	if (rc)
		printk(KERN_WARNING "kvm-s390: failed to commit memory region\n");
	return;
}
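
/*
 * Example (illustrative only): a memslot registration that satisfies the
 * 1MB segment-alignment checks in kvm_arch_prepare_memory_region() above;
 * "mem" is assumed to be a 1MB-aligned mmap() result:
 *
 *	struct kvm_userspace_memory_region region = {
 *		.slot            = 0,
 *		.guest_phys_addr = 0,
 *		.memory_size     = 256 << 20,
 *		.userspace_addr  = (__u64) mem,
 *	};
 *
 *	ioctl(vm_fd, KVM_SET_USER_MEMORY_REGION, &region);
 */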

void kvm_arch_flush_shadow_all(struct kvm *kvm)
{
}

void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
				   struct kvm_memory_slot *slot)
{
}

static int __init kvm_s390_init(void)
{
	int ret;

	ret = kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
	if (ret)
		return ret;

	/*
	 * guests can ask for up to 255+1 double words, we need a full page
	 * to hold the maximum amount of facilities. On the other hand, we
	 * only set facilities that are known to work in KVM.
	 */
	vfacilities = (unsigned long *) get_zeroed_page(GFP_KERNEL|GFP_DMA);
	if (!vfacilities) {
		kvm_exit();
		return -ENOMEM;
	}
	memcpy(vfacilities, S390_lowcore.stfle_fac_list, 16);
	vfacilities[0] &= 0xff82fff3f4fc2000UL;
	vfacilities[1] &= 0x005c000000000000UL;
	return 0;
}

static void __exit kvm_s390_exit(void)
{
	free_page((unsigned long) vfacilities);
	kvm_exit();
}

module_init(kvm_s390_init);
module_exit(kvm_s390_exit);

/*
 * Enable autoloading of the kvm module.
 * Note that we add the module alias here instead of virt/kvm/kvm_main.c
 * since x86 takes a different approach.
 */
#include <linux/miscdevice.h>
MODULE_ALIAS_MISCDEV(KVM_MINOR);
MODULE_ALIAS("devname:kvm");