/*
 * hosting zSeries kernel virtual machines
 *
 * Copyright IBM Corp. 2008, 2009
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (version 2 only)
 * as published by the Free Software Foundation.
 *
 *    Author(s): Carsten Otte <cotte@de.ibm.com>
 *               Christian Borntraeger <borntraeger@de.ibm.com>
 *               Heiko Carstens <heiko.carstens@de.ibm.com>
 *               Christian Ehrhardt <ehrhardt@de.ibm.com>
 *               Jason J. Herne <jjherne@us.ibm.com>
 */

#include <linux/compiler.h>
#include <linux/err.h>
#include <linux/fs.h>
#include <linux/hrtimer.h>
#include <linux/init.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/timer.h>
#include <asm/asm-offsets.h>
#include <asm/lowcore.h>
#include <asm/pgtable.h>
#include <asm/nmi.h>
#include <asm/switch_to.h>
#include <asm/facility.h>
#include <asm/sclp.h>
#include "kvm-s390.h"
#include "gaccess.h"

#define CREATE_TRACE_POINTS
#include "trace.h"
#include "trace-s390.h"

#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU

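/*
 * Each entry below maps a debugfs file name to a vcpu statistics counter;
 * the generic KVM code (virt/kvm/kvm_main.c) walks this table and creates
 * one file per entry under the kvm debugfs directory.
 */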
struct kvm_stats_debugfs_item debugfs_entries[] = {
	{ "userspace_handled", VCPU_STAT(exit_userspace) },
	{ "exit_null", VCPU_STAT(exit_null) },
	{ "exit_validity", VCPU_STAT(exit_validity) },
	{ "exit_stop_request", VCPU_STAT(exit_stop_request) },
	{ "exit_external_request", VCPU_STAT(exit_external_request) },
	{ "exit_external_interrupt", VCPU_STAT(exit_external_interrupt) },
	{ "exit_instruction", VCPU_STAT(exit_instruction) },
	{ "exit_program_interruption", VCPU_STAT(exit_program_interruption) },
	{ "exit_instr_and_program_int", VCPU_STAT(exit_instr_and_program) },
	{ "instruction_lctlg", VCPU_STAT(instruction_lctlg) },
	{ "instruction_lctl", VCPU_STAT(instruction_lctl) },
	{ "instruction_stctl", VCPU_STAT(instruction_stctl) },
	{ "instruction_stctg", VCPU_STAT(instruction_stctg) },
	{ "deliver_emergency_signal", VCPU_STAT(deliver_emergency_signal) },
	{ "deliver_external_call", VCPU_STAT(deliver_external_call) },
	{ "deliver_service_signal", VCPU_STAT(deliver_service_signal) },
	{ "deliver_virtio_interrupt", VCPU_STAT(deliver_virtio_interrupt) },
	{ "deliver_stop_signal", VCPU_STAT(deliver_stop_signal) },
	{ "deliver_prefix_signal", VCPU_STAT(deliver_prefix_signal) },
	{ "deliver_restart_signal", VCPU_STAT(deliver_restart_signal) },
	{ "deliver_program_interruption", VCPU_STAT(deliver_program_int) },
	{ "exit_wait_state", VCPU_STAT(exit_wait_state) },
	{ "instruction_pfmf", VCPU_STAT(instruction_pfmf) },
	{ "instruction_stidp", VCPU_STAT(instruction_stidp) },
	{ "instruction_spx", VCPU_STAT(instruction_spx) },
	{ "instruction_stpx", VCPU_STAT(instruction_stpx) },
	{ "instruction_stap", VCPU_STAT(instruction_stap) },
	{ "instruction_storage_key", VCPU_STAT(instruction_storage_key) },
	{ "instruction_ipte_interlock", VCPU_STAT(instruction_ipte_interlock) },
	{ "instruction_stsch", VCPU_STAT(instruction_stsch) },
	{ "instruction_chsc", VCPU_STAT(instruction_chsc) },
	{ "instruction_essa", VCPU_STAT(instruction_essa) },
	{ "instruction_stsi", VCPU_STAT(instruction_stsi) },
	{ "instruction_stfl", VCPU_STAT(instruction_stfl) },
	{ "instruction_tprot", VCPU_STAT(instruction_tprot) },
	{ "instruction_sigp_sense", VCPU_STAT(instruction_sigp_sense) },
	{ "instruction_sigp_sense_running", VCPU_STAT(instruction_sigp_sense_running) },
	{ "instruction_sigp_external_call", VCPU_STAT(instruction_sigp_external_call) },
	{ "instruction_sigp_emergency", VCPU_STAT(instruction_sigp_emergency) },
	{ "instruction_sigp_stop", VCPU_STAT(instruction_sigp_stop) },
	{ "instruction_sigp_set_arch", VCPU_STAT(instruction_sigp_arch) },
	{ "instruction_sigp_set_prefix", VCPU_STAT(instruction_sigp_prefix) },
	{ "instruction_sigp_restart", VCPU_STAT(instruction_sigp_restart) },
	{ "diagnose_10", VCPU_STAT(diagnose_10) },
	{ "diagnose_44", VCPU_STAT(diagnose_44) },
	{ "diagnose_9c", VCPU_STAT(diagnose_9c) },
	{ NULL }
};
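
/*
 * vfacilities holds the facility list presented to guests; kvm_s390_init()
 * below allocates it and masks it down to the facilities known to work
 * under KVM.
 */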
unsigned long *vfacilities;
static struct gmap_notifier gmap_notifier;

/* test availability of vfacility */
int test_vfacility(unsigned long nr)
{
	return __test_facility(nr, (void *) vfacilities);
}

/* Section: not file related */
int kvm_arch_hardware_enable(void *garbage)
{
	/* every s390 is virtualization enabled ;-) */
	return 0;
}

void kvm_arch_hardware_disable(void *garbage)
{
}

static void kvm_gmap_notifier(struct gmap *gmap, unsigned long address);

int kvm_arch_hardware_setup(void)
{
	gmap_notifier.notifier_call = kvm_gmap_notifier;
	gmap_register_ipte_notifier(&gmap_notifier);
	return 0;
}

void kvm_arch_hardware_unsetup(void)
{
	gmap_unregister_ipte_notifier(&gmap_notifier);
}

void kvm_arch_check_processor_compat(void *rtn)
{
}

int kvm_arch_init(void *opaque)
{
	return 0;
}

void kvm_arch_exit(void)
{
}

/* Section: device related */
long kvm_arch_dev_ioctl(struct file *filp,
			unsigned int ioctl, unsigned long arg)
{
	if (ioctl == KVM_S390_ENABLE_SIE)
		return s390_enable_sie();
	return -EINVAL;
}

int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
{
	int r;

	switch (ext) {
	case KVM_CAP_S390_PSW:
	case KVM_CAP_S390_GMAP:
	case KVM_CAP_SYNC_MMU:
#ifdef CONFIG_KVM_S390_UCONTROL
	case KVM_CAP_S390_UCONTROL:
#endif
	case KVM_CAP_ASYNC_PF:
	case KVM_CAP_SYNC_REGS:
	case KVM_CAP_ONE_REG:
	case KVM_CAP_ENABLE_CAP:
	case KVM_CAP_S390_CSS_SUPPORT:
	case KVM_CAP_IRQFD:
	case KVM_CAP_IOEVENTFD:
	case KVM_CAP_DEVICE_CTRL:
	case KVM_CAP_ENABLE_CAP_VM:
	case KVM_CAP_S390_IRQCHIP:
	case KVM_CAP_VM_ATTRIBUTES:
	case KVM_CAP_MP_STATE:
		r = 1;
		break;
	case KVM_CAP_NR_VCPUS:
	case KVM_CAP_MAX_VCPUS:
		r = KVM_MAX_VCPUS;
		break;
	case KVM_CAP_NR_MEMSLOTS:
		r = KVM_USER_MEM_SLOTS;
		break;
	case KVM_CAP_S390_COW:
		r = MACHINE_HAS_ESOP;
		break;
	default:
		r = 0;
	}
	return r;
}

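/*
 * Walk all pages of a memslot and transfer the per-page dirty state kept
 * in the gmap (host page tables) into KVM's dirty bitmap.
 */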
static void kvm_s390_sync_dirty_log(struct kvm *kvm,
				    struct kvm_memory_slot *memslot)
{
	gfn_t cur_gfn, last_gfn;
	unsigned long address;
	struct gmap *gmap = kvm->arch.gmap;

	down_read(&gmap->mm->mmap_sem);
	/* Loop over all guest pages */
	last_gfn = memslot->base_gfn + memslot->npages;
	for (cur_gfn = memslot->base_gfn; cur_gfn <= last_gfn; cur_gfn++) {
		address = gfn_to_hva_memslot(memslot, cur_gfn);

		if (gmap_test_and_clear_dirty(address, gmap))
			mark_page_dirty(kvm, cur_gfn);
	}
	up_read(&gmap->mm->mmap_sem);
}

/* Section: vm related */
/*
 * Get (and clear) the dirty memory log for a memory slot.
 */
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
			       struct kvm_dirty_log *log)
{
	int r;
	unsigned long n;
	struct kvm_memory_slot *memslot;
	int is_dirty = 0;

	mutex_lock(&kvm->slots_lock);

	r = -EINVAL;
	if (log->slot >= KVM_USER_MEM_SLOTS)
		goto out;

	memslot = id_to_memslot(kvm->memslots, log->slot);
	r = -ENOENT;
	if (!memslot->dirty_bitmap)
		goto out;

	kvm_s390_sync_dirty_log(kvm, memslot);
	r = kvm_get_dirty_log(kvm, log, &is_dirty);
	if (r)
		goto out;

	/* Clear the dirty log */
	if (is_dirty) {
		n = kvm_dirty_bitmap_bytes(memslot);
		memset(memslot->dirty_bitmap, 0, n);
	}
	r = 0;
out:
	mutex_unlock(&kvm->slots_lock);
	return r;
}

static int kvm_vm_ioctl_enable_cap(struct kvm *kvm, struct kvm_enable_cap *cap)
{
	int r;

	if (cap->flags)
		return -EINVAL;

	switch (cap->cap) {
	case KVM_CAP_S390_IRQCHIP:
		kvm->arch.use_irqchip = 1;
		r = 0;
		break;
	default:
		r = -EINVAL;
		break;
	}
	return r;
}

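/*
 * VM memory attributes: ENABLE_CMMA may only be set before the first vcpu
 * is created; CLR_CMMA resets the per-page usage state by clearing the
 * PGSTEs of the whole address space.
 */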
static int kvm_s390_mem_control(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;
	unsigned int idx;

	switch (attr->attr) {
	case KVM_S390_VM_MEM_ENABLE_CMMA:
		ret = -EBUSY;
		mutex_lock(&kvm->lock);
		if (atomic_read(&kvm->online_vcpus) == 0) {
			kvm->arch.use_cmma = 1;
			ret = 0;
		}
		mutex_unlock(&kvm->lock);
		break;
	case KVM_S390_VM_MEM_CLR_CMMA:
		mutex_lock(&kvm->lock);
		idx = srcu_read_lock(&kvm->srcu);
		page_table_reset_pgste(kvm->arch.gmap->mm, 0, TASK_SIZE, false);
		srcu_read_unlock(&kvm->srcu, idx);
		mutex_unlock(&kvm->lock);
		ret = 0;
		break;
	default:
		ret = -ENXIO;
		break;
	}
	return ret;
}

static int kvm_s390_vm_set_attr(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	switch (attr->group) {
	case KVM_S390_VM_MEM_CTRL:
		ret = kvm_s390_mem_control(kvm, attr);
		break;
	default:
		ret = -ENXIO;
		break;
	}

	return ret;
}

static int kvm_s390_vm_get_attr(struct kvm *kvm, struct kvm_device_attr *attr)
{
	return -ENXIO;
}

static int kvm_s390_vm_has_attr(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	switch (attr->group) {
	case KVM_S390_VM_MEM_CTRL:
		switch (attr->attr) {
		case KVM_S390_VM_MEM_ENABLE_CMMA:
		case KVM_S390_VM_MEM_CLR_CMMA:
			ret = 0;
			break;
		default:
			ret = -ENXIO;
			break;
		}
		break;
	default:
		ret = -ENXIO;
		break;
	}

	return ret;
}

long kvm_arch_vm_ioctl(struct file *filp,
		       unsigned int ioctl, unsigned long arg)
{
	struct kvm *kvm = filp->private_data;
	void __user *argp = (void __user *)arg;
	struct kvm_device_attr attr;
	int r;

	switch (ioctl) {
	case KVM_S390_INTERRUPT: {
		struct kvm_s390_interrupt s390int;

		r = -EFAULT;
		if (copy_from_user(&s390int, argp, sizeof(s390int)))
			break;
		r = kvm_s390_inject_vm(kvm, &s390int);
		break;
	}
	case KVM_ENABLE_CAP: {
		struct kvm_enable_cap cap;

		r = -EFAULT;
		if (copy_from_user(&cap, argp, sizeof(cap)))
			break;
		r = kvm_vm_ioctl_enable_cap(kvm, &cap);
		break;
	}
	case KVM_CREATE_IRQCHIP: {
		struct kvm_irq_routing_entry routing;

		r = -EINVAL;
		if (kvm->arch.use_irqchip) {
			/* Set up dummy routing. */
			memset(&routing, 0, sizeof(routing));
			kvm_set_irq_routing(kvm, &routing, 0, 0);
			r = 0;
		}
		break;
	}
	case KVM_SET_DEVICE_ATTR: {
		r = -EFAULT;
		if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
			break;
		r = kvm_s390_vm_set_attr(kvm, &attr);
		break;
	}
	case KVM_GET_DEVICE_ATTR: {
		r = -EFAULT;
		if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
			break;
		r = kvm_s390_vm_get_attr(kvm, &attr);
		break;
	}
	case KVM_HAS_DEVICE_ATTR: {
		r = -EFAULT;
		if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
			break;
		r = kvm_s390_vm_has_attr(kvm, &attr);
		break;
	}
	default:
		r = -ENOTTY;
	}

	return r;
}

int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
{
	int rc;
	char debug_name[16];
	static unsigned long sca_offset;

	rc = -EINVAL;
#ifdef CONFIG_KVM_S390_UCONTROL
	if (type & ~KVM_VM_S390_UCONTROL)
		goto out_err;
	if ((type & KVM_VM_S390_UCONTROL) && (!capable(CAP_SYS_ADMIN)))
		goto out_err;
#else
	if (type)
		goto out_err;
#endif

	rc = s390_enable_sie();
	if (rc)
		goto out_err;

	rc = -ENOMEM;

	kvm->arch.sca = (struct sca_block *) get_zeroed_page(GFP_KERNEL);
	if (!kvm->arch.sca)
		goto out_err;
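	/*
	 * Stagger the SCA of successive VMs in 16-byte steps within the
	 * allocated page, presumably so that the heavily used header bytes
	 * of different VMs' SCAs do not share the same cache lines.
	 */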
	spin_lock(&kvm_lock);
	sca_offset = (sca_offset + 16) & 0x7f0;
	kvm->arch.sca = (struct sca_block *) ((char *) kvm->arch.sca + sca_offset);
	spin_unlock(&kvm_lock);

	sprintf(debug_name, "kvm-%u", current->pid);

	kvm->arch.dbf = debug_register(debug_name, 8, 2, 8 * sizeof(long));
	if (!kvm->arch.dbf)
		goto out_nodbf;

	spin_lock_init(&kvm->arch.float_int.lock);
	INIT_LIST_HEAD(&kvm->arch.float_int.list);
	init_waitqueue_head(&kvm->arch.ipte_wq);

	debug_register_view(kvm->arch.dbf, &debug_sprintf_view);
	VM_EVENT(kvm, 3, "%s", "vm created");

	if (type & KVM_VM_S390_UCONTROL) {
		kvm->arch.gmap = NULL;
	} else {
		kvm->arch.gmap = gmap_alloc(current->mm);
		if (!kvm->arch.gmap)
			goto out_nogmap;
		kvm->arch.gmap->private = kvm;
		kvm->arch.gmap->pfault_enabled = 0;
	}

	kvm->arch.css_support = 0;
	kvm->arch.use_irqchip = 0;

	spin_lock_init(&kvm->arch.start_stop_lock);

	return 0;
out_nogmap:
	debug_unregister(kvm->arch.dbf);
out_nodbf:
	free_page((unsigned long)(kvm->arch.sca));
out_err:
	return rc;
}

void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{
	VCPU_EVENT(vcpu, 3, "%s", "free cpu");
	trace_kvm_s390_destroy_vcpu(vcpu->vcpu_id);
	kvm_s390_clear_local_irqs(vcpu);
	kvm_clear_async_pf_completion_queue(vcpu);
	if (!kvm_is_ucontrol(vcpu->kvm)) {
		clear_bit(63 - vcpu->vcpu_id,
			  (unsigned long *) &vcpu->kvm->arch.sca->mcn);
		if (vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda ==
		    (__u64) vcpu->arch.sie_block)
			vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda = 0;
	}
	smp_mb();

	if (kvm_is_ucontrol(vcpu->kvm))
		gmap_free(vcpu->arch.gmap);

	if (kvm_s390_cmma_enabled(vcpu->kvm))
		kvm_s390_vcpu_unsetup_cmma(vcpu);
	free_page((unsigned long)(vcpu->arch.sie_block));

	kvm_vcpu_uninit(vcpu);
	kmem_cache_free(kvm_vcpu_cache, vcpu);
}

static void kvm_free_vcpus(struct kvm *kvm)
{
	unsigned int i;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(i, vcpu, kvm)
		kvm_arch_vcpu_destroy(vcpu);

	mutex_lock(&kvm->lock);
	for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
		kvm->vcpus[i] = NULL;

	atomic_set(&kvm->online_vcpus, 0);
	mutex_unlock(&kvm->lock);
}

void kvm_arch_sync_events(struct kvm *kvm)
{
}

void kvm_arch_destroy_vm(struct kvm *kvm)
{
	kvm_free_vcpus(kvm);
	free_page((unsigned long)(kvm->arch.sca));
	debug_unregister(kvm->arch.dbf);
	if (!kvm_is_ucontrol(kvm))
		gmap_free(kvm->arch.gmap);
	kvm_s390_destroy_adapters(kvm);
	kvm_s390_clear_float_irqs(kvm);
}

/* Section: vcpu related */
int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
{
	vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
	kvm_clear_async_pf_completion_queue(vcpu);
	if (kvm_is_ucontrol(vcpu->kvm)) {
		vcpu->arch.gmap = gmap_alloc(current->mm);
		if (!vcpu->arch.gmap)
			return -ENOMEM;
		vcpu->arch.gmap->private = vcpu->kvm;
		return 0;
	}

	vcpu->arch.gmap = vcpu->kvm->arch.gmap;
	vcpu->run->kvm_valid_regs = KVM_SYNC_PREFIX |
				    KVM_SYNC_GPRS |
				    KVM_SYNC_ACRS |
				    KVM_SYNC_CRS |
				    KVM_SYNC_ARCH0 |
				    KVM_SYNC_PFAULT;
	return 0;
}

void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
{
	/* Nothing to do */
}

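/*
 * Lazy register switching: while a vcpu is loaded, the guest's floating
 * point and access registers live in the hardware registers and the host's
 * copies are parked in vcpu->arch; kvm_arch_vcpu_put() reverses this
 * before the host regains the CPU.
 */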
void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	save_fp_ctl(&vcpu->arch.host_fpregs.fpc);
	save_fp_regs(vcpu->arch.host_fpregs.fprs);
	save_access_regs(vcpu->arch.host_acrs);
	restore_fp_ctl(&vcpu->arch.guest_fpregs.fpc);
	restore_fp_regs(vcpu->arch.guest_fpregs.fprs);
	restore_access_regs(vcpu->run->s.regs.acrs);
	gmap_enable(vcpu->arch.gmap);
	atomic_set_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
}

void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
	atomic_clear_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
	gmap_disable(vcpu->arch.gmap);
	save_fp_ctl(&vcpu->arch.guest_fpregs.fpc);
	save_fp_regs(vcpu->arch.guest_fpregs.fprs);
	save_access_regs(vcpu->run->s.regs.acrs);
	restore_fp_ctl(&vcpu->arch.host_fpregs.fpc);
	restore_fp_regs(vcpu->arch.host_fpregs.fprs);
	restore_access_regs(vcpu->arch.host_acrs);
}

static void kvm_s390_vcpu_initial_reset(struct kvm_vcpu *vcpu)
{
	/* this equals initial cpu reset in pop, but we don't switch to ESA */
	vcpu->arch.sie_block->gpsw.mask = 0UL;
	vcpu->arch.sie_block->gpsw.addr = 0UL;
	kvm_s390_set_prefix(vcpu, 0);
	vcpu->arch.sie_block->cputm = 0UL;
	vcpu->arch.sie_block->ckc = 0UL;
	vcpu->arch.sie_block->todpr = 0;
	memset(vcpu->arch.sie_block->gcr, 0, 16 * sizeof(__u64));
	vcpu->arch.sie_block->gcr[0] = 0xE0UL;
	vcpu->arch.sie_block->gcr[14] = 0xC2000000UL;
	vcpu->arch.guest_fpregs.fpc = 0;
	asm volatile("lfpc %0" : : "Q" (vcpu->arch.guest_fpregs.fpc));
	vcpu->arch.sie_block->gbea = 1;
	vcpu->arch.sie_block->pp = 0;
	vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
	kvm_clear_async_pf_completion_queue(vcpu);
	if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm))
		kvm_s390_vcpu_stop(vcpu);
	kvm_s390_clear_local_irqs(vcpu);
}

int kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
{
	return 0;
}

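/*
 * CMMA (collaborative memory management assist) lets the guest tag the
 * usage state of its pages via ESSA. The page allocated below becomes the
 * cbrlo of the SIE block, presumably the "collection buffer list origin"
 * used by SIE to log the blocks touched by the guest. In later kernels the
 * ecb2 bits toggled here are named ECB2_CMMA (0x80) and ECB2_PFMFI (0x08).
 */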
void kvm_s390_vcpu_unsetup_cmma(struct kvm_vcpu *vcpu)
{
	free_page(vcpu->arch.sie_block->cbrlo);
	vcpu->arch.sie_block->cbrlo = 0;
}

int kvm_s390_vcpu_setup_cmma(struct kvm_vcpu *vcpu)
{
	vcpu->arch.sie_block->cbrlo = get_zeroed_page(GFP_KERNEL);
	if (!vcpu->arch.sie_block->cbrlo)
		return -ENOMEM;

	vcpu->arch.sie_block->ecb2 |= 0x80;
	vcpu->arch.sie_block->ecb2 &= ~0x08;
	return 0;
}

int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
{
	int rc = 0;

	atomic_set(&vcpu->arch.sie_block->cpuflags, CPUSTAT_ZARCH |
						    CPUSTAT_SM |
						    CPUSTAT_STOPPED |
						    CPUSTAT_GED);
	vcpu->arch.sie_block->ecb = 6;
	if (test_vfacility(50) && test_vfacility(73))
		vcpu->arch.sie_block->ecb |= 0x10;

	vcpu->arch.sie_block->ecb2 = 8;
	vcpu->arch.sie_block->eca = 0xD1002000U;
	if (sclp_has_siif())
		vcpu->arch.sie_block->eca |= 1;
	vcpu->arch.sie_block->fac = (int) (long) vfacilities;
	vcpu->arch.sie_block->ictl |= ICTL_ISKE | ICTL_SSKE | ICTL_RRBE |
				      ICTL_TPROT;

	if (kvm_s390_cmma_enabled(vcpu->kvm)) {
		rc = kvm_s390_vcpu_setup_cmma(vcpu);
		if (rc)
			return rc;
	}
	hrtimer_init(&vcpu->arch.ckc_timer, CLOCK_REALTIME, HRTIMER_MODE_ABS);
	vcpu->arch.ckc_timer.function = kvm_s390_idle_wakeup;
	get_cpu_id(&vcpu->arch.cpu_id);
	vcpu->arch.cpu_id.version = 0xff;
	return rc;
}

struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
				      unsigned int id)
{
	struct kvm_vcpu *vcpu;
	struct sie_page *sie_page;
	int rc = -EINVAL;

	if (id >= KVM_MAX_VCPUS)
		goto out;

	rc = -ENOMEM;

	vcpu = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
	if (!vcpu)
		goto out;

	sie_page = (struct sie_page *) get_zeroed_page(GFP_KERNEL);
	if (!sie_page)
		goto out_free_cpu;

	vcpu->arch.sie_block = &sie_page->sie_block;
	vcpu->arch.sie_block->itdba = (unsigned long) &sie_page->itdb;

	vcpu->arch.sie_block->icpua = id;
	if (!kvm_is_ucontrol(kvm)) {
		if (!kvm->arch.sca) {
			WARN_ON_ONCE(1);
			goto out_free_cpu;
		}
		if (!kvm->arch.sca->cpu[id].sda)
			kvm->arch.sca->cpu[id].sda =
				(__u64) vcpu->arch.sie_block;
		vcpu->arch.sie_block->scaoh =
			(__u32)(((__u64)kvm->arch.sca) >> 32);
		vcpu->arch.sie_block->scaol = (__u32)(__u64)kvm->arch.sca;
		set_bit(63 - id, (unsigned long *) &kvm->arch.sca->mcn);
	}

	spin_lock_init(&vcpu->arch.local_int.lock);
	INIT_LIST_HEAD(&vcpu->arch.local_int.list);
	vcpu->arch.local_int.float_int = &kvm->arch.float_int;
	vcpu->arch.local_int.wq = &vcpu->wq;
	vcpu->arch.local_int.cpuflags = &vcpu->arch.sie_block->cpuflags;

	rc = kvm_vcpu_init(vcpu, kvm, id);
	if (rc)
		goto out_free_sie_block;
	VM_EVENT(kvm, 3, "create cpu %d at %p, sie block at %p", id, vcpu,
		 vcpu->arch.sie_block);
	trace_kvm_s390_create_vcpu(id, vcpu, vcpu->arch.sie_block);

	return vcpu;
out_free_sie_block:
	free_page((unsigned long)(vcpu->arch.sie_block));
out_free_cpu:
	kmem_cache_free(kvm_vcpu_cache, vcpu);
out:
	return ERR_PTR(rc);
}

int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
{
	return kvm_cpu_has_interrupt(vcpu);
}

void s390_vcpu_block(struct kvm_vcpu *vcpu)
{
	atomic_set_mask(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
}

void s390_vcpu_unblock(struct kvm_vcpu *vcpu)
{
	atomic_clear_mask(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
}

/*
 * Kick a guest cpu out of SIE and wait until SIE is not running.
 * If the CPU is not running (e.g. waiting as idle) the function will
 * return immediately.
 */
void exit_sie(struct kvm_vcpu *vcpu)
{
	atomic_set_mask(CPUSTAT_STOP_INT, &vcpu->arch.sie_block->cpuflags);
	while (vcpu->arch.sie_block->prog0c & PROG_IN_SIE)
		cpu_relax();
}

/* Kick a guest cpu out of SIE and prevent SIE-reentry */
void exit_sie_sync(struct kvm_vcpu *vcpu)
{
	s390_vcpu_block(vcpu);
	exit_sie(vcpu);
}

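/*
 * Called via the gmap ipte notifier when the host invalidates a guest page
 * that a notifier was armed for; if it is one of the two pages backing a
 * vcpu's prefix area, kick that vcpu out of SIE and have it re-arm the
 * notifier via KVM_REQ_MMU_RELOAD before re-entry.
 */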
static void kvm_gmap_notifier(struct gmap *gmap, unsigned long address)
{
	int i;
	struct kvm *kvm = gmap->private;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(i, vcpu, kvm) {
		/* match against both prefix pages */
		if (kvm_s390_get_prefix(vcpu) == (address & ~0x1000UL)) {
			VCPU_EVENT(vcpu, 2, "gmap notifier for %lx", address);
			kvm_make_request(KVM_REQ_MMU_RELOAD, vcpu);
			exit_sie_sync(vcpu);
		}
	}
}

int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
{
	/* kvm common code refers to this, but never calls it */
	BUG();
	return 0;
}

static int kvm_arch_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu,
					   struct kvm_one_reg *reg)
{
	int r = -EINVAL;

	switch (reg->id) {
	case KVM_REG_S390_TODPR:
		r = put_user(vcpu->arch.sie_block->todpr,
			     (u32 __user *)reg->addr);
		break;
	case KVM_REG_S390_EPOCHDIFF:
		r = put_user(vcpu->arch.sie_block->epoch,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_CPU_TIMER:
		r = put_user(vcpu->arch.sie_block->cputm,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_CLOCK_COMP:
		r = put_user(vcpu->arch.sie_block->ckc,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PFTOKEN:
		r = put_user(vcpu->arch.pfault_token,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PFCOMPARE:
		r = put_user(vcpu->arch.pfault_compare,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PFSELECT:
		r = put_user(vcpu->arch.pfault_select,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PP:
		r = put_user(vcpu->arch.sie_block->pp,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_GBEA:
		r = put_user(vcpu->arch.sie_block->gbea,
			     (u64 __user *)reg->addr);
		break;
	default:
		break;
	}

	return r;
}

static int kvm_arch_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu,
					   struct kvm_one_reg *reg)
{
	int r = -EINVAL;

	switch (reg->id) {
	case KVM_REG_S390_TODPR:
		r = get_user(vcpu->arch.sie_block->todpr,
			     (u32 __user *)reg->addr);
		break;
	case KVM_REG_S390_EPOCHDIFF:
		r = get_user(vcpu->arch.sie_block->epoch,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_CPU_TIMER:
		r = get_user(vcpu->arch.sie_block->cputm,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_CLOCK_COMP:
		r = get_user(vcpu->arch.sie_block->ckc,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PFTOKEN:
		r = get_user(vcpu->arch.pfault_token,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PFCOMPARE:
		r = get_user(vcpu->arch.pfault_compare,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PFSELECT:
		r = get_user(vcpu->arch.pfault_select,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PP:
		r = get_user(vcpu->arch.sie_block->pp,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_GBEA:
		r = get_user(vcpu->arch.sie_block->gbea,
			     (u64 __user *)reg->addr);
		break;
	default:
		break;
	}

	return r;
}

static int kvm_arch_vcpu_ioctl_initial_reset(struct kvm_vcpu *vcpu)
{
	kvm_s390_vcpu_initial_reset(vcpu);
	return 0;
}

int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	memcpy(&vcpu->run->s.regs.gprs, &regs->gprs, sizeof(regs->gprs));
	return 0;
}

int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	memcpy(&regs->gprs, &vcpu->run->s.regs.gprs, sizeof(regs->gprs));
	return 0;
}

int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	memcpy(&vcpu->run->s.regs.acrs, &sregs->acrs, sizeof(sregs->acrs));
	memcpy(&vcpu->arch.sie_block->gcr, &sregs->crs, sizeof(sregs->crs));
	restore_access_regs(vcpu->run->s.regs.acrs);
	return 0;
}

int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	memcpy(&sregs->acrs, &vcpu->run->s.regs.acrs, sizeof(sregs->acrs));
	memcpy(&sregs->crs, &vcpu->arch.sie_block->gcr, sizeof(sregs->crs));
	return 0;
}

int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	if (test_fp_ctl(fpu->fpc))
		return -EINVAL;
	memcpy(&vcpu->arch.guest_fpregs.fprs, &fpu->fprs, sizeof(fpu->fprs));
	vcpu->arch.guest_fpregs.fpc = fpu->fpc;
	restore_fp_ctl(&vcpu->arch.guest_fpregs.fpc);
	restore_fp_regs(vcpu->arch.guest_fpregs.fprs);
	return 0;
}

int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	memcpy(&fpu->fprs, &vcpu->arch.guest_fpregs.fprs, sizeof(fpu->fprs));
	fpu->fpc = vcpu->arch.guest_fpregs.fpc;
	return 0;
}

static int kvm_arch_vcpu_ioctl_set_initial_psw(struct kvm_vcpu *vcpu, psw_t psw)
{
	int rc = 0;

	if (!is_vcpu_stopped(vcpu))
		rc = -EBUSY;
	else {
		vcpu->run->psw_mask = psw.mask;
		vcpu->run->psw_addr = psw.addr;
	}
	return rc;
}

int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
				  struct kvm_translation *tr)
{
	return -EINVAL; /* not implemented yet */
}

#define VALID_GUESTDBG_FLAGS (KVM_GUESTDBG_SINGLESTEP | \
			      KVM_GUESTDBG_USE_HW_BP | \
			      KVM_GUESTDBG_ENABLE)

int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
					struct kvm_guest_debug *dbg)
{
	int rc = 0;

	vcpu->guest_debug = 0;
	kvm_s390_clear_bp_data(vcpu);

	if (dbg->control & ~VALID_GUESTDBG_FLAGS)
		return -EINVAL;

	if (dbg->control & KVM_GUESTDBG_ENABLE) {
		vcpu->guest_debug = dbg->control;
		/* enforce guest PER */
		atomic_set_mask(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags);

		if (dbg->control & KVM_GUESTDBG_USE_HW_BP)
			rc = kvm_s390_import_bp_data(vcpu, dbg);
	} else {
		atomic_clear_mask(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags);
		vcpu->arch.guestdbg.last_bp = 0;
	}

	if (rc) {
		vcpu->guest_debug = 0;
		kvm_s390_clear_bp_data(vcpu);
		atomic_clear_mask(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags);
	}

	return rc;
}

int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	/* CHECK_STOP and LOAD are not supported yet */
	return is_vcpu_stopped(vcpu) ? KVM_MP_STATE_STOPPED :
				       KVM_MP_STATE_OPERATING;
}

int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	int rc = 0;

	/* user space knows about this interface - let it control the state */
	vcpu->kvm->arch.user_cpu_state_ctrl = 1;

	switch (mp_state->mp_state) {
	case KVM_MP_STATE_STOPPED:
		kvm_s390_vcpu_stop(vcpu);
		break;
	case KVM_MP_STATE_OPERATING:
		kvm_s390_vcpu_start(vcpu);
		break;
	case KVM_MP_STATE_LOAD:
	case KVM_MP_STATE_CHECK_STOP:
		/* fall through - CHECK_STOP and LOAD are not supported yet */
	default:
		rc = -ENXIO;
	}

	return rc;
}

bool kvm_s390_cmma_enabled(struct kvm *kvm)
{
	if (!MACHINE_IS_LPAR)
		return false;
	/* only enable for z10 and later */
	if (!MACHINE_HAS_EDAT1)
		return false;
	if (!kvm->arch.use_cmma)
		return false;
	return true;
}

static bool ibs_enabled(struct kvm_vcpu *vcpu)
{
	return atomic_read(&vcpu->arch.sie_block->cpuflags) & CPUSTAT_IBS;
}

static int kvm_s390_handle_requests(struct kvm_vcpu *vcpu)
{
retry:
	s390_vcpu_unblock(vcpu);
	/*
	 * We use MMU_RELOAD just to re-arm the ipte notifier for the
	 * guest prefix page. gmap_ipte_notify will wait on the ptl lock.
	 * This ensures that the ipte instruction for this request has
	 * already finished. We might race against a second unmapper that
	 * wants to set the blocking bit. Let's just retry the request loop.
	 */
	if (kvm_check_request(KVM_REQ_MMU_RELOAD, vcpu)) {
		int rc;

		rc = gmap_ipte_notify(vcpu->arch.gmap,
				      kvm_s390_get_prefix(vcpu),
				      PAGE_SIZE * 2);
		if (rc)
			return rc;
		goto retry;
	}

	if (kvm_check_request(KVM_REQ_ENABLE_IBS, vcpu)) {
		if (!ibs_enabled(vcpu)) {
			trace_kvm_s390_enable_disable_ibs(vcpu->vcpu_id, 1);
			atomic_set_mask(CPUSTAT_IBS,
					&vcpu->arch.sie_block->cpuflags);
		}
		goto retry;
	}

	if (kvm_check_request(KVM_REQ_DISABLE_IBS, vcpu)) {
		if (ibs_enabled(vcpu)) {
			trace_kvm_s390_enable_disable_ibs(vcpu->vcpu_id, 0);
			atomic_clear_mask(CPUSTAT_IBS,
					  &vcpu->arch.sie_block->cpuflags);
		}
		goto retry;
	}

	/* nothing to do, just clear the request */
	clear_bit(KVM_REQ_UNHALT, &vcpu->requests);

	return 0;
}

/**
 * kvm_arch_fault_in_page - fault-in guest page if necessary
 * @vcpu: The corresponding virtual cpu
 * @gpa: Guest physical address
 * @writable: Whether the page should be writable or not
 *
 * Make sure that a guest page has been faulted-in on the host.
 *
 * Return: Zero on success, negative error code otherwise.
 */
long kvm_arch_fault_in_page(struct kvm_vcpu *vcpu, gpa_t gpa, int writable)
{
	struct mm_struct *mm = current->mm;
	hva_t hva;
	long rc;

	hva = gmap_fault(gpa, vcpu->arch.gmap);
	if (IS_ERR_VALUE(hva))
		return (long)hva;
	down_read(&mm->mmap_sem);
	rc = get_user_pages(current, mm, hva, 1, writable, 0, NULL, NULL);
	up_read(&mm->mmap_sem);

	return rc < 0 ? rc : 0;
}

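/*
 * Async page fault (pfault) protocol: a PFAULT_INIT interrupt tells the
 * guest that a host page is not yet available so it can schedule another
 * task in the meantime; the matching PFAULT_DONE, injected as a floating
 * interrupt below, signals that the page has been faulted in.
 */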
static void __kvm_inject_pfault_token(struct kvm_vcpu *vcpu, bool start_token,
				      unsigned long token)
{
	struct kvm_s390_interrupt inti;

	inti.parm64 = token;

	if (start_token) {
		inti.type = KVM_S390_INT_PFAULT_INIT;
		WARN_ON_ONCE(kvm_s390_inject_vcpu(vcpu, &inti));
	} else {
		inti.type = KVM_S390_INT_PFAULT_DONE;
		WARN_ON_ONCE(kvm_s390_inject_vm(vcpu->kvm, &inti));
	}
}

void kvm_arch_async_page_not_present(struct kvm_vcpu *vcpu,
				     struct kvm_async_pf *work)
{
	trace_kvm_s390_pfault_init(vcpu, work->arch.pfault_token);
	__kvm_inject_pfault_token(vcpu, true, work->arch.pfault_token);
}

void kvm_arch_async_page_present(struct kvm_vcpu *vcpu,
				 struct kvm_async_pf *work)
{
	trace_kvm_s390_pfault_done(vcpu, work->arch.pfault_token);
	__kvm_inject_pfault_token(vcpu, false, work->arch.pfault_token);
}

void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu,
			       struct kvm_async_pf *work)
{
	/* s390 will always inject the page directly */
}

bool kvm_arch_can_inject_async_page_present(struct kvm_vcpu *vcpu)
{
	/*
	 * s390 will always inject the page directly,
	 * but we still want check_async_completion to cleanup
	 */
	return true;
}

static int kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu)
{
	hva_t hva;
	struct kvm_arch_async_pf arch;
	int rc;

	if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
		return 0;
	if ((vcpu->arch.sie_block->gpsw.mask & vcpu->arch.pfault_select) !=
	    vcpu->arch.pfault_compare)
		return 0;
	if (psw_extint_disabled(vcpu))
		return 0;
	if (kvm_cpu_has_interrupt(vcpu))
		return 0;
	if (!(vcpu->arch.sie_block->gcr[0] & 0x200ul))
		return 0;
	if (!vcpu->arch.gmap->pfault_enabled)
		return 0;

	hva = gfn_to_hva(vcpu->kvm, gpa_to_gfn(current->thread.gmap_addr));
	hva += current->thread.gmap_addr & ~PAGE_MASK;
	if (read_guest_real(vcpu, vcpu->arch.pfault_token, &arch.pfault_token, 8))
		return 0;

	rc = kvm_setup_async_pf(vcpu, current->thread.gmap_addr, hva, &arch);
	return rc;
}

static int vcpu_pre_run(struct kvm_vcpu *vcpu)
{
	int rc, cpuflags;

	/*
	 * On s390 notifications for arriving pages will be delivered directly
	 * to the guest but the house keeping for completed pfaults is
	 * handled outside the worker.
	 */
	kvm_check_async_pf_completion(vcpu);

	memcpy(&vcpu->arch.sie_block->gg14, &vcpu->run->s.regs.gprs[14], 16);

	if (need_resched())
		schedule();

	if (test_cpu_flag(CIF_MCCK_PENDING))
		s390_handle_mcck();

	if (!kvm_is_ucontrol(vcpu->kvm))
		kvm_s390_deliver_pending_interrupts(vcpu);

	rc = kvm_s390_handle_requests(vcpu);
	if (rc)
		return rc;

	if (guestdbg_enabled(vcpu)) {
		kvm_s390_backup_guest_per_regs(vcpu);
		kvm_s390_patch_guest_per_regs(vcpu);
	}

	vcpu->arch.sie_block->icptcode = 0;
	cpuflags = atomic_read(&vcpu->arch.sie_block->cpuflags);
	VCPU_EVENT(vcpu, 6, "entering sie flags %x", cpuflags);
	trace_kvm_s390_sie_enter(vcpu, cpuflags);

	return 0;
}

static int vcpu_post_run(struct kvm_vcpu *vcpu, int exit_reason)
{
	int rc = -1;

	VCPU_EVENT(vcpu, 6, "exit sie icptcode %d",
		   vcpu->arch.sie_block->icptcode);
	trace_kvm_s390_sie_exit(vcpu, vcpu->arch.sie_block->icptcode);

	if (guestdbg_enabled(vcpu))
		kvm_s390_restore_guest_per_regs(vcpu);

	if (exit_reason >= 0) {
		rc = 0;
	} else if (kvm_is_ucontrol(vcpu->kvm)) {
		vcpu->run->exit_reason = KVM_EXIT_S390_UCONTROL;
		vcpu->run->s390_ucontrol.trans_exc_code =
						current->thread.gmap_addr;
		vcpu->run->s390_ucontrol.pgm_code = 0x10;
		rc = -EREMOTE;

	} else if (current->thread.gmap_pfault) {
		trace_kvm_s390_major_guest_pfault(vcpu);
		current->thread.gmap_pfault = 0;
		if (kvm_arch_setup_async_pf(vcpu)) {
			rc = 0;
		} else {
			gpa_t gpa = current->thread.gmap_addr;

			rc = kvm_arch_fault_in_page(vcpu, gpa, 1);
		}
	}

	if (rc == -1) {
		VCPU_EVENT(vcpu, 3, "%s", "fault in sie instruction");
		trace_kvm_s390_sie_fault(vcpu);
		rc = kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
	}

	memcpy(&vcpu->run->s.regs.gprs[14], &vcpu->arch.sie_block->gg14, 16);

	if (rc == 0) {
		if (kvm_is_ucontrol(vcpu->kvm))
			/* Don't exit for host interrupts. */
			rc = vcpu->arch.sie_block->icptcode ? -EOPNOTSUPP : 0;
		else
			rc = kvm_handle_sie_intercept(vcpu);
	}

	return rc;
}

static int __vcpu_run(struct kvm_vcpu *vcpu)
{
	int rc, exit_reason;

	/*
	 * We try to hold kvm->srcu during most of vcpu_run (except when run-
	 * ning the guest), so that memslots (and other stuff) are protected
	 */
	vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);

	do {
		rc = vcpu_pre_run(vcpu);
		if (rc)
			break;

		srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
		/*
		 * As PF_VCPU will be used in fault handler, between
		 * guest_enter and guest_exit should be no uaccess.
		 */
		preempt_disable();
		kvm_guest_enter();
		preempt_enable();
		exit_reason = sie64a(vcpu->arch.sie_block,
				     vcpu->run->s.regs.gprs);
		kvm_guest_exit();
		vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);

		rc = vcpu_post_run(vcpu, exit_reason);
	} while (!signal_pending(current) && !guestdbg_exit_pending(vcpu) && !rc);

	srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
	return rc;
}

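/*
 * Registers shared with userspace through kvm_run: sync_regs() pulls in
 * everything userspace flagged in kvm_dirty_regs before entering the
 * guest, and store_regs() publishes the current state before KVM_RUN
 * returns, so most register accesses need no extra ioctl round trip.
 */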
static void sync_regs(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	vcpu->arch.sie_block->gpsw.mask = kvm_run->psw_mask;
	vcpu->arch.sie_block->gpsw.addr = kvm_run->psw_addr;
	if (kvm_run->kvm_dirty_regs & KVM_SYNC_PREFIX)
		kvm_s390_set_prefix(vcpu, kvm_run->s.regs.prefix);
	if (kvm_run->kvm_dirty_regs & KVM_SYNC_CRS) {
		memcpy(&vcpu->arch.sie_block->gcr, &kvm_run->s.regs.crs, 128);
		kvm_s390_set_prefix(vcpu, kvm_run->s.regs.prefix);
	}
	if (kvm_run->kvm_dirty_regs & KVM_SYNC_ARCH0) {
		vcpu->arch.sie_block->cputm = kvm_run->s.regs.cputm;
		vcpu->arch.sie_block->ckc = kvm_run->s.regs.ckc;
		vcpu->arch.sie_block->todpr = kvm_run->s.regs.todpr;
		vcpu->arch.sie_block->pp = kvm_run->s.regs.pp;
		vcpu->arch.sie_block->gbea = kvm_run->s.regs.gbea;
	}
	if (kvm_run->kvm_dirty_regs & KVM_SYNC_PFAULT) {
		vcpu->arch.pfault_token = kvm_run->s.regs.pft;
		vcpu->arch.pfault_select = kvm_run->s.regs.pfs;
		vcpu->arch.pfault_compare = kvm_run->s.regs.pfc;
	}
	kvm_run->kvm_dirty_regs = 0;
}

static void store_regs(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	kvm_run->psw_mask = vcpu->arch.sie_block->gpsw.mask;
	kvm_run->psw_addr = vcpu->arch.sie_block->gpsw.addr;
	kvm_run->s.regs.prefix = kvm_s390_get_prefix(vcpu);
	memcpy(&kvm_run->s.regs.crs, &vcpu->arch.sie_block->gcr, 128);
	kvm_run->s.regs.cputm = vcpu->arch.sie_block->cputm;
	kvm_run->s.regs.ckc = vcpu->arch.sie_block->ckc;
	kvm_run->s.regs.todpr = vcpu->arch.sie_block->todpr;
	kvm_run->s.regs.pp = vcpu->arch.sie_block->pp;
	kvm_run->s.regs.gbea = vcpu->arch.sie_block->gbea;
	kvm_run->s.regs.pft = vcpu->arch.pfault_token;
	kvm_run->s.regs.pfs = vcpu->arch.pfault_select;
	kvm_run->s.regs.pfc = vcpu->arch.pfault_compare;
}

int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	int rc;
	sigset_t sigsaved;

	if (guestdbg_exit_pending(vcpu)) {
		kvm_s390_prepare_debug_exit(vcpu);
		return 0;
	}

	if (vcpu->sigset_active)
		sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);

	if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm)) {
		kvm_s390_vcpu_start(vcpu);
	} else if (is_vcpu_stopped(vcpu)) {
		pr_err_ratelimited("kvm-s390: can't run stopped vcpu %d\n",
				   vcpu->vcpu_id);
		return -EINVAL;
	}

	sync_regs(vcpu, kvm_run);

	might_fault();
	rc = __vcpu_run(vcpu);

	if (signal_pending(current) && !rc) {
		kvm_run->exit_reason = KVM_EXIT_INTR;
		rc = -EINTR;
	}

	if (guestdbg_exit_pending(vcpu) && !rc) {
		kvm_s390_prepare_debug_exit(vcpu);
		rc = 0;
	}

	if (rc == -EOPNOTSUPP) {
		/* intercept cannot be handled in-kernel, prepare kvm-run */
		kvm_run->exit_reason = KVM_EXIT_S390_SIEIC;
		kvm_run->s390_sieic.icptcode = vcpu->arch.sie_block->icptcode;
		kvm_run->s390_sieic.ipa = vcpu->arch.sie_block->ipa;
		kvm_run->s390_sieic.ipb = vcpu->arch.sie_block->ipb;
		rc = 0;
	}

	if (rc == -EREMOTE) {
		/* intercept was handled, but userspace support is needed
		 * kvm_run has been prepared by the handler */
		rc = 0;
	}

	store_regs(vcpu, kvm_run);

	if (vcpu->sigset_active)
		sigprocmask(SIG_SETMASK, &sigsaved, NULL);

	vcpu->stat.exit_userspace++;
	return rc;
}

/*
 * store status at address
 * we have two special cases:
 * KVM_S390_STORE_STATUS_NOADDR: -> 0x1200 on 64 bit
 * KVM_S390_STORE_STATUS_PREFIXED: -> prefix
 */
int kvm_s390_store_status_unloaded(struct kvm_vcpu *vcpu, unsigned long gpa)
{
	unsigned char archmode = 1;
	unsigned int px;
	u64 clkcomp;
	int rc;

	if (gpa == KVM_S390_STORE_STATUS_NOADDR) {
		if (write_guest_abs(vcpu, 163, &archmode, 1))
			return -EFAULT;
		gpa = SAVE_AREA_BASE;
	} else if (gpa == KVM_S390_STORE_STATUS_PREFIXED) {
		if (write_guest_real(vcpu, 163, &archmode, 1))
			return -EFAULT;
		gpa = kvm_s390_real_to_abs(vcpu, SAVE_AREA_BASE);
	}
	rc = write_guest_abs(vcpu, gpa + offsetof(struct save_area, fp_regs),
			     vcpu->arch.guest_fpregs.fprs, 128);
	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, gp_regs),
			      vcpu->run->s.regs.gprs, 128);
	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, psw),
			      &vcpu->arch.sie_block->gpsw, 16);
	px = kvm_s390_get_prefix(vcpu);
	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, pref_reg),
			      &px, 4);
	rc |= write_guest_abs(vcpu,
			      gpa + offsetof(struct save_area, fp_ctrl_reg),
			      &vcpu->arch.guest_fpregs.fpc, 4);
	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, tod_reg),
			      &vcpu->arch.sie_block->todpr, 4);
	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, timer),
			      &vcpu->arch.sie_block->cputm, 8);
	clkcomp = vcpu->arch.sie_block->ckc >> 8;
	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, clk_cmp),
			      &clkcomp, 8);
	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, acc_regs),
			      &vcpu->run->s.regs.acrs, 64);
	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, ctrl_regs),
			      &vcpu->arch.sie_block->gcr, 128);
	return rc ? -EFAULT : 0;
}

int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
{
	/*
	 * The guest FPRS and ACRS are in the host FPRS/ACRS due to the lazy
	 * copying in vcpu load/put. Let's update our copies before we save
	 * them into the save area.
	 */
	save_fp_ctl(&vcpu->arch.guest_fpregs.fpc);
	save_fp_regs(vcpu->arch.guest_fpregs.fprs);
	save_access_regs(vcpu->run->s.regs.acrs);

	return kvm_s390_store_status_unloaded(vcpu, addr);
}

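/*
 * The helpers below keep the CPUSTAT_IBS flag set on a vcpu only while it
 * is the sole running vcpu of the VM; per the start/stop logic further
 * down, this state acts as a speed-up for the single-running-vcpu case.
 */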
static void __disable_ibs_on_vcpu(struct kvm_vcpu *vcpu)
{
	kvm_check_request(KVM_REQ_ENABLE_IBS, vcpu);
	kvm_make_request(KVM_REQ_DISABLE_IBS, vcpu);
	exit_sie_sync(vcpu);
}

static void __disable_ibs_on_all_vcpus(struct kvm *kvm)
{
	unsigned int i;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(i, vcpu, kvm) {
		__disable_ibs_on_vcpu(vcpu);
	}
}

static void __enable_ibs_on_vcpu(struct kvm_vcpu *vcpu)
{
	kvm_check_request(KVM_REQ_DISABLE_IBS, vcpu);
	kvm_make_request(KVM_REQ_ENABLE_IBS, vcpu);
	exit_sie_sync(vcpu);
}

void kvm_s390_vcpu_start(struct kvm_vcpu *vcpu)
{
	int i, online_vcpus, started_vcpus = 0;

	if (!is_vcpu_stopped(vcpu))
		return;

	trace_kvm_s390_vcpu_start_stop(vcpu->vcpu_id, 1);
	/* Only one cpu at a time may enter/leave the STOPPED state. */
	spin_lock(&vcpu->kvm->arch.start_stop_lock);
	online_vcpus = atomic_read(&vcpu->kvm->online_vcpus);

	for (i = 0; i < online_vcpus; i++) {
		if (!is_vcpu_stopped(vcpu->kvm->vcpus[i]))
			started_vcpus++;
	}

	if (started_vcpus == 0) {
		/* we're the only active VCPU -> speed it up */
		__enable_ibs_on_vcpu(vcpu);
	} else if (started_vcpus == 1) {
		/*
		 * As we are starting a second VCPU, we have to disable
		 * the IBS facility on all VCPUs to remove potentially
		 * outstanding ENABLE requests.
		 */
		__disable_ibs_on_all_vcpus(vcpu->kvm);
	}

	atomic_clear_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);
	/*
	 * Another VCPU might have used IBS while we were offline.
	 * Let's play safe and flush the VCPU at startup.
	 */
	vcpu->arch.sie_block->ihcpu = 0xffff;
	spin_unlock(&vcpu->kvm->arch.start_stop_lock);
	return;
}

void kvm_s390_vcpu_stop(struct kvm_vcpu *vcpu)
{
	int i, online_vcpus, started_vcpus = 0;
	struct kvm_vcpu *started_vcpu = NULL;

	if (is_vcpu_stopped(vcpu))
		return;

	trace_kvm_s390_vcpu_start_stop(vcpu->vcpu_id, 0);
	/* Only one cpu at a time may enter/leave the STOPPED state. */
	spin_lock(&vcpu->kvm->arch.start_stop_lock);
	online_vcpus = atomic_read(&vcpu->kvm->online_vcpus);

	/* Need to lock access to action_bits to avoid a SIGP race condition */
	spin_lock(&vcpu->arch.local_int.lock);
	atomic_set_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);

	/* SIGP STOP and SIGP STOP AND STORE STATUS has been fully processed */
	vcpu->arch.local_int.action_bits &=
		~(ACTION_STOP_ON_STOP | ACTION_STORE_ON_STOP);
	spin_unlock(&vcpu->arch.local_int.lock);

	__disable_ibs_on_vcpu(vcpu);

	for (i = 0; i < online_vcpus; i++) {
		if (!is_vcpu_stopped(vcpu->kvm->vcpus[i])) {
			started_vcpus++;
			started_vcpu = vcpu->kvm->vcpus[i];
		}
	}

	if (started_vcpus == 1) {
		/*
		 * As we only have one VCPU left, we want to enable the
		 * IBS facility for that VCPU to speed it up.
		 */
		__enable_ibs_on_vcpu(started_vcpu);
	}

	spin_unlock(&vcpu->kvm->arch.start_stop_lock);
	return;
}

static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
				     struct kvm_enable_cap *cap)
{
	int r;

	if (cap->flags)
		return -EINVAL;

	switch (cap->cap) {
	case KVM_CAP_S390_CSS_SUPPORT:
		if (!vcpu->kvm->arch.css_support) {
			vcpu->kvm->arch.css_support = 1;
			trace_kvm_s390_enable_css(vcpu->kvm);
		}
		r = 0;
		break;
	default:
		r = -EINVAL;
		break;
	}
	return r;
}

long kvm_arch_vcpu_ioctl(struct file *filp,
			 unsigned int ioctl, unsigned long arg)
{
	struct kvm_vcpu *vcpu = filp->private_data;
	void __user *argp = (void __user *)arg;
	int idx;
	long r;

	switch (ioctl) {
	case KVM_S390_INTERRUPT: {
		struct kvm_s390_interrupt s390int;

		r = -EFAULT;
		if (copy_from_user(&s390int, argp, sizeof(s390int)))
			break;
		r = kvm_s390_inject_vcpu(vcpu, &s390int);
		break;
	}
	case KVM_S390_STORE_STATUS:
		idx = srcu_read_lock(&vcpu->kvm->srcu);
		r = kvm_s390_vcpu_store_status(vcpu, arg);
		srcu_read_unlock(&vcpu->kvm->srcu, idx);
		break;
	case KVM_S390_SET_INITIAL_PSW: {
		psw_t psw;

		r = -EFAULT;
		if (copy_from_user(&psw, argp, sizeof(psw)))
			break;
		r = kvm_arch_vcpu_ioctl_set_initial_psw(vcpu, psw);
		break;
	}
	case KVM_S390_INITIAL_RESET:
		r = kvm_arch_vcpu_ioctl_initial_reset(vcpu);
		break;
	case KVM_SET_ONE_REG:
	case KVM_GET_ONE_REG: {
		struct kvm_one_reg reg;

		r = -EFAULT;
		if (copy_from_user(&reg, argp, sizeof(reg)))
			break;
		if (ioctl == KVM_SET_ONE_REG)
			r = kvm_arch_vcpu_ioctl_set_one_reg(vcpu, &reg);
		else
			r = kvm_arch_vcpu_ioctl_get_one_reg(vcpu, &reg);
		break;
	}
#ifdef CONFIG_KVM_S390_UCONTROL
	case KVM_S390_UCAS_MAP: {
		struct kvm_s390_ucas_mapping ucasmap;

		if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
			r = -EFAULT;
			break;
		}

		if (!kvm_is_ucontrol(vcpu->kvm)) {
			r = -EINVAL;
			break;
		}

		r = gmap_map_segment(vcpu->arch.gmap, ucasmap.user_addr,
				     ucasmap.vcpu_addr, ucasmap.length);
		break;
	}
	case KVM_S390_UCAS_UNMAP: {
		struct kvm_s390_ucas_mapping ucasmap;

		if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
			r = -EFAULT;
			break;
		}

		if (!kvm_is_ucontrol(vcpu->kvm)) {
			r = -EINVAL;
			break;
		}

		r = gmap_unmap_segment(vcpu->arch.gmap, ucasmap.vcpu_addr,
				       ucasmap.length);
		break;
	}
#endif
	case KVM_S390_VCPU_FAULT: {
		r = gmap_fault(arg, vcpu->arch.gmap);
		if (!IS_ERR_VALUE(r))
			r = 0;
		break;
	}
	case KVM_ENABLE_CAP: {
		struct kvm_enable_cap cap;

		r = -EFAULT;
		if (copy_from_user(&cap, argp, sizeof(cap)))
			break;
		r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap);
		break;
	}
	default:
		r = -ENOTTY;
	}
	return r;
}

int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
{
#ifdef CONFIG_KVM_S390_UCONTROL
	if ((vmf->pgoff == KVM_S390_SIE_PAGE_OFFSET)
		 && (kvm_is_ucontrol(vcpu->kvm))) {
		vmf->page = virt_to_page(vcpu->arch.sie_block);
		get_page(vmf->page);
		return 0;
	}
#endif
	return VM_FAULT_SIGBUS;
}

void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free,
			   struct kvm_memory_slot *dont)
{
}

int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
			    unsigned long npages)
{
	return 0;
}

void kvm_arch_memslots_updated(struct kvm *kvm)
{
}

/* Section: memory related */
int kvm_arch_prepare_memory_region(struct kvm *kvm,
				   struct kvm_memory_slot *memslot,
				   struct kvm_userspace_memory_region *mem,
				   enum kvm_mr_change change)
{
	/*
	 * A few sanity checks: memory slots have to start and end on a
	 * segment boundary (1 MB). The memory in userland may be
	 * fragmented into various different vmas. It is okay to mmap()
	 * and munmap() stuff in this slot after doing this call at any
	 * time.
	 */

	if (mem->userspace_addr & 0xffffful)
		return -EINVAL;

	if (mem->memory_size & 0xffffful)
		return -EINVAL;

	return 0;
}

void kvm_arch_commit_memory_region(struct kvm *kvm,
				   struct kvm_userspace_memory_region *mem,
				   const struct kvm_memory_slot *old,
				   enum kvm_mr_change change)
{
	int rc;

	/* If the basics of the memslot do not change, we do not want
	 * to update the gmap. Every update causes several unnecessary
	 * segment translation exceptions. This is usually handled just
	 * fine by the normal fault handler + gmap, but it will also
	 * cause faults on the prefix page of running guest CPUs.
	 */
	if (old->userspace_addr == mem->userspace_addr &&
	    old->base_gfn * PAGE_SIZE == mem->guest_phys_addr &&
	    old->npages * PAGE_SIZE == mem->memory_size)
		return;

	rc = gmap_map_segment(kvm->arch.gmap, mem->userspace_addr,
			      mem->guest_phys_addr, mem->memory_size);
	if (rc)
		printk(KERN_WARNING "kvm-s390: failed to commit memory region\n");
	return;
}

void kvm_arch_flush_shadow_all(struct kvm *kvm)
{
}

void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
				   struct kvm_memory_slot *slot)
{
}

static int __init kvm_s390_init(void)
{
	int ret;

	ret = kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
	if (ret)
		return ret;

	/*
	 * guests can ask for up to 255+1 double words, we need a full page
	 * to hold the maximum amount of facilities. On the other hand, we
	 * only set facilities that are known to work in KVM.
	 */
	vfacilities = (unsigned long *) get_zeroed_page(GFP_KERNEL|GFP_DMA);
	if (!vfacilities) {
		kvm_exit();
		return -ENOMEM;
	}
	memcpy(vfacilities, S390_lowcore.stfle_fac_list, 16);
	vfacilities[0] &= 0xff82fff3f4fc2000UL;
	vfacilities[1] &= 0x005c000000000000UL;
	return 0;
}

static void __exit kvm_s390_exit(void)
{
	free_page((unsigned long) vfacilities);
	kvm_exit();
}

module_init(kvm_s390_init);
module_exit(kvm_s390_exit);

/*
 * Enable autoloading of the kvm module.
 * Note that we add the module alias here instead of virt/kvm/kvm_main.c
 * since x86 takes a different approach.
 */
#include <linux/miscdevice.h>
MODULE_ALIAS_MISCDEV(KVM_MINOR);
MODULE_ALIAS("devname:kvm");