/*
 * hosting zSeries kernel virtual machines
 *
 * Copyright IBM Corp. 2008, 2009
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (version 2 only)
 * as published by the Free Software Foundation.
 *
 * Author(s): Carsten Otte <cotte@de.ibm.com>
 *            Christian Borntraeger <borntraeger@de.ibm.com>
 *            Heiko Carstens <heiko.carstens@de.ibm.com>
 *            Christian Ehrhardt <ehrhardt@de.ibm.com>
 *            Jason J. Herne <jjherne@us.ibm.com>
 */

#include <linux/compiler.h>
#include <linux/err.h>
#include <linux/fs.h>
#include <linux/hrtimer.h>
#include <linux/init.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/timer.h>
#include <asm/asm-offsets.h>
#include <asm/lowcore.h>
#include <asm/pgtable.h>
#include <asm/nmi.h>
#include <asm/switch_to.h>
#include <asm/facility.h>
#include <asm/sclp.h>
#include "kvm-s390.h"
#include "gaccess.h"

#define CREATE_TRACE_POINTS
#include "trace.h"
#include "trace-s390.h"

#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU

struct kvm_stats_debugfs_item debugfs_entries[] = {
	{ "userspace_handled", VCPU_STAT(exit_userspace) },
	{ "exit_null", VCPU_STAT(exit_null) },
	{ "exit_validity", VCPU_STAT(exit_validity) },
	{ "exit_stop_request", VCPU_STAT(exit_stop_request) },
	{ "exit_external_request", VCPU_STAT(exit_external_request) },
	{ "exit_external_interrupt", VCPU_STAT(exit_external_interrupt) },
	{ "exit_instruction", VCPU_STAT(exit_instruction) },
	{ "exit_program_interruption", VCPU_STAT(exit_program_interruption) },
	{ "exit_instr_and_program_int", VCPU_STAT(exit_instr_and_program) },
	{ "instruction_lctlg", VCPU_STAT(instruction_lctlg) },
	{ "instruction_lctl", VCPU_STAT(instruction_lctl) },
	{ "instruction_stctl", VCPU_STAT(instruction_stctl) },
	{ "instruction_stctg", VCPU_STAT(instruction_stctg) },
	{ "deliver_emergency_signal", VCPU_STAT(deliver_emergency_signal) },
	{ "deliver_external_call", VCPU_STAT(deliver_external_call) },
	{ "deliver_service_signal", VCPU_STAT(deliver_service_signal) },
	{ "deliver_virtio_interrupt", VCPU_STAT(deliver_virtio_interrupt) },
	{ "deliver_stop_signal", VCPU_STAT(deliver_stop_signal) },
	{ "deliver_prefix_signal", VCPU_STAT(deliver_prefix_signal) },
	{ "deliver_restart_signal", VCPU_STAT(deliver_restart_signal) },
	{ "deliver_program_interruption", VCPU_STAT(deliver_program_int) },
	{ "exit_wait_state", VCPU_STAT(exit_wait_state) },
	{ "instruction_pfmf", VCPU_STAT(instruction_pfmf) },
	{ "instruction_stidp", VCPU_STAT(instruction_stidp) },
	{ "instruction_spx", VCPU_STAT(instruction_spx) },
	{ "instruction_stpx", VCPU_STAT(instruction_stpx) },
	{ "instruction_stap", VCPU_STAT(instruction_stap) },
	{ "instruction_storage_key", VCPU_STAT(instruction_storage_key) },
	{ "instruction_ipte_interlock", VCPU_STAT(instruction_ipte_interlock) },
	{ "instruction_stsch", VCPU_STAT(instruction_stsch) },
	{ "instruction_chsc", VCPU_STAT(instruction_chsc) },
	{ "instruction_essa", VCPU_STAT(instruction_essa) },
	{ "instruction_stsi", VCPU_STAT(instruction_stsi) },
	{ "instruction_stfl", VCPU_STAT(instruction_stfl) },
	{ "instruction_tprot", VCPU_STAT(instruction_tprot) },
	{ "instruction_sigp_sense", VCPU_STAT(instruction_sigp_sense) },
	{ "instruction_sigp_sense_running", VCPU_STAT(instruction_sigp_sense_running) },
	{ "instruction_sigp_external_call", VCPU_STAT(instruction_sigp_external_call) },
	{ "instruction_sigp_emergency", VCPU_STAT(instruction_sigp_emergency) },
	{ "instruction_sigp_stop", VCPU_STAT(instruction_sigp_stop) },
	{ "instruction_sigp_set_arch", VCPU_STAT(instruction_sigp_arch) },
	{ "instruction_sigp_set_prefix", VCPU_STAT(instruction_sigp_prefix) },
	{ "instruction_sigp_restart", VCPU_STAT(instruction_sigp_restart) },
	{ "diagnose_10", VCPU_STAT(diagnose_10) },
	{ "diagnose_44", VCPU_STAT(diagnose_44) },
	{ "diagnose_9c", VCPU_STAT(diagnose_9c) },
	{ NULL }
};
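
/*
 * Illustrative sketch (not part of the original file): each entry above maps
 * a debugfs file name to a counter in the vcpu stat structure, and code in
 * this file bumps the counters directly, e.g. on return to userspace:
 *
 *	vcpu->stat.exit_userspace++;
 */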

unsigned long *vfacilities;
static struct gmap_notifier gmap_notifier;

/* test availability of vfacility */
int test_vfacility(unsigned long nr)
{
	return __test_facility(nr, (void *) vfacilities);
}
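
/*
 * Example usage (sketch, mirroring kvm_arch_vcpu_setup() below): guest
 * features are only switched on when the corresponding host facility bits
 * survived the masking into vfacilities at module init:
 *
 *	if (test_vfacility(50) && test_vfacility(73))
 *		vcpu->arch.sie_block->ecb |= 0x10;
 */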

/* Section: not file related */
int kvm_arch_hardware_enable(void *garbage)
{
	/* every s390 is virtualization enabled ;-) */
	return 0;
}

void kvm_arch_hardware_disable(void *garbage)
{
}

static void kvm_gmap_notifier(struct gmap *gmap, unsigned long address);

int kvm_arch_hardware_setup(void)
{
	gmap_notifier.notifier_call = kvm_gmap_notifier;
	gmap_register_ipte_notifier(&gmap_notifier);
	return 0;
}

void kvm_arch_hardware_unsetup(void)
{
	gmap_unregister_ipte_notifier(&gmap_notifier);
}

void kvm_arch_check_processor_compat(void *rtn)
{
}

int kvm_arch_init(void *opaque)
{
	return 0;
}

void kvm_arch_exit(void)
{
}

/* Section: device related */
long kvm_arch_dev_ioctl(struct file *filp,
			unsigned int ioctl, unsigned long arg)
{
	if (ioctl == KVM_S390_ENABLE_SIE)
		return s390_enable_sie();
	return -EINVAL;
}

int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
{
	int r;

	switch (ext) {
	case KVM_CAP_S390_PSW:
	case KVM_CAP_S390_GMAP:
	case KVM_CAP_SYNC_MMU:
#ifdef CONFIG_KVM_S390_UCONTROL
	case KVM_CAP_S390_UCONTROL:
#endif
	case KVM_CAP_ASYNC_PF:
	case KVM_CAP_SYNC_REGS:
	case KVM_CAP_ONE_REG:
	case KVM_CAP_ENABLE_CAP:
	case KVM_CAP_S390_CSS_SUPPORT:
	case KVM_CAP_IRQFD:
	case KVM_CAP_IOEVENTFD:
	case KVM_CAP_DEVICE_CTRL:
	case KVM_CAP_ENABLE_CAP_VM:
	case KVM_CAP_S390_IRQCHIP:
	case KVM_CAP_VM_ATTRIBUTES:
	case KVM_CAP_MP_STATE:
		r = 1;
		break;
	case KVM_CAP_NR_VCPUS:
	case KVM_CAP_MAX_VCPUS:
		r = KVM_MAX_VCPUS;
		break;
	case KVM_CAP_NR_MEMSLOTS:
		r = KVM_USER_MEM_SLOTS;
		break;
	case KVM_CAP_S390_COW:
		r = MACHINE_HAS_ESOP;
		break;
	default:
		r = 0;
	}
	return r;
}

static void kvm_s390_sync_dirty_log(struct kvm *kvm,
				    struct kvm_memory_slot *memslot)
{
	gfn_t cur_gfn, last_gfn;
	unsigned long address;
	struct gmap *gmap = kvm->arch.gmap;

	down_read(&gmap->mm->mmap_sem);
	/* Loop over all guest pages */
	last_gfn = memslot->base_gfn + memslot->npages;
	for (cur_gfn = memslot->base_gfn; cur_gfn <= last_gfn; cur_gfn++) {
		address = gfn_to_hva_memslot(memslot, cur_gfn);

		if (gmap_test_and_clear_dirty(address, gmap))
			mark_page_dirty(kvm, cur_gfn);
	}
	up_read(&gmap->mm->mmap_sem);
}

/* Section: vm related */
/*
 * Get (and clear) the dirty memory log for a memory slot.
 */
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
			       struct kvm_dirty_log *log)
{
	int r;
	unsigned long n;
	struct kvm_memory_slot *memslot;
	int is_dirty = 0;

	mutex_lock(&kvm->slots_lock);

	r = -EINVAL;
	if (log->slot >= KVM_USER_MEM_SLOTS)
		goto out;

	memslot = id_to_memslot(kvm->memslots, log->slot);
	r = -ENOENT;
	if (!memslot->dirty_bitmap)
		goto out;

	kvm_s390_sync_dirty_log(kvm, memslot);
	r = kvm_get_dirty_log(kvm, log, &is_dirty);
	if (r)
		goto out;

	/* Clear the dirty log */
	if (is_dirty) {
		n = kvm_dirty_bitmap_bytes(memslot);
		memset(memslot->dirty_bitmap, 0, n);
	}
	r = 0;
out:
	mutex_unlock(&kvm->slots_lock);
	return r;
}

static int kvm_vm_ioctl_enable_cap(struct kvm *kvm, struct kvm_enable_cap *cap)
{
	int r;

	if (cap->flags)
		return -EINVAL;

	switch (cap->cap) {
	case KVM_CAP_S390_IRQCHIP:
		kvm->arch.use_irqchip = 1;
		r = 0;
		break;
	default:
		r = -EINVAL;
		break;
	}
	return r;
}
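
/*
 * Userspace sketch (illustrative, assumes a VM fd obtained via
 * KVM_CREATE_VM and error handling via err(3)): enabling the in-kernel
 * irqchip ends up in kvm_vm_ioctl_enable_cap() above through the
 * KVM_ENABLE_CAP case of kvm_arch_vm_ioctl():
 *
 *	struct kvm_enable_cap cap = { .cap = KVM_CAP_S390_IRQCHIP };
 *
 *	if (ioctl(vm_fd, KVM_ENABLE_CAP, &cap) < 0)
 *		err(1, "KVM_ENABLE_CAP");
 */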

static int kvm_s390_mem_control(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;
	unsigned int idx;
	switch (attr->attr) {
	case KVM_S390_VM_MEM_ENABLE_CMMA:
		ret = -EBUSY;
		mutex_lock(&kvm->lock);
		if (atomic_read(&kvm->online_vcpus) == 0) {
			kvm->arch.use_cmma = 1;
			ret = 0;
		}
		mutex_unlock(&kvm->lock);
		break;
	case KVM_S390_VM_MEM_CLR_CMMA:
		mutex_lock(&kvm->lock);
		idx = srcu_read_lock(&kvm->srcu);
		page_table_reset_pgste(kvm->arch.gmap->mm, 0, TASK_SIZE, false);
		srcu_read_unlock(&kvm->srcu, idx);
		mutex_unlock(&kvm->lock);
		ret = 0;
		break;
	default:
		ret = -ENXIO;
		break;
	}
	return ret;
}
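
/*
 * Userspace sketch (illustrative, vm_fd and err(3) assumed as above): the
 * CMMA controls are reached through the VM device-attribute interface, and
 * CMMA can only be enabled while no VCPU exists yet (otherwise -EBUSY):
 *
 *	struct kvm_device_attr attr = {
 *		.group = KVM_S390_VM_MEM_CTRL,
 *		.attr  = KVM_S390_VM_MEM_ENABLE_CMMA,
 *	};
 *
 *	if (ioctl(vm_fd, KVM_SET_DEVICE_ATTR, &attr) < 0)
 *		err(1, "KVM_SET_DEVICE_ATTR");
 */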

static int kvm_s390_vm_set_attr(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	switch (attr->group) {
	case KVM_S390_VM_MEM_CTRL:
		ret = kvm_s390_mem_control(kvm, attr);
		break;
	default:
		ret = -ENXIO;
		break;
	}

	return ret;
}

static int kvm_s390_vm_get_attr(struct kvm *kvm, struct kvm_device_attr *attr)
{
	return -ENXIO;
}

static int kvm_s390_vm_has_attr(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	switch (attr->group) {
	case KVM_S390_VM_MEM_CTRL:
		switch (attr->attr) {
		case KVM_S390_VM_MEM_ENABLE_CMMA:
		case KVM_S390_VM_MEM_CLR_CMMA:
			ret = 0;
			break;
		default:
			ret = -ENXIO;
			break;
		}
		break;
	default:
		ret = -ENXIO;
		break;
	}

	return ret;
}

long kvm_arch_vm_ioctl(struct file *filp,
		       unsigned int ioctl, unsigned long arg)
{
	struct kvm *kvm = filp->private_data;
	void __user *argp = (void __user *)arg;
	struct kvm_device_attr attr;
	int r;

	switch (ioctl) {
	case KVM_S390_INTERRUPT: {
		struct kvm_s390_interrupt s390int;

		r = -EFAULT;
		if (copy_from_user(&s390int, argp, sizeof(s390int)))
			break;
		r = kvm_s390_inject_vm(kvm, &s390int);
		break;
	}
	case KVM_ENABLE_CAP: {
		struct kvm_enable_cap cap;
		r = -EFAULT;
		if (copy_from_user(&cap, argp, sizeof(cap)))
			break;
		r = kvm_vm_ioctl_enable_cap(kvm, &cap);
		break;
	}
	case KVM_CREATE_IRQCHIP: {
		struct kvm_irq_routing_entry routing;

		r = -EINVAL;
		if (kvm->arch.use_irqchip) {
			/* Set up dummy routing. */
			memset(&routing, 0, sizeof(routing));
			kvm_set_irq_routing(kvm, &routing, 0, 0);
			r = 0;
		}
		break;
	}
	case KVM_SET_DEVICE_ATTR: {
		r = -EFAULT;
		if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
			break;
		r = kvm_s390_vm_set_attr(kvm, &attr);
		break;
	}
	case KVM_GET_DEVICE_ATTR: {
		r = -EFAULT;
		if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
			break;
		r = kvm_s390_vm_get_attr(kvm, &attr);
		break;
	}
	case KVM_HAS_DEVICE_ATTR: {
		r = -EFAULT;
		if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
			break;
		r = kvm_s390_vm_has_attr(kvm, &attr);
		break;
	}
	default:
		r = -ENOTTY;
	}

	return r;
}

int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
{
	int rc;
	char debug_name[16];
	static unsigned long sca_offset;

	rc = -EINVAL;
#ifdef CONFIG_KVM_S390_UCONTROL
	if (type & ~KVM_VM_S390_UCONTROL)
		goto out_err;
	if ((type & KVM_VM_S390_UCONTROL) && (!capable(CAP_SYS_ADMIN)))
		goto out_err;
#else
	if (type)
		goto out_err;
#endif

	rc = s390_enable_sie();
	if (rc)
		goto out_err;

	rc = -ENOMEM;

	kvm->arch.sca = (struct sca_block *) get_zeroed_page(GFP_KERNEL);
	if (!kvm->arch.sca)
		goto out_err;
	spin_lock(&kvm_lock);
	sca_offset = (sca_offset + 16) & 0x7f0;
	kvm->arch.sca = (struct sca_block *) ((char *) kvm->arch.sca + sca_offset);
	spin_unlock(&kvm_lock);

	sprintf(debug_name, "kvm-%u", current->pid);

	kvm->arch.dbf = debug_register(debug_name, 8, 2, 8 * sizeof(long));
	if (!kvm->arch.dbf)
		goto out_nodbf;

	spin_lock_init(&kvm->arch.float_int.lock);
	INIT_LIST_HEAD(&kvm->arch.float_int.list);
	init_waitqueue_head(&kvm->arch.ipte_wq);

	debug_register_view(kvm->arch.dbf, &debug_sprintf_view);
	VM_EVENT(kvm, 3, "%s", "vm created");

	if (type & KVM_VM_S390_UCONTROL) {
		kvm->arch.gmap = NULL;
	} else {
		kvm->arch.gmap = gmap_alloc(current->mm);
		if (!kvm->arch.gmap)
			goto out_nogmap;
		kvm->arch.gmap->private = kvm;
		kvm->arch.gmap->pfault_enabled = 0;
	}

	kvm->arch.css_support = 0;
	kvm->arch.use_irqchip = 0;

	spin_lock_init(&kvm->arch.start_stop_lock);

	return 0;
out_nogmap:
	debug_unregister(kvm->arch.dbf);
out_nodbf:
	free_page((unsigned long)(kvm->arch.sca));
out_err:
	return rc;
}

void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{
	VCPU_EVENT(vcpu, 3, "%s", "free cpu");
	trace_kvm_s390_destroy_vcpu(vcpu->vcpu_id);
	kvm_s390_clear_local_irqs(vcpu);
	kvm_clear_async_pf_completion_queue(vcpu);
	if (!kvm_is_ucontrol(vcpu->kvm)) {
		clear_bit(63 - vcpu->vcpu_id,
			  (unsigned long *) &vcpu->kvm->arch.sca->mcn);
		if (vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda ==
		    (__u64) vcpu->arch.sie_block)
			vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda = 0;
	}
	smp_mb();

	if (kvm_is_ucontrol(vcpu->kvm))
		gmap_free(vcpu->arch.gmap);

	if (kvm_s390_cmma_enabled(vcpu->kvm))
		kvm_s390_vcpu_unsetup_cmma(vcpu);
	free_page((unsigned long)(vcpu->arch.sie_block));

	kvm_vcpu_uninit(vcpu);
	kmem_cache_free(kvm_vcpu_cache, vcpu);
}

static void kvm_free_vcpus(struct kvm *kvm)
{
	unsigned int i;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(i, vcpu, kvm)
		kvm_arch_vcpu_destroy(vcpu);

	mutex_lock(&kvm->lock);
	for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
		kvm->vcpus[i] = NULL;

	atomic_set(&kvm->online_vcpus, 0);
	mutex_unlock(&kvm->lock);
}

void kvm_arch_sync_events(struct kvm *kvm)
{
}

void kvm_arch_destroy_vm(struct kvm *kvm)
{
	kvm_free_vcpus(kvm);
	free_page((unsigned long)(kvm->arch.sca));
	debug_unregister(kvm->arch.dbf);
	if (!kvm_is_ucontrol(kvm))
		gmap_free(kvm->arch.gmap);
	kvm_s390_destroy_adapters(kvm);
	kvm_s390_clear_float_irqs(kvm);
}

/* Section: vcpu related */
int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
{
	vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
	kvm_clear_async_pf_completion_queue(vcpu);
	if (kvm_is_ucontrol(vcpu->kvm)) {
		vcpu->arch.gmap = gmap_alloc(current->mm);
		if (!vcpu->arch.gmap)
			return -ENOMEM;
		vcpu->arch.gmap->private = vcpu->kvm;
		return 0;
	}

	vcpu->arch.gmap = vcpu->kvm->arch.gmap;
	vcpu->run->kvm_valid_regs = KVM_SYNC_PREFIX |
				    KVM_SYNC_GPRS |
				    KVM_SYNC_ACRS |
				    KVM_SYNC_CRS |
				    KVM_SYNC_ARCH0 |
				    KVM_SYNC_PFAULT;
	return 0;
}

void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
{
	/* Nothing to do */
}

void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	save_fp_ctl(&vcpu->arch.host_fpregs.fpc);
	save_fp_regs(vcpu->arch.host_fpregs.fprs);
	save_access_regs(vcpu->arch.host_acrs);
	restore_fp_ctl(&vcpu->arch.guest_fpregs.fpc);
	restore_fp_regs(vcpu->arch.guest_fpregs.fprs);
	restore_access_regs(vcpu->run->s.regs.acrs);
	gmap_enable(vcpu->arch.gmap);
	atomic_set_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
}

void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
	atomic_clear_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
	gmap_disable(vcpu->arch.gmap);
	save_fp_ctl(&vcpu->arch.guest_fpregs.fpc);
	save_fp_regs(vcpu->arch.guest_fpregs.fprs);
	save_access_regs(vcpu->run->s.regs.acrs);
	restore_fp_ctl(&vcpu->arch.host_fpregs.fpc);
	restore_fp_regs(vcpu->arch.host_fpregs.fprs);
	restore_access_regs(vcpu->arch.host_acrs);
}

static void kvm_s390_vcpu_initial_reset(struct kvm_vcpu *vcpu)
{
	/* this equals initial cpu reset in pop, but we don't switch to ESA */
	vcpu->arch.sie_block->gpsw.mask = 0UL;
	vcpu->arch.sie_block->gpsw.addr = 0UL;
	kvm_s390_set_prefix(vcpu, 0);
	vcpu->arch.sie_block->cputm = 0UL;
	vcpu->arch.sie_block->ckc = 0UL;
	vcpu->arch.sie_block->todpr = 0;
	memset(vcpu->arch.sie_block->gcr, 0, 16 * sizeof(__u64));
	vcpu->arch.sie_block->gcr[0] = 0xE0UL;
	vcpu->arch.sie_block->gcr[14] = 0xC2000000UL;
	vcpu->arch.guest_fpregs.fpc = 0;
	asm volatile("lfpc %0" : : "Q" (vcpu->arch.guest_fpregs.fpc));
	vcpu->arch.sie_block->gbea = 1;
	vcpu->arch.sie_block->pp = 0;
	vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
	kvm_clear_async_pf_completion_queue(vcpu);
	if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm))
		kvm_s390_vcpu_stop(vcpu);
	kvm_s390_clear_local_irqs(vcpu);
}

int kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
{
	return 0;
}

void kvm_s390_vcpu_unsetup_cmma(struct kvm_vcpu *vcpu)
{
	free_page(vcpu->arch.sie_block->cbrlo);
	vcpu->arch.sie_block->cbrlo = 0;
}

int kvm_s390_vcpu_setup_cmma(struct kvm_vcpu *vcpu)
{
	vcpu->arch.sie_block->cbrlo = get_zeroed_page(GFP_KERNEL);
	if (!vcpu->arch.sie_block->cbrlo)
		return -ENOMEM;

	vcpu->arch.sie_block->ecb2 |= 0x80;
	vcpu->arch.sie_block->ecb2 &= ~0x08;
	return 0;
}

int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
{
	int rc = 0;

	atomic_set(&vcpu->arch.sie_block->cpuflags, CPUSTAT_ZARCH |
						    CPUSTAT_SM |
						    CPUSTAT_STOPPED |
						    CPUSTAT_GED);
	vcpu->arch.sie_block->ecb = 6;
	if (test_vfacility(50) && test_vfacility(73))
		vcpu->arch.sie_block->ecb |= 0x10;

	vcpu->arch.sie_block->ecb2 = 8;
	vcpu->arch.sie_block->eca = 0xD1002000U;
	if (sclp_has_siif())
		vcpu->arch.sie_block->eca |= 1;
	vcpu->arch.sie_block->fac = (int) (long) vfacilities;
	vcpu->arch.sie_block->ictl |= ICTL_ISKE | ICTL_SSKE | ICTL_RRBE |
				      ICTL_TPROT;

	if (kvm_s390_cmma_enabled(vcpu->kvm)) {
		rc = kvm_s390_vcpu_setup_cmma(vcpu);
		if (rc)
			return rc;
	}
	hrtimer_init(&vcpu->arch.ckc_timer, CLOCK_REALTIME, HRTIMER_MODE_ABS);
	vcpu->arch.ckc_timer.function = kvm_s390_idle_wakeup;
	get_cpu_id(&vcpu->arch.cpu_id);
	vcpu->arch.cpu_id.version = 0xff;
	return rc;
}

struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
				      unsigned int id)
{
	struct kvm_vcpu *vcpu;
	struct sie_page *sie_page;
	int rc = -EINVAL;

	if (id >= KVM_MAX_VCPUS)
		goto out;

	rc = -ENOMEM;

	vcpu = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
	if (!vcpu)
		goto out;

	sie_page = (struct sie_page *) get_zeroed_page(GFP_KERNEL);
	if (!sie_page)
		goto out_free_cpu;

	vcpu->arch.sie_block = &sie_page->sie_block;
	vcpu->arch.sie_block->itdba = (unsigned long) &sie_page->itdb;

	vcpu->arch.sie_block->icpua = id;
	if (!kvm_is_ucontrol(kvm)) {
		if (!kvm->arch.sca) {
			WARN_ON_ONCE(1);
			goto out_free_cpu;
		}
		if (!kvm->arch.sca->cpu[id].sda)
			kvm->arch.sca->cpu[id].sda =
				(__u64) vcpu->arch.sie_block;
		vcpu->arch.sie_block->scaoh =
			(__u32)(((__u64)kvm->arch.sca) >> 32);
		vcpu->arch.sie_block->scaol = (__u32)(__u64)kvm->arch.sca;
		set_bit(63 - id, (unsigned long *) &kvm->arch.sca->mcn);
	}

	spin_lock_init(&vcpu->arch.local_int.lock);
	INIT_LIST_HEAD(&vcpu->arch.local_int.list);
	vcpu->arch.local_int.float_int = &kvm->arch.float_int;
	vcpu->arch.local_int.wq = &vcpu->wq;
	vcpu->arch.local_int.cpuflags = &vcpu->arch.sie_block->cpuflags;

	rc = kvm_vcpu_init(vcpu, kvm, id);
	if (rc)
		goto out_free_sie_block;
	VM_EVENT(kvm, 3, "create cpu %d at %p, sie block at %p", id, vcpu,
		 vcpu->arch.sie_block);
	trace_kvm_s390_create_vcpu(id, vcpu, vcpu->arch.sie_block);

	return vcpu;
out_free_sie_block:
	free_page((unsigned long)(vcpu->arch.sie_block));
out_free_cpu:
	kmem_cache_free(kvm_vcpu_cache, vcpu);
out:
	return ERR_PTR(rc);
}

int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
{
	return kvm_cpu_has_interrupt(vcpu);
}

void s390_vcpu_block(struct kvm_vcpu *vcpu)
{
	atomic_set_mask(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
}

void s390_vcpu_unblock(struct kvm_vcpu *vcpu)
{
	atomic_clear_mask(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
}

/*
 * Kick a guest cpu out of SIE and wait until SIE is not running.
 * If the CPU is not running (e.g. waiting as idle) the function will
 * return immediately.
 */
void exit_sie(struct kvm_vcpu *vcpu)
{
	atomic_set_mask(CPUSTAT_STOP_INT, &vcpu->arch.sie_block->cpuflags);
	while (vcpu->arch.sie_block->prog0c & PROG_IN_SIE)
		cpu_relax();
}

/* Kick a guest cpu out of SIE and prevent SIE-reentry */
void exit_sie_sync(struct kvm_vcpu *vcpu)
{
	s390_vcpu_block(vcpu);
	exit_sie(vcpu);
}
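
/*
 * Typical pattern (sketch): a request is queued on the VCPU and the VCPU is
 * then kicked out of SIE so that it re-enters the request loop, exactly as
 * kvm_gmap_notifier() below does for prefix-page invalidations:
 *
 *	kvm_make_request(KVM_REQ_MMU_RELOAD, vcpu);
 *	exit_sie_sync(vcpu);
 *
 * The blocked VCPU is released again in kvm_s390_handle_requests() via
 * s390_vcpu_unblock().
 */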

static void kvm_gmap_notifier(struct gmap *gmap, unsigned long address)
{
	int i;
	struct kvm *kvm = gmap->private;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(i, vcpu, kvm) {
		/* match against both prefix pages */
		if (kvm_s390_get_prefix(vcpu) == (address & ~0x1000UL)) {
			VCPU_EVENT(vcpu, 2, "gmap notifier for %lx", address);
			kvm_make_request(KVM_REQ_MMU_RELOAD, vcpu);
			exit_sie_sync(vcpu);
		}
	}
}

int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
{
	/* kvm common code refers to this, but never calls it */
	BUG();
	return 0;
}

static int kvm_arch_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu,
					   struct kvm_one_reg *reg)
{
	int r = -EINVAL;

	switch (reg->id) {
	case KVM_REG_S390_TODPR:
		r = put_user(vcpu->arch.sie_block->todpr,
			     (u32 __user *)reg->addr);
		break;
	case KVM_REG_S390_EPOCHDIFF:
		r = put_user(vcpu->arch.sie_block->epoch,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_CPU_TIMER:
		r = put_user(vcpu->arch.sie_block->cputm,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_CLOCK_COMP:
		r = put_user(vcpu->arch.sie_block->ckc,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PFTOKEN:
		r = put_user(vcpu->arch.pfault_token,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PFCOMPARE:
		r = put_user(vcpu->arch.pfault_compare,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PFSELECT:
		r = put_user(vcpu->arch.pfault_select,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PP:
		r = put_user(vcpu->arch.sie_block->pp,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_GBEA:
		r = put_user(vcpu->arch.sie_block->gbea,
			     (u64 __user *)reg->addr);
		break;
	default:
		break;
	}

	return r;
}

static int kvm_arch_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu,
					   struct kvm_one_reg *reg)
{
	int r = -EINVAL;

	switch (reg->id) {
	case KVM_REG_S390_TODPR:
		r = get_user(vcpu->arch.sie_block->todpr,
			     (u32 __user *)reg->addr);
		break;
	case KVM_REG_S390_EPOCHDIFF:
		r = get_user(vcpu->arch.sie_block->epoch,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_CPU_TIMER:
		r = get_user(vcpu->arch.sie_block->cputm,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_CLOCK_COMP:
		r = get_user(vcpu->arch.sie_block->ckc,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PFTOKEN:
		r = get_user(vcpu->arch.pfault_token,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PFCOMPARE:
		r = get_user(vcpu->arch.pfault_compare,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PFSELECT:
		r = get_user(vcpu->arch.pfault_select,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PP:
		r = get_user(vcpu->arch.sie_block->pp,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_GBEA:
		r = get_user(vcpu->arch.sie_block->gbea,
			     (u64 __user *)reg->addr);
		break;
	default:
		break;
	}

	return r;
}

static int kvm_arch_vcpu_ioctl_initial_reset(struct kvm_vcpu *vcpu)
{
	kvm_s390_vcpu_initial_reset(vcpu);
	return 0;
}

int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	memcpy(&vcpu->run->s.regs.gprs, &regs->gprs, sizeof(regs->gprs));
	return 0;
}

int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	memcpy(&regs->gprs, &vcpu->run->s.regs.gprs, sizeof(regs->gprs));
	return 0;
}

int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	memcpy(&vcpu->run->s.regs.acrs, &sregs->acrs, sizeof(sregs->acrs));
	memcpy(&vcpu->arch.sie_block->gcr, &sregs->crs, sizeof(sregs->crs));
	restore_access_regs(vcpu->run->s.regs.acrs);
	return 0;
}

int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	memcpy(&sregs->acrs, &vcpu->run->s.regs.acrs, sizeof(sregs->acrs));
	memcpy(&sregs->crs, &vcpu->arch.sie_block->gcr, sizeof(sregs->crs));
	return 0;
}

int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	if (test_fp_ctl(fpu->fpc))
		return -EINVAL;
	memcpy(&vcpu->arch.guest_fpregs.fprs, &fpu->fprs, sizeof(fpu->fprs));
	vcpu->arch.guest_fpregs.fpc = fpu->fpc;
	restore_fp_ctl(&vcpu->arch.guest_fpregs.fpc);
	restore_fp_regs(vcpu->arch.guest_fpregs.fprs);
	return 0;
}

int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	memcpy(&fpu->fprs, &vcpu->arch.guest_fpregs.fprs, sizeof(fpu->fprs));
	fpu->fpc = vcpu->arch.guest_fpregs.fpc;
	return 0;
}

static int kvm_arch_vcpu_ioctl_set_initial_psw(struct kvm_vcpu *vcpu, psw_t psw)
{
	int rc = 0;

	if (!is_vcpu_stopped(vcpu))
		rc = -EBUSY;
	else {
		vcpu->run->psw_mask = psw.mask;
		vcpu->run->psw_addr = psw.addr;
	}
	return rc;
}

int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
				  struct kvm_translation *tr)
{
	return -EINVAL; /* not implemented yet */
}

#define VALID_GUESTDBG_FLAGS (KVM_GUESTDBG_SINGLESTEP | \
			      KVM_GUESTDBG_USE_HW_BP | \
			      KVM_GUESTDBG_ENABLE)

int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
					struct kvm_guest_debug *dbg)
{
	int rc = 0;

	vcpu->guest_debug = 0;
	kvm_s390_clear_bp_data(vcpu);

	if (dbg->control & ~VALID_GUESTDBG_FLAGS)
		return -EINVAL;

	if (dbg->control & KVM_GUESTDBG_ENABLE) {
		vcpu->guest_debug = dbg->control;
		/* enforce guest PER */
		atomic_set_mask(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags);

		if (dbg->control & KVM_GUESTDBG_USE_HW_BP)
			rc = kvm_s390_import_bp_data(vcpu, dbg);
	} else {
		atomic_clear_mask(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags);
		vcpu->arch.guestdbg.last_bp = 0;
	}

	if (rc) {
		vcpu->guest_debug = 0;
		kvm_s390_clear_bp_data(vcpu);
		atomic_clear_mask(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags);
	}

	return rc;
}

int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	/* CHECK_STOP and LOAD are not supported yet */
	return is_vcpu_stopped(vcpu) ? KVM_MP_STATE_STOPPED :
				       KVM_MP_STATE_OPERATING;
}

int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	int rc = 0;

	/* user space knows about this interface - let it control the state */
	vcpu->kvm->arch.user_cpu_state_ctrl = 1;

	switch (mp_state->mp_state) {
	case KVM_MP_STATE_STOPPED:
		kvm_s390_vcpu_stop(vcpu);
		break;
	case KVM_MP_STATE_OPERATING:
		kvm_s390_vcpu_start(vcpu);
		break;
	case KVM_MP_STATE_LOAD:
	case KVM_MP_STATE_CHECK_STOP:
		/* fall through - CHECK_STOP and LOAD are not supported yet */
	default:
		rc = -ENXIO;
	}

	return rc;
}

bool kvm_s390_cmma_enabled(struct kvm *kvm)
{
	if (!MACHINE_IS_LPAR)
		return false;
	/* only enable for z10 and later */
	if (!MACHINE_HAS_EDAT1)
		return false;
	if (!kvm->arch.use_cmma)
		return false;
	return true;
}

static bool ibs_enabled(struct kvm_vcpu *vcpu)
{
	return atomic_read(&vcpu->arch.sie_block->cpuflags) & CPUSTAT_IBS;
}

static int kvm_s390_handle_requests(struct kvm_vcpu *vcpu)
{
retry:
	s390_vcpu_unblock(vcpu);
	/*
	 * We use MMU_RELOAD just to re-arm the ipte notifier for the
	 * guest prefix page. gmap_ipte_notify will wait on the ptl lock.
	 * This ensures that the ipte instruction for this request has
	 * already finished. We might race against a second unmapper that
	 * wants to set the blocking bit. Let's just retry the request loop.
	 */
	if (kvm_check_request(KVM_REQ_MMU_RELOAD, vcpu)) {
		int rc;
		rc = gmap_ipte_notify(vcpu->arch.gmap,
				      kvm_s390_get_prefix(vcpu),
				      PAGE_SIZE * 2);
		if (rc)
			return rc;
		goto retry;
	}

	if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu)) {
		vcpu->arch.sie_block->ihcpu = 0xffff;
		goto retry;
	}

	if (kvm_check_request(KVM_REQ_ENABLE_IBS, vcpu)) {
		if (!ibs_enabled(vcpu)) {
			trace_kvm_s390_enable_disable_ibs(vcpu->vcpu_id, 1);
			atomic_set_mask(CPUSTAT_IBS,
					&vcpu->arch.sie_block->cpuflags);
		}
		goto retry;
	}

	if (kvm_check_request(KVM_REQ_DISABLE_IBS, vcpu)) {
		if (ibs_enabled(vcpu)) {
			trace_kvm_s390_enable_disable_ibs(vcpu->vcpu_id, 0);
			atomic_clear_mask(CPUSTAT_IBS,
					  &vcpu->arch.sie_block->cpuflags);
		}
		goto retry;
	}

	/* nothing to do, just clear the request */
	clear_bit(KVM_REQ_UNHALT, &vcpu->requests);

	return 0;
}

/**
 * kvm_arch_fault_in_page - fault-in guest page if necessary
 * @vcpu: The corresponding virtual cpu
 * @gpa: Guest physical address
 * @writable: Whether the page should be writable or not
 *
 * Make sure that a guest page has been faulted-in on the host.
 *
 * Return: Zero on success, negative error code otherwise.
 */
long kvm_arch_fault_in_page(struct kvm_vcpu *vcpu, gpa_t gpa, int writable)
{
	struct mm_struct *mm = current->mm;
	hva_t hva;
	long rc;

	hva = gmap_fault(vcpu->arch.gmap, gpa);
	if (IS_ERR_VALUE(hva))
		return (long)hva;
	down_read(&mm->mmap_sem);
	rc = get_user_pages(current, mm, hva, 1, writable, 0, NULL, NULL);
	up_read(&mm->mmap_sem);

	return rc < 0 ? rc : 0;
}
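
/*
 * Example caller (sketch, see vcpu_post_run() below): after a guest pfault
 * that could not be handled asynchronously, the page is faulted in
 * synchronously before SIE is re-entered:
 *
 *	gpa_t gpa = current->thread.gmap_addr;
 *
 *	rc = kvm_arch_fault_in_page(vcpu, gpa, 1);
 */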

static void __kvm_inject_pfault_token(struct kvm_vcpu *vcpu, bool start_token,
				      unsigned long token)
{
	struct kvm_s390_interrupt inti;
	inti.parm64 = token;

	if (start_token) {
		inti.type = KVM_S390_INT_PFAULT_INIT;
		WARN_ON_ONCE(kvm_s390_inject_vcpu(vcpu, &inti));
	} else {
		inti.type = KVM_S390_INT_PFAULT_DONE;
		WARN_ON_ONCE(kvm_s390_inject_vm(vcpu->kvm, &inti));
	}
}

void kvm_arch_async_page_not_present(struct kvm_vcpu *vcpu,
				     struct kvm_async_pf *work)
{
	trace_kvm_s390_pfault_init(vcpu, work->arch.pfault_token);
	__kvm_inject_pfault_token(vcpu, true, work->arch.pfault_token);
}

void kvm_arch_async_page_present(struct kvm_vcpu *vcpu,
				 struct kvm_async_pf *work)
{
	trace_kvm_s390_pfault_done(vcpu, work->arch.pfault_token);
	__kvm_inject_pfault_token(vcpu, false, work->arch.pfault_token);
}

void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu,
			       struct kvm_async_pf *work)
{
	/* s390 will always inject the page directly */
}

bool kvm_arch_can_inject_async_page_present(struct kvm_vcpu *vcpu)
{
	/*
	 * s390 will always inject the page directly,
	 * but we still want check_async_completion to cleanup
	 */
	return true;
}

static int kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu)
{
	hva_t hva;
	struct kvm_arch_async_pf arch;
	int rc;

	if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
		return 0;
	if ((vcpu->arch.sie_block->gpsw.mask & vcpu->arch.pfault_select) !=
	    vcpu->arch.pfault_compare)
		return 0;
	if (psw_extint_disabled(vcpu))
		return 0;
	if (kvm_cpu_has_interrupt(vcpu))
		return 0;
	if (!(vcpu->arch.sie_block->gcr[0] & 0x200ul))
		return 0;
	if (!vcpu->arch.gmap->pfault_enabled)
		return 0;

	hva = gfn_to_hva(vcpu->kvm, gpa_to_gfn(current->thread.gmap_addr));
	hva += current->thread.gmap_addr & ~PAGE_MASK;
	if (read_guest_real(vcpu, vcpu->arch.pfault_token, &arch.pfault_token, 8))
		return 0;

	rc = kvm_setup_async_pf(vcpu, current->thread.gmap_addr, hva, &arch);
	return rc;
}

static int vcpu_pre_run(struct kvm_vcpu *vcpu)
{
	int rc, cpuflags;

	/*
	 * On s390 notifications for arriving pages will be delivered directly
	 * to the guest but the housekeeping for completed pfaults is
	 * handled outside the worker.
	 */
	kvm_check_async_pf_completion(vcpu);

	memcpy(&vcpu->arch.sie_block->gg14, &vcpu->run->s.regs.gprs[14], 16);

	if (need_resched())
		schedule();

	if (test_cpu_flag(CIF_MCCK_PENDING))
		s390_handle_mcck();

	if (!kvm_is_ucontrol(vcpu->kvm)) {
		rc = kvm_s390_deliver_pending_interrupts(vcpu);
		if (rc)
			return rc;
	}

	rc = kvm_s390_handle_requests(vcpu);
	if (rc)
		return rc;

	if (guestdbg_enabled(vcpu)) {
		kvm_s390_backup_guest_per_regs(vcpu);
		kvm_s390_patch_guest_per_regs(vcpu);
	}

	vcpu->arch.sie_block->icptcode = 0;
	cpuflags = atomic_read(&vcpu->arch.sie_block->cpuflags);
	VCPU_EVENT(vcpu, 6, "entering sie flags %x", cpuflags);
	trace_kvm_s390_sie_enter(vcpu, cpuflags);

	return 0;
}

static int vcpu_post_run(struct kvm_vcpu *vcpu, int exit_reason)
{
	int rc = -1;

	VCPU_EVENT(vcpu, 6, "exit sie icptcode %d",
		   vcpu->arch.sie_block->icptcode);
	trace_kvm_s390_sie_exit(vcpu, vcpu->arch.sie_block->icptcode);

	if (guestdbg_enabled(vcpu))
		kvm_s390_restore_guest_per_regs(vcpu);

	if (exit_reason >= 0) {
		rc = 0;
	} else if (kvm_is_ucontrol(vcpu->kvm)) {
		vcpu->run->exit_reason = KVM_EXIT_S390_UCONTROL;
		vcpu->run->s390_ucontrol.trans_exc_code =
						current->thread.gmap_addr;
		vcpu->run->s390_ucontrol.pgm_code = 0x10;
		rc = -EREMOTE;

	} else if (current->thread.gmap_pfault) {
		trace_kvm_s390_major_guest_pfault(vcpu);
		current->thread.gmap_pfault = 0;
		if (kvm_arch_setup_async_pf(vcpu)) {
			rc = 0;
		} else {
			gpa_t gpa = current->thread.gmap_addr;
			rc = kvm_arch_fault_in_page(vcpu, gpa, 1);
		}
	}

	if (rc == -1) {
		VCPU_EVENT(vcpu, 3, "%s", "fault in sie instruction");
		trace_kvm_s390_sie_fault(vcpu);
		rc = kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
	}

	memcpy(&vcpu->run->s.regs.gprs[14], &vcpu->arch.sie_block->gg14, 16);

	if (rc == 0) {
		if (kvm_is_ucontrol(vcpu->kvm))
			/* Don't exit for host interrupts. */
			rc = vcpu->arch.sie_block->icptcode ? -EOPNOTSUPP : 0;
		else
			rc = kvm_handle_sie_intercept(vcpu);
	}

	return rc;
}

static int __vcpu_run(struct kvm_vcpu *vcpu)
{
	int rc, exit_reason;

	/*
	 * We try to hold kvm->srcu during most of vcpu_run (except when run-
	 * ning the guest), so that memslots (and other stuff) are protected
	 */
	vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);

	do {
		rc = vcpu_pre_run(vcpu);
		if (rc)
			break;

		srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
		/*
		 * As PF_VCPU will be used in fault handler, between
		 * guest_enter and guest_exit should be no uaccess.
		 */
		preempt_disable();
		kvm_guest_enter();
		preempt_enable();
		exit_reason = sie64a(vcpu->arch.sie_block,
				     vcpu->run->s.regs.gprs);
		kvm_guest_exit();
		vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);

		rc = vcpu_post_run(vcpu, exit_reason);
	} while (!signal_pending(current) && !guestdbg_exit_pending(vcpu) && !rc);

	srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
	return rc;
}

static void sync_regs(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	vcpu->arch.sie_block->gpsw.mask = kvm_run->psw_mask;
	vcpu->arch.sie_block->gpsw.addr = kvm_run->psw_addr;
	if (kvm_run->kvm_dirty_regs & KVM_SYNC_PREFIX)
		kvm_s390_set_prefix(vcpu, kvm_run->s.regs.prefix);
	if (kvm_run->kvm_dirty_regs & KVM_SYNC_CRS) {
		memcpy(&vcpu->arch.sie_block->gcr, &kvm_run->s.regs.crs, 128);
		/* some control register changes require a tlb flush */
		kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
	}
	if (kvm_run->kvm_dirty_regs & KVM_SYNC_ARCH0) {
		vcpu->arch.sie_block->cputm = kvm_run->s.regs.cputm;
		vcpu->arch.sie_block->ckc = kvm_run->s.regs.ckc;
		vcpu->arch.sie_block->todpr = kvm_run->s.regs.todpr;
		vcpu->arch.sie_block->pp = kvm_run->s.regs.pp;
		vcpu->arch.sie_block->gbea = kvm_run->s.regs.gbea;
	}
	if (kvm_run->kvm_dirty_regs & KVM_SYNC_PFAULT) {
		vcpu->arch.pfault_token = kvm_run->s.regs.pft;
		vcpu->arch.pfault_select = kvm_run->s.regs.pfs;
		vcpu->arch.pfault_compare = kvm_run->s.regs.pfc;
	}
	kvm_run->kvm_dirty_regs = 0;
}

static void store_regs(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	kvm_run->psw_mask = vcpu->arch.sie_block->gpsw.mask;
	kvm_run->psw_addr = vcpu->arch.sie_block->gpsw.addr;
	kvm_run->s.regs.prefix = kvm_s390_get_prefix(vcpu);
	memcpy(&kvm_run->s.regs.crs, &vcpu->arch.sie_block->gcr, 128);
	kvm_run->s.regs.cputm = vcpu->arch.sie_block->cputm;
	kvm_run->s.regs.ckc = vcpu->arch.sie_block->ckc;
	kvm_run->s.regs.todpr = vcpu->arch.sie_block->todpr;
	kvm_run->s.regs.pp = vcpu->arch.sie_block->pp;
	kvm_run->s.regs.gbea = vcpu->arch.sie_block->gbea;
	kvm_run->s.regs.pft = vcpu->arch.pfault_token;
	kvm_run->s.regs.pfs = vcpu->arch.pfault_select;
	kvm_run->s.regs.pfc = vcpu->arch.pfault_compare;
}
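
/*
 * Userspace sketch (illustrative, vcpu_fd and err(3) assumed): the sync-regs
 * scheme above lets userspace update registers through the shared kvm_run
 * page instead of separate ioctls; dirty pieces are flagged before KVM_RUN:
 *
 *	run->s.regs.prefix = new_prefix;
 *	run->kvm_dirty_regs |= KVM_SYNC_PREFIX;
 *
 *	if (ioctl(vcpu_fd, KVM_RUN, 0) < 0)
 *		err(1, "KVM_RUN");
 */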

int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	int rc;
	sigset_t sigsaved;

	if (guestdbg_exit_pending(vcpu)) {
		kvm_s390_prepare_debug_exit(vcpu);
		return 0;
	}

	if (vcpu->sigset_active)
		sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);

	if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm)) {
		kvm_s390_vcpu_start(vcpu);
	} else if (is_vcpu_stopped(vcpu)) {
		pr_err_ratelimited("kvm-s390: can't run stopped vcpu %d\n",
				   vcpu->vcpu_id);
		return -EINVAL;
	}

	sync_regs(vcpu, kvm_run);

	might_fault();
	rc = __vcpu_run(vcpu);

	if (signal_pending(current) && !rc) {
		kvm_run->exit_reason = KVM_EXIT_INTR;
		rc = -EINTR;
	}

	if (guestdbg_exit_pending(vcpu) && !rc) {
		kvm_s390_prepare_debug_exit(vcpu);
		rc = 0;
	}

	if (rc == -EOPNOTSUPP) {
		/* intercept cannot be handled in-kernel, prepare kvm-run */
		kvm_run->exit_reason = KVM_EXIT_S390_SIEIC;
		kvm_run->s390_sieic.icptcode = vcpu->arch.sie_block->icptcode;
		kvm_run->s390_sieic.ipa = vcpu->arch.sie_block->ipa;
		kvm_run->s390_sieic.ipb = vcpu->arch.sie_block->ipb;
		rc = 0;
	}

	if (rc == -EREMOTE) {
		/* intercept was handled, but userspace support is needed;
		 * kvm_run has been prepared by the handler */
		rc = 0;
	}

	store_regs(vcpu, kvm_run);

	if (vcpu->sigset_active)
		sigprocmask(SIG_SETMASK, &sigsaved, NULL);

	vcpu->stat.exit_userspace++;
	return rc;
}

/*
 * store status at address
 * we have two special cases:
 * KVM_S390_STORE_STATUS_NOADDR: -> 0x1200 on 64 bit
 * KVM_S390_STORE_STATUS_PREFIXED: -> prefix
 */
int kvm_s390_store_status_unloaded(struct kvm_vcpu *vcpu, unsigned long gpa)
{
	unsigned char archmode = 1;
	unsigned int px;
	u64 clkcomp;
	int rc;

	if (gpa == KVM_S390_STORE_STATUS_NOADDR) {
		if (write_guest_abs(vcpu, 163, &archmode, 1))
			return -EFAULT;
		gpa = SAVE_AREA_BASE;
	} else if (gpa == KVM_S390_STORE_STATUS_PREFIXED) {
		if (write_guest_real(vcpu, 163, &archmode, 1))
			return -EFAULT;
		gpa = kvm_s390_real_to_abs(vcpu, SAVE_AREA_BASE);
	}
	rc = write_guest_abs(vcpu, gpa + offsetof(struct save_area, fp_regs),
			     vcpu->arch.guest_fpregs.fprs, 128);
	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, gp_regs),
			      vcpu->run->s.regs.gprs, 128);
	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, psw),
			      &vcpu->arch.sie_block->gpsw, 16);
	px = kvm_s390_get_prefix(vcpu);
	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, pref_reg),
			      &px, 4);
	rc |= write_guest_abs(vcpu,
			      gpa + offsetof(struct save_area, fp_ctrl_reg),
			      &vcpu->arch.guest_fpregs.fpc, 4);
	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, tod_reg),
			      &vcpu->arch.sie_block->todpr, 4);
	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, timer),
			      &vcpu->arch.sie_block->cputm, 8);
	clkcomp = vcpu->arch.sie_block->ckc >> 8;
	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, clk_cmp),
			      &clkcomp, 8);
	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, acc_regs),
			      &vcpu->run->s.regs.acrs, 64);
	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, ctrl_regs),
			      &vcpu->arch.sie_block->gcr, 128);
	return rc ? -EFAULT : 0;
}

int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
{
	/*
	 * The guest FPRS and ACRS are in the host FPRS/ACRS due to the lazy
	 * copying in vcpu load/put. Let's update our copies before we save
	 * them into the save area.
	 */
	save_fp_ctl(&vcpu->arch.guest_fpregs.fpc);
	save_fp_regs(vcpu->arch.guest_fpregs.fprs);
	save_access_regs(vcpu->run->s.regs.acrs);

	return kvm_s390_store_status_unloaded(vcpu, addr);
}

static void __disable_ibs_on_vcpu(struct kvm_vcpu *vcpu)
{
	kvm_check_request(KVM_REQ_ENABLE_IBS, vcpu);
	kvm_make_request(KVM_REQ_DISABLE_IBS, vcpu);
	exit_sie_sync(vcpu);
}

static void __disable_ibs_on_all_vcpus(struct kvm *kvm)
{
	unsigned int i;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(i, vcpu, kvm) {
		__disable_ibs_on_vcpu(vcpu);
	}
}

static void __enable_ibs_on_vcpu(struct kvm_vcpu *vcpu)
{
	kvm_check_request(KVM_REQ_DISABLE_IBS, vcpu);
	kvm_make_request(KVM_REQ_ENABLE_IBS, vcpu);
	exit_sie_sync(vcpu);
}

void kvm_s390_vcpu_start(struct kvm_vcpu *vcpu)
{
	int i, online_vcpus, started_vcpus = 0;

	if (!is_vcpu_stopped(vcpu))
		return;

	trace_kvm_s390_vcpu_start_stop(vcpu->vcpu_id, 1);
	/* Only one cpu at a time may enter/leave the STOPPED state. */
	spin_lock(&vcpu->kvm->arch.start_stop_lock);
	online_vcpus = atomic_read(&vcpu->kvm->online_vcpus);

	for (i = 0; i < online_vcpus; i++) {
		if (!is_vcpu_stopped(vcpu->kvm->vcpus[i]))
			started_vcpus++;
	}

	if (started_vcpus == 0) {
		/* we're the only active VCPU -> speed it up */
		__enable_ibs_on_vcpu(vcpu);
	} else if (started_vcpus == 1) {
		/*
		 * As we are starting a second VCPU, we have to disable
		 * the IBS facility on all VCPUs to remove potentially
		 * outstanding ENABLE requests.
		 */
		__disable_ibs_on_all_vcpus(vcpu->kvm);
	}

	atomic_clear_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);
	/*
	 * Another VCPU might have used IBS while we were offline.
	 * Let's play safe and flush the VCPU at startup.
	 */
	kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
	spin_unlock(&vcpu->kvm->arch.start_stop_lock);
	return;
}

void kvm_s390_vcpu_stop(struct kvm_vcpu *vcpu)
{
	int i, online_vcpus, started_vcpus = 0;
	struct kvm_vcpu *started_vcpu = NULL;

	if (is_vcpu_stopped(vcpu))
		return;

	trace_kvm_s390_vcpu_start_stop(vcpu->vcpu_id, 0);
	/* Only one cpu at a time may enter/leave the STOPPED state. */
	spin_lock(&vcpu->kvm->arch.start_stop_lock);
	online_vcpus = atomic_read(&vcpu->kvm->online_vcpus);

	/* Need to lock access to action_bits to avoid a SIGP race condition */
	spin_lock(&vcpu->arch.local_int.lock);
	atomic_set_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);

	/* SIGP STOP and SIGP STOP AND STORE STATUS has been fully processed */
	vcpu->arch.local_int.action_bits &=
				 ~(ACTION_STOP_ON_STOP | ACTION_STORE_ON_STOP);
	spin_unlock(&vcpu->arch.local_int.lock);

	__disable_ibs_on_vcpu(vcpu);

	for (i = 0; i < online_vcpus; i++) {
		if (!is_vcpu_stopped(vcpu->kvm->vcpus[i])) {
			started_vcpus++;
			started_vcpu = vcpu->kvm->vcpus[i];
		}
	}

	if (started_vcpus == 1) {
		/*
		 * As we only have one VCPU left, we want to enable the
		 * IBS facility for that VCPU to speed it up.
		 */
		__enable_ibs_on_vcpu(started_vcpu);
	}

	spin_unlock(&vcpu->kvm->arch.start_stop_lock);
	return;
}

static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
				     struct kvm_enable_cap *cap)
{
	int r;

	if (cap->flags)
		return -EINVAL;

	switch (cap->cap) {
	case KVM_CAP_S390_CSS_SUPPORT:
		if (!vcpu->kvm->arch.css_support) {
			vcpu->kvm->arch.css_support = 1;
			trace_kvm_s390_enable_css(vcpu->kvm);
		}
		r = 0;
		break;
	default:
		r = -EINVAL;
		break;
	}
	return r;
}

long kvm_arch_vcpu_ioctl(struct file *filp,
			 unsigned int ioctl, unsigned long arg)
{
	struct kvm_vcpu *vcpu = filp->private_data;
	void __user *argp = (void __user *)arg;
	int idx;
	long r;

	switch (ioctl) {
	case KVM_S390_INTERRUPT: {
		struct kvm_s390_interrupt s390int;

		r = -EFAULT;
		if (copy_from_user(&s390int, argp, sizeof(s390int)))
			break;
		r = kvm_s390_inject_vcpu(vcpu, &s390int);
		break;
	}
	case KVM_S390_STORE_STATUS:
		idx = srcu_read_lock(&vcpu->kvm->srcu);
		r = kvm_s390_vcpu_store_status(vcpu, arg);
		srcu_read_unlock(&vcpu->kvm->srcu, idx);
		break;
	case KVM_S390_SET_INITIAL_PSW: {
		psw_t psw;

		r = -EFAULT;
		if (copy_from_user(&psw, argp, sizeof(psw)))
			break;
		r = kvm_arch_vcpu_ioctl_set_initial_psw(vcpu, psw);
		break;
	}
	case KVM_S390_INITIAL_RESET:
		r = kvm_arch_vcpu_ioctl_initial_reset(vcpu);
		break;
	case KVM_SET_ONE_REG:
	case KVM_GET_ONE_REG: {
		struct kvm_one_reg reg;
		r = -EFAULT;
		if (copy_from_user(&reg, argp, sizeof(reg)))
			break;
		if (ioctl == KVM_SET_ONE_REG)
			r = kvm_arch_vcpu_ioctl_set_one_reg(vcpu, &reg);
		else
			r = kvm_arch_vcpu_ioctl_get_one_reg(vcpu, &reg);
		break;
	}
#ifdef CONFIG_KVM_S390_UCONTROL
	case KVM_S390_UCAS_MAP: {
		struct kvm_s390_ucas_mapping ucasmap;

		if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
			r = -EFAULT;
			break;
		}

		if (!kvm_is_ucontrol(vcpu->kvm)) {
			r = -EINVAL;
			break;
		}

		r = gmap_map_segment(vcpu->arch.gmap, ucasmap.user_addr,
				     ucasmap.vcpu_addr, ucasmap.length);
		break;
	}
	case KVM_S390_UCAS_UNMAP: {
		struct kvm_s390_ucas_mapping ucasmap;

		if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
			r = -EFAULT;
			break;
		}

		if (!kvm_is_ucontrol(vcpu->kvm)) {
			r = -EINVAL;
			break;
		}

		r = gmap_unmap_segment(vcpu->arch.gmap, ucasmap.vcpu_addr,
				       ucasmap.length);
		break;
	}
#endif
	case KVM_S390_VCPU_FAULT: {
		r = gmap_fault(vcpu->arch.gmap, arg);
		if (!IS_ERR_VALUE(r))
			r = 0;
		break;
	}
	case KVM_ENABLE_CAP:
	{
		struct kvm_enable_cap cap;
		r = -EFAULT;
		if (copy_from_user(&cap, argp, sizeof(cap)))
			break;
		r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap);
		break;
	}
	default:
		r = -ENOTTY;
	}
	return r;
}

int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
{
#ifdef CONFIG_KVM_S390_UCONTROL
	if ((vmf->pgoff == KVM_S390_SIE_PAGE_OFFSET)
		 && (kvm_is_ucontrol(vcpu->kvm))) {
		vmf->page = virt_to_page(vcpu->arch.sie_block);
		get_page(vmf->page);
		return 0;
	}
#endif
	return VM_FAULT_SIGBUS;
}

void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free,
			   struct kvm_memory_slot *dont)
{
}

int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
			    unsigned long npages)
{
	return 0;
}

void kvm_arch_memslots_updated(struct kvm *kvm)
{
}

/* Section: memory related */
int kvm_arch_prepare_memory_region(struct kvm *kvm,
				   struct kvm_memory_slot *memslot,
				   struct kvm_userspace_memory_region *mem,
				   enum kvm_mr_change change)
{
	/* A few sanity checks. Memory slots have to start and end on a
	   segment boundary (1MB). The memory in userland may be fragmented
	   into various different vmas. It is okay to mmap() and munmap()
	   stuff in this slot after doing this call at any time */

	if (mem->userspace_addr & 0xffffful)
		return -EINVAL;

	if (mem->memory_size & 0xffffful)
		return -EINVAL;

	return 0;
}

void kvm_arch_commit_memory_region(struct kvm *kvm,
				   struct kvm_userspace_memory_region *mem,
				   const struct kvm_memory_slot *old,
				   enum kvm_mr_change change)
{
	int rc;

	/* If the basics of the memslot do not change, we do not want
	 * to update the gmap. Every update causes several unnecessary
	 * segment translation exceptions. This is usually handled just
	 * fine by the normal fault handler + gmap, but it will also
	 * cause faults on the prefix page of running guest CPUs.
	 */
	if (old->userspace_addr == mem->userspace_addr &&
	    old->base_gfn * PAGE_SIZE == mem->guest_phys_addr &&
	    old->npages * PAGE_SIZE == mem->memory_size)
		return;

	rc = gmap_map_segment(kvm->arch.gmap, mem->userspace_addr,
			      mem->guest_phys_addr, mem->memory_size);
	if (rc)
		printk(KERN_WARNING "kvm-s390: failed to commit memory region\n");
	return;
}

void kvm_arch_flush_shadow_all(struct kvm *kvm)
{
}

void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
				   struct kvm_memory_slot *slot)
{
}

static int __init kvm_s390_init(void)
{
	int ret;
	ret = kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
	if (ret)
		return ret;

	/*
	 * guests can ask for up to 255+1 double words, we need a full page
	 * to hold the maximum amount of facilities. On the other hand, we
	 * only set facilities that are known to work in KVM.
	 */
	vfacilities = (unsigned long *) get_zeroed_page(GFP_KERNEL|GFP_DMA);
	if (!vfacilities) {
		kvm_exit();
		return -ENOMEM;
	}
	memcpy(vfacilities, S390_lowcore.stfle_fac_list, 16);
	vfacilities[0] &= 0xff82fff3f4fc2000UL;
	vfacilities[1] &= 0x005c000000000000UL;
	return 0;
}

static void __exit kvm_s390_exit(void)
{
	free_page((unsigned long) vfacilities);
	kvm_exit();
}

module_init(kvm_s390_init);
module_exit(kvm_s390_exit);

/*
 * Enable autoloading of the kvm module.
 * Note that we add the module alias here instead of virt/kvm/kvm_main.c
 * since x86 takes a different approach.
 */
#include <linux/miscdevice.h>
MODULE_ALIAS_MISCDEV(KVM_MINOR);
MODULE_ALIAS("devname:kvm");