]> git.proxmox.com Git - mirror_ubuntu-jammy-kernel.git/blame - arch/s390/kvm/kvm-s390.c
KVM: x86: Run PIT work in own kthread
[mirror_ubuntu-jammy-kernel.git] / arch / s390 / kvm / kvm-s390.c
CommitLineData
b0c632db
HC
1/*
2 * kvm-s390.c -- hosting zSeries kernel virtual machines
3 *
628eb9b8 4 * Copyright IBM Corp. 2008,2009
b0c632db
HC
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License (version 2 only)
8 * as published by the Free Software Foundation.
9 *
10 * Author(s): Carsten Otte <cotte@de.ibm.com>
11 * Christian Borntraeger <borntraeger@de.ibm.com>
12 * Heiko Carstens <heiko.carstens@de.ibm.com>
628eb9b8 13 * Christian Ehrhardt <ehrhardt@de.ibm.com>
b0c632db
HC
14 */
15
16#include <linux/compiler.h>
17#include <linux/err.h>
18#include <linux/fs.h>
ca872302 19#include <linux/hrtimer.h>
b0c632db
HC
20#include <linux/init.h>
21#include <linux/kvm.h>
22#include <linux/kvm_host.h>
23#include <linux/module.h>
24#include <linux/slab.h>
ba5c1e9b 25#include <linux/timer.h>
cbb870c8 26#include <asm/asm-offsets.h>
b0c632db
HC
27#include <asm/lowcore.h>
28#include <asm/pgtable.h>
f5daba1d 29#include <asm/nmi.h>
a0616cde 30#include <asm/switch_to.h>
8f2abe6a 31#include "kvm-s390.h"
b0c632db
HC
32#include "gaccess.h"
33
34#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU
35
36struct kvm_stats_debugfs_item debugfs_entries[] = {
37 { "userspace_handled", VCPU_STAT(exit_userspace) },
0eaeafa1 38 { "exit_null", VCPU_STAT(exit_null) },
8f2abe6a
CB
39 { "exit_validity", VCPU_STAT(exit_validity) },
40 { "exit_stop_request", VCPU_STAT(exit_stop_request) },
41 { "exit_external_request", VCPU_STAT(exit_external_request) },
42 { "exit_external_interrupt", VCPU_STAT(exit_external_interrupt) },
ba5c1e9b
CO
43 { "exit_instruction", VCPU_STAT(exit_instruction) },
44 { "exit_program_interruption", VCPU_STAT(exit_program_interruption) },
45 { "exit_instr_and_program_int", VCPU_STAT(exit_instr_and_program) },
f5e10b09 46 { "instruction_lctlg", VCPU_STAT(instruction_lctlg) },
ba5c1e9b
CO
47 { "instruction_lctl", VCPU_STAT(instruction_lctl) },
48 { "deliver_emergency_signal", VCPU_STAT(deliver_emergency_signal) },
7697e71f 49 { "deliver_external_call", VCPU_STAT(deliver_external_call) },
ba5c1e9b
CO
50 { "deliver_service_signal", VCPU_STAT(deliver_service_signal) },
51 { "deliver_virtio_interrupt", VCPU_STAT(deliver_virtio_interrupt) },
52 { "deliver_stop_signal", VCPU_STAT(deliver_stop_signal) },
53 { "deliver_prefix_signal", VCPU_STAT(deliver_prefix_signal) },
54 { "deliver_restart_signal", VCPU_STAT(deliver_restart_signal) },
55 { "deliver_program_interruption", VCPU_STAT(deliver_program_int) },
56 { "exit_wait_state", VCPU_STAT(exit_wait_state) },
453423dc
CB
57 { "instruction_stidp", VCPU_STAT(instruction_stidp) },
58 { "instruction_spx", VCPU_STAT(instruction_spx) },
59 { "instruction_stpx", VCPU_STAT(instruction_stpx) },
60 { "instruction_stap", VCPU_STAT(instruction_stap) },
61 { "instruction_storage_key", VCPU_STAT(instruction_storage_key) },
62 { "instruction_stsch", VCPU_STAT(instruction_stsch) },
63 { "instruction_chsc", VCPU_STAT(instruction_chsc) },
64 { "instruction_stsi", VCPU_STAT(instruction_stsi) },
65 { "instruction_stfl", VCPU_STAT(instruction_stfl) },
bb25b9ba 66 { "instruction_tprot", VCPU_STAT(instruction_tprot) },
5288fbf0 67 { "instruction_sigp_sense", VCPU_STAT(instruction_sigp_sense) },
bd59d3a4 68 { "instruction_sigp_sense_running", VCPU_STAT(instruction_sigp_sense_running) },
7697e71f 69 { "instruction_sigp_external_call", VCPU_STAT(instruction_sigp_external_call) },
5288fbf0
CB
70 { "instruction_sigp_emergency", VCPU_STAT(instruction_sigp_emergency) },
71 { "instruction_sigp_stop", VCPU_STAT(instruction_sigp_stop) },
72 { "instruction_sigp_set_arch", VCPU_STAT(instruction_sigp_arch) },
73 { "instruction_sigp_set_prefix", VCPU_STAT(instruction_sigp_prefix) },
74 { "instruction_sigp_restart", VCPU_STAT(instruction_sigp_restart) },
388186bc 75 { "diagnose_10", VCPU_STAT(diagnose_10) },
e28acfea 76 { "diagnose_44", VCPU_STAT(diagnose_44) },
b0c632db
HC
77 { NULL }
78};
79
ef50f7ac 80static unsigned long long *facilities;
b0c632db
HC
81
82/* Section: not file related */
int kvm_arch_hardware_enable(void *garbage)
{
	/*
	 * SIE is always available on s390; there is no per-cpu enable
	 * step like VMXON/SVM on x86, so this is a no-op that succeeds.
	 */
	return 0;
}
88
void kvm_arch_hardware_disable(void *garbage)
{
	/* Nothing to tear down: virtualization is never "enabled" per cpu. */
}
92
b0c632db
HC
int kvm_arch_hardware_setup(void)
{
	/* No architecture-wide hardware setup is required on s390. */
	return 0;
}
97
void kvm_arch_hardware_unsetup(void)
{
	/* Counterpart of kvm_arch_hardware_setup(): nothing to undo. */
}
101
void kvm_arch_check_processor_compat(void *rtn)
{
	/* All s390 CPUs in one machine are compatible; nothing to check. */
}
105
int kvm_arch_init(void *opaque)
{
	/* No module-load-time architecture initialization needed. */
	return 0;
}
110
void kvm_arch_exit(void)
{
	/* Counterpart of kvm_arch_init(): nothing to release. */
}
114
115/* Section: device related */
116long kvm_arch_dev_ioctl(struct file *filp,
117 unsigned int ioctl, unsigned long arg)
118{
119 if (ioctl == KVM_S390_ENABLE_SIE)
120 return s390_enable_sie();
121 return -EINVAL;
122}
123
124int kvm_dev_ioctl_check_extension(long ext)
125{
d7b0b5eb
CO
126 int r;
127
2bd0ac4e 128 switch (ext) {
d7b0b5eb 129 case KVM_CAP_S390_PSW:
b6cf8788 130 case KVM_CAP_S390_GMAP:
52e16b18 131 case KVM_CAP_SYNC_MMU:
1efd0f59
CO
132#ifdef CONFIG_KVM_S390_UCONTROL
133 case KVM_CAP_S390_UCONTROL:
134#endif
60b413c9 135 case KVM_CAP_SYNC_REGS:
d7b0b5eb
CO
136 r = 1;
137 break;
2bd0ac4e 138 default:
d7b0b5eb 139 r = 0;
2bd0ac4e 140 }
d7b0b5eb 141 return r;
b0c632db
HC
142}
143
144/* Section: vm related */
145/*
146 * Get (and clear) the dirty memory log for a memory slot.
147 */
/*
 * Get (and clear) the dirty memory log for a memory slot.
 * Dirty logging is not implemented on s390; report success with no data.
 */
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
			       struct kvm_dirty_log *log)
{
	return 0;
}
153
154long kvm_arch_vm_ioctl(struct file *filp,
155 unsigned int ioctl, unsigned long arg)
156{
157 struct kvm *kvm = filp->private_data;
158 void __user *argp = (void __user *)arg;
159 int r;
160
161 switch (ioctl) {
ba5c1e9b
CO
162 case KVM_S390_INTERRUPT: {
163 struct kvm_s390_interrupt s390int;
164
165 r = -EFAULT;
166 if (copy_from_user(&s390int, argp, sizeof(s390int)))
167 break;
168 r = kvm_s390_inject_vm(kvm, &s390int);
169 break;
170 }
b0c632db 171 default:
367e1319 172 r = -ENOTTY;
b0c632db
HC
173 }
174
175 return r;
176}
177
e08b9637 178int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
b0c632db 179{
b0c632db
HC
180 int rc;
181 char debug_name[16];
182
e08b9637
CO
183 rc = -EINVAL;
184#ifdef CONFIG_KVM_S390_UCONTROL
185 if (type & ~KVM_VM_S390_UCONTROL)
186 goto out_err;
187 if ((type & KVM_VM_S390_UCONTROL) && (!capable(CAP_SYS_ADMIN)))
188 goto out_err;
189#else
190 if (type)
191 goto out_err;
192#endif
193
b0c632db
HC
194 rc = s390_enable_sie();
195 if (rc)
d89f5eff 196 goto out_err;
b0c632db 197
b290411a
CO
198 rc = -ENOMEM;
199
b0c632db
HC
200 kvm->arch.sca = (struct sca_block *) get_zeroed_page(GFP_KERNEL);
201 if (!kvm->arch.sca)
d89f5eff 202 goto out_err;
b0c632db
HC
203
204 sprintf(debug_name, "kvm-%u", current->pid);
205
206 kvm->arch.dbf = debug_register(debug_name, 8, 2, 8 * sizeof(long));
207 if (!kvm->arch.dbf)
208 goto out_nodbf;
209
ba5c1e9b
CO
210 spin_lock_init(&kvm->arch.float_int.lock);
211 INIT_LIST_HEAD(&kvm->arch.float_int.list);
212
b0c632db
HC
213 debug_register_view(kvm->arch.dbf, &debug_sprintf_view);
214 VM_EVENT(kvm, 3, "%s", "vm created");
215
e08b9637
CO
216 if (type & KVM_VM_S390_UCONTROL) {
217 kvm->arch.gmap = NULL;
218 } else {
219 kvm->arch.gmap = gmap_alloc(current->mm);
220 if (!kvm->arch.gmap)
221 goto out_nogmap;
222 }
d89f5eff 223 return 0;
598841ca
CO
224out_nogmap:
225 debug_unregister(kvm->arch.dbf);
b0c632db
HC
226out_nodbf:
227 free_page((unsigned long)(kvm->arch.sca));
d89f5eff
JK
228out_err:
229 return rc;
b0c632db
HC
230}
231
d329c035
CB
232void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
233{
234 VCPU_EVENT(vcpu, 3, "%s", "free cpu");
58f9460b
CO
235 if (!kvm_is_ucontrol(vcpu->kvm)) {
236 clear_bit(63 - vcpu->vcpu_id,
237 (unsigned long *) &vcpu->kvm->arch.sca->mcn);
238 if (vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda ==
239 (__u64) vcpu->arch.sie_block)
240 vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda = 0;
241 }
abf4a71e 242 smp_mb();
27e0393f
CO
243
244 if (kvm_is_ucontrol(vcpu->kvm))
245 gmap_free(vcpu->arch.gmap);
246
d329c035 247 free_page((unsigned long)(vcpu->arch.sie_block));
6692cef3 248 kvm_vcpu_uninit(vcpu);
d329c035
CB
249 kfree(vcpu);
250}
251
252static void kvm_free_vcpus(struct kvm *kvm)
253{
254 unsigned int i;
988a2cae 255 struct kvm_vcpu *vcpu;
d329c035 256
988a2cae
GN
257 kvm_for_each_vcpu(i, vcpu, kvm)
258 kvm_arch_vcpu_destroy(vcpu);
259
260 mutex_lock(&kvm->lock);
261 for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
262 kvm->vcpus[i] = NULL;
263
264 atomic_set(&kvm->online_vcpus, 0);
265 mutex_unlock(&kvm->lock);
d329c035
CB
266}
267
ad8ba2cd
SY
void kvm_arch_sync_events(struct kvm *kvm)
{
	/* No asynchronous per-VM work to flush on s390. */
}
271
b0c632db
HC
272void kvm_arch_destroy_vm(struct kvm *kvm)
273{
d329c035 274 kvm_free_vcpus(kvm);
b0c632db 275 free_page((unsigned long)(kvm->arch.sca));
d329c035 276 debug_unregister(kvm->arch.dbf);
27e0393f
CO
277 if (!kvm_is_ucontrol(kvm))
278 gmap_free(kvm->arch.gmap);
b0c632db
HC
279}
280
281/* Section: vcpu related */
282int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
283{
27e0393f
CO
284 if (kvm_is_ucontrol(vcpu->kvm)) {
285 vcpu->arch.gmap = gmap_alloc(current->mm);
286 if (!vcpu->arch.gmap)
287 return -ENOMEM;
288 return 0;
289 }
290
598841ca 291 vcpu->arch.gmap = vcpu->kvm->arch.gmap;
59674c1a
CB
292 vcpu->run->kvm_valid_regs = KVM_SYNC_PREFIX |
293 KVM_SYNC_GPRS |
9eed0735
CB
294 KVM_SYNC_ACRS |
295 KVM_SYNC_CRS;
b0c632db
HC
296 return 0;
297}
298
void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
{
	/* Nothing to do; teardown happens in kvm_arch_vcpu_destroy(). */
}
303
304void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
305{
306 save_fp_regs(&vcpu->arch.host_fpregs);
307 save_access_regs(vcpu->arch.host_acrs);
308 vcpu->arch.guest_fpregs.fpc &= FPC_VALID_MASK;
309 restore_fp_regs(&vcpu->arch.guest_fpregs);
59674c1a 310 restore_access_regs(vcpu->run->s.regs.acrs);
480e5926 311 gmap_enable(vcpu->arch.gmap);
9e6dabef 312 atomic_set_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
b0c632db
HC
313}
314
315void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
316{
9e6dabef 317 atomic_clear_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
480e5926 318 gmap_disable(vcpu->arch.gmap);
b0c632db 319 save_fp_regs(&vcpu->arch.guest_fpregs);
59674c1a 320 save_access_regs(vcpu->run->s.regs.acrs);
b0c632db
HC
321 restore_fp_regs(&vcpu->arch.host_fpregs);
322 restore_access_regs(vcpu->arch.host_acrs);
323}
324
325static void kvm_s390_vcpu_initial_reset(struct kvm_vcpu *vcpu)
326{
327 /* this equals initial cpu reset in pop, but we don't switch to ESA */
328 vcpu->arch.sie_block->gpsw.mask = 0UL;
329 vcpu->arch.sie_block->gpsw.addr = 0UL;
8d26cf7b 330 kvm_s390_set_prefix(vcpu, 0);
b0c632db
HC
331 vcpu->arch.sie_block->cputm = 0UL;
332 vcpu->arch.sie_block->ckc = 0UL;
333 vcpu->arch.sie_block->todpr = 0;
334 memset(vcpu->arch.sie_block->gcr, 0, 16 * sizeof(__u64));
335 vcpu->arch.sie_block->gcr[0] = 0xE0UL;
336 vcpu->arch.sie_block->gcr[14] = 0xC2000000UL;
337 vcpu->arch.guest_fpregs.fpc = 0;
338 asm volatile("lfpc %0" : : "Q" (vcpu->arch.guest_fpregs.fpc));
339 vcpu->arch.sie_block->gbea = 1;
340}
341
342int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
343{
9e6dabef
CH
344 atomic_set(&vcpu->arch.sie_block->cpuflags, CPUSTAT_ZARCH |
345 CPUSTAT_SM |
346 CPUSTAT_STOPPED);
fc34531d 347 vcpu->arch.sie_block->ecb = 6;
b0c632db 348 vcpu->arch.sie_block->eca = 0xC1002001U;
ef50f7ac 349 vcpu->arch.sie_block->fac = (int) (long) facilities;
ca872302
CB
350 hrtimer_init(&vcpu->arch.ckc_timer, CLOCK_REALTIME, HRTIMER_MODE_ABS);
351 tasklet_init(&vcpu->arch.tasklet, kvm_s390_tasklet,
352 (unsigned long) vcpu);
353 vcpu->arch.ckc_timer.function = kvm_s390_idle_wakeup;
453423dc 354 get_cpu_id(&vcpu->arch.cpu_id);
92e6ecf3 355 vcpu->arch.cpu_id.version = 0xff;
b0c632db
HC
356 return 0;
357}
358
359struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
360 unsigned int id)
361{
4d47555a
CO
362 struct kvm_vcpu *vcpu;
363 int rc = -EINVAL;
364
365 if (id >= KVM_MAX_VCPUS)
366 goto out;
367
368 rc = -ENOMEM;
b0c632db 369
4d47555a 370 vcpu = kzalloc(sizeof(struct kvm_vcpu), GFP_KERNEL);
b0c632db 371 if (!vcpu)
4d47555a 372 goto out;
b0c632db 373
180c12fb
CB
374 vcpu->arch.sie_block = (struct kvm_s390_sie_block *)
375 get_zeroed_page(GFP_KERNEL);
b0c632db
HC
376
377 if (!vcpu->arch.sie_block)
378 goto out_free_cpu;
379
380 vcpu->arch.sie_block->icpua = id;
58f9460b
CO
381 if (!kvm_is_ucontrol(kvm)) {
382 if (!kvm->arch.sca) {
383 WARN_ON_ONCE(1);
384 goto out_free_cpu;
385 }
386 if (!kvm->arch.sca->cpu[id].sda)
387 kvm->arch.sca->cpu[id].sda =
388 (__u64) vcpu->arch.sie_block;
389 vcpu->arch.sie_block->scaoh =
390 (__u32)(((__u64)kvm->arch.sca) >> 32);
391 vcpu->arch.sie_block->scaol = (__u32)(__u64)kvm->arch.sca;
392 set_bit(63 - id, (unsigned long *) &kvm->arch.sca->mcn);
393 }
b0c632db 394
ba5c1e9b
CO
395 spin_lock_init(&vcpu->arch.local_int.lock);
396 INIT_LIST_HEAD(&vcpu->arch.local_int.list);
397 vcpu->arch.local_int.float_int = &kvm->arch.float_int;
b037a4f3 398 spin_lock(&kvm->arch.float_int.lock);
ba5c1e9b
CO
399 kvm->arch.float_int.local_int[id] = &vcpu->arch.local_int;
400 init_waitqueue_head(&vcpu->arch.local_int.wq);
5288fbf0 401 vcpu->arch.local_int.cpuflags = &vcpu->arch.sie_block->cpuflags;
b037a4f3 402 spin_unlock(&kvm->arch.float_int.lock);
ba5c1e9b 403
b0c632db
HC
404 rc = kvm_vcpu_init(vcpu, kvm, id);
405 if (rc)
7b06bf2f 406 goto out_free_sie_block;
b0c632db
HC
407 VM_EVENT(kvm, 3, "create cpu %d at %p, sie block at %p", id, vcpu,
408 vcpu->arch.sie_block);
409
b0c632db 410 return vcpu;
7b06bf2f
WY
411out_free_sie_block:
412 free_page((unsigned long)(vcpu->arch.sie_block));
b0c632db
HC
413out_free_cpu:
414 kfree(vcpu);
4d47555a 415out:
b0c632db
HC
416 return ERR_PTR(rc);
417}
418
b0c632db
HC
int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
{
	/* kvm common code refers to this, but never calls it on s390 */
	BUG();
	return 0;
}
425
b6d33834
CD
int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
{
	/* kvm common code refers to this, but never calls it on s390 */
	BUG();
	return 0;
}
432
433
b0c632db
HC
/* ioctl wrapper around the architectural initial cpu reset. */
static int kvm_arch_vcpu_ioctl_initial_reset(struct kvm_vcpu *vcpu)
{
	kvm_s390_vcpu_initial_reset(vcpu);
	return 0;
}
439
440int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
441{
5a32c1af 442 memcpy(&vcpu->run->s.regs.gprs, &regs->gprs, sizeof(regs->gprs));
b0c632db
HC
443 return 0;
444}
445
446int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
447{
5a32c1af 448 memcpy(&regs->gprs, &vcpu->run->s.regs.gprs, sizeof(regs->gprs));
b0c632db
HC
449 return 0;
450}
451
452int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
453 struct kvm_sregs *sregs)
454{
59674c1a 455 memcpy(&vcpu->run->s.regs.acrs, &sregs->acrs, sizeof(sregs->acrs));
b0c632db 456 memcpy(&vcpu->arch.sie_block->gcr, &sregs->crs, sizeof(sregs->crs));
59674c1a 457 restore_access_regs(vcpu->run->s.regs.acrs);
b0c632db
HC
458 return 0;
459}
460
461int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
462 struct kvm_sregs *sregs)
463{
59674c1a 464 memcpy(&sregs->acrs, &vcpu->run->s.regs.acrs, sizeof(sregs->acrs));
b0c632db 465 memcpy(&sregs->crs, &vcpu->arch.sie_block->gcr, sizeof(sregs->crs));
b0c632db
HC
466 return 0;
467}
468
469int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
470{
b0c632db 471 memcpy(&vcpu->arch.guest_fpregs.fprs, &fpu->fprs, sizeof(fpu->fprs));
85175587 472 vcpu->arch.guest_fpregs.fpc = fpu->fpc & FPC_VALID_MASK;
7eef87dc 473 restore_fp_regs(&vcpu->arch.guest_fpregs);
b0c632db
HC
474 return 0;
475}
476
477int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
478{
b0c632db
HC
479 memcpy(&fpu->fprs, &vcpu->arch.guest_fpregs.fprs, sizeof(fpu->fprs));
480 fpu->fpc = vcpu->arch.guest_fpregs.fpc;
b0c632db
HC
481 return 0;
482}
483
484static int kvm_arch_vcpu_ioctl_set_initial_psw(struct kvm_vcpu *vcpu, psw_t psw)
485{
486 int rc = 0;
487
9e6dabef 488 if (!(atomic_read(&vcpu->arch.sie_block->cpuflags) & CPUSTAT_STOPPED))
b0c632db 489 rc = -EBUSY;
d7b0b5eb
CO
490 else {
491 vcpu->run->psw_mask = psw.mask;
492 vcpu->run->psw_addr = psw.addr;
493 }
b0c632db
HC
494 return rc;
495}
496
497int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
498 struct kvm_translation *tr)
499{
500 return -EINVAL; /* not implemented yet */
501}
502
d0bfb940
JK
503int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
504 struct kvm_guest_debug *dbg)
b0c632db
HC
505{
506 return -EINVAL; /* not implemented yet */
507}
508
62d9f0db
MT
509int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
510 struct kvm_mp_state *mp_state)
511{
512 return -EINVAL; /* not implemented yet */
513}
514
515int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
516 struct kvm_mp_state *mp_state)
517{
518 return -EINVAL; /* not implemented yet */
519}
520
e168bf8d 521static int __vcpu_run(struct kvm_vcpu *vcpu)
b0c632db 522{
e168bf8d
CO
523 int rc;
524
5a32c1af 525 memcpy(&vcpu->arch.sie_block->gg14, &vcpu->run->s.regs.gprs[14], 16);
b0c632db
HC
526
527 if (need_resched())
528 schedule();
529
71cde587
CB
530 if (test_thread_flag(TIF_MCCK_PENDING))
531 s390_handle_mcck();
532
d6b6d166
CO
533 if (!kvm_is_ucontrol(vcpu->kvm))
534 kvm_s390_deliver_pending_interrupts(vcpu);
0ff31867 535
b0c632db
HC
536 vcpu->arch.sie_block->icptcode = 0;
537 local_irq_disable();
538 kvm_guest_enter();
539 local_irq_enable();
540 VCPU_EVENT(vcpu, 6, "entering sie flags %x",
541 atomic_read(&vcpu->arch.sie_block->cpuflags));
5a32c1af 542 rc = sie64a(vcpu->arch.sie_block, vcpu->run->s.regs.gprs);
e168bf8d
CO
543 if (rc) {
544 if (kvm_is_ucontrol(vcpu->kvm)) {
545 rc = SIE_INTERCEPT_UCONTROL;
546 } else {
547 VCPU_EVENT(vcpu, 3, "%s", "fault in sie instruction");
548 kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
549 rc = 0;
550 }
1f0d0f09 551 }
b0c632db
HC
552 VCPU_EVENT(vcpu, 6, "exit sie icptcode %d",
553 vcpu->arch.sie_block->icptcode);
554 local_irq_disable();
555 kvm_guest_exit();
556 local_irq_enable();
557
5a32c1af 558 memcpy(&vcpu->run->s.regs.gprs[14], &vcpu->arch.sie_block->gg14, 16);
e168bf8d 559 return rc;
b0c632db
HC
560}
561
562int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
563{
8f2abe6a 564 int rc;
b0c632db
HC
565 sigset_t sigsaved;
566
9ace903d 567rerun_vcpu:
b0c632db
HC
568 if (vcpu->sigset_active)
569 sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);
570
9e6dabef 571 atomic_clear_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);
b0c632db 572
ba5c1e9b
CO
573 BUG_ON(vcpu->kvm->arch.float_int.local_int[vcpu->vcpu_id] == NULL);
574
8f2abe6a
CB
575 switch (kvm_run->exit_reason) {
576 case KVM_EXIT_S390_SIEIC:
8f2abe6a 577 case KVM_EXIT_UNKNOWN:
9ace903d 578 case KVM_EXIT_INTR:
8f2abe6a 579 case KVM_EXIT_S390_RESET:
e168bf8d 580 case KVM_EXIT_S390_UCONTROL:
8f2abe6a
CB
581 break;
582 default:
583 BUG();
584 }
585
d7b0b5eb
CO
586 vcpu->arch.sie_block->gpsw.mask = kvm_run->psw_mask;
587 vcpu->arch.sie_block->gpsw.addr = kvm_run->psw_addr;
60b413c9
CB
588 if (kvm_run->kvm_dirty_regs & KVM_SYNC_PREFIX) {
589 kvm_run->kvm_dirty_regs &= ~KVM_SYNC_PREFIX;
590 kvm_s390_set_prefix(vcpu, kvm_run->s.regs.prefix);
591 }
9eed0735
CB
592 if (kvm_run->kvm_dirty_regs & KVM_SYNC_CRS) {
593 kvm_run->kvm_dirty_regs &= ~KVM_SYNC_CRS;
594 memcpy(&vcpu->arch.sie_block->gcr, &kvm_run->s.regs.crs, 128);
595 kvm_s390_set_prefix(vcpu, kvm_run->s.regs.prefix);
596 }
d7b0b5eb 597
dab4079d 598 might_fault();
8f2abe6a
CB
599
600 do {
e168bf8d
CO
601 rc = __vcpu_run(vcpu);
602 if (rc)
603 break;
c0d744a9
CO
604 if (kvm_is_ucontrol(vcpu->kvm))
605 rc = -EOPNOTSUPP;
606 else
607 rc = kvm_handle_sie_intercept(vcpu);
8f2abe6a
CB
608 } while (!signal_pending(current) && !rc);
609
9ace903d
CE
610 if (rc == SIE_INTERCEPT_RERUNVCPU)
611 goto rerun_vcpu;
612
b1d16c49
CE
613 if (signal_pending(current) && !rc) {
614 kvm_run->exit_reason = KVM_EXIT_INTR;
8f2abe6a 615 rc = -EINTR;
b1d16c49 616 }
8f2abe6a 617
e168bf8d
CO
618#ifdef CONFIG_KVM_S390_UCONTROL
619 if (rc == SIE_INTERCEPT_UCONTROL) {
620 kvm_run->exit_reason = KVM_EXIT_S390_UCONTROL;
621 kvm_run->s390_ucontrol.trans_exc_code =
622 current->thread.gmap_addr;
623 kvm_run->s390_ucontrol.pgm_code = 0x10;
624 rc = 0;
625 }
626#endif
627
b8e660b8 628 if (rc == -EOPNOTSUPP) {
8f2abe6a
CB
629 /* intercept cannot be handled in-kernel, prepare kvm-run */
630 kvm_run->exit_reason = KVM_EXIT_S390_SIEIC;
631 kvm_run->s390_sieic.icptcode = vcpu->arch.sie_block->icptcode;
8f2abe6a
CB
632 kvm_run->s390_sieic.ipa = vcpu->arch.sie_block->ipa;
633 kvm_run->s390_sieic.ipb = vcpu->arch.sie_block->ipb;
634 rc = 0;
635 }
636
637 if (rc == -EREMOTE) {
638 /* intercept was handled, but userspace support is needed
639 * kvm_run has been prepared by the handler */
640 rc = 0;
641 }
b0c632db 642
d7b0b5eb
CO
643 kvm_run->psw_mask = vcpu->arch.sie_block->gpsw.mask;
644 kvm_run->psw_addr = vcpu->arch.sie_block->gpsw.addr;
60b413c9 645 kvm_run->s.regs.prefix = vcpu->arch.sie_block->prefix;
9eed0735 646 memcpy(&kvm_run->s.regs.crs, &vcpu->arch.sie_block->gcr, 128);
d7b0b5eb 647
b0c632db
HC
648 if (vcpu->sigset_active)
649 sigprocmask(SIG_SETMASK, &sigsaved, NULL);
650
b0c632db 651 vcpu->stat.exit_userspace++;
7e8e6ab4 652 return rc;
b0c632db
HC
653}
654
092670cd 655static int __guestcopy(struct kvm_vcpu *vcpu, u64 guestdest, void *from,
b0c632db
HC
656 unsigned long n, int prefix)
657{
658 if (prefix)
659 return copy_to_guest(vcpu, guestdest, from, n);
660 else
661 return copy_to_guest_absolute(vcpu, guestdest, from, n);
662}
663
664/*
665 * store status at address
666 * we use have two special cases:
667 * KVM_S390_STORE_STATUS_NOADDR: -> 0x1200 on 64 bit
668 * KVM_S390_STORE_STATUS_PREFIXED: -> prefix
669 */
971eb77f 670int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
b0c632db 671{
092670cd 672 unsigned char archmode = 1;
b0c632db
HC
673 int prefix;
674
675 if (addr == KVM_S390_STORE_STATUS_NOADDR) {
676 if (copy_to_guest_absolute(vcpu, 163ul, &archmode, 1))
677 return -EFAULT;
678 addr = SAVE_AREA_BASE;
679 prefix = 0;
680 } else if (addr == KVM_S390_STORE_STATUS_PREFIXED) {
681 if (copy_to_guest(vcpu, 163ul, &archmode, 1))
682 return -EFAULT;
683 addr = SAVE_AREA_BASE;
684 prefix = 1;
685 } else
686 prefix = 0;
687
f64ca217 688 if (__guestcopy(vcpu, addr + offsetof(struct save_area, fp_regs),
b0c632db
HC
689 vcpu->arch.guest_fpregs.fprs, 128, prefix))
690 return -EFAULT;
691
f64ca217 692 if (__guestcopy(vcpu, addr + offsetof(struct save_area, gp_regs),
5a32c1af 693 vcpu->run->s.regs.gprs, 128, prefix))
b0c632db
HC
694 return -EFAULT;
695
f64ca217 696 if (__guestcopy(vcpu, addr + offsetof(struct save_area, psw),
b0c632db
HC
697 &vcpu->arch.sie_block->gpsw, 16, prefix))
698 return -EFAULT;
699
f64ca217 700 if (__guestcopy(vcpu, addr + offsetof(struct save_area, pref_reg),
b0c632db
HC
701 &vcpu->arch.sie_block->prefix, 4, prefix))
702 return -EFAULT;
703
704 if (__guestcopy(vcpu,
f64ca217 705 addr + offsetof(struct save_area, fp_ctrl_reg),
b0c632db
HC
706 &vcpu->arch.guest_fpregs.fpc, 4, prefix))
707 return -EFAULT;
708
f64ca217 709 if (__guestcopy(vcpu, addr + offsetof(struct save_area, tod_reg),
b0c632db
HC
710 &vcpu->arch.sie_block->todpr, 4, prefix))
711 return -EFAULT;
712
f64ca217 713 if (__guestcopy(vcpu, addr + offsetof(struct save_area, timer),
b0c632db
HC
714 &vcpu->arch.sie_block->cputm, 8, prefix))
715 return -EFAULT;
716
f64ca217 717 if (__guestcopy(vcpu, addr + offsetof(struct save_area, clk_cmp),
b0c632db
HC
718 &vcpu->arch.sie_block->ckc, 8, prefix))
719 return -EFAULT;
720
f64ca217 721 if (__guestcopy(vcpu, addr + offsetof(struct save_area, acc_regs),
59674c1a 722 &vcpu->run->s.regs.acrs, 64, prefix))
b0c632db
HC
723 return -EFAULT;
724
725 if (__guestcopy(vcpu,
f64ca217 726 addr + offsetof(struct save_area, ctrl_regs),
b0c632db
HC
727 &vcpu->arch.sie_block->gcr, 128, prefix))
728 return -EFAULT;
729 return 0;
730}
731
b0c632db
HC
732long kvm_arch_vcpu_ioctl(struct file *filp,
733 unsigned int ioctl, unsigned long arg)
734{
735 struct kvm_vcpu *vcpu = filp->private_data;
736 void __user *argp = (void __user *)arg;
bc923cc9 737 long r;
b0c632db 738
93736624
AK
739 switch (ioctl) {
740 case KVM_S390_INTERRUPT: {
ba5c1e9b
CO
741 struct kvm_s390_interrupt s390int;
742
93736624 743 r = -EFAULT;
ba5c1e9b 744 if (copy_from_user(&s390int, argp, sizeof(s390int)))
93736624
AK
745 break;
746 r = kvm_s390_inject_vcpu(vcpu, &s390int);
747 break;
ba5c1e9b 748 }
b0c632db 749 case KVM_S390_STORE_STATUS:
bc923cc9
AK
750 r = kvm_s390_vcpu_store_status(vcpu, arg);
751 break;
b0c632db
HC
752 case KVM_S390_SET_INITIAL_PSW: {
753 psw_t psw;
754
bc923cc9 755 r = -EFAULT;
b0c632db 756 if (copy_from_user(&psw, argp, sizeof(psw)))
bc923cc9
AK
757 break;
758 r = kvm_arch_vcpu_ioctl_set_initial_psw(vcpu, psw);
759 break;
b0c632db
HC
760 }
761 case KVM_S390_INITIAL_RESET:
bc923cc9
AK
762 r = kvm_arch_vcpu_ioctl_initial_reset(vcpu);
763 break;
27e0393f
CO
764#ifdef CONFIG_KVM_S390_UCONTROL
765 case KVM_S390_UCAS_MAP: {
766 struct kvm_s390_ucas_mapping ucasmap;
767
768 if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
769 r = -EFAULT;
770 break;
771 }
772
773 if (!kvm_is_ucontrol(vcpu->kvm)) {
774 r = -EINVAL;
775 break;
776 }
777
778 r = gmap_map_segment(vcpu->arch.gmap, ucasmap.user_addr,
779 ucasmap.vcpu_addr, ucasmap.length);
780 break;
781 }
782 case KVM_S390_UCAS_UNMAP: {
783 struct kvm_s390_ucas_mapping ucasmap;
784
785 if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
786 r = -EFAULT;
787 break;
788 }
789
790 if (!kvm_is_ucontrol(vcpu->kvm)) {
791 r = -EINVAL;
792 break;
793 }
794
795 r = gmap_unmap_segment(vcpu->arch.gmap, ucasmap.vcpu_addr,
796 ucasmap.length);
797 break;
798 }
799#endif
ccc7910f
CO
800 case KVM_S390_VCPU_FAULT: {
801 r = gmap_fault(arg, vcpu->arch.gmap);
802 if (!IS_ERR_VALUE(r))
803 r = 0;
804 break;
805 }
b0c632db 806 default:
3e6afcf1 807 r = -ENOTTY;
b0c632db 808 }
bc923cc9 809 return r;
b0c632db
HC
810}
811
5b1c1493
CO
812int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
813{
814#ifdef CONFIG_KVM_S390_UCONTROL
815 if ((vmf->pgoff == KVM_S390_SIE_PAGE_OFFSET)
816 && (kvm_is_ucontrol(vcpu->kvm))) {
817 vmf->page = virt_to_page(vcpu->arch.sie_block);
818 get_page(vmf->page);
819 return 0;
820 }
821#endif
822 return VM_FAULT_SIGBUS;
823}
824
db3fe4eb
TY
void kvm_arch_free_memslot(struct kvm_memory_slot *free,
			   struct kvm_memory_slot *dont)
{
	/* No architecture-private memslot data to free on s390. */
}
829
int kvm_arch_create_memslot(struct kvm_memory_slot *slot, unsigned long npages)
{
	/* No architecture-private memslot data to allocate on s390. */
	return 0;
}
834
b0c632db 835/* Section: memory related */
f7784b8e
MT
836int kvm_arch_prepare_memory_region(struct kvm *kvm,
837 struct kvm_memory_slot *memslot,
838 struct kvm_memory_slot old,
839 struct kvm_userspace_memory_region *mem,
840 int user_alloc)
b0c632db
HC
841{
842 /* A few sanity checks. We can have exactly one memory slot which has
843 to start at guest virtual zero and which has to be located at a
844 page boundary in userland and which has to end at a page boundary.
845 The memory in userland is ok to be fragmented into various different
846 vmas. It is okay to mmap() and munmap() stuff in this slot after
847 doing this call at any time */
848
628eb9b8 849 if (mem->slot)
b0c632db
HC
850 return -EINVAL;
851
852 if (mem->guest_phys_addr)
853 return -EINVAL;
854
598841ca 855 if (mem->userspace_addr & 0xffffful)
b0c632db
HC
856 return -EINVAL;
857
598841ca 858 if (mem->memory_size & 0xffffful)
b0c632db
HC
859 return -EINVAL;
860
2668dab7
CO
861 if (!user_alloc)
862 return -EINVAL;
863
f7784b8e
MT
864 return 0;
865}
866
867void kvm_arch_commit_memory_region(struct kvm *kvm,
868 struct kvm_userspace_memory_region *mem,
869 struct kvm_memory_slot old,
870 int user_alloc)
871{
f7850c92 872 int rc;
f7784b8e 873
598841ca
CO
874
875 rc = gmap_map_segment(kvm->arch.gmap, mem->userspace_addr,
876 mem->guest_phys_addr, mem->memory_size);
877 if (rc)
f7850c92 878 printk(KERN_WARNING "kvm-s390: failed to commit memory region\n");
598841ca 879 return;
b0c632db
HC
880}
881
34d4cb8f
MT
void kvm_arch_flush_shadow(struct kvm *kvm)
{
	/* No shadow page tables to flush on s390. */
}
885
b0c632db
HC
886static int __init kvm_s390_init(void)
887{
ef50f7ac 888 int ret;
0ee75bea 889 ret = kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
ef50f7ac
CB
890 if (ret)
891 return ret;
892
893 /*
894 * guests can ask for up to 255+1 double words, we need a full page
25985edc 895 * to hold the maximum amount of facilities. On the other hand, we
ef50f7ac
CB
896 * only set facilities that are known to work in KVM.
897 */
c2f0e8c8 898 facilities = (unsigned long long *) get_zeroed_page(GFP_KERNEL|GFP_DMA);
ef50f7ac
CB
899 if (!facilities) {
900 kvm_exit();
901 return -ENOMEM;
902 }
14375bc4 903 memcpy(facilities, S390_lowcore.stfle_fac_list, 16);
6d00d00b 904 facilities[0] &= 0xff00fff3f47c0000ULL;
9950f8be 905 facilities[1] &= 0x201c000000000000ULL;
ef50f7ac 906 return 0;
b0c632db
HC
907}
908
909static void __exit kvm_s390_exit(void)
910{
ef50f7ac 911 free_page((unsigned long) facilities);
b0c632db
HC
912 kvm_exit();
913}
914
915module_init(kvm_s390_init);
916module_exit(kvm_s390_exit);