]> git.proxmox.com Git - mirror_ubuntu-artful-kernel.git/blame - arch/s390/kvm/kvm-s390.c
s390/lgr: Add init check to lgr_info_log()
[mirror_ubuntu-artful-kernel.git] / arch / s390 / kvm / kvm-s390.c
CommitLineData
b0c632db
HC
1/*
2 * s390host.c -- hosting zSeries kernel virtual machines
3 *
628eb9b8 4 * Copyright IBM Corp. 2008,2009
b0c632db
HC
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License (version 2 only)
8 * as published by the Free Software Foundation.
9 *
10 * Author(s): Carsten Otte <cotte@de.ibm.com>
11 * Christian Borntraeger <borntraeger@de.ibm.com>
12 * Heiko Carstens <heiko.carstens@de.ibm.com>
628eb9b8 13 * Christian Ehrhardt <ehrhardt@de.ibm.com>
b0c632db
HC
14 */
15
16#include <linux/compiler.h>
17#include <linux/err.h>
18#include <linux/fs.h>
ca872302 19#include <linux/hrtimer.h>
b0c632db
HC
20#include <linux/init.h>
21#include <linux/kvm.h>
22#include <linux/kvm_host.h>
23#include <linux/module.h>
24#include <linux/slab.h>
ba5c1e9b 25#include <linux/timer.h>
cbb870c8 26#include <asm/asm-offsets.h>
b0c632db
HC
27#include <asm/lowcore.h>
28#include <asm/pgtable.h>
f5daba1d 29#include <asm/nmi.h>
a0616cde 30#include <asm/switch_to.h>
1526bf9c 31#include <asm/sclp.h>
8f2abe6a 32#include "kvm-s390.h"
b0c632db
HC
33#include "gaccess.h"
34
/* Expand to the (offset, kind) pair expected by struct kvm_stats_debugfs_item. */
#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU

/*
 * Per-vcpu statistics exported via debugfs; consumed by common KVM code.
 * The array is terminated by a NULL-named sentinel entry.
 */
struct kvm_stats_debugfs_item debugfs_entries[] = {
	{ "userspace_handled", VCPU_STAT(exit_userspace) },
	{ "exit_null", VCPU_STAT(exit_null) },
	{ "exit_validity", VCPU_STAT(exit_validity) },
	{ "exit_stop_request", VCPU_STAT(exit_stop_request) },
	{ "exit_external_request", VCPU_STAT(exit_external_request) },
	{ "exit_external_interrupt", VCPU_STAT(exit_external_interrupt) },
	{ "exit_instruction", VCPU_STAT(exit_instruction) },
	{ "exit_program_interruption", VCPU_STAT(exit_program_interruption) },
	{ "exit_instr_and_program_int", VCPU_STAT(exit_instr_and_program) },
	{ "instruction_lctlg", VCPU_STAT(instruction_lctlg) },
	{ "instruction_lctl", VCPU_STAT(instruction_lctl) },
	{ "deliver_emergency_signal", VCPU_STAT(deliver_emergency_signal) },
	{ "deliver_external_call", VCPU_STAT(deliver_external_call) },
	{ "deliver_service_signal", VCPU_STAT(deliver_service_signal) },
	{ "deliver_virtio_interrupt", VCPU_STAT(deliver_virtio_interrupt) },
	{ "deliver_stop_signal", VCPU_STAT(deliver_stop_signal) },
	{ "deliver_prefix_signal", VCPU_STAT(deliver_prefix_signal) },
	{ "deliver_restart_signal", VCPU_STAT(deliver_restart_signal) },
	{ "deliver_program_interruption", VCPU_STAT(deliver_program_int) },
	{ "exit_wait_state", VCPU_STAT(exit_wait_state) },
	{ "instruction_stidp", VCPU_STAT(instruction_stidp) },
	{ "instruction_spx", VCPU_STAT(instruction_spx) },
	{ "instruction_stpx", VCPU_STAT(instruction_stpx) },
	{ "instruction_stap", VCPU_STAT(instruction_stap) },
	{ "instruction_storage_key", VCPU_STAT(instruction_storage_key) },
	{ "instruction_stsch", VCPU_STAT(instruction_stsch) },
	{ "instruction_chsc", VCPU_STAT(instruction_chsc) },
	{ "instruction_stsi", VCPU_STAT(instruction_stsi) },
	{ "instruction_stfl", VCPU_STAT(instruction_stfl) },
	{ "instruction_tprot", VCPU_STAT(instruction_tprot) },
	{ "instruction_sigp_sense", VCPU_STAT(instruction_sigp_sense) },
	{ "instruction_sigp_sense_running", VCPU_STAT(instruction_sigp_sense_running) },
	{ "instruction_sigp_external_call", VCPU_STAT(instruction_sigp_external_call) },
	{ "instruction_sigp_emergency", VCPU_STAT(instruction_sigp_emergency) },
	{ "instruction_sigp_stop", VCPU_STAT(instruction_sigp_stop) },
	{ "instruction_sigp_set_arch", VCPU_STAT(instruction_sigp_arch) },
	{ "instruction_sigp_set_prefix", VCPU_STAT(instruction_sigp_prefix) },
	{ "instruction_sigp_restart", VCPU_STAT(instruction_sigp_restart) },
	{ "diagnose_10", VCPU_STAT(diagnose_10) },
	{ "diagnose_44", VCPU_STAT(diagnose_44) },
	{ "diagnose_9c", VCPU_STAT(diagnose_9c) },
	{ NULL }
};

/* DMA-capable page holding the facility bits advertised to guests;
 * allocated and masked in kvm_s390_init(), freed in kvm_s390_exit(). */
static unsigned long long *facilities;
b0c632db
HC
83
/* Section: not file related */
int kvm_arch_hardware_enable(void *garbage)
{
	/* every s390 is virtualization enabled ;-) */
	return 0;
}

void kvm_arch_hardware_disable(void *garbage)
{
	/* Nothing to tear down; see kvm_arch_hardware_enable(). */
}

int kvm_arch_hardware_setup(void)
{
	/* No global hardware state to set up on s390. */
	return 0;
}

void kvm_arch_hardware_unsetup(void)
{
}

void kvm_arch_check_processor_compat(void *rtn)
{
	/* All s390 CPUs that run this kernel can run SIE guests. */
}

int kvm_arch_init(void *opaque)
{
	return 0;
}

void kvm_arch_exit(void)
{
}
116
117/* Section: device related */
118long kvm_arch_dev_ioctl(struct file *filp,
119 unsigned int ioctl, unsigned long arg)
120{
121 if (ioctl == KVM_S390_ENABLE_SIE)
122 return s390_enable_sie();
123 return -EINVAL;
124}
125
126int kvm_dev_ioctl_check_extension(long ext)
127{
d7b0b5eb
CO
128 int r;
129
2bd0ac4e 130 switch (ext) {
d7b0b5eb 131 case KVM_CAP_S390_PSW:
b6cf8788 132 case KVM_CAP_S390_GMAP:
52e16b18 133 case KVM_CAP_SYNC_MMU:
1efd0f59
CO
134#ifdef CONFIG_KVM_S390_UCONTROL
135 case KVM_CAP_S390_UCONTROL:
136#endif
60b413c9 137 case KVM_CAP_SYNC_REGS:
14eebd91 138 case KVM_CAP_ONE_REG:
d7b0b5eb
CO
139 r = 1;
140 break;
e726b1bd
CB
141 case KVM_CAP_NR_VCPUS:
142 case KVM_CAP_MAX_VCPUS:
143 r = KVM_MAX_VCPUS;
144 break;
1526bf9c
CB
145 case KVM_CAP_S390_COW:
146 r = sclp_get_fac85() & 0x2;
147 break;
2bd0ac4e 148 default:
d7b0b5eb 149 r = 0;
2bd0ac4e 150 }
d7b0b5eb 151 return r;
b0c632db
HC
152}
153
/* Section: vm related */
/*
 * Get (and clear) the dirty memory log for a memory slot.
 */
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
			       struct kvm_dirty_log *log)
{
	/* Dirty-page logging is not implemented on s390 yet; report success
	 * with an empty log so userspace keeps working. */
	return 0;
}
163
164long kvm_arch_vm_ioctl(struct file *filp,
165 unsigned int ioctl, unsigned long arg)
166{
167 struct kvm *kvm = filp->private_data;
168 void __user *argp = (void __user *)arg;
169 int r;
170
171 switch (ioctl) {
ba5c1e9b
CO
172 case KVM_S390_INTERRUPT: {
173 struct kvm_s390_interrupt s390int;
174
175 r = -EFAULT;
176 if (copy_from_user(&s390int, argp, sizeof(s390int)))
177 break;
178 r = kvm_s390_inject_vm(kvm, &s390int);
179 break;
180 }
b0c632db 181 default:
367e1319 182 r = -ENOTTY;
b0c632db
HC
183 }
184
185 return r;
186}
187
e08b9637 188int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
b0c632db 189{
b0c632db
HC
190 int rc;
191 char debug_name[16];
192
e08b9637
CO
193 rc = -EINVAL;
194#ifdef CONFIG_KVM_S390_UCONTROL
195 if (type & ~KVM_VM_S390_UCONTROL)
196 goto out_err;
197 if ((type & KVM_VM_S390_UCONTROL) && (!capable(CAP_SYS_ADMIN)))
198 goto out_err;
199#else
200 if (type)
201 goto out_err;
202#endif
203
b0c632db
HC
204 rc = s390_enable_sie();
205 if (rc)
d89f5eff 206 goto out_err;
b0c632db 207
b290411a
CO
208 rc = -ENOMEM;
209
b0c632db
HC
210 kvm->arch.sca = (struct sca_block *) get_zeroed_page(GFP_KERNEL);
211 if (!kvm->arch.sca)
d89f5eff 212 goto out_err;
b0c632db
HC
213
214 sprintf(debug_name, "kvm-%u", current->pid);
215
216 kvm->arch.dbf = debug_register(debug_name, 8, 2, 8 * sizeof(long));
217 if (!kvm->arch.dbf)
218 goto out_nodbf;
219
ba5c1e9b
CO
220 spin_lock_init(&kvm->arch.float_int.lock);
221 INIT_LIST_HEAD(&kvm->arch.float_int.list);
222
b0c632db
HC
223 debug_register_view(kvm->arch.dbf, &debug_sprintf_view);
224 VM_EVENT(kvm, 3, "%s", "vm created");
225
e08b9637
CO
226 if (type & KVM_VM_S390_UCONTROL) {
227 kvm->arch.gmap = NULL;
228 } else {
229 kvm->arch.gmap = gmap_alloc(current->mm);
230 if (!kvm->arch.gmap)
231 goto out_nogmap;
232 }
d89f5eff 233 return 0;
598841ca
CO
234out_nogmap:
235 debug_unregister(kvm->arch.dbf);
b0c632db
HC
236out_nodbf:
237 free_page((unsigned long)(kvm->arch.sca));
d89f5eff
JK
238out_err:
239 return rc;
b0c632db
HC
240}
241
/*
 * Free one vcpu: detach it from the SCA (so the hardware no longer
 * references its SIE block), release its gmap (ucontrol only), and free
 * the SIE block and the vcpu structure itself.
 */
void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{
	VCPU_EVENT(vcpu, 3, "%s", "free cpu");
	if (!kvm_is_ucontrol(vcpu->kvm)) {
		/* drop this cpu from the SCA's cpu mask ... */
		clear_bit(63 - vcpu->vcpu_id,
			  (unsigned long *) &vcpu->kvm->arch.sca->mcn);
		/* ... and clear its SDA entry if it still points at us */
		if (vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda ==
		    (__u64) vcpu->arch.sie_block)
			vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda = 0;
	}
	/* make the SCA updates visible before the pages are freed */
	smp_mb();

	if (kvm_is_ucontrol(vcpu->kvm))
		gmap_free(vcpu->arch.gmap);

	free_page((unsigned long)(vcpu->arch.sie_block));
	kvm_vcpu_uninit(vcpu);
	kfree(vcpu);
}
261
/*
 * Destroy every vcpu of a VM and reset the vcpu bookkeeping under
 * kvm->lock so concurrent readers never see a stale vcpu pointer.
 */
static void kvm_free_vcpus(struct kvm *kvm)
{
	unsigned int i;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(i, vcpu, kvm)
		kvm_arch_vcpu_destroy(vcpu);

	mutex_lock(&kvm->lock);
	for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
		kvm->vcpus[i] = NULL;

	atomic_set(&kvm->online_vcpus, 0);
	mutex_unlock(&kvm->lock);
}
277
void kvm_arch_sync_events(struct kvm *kvm)
{
	/* No arch-specific event sources to flush on s390. */
}

/*
 * Tear down the arch part of a VM: vcpus first (they reference the SCA),
 * then the SCA page, the debug feature, and finally the guest gmap
 * (which ucontrol VMs never allocate at VM scope).
 */
void kvm_arch_destroy_vm(struct kvm *kvm)
{
	kvm_free_vcpus(kvm);
	free_page((unsigned long)(kvm->arch.sca));
	debug_unregister(kvm->arch.dbf);
	if (!kvm_is_ucontrol(kvm))
		gmap_free(kvm->arch.gmap);
}
290
/* Section: vcpu related */
/*
 * Per-vcpu init: ucontrol guests get their own address space, regular
 * guests share the VM-wide gmap and advertise which register sets the
 * kvm_run area can sync.
 */
int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
{
	if (kvm_is_ucontrol(vcpu->kvm)) {
		vcpu->arch.gmap = gmap_alloc(current->mm);
		if (!vcpu->arch.gmap)
			return -ENOMEM;
		return 0;
	}

	vcpu->arch.gmap = vcpu->kvm->arch.gmap;
	vcpu->run->kvm_valid_regs = KVM_SYNC_PREFIX |
				    KVM_SYNC_GPRS |
				    KVM_SYNC_ACRS |
				    KVM_SYNC_CRS;
	return 0;
}

void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
{
	/* Nothing todo */
}
313
/*
 * Called when this vcpu is scheduled onto a host cpu: stash the host FP
 * and access registers, install the guest's, attach the gmap and mark
 * the vcpu running. Order matters — host state must be saved before
 * guest state is restored.
 */
void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	save_fp_regs(&vcpu->arch.host_fpregs);
	save_access_regs(vcpu->arch.host_acrs);
	/* strip unsupported bits before loading the guest fpc */
	vcpu->arch.guest_fpregs.fpc &= FPC_VALID_MASK;
	restore_fp_regs(&vcpu->arch.guest_fpregs);
	restore_access_regs(vcpu->run->s.regs.acrs);
	gmap_enable(vcpu->arch.gmap);
	atomic_set_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
}

/*
 * Mirror image of kvm_arch_vcpu_load(): mark the vcpu not running,
 * detach the gmap, save guest state and restore the host's.
 */
void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
	atomic_clear_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
	gmap_disable(vcpu->arch.gmap);
	save_fp_regs(&vcpu->arch.guest_fpregs);
	save_access_regs(vcpu->run->s.regs.acrs);
	restore_fp_regs(&vcpu->arch.host_fpregs);
	restore_access_regs(vcpu->arch.host_acrs);
}
334
static void kvm_s390_vcpu_initial_reset(struct kvm_vcpu *vcpu)
{
	/* this equals initial cpu reset in pop, but we don't switch to ESA */
	vcpu->arch.sie_block->gpsw.mask = 0UL;
	vcpu->arch.sie_block->gpsw.addr = 0UL;
	kvm_s390_set_prefix(vcpu, 0);
	vcpu->arch.sie_block->cputm = 0UL;
	vcpu->arch.sie_block->ckc = 0UL;
	vcpu->arch.sie_block->todpr = 0;
	memset(vcpu->arch.sie_block->gcr, 0, 16 * sizeof(__u64));
	/* architected reset values for control registers 0 and 14 */
	vcpu->arch.sie_block->gcr[0] = 0xE0UL;
	vcpu->arch.sie_block->gcr[14] = 0xC2000000UL;
	vcpu->arch.guest_fpregs.fpc = 0;
	/* load the cleared FP control register into the hardware */
	asm volatile("lfpc %0" : : "Q" (vcpu->arch.guest_fpregs.fpc));
	vcpu->arch.sie_block->gbea = 1;
}
351
/*
 * One-time SIE control block setup for a freshly created vcpu:
 * initial cpuflags, execution controls, the facility list page and the
 * clock-comparator wakeup machinery.
 */
int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
{
	atomic_set(&vcpu->arch.sie_block->cpuflags, CPUSTAT_ZARCH |
						    CPUSTAT_SM |
						    CPUSTAT_STOPPED);
	vcpu->arch.sie_block->ecb = 6;
	vcpu->arch.sie_block->eca = 0xC1002001U;
	/* SIE takes a 31-bit real address of the facility list page */
	vcpu->arch.sie_block->fac = (int) (long) facilities;
	hrtimer_init(&vcpu->arch.ckc_timer, CLOCK_REALTIME, HRTIMER_MODE_ABS);
	tasklet_init(&vcpu->arch.tasklet, kvm_s390_tasklet,
		     (unsigned long) vcpu);
	vcpu->arch.ckc_timer.function = kvm_s390_idle_wakeup;
	get_cpu_id(&vcpu->arch.cpu_id);
	/* version 0xff tells the guest it runs under an hypervisor */
	vcpu->arch.cpu_id.version = 0xff;
	return 0;
}
368
/*
 * Allocate and wire up a new vcpu: the vcpu structure itself, its SIE
 * control block, the SCA entry (non-ucontrol only) and the local
 * interrupt bookkeeping. On any failure the goto chain unwinds exactly
 * what was already acquired and an ERR_PTR is returned.
 */
struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
				      unsigned int id)
{
	struct kvm_vcpu *vcpu;
	int rc = -EINVAL;

	if (id >= KVM_MAX_VCPUS)
		goto out;

	rc = -ENOMEM;

	vcpu = kzalloc(sizeof(struct kvm_vcpu), GFP_KERNEL);
	if (!vcpu)
		goto out;

	/* the SIE block must be a zeroed full page */
	vcpu->arch.sie_block = (struct kvm_s390_sie_block *)
		get_zeroed_page(GFP_KERNEL);

	if (!vcpu->arch.sie_block)
		goto out_free_cpu;

	vcpu->arch.sie_block->icpua = id;
	if (!kvm_is_ucontrol(kvm)) {
		/* non-ucontrol guests must have an SCA from init_vm */
		if (!kvm->arch.sca) {
			WARN_ON_ONCE(1);
			goto out_free_cpu;
		}
		if (!kvm->arch.sca->cpu[id].sda)
			kvm->arch.sca->cpu[id].sda =
				(__u64) vcpu->arch.sie_block;
		/* point the SIE block back at the SCA (split high/low) */
		vcpu->arch.sie_block->scaoh =
			(__u32)(((__u64)kvm->arch.sca) >> 32);
		vcpu->arch.sie_block->scaol = (__u32)(__u64)kvm->arch.sca;
		set_bit(63 - id, (unsigned long *) &kvm->arch.sca->mcn);
	}

	spin_lock_init(&vcpu->arch.local_int.lock);
	INIT_LIST_HEAD(&vcpu->arch.local_int.list);
	vcpu->arch.local_int.float_int = &kvm->arch.float_int;
	/* publish the local interrupt struct under the float_int lock */
	spin_lock(&kvm->arch.float_int.lock);
	kvm->arch.float_int.local_int[id] = &vcpu->arch.local_int;
	init_waitqueue_head(&vcpu->arch.local_int.wq);
	vcpu->arch.local_int.cpuflags = &vcpu->arch.sie_block->cpuflags;
	spin_unlock(&kvm->arch.float_int.lock);

	rc = kvm_vcpu_init(vcpu, kvm, id);
	if (rc)
		goto out_free_sie_block;
	VM_EVENT(kvm, 3, "create cpu %d at %p, sie block at %p", id, vcpu,
		 vcpu->arch.sie_block);

	return vcpu;
out_free_sie_block:
	free_page((unsigned long)(vcpu->arch.sie_block));
out_free_cpu:
	kfree(vcpu);
out:
	return ERR_PTR(rc);
}
428
int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
{
	/* kvm common code refers to this, but never calls it */
	BUG();
	return 0;
}

int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
{
	/* kvm common code refers to this, but never calls it */
	BUG();
	return 0;
}
442
/*
 * KVM_GET_ONE_REG: copy one s390-specific register out of the SIE block
 * to the user buffer at reg->addr. Returns -EINVAL for unknown ids,
 * otherwise the put_user() result.
 */
static int kvm_arch_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu,
					   struct kvm_one_reg *reg)
{
	int r = -EINVAL;

	switch (reg->id) {
	case KVM_REG_S390_TODPR:
		r = put_user(vcpu->arch.sie_block->todpr,
			     (u32 __user *)reg->addr);
		break;
	case KVM_REG_S390_EPOCHDIFF:
		r = put_user(vcpu->arch.sie_block->epoch,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_CPU_TIMER:
		r = put_user(vcpu->arch.sie_block->cputm,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_CLOCK_COMP:
		r = put_user(vcpu->arch.sie_block->ckc,
			     (u64 __user *)reg->addr);
		break;
	default:
		break;
	}

	return r;
}

/*
 * KVM_SET_ONE_REG: mirror of the getter above — copy one register value
 * from userspace into the SIE block.
 */
static int kvm_arch_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu,
					   struct kvm_one_reg *reg)
{
	int r = -EINVAL;

	switch (reg->id) {
	case KVM_REG_S390_TODPR:
		r = get_user(vcpu->arch.sie_block->todpr,
			     (u32 __user *)reg->addr);
		break;
	case KVM_REG_S390_EPOCHDIFF:
		r = get_user(vcpu->arch.sie_block->epoch,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_CPU_TIMER:
		r = get_user(vcpu->arch.sie_block->cputm,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_CLOCK_COMP:
		r = get_user(vcpu->arch.sie_block->ckc,
			     (u64 __user *)reg->addr);
		break;
	default:
		break;
	}

	return r;
}
b6d33834 500
static int kvm_arch_vcpu_ioctl_initial_reset(struct kvm_vcpu *vcpu)
{
	kvm_s390_vcpu_initial_reset(vcpu);
	return 0;
}

/* Copy all 16 general purpose registers from userspace into the vcpu. */
int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	memcpy(&vcpu->run->s.regs.gprs, &regs->gprs, sizeof(regs->gprs));
	return 0;
}

/* Copy all 16 general purpose registers from the vcpu to userspace. */
int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	memcpy(&regs->gprs, &vcpu->run->s.regs.gprs, sizeof(regs->gprs));
	return 0;
}

/* Set access and control registers; acrs are loaded into hardware
 * immediately because the vcpu may already be resident. */
int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	memcpy(&vcpu->run->s.regs.acrs, &sregs->acrs, sizeof(sregs->acrs));
	memcpy(&vcpu->arch.sie_block->gcr, &sregs->crs, sizeof(sregs->crs));
	restore_access_regs(vcpu->run->s.regs.acrs);
	return 0;
}

int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	memcpy(&sregs->acrs, &vcpu->run->s.regs.acrs, sizeof(sregs->acrs));
	memcpy(&sregs->crs, &vcpu->arch.sie_block->gcr, sizeof(sregs->crs));
	return 0;
}

/* Set the guest floating point state; invalid fpc bits are masked off
 * and the registers are loaded into hardware right away. */
int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	memcpy(&vcpu->arch.guest_fpregs.fprs, &fpu->fprs, sizeof(fpu->fprs));
	vcpu->arch.guest_fpregs.fpc = fpu->fpc & FPC_VALID_MASK;
	restore_fp_regs(&vcpu->arch.guest_fpregs);
	return 0;
}

int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	memcpy(&fpu->fprs, &vcpu->arch.guest_fpregs.fprs, sizeof(fpu->fprs));
	fpu->fpc = vcpu->arch.guest_fpregs.fpc;
	return 0;
}
550
551static int kvm_arch_vcpu_ioctl_set_initial_psw(struct kvm_vcpu *vcpu, psw_t psw)
552{
553 int rc = 0;
554
9e6dabef 555 if (!(atomic_read(&vcpu->arch.sie_block->cpuflags) & CPUSTAT_STOPPED))
b0c632db 556 rc = -EBUSY;
d7b0b5eb
CO
557 else {
558 vcpu->run->psw_mask = psw.mask;
559 vcpu->run->psw_addr = psw.addr;
560 }
b0c632db
HC
561 return rc;
562}
563
int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
				  struct kvm_translation *tr)
{
	return -EINVAL; /* not implemented yet */
}

int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
					struct kvm_guest_debug *dbg)
{
	return -EINVAL; /* not implemented yet */
}

int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	return -EINVAL; /* not implemented yet */
}

int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	return -EINVAL; /* not implemented yet */
}
587
/*
 * Execute one SIE entry/exit cycle for @vcpu.
 *
 * Syncs r14/r15 into the SIE block, handles pending host work
 * (reschedule, machine checks, interrupt delivery), enters the guest via
 * sie64a() and translates a SIE fault either into a ucontrol exit code
 * or an addressing exception injected into the guest.
 */
static int __vcpu_run(struct kvm_vcpu *vcpu)
{
	int rc;

	/* gg14/gg15 live in the SIE block; keep them in sync with gprs */
	memcpy(&vcpu->arch.sie_block->gg14, &vcpu->run->s.regs.gprs[14], 16);

	if (need_resched())
		schedule();

	if (test_thread_flag(TIF_MCCK_PENDING))
		s390_handle_mcck();

	/* ucontrol guests have interrupts delivered by userspace */
	if (!kvm_is_ucontrol(vcpu->kvm))
		kvm_s390_deliver_pending_interrupts(vcpu);

	vcpu->arch.sie_block->icptcode = 0;
	/* guest-enter accounting must happen with irqs off */
	local_irq_disable();
	kvm_guest_enter();
	local_irq_enable();
	VCPU_EVENT(vcpu, 6, "entering sie flags %x",
		   atomic_read(&vcpu->arch.sie_block->cpuflags));
	rc = sie64a(vcpu->arch.sie_block, vcpu->run->s.regs.gprs);
	if (rc) {
		if (kvm_is_ucontrol(vcpu->kvm)) {
			rc = SIE_INTERCEPT_UCONTROL;
		} else {
			VCPU_EVENT(vcpu, 3, "%s", "fault in sie instruction");
			kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
			rc = 0;
		}
	}
	VCPU_EVENT(vcpu, 6, "exit sie icptcode %d",
		   vcpu->arch.sie_block->icptcode);
	local_irq_disable();
	kvm_guest_exit();
	local_irq_enable();

	memcpy(&vcpu->run->s.regs.gprs[14], &vcpu->arch.sie_block->gg14, 16);
	return rc;
}
628
/*
 * Main vcpu run loop (KVM_RUN ioctl).
 *
 * Syncs dirty register state from kvm_run into the SIE block, repeatedly
 * enters the guest via __vcpu_run() until an intercept requires
 * userspace, a signal arrives, or an error occurs, then syncs state back
 * and translates the internal rc into the userspace exit protocol.
 */
int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	int rc;
	sigset_t sigsaved;

rerun_vcpu:
	if (vcpu->sigset_active)
		sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);

	atomic_clear_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);

	BUG_ON(vcpu->kvm->arch.float_int.local_int[vcpu->vcpu_id] == NULL);

	/* only these exit reasons are legal on re-entry from userspace */
	switch (kvm_run->exit_reason) {
	case KVM_EXIT_S390_SIEIC:
	case KVM_EXIT_UNKNOWN:
	case KVM_EXIT_INTR:
	case KVM_EXIT_S390_RESET:
	case KVM_EXIT_S390_UCONTROL:
		break;
	default:
		BUG();
	}

	vcpu->arch.sie_block->gpsw.mask = kvm_run->psw_mask;
	vcpu->arch.sie_block->gpsw.addr = kvm_run->psw_addr;
	if (kvm_run->kvm_dirty_regs & KVM_SYNC_PREFIX) {
		kvm_run->kvm_dirty_regs &= ~KVM_SYNC_PREFIX;
		kvm_s390_set_prefix(vcpu, kvm_run->s.regs.prefix);
	}
	if (kvm_run->kvm_dirty_regs & KVM_SYNC_CRS) {
		kvm_run->kvm_dirty_regs &= ~KVM_SYNC_CRS;
		memcpy(&vcpu->arch.sie_block->gcr, &kvm_run->s.regs.crs, 128);
		kvm_s390_set_prefix(vcpu, kvm_run->s.regs.prefix);
	}

	might_fault();

	do {
		rc = __vcpu_run(vcpu);
		if (rc)
			break;
		/* ucontrol: every intercept goes back to userspace */
		if (kvm_is_ucontrol(vcpu->kvm))
			rc = -EOPNOTSUPP;
		else
			rc = kvm_handle_sie_intercept(vcpu);
	} while (!signal_pending(current) && !rc);

	if (rc == SIE_INTERCEPT_RERUNVCPU)
		goto rerun_vcpu;

	if (signal_pending(current) && !rc) {
		kvm_run->exit_reason = KVM_EXIT_INTR;
		rc = -EINTR;
	}

#ifdef CONFIG_KVM_S390_UCONTROL
	if (rc == SIE_INTERCEPT_UCONTROL) {
		kvm_run->exit_reason = KVM_EXIT_S390_UCONTROL;
		kvm_run->s390_ucontrol.trans_exc_code =
			current->thread.gmap_addr;
		kvm_run->s390_ucontrol.pgm_code = 0x10;
		rc = 0;
	}
#endif

	if (rc == -EOPNOTSUPP) {
		/* intercept cannot be handled in-kernel, prepare kvm-run */
		kvm_run->exit_reason = KVM_EXIT_S390_SIEIC;
		kvm_run->s390_sieic.icptcode = vcpu->arch.sie_block->icptcode;
		kvm_run->s390_sieic.ipa = vcpu->arch.sie_block->ipa;
		kvm_run->s390_sieic.ipb = vcpu->arch.sie_block->ipb;
		rc = 0;
	}

	if (rc == -EREMOTE) {
		/* intercept was handled, but userspace support is needed
		 * kvm_run has been prepared by the handler */
		rc = 0;
	}

	/* sync registers back for userspace */
	kvm_run->psw_mask = vcpu->arch.sie_block->gpsw.mask;
	kvm_run->psw_addr = vcpu->arch.sie_block->gpsw.addr;
	kvm_run->s.regs.prefix = vcpu->arch.sie_block->prefix;
	memcpy(&kvm_run->s.regs.crs, &vcpu->arch.sie_block->gcr, 128);

	if (vcpu->sigset_active)
		sigprocmask(SIG_SETMASK, &sigsaved, NULL);

	vcpu->stat.exit_userspace++;
	return rc;
}
721
092670cd 722static int __guestcopy(struct kvm_vcpu *vcpu, u64 guestdest, void *from,
b0c632db
HC
723 unsigned long n, int prefix)
724{
725 if (prefix)
726 return copy_to_guest(vcpu, guestdest, from, n);
727 else
728 return copy_to_guest_absolute(vcpu, guestdest, from, n);
729}
730
/*
 * store status at address
 * we have two special cases:
 * KVM_S390_STORE_STATUS_NOADDR: -> 0x1200 on 64 bit
 * KVM_S390_STORE_STATUS_PREFIXED: -> prefix
 *
 * Writes the architected store-status save area (fp regs, gprs, psw,
 * prefix, fpc, todpr, cpu timer, clock comparator, access and control
 * registers) into guest memory. Returns 0 or -EFAULT.
 */
int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
{
	unsigned char archmode = 1;
	int prefix;

	if (addr == KVM_S390_STORE_STATUS_NOADDR) {
		/* byte 163 flags z/Architecture mode to the guest */
		if (copy_to_guest_absolute(vcpu, 163ul, &archmode, 1))
			return -EFAULT;
		addr = SAVE_AREA_BASE;
		prefix = 0;
	} else if (addr == KVM_S390_STORE_STATUS_PREFIXED) {
		if (copy_to_guest(vcpu, 163ul, &archmode, 1))
			return -EFAULT;
		addr = SAVE_AREA_BASE;
		prefix = 1;
	} else
		prefix = 0;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, fp_regs),
			vcpu->arch.guest_fpregs.fprs, 128, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, gp_regs),
			vcpu->run->s.regs.gprs, 128, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, psw),
			&vcpu->arch.sie_block->gpsw, 16, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, pref_reg),
			&vcpu->arch.sie_block->prefix, 4, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu,
			addr + offsetof(struct save_area, fp_ctrl_reg),
			&vcpu->arch.guest_fpregs.fpc, 4, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, tod_reg),
			&vcpu->arch.sie_block->todpr, 4, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, timer),
			&vcpu->arch.sie_block->cputm, 8, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, clk_cmp),
			&vcpu->arch.sie_block->ckc, 8, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, acc_regs),
			&vcpu->run->s.regs.acrs, 64, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu,
			addr + offsetof(struct save_area, ctrl_regs),
			&vcpu->arch.sie_block->gcr, 128, prefix))
		return -EFAULT;
	return 0;
}
798
/*
 * Per-vcpu ioctl dispatcher: interrupt injection, store status, initial
 * PSW/reset, ONE_REG access, ucontrol address-space maps and page-fault
 * resolution.
 */
long kvm_arch_vcpu_ioctl(struct file *filp,
			 unsigned int ioctl, unsigned long arg)
{
	struct kvm_vcpu *vcpu = filp->private_data;
	void __user *argp = (void __user *)arg;
	long r;

	switch (ioctl) {
	case KVM_S390_INTERRUPT: {
		struct kvm_s390_interrupt s390int;

		r = -EFAULT;
		if (copy_from_user(&s390int, argp, sizeof(s390int)))
			break;
		r = kvm_s390_inject_vcpu(vcpu, &s390int);
		break;
	}
	case KVM_S390_STORE_STATUS:
		/* arg is the guest address, not a user pointer */
		r = kvm_s390_vcpu_store_status(vcpu, arg);
		break;
	case KVM_S390_SET_INITIAL_PSW: {
		psw_t psw;

		r = -EFAULT;
		if (copy_from_user(&psw, argp, sizeof(psw)))
			break;
		r = kvm_arch_vcpu_ioctl_set_initial_psw(vcpu, psw);
		break;
	}
	case KVM_S390_INITIAL_RESET:
		r = kvm_arch_vcpu_ioctl_initial_reset(vcpu);
		break;
	case KVM_SET_ONE_REG:
	case KVM_GET_ONE_REG: {
		struct kvm_one_reg reg;
		r = -EFAULT;
		if (copy_from_user(&reg, argp, sizeof(reg)))
			break;
		if (ioctl == KVM_SET_ONE_REG)
			r = kvm_arch_vcpu_ioctl_set_one_reg(vcpu, &reg);
		else
			r = kvm_arch_vcpu_ioctl_get_one_reg(vcpu, &reg);
		break;
	}
#ifdef CONFIG_KVM_S390_UCONTROL
	case KVM_S390_UCAS_MAP: {
		struct kvm_s390_ucas_mapping ucasmap;

		if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
			r = -EFAULT;
			break;
		}

		if (!kvm_is_ucontrol(vcpu->kvm)) {
			r = -EINVAL;
			break;
		}

		r = gmap_map_segment(vcpu->arch.gmap, ucasmap.user_addr,
				     ucasmap.vcpu_addr, ucasmap.length);
		break;
	}
	case KVM_S390_UCAS_UNMAP: {
		struct kvm_s390_ucas_mapping ucasmap;

		if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
			r = -EFAULT;
			break;
		}

		if (!kvm_is_ucontrol(vcpu->kvm)) {
			r = -EINVAL;
			break;
		}

		r = gmap_unmap_segment(vcpu->arch.gmap, ucasmap.vcpu_addr,
				       ucasmap.length);
		break;
	}
#endif
	case KVM_S390_VCPU_FAULT: {
		/* resolve a guest page fault; gmap_fault returns an address
		 * or an error value, callers only care about failure */
		r = gmap_fault(arg, vcpu->arch.gmap);
		if (!IS_ERR_VALUE(r))
			r = 0;
		break;
	}
	default:
		r = -ENOTTY;
	}
	return r;
}
890
/*
 * mmap fault handler for the vcpu fd: ucontrol guests may map the SIE
 * control block page into userspace; everything else gets SIGBUS.
 */
int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
{
#ifdef CONFIG_KVM_S390_UCONTROL
	if ((vmf->pgoff == KVM_S390_SIE_PAGE_OFFSET)
		 && (kvm_is_ucontrol(vcpu->kvm))) {
		vmf->page = virt_to_page(vcpu->arch.sie_block);
		get_page(vmf->page);
		return 0;
	}
#endif
	return VM_FAULT_SIGBUS;
}

void kvm_arch_free_memslot(struct kvm_memory_slot *free,
			   struct kvm_memory_slot *dont)
{
	/* No arch-private memslot data on s390. */
}

int kvm_arch_create_memslot(struct kvm_memory_slot *slot, unsigned long npages)
{
	return 0;
}
913
/* Section: memory related */
int kvm_arch_prepare_memory_region(struct kvm *kvm,
				   struct kvm_memory_slot *memslot,
				   struct kvm_memory_slot old,
				   struct kvm_userspace_memory_region *mem,
				   int user_alloc)
{
	/* A few sanity checks. We can have exactly one memory slot which has
	   to start at guest virtual zero and which has to be located at a
	   page boundary in userland and which has to end at a page boundary.
	   The memory in userland is ok to be fragmented into various different
	   vmas. It is okay to mmap() and munmap() stuff in this slot after
	   doing this call at any time */

	if (mem->slot)
		return -EINVAL;

	if (mem->guest_phys_addr)
		return -EINVAL;

	/* start and size must be 1 MB (segment) aligned */
	if (mem->userspace_addr & 0xffffful)
		return -EINVAL;

	if (mem->memory_size & 0xffffful)
		return -EINVAL;

	if (!user_alloc)
		return -EINVAL;

	return 0;
}

/*
 * Second stage of memslot setup: map the userspace range into the guest
 * address space. Failure is only logged — common code offers no way to
 * report an error from here.
 */
void kvm_arch_commit_memory_region(struct kvm *kvm,
				   struct kvm_userspace_memory_region *mem,
				   struct kvm_memory_slot old,
				   int user_alloc)
{
	int rc;


	rc = gmap_map_segment(kvm->arch.gmap, mem->userspace_addr,
		mem->guest_phys_addr, mem->memory_size);
	if (rc)
		printk(KERN_WARNING "kvm-s390: failed to commit memory region\n");
	return;
}

void kvm_arch_flush_shadow(struct kvm *kvm)
{
	/* No shadow page tables to flush on s390. */
}
964
/*
 * Module init: register with common KVM code, then build the facility
 * list page that is advertised to guests (see the 'facilities' pointer
 * used in kvm_arch_vcpu_setup()).
 */
static int __init kvm_s390_init(void)
{
	int ret;
	ret = kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
	if (ret)
		return ret;

	/*
	 * guests can ask for up to 255+1 double words, we need a full page
	 * to hold the maximum amount of facilities. On the other hand, we
	 * only set facilities that are known to work in KVM.
	 */
	facilities = (unsigned long long *) get_zeroed_page(GFP_KERNEL|GFP_DMA);
	if (!facilities) {
		kvm_exit();
		return -ENOMEM;
	}
	/* copy the host facility bits, then mask to the supported subset */
	memcpy(facilities, S390_lowcore.stfle_fac_list, 16);
	facilities[0] &= 0xff00fff3f47c0000ULL;
	facilities[1] &= 0x201c000000000000ULL;
	return 0;
}

static void __exit kvm_s390_exit(void)
{
	free_page((unsigned long) facilities);
	kvm_exit();
}

module_init(kvm_s390_init);
module_exit(kvm_s390_exit);