/*
 * s390host.c --  hosting zSeries kernel virtual machines
 *
 * Copyright IBM Corp. 2008,2009
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (version 2 only)
 * as published by the Free Software Foundation.
 *
 *    Author(s): Carsten Otte <cotte@de.ibm.com>
 *               Christian Borntraeger <borntraeger@de.ibm.com>
 *               Heiko Carstens <heiko.carstens@de.ibm.com>
 *               Christian Ehrhardt <ehrhardt@de.ibm.com>
 */

#include <linux/compiler.h>
#include <linux/err.h>
#include <linux/fs.h>
#include <linux/hrtimer.h>
#include <linux/init.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/timer.h>
#include <asm/asm-offsets.h>
#include <asm/lowcore.h>
#include <asm/pgtable.h>
#include <asm/nmi.h>
#include <asm/system.h>
#include "kvm-s390.h"
#include "gaccess.h"

#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU
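
/*
 * Each entry below pairs a debugfs file name with the offset of a
 * counter inside struct kvm_vcpu (via VCPU_STAT) plus the KVM_STAT_VCPU
 * type tag; the generic KVM code uses this table to export the counters
 * under /sys/kernel/debug/kvm/.
 */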
struct kvm_stats_debugfs_item debugfs_entries[] = {
	{ "userspace_handled", VCPU_STAT(exit_userspace) },
	{ "exit_null", VCPU_STAT(exit_null) },
	{ "exit_validity", VCPU_STAT(exit_validity) },
	{ "exit_stop_request", VCPU_STAT(exit_stop_request) },
	{ "exit_external_request", VCPU_STAT(exit_external_request) },
	{ "exit_external_interrupt", VCPU_STAT(exit_external_interrupt) },
	{ "exit_instruction", VCPU_STAT(exit_instruction) },
	{ "exit_program_interruption", VCPU_STAT(exit_program_interruption) },
	{ "exit_instr_and_program_int", VCPU_STAT(exit_instr_and_program) },
	{ "instruction_lctlg", VCPU_STAT(instruction_lctlg) },
	{ "instruction_lctl", VCPU_STAT(instruction_lctl) },
	{ "deliver_emergency_signal", VCPU_STAT(deliver_emergency_signal) },
	{ "deliver_service_signal", VCPU_STAT(deliver_service_signal) },
	{ "deliver_virtio_interrupt", VCPU_STAT(deliver_virtio_interrupt) },
	{ "deliver_stop_signal", VCPU_STAT(deliver_stop_signal) },
	{ "deliver_prefix_signal", VCPU_STAT(deliver_prefix_signal) },
	{ "deliver_restart_signal", VCPU_STAT(deliver_restart_signal) },
	{ "deliver_program_interruption", VCPU_STAT(deliver_program_int) },
	{ "exit_wait_state", VCPU_STAT(exit_wait_state) },
	{ "instruction_stidp", VCPU_STAT(instruction_stidp) },
	{ "instruction_spx", VCPU_STAT(instruction_spx) },
	{ "instruction_stpx", VCPU_STAT(instruction_stpx) },
	{ "instruction_stap", VCPU_STAT(instruction_stap) },
	{ "instruction_storage_key", VCPU_STAT(instruction_storage_key) },
	{ "instruction_stsch", VCPU_STAT(instruction_stsch) },
	{ "instruction_chsc", VCPU_STAT(instruction_chsc) },
	{ "instruction_stsi", VCPU_STAT(instruction_stsi) },
	{ "instruction_stfl", VCPU_STAT(instruction_stfl) },
	{ "instruction_sigp_sense", VCPU_STAT(instruction_sigp_sense) },
	{ "instruction_sigp_emergency", VCPU_STAT(instruction_sigp_emergency) },
	{ "instruction_sigp_stop", VCPU_STAT(instruction_sigp_stop) },
	{ "instruction_sigp_set_arch", VCPU_STAT(instruction_sigp_arch) },
	{ "instruction_sigp_set_prefix", VCPU_STAT(instruction_sigp_prefix) },
	{ "instruction_sigp_restart", VCPU_STAT(instruction_sigp_restart) },
	{ "diagnose_44", VCPU_STAT(diagnose_44) },
	{ NULL }
};

static unsigned long long *facilities;

/* Section: not file related */
int kvm_arch_hardware_enable(void *garbage)
{
	/* every s390 is virtualization enabled ;-) */
	return 0;
}

void kvm_arch_hardware_disable(void *garbage)
{
}

int kvm_arch_hardware_setup(void)
{
	return 0;
}

void kvm_arch_hardware_unsetup(void)
{
}

void kvm_arch_check_processor_compat(void *rtn)
{
}

int kvm_arch_init(void *opaque)
{
	return 0;
}

void kvm_arch_exit(void)
{
}

/* Section: device related */
long kvm_arch_dev_ioctl(struct file *filp,
			unsigned int ioctl, unsigned long arg)
{
	if (ioctl == KVM_S390_ENABLE_SIE)
		return s390_enable_sie();
	return -EINVAL;
}
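
/*
 * Illustrative use from userspace (a sketch; the fd name is made up):
 * KVM_S390_ENABLE_SIE is issued on the /dev/kvm fd, early in process
 * life, so that s390_enable_sie() can still convert the mm for SIE use:
 *
 *	int kvm_fd = open("/dev/kvm", O_RDWR);
 *	ioctl(kvm_fd, KVM_S390_ENABLE_SIE, 0);
 */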

int kvm_dev_ioctl_check_extension(long ext)
{
	int r;

	switch (ext) {
	case KVM_CAP_S390_PSW:
		r = 1;
		break;
	default:
		r = 0;
	}
	return r;
}

/* Section: vm related */
/*
 * Get (and clear) the dirty memory log for a memory slot.
 */
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
			       struct kvm_dirty_log *log)
{
	return 0;
}

long kvm_arch_vm_ioctl(struct file *filp,
		       unsigned int ioctl, unsigned long arg)
{
	struct kvm *kvm = filp->private_data;
	void __user *argp = (void __user *)arg;
	int r;

	switch (ioctl) {
	case KVM_S390_INTERRUPT: {
		struct kvm_s390_interrupt s390int;

		r = -EFAULT;
		if (copy_from_user(&s390int, argp, sizeof(s390int)))
			break;
		r = kvm_s390_inject_vm(kvm, &s390int);
		break;
	}
	default:
		r = -ENOTTY;
	}

	return r;
}
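
/*
 * Illustrative floating-interrupt injection from userspace (a sketch;
 * fd names are made up). Floating interrupts belong to the VM rather
 * than to one vcpu, so they go through the VM fd:
 *
 *	struct kvm_s390_interrupt s390int = {
 *		.type = KVM_S390_INT_VIRTIO,
 *	};
 *	ioctl(vm_fd, KVM_S390_INTERRUPT, &s390int);
 */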
int kvm_arch_init_vm(struct kvm *kvm)
{
	int rc;
	char debug_name[16];

	rc = s390_enable_sie();
	if (rc)
		goto out_err;

	/* from here on, failures are allocation failures */
	rc = -ENOMEM;

	kvm->arch.sca = (struct sca_block *) get_zeroed_page(GFP_KERNEL);
	if (!kvm->arch.sca)
		goto out_err;

	sprintf(debug_name, "kvm-%u", current->pid);

	kvm->arch.dbf = debug_register(debug_name, 8, 2, 8 * sizeof(long));
	if (!kvm->arch.dbf)
		goto out_nodbf;

	spin_lock_init(&kvm->arch.float_int.lock);
	INIT_LIST_HEAD(&kvm->arch.float_int.list);

	debug_register_view(kvm->arch.dbf, &debug_sprintf_view);
	VM_EVENT(kvm, 3, "%s", "vm created");

	return 0;
out_nodbf:
	free_page((unsigned long)(kvm->arch.sca));
out_err:
	return rc;
}
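
/*
 * The SCA (system control area) allocated in kvm_arch_init_vm() is
 * shared by all vcpus of a guest: each SIE control block points to it
 * via scaoh/scaol, and the mcn bitmap records which per-cpu entries
 * (sda) are currently in use, as the create/destroy paths below show.
 */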
void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{
	VCPU_EVENT(vcpu, 3, "%s", "free cpu");
	clear_bit(63 - vcpu->vcpu_id, (unsigned long *) &vcpu->kvm->arch.sca->mcn);
	if (vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda ==
		(__u64) vcpu->arch.sie_block)
		vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda = 0;
	smp_mb();
	free_page((unsigned long)(vcpu->arch.sie_block));
	kvm_vcpu_uninit(vcpu);
	kfree(vcpu);
}

static void kvm_free_vcpus(struct kvm *kvm)
{
	unsigned int i;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(i, vcpu, kvm)
		kvm_arch_vcpu_destroy(vcpu);

	mutex_lock(&kvm->lock);
	for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
		kvm->vcpus[i] = NULL;

	atomic_set(&kvm->online_vcpus, 0);
	mutex_unlock(&kvm->lock);
}

void kvm_arch_sync_events(struct kvm *kvm)
{
}

void kvm_arch_destroy_vm(struct kvm *kvm)
{
	kvm_free_vcpus(kvm);
	free_page((unsigned long)(kvm->arch.sca));
	debug_unregister(kvm->arch.dbf);
}

/* Section: vcpu related */
int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
{
	return 0;
}

void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
{
	/* Nothing to do */
}

void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	save_fp_regs(&vcpu->arch.host_fpregs);
	save_access_regs(vcpu->arch.host_acrs);
	vcpu->arch.guest_fpregs.fpc &= FPC_VALID_MASK;
	restore_fp_regs(&vcpu->arch.guest_fpregs);
	restore_access_regs(vcpu->arch.guest_acrs);
}

void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
	save_fp_regs(&vcpu->arch.guest_fpregs);
	save_access_regs(vcpu->arch.guest_acrs);
	restore_fp_regs(&vcpu->arch.host_fpregs);
	restore_access_regs(vcpu->arch.host_acrs);
}

static void kvm_s390_vcpu_initial_reset(struct kvm_vcpu *vcpu)
{
	/* this equals initial cpu reset in pop, but we don't switch to ESA */
	vcpu->arch.sie_block->gpsw.mask = 0UL;
	vcpu->arch.sie_block->gpsw.addr = 0UL;
	vcpu->arch.sie_block->prefix = 0UL;
	vcpu->arch.sie_block->ihcpu = 0xffff;
	vcpu->arch.sie_block->cputm = 0UL;
	vcpu->arch.sie_block->ckc = 0UL;
	vcpu->arch.sie_block->todpr = 0;
	memset(vcpu->arch.sie_block->gcr, 0, 16 * sizeof(__u64));
	vcpu->arch.sie_block->gcr[0] = 0xE0UL;
	vcpu->arch.sie_block->gcr[14] = 0xC2000000UL;
	vcpu->arch.guest_fpregs.fpc = 0;
	asm volatile("lfpc %0" : : "Q" (vcpu->arch.guest_fpregs.fpc));
	vcpu->arch.sie_block->gbea = 1;
}

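/*
 * The gcr[0]/gcr[14] values above are the architected initial-reset
 * contents of CR0 and CR14; the inline comment already notes this
 * mirrors the initial cpu reset in the Principles of Operation, except
 * that the vcpu stays in z/Architecture mode rather than dropping to
 * ESA. This note only records where the magic numbers come from.
 */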
int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
{
	atomic_set(&vcpu->arch.sie_block->cpuflags, CPUSTAT_ZARCH);
	set_bit(KVM_REQ_MMU_RELOAD, &vcpu->requests);
	vcpu->arch.sie_block->ecb = 6;
	vcpu->arch.sie_block->eca = 0xC1002001U;
	vcpu->arch.sie_block->fac = (int) (long) facilities;
	hrtimer_init(&vcpu->arch.ckc_timer, CLOCK_REALTIME, HRTIMER_MODE_ABS);
	tasklet_init(&vcpu->arch.tasklet, kvm_s390_tasklet,
		     (unsigned long) vcpu);
	vcpu->arch.ckc_timer.function = kvm_s390_idle_wakeup;
	get_cpu_id(&vcpu->arch.cpu_id);
	vcpu->arch.cpu_id.version = 0xff;
	return 0;
}

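/*
 * ecb/eca select SIE interpretation features and fac points SIE at the
 * facility list built in kvm_s390_init(); the precise bit layout lives
 * in the sie_block definition in arch/s390/include/asm/kvm_host.h. The
 * ckc hrtimer implements the guest's clock comparator wakeup while the
 * vcpu sleeps in the host.
 */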
struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
				      unsigned int id)
{
	struct kvm_vcpu *vcpu = kzalloc(sizeof(struct kvm_vcpu), GFP_KERNEL);
	int rc = -ENOMEM;

	if (!vcpu)
		goto out_nomem;

	vcpu->arch.sie_block = (struct kvm_s390_sie_block *)
					get_zeroed_page(GFP_KERNEL);

	if (!vcpu->arch.sie_block)
		goto out_free_cpu;

	vcpu->arch.sie_block->icpua = id;
	BUG_ON(!kvm->arch.sca);
	if (!kvm->arch.sca->cpu[id].sda)
		kvm->arch.sca->cpu[id].sda = (__u64) vcpu->arch.sie_block;
	vcpu->arch.sie_block->scaoh = (__u32)(((__u64)kvm->arch.sca) >> 32);
	vcpu->arch.sie_block->scaol = (__u32)(__u64)kvm->arch.sca;
	set_bit(63 - id, (unsigned long *) &kvm->arch.sca->mcn);

	spin_lock_init(&vcpu->arch.local_int.lock);
	INIT_LIST_HEAD(&vcpu->arch.local_int.list);
	vcpu->arch.local_int.float_int = &kvm->arch.float_int;
	spin_lock(&kvm->arch.float_int.lock);
	kvm->arch.float_int.local_int[id] = &vcpu->arch.local_int;
	init_waitqueue_head(&vcpu->arch.local_int.wq);
	vcpu->arch.local_int.cpuflags = &vcpu->arch.sie_block->cpuflags;
	spin_unlock(&kvm->arch.float_int.lock);

	rc = kvm_vcpu_init(vcpu, kvm, id);
	if (rc)
		goto out_free_sie_block;
	VM_EVENT(kvm, 3, "create cpu %d at %p, sie block at %p", id, vcpu,
		 vcpu->arch.sie_block);

	return vcpu;
out_free_sie_block:
	free_page((unsigned long)(vcpu->arch.sie_block));
out_free_cpu:
	kfree(vcpu);
out_nomem:
	return ERR_PTR(rc);
}

int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
{
	/* kvm common code refers to this, but never calls it */
	BUG();
	return 0;
}

static int kvm_arch_vcpu_ioctl_initial_reset(struct kvm_vcpu *vcpu)
{
	kvm_s390_vcpu_initial_reset(vcpu);
	return 0;
}

int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	memcpy(&vcpu->arch.guest_gprs, &regs->gprs, sizeof(regs->gprs));
	return 0;
}

int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	memcpy(&regs->gprs, &vcpu->arch.guest_gprs, sizeof(regs->gprs));
	return 0;
}

int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	memcpy(&vcpu->arch.guest_acrs, &sregs->acrs, sizeof(sregs->acrs));
	memcpy(&vcpu->arch.sie_block->gcr, &sregs->crs, sizeof(sregs->crs));
	return 0;
}

int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	memcpy(&sregs->acrs, &vcpu->arch.guest_acrs, sizeof(sregs->acrs));
	memcpy(&sregs->crs, &vcpu->arch.sie_block->gcr, sizeof(sregs->crs));
	return 0;
}

int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	memcpy(&vcpu->arch.guest_fpregs.fprs, &fpu->fprs, sizeof(fpu->fprs));
	vcpu->arch.guest_fpregs.fpc = fpu->fpc;
	return 0;
}

int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	memcpy(&fpu->fprs, &vcpu->arch.guest_fpregs.fprs, sizeof(fpu->fprs));
	fpu->fpc = vcpu->arch.guest_fpregs.fpc;
	return 0;
}

static int kvm_arch_vcpu_ioctl_set_initial_psw(struct kvm_vcpu *vcpu, psw_t psw)
{
	int rc = 0;

	if (atomic_read(&vcpu->arch.sie_block->cpuflags) & CPUSTAT_RUNNING)
		rc = -EBUSY;
	else {
		vcpu->run->psw_mask = psw.mask;
		vcpu->run->psw_addr = psw.addr;
	}
	return rc;
}

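/*
 * Illustrative userspace bring-up of a vcpu (a sketch; fd names and the
 * PSW mask value are examples, not taken from this file):
 *
 *	struct kvm_s390_psw psw = {
 *		.mask = 0x0000000180000000UL,	(EA+BA: 64-bit addressing)
 *		.addr = entry_point,
 *	};
 *	ioctl(vcpu_fd, KVM_S390_INITIAL_RESET, 0);
 *	ioctl(vcpu_fd, KVM_S390_SET_INITIAL_PSW, &psw);
 */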
int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
				  struct kvm_translation *tr)
{
	return -EINVAL; /* not implemented yet */
}

int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
					struct kvm_guest_debug *dbg)
{
	return -EINVAL; /* not implemented yet */
}

int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	return -EINVAL; /* not implemented yet */
}

int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	return -EINVAL; /* not implemented yet */
}

static void __vcpu_run(struct kvm_vcpu *vcpu)
{
	memcpy(&vcpu->arch.sie_block->gg14, &vcpu->arch.guest_gprs[14], 16);

	if (need_resched())
		schedule();

	if (test_thread_flag(TIF_MCCK_PENDING))
		s390_handle_mcck();

	kvm_s390_deliver_pending_interrupts(vcpu);

	vcpu->arch.sie_block->icptcode = 0;
	local_irq_disable();
	kvm_guest_enter();
	local_irq_enable();
	VCPU_EVENT(vcpu, 6, "entering sie flags %x",
		   atomic_read(&vcpu->arch.sie_block->cpuflags));
	if (sie64a(vcpu->arch.sie_block, vcpu->arch.guest_gprs)) {
		VCPU_EVENT(vcpu, 3, "%s", "fault in sie instruction");
		kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
	}
	VCPU_EVENT(vcpu, 6, "exit sie icptcode %d",
		   vcpu->arch.sie_block->icptcode);
	local_irq_disable();
	kvm_guest_exit();
	local_irq_enable();

	memcpy(&vcpu->arch.guest_gprs[14], &vcpu->arch.sie_block->gg14, 16);
}

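/*
 * Note on __vcpu_run(): only r14/r15 live in the SIE block (gg14 and
 * the adjacent field, copied as 16 bytes above); the remaining guest
 * gprs are handed to sie64a() directly. kvm_guest_enter() and
 * kvm_guest_exit() run with interrupts disabled so that CPU time is
 * accounted to the guest.
 */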
int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	int rc;
	sigset_t sigsaved;

rerun_vcpu:
	if (vcpu->requests)
		if (test_and_clear_bit(KVM_REQ_MMU_RELOAD, &vcpu->requests))
			kvm_s390_vcpu_set_mem(vcpu);

	/* verify that memory has been registered */
	if (!vcpu->arch.sie_block->gmslm) {
		vcpu_put(vcpu);
		VCPU_EVENT(vcpu, 3, "%s", "no memory registered to run vcpu");
		return -EINVAL;
	}

	if (vcpu->sigset_active)
		sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);

	atomic_set_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);

	BUG_ON(vcpu->kvm->arch.float_int.local_int[vcpu->vcpu_id] == NULL);

	switch (kvm_run->exit_reason) {
	case KVM_EXIT_S390_SIEIC:
	case KVM_EXIT_UNKNOWN:
	case KVM_EXIT_INTR:
	case KVM_EXIT_S390_RESET:
		break;
	default:
		BUG();
	}

	vcpu->arch.sie_block->gpsw.mask = kvm_run->psw_mask;
	vcpu->arch.sie_block->gpsw.addr = kvm_run->psw_addr;

	might_fault();

	do {
		__vcpu_run(vcpu);
		rc = kvm_handle_sie_intercept(vcpu);
	} while (!signal_pending(current) && !rc);

	if (rc == SIE_INTERCEPT_RERUNVCPU)
		goto rerun_vcpu;

	if (signal_pending(current) && !rc) {
		kvm_run->exit_reason = KVM_EXIT_INTR;
		rc = -EINTR;
	}

	if (rc == -EOPNOTSUPP) {
		/* intercept cannot be handled in-kernel, prepare kvm-run */
		kvm_run->exit_reason = KVM_EXIT_S390_SIEIC;
		kvm_run->s390_sieic.icptcode = vcpu->arch.sie_block->icptcode;
		kvm_run->s390_sieic.ipa = vcpu->arch.sie_block->ipa;
		kvm_run->s390_sieic.ipb = vcpu->arch.sie_block->ipb;
		rc = 0;
	}

	if (rc == -EREMOTE) {
		/* intercept was handled, but userspace support is needed
		 * kvm_run has been prepared by the handler */
		rc = 0;
	}

	kvm_run->psw_mask = vcpu->arch.sie_block->gpsw.mask;
	kvm_run->psw_addr = vcpu->arch.sie_block->gpsw.addr;

	if (vcpu->sigset_active)
		sigprocmask(SIG_SETMASK, &sigsaved, NULL);

	vcpu->stat.exit_userspace++;
	return rc;
}

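/*
 * __guestcopy() below writes either through the guest's prefix-aware
 * address space (copy_to_guest(), used when the save area is addressed
 * relative to the vcpu's prefix page) or to guest absolute memory
 * (copy_to_guest_absolute()), selected by the prefix flag.
 */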
static int __guestcopy(struct kvm_vcpu *vcpu, u64 guestdest, const void *from,
		       unsigned long n, int prefix)
{
	if (prefix)
		return copy_to_guest(vcpu, guestdest, from, n);
	else
		return copy_to_guest_absolute(vcpu, guestdest, from, n);
}

/*
 * store status at address
 * we have two special cases:
 * KVM_S390_STORE_STATUS_NOADDR: -> 0x1200 on 64 bit
 * KVM_S390_STORE_STATUS_PREFIXED: -> prefix
 */
int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
{
	const unsigned char archmode = 1;
	int prefix;

	if (addr == KVM_S390_STORE_STATUS_NOADDR) {
		if (copy_to_guest_absolute(vcpu, 163ul, &archmode, 1))
			return -EFAULT;
		addr = SAVE_AREA_BASE;
		prefix = 0;
	} else if (addr == KVM_S390_STORE_STATUS_PREFIXED) {
		if (copy_to_guest(vcpu, 163ul, &archmode, 1))
			return -EFAULT;
		addr = SAVE_AREA_BASE;
		prefix = 1;
	} else
		prefix = 0;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, fp_regs),
			vcpu->arch.guest_fpregs.fprs, 128, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, gp_regs),
			vcpu->arch.guest_gprs, 128, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, psw),
			&vcpu->arch.sie_block->gpsw, 16, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, pref_reg),
			&vcpu->arch.sie_block->prefix, 4, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu,
			addr + offsetof(struct save_area, fp_ctrl_reg),
			&vcpu->arch.guest_fpregs.fpc, 4, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, tod_reg),
			&vcpu->arch.sie_block->todpr, 4, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, timer),
			&vcpu->arch.sie_block->cputm, 8, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, clk_cmp),
			&vcpu->arch.sie_block->ckc, 8, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, acc_regs),
			&vcpu->arch.guest_acrs, 64, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu,
			addr + offsetof(struct save_area, ctrl_regs),
			&vcpu->arch.sie_block->gcr, 128, prefix))
		return -EFAULT;
	return 0;
}

long kvm_arch_vcpu_ioctl(struct file *filp,
			 unsigned int ioctl, unsigned long arg)
{
	struct kvm_vcpu *vcpu = filp->private_data;
	void __user *argp = (void __user *)arg;
	long r;

	switch (ioctl) {
	case KVM_S390_INTERRUPT: {
		struct kvm_s390_interrupt s390int;

		r = -EFAULT;
		if (copy_from_user(&s390int, argp, sizeof(s390int)))
			break;
		r = kvm_s390_inject_vcpu(vcpu, &s390int);
		break;
	}
	case KVM_S390_STORE_STATUS:
		r = kvm_s390_vcpu_store_status(vcpu, arg);
		break;
	case KVM_S390_SET_INITIAL_PSW: {
		psw_t psw;

		r = -EFAULT;
		if (copy_from_user(&psw, argp, sizeof(psw)))
			break;
		r = kvm_arch_vcpu_ioctl_set_initial_psw(vcpu, psw);
		break;
	}
	case KVM_S390_INITIAL_RESET:
		r = kvm_arch_vcpu_ioctl_initial_reset(vcpu);
		break;
	default:
		r = -EINVAL;
	}
	return r;
}

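/*
 * Note that KVM_S390_STORE_STATUS passes the target address directly in
 * the ioctl argument rather than through a user pointer, e.g. (sketch,
 * fd name made up):
 *
 *	ioctl(vcpu_fd, KVM_S390_STORE_STATUS, KVM_S390_STORE_STATUS_NOADDR);
 */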
/* Section: memory related */
int kvm_arch_prepare_memory_region(struct kvm *kvm,
				   struct kvm_memory_slot *memslot,
				   struct kvm_memory_slot old,
				   struct kvm_userspace_memory_region *mem,
				   int user_alloc)
{
	/* A few sanity checks. We can have exactly one memory slot which has
	   to start at guest virtual zero and which has to be located at a
	   page boundary in userland and which has to end at a page boundary.
	   The memory in userland is ok to be fragmented into various different
	   vmas. It is okay to mmap() and munmap() stuff in this slot after
	   doing this call at any time */

	if (mem->slot)
		return -EINVAL;

	if (mem->guest_phys_addr)
		return -EINVAL;

	if (mem->userspace_addr & (PAGE_SIZE - 1))
		return -EINVAL;

	if (mem->memory_size & (PAGE_SIZE - 1))
		return -EINVAL;

	if (!user_alloc)
		return -EINVAL;

	return 0;
}

void kvm_arch_commit_memory_region(struct kvm *kvm,
				   struct kvm_userspace_memory_region *mem,
				   struct kvm_memory_slot old,
				   int user_alloc)
{
	int i;
	struct kvm_vcpu *vcpu;

	/* request update of sie control block for all available vcpus */
	kvm_for_each_vcpu(i, vcpu, kvm) {
		if (test_and_set_bit(KVM_REQ_MMU_RELOAD, &vcpu->requests))
			continue;
		kvm_s390_inject_sigp_stop(vcpu, ACTION_RELOADVCPU_ON_STOP);
	}
}

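/*
 * The sigp stop with ACTION_RELOADVCPU_ON_STOP kicks each running vcpu
 * out of SIE; on its next pass through kvm_arch_vcpu_ioctl_run() the
 * pending KVM_REQ_MMU_RELOAD makes it call kvm_s390_vcpu_set_mem() and
 * pick up the new memory layout.
 */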
void kvm_arch_flush_shadow(struct kvm *kvm)
{
}

static int __init kvm_s390_init(void)
{
	int ret;
	ret = kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
	if (ret)
		return ret;

	/*
	 * guests can ask for up to 255+1 double words, we need a full page
	 * to hold the maximum amount of facilities. On the other hand, we
	 * only set facilities that are known to work in KVM.
	 */
	facilities = (unsigned long long *) get_zeroed_page(GFP_KERNEL|GFP_DMA);
	if (!facilities) {
		kvm_exit();
		return -ENOMEM;
	}
	memcpy(facilities, S390_lowcore.stfle_fac_list, 16);
	facilities[0] &= 0xff00fff3f47c0000ULL;
	return 0;
}
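
/*
 * The mask applied to facilities[0] above clears every STFLE bit except
 * those the comment declares safe under KVM; guests reading the facility
 * list through the SIE fac pointer therefore see a filtered copy of the
 * host's S390_lowcore.stfle_fac_list, not the raw hardware list.
 */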
static void __exit kvm_s390_exit(void)
{
	free_page((unsigned long) facilities);
	kvm_exit();
}

module_init(kvm_s390_init);
module_exit(kvm_s390_exit);