/*
 * kvm-s390.c -- hosting zSeries kernel virtual machines
 *
 * Copyright IBM Corp. 2008,2009
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (version 2 only)
 * as published by the Free Software Foundation.
 *
 *    Author(s): Carsten Otte <cotte@de.ibm.com>
 *               Christian Borntraeger <borntraeger@de.ibm.com>
 *               Heiko Carstens <heiko.carstens@de.ibm.com>
 *               Christian Ehrhardt <ehrhardt@de.ibm.com>
 */

#include <linux/compiler.h>
#include <linux/err.h>
#include <linux/fs.h>
#include <linux/hrtimer.h>
#include <linux/init.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/timer.h>
#include <asm/asm-offsets.h>
#include <asm/lowcore.h>
#include <asm/pgtable.h>
#include <asm/nmi.h>
#include <asm/system.h>
#include "kvm-s390.h"
#include "gaccess.h"

#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU

struct kvm_stats_debugfs_item debugfs_entries[] = {
        { "userspace_handled", VCPU_STAT(exit_userspace) },
        { "exit_null", VCPU_STAT(exit_null) },
        { "exit_validity", VCPU_STAT(exit_validity) },
        { "exit_stop_request", VCPU_STAT(exit_stop_request) },
        { "exit_external_request", VCPU_STAT(exit_external_request) },
        { "exit_external_interrupt", VCPU_STAT(exit_external_interrupt) },
        { "exit_instruction", VCPU_STAT(exit_instruction) },
        { "exit_program_interruption", VCPU_STAT(exit_program_interruption) },
        { "exit_instr_and_program_int", VCPU_STAT(exit_instr_and_program) },
        { "instruction_lctlg", VCPU_STAT(instruction_lctlg) },
        { "instruction_lctl", VCPU_STAT(instruction_lctl) },
        { "deliver_emergency_signal", VCPU_STAT(deliver_emergency_signal) },
        { "deliver_external_call", VCPU_STAT(deliver_external_call) },
        { "deliver_service_signal", VCPU_STAT(deliver_service_signal) },
        { "deliver_virtio_interrupt", VCPU_STAT(deliver_virtio_interrupt) },
        { "deliver_stop_signal", VCPU_STAT(deliver_stop_signal) },
        { "deliver_prefix_signal", VCPU_STAT(deliver_prefix_signal) },
        { "deliver_restart_signal", VCPU_STAT(deliver_restart_signal) },
        { "deliver_program_interruption", VCPU_STAT(deliver_program_int) },
        { "exit_wait_state", VCPU_STAT(exit_wait_state) },
        { "instruction_stidp", VCPU_STAT(instruction_stidp) },
        { "instruction_spx", VCPU_STAT(instruction_spx) },
        { "instruction_stpx", VCPU_STAT(instruction_stpx) },
        { "instruction_stap", VCPU_STAT(instruction_stap) },
        { "instruction_storage_key", VCPU_STAT(instruction_storage_key) },
        { "instruction_stsch", VCPU_STAT(instruction_stsch) },
        { "instruction_chsc", VCPU_STAT(instruction_chsc) },
        { "instruction_stsi", VCPU_STAT(instruction_stsi) },
        { "instruction_stfl", VCPU_STAT(instruction_stfl) },
        { "instruction_tprot", VCPU_STAT(instruction_tprot) },
        { "instruction_sigp_sense", VCPU_STAT(instruction_sigp_sense) },
        { "instruction_sigp_sense_running", VCPU_STAT(instruction_sigp_sense_running) },
        { "instruction_sigp_external_call", VCPU_STAT(instruction_sigp_external_call) },
        { "instruction_sigp_emergency", VCPU_STAT(instruction_sigp_emergency) },
        { "instruction_sigp_stop", VCPU_STAT(instruction_sigp_stop) },
        { "instruction_sigp_set_arch", VCPU_STAT(instruction_sigp_arch) },
        { "instruction_sigp_set_prefix", VCPU_STAT(instruction_sigp_prefix) },
        { "instruction_sigp_restart", VCPU_STAT(instruction_sigp_restart) },
        { "diagnose_10", VCPU_STAT(diagnose_10) },
        { "diagnose_44", VCPU_STAT(diagnose_44) },
        { NULL }
};

static unsigned long long *facilities;

/* Section: not file related */
int kvm_arch_hardware_enable(void *garbage)
{
        /* every s390 is virtualization enabled ;-) */
        return 0;
}

void kvm_arch_hardware_disable(void *garbage)
{
}

int kvm_arch_hardware_setup(void)
{
        return 0;
}

void kvm_arch_hardware_unsetup(void)
{
}

void kvm_arch_check_processor_compat(void *rtn)
{
}

int kvm_arch_init(void *opaque)
{
        return 0;
}

void kvm_arch_exit(void)
{
}

/* Section: device related */
long kvm_arch_dev_ioctl(struct file *filp,
                        unsigned int ioctl, unsigned long arg)
{
        if (ioctl == KVM_S390_ENABLE_SIE)
                return s390_enable_sie();
        return -EINVAL;
}

int kvm_dev_ioctl_check_extension(long ext)
{
        int r;

        switch (ext) {
        case KVM_CAP_S390_PSW:
        case KVM_CAP_S390_GMAP:
        case KVM_CAP_SYNC_MMU:
#ifdef CONFIG_KVM_S390_UCONTROL
        case KVM_CAP_S390_UCONTROL:
#endif
                r = 1;
                break;
        default:
                r = 0;
        }
        return r;
}
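
/*
 * Illustrative sketch, kept in a comment so it is not built: a userspace
 * VMM could probe the capabilities advertised above with the generic
 * KVM_CHECK_EXTENSION ioctl on the /dev/kvm file descriptor. The open()
 * call and error handling are assumptions of the example, not anything
 * this file mandates.
 *
 *	#include <fcntl.h>
 *	#include <stdio.h>
 *	#include <sys/ioctl.h>
 *	#include <linux/kvm.h>
 *
 *	int kvm_fd = open("/dev/kvm", O_RDWR);
 *
 *	if (kvm_fd >= 0 &&
 *	    ioctl(kvm_fd, KVM_CHECK_EXTENSION, KVM_CAP_S390_UCONTROL) == 1)
 *		printf("user controlled virtual machines are available\n");
 */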

/* Section: vm related */
/*
 * Get (and clear) the dirty memory log for a memory slot.
 */
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
                               struct kvm_dirty_log *log)
{
        return 0;
}

long kvm_arch_vm_ioctl(struct file *filp,
                       unsigned int ioctl, unsigned long arg)
{
        struct kvm *kvm = filp->private_data;
        void __user *argp = (void __user *)arg;
        int r;

        switch (ioctl) {
        case KVM_S390_INTERRUPT: {
                struct kvm_s390_interrupt s390int;

                r = -EFAULT;
                if (copy_from_user(&s390int, argp, sizeof(s390int)))
                        break;
                r = kvm_s390_inject_vm(kvm, &s390int);
                break;
        }
        default:
                r = -ENOTTY;
        }

        return r;
}

int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
{
        int rc;
        char debug_name[16];

        rc = -EINVAL;
#ifdef CONFIG_KVM_S390_UCONTROL
        if (type & ~KVM_VM_S390_UCONTROL)
                goto out_err;
        if ((type & KVM_VM_S390_UCONTROL) && (!capable(CAP_SYS_ADMIN)))
                goto out_err;
#else
        if (type)
                goto out_err;
#endif

        rc = s390_enable_sie();
        if (rc)
                goto out_err;

        rc = -ENOMEM;

        kvm->arch.sca = (struct sca_block *) get_zeroed_page(GFP_KERNEL);
        if (!kvm->arch.sca)
                goto out_err;

        sprintf(debug_name, "kvm-%u", current->pid);

        kvm->arch.dbf = debug_register(debug_name, 8, 2, 8 * sizeof(long));
        if (!kvm->arch.dbf)
                goto out_nodbf;

        spin_lock_init(&kvm->arch.float_int.lock);
        INIT_LIST_HEAD(&kvm->arch.float_int.list);

        debug_register_view(kvm->arch.dbf, &debug_sprintf_view);
        VM_EVENT(kvm, 3, "%s", "vm created");

        if (type & KVM_VM_S390_UCONTROL) {
                kvm->arch.gmap = NULL;
        } else {
                kvm->arch.gmap = gmap_alloc(current->mm);
                if (!kvm->arch.gmap)
                        goto out_nogmap;
        }
        return 0;
out_nogmap:
        debug_unregister(kvm->arch.dbf);
out_nodbf:
        free_page((unsigned long)(kvm->arch.sca));
out_err:
        return rc;
}

void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{
        VCPU_EVENT(vcpu, 3, "%s", "free cpu");
        if (!kvm_is_ucontrol(vcpu->kvm)) {
                clear_bit(63 - vcpu->vcpu_id,
                          (unsigned long *) &vcpu->kvm->arch.sca->mcn);
                if (vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda ==
                    (__u64) vcpu->arch.sie_block)
                        vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda = 0;
        }
        smp_mb();

        if (kvm_is_ucontrol(vcpu->kvm))
                gmap_free(vcpu->arch.gmap);

        free_page((unsigned long)(vcpu->arch.sie_block));
        kvm_vcpu_uninit(vcpu);
        kfree(vcpu);
}

static void kvm_free_vcpus(struct kvm *kvm)
{
        unsigned int i;
        struct kvm_vcpu *vcpu;

        kvm_for_each_vcpu(i, vcpu, kvm)
                kvm_arch_vcpu_destroy(vcpu);

        mutex_lock(&kvm->lock);
        for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
                kvm->vcpus[i] = NULL;

        atomic_set(&kvm->online_vcpus, 0);
        mutex_unlock(&kvm->lock);
}

void kvm_arch_sync_events(struct kvm *kvm)
{
}

void kvm_arch_destroy_vm(struct kvm *kvm)
{
        kvm_free_vcpus(kvm);
        free_page((unsigned long)(kvm->arch.sca));
        debug_unregister(kvm->arch.dbf);
        if (!kvm_is_ucontrol(kvm))
                gmap_free(kvm->arch.gmap);
}

/* Section: vcpu related */
int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
{
        if (kvm_is_ucontrol(vcpu->kvm)) {
                vcpu->arch.gmap = gmap_alloc(current->mm);
                if (!vcpu->arch.gmap)
                        return -ENOMEM;
                return 0;
        }

        vcpu->arch.gmap = vcpu->kvm->arch.gmap;
        return 0;
}

void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
{
        /* Nothing to do */
}

void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
        save_fp_regs(&vcpu->arch.host_fpregs);
        save_access_regs(vcpu->arch.host_acrs);
        vcpu->arch.guest_fpregs.fpc &= FPC_VALID_MASK;
        restore_fp_regs(&vcpu->arch.guest_fpregs);
        restore_access_regs(vcpu->arch.guest_acrs);
        gmap_enable(vcpu->arch.gmap);
        atomic_set_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
}

void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
        atomic_clear_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
        gmap_disable(vcpu->arch.gmap);
        save_fp_regs(&vcpu->arch.guest_fpregs);
        save_access_regs(vcpu->arch.guest_acrs);
        restore_fp_regs(&vcpu->arch.host_fpregs);
        restore_access_regs(vcpu->arch.host_acrs);
}

static void kvm_s390_vcpu_initial_reset(struct kvm_vcpu *vcpu)
{
        /* This equals the initial CPU reset in the Principles of
         * Operation (POP), but we don't switch to ESA mode. */
        vcpu->arch.sie_block->gpsw.mask = 0UL;
        vcpu->arch.sie_block->gpsw.addr = 0UL;
        kvm_s390_set_prefix(vcpu, 0);
        vcpu->arch.sie_block->cputm = 0UL;
        vcpu->arch.sie_block->ckc = 0UL;
        vcpu->arch.sie_block->todpr = 0;
        memset(vcpu->arch.sie_block->gcr, 0, 16 * sizeof(__u64));
        vcpu->arch.sie_block->gcr[0] = 0xE0UL;
        vcpu->arch.sie_block->gcr[14] = 0xC2000000UL;
        vcpu->arch.guest_fpregs.fpc = 0;
        asm volatile("lfpc %0" : : "Q" (vcpu->arch.guest_fpregs.fpc));
        vcpu->arch.sie_block->gbea = 1;
}

int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
{
        atomic_set(&vcpu->arch.sie_block->cpuflags, CPUSTAT_ZARCH |
                                                    CPUSTAT_SM |
                                                    CPUSTAT_STOPPED);
        vcpu->arch.sie_block->ecb = 6;
        vcpu->arch.sie_block->eca = 0xC1002001U;
        vcpu->arch.sie_block->fac = (int) (long) facilities;
        hrtimer_init(&vcpu->arch.ckc_timer, CLOCK_REALTIME, HRTIMER_MODE_ABS);
        tasklet_init(&vcpu->arch.tasklet, kvm_s390_tasklet,
                     (unsigned long) vcpu);
        vcpu->arch.ckc_timer.function = kvm_s390_idle_wakeup;
        get_cpu_id(&vcpu->arch.cpu_id);
        vcpu->arch.cpu_id.version = 0xff;
        return 0;
}

struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
                                      unsigned int id)
{
        struct kvm_vcpu *vcpu;
        int rc = -EINVAL;

        if (id >= KVM_MAX_VCPUS)
                goto out;

        rc = -ENOMEM;

        vcpu = kzalloc(sizeof(struct kvm_vcpu), GFP_KERNEL);
        if (!vcpu)
                goto out;

        vcpu->arch.sie_block = (struct kvm_s390_sie_block *)
                get_zeroed_page(GFP_KERNEL);

        if (!vcpu->arch.sie_block)
                goto out_free_cpu;

        vcpu->arch.sie_block->icpua = id;
        if (!kvm_is_ucontrol(kvm)) {
                if (!kvm->arch.sca) {
                        WARN_ON_ONCE(1);
                        goto out_free_cpu;
                }
                if (!kvm->arch.sca->cpu[id].sda)
                        kvm->arch.sca->cpu[id].sda =
                                (__u64) vcpu->arch.sie_block;
                vcpu->arch.sie_block->scaoh =
                        (__u32)(((__u64)kvm->arch.sca) >> 32);
                vcpu->arch.sie_block->scaol = (__u32)(__u64)kvm->arch.sca;
                set_bit(63 - id, (unsigned long *) &kvm->arch.sca->mcn);
        }

        spin_lock_init(&vcpu->arch.local_int.lock);
        INIT_LIST_HEAD(&vcpu->arch.local_int.list);
        vcpu->arch.local_int.float_int = &kvm->arch.float_int;
        spin_lock(&kvm->arch.float_int.lock);
        kvm->arch.float_int.local_int[id] = &vcpu->arch.local_int;
        init_waitqueue_head(&vcpu->arch.local_int.wq);
        vcpu->arch.local_int.cpuflags = &vcpu->arch.sie_block->cpuflags;
        spin_unlock(&kvm->arch.float_int.lock);

        rc = kvm_vcpu_init(vcpu, kvm, id);
        if (rc)
                goto out_free_sie_block;
        VM_EVENT(kvm, 3, "create cpu %d at %p, sie block at %p", id, vcpu,
                 vcpu->arch.sie_block);

        return vcpu;
out_free_sie_block:
        free_page((unsigned long)(vcpu->arch.sie_block));
out_free_cpu:
        kfree(vcpu);
out:
        return ERR_PTR(rc);
}

int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
{
        /* kvm common code refers to this, but never calls it */
        BUG();
        return 0;
}

static int kvm_arch_vcpu_ioctl_initial_reset(struct kvm_vcpu *vcpu)
{
        kvm_s390_vcpu_initial_reset(vcpu);
        return 0;
}
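
/*
 * Illustrative sketch, kept in a comment so it is not built: the reset
 * above is requested from userspace through an argument-less vcpu ioctl.
 * vcpu_fd is assumed to be a file descriptor from KVM_CREATE_VCPU.
 *
 *	ioctl(vcpu_fd, KVM_S390_INITIAL_RESET, 0);
 */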

int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
        memcpy(&vcpu->arch.guest_gprs, &regs->gprs, sizeof(regs->gprs));
        return 0;
}

int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
        memcpy(&regs->gprs, &vcpu->arch.guest_gprs, sizeof(regs->gprs));
        return 0;
}

int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
                                  struct kvm_sregs *sregs)
{
        memcpy(&vcpu->arch.guest_acrs, &sregs->acrs, sizeof(sregs->acrs));
        memcpy(&vcpu->arch.sie_block->gcr, &sregs->crs, sizeof(sregs->crs));
        restore_access_regs(vcpu->arch.guest_acrs);
        return 0;
}

int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
                                  struct kvm_sregs *sregs)
{
        memcpy(&sregs->acrs, &vcpu->arch.guest_acrs, sizeof(sregs->acrs));
        memcpy(&sregs->crs, &vcpu->arch.sie_block->gcr, sizeof(sregs->crs));
        return 0;
}

int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
        memcpy(&vcpu->arch.guest_fpregs.fprs, &fpu->fprs, sizeof(fpu->fprs));
        vcpu->arch.guest_fpregs.fpc = fpu->fpc;
        restore_fp_regs(&vcpu->arch.guest_fpregs);
        return 0;
}

int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
        memcpy(&fpu->fprs, &vcpu->arch.guest_fpregs.fprs, sizeof(fpu->fprs));
        fpu->fpc = vcpu->arch.guest_fpregs.fpc;
        return 0;
}

static int kvm_arch_vcpu_ioctl_set_initial_psw(struct kvm_vcpu *vcpu, psw_t psw)
{
        int rc = 0;

        if (!(atomic_read(&vcpu->arch.sie_block->cpuflags) & CPUSTAT_STOPPED))
                rc = -EBUSY;
        else {
                vcpu->run->psw_mask = psw.mask;
                vcpu->run->psw_addr = psw.addr;
        }
        return rc;
}

int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
                                  struct kvm_translation *tr)
{
        return -EINVAL; /* not implemented yet */
}

int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
                                        struct kvm_guest_debug *dbg)
{
        return -EINVAL; /* not implemented yet */
}

int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
                                    struct kvm_mp_state *mp_state)
{
        return -EINVAL; /* not implemented yet */
}

int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
                                    struct kvm_mp_state *mp_state)
{
        return -EINVAL; /* not implemented yet */
}

static int __vcpu_run(struct kvm_vcpu *vcpu)
{
        int rc;

        memcpy(&vcpu->arch.sie_block->gg14, &vcpu->arch.guest_gprs[14], 16);

        if (need_resched())
                schedule();

        if (test_thread_flag(TIF_MCCK_PENDING))
                s390_handle_mcck();

        if (!kvm_is_ucontrol(vcpu->kvm))
                kvm_s390_deliver_pending_interrupts(vcpu);

        vcpu->arch.sie_block->icptcode = 0;
        local_irq_disable();
        kvm_guest_enter();
        local_irq_enable();
        VCPU_EVENT(vcpu, 6, "entering sie flags %x",
                   atomic_read(&vcpu->arch.sie_block->cpuflags));
        rc = sie64a(vcpu->arch.sie_block, vcpu->arch.guest_gprs);
        if (rc) {
                if (kvm_is_ucontrol(vcpu->kvm)) {
                        rc = SIE_INTERCEPT_UCONTROL;
                } else {
                        VCPU_EVENT(vcpu, 3, "%s", "fault in sie instruction");
                        kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
                        rc = 0;
                }
        }
        VCPU_EVENT(vcpu, 6, "exit sie icptcode %d",
                   vcpu->arch.sie_block->icptcode);
        local_irq_disable();
        kvm_guest_exit();
        local_irq_enable();

        memcpy(&vcpu->arch.guest_gprs[14], &vcpu->arch.sie_block->gg14, 16);
        return rc;
}

int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
        int rc;
        sigset_t sigsaved;

rerun_vcpu:
        if (vcpu->sigset_active)
                sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);

        atomic_clear_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);

        BUG_ON(vcpu->kvm->arch.float_int.local_int[vcpu->vcpu_id] == NULL);

        switch (kvm_run->exit_reason) {
        case KVM_EXIT_S390_SIEIC:
        case KVM_EXIT_UNKNOWN:
        case KVM_EXIT_INTR:
        case KVM_EXIT_S390_RESET:
        case KVM_EXIT_S390_UCONTROL:
                break;
        default:
                BUG();
        }

        vcpu->arch.sie_block->gpsw.mask = kvm_run->psw_mask;
        vcpu->arch.sie_block->gpsw.addr = kvm_run->psw_addr;

        might_fault();

        do {
                rc = __vcpu_run(vcpu);
                if (rc)
                        break;
                if (kvm_is_ucontrol(vcpu->kvm))
                        rc = -EOPNOTSUPP;
                else
                        rc = kvm_handle_sie_intercept(vcpu);
        } while (!signal_pending(current) && !rc);

        if (rc == SIE_INTERCEPT_RERUNVCPU)
                goto rerun_vcpu;

        if (signal_pending(current) && !rc) {
                kvm_run->exit_reason = KVM_EXIT_INTR;
                rc = -EINTR;
        }

#ifdef CONFIG_KVM_S390_UCONTROL
        if (rc == SIE_INTERCEPT_UCONTROL) {
                kvm_run->exit_reason = KVM_EXIT_S390_UCONTROL;
                kvm_run->s390_ucontrol.trans_exc_code =
                        current->thread.gmap_addr;
                kvm_run->s390_ucontrol.pgm_code = 0x10;
                rc = 0;
        }
#endif

        if (rc == -EOPNOTSUPP) {
                /* intercept cannot be handled in-kernel, prepare kvm-run */
                kvm_run->exit_reason = KVM_EXIT_S390_SIEIC;
                kvm_run->s390_sieic.icptcode = vcpu->arch.sie_block->icptcode;
                kvm_run->s390_sieic.ipa = vcpu->arch.sie_block->ipa;
                kvm_run->s390_sieic.ipb = vcpu->arch.sie_block->ipb;
                rc = 0;
        }

        if (rc == -EREMOTE) {
                /* intercept was handled, but userspace support is needed
                 * kvm_run has been prepared by the handler */
                rc = 0;
        }

        kvm_run->psw_mask = vcpu->arch.sie_block->gpsw.mask;
        kvm_run->psw_addr = vcpu->arch.sie_block->gpsw.addr;

        if (vcpu->sigset_active)
                sigprocmask(SIG_SETMASK, &sigsaved, NULL);

        vcpu->stat.exit_userspace++;
        return rc;
}
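
/*
 * Illustrative sketch, kept in a comment so it is not built: the
 * canonical userspace loop that drives kvm_arch_vcpu_ioctl_run() above.
 * The psw_mask/psw_addr and s390_sieic fields read back here are the
 * ones this function fills in before returning. vcpu_fd and kvm_fd are
 * assumed to come from KVM_CREATE_VCPU and open("/dev/kvm").
 *
 *	struct kvm_run *run;
 *	long mmap_size = ioctl(kvm_fd, KVM_GET_VCPU_MMAP_SIZE, 0);
 *
 *	run = mmap(NULL, mmap_size, PROT_READ | PROT_WRITE, MAP_SHARED,
 *		   vcpu_fd, 0);
 *	for (;;) {
 *		if (ioctl(vcpu_fd, KVM_RUN, 0) < 0 && errno != EINTR)
 *			break;
 *		if (run->exit_reason == KVM_EXIT_S390_SIEIC)
 *			handle_sieic(run);	(hypothetical helper that
 *						 decodes icptcode/ipa/ipb)
 *	}
 */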

static int __guestcopy(struct kvm_vcpu *vcpu, u64 guestdest, void *from,
                       unsigned long n, int prefix)
{
        if (prefix)
                return copy_to_guest(vcpu, guestdest, from, n);
        else
                return copy_to_guest_absolute(vcpu, guestdest, from, n);
}

/*
 * store status at address
 * we have two special cases:
 * KVM_S390_STORE_STATUS_NOADDR: -> 0x1200 on 64 bit
 * KVM_S390_STORE_STATUS_PREFIXED: -> prefix
 */
int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
{
        unsigned char archmode = 1;
        int prefix;

        if (addr == KVM_S390_STORE_STATUS_NOADDR) {
                if (copy_to_guest_absolute(vcpu, 163ul, &archmode, 1))
                        return -EFAULT;
                addr = SAVE_AREA_BASE;
                prefix = 0;
        } else if (addr == KVM_S390_STORE_STATUS_PREFIXED) {
                if (copy_to_guest(vcpu, 163ul, &archmode, 1))
                        return -EFAULT;
                addr = SAVE_AREA_BASE;
                prefix = 1;
        } else
                prefix = 0;

        if (__guestcopy(vcpu, addr + offsetof(struct save_area, fp_regs),
                        vcpu->arch.guest_fpregs.fprs, 128, prefix))
                return -EFAULT;

        if (__guestcopy(vcpu, addr + offsetof(struct save_area, gp_regs),
                        vcpu->arch.guest_gprs, 128, prefix))
                return -EFAULT;

        if (__guestcopy(vcpu, addr + offsetof(struct save_area, psw),
                        &vcpu->arch.sie_block->gpsw, 16, prefix))
                return -EFAULT;

        if (__guestcopy(vcpu, addr + offsetof(struct save_area, pref_reg),
                        &vcpu->arch.sie_block->prefix, 4, prefix))
                return -EFAULT;

        if (__guestcopy(vcpu,
                        addr + offsetof(struct save_area, fp_ctrl_reg),
                        &vcpu->arch.guest_fpregs.fpc, 4, prefix))
                return -EFAULT;

        if (__guestcopy(vcpu, addr + offsetof(struct save_area, tod_reg),
                        &vcpu->arch.sie_block->todpr, 4, prefix))
                return -EFAULT;

        if (__guestcopy(vcpu, addr + offsetof(struct save_area, timer),
                        &vcpu->arch.sie_block->cputm, 8, prefix))
                return -EFAULT;

        if (__guestcopy(vcpu, addr + offsetof(struct save_area, clk_cmp),
                        &vcpu->arch.sie_block->ckc, 8, prefix))
                return -EFAULT;

        if (__guestcopy(vcpu, addr + offsetof(struct save_area, acc_regs),
                        &vcpu->arch.guest_acrs, 64, prefix))
                return -EFAULT;

        if (__guestcopy(vcpu,
                        addr + offsetof(struct save_area, ctrl_regs),
                        &vcpu->arch.sie_block->gcr, 128, prefix))
                return -EFAULT;
        return 0;
}
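
/*
 * Illustrative sketch, kept in a comment so it is not built: userspace
 * triggers the store above through KVM_S390_STORE_STATUS, passing the
 * target address directly as the ioctl argument (see the dispatch in
 * kvm_arch_vcpu_ioctl() below). The two special values select the cases
 * documented before the function; vcpu_fd is assumed to come from
 * KVM_CREATE_VCPU.
 *
 *	ioctl(vcpu_fd, KVM_S390_STORE_STATUS, KVM_S390_STORE_STATUS_NOADDR);
 *	ioctl(vcpu_fd, KVM_S390_STORE_STATUS, KVM_S390_STORE_STATUS_PREFIXED);
 */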

long kvm_arch_vcpu_ioctl(struct file *filp,
                         unsigned int ioctl, unsigned long arg)
{
        struct kvm_vcpu *vcpu = filp->private_data;
        void __user *argp = (void __user *)arg;
        long r;

        switch (ioctl) {
        case KVM_S390_INTERRUPT: {
                struct kvm_s390_interrupt s390int;

                r = -EFAULT;
                if (copy_from_user(&s390int, argp, sizeof(s390int)))
                        break;
                r = kvm_s390_inject_vcpu(vcpu, &s390int);
                break;
        }
        case KVM_S390_STORE_STATUS:
                r = kvm_s390_vcpu_store_status(vcpu, arg);
                break;
        case KVM_S390_SET_INITIAL_PSW: {
                psw_t psw;

                r = -EFAULT;
                if (copy_from_user(&psw, argp, sizeof(psw)))
                        break;
                r = kvm_arch_vcpu_ioctl_set_initial_psw(vcpu, psw);
                break;
        }
        case KVM_S390_INITIAL_RESET:
                r = kvm_arch_vcpu_ioctl_initial_reset(vcpu);
                break;
#ifdef CONFIG_KVM_S390_UCONTROL
        case KVM_S390_UCAS_MAP: {
                struct kvm_s390_ucas_mapping ucasmap;

                if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
                        r = -EFAULT;
                        break;
                }

                if (!kvm_is_ucontrol(vcpu->kvm)) {
                        r = -EINVAL;
                        break;
                }

                r = gmap_map_segment(vcpu->arch.gmap, ucasmap.user_addr,
                                     ucasmap.vcpu_addr, ucasmap.length);
                break;
        }
        case KVM_S390_UCAS_UNMAP: {
                struct kvm_s390_ucas_mapping ucasmap;

                if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
                        r = -EFAULT;
                        break;
                }

                if (!kvm_is_ucontrol(vcpu->kvm)) {
                        r = -EINVAL;
                        break;
                }

                r = gmap_unmap_segment(vcpu->arch.gmap, ucasmap.vcpu_addr,
                                       ucasmap.length);
                break;
        }
#endif
        case KVM_S390_VCPU_FAULT: {
                r = gmap_fault(arg, vcpu->arch.gmap);
                if (!IS_ERR_VALUE(r))
                        r = 0;
                break;
        }
        default:
                r = -ENOTTY;
        }
        return r;
}
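
/*
 * Illustrative sketch, kept in a comment so it is not built: for user
 * controlled VMs, KVM_S390_UCAS_MAP backs part of the vcpu address
 * space with user memory. The addresses and length below are made-up
 * example values; gmap_map_segment() works on 1 MB segments, so real
 * callers keep them segment aligned.
 *
 *	struct kvm_s390_ucas_mapping map = {
 *		.user_addr = (__u64) user_buf,	(hypothetical buffer)
 *		.vcpu_addr = 0,
 *		.length    = 1UL << 20,
 *	};
 *
 *	ioctl(vcpu_fd, KVM_S390_UCAS_MAP, &map);
 */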

int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
{
#ifdef CONFIG_KVM_S390_UCONTROL
        if ((vmf->pgoff == KVM_S390_SIE_PAGE_OFFSET)
                 && (kvm_is_ucontrol(vcpu->kvm))) {
                vmf->page = virt_to_page(vcpu->arch.sie_block);
                get_page(vmf->page);
                return 0;
        }
#endif
        return VM_FAULT_SIGBUS;
}

/* Section: memory related */
int kvm_arch_prepare_memory_region(struct kvm *kvm,
                                   struct kvm_memory_slot *memslot,
                                   struct kvm_memory_slot old,
                                   struct kvm_userspace_memory_region *mem,
                                   int user_alloc)
{
        /* A few sanity checks: we support exactly one memory slot, which
           has to start at guest physical address zero and has to begin
           and end on a 1 MB segment boundary in userland. The memory in
           userland may be fragmented into various different vmas, and it
           is fine to mmap() and munmap() within this slot at any time
           after this call. */

        if (mem->slot)
                return -EINVAL;

        if (mem->guest_phys_addr)
                return -EINVAL;

        if (mem->userspace_addr & 0xffffful)
                return -EINVAL;

        if (mem->memory_size & 0xffffful)
                return -EINVAL;

        if (!user_alloc)
                return -EINVAL;

        return 0;
}
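
/*
 * Illustrative sketch, kept in a comment so it is not built: a region
 * that passes every check above -- slot 0, guest physical address 0,
 * and a userspace address and size aligned to 1 MB (the 0xfffff masks).
 * The posix_memalign() backing is an assumption of the example; vm_fd
 * is assumed to come from KVM_CREATE_VM.
 *
 *	void *backing;
 *
 *	if (posix_memalign(&backing, 1UL << 20, 1UL << 30) == 0) {
 *		struct kvm_userspace_memory_region mem = {
 *			.slot = 0,
 *			.guest_phys_addr = 0,
 *			.userspace_addr  = (unsigned long) backing,
 *			.memory_size     = 1UL << 30,
 *		};
 *
 *		ioctl(vm_fd, KVM_SET_USER_MEMORY_REGION, &mem);
 *	}
 */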

void kvm_arch_commit_memory_region(struct kvm *kvm,
                                   struct kvm_userspace_memory_region *mem,
                                   struct kvm_memory_slot old,
                                   int user_alloc)
{
        int rc;

        rc = gmap_map_segment(kvm->arch.gmap, mem->userspace_addr,
                              mem->guest_phys_addr, mem->memory_size);
        if (rc)
                printk(KERN_WARNING "kvm-s390: failed to commit memory region\n");
        return;
}

void kvm_arch_flush_shadow(struct kvm *kvm)
{
}

static int __init kvm_s390_init(void)
{
        int ret;
        ret = kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
        if (ret)
                return ret;

        /*
         * guests can ask for up to 255+1 double words, we need a full page
         * to hold the maximum amount of facilities. On the other hand, we
         * only set facilities that are known to work in KVM.
         */
        facilities = (unsigned long long *) get_zeroed_page(GFP_KERNEL|GFP_DMA);
        if (!facilities) {
                kvm_exit();
                return -ENOMEM;
        }
        memcpy(facilities, S390_lowcore.stfle_fac_list, 16);
        facilities[0] &= 0xff00fff3f47c0000ULL;
        facilities[1] &= 0x201c000000000000ULL;
        return 0;
}

static void __exit kvm_s390_exit(void)
{
        free_page((unsigned long) facilities);
        kvm_exit();
}

module_init(kvm_s390_init);
module_exit(kvm_s390_exit);