/*
 * kvm-s390.c -- hosting zSeries kernel virtual machines
 *
 * Copyright IBM Corp. 2008,2009
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (version 2 only)
 * as published by the Free Software Foundation.
 *
 *    Author(s): Carsten Otte <cotte@de.ibm.com>
 *               Christian Borntraeger <borntraeger@de.ibm.com>
 *               Heiko Carstens <heiko.carstens@de.ibm.com>
 *               Christian Ehrhardt <ehrhardt@de.ibm.com>
 */

#include <linux/compiler.h>
#include <linux/err.h>
#include <linux/fs.h>
#include <linux/hrtimer.h>
#include <linux/init.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/timer.h>
#include <asm/asm-offsets.h>
#include <asm/lowcore.h>
#include <asm/pgtable.h>
#include <asm/nmi.h>
#include <asm/system.h>
#include "kvm-s390.h"
#include "gaccess.h"

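/*
 * VCPU_STAT expands to the offset/type pair that the generic KVM debugfs
 * code expects; each debugfs_entries item below maps a counter name to a
 * field in struct kvm_vcpu's stat area.
 */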
#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU

struct kvm_stats_debugfs_item debugfs_entries[] = {
	{ "userspace_handled", VCPU_STAT(exit_userspace) },
	{ "exit_null", VCPU_STAT(exit_null) },
	{ "exit_validity", VCPU_STAT(exit_validity) },
	{ "exit_stop_request", VCPU_STAT(exit_stop_request) },
	{ "exit_external_request", VCPU_STAT(exit_external_request) },
	{ "exit_external_interrupt", VCPU_STAT(exit_external_interrupt) },
	{ "exit_instruction", VCPU_STAT(exit_instruction) },
	{ "exit_program_interruption", VCPU_STAT(exit_program_interruption) },
	{ "exit_instr_and_program_int", VCPU_STAT(exit_instr_and_program) },
	{ "instruction_lctlg", VCPU_STAT(instruction_lctlg) },
	{ "instruction_lctl", VCPU_STAT(instruction_lctl) },
	{ "deliver_emergency_signal", VCPU_STAT(deliver_emergency_signal) },
	{ "deliver_service_signal", VCPU_STAT(deliver_service_signal) },
	{ "deliver_virtio_interrupt", VCPU_STAT(deliver_virtio_interrupt) },
	{ "deliver_stop_signal", VCPU_STAT(deliver_stop_signal) },
	{ "deliver_prefix_signal", VCPU_STAT(deliver_prefix_signal) },
	{ "deliver_restart_signal", VCPU_STAT(deliver_restart_signal) },
	{ "deliver_program_interruption", VCPU_STAT(deliver_program_int) },
	{ "exit_wait_state", VCPU_STAT(exit_wait_state) },
	{ "instruction_stidp", VCPU_STAT(instruction_stidp) },
	{ "instruction_spx", VCPU_STAT(instruction_spx) },
	{ "instruction_stpx", VCPU_STAT(instruction_stpx) },
	{ "instruction_stap", VCPU_STAT(instruction_stap) },
	{ "instruction_storage_key", VCPU_STAT(instruction_storage_key) },
	{ "instruction_stsch", VCPU_STAT(instruction_stsch) },
	{ "instruction_chsc", VCPU_STAT(instruction_chsc) },
	{ "instruction_stsi", VCPU_STAT(instruction_stsi) },
	{ "instruction_stfl", VCPU_STAT(instruction_stfl) },
	{ "instruction_sigp_sense", VCPU_STAT(instruction_sigp_sense) },
	{ "instruction_sigp_emergency", VCPU_STAT(instruction_sigp_emergency) },
	{ "instruction_sigp_stop", VCPU_STAT(instruction_sigp_stop) },
	{ "instruction_sigp_set_arch", VCPU_STAT(instruction_sigp_arch) },
	{ "instruction_sigp_set_prefix", VCPU_STAT(instruction_sigp_prefix) },
	{ "instruction_sigp_restart", VCPU_STAT(instruction_sigp_restart) },
	{ "diagnose_44", VCPU_STAT(diagnose_44) },
	{ NULL }
};

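/*
 * Page-sized bitmap of the facility bits announced to guests; allocated
 * and filtered in kvm_s390_init() below.
 */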
static unsigned long long *facilities;

/* Section: not file related */
int kvm_arch_hardware_enable(void *garbage)
{
	/* every s390 is virtualization enabled ;-) */
	return 0;
}

void kvm_arch_hardware_disable(void *garbage)
{
}

int kvm_arch_hardware_setup(void)
{
	return 0;
}

void kvm_arch_hardware_unsetup(void)
{
}

void kvm_arch_check_processor_compat(void *rtn)
{
}

int kvm_arch_init(void *opaque)
{
	return 0;
}

void kvm_arch_exit(void)
{
}

/* Section: device related */
long kvm_arch_dev_ioctl(struct file *filp,
			unsigned int ioctl, unsigned long arg)
{
	if (ioctl == KVM_S390_ENABLE_SIE)
		return s390_enable_sie();
	return -EINVAL;
}

int kvm_dev_ioctl_check_extension(long ext)
{
	int r;

	switch (ext) {
	case KVM_CAP_S390_PSW:
		r = 1;
		break;
	default:
		r = 0;
	}
	return r;
}

/* Section: vm related */
/*
 * Get (and clear) the dirty memory log for a memory slot.
 */
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
			       struct kvm_dirty_log *log)
{
	return 0;
}

long kvm_arch_vm_ioctl(struct file *filp,
		       unsigned int ioctl, unsigned long arg)
{
	struct kvm *kvm = filp->private_data;
	void __user *argp = (void __user *)arg;
	int r;

	switch (ioctl) {
	case KVM_S390_INTERRUPT: {
		struct kvm_s390_interrupt s390int;

		r = -EFAULT;
		if (copy_from_user(&s390int, argp, sizeof(s390int)))
			break;
		r = kvm_s390_inject_vm(kvm, &s390int);
		break;
	}
	default:
		r = -ENOTTY;
	}

	return r;
}

struct kvm *kvm_arch_create_vm(void)
{
	struct kvm *kvm;
	int rc;
	char debug_name[16];

	rc = s390_enable_sie();
	if (rc)
		goto out_nokvm;

	rc = -ENOMEM;
	kvm = kzalloc(sizeof(struct kvm), GFP_KERNEL);
	if (!kvm)
		goto out_nokvm;

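	/*
	 * The SCA (system control area) is shared by all vcpus of this VM;
	 * kvm_arch_vcpu_create() hooks each vcpu's SIE block into it.
	 */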
	kvm->arch.sca = (struct sca_block *) get_zeroed_page(GFP_KERNEL);
	if (!kvm->arch.sca)
		goto out_nosca;

	sprintf(debug_name, "kvm-%u", current->pid);

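	/* Register an s390 debug feature area so that the VM_EVENT and
	 * VCPU_EVENT traces used throughout this file have somewhere to go. */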
	kvm->arch.dbf = debug_register(debug_name, 8, 2, 8 * sizeof(long));
	if (!kvm->arch.dbf)
		goto out_nodbf;

	spin_lock_init(&kvm->arch.float_int.lock);
	INIT_LIST_HEAD(&kvm->arch.float_int.list);

	debug_register_view(kvm->arch.dbf, &debug_sprintf_view);
	VM_EVENT(kvm, 3, "%s", "vm created");

	return kvm;
out_nodbf:
	free_page((unsigned long)(kvm->arch.sca));
out_nosca:
	kfree(kvm);
out_nokvm:
	return ERR_PTR(rc);
}

void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{
	VCPU_EVENT(vcpu, 3, "%s", "free cpu");
	if (vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda ==
		(__u64) vcpu->arch.sie_block)
		vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda = 0;
	smp_mb();
	free_page((unsigned long)(vcpu->arch.sie_block));
	kvm_vcpu_uninit(vcpu);
	kfree(vcpu);
}

static void kvm_free_vcpus(struct kvm *kvm)
{
	unsigned int i;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(i, vcpu, kvm)
		kvm_arch_vcpu_destroy(vcpu);

	mutex_lock(&kvm->lock);
	for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
		kvm->vcpus[i] = NULL;

	atomic_set(&kvm->online_vcpus, 0);
	mutex_unlock(&kvm->lock);
}

void kvm_arch_sync_events(struct kvm *kvm)
{
}

void kvm_arch_destroy_vm(struct kvm *kvm)
{
	kvm_free_vcpus(kvm);
	kvm_free_physmem(kvm);
	free_page((unsigned long)(kvm->arch.sca));
	debug_unregister(kvm->arch.dbf);
	cleanup_srcu_struct(&kvm->srcu);
	kfree(kvm);
}

/* Section: vcpu related */
int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
{
	return 0;
}

void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
{
	/* Nothing to do */
}

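/* Swap FP and access registers between host and guest on sched-in/out. */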
void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	save_fp_regs(&vcpu->arch.host_fpregs);
	save_access_regs(vcpu->arch.host_acrs);
	vcpu->arch.guest_fpregs.fpc &= FPC_VALID_MASK;
	restore_fp_regs(&vcpu->arch.guest_fpregs);
	restore_access_regs(vcpu->arch.guest_acrs);
}

void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
	save_fp_regs(&vcpu->arch.guest_fpregs);
	save_access_regs(vcpu->arch.guest_acrs);
	restore_fp_regs(&vcpu->arch.host_fpregs);
	restore_access_regs(vcpu->arch.host_acrs);
}

static void kvm_s390_vcpu_initial_reset(struct kvm_vcpu *vcpu)
{
	/* this equals initial cpu reset in pop, but we don't switch to ESA */
	vcpu->arch.sie_block->gpsw.mask = 0UL;
	vcpu->arch.sie_block->gpsw.addr = 0UL;
	vcpu->arch.sie_block->prefix = 0UL;
	vcpu->arch.sie_block->ihcpu = 0xffff;
	vcpu->arch.sie_block->cputm = 0UL;
	vcpu->arch.sie_block->ckc = 0UL;
	vcpu->arch.sie_block->todpr = 0;
	memset(vcpu->arch.sie_block->gcr, 0, 16 * sizeof(__u64));
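	/* Architected reset contents of CR0 and CR14; all other control
	 * registers stay zero (see initial CPU reset in the POP). */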
	vcpu->arch.sie_block->gcr[0] = 0xE0UL;
	vcpu->arch.sie_block->gcr[14] = 0xC2000000UL;
	vcpu->arch.guest_fpregs.fpc = 0;
	asm volatile("lfpc %0" : : "Q" (vcpu->arch.guest_fpregs.fpc));
	vcpu->arch.sie_block->gbea = 1;
}

int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
{
	atomic_set(&vcpu->arch.sie_block->cpuflags, CPUSTAT_ZARCH);
	set_bit(KVM_REQ_MMU_RELOAD, &vcpu->requests);
	vcpu->arch.sie_block->ecb = 2;
	vcpu->arch.sie_block->eca = 0xC1002001U;
	vcpu->arch.sie_block->fac = (int) (long) facilities;
	hrtimer_init(&vcpu->arch.ckc_timer, CLOCK_REALTIME, HRTIMER_MODE_ABS);
	tasklet_init(&vcpu->arch.tasklet, kvm_s390_tasklet,
		     (unsigned long) vcpu);
	vcpu->arch.ckc_timer.function = kvm_s390_idle_wakeup;
	get_cpu_id(&vcpu->arch.cpu_id);
	vcpu->arch.cpu_id.version = 0xff;
	return 0;
}

struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
				      unsigned int id)
{
	struct kvm_vcpu *vcpu = kzalloc(sizeof(struct kvm_vcpu), GFP_KERNEL);
	int rc = -ENOMEM;

	if (!vcpu)
		goto out_nomem;

	vcpu->arch.sie_block = (struct kvm_s390_sie_block *)
					get_zeroed_page(GFP_KERNEL);

	if (!vcpu->arch.sie_block)
		goto out_free_cpu;

	vcpu->arch.sie_block->icpua = id;
	BUG_ON(!kvm->arch.sca);
	if (!kvm->arch.sca->cpu[id].sda)
		kvm->arch.sca->cpu[id].sda = (__u64) vcpu->arch.sie_block;
	vcpu->arch.sie_block->scaoh = (__u32)(((__u64)kvm->arch.sca) >> 32);
	vcpu->arch.sie_block->scaol = (__u32)(__u64)kvm->arch.sca;

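	/*
	 * Wire this vcpu's local interrupt state into the VM-wide floating
	 * interrupt structure so that injected interrupts can reach it.
	 */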
	spin_lock_init(&vcpu->arch.local_int.lock);
	INIT_LIST_HEAD(&vcpu->arch.local_int.list);
	vcpu->arch.local_int.float_int = &kvm->arch.float_int;
	spin_lock(&kvm->arch.float_int.lock);
	kvm->arch.float_int.local_int[id] = &vcpu->arch.local_int;
	init_waitqueue_head(&vcpu->arch.local_int.wq);
	vcpu->arch.local_int.cpuflags = &vcpu->arch.sie_block->cpuflags;
	spin_unlock(&kvm->arch.float_int.lock);

	rc = kvm_vcpu_init(vcpu, kvm, id);
	if (rc)
		goto out_free_sie_block;
	VM_EVENT(kvm, 3, "create cpu %d at %p, sie block at %p", id, vcpu,
		 vcpu->arch.sie_block);

	return vcpu;
out_free_sie_block:
	free_page((unsigned long) vcpu->arch.sie_block);
out_free_cpu:
	kfree(vcpu);
out_nomem:
	return ERR_PTR(rc);
}

int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
{
	/* kvm common code refers to this, but never calls it */
	BUG();
	return 0;
}

static int kvm_arch_vcpu_ioctl_initial_reset(struct kvm_vcpu *vcpu)
{
	vcpu_load(vcpu);
	kvm_s390_vcpu_initial_reset(vcpu);
	vcpu_put(vcpu);
	return 0;
}

int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	vcpu_load(vcpu);
	memcpy(&vcpu->arch.guest_gprs, &regs->gprs, sizeof(regs->gprs));
	vcpu_put(vcpu);
	return 0;
}

int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	vcpu_load(vcpu);
	memcpy(&regs->gprs, &vcpu->arch.guest_gprs, sizeof(regs->gprs));
	vcpu_put(vcpu);
	return 0;
}

int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	vcpu_load(vcpu);
	memcpy(&vcpu->arch.guest_acrs, &sregs->acrs, sizeof(sregs->acrs));
	memcpy(&vcpu->arch.sie_block->gcr, &sregs->crs, sizeof(sregs->crs));
	vcpu_put(vcpu);
	return 0;
}

int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	vcpu_load(vcpu);
	memcpy(&sregs->acrs, &vcpu->arch.guest_acrs, sizeof(sregs->acrs));
	memcpy(&sregs->crs, &vcpu->arch.sie_block->gcr, sizeof(sregs->crs));
	vcpu_put(vcpu);
	return 0;
}

int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	vcpu_load(vcpu);
	memcpy(&vcpu->arch.guest_fpregs.fprs, &fpu->fprs, sizeof(fpu->fprs));
	vcpu->arch.guest_fpregs.fpc = fpu->fpc;
	vcpu_put(vcpu);
	return 0;
}

int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	vcpu_load(vcpu);
	memcpy(&fpu->fprs, &vcpu->arch.guest_fpregs.fprs, sizeof(fpu->fprs));
	fpu->fpc = vcpu->arch.guest_fpregs.fpc;
	vcpu_put(vcpu);
	return 0;
}

static int kvm_arch_vcpu_ioctl_set_initial_psw(struct kvm_vcpu *vcpu, psw_t psw)
{
	int rc = 0;

	vcpu_load(vcpu);
	if (atomic_read(&vcpu->arch.sie_block->cpuflags) & CPUSTAT_RUNNING)
		rc = -EBUSY;
	else {
		vcpu->run->psw_mask = psw.mask;
		vcpu->run->psw_addr = psw.addr;
	}
	vcpu_put(vcpu);
	return rc;
}

int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
				  struct kvm_translation *tr)
{
	return -EINVAL; /* not implemented yet */
}

int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
					struct kvm_guest_debug *dbg)
{
	return -EINVAL; /* not implemented yet */
}

int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	return -EINVAL; /* not implemented yet */
}

int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	return -EINVAL; /* not implemented yet */
}

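/*
 * Enter the guest via the SIE instruction: deliver pending interrupts,
 * sync guest gprs 14/15 into the SIE block, run sie64a(), and trace the
 * entry and exit.
 */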
static void __vcpu_run(struct kvm_vcpu *vcpu)
{
	memcpy(&vcpu->arch.sie_block->gg14, &vcpu->arch.guest_gprs[14], 16);

	if (need_resched())
		schedule();

	if (test_thread_flag(TIF_MCCK_PENDING))
		s390_handle_mcck();

	kvm_s390_deliver_pending_interrupts(vcpu);

	vcpu->arch.sie_block->icptcode = 0;
	local_irq_disable();
	kvm_guest_enter();
	local_irq_enable();
	VCPU_EVENT(vcpu, 6, "entering sie flags %x",
		   atomic_read(&vcpu->arch.sie_block->cpuflags));
	if (sie64a(vcpu->arch.sie_block, vcpu->arch.guest_gprs)) {
		VCPU_EVENT(vcpu, 3, "%s", "fault in sie instruction");
		kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
	}
	VCPU_EVENT(vcpu, 6, "exit sie icptcode %d",
		   vcpu->arch.sie_block->icptcode);
	local_irq_disable();
	kvm_guest_exit();
	local_irq_enable();

	memcpy(&vcpu->arch.guest_gprs[14], &vcpu->arch.sie_block->gg14, 16);
}

int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	int rc;
	sigset_t sigsaved;

	vcpu_load(vcpu);

rerun_vcpu:
	if (vcpu->requests)
		if (test_and_clear_bit(KVM_REQ_MMU_RELOAD, &vcpu->requests))
			kvm_s390_vcpu_set_mem(vcpu);

	/* verify that memory has been registered */
	if (!vcpu->arch.sie_block->gmslm) {
		vcpu_put(vcpu);
		VCPU_EVENT(vcpu, 3, "%s", "no memory registered to run vcpu");
		return -EINVAL;
	}

	if (vcpu->sigset_active)
		sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);

	atomic_set_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);

	BUG_ON(vcpu->kvm->arch.float_int.local_int[vcpu->vcpu_id] == NULL);

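	/* Sanity check: only these exit reasons may legally re-enter the
	 * vcpu run loop here. */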
	switch (kvm_run->exit_reason) {
	case KVM_EXIT_S390_SIEIC:
	case KVM_EXIT_UNKNOWN:
	case KVM_EXIT_INTR:
	case KVM_EXIT_S390_RESET:
		break;
	default:
		BUG();
	}

	vcpu->arch.sie_block->gpsw.mask = kvm_run->psw_mask;
	vcpu->arch.sie_block->gpsw.addr = kvm_run->psw_addr;

	might_fault();

	do {
		__vcpu_run(vcpu);
		rc = kvm_handle_sie_intercept(vcpu);
	} while (!signal_pending(current) && !rc);

	if (rc == SIE_INTERCEPT_RERUNVCPU)
		goto rerun_vcpu;

	if (signal_pending(current) && !rc) {
		kvm_run->exit_reason = KVM_EXIT_INTR;
		rc = -EINTR;
	}

	if (rc == -EOPNOTSUPP) {
		/* intercept cannot be handled in-kernel, prepare kvm-run */
		kvm_run->exit_reason = KVM_EXIT_S390_SIEIC;
		kvm_run->s390_sieic.icptcode = vcpu->arch.sie_block->icptcode;
		kvm_run->s390_sieic.ipa = vcpu->arch.sie_block->ipa;
		kvm_run->s390_sieic.ipb = vcpu->arch.sie_block->ipb;
		rc = 0;
	}

	if (rc == -EREMOTE) {
		/* intercept was handled, but userspace support is needed;
		 * kvm_run has been prepared by the handler */
		rc = 0;
	}

	kvm_run->psw_mask = vcpu->arch.sie_block->gpsw.mask;
	kvm_run->psw_addr = vcpu->arch.sie_block->gpsw.addr;

	if (vcpu->sigset_active)
		sigprocmask(SIG_SETMASK, &sigsaved, NULL);

	vcpu_put(vcpu);

	vcpu->stat.exit_userspace++;
	return rc;
}

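/*
 * Copy to guest memory either through the prefixed (logical) mapping or
 * to absolute guest addresses, depending on how the target address was
 * derived by the caller.
 */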
static int __guestcopy(struct kvm_vcpu *vcpu, u64 guestdest, const void *from,
		       unsigned long n, int prefix)
{
	if (prefix)
		return copy_to_guest(vcpu, guestdest, from, n);
	else
		return copy_to_guest_absolute(vcpu, guestdest, from, n);
}

/*
 * store status at address
 * we have two special cases:
 * KVM_S390_STORE_STATUS_NOADDR: -> 0x1200 on 64 bit
 * KVM_S390_STORE_STATUS_PREFIXED: -> prefix
 */
int __kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
{
	const unsigned char archmode = 1;
	int prefix;

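	/* The byte at absolute address 163 holds the architecture-mode id
	 * for store status; storing 1 marks z/Architecture format. */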
	if (addr == KVM_S390_STORE_STATUS_NOADDR) {
		if (copy_to_guest_absolute(vcpu, 163ul, &archmode, 1))
			return -EFAULT;
		addr = SAVE_AREA_BASE;
		prefix = 0;
	} else if (addr == KVM_S390_STORE_STATUS_PREFIXED) {
		if (copy_to_guest(vcpu, 163ul, &archmode, 1))
			return -EFAULT;
		addr = SAVE_AREA_BASE;
		prefix = 1;
	} else
		prefix = 0;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, fp_regs),
			vcpu->arch.guest_fpregs.fprs, 128, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, gp_regs),
			vcpu->arch.guest_gprs, 128, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, psw),
			&vcpu->arch.sie_block->gpsw, 16, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, pref_reg),
			&vcpu->arch.sie_block->prefix, 4, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu,
			addr + offsetof(struct save_area, fp_ctrl_reg),
			&vcpu->arch.guest_fpregs.fpc, 4, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, tod_reg),
			&vcpu->arch.sie_block->todpr, 4, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, timer),
			&vcpu->arch.sie_block->cputm, 8, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, clk_cmp),
			&vcpu->arch.sie_block->ckc, 8, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, acc_regs),
			&vcpu->arch.guest_acrs, 64, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu,
			addr + offsetof(struct save_area, ctrl_regs),
			&vcpu->arch.sie_block->gcr, 128, prefix))
		return -EFAULT;
	return 0;
}

static int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
{
	int rc;

	vcpu_load(vcpu);
	rc = __kvm_s390_vcpu_store_status(vcpu, addr);
	vcpu_put(vcpu);
	return rc;
}

long kvm_arch_vcpu_ioctl(struct file *filp,
			 unsigned int ioctl, unsigned long arg)
{
	struct kvm_vcpu *vcpu = filp->private_data;
	void __user *argp = (void __user *)arg;

	switch (ioctl) {
	case KVM_S390_INTERRUPT: {
		struct kvm_s390_interrupt s390int;

		if (copy_from_user(&s390int, argp, sizeof(s390int)))
			return -EFAULT;
		return kvm_s390_inject_vcpu(vcpu, &s390int);
	}
	case KVM_S390_STORE_STATUS:
		return kvm_s390_vcpu_store_status(vcpu, arg);
	case KVM_S390_SET_INITIAL_PSW: {
		psw_t psw;

		if (copy_from_user(&psw, argp, sizeof(psw)))
			return -EFAULT;
		return kvm_arch_vcpu_ioctl_set_initial_psw(vcpu, psw);
	}
	case KVM_S390_INITIAL_RESET:
		return kvm_arch_vcpu_ioctl_initial_reset(vcpu);
	default:
		;
	}
	return -EINVAL;
}

/* Section: memory related */
int kvm_arch_prepare_memory_region(struct kvm *kvm,
				   struct kvm_memory_slot *memslot,
				   struct kvm_memory_slot old,
				   struct kvm_userspace_memory_region *mem,
				   int user_alloc)
{
	/* A few sanity checks. We can have exactly one memory slot, which
	 * has to start at guest physical zero, begin at a page boundary in
	 * userland, and end on a page boundary. The userland memory may be
	 * fragmented into multiple vmas; it is fine to mmap() and munmap()
	 * in this slot at any time after this call. */

	if (mem->slot)
		return -EINVAL;

	if (mem->guest_phys_addr)
		return -EINVAL;

	if (mem->userspace_addr & (PAGE_SIZE - 1))
		return -EINVAL;

	if (mem->memory_size & (PAGE_SIZE - 1))
		return -EINVAL;

	if (!user_alloc)
		return -EINVAL;

	return 0;
}

void kvm_arch_commit_memory_region(struct kvm *kvm,
				   struct kvm_userspace_memory_region *mem,
				   struct kvm_memory_slot old,
				   int user_alloc)
{
	int i;
	struct kvm_vcpu *vcpu;

	/* request update of sie control block for all available vcpus */
	kvm_for_each_vcpu(i, vcpu, kvm) {
		if (test_and_set_bit(KVM_REQ_MMU_RELOAD, &vcpu->requests))
			continue;
		kvm_s390_inject_sigp_stop(vcpu, ACTION_RELOADVCPU_ON_STOP);
	}
}

void kvm_arch_flush_shadow(struct kvm *kvm)
{
}

gfn_t unalias_gfn(struct kvm *kvm, gfn_t gfn)
{
	return gfn;
}

static int __init kvm_s390_init(void)
{
	int ret;

	ret = kvm_init(NULL, sizeof(struct kvm_vcpu), THIS_MODULE);
	if (ret)
		return ret;

	/*
	 * guests can ask for up to 255+1 double words, so we need a full
	 * page to hold the maximum number of facilities. On the other hand,
	 * we only set facilities that are known to work in KVM.
	 */
	facilities = (unsigned long long *) get_zeroed_page(GFP_DMA);
	if (!facilities) {
		kvm_exit();
		return -ENOMEM;
	}
	stfle(facilities, 1);
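	/* Keep only the facility bits that KVM is known to handle. */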
	facilities[0] &= 0xff00fff3f0700000ULL;
	return 0;
}

static void __exit kvm_s390_exit(void)
{
	free_page((unsigned long) facilities);
	kvm_exit();
}

module_init(kvm_s390_init);
module_exit(kvm_s390_exit);