/*
 * kvm-s390.c -- hosting zSeries kernel virtual machines
 *
 * Copyright IBM Corp. 2008
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (version 2 only)
 * as published by the Free Software Foundation.
 *
 *    Author(s): Carsten Otte <cotte@de.ibm.com>
 *               Christian Borntraeger <borntraeger@de.ibm.com>
 *               Heiko Carstens <heiko.carstens@de.ibm.com>
 */

#include <linux/compiler.h>
#include <linux/err.h>
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/timer.h>
#include <asm/lowcore.h>
#include <asm/pgtable.h>

#include "kvm-s390.h"
#include "gaccess.h"

#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU

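/*
 * Statistics exported via debugfs: each entry maps a file name to the
 * offset of a counter inside struct kvm_vcpu and is consumed by the
 * common KVM debugfs code.
 */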
struct kvm_stats_debugfs_item debugfs_entries[] = {
	{ "userspace_handled", VCPU_STAT(exit_userspace) },
	{ "exit_null", VCPU_STAT(exit_null) },
	{ "exit_validity", VCPU_STAT(exit_validity) },
	{ "exit_stop_request", VCPU_STAT(exit_stop_request) },
	{ "exit_external_request", VCPU_STAT(exit_external_request) },
	{ "exit_external_interrupt", VCPU_STAT(exit_external_interrupt) },
	{ "exit_instruction", VCPU_STAT(exit_instruction) },
	{ "exit_program_interruption", VCPU_STAT(exit_program_interruption) },
	{ "exit_instr_and_program_int", VCPU_STAT(exit_instr_and_program) },
	{ "instruction_lctlg", VCPU_STAT(instruction_lctlg) },
	{ "instruction_lctl", VCPU_STAT(instruction_lctl) },
	{ "deliver_emergency_signal", VCPU_STAT(deliver_emergency_signal) },
	{ "deliver_service_signal", VCPU_STAT(deliver_service_signal) },
	{ "deliver_virtio_interrupt", VCPU_STAT(deliver_virtio_interrupt) },
	{ "deliver_stop_signal", VCPU_STAT(deliver_stop_signal) },
	{ "deliver_prefix_signal", VCPU_STAT(deliver_prefix_signal) },
	{ "deliver_restart_signal", VCPU_STAT(deliver_restart_signal) },
	{ "deliver_program_interruption", VCPU_STAT(deliver_program_int) },
	{ "exit_wait_state", VCPU_STAT(exit_wait_state) },
	{ "instruction_stidp", VCPU_STAT(instruction_stidp) },
	{ "instruction_spx", VCPU_STAT(instruction_spx) },
	{ "instruction_stpx", VCPU_STAT(instruction_stpx) },
	{ "instruction_stap", VCPU_STAT(instruction_stap) },
	{ "instruction_storage_key", VCPU_STAT(instruction_storage_key) },
	{ "instruction_stsch", VCPU_STAT(instruction_stsch) },
	{ "instruction_chsc", VCPU_STAT(instruction_chsc) },
	{ "instruction_stsi", VCPU_STAT(instruction_stsi) },
	{ "instruction_stfl", VCPU_STAT(instruction_stfl) },
	{ "instruction_sigp_sense", VCPU_STAT(instruction_sigp_sense) },
	{ "instruction_sigp_emergency", VCPU_STAT(instruction_sigp_emergency) },
	{ "instruction_sigp_stop", VCPU_STAT(instruction_sigp_stop) },
	{ "instruction_sigp_set_arch", VCPU_STAT(instruction_sigp_arch) },
	{ "instruction_sigp_set_prefix", VCPU_STAT(instruction_sigp_prefix) },
	{ "instruction_sigp_restart", VCPU_STAT(instruction_sigp_restart) },
	{ "diagnose_44", VCPU_STAT(diagnose_44) },
	{ NULL }
};

/* Section: not file related */
void kvm_arch_hardware_enable(void *garbage)
{
	/* every s390 is virtualization enabled ;-) */
}

void kvm_arch_hardware_disable(void *garbage)
{
}

int kvm_arch_hardware_setup(void)
{
	return 0;
}

void kvm_arch_hardware_unsetup(void)
{
}

void kvm_arch_check_processor_compat(void *rtn)
{
}

int kvm_arch_init(void *opaque)
{
	return 0;
}

void kvm_arch_exit(void)
{
}

/* Section: device related */
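/*
 * The only device ioctl, KVM_S390_ENABLE_SIE, prepares the calling
 * process' address space for use with the SIE instruction; everything
 * else is rejected.
 */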
long kvm_arch_dev_ioctl(struct file *filp,
			unsigned int ioctl, unsigned long arg)
{
	if (ioctl == KVM_S390_ENABLE_SIE)
		return s390_enable_sie();
	return -EINVAL;
}

int kvm_dev_ioctl_check_extension(long ext)
{
	switch (ext) {
	case KVM_CAP_USER_MEMORY:
		return 1;
	default:
		return 0;
	}
}

/* Section: vm related */
/*
 * Get (and clear) the dirty memory log for a memory slot.
 * Dirty logging is not implemented on s390 yet; pretend success.
 */
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
			       struct kvm_dirty_log *log)
{
	return 0;
}

long kvm_arch_vm_ioctl(struct file *filp,
		       unsigned int ioctl, unsigned long arg)
{
	struct kvm *kvm = filp->private_data;
	void __user *argp = (void __user *)arg;
	int r;

	switch (ioctl) {
	case KVM_S390_INTERRUPT: {
		struct kvm_s390_interrupt s390int;

		r = -EFAULT;
		if (copy_from_user(&s390int, argp, sizeof(s390int)))
			break;
		r = kvm_s390_inject_vm(kvm, &s390int);
		break;
	}
	default:
		r = -EINVAL;
	}

	return r;
}

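/*
 * VM creation sets up the SCA (system control area, shared with the
 * hardware while in SIE), a per-VM s390 debug feature trace area named
 * "kvm-<pid>", and the list of floating (not cpu-local) interrupts.
 */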
struct kvm *kvm_arch_create_vm(void)
{
	struct kvm *kvm;
	int rc;
	char debug_name[16];

	rc = s390_enable_sie();
	if (rc)
		goto out_nokvm;

	rc = -ENOMEM;
	kvm = kzalloc(sizeof(struct kvm), GFP_KERNEL);
	if (!kvm)
		goto out_nokvm;

	kvm->arch.sca = (struct sca_block *) get_zeroed_page(GFP_KERNEL);
	if (!kvm->arch.sca)
		goto out_nosca;

	sprintf(debug_name, "kvm-%u", current->pid);

	kvm->arch.dbf = debug_register(debug_name, 8, 2, 8 * sizeof(long));
	if (!kvm->arch.dbf)
		goto out_nodbf;

	spin_lock_init(&kvm->arch.float_int.lock);
	INIT_LIST_HEAD(&kvm->arch.float_int.list);

	debug_register_view(kvm->arch.dbf, &debug_sprintf_view);
	VM_EVENT(kvm, 3, "%s", "vm created");

	return kvm;
out_nodbf:
	free_page((unsigned long)(kvm->arch.sca));
out_nosca:
	kfree(kvm);
out_nokvm:
	return ERR_PTR(rc);
}

void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{
	VCPU_EVENT(vcpu, 3, "%s", "free cpu");
	free_page((unsigned long)(vcpu->arch.sie_block));
	kvm_vcpu_uninit(vcpu);
	kfree(vcpu);
}

static void kvm_free_vcpus(struct kvm *kvm)
{
	unsigned int i;

	for (i = 0; i < KVM_MAX_VCPUS; ++i) {
		if (kvm->vcpus[i]) {
			kvm_arch_vcpu_destroy(kvm->vcpus[i]);
			kvm->vcpus[i] = NULL;
		}
	}
}

void kvm_arch_destroy_vm(struct kvm *kvm)
{
	kvm_free_vcpus(kvm);
	kvm_free_physmem(kvm);
	free_page((unsigned long)(kvm->arch.sca));
	debug_unregister(kvm->arch.dbf);
	kfree(kvm);
}

/* Section: vcpu related */
int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
{
	return 0;
}

void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
{
	/* Nothing to do */
}

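/*
 * On schedule-in, save the host's floating point and access registers
 * and load the guest's; kvm_arch_vcpu_put below does the inverse.
 */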
void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	save_fp_regs(&vcpu->arch.host_fpregs);
	save_access_regs(vcpu->arch.host_acrs);
	vcpu->arch.guest_fpregs.fpc &= FPC_VALID_MASK;
	restore_fp_regs(&vcpu->arch.guest_fpregs);
	restore_access_regs(vcpu->arch.guest_acrs);
}

void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
	save_fp_regs(&vcpu->arch.guest_fpregs);
	save_access_regs(vcpu->arch.guest_acrs);
	restore_fp_regs(&vcpu->arch.host_fpregs);
	restore_access_regs(vcpu->arch.host_acrs);
}

static void kvm_s390_vcpu_initial_reset(struct kvm_vcpu *vcpu)
{
	/* this equals initial cpu reset in POP, but we don't switch to ESA */
	vcpu->arch.sie_block->gpsw.mask = 0UL;
	vcpu->arch.sie_block->gpsw.addr = 0UL;
	vcpu->arch.sie_block->prefix = 0UL;
	vcpu->arch.sie_block->ihcpu = 0xffff;
	vcpu->arch.sie_block->cputm = 0UL;
	vcpu->arch.sie_block->ckc = 0UL;
	vcpu->arch.sie_block->todpr = 0;
	memset(vcpu->arch.sie_block->gcr, 0, 16 * sizeof(__u64));
	vcpu->arch.sie_block->gcr[0] = 0xE0UL;
	vcpu->arch.sie_block->gcr[14] = 0xC2000000UL;
	vcpu->arch.guest_fpregs.fpc = 0;
	asm volatile("lfpc %0" : : "Q" (vcpu->arch.guest_fpregs.fpc));
	vcpu->arch.sie_block->gbea = 1;
}

/* The current code can have up to 256 pages for virtio */
#define VIRTIODESCSPACE (256ul * 4096ul)

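/*
 * gmsor/gmslm presumably bound the guest memory window seen by SIE:
 * it starts at the guest origin and ends after guest memory plus the
 * 1 MB of virtio descriptor space reserved above it.
 */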
int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
{
	atomic_set(&vcpu->arch.sie_block->cpuflags, CPUSTAT_ZARCH);
	vcpu->arch.sie_block->gmslm = vcpu->kvm->arch.guest_memsize +
				      vcpu->kvm->arch.guest_origin +
				      VIRTIODESCSPACE - 1ul;
	vcpu->arch.sie_block->gmsor = vcpu->kvm->arch.guest_origin;
	vcpu->arch.sie_block->ecb = 2;
	vcpu->arch.sie_block->eca = 0xC1002001U;
	setup_timer(&vcpu->arch.ckc_timer, kvm_s390_idle_wakeup,
		    (unsigned long) vcpu);
	get_cpu_id(&vcpu->arch.cpu_id);
	vcpu->arch.cpu_id.version = 0xfe;
	return 0;
}

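/*
 * Each vcpu's SIE control block is entered into the SCA, with the
 * 64 bit block address split into its high (scaoh) and low (scaol)
 * halves.
 */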
struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
				      unsigned int id)
{
	struct kvm_vcpu *vcpu = kzalloc(sizeof(struct kvm_vcpu), GFP_KERNEL);
	int rc = -ENOMEM;

	if (!vcpu)
		goto out_nomem;

	vcpu->arch.sie_block = (struct kvm_s390_sie_block *)
		get_zeroed_page(GFP_KERNEL);

	if (!vcpu->arch.sie_block)
		goto out_free_cpu;

	vcpu->arch.sie_block->icpua = id;
	BUG_ON(!kvm->arch.sca);
	BUG_ON(kvm->arch.sca->cpu[id].sda);
	kvm->arch.sca->cpu[id].sda = (__u64) vcpu->arch.sie_block;
	vcpu->arch.sie_block->scaoh = (__u32)(((__u64)kvm->arch.sca) >> 32);
	vcpu->arch.sie_block->scaol = (__u32)(__u64)kvm->arch.sca;

	spin_lock_init(&vcpu->arch.local_int.lock);
	INIT_LIST_HEAD(&vcpu->arch.local_int.list);
	vcpu->arch.local_int.float_int = &kvm->arch.float_int;
	spin_lock_bh(&kvm->arch.float_int.lock);
	kvm->arch.float_int.local_int[id] = &vcpu->arch.local_int;
	init_waitqueue_head(&vcpu->arch.local_int.wq);
	vcpu->arch.local_int.cpuflags = &vcpu->arch.sie_block->cpuflags;
	spin_unlock_bh(&kvm->arch.float_int.lock);

	rc = kvm_vcpu_init(vcpu, kvm, id);
	if (rc)
		goto out_free_sie_block;
	VM_EVENT(kvm, 3, "create cpu %d at %p, sie block at %p", id, vcpu,
		 vcpu->arch.sie_block);

	return vcpu;
out_free_sie_block:
	/* don't leak the sie block page when late init fails */
	free_page((unsigned long)(vcpu->arch.sie_block));
out_free_cpu:
	kfree(vcpu);
out_nomem:
	return ERR_PTR(rc);
}

int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
{
	/* kvm common code refers to this, but never calls it */
	BUG();
	return 0;
}

static int kvm_arch_vcpu_ioctl_initial_reset(struct kvm_vcpu *vcpu)
{
	vcpu_load(vcpu);
	kvm_s390_vcpu_initial_reset(vcpu);
	vcpu_put(vcpu);
	return 0;
}

int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	vcpu_load(vcpu);
	memcpy(&vcpu->arch.guest_gprs, &regs->gprs, sizeof(regs->gprs));
	vcpu_put(vcpu);
	return 0;
}

int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	vcpu_load(vcpu);
	memcpy(&regs->gprs, &vcpu->arch.guest_gprs, sizeof(regs->gprs));
	vcpu_put(vcpu);
	return 0;
}

int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	vcpu_load(vcpu);
	memcpy(&vcpu->arch.guest_acrs, &sregs->acrs, sizeof(sregs->acrs));
	memcpy(&vcpu->arch.sie_block->gcr, &sregs->crs, sizeof(sregs->crs));
	vcpu_put(vcpu);
	return 0;
}

int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	vcpu_load(vcpu);
	memcpy(&sregs->acrs, &vcpu->arch.guest_acrs, sizeof(sregs->acrs));
	memcpy(&sregs->crs, &vcpu->arch.sie_block->gcr, sizeof(sregs->crs));
	vcpu_put(vcpu);
	return 0;
}

int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	vcpu_load(vcpu);
	memcpy(&vcpu->arch.guest_fpregs.fprs, &fpu->fprs, sizeof(fpu->fprs));
	vcpu->arch.guest_fpregs.fpc = fpu->fpc;
	vcpu_put(vcpu);
	return 0;
}

int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	vcpu_load(vcpu);
	memcpy(&fpu->fprs, &vcpu->arch.guest_fpregs.fprs, sizeof(fpu->fprs));
	fpu->fpc = vcpu->arch.guest_fpregs.fpc;
	vcpu_put(vcpu);
	return 0;
}

static int kvm_arch_vcpu_ioctl_set_initial_psw(struct kvm_vcpu *vcpu, psw_t psw)
{
	int rc = 0;

	vcpu_load(vcpu);
	if (atomic_read(&vcpu->arch.sie_block->cpuflags) & CPUSTAT_RUNNING)
		rc = -EBUSY;
	else
		vcpu->arch.sie_block->gpsw = psw;
	vcpu_put(vcpu);
	return rc;
}

int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
				  struct kvm_translation *tr)
{
	return -EINVAL; /* not implemented yet */
}

int kvm_arch_vcpu_ioctl_debug_guest(struct kvm_vcpu *vcpu,
				    struct kvm_debug_guest *dbg)
{
	return -EINVAL; /* not implemented yet */
}

int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	return -EINVAL; /* not implemented yet */
}

int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	return -EINVAL; /* not implemented yet */
}

extern void s390_handle_mcck(void);

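/*
 * One round trip through SIE: deliver pending interrupts, then run
 * the guest via sie64a() with its general purpose registers; r14/r15
 * are mirrored through the gg14 field of the SIE block (16 bytes).
 */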
static void __vcpu_run(struct kvm_vcpu *vcpu)
{
	memcpy(&vcpu->arch.sie_block->gg14, &vcpu->arch.guest_gprs[14], 16);

	if (need_resched())
		schedule();

	if (test_thread_flag(TIF_MCCK_PENDING))
		s390_handle_mcck();

	kvm_s390_deliver_pending_interrupts(vcpu);

	vcpu->arch.sie_block->icptcode = 0;
	local_irq_disable();
	kvm_guest_enter();
	local_irq_enable();
	VCPU_EVENT(vcpu, 6, "entering sie flags %x",
		   atomic_read(&vcpu->arch.sie_block->cpuflags));
	if (sie64a(vcpu->arch.sie_block, vcpu->arch.guest_gprs)) {
		VCPU_EVENT(vcpu, 3, "%s", "fault in sie instruction");
		kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
	}
	VCPU_EVENT(vcpu, 6, "exit sie icptcode %d",
		   vcpu->arch.sie_block->icptcode);
	local_irq_disable();
	kvm_guest_exit();
	local_irq_enable();

	memcpy(&vcpu->arch.guest_gprs[14], &vcpu->arch.sie_block->gg14, 16);
}

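/*
 * Main run loop for the KVM_RUN ioctl. On reentry after a
 * KVM_EXIT_S390_SIEIC exit, the guest psw is reloaded from kvm_run,
 * since userspace may have modified it; intercepts that cannot be
 * handled in-kernel (-ENOTSUPP) are handed back to userspace the
 * same way.
 */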
int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	int rc;
	sigset_t sigsaved;

	vcpu_load(vcpu);

	if (vcpu->sigset_active)
		sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);

	atomic_set_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);

	BUG_ON(vcpu->kvm->arch.float_int.local_int[vcpu->vcpu_id] == NULL);

	switch (kvm_run->exit_reason) {
	case KVM_EXIT_S390_SIEIC:
		vcpu->arch.sie_block->gpsw.mask = kvm_run->s390_sieic.mask;
		vcpu->arch.sie_block->gpsw.addr = kvm_run->s390_sieic.addr;
		break;
	case KVM_EXIT_UNKNOWN:
	case KVM_EXIT_S390_RESET:
		break;
	default:
		BUG();
	}

	might_sleep();

	do {
		__vcpu_run(vcpu);
		rc = kvm_handle_sie_intercept(vcpu);
	} while (!signal_pending(current) && !rc);

	if (signal_pending(current) && !rc)
		rc = -EINTR;

	if (rc == -ENOTSUPP) {
		/* intercept cannot be handled in-kernel, prepare kvm-run */
		kvm_run->exit_reason = KVM_EXIT_S390_SIEIC;
		kvm_run->s390_sieic.icptcode = vcpu->arch.sie_block->icptcode;
		kvm_run->s390_sieic.mask = vcpu->arch.sie_block->gpsw.mask;
		kvm_run->s390_sieic.addr = vcpu->arch.sie_block->gpsw.addr;
		kvm_run->s390_sieic.ipa = vcpu->arch.sie_block->ipa;
		kvm_run->s390_sieic.ipb = vcpu->arch.sie_block->ipb;
		rc = 0;
	}

	if (rc == -EREMOTE) {
		/*
		 * intercept was handled, but userspace support is needed;
		 * kvm_run has been prepared by the handler
		 */
		rc = 0;
	}

	if (vcpu->sigset_active)
		sigprocmask(SIG_SETMASK, &sigsaved, NULL);

	vcpu_put(vcpu);

	vcpu->stat.exit_userspace++;
	return rc;
}

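/*
 * Copy into guest memory either through the prefixed (cpu-local
 * lowcore) view or via absolute addressing, depending on how the
 * store-status address was specified.
 */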
static int __guestcopy(struct kvm_vcpu *vcpu, u64 guestdest, const void *from,
		       unsigned long n, int prefix)
{
	if (prefix)
		return copy_to_guest(vcpu, guestdest, from, n);
	else
		return copy_to_guest_absolute(vcpu, guestdest, from, n);
}

/*
 * store status at address
 * we have two special cases:
 * KVM_S390_STORE_STATUS_NOADDR: -> 0x1200 on 64 bit
 * KVM_S390_STORE_STATUS_PREFIXED: -> prefix
 */
int __kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
{
	const unsigned char archmode = 1;
	int prefix;

	if (addr == KVM_S390_STORE_STATUS_NOADDR) {
		if (copy_to_guest_absolute(vcpu, 163ul, &archmode, 1))
			return -EFAULT;
		addr = SAVE_AREA_BASE;
		prefix = 0;
	} else if (addr == KVM_S390_STORE_STATUS_PREFIXED) {
		if (copy_to_guest(vcpu, 163ul, &archmode, 1))
			return -EFAULT;
		addr = SAVE_AREA_BASE;
		prefix = 1;
	} else
		prefix = 0;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area_s390x, fp_regs),
			vcpu->arch.guest_fpregs.fprs, 128, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area_s390x, gp_regs),
			vcpu->arch.guest_gprs, 128, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area_s390x, psw),
			&vcpu->arch.sie_block->gpsw, 16, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area_s390x, pref_reg),
			&vcpu->arch.sie_block->prefix, 4, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu,
			addr + offsetof(struct save_area_s390x, fp_ctrl_reg),
			&vcpu->arch.guest_fpregs.fpc, 4, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area_s390x, tod_reg),
			&vcpu->arch.sie_block->todpr, 4, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area_s390x, timer),
			&vcpu->arch.sie_block->cputm, 8, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area_s390x, clk_cmp),
			&vcpu->arch.sie_block->ckc, 8, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area_s390x, acc_regs),
			&vcpu->arch.guest_acrs, 64, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu,
			addr + offsetof(struct save_area_s390x, ctrl_regs),
			&vcpu->arch.sie_block->gcr, 128, prefix))
		return -EFAULT;
	return 0;
}

static int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
{
	int rc;

	vcpu_load(vcpu);
	rc = __kvm_s390_vcpu_store_status(vcpu, addr);
	vcpu_put(vcpu);
	return rc;
}

long kvm_arch_vcpu_ioctl(struct file *filp,
			 unsigned int ioctl, unsigned long arg)
{
	struct kvm_vcpu *vcpu = filp->private_data;
	void __user *argp = (void __user *)arg;

	switch (ioctl) {
	case KVM_S390_INTERRUPT: {
		struct kvm_s390_interrupt s390int;

		if (copy_from_user(&s390int, argp, sizeof(s390int)))
			return -EFAULT;
		return kvm_s390_inject_vcpu(vcpu, &s390int);
	}
	case KVM_S390_STORE_STATUS:
		return kvm_s390_vcpu_store_status(vcpu, arg);
	case KVM_S390_SET_INITIAL_PSW: {
		psw_t psw;

		if (copy_from_user(&psw, argp, sizeof(psw)))
			return -EFAULT;
		return kvm_arch_vcpu_ioctl_set_initial_psw(vcpu, psw);
	}
	case KVM_S390_INITIAL_RESET:
		return kvm_arch_vcpu_ioctl_initial_reset(vcpu);
	default:
		;
	}
	return -EINVAL;
}

/* Section: memory related */
int kvm_arch_set_memory_region(struct kvm *kvm,
			       struct kvm_userspace_memory_region *mem,
			       struct kvm_memory_slot old,
			       int user_alloc)
{
	/* A few sanity checks. We can have exactly one memory slot, which
	   has to start at guest physical address zero, has to be located
	   at a page boundary in userland, and has to end at a page
	   boundary. The memory in userland may be fragmented into various
	   different vmas. It is okay to mmap() and munmap() stuff in this
	   slot at any time after doing this call */

	if (mem->slot)
		return -EINVAL;

	if (mem->guest_phys_addr)
		return -EINVAL;

	if (mem->userspace_addr & (PAGE_SIZE - 1))
		return -EINVAL;

	if (mem->memory_size & (PAGE_SIZE - 1))
		return -EINVAL;

	kvm->arch.guest_origin = mem->userspace_addr;
	kvm->arch.guest_memsize = mem->memory_size;

	/* FIXME: we do want to interrupt running CPUs and update their memory
	   configuration now to avoid race conditions. But hey, changing the
	   memory layout while virtual CPUs are running is usually bad
	   programming practice. */

	return 0;
}

void kvm_arch_flush_shadow(struct kvm *kvm)
{
}

gfn_t unalias_gfn(struct kvm *kvm, gfn_t gfn)
{
	return gfn;
}

static int __init kvm_s390_init(void)
{
	return kvm_init(NULL, sizeof(struct kvm_vcpu), THIS_MODULE);
}

static void __exit kvm_s390_exit(void)
{
	kvm_exit();
}

module_init(kvm_s390_init);
module_exit(kvm_s390_exit);