/*
 * s390host.c -- hosting zSeries kernel virtual machines
 *
 * Copyright IBM Corp. 2008,2009
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (version 2 only)
 * as published by the Free Software Foundation.
 *
 *    Author(s): Carsten Otte <cotte@de.ibm.com>
 *               Christian Borntraeger <borntraeger@de.ibm.com>
 *               Heiko Carstens <heiko.carstens@de.ibm.com>
 *               Christian Ehrhardt <ehrhardt@de.ibm.com>
 */

#include <linux/compiler.h>
#include <linux/err.h>
#include <linux/fs.h>
#include <linux/hrtimer.h>
#include <linux/init.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/timer.h>
#include <asm/lowcore.h>
#include <asm/pgtable.h>
#include <asm/nmi.h>
#include <asm/system.h>
#include "kvm-s390.h"
#include "gaccess.h"
#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU

struct kvm_stats_debugfs_item debugfs_entries[] = {
	{ "userspace_handled", VCPU_STAT(exit_userspace) },
	{ "exit_null", VCPU_STAT(exit_null) },
	{ "exit_validity", VCPU_STAT(exit_validity) },
	{ "exit_stop_request", VCPU_STAT(exit_stop_request) },
	{ "exit_external_request", VCPU_STAT(exit_external_request) },
	{ "exit_external_interrupt", VCPU_STAT(exit_external_interrupt) },
	{ "exit_instruction", VCPU_STAT(exit_instruction) },
	{ "exit_program_interruption", VCPU_STAT(exit_program_interruption) },
	{ "exit_instr_and_program_int", VCPU_STAT(exit_instr_and_program) },
	{ "instruction_lctlg", VCPU_STAT(instruction_lctlg) },
	{ "instruction_lctl", VCPU_STAT(instruction_lctl) },
	{ "deliver_emergency_signal", VCPU_STAT(deliver_emergency_signal) },
	{ "deliver_service_signal", VCPU_STAT(deliver_service_signal) },
	{ "deliver_virtio_interrupt", VCPU_STAT(deliver_virtio_interrupt) },
	{ "deliver_stop_signal", VCPU_STAT(deliver_stop_signal) },
	{ "deliver_prefix_signal", VCPU_STAT(deliver_prefix_signal) },
	{ "deliver_restart_signal", VCPU_STAT(deliver_restart_signal) },
	{ "deliver_program_interruption", VCPU_STAT(deliver_program_int) },
	{ "exit_wait_state", VCPU_STAT(exit_wait_state) },
	{ "instruction_stidp", VCPU_STAT(instruction_stidp) },
	{ "instruction_spx", VCPU_STAT(instruction_spx) },
	{ "instruction_stpx", VCPU_STAT(instruction_stpx) },
	{ "instruction_stap", VCPU_STAT(instruction_stap) },
	{ "instruction_storage_key", VCPU_STAT(instruction_storage_key) },
	{ "instruction_stsch", VCPU_STAT(instruction_stsch) },
	{ "instruction_chsc", VCPU_STAT(instruction_chsc) },
	{ "instruction_stsi", VCPU_STAT(instruction_stsi) },
	{ "instruction_stfl", VCPU_STAT(instruction_stfl) },
	{ "instruction_sigp_sense", VCPU_STAT(instruction_sigp_sense) },
	{ "instruction_sigp_emergency", VCPU_STAT(instruction_sigp_emergency) },
	{ "instruction_sigp_stop", VCPU_STAT(instruction_sigp_stop) },
	{ "instruction_sigp_set_arch", VCPU_STAT(instruction_sigp_arch) },
	{ "instruction_sigp_set_prefix", VCPU_STAT(instruction_sigp_prefix) },
	{ "instruction_sigp_restart", VCPU_STAT(instruction_sigp_restart) },
	{ "diagnose_44", VCPU_STAT(diagnose_44) },
	{ NULL }
};
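/* facility list passed to each vcpu's SIE control block (set up in kvm_s390_init) */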
static unsigned long long *facilities;

/* Section: not file related */
int kvm_arch_hardware_enable(void *garbage)
{
	/* every s390 is virtualization enabled ;-) */
	return 0;
}

void kvm_arch_hardware_disable(void *garbage)
{
}

int kvm_arch_hardware_setup(void)
{
	return 0;
}

void kvm_arch_hardware_unsetup(void)
{
}

void kvm_arch_check_processor_compat(void *rtn)
{
}

int kvm_arch_init(void *opaque)
{
	return 0;
}

void kvm_arch_exit(void)
{
}

/* Section: device related */
long kvm_arch_dev_ioctl(struct file *filp,
			unsigned int ioctl, unsigned long arg)
{
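	/*
	 * The only device ioctl: prepare the calling process' address
	 * space for running SIE guests (see s390_enable_sie()).
	 */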
	if (ioctl == KVM_S390_ENABLE_SIE)
		return s390_enable_sie();
	return -EINVAL;
}

int kvm_dev_ioctl_check_extension(long ext)
{
	int r;

	switch (ext) {
	case KVM_CAP_S390_PSW:
		r = 1;
		break;
	default:
		r = 0;
	}
	return r;
}

/* Section: vm related */
/*
 * Get (and clear) the dirty memory log for a memory slot.
 */
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
			       struct kvm_dirty_log *log)
{
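	/* dirty log tracking is not implemented here; report an empty log */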
	return 0;
}

long kvm_arch_vm_ioctl(struct file *filp,
		       unsigned int ioctl, unsigned long arg)
{
	struct kvm *kvm = filp->private_data;
	void __user *argp = (void __user *)arg;
	int r;

	switch (ioctl) {
	case KVM_S390_INTERRUPT: {
		struct kvm_s390_interrupt s390int;

		r = -EFAULT;
		if (copy_from_user(&s390int, argp, sizeof(s390int)))
			break;
		r = kvm_s390_inject_vm(kvm, &s390int);
		break;
	}
	default:
		r = -ENOTTY;
	}

	return r;
}

struct kvm *kvm_arch_create_vm(void)
{
	struct kvm *kvm;
	int rc;
	char debug_name[16];

	rc = s390_enable_sie();
	if (rc)
		goto out_nokvm;

	rc = -ENOMEM;
	kvm = kzalloc(sizeof(struct kvm), GFP_KERNEL);
	if (!kvm)
		goto out_nokvm;
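	/*
	 * The system control area (SCA) holds one SIE descriptor (sda) per
	 * vcpu; kvm_arch_vcpu_create() hooks each vcpu's SIE control block
	 * into it.
	 */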
	kvm->arch.sca = (struct sca_block *) get_zeroed_page(GFP_KERNEL);
	if (!kvm->arch.sca)
		goto out_nosca;

	sprintf(debug_name, "kvm-%u", current->pid);

	kvm->arch.dbf = debug_register(debug_name, 8, 2, 8 * sizeof(long));
	if (!kvm->arch.dbf)
		goto out_nodbf;

	spin_lock_init(&kvm->arch.float_int.lock);
	INIT_LIST_HEAD(&kvm->arch.float_int.list);

	debug_register_view(kvm->arch.dbf, &debug_sprintf_view);
	VM_EVENT(kvm, 3, "%s", "vm created");

	return kvm;
out_nodbf:
	free_page((unsigned long)(kvm->arch.sca));
out_nosca:
	kfree(kvm);
out_nokvm:
	return ERR_PTR(rc);
}

void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{
	VCPU_EVENT(vcpu, 3, "%s", "free cpu");
	if (vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda ==
	    (__u64) vcpu->arch.sie_block)
		vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda = 0;
	smp_mb();
	free_page((unsigned long)(vcpu->arch.sie_block));
	kvm_vcpu_uninit(vcpu);
	kfree(vcpu);
}

static void kvm_free_vcpus(struct kvm *kvm)
{
	unsigned int i;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(i, vcpu, kvm)
		kvm_arch_vcpu_destroy(vcpu);

	mutex_lock(&kvm->lock);
	for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
		kvm->vcpus[i] = NULL;

	atomic_set(&kvm->online_vcpus, 0);
	mutex_unlock(&kvm->lock);
}

void kvm_arch_sync_events(struct kvm *kvm)
{
}

void kvm_arch_destroy_vm(struct kvm *kvm)
{
	kvm_free_vcpus(kvm);
	kvm_free_physmem(kvm);
	free_page((unsigned long)(kvm->arch.sca));
	debug_unregister(kvm->arch.dbf);
	kfree(kvm);
}

/* Section: vcpu related */
int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
{
	return 0;
}

void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
{
	/* Nothing to do */
}
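/*
 * vcpu_load/vcpu_put switch the floating point and access registers
 * between host and guest state.
 */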
void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	save_fp_regs(&vcpu->arch.host_fpregs);
	save_access_regs(vcpu->arch.host_acrs);
	vcpu->arch.guest_fpregs.fpc &= FPC_VALID_MASK;
	restore_fp_regs(&vcpu->arch.guest_fpregs);
	restore_access_regs(vcpu->arch.guest_acrs);
}

void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
	save_fp_regs(&vcpu->arch.guest_fpregs);
	save_access_regs(vcpu->arch.guest_acrs);
	restore_fp_regs(&vcpu->arch.host_fpregs);
	restore_access_regs(vcpu->arch.host_acrs);
}

static void kvm_s390_vcpu_initial_reset(struct kvm_vcpu *vcpu)
{
	/* this equals initial cpu reset in pop, but we don't switch to ESA */
	vcpu->arch.sie_block->gpsw.mask = 0UL;
	vcpu->arch.sie_block->gpsw.addr = 0UL;
	vcpu->arch.sie_block->prefix = 0UL;
	vcpu->arch.sie_block->ihcpu = 0xffff;
	vcpu->arch.sie_block->cputm = 0UL;
	vcpu->arch.sie_block->ckc = 0UL;
	vcpu->arch.sie_block->todpr = 0;
	memset(vcpu->arch.sie_block->gcr, 0, 16 * sizeof(__u64));
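	/* CR0/CR14 get their architected initial-reset defaults (cf. PoP) */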
	vcpu->arch.sie_block->gcr[0] = 0xE0UL;
	vcpu->arch.sie_block->gcr[14] = 0xC2000000UL;
	vcpu->arch.guest_fpregs.fpc = 0;
	asm volatile("lfpc %0" : : "Q" (vcpu->arch.guest_fpregs.fpc));
	vcpu->arch.sie_block->gbea = 1;
}

int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
{
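	/*
	 * Note: CPUSTAT_ZARCH starts the guest in z/Architecture mode;
	 * ecb/eca below carry SIE execution-control bits whose individual
	 * meanings are defined by the SIE architecture.
	 */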
	atomic_set(&vcpu->arch.sie_block->cpuflags, CPUSTAT_ZARCH);
	set_bit(KVM_REQ_MMU_RELOAD, &vcpu->requests);
	vcpu->arch.sie_block->ecb = 2;
	vcpu->arch.sie_block->eca = 0xC1002001U;
	vcpu->arch.sie_block->fac = (int) (long) facilities;
	hrtimer_init(&vcpu->arch.ckc_timer, CLOCK_REALTIME, HRTIMER_MODE_ABS);
	tasklet_init(&vcpu->arch.tasklet, kvm_s390_tasklet,
		     (unsigned long) vcpu);
	vcpu->arch.ckc_timer.function = kvm_s390_idle_wakeup;
	get_cpu_id(&vcpu->arch.cpu_id);
	vcpu->arch.cpu_id.version = 0xff;
	return 0;
}

struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
				      unsigned int id)
{
	struct kvm_vcpu *vcpu = kzalloc(sizeof(struct kvm_vcpu), GFP_KERNEL);
	int rc = -ENOMEM;

	if (!vcpu)
		goto out_nomem;

	vcpu->arch.sie_block = (struct kvm_s390_sie_block *)
					get_zeroed_page(GFP_KERNEL);

	if (!vcpu->arch.sie_block)
		goto out_free_cpu;

	vcpu->arch.sie_block->icpua = id;
	BUG_ON(!kvm->arch.sca);
	if (!kvm->arch.sca->cpu[id].sda)
		kvm->arch.sca->cpu[id].sda = (__u64) vcpu->arch.sie_block;
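	/* the SIE block gets the SCA address, split into two 32-bit halves */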
	vcpu->arch.sie_block->scaoh = (__u32)(((__u64)kvm->arch.sca) >> 32);
	vcpu->arch.sie_block->scaol = (__u32)(__u64)kvm->arch.sca;

	spin_lock_init(&vcpu->arch.local_int.lock);
	INIT_LIST_HEAD(&vcpu->arch.local_int.list);
	vcpu->arch.local_int.float_int = &kvm->arch.float_int;
	spin_lock(&kvm->arch.float_int.lock);
	kvm->arch.float_int.local_int[id] = &vcpu->arch.local_int;
	init_waitqueue_head(&vcpu->arch.local_int.wq);
	vcpu->arch.local_int.cpuflags = &vcpu->arch.sie_block->cpuflags;
	spin_unlock(&kvm->arch.float_int.lock);

	rc = kvm_vcpu_init(vcpu, kvm, id);
	if (rc)
		goto out_free_cpu;
	VM_EVENT(kvm, 3, "create cpu %d at %p, sie block at %p", id, vcpu,
		 vcpu->arch.sie_block);

	return vcpu;
out_free_cpu:
	kfree(vcpu);
out_nomem:
	return ERR_PTR(rc);
}

int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
{
	/* kvm common code refers to this, but never calls it */
	BUG();
	return 0;
}

static int kvm_arch_vcpu_ioctl_initial_reset(struct kvm_vcpu *vcpu)
{
	vcpu_load(vcpu);
	kvm_s390_vcpu_initial_reset(vcpu);
	vcpu_put(vcpu);
	return 0;
}

int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	vcpu_load(vcpu);
	memcpy(&vcpu->arch.guest_gprs, &regs->gprs, sizeof(regs->gprs));
	vcpu_put(vcpu);
	return 0;
}

int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	vcpu_load(vcpu);
	memcpy(&regs->gprs, &vcpu->arch.guest_gprs, sizeof(regs->gprs));
	vcpu_put(vcpu);
	return 0;
}

int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	vcpu_load(vcpu);
	memcpy(&vcpu->arch.guest_acrs, &sregs->acrs, sizeof(sregs->acrs));
	memcpy(&vcpu->arch.sie_block->gcr, &sregs->crs, sizeof(sregs->crs));
	vcpu_put(vcpu);
	return 0;
}

int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	vcpu_load(vcpu);
	memcpy(&sregs->acrs, &vcpu->arch.guest_acrs, sizeof(sregs->acrs));
	memcpy(&sregs->crs, &vcpu->arch.sie_block->gcr, sizeof(sregs->crs));
	vcpu_put(vcpu);
	return 0;
}

int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	vcpu_load(vcpu);
	memcpy(&vcpu->arch.guest_fpregs.fprs, &fpu->fprs, sizeof(fpu->fprs));
	vcpu->arch.guest_fpregs.fpc = fpu->fpc;
	vcpu_put(vcpu);
	return 0;
}

int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	vcpu_load(vcpu);
	memcpy(&fpu->fprs, &vcpu->arch.guest_fpregs.fprs, sizeof(fpu->fprs));
	fpu->fpc = vcpu->arch.guest_fpregs.fpc;
	vcpu_put(vcpu);
	return 0;
}
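/* the initial PSW can only be set while the vcpu is not running */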
static int kvm_arch_vcpu_ioctl_set_initial_psw(struct kvm_vcpu *vcpu, psw_t psw)
{
	int rc = 0;

	vcpu_load(vcpu);
	if (atomic_read(&vcpu->arch.sie_block->cpuflags) & CPUSTAT_RUNNING)
		rc = -EBUSY;
	else {
		vcpu->run->psw_mask = psw.mask;
		vcpu->run->psw_addr = psw.addr;
	}
	vcpu_put(vcpu);
	return rc;
}

int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
				  struct kvm_translation *tr)
{
	return -EINVAL; /* not implemented yet */
}

int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
					struct kvm_guest_debug *dbg)
{
	return -EINVAL; /* not implemented yet */
}

int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	return -EINVAL; /* not implemented yet */
}

int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	return -EINVAL; /* not implemented yet */
}

static void __vcpu_run(struct kvm_vcpu *vcpu)
{
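	/* gg14 in the SIE block holds guest r14/r15 (16 bytes) across SIE */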
	memcpy(&vcpu->arch.sie_block->gg14, &vcpu->arch.guest_gprs[14], 16);

	if (need_resched())
		schedule();

	if (test_thread_flag(TIF_MCCK_PENDING))
		s390_handle_mcck();

	kvm_s390_deliver_pending_interrupts(vcpu);

	vcpu->arch.sie_block->icptcode = 0;
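	/*
	 * kvm_guest_enter/exit toggle guest cpu time accounting; they are
	 * presumably called with interrupts disabled to keep that state
	 * consistent.
	 */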
	local_irq_disable();
	kvm_guest_enter();
	local_irq_enable();
	VCPU_EVENT(vcpu, 6, "entering sie flags %x",
		   atomic_read(&vcpu->arch.sie_block->cpuflags));
	if (sie64a(vcpu->arch.sie_block, vcpu->arch.guest_gprs)) {
		VCPU_EVENT(vcpu, 3, "%s", "fault in sie instruction");
		kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
	}
	VCPU_EVENT(vcpu, 6, "exit sie icptcode %d",
		   vcpu->arch.sie_block->icptcode);
	local_irq_disable();
	kvm_guest_exit();
	local_irq_enable();

	memcpy(&vcpu->arch.guest_gprs[14], &vcpu->arch.sie_block->gg14, 16);
}

int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	int rc;
	sigset_t sigsaved;

	vcpu_load(vcpu);
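	/*
	 * Re-entry point: SIE_INTERCEPT_RERUNVCPU brings us back here, e.g.
	 * after userspace has (re)registered guest memory.
	 */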
rerun_vcpu:
	if (vcpu->requests)
		if (test_and_clear_bit(KVM_REQ_MMU_RELOAD, &vcpu->requests))
			kvm_s390_vcpu_set_mem(vcpu);

	/* verify that memory has been registered */
	if (!vcpu->arch.sie_block->gmslm) {
		vcpu_put(vcpu);
		VCPU_EVENT(vcpu, 3, "%s", "no memory registered to run vcpu");
		return -EINVAL;
	}

	if (vcpu->sigset_active)
		sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);

	atomic_set_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);

	BUG_ON(vcpu->kvm->arch.float_int.local_int[vcpu->vcpu_id] == NULL);

	switch (kvm_run->exit_reason) {
	case KVM_EXIT_S390_SIEIC:
	case KVM_EXIT_UNKNOWN:
	case KVM_EXIT_INTR:
	case KVM_EXIT_S390_RESET:
		break;
	default:
		BUG();
	}

	vcpu->arch.sie_block->gpsw.mask = kvm_run->psw_mask;
	vcpu->arch.sie_block->gpsw.addr = kvm_run->psw_addr;

	might_fault();

	do {
		__vcpu_run(vcpu);
		rc = kvm_handle_sie_intercept(vcpu);
	} while (!signal_pending(current) && !rc);

	if (rc == SIE_INTERCEPT_RERUNVCPU)
		goto rerun_vcpu;

	if (signal_pending(current) && !rc) {
		kvm_run->exit_reason = KVM_EXIT_INTR;
		rc = -EINTR;
	}

	if (rc == -ENOTSUPP) {
		/* intercept cannot be handled in-kernel, prepare kvm-run */
		kvm_run->exit_reason = KVM_EXIT_S390_SIEIC;
		kvm_run->s390_sieic.icptcode = vcpu->arch.sie_block->icptcode;
		kvm_run->s390_sieic.ipa = vcpu->arch.sie_block->ipa;
		kvm_run->s390_sieic.ipb = vcpu->arch.sie_block->ipb;
		rc = 0;
	}

	if (rc == -EREMOTE) {
		/* intercept was handled, but userspace support is needed;
		 * kvm_run has been prepared by the handler */
		rc = 0;
	}

	kvm_run->psw_mask = vcpu->arch.sie_block->gpsw.mask;
	kvm_run->psw_addr = vcpu->arch.sie_block->gpsw.addr;

	if (vcpu->sigset_active)
		sigprocmask(SIG_SETMASK, &sigsaved, NULL);

	vcpu_put(vcpu);

	vcpu->stat.exit_userspace++;
	return rc;
}
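/* write to guest memory through the prefixed (real) or absolute mapping */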
static int __guestcopy(struct kvm_vcpu *vcpu, u64 guestdest, const void *from,
		       unsigned long n, int prefix)
{
	if (prefix)
		return copy_to_guest(vcpu, guestdest, from, n);
	else
		return copy_to_guest_absolute(vcpu, guestdest, from, n);
}

/*
 * store status at address
 * we have two special cases:
 * KVM_S390_STORE_STATUS_NOADDR: -> 0x1200 on 64 bit
 * KVM_S390_STORE_STATUS_PREFIXED: -> prefix
 */
int __kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
{
	const unsigned char archmode = 1;
	int prefix;

	if (addr == KVM_S390_STORE_STATUS_NOADDR) {
		if (copy_to_guest_absolute(vcpu, 163ul, &archmode, 1))
			return -EFAULT;
		addr = SAVE_AREA_BASE;
		prefix = 0;
	} else if (addr == KVM_S390_STORE_STATUS_PREFIXED) {
		if (copy_to_guest(vcpu, 163ul, &archmode, 1))
			return -EFAULT;
		addr = SAVE_AREA_BASE;
		prefix = 1;
	} else
		prefix = 0;
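	/*
	 * The copies below follow the hardware save area layout: 16 fprs
	 * and 16 gprs (128 bytes each), a 16 byte psw, 16 acrs (64 bytes)
	 * and 16 crs (128 bytes).
	 */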
	if (__guestcopy(vcpu, addr + offsetof(struct save_area, fp_regs),
			vcpu->arch.guest_fpregs.fprs, 128, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, gp_regs),
			vcpu->arch.guest_gprs, 128, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, psw),
			&vcpu->arch.sie_block->gpsw, 16, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, pref_reg),
			&vcpu->arch.sie_block->prefix, 4, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu,
			addr + offsetof(struct save_area, fp_ctrl_reg),
			&vcpu->arch.guest_fpregs.fpc, 4, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, tod_reg),
			&vcpu->arch.sie_block->todpr, 4, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, timer),
			&vcpu->arch.sie_block->cputm, 8, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, clk_cmp),
			&vcpu->arch.sie_block->ckc, 8, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, acc_regs),
			&vcpu->arch.guest_acrs, 64, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu,
			addr + offsetof(struct save_area, ctrl_regs),
			&vcpu->arch.sie_block->gcr, 128, prefix))
		return -EFAULT;
	return 0;
}

static int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
{
	int rc;

	vcpu_load(vcpu);
	rc = __kvm_s390_vcpu_store_status(vcpu, addr);
	vcpu_put(vcpu);
	return rc;
}

long kvm_arch_vcpu_ioctl(struct file *filp,
			 unsigned int ioctl, unsigned long arg)
{
	struct kvm_vcpu *vcpu = filp->private_data;
	void __user *argp = (void __user *)arg;

	switch (ioctl) {
	case KVM_S390_INTERRUPT: {
		struct kvm_s390_interrupt s390int;

		if (copy_from_user(&s390int, argp, sizeof(s390int)))
			return -EFAULT;
		return kvm_s390_inject_vcpu(vcpu, &s390int);
	}
	case KVM_S390_STORE_STATUS:
		return kvm_s390_vcpu_store_status(vcpu, arg);
	case KVM_S390_SET_INITIAL_PSW: {
		psw_t psw;

		if (copy_from_user(&psw, argp, sizeof(psw)))
			return -EFAULT;
		return kvm_arch_vcpu_ioctl_set_initial_psw(vcpu, psw);
	}
	case KVM_S390_INITIAL_RESET:
		return kvm_arch_vcpu_ioctl_initial_reset(vcpu);
	default:
		;
	}
	return -EINVAL;
}

/* Section: memory related */
int kvm_arch_set_memory_region(struct kvm *kvm,
			       struct kvm_userspace_memory_region *mem,
			       struct kvm_memory_slot old,
			       int user_alloc)
{
	int i;
	struct kvm_vcpu *vcpu;

	/* A few sanity checks. We can have exactly one memory slot which has
	   to start at guest virtual zero and which has to be located at a
	   page boundary in userland and which has to end at a page boundary.
	   The memory in userland is ok to be fragmented into various different
	   vmas. It is okay to mmap() and munmap() stuff in this slot after
	   doing this call at any time */

	if (mem->slot)
		return -EINVAL;

	if (mem->guest_phys_addr)
		return -EINVAL;

	if (mem->userspace_addr & (PAGE_SIZE - 1))
		return -EINVAL;

	if (mem->memory_size & (PAGE_SIZE - 1))
		return -EINVAL;

	if (!user_alloc)
		return -EINVAL;

	/* request update of sie control block for all available vcpus */
	kvm_for_each_vcpu(i, vcpu, kvm) {
		if (test_and_set_bit(KVM_REQ_MMU_RELOAD, &vcpu->requests))
			continue;
		kvm_s390_inject_sigp_stop(vcpu, ACTION_RELOADVCPU_ON_STOP);
	}

	return 0;
}

void kvm_arch_flush_shadow(struct kvm *kvm)
{
}

gfn_t unalias_gfn(struct kvm *kvm, gfn_t gfn)
{
	return gfn;
}

static int __init kvm_s390_init(void)
{
	int ret;

	ret = kvm_init(NULL, sizeof(struct kvm_vcpu), THIS_MODULE);
	if (ret)
		return ret;

	/*
	 * guests can ask for up to 255+1 double words, we need a full page
	 * to hold the maximum amount of facilities. On the other hand, we
	 * only set facilities that are known to work in KVM.
	 */
	facilities = (unsigned long long *) get_zeroed_page(GFP_DMA);
	if (!facilities) {
		kvm_exit();
		return -ENOMEM;
	}
	stfle(facilities, 1);
	facilities[0] &= 0xff00fff3f0700000ULL;
	return 0;
}

static void __exit kvm_s390_exit(void)
{
	free_page((unsigned long) facilities);
	kvm_exit();
}

module_init(kvm_s390_init);
module_exit(kvm_s390_exit);