]> git.proxmox.com Git - mirror_ubuntu-artful-kernel.git/blame - arch/s390/kvm/kvm-s390.c
KVM: s390: Fix RUNNING flag misinterpretation
[mirror_ubuntu-artful-kernel.git] / arch / s390 / kvm / kvm-s390.c
CommitLineData
b0c632db
HC
1/*
2 * s390host.c -- hosting zSeries kernel virtual machines
3 *
628eb9b8 4 * Copyright IBM Corp. 2008,2009
b0c632db
HC
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License (version 2 only)
8 * as published by the Free Software Foundation.
9 *
10 * Author(s): Carsten Otte <cotte@de.ibm.com>
11 * Christian Borntraeger <borntraeger@de.ibm.com>
12 * Heiko Carstens <heiko.carstens@de.ibm.com>
628eb9b8 13 * Christian Ehrhardt <ehrhardt@de.ibm.com>
b0c632db
HC
14 */
15
16#include <linux/compiler.h>
17#include <linux/err.h>
18#include <linux/fs.h>
ca872302 19#include <linux/hrtimer.h>
b0c632db
HC
20#include <linux/init.h>
21#include <linux/kvm.h>
22#include <linux/kvm_host.h>
23#include <linux/module.h>
24#include <linux/slab.h>
ba5c1e9b 25#include <linux/timer.h>
cbb870c8 26#include <asm/asm-offsets.h>
b0c632db
HC
27#include <asm/lowcore.h>
28#include <asm/pgtable.h>
f5daba1d 29#include <asm/nmi.h>
ef50f7ac 30#include <asm/system.h>
8f2abe6a 31#include "kvm-s390.h"
b0c632db
HC
32#include "gaccess.h"
33
34#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU
35
36struct kvm_stats_debugfs_item debugfs_entries[] = {
37 { "userspace_handled", VCPU_STAT(exit_userspace) },
0eaeafa1 38 { "exit_null", VCPU_STAT(exit_null) },
8f2abe6a
CB
39 { "exit_validity", VCPU_STAT(exit_validity) },
40 { "exit_stop_request", VCPU_STAT(exit_stop_request) },
41 { "exit_external_request", VCPU_STAT(exit_external_request) },
42 { "exit_external_interrupt", VCPU_STAT(exit_external_interrupt) },
ba5c1e9b
CO
43 { "exit_instruction", VCPU_STAT(exit_instruction) },
44 { "exit_program_interruption", VCPU_STAT(exit_program_interruption) },
45 { "exit_instr_and_program_int", VCPU_STAT(exit_instr_and_program) },
f5e10b09 46 { "instruction_lctlg", VCPU_STAT(instruction_lctlg) },
ba5c1e9b
CO
47 { "instruction_lctl", VCPU_STAT(instruction_lctl) },
48 { "deliver_emergency_signal", VCPU_STAT(deliver_emergency_signal) },
7697e71f 49 { "deliver_external_call", VCPU_STAT(deliver_external_call) },
ba5c1e9b
CO
50 { "deliver_service_signal", VCPU_STAT(deliver_service_signal) },
51 { "deliver_virtio_interrupt", VCPU_STAT(deliver_virtio_interrupt) },
52 { "deliver_stop_signal", VCPU_STAT(deliver_stop_signal) },
53 { "deliver_prefix_signal", VCPU_STAT(deliver_prefix_signal) },
54 { "deliver_restart_signal", VCPU_STAT(deliver_restart_signal) },
55 { "deliver_program_interruption", VCPU_STAT(deliver_program_int) },
56 { "exit_wait_state", VCPU_STAT(exit_wait_state) },
453423dc
CB
57 { "instruction_stidp", VCPU_STAT(instruction_stidp) },
58 { "instruction_spx", VCPU_STAT(instruction_spx) },
59 { "instruction_stpx", VCPU_STAT(instruction_stpx) },
60 { "instruction_stap", VCPU_STAT(instruction_stap) },
61 { "instruction_storage_key", VCPU_STAT(instruction_storage_key) },
62 { "instruction_stsch", VCPU_STAT(instruction_stsch) },
63 { "instruction_chsc", VCPU_STAT(instruction_chsc) },
64 { "instruction_stsi", VCPU_STAT(instruction_stsi) },
65 { "instruction_stfl", VCPU_STAT(instruction_stfl) },
bb25b9ba 66 { "instruction_tprot", VCPU_STAT(instruction_tprot) },
5288fbf0 67 { "instruction_sigp_sense", VCPU_STAT(instruction_sigp_sense) },
7697e71f 68 { "instruction_sigp_external_call", VCPU_STAT(instruction_sigp_external_call) },
5288fbf0
CB
69 { "instruction_sigp_emergency", VCPU_STAT(instruction_sigp_emergency) },
70 { "instruction_sigp_stop", VCPU_STAT(instruction_sigp_stop) },
71 { "instruction_sigp_set_arch", VCPU_STAT(instruction_sigp_arch) },
72 { "instruction_sigp_set_prefix", VCPU_STAT(instruction_sigp_prefix) },
73 { "instruction_sigp_restart", VCPU_STAT(instruction_sigp_restart) },
388186bc 74 { "diagnose_10", VCPU_STAT(diagnose_10) },
e28acfea 75 { "diagnose_44", VCPU_STAT(diagnose_44) },
b0c632db
HC
76 { NULL }
77};
78
ef50f7ac 79static unsigned long long *facilities;
b0c632db
HC
80
/* Section: not file related */

/*
 * No hardware enablement is needed: every s390 CPU that can run this
 * kernel can also run SIE, so these arch hooks are (mostly) empty stubs
 * required by the generic KVM core.
 */
int kvm_arch_hardware_enable(void *garbage)
{
	/* every s390 is virtualization enabled ;-) */
	return 0;
}

void kvm_arch_hardware_disable(void *garbage)
{
}

int kvm_arch_hardware_setup(void)
{
	return 0;
}

void kvm_arch_hardware_unsetup(void)
{
}

void kvm_arch_check_processor_compat(void *rtn)
{
}

int kvm_arch_init(void *opaque)
{
	return 0;
}

void kvm_arch_exit(void)
{
}
113
114/* Section: device related */
115long kvm_arch_dev_ioctl(struct file *filp,
116 unsigned int ioctl, unsigned long arg)
117{
118 if (ioctl == KVM_S390_ENABLE_SIE)
119 return s390_enable_sie();
120 return -EINVAL;
121}
122
123int kvm_dev_ioctl_check_extension(long ext)
124{
d7b0b5eb
CO
125 int r;
126
2bd0ac4e 127 switch (ext) {
d7b0b5eb 128 case KVM_CAP_S390_PSW:
b6cf8788 129 case KVM_CAP_S390_GMAP:
d7b0b5eb
CO
130 r = 1;
131 break;
2bd0ac4e 132 default:
d7b0b5eb 133 r = 0;
2bd0ac4e 134 }
d7b0b5eb 135 return r;
b0c632db
HC
136}
137
/* Section: vm related */

/*
 * Get (and clear) the dirty memory log for a memory slot.
 * Not implemented on s390 yet; report success with an empty log.
 */
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
			       struct kvm_dirty_log *log)
{
	return 0;
}
147
148long kvm_arch_vm_ioctl(struct file *filp,
149 unsigned int ioctl, unsigned long arg)
150{
151 struct kvm *kvm = filp->private_data;
152 void __user *argp = (void __user *)arg;
153 int r;
154
155 switch (ioctl) {
ba5c1e9b
CO
156 case KVM_S390_INTERRUPT: {
157 struct kvm_s390_interrupt s390int;
158
159 r = -EFAULT;
160 if (copy_from_user(&s390int, argp, sizeof(s390int)))
161 break;
162 r = kvm_s390_inject_vm(kvm, &s390int);
163 break;
164 }
b0c632db 165 default:
367e1319 166 r = -ENOTTY;
b0c632db
HC
167 }
168
169 return r;
170}
171
d89f5eff 172int kvm_arch_init_vm(struct kvm *kvm)
b0c632db 173{
b0c632db
HC
174 int rc;
175 char debug_name[16];
176
177 rc = s390_enable_sie();
178 if (rc)
d89f5eff 179 goto out_err;
b0c632db 180
b290411a
CO
181 rc = -ENOMEM;
182
b0c632db
HC
183 kvm->arch.sca = (struct sca_block *) get_zeroed_page(GFP_KERNEL);
184 if (!kvm->arch.sca)
d89f5eff 185 goto out_err;
b0c632db
HC
186
187 sprintf(debug_name, "kvm-%u", current->pid);
188
189 kvm->arch.dbf = debug_register(debug_name, 8, 2, 8 * sizeof(long));
190 if (!kvm->arch.dbf)
191 goto out_nodbf;
192
ba5c1e9b
CO
193 spin_lock_init(&kvm->arch.float_int.lock);
194 INIT_LIST_HEAD(&kvm->arch.float_int.list);
195
b0c632db
HC
196 debug_register_view(kvm->arch.dbf, &debug_sprintf_view);
197 VM_EVENT(kvm, 3, "%s", "vm created");
198
598841ca
CO
199 kvm->arch.gmap = gmap_alloc(current->mm);
200 if (!kvm->arch.gmap)
201 goto out_nogmap;
202
d89f5eff 203 return 0;
598841ca
CO
204out_nogmap:
205 debug_unregister(kvm->arch.dbf);
b0c632db
HC
206out_nodbf:
207 free_page((unsigned long)(kvm->arch.sca));
d89f5eff
JK
208out_err:
209 return rc;
b0c632db
HC
210}
211
d329c035
CB
212void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
213{
214 VCPU_EVENT(vcpu, 3, "%s", "free cpu");
fc34531d 215 clear_bit(63 - vcpu->vcpu_id, (unsigned long *) &vcpu->kvm->arch.sca->mcn);
abf4a71e
CO
216 if (vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda ==
217 (__u64) vcpu->arch.sie_block)
218 vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda = 0;
219 smp_mb();
d329c035 220 free_page((unsigned long)(vcpu->arch.sie_block));
6692cef3 221 kvm_vcpu_uninit(vcpu);
d329c035
CB
222 kfree(vcpu);
223}
224
225static void kvm_free_vcpus(struct kvm *kvm)
226{
227 unsigned int i;
988a2cae 228 struct kvm_vcpu *vcpu;
d329c035 229
988a2cae
GN
230 kvm_for_each_vcpu(i, vcpu, kvm)
231 kvm_arch_vcpu_destroy(vcpu);
232
233 mutex_lock(&kvm->lock);
234 for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
235 kvm->vcpus[i] = NULL;
236
237 atomic_set(&kvm->online_vcpus, 0);
238 mutex_unlock(&kvm->lock);
d329c035
CB
239}
240
ad8ba2cd
SY
/* Nothing to synchronize on s390 before VM destruction. */
void kvm_arch_sync_events(struct kvm *kvm)
{
}
244
b0c632db
HC
245void kvm_arch_destroy_vm(struct kvm *kvm)
246{
d329c035 247 kvm_free_vcpus(kvm);
b0c632db 248 free_page((unsigned long)(kvm->arch.sca));
d329c035 249 debug_unregister(kvm->arch.dbf);
598841ca 250 gmap_free(kvm->arch.gmap);
b0c632db
HC
251}
252
253/* Section: vcpu related */
254int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
255{
598841ca 256 vcpu->arch.gmap = vcpu->kvm->arch.gmap;
b0c632db
HC
257 return 0;
258}
259
/* No per-vcpu state to undo here; teardown happens in vcpu_destroy. */
void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
{
}
264
265void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
266{
267 save_fp_regs(&vcpu->arch.host_fpregs);
268 save_access_regs(vcpu->arch.host_acrs);
269 vcpu->arch.guest_fpregs.fpc &= FPC_VALID_MASK;
270 restore_fp_regs(&vcpu->arch.guest_fpregs);
271 restore_access_regs(vcpu->arch.guest_acrs);
480e5926 272 gmap_enable(vcpu->arch.gmap);
9e6dabef 273 atomic_set_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
b0c632db
HC
274}
275
276void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
277{
9e6dabef 278 atomic_clear_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
480e5926 279 gmap_disable(vcpu->arch.gmap);
b0c632db
HC
280 save_fp_regs(&vcpu->arch.guest_fpregs);
281 save_access_regs(vcpu->arch.guest_acrs);
282 restore_fp_regs(&vcpu->arch.host_fpregs);
283 restore_access_regs(vcpu->arch.host_acrs);
284}
285
286static void kvm_s390_vcpu_initial_reset(struct kvm_vcpu *vcpu)
287{
288 /* this equals initial cpu reset in pop, but we don't switch to ESA */
289 vcpu->arch.sie_block->gpsw.mask = 0UL;
290 vcpu->arch.sie_block->gpsw.addr = 0UL;
291 vcpu->arch.sie_block->prefix = 0UL;
292 vcpu->arch.sie_block->ihcpu = 0xffff;
293 vcpu->arch.sie_block->cputm = 0UL;
294 vcpu->arch.sie_block->ckc = 0UL;
295 vcpu->arch.sie_block->todpr = 0;
296 memset(vcpu->arch.sie_block->gcr, 0, 16 * sizeof(__u64));
297 vcpu->arch.sie_block->gcr[0] = 0xE0UL;
298 vcpu->arch.sie_block->gcr[14] = 0xC2000000UL;
299 vcpu->arch.guest_fpregs.fpc = 0;
300 asm volatile("lfpc %0" : : "Q" (vcpu->arch.guest_fpregs.fpc));
301 vcpu->arch.sie_block->gbea = 1;
302}
303
304int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
305{
9e6dabef
CH
306 atomic_set(&vcpu->arch.sie_block->cpuflags, CPUSTAT_ZARCH |
307 CPUSTAT_SM |
308 CPUSTAT_STOPPED);
fc34531d 309 vcpu->arch.sie_block->ecb = 6;
b0c632db 310 vcpu->arch.sie_block->eca = 0xC1002001U;
ef50f7ac 311 vcpu->arch.sie_block->fac = (int) (long) facilities;
ca872302
CB
312 hrtimer_init(&vcpu->arch.ckc_timer, CLOCK_REALTIME, HRTIMER_MODE_ABS);
313 tasklet_init(&vcpu->arch.tasklet, kvm_s390_tasklet,
314 (unsigned long) vcpu);
315 vcpu->arch.ckc_timer.function = kvm_s390_idle_wakeup;
453423dc 316 get_cpu_id(&vcpu->arch.cpu_id);
92e6ecf3 317 vcpu->arch.cpu_id.version = 0xff;
b0c632db
HC
318 return 0;
319}
320
321struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
322 unsigned int id)
323{
4d47555a
CO
324 struct kvm_vcpu *vcpu;
325 int rc = -EINVAL;
326
327 if (id >= KVM_MAX_VCPUS)
328 goto out;
329
330 rc = -ENOMEM;
b0c632db 331
4d47555a 332 vcpu = kzalloc(sizeof(struct kvm_vcpu), GFP_KERNEL);
b0c632db 333 if (!vcpu)
4d47555a 334 goto out;
b0c632db 335
180c12fb
CB
336 vcpu->arch.sie_block = (struct kvm_s390_sie_block *)
337 get_zeroed_page(GFP_KERNEL);
b0c632db
HC
338
339 if (!vcpu->arch.sie_block)
340 goto out_free_cpu;
341
342 vcpu->arch.sie_block->icpua = id;
343 BUG_ON(!kvm->arch.sca);
abf4a71e
CO
344 if (!kvm->arch.sca->cpu[id].sda)
345 kvm->arch.sca->cpu[id].sda = (__u64) vcpu->arch.sie_block;
b0c632db
HC
346 vcpu->arch.sie_block->scaoh = (__u32)(((__u64)kvm->arch.sca) >> 32);
347 vcpu->arch.sie_block->scaol = (__u32)(__u64)kvm->arch.sca;
fc34531d 348 set_bit(63 - id, (unsigned long *) &kvm->arch.sca->mcn);
b0c632db 349
ba5c1e9b
CO
350 spin_lock_init(&vcpu->arch.local_int.lock);
351 INIT_LIST_HEAD(&vcpu->arch.local_int.list);
352 vcpu->arch.local_int.float_int = &kvm->arch.float_int;
b037a4f3 353 spin_lock(&kvm->arch.float_int.lock);
ba5c1e9b
CO
354 kvm->arch.float_int.local_int[id] = &vcpu->arch.local_int;
355 init_waitqueue_head(&vcpu->arch.local_int.wq);
5288fbf0 356 vcpu->arch.local_int.cpuflags = &vcpu->arch.sie_block->cpuflags;
b037a4f3 357 spin_unlock(&kvm->arch.float_int.lock);
ba5c1e9b 358
b0c632db
HC
359 rc = kvm_vcpu_init(vcpu, kvm, id);
360 if (rc)
7b06bf2f 361 goto out_free_sie_block;
b0c632db
HC
362 VM_EVENT(kvm, 3, "create cpu %d at %p, sie block at %p", id, vcpu,
363 vcpu->arch.sie_block);
364
b0c632db 365 return vcpu;
7b06bf2f
WY
366out_free_sie_block:
367 free_page((unsigned long)(vcpu->arch.sie_block));
b0c632db
HC
368out_free_cpu:
369 kfree(vcpu);
4d47555a 370out:
b0c632db
HC
371 return ERR_PTR(rc);
372}
373
b0c632db
HC
/* Referenced by common KVM code but never called on s390. */
int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
{
	BUG();
	return 0;
}
380
/* KVM_S390_INITIAL_RESET: reset the vcpu to its initial state. */
static int kvm_arch_vcpu_ioctl_initial_reset(struct kvm_vcpu *vcpu)
{
	kvm_s390_vcpu_initial_reset(vcpu);
	return 0;
}
386
387int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
388{
b0c632db 389 memcpy(&vcpu->arch.guest_gprs, &regs->gprs, sizeof(regs->gprs));
b0c632db
HC
390 return 0;
391}
392
393int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
394{
b0c632db 395 memcpy(&regs->gprs, &vcpu->arch.guest_gprs, sizeof(regs->gprs));
b0c632db
HC
396 return 0;
397}
398
399int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
400 struct kvm_sregs *sregs)
401{
b0c632db
HC
402 memcpy(&vcpu->arch.guest_acrs, &sregs->acrs, sizeof(sregs->acrs));
403 memcpy(&vcpu->arch.sie_block->gcr, &sregs->crs, sizeof(sregs->crs));
7eef87dc 404 restore_access_regs(vcpu->arch.guest_acrs);
b0c632db
HC
405 return 0;
406}
407
408int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
409 struct kvm_sregs *sregs)
410{
b0c632db
HC
411 memcpy(&sregs->acrs, &vcpu->arch.guest_acrs, sizeof(sregs->acrs));
412 memcpy(&sregs->crs, &vcpu->arch.sie_block->gcr, sizeof(sregs->crs));
b0c632db
HC
413 return 0;
414}
415
416int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
417{
b0c632db
HC
418 memcpy(&vcpu->arch.guest_fpregs.fprs, &fpu->fprs, sizeof(fpu->fprs));
419 vcpu->arch.guest_fpregs.fpc = fpu->fpc;
7eef87dc 420 restore_fp_regs(&vcpu->arch.guest_fpregs);
b0c632db
HC
421 return 0;
422}
423
424int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
425{
b0c632db
HC
426 memcpy(&fpu->fprs, &vcpu->arch.guest_fpregs.fprs, sizeof(fpu->fprs));
427 fpu->fpc = vcpu->arch.guest_fpregs.fpc;
b0c632db
HC
428 return 0;
429}
430
431static int kvm_arch_vcpu_ioctl_set_initial_psw(struct kvm_vcpu *vcpu, psw_t psw)
432{
433 int rc = 0;
434
9e6dabef 435 if (!(atomic_read(&vcpu->arch.sie_block->cpuflags) & CPUSTAT_STOPPED))
b0c632db 436 rc = -EBUSY;
d7b0b5eb
CO
437 else {
438 vcpu->run->psw_mask = psw.mask;
439 vcpu->run->psw_addr = psw.addr;
440 }
b0c632db
HC
441 return rc;
442}
443
444int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
445 struct kvm_translation *tr)
446{
447 return -EINVAL; /* not implemented yet */
448}
449
d0bfb940
JK
450int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
451 struct kvm_guest_debug *dbg)
b0c632db
HC
452{
453 return -EINVAL; /* not implemented yet */
454}
455
62d9f0db
MT
456int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
457 struct kvm_mp_state *mp_state)
458{
459 return -EINVAL; /* not implemented yet */
460}
461
462int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
463 struct kvm_mp_state *mp_state)
464{
465 return -EINVAL; /* not implemented yet */
466}
467
b0c632db
HC
468static void __vcpu_run(struct kvm_vcpu *vcpu)
469{
470 memcpy(&vcpu->arch.sie_block->gg14, &vcpu->arch.guest_gprs[14], 16);
471
472 if (need_resched())
473 schedule();
474
71cde587
CB
475 if (test_thread_flag(TIF_MCCK_PENDING))
476 s390_handle_mcck();
477
0ff31867
CO
478 kvm_s390_deliver_pending_interrupts(vcpu);
479
b0c632db
HC
480 vcpu->arch.sie_block->icptcode = 0;
481 local_irq_disable();
482 kvm_guest_enter();
483 local_irq_enable();
484 VCPU_EVENT(vcpu, 6, "entering sie flags %x",
485 atomic_read(&vcpu->arch.sie_block->cpuflags));
1f0d0f09
CO
486 if (sie64a(vcpu->arch.sie_block, vcpu->arch.guest_gprs)) {
487 VCPU_EVENT(vcpu, 3, "%s", "fault in sie instruction");
488 kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
489 }
b0c632db
HC
490 VCPU_EVENT(vcpu, 6, "exit sie icptcode %d",
491 vcpu->arch.sie_block->icptcode);
492 local_irq_disable();
493 kvm_guest_exit();
494 local_irq_enable();
495
496 memcpy(&vcpu->arch.guest_gprs[14], &vcpu->arch.sie_block->gg14, 16);
497}
498
499int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
500{
8f2abe6a 501 int rc;
b0c632db
HC
502 sigset_t sigsaved;
503
9ace903d 504rerun_vcpu:
b0c632db
HC
505 if (vcpu->sigset_active)
506 sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);
507
9e6dabef 508 atomic_clear_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);
b0c632db 509
ba5c1e9b
CO
510 BUG_ON(vcpu->kvm->arch.float_int.local_int[vcpu->vcpu_id] == NULL);
511
8f2abe6a
CB
512 switch (kvm_run->exit_reason) {
513 case KVM_EXIT_S390_SIEIC:
8f2abe6a 514 case KVM_EXIT_UNKNOWN:
9ace903d 515 case KVM_EXIT_INTR:
8f2abe6a
CB
516 case KVM_EXIT_S390_RESET:
517 break;
518 default:
519 BUG();
520 }
521
d7b0b5eb
CO
522 vcpu->arch.sie_block->gpsw.mask = kvm_run->psw_mask;
523 vcpu->arch.sie_block->gpsw.addr = kvm_run->psw_addr;
524
dab4079d 525 might_fault();
8f2abe6a
CB
526
527 do {
528 __vcpu_run(vcpu);
8f2abe6a
CB
529 rc = kvm_handle_sie_intercept(vcpu);
530 } while (!signal_pending(current) && !rc);
531
9ace903d
CE
532 if (rc == SIE_INTERCEPT_RERUNVCPU)
533 goto rerun_vcpu;
534
b1d16c49
CE
535 if (signal_pending(current) && !rc) {
536 kvm_run->exit_reason = KVM_EXIT_INTR;
8f2abe6a 537 rc = -EINTR;
b1d16c49 538 }
8f2abe6a 539
b8e660b8 540 if (rc == -EOPNOTSUPP) {
8f2abe6a
CB
541 /* intercept cannot be handled in-kernel, prepare kvm-run */
542 kvm_run->exit_reason = KVM_EXIT_S390_SIEIC;
543 kvm_run->s390_sieic.icptcode = vcpu->arch.sie_block->icptcode;
8f2abe6a
CB
544 kvm_run->s390_sieic.ipa = vcpu->arch.sie_block->ipa;
545 kvm_run->s390_sieic.ipb = vcpu->arch.sie_block->ipb;
546 rc = 0;
547 }
548
549 if (rc == -EREMOTE) {
550 /* intercept was handled, but userspace support is needed
551 * kvm_run has been prepared by the handler */
552 rc = 0;
553 }
b0c632db 554
d7b0b5eb
CO
555 kvm_run->psw_mask = vcpu->arch.sie_block->gpsw.mask;
556 kvm_run->psw_addr = vcpu->arch.sie_block->gpsw.addr;
557
b0c632db
HC
558 if (vcpu->sigset_active)
559 sigprocmask(SIG_SETMASK, &sigsaved, NULL);
560
b0c632db 561 vcpu->stat.exit_userspace++;
7e8e6ab4 562 return rc;
b0c632db
HC
563}
564
092670cd 565static int __guestcopy(struct kvm_vcpu *vcpu, u64 guestdest, void *from,
b0c632db
HC
566 unsigned long n, int prefix)
567{
568 if (prefix)
569 return copy_to_guest(vcpu, guestdest, from, n);
570 else
571 return copy_to_guest_absolute(vcpu, guestdest, from, n);
572}
573
574/*
575 * store status at address
576 * we use have two special cases:
577 * KVM_S390_STORE_STATUS_NOADDR: -> 0x1200 on 64 bit
578 * KVM_S390_STORE_STATUS_PREFIXED: -> prefix
579 */
971eb77f 580int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
b0c632db 581{
092670cd 582 unsigned char archmode = 1;
b0c632db
HC
583 int prefix;
584
585 if (addr == KVM_S390_STORE_STATUS_NOADDR) {
586 if (copy_to_guest_absolute(vcpu, 163ul, &archmode, 1))
587 return -EFAULT;
588 addr = SAVE_AREA_BASE;
589 prefix = 0;
590 } else if (addr == KVM_S390_STORE_STATUS_PREFIXED) {
591 if (copy_to_guest(vcpu, 163ul, &archmode, 1))
592 return -EFAULT;
593 addr = SAVE_AREA_BASE;
594 prefix = 1;
595 } else
596 prefix = 0;
597
f64ca217 598 if (__guestcopy(vcpu, addr + offsetof(struct save_area, fp_regs),
b0c632db
HC
599 vcpu->arch.guest_fpregs.fprs, 128, prefix))
600 return -EFAULT;
601
f64ca217 602 if (__guestcopy(vcpu, addr + offsetof(struct save_area, gp_regs),
b0c632db
HC
603 vcpu->arch.guest_gprs, 128, prefix))
604 return -EFAULT;
605
f64ca217 606 if (__guestcopy(vcpu, addr + offsetof(struct save_area, psw),
b0c632db
HC
607 &vcpu->arch.sie_block->gpsw, 16, prefix))
608 return -EFAULT;
609
f64ca217 610 if (__guestcopy(vcpu, addr + offsetof(struct save_area, pref_reg),
b0c632db
HC
611 &vcpu->arch.sie_block->prefix, 4, prefix))
612 return -EFAULT;
613
614 if (__guestcopy(vcpu,
f64ca217 615 addr + offsetof(struct save_area, fp_ctrl_reg),
b0c632db
HC
616 &vcpu->arch.guest_fpregs.fpc, 4, prefix))
617 return -EFAULT;
618
f64ca217 619 if (__guestcopy(vcpu, addr + offsetof(struct save_area, tod_reg),
b0c632db
HC
620 &vcpu->arch.sie_block->todpr, 4, prefix))
621 return -EFAULT;
622
f64ca217 623 if (__guestcopy(vcpu, addr + offsetof(struct save_area, timer),
b0c632db
HC
624 &vcpu->arch.sie_block->cputm, 8, prefix))
625 return -EFAULT;
626
f64ca217 627 if (__guestcopy(vcpu, addr + offsetof(struct save_area, clk_cmp),
b0c632db
HC
628 &vcpu->arch.sie_block->ckc, 8, prefix))
629 return -EFAULT;
630
f64ca217 631 if (__guestcopy(vcpu, addr + offsetof(struct save_area, acc_regs),
b0c632db
HC
632 &vcpu->arch.guest_acrs, 64, prefix))
633 return -EFAULT;
634
635 if (__guestcopy(vcpu,
f64ca217 636 addr + offsetof(struct save_area, ctrl_regs),
b0c632db
HC
637 &vcpu->arch.sie_block->gcr, 128, prefix))
638 return -EFAULT;
639 return 0;
640}
641
b0c632db
HC
642long kvm_arch_vcpu_ioctl(struct file *filp,
643 unsigned int ioctl, unsigned long arg)
644{
645 struct kvm_vcpu *vcpu = filp->private_data;
646 void __user *argp = (void __user *)arg;
bc923cc9 647 long r;
b0c632db 648
93736624
AK
649 switch (ioctl) {
650 case KVM_S390_INTERRUPT: {
ba5c1e9b
CO
651 struct kvm_s390_interrupt s390int;
652
93736624 653 r = -EFAULT;
ba5c1e9b 654 if (copy_from_user(&s390int, argp, sizeof(s390int)))
93736624
AK
655 break;
656 r = kvm_s390_inject_vcpu(vcpu, &s390int);
657 break;
ba5c1e9b 658 }
b0c632db 659 case KVM_S390_STORE_STATUS:
bc923cc9
AK
660 r = kvm_s390_vcpu_store_status(vcpu, arg);
661 break;
b0c632db
HC
662 case KVM_S390_SET_INITIAL_PSW: {
663 psw_t psw;
664
bc923cc9 665 r = -EFAULT;
b0c632db 666 if (copy_from_user(&psw, argp, sizeof(psw)))
bc923cc9
AK
667 break;
668 r = kvm_arch_vcpu_ioctl_set_initial_psw(vcpu, psw);
669 break;
b0c632db
HC
670 }
671 case KVM_S390_INITIAL_RESET:
bc923cc9
AK
672 r = kvm_arch_vcpu_ioctl_initial_reset(vcpu);
673 break;
b0c632db 674 default:
bc923cc9 675 r = -EINVAL;
b0c632db 676 }
bc923cc9 677 return r;
b0c632db
HC
678}
679
680/* Section: memory related */
f7784b8e
MT
681int kvm_arch_prepare_memory_region(struct kvm *kvm,
682 struct kvm_memory_slot *memslot,
683 struct kvm_memory_slot old,
684 struct kvm_userspace_memory_region *mem,
685 int user_alloc)
b0c632db
HC
686{
687 /* A few sanity checks. We can have exactly one memory slot which has
688 to start at guest virtual zero and which has to be located at a
689 page boundary in userland and which has to end at a page boundary.
690 The memory in userland is ok to be fragmented into various different
691 vmas. It is okay to mmap() and munmap() stuff in this slot after
692 doing this call at any time */
693
628eb9b8 694 if (mem->slot)
b0c632db
HC
695 return -EINVAL;
696
697 if (mem->guest_phys_addr)
698 return -EINVAL;
699
598841ca 700 if (mem->userspace_addr & 0xffffful)
b0c632db
HC
701 return -EINVAL;
702
598841ca 703 if (mem->memory_size & 0xffffful)
b0c632db
HC
704 return -EINVAL;
705
2668dab7
CO
706 if (!user_alloc)
707 return -EINVAL;
708
f7784b8e
MT
709 return 0;
710}
711
712void kvm_arch_commit_memory_region(struct kvm *kvm,
713 struct kvm_userspace_memory_region *mem,
714 struct kvm_memory_slot old,
715 int user_alloc)
716{
f7850c92 717 int rc;
f7784b8e 718
598841ca
CO
719
720 rc = gmap_map_segment(kvm->arch.gmap, mem->userspace_addr,
721 mem->guest_phys_addr, mem->memory_size);
722 if (rc)
f7850c92 723 printk(KERN_WARNING "kvm-s390: failed to commit memory region\n");
598841ca 724 return;
b0c632db
HC
725}
726
34d4cb8f
MT
/* No shadow page tables to flush on s390. */
void kvm_arch_flush_shadow(struct kvm *kvm)
{
}
730
b0c632db
HC
731static int __init kvm_s390_init(void)
732{
ef50f7ac 733 int ret;
0ee75bea 734 ret = kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
ef50f7ac
CB
735 if (ret)
736 return ret;
737
738 /*
739 * guests can ask for up to 255+1 double words, we need a full page
25985edc 740 * to hold the maximum amount of facilities. On the other hand, we
ef50f7ac
CB
741 * only set facilities that are known to work in KVM.
742 */
c2f0e8c8 743 facilities = (unsigned long long *) get_zeroed_page(GFP_KERNEL|GFP_DMA);
ef50f7ac
CB
744 if (!facilities) {
745 kvm_exit();
746 return -ENOMEM;
747 }
14375bc4 748 memcpy(facilities, S390_lowcore.stfle_fac_list, 16);
6d00d00b 749 facilities[0] &= 0xff00fff3f47c0000ULL;
9950f8be 750 facilities[1] &= 0x201c000000000000ULL;
ef50f7ac 751 return 0;
b0c632db
HC
752}
753
754static void __exit kvm_s390_exit(void)
755{
ef50f7ac 756 free_page((unsigned long) facilities);
b0c632db
HC
757 kvm_exit();
758}
759
760module_init(kvm_s390_init);
761module_exit(kvm_s390_exit);