]> git.proxmox.com Git - mirror_ubuntu-hirsute-kernel.git/blame - arch/s390/kvm/kvm-s390.c
KVM: MMU: remove the redundant get_written_sptes
[mirror_ubuntu-hirsute-kernel.git] / arch / s390 / kvm / kvm-s390.c
CommitLineData
b0c632db
HC
1/*
2 * s390host.c -- hosting zSeries kernel virtual machines
3 *
628eb9b8 4 * Copyright IBM Corp. 2008,2009
b0c632db
HC
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License (version 2 only)
8 * as published by the Free Software Foundation.
9 *
10 * Author(s): Carsten Otte <cotte@de.ibm.com>
11 * Christian Borntraeger <borntraeger@de.ibm.com>
12 * Heiko Carstens <heiko.carstens@de.ibm.com>
628eb9b8 13 * Christian Ehrhardt <ehrhardt@de.ibm.com>
b0c632db
HC
14 */
15
16#include <linux/compiler.h>
17#include <linux/err.h>
18#include <linux/fs.h>
ca872302 19#include <linux/hrtimer.h>
b0c632db
HC
20#include <linux/init.h>
21#include <linux/kvm.h>
22#include <linux/kvm_host.h>
23#include <linux/module.h>
24#include <linux/slab.h>
ba5c1e9b 25#include <linux/timer.h>
cbb870c8 26#include <asm/asm-offsets.h>
b0c632db
HC
27#include <asm/lowcore.h>
28#include <asm/pgtable.h>
f5daba1d 29#include <asm/nmi.h>
ef50f7ac 30#include <asm/system.h>
8f2abe6a 31#include "kvm-s390.h"
b0c632db
HC
32#include "gaccess.h"
33
/* Helper to describe one per-vcpu statistics counter for debugfs. */
#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU

/*
 * Statistics exported under debugfs (kvm common code walks this table).
 * Each entry maps a file name to an offset inside struct kvm_vcpu::stat.
 * The list is NULL-terminated.
 */
struct kvm_stats_debugfs_item debugfs_entries[] = {
	{ "userspace_handled", VCPU_STAT(exit_userspace) },
	{ "exit_null", VCPU_STAT(exit_null) },
	{ "exit_validity", VCPU_STAT(exit_validity) },
	{ "exit_stop_request", VCPU_STAT(exit_stop_request) },
	{ "exit_external_request", VCPU_STAT(exit_external_request) },
	{ "exit_external_interrupt", VCPU_STAT(exit_external_interrupt) },
	{ "exit_instruction", VCPU_STAT(exit_instruction) },
	{ "exit_program_interruption", VCPU_STAT(exit_program_interruption) },
	{ "exit_instr_and_program_int", VCPU_STAT(exit_instr_and_program) },
	{ "instruction_lctlg", VCPU_STAT(instruction_lctlg) },
	{ "instruction_lctl", VCPU_STAT(instruction_lctl) },
	{ "deliver_emergency_signal", VCPU_STAT(deliver_emergency_signal) },
	{ "deliver_external_call", VCPU_STAT(deliver_external_call) },
	{ "deliver_service_signal", VCPU_STAT(deliver_service_signal) },
	{ "deliver_virtio_interrupt", VCPU_STAT(deliver_virtio_interrupt) },
	{ "deliver_stop_signal", VCPU_STAT(deliver_stop_signal) },
	{ "deliver_prefix_signal", VCPU_STAT(deliver_prefix_signal) },
	{ "deliver_restart_signal", VCPU_STAT(deliver_restart_signal) },
	{ "deliver_program_interruption", VCPU_STAT(deliver_program_int) },
	{ "exit_wait_state", VCPU_STAT(exit_wait_state) },
	{ "instruction_stidp", VCPU_STAT(instruction_stidp) },
	{ "instruction_spx", VCPU_STAT(instruction_spx) },
	{ "instruction_stpx", VCPU_STAT(instruction_stpx) },
	{ "instruction_stap", VCPU_STAT(instruction_stap) },
	{ "instruction_storage_key", VCPU_STAT(instruction_storage_key) },
	{ "instruction_stsch", VCPU_STAT(instruction_stsch) },
	{ "instruction_chsc", VCPU_STAT(instruction_chsc) },
	{ "instruction_stsi", VCPU_STAT(instruction_stsi) },
	{ "instruction_stfl", VCPU_STAT(instruction_stfl) },
	{ "instruction_tprot", VCPU_STAT(instruction_tprot) },
	{ "instruction_sigp_sense", VCPU_STAT(instruction_sigp_sense) },
	{ "instruction_sigp_sense_running", VCPU_STAT(instruction_sigp_sense_running) },
	{ "instruction_sigp_external_call", VCPU_STAT(instruction_sigp_external_call) },
	{ "instruction_sigp_emergency", VCPU_STAT(instruction_sigp_emergency) },
	{ "instruction_sigp_stop", VCPU_STAT(instruction_sigp_stop) },
	{ "instruction_sigp_set_arch", VCPU_STAT(instruction_sigp_arch) },
	{ "instruction_sigp_set_prefix", VCPU_STAT(instruction_sigp_prefix) },
	{ "instruction_sigp_restart", VCPU_STAT(instruction_sigp_restart) },
	{ "diagnose_10", VCPU_STAT(diagnose_10) },
	{ "diagnose_44", VCPU_STAT(diagnose_44) },
	{ NULL }
};

/*
 * Page-sized bitmap of STFLE facility bits advertised to guests;
 * allocated in kvm_s390_init() and wired into each vcpu's SIE block.
 */
static unsigned long long *facilities;
b0c632db
HC
81
/* Section: not file related */
int kvm_arch_hardware_enable(void *garbage)
{
	/* every s390 is virtualization enabled ;-) */
	return 0;
}

/* Nothing to undo: SIE is always available, see kvm_arch_hardware_enable(). */
void kvm_arch_hardware_disable(void *garbage)
{
}

/* No per-host setup is required on s390. */
int kvm_arch_hardware_setup(void)
{
	return 0;
}

void kvm_arch_hardware_unsetup(void)
{
}

/* All s390 CPUs in a system are compatible; nothing to check. */
void kvm_arch_check_processor_compat(void *rtn)
{
}

/* Module-wide init hook from kvm common code; no arch work needed here. */
int kvm_arch_init(void *opaque)
{
	return 0;
}

void kvm_arch_exit(void)
{
}
114
115/* Section: device related */
116long kvm_arch_dev_ioctl(struct file *filp,
117 unsigned int ioctl, unsigned long arg)
118{
119 if (ioctl == KVM_S390_ENABLE_SIE)
120 return s390_enable_sie();
121 return -EINVAL;
122}
123
124int kvm_dev_ioctl_check_extension(long ext)
125{
d7b0b5eb
CO
126 int r;
127
2bd0ac4e 128 switch (ext) {
d7b0b5eb 129 case KVM_CAP_S390_PSW:
b6cf8788 130 case KVM_CAP_S390_GMAP:
52e16b18 131 case KVM_CAP_SYNC_MMU:
d7b0b5eb
CO
132 r = 1;
133 break;
2bd0ac4e 134 default:
d7b0b5eb 135 r = 0;
2bd0ac4e 136 }
d7b0b5eb 137 return r;
b0c632db
HC
138}
139
/* Section: vm related */
/*
 * Get (and clear) the dirty memory log for a memory slot.
 *
 * Dirty-page logging is not implemented on s390 at this point; the call
 * succeeds without filling in any dirty bits.
 */
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
			       struct kvm_dirty_log *log)
{
	return 0;
}
149
/*
 * Handle VM-scoped ioctls. Currently only KVM_S390_INTERRUPT is
 * supported, injecting a floating interrupt into the VM.
 * Returns 0 or a negative errno; -ENOTTY for unknown requests.
 */
long kvm_arch_vm_ioctl(struct file *filp,
		       unsigned int ioctl, unsigned long arg)
{
	struct kvm *kvm = filp->private_data;
	void __user *argp = (void __user *)arg;
	int r;

	switch (ioctl) {
	case KVM_S390_INTERRUPT: {
		struct kvm_s390_interrupt s390int;

		r = -EFAULT;
		if (copy_from_user(&s390int, argp, sizeof(s390int)))
			break;
		r = kvm_s390_inject_vm(kvm, &s390int);
		break;
	}
	default:
		r = -ENOTTY;
	}

	return r;
}
173
d89f5eff 174int kvm_arch_init_vm(struct kvm *kvm)
b0c632db 175{
b0c632db
HC
176 int rc;
177 char debug_name[16];
178
179 rc = s390_enable_sie();
180 if (rc)
d89f5eff 181 goto out_err;
b0c632db 182
b290411a
CO
183 rc = -ENOMEM;
184
b0c632db
HC
185 kvm->arch.sca = (struct sca_block *) get_zeroed_page(GFP_KERNEL);
186 if (!kvm->arch.sca)
d89f5eff 187 goto out_err;
b0c632db
HC
188
189 sprintf(debug_name, "kvm-%u", current->pid);
190
191 kvm->arch.dbf = debug_register(debug_name, 8, 2, 8 * sizeof(long));
192 if (!kvm->arch.dbf)
193 goto out_nodbf;
194
ba5c1e9b
CO
195 spin_lock_init(&kvm->arch.float_int.lock);
196 INIT_LIST_HEAD(&kvm->arch.float_int.list);
197
b0c632db
HC
198 debug_register_view(kvm->arch.dbf, &debug_sprintf_view);
199 VM_EVENT(kvm, 3, "%s", "vm created");
200
598841ca
CO
201 kvm->arch.gmap = gmap_alloc(current->mm);
202 if (!kvm->arch.gmap)
203 goto out_nogmap;
204
d89f5eff 205 return 0;
598841ca
CO
206out_nogmap:
207 debug_unregister(kvm->arch.dbf);
b0c632db
HC
208out_nodbf:
209 free_page((unsigned long)(kvm->arch.sca));
d89f5eff
JK
210out_err:
211 return rc;
b0c632db
HC
212}
213
/*
 * Tear down one vcpu: detach it from the SCA, free its SIE control
 * block, and release the vcpu structure itself.
 */
void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{
	VCPU_EVENT(vcpu, 3, "%s", "free cpu");
	/* clear this vcpu's bit in the SCA cpu mask (bit 63-id convention) */
	clear_bit(63 - vcpu->vcpu_id, (unsigned long *) &vcpu->kvm->arch.sca->mcn);
	/* drop the SCA's pointer to our SIE block, but only if it is ours */
	if (vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda ==
	    (__u64) vcpu->arch.sie_block)
		vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda = 0;
	/* order the SCA updates before freeing the page they referenced */
	smp_mb();
	free_page((unsigned long)(vcpu->arch.sie_block));
	kvm_vcpu_uninit(vcpu);
	kfree(vcpu);
}
226
/*
 * Destroy every vcpu of a VM and reset the online-vcpu bookkeeping so
 * kvm common code sees an empty vcpu array afterwards.
 */
static void kvm_free_vcpus(struct kvm *kvm)
{
	unsigned int i;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(i, vcpu, kvm)
		kvm_arch_vcpu_destroy(vcpu);

	mutex_lock(&kvm->lock);
	for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
		kvm->vcpus[i] = NULL;

	atomic_set(&kvm->online_vcpus, 0);
	mutex_unlock(&kvm->lock);
}
242
/* No arch-specific events need flushing before VM destruction on s390. */
void kvm_arch_sync_events(struct kvm *kvm)
{
}

/*
 * Release all VM-wide resources acquired in kvm_arch_init_vm():
 * vcpus first, then the SCA page, the debug feature and the gmap.
 */
void kvm_arch_destroy_vm(struct kvm *kvm)
{
	kvm_free_vcpus(kvm);
	free_page((unsigned long)(kvm->arch.sca));
	debug_unregister(kvm->arch.dbf);
	gmap_free(kvm->arch.gmap);
}
254
/* Section: vcpu related */
/* Each vcpu shares the VM-wide guest address-space map. */
int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
{
	vcpu->arch.gmap = vcpu->kvm->arch.gmap;
	return 0;
}

void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
{
	/* Nothing todo */
}
266
/*
 * Called when a vcpu is scheduled onto a host cpu: stash the host's
 * FP/access registers, install the guest's, activate the guest address
 * space and flag the vcpu as running in its SIE block.
 */
void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	save_fp_regs(&vcpu->arch.host_fpregs);
	save_access_regs(vcpu->arch.host_acrs);
	/* mask out FPC bits the hardware would reject */
	vcpu->arch.guest_fpregs.fpc &= FPC_VALID_MASK;
	restore_fp_regs(&vcpu->arch.guest_fpregs);
	restore_access_regs(vcpu->arch.guest_acrs);
	gmap_enable(vcpu->arch.gmap);
	atomic_set_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
}

/*
 * Inverse of kvm_arch_vcpu_load(), in reverse order: clear the running
 * flag, detach the guest address space and swap register state back.
 */
void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
	atomic_clear_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
	gmap_disable(vcpu->arch.gmap);
	save_fp_regs(&vcpu->arch.guest_fpregs);
	save_access_regs(vcpu->arch.guest_acrs);
	restore_fp_regs(&vcpu->arch.host_fpregs);
	restore_access_regs(vcpu->arch.host_acrs);
}
287
/*
 * Bring the vcpu's architectural state to initial-CPU-reset values as
 * defined by the Principles of Operation.
 */
static void kvm_s390_vcpu_initial_reset(struct kvm_vcpu *vcpu)
{
	/* this equals initial cpu reset in pop, but we don't switch to ESA */
	vcpu->arch.sie_block->gpsw.mask = 0UL;
	vcpu->arch.sie_block->gpsw.addr = 0UL;
	vcpu->arch.sie_block->prefix = 0UL;
	vcpu->arch.sie_block->ihcpu = 0xffff;
	vcpu->arch.sie_block->cputm = 0UL;
	vcpu->arch.sie_block->ckc = 0UL;
	vcpu->arch.sie_block->todpr = 0;
	memset(vcpu->arch.sie_block->gcr, 0, 16 * sizeof(__u64));
	/* architected reset values for CR0 and CR14 */
	vcpu->arch.sie_block->gcr[0] = 0xE0UL;
	vcpu->arch.sie_block->gcr[14] = 0xC2000000UL;
	vcpu->arch.guest_fpregs.fpc = 0;
	/* load the cleared FP control register into the hardware */
	asm volatile("lfpc %0" : : "Q" (vcpu->arch.guest_fpregs.fpc));
	vcpu->arch.sie_block->gbea = 1;
}
305
/*
 * One-time vcpu configuration after creation: program the SIE control
 * block's mode/feature flags, point it at the facility list, and set up
 * the clock-comparator wakeup timer and its delivery tasklet.
 */
int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
{
	/* start in z/Arch mode, system-managed, in stopped state */
	atomic_set(&vcpu->arch.sie_block->cpuflags, CPUSTAT_ZARCH |
						    CPUSTAT_SM |
						    CPUSTAT_STOPPED);
	vcpu->arch.sie_block->ecb = 6;
	vcpu->arch.sie_block->eca = 0xC1002001U;
	vcpu->arch.sie_block->fac = (int) (long) facilities;
	hrtimer_init(&vcpu->arch.ckc_timer, CLOCK_REALTIME, HRTIMER_MODE_ABS);
	tasklet_init(&vcpu->arch.tasklet, kvm_s390_tasklet,
		     (unsigned long) vcpu);
	vcpu->arch.ckc_timer.function = kvm_s390_idle_wakeup;
	get_cpu_id(&vcpu->arch.cpu_id);
	/* tag the CPU id version so guests can tell they run under KVM */
	vcpu->arch.cpu_id.version = 0xff;
	return 0;
}
322
/*
 * Allocate and wire up a new vcpu: its kvm_vcpu structure, a zeroed
 * SIE control block, the SCA entry/bit for this id, and the local
 * interrupt state linked into the VM's floating-interrupt structure.
 * Returns the vcpu or ERR_PTR on failure, unwinding via goto labels.
 */
struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
				      unsigned int id)
{
	struct kvm_vcpu *vcpu;
	int rc = -EINVAL;

	if (id >= KVM_MAX_VCPUS)
		goto out;

	rc = -ENOMEM;

	vcpu = kzalloc(sizeof(struct kvm_vcpu), GFP_KERNEL);
	if (!vcpu)
		goto out;

	/* the SIE block must live in its own zeroed page */
	vcpu->arch.sie_block = (struct kvm_s390_sie_block *)
					get_zeroed_page(GFP_KERNEL);

	if (!vcpu->arch.sie_block)
		goto out_free_cpu;

	vcpu->arch.sie_block->icpua = id;
	BUG_ON(!kvm->arch.sca);
	/* claim the SCA slot unless a previous vcpu with this id left one */
	if (!kvm->arch.sca->cpu[id].sda)
		kvm->arch.sca->cpu[id].sda = (__u64) vcpu->arch.sie_block;
	vcpu->arch.sie_block->scaoh = (__u32)(((__u64)kvm->arch.sca) >> 32);
	vcpu->arch.sie_block->scaol = (__u32)(__u64)kvm->arch.sca;
	set_bit(63 - id, (unsigned long *) &kvm->arch.sca->mcn);

	spin_lock_init(&vcpu->arch.local_int.lock);
	INIT_LIST_HEAD(&vcpu->arch.local_int.list);
	vcpu->arch.local_int.float_int = &kvm->arch.float_int;
	/* publish our local interrupt state under the float-int lock */
	spin_lock(&kvm->arch.float_int.lock);
	kvm->arch.float_int.local_int[id] = &vcpu->arch.local_int;
	init_waitqueue_head(&vcpu->arch.local_int.wq);
	vcpu->arch.local_int.cpuflags = &vcpu->arch.sie_block->cpuflags;
	spin_unlock(&kvm->arch.float_int.lock);

	rc = kvm_vcpu_init(vcpu, kvm, id);
	if (rc)
		goto out_free_sie_block;
	VM_EVENT(kvm, 3, "create cpu %d at %p, sie block at %p", id, vcpu,
		 vcpu->arch.sie_block);

	return vcpu;
out_free_sie_block:
	free_page((unsigned long)(vcpu->arch.sie_block));
out_free_cpu:
	kfree(vcpu);
out:
	return ERR_PTR(rc);
}
375
int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
{
	/* kvm common code refers to this, but never calls it */
	BUG();
	return 0;
}

/* ioctl backend for KVM_S390_INITIAL_RESET: reset to POP initial state. */
static int kvm_arch_vcpu_ioctl_initial_reset(struct kvm_vcpu *vcpu)
{
	kvm_s390_vcpu_initial_reset(vcpu);
	return 0;
}

/* Copy all 16 general purpose registers from userspace into the vcpu. */
int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	memcpy(&vcpu->arch.guest_gprs, &regs->gprs, sizeof(regs->gprs));
	return 0;
}

/* Copy all 16 general purpose registers from the vcpu out to userspace. */
int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	memcpy(&regs->gprs, &vcpu->arch.guest_gprs, sizeof(regs->gprs));
	return 0;
}
400
/*
 * Set special registers: access registers and control registers.
 * The access registers are loaded into the hardware immediately so the
 * new values take effect for the current context.
 */
int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	memcpy(&vcpu->arch.guest_acrs, &sregs->acrs, sizeof(sregs->acrs));
	memcpy(&vcpu->arch.sie_block->gcr, &sregs->crs, sizeof(sregs->crs));
	restore_access_regs(vcpu->arch.guest_acrs);
	return 0;
}

/* Read back access and control registers. */
int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	memcpy(&sregs->acrs, &vcpu->arch.guest_acrs, sizeof(sregs->acrs));
	memcpy(&sregs->crs, &vcpu->arch.sie_block->gcr, sizeof(sregs->crs));
	return 0;
}

/*
 * Set floating point registers and FP control; load them into the
 * hardware immediately.
 */
int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	memcpy(&vcpu->arch.guest_fpregs.fprs, &fpu->fprs, sizeof(fpu->fprs));
	vcpu->arch.guest_fpregs.fpc = fpu->fpc;
	restore_fp_regs(&vcpu->arch.guest_fpregs);
	return 0;
}

/* Read back floating point registers and FP control. */
int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	memcpy(&fpu->fprs, &vcpu->arch.guest_fpregs.fprs, sizeof(fpu->fprs));
	fpu->fpc = vcpu->arch.guest_fpregs.fpc;
	return 0;
}
432
/*
 * Set the PSW the vcpu will start from. Only permitted while the vcpu
 * is stopped; the PSW is staged in kvm_run and picked up on the next
 * KVM_RUN. Returns -EBUSY if the vcpu is currently running.
 */
static int kvm_arch_vcpu_ioctl_set_initial_psw(struct kvm_vcpu *vcpu, psw_t psw)
{
	int rc = 0;

	if (!(atomic_read(&vcpu->arch.sie_block->cpuflags) & CPUSTAT_STOPPED))
		rc = -EBUSY;
	else {
		vcpu->run->psw_mask = psw.mask;
		vcpu->run->psw_addr = psw.addr;
	}
	return rc;
}

int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
				  struct kvm_translation *tr)
{
	return -EINVAL; /* not implemented yet */
}

int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
					struct kvm_guest_debug *dbg)
{
	return -EINVAL; /* not implemented yet */
}

int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	return -EINVAL; /* not implemented yet */
}

int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	return -EINVAL; /* not implemented yet */
}
469
/*
 * One round-trip into SIE: sync registers 14/15 into the SIE block,
 * handle pending host work (resched, machine checks, interrupts to
 * deliver), enter the guest, and sync registers back afterwards.
 */
static void __vcpu_run(struct kvm_vcpu *vcpu)
{
	/* gprs 14 and 15 live in the SIE block while the guest runs */
	memcpy(&vcpu->arch.sie_block->gg14, &vcpu->arch.guest_gprs[14], 16);

	if (need_resched())
		schedule();

	if (test_thread_flag(TIF_MCCK_PENDING))
		s390_handle_mcck();

	kvm_s390_deliver_pending_interrupts(vcpu);

	vcpu->arch.sie_block->icptcode = 0;
	/* kvm_guest_enter() must run with interrupts off */
	local_irq_disable();
	kvm_guest_enter();
	local_irq_enable();
	VCPU_EVENT(vcpu, 6, "entering sie flags %x",
		   atomic_read(&vcpu->arch.sie_block->cpuflags));
	/* a nonzero return from sie64a means the host faulted in SIE */
	if (sie64a(vcpu->arch.sie_block, vcpu->arch.guest_gprs)) {
		VCPU_EVENT(vcpu, 3, "%s", "fault in sie instruction");
		kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
	}
	VCPU_EVENT(vcpu, 6, "exit sie icptcode %d",
		   vcpu->arch.sie_block->icptcode);
	local_irq_disable();
	kvm_guest_exit();
	local_irq_enable();

	memcpy(&vcpu->arch.guest_gprs[14], &vcpu->arch.sie_block->gg14, 16);
}
500
/*
 * KVM_RUN: execute the guest until an intercept needs userspace, a
 * signal arrives, or an error occurs. The PSW is shuttled between
 * kvm_run and the SIE block on entry/exit so userspace always sees the
 * current value.
 */
int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	int rc;
	sigset_t sigsaved;

rerun_vcpu:
	if (vcpu->sigset_active)
		sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);

	atomic_clear_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);

	BUG_ON(vcpu->kvm->arch.float_int.local_int[vcpu->vcpu_id] == NULL);

	/* only these exit reasons are legal on (re)entry from userspace */
	switch (kvm_run->exit_reason) {
	case KVM_EXIT_S390_SIEIC:
	case KVM_EXIT_UNKNOWN:
	case KVM_EXIT_INTR:
	case KVM_EXIT_S390_RESET:
		break;
	default:
		BUG();
	}

	vcpu->arch.sie_block->gpsw.mask = kvm_run->psw_mask;
	vcpu->arch.sie_block->gpsw.addr = kvm_run->psw_addr;

	might_fault();

	/* run until an intercept handler asks to stop or a signal arrives */
	do {
		__vcpu_run(vcpu);
		rc = kvm_handle_sie_intercept(vcpu);
	} while (!signal_pending(current) && !rc);

	if (rc == SIE_INTERCEPT_RERUNVCPU)
		goto rerun_vcpu;

	if (signal_pending(current) && !rc) {
		kvm_run->exit_reason = KVM_EXIT_INTR;
		rc = -EINTR;
	}

	if (rc == -EOPNOTSUPP) {
		/* intercept cannot be handled in-kernel, prepare kvm-run */
		kvm_run->exit_reason = KVM_EXIT_S390_SIEIC;
		kvm_run->s390_sieic.icptcode = vcpu->arch.sie_block->icptcode;
		kvm_run->s390_sieic.ipa = vcpu->arch.sie_block->ipa;
		kvm_run->s390_sieic.ipb = vcpu->arch.sie_block->ipb;
		rc = 0;
	}

	if (rc == -EREMOTE) {
		/* intercept was handled, but userspace support is needed
		 * kvm_run has been prepared by the handler */
		rc = 0;
	}

	kvm_run->psw_mask = vcpu->arch.sie_block->gpsw.mask;
	kvm_run->psw_addr = vcpu->arch.sie_block->gpsw.addr;

	if (vcpu->sigset_active)
		sigprocmask(SIG_SETMASK, &sigsaved, NULL);

	vcpu->stat.exit_userspace++;
	return rc;
}
566
092670cd 567static int __guestcopy(struct kvm_vcpu *vcpu, u64 guestdest, void *from,
b0c632db
HC
568 unsigned long n, int prefix)
569{
570 if (prefix)
571 return copy_to_guest(vcpu, guestdest, from, n);
572 else
573 return copy_to_guest_absolute(vcpu, guestdest, from, n);
574}
575
/*
 * store status at address
 * we have two special cases:
 * KVM_S390_STORE_STATUS_NOADDR: -> 0x1200 on 64 bit
 * KVM_S390_STORE_STATUS_PREFIXED: -> prefix
 *
 * Writes the architected save area (FP regs, GP regs, PSW, prefix,
 * FPC, TOD programmable reg, CPU timer, clock comparator, access and
 * control registers) into guest storage. Returns 0 or -EFAULT.
 */
int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
{
	unsigned char archmode = 1;	/* byte 163: z/Architecture mode */
	int prefix;

	if (addr == KVM_S390_STORE_STATUS_NOADDR) {
		if (copy_to_guest_absolute(vcpu, 163ul, &archmode, 1))
			return -EFAULT;
		addr = SAVE_AREA_BASE;
		prefix = 0;
	} else if (addr == KVM_S390_STORE_STATUS_PREFIXED) {
		if (copy_to_guest(vcpu, 163ul, &archmode, 1))
			return -EFAULT;
		addr = SAVE_AREA_BASE;
		prefix = 1;
	} else
		prefix = 0;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, fp_regs),
			vcpu->arch.guest_fpregs.fprs, 128, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, gp_regs),
			vcpu->arch.guest_gprs, 128, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, psw),
			&vcpu->arch.sie_block->gpsw, 16, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, pref_reg),
			&vcpu->arch.sie_block->prefix, 4, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu,
			addr + offsetof(struct save_area, fp_ctrl_reg),
			&vcpu->arch.guest_fpregs.fpc, 4, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, tod_reg),
			&vcpu->arch.sie_block->todpr, 4, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, timer),
			&vcpu->arch.sie_block->cputm, 8, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, clk_cmp),
			&vcpu->arch.sie_block->ckc, 8, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, acc_regs),
			&vcpu->arch.guest_acrs, 64, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu,
			addr + offsetof(struct save_area, ctrl_regs),
			&vcpu->arch.sie_block->gcr, 128, prefix))
		return -EFAULT;
	return 0;
}
643
/*
 * Dispatch vcpu-scoped ioctls: interrupt injection, store-status,
 * setting the initial PSW, and initial reset.
 * Returns 0 or a negative errno.
 */
long kvm_arch_vcpu_ioctl(struct file *filp,
			 unsigned int ioctl, unsigned long arg)
{
	struct kvm_vcpu *vcpu = filp->private_data;
	void __user *argp = (void __user *)arg;
	long r;

	switch (ioctl) {
	case KVM_S390_INTERRUPT: {
		struct kvm_s390_interrupt s390int;

		r = -EFAULT;
		if (copy_from_user(&s390int, argp, sizeof(s390int)))
			break;
		r = kvm_s390_inject_vcpu(vcpu, &s390int);
		break;
	}
	case KVM_S390_STORE_STATUS:
		/* arg is the guest address (or a NOADDR/PREFIXED token) */
		r = kvm_s390_vcpu_store_status(vcpu, arg);
		break;
	case KVM_S390_SET_INITIAL_PSW: {
		psw_t psw;

		r = -EFAULT;
		if (copy_from_user(&psw, argp, sizeof(psw)))
			break;
		r = kvm_arch_vcpu_ioctl_set_initial_psw(vcpu, psw);
		break;
	}
	case KVM_S390_INITIAL_RESET:
		r = kvm_arch_vcpu_ioctl_initial_reset(vcpu);
		break;
	default:
		r = -EINVAL;
	}
	return r;
}
681
/* Section: memory related */
/*
 * Validate a proposed memory slot before kvm common code commits it.
 * Returns 0 if acceptable, -EINVAL otherwise.
 */
int kvm_arch_prepare_memory_region(struct kvm *kvm,
				   struct kvm_memory_slot *memslot,
				   struct kvm_memory_slot old,
				   struct kvm_userspace_memory_region *mem,
				   int user_alloc)
{
	/* A few sanity checks. We can have exactly one memory slot which has
	   to start at guest virtual zero and which has to be located at a
	   page boundary in userland and which has to end at a page boundary.
	   The memory in userland is ok to be fragmented into various different
	   vmas. It is okay to mmap() and munmap() stuff in this slot after
	   doing this call at any time */

	if (mem->slot)
		return -EINVAL;

	if (mem->guest_phys_addr)
		return -EINVAL;

	/* both bounds must be 1 MB (segment) aligned */
	if (mem->userspace_addr & 0xffffful)
		return -EINVAL;

	if (mem->memory_size & 0xffffful)
		return -EINVAL;

	if (!user_alloc)
		return -EINVAL;

	return 0;
}
713
/*
 * Map the committed slot's userspace range into the guest address
 * space. A mapping failure is only logged; the slot itself was already
 * accepted by kvm_arch_prepare_memory_region().
 */
void kvm_arch_commit_memory_region(struct kvm *kvm,
				   struct kvm_userspace_memory_region *mem,
				   struct kvm_memory_slot old,
				   int user_alloc)
{
	int rc;

	rc = gmap_map_segment(kvm->arch.gmap, mem->userspace_addr,
			      mem->guest_phys_addr, mem->memory_size);
	if (rc)
		printk(KERN_WARNING "kvm-s390: failed to commit memory region\n");
	return;
}

/* No shadow page tables on s390; nothing to flush. */
void kvm_arch_flush_shadow(struct kvm *kvm)
{
}
732
/*
 * Module init: register with kvm common code, then build the facility
 * list advertised to guests, masked down to what KVM supports.
 */
static int __init kvm_s390_init(void)
{
	int ret;
	ret = kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
	if (ret)
		return ret;

	/*
	 * guests can ask for up to 255+1 double words, we need a full page
	 * to hold the maximum amount of facilities. On the other hand, we
	 * only set facilities that are known to work in KVM.
	 */
	facilities = (unsigned long long *) get_zeroed_page(GFP_KERNEL|GFP_DMA);
	if (!facilities) {
		kvm_exit();
		return -ENOMEM;
	}
	memcpy(facilities, S390_lowcore.stfle_fac_list, 16);
	/* whitelist of facility bits KVM is known to handle correctly */
	facilities[0] &= 0xff00fff3f47c0000ULL;
	facilities[1] &= 0x201c000000000000ULL;
	return 0;
}

/* Module exit: free the facility page and unregister from kvm. */
static void __exit kvm_s390_exit(void)
{
	free_page((unsigned long) facilities);
	kvm_exit();
}

module_init(kvm_s390_init);
module_exit(kvm_s390_exit);