]> git.proxmox.com Git - mirror_ubuntu-artful-kernel.git/blame - arch/s390/kvm/kvm-s390.c
KVM: set_memory_region: Refactor prepare_memory_region()
[mirror_ubuntu-artful-kernel.git] / arch / s390 / kvm / kvm-s390.c
CommitLineData
b0c632db 1/*
a53c8fab 2 * hosting zSeries kernel virtual machines
b0c632db 3 *
a53c8fab 4 * Copyright IBM Corp. 2008, 2009
b0c632db
HC
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License (version 2 only)
8 * as published by the Free Software Foundation.
9 *
10 * Author(s): Carsten Otte <cotte@de.ibm.com>
11 * Christian Borntraeger <borntraeger@de.ibm.com>
12 * Heiko Carstens <heiko.carstens@de.ibm.com>
628eb9b8 13 * Christian Ehrhardt <ehrhardt@de.ibm.com>
b0c632db
HC
14 */
15
16#include <linux/compiler.h>
17#include <linux/err.h>
18#include <linux/fs.h>
ca872302 19#include <linux/hrtimer.h>
b0c632db
HC
20#include <linux/init.h>
21#include <linux/kvm.h>
22#include <linux/kvm_host.h>
23#include <linux/module.h>
24#include <linux/slab.h>
ba5c1e9b 25#include <linux/timer.h>
cbb870c8 26#include <asm/asm-offsets.h>
b0c632db
HC
27#include <asm/lowcore.h>
28#include <asm/pgtable.h>
f5daba1d 29#include <asm/nmi.h>
a0616cde 30#include <asm/switch_to.h>
1526bf9c 31#include <asm/sclp.h>
8f2abe6a 32#include "kvm-s390.h"
b0c632db
HC
33#include "gaccess.h"
34
5786fffa
CH
35#define CREATE_TRACE_POINTS
36#include "trace.h"
ade38c31 37#include "trace-s390.h"
5786fffa 38
b0c632db
HC
39#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU
40
/*
 * Per-vcpu statistics exported through debugfs: each entry maps a file
 * name to the offset of its counter in struct kvm_vcpu.stat.  The array
 * is NULL-terminated for the generic kvm debugfs code.
 */
struct kvm_stats_debugfs_item debugfs_entries[] = {
	{ "userspace_handled", VCPU_STAT(exit_userspace) },
	{ "exit_null", VCPU_STAT(exit_null) },
	{ "exit_validity", VCPU_STAT(exit_validity) },
	{ "exit_stop_request", VCPU_STAT(exit_stop_request) },
	{ "exit_external_request", VCPU_STAT(exit_external_request) },
	{ "exit_external_interrupt", VCPU_STAT(exit_external_interrupt) },
	{ "exit_instruction", VCPU_STAT(exit_instruction) },
	{ "exit_program_interruption", VCPU_STAT(exit_program_interruption) },
	{ "exit_instr_and_program_int", VCPU_STAT(exit_instr_and_program) },
	{ "instruction_lctlg", VCPU_STAT(instruction_lctlg) },
	{ "instruction_lctl", VCPU_STAT(instruction_lctl) },
	{ "deliver_emergency_signal", VCPU_STAT(deliver_emergency_signal) },
	{ "deliver_external_call", VCPU_STAT(deliver_external_call) },
	{ "deliver_service_signal", VCPU_STAT(deliver_service_signal) },
	{ "deliver_virtio_interrupt", VCPU_STAT(deliver_virtio_interrupt) },
	{ "deliver_stop_signal", VCPU_STAT(deliver_stop_signal) },
	{ "deliver_prefix_signal", VCPU_STAT(deliver_prefix_signal) },
	{ "deliver_restart_signal", VCPU_STAT(deliver_restart_signal) },
	{ "deliver_program_interruption", VCPU_STAT(deliver_program_int) },
	{ "exit_wait_state", VCPU_STAT(exit_wait_state) },
	{ "instruction_stidp", VCPU_STAT(instruction_stidp) },
	{ "instruction_spx", VCPU_STAT(instruction_spx) },
	{ "instruction_stpx", VCPU_STAT(instruction_stpx) },
	{ "instruction_stap", VCPU_STAT(instruction_stap) },
	{ "instruction_storage_key", VCPU_STAT(instruction_storage_key) },
	{ "instruction_stsch", VCPU_STAT(instruction_stsch) },
	{ "instruction_chsc", VCPU_STAT(instruction_chsc) },
	{ "instruction_stsi", VCPU_STAT(instruction_stsi) },
	{ "instruction_stfl", VCPU_STAT(instruction_stfl) },
	{ "instruction_tprot", VCPU_STAT(instruction_tprot) },
	{ "instruction_sigp_sense", VCPU_STAT(instruction_sigp_sense) },
	{ "instruction_sigp_sense_running", VCPU_STAT(instruction_sigp_sense_running) },
	{ "instruction_sigp_external_call", VCPU_STAT(instruction_sigp_external_call) },
	{ "instruction_sigp_emergency", VCPU_STAT(instruction_sigp_emergency) },
	{ "instruction_sigp_stop", VCPU_STAT(instruction_sigp_stop) },
	{ "instruction_sigp_set_arch", VCPU_STAT(instruction_sigp_arch) },
	{ "instruction_sigp_set_prefix", VCPU_STAT(instruction_sigp_prefix) },
	{ "instruction_sigp_restart", VCPU_STAT(instruction_sigp_restart) },
	{ "diagnose_10", VCPU_STAT(diagnose_10) },
	{ "diagnose_44", VCPU_STAT(diagnose_44) },
	{ "diagnose_9c", VCPU_STAT(diagnose_9c) },
	{ NULL }
};

/*
 * Page holding the facility bits presented to guests; allocated and
 * masked in kvm_s390_init(), freed in kvm_s390_exit().
 */
static unsigned long long *facilities;
b0c632db
HC
87
/* Section: not file related */
int kvm_arch_hardware_enable(void *garbage)
{
	/* every s390 is virtualization enabled ;-) */
	return 0;
}

/* No per-cpu disable step is needed on s390. */
void kvm_arch_hardware_disable(void *garbage)
{
}

/* No global hardware setup required. */
int kvm_arch_hardware_setup(void)
{
	return 0;
}

void kvm_arch_hardware_unsetup(void)
{
}

/* All s390 CPUs that run this kernel can run guests; nothing to check. */
void kvm_arch_check_processor_compat(void *rtn)
{
}

int kvm_arch_init(void *opaque)
{
	return 0;
}

void kvm_arch_exit(void)
{
}
120
121/* Section: device related */
122long kvm_arch_dev_ioctl(struct file *filp,
123 unsigned int ioctl, unsigned long arg)
124{
125 if (ioctl == KVM_S390_ENABLE_SIE)
126 return s390_enable_sie();
127 return -EINVAL;
128}
129
/*
 * Report which KVM capabilities this architecture supports.
 * Returns 1 (or a capability specific value) for supported extensions
 * and 0 for everything else.
 */
int kvm_dev_ioctl_check_extension(long ext)
{
	int r;

	switch (ext) {
	case KVM_CAP_S390_PSW:
	case KVM_CAP_S390_GMAP:
	case KVM_CAP_SYNC_MMU:
#ifdef CONFIG_KVM_S390_UCONTROL
	case KVM_CAP_S390_UCONTROL:
#endif
	case KVM_CAP_SYNC_REGS:
	case KVM_CAP_ONE_REG:
	case KVM_CAP_ENABLE_CAP:
	case KVM_CAP_S390_CSS_SUPPORT:
		r = 1;
		break;
	case KVM_CAP_NR_VCPUS:
	case KVM_CAP_MAX_VCPUS:
		r = KVM_MAX_VCPUS;
		break;
	case KVM_CAP_S390_COW:
		/* copy-on-write support depends on the ESOP machine facility */
		r = MACHINE_HAS_ESOP;
		break;
	default:
		r = 0;
	}
	return r;
}
159
/* Section: vm related */
/*
 * Get (and clear) the dirty memory log for a memory slot.
 * Dirty logging is not implemented on s390 at this point; the ioctl
 * succeeds without reporting any dirty pages.
 */
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
			       struct kvm_dirty_log *log)
{
	return 0;
}
169
/*
 * VM-scope ioctl handler.  Only KVM_S390_INTERRUPT (inject a floating
 * interrupt into the VM) is handled here; unknown requests yield -ENOTTY.
 */
long kvm_arch_vm_ioctl(struct file *filp,
		       unsigned int ioctl, unsigned long arg)
{
	struct kvm *kvm = filp->private_data;
	void __user *argp = (void __user *)arg;
	int r;

	switch (ioctl) {
	case KVM_S390_INTERRUPT: {
		struct kvm_s390_interrupt s390int;

		r = -EFAULT;
		if (copy_from_user(&s390int, argp, sizeof(s390int)))
			break;
		r = kvm_s390_inject_vm(kvm, &s390int);
		break;
	}
	default:
		r = -ENOTTY;
	}

	return r;
}
193
/*
 * Create the architecture specific part of a VM.
 *
 * @type: KVM_VM_S390_UCONTROL selects a user controlled VM (requires
 *        CAP_SYS_ADMIN and CONFIG_KVM_S390_UCONTROL); all other bits
 *        must be zero.
 *
 * Allocates the SCA page, registers a per-VM s390 debug feature area
 * and, for regular (non-ucontrol) VMs, the guest address space (gmap).
 * Returns 0 on success or a negative error; partially acquired
 * resources are released through the goto cleanup chain.
 */
int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
{
	int rc;
	char debug_name[16];

	rc = -EINVAL;
#ifdef CONFIG_KVM_S390_UCONTROL
	if (type & ~KVM_VM_S390_UCONTROL)
		goto out_err;
	if ((type & KVM_VM_S390_UCONTROL) && (!capable(CAP_SYS_ADMIN)))
		goto out_err;
#else
	if (type)
		goto out_err;
#endif

	rc = s390_enable_sie();
	if (rc)
		goto out_err;

	rc = -ENOMEM;

	kvm->arch.sca = (struct sca_block *) get_zeroed_page(GFP_KERNEL);
	if (!kvm->arch.sca)
		goto out_err;

	sprintf(debug_name, "kvm-%u", current->pid);

	kvm->arch.dbf = debug_register(debug_name, 8, 2, 8 * sizeof(long));
	if (!kvm->arch.dbf)
		goto out_nodbf;

	spin_lock_init(&kvm->arch.float_int.lock);
	INIT_LIST_HEAD(&kvm->arch.float_int.list);

	debug_register_view(kvm->arch.dbf, &debug_sprintf_view);
	VM_EVENT(kvm, 3, "%s", "vm created");

	/* ucontrol VMs get their gmap per vcpu (see kvm_arch_vcpu_init) */
	if (type & KVM_VM_S390_UCONTROL) {
		kvm->arch.gmap = NULL;
	} else {
		kvm->arch.gmap = gmap_alloc(current->mm);
		if (!kvm->arch.gmap)
			goto out_nogmap;
	}

	kvm->arch.css_support = 0;

	return 0;
out_nogmap:
	debug_unregister(kvm->arch.dbf);
out_nodbf:
	free_page((unsigned long)(kvm->arch.sca));
out_err:
	return rc;
}
250
/*
 * Tear down a vcpu: detach it from the SCA (non-ucontrol VMs), free the
 * per-vcpu gmap (ucontrol VMs), release the SIE control block and
 * finally the vcpu structure itself.
 */
void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{
	VCPU_EVENT(vcpu, 3, "%s", "free cpu");
	trace_kvm_s390_destroy_vcpu(vcpu->vcpu_id);
	if (!kvm_is_ucontrol(vcpu->kvm)) {
		clear_bit(63 - vcpu->vcpu_id,
			  (unsigned long *) &vcpu->kvm->arch.sca->mcn);
		/* only clear the sda slot if it still points at our block */
		if (vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda ==
		    (__u64) vcpu->arch.sie_block)
			vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda = 0;
	}
	smp_mb();

	if (kvm_is_ucontrol(vcpu->kvm))
		gmap_free(vcpu->arch.gmap);

	free_page((unsigned long)(vcpu->arch.sie_block));
	kvm_vcpu_uninit(vcpu);
	kfree(vcpu);
}
271
/*
 * Destroy all vcpus of a VM and reset the vm's vcpu bookkeeping under
 * kvm->lock.
 */
static void kvm_free_vcpus(struct kvm *kvm)
{
	unsigned int i;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(i, vcpu, kvm)
		kvm_arch_vcpu_destroy(vcpu);

	mutex_lock(&kvm->lock);
	for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
		kvm->vcpus[i] = NULL;

	atomic_set(&kvm->online_vcpus, 0);
	mutex_unlock(&kvm->lock);
}

/* Nothing to flush on s390. */
void kvm_arch_sync_events(struct kvm *kvm)
{
}
291
/*
 * Release all arch specific VM state: vcpus, the SCA page, the debug
 * feature area and (for non-ucontrol VMs) the guest address space.
 */
void kvm_arch_destroy_vm(struct kvm *kvm)
{
	kvm_free_vcpus(kvm);
	free_page((unsigned long)(kvm->arch.sca));
	debug_unregister(kvm->arch.dbf);
	if (!kvm_is_ucontrol(kvm))
		gmap_free(kvm->arch.gmap);
}
300
/* Section: vcpu related */
/*
 * Arch specific vcpu init.  ucontrol vcpus get their own gmap; regular
 * vcpus share the VM-wide gmap and advertise which register sets are
 * valid in the kvm_run synced-register area.
 * Returns 0 or -ENOMEM if the per-vcpu gmap allocation fails.
 */
int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
{
	if (kvm_is_ucontrol(vcpu->kvm)) {
		vcpu->arch.gmap = gmap_alloc(current->mm);
		if (!vcpu->arch.gmap)
			return -ENOMEM;
		return 0;
	}

	vcpu->arch.gmap = vcpu->kvm->arch.gmap;
	vcpu->run->kvm_valid_regs = KVM_SYNC_PREFIX |
				    KVM_SYNC_GPRS |
				    KVM_SYNC_ACRS |
				    KVM_SYNC_CRS;
	return 0;
}

void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
{
	/* Nothing todo */
}
323
/*
 * Called when the vcpu is scheduled in on a host cpu: stash the host
 * FP/access registers, install the guest copies, enable the guest
 * address space and mark the vcpu running.
 */
void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	save_fp_regs(&vcpu->arch.host_fpregs);
	save_access_regs(vcpu->arch.host_acrs);
	vcpu->arch.guest_fpregs.fpc &= FPC_VALID_MASK;
	restore_fp_regs(&vcpu->arch.guest_fpregs);
	restore_access_regs(vcpu->run->s.regs.acrs);
	gmap_enable(vcpu->arch.gmap);
	atomic_set_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
}

/*
 * Inverse of kvm_arch_vcpu_load(): save the guest register state and
 * restore the host's before the vcpu is scheduled out.
 */
void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
	atomic_clear_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
	gmap_disable(vcpu->arch.gmap);
	save_fp_regs(&vcpu->arch.guest_fpregs);
	save_access_regs(vcpu->run->s.regs.acrs);
	restore_fp_regs(&vcpu->arch.host_fpregs);
	restore_access_regs(vcpu->arch.host_acrs);
}
344
/*
 * Put the vcpu into its architected initial state (PSW, timers, control
 * registers, FP control) and mark it stopped.
 */
static void kvm_s390_vcpu_initial_reset(struct kvm_vcpu *vcpu)
{
	/* this equals initial cpu reset in pop, but we don't switch to ESA */
	vcpu->arch.sie_block->gpsw.mask = 0UL;
	vcpu->arch.sie_block->gpsw.addr = 0UL;
	kvm_s390_set_prefix(vcpu, 0);
	vcpu->arch.sie_block->cputm = 0UL;
	vcpu->arch.sie_block->ckc = 0UL;
	vcpu->arch.sie_block->todpr = 0;
	memset(vcpu->arch.sie_block->gcr, 0, 16 * sizeof(__u64));
	vcpu->arch.sie_block->gcr[0] = 0xE0UL;
	vcpu->arch.sie_block->gcr[14] = 0xC2000000UL;
	vcpu->arch.guest_fpregs.fpc = 0;
	/* also reset the hardware FP control register */
	asm volatile("lfpc %0" : : "Q" (vcpu->arch.guest_fpregs.fpc));
	vcpu->arch.sie_block->gbea = 1;
	atomic_set_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);
}

/* No post-creation work needed on s390. */
int kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
{
	return 0;
}
367
/*
 * One-time setup of the SIE control block and per-vcpu timers/tasklets.
 * Installs the facility list page and the clock-comparator wakeup timer.
 * Always returns 0.
 */
int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
{
	atomic_set(&vcpu->arch.sie_block->cpuflags, CPUSTAT_ZARCH |
						    CPUSTAT_SM |
						    CPUSTAT_STOPPED);
	vcpu->arch.sie_block->ecb = 6;
	vcpu->arch.sie_block->eca = 0xC1002001U;
	vcpu->arch.sie_block->fac = (int) (long) facilities;
	hrtimer_init(&vcpu->arch.ckc_timer, CLOCK_REALTIME, HRTIMER_MODE_ABS);
	tasklet_init(&vcpu->arch.tasklet, kvm_s390_tasklet,
		     (unsigned long) vcpu);
	vcpu->arch.ckc_timer.function = kvm_s390_idle_wakeup;
	get_cpu_id(&vcpu->arch.cpu_id);
	vcpu->arch.cpu_id.version = 0xff;
	return 0;
}
384
/*
 * Allocate and wire up a new vcpu: the vcpu structure, its SIE control
 * block, the SCA entry (non-ucontrol VMs) and the local interrupt state.
 * Returns the vcpu or an ERR_PTR; partial allocations are unwound via
 * the goto cleanup chain.
 */
struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
				      unsigned int id)
{
	struct kvm_vcpu *vcpu;
	int rc = -EINVAL;

	if (id >= KVM_MAX_VCPUS)
		goto out;

	rc = -ENOMEM;

	vcpu = kzalloc(sizeof(struct kvm_vcpu), GFP_KERNEL);
	if (!vcpu)
		goto out;

	vcpu->arch.sie_block = (struct kvm_s390_sie_block *)
					get_zeroed_page(GFP_KERNEL);

	if (!vcpu->arch.sie_block)
		goto out_free_cpu;

	vcpu->arch.sie_block->icpua = id;
	if (!kvm_is_ucontrol(kvm)) {
		if (!kvm->arch.sca) {
			WARN_ON_ONCE(1);
			goto out_free_cpu;
		}
		/* only claim the sda slot if it is still free */
		if (!kvm->arch.sca->cpu[id].sda)
			kvm->arch.sca->cpu[id].sda =
				(__u64) vcpu->arch.sie_block;
		vcpu->arch.sie_block->scaoh =
			(__u32)(((__u64)kvm->arch.sca) >> 32);
		vcpu->arch.sie_block->scaol = (__u32)(__u64)kvm->arch.sca;
		set_bit(63 - id, (unsigned long *) &kvm->arch.sca->mcn);
	}

	spin_lock_init(&vcpu->arch.local_int.lock);
	INIT_LIST_HEAD(&vcpu->arch.local_int.list);
	vcpu->arch.local_int.float_int = &kvm->arch.float_int;
	spin_lock(&kvm->arch.float_int.lock);
	kvm->arch.float_int.local_int[id] = &vcpu->arch.local_int;
	init_waitqueue_head(&vcpu->arch.local_int.wq);
	vcpu->arch.local_int.cpuflags = &vcpu->arch.sie_block->cpuflags;
	spin_unlock(&kvm->arch.float_int.lock);

	rc = kvm_vcpu_init(vcpu, kvm, id);
	if (rc)
		goto out_free_sie_block;
	VM_EVENT(kvm, 3, "create cpu %d at %p, sie block at %p", id, vcpu,
		 vcpu->arch.sie_block);
	trace_kvm_s390_create_vcpu(id, vcpu, vcpu->arch.sie_block);

	return vcpu;
out_free_sie_block:
	free_page((unsigned long)(vcpu->arch.sie_block));
out_free_cpu:
	kfree(vcpu);
out:
	return ERR_PTR(rc);
}
445
int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
{
	/* kvm common code refers to this, but never calls it */
	BUG();
	return 0;
}

int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
{
	/* kvm common code refers to this, but never calls it */
	BUG();
	return 0;
}
459
/*
 * KVM_GET_ONE_REG: copy the requested SIE control block field to the
 * user buffer.  Returns 0, -EFAULT from put_user, or -EINVAL for an
 * unknown register id.
 */
static int kvm_arch_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu,
					   struct kvm_one_reg *reg)
{
	int r = -EINVAL;

	switch (reg->id) {
	case KVM_REG_S390_TODPR:
		r = put_user(vcpu->arch.sie_block->todpr,
			     (u32 __user *)reg->addr);
		break;
	case KVM_REG_S390_EPOCHDIFF:
		r = put_user(vcpu->arch.sie_block->epoch,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_CPU_TIMER:
		r = put_user(vcpu->arch.sie_block->cputm,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_CLOCK_COMP:
		r = put_user(vcpu->arch.sie_block->ckc,
			     (u64 __user *)reg->addr);
		break;
	default:
		break;
	}

	return r;
}

/*
 * KVM_SET_ONE_REG: counterpart of the getter above; reads the new value
 * from user space into the SIE control block.
 */
static int kvm_arch_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu,
					   struct kvm_one_reg *reg)
{
	int r = -EINVAL;

	switch (reg->id) {
	case KVM_REG_S390_TODPR:
		r = get_user(vcpu->arch.sie_block->todpr,
			     (u32 __user *)reg->addr);
		break;
	case KVM_REG_S390_EPOCHDIFF:
		r = get_user(vcpu->arch.sie_block->epoch,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_CPU_TIMER:
		r = get_user(vcpu->arch.sie_block->cputm,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_CLOCK_COMP:
		r = get_user(vcpu->arch.sie_block->ckc,
			     (u64 __user *)reg->addr);
		break;
	default:
		break;
	}

	return r;
}
b6d33834 517
/* KVM_S390_INITIAL_RESET ioctl backend. */
static int kvm_arch_vcpu_ioctl_initial_reset(struct kvm_vcpu *vcpu)
{
	kvm_s390_vcpu_initial_reset(vcpu);
	return 0;
}

/* Copy general purpose registers from user space into the sync area. */
int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	memcpy(&vcpu->run->s.regs.gprs, &regs->gprs, sizeof(regs->gprs));
	return 0;
}

int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	memcpy(&regs->gprs, &vcpu->run->s.regs.gprs, sizeof(regs->gprs));
	return 0;
}

/*
 * Set access and control registers; the access registers are also
 * loaded into the hardware immediately.
 */
int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	memcpy(&vcpu->run->s.regs.acrs, &sregs->acrs, sizeof(sregs->acrs));
	memcpy(&vcpu->arch.sie_block->gcr, &sregs->crs, sizeof(sregs->crs));
	restore_access_regs(vcpu->run->s.regs.acrs);
	return 0;
}

int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	memcpy(&sregs->acrs, &vcpu->run->s.regs.acrs, sizeof(sregs->acrs));
	memcpy(&sregs->crs, &vcpu->arch.sie_block->gcr, sizeof(sregs->crs));
	return 0;
}

/*
 * Set the guest floating point registers; the FP control is masked to
 * valid bits and the registers are loaded into the hardware.
 */
int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	memcpy(&vcpu->arch.guest_fpregs.fprs, &fpu->fprs, sizeof(fpu->fprs));
	vcpu->arch.guest_fpregs.fpc = fpu->fpc & FPC_VALID_MASK;
	restore_fp_regs(&vcpu->arch.guest_fpregs);
	return 0;
}

int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	memcpy(&fpu->fprs, &vcpu->arch.guest_fpregs.fprs, sizeof(fpu->fprs));
	fpu->fpc = vcpu->arch.guest_fpregs.fpc;
	return 0;
}
567
/*
 * KVM_S390_SET_INITIAL_PSW: install the initial PSW in the kvm_run
 * area.  Only allowed while the vcpu is stopped; otherwise -EBUSY.
 */
static int kvm_arch_vcpu_ioctl_set_initial_psw(struct kvm_vcpu *vcpu, psw_t psw)
{
	int rc = 0;

	if (!(atomic_read(&vcpu->arch.sie_block->cpuflags) & CPUSTAT_STOPPED))
		rc = -EBUSY;
	else {
		vcpu->run->psw_mask = psw.mask;
		vcpu->run->psw_addr = psw.addr;
	}
	return rc;
}
580
int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
				  struct kvm_translation *tr)
{
	return -EINVAL; /* not implemented yet */
}

int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
					struct kvm_guest_debug *dbg)
{
	return -EINVAL; /* not implemented yet */
}

int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	return -EINVAL; /* not implemented yet */
}

int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	return -EINVAL; /* not implemented yet */
}
604
/*
 * One round trip into SIE: deliver pending work, enter the guest via
 * sie64a() and classify the result.  Returns 0, a SIE_INTERCEPT_*
 * code for ucontrol VMs, or a negative error.
 */
static int __vcpu_run(struct kvm_vcpu *vcpu)
{
	int rc;

	/* gg14 shadows gprs[14..15] for the SIE entry path */
	memcpy(&vcpu->arch.sie_block->gg14, &vcpu->run->s.regs.gprs[14], 16);

	if (need_resched())
		schedule();

	if (test_thread_flag(TIF_MCCK_PENDING))
		s390_handle_mcck();

	if (!kvm_is_ucontrol(vcpu->kvm))
		kvm_s390_deliver_pending_interrupts(vcpu);

	vcpu->arch.sie_block->icptcode = 0;
	preempt_disable();
	kvm_guest_enter();
	preempt_enable();
	VCPU_EVENT(vcpu, 6, "entering sie flags %x",
		   atomic_read(&vcpu->arch.sie_block->cpuflags));
	trace_kvm_s390_sie_enter(vcpu,
				 atomic_read(&vcpu->arch.sie_block->cpuflags));
	rc = sie64a(vcpu->arch.sie_block, vcpu->run->s.regs.gprs);
	if (rc) {
		if (kvm_is_ucontrol(vcpu->kvm)) {
			rc = SIE_INTERCEPT_UCONTROL;
		} else {
			/* a fault in SIE becomes an addressing exception
			 * for the guest */
			VCPU_EVENT(vcpu, 3, "%s", "fault in sie instruction");
			trace_kvm_s390_sie_fault(vcpu);
			kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
			rc = 0;
		}
	}
	VCPU_EVENT(vcpu, 6, "exit sie icptcode %d",
		   vcpu->arch.sie_block->icptcode);
	trace_kvm_s390_sie_exit(vcpu, vcpu->arch.sie_block->icptcode);
	kvm_guest_exit();

	memcpy(&vcpu->run->s.regs.gprs[14], &vcpu->arch.sie_block->gg14, 16);
	return rc;
}
647
/*
 * KVM_RUN: synchronize state from kvm_run, loop entering the guest and
 * handling intercepts until something needs user space (or a signal
 * arrives), then write state back to kvm_run.
 */
int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	int rc;
	sigset_t sigsaved;

rerun_vcpu:
	if (vcpu->sigset_active)
		sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);

	atomic_clear_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);

	BUG_ON(vcpu->kvm->arch.float_int.local_int[vcpu->vcpu_id] == NULL);

	/* only these exit reasons may re-enter kvm_run */
	switch (kvm_run->exit_reason) {
	case KVM_EXIT_S390_SIEIC:
	case KVM_EXIT_UNKNOWN:
	case KVM_EXIT_INTR:
	case KVM_EXIT_S390_RESET:
	case KVM_EXIT_S390_UCONTROL:
	case KVM_EXIT_S390_TSCH:
		break;
	default:
		BUG();
	}

	vcpu->arch.sie_block->gpsw.mask = kvm_run->psw_mask;
	vcpu->arch.sie_block->gpsw.addr = kvm_run->psw_addr;
	if (kvm_run->kvm_dirty_regs & KVM_SYNC_PREFIX) {
		kvm_run->kvm_dirty_regs &= ~KVM_SYNC_PREFIX;
		kvm_s390_set_prefix(vcpu, kvm_run->s.regs.prefix);
	}
	if (kvm_run->kvm_dirty_regs & KVM_SYNC_CRS) {
		kvm_run->kvm_dirty_regs &= ~KVM_SYNC_CRS;
		memcpy(&vcpu->arch.sie_block->gcr, &kvm_run->s.regs.crs, 128);
		kvm_s390_set_prefix(vcpu, kvm_run->s.regs.prefix);
	}

	might_fault();

	do {
		rc = __vcpu_run(vcpu);
		if (rc)
			break;
		if (kvm_is_ucontrol(vcpu->kvm))
			rc = -EOPNOTSUPP;
		else
			rc = kvm_handle_sie_intercept(vcpu);
	} while (!signal_pending(current) && !rc);

	if (rc == SIE_INTERCEPT_RERUNVCPU)
		goto rerun_vcpu;

	if (signal_pending(current) && !rc) {
		kvm_run->exit_reason = KVM_EXIT_INTR;
		rc = -EINTR;
	}

#ifdef CONFIG_KVM_S390_UCONTROL
	if (rc == SIE_INTERCEPT_UCONTROL) {
		kvm_run->exit_reason = KVM_EXIT_S390_UCONTROL;
		kvm_run->s390_ucontrol.trans_exc_code =
			current->thread.gmap_addr;
		kvm_run->s390_ucontrol.pgm_code = 0x10;
		rc = 0;
	}
#endif

	if (rc == -EOPNOTSUPP) {
		/* intercept cannot be handled in-kernel, prepare kvm-run */
		kvm_run->exit_reason = KVM_EXIT_S390_SIEIC;
		kvm_run->s390_sieic.icptcode = vcpu->arch.sie_block->icptcode;
		kvm_run->s390_sieic.ipa = vcpu->arch.sie_block->ipa;
		kvm_run->s390_sieic.ipb = vcpu->arch.sie_block->ipb;
		rc = 0;
	}

	if (rc == -EREMOTE) {
		/* intercept was handled, but userspace support is needed
		 * kvm_run has been prepared by the handler */
		rc = 0;
	}

	kvm_run->psw_mask = vcpu->arch.sie_block->gpsw.mask;
	kvm_run->psw_addr = vcpu->arch.sie_block->gpsw.addr;
	kvm_run->s.regs.prefix = vcpu->arch.sie_block->prefix;
	memcpy(&kvm_run->s.regs.crs, &vcpu->arch.sie_block->gcr, 128);

	if (vcpu->sigset_active)
		sigprocmask(SIG_SETMASK, &sigsaved, NULL);

	vcpu->stat.exit_userspace++;
	return rc;
}
741
092670cd 742static int __guestcopy(struct kvm_vcpu *vcpu, u64 guestdest, void *from,
b0c632db
HC
743 unsigned long n, int prefix)
744{
745 if (prefix)
746 return copy_to_guest(vcpu, guestdest, from, n);
747 else
748 return copy_to_guest_absolute(vcpu, guestdest, from, n);
749}
750
/*
 * store status at address
 * we handle two special cases:
 * KVM_S390_STORE_STATUS_NOADDR: -> 0x1200 on 64 bit
 * KVM_S390_STORE_STATUS_PREFIXED: -> prefix
 * Returns 0 or -EFAULT if any guest copy fails.
 */
int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
{
	unsigned char archmode = 1;
	int prefix;

	if (addr == KVM_S390_STORE_STATUS_NOADDR) {
		if (copy_to_guest_absolute(vcpu, 163ul, &archmode, 1))
			return -EFAULT;
		addr = SAVE_AREA_BASE;
		prefix = 0;
	} else if (addr == KVM_S390_STORE_STATUS_PREFIXED) {
		if (copy_to_guest(vcpu, 163ul, &archmode, 1))
			return -EFAULT;
		addr = SAVE_AREA_BASE;
		prefix = 1;
	} else
		prefix = 0;

	/*
	 * The guest FPRS and ACRS are in the host FPRS/ACRS due to the lazy
	 * copying in vcpu load/put. Lets update our copies before we save
	 * it into the save area
	 */
	save_fp_regs(&vcpu->arch.guest_fpregs);
	save_access_regs(vcpu->run->s.regs.acrs);

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, fp_regs),
			vcpu->arch.guest_fpregs.fprs, 128, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, gp_regs),
			vcpu->run->s.regs.gprs, 128, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, psw),
			&vcpu->arch.sie_block->gpsw, 16, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, pref_reg),
			&vcpu->arch.sie_block->prefix, 4, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu,
			addr + offsetof(struct save_area, fp_ctrl_reg),
			&vcpu->arch.guest_fpregs.fpc, 4, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, tod_reg),
			&vcpu->arch.sie_block->todpr, 4, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, timer),
			&vcpu->arch.sie_block->cputm, 8, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, clk_cmp),
			&vcpu->arch.sie_block->ckc, 8, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, acc_regs),
			&vcpu->run->s.regs.acrs, 64, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu,
			addr + offsetof(struct save_area, ctrl_regs),
			&vcpu->arch.sie_block->gcr, 128, prefix))
		return -EFAULT;
	return 0;
}
826
/*
 * KVM_ENABLE_CAP backend: currently only KVM_CAP_S390_CSS_SUPPORT,
 * which turns on in-kernel channel subsystem support for the VM.
 * Non-zero flags or unknown capabilities yield -EINVAL.
 */
static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
				     struct kvm_enable_cap *cap)
{
	int r;

	if (cap->flags)
		return -EINVAL;

	switch (cap->cap) {
	case KVM_CAP_S390_CSS_SUPPORT:
		if (!vcpu->kvm->arch.css_support) {
			vcpu->kvm->arch.css_support = 1;
			trace_kvm_s390_enable_css(vcpu->kvm);
		}
		r = 0;
		break;
	default:
		r = -EINVAL;
		break;
	}
	return r;
}
849
/*
 * vcpu-scope ioctl dispatcher: interrupt injection, store status,
 * initial PSW/reset, ONE_REG access, ucontrol address space mapping,
 * fault handling and capability enablement.
 */
long kvm_arch_vcpu_ioctl(struct file *filp,
			 unsigned int ioctl, unsigned long arg)
{
	struct kvm_vcpu *vcpu = filp->private_data;
	void __user *argp = (void __user *)arg;
	long r;

	switch (ioctl) {
	case KVM_S390_INTERRUPT: {
		struct kvm_s390_interrupt s390int;

		r = -EFAULT;
		if (copy_from_user(&s390int, argp, sizeof(s390int)))
			break;
		r = kvm_s390_inject_vcpu(vcpu, &s390int);
		break;
	}
	case KVM_S390_STORE_STATUS:
		r = kvm_s390_vcpu_store_status(vcpu, arg);
		break;
	case KVM_S390_SET_INITIAL_PSW: {
		psw_t psw;

		r = -EFAULT;
		if (copy_from_user(&psw, argp, sizeof(psw)))
			break;
		r = kvm_arch_vcpu_ioctl_set_initial_psw(vcpu, psw);
		break;
	}
	case KVM_S390_INITIAL_RESET:
		r = kvm_arch_vcpu_ioctl_initial_reset(vcpu);
		break;
	case KVM_SET_ONE_REG:
	case KVM_GET_ONE_REG: {
		struct kvm_one_reg reg;
		r = -EFAULT;
		if (copy_from_user(&reg, argp, sizeof(reg)))
			break;
		if (ioctl == KVM_SET_ONE_REG)
			r = kvm_arch_vcpu_ioctl_set_one_reg(vcpu, &reg);
		else
			r = kvm_arch_vcpu_ioctl_get_one_reg(vcpu, &reg);
		break;
	}
#ifdef CONFIG_KVM_S390_UCONTROL
	case KVM_S390_UCAS_MAP: {
		struct kvm_s390_ucas_mapping ucasmap;

		if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
			r = -EFAULT;
			break;
		}

		/* only valid for user controlled VMs */
		if (!kvm_is_ucontrol(vcpu->kvm)) {
			r = -EINVAL;
			break;
		}

		r = gmap_map_segment(vcpu->arch.gmap, ucasmap.user_addr,
				     ucasmap.vcpu_addr, ucasmap.length);
		break;
	}
	case KVM_S390_UCAS_UNMAP: {
		struct kvm_s390_ucas_mapping ucasmap;

		if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
			r = -EFAULT;
			break;
		}

		if (!kvm_is_ucontrol(vcpu->kvm)) {
			r = -EINVAL;
			break;
		}

		r = gmap_unmap_segment(vcpu->arch.gmap, ucasmap.vcpu_addr,
				       ucasmap.length);
		break;
	}
#endif
	case KVM_S390_VCPU_FAULT: {
		r = gmap_fault(arg, vcpu->arch.gmap);
		if (!IS_ERR_VALUE(r))
			r = 0;
		break;
	}
	case KVM_ENABLE_CAP:
	{
		struct kvm_enable_cap cap;
		r = -EFAULT;
		if (copy_from_user(&cap, argp, sizeof(cap)))
			break;
		r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap);
		break;
	}
	default:
		r = -ENOTTY;
	}
	return r;
}
950
/*
 * mmap fault handler for the vcpu file: for ucontrol VMs the SIE
 * control block page can be mapped into user space at
 * KVM_S390_SIE_PAGE_OFFSET; everything else gets SIGBUS.
 */
int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
{
#ifdef CONFIG_KVM_S390_UCONTROL
	if ((vmf->pgoff == KVM_S390_SIE_PAGE_OFFSET)
		 && (kvm_is_ucontrol(vcpu->kvm))) {
		vmf->page = virt_to_page(vcpu->arch.sie_block);
		get_page(vmf->page);
		return 0;
	}
#endif
	return VM_FAULT_SIGBUS;
}

/* No arch specific per-memslot data to free on s390. */
void kvm_arch_free_memslot(struct kvm_memory_slot *free,
			   struct kvm_memory_slot *dont)
{
}

int kvm_arch_create_memslot(struct kvm_memory_slot *slot, unsigned long npages)
{
	return 0;
}
973
b0c632db 974/* Section: memory related */
f7784b8e
MT
975int kvm_arch_prepare_memory_region(struct kvm *kvm,
976 struct kvm_memory_slot *memslot,
7b6195a9
TY
977 struct kvm_userspace_memory_region *mem,
978 enum kvm_mr_change change)
b0c632db
HC
979{
980 /* A few sanity checks. We can have exactly one memory slot which has
981 to start at guest virtual zero and which has to be located at a
982 page boundary in userland and which has to end at a page boundary.
983 The memory in userland is ok to be fragmented into various different
984 vmas. It is okay to mmap() and munmap() stuff in this slot after
985 doing this call at any time */
986
628eb9b8 987 if (mem->slot)
b0c632db
HC
988 return -EINVAL;
989
990 if (mem->guest_phys_addr)
991 return -EINVAL;
992
598841ca 993 if (mem->userspace_addr & 0xffffful)
b0c632db
HC
994 return -EINVAL;
995
598841ca 996 if (mem->memory_size & 0xffffful)
b0c632db
HC
997 return -EINVAL;
998
f7784b8e
MT
999 return 0;
1000}
1001
1002void kvm_arch_commit_memory_region(struct kvm *kvm,
1003 struct kvm_userspace_memory_region *mem,
462fce46 1004 struct kvm_memory_slot old)
f7784b8e 1005{
f7850c92 1006 int rc;
f7784b8e 1007
598841ca
CO
1008
1009 rc = gmap_map_segment(kvm->arch.gmap, mem->userspace_addr,
1010 mem->guest_phys_addr, mem->memory_size);
1011 if (rc)
f7850c92 1012 printk(KERN_WARNING "kvm-s390: failed to commit memory region\n");
598841ca 1013 return;
b0c632db
HC
1014}
1015
/* s390 keeps no shadow page tables; nothing to flush. */
void kvm_arch_flush_shadow_all(struct kvm *kvm)
{
}

void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
				   struct kvm_memory_slot *slot)
{
}
1024
/*
 * Module init: register with the generic kvm core and build the
 * facility list page handed to guests (a zeroed DMA page, filled from
 * the host STFLE list and masked down to KVM-supported facilities).
 */
static int __init kvm_s390_init(void)
{
	int ret;
	ret = kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
	if (ret)
		return ret;

	/*
	 * guests can ask for up to 255+1 double words, we need a full page
	 * to hold the maximum amount of facilities. On the other hand, we
	 * only set facilities that are known to work in KVM.
	 */
	facilities = (unsigned long long *) get_zeroed_page(GFP_KERNEL|GFP_DMA);
	if (!facilities) {
		kvm_exit();
		return -ENOMEM;
	}
	memcpy(facilities, S390_lowcore.stfle_fac_list, 16);
	facilities[0] &= 0xff00fff3f47c0000ULL;
	facilities[1] &= 0x001c000000000000ULL;
	return 0;
}
1047
/* Module exit: free the facility page and unregister from kvm core. */
static void __exit kvm_s390_exit(void)
{
	free_page((unsigned long) facilities);
	kvm_exit();
}
1053
1054module_init(kvm_s390_init);
1055module_exit(kvm_s390_exit);