/*
 * hosting zSeries kernel virtual machines
 *
 * Copyright IBM Corp. 2008, 2009
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (version 2 only)
 * as published by the Free Software Foundation.
 *
 *    Author(s): Carsten Otte <cotte@de.ibm.com>
 *               Christian Borntraeger <borntraeger@de.ibm.com>
 *               Heiko Carstens <heiko.carstens@de.ibm.com>
 *               Christian Ehrhardt <ehrhardt@de.ibm.com>
 */

#include <linux/compiler.h>
#include <linux/err.h>
#include <linux/fs.h>
#include <linux/hrtimer.h>
#include <linux/init.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/timer.h>
#include <asm/asm-offsets.h>
#include <asm/lowcore.h>
#include <asm/pgtable.h>
#include <asm/nmi.h>
#include <asm/switch_to.h>
#include <asm/facility.h>
#include <asm/sclp.h>
#include "kvm-s390.h"
#include "gaccess.h"

#define CREATE_TRACE_POINTS
#include "trace.h"
#include "trace-s390.h"

#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU

struct kvm_stats_debugfs_item debugfs_entries[] = {
        { "userspace_handled", VCPU_STAT(exit_userspace) },
        { "exit_null", VCPU_STAT(exit_null) },
        { "exit_validity", VCPU_STAT(exit_validity) },
        { "exit_stop_request", VCPU_STAT(exit_stop_request) },
        { "exit_external_request", VCPU_STAT(exit_external_request) },
        { "exit_external_interrupt", VCPU_STAT(exit_external_interrupt) },
        { "exit_instruction", VCPU_STAT(exit_instruction) },
        { "exit_program_interruption", VCPU_STAT(exit_program_interruption) },
        { "exit_instr_and_program_int", VCPU_STAT(exit_instr_and_program) },
        { "instruction_lctlg", VCPU_STAT(instruction_lctlg) },
        { "instruction_lctl", VCPU_STAT(instruction_lctl) },
        { "deliver_emergency_signal", VCPU_STAT(deliver_emergency_signal) },
        { "deliver_external_call", VCPU_STAT(deliver_external_call) },
        { "deliver_service_signal", VCPU_STAT(deliver_service_signal) },
        { "deliver_virtio_interrupt", VCPU_STAT(deliver_virtio_interrupt) },
        { "deliver_stop_signal", VCPU_STAT(deliver_stop_signal) },
        { "deliver_prefix_signal", VCPU_STAT(deliver_prefix_signal) },
        { "deliver_restart_signal", VCPU_STAT(deliver_restart_signal) },
        { "deliver_program_interruption", VCPU_STAT(deliver_program_int) },
        { "exit_wait_state", VCPU_STAT(exit_wait_state) },
        { "instruction_pfmf", VCPU_STAT(instruction_pfmf) },
        { "instruction_stidp", VCPU_STAT(instruction_stidp) },
        { "instruction_spx", VCPU_STAT(instruction_spx) },
        { "instruction_stpx", VCPU_STAT(instruction_stpx) },
        { "instruction_stap", VCPU_STAT(instruction_stap) },
        { "instruction_storage_key", VCPU_STAT(instruction_storage_key) },
        { "instruction_stsch", VCPU_STAT(instruction_stsch) },
        { "instruction_chsc", VCPU_STAT(instruction_chsc) },
        { "instruction_stsi", VCPU_STAT(instruction_stsi) },
        { "instruction_stfl", VCPU_STAT(instruction_stfl) },
        { "instruction_tprot", VCPU_STAT(instruction_tprot) },
        { "instruction_sigp_sense", VCPU_STAT(instruction_sigp_sense) },
        { "instruction_sigp_sense_running", VCPU_STAT(instruction_sigp_sense_running) },
        { "instruction_sigp_external_call", VCPU_STAT(instruction_sigp_external_call) },
        { "instruction_sigp_emergency", VCPU_STAT(instruction_sigp_emergency) },
        { "instruction_sigp_stop", VCPU_STAT(instruction_sigp_stop) },
        { "instruction_sigp_set_arch", VCPU_STAT(instruction_sigp_arch) },
        { "instruction_sigp_set_prefix", VCPU_STAT(instruction_sigp_prefix) },
        { "instruction_sigp_restart", VCPU_STAT(instruction_sigp_restart) },
        { "diagnose_10", VCPU_STAT(diagnose_10) },
        { "diagnose_44", VCPU_STAT(diagnose_44) },
        { "diagnose_9c", VCPU_STAT(diagnose_9c) },
        { NULL }
};

unsigned long *vfacilities;
static struct gmap_notifier gmap_notifier;

/* test availability of vfacility */
static inline int test_vfacility(unsigned long nr)
{
        return __test_facility(nr, (void *) vfacilities);
}

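/*
 * Annotation (not in the upstream comment): facility numbers follow the
 * STFLE bit numbering, which is MSB first - facility nr lives in byte
 * nr / 8 of the list, under the mask 0x80 >> (nr % 8). For example,
 * kvm_arch_vcpu_setup() below uses test_vfacility(50) && test_vfacility(73)
 * to decide whether the transactional-execution ECB bit may be set.
 */
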
/* Section: not file related */
int kvm_arch_hardware_enable(void *garbage)
{
        /* every s390 is virtualization enabled ;-) */
        return 0;
}

void kvm_arch_hardware_disable(void *garbage)
{
}

static void kvm_gmap_notifier(struct gmap *gmap, unsigned long address);

int kvm_arch_hardware_setup(void)
{
        gmap_notifier.notifier_call = kvm_gmap_notifier;
        gmap_register_ipte_notifier(&gmap_notifier);
        return 0;
}

void kvm_arch_hardware_unsetup(void)
{
        gmap_unregister_ipte_notifier(&gmap_notifier);
}

void kvm_arch_check_processor_compat(void *rtn)
{
}

int kvm_arch_init(void *opaque)
{
        return 0;
}

void kvm_arch_exit(void)
{
}

/* Section: device related */
long kvm_arch_dev_ioctl(struct file *filp,
                        unsigned int ioctl, unsigned long arg)
{
        if (ioctl == KVM_S390_ENABLE_SIE)
                return s390_enable_sie();
        return -EINVAL;
}

int kvm_dev_ioctl_check_extension(long ext)
{
        int r;

        switch (ext) {
        case KVM_CAP_S390_PSW:
        case KVM_CAP_S390_GMAP:
        case KVM_CAP_SYNC_MMU:
#ifdef CONFIG_KVM_S390_UCONTROL
        case KVM_CAP_S390_UCONTROL:
#endif
        case KVM_CAP_ASYNC_PF:
        case KVM_CAP_SYNC_REGS:
        case KVM_CAP_ONE_REG:
        case KVM_CAP_ENABLE_CAP:
        case KVM_CAP_S390_CSS_SUPPORT:
        case KVM_CAP_IOEVENTFD:
        case KVM_CAP_DEVICE_CTRL:
                r = 1;
                break;
        case KVM_CAP_NR_VCPUS:
        case KVM_CAP_MAX_VCPUS:
                r = KVM_MAX_VCPUS;
                break;
        case KVM_CAP_NR_MEMSLOTS:
                r = KVM_USER_MEM_SLOTS;
                break;
        case KVM_CAP_S390_COW:
                r = MACHINE_HAS_ESOP;
                break;
        default:
                r = 0;
        }
        return r;
}

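/*
 * Usage sketch (illustrative, not part of this file): userspace typically
 * probes these capabilities on the /dev/kvm fd before relying on them,
 * roughly:
 *
 *      int kvm_fd = open("/dev/kvm", O_RDWR);
 *      if (ioctl(kvm_fd, KVM_CHECK_EXTENSION, KVM_CAP_ASYNC_PF) > 0)
 *              ... async page faults are available ...
 */
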
/* Section: vm related */
/*
 * Get (and clear) the dirty memory log for a memory slot.
 */
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
                               struct kvm_dirty_log *log)
{
        return 0;
}

long kvm_arch_vm_ioctl(struct file *filp,
                       unsigned int ioctl, unsigned long arg)
{
        struct kvm *kvm = filp->private_data;
        void __user *argp = (void __user *)arg;
        int r;

        switch (ioctl) {
        case KVM_S390_INTERRUPT: {
                struct kvm_s390_interrupt s390int;

                r = -EFAULT;
                if (copy_from_user(&s390int, argp, sizeof(s390int)))
                        break;
                r = kvm_s390_inject_vm(kvm, &s390int);
                break;
        }
        default:
                r = -ENOTTY;
        }

        return r;
}

int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
{
        int rc;
        char debug_name[16];

        rc = -EINVAL;
#ifdef CONFIG_KVM_S390_UCONTROL
        if (type & ~KVM_VM_S390_UCONTROL)
                goto out_err;
        if ((type & KVM_VM_S390_UCONTROL) && (!capable(CAP_SYS_ADMIN)))
                goto out_err;
#else
        if (type)
                goto out_err;
#endif

        rc = s390_enable_sie();
        if (rc)
                goto out_err;

        rc = -ENOMEM;

        kvm->arch.sca = (struct sca_block *) get_zeroed_page(GFP_KERNEL);
        if (!kvm->arch.sca)
                goto out_err;

        sprintf(debug_name, "kvm-%u", current->pid);

        kvm->arch.dbf = debug_register(debug_name, 8, 2, 8 * sizeof(long));
        if (!kvm->arch.dbf)
                goto out_nodbf;

        spin_lock_init(&kvm->arch.float_int.lock);
        INIT_LIST_HEAD(&kvm->arch.float_int.list);

        debug_register_view(kvm->arch.dbf, &debug_sprintf_view);
        VM_EVENT(kvm, 3, "%s", "vm created");

        if (type & KVM_VM_S390_UCONTROL) {
                kvm->arch.gmap = NULL;
        } else {
                kvm->arch.gmap = gmap_alloc(current->mm);
                if (!kvm->arch.gmap)
                        goto out_nogmap;
                kvm->arch.gmap->private = kvm;
                kvm->arch.gmap->pfault_enabled = 0;
        }

        kvm->arch.css_support = 0;

        return 0;
out_nogmap:
        debug_unregister(kvm->arch.dbf);
out_nodbf:
        free_page((unsigned long)(kvm->arch.sca));
out_err:
        return rc;
}

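/*
 * Sketch (illustrative, not part of this file): the ucontrol path above is
 * taken when userspace creates the VM with the s390-specific type flag,
 * roughly
 *
 *      vm_fd = ioctl(kvm_fd, KVM_CREATE_VM, KVM_VM_S390_UCONTROL);
 *
 * which additionally requires CAP_SYS_ADMIN; a plain type of 0 yields a
 * normal VM with a kernel-managed gmap.
 */
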
void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{
        VCPU_EVENT(vcpu, 3, "%s", "free cpu");
        trace_kvm_s390_destroy_vcpu(vcpu->vcpu_id);
        kvm_clear_async_pf_completion_queue(vcpu);
        if (!kvm_is_ucontrol(vcpu->kvm)) {
                clear_bit(63 - vcpu->vcpu_id,
                          (unsigned long *) &vcpu->kvm->arch.sca->mcn);
                if (vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda ==
                    (__u64) vcpu->arch.sie_block)
                        vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda = 0;
        }
        smp_mb();

        if (kvm_is_ucontrol(vcpu->kvm))
                gmap_free(vcpu->arch.gmap);

        free_page((unsigned long)(vcpu->arch.sie_block));
        kvm_vcpu_uninit(vcpu);
        kmem_cache_free(kvm_vcpu_cache, vcpu);
}

static void kvm_free_vcpus(struct kvm *kvm)
{
        unsigned int i;
        struct kvm_vcpu *vcpu;

        kvm_for_each_vcpu(i, vcpu, kvm)
                kvm_arch_vcpu_destroy(vcpu);

        mutex_lock(&kvm->lock);
        for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
                kvm->vcpus[i] = NULL;

        atomic_set(&kvm->online_vcpus, 0);
        mutex_unlock(&kvm->lock);
}

void kvm_arch_sync_events(struct kvm *kvm)
{
}

void kvm_arch_destroy_vm(struct kvm *kvm)
{
        kvm_free_vcpus(kvm);
        free_page((unsigned long)(kvm->arch.sca));
        debug_unregister(kvm->arch.dbf);
        if (!kvm_is_ucontrol(kvm))
                gmap_free(kvm->arch.gmap);
}

/* Section: vcpu related */
int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
{
        vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
        kvm_clear_async_pf_completion_queue(vcpu);
        if (kvm_is_ucontrol(vcpu->kvm)) {
                vcpu->arch.gmap = gmap_alloc(current->mm);
                if (!vcpu->arch.gmap)
                        return -ENOMEM;
                vcpu->arch.gmap->private = vcpu->kvm;
                return 0;
        }

        vcpu->arch.gmap = vcpu->kvm->arch.gmap;
        vcpu->run->kvm_valid_regs = KVM_SYNC_PREFIX |
                                    KVM_SYNC_GPRS |
                                    KVM_SYNC_ACRS |
                                    KVM_SYNC_CRS;
        return 0;
}

void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
{
        /* Nothing to do */
}

void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
        save_fp_ctl(&vcpu->arch.host_fpregs.fpc);
        save_fp_regs(vcpu->arch.host_fpregs.fprs);
        save_access_regs(vcpu->arch.host_acrs);
        restore_fp_ctl(&vcpu->arch.guest_fpregs.fpc);
        restore_fp_regs(vcpu->arch.guest_fpregs.fprs);
        restore_access_regs(vcpu->run->s.regs.acrs);
        gmap_enable(vcpu->arch.gmap);
        atomic_set_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
}

void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
        atomic_clear_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
        gmap_disable(vcpu->arch.gmap);
        save_fp_ctl(&vcpu->arch.guest_fpregs.fpc);
        save_fp_regs(vcpu->arch.guest_fpregs.fprs);
        save_access_regs(vcpu->run->s.regs.acrs);
        restore_fp_ctl(&vcpu->arch.host_fpregs.fpc);
        restore_fp_regs(vcpu->arch.host_fpregs.fprs);
        restore_access_regs(vcpu->arch.host_acrs);
}

static void kvm_s390_vcpu_initial_reset(struct kvm_vcpu *vcpu)
{
        /* this equals initial cpu reset in pop, but we don't switch to ESA */
        vcpu->arch.sie_block->gpsw.mask = 0UL;
        vcpu->arch.sie_block->gpsw.addr = 0UL;
        kvm_s390_set_prefix(vcpu, 0);
        vcpu->arch.sie_block->cputm = 0UL;
        vcpu->arch.sie_block->ckc = 0UL;
        vcpu->arch.sie_block->todpr = 0;
        memset(vcpu->arch.sie_block->gcr, 0, 16 * sizeof(__u64));
        vcpu->arch.sie_block->gcr[0] = 0xE0UL;
        vcpu->arch.sie_block->gcr[14] = 0xC2000000UL;
        vcpu->arch.guest_fpregs.fpc = 0;
        asm volatile("lfpc %0" : : "Q" (vcpu->arch.guest_fpregs.fpc));
        vcpu->arch.sie_block->gbea = 1;
        vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
        kvm_clear_async_pf_completion_queue(vcpu);
        atomic_set_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);
}

int kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
{
        return 0;
}

int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
{
        atomic_set(&vcpu->arch.sie_block->cpuflags, CPUSTAT_ZARCH |
                                                    CPUSTAT_SM |
                                                    CPUSTAT_STOPPED |
                                                    CPUSTAT_GED);
        vcpu->arch.sie_block->ecb   = 6;
        if (test_vfacility(50) && test_vfacility(73))
                vcpu->arch.sie_block->ecb |= 0x10;

        vcpu->arch.sie_block->ecb2  = 8;
        vcpu->arch.sie_block->eca   = 0xC1002001U;
        vcpu->arch.sie_block->fac   = (int) (long) vfacilities;
        hrtimer_init(&vcpu->arch.ckc_timer, CLOCK_REALTIME, HRTIMER_MODE_ABS);
        tasklet_init(&vcpu->arch.tasklet, kvm_s390_tasklet,
                     (unsigned long) vcpu);
        vcpu->arch.ckc_timer.function = kvm_s390_idle_wakeup;
        get_cpu_id(&vcpu->arch.cpu_id);
        vcpu->arch.cpu_id.version = 0xff;
        return 0;
}

struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
                                      unsigned int id)
{
        struct kvm_vcpu *vcpu;
        struct sie_page *sie_page;
        int rc = -EINVAL;

        if (id >= KVM_MAX_VCPUS)
                goto out;

        rc = -ENOMEM;

        vcpu = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
        if (!vcpu)
                goto out;

        sie_page = (struct sie_page *) get_zeroed_page(GFP_KERNEL);
        if (!sie_page)
                goto out_free_cpu;

        vcpu->arch.sie_block = &sie_page->sie_block;
        vcpu->arch.sie_block->itdba = (unsigned long) &sie_page->itdb;

        vcpu->arch.sie_block->icpua = id;
        if (!kvm_is_ucontrol(kvm)) {
                if (!kvm->arch.sca) {
                        WARN_ON_ONCE(1);
                        goto out_free_cpu;
                }
                if (!kvm->arch.sca->cpu[id].sda)
                        kvm->arch.sca->cpu[id].sda =
                                (__u64) vcpu->arch.sie_block;
                vcpu->arch.sie_block->scaoh =
                        (__u32)(((__u64)kvm->arch.sca) >> 32);
                vcpu->arch.sie_block->scaol = (__u32)(__u64)kvm->arch.sca;
                set_bit(63 - id, (unsigned long *) &kvm->arch.sca->mcn);
        }

        spin_lock_init(&vcpu->arch.local_int.lock);
        INIT_LIST_HEAD(&vcpu->arch.local_int.list);
        vcpu->arch.local_int.float_int = &kvm->arch.float_int;
        spin_lock(&kvm->arch.float_int.lock);
        kvm->arch.float_int.local_int[id] = &vcpu->arch.local_int;
        vcpu->arch.local_int.wq = &vcpu->wq;
        vcpu->arch.local_int.cpuflags = &vcpu->arch.sie_block->cpuflags;
        spin_unlock(&kvm->arch.float_int.lock);

        rc = kvm_vcpu_init(vcpu, kvm, id);
        if (rc)
                goto out_free_sie_block;
        VM_EVENT(kvm, 3, "create cpu %d at %p, sie block at %p", id, vcpu,
                 vcpu->arch.sie_block);
        trace_kvm_s390_create_vcpu(id, vcpu, vcpu->arch.sie_block);

        return vcpu;
out_free_sie_block:
        free_page((unsigned long)(vcpu->arch.sie_block));
out_free_cpu:
        kmem_cache_free(kvm_vcpu_cache, vcpu);
out:
        return ERR_PTR(rc);
}

int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
{
        /* kvm common code refers to this, but never calls it */
        BUG();
        return 0;
}

void s390_vcpu_block(struct kvm_vcpu *vcpu)
{
        atomic_set_mask(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
}

void s390_vcpu_unblock(struct kvm_vcpu *vcpu)
{
        atomic_clear_mask(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
}

/*
 * Kick a guest cpu out of SIE and wait until SIE is not running.
 * If the CPU is not running (e.g. waiting as idle) the function will
 * return immediately.
 */
void exit_sie(struct kvm_vcpu *vcpu)
{
        atomic_set_mask(CPUSTAT_STOP_INT, &vcpu->arch.sie_block->cpuflags);
        while (vcpu->arch.sie_block->prog0c & PROG_IN_SIE)
                cpu_relax();
}

/* Kick a guest cpu out of SIE and prevent SIE-reentry */
void exit_sie_sync(struct kvm_vcpu *vcpu)
{
        s390_vcpu_block(vcpu);
        exit_sie(vcpu);
}

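/*
 * Ordering note: exit_sie_sync() relies on s390_vcpu_block() setting
 * PROG_BLOCK_SIE before exit_sie() raises the stop request, so the CPU
 * cannot slip back into SIE between the kick and the caller's update.
 */
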
static void kvm_gmap_notifier(struct gmap *gmap, unsigned long address)
{
        int i;
        struct kvm *kvm = gmap->private;
        struct kvm_vcpu *vcpu;

        kvm_for_each_vcpu(i, vcpu, kvm) {
                /* match against both prefix pages */
                if (vcpu->arch.sie_block->prefix == (address & ~0x1000UL)) {
                        VCPU_EVENT(vcpu, 2, "gmap notifier for %lx", address);
                        kvm_make_request(KVM_REQ_MMU_RELOAD, vcpu);
                        exit_sie_sync(vcpu);
                }
        }
}

int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
{
        /* kvm common code refers to this, but never calls it */
        BUG();
        return 0;
}

static int kvm_arch_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu,
                                           struct kvm_one_reg *reg)
{
        int r = -EINVAL;

        switch (reg->id) {
        case KVM_REG_S390_TODPR:
                r = put_user(vcpu->arch.sie_block->todpr,
                             (u32 __user *)reg->addr);
                break;
        case KVM_REG_S390_EPOCHDIFF:
                r = put_user(vcpu->arch.sie_block->epoch,
                             (u64 __user *)reg->addr);
                break;
        case KVM_REG_S390_CPU_TIMER:
                r = put_user(vcpu->arch.sie_block->cputm,
                             (u64 __user *)reg->addr);
                break;
        case KVM_REG_S390_CLOCK_COMP:
                r = put_user(vcpu->arch.sie_block->ckc,
                             (u64 __user *)reg->addr);
                break;
        default:
                break;
        }

        return r;
}

static int kvm_arch_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu,
                                           struct kvm_one_reg *reg)
{
        int r = -EINVAL;

        switch (reg->id) {
        case KVM_REG_S390_TODPR:
                r = get_user(vcpu->arch.sie_block->todpr,
                             (u32 __user *)reg->addr);
                break;
        case KVM_REG_S390_EPOCHDIFF:
                r = get_user(vcpu->arch.sie_block->epoch,
                             (u64 __user *)reg->addr);
                break;
        case KVM_REG_S390_CPU_TIMER:
                r = get_user(vcpu->arch.sie_block->cputm,
                             (u64 __user *)reg->addr);
                break;
        case KVM_REG_S390_CLOCK_COMP:
                r = get_user(vcpu->arch.sie_block->ckc,
                             (u64 __user *)reg->addr);
                break;
        default:
                break;
        }

        return r;
}

static int kvm_arch_vcpu_ioctl_initial_reset(struct kvm_vcpu *vcpu)
{
        kvm_s390_vcpu_initial_reset(vcpu);
        return 0;
}

int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
        memcpy(&vcpu->run->s.regs.gprs, &regs->gprs, sizeof(regs->gprs));
        return 0;
}

int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
        memcpy(&regs->gprs, &vcpu->run->s.regs.gprs, sizeof(regs->gprs));
        return 0;
}

int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
                                  struct kvm_sregs *sregs)
{
        memcpy(&vcpu->run->s.regs.acrs, &sregs->acrs, sizeof(sregs->acrs));
        memcpy(&vcpu->arch.sie_block->gcr, &sregs->crs, sizeof(sregs->crs));
        restore_access_regs(vcpu->run->s.regs.acrs);
        return 0;
}

int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
                                  struct kvm_sregs *sregs)
{
        memcpy(&sregs->acrs, &vcpu->run->s.regs.acrs, sizeof(sregs->acrs));
        memcpy(&sregs->crs, &vcpu->arch.sie_block->gcr, sizeof(sregs->crs));
        return 0;
}

int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
        if (test_fp_ctl(fpu->fpc))
                return -EINVAL;
        memcpy(&vcpu->arch.guest_fpregs.fprs, &fpu->fprs, sizeof(fpu->fprs));
        vcpu->arch.guest_fpregs.fpc = fpu->fpc;
        restore_fp_ctl(&vcpu->arch.guest_fpregs.fpc);
        restore_fp_regs(vcpu->arch.guest_fpregs.fprs);
        return 0;
}

int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
        memcpy(&fpu->fprs, &vcpu->arch.guest_fpregs.fprs, sizeof(fpu->fprs));
        fpu->fpc = vcpu->arch.guest_fpregs.fpc;
        return 0;
}

static int kvm_arch_vcpu_ioctl_set_initial_psw(struct kvm_vcpu *vcpu, psw_t psw)
{
        int rc = 0;

        if (!(atomic_read(&vcpu->arch.sie_block->cpuflags) & CPUSTAT_STOPPED))
                rc = -EBUSY;
        else {
                vcpu->run->psw_mask = psw.mask;
                vcpu->run->psw_addr = psw.addr;
        }
        return rc;
}

int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
                                  struct kvm_translation *tr)
{
        return -EINVAL; /* not implemented yet */
}

int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
                                        struct kvm_guest_debug *dbg)
{
        return -EINVAL; /* not implemented yet */
}

int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
                                    struct kvm_mp_state *mp_state)
{
        return -EINVAL; /* not implemented yet */
}

int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
                                    struct kvm_mp_state *mp_state)
{
        return -EINVAL; /* not implemented yet */
}

static int kvm_s390_handle_requests(struct kvm_vcpu *vcpu)
{
        /*
         * We use MMU_RELOAD just to re-arm the ipte notifier for the
         * guest prefix page. gmap_ipte_notify will wait on the ptl lock.
         * This ensures that the ipte instruction for this request has
         * already finished. We might race against a second unmapper that
         * wants to set the blocking bit. Let's just retry the request loop.
         */
        while (kvm_check_request(KVM_REQ_MMU_RELOAD, vcpu)) {
                int rc;
                rc = gmap_ipte_notify(vcpu->arch.gmap,
                                      vcpu->arch.sie_block->prefix,
                                      PAGE_SIZE * 2);
                if (rc)
                        return rc;
                s390_vcpu_unblock(vcpu);
        }
        return 0;
}

static long kvm_arch_fault_in_sync(struct kvm_vcpu *vcpu)
{
        long rc;
        hva_t fault = gmap_fault(current->thread.gmap_addr, vcpu->arch.gmap);
        struct mm_struct *mm = current->mm;
        down_read(&mm->mmap_sem);
        rc = get_user_pages(current, mm, fault, 1, 1, 0, NULL, NULL);
        up_read(&mm->mmap_sem);
        return rc;
}

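/*
 * Note: the get_user_pages() call above resolves exactly one page
 * (nr_pages = 1) with write = 1 and force = 0, i.e. it faults the page
 * in synchronously instead of going through the async pfault machinery.
 */
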
static void __kvm_inject_pfault_token(struct kvm_vcpu *vcpu, bool start_token,
                                      unsigned long token)
{
        struct kvm_s390_interrupt inti;
        inti.parm64 = token;

        if (start_token) {
                inti.type = KVM_S390_INT_PFAULT_INIT;
                WARN_ON_ONCE(kvm_s390_inject_vcpu(vcpu, &inti));
        } else {
                inti.type = KVM_S390_INT_PFAULT_DONE;
                WARN_ON_ONCE(kvm_s390_inject_vm(vcpu->kvm, &inti));
        }
}

void kvm_arch_async_page_not_present(struct kvm_vcpu *vcpu,
                                     struct kvm_async_pf *work)
{
        trace_kvm_s390_pfault_init(vcpu, work->arch.pfault_token);
        __kvm_inject_pfault_token(vcpu, true, work->arch.pfault_token);
}

void kvm_arch_async_page_present(struct kvm_vcpu *vcpu,
                                 struct kvm_async_pf *work)
{
        trace_kvm_s390_pfault_done(vcpu, work->arch.pfault_token);
        __kvm_inject_pfault_token(vcpu, false, work->arch.pfault_token);
}

void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu,
                               struct kvm_async_pf *work)
{
        /* s390 will always inject the page directly */
}

bool kvm_arch_can_inject_async_page_present(struct kvm_vcpu *vcpu)
{
        /*
         * s390 will always inject the page directly,
         * but we still want check_async_completion to clean up
         */
        return true;
}

static int kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu)
{
        hva_t hva;
        struct kvm_arch_async_pf arch;
        int rc;

        if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
                return 0;
        if ((vcpu->arch.sie_block->gpsw.mask & vcpu->arch.pfault_select) !=
            vcpu->arch.pfault_compare)
                return 0;
        if (psw_extint_disabled(vcpu))
                return 0;
        if (kvm_cpu_has_interrupt(vcpu))
                return 0;
        if (!(vcpu->arch.sie_block->gcr[0] & 0x200ul))
                return 0;
        if (!vcpu->arch.gmap->pfault_enabled)
                return 0;

        hva = gmap_fault(current->thread.gmap_addr, vcpu->arch.gmap);
        if (copy_from_guest(vcpu, &arch.pfault_token, vcpu->arch.pfault_token, 8))
                return 0;

        rc = kvm_setup_async_pf(vcpu, current->thread.gmap_addr, hva, &arch);
        return rc;
}

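/*
 * Summary (annotation): an async pfault is only set up when the guest has
 * established a pfault token, the current guest PSW matches the
 * guest-supplied select/compare mask, external interrupts are enabled and
 * none is already pending, the relevant external-interrupt subclass bit in
 * CR0 (0x200ul) is set, and pfault handling was enabled on the gmap.
 * Otherwise the fault is resolved synchronously via
 * kvm_arch_fault_in_sync().
 */
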
static int vcpu_pre_run(struct kvm_vcpu *vcpu)
{
        int rc, cpuflags;

        /*
         * On s390 notifications for arriving pages will be delivered directly
         * to the guest but the housekeeping for completed pfaults is
         * handled outside the worker.
         */
        kvm_check_async_pf_completion(vcpu);

        memcpy(&vcpu->arch.sie_block->gg14, &vcpu->run->s.regs.gprs[14], 16);

        if (need_resched())
                schedule();

        if (test_thread_flag(TIF_MCCK_PENDING))
                s390_handle_mcck();

        if (!kvm_is_ucontrol(vcpu->kvm))
                kvm_s390_deliver_pending_interrupts(vcpu);

        rc = kvm_s390_handle_requests(vcpu);
        if (rc)
                return rc;

        vcpu->arch.sie_block->icptcode = 0;
        cpuflags = atomic_read(&vcpu->arch.sie_block->cpuflags);
        VCPU_EVENT(vcpu, 6, "entering sie flags %x", cpuflags);
        trace_kvm_s390_sie_enter(vcpu, cpuflags);

        return 0;
}

static int vcpu_post_run(struct kvm_vcpu *vcpu, int exit_reason)
{
        int rc = -1;

        VCPU_EVENT(vcpu, 6, "exit sie icptcode %d",
                   vcpu->arch.sie_block->icptcode);
        trace_kvm_s390_sie_exit(vcpu, vcpu->arch.sie_block->icptcode);

        if (exit_reason >= 0) {
                rc = 0;
        } else if (kvm_is_ucontrol(vcpu->kvm)) {
                vcpu->run->exit_reason = KVM_EXIT_S390_UCONTROL;
                vcpu->run->s390_ucontrol.trans_exc_code =
                                                current->thread.gmap_addr;
                vcpu->run->s390_ucontrol.pgm_code = 0x10;
                rc = -EREMOTE;

        } else if (current->thread.gmap_pfault) {
                trace_kvm_s390_major_guest_pfault(vcpu);
                current->thread.gmap_pfault = 0;
                if (kvm_arch_setup_async_pf(vcpu) ||
                    (kvm_arch_fault_in_sync(vcpu) >= 0))
                        rc = 0;
        }

        if (rc == -1) {
                VCPU_EVENT(vcpu, 3, "%s", "fault in sie instruction");
                trace_kvm_s390_sie_fault(vcpu);
                rc = kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
        }

        memcpy(&vcpu->run->s.regs.gprs[14], &vcpu->arch.sie_block->gg14, 16);

        if (rc == 0) {
                if (kvm_is_ucontrol(vcpu->kvm))
                        rc = -EOPNOTSUPP;
                else
                        rc = kvm_handle_sie_intercept(vcpu);
        }

        return rc;
}

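/*
 * Note: sie64a() reports a non-negative intercept indication when SIE
 * exited normally; a negative value means the host hit a fault while the
 * guest was running, which vcpu_post_run() turns into either a ucontrol
 * exit, an async/sync pfault resolution, or an addressing exception.
 */
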
static int __vcpu_run(struct kvm_vcpu *vcpu)
{
        int rc, exit_reason;

        /*
         * We try to hold kvm->srcu during most of vcpu_run (except when run-
         * ning the guest), so that memslots (and other stuff) are protected
         */
        vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);

        do {
                rc = vcpu_pre_run(vcpu);
                if (rc)
                        break;

                srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
                /*
                 * As PF_VCPU will be used in the fault handler, there
                 * should be no uaccess between guest_enter and guest_exit.
                 */
                preempt_disable();
                kvm_guest_enter();
                preempt_enable();
                exit_reason = sie64a(vcpu->arch.sie_block,
                                     vcpu->run->s.regs.gprs);
                kvm_guest_exit();
                vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);

                rc = vcpu_post_run(vcpu, exit_reason);
        } while (!signal_pending(current) && !rc);

        srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
        return rc;
}

int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
        int rc;
        sigset_t sigsaved;

        if (vcpu->sigset_active)
                sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);

        atomic_clear_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);

        BUG_ON(vcpu->kvm->arch.float_int.local_int[vcpu->vcpu_id] == NULL);

        switch (kvm_run->exit_reason) {
        case KVM_EXIT_S390_SIEIC:
        case KVM_EXIT_UNKNOWN:
        case KVM_EXIT_INTR:
        case KVM_EXIT_S390_RESET:
        case KVM_EXIT_S390_UCONTROL:
        case KVM_EXIT_S390_TSCH:
                break;
        default:
                BUG();
        }

        vcpu->arch.sie_block->gpsw.mask = kvm_run->psw_mask;
        vcpu->arch.sie_block->gpsw.addr = kvm_run->psw_addr;
        if (kvm_run->kvm_dirty_regs & KVM_SYNC_PREFIX) {
                kvm_run->kvm_dirty_regs &= ~KVM_SYNC_PREFIX;
                kvm_s390_set_prefix(vcpu, kvm_run->s.regs.prefix);
        }
        if (kvm_run->kvm_dirty_regs & KVM_SYNC_CRS) {
                kvm_run->kvm_dirty_regs &= ~KVM_SYNC_CRS;
                memcpy(&vcpu->arch.sie_block->gcr, &kvm_run->s.regs.crs, 128);
                kvm_s390_set_prefix(vcpu, kvm_run->s.regs.prefix);
        }

        might_fault();
        rc = __vcpu_run(vcpu);

        if (signal_pending(current) && !rc) {
                kvm_run->exit_reason = KVM_EXIT_INTR;
                rc = -EINTR;
        }

        if (rc == -EOPNOTSUPP) {
                /* intercept cannot be handled in-kernel, prepare kvm-run */
                kvm_run->exit_reason         = KVM_EXIT_S390_SIEIC;
                kvm_run->s390_sieic.icptcode = vcpu->arch.sie_block->icptcode;
                kvm_run->s390_sieic.ipa      = vcpu->arch.sie_block->ipa;
                kvm_run->s390_sieic.ipb      = vcpu->arch.sie_block->ipb;
                rc = 0;
        }

        if (rc == -EREMOTE) {
                /* intercept was handled, but userspace support is needed;
                 * kvm_run has been prepared by the handler */
                rc = 0;
        }

        kvm_run->psw_mask = vcpu->arch.sie_block->gpsw.mask;
        kvm_run->psw_addr = vcpu->arch.sie_block->gpsw.addr;
        kvm_run->s.regs.prefix = vcpu->arch.sie_block->prefix;
        memcpy(&kvm_run->s.regs.crs, &vcpu->arch.sie_block->gcr, 128);

        if (vcpu->sigset_active)
                sigprocmask(SIG_SETMASK, &sigsaved, NULL);

        vcpu->stat.exit_userspace++;
        return rc;
}

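/*
 * Userspace sketch (illustrative, not part of this file; vcpu_fd and
 * mmap_size are assumed to come from KVM_CREATE_VCPU and
 * KVM_GET_VCPU_MMAP_SIZE):
 *
 *      struct kvm_run *run = mmap(NULL, mmap_size, PROT_READ | PROT_WRITE,
 *                                 MAP_SHARED, vcpu_fd, 0);
 *      while (ioctl(vcpu_fd, KVM_RUN, 0) >= 0) {
 *              if (run->exit_reason == KVM_EXIT_S390_SIEIC)
 *                      break;  (decode run->s390_sieic in userspace)
 *      }
 */
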
static int __guestcopy(struct kvm_vcpu *vcpu, u64 guestdest, void *from,
                       unsigned long n, int prefix)
{
        if (prefix)
                return copy_to_guest(vcpu, guestdest, from, n);
        else
                return copy_to_guest_absolute(vcpu, guestdest, from, n);
}

/*
 * store status at address
 * we have two special cases:
 * KVM_S390_STORE_STATUS_NOADDR: -> 0x1200 on 64 bit
 * KVM_S390_STORE_STATUS_PREFIXED: -> prefix
 */
int kvm_s390_store_status_unloaded(struct kvm_vcpu *vcpu, unsigned long addr)
{
        unsigned char archmode = 1;
        int prefix;
        u64 clkcomp;

        if (addr == KVM_S390_STORE_STATUS_NOADDR) {
                if (copy_to_guest_absolute(vcpu, 163ul, &archmode, 1))
                        return -EFAULT;
                addr = SAVE_AREA_BASE;
                prefix = 0;
        } else if (addr == KVM_S390_STORE_STATUS_PREFIXED) {
                if (copy_to_guest(vcpu, 163ul, &archmode, 1))
                        return -EFAULT;
                addr = SAVE_AREA_BASE;
                prefix = 1;
        } else
                prefix = 0;

        if (__guestcopy(vcpu, addr + offsetof(struct save_area, fp_regs),
                        vcpu->arch.guest_fpregs.fprs, 128, prefix))
                return -EFAULT;

        if (__guestcopy(vcpu, addr + offsetof(struct save_area, gp_regs),
                        vcpu->run->s.regs.gprs, 128, prefix))
                return -EFAULT;

        if (__guestcopy(vcpu, addr + offsetof(struct save_area, psw),
                        &vcpu->arch.sie_block->gpsw, 16, prefix))
                return -EFAULT;

        if (__guestcopy(vcpu, addr + offsetof(struct save_area, pref_reg),
                        &vcpu->arch.sie_block->prefix, 4, prefix))
                return -EFAULT;

        if (__guestcopy(vcpu,
                        addr + offsetof(struct save_area, fp_ctrl_reg),
                        &vcpu->arch.guest_fpregs.fpc, 4, prefix))
                return -EFAULT;

        if (__guestcopy(vcpu, addr + offsetof(struct save_area, tod_reg),
                        &vcpu->arch.sie_block->todpr, 4, prefix))
                return -EFAULT;

        if (__guestcopy(vcpu, addr + offsetof(struct save_area, timer),
                        &vcpu->arch.sie_block->cputm, 8, prefix))
                return -EFAULT;

        clkcomp = vcpu->arch.sie_block->ckc >> 8;
        if (__guestcopy(vcpu, addr + offsetof(struct save_area, clk_cmp),
                        &clkcomp, 8, prefix))
                return -EFAULT;

        if (__guestcopy(vcpu, addr + offsetof(struct save_area, acc_regs),
                        &vcpu->run->s.regs.acrs, 64, prefix))
                return -EFAULT;

        if (__guestcopy(vcpu,
                        addr + offsetof(struct save_area, ctrl_regs),
                        &vcpu->arch.sie_block->gcr, 128, prefix))
                return -EFAULT;
        return 0;
}

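/*
 * Layout note (annotation): the fields above land at their offsets within
 * struct save_area; the clock comparator is shifted right by 8 beforehand
 * so that the stored value matches the architected store-status format.
 */
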
int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
{
        /*
         * The guest FPRS and ACRS are in the host FPRS/ACRS due to the lazy
         * copying in vcpu load/put. Let's update our copies before we save
         * them into the save area.
         */
        save_fp_ctl(&vcpu->arch.guest_fpregs.fpc);
        save_fp_regs(vcpu->arch.guest_fpregs.fprs);
        save_access_regs(vcpu->run->s.regs.acrs);

        return kvm_s390_store_status_unloaded(vcpu, addr);
}

static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
                                     struct kvm_enable_cap *cap)
{
        int r;

        if (cap->flags)
                return -EINVAL;

        switch (cap->cap) {
        case KVM_CAP_S390_CSS_SUPPORT:
                if (!vcpu->kvm->arch.css_support) {
                        vcpu->kvm->arch.css_support = 1;
                        trace_kvm_s390_enable_css(vcpu->kvm);
                }
                r = 0;
                break;
        default:
                r = -EINVAL;
                break;
        }
        return r;
}

long kvm_arch_vcpu_ioctl(struct file *filp,
                         unsigned int ioctl, unsigned long arg)
{
        struct kvm_vcpu *vcpu = filp->private_data;
        void __user *argp = (void __user *)arg;
        int idx;
        long r;

        switch (ioctl) {
        case KVM_S390_INTERRUPT: {
                struct kvm_s390_interrupt s390int;

                r = -EFAULT;
                if (copy_from_user(&s390int, argp, sizeof(s390int)))
                        break;
                r = kvm_s390_inject_vcpu(vcpu, &s390int);
                break;
        }
        case KVM_S390_STORE_STATUS:
                idx = srcu_read_lock(&vcpu->kvm->srcu);
                r = kvm_s390_vcpu_store_status(vcpu, arg);
                srcu_read_unlock(&vcpu->kvm->srcu, idx);
                break;
        case KVM_S390_SET_INITIAL_PSW: {
                psw_t psw;

                r = -EFAULT;
                if (copy_from_user(&psw, argp, sizeof(psw)))
                        break;
                r = kvm_arch_vcpu_ioctl_set_initial_psw(vcpu, psw);
                break;
        }
        case KVM_S390_INITIAL_RESET:
                r = kvm_arch_vcpu_ioctl_initial_reset(vcpu);
                break;
        case KVM_SET_ONE_REG:
        case KVM_GET_ONE_REG: {
                struct kvm_one_reg reg;
                r = -EFAULT;
                if (copy_from_user(&reg, argp, sizeof(reg)))
                        break;
                if (ioctl == KVM_SET_ONE_REG)
                        r = kvm_arch_vcpu_ioctl_set_one_reg(vcpu, &reg);
                else
                        r = kvm_arch_vcpu_ioctl_get_one_reg(vcpu, &reg);
                break;
        }
#ifdef CONFIG_KVM_S390_UCONTROL
        case KVM_S390_UCAS_MAP: {
                struct kvm_s390_ucas_mapping ucasmap;

                if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
                        r = -EFAULT;
                        break;
                }

                if (!kvm_is_ucontrol(vcpu->kvm)) {
                        r = -EINVAL;
                        break;
                }

                r = gmap_map_segment(vcpu->arch.gmap, ucasmap.user_addr,
                                     ucasmap.vcpu_addr, ucasmap.length);
                break;
        }
        case KVM_S390_UCAS_UNMAP: {
                struct kvm_s390_ucas_mapping ucasmap;

                if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
                        r = -EFAULT;
                        break;
                }

                if (!kvm_is_ucontrol(vcpu->kvm)) {
                        r = -EINVAL;
                        break;
                }

                r = gmap_unmap_segment(vcpu->arch.gmap, ucasmap.vcpu_addr,
                                       ucasmap.length);
                break;
        }
#endif
        case KVM_S390_VCPU_FAULT: {
                r = gmap_fault(arg, vcpu->arch.gmap);
                if (!IS_ERR_VALUE(r))
                        r = 0;
                break;
        }
        case KVM_ENABLE_CAP:
        {
                struct kvm_enable_cap cap;
                r = -EFAULT;
                if (copy_from_user(&cap, argp, sizeof(cap)))
                        break;
                r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap);
                break;
        }
        default:
                r = -ENOTTY;
        }
        return r;
}

int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
{
#ifdef CONFIG_KVM_S390_UCONTROL
        if ((vmf->pgoff == KVM_S390_SIE_PAGE_OFFSET)
                 && (kvm_is_ucontrol(vcpu->kvm))) {
                vmf->page = virt_to_page(vcpu->arch.sie_block);
                get_page(vmf->page);
                return 0;
        }
#endif
        return VM_FAULT_SIGBUS;
}

void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free,
                           struct kvm_memory_slot *dont)
{
}

int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
                            unsigned long npages)
{
        return 0;
}

void kvm_arch_memslots_updated(struct kvm *kvm)
{
}

/* Section: memory related */
int kvm_arch_prepare_memory_region(struct kvm *kvm,
                                   struct kvm_memory_slot *memslot,
                                   struct kvm_userspace_memory_region *mem,
                                   enum kvm_mr_change change)
{
        /* A few sanity checks. We can have memory slots which have to be
           located/ended at a segment boundary (1MB). The memory in userland is
           ok to be fragmented into various different vmas. It is okay to mmap()
           and munmap() stuff in this slot after doing this call at any time */

        if (mem->userspace_addr & 0xffffful)
                return -EINVAL;

        if (mem->memory_size & 0xffffful)
                return -EINVAL;

        return 0;
}

void kvm_arch_commit_memory_region(struct kvm *kvm,
                                   struct kvm_userspace_memory_region *mem,
                                   const struct kvm_memory_slot *old,
                                   enum kvm_mr_change change)
{
        int rc;

        /* If the basics of the memslot do not change, we do not want
         * to update the gmap. Every update causes several unnecessary
         * segment translation exceptions. This is usually handled just
         * fine by the normal fault handler + gmap, but it will also
         * cause faults on the prefix page of running guest CPUs.
         */
        if (old->userspace_addr == mem->userspace_addr &&
            old->base_gfn * PAGE_SIZE == mem->guest_phys_addr &&
            old->npages * PAGE_SIZE == mem->memory_size)
                return;

        rc = gmap_map_segment(kvm->arch.gmap, mem->userspace_addr,
                              mem->guest_phys_addr, mem->memory_size);
        if (rc)
                printk(KERN_WARNING "kvm-s390: failed to commit memory region\n");
        return;
}

void kvm_arch_flush_shadow_all(struct kvm *kvm)
{
}

void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
                                   struct kvm_memory_slot *slot)
{
}

static int __init kvm_s390_init(void)
{
        int ret;
        ret = kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
        if (ret)
                return ret;

        /*
         * guests can ask for up to 255+1 double words, we need a full page
         * to hold the maximum amount of facilities. On the other hand, we
         * only set facilities that are known to work in KVM.
         */
        vfacilities = (unsigned long *) get_zeroed_page(GFP_KERNEL|GFP_DMA);
        if (!vfacilities) {
                kvm_exit();
                return -ENOMEM;
        }
        memcpy(vfacilities, S390_lowcore.stfle_fac_list, 16);
        vfacilities[0] &= 0xff82fff3f4fc2000UL;
        vfacilities[1] &= 0x005c000000000000UL;
        return 0;
}

CB
1293 kvm_exit();
1294 return -ENOMEM;
1295 }
78c4b59f 1296 memcpy(vfacilities, S390_lowcore.stfle_fac_list, 16);
d208c79d 1297 vfacilities[0] &= 0xff82fff3f4fc2000UL;
7feb6bb8 1298 vfacilities[1] &= 0x005c000000000000UL;
ef50f7ac 1299 return 0;
b0c632db
HC
1300}
1301
1302static void __exit kvm_s390_exit(void)
1303{
78c4b59f 1304 free_page((unsigned long) vfacilities);
b0c632db
HC
1305 kvm_exit();
1306}
1307
1308module_init(kvm_s390_init);
1309module_exit(kvm_s390_exit);
566af940
CH
1310
1311/*
1312 * Enable autoloading of the kvm module.
1313 * Note that we add the module alias here instead of virt/kvm/kvm_main.c
1314 * since x86 takes a different approach.
1315 */
1316#include <linux/miscdevice.h>
1317MODULE_ALIAS_MISCDEV(KVM_MINOR);
1318MODULE_ALIAS("devname:kvm");