/*
 * hosting zSeries kernel virtual machines
 *
 * Copyright IBM Corp. 2008, 2009
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (version 2 only)
 * as published by the Free Software Foundation.
 *
 * Author(s): Carsten Otte <cotte@de.ibm.com>
 *            Christian Borntraeger <borntraeger@de.ibm.com>
 *            Heiko Carstens <heiko.carstens@de.ibm.com>
 *            Christian Ehrhardt <ehrhardt@de.ibm.com>
 */

#include <linux/compiler.h>
#include <linux/err.h>
#include <linux/fs.h>
#include <linux/hrtimer.h>
#include <linux/init.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/timer.h>
#include <asm/asm-offsets.h>
#include <asm/lowcore.h>
#include <asm/pgtable.h>
#include <asm/nmi.h>
#include <asm/switch_to.h>
#include <asm/facility.h>
#include <asm/sclp.h>
#include "kvm-s390.h"
#include "gaccess.h"

#define CREATE_TRACE_POINTS
#include "trace.h"
#include "trace-s390.h"

#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU

struct kvm_stats_debugfs_item debugfs_entries[] = {
	{ "userspace_handled", VCPU_STAT(exit_userspace) },
	{ "exit_null", VCPU_STAT(exit_null) },
	{ "exit_validity", VCPU_STAT(exit_validity) },
	{ "exit_stop_request", VCPU_STAT(exit_stop_request) },
	{ "exit_external_request", VCPU_STAT(exit_external_request) },
	{ "exit_external_interrupt", VCPU_STAT(exit_external_interrupt) },
	{ "exit_instruction", VCPU_STAT(exit_instruction) },
	{ "exit_program_interruption", VCPU_STAT(exit_program_interruption) },
	{ "exit_instr_and_program_int", VCPU_STAT(exit_instr_and_program) },
	{ "instruction_lctlg", VCPU_STAT(instruction_lctlg) },
	{ "instruction_lctl", VCPU_STAT(instruction_lctl) },
	{ "deliver_emergency_signal", VCPU_STAT(deliver_emergency_signal) },
	{ "deliver_external_call", VCPU_STAT(deliver_external_call) },
	{ "deliver_service_signal", VCPU_STAT(deliver_service_signal) },
	{ "deliver_virtio_interrupt", VCPU_STAT(deliver_virtio_interrupt) },
	{ "deliver_stop_signal", VCPU_STAT(deliver_stop_signal) },
	{ "deliver_prefix_signal", VCPU_STAT(deliver_prefix_signal) },
	{ "deliver_restart_signal", VCPU_STAT(deliver_restart_signal) },
	{ "deliver_program_interruption", VCPU_STAT(deliver_program_int) },
	{ "exit_wait_state", VCPU_STAT(exit_wait_state) },
	{ "instruction_pfmf", VCPU_STAT(instruction_pfmf) },
	{ "instruction_stidp", VCPU_STAT(instruction_stidp) },
	{ "instruction_spx", VCPU_STAT(instruction_spx) },
	{ "instruction_stpx", VCPU_STAT(instruction_stpx) },
	{ "instruction_stap", VCPU_STAT(instruction_stap) },
	{ "instruction_storage_key", VCPU_STAT(instruction_storage_key) },
	{ "instruction_stsch", VCPU_STAT(instruction_stsch) },
	{ "instruction_chsc", VCPU_STAT(instruction_chsc) },
	{ "instruction_stsi", VCPU_STAT(instruction_stsi) },
	{ "instruction_stfl", VCPU_STAT(instruction_stfl) },
	{ "instruction_tprot", VCPU_STAT(instruction_tprot) },
	{ "instruction_sigp_sense", VCPU_STAT(instruction_sigp_sense) },
	{ "instruction_sigp_sense_running", VCPU_STAT(instruction_sigp_sense_running) },
	{ "instruction_sigp_external_call", VCPU_STAT(instruction_sigp_external_call) },
	{ "instruction_sigp_emergency", VCPU_STAT(instruction_sigp_emergency) },
	{ "instruction_sigp_stop", VCPU_STAT(instruction_sigp_stop) },
	{ "instruction_sigp_set_arch", VCPU_STAT(instruction_sigp_arch) },
	{ "instruction_sigp_set_prefix", VCPU_STAT(instruction_sigp_prefix) },
	{ "instruction_sigp_restart", VCPU_STAT(instruction_sigp_restart) },
	{ "diagnose_10", VCPU_STAT(diagnose_10) },
	{ "diagnose_44", VCPU_STAT(diagnose_44) },
	{ "diagnose_9c", VCPU_STAT(diagnose_9c) },
	{ NULL }
};

unsigned long *vfacilities;
static struct gmap_notifier gmap_notifier;

/* test availability of vfacility */
static inline int test_vfacility(unsigned long nr)
{
	return __test_facility(nr, (void *) vfacilities);
}

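/*
 * Facility numbers follow the STFLE convention: bit 0 is the most
 * significant bit of the first doubleword. As a rough sketch for
 * orientation (the authoritative helper lives in <asm/facility.h>),
 * the test boils down to:
 *
 *	((unsigned char *)vfacilities)[nr >> 3] & (0x80 >> (nr & 7))
 */
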
/* Section: not file related */
int kvm_arch_hardware_enable(void *garbage)
{
	/* every s390 is virtualization enabled ;-) */
	return 0;
}

void kvm_arch_hardware_disable(void *garbage)
{
}

static void kvm_gmap_notifier(struct gmap *gmap, unsigned long address);

int kvm_arch_hardware_setup(void)
{
	gmap_notifier.notifier_call = kvm_gmap_notifier;
	gmap_register_ipte_notifier(&gmap_notifier);
	return 0;
}

void kvm_arch_hardware_unsetup(void)
{
	gmap_unregister_ipte_notifier(&gmap_notifier);
}

void kvm_arch_check_processor_compat(void *rtn)
{
}

int kvm_arch_init(void *opaque)
{
	return 0;
}

void kvm_arch_exit(void)
{
}

/* Section: device related */
long kvm_arch_dev_ioctl(struct file *filp,
			unsigned int ioctl, unsigned long arg)
{
	if (ioctl == KVM_S390_ENABLE_SIE)
		return s390_enable_sie();
	return -EINVAL;
}

int kvm_dev_ioctl_check_extension(long ext)
{
	int r;

	switch (ext) {
	case KVM_CAP_S390_PSW:
	case KVM_CAP_S390_GMAP:
	case KVM_CAP_SYNC_MMU:
#ifdef CONFIG_KVM_S390_UCONTROL
	case KVM_CAP_S390_UCONTROL:
#endif
	case KVM_CAP_ASYNC_PF:
	case KVM_CAP_SYNC_REGS:
	case KVM_CAP_ONE_REG:
	case KVM_CAP_ENABLE_CAP:
	case KVM_CAP_S390_CSS_SUPPORT:
	case KVM_CAP_IOEVENTFD:
	case KVM_CAP_DEVICE_CTRL:
		r = 1;
		break;
	case KVM_CAP_NR_VCPUS:
	case KVM_CAP_MAX_VCPUS:
		r = KVM_MAX_VCPUS;
		break;
	case KVM_CAP_NR_MEMSLOTS:
		r = KVM_USER_MEM_SLOTS;
		break;
	case KVM_CAP_S390_COW:
		r = MACHINE_HAS_ESOP;
		break;
	default:
		r = 0;
	}
	return r;
}
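
/*
 * Userspace probes these capabilities with the KVM_CHECK_EXTENSION
 * ioctl on the /dev/kvm file descriptor. A minimal sketch of the
 * caller side (illustrative only, not part of this file):
 *
 *	int kvm_fd = open("/dev/kvm", O_RDWR);
 *	int has_sync_regs = ioctl(kvm_fd, KVM_CHECK_EXTENSION,
 *				  KVM_CAP_SYNC_REGS) > 0;
 */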

/* Section: vm related */
/*
 * Get (and clear) the dirty memory log for a memory slot.
 */
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
			       struct kvm_dirty_log *log)
{
	return 0;
}

long kvm_arch_vm_ioctl(struct file *filp,
		       unsigned int ioctl, unsigned long arg)
{
	struct kvm *kvm = filp->private_data;
	void __user *argp = (void __user *)arg;
	int r;

	switch (ioctl) {
	case KVM_S390_INTERRUPT: {
		struct kvm_s390_interrupt s390int;

		r = -EFAULT;
		if (copy_from_user(&s390int, argp, sizeof(s390int)))
			break;
		r = kvm_s390_inject_vm(kvm, &s390int);
		break;
	}
	default:
		r = -ENOTTY;
	}

	return r;
}
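
/*
 * KVM_S390_INTERRUPT on the VM fd injects a floating interrupt, i.e.
 * one that may be delivered to any vcpu. Sketch of the userspace
 * side (illustrative only):
 *
 *	struct kvm_s390_interrupt irq = {
 *		.type = KVM_S390_INT_VIRTIO,
 *		.parm64 = param,
 *	};
 *
 *	ioctl(vm_fd, KVM_S390_INTERRUPT, &irq);
 */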

int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
{
	int rc;
	char debug_name[16];

	rc = -EINVAL;
#ifdef CONFIG_KVM_S390_UCONTROL
	if (type & ~KVM_VM_S390_UCONTROL)
		goto out_err;
	if ((type & KVM_VM_S390_UCONTROL) && (!capable(CAP_SYS_ADMIN)))
		goto out_err;
#else
	if (type)
		goto out_err;
#endif

	rc = s390_enable_sie();
	if (rc)
		goto out_err;

	rc = -ENOMEM;

	kvm->arch.sca = (struct sca_block *) get_zeroed_page(GFP_KERNEL);
	if (!kvm->arch.sca)
		goto out_err;

	sprintf(debug_name, "kvm-%u", current->pid);

	kvm->arch.dbf = debug_register(debug_name, 8, 2, 8 * sizeof(long));
	if (!kvm->arch.dbf)
		goto out_nodbf;

	spin_lock_init(&kvm->arch.float_int.lock);
	INIT_LIST_HEAD(&kvm->arch.float_int.list);

	debug_register_view(kvm->arch.dbf, &debug_sprintf_view);
	VM_EVENT(kvm, 3, "%s", "vm created");

	if (type & KVM_VM_S390_UCONTROL) {
		kvm->arch.gmap = NULL;
	} else {
		kvm->arch.gmap = gmap_alloc(current->mm);
		if (!kvm->arch.gmap)
			goto out_nogmap;
		kvm->arch.gmap->private = kvm;
		kvm->arch.gmap->pfault_enabled = 0;
	}

	kvm->arch.css_support = 0;

	return 0;
out_nogmap:
	debug_unregister(kvm->arch.dbf);
out_nodbf:
	free_page((unsigned long)(kvm->arch.sca));
out_err:
	return rc;
}
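
/*
 * The type argument is the value userspace passed to KVM_CREATE_VM.
 * A user-controlled (ucontrol) VM, where guest address translation is
 * left to userspace, would be requested roughly as (sketch):
 *
 *	vm_fd = ioctl(kvm_fd, KVM_CREATE_VM, KVM_VM_S390_UCONTROL);
 *
 * which is why that flag is restricted to CAP_SYS_ADMIN above.
 */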

void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{
	VCPU_EVENT(vcpu, 3, "%s", "free cpu");
	trace_kvm_s390_destroy_vcpu(vcpu->vcpu_id);
	kvm_clear_async_pf_completion_queue(vcpu);
	if (!kvm_is_ucontrol(vcpu->kvm)) {
		clear_bit(63 - vcpu->vcpu_id,
			  (unsigned long *) &vcpu->kvm->arch.sca->mcn);
		if (vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda ==
		    (__u64) vcpu->arch.sie_block)
			vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda = 0;
	}
	smp_mb();

	if (kvm_is_ucontrol(vcpu->kvm))
		gmap_free(vcpu->arch.gmap);

	free_page((unsigned long)(vcpu->arch.sie_block));
	kvm_vcpu_uninit(vcpu);
	kmem_cache_free(kvm_vcpu_cache, vcpu);
}

static void kvm_free_vcpus(struct kvm *kvm)
{
	unsigned int i;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(i, vcpu, kvm)
		kvm_arch_vcpu_destroy(vcpu);

	mutex_lock(&kvm->lock);
	for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
		kvm->vcpus[i] = NULL;

	atomic_set(&kvm->online_vcpus, 0);
	mutex_unlock(&kvm->lock);
}

void kvm_arch_sync_events(struct kvm *kvm)
{
}

void kvm_arch_destroy_vm(struct kvm *kvm)
{
	kvm_free_vcpus(kvm);
	free_page((unsigned long)(kvm->arch.sca));
	debug_unregister(kvm->arch.dbf);
	if (!kvm_is_ucontrol(kvm))
		gmap_free(kvm->arch.gmap);
}

/* Section: vcpu related */
int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
{
	vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
	kvm_clear_async_pf_completion_queue(vcpu);
	if (kvm_is_ucontrol(vcpu->kvm)) {
		vcpu->arch.gmap = gmap_alloc(current->mm);
		if (!vcpu->arch.gmap)
			return -ENOMEM;
		vcpu->arch.gmap->private = vcpu->kvm;
		return 0;
	}

	vcpu->arch.gmap = vcpu->kvm->arch.gmap;
	vcpu->run->kvm_valid_regs = KVM_SYNC_PREFIX |
				    KVM_SYNC_GPRS |
				    KVM_SYNC_ACRS |
				    KVM_SYNC_CRS;
	return 0;
}

void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
{
	/* Nothing to do */
}

void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	save_fp_ctl(&vcpu->arch.host_fpregs.fpc);
	save_fp_regs(vcpu->arch.host_fpregs.fprs);
	save_access_regs(vcpu->arch.host_acrs);
	restore_fp_ctl(&vcpu->arch.guest_fpregs.fpc);
	restore_fp_regs(vcpu->arch.guest_fpregs.fprs);
	restore_access_regs(vcpu->run->s.regs.acrs);
	gmap_enable(vcpu->arch.gmap);
	atomic_set_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
}

void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
	atomic_clear_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
	gmap_disable(vcpu->arch.gmap);
	save_fp_ctl(&vcpu->arch.guest_fpregs.fpc);
	save_fp_regs(vcpu->arch.guest_fpregs.fprs);
	save_access_regs(vcpu->run->s.regs.acrs);
	restore_fp_ctl(&vcpu->arch.host_fpregs.fpc);
	restore_fp_regs(vcpu->arch.host_fpregs.fprs);
	restore_access_regs(vcpu->arch.host_acrs);
}

static void kvm_s390_vcpu_initial_reset(struct kvm_vcpu *vcpu)
{
	/* this equals initial cpu reset in pop, but we don't switch to ESA */
	vcpu->arch.sie_block->gpsw.mask = 0UL;
	vcpu->arch.sie_block->gpsw.addr = 0UL;
	kvm_s390_set_prefix(vcpu, 0);
	vcpu->arch.sie_block->cputm = 0UL;
	vcpu->arch.sie_block->ckc = 0UL;
	vcpu->arch.sie_block->todpr = 0;
	memset(vcpu->arch.sie_block->gcr, 0, 16 * sizeof(__u64));
	vcpu->arch.sie_block->gcr[0] = 0xE0UL;
	vcpu->arch.sie_block->gcr[14] = 0xC2000000UL;
	vcpu->arch.guest_fpregs.fpc = 0;
	asm volatile("lfpc %0" : : "Q" (vcpu->arch.guest_fpregs.fpc));
	vcpu->arch.sie_block->gbea = 1;
	vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
	kvm_clear_async_pf_completion_queue(vcpu);
	atomic_set_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);
}

int kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
{
	return 0;
}

int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
{
	atomic_set(&vcpu->arch.sie_block->cpuflags, CPUSTAT_ZARCH |
						    CPUSTAT_SM |
						    CPUSTAT_STOPPED |
						    CPUSTAT_GED);
	vcpu->arch.sie_block->ecb = 6;
	if (test_vfacility(50) && test_vfacility(73))
		vcpu->arch.sie_block->ecb |= 0x10;

	vcpu->arch.sie_block->ecb2 = 8;
	vcpu->arch.sie_block->eca = 0xC1002001U;
	vcpu->arch.sie_block->fac = (int) (long) vfacilities;
	hrtimer_init(&vcpu->arch.ckc_timer, CLOCK_REALTIME, HRTIMER_MODE_ABS);
	tasklet_init(&vcpu->arch.tasklet, kvm_s390_tasklet,
		     (unsigned long) vcpu);
	vcpu->arch.ckc_timer.function = kvm_s390_idle_wakeup;
	get_cpu_id(&vcpu->arch.cpu_id);
	vcpu->arch.cpu_id.version = 0xff;
	return 0;
}
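
/*
 * Facility 73 is transactional execution and facility 50 its
 * constrained variant; when both are present, the 0x10 bit set in ecb
 * above lets SIE interpret transactional instructions for the guest,
 * with the itdb wired up in kvm_arch_vcpu_create() receiving the
 * transaction diagnostic data on intercepted aborts.
 */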

struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
				      unsigned int id)
{
	struct kvm_vcpu *vcpu;
	struct sie_page *sie_page;
	int rc = -EINVAL;

	if (id >= KVM_MAX_VCPUS)
		goto out;

	rc = -ENOMEM;

	vcpu = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
	if (!vcpu)
		goto out;

	sie_page = (struct sie_page *) get_zeroed_page(GFP_KERNEL);
	if (!sie_page)
		goto out_free_cpu;

	vcpu->arch.sie_block = &sie_page->sie_block;
	vcpu->arch.sie_block->itdba = (unsigned long) &sie_page->itdb;

	vcpu->arch.sie_block->icpua = id;
	if (!kvm_is_ucontrol(kvm)) {
		if (!kvm->arch.sca) {
			WARN_ON_ONCE(1);
			goto out_free_cpu;
		}
		if (!kvm->arch.sca->cpu[id].sda)
			kvm->arch.sca->cpu[id].sda =
				(__u64) vcpu->arch.sie_block;
		vcpu->arch.sie_block->scaoh =
			(__u32)(((__u64)kvm->arch.sca) >> 32);
		vcpu->arch.sie_block->scaol = (__u32)(__u64)kvm->arch.sca;
		set_bit(63 - id, (unsigned long *) &kvm->arch.sca->mcn);
	}

	spin_lock_init(&vcpu->arch.local_int.lock);
	INIT_LIST_HEAD(&vcpu->arch.local_int.list);
	vcpu->arch.local_int.float_int = &kvm->arch.float_int;
	spin_lock(&kvm->arch.float_int.lock);
	kvm->arch.float_int.local_int[id] = &vcpu->arch.local_int;
	vcpu->arch.local_int.wq = &vcpu->wq;
	vcpu->arch.local_int.cpuflags = &vcpu->arch.sie_block->cpuflags;
	spin_unlock(&kvm->arch.float_int.lock);

	rc = kvm_vcpu_init(vcpu, kvm, id);
	if (rc)
		goto out_free_sie_block;
	VM_EVENT(kvm, 3, "create cpu %d at %p, sie block at %p", id, vcpu,
		 vcpu->arch.sie_block);
	trace_kvm_s390_create_vcpu(id, vcpu, vcpu->arch.sie_block);

	return vcpu;
out_free_sie_block:
	free_page((unsigned long)(vcpu->arch.sie_block));
out_free_cpu:
	kmem_cache_free(kvm_vcpu_cache, vcpu);
out:
	return ERR_PTR(rc);
}

int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
{
	/* kvm common code refers to this, but never calls it */
	BUG();
	return 0;
}

void s390_vcpu_block(struct kvm_vcpu *vcpu)
{
	atomic_set_mask(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
}

void s390_vcpu_unblock(struct kvm_vcpu *vcpu)
{
	atomic_clear_mask(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
}

/*
 * Kick a guest cpu out of SIE and wait until SIE is not running.
 * If the CPU is not running (e.g. waiting as idle) the function will
 * return immediately.
 */
void exit_sie(struct kvm_vcpu *vcpu)
{
	atomic_set_mask(CPUSTAT_STOP_INT, &vcpu->arch.sie_block->cpuflags);
	while (vcpu->arch.sie_block->prog0c & PROG_IN_SIE)
		cpu_relax();
}

/* Kick a guest cpu out of SIE and prevent SIE-reentry */
void exit_sie_sync(struct kvm_vcpu *vcpu)
{
	s390_vcpu_block(vcpu);
	exit_sie(vcpu);
}
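
/*
 * The two primitives above implement the "kick" protocol:
 * PROG_BLOCK_SIE in prog20 prevents re-entry into SIE, while
 * CPUSTAT_STOP_INT forces a running cpu out of it. prog0c is, as far
 * as this sketch goes, maintained by the SIE entry/exit path, so the
 * cpu_relax() loop in exit_sie() terminates once the cpu has really
 * left SIE.
 */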

static void kvm_gmap_notifier(struct gmap *gmap, unsigned long address)
{
	int i;
	struct kvm *kvm = gmap->private;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(i, vcpu, kvm) {
		/* match against both prefix pages */
		if (vcpu->arch.sie_block->prefix == (address & ~0x1000UL)) {
			VCPU_EVENT(vcpu, 2, "gmap notifier for %lx", address);
			kvm_make_request(KVM_REQ_MMU_RELOAD, vcpu);
			exit_sie_sync(vcpu);
		}
	}
}

int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
{
	/* kvm common code refers to this, but never calls it */
	BUG();
	return 0;
}

static int kvm_arch_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu,
					   struct kvm_one_reg *reg)
{
	int r = -EINVAL;

	switch (reg->id) {
	case KVM_REG_S390_TODPR:
		r = put_user(vcpu->arch.sie_block->todpr,
			     (u32 __user *)reg->addr);
		break;
	case KVM_REG_S390_EPOCHDIFF:
		r = put_user(vcpu->arch.sie_block->epoch,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_CPU_TIMER:
		r = put_user(vcpu->arch.sie_block->cputm,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_CLOCK_COMP:
		r = put_user(vcpu->arch.sie_block->ckc,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PFTOKEN:
		r = put_user(vcpu->arch.pfault_token,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PFCOMPARE:
		r = put_user(vcpu->arch.pfault_compare,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PFSELECT:
		r = put_user(vcpu->arch.pfault_select,
			     (u64 __user *)reg->addr);
		break;
	default:
		break;
	}

	return r;
}

static int kvm_arch_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu,
					   struct kvm_one_reg *reg)
{
	int r = -EINVAL;

	switch (reg->id) {
	case KVM_REG_S390_TODPR:
		r = get_user(vcpu->arch.sie_block->todpr,
			     (u32 __user *)reg->addr);
		break;
	case KVM_REG_S390_EPOCHDIFF:
		r = get_user(vcpu->arch.sie_block->epoch,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_CPU_TIMER:
		r = get_user(vcpu->arch.sie_block->cputm,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_CLOCK_COMP:
		r = get_user(vcpu->arch.sie_block->ckc,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PFTOKEN:
		r = get_user(vcpu->arch.pfault_token,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PFCOMPARE:
		r = get_user(vcpu->arch.pfault_compare,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PFSELECT:
		r = get_user(vcpu->arch.pfault_select,
			     (u64 __user *)reg->addr);
		break;
	default:
		break;
	}

	return r;
}
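
/*
 * Both directions are driven by the generic KVM_GET_ONE_REG /
 * KVM_SET_ONE_REG vcpu ioctls; a sketch of reading the CPU timer
 * from userspace (illustrative only):
 *
 *	__u64 val;
 *	struct kvm_one_reg reg = {
 *		.id   = KVM_REG_S390_CPU_TIMER,
 *		.addr = (__u64)&val,
 *	};
 *
 *	ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);
 */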

static int kvm_arch_vcpu_ioctl_initial_reset(struct kvm_vcpu *vcpu)
{
	kvm_s390_vcpu_initial_reset(vcpu);
	return 0;
}

int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	memcpy(&vcpu->run->s.regs.gprs, &regs->gprs, sizeof(regs->gprs));
	return 0;
}

int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	memcpy(&regs->gprs, &vcpu->run->s.regs.gprs, sizeof(regs->gprs));
	return 0;
}

int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	memcpy(&vcpu->run->s.regs.acrs, &sregs->acrs, sizeof(sregs->acrs));
	memcpy(&vcpu->arch.sie_block->gcr, &sregs->crs, sizeof(sregs->crs));
	restore_access_regs(vcpu->run->s.regs.acrs);
	return 0;
}

int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	memcpy(&sregs->acrs, &vcpu->run->s.regs.acrs, sizeof(sregs->acrs));
	memcpy(&sregs->crs, &vcpu->arch.sie_block->gcr, sizeof(sregs->crs));
	return 0;
}

int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	if (test_fp_ctl(fpu->fpc))
		return -EINVAL;
	memcpy(&vcpu->arch.guest_fpregs.fprs, &fpu->fprs, sizeof(fpu->fprs));
	vcpu->arch.guest_fpregs.fpc = fpu->fpc;
	restore_fp_ctl(&vcpu->arch.guest_fpregs.fpc);
	restore_fp_regs(vcpu->arch.guest_fpregs.fprs);
	return 0;
}

int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	memcpy(&fpu->fprs, &vcpu->arch.guest_fpregs.fprs, sizeof(fpu->fprs));
	fpu->fpc = vcpu->arch.guest_fpregs.fpc;
	return 0;
}

static int kvm_arch_vcpu_ioctl_set_initial_psw(struct kvm_vcpu *vcpu, psw_t psw)
{
	int rc = 0;

	if (!(atomic_read(&vcpu->arch.sie_block->cpuflags) & CPUSTAT_STOPPED))
		rc = -EBUSY;
	else {
		vcpu->run->psw_mask = psw.mask;
		vcpu->run->psw_addr = psw.addr;
	}
	return rc;
}

int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
				  struct kvm_translation *tr)
{
	return -EINVAL; /* not implemented yet */
}

int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
					struct kvm_guest_debug *dbg)
{
	return -EINVAL; /* not implemented yet */
}

int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	return -EINVAL; /* not implemented yet */
}

int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	return -EINVAL; /* not implemented yet */
}

static int kvm_s390_handle_requests(struct kvm_vcpu *vcpu)
{
	/*
	 * We use MMU_RELOAD just to re-arm the ipte notifier for the
	 * guest prefix page. gmap_ipte_notify will wait on the ptl lock.
	 * This ensures that the ipte instruction for this request has
	 * already finished. We might race against a second unmapper that
	 * wants to set the blocking bit. Let's just retry the request loop.
	 */
	while (kvm_check_request(KVM_REQ_MMU_RELOAD, vcpu)) {
		int rc;
		rc = gmap_ipte_notify(vcpu->arch.gmap,
				      vcpu->arch.sie_block->prefix,
				      PAGE_SIZE * 2);
		if (rc)
			return rc;
		s390_vcpu_unblock(vcpu);
	}
	return 0;
}

static long kvm_arch_fault_in_sync(struct kvm_vcpu *vcpu)
{
	long rc;
	hva_t fault = gmap_fault(current->thread.gmap_addr, vcpu->arch.gmap);
	struct mm_struct *mm = current->mm;
	down_read(&mm->mmap_sem);
	rc = get_user_pages(current, mm, fault, 1, 1, 0, NULL, NULL);
	up_read(&mm->mmap_sem);
	return rc;
}

static void __kvm_inject_pfault_token(struct kvm_vcpu *vcpu, bool start_token,
				      unsigned long token)
{
	struct kvm_s390_interrupt inti;
	inti.parm64 = token;

	if (start_token) {
		inti.type = KVM_S390_INT_PFAULT_INIT;
		WARN_ON_ONCE(kvm_s390_inject_vcpu(vcpu, &inti));
	} else {
		inti.type = KVM_S390_INT_PFAULT_DONE;
		WARN_ON_ONCE(kvm_s390_inject_vm(vcpu->kvm, &inti));
	}
}

void kvm_arch_async_page_not_present(struct kvm_vcpu *vcpu,
				     struct kvm_async_pf *work)
{
	trace_kvm_s390_pfault_init(vcpu, work->arch.pfault_token);
	__kvm_inject_pfault_token(vcpu, true, work->arch.pfault_token);
}

void kvm_arch_async_page_present(struct kvm_vcpu *vcpu,
				 struct kvm_async_pf *work)
{
	trace_kvm_s390_pfault_done(vcpu, work->arch.pfault_token);
	__kvm_inject_pfault_token(vcpu, false, work->arch.pfault_token);
}

void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu,
			       struct kvm_async_pf *work)
{
	/* s390 will always inject the page directly */
}

bool kvm_arch_can_inject_async_page_present(struct kvm_vcpu *vcpu)
{
	/*
	 * s390 will always inject the page directly,
	 * but we still want check_async_completion to clean up
	 */
	return true;
}

static int kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu)
{
	hva_t hva;
	struct kvm_arch_async_pf arch;
	int rc;

	if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
		return 0;
	if ((vcpu->arch.sie_block->gpsw.mask & vcpu->arch.pfault_select) !=
	    vcpu->arch.pfault_compare)
		return 0;
	if (psw_extint_disabled(vcpu))
		return 0;
	if (kvm_cpu_has_interrupt(vcpu))
		return 0;
	if (!(vcpu->arch.sie_block->gcr[0] & 0x200ul))
		return 0;
	if (!vcpu->arch.gmap->pfault_enabled)
		return 0;

	hva = gmap_fault(current->thread.gmap_addr, vcpu->arch.gmap);
	if (copy_from_guest(vcpu, &arch.pfault_token, vcpu->arch.pfault_token, 8))
		return 0;

	rc = kvm_setup_async_pf(vcpu, current->thread.gmap_addr, hva, &arch);
	return rc;
}
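
/*
 * The early-return checks above mirror the pfault handshake: a token
 * must have been registered, the guest PSW must match the
 * pfault_select/pfault_compare pair the guest asked for, external
 * interrupts (including the 0x200ul subclass bit in CR0 tested above)
 * must be open, and pfault must be enabled on the gmap. Only then is
 * an asynchronous "page is coming" notification armed instead of
 * resolving the fault synchronously.
 */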

static int vcpu_pre_run(struct kvm_vcpu *vcpu)
{
	int rc, cpuflags;

	/*
	 * On s390 notifications for arriving pages will be delivered directly
	 * to the guest but the housekeeping for completed pfaults is
	 * handled outside the worker.
	 */
	kvm_check_async_pf_completion(vcpu);

	memcpy(&vcpu->arch.sie_block->gg14, &vcpu->run->s.regs.gprs[14], 16);

	if (need_resched())
		schedule();

	if (test_thread_flag(TIF_MCCK_PENDING))
		s390_handle_mcck();

	if (!kvm_is_ucontrol(vcpu->kvm))
		kvm_s390_deliver_pending_interrupts(vcpu);

	rc = kvm_s390_handle_requests(vcpu);
	if (rc)
		return rc;

	vcpu->arch.sie_block->icptcode = 0;
	cpuflags = atomic_read(&vcpu->arch.sie_block->cpuflags);
	VCPU_EVENT(vcpu, 6, "entering sie flags %x", cpuflags);
	trace_kvm_s390_sie_enter(vcpu, cpuflags);

	return 0;
}

static int vcpu_post_run(struct kvm_vcpu *vcpu, int exit_reason)
{
	int rc = -1;

	VCPU_EVENT(vcpu, 6, "exit sie icptcode %d",
		   vcpu->arch.sie_block->icptcode);
	trace_kvm_s390_sie_exit(vcpu, vcpu->arch.sie_block->icptcode);

	if (exit_reason >= 0) {
		rc = 0;
	} else if (kvm_is_ucontrol(vcpu->kvm)) {
		vcpu->run->exit_reason = KVM_EXIT_S390_UCONTROL;
		vcpu->run->s390_ucontrol.trans_exc_code =
			current->thread.gmap_addr;
		vcpu->run->s390_ucontrol.pgm_code = 0x10;
		rc = -EREMOTE;

	} else if (current->thread.gmap_pfault) {
		trace_kvm_s390_major_guest_pfault(vcpu);
		current->thread.gmap_pfault = 0;
		if (kvm_arch_setup_async_pf(vcpu) ||
		    (kvm_arch_fault_in_sync(vcpu) >= 0))
			rc = 0;
	}

	if (rc == -1) {
		VCPU_EVENT(vcpu, 3, "%s", "fault in sie instruction");
		trace_kvm_s390_sie_fault(vcpu);
		rc = kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
	}

	memcpy(&vcpu->run->s.regs.gprs[14], &vcpu->arch.sie_block->gg14, 16);

	if (rc == 0) {
		if (kvm_is_ucontrol(vcpu->kvm))
			rc = -EOPNOTSUPP;
		else
			rc = kvm_handle_sie_intercept(vcpu);
	}

	return rc;
}
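
/*
 * Note on the exit_reason convention used above: sie64a() returns a
 * value >= 0 when SIE stopped with an architected intercept (the
 * icptcode in the sie block says why) and a negative value when the
 * host itself hit a problem, e.g. a guest page fault recorded in
 * current->thread.gmap_pfault, which is then retried asynchronously
 * or synchronously.
 */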

static int __vcpu_run(struct kvm_vcpu *vcpu)
{
	int rc, exit_reason;

	/*
	 * We try to hold kvm->srcu during most of vcpu_run (except when run-
	 * ning the guest), so that memslots (and other stuff) are protected
	 */
	vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);

	do {
		rc = vcpu_pre_run(vcpu);
		if (rc)
			break;

		srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
		/*
		 * As PF_VCPU will be used in the fault handler, there must be
		 * no uaccess between guest_enter and guest_exit.
		 */
		preempt_disable();
		kvm_guest_enter();
		preempt_enable();
		exit_reason = sie64a(vcpu->arch.sie_block,
				     vcpu->run->s.regs.gprs);
		kvm_guest_exit();
		vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);

		rc = vcpu_post_run(vcpu, exit_reason);
	} while (!signal_pending(current) && !rc);

	srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
	return rc;
}

int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	int rc;
	sigset_t sigsaved;

	if (vcpu->sigset_active)
		sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);

	atomic_clear_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);

	BUG_ON(vcpu->kvm->arch.float_int.local_int[vcpu->vcpu_id] == NULL);

	switch (kvm_run->exit_reason) {
	case KVM_EXIT_S390_SIEIC:
	case KVM_EXIT_UNKNOWN:
	case KVM_EXIT_INTR:
	case KVM_EXIT_S390_RESET:
	case KVM_EXIT_S390_UCONTROL:
	case KVM_EXIT_S390_TSCH:
		break;
	default:
		BUG();
	}

	vcpu->arch.sie_block->gpsw.mask = kvm_run->psw_mask;
	vcpu->arch.sie_block->gpsw.addr = kvm_run->psw_addr;
	if (kvm_run->kvm_dirty_regs & KVM_SYNC_PREFIX) {
		kvm_run->kvm_dirty_regs &= ~KVM_SYNC_PREFIX;
		kvm_s390_set_prefix(vcpu, kvm_run->s.regs.prefix);
	}
	if (kvm_run->kvm_dirty_regs & KVM_SYNC_CRS) {
		kvm_run->kvm_dirty_regs &= ~KVM_SYNC_CRS;
		memcpy(&vcpu->arch.sie_block->gcr, &kvm_run->s.regs.crs, 128);
		kvm_s390_set_prefix(vcpu, kvm_run->s.regs.prefix);
	}

	might_fault();
	rc = __vcpu_run(vcpu);

	if (signal_pending(current) && !rc) {
		kvm_run->exit_reason = KVM_EXIT_INTR;
		rc = -EINTR;
	}

	if (rc == -EOPNOTSUPP) {
		/* intercept cannot be handled in-kernel, prepare kvm-run */
		kvm_run->exit_reason = KVM_EXIT_S390_SIEIC;
		kvm_run->s390_sieic.icptcode = vcpu->arch.sie_block->icptcode;
		kvm_run->s390_sieic.ipa = vcpu->arch.sie_block->ipa;
		kvm_run->s390_sieic.ipb = vcpu->arch.sie_block->ipb;
		rc = 0;
	}

	if (rc == -EREMOTE) {
		/* intercept was handled, but userspace support is needed;
		 * kvm_run has been prepared by the handler */
		rc = 0;
	}

	kvm_run->psw_mask = vcpu->arch.sie_block->gpsw.mask;
	kvm_run->psw_addr = vcpu->arch.sie_block->gpsw.addr;
	kvm_run->s.regs.prefix = vcpu->arch.sie_block->prefix;
	memcpy(&kvm_run->s.regs.crs, &vcpu->arch.sie_block->gcr, 128);

	if (vcpu->sigset_active)
		sigprocmask(SIG_SETMASK, &sigsaved, NULL);

	vcpu->stat.exit_userspace++;
	return rc;
}

static int __guestcopy(struct kvm_vcpu *vcpu, u64 guestdest, void *from,
		       unsigned long n, int prefix)
{
	if (prefix)
		return copy_to_guest(vcpu, guestdest, from, n);
	else
		return copy_to_guest_absolute(vcpu, guestdest, from, n);
}

/*
 * store status at address
 * we have two special cases:
 * KVM_S390_STORE_STATUS_NOADDR: -> 0x1200 on 64 bit
 * KVM_S390_STORE_STATUS_PREFIXED: -> prefix
 */
int kvm_s390_store_status_unloaded(struct kvm_vcpu *vcpu, unsigned long addr)
{
	unsigned char archmode = 1;
	int prefix;
	u64 clkcomp;

	if (addr == KVM_S390_STORE_STATUS_NOADDR) {
		if (copy_to_guest_absolute(vcpu, 163ul, &archmode, 1))
			return -EFAULT;
		addr = SAVE_AREA_BASE;
		prefix = 0;
	} else if (addr == KVM_S390_STORE_STATUS_PREFIXED) {
		if (copy_to_guest(vcpu, 163ul, &archmode, 1))
			return -EFAULT;
		addr = SAVE_AREA_BASE;
		prefix = 1;
	} else
		prefix = 0;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, fp_regs),
			vcpu->arch.guest_fpregs.fprs, 128, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, gp_regs),
			vcpu->run->s.regs.gprs, 128, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, psw),
			&vcpu->arch.sie_block->gpsw, 16, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, pref_reg),
			&vcpu->arch.sie_block->prefix, 4, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu,
			addr + offsetof(struct save_area, fp_ctrl_reg),
			&vcpu->arch.guest_fpregs.fpc, 4, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, tod_reg),
			&vcpu->arch.sie_block->todpr, 4, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, timer),
			&vcpu->arch.sie_block->cputm, 8, prefix))
		return -EFAULT;

	clkcomp = vcpu->arch.sie_block->ckc >> 8;
	if (__guestcopy(vcpu, addr + offsetof(struct save_area, clk_cmp),
			&clkcomp, 8, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, acc_regs),
			&vcpu->run->s.regs.acrs, 64, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu,
			addr + offsetof(struct save_area, ctrl_regs),
			&vcpu->arch.sie_block->gcr, 128, prefix))
		return -EFAULT;
	return 0;
}
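
/*
 * This is reached via the KVM_S390_STORE_STATUS vcpu ioctl (see
 * kvm_arch_vcpu_ioctl() below); a sketch of the userspace side for
 * the architected save area at absolute 0x1200:
 *
 *	ioctl(vcpu_fd, KVM_S390_STORE_STATUS, KVM_S390_STORE_STATUS_NOADDR);
 */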

int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
{
	/*
	 * The guest FPRS and ACRS are in the host FPRS/ACRS due to the lazy
	 * copying in vcpu load/put. Let's update our copies before we save
	 * them into the save area.
	 */
	save_fp_ctl(&vcpu->arch.guest_fpregs.fpc);
	save_fp_regs(vcpu->arch.guest_fpregs.fprs);
	save_access_regs(vcpu->run->s.regs.acrs);

	return kvm_s390_store_status_unloaded(vcpu, addr);
}

static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
				     struct kvm_enable_cap *cap)
{
	int r;

	if (cap->flags)
		return -EINVAL;

	switch (cap->cap) {
	case KVM_CAP_S390_CSS_SUPPORT:
		if (!vcpu->kvm->arch.css_support) {
			vcpu->kvm->arch.css_support = 1;
			trace_kvm_s390_enable_css(vcpu->kvm);
		}
		r = 0;
		break;
	default:
		r = -EINVAL;
		break;
	}
	return r;
}
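
/*
 * At this point KVM_ENABLE_CAP is a per-vcpu ioctl; enabling the
 * channel subsystem (css) support would look roughly like this from
 * userspace (sketch, illustrative only):
 *
 *	struct kvm_enable_cap cap = {
 *		.cap = KVM_CAP_S390_CSS_SUPPORT,
 *	};
 *
 *	ioctl(vcpu_fd, KVM_ENABLE_CAP, &cap);
 */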

long kvm_arch_vcpu_ioctl(struct file *filp,
			 unsigned int ioctl, unsigned long arg)
{
	struct kvm_vcpu *vcpu = filp->private_data;
	void __user *argp = (void __user *)arg;
	int idx;
	long r;

	switch (ioctl) {
	case KVM_S390_INTERRUPT: {
		struct kvm_s390_interrupt s390int;

		r = -EFAULT;
		if (copy_from_user(&s390int, argp, sizeof(s390int)))
			break;
		r = kvm_s390_inject_vcpu(vcpu, &s390int);
		break;
	}
	case KVM_S390_STORE_STATUS:
		idx = srcu_read_lock(&vcpu->kvm->srcu);
		r = kvm_s390_vcpu_store_status(vcpu, arg);
		srcu_read_unlock(&vcpu->kvm->srcu, idx);
		break;
	case KVM_S390_SET_INITIAL_PSW: {
		psw_t psw;

		r = -EFAULT;
		if (copy_from_user(&psw, argp, sizeof(psw)))
			break;
		r = kvm_arch_vcpu_ioctl_set_initial_psw(vcpu, psw);
		break;
	}
	case KVM_S390_INITIAL_RESET:
		r = kvm_arch_vcpu_ioctl_initial_reset(vcpu);
		break;
	case KVM_SET_ONE_REG:
	case KVM_GET_ONE_REG: {
		struct kvm_one_reg reg;
		r = -EFAULT;
		if (copy_from_user(&reg, argp, sizeof(reg)))
			break;
		if (ioctl == KVM_SET_ONE_REG)
			r = kvm_arch_vcpu_ioctl_set_one_reg(vcpu, &reg);
		else
			r = kvm_arch_vcpu_ioctl_get_one_reg(vcpu, &reg);
		break;
	}
#ifdef CONFIG_KVM_S390_UCONTROL
	case KVM_S390_UCAS_MAP: {
		struct kvm_s390_ucas_mapping ucasmap;

		if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
			r = -EFAULT;
			break;
		}

		if (!kvm_is_ucontrol(vcpu->kvm)) {
			r = -EINVAL;
			break;
		}

		r = gmap_map_segment(vcpu->arch.gmap, ucasmap.user_addr,
				     ucasmap.vcpu_addr, ucasmap.length);
		break;
	}
	case KVM_S390_UCAS_UNMAP: {
		struct kvm_s390_ucas_mapping ucasmap;

		if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
			r = -EFAULT;
			break;
		}

		if (!kvm_is_ucontrol(vcpu->kvm)) {
			r = -EINVAL;
			break;
		}

		r = gmap_unmap_segment(vcpu->arch.gmap, ucasmap.vcpu_addr,
				       ucasmap.length);
		break;
	}
#endif
	case KVM_S390_VCPU_FAULT: {
		r = gmap_fault(arg, vcpu->arch.gmap);
		if (!IS_ERR_VALUE(r))
			r = 0;
		break;
	}
	case KVM_ENABLE_CAP:
	{
		struct kvm_enable_cap cap;
		r = -EFAULT;
		if (copy_from_user(&cap, argp, sizeof(cap)))
			break;
		r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap);
		break;
	}
	default:
		r = -ENOTTY;
	}
	return r;
}

int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
{
#ifdef CONFIG_KVM_S390_UCONTROL
	if ((vmf->pgoff == KVM_S390_SIE_PAGE_OFFSET)
		 && (kvm_is_ucontrol(vcpu->kvm))) {
		vmf->page = virt_to_page(vcpu->arch.sie_block);
		get_page(vmf->page);
		return 0;
	}
#endif
	return VM_FAULT_SIGBUS;
}

void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free,
			   struct kvm_memory_slot *dont)
{
}

int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
			    unsigned long npages)
{
	return 0;
}

void kvm_arch_memslots_updated(struct kvm *kvm)
{
}

/* Section: memory related */
int kvm_arch_prepare_memory_region(struct kvm *kvm,
				   struct kvm_memory_slot *memslot,
				   struct kvm_userspace_memory_region *mem,
				   enum kvm_mr_change change)
{
	/* A few sanity checks. Memory slots have to start and end at a
	   segment (1 MB) boundary. The backing memory in userland may be
	   fragmented across several vmas, and it is okay to mmap() and
	   munmap() ranges in this slot at any time after this call. */

	if (mem->userspace_addr & 0xffffful)
		return -EINVAL;

	if (mem->memory_size & 0xffffful)
		return -EINVAL;

	return 0;
}
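
/*
 * 0xfffff is one megabyte minus one: both checks above enforce the
 * 1 MB segment alignment that gmap_map_segment() in the commit
 * handler below operates on.
 */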

void kvm_arch_commit_memory_region(struct kvm *kvm,
				   struct kvm_userspace_memory_region *mem,
				   const struct kvm_memory_slot *old,
				   enum kvm_mr_change change)
{
	int rc;

	/* If the basics of the memslot do not change, we do not want
	 * to update the gmap. Every update causes several unnecessary
	 * segment translation exceptions. This is usually handled just
	 * fine by the normal fault handler + gmap, but it will also
	 * cause faults on the prefix page of running guest CPUs.
	 */
	if (old->userspace_addr == mem->userspace_addr &&
	    old->base_gfn * PAGE_SIZE == mem->guest_phys_addr &&
	    old->npages * PAGE_SIZE == mem->memory_size)
		return;

	rc = gmap_map_segment(kvm->arch.gmap, mem->userspace_addr,
			      mem->guest_phys_addr, mem->memory_size);
	if (rc)
		printk(KERN_WARNING "kvm-s390: failed to commit memory region\n");
	return;
}

void kvm_arch_flush_shadow_all(struct kvm *kvm)
{
}

void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
				   struct kvm_memory_slot *slot)
{
}

static int __init kvm_s390_init(void)
{
	int ret;
	ret = kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
	if (ret)
		return ret;

	/*
	 * guests can ask for up to 255+1 double words, so we need a full
	 * page to hold the maximum amount of facilities. On the other hand,
	 * we only set facilities that are known to work in KVM.
	 */
	vfacilities = (unsigned long *) get_zeroed_page(GFP_KERNEL|GFP_DMA);
	if (!vfacilities) {
		kvm_exit();
		return -ENOMEM;
	}
	memcpy(vfacilities, S390_lowcore.stfle_fac_list, 16);
	vfacilities[0] &= 0xff82fff3f4fc2000UL;
	vfacilities[1] &= 0x005c000000000000UL;
	return 0;
}
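
/*
 * The two mask values above whitelist the facility bits (0-127) that
 * KVM is known to handle correctly; everything else reported by the
 * host's STFLE is hidden from guests.
 */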

static void __exit kvm_s390_exit(void)
{
	free_page((unsigned long) vfacilities);
	kvm_exit();
}

module_init(kvm_s390_init);
module_exit(kvm_s390_exit);

/*
 * Enable autoloading of the kvm module.
 * Note that we add the module alias here instead of virt/kvm/kvm_main.c
 * since x86 takes a different approach.
 */
#include <linux/miscdevice.h>
MODULE_ALIAS_MISCDEV(KVM_MINOR);
MODULE_ALIAS("devname:kvm");