/*
 * hosting zSeries kernel virtual machines
 *
 * Copyright IBM Corp. 2008, 2009
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (version 2 only)
 * as published by the Free Software Foundation.
 *
 *    Author(s): Carsten Otte <cotte@de.ibm.com>
 *               Christian Borntraeger <borntraeger@de.ibm.com>
 *               Heiko Carstens <heiko.carstens@de.ibm.com>
 *               Christian Ehrhardt <ehrhardt@de.ibm.com>
 */

#include <linux/compiler.h>
#include <linux/err.h>
#include <linux/fs.h>
#include <linux/hrtimer.h>
#include <linux/init.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/timer.h>
#include <asm/asm-offsets.h>
#include <asm/lowcore.h>
#include <asm/pgtable.h>
#include <asm/nmi.h>
#include <asm/switch_to.h>
#include <asm/facility.h>
#include <asm/sclp.h>
#include "kvm-s390.h"
#include "gaccess.h"

#define CREATE_TRACE_POINTS
#include "trace.h"
#include "trace-s390.h"

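/*
 * VCPU_STAT(x) expands to the (offset, kind) pair that the generic KVM
 * stats/debugfs code expects in the debugfs_entries table below.
 */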
#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU

struct kvm_stats_debugfs_item debugfs_entries[] = {
	{ "userspace_handled", VCPU_STAT(exit_userspace) },
	{ "exit_null", VCPU_STAT(exit_null) },
	{ "exit_validity", VCPU_STAT(exit_validity) },
	{ "exit_stop_request", VCPU_STAT(exit_stop_request) },
	{ "exit_external_request", VCPU_STAT(exit_external_request) },
	{ "exit_external_interrupt", VCPU_STAT(exit_external_interrupt) },
	{ "exit_instruction", VCPU_STAT(exit_instruction) },
	{ "exit_program_interruption", VCPU_STAT(exit_program_interruption) },
	{ "exit_instr_and_program_int", VCPU_STAT(exit_instr_and_program) },
	{ "instruction_lctlg", VCPU_STAT(instruction_lctlg) },
	{ "instruction_lctl", VCPU_STAT(instruction_lctl) },
	{ "deliver_emergency_signal", VCPU_STAT(deliver_emergency_signal) },
	{ "deliver_external_call", VCPU_STAT(deliver_external_call) },
	{ "deliver_service_signal", VCPU_STAT(deliver_service_signal) },
	{ "deliver_virtio_interrupt", VCPU_STAT(deliver_virtio_interrupt) },
	{ "deliver_stop_signal", VCPU_STAT(deliver_stop_signal) },
	{ "deliver_prefix_signal", VCPU_STAT(deliver_prefix_signal) },
	{ "deliver_restart_signal", VCPU_STAT(deliver_restart_signal) },
	{ "deliver_program_interruption", VCPU_STAT(deliver_program_int) },
	{ "exit_wait_state", VCPU_STAT(exit_wait_state) },
	{ "instruction_pfmf", VCPU_STAT(instruction_pfmf) },
	{ "instruction_stidp", VCPU_STAT(instruction_stidp) },
	{ "instruction_spx", VCPU_STAT(instruction_spx) },
	{ "instruction_stpx", VCPU_STAT(instruction_stpx) },
	{ "instruction_stap", VCPU_STAT(instruction_stap) },
	{ "instruction_storage_key", VCPU_STAT(instruction_storage_key) },
	{ "instruction_stsch", VCPU_STAT(instruction_stsch) },
	{ "instruction_chsc", VCPU_STAT(instruction_chsc) },
	{ "instruction_stsi", VCPU_STAT(instruction_stsi) },
	{ "instruction_stfl", VCPU_STAT(instruction_stfl) },
	{ "instruction_tprot", VCPU_STAT(instruction_tprot) },
	{ "instruction_sigp_sense", VCPU_STAT(instruction_sigp_sense) },
	{ "instruction_sigp_sense_running", VCPU_STAT(instruction_sigp_sense_running) },
	{ "instruction_sigp_external_call", VCPU_STAT(instruction_sigp_external_call) },
	{ "instruction_sigp_emergency", VCPU_STAT(instruction_sigp_emergency) },
	{ "instruction_sigp_stop", VCPU_STAT(instruction_sigp_stop) },
	{ "instruction_sigp_set_arch", VCPU_STAT(instruction_sigp_arch) },
	{ "instruction_sigp_set_prefix", VCPU_STAT(instruction_sigp_prefix) },
	{ "instruction_sigp_restart", VCPU_STAT(instruction_sigp_restart) },
	{ "diagnose_10", VCPU_STAT(diagnose_10) },
	{ "diagnose_44", VCPU_STAT(diagnose_44) },
	{ "diagnose_9c", VCPU_STAT(diagnose_9c) },
	{ NULL }
};

unsigned long *vfacilities;
static struct gmap_notifier gmap_notifier;

/* test availability of vfacility */
static inline int test_vfacility(unsigned long nr)
{
	return __test_facility(nr, (void *) vfacilities);
}

/* Section: not file related */
int kvm_arch_hardware_enable(void *garbage)
{
	/* every s390 is virtualization enabled ;-) */
	return 0;
}

void kvm_arch_hardware_disable(void *garbage)
{
}

static void kvm_gmap_notifier(struct gmap *gmap, unsigned long address);

int kvm_arch_hardware_setup(void)
{
	gmap_notifier.notifier_call = kvm_gmap_notifier;
	gmap_register_ipte_notifier(&gmap_notifier);
	return 0;
}

void kvm_arch_hardware_unsetup(void)
{
	gmap_unregister_ipte_notifier(&gmap_notifier);
}

void kvm_arch_check_processor_compat(void *rtn)
{
}

int kvm_arch_init(void *opaque)
{
	return 0;
}

void kvm_arch_exit(void)
{
}

/* Section: device related */
long kvm_arch_dev_ioctl(struct file *filp,
			unsigned int ioctl, unsigned long arg)
{
	if (ioctl == KVM_S390_ENABLE_SIE)
		return s390_enable_sie();
	return -EINVAL;
}

int kvm_dev_ioctl_check_extension(long ext)
{
	int r;

	switch (ext) {
	case KVM_CAP_S390_PSW:
	case KVM_CAP_S390_GMAP:
	case KVM_CAP_SYNC_MMU:
#ifdef CONFIG_KVM_S390_UCONTROL
	case KVM_CAP_S390_UCONTROL:
#endif
	case KVM_CAP_SYNC_REGS:
	case KVM_CAP_ONE_REG:
	case KVM_CAP_ENABLE_CAP:
	case KVM_CAP_S390_CSS_SUPPORT:
	case KVM_CAP_IOEVENTFD:
		r = 1;
		break;
	case KVM_CAP_NR_VCPUS:
	case KVM_CAP_MAX_VCPUS:
		r = KVM_MAX_VCPUS;
		break;
	case KVM_CAP_NR_MEMSLOTS:
		r = KVM_USER_MEM_SLOTS;
		break;
	case KVM_CAP_S390_COW:
		r = MACHINE_HAS_ESOP;
		break;
	default:
		r = 0;
	}
	return r;
}

/* Section: vm related */
/*
 * Get (and clear) the dirty memory log for a memory slot.
 */
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
			       struct kvm_dirty_log *log)
{
	return 0;
}

long kvm_arch_vm_ioctl(struct file *filp,
		       unsigned int ioctl, unsigned long arg)
{
	struct kvm *kvm = filp->private_data;
	void __user *argp = (void __user *)arg;
	int r;

	switch (ioctl) {
	case KVM_S390_INTERRUPT: {
		struct kvm_s390_interrupt s390int;

		r = -EFAULT;
		if (copy_from_user(&s390int, argp, sizeof(s390int)))
			break;
		r = kvm_s390_inject_vm(kvm, &s390int);
		break;
	}
	default:
		r = -ENOTTY;
	}

	return r;
}

int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
{
	int rc;
	char debug_name[16];

	rc = -EINVAL;
#ifdef CONFIG_KVM_S390_UCONTROL
	if (type & ~KVM_VM_S390_UCONTROL)
		goto out_err;
	if ((type & KVM_VM_S390_UCONTROL) && (!capable(CAP_SYS_ADMIN)))
		goto out_err;
#else
	if (type)
		goto out_err;
#endif

	rc = s390_enable_sie();
	if (rc)
		goto out_err;

	rc = -ENOMEM;

	kvm->arch.sca = (struct sca_block *) get_zeroed_page(GFP_KERNEL);
	if (!kvm->arch.sca)
		goto out_err;

	sprintf(debug_name, "kvm-%u", current->pid);

	kvm->arch.dbf = debug_register(debug_name, 8, 2, 8 * sizeof(long));
	if (!kvm->arch.dbf)
		goto out_nodbf;

	spin_lock_init(&kvm->arch.float_int.lock);
	INIT_LIST_HEAD(&kvm->arch.float_int.list);

	debug_register_view(kvm->arch.dbf, &debug_sprintf_view);
	VM_EVENT(kvm, 3, "%s", "vm created");

	if (type & KVM_VM_S390_UCONTROL) {
		kvm->arch.gmap = NULL;
	} else {
		kvm->arch.gmap = gmap_alloc(current->mm);
		if (!kvm->arch.gmap)
			goto out_nogmap;
		kvm->arch.gmap->private = kvm;
	}

	kvm->arch.css_support = 0;

	return 0;
out_nogmap:
	debug_unregister(kvm->arch.dbf);
out_nodbf:
	free_page((unsigned long)(kvm->arch.sca));
out_err:
	return rc;
}

void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{
	VCPU_EVENT(vcpu, 3, "%s", "free cpu");
	trace_kvm_s390_destroy_vcpu(vcpu->vcpu_id);
	if (!kvm_is_ucontrol(vcpu->kvm)) {
		clear_bit(63 - vcpu->vcpu_id,
			  (unsigned long *) &vcpu->kvm->arch.sca->mcn);
		if (vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda ==
		    (__u64) vcpu->arch.sie_block)
			vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda = 0;
	}
	smp_mb();

	if (kvm_is_ucontrol(vcpu->kvm))
		gmap_free(vcpu->arch.gmap);

	free_page((unsigned long)(vcpu->arch.sie_block));
	kvm_vcpu_uninit(vcpu);
	kmem_cache_free(kvm_vcpu_cache, vcpu);
}

static void kvm_free_vcpus(struct kvm *kvm)
{
	unsigned int i;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(i, vcpu, kvm)
		kvm_arch_vcpu_destroy(vcpu);

	mutex_lock(&kvm->lock);
	for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
		kvm->vcpus[i] = NULL;

	atomic_set(&kvm->online_vcpus, 0);
	mutex_unlock(&kvm->lock);
}

void kvm_arch_sync_events(struct kvm *kvm)
{
}

void kvm_arch_destroy_vm(struct kvm *kvm)
{
	kvm_free_vcpus(kvm);
	free_page((unsigned long)(kvm->arch.sca));
	debug_unregister(kvm->arch.dbf);
	if (!kvm_is_ucontrol(kvm))
		gmap_free(kvm->arch.gmap);
}

/* Section: vcpu related */
int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
{
	if (kvm_is_ucontrol(vcpu->kvm)) {
		vcpu->arch.gmap = gmap_alloc(current->mm);
		if (!vcpu->arch.gmap)
			return -ENOMEM;
		vcpu->arch.gmap->private = vcpu->kvm;
		return 0;
	}

	vcpu->arch.gmap = vcpu->kvm->arch.gmap;
	vcpu->run->kvm_valid_regs = KVM_SYNC_PREFIX |
				    KVM_SYNC_GPRS |
				    KVM_SYNC_ACRS |
				    KVM_SYNC_CRS;
	return 0;
}

void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
{
	/* Nothing to do */
}

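/*
 * Guest FP and access registers are switched lazily: load() below stashes
 * the host copies and installs the guest copies in the real registers, and
 * put() does the reverse. (See also the comment in
 * kvm_s390_vcpu_store_status() about this lazy copying.)
 */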
void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	save_fp_regs(&vcpu->arch.host_fpregs);
	save_access_regs(vcpu->arch.host_acrs);
	vcpu->arch.guest_fpregs.fpc &= FPC_VALID_MASK;
	restore_fp_regs(&vcpu->arch.guest_fpregs);
	restore_access_regs(vcpu->run->s.regs.acrs);
	gmap_enable(vcpu->arch.gmap);
	atomic_set_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
}

void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
	atomic_clear_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
	gmap_disable(vcpu->arch.gmap);
	save_fp_regs(&vcpu->arch.guest_fpregs);
	save_access_regs(vcpu->run->s.regs.acrs);
	restore_fp_regs(&vcpu->arch.host_fpregs);
	restore_access_regs(vcpu->arch.host_acrs);
}

static void kvm_s390_vcpu_initial_reset(struct kvm_vcpu *vcpu)
{
	/* this equals initial cpu reset in pop, but we don't switch to ESA */
	vcpu->arch.sie_block->gpsw.mask = 0UL;
	vcpu->arch.sie_block->gpsw.addr = 0UL;
	kvm_s390_set_prefix(vcpu, 0);
	vcpu->arch.sie_block->cputm = 0UL;
	vcpu->arch.sie_block->ckc = 0UL;
	vcpu->arch.sie_block->todpr = 0;
	memset(vcpu->arch.sie_block->gcr, 0, 16 * sizeof(__u64));
	vcpu->arch.sie_block->gcr[0] = 0xE0UL;
	vcpu->arch.sie_block->gcr[14] = 0xC2000000UL;
	vcpu->arch.guest_fpregs.fpc = 0;
	asm volatile("lfpc %0" : : "Q" (vcpu->arch.guest_fpregs.fpc));
	vcpu->arch.sie_block->gbea = 1;
	atomic_set_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);
}

int kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
{
	return 0;
}

int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
{
	atomic_set(&vcpu->arch.sie_block->cpuflags, CPUSTAT_ZARCH |
						    CPUSTAT_SM |
						    CPUSTAT_STOPPED |
						    CPUSTAT_GED);
	vcpu->arch.sie_block->ecb = 6;
	vcpu->arch.sie_block->ecb2 = 8;
	vcpu->arch.sie_block->eca = 0xC1002001U;
	vcpu->arch.sie_block->fac = (int) (long) vfacilities;
	hrtimer_init(&vcpu->arch.ckc_timer, CLOCK_REALTIME, HRTIMER_MODE_ABS);
	tasklet_init(&vcpu->arch.tasklet, kvm_s390_tasklet,
		     (unsigned long) vcpu);
	vcpu->arch.ckc_timer.function = kvm_s390_idle_wakeup;
	get_cpu_id(&vcpu->arch.cpu_id);
	vcpu->arch.cpu_id.version = 0xff;
	return 0;
}

struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
				      unsigned int id)
{
	struct kvm_vcpu *vcpu;
	int rc = -EINVAL;

	if (id >= KVM_MAX_VCPUS)
		goto out;

	rc = -ENOMEM;

	vcpu = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
	if (!vcpu)
		goto out;

	vcpu->arch.sie_block = (struct kvm_s390_sie_block *)
			       get_zeroed_page(GFP_KERNEL);

	if (!vcpu->arch.sie_block)
		goto out_free_cpu;

	vcpu->arch.sie_block->icpua = id;
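	/*
	 * For non-ucontrol guests, hook this CPU's SIE block into the shared
	 * system control area (SCA): sda takes the block address, scaoh and
	 * scaol take the high and low halves of the SCA address, and the mcn
	 * bit marks the entry as in use (it is cleared again on destroy).
	 */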
	if (!kvm_is_ucontrol(kvm)) {
		if (!kvm->arch.sca) {
			WARN_ON_ONCE(1);
			goto out_free_cpu;
		}
		if (!kvm->arch.sca->cpu[id].sda)
			kvm->arch.sca->cpu[id].sda =
				(__u64) vcpu->arch.sie_block;
		vcpu->arch.sie_block->scaoh =
			(__u32)(((__u64)kvm->arch.sca) >> 32);
		vcpu->arch.sie_block->scaol = (__u32)(__u64)kvm->arch.sca;
		set_bit(63 - id, (unsigned long *) &kvm->arch.sca->mcn);
	}

	spin_lock_init(&vcpu->arch.local_int.lock);
	INIT_LIST_HEAD(&vcpu->arch.local_int.list);
	vcpu->arch.local_int.float_int = &kvm->arch.float_int;
	spin_lock(&kvm->arch.float_int.lock);
	kvm->arch.float_int.local_int[id] = &vcpu->arch.local_int;
	vcpu->arch.local_int.wq = &vcpu->wq;
	vcpu->arch.local_int.cpuflags = &vcpu->arch.sie_block->cpuflags;
	spin_unlock(&kvm->arch.float_int.lock);

	rc = kvm_vcpu_init(vcpu, kvm, id);
	if (rc)
		goto out_free_sie_block;
	VM_EVENT(kvm, 3, "create cpu %d at %p, sie block at %p", id, vcpu,
		 vcpu->arch.sie_block);
	trace_kvm_s390_create_vcpu(id, vcpu, vcpu->arch.sie_block);

	return vcpu;
out_free_sie_block:
	free_page((unsigned long)(vcpu->arch.sie_block));
out_free_cpu:
	kmem_cache_free(kvm_vcpu_cache, vcpu);
out:
	return ERR_PTR(rc);
}

int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
{
	/* kvm common code refers to this, but never calls it */
	BUG();
	return 0;
}

void s390_vcpu_block(struct kvm_vcpu *vcpu)
{
	atomic_set_mask(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
}

void s390_vcpu_unblock(struct kvm_vcpu *vcpu)
{
	atomic_clear_mask(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
}

/*
 * Kick a guest cpu out of SIE and wait until SIE is not running.
 * If the CPU is not running (e.g. waiting as idle) the function will
 * return immediately.
 */
void exit_sie(struct kvm_vcpu *vcpu)
{
	atomic_set_mask(CPUSTAT_STOP_INT, &vcpu->arch.sie_block->cpuflags);
	while (vcpu->arch.sie_block->prog0c & PROG_IN_SIE)
		cpu_relax();
}

/* Kick a guest cpu out of SIE and prevent SIE-reentry */
void exit_sie_sync(struct kvm_vcpu *vcpu)
{
	s390_vcpu_block(vcpu);
	exit_sie(vcpu);
}

static void kvm_gmap_notifier(struct gmap *gmap, unsigned long address)
{
	int i;
	struct kvm *kvm = gmap->private;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(i, vcpu, kvm) {
		/* match against both prefix pages */
		if (vcpu->arch.sie_block->prefix == (address & ~0x1000UL)) {
			VCPU_EVENT(vcpu, 2, "gmap notifier for %lx", address);
			kvm_make_request(KVM_REQ_MMU_RELOAD, vcpu);
			exit_sie_sync(vcpu);
		}
	}
}

int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
{
	/* kvm common code refers to this, but never calls it */
	BUG();
	return 0;
}

static int kvm_arch_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu,
					   struct kvm_one_reg *reg)
{
	int r = -EINVAL;

	switch (reg->id) {
	case KVM_REG_S390_TODPR:
		r = put_user(vcpu->arch.sie_block->todpr,
			     (u32 __user *)reg->addr);
		break;
	case KVM_REG_S390_EPOCHDIFF:
		r = put_user(vcpu->arch.sie_block->epoch,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_CPU_TIMER:
		r = put_user(vcpu->arch.sie_block->cputm,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_CLOCK_COMP:
		r = put_user(vcpu->arch.sie_block->ckc,
			     (u64 __user *)reg->addr);
		break;
	default:
		break;
	}

	return r;
}

static int kvm_arch_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu,
					   struct kvm_one_reg *reg)
{
	int r = -EINVAL;

	switch (reg->id) {
	case KVM_REG_S390_TODPR:
		r = get_user(vcpu->arch.sie_block->todpr,
			     (u32 __user *)reg->addr);
		break;
	case KVM_REG_S390_EPOCHDIFF:
		r = get_user(vcpu->arch.sie_block->epoch,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_CPU_TIMER:
		r = get_user(vcpu->arch.sie_block->cputm,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_CLOCK_COMP:
		r = get_user(vcpu->arch.sie_block->ckc,
			     (u64 __user *)reg->addr);
		break;
	default:
		break;
	}

	return r;
}

static int kvm_arch_vcpu_ioctl_initial_reset(struct kvm_vcpu *vcpu)
{
	kvm_s390_vcpu_initial_reset(vcpu);
	return 0;
}

int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	memcpy(&vcpu->run->s.regs.gprs, &regs->gprs, sizeof(regs->gprs));
	return 0;
}

int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	memcpy(&regs->gprs, &vcpu->run->s.regs.gprs, sizeof(regs->gprs));
	return 0;
}

int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	memcpy(&vcpu->run->s.regs.acrs, &sregs->acrs, sizeof(sregs->acrs));
	memcpy(&vcpu->arch.sie_block->gcr, &sregs->crs, sizeof(sregs->crs));
	restore_access_regs(vcpu->run->s.regs.acrs);
	return 0;
}

int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	memcpy(&sregs->acrs, &vcpu->run->s.regs.acrs, sizeof(sregs->acrs));
	memcpy(&sregs->crs, &vcpu->arch.sie_block->gcr, sizeof(sregs->crs));
	return 0;
}

int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	memcpy(&vcpu->arch.guest_fpregs.fprs, &fpu->fprs, sizeof(fpu->fprs));
	vcpu->arch.guest_fpregs.fpc = fpu->fpc & FPC_VALID_MASK;
	restore_fp_regs(&vcpu->arch.guest_fpregs);
	return 0;
}

int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	memcpy(&fpu->fprs, &vcpu->arch.guest_fpregs.fprs, sizeof(fpu->fprs));
	fpu->fpc = vcpu->arch.guest_fpregs.fpc;
	return 0;
}

static int kvm_arch_vcpu_ioctl_set_initial_psw(struct kvm_vcpu *vcpu, psw_t psw)
{
	int rc = 0;

	if (!(atomic_read(&vcpu->arch.sie_block->cpuflags) & CPUSTAT_STOPPED))
		rc = -EBUSY;
	else {
		vcpu->run->psw_mask = psw.mask;
		vcpu->run->psw_addr = psw.addr;
	}
	return rc;
}

int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
				  struct kvm_translation *tr)
{
	return -EINVAL; /* not implemented yet */
}

int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
					struct kvm_guest_debug *dbg)
{
	return -EINVAL; /* not implemented yet */
}

int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	return -EINVAL; /* not implemented yet */
}

int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	return -EINVAL; /* not implemented yet */
}

static int kvm_s390_handle_requests(struct kvm_vcpu *vcpu)
{
	/*
	 * We use MMU_RELOAD just to re-arm the ipte notifier for the
	 * guest prefix page. gmap_ipte_notify will wait on the ptl lock.
	 * This ensures that the ipte instruction for this request has
	 * already finished. We might race against a second unmapper that
	 * wants to set the blocking bit. Lets just retry the request loop.
	 */
	while (kvm_check_request(KVM_REQ_MMU_RELOAD, vcpu)) {
		int rc;
		rc = gmap_ipte_notify(vcpu->arch.gmap,
				      vcpu->arch.sie_block->prefix,
				      PAGE_SIZE * 2);
		if (rc)
			return rc;
		s390_vcpu_unblock(vcpu);
	}
	return 0;
}

static int vcpu_pre_run(struct kvm_vcpu *vcpu)
{
	int rc, cpuflags;

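	/* gg14/gg15 in the SIE block shadow guest gprs 14 and 15 (16 bytes) */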
	memcpy(&vcpu->arch.sie_block->gg14, &vcpu->run->s.regs.gprs[14], 16);

	if (need_resched())
		schedule();

	if (test_thread_flag(TIF_MCCK_PENDING))
		s390_handle_mcck();

	if (!kvm_is_ucontrol(vcpu->kvm))
		kvm_s390_deliver_pending_interrupts(vcpu);

	rc = kvm_s390_handle_requests(vcpu);
	if (rc)
		return rc;

	vcpu->arch.sie_block->icptcode = 0;
	cpuflags = atomic_read(&vcpu->arch.sie_block->cpuflags);
	VCPU_EVENT(vcpu, 6, "entering sie flags %x", cpuflags);
	trace_kvm_s390_sie_enter(vcpu, cpuflags);

	return 0;
}

static int vcpu_post_run(struct kvm_vcpu *vcpu, int exit_reason)
{
	int rc;

	VCPU_EVENT(vcpu, 6, "exit sie icptcode %d",
		   vcpu->arch.sie_block->icptcode);
	trace_kvm_s390_sie_exit(vcpu, vcpu->arch.sie_block->icptcode);

	if (exit_reason >= 0) {
		rc = 0;
	} else {
		if (kvm_is_ucontrol(vcpu->kvm)) {
			rc = SIE_INTERCEPT_UCONTROL;
		} else {
			VCPU_EVENT(vcpu, 3, "%s", "fault in sie instruction");
			trace_kvm_s390_sie_fault(vcpu);
			rc = kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
		}
	}

	memcpy(&vcpu->run->s.regs.gprs[14], &vcpu->arch.sie_block->gg14, 16);

	if (rc == 0) {
		if (kvm_is_ucontrol(vcpu->kvm))
			rc = -EOPNOTSUPP;
		else
			rc = kvm_handle_sie_intercept(vcpu);
	}

	return rc;
}

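/*
 * The vcpu run loop: prepare (reschedule, machine checks, pending
 * interrupts, requests), enter SIE, then handle the intercept; repeat
 * until an error or intercept code is returned or a signal is pending.
 */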
static int __vcpu_run(struct kvm_vcpu *vcpu)
{
	int rc, exit_reason;

	do {
		rc = vcpu_pre_run(vcpu);
		if (rc)
			break;

		/*
		 * As PF_VCPU will be used in fault handler, between
		 * guest_enter and guest_exit there should be no uaccess.
		 */
		preempt_disable();
		kvm_guest_enter();
		preempt_enable();
		exit_reason = sie64a(vcpu->arch.sie_block,
				     vcpu->run->s.regs.gprs);
		kvm_guest_exit();

		rc = vcpu_post_run(vcpu, exit_reason);
	} while (!signal_pending(current) && !rc);

	return rc;
}

int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	int rc;
	sigset_t sigsaved;

	if (vcpu->sigset_active)
		sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);

	atomic_clear_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);

	BUG_ON(vcpu->kvm->arch.float_int.local_int[vcpu->vcpu_id] == NULL);

	switch (kvm_run->exit_reason) {
	case KVM_EXIT_S390_SIEIC:
	case KVM_EXIT_UNKNOWN:
	case KVM_EXIT_INTR:
	case KVM_EXIT_S390_RESET:
	case KVM_EXIT_S390_UCONTROL:
	case KVM_EXIT_S390_TSCH:
		break;
	default:
		BUG();
	}

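	/* transfer state that userspace may have changed (kvm_run sync regs) */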
	vcpu->arch.sie_block->gpsw.mask = kvm_run->psw_mask;
	vcpu->arch.sie_block->gpsw.addr = kvm_run->psw_addr;
	if (kvm_run->kvm_dirty_regs & KVM_SYNC_PREFIX) {
		kvm_run->kvm_dirty_regs &= ~KVM_SYNC_PREFIX;
		kvm_s390_set_prefix(vcpu, kvm_run->s.regs.prefix);
	}
	if (kvm_run->kvm_dirty_regs & KVM_SYNC_CRS) {
		kvm_run->kvm_dirty_regs &= ~KVM_SYNC_CRS;
		memcpy(&vcpu->arch.sie_block->gcr, &kvm_run->s.regs.crs, 128);
		kvm_s390_set_prefix(vcpu, kvm_run->s.regs.prefix);
	}

	might_fault();
	rc = __vcpu_run(vcpu);

	if (signal_pending(current) && !rc) {
		kvm_run->exit_reason = KVM_EXIT_INTR;
		rc = -EINTR;
	}

#ifdef CONFIG_KVM_S390_UCONTROL
	if (rc == SIE_INTERCEPT_UCONTROL) {
		kvm_run->exit_reason = KVM_EXIT_S390_UCONTROL;
		kvm_run->s390_ucontrol.trans_exc_code =
			current->thread.gmap_addr;
		kvm_run->s390_ucontrol.pgm_code = 0x10;
		rc = 0;
	}
#endif

	if (rc == -EOPNOTSUPP) {
		/* intercept cannot be handled in-kernel, prepare kvm-run */
		kvm_run->exit_reason = KVM_EXIT_S390_SIEIC;
		kvm_run->s390_sieic.icptcode = vcpu->arch.sie_block->icptcode;
		kvm_run->s390_sieic.ipa = vcpu->arch.sie_block->ipa;
		kvm_run->s390_sieic.ipb = vcpu->arch.sie_block->ipb;
		rc = 0;
	}

	if (rc == -EREMOTE) {
		/* intercept was handled, but userspace support is needed;
		 * kvm_run has been prepared by the handler */
		rc = 0;
	}

	kvm_run->psw_mask = vcpu->arch.sie_block->gpsw.mask;
	kvm_run->psw_addr = vcpu->arch.sie_block->gpsw.addr;
	kvm_run->s.regs.prefix = vcpu->arch.sie_block->prefix;
	memcpy(&kvm_run->s.regs.crs, &vcpu->arch.sie_block->gcr, 128);

	if (vcpu->sigset_active)
		sigprocmask(SIG_SETMASK, &sigsaved, NULL);

	vcpu->stat.exit_userspace++;
	return rc;
}

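/*
 * Copy to the guest either through its prefixed address space or through
 * absolute addressing, depending on the prefix flag.
 */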
static int __guestcopy(struct kvm_vcpu *vcpu, u64 guestdest, void *from,
		       unsigned long n, int prefix)
{
	if (prefix)
		return copy_to_guest(vcpu, guestdest, from, n);
	else
		return copy_to_guest_absolute(vcpu, guestdest, from, n);
}

/*
 * store status at address
 * we have two special cases:
 * KVM_S390_STORE_STATUS_NOADDR: -> 0x1200 on 64 bit
 * KVM_S390_STORE_STATUS_PREFIXED: -> prefix
 */
int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
{
	unsigned char archmode = 1;
	int prefix;

	if (addr == KVM_S390_STORE_STATUS_NOADDR) {
		if (copy_to_guest_absolute(vcpu, 163ul, &archmode, 1))
			return -EFAULT;
		addr = SAVE_AREA_BASE;
		prefix = 0;
	} else if (addr == KVM_S390_STORE_STATUS_PREFIXED) {
		if (copy_to_guest(vcpu, 163ul, &archmode, 1))
			return -EFAULT;
		addr = SAVE_AREA_BASE;
		prefix = 1;
	} else
		prefix = 0;

	/*
	 * The guest FPRS and ACRS are in the host FPRS/ACRS due to the lazy
	 * copying in vcpu load/put. Lets update our copies before we save
	 * it into the save area
	 */
	save_fp_regs(&vcpu->arch.guest_fpregs);
	save_access_regs(vcpu->run->s.regs.acrs);

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, fp_regs),
			vcpu->arch.guest_fpregs.fprs, 128, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, gp_regs),
			vcpu->run->s.regs.gprs, 128, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, psw),
			&vcpu->arch.sie_block->gpsw, 16, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, pref_reg),
			&vcpu->arch.sie_block->prefix, 4, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu,
			addr + offsetof(struct save_area, fp_ctrl_reg),
			&vcpu->arch.guest_fpregs.fpc, 4, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, tod_reg),
			&vcpu->arch.sie_block->todpr, 4, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, timer),
			&vcpu->arch.sie_block->cputm, 8, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, clk_cmp),
			&vcpu->arch.sie_block->ckc, 8, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, acc_regs),
			&vcpu->run->s.regs.acrs, 64, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu,
			addr + offsetof(struct save_area, ctrl_regs),
			&vcpu->arch.sie_block->gcr, 128, prefix))
		return -EFAULT;
	return 0;
}

static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
				     struct kvm_enable_cap *cap)
{
	int r;

	if (cap->flags)
		return -EINVAL;

	switch (cap->cap) {
	case KVM_CAP_S390_CSS_SUPPORT:
		if (!vcpu->kvm->arch.css_support) {
			vcpu->kvm->arch.css_support = 1;
			trace_kvm_s390_enable_css(vcpu->kvm);
		}
		r = 0;
		break;
	default:
		r = -EINVAL;
		break;
	}
	return r;
}

long kvm_arch_vcpu_ioctl(struct file *filp,
			 unsigned int ioctl, unsigned long arg)
{
	struct kvm_vcpu *vcpu = filp->private_data;
	void __user *argp = (void __user *)arg;
	long r;

	switch (ioctl) {
	case KVM_S390_INTERRUPT: {
		struct kvm_s390_interrupt s390int;

		r = -EFAULT;
		if (copy_from_user(&s390int, argp, sizeof(s390int)))
			break;
		r = kvm_s390_inject_vcpu(vcpu, &s390int);
		break;
	}
	case KVM_S390_STORE_STATUS:
		r = kvm_s390_vcpu_store_status(vcpu, arg);
		break;
	case KVM_S390_SET_INITIAL_PSW: {
		psw_t psw;

		r = -EFAULT;
		if (copy_from_user(&psw, argp, sizeof(psw)))
			break;
		r = kvm_arch_vcpu_ioctl_set_initial_psw(vcpu, psw);
		break;
	}
	case KVM_S390_INITIAL_RESET:
		r = kvm_arch_vcpu_ioctl_initial_reset(vcpu);
		break;
	case KVM_SET_ONE_REG:
	case KVM_GET_ONE_REG: {
		struct kvm_one_reg reg;
		r = -EFAULT;
		if (copy_from_user(&reg, argp, sizeof(reg)))
			break;
		if (ioctl == KVM_SET_ONE_REG)
			r = kvm_arch_vcpu_ioctl_set_one_reg(vcpu, &reg);
		else
			r = kvm_arch_vcpu_ioctl_get_one_reg(vcpu, &reg);
		break;
	}
#ifdef CONFIG_KVM_S390_UCONTROL
	case KVM_S390_UCAS_MAP: {
		struct kvm_s390_ucas_mapping ucasmap;

		if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
			r = -EFAULT;
			break;
		}

		if (!kvm_is_ucontrol(vcpu->kvm)) {
			r = -EINVAL;
			break;
		}

		r = gmap_map_segment(vcpu->arch.gmap, ucasmap.user_addr,
				     ucasmap.vcpu_addr, ucasmap.length);
		break;
	}
	case KVM_S390_UCAS_UNMAP: {
		struct kvm_s390_ucas_mapping ucasmap;

		if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
			r = -EFAULT;
			break;
		}

		if (!kvm_is_ucontrol(vcpu->kvm)) {
			r = -EINVAL;
			break;
		}

		r = gmap_unmap_segment(vcpu->arch.gmap, ucasmap.vcpu_addr,
				       ucasmap.length);
		break;
	}
#endif
	case KVM_S390_VCPU_FAULT: {
		r = gmap_fault(arg, vcpu->arch.gmap);
		if (!IS_ERR_VALUE(r))
			r = 0;
		break;
	}
	case KVM_ENABLE_CAP:
	{
		struct kvm_enable_cap cap;
		r = -EFAULT;
		if (copy_from_user(&cap, argp, sizeof(cap)))
			break;
		r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap);
		break;
	}
	default:
		r = -ENOTTY;
	}
	return r;
}

int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
{
#ifdef CONFIG_KVM_S390_UCONTROL
	if ((vmf->pgoff == KVM_S390_SIE_PAGE_OFFSET)
	    && (kvm_is_ucontrol(vcpu->kvm))) {
		vmf->page = virt_to_page(vcpu->arch.sie_block);
		get_page(vmf->page);
		return 0;
	}
#endif
	return VM_FAULT_SIGBUS;
}

void kvm_arch_free_memslot(struct kvm_memory_slot *free,
			   struct kvm_memory_slot *dont)
{
}

int kvm_arch_create_memslot(struct kvm_memory_slot *slot, unsigned long npages)
{
	return 0;
}

void kvm_arch_memslots_updated(struct kvm *kvm)
{
}

/* Section: memory related */
int kvm_arch_prepare_memory_region(struct kvm *kvm,
				   struct kvm_memory_slot *memslot,
				   struct kvm_userspace_memory_region *mem,
				   enum kvm_mr_change change)
{
	/* A few sanity checks. We can have memory slots which have to be
	   located/ended at a segment boundary (1MB). The memory in userland is
	   ok to be fragmented into various different vmas. It is okay to mmap()
	   and munmap() stuff in this slot after doing this call at any time */

	if (mem->userspace_addr & 0xffffful)
		return -EINVAL;

	if (mem->memory_size & 0xffffful)
		return -EINVAL;

	return 0;
}

void kvm_arch_commit_memory_region(struct kvm *kvm,
				   struct kvm_userspace_memory_region *mem,
				   const struct kvm_memory_slot *old,
				   enum kvm_mr_change change)
{
	int rc;

	/* If the basics of the memslot do not change, we do not want
	 * to update the gmap. Every update causes several unnecessary
	 * segment translation exceptions. This is usually handled just
	 * fine by the normal fault handler + gmap, but it will also
	 * cause faults on the prefix page of running guest CPUs.
	 */
	if (old->userspace_addr == mem->userspace_addr &&
	    old->base_gfn * PAGE_SIZE == mem->guest_phys_addr &&
	    old->npages * PAGE_SIZE == mem->memory_size)
		return;

	rc = gmap_map_segment(kvm->arch.gmap, mem->userspace_addr,
			      mem->guest_phys_addr, mem->memory_size);
	if (rc)
		printk(KERN_WARNING "kvm-s390: failed to commit memory region\n");
	return;
}

void kvm_arch_flush_shadow_all(struct kvm *kvm)
{
}

void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
				   struct kvm_memory_slot *slot)
{
}

static int __init kvm_s390_init(void)
{
	int ret;

	ret = kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
	if (ret)
		return ret;

	/*
	 * guests can ask for up to 255+1 double words, we need a full page
	 * to hold the maximum amount of facilities. On the other hand, we
	 * only set facilities that are known to work in KVM.
	 */
	vfacilities = (unsigned long *) get_zeroed_page(GFP_KERNEL|GFP_DMA);
	if (!vfacilities) {
		kvm_exit();
		return -ENOMEM;
	}
	memcpy(vfacilities, S390_lowcore.stfle_fac_list, 16);
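	/*
	 * Mask the copied host facility list down as described in the
	 * comment above: only facilities known to work under KVM are
	 * left set and thus reported to guests.
	 */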
	vfacilities[0] &= 0xff82fff3f47c0000UL;
	vfacilities[1] &= 0x001c000000000000UL;
	return 0;
}

static void __exit kvm_s390_exit(void)
{
	free_page((unsigned long) vfacilities);
	kvm_exit();
}

module_init(kvm_s390_init);
module_exit(kvm_s390_exit);

/*
 * Enable autoloading of the kvm module.
 * Note that we add the module alias here instead of virt/kvm/kvm_main.c
 * since x86 takes a different approach.
 */
#include <linux/miscdevice.h>
MODULE_ALIAS_MISCDEV(KVM_MINOR);
MODULE_ALIAS("devname:kvm");