]> git.proxmox.com Git - mirror_ubuntu-hirsute-kernel.git/blame - arch/s390/kvm/kvm-s390.c
KVM: s390: fix task size check
[mirror_ubuntu-hirsute-kernel.git] / arch / s390 / kvm / kvm-s390.c
CommitLineData
b0c632db 1/*
a53c8fab 2 * hosting zSeries kernel virtual machines
b0c632db 3 *
a53c8fab 4 * Copyright IBM Corp. 2008, 2009
b0c632db
HC
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License (version 2 only)
8 * as published by the Free Software Foundation.
9 *
10 * Author(s): Carsten Otte <cotte@de.ibm.com>
11 * Christian Borntraeger <borntraeger@de.ibm.com>
12 * Heiko Carstens <heiko.carstens@de.ibm.com>
628eb9b8 13 * Christian Ehrhardt <ehrhardt@de.ibm.com>
b0c632db
HC
14 */
15
16#include <linux/compiler.h>
17#include <linux/err.h>
18#include <linux/fs.h>
ca872302 19#include <linux/hrtimer.h>
b0c632db
HC
20#include <linux/init.h>
21#include <linux/kvm.h>
22#include <linux/kvm_host.h>
23#include <linux/module.h>
24#include <linux/slab.h>
ba5c1e9b 25#include <linux/timer.h>
cbb870c8 26#include <asm/asm-offsets.h>
b0c632db
HC
27#include <asm/lowcore.h>
28#include <asm/pgtable.h>
f5daba1d 29#include <asm/nmi.h>
a0616cde 30#include <asm/switch_to.h>
1526bf9c 31#include <asm/sclp.h>
8f2abe6a 32#include "kvm-s390.h"
b0c632db
HC
33#include "gaccess.h"
34
5786fffa
CH
35#define CREATE_TRACE_POINTS
36#include "trace.h"
ade38c31 37#include "trace-s390.h"
5786fffa 38
b0c632db
HC
39#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU
40
41struct kvm_stats_debugfs_item debugfs_entries[] = {
42 { "userspace_handled", VCPU_STAT(exit_userspace) },
0eaeafa1 43 { "exit_null", VCPU_STAT(exit_null) },
8f2abe6a
CB
44 { "exit_validity", VCPU_STAT(exit_validity) },
45 { "exit_stop_request", VCPU_STAT(exit_stop_request) },
46 { "exit_external_request", VCPU_STAT(exit_external_request) },
47 { "exit_external_interrupt", VCPU_STAT(exit_external_interrupt) },
ba5c1e9b
CO
48 { "exit_instruction", VCPU_STAT(exit_instruction) },
49 { "exit_program_interruption", VCPU_STAT(exit_program_interruption) },
50 { "exit_instr_and_program_int", VCPU_STAT(exit_instr_and_program) },
f5e10b09 51 { "instruction_lctlg", VCPU_STAT(instruction_lctlg) },
ba5c1e9b
CO
52 { "instruction_lctl", VCPU_STAT(instruction_lctl) },
53 { "deliver_emergency_signal", VCPU_STAT(deliver_emergency_signal) },
7697e71f 54 { "deliver_external_call", VCPU_STAT(deliver_external_call) },
ba5c1e9b
CO
55 { "deliver_service_signal", VCPU_STAT(deliver_service_signal) },
56 { "deliver_virtio_interrupt", VCPU_STAT(deliver_virtio_interrupt) },
57 { "deliver_stop_signal", VCPU_STAT(deliver_stop_signal) },
58 { "deliver_prefix_signal", VCPU_STAT(deliver_prefix_signal) },
59 { "deliver_restart_signal", VCPU_STAT(deliver_restart_signal) },
60 { "deliver_program_interruption", VCPU_STAT(deliver_program_int) },
61 { "exit_wait_state", VCPU_STAT(exit_wait_state) },
69d0d3a3 62 { "instruction_pfmf", VCPU_STAT(instruction_pfmf) },
453423dc
CB
63 { "instruction_stidp", VCPU_STAT(instruction_stidp) },
64 { "instruction_spx", VCPU_STAT(instruction_spx) },
65 { "instruction_stpx", VCPU_STAT(instruction_stpx) },
66 { "instruction_stap", VCPU_STAT(instruction_stap) },
67 { "instruction_storage_key", VCPU_STAT(instruction_storage_key) },
68 { "instruction_stsch", VCPU_STAT(instruction_stsch) },
69 { "instruction_chsc", VCPU_STAT(instruction_chsc) },
70 { "instruction_stsi", VCPU_STAT(instruction_stsi) },
71 { "instruction_stfl", VCPU_STAT(instruction_stfl) },
bb25b9ba 72 { "instruction_tprot", VCPU_STAT(instruction_tprot) },
5288fbf0 73 { "instruction_sigp_sense", VCPU_STAT(instruction_sigp_sense) },
bd59d3a4 74 { "instruction_sigp_sense_running", VCPU_STAT(instruction_sigp_sense_running) },
7697e71f 75 { "instruction_sigp_external_call", VCPU_STAT(instruction_sigp_external_call) },
5288fbf0
CB
76 { "instruction_sigp_emergency", VCPU_STAT(instruction_sigp_emergency) },
77 { "instruction_sigp_stop", VCPU_STAT(instruction_sigp_stop) },
78 { "instruction_sigp_set_arch", VCPU_STAT(instruction_sigp_arch) },
79 { "instruction_sigp_set_prefix", VCPU_STAT(instruction_sigp_prefix) },
80 { "instruction_sigp_restart", VCPU_STAT(instruction_sigp_restart) },
388186bc 81 { "diagnose_10", VCPU_STAT(diagnose_10) },
e28acfea 82 { "diagnose_44", VCPU_STAT(diagnose_44) },
41628d33 83 { "diagnose_9c", VCPU_STAT(diagnose_9c) },
b0c632db
HC
84 { NULL }
85};
86
ef50f7ac 87static unsigned long long *facilities;
2c70fe44 88static struct gmap_notifier gmap_notifier;
b0c632db
HC
89
90/* Section: not file related */
10474ae8 91int kvm_arch_hardware_enable(void *garbage)
b0c632db
HC
92{
93 /* every s390 is virtualization enabled ;-) */
10474ae8 94 return 0;
b0c632db
HC
95}
96
97void kvm_arch_hardware_disable(void *garbage)
98{
99}
100
2c70fe44
CB
101static void kvm_gmap_notifier(struct gmap *gmap, unsigned long address);
102
b0c632db
HC
103int kvm_arch_hardware_setup(void)
104{
2c70fe44
CB
105 gmap_notifier.notifier_call = kvm_gmap_notifier;
106 gmap_register_ipte_notifier(&gmap_notifier);
b0c632db
HC
107 return 0;
108}
109
110void kvm_arch_hardware_unsetup(void)
111{
2c70fe44 112 gmap_unregister_ipte_notifier(&gmap_notifier);
b0c632db
HC
113}
114
115void kvm_arch_check_processor_compat(void *rtn)
116{
117}
118
119int kvm_arch_init(void *opaque)
120{
121 return 0;
122}
123
124void kvm_arch_exit(void)
125{
126}
127
128/* Section: device related */
129long kvm_arch_dev_ioctl(struct file *filp,
130 unsigned int ioctl, unsigned long arg)
131{
132 if (ioctl == KVM_S390_ENABLE_SIE)
133 return s390_enable_sie();
134 return -EINVAL;
135}
136
137int kvm_dev_ioctl_check_extension(long ext)
138{
d7b0b5eb
CO
139 int r;
140
2bd0ac4e 141 switch (ext) {
d7b0b5eb 142 case KVM_CAP_S390_PSW:
b6cf8788 143 case KVM_CAP_S390_GMAP:
52e16b18 144 case KVM_CAP_SYNC_MMU:
1efd0f59
CO
145#ifdef CONFIG_KVM_S390_UCONTROL
146 case KVM_CAP_S390_UCONTROL:
147#endif
60b413c9 148 case KVM_CAP_SYNC_REGS:
14eebd91 149 case KVM_CAP_ONE_REG:
d6712df9 150 case KVM_CAP_ENABLE_CAP:
fa6b7fe9 151 case KVM_CAP_S390_CSS_SUPPORT:
10ccaa1e 152 case KVM_CAP_IOEVENTFD:
d7b0b5eb
CO
153 r = 1;
154 break;
e726b1bd
CB
155 case KVM_CAP_NR_VCPUS:
156 case KVM_CAP_MAX_VCPUS:
157 r = KVM_MAX_VCPUS;
158 break;
e1e2e605
NW
159 case KVM_CAP_NR_MEMSLOTS:
160 r = KVM_USER_MEM_SLOTS;
161 break;
1526bf9c 162 case KVM_CAP_S390_COW:
abf09bed 163 r = MACHINE_HAS_ESOP;
1526bf9c 164 break;
2bd0ac4e 165 default:
d7b0b5eb 166 r = 0;
2bd0ac4e 167 }
d7b0b5eb 168 return r;
b0c632db
HC
169}
170
171/* Section: vm related */
172/*
173 * Get (and clear) the dirty memory log for a memory slot.
174 */
175int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
176 struct kvm_dirty_log *log)
177{
178 return 0;
179}
180
181long kvm_arch_vm_ioctl(struct file *filp,
182 unsigned int ioctl, unsigned long arg)
183{
184 struct kvm *kvm = filp->private_data;
185 void __user *argp = (void __user *)arg;
186 int r;
187
188 switch (ioctl) {
ba5c1e9b
CO
189 case KVM_S390_INTERRUPT: {
190 struct kvm_s390_interrupt s390int;
191
192 r = -EFAULT;
193 if (copy_from_user(&s390int, argp, sizeof(s390int)))
194 break;
195 r = kvm_s390_inject_vm(kvm, &s390int);
196 break;
197 }
b0c632db 198 default:
367e1319 199 r = -ENOTTY;
b0c632db
HC
200 }
201
202 return r;
203}
204
e08b9637 205int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
b0c632db 206{
b0c632db
HC
207 int rc;
208 char debug_name[16];
209
e08b9637
CO
210 rc = -EINVAL;
211#ifdef CONFIG_KVM_S390_UCONTROL
212 if (type & ~KVM_VM_S390_UCONTROL)
213 goto out_err;
214 if ((type & KVM_VM_S390_UCONTROL) && (!capable(CAP_SYS_ADMIN)))
215 goto out_err;
216#else
217 if (type)
218 goto out_err;
219#endif
220
b0c632db
HC
221 rc = s390_enable_sie();
222 if (rc)
d89f5eff 223 goto out_err;
b0c632db 224
b290411a
CO
225 rc = -ENOMEM;
226
b0c632db
HC
227 kvm->arch.sca = (struct sca_block *) get_zeroed_page(GFP_KERNEL);
228 if (!kvm->arch.sca)
d89f5eff 229 goto out_err;
b0c632db
HC
230
231 sprintf(debug_name, "kvm-%u", current->pid);
232
233 kvm->arch.dbf = debug_register(debug_name, 8, 2, 8 * sizeof(long));
234 if (!kvm->arch.dbf)
235 goto out_nodbf;
236
ba5c1e9b
CO
237 spin_lock_init(&kvm->arch.float_int.lock);
238 INIT_LIST_HEAD(&kvm->arch.float_int.list);
239
b0c632db
HC
240 debug_register_view(kvm->arch.dbf, &debug_sprintf_view);
241 VM_EVENT(kvm, 3, "%s", "vm created");
242
e08b9637
CO
243 if (type & KVM_VM_S390_UCONTROL) {
244 kvm->arch.gmap = NULL;
245 } else {
246 kvm->arch.gmap = gmap_alloc(current->mm);
247 if (!kvm->arch.gmap)
248 goto out_nogmap;
2c70fe44 249 kvm->arch.gmap->private = kvm;
e08b9637 250 }
fa6b7fe9
CH
251
252 kvm->arch.css_support = 0;
253
d89f5eff 254 return 0;
598841ca
CO
255out_nogmap:
256 debug_unregister(kvm->arch.dbf);
b0c632db
HC
257out_nodbf:
258 free_page((unsigned long)(kvm->arch.sca));
d89f5eff
JK
259out_err:
260 return rc;
b0c632db
HC
261}
262
d329c035
CB
263void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
264{
265 VCPU_EVENT(vcpu, 3, "%s", "free cpu");
ade38c31 266 trace_kvm_s390_destroy_vcpu(vcpu->vcpu_id);
58f9460b
CO
267 if (!kvm_is_ucontrol(vcpu->kvm)) {
268 clear_bit(63 - vcpu->vcpu_id,
269 (unsigned long *) &vcpu->kvm->arch.sca->mcn);
270 if (vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda ==
271 (__u64) vcpu->arch.sie_block)
272 vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda = 0;
273 }
abf4a71e 274 smp_mb();
27e0393f
CO
275
276 if (kvm_is_ucontrol(vcpu->kvm))
277 gmap_free(vcpu->arch.gmap);
278
d329c035 279 free_page((unsigned long)(vcpu->arch.sie_block));
6692cef3 280 kvm_vcpu_uninit(vcpu);
b110feaf 281 kmem_cache_free(kvm_vcpu_cache, vcpu);
d329c035
CB
282}
283
284static void kvm_free_vcpus(struct kvm *kvm)
285{
286 unsigned int i;
988a2cae 287 struct kvm_vcpu *vcpu;
d329c035 288
988a2cae
GN
289 kvm_for_each_vcpu(i, vcpu, kvm)
290 kvm_arch_vcpu_destroy(vcpu);
291
292 mutex_lock(&kvm->lock);
293 for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
294 kvm->vcpus[i] = NULL;
295
296 atomic_set(&kvm->online_vcpus, 0);
297 mutex_unlock(&kvm->lock);
d329c035
CB
298}
299
ad8ba2cd
SY
300void kvm_arch_sync_events(struct kvm *kvm)
301{
302}
303
b0c632db
HC
304void kvm_arch_destroy_vm(struct kvm *kvm)
305{
d329c035 306 kvm_free_vcpus(kvm);
b0c632db 307 free_page((unsigned long)(kvm->arch.sca));
d329c035 308 debug_unregister(kvm->arch.dbf);
27e0393f
CO
309 if (!kvm_is_ucontrol(kvm))
310 gmap_free(kvm->arch.gmap);
b0c632db
HC
311}
312
313/* Section: vcpu related */
314int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
315{
27e0393f
CO
316 if (kvm_is_ucontrol(vcpu->kvm)) {
317 vcpu->arch.gmap = gmap_alloc(current->mm);
318 if (!vcpu->arch.gmap)
319 return -ENOMEM;
2c70fe44 320 vcpu->arch.gmap->private = vcpu->kvm;
27e0393f
CO
321 return 0;
322 }
323
598841ca 324 vcpu->arch.gmap = vcpu->kvm->arch.gmap;
59674c1a
CB
325 vcpu->run->kvm_valid_regs = KVM_SYNC_PREFIX |
326 KVM_SYNC_GPRS |
9eed0735
CB
327 KVM_SYNC_ACRS |
328 KVM_SYNC_CRS;
b0c632db
HC
329 return 0;
330}
331
332void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
333{
6692cef3 334 /* Nothing todo */
b0c632db
HC
335}
336
337void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
338{
339 save_fp_regs(&vcpu->arch.host_fpregs);
340 save_access_regs(vcpu->arch.host_acrs);
341 vcpu->arch.guest_fpregs.fpc &= FPC_VALID_MASK;
342 restore_fp_regs(&vcpu->arch.guest_fpregs);
59674c1a 343 restore_access_regs(vcpu->run->s.regs.acrs);
480e5926 344 gmap_enable(vcpu->arch.gmap);
9e6dabef 345 atomic_set_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
b0c632db
HC
346}
347
348void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
349{
9e6dabef 350 atomic_clear_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
480e5926 351 gmap_disable(vcpu->arch.gmap);
b0c632db 352 save_fp_regs(&vcpu->arch.guest_fpregs);
59674c1a 353 save_access_regs(vcpu->run->s.regs.acrs);
b0c632db
HC
354 restore_fp_regs(&vcpu->arch.host_fpregs);
355 restore_access_regs(vcpu->arch.host_acrs);
356}
357
358static void kvm_s390_vcpu_initial_reset(struct kvm_vcpu *vcpu)
359{
360 /* this equals initial cpu reset in pop, but we don't switch to ESA */
361 vcpu->arch.sie_block->gpsw.mask = 0UL;
362 vcpu->arch.sie_block->gpsw.addr = 0UL;
8d26cf7b 363 kvm_s390_set_prefix(vcpu, 0);
b0c632db
HC
364 vcpu->arch.sie_block->cputm = 0UL;
365 vcpu->arch.sie_block->ckc = 0UL;
366 vcpu->arch.sie_block->todpr = 0;
367 memset(vcpu->arch.sie_block->gcr, 0, 16 * sizeof(__u64));
368 vcpu->arch.sie_block->gcr[0] = 0xE0UL;
369 vcpu->arch.sie_block->gcr[14] = 0xC2000000UL;
370 vcpu->arch.guest_fpregs.fpc = 0;
371 asm volatile("lfpc %0" : : "Q" (vcpu->arch.guest_fpregs.fpc));
372 vcpu->arch.sie_block->gbea = 1;
61bde82c 373 atomic_set_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);
b0c632db
HC
374}
375
42897d86
MT
376int kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
377{
378 return 0;
379}
380
b0c632db
HC
381int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
382{
9e6dabef
CH
383 atomic_set(&vcpu->arch.sie_block->cpuflags, CPUSTAT_ZARCH |
384 CPUSTAT_SM |
69d0d3a3
CB
385 CPUSTAT_STOPPED |
386 CPUSTAT_GED);
fc34531d 387 vcpu->arch.sie_block->ecb = 6;
69d0d3a3 388 vcpu->arch.sie_block->ecb2 = 8;
b0c632db 389 vcpu->arch.sie_block->eca = 0xC1002001U;
ef50f7ac 390 vcpu->arch.sie_block->fac = (int) (long) facilities;
ca872302
CB
391 hrtimer_init(&vcpu->arch.ckc_timer, CLOCK_REALTIME, HRTIMER_MODE_ABS);
392 tasklet_init(&vcpu->arch.tasklet, kvm_s390_tasklet,
393 (unsigned long) vcpu);
394 vcpu->arch.ckc_timer.function = kvm_s390_idle_wakeup;
453423dc 395 get_cpu_id(&vcpu->arch.cpu_id);
92e6ecf3 396 vcpu->arch.cpu_id.version = 0xff;
b0c632db
HC
397 return 0;
398}
399
400struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
401 unsigned int id)
402{
4d47555a
CO
403 struct kvm_vcpu *vcpu;
404 int rc = -EINVAL;
405
406 if (id >= KVM_MAX_VCPUS)
407 goto out;
408
409 rc = -ENOMEM;
b0c632db 410
b110feaf 411 vcpu = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
b0c632db 412 if (!vcpu)
4d47555a 413 goto out;
b0c632db 414
180c12fb
CB
415 vcpu->arch.sie_block = (struct kvm_s390_sie_block *)
416 get_zeroed_page(GFP_KERNEL);
b0c632db
HC
417
418 if (!vcpu->arch.sie_block)
419 goto out_free_cpu;
420
421 vcpu->arch.sie_block->icpua = id;
58f9460b
CO
422 if (!kvm_is_ucontrol(kvm)) {
423 if (!kvm->arch.sca) {
424 WARN_ON_ONCE(1);
425 goto out_free_cpu;
426 }
427 if (!kvm->arch.sca->cpu[id].sda)
428 kvm->arch.sca->cpu[id].sda =
429 (__u64) vcpu->arch.sie_block;
430 vcpu->arch.sie_block->scaoh =
431 (__u32)(((__u64)kvm->arch.sca) >> 32);
432 vcpu->arch.sie_block->scaol = (__u32)(__u64)kvm->arch.sca;
433 set_bit(63 - id, (unsigned long *) &kvm->arch.sca->mcn);
434 }
b0c632db 435
ba5c1e9b
CO
436 spin_lock_init(&vcpu->arch.local_int.lock);
437 INIT_LIST_HEAD(&vcpu->arch.local_int.list);
438 vcpu->arch.local_int.float_int = &kvm->arch.float_int;
b037a4f3 439 spin_lock(&kvm->arch.float_int.lock);
ba5c1e9b 440 kvm->arch.float_int.local_int[id] = &vcpu->arch.local_int;
d0321a24 441 vcpu->arch.local_int.wq = &vcpu->wq;
5288fbf0 442 vcpu->arch.local_int.cpuflags = &vcpu->arch.sie_block->cpuflags;
b037a4f3 443 spin_unlock(&kvm->arch.float_int.lock);
ba5c1e9b 444
b0c632db
HC
445 rc = kvm_vcpu_init(vcpu, kvm, id);
446 if (rc)
7b06bf2f 447 goto out_free_sie_block;
b0c632db
HC
448 VM_EVENT(kvm, 3, "create cpu %d at %p, sie block at %p", id, vcpu,
449 vcpu->arch.sie_block);
ade38c31 450 trace_kvm_s390_create_vcpu(id, vcpu, vcpu->arch.sie_block);
b0c632db 451
b0c632db 452 return vcpu;
7b06bf2f
WY
453out_free_sie_block:
454 free_page((unsigned long)(vcpu->arch.sie_block));
b0c632db 455out_free_cpu:
b110feaf 456 kmem_cache_free(kvm_vcpu_cache, vcpu);
4d47555a 457out:
b0c632db
HC
458 return ERR_PTR(rc);
459}
460
b0c632db
HC
461int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
462{
463 /* kvm common code refers to this, but never calls it */
464 BUG();
465 return 0;
466}
467
49b99e1e
CB
468void s390_vcpu_block(struct kvm_vcpu *vcpu)
469{
470 atomic_set_mask(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
471}
472
473void s390_vcpu_unblock(struct kvm_vcpu *vcpu)
474{
475 atomic_clear_mask(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
476}
477
478/*
479 * Kick a guest cpu out of SIE and wait until SIE is not running.
480 * If the CPU is not running (e.g. waiting as idle) the function will
481 * return immediately. */
482void exit_sie(struct kvm_vcpu *vcpu)
483{
484 atomic_set_mask(CPUSTAT_STOP_INT, &vcpu->arch.sie_block->cpuflags);
485 while (vcpu->arch.sie_block->prog0c & PROG_IN_SIE)
486 cpu_relax();
487}
488
489/* Kick a guest cpu out of SIE and prevent SIE-reentry */
490void exit_sie_sync(struct kvm_vcpu *vcpu)
491{
492 s390_vcpu_block(vcpu);
493 exit_sie(vcpu);
494}
495
2c70fe44
CB
496static void kvm_gmap_notifier(struct gmap *gmap, unsigned long address)
497{
498 int i;
499 struct kvm *kvm = gmap->private;
500 struct kvm_vcpu *vcpu;
501
502 kvm_for_each_vcpu(i, vcpu, kvm) {
503 /* match against both prefix pages */
504 if (vcpu->arch.sie_block->prefix == (address & ~0x1000UL)) {
505 VCPU_EVENT(vcpu, 2, "gmap notifier for %lx", address);
506 kvm_make_request(KVM_REQ_MMU_RELOAD, vcpu);
507 exit_sie_sync(vcpu);
508 }
509 }
510}
511
b6d33834
CD
512int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
513{
514 /* kvm common code refers to this, but never calls it */
515 BUG();
516 return 0;
517}
518
14eebd91
CO
519static int kvm_arch_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu,
520 struct kvm_one_reg *reg)
521{
522 int r = -EINVAL;
523
524 switch (reg->id) {
29b7c71b
CO
525 case KVM_REG_S390_TODPR:
526 r = put_user(vcpu->arch.sie_block->todpr,
527 (u32 __user *)reg->addr);
528 break;
529 case KVM_REG_S390_EPOCHDIFF:
530 r = put_user(vcpu->arch.sie_block->epoch,
531 (u64 __user *)reg->addr);
532 break;
46a6dd1c
J
533 case KVM_REG_S390_CPU_TIMER:
534 r = put_user(vcpu->arch.sie_block->cputm,
535 (u64 __user *)reg->addr);
536 break;
537 case KVM_REG_S390_CLOCK_COMP:
538 r = put_user(vcpu->arch.sie_block->ckc,
539 (u64 __user *)reg->addr);
540 break;
14eebd91
CO
541 default:
542 break;
543 }
544
545 return r;
546}
547
548static int kvm_arch_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu,
549 struct kvm_one_reg *reg)
550{
551 int r = -EINVAL;
552
553 switch (reg->id) {
29b7c71b
CO
554 case KVM_REG_S390_TODPR:
555 r = get_user(vcpu->arch.sie_block->todpr,
556 (u32 __user *)reg->addr);
557 break;
558 case KVM_REG_S390_EPOCHDIFF:
559 r = get_user(vcpu->arch.sie_block->epoch,
560 (u64 __user *)reg->addr);
561 break;
46a6dd1c
J
562 case KVM_REG_S390_CPU_TIMER:
563 r = get_user(vcpu->arch.sie_block->cputm,
564 (u64 __user *)reg->addr);
565 break;
566 case KVM_REG_S390_CLOCK_COMP:
567 r = get_user(vcpu->arch.sie_block->ckc,
568 (u64 __user *)reg->addr);
569 break;
14eebd91
CO
570 default:
571 break;
572 }
573
574 return r;
575}
b6d33834 576
b0c632db
HC
577static int kvm_arch_vcpu_ioctl_initial_reset(struct kvm_vcpu *vcpu)
578{
b0c632db 579 kvm_s390_vcpu_initial_reset(vcpu);
b0c632db
HC
580 return 0;
581}
582
583int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
584{
5a32c1af 585 memcpy(&vcpu->run->s.regs.gprs, &regs->gprs, sizeof(regs->gprs));
b0c632db
HC
586 return 0;
587}
588
589int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
590{
5a32c1af 591 memcpy(&regs->gprs, &vcpu->run->s.regs.gprs, sizeof(regs->gprs));
b0c632db
HC
592 return 0;
593}
594
595int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
596 struct kvm_sregs *sregs)
597{
59674c1a 598 memcpy(&vcpu->run->s.regs.acrs, &sregs->acrs, sizeof(sregs->acrs));
b0c632db 599 memcpy(&vcpu->arch.sie_block->gcr, &sregs->crs, sizeof(sregs->crs));
59674c1a 600 restore_access_regs(vcpu->run->s.regs.acrs);
b0c632db
HC
601 return 0;
602}
603
604int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
605 struct kvm_sregs *sregs)
606{
59674c1a 607 memcpy(&sregs->acrs, &vcpu->run->s.regs.acrs, sizeof(sregs->acrs));
b0c632db 608 memcpy(&sregs->crs, &vcpu->arch.sie_block->gcr, sizeof(sregs->crs));
b0c632db
HC
609 return 0;
610}
611
612int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
613{
b0c632db 614 memcpy(&vcpu->arch.guest_fpregs.fprs, &fpu->fprs, sizeof(fpu->fprs));
85175587 615 vcpu->arch.guest_fpregs.fpc = fpu->fpc & FPC_VALID_MASK;
7eef87dc 616 restore_fp_regs(&vcpu->arch.guest_fpregs);
b0c632db
HC
617 return 0;
618}
619
620int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
621{
b0c632db
HC
622 memcpy(&fpu->fprs, &vcpu->arch.guest_fpregs.fprs, sizeof(fpu->fprs));
623 fpu->fpc = vcpu->arch.guest_fpregs.fpc;
b0c632db
HC
624 return 0;
625}
626
627static int kvm_arch_vcpu_ioctl_set_initial_psw(struct kvm_vcpu *vcpu, psw_t psw)
628{
629 int rc = 0;
630
9e6dabef 631 if (!(atomic_read(&vcpu->arch.sie_block->cpuflags) & CPUSTAT_STOPPED))
b0c632db 632 rc = -EBUSY;
d7b0b5eb
CO
633 else {
634 vcpu->run->psw_mask = psw.mask;
635 vcpu->run->psw_addr = psw.addr;
636 }
b0c632db
HC
637 return rc;
638}
639
640int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
641 struct kvm_translation *tr)
642{
643 return -EINVAL; /* not implemented yet */
644}
645
d0bfb940
JK
646int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
647 struct kvm_guest_debug *dbg)
b0c632db
HC
648{
649 return -EINVAL; /* not implemented yet */
650}
651
62d9f0db
MT
652int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
653 struct kvm_mp_state *mp_state)
654{
655 return -EINVAL; /* not implemented yet */
656}
657
658int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
659 struct kvm_mp_state *mp_state)
660{
661 return -EINVAL; /* not implemented yet */
662}
663
2c70fe44
CB
664static int kvm_s390_handle_requests(struct kvm_vcpu *vcpu)
665{
666 /*
667 * We use MMU_RELOAD just to re-arm the ipte notifier for the
668 * guest prefix page. gmap_ipte_notify will wait on the ptl lock.
669 * This ensures that the ipte instruction for this request has
670 * already finished. We might race against a second unmapper that
671 * wants to set the blocking bit. Lets just retry the request loop.
672 */
673 while (kvm_check_request(KVM_REQ_MMU_RELOAD, vcpu)) {
674 int rc;
675 rc = gmap_ipte_notify(vcpu->arch.gmap,
676 vcpu->arch.sie_block->prefix,
677 PAGE_SIZE * 2);
678 if (rc)
679 return rc;
680 s390_vcpu_unblock(vcpu);
681 }
682 return 0;
683}
684
e168bf8d 685static int __vcpu_run(struct kvm_vcpu *vcpu)
b0c632db 686{
e168bf8d
CO
687 int rc;
688
5a32c1af 689 memcpy(&vcpu->arch.sie_block->gg14, &vcpu->run->s.regs.gprs[14], 16);
b0c632db
HC
690
691 if (need_resched())
692 schedule();
693
71cde587
CB
694 if (test_thread_flag(TIF_MCCK_PENDING))
695 s390_handle_mcck();
696
d6b6d166
CO
697 if (!kvm_is_ucontrol(vcpu->kvm))
698 kvm_s390_deliver_pending_interrupts(vcpu);
0ff31867 699
2c70fe44
CB
700 rc = kvm_s390_handle_requests(vcpu);
701 if (rc)
702 return rc;
703
b0c632db 704 vcpu->arch.sie_block->icptcode = 0;
83987ace 705 preempt_disable();
b0c632db 706 kvm_guest_enter();
83987ace 707 preempt_enable();
b0c632db
HC
708 VCPU_EVENT(vcpu, 6, "entering sie flags %x",
709 atomic_read(&vcpu->arch.sie_block->cpuflags));
5786fffa
CH
710 trace_kvm_s390_sie_enter(vcpu,
711 atomic_read(&vcpu->arch.sie_block->cpuflags));
5a32c1af 712 rc = sie64a(vcpu->arch.sie_block, vcpu->run->s.regs.gprs);
7c470539
MS
713 if (rc > 0)
714 rc = 0;
715 if (rc < 0) {
e168bf8d
CO
716 if (kvm_is_ucontrol(vcpu->kvm)) {
717 rc = SIE_INTERCEPT_UCONTROL;
718 } else {
719 VCPU_EVENT(vcpu, 3, "%s", "fault in sie instruction");
5786fffa 720 trace_kvm_s390_sie_fault(vcpu);
db4a29cb 721 rc = kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
e168bf8d 722 }
1f0d0f09 723 }
b0c632db
HC
724 VCPU_EVENT(vcpu, 6, "exit sie icptcode %d",
725 vcpu->arch.sie_block->icptcode);
5786fffa 726 trace_kvm_s390_sie_exit(vcpu, vcpu->arch.sie_block->icptcode);
b0c632db 727 kvm_guest_exit();
b0c632db 728
5a32c1af 729 memcpy(&vcpu->run->s.regs.gprs[14], &vcpu->arch.sie_block->gg14, 16);
e168bf8d 730 return rc;
b0c632db
HC
731}
732
733int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
734{
8f2abe6a 735 int rc;
b0c632db
HC
736 sigset_t sigsaved;
737
9ace903d 738rerun_vcpu:
b0c632db
HC
739 if (vcpu->sigset_active)
740 sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);
741
9e6dabef 742 atomic_clear_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);
b0c632db 743
ba5c1e9b
CO
744 BUG_ON(vcpu->kvm->arch.float_int.local_int[vcpu->vcpu_id] == NULL);
745
8f2abe6a
CB
746 switch (kvm_run->exit_reason) {
747 case KVM_EXIT_S390_SIEIC:
8f2abe6a 748 case KVM_EXIT_UNKNOWN:
9ace903d 749 case KVM_EXIT_INTR:
8f2abe6a 750 case KVM_EXIT_S390_RESET:
e168bf8d 751 case KVM_EXIT_S390_UCONTROL:
fa6b7fe9 752 case KVM_EXIT_S390_TSCH:
8f2abe6a
CB
753 break;
754 default:
755 BUG();
756 }
757
d7b0b5eb
CO
758 vcpu->arch.sie_block->gpsw.mask = kvm_run->psw_mask;
759 vcpu->arch.sie_block->gpsw.addr = kvm_run->psw_addr;
60b413c9
CB
760 if (kvm_run->kvm_dirty_regs & KVM_SYNC_PREFIX) {
761 kvm_run->kvm_dirty_regs &= ~KVM_SYNC_PREFIX;
762 kvm_s390_set_prefix(vcpu, kvm_run->s.regs.prefix);
763 }
9eed0735
CB
764 if (kvm_run->kvm_dirty_regs & KVM_SYNC_CRS) {
765 kvm_run->kvm_dirty_regs &= ~KVM_SYNC_CRS;
766 memcpy(&vcpu->arch.sie_block->gcr, &kvm_run->s.regs.crs, 128);
767 kvm_s390_set_prefix(vcpu, kvm_run->s.regs.prefix);
768 }
d7b0b5eb 769
dab4079d 770 might_fault();
8f2abe6a
CB
771
772 do {
e168bf8d
CO
773 rc = __vcpu_run(vcpu);
774 if (rc)
775 break;
c0d744a9
CO
776 if (kvm_is_ucontrol(vcpu->kvm))
777 rc = -EOPNOTSUPP;
778 else
779 rc = kvm_handle_sie_intercept(vcpu);
8f2abe6a
CB
780 } while (!signal_pending(current) && !rc);
781
9ace903d
CE
782 if (rc == SIE_INTERCEPT_RERUNVCPU)
783 goto rerun_vcpu;
784
b1d16c49
CE
785 if (signal_pending(current) && !rc) {
786 kvm_run->exit_reason = KVM_EXIT_INTR;
8f2abe6a 787 rc = -EINTR;
b1d16c49 788 }
8f2abe6a 789
e168bf8d
CO
790#ifdef CONFIG_KVM_S390_UCONTROL
791 if (rc == SIE_INTERCEPT_UCONTROL) {
792 kvm_run->exit_reason = KVM_EXIT_S390_UCONTROL;
793 kvm_run->s390_ucontrol.trans_exc_code =
794 current->thread.gmap_addr;
795 kvm_run->s390_ucontrol.pgm_code = 0x10;
796 rc = 0;
797 }
798#endif
799
b8e660b8 800 if (rc == -EOPNOTSUPP) {
8f2abe6a
CB
801 /* intercept cannot be handled in-kernel, prepare kvm-run */
802 kvm_run->exit_reason = KVM_EXIT_S390_SIEIC;
803 kvm_run->s390_sieic.icptcode = vcpu->arch.sie_block->icptcode;
8f2abe6a
CB
804 kvm_run->s390_sieic.ipa = vcpu->arch.sie_block->ipa;
805 kvm_run->s390_sieic.ipb = vcpu->arch.sie_block->ipb;
806 rc = 0;
807 }
808
809 if (rc == -EREMOTE) {
810 /* intercept was handled, but userspace support is needed
811 * kvm_run has been prepared by the handler */
812 rc = 0;
813 }
b0c632db 814
d7b0b5eb
CO
815 kvm_run->psw_mask = vcpu->arch.sie_block->gpsw.mask;
816 kvm_run->psw_addr = vcpu->arch.sie_block->gpsw.addr;
60b413c9 817 kvm_run->s.regs.prefix = vcpu->arch.sie_block->prefix;
9eed0735 818 memcpy(&kvm_run->s.regs.crs, &vcpu->arch.sie_block->gcr, 128);
d7b0b5eb 819
b0c632db
HC
820 if (vcpu->sigset_active)
821 sigprocmask(SIG_SETMASK, &sigsaved, NULL);
822
b0c632db 823 vcpu->stat.exit_userspace++;
7e8e6ab4 824 return rc;
b0c632db
HC
825}
826
092670cd 827static int __guestcopy(struct kvm_vcpu *vcpu, u64 guestdest, void *from,
b0c632db
HC
828 unsigned long n, int prefix)
829{
830 if (prefix)
831 return copy_to_guest(vcpu, guestdest, from, n);
832 else
833 return copy_to_guest_absolute(vcpu, guestdest, from, n);
834}
835
836/*
837 * store status at address
838 * we use have two special cases:
839 * KVM_S390_STORE_STATUS_NOADDR: -> 0x1200 on 64 bit
840 * KVM_S390_STORE_STATUS_PREFIXED: -> prefix
841 */
971eb77f 842int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
b0c632db 843{
092670cd 844 unsigned char archmode = 1;
b0c632db
HC
845 int prefix;
846
847 if (addr == KVM_S390_STORE_STATUS_NOADDR) {
848 if (copy_to_guest_absolute(vcpu, 163ul, &archmode, 1))
849 return -EFAULT;
850 addr = SAVE_AREA_BASE;
851 prefix = 0;
852 } else if (addr == KVM_S390_STORE_STATUS_PREFIXED) {
853 if (copy_to_guest(vcpu, 163ul, &archmode, 1))
854 return -EFAULT;
855 addr = SAVE_AREA_BASE;
856 prefix = 1;
857 } else
858 prefix = 0;
859
15bc8d84
CB
860 /*
861 * The guest FPRS and ACRS are in the host FPRS/ACRS due to the lazy
862 * copying in vcpu load/put. Lets update our copies before we save
863 * it into the save area
864 */
865 save_fp_regs(&vcpu->arch.guest_fpregs);
866 save_access_regs(vcpu->run->s.regs.acrs);
867
f64ca217 868 if (__guestcopy(vcpu, addr + offsetof(struct save_area, fp_regs),
b0c632db
HC
869 vcpu->arch.guest_fpregs.fprs, 128, prefix))
870 return -EFAULT;
871
f64ca217 872 if (__guestcopy(vcpu, addr + offsetof(struct save_area, gp_regs),
5a32c1af 873 vcpu->run->s.regs.gprs, 128, prefix))
b0c632db
HC
874 return -EFAULT;
875
f64ca217 876 if (__guestcopy(vcpu, addr + offsetof(struct save_area, psw),
b0c632db
HC
877 &vcpu->arch.sie_block->gpsw, 16, prefix))
878 return -EFAULT;
879
f64ca217 880 if (__guestcopy(vcpu, addr + offsetof(struct save_area, pref_reg),
b0c632db
HC
881 &vcpu->arch.sie_block->prefix, 4, prefix))
882 return -EFAULT;
883
884 if (__guestcopy(vcpu,
f64ca217 885 addr + offsetof(struct save_area, fp_ctrl_reg),
b0c632db
HC
886 &vcpu->arch.guest_fpregs.fpc, 4, prefix))
887 return -EFAULT;
888
f64ca217 889 if (__guestcopy(vcpu, addr + offsetof(struct save_area, tod_reg),
b0c632db
HC
890 &vcpu->arch.sie_block->todpr, 4, prefix))
891 return -EFAULT;
892
f64ca217 893 if (__guestcopy(vcpu, addr + offsetof(struct save_area, timer),
b0c632db
HC
894 &vcpu->arch.sie_block->cputm, 8, prefix))
895 return -EFAULT;
896
f64ca217 897 if (__guestcopy(vcpu, addr + offsetof(struct save_area, clk_cmp),
b0c632db
HC
898 &vcpu->arch.sie_block->ckc, 8, prefix))
899 return -EFAULT;
900
f64ca217 901 if (__guestcopy(vcpu, addr + offsetof(struct save_area, acc_regs),
59674c1a 902 &vcpu->run->s.regs.acrs, 64, prefix))
b0c632db
HC
903 return -EFAULT;
904
905 if (__guestcopy(vcpu,
f64ca217 906 addr + offsetof(struct save_area, ctrl_regs),
b0c632db
HC
907 &vcpu->arch.sie_block->gcr, 128, prefix))
908 return -EFAULT;
909 return 0;
910}
911
d6712df9
CH
912static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
913 struct kvm_enable_cap *cap)
914{
915 int r;
916
917 if (cap->flags)
918 return -EINVAL;
919
920 switch (cap->cap) {
fa6b7fe9
CH
921 case KVM_CAP_S390_CSS_SUPPORT:
922 if (!vcpu->kvm->arch.css_support) {
923 vcpu->kvm->arch.css_support = 1;
924 trace_kvm_s390_enable_css(vcpu->kvm);
925 }
926 r = 0;
927 break;
d6712df9
CH
928 default:
929 r = -EINVAL;
930 break;
931 }
932 return r;
933}
934
b0c632db
HC
935long kvm_arch_vcpu_ioctl(struct file *filp,
936 unsigned int ioctl, unsigned long arg)
937{
938 struct kvm_vcpu *vcpu = filp->private_data;
939 void __user *argp = (void __user *)arg;
bc923cc9 940 long r;
b0c632db 941
93736624
AK
942 switch (ioctl) {
943 case KVM_S390_INTERRUPT: {
ba5c1e9b
CO
944 struct kvm_s390_interrupt s390int;
945
93736624 946 r = -EFAULT;
ba5c1e9b 947 if (copy_from_user(&s390int, argp, sizeof(s390int)))
93736624
AK
948 break;
949 r = kvm_s390_inject_vcpu(vcpu, &s390int);
950 break;
ba5c1e9b 951 }
b0c632db 952 case KVM_S390_STORE_STATUS:
bc923cc9
AK
953 r = kvm_s390_vcpu_store_status(vcpu, arg);
954 break;
b0c632db
HC
955 case KVM_S390_SET_INITIAL_PSW: {
956 psw_t psw;
957
bc923cc9 958 r = -EFAULT;
b0c632db 959 if (copy_from_user(&psw, argp, sizeof(psw)))
bc923cc9
AK
960 break;
961 r = kvm_arch_vcpu_ioctl_set_initial_psw(vcpu, psw);
962 break;
b0c632db
HC
963 }
964 case KVM_S390_INITIAL_RESET:
bc923cc9
AK
965 r = kvm_arch_vcpu_ioctl_initial_reset(vcpu);
966 break;
14eebd91
CO
967 case KVM_SET_ONE_REG:
968 case KVM_GET_ONE_REG: {
969 struct kvm_one_reg reg;
970 r = -EFAULT;
971 if (copy_from_user(&reg, argp, sizeof(reg)))
972 break;
973 if (ioctl == KVM_SET_ONE_REG)
974 r = kvm_arch_vcpu_ioctl_set_one_reg(vcpu, &reg);
975 else
976 r = kvm_arch_vcpu_ioctl_get_one_reg(vcpu, &reg);
977 break;
978 }
27e0393f
CO
979#ifdef CONFIG_KVM_S390_UCONTROL
980 case KVM_S390_UCAS_MAP: {
981 struct kvm_s390_ucas_mapping ucasmap;
982
983 if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
984 r = -EFAULT;
985 break;
986 }
987
988 if (!kvm_is_ucontrol(vcpu->kvm)) {
989 r = -EINVAL;
990 break;
991 }
992
993 r = gmap_map_segment(vcpu->arch.gmap, ucasmap.user_addr,
994 ucasmap.vcpu_addr, ucasmap.length);
995 break;
996 }
997 case KVM_S390_UCAS_UNMAP: {
998 struct kvm_s390_ucas_mapping ucasmap;
999
1000 if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
1001 r = -EFAULT;
1002 break;
1003 }
1004
1005 if (!kvm_is_ucontrol(vcpu->kvm)) {
1006 r = -EINVAL;
1007 break;
1008 }
1009
1010 r = gmap_unmap_segment(vcpu->arch.gmap, ucasmap.vcpu_addr,
1011 ucasmap.length);
1012 break;
1013 }
1014#endif
ccc7910f
CO
1015 case KVM_S390_VCPU_FAULT: {
1016 r = gmap_fault(arg, vcpu->arch.gmap);
1017 if (!IS_ERR_VALUE(r))
1018 r = 0;
1019 break;
1020 }
d6712df9
CH
1021 case KVM_ENABLE_CAP:
1022 {
1023 struct kvm_enable_cap cap;
1024 r = -EFAULT;
1025 if (copy_from_user(&cap, argp, sizeof(cap)))
1026 break;
1027 r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap);
1028 break;
1029 }
b0c632db 1030 default:
3e6afcf1 1031 r = -ENOTTY;
b0c632db 1032 }
bc923cc9 1033 return r;
b0c632db
HC
1034}
1035
5b1c1493
CO
1036int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
1037{
1038#ifdef CONFIG_KVM_S390_UCONTROL
1039 if ((vmf->pgoff == KVM_S390_SIE_PAGE_OFFSET)
1040 && (kvm_is_ucontrol(vcpu->kvm))) {
1041 vmf->page = virt_to_page(vcpu->arch.sie_block);
1042 get_page(vmf->page);
1043 return 0;
1044 }
1045#endif
1046 return VM_FAULT_SIGBUS;
1047}
1048
/* No arch specific memslot data to free on s390. */
void kvm_arch_free_memslot(struct kvm_memory_slot *free,
			   struct kvm_memory_slot *dont)
{
}
1053
/* No arch specific memslot data to allocate on s390. */
int kvm_arch_create_memslot(struct kvm_memory_slot *slot, unsigned long npages)
{
	return 0;
}
1058
/* Nothing to do on s390 when the memslot array has been updated. */
void kvm_arch_memslots_updated(struct kvm *kvm)
{
}
1062
b0c632db 1063/* Section: memory related */
f7784b8e
MT
1064int kvm_arch_prepare_memory_region(struct kvm *kvm,
1065 struct kvm_memory_slot *memslot,
7b6195a9
TY
1066 struct kvm_userspace_memory_region *mem,
1067 enum kvm_mr_change change)
b0c632db 1068{
dd2887e7
NW
1069 /* A few sanity checks. We can have memory slots which have to be
1070 located/ended at a segment boundary (1MB). The memory in userland is
1071 ok to be fragmented into various different vmas. It is okay to mmap()
1072 and munmap() stuff in this slot after doing this call at any time */
b0c632db 1073
598841ca 1074 if (mem->userspace_addr & 0xffffful)
b0c632db
HC
1075 return -EINVAL;
1076
598841ca 1077 if (mem->memory_size & 0xffffful)
b0c632db
HC
1078 return -EINVAL;
1079
f7784b8e
MT
1080 return 0;
1081}
1082
1083void kvm_arch_commit_memory_region(struct kvm *kvm,
1084 struct kvm_userspace_memory_region *mem,
8482644a
TY
1085 const struct kvm_memory_slot *old,
1086 enum kvm_mr_change change)
f7784b8e 1087{
f7850c92 1088 int rc;
f7784b8e 1089
2cef4deb
CB
1090 /* If the basics of the memslot do not change, we do not want
1091 * to update the gmap. Every update causes several unnecessary
1092 * segment translation exceptions. This is usually handled just
1093 * fine by the normal fault handler + gmap, but it will also
1094 * cause faults on the prefix page of running guest CPUs.
1095 */
1096 if (old->userspace_addr == mem->userspace_addr &&
1097 old->base_gfn * PAGE_SIZE == mem->guest_phys_addr &&
1098 old->npages * PAGE_SIZE == mem->memory_size)
1099 return;
598841ca
CO
1100
1101 rc = gmap_map_segment(kvm->arch.gmap, mem->userspace_addr,
1102 mem->guest_phys_addr, mem->memory_size);
1103 if (rc)
f7850c92 1104 printk(KERN_WARNING "kvm-s390: failed to commit memory region\n");
598841ca 1105 return;
b0c632db
HC
1106}
1107
/* s390 keeps no shadow page tables; nothing to flush. */
void kvm_arch_flush_shadow_all(struct kvm *kvm)
{
}
1111
/* s390 keeps no shadow page tables; nothing to flush per memslot. */
void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
				   struct kvm_memory_slot *slot)
{
}
1116
b0c632db
HC
1117static int __init kvm_s390_init(void)
1118{
ef50f7ac 1119 int ret;
0ee75bea 1120 ret = kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
ef50f7ac
CB
1121 if (ret)
1122 return ret;
1123
1124 /*
1125 * guests can ask for up to 255+1 double words, we need a full page
25985edc 1126 * to hold the maximum amount of facilities. On the other hand, we
ef50f7ac
CB
1127 * only set facilities that are known to work in KVM.
1128 */
c2f0e8c8 1129 facilities = (unsigned long long *) get_zeroed_page(GFP_KERNEL|GFP_DMA);
ef50f7ac
CB
1130 if (!facilities) {
1131 kvm_exit();
1132 return -ENOMEM;
1133 }
14375bc4 1134 memcpy(facilities, S390_lowcore.stfle_fac_list, 16);
69d0d3a3 1135 facilities[0] &= 0xff82fff3f47c0000ULL;
87cac8f8 1136 facilities[1] &= 0x001c000000000000ULL;
ef50f7ac 1137 return 0;
b0c632db
HC
1138}
1139
1140static void __exit kvm_s390_exit(void)
1141{
ef50f7ac 1142 free_page((unsigned long) facilities);
b0c632db
HC
1143 kvm_exit();
1144}
1145
1146module_init(kvm_s390_init);
1147module_exit(kvm_s390_exit);
566af940
CH
1148
1149/*
1150 * Enable autoloading of the kvm module.
1151 * Note that we add the module alias here instead of virt/kvm/kvm_main.c
1152 * since x86 takes a different approach.
1153 */
1154#include <linux/miscdevice.h>
1155MODULE_ALIAS_MISCDEV(KVM_MINOR);
1156MODULE_ALIAS("devname:kvm");