/*
 * s390host.c --  hosting zSeries kernel virtual machines
 *
 * Copyright IBM Corp. 2008,2009
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (version 2 only)
 * as published by the Free Software Foundation.
 *
 *    Author(s): Carsten Otte <cotte@de.ibm.com>
 *               Christian Borntraeger <borntraeger@de.ibm.com>
 *               Heiko Carstens <heiko.carstens@de.ibm.com>
 *               Christian Ehrhardt <ehrhardt@de.ibm.com>
 */

#include <linux/compiler.h>
#include <linux/err.h>
#include <linux/fs.h>
#include <linux/hrtimer.h>
#include <linux/init.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/timer.h>
#include <asm/asm-offsets.h>
#include <asm/lowcore.h>
#include <asm/pgtable.h>
#include <asm/nmi.h>
#include <asm/system.h>
#include "kvm-s390.h"
#include "gaccess.h"

#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU

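/*
 * Per-vcpu exit and instruction counters; generic KVM code exports each
 * entry below as a file in the kvm debugfs directory.
 */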
struct kvm_stats_debugfs_item debugfs_entries[] = {
	{ "userspace_handled", VCPU_STAT(exit_userspace) },
	{ "exit_null", VCPU_STAT(exit_null) },
	{ "exit_validity", VCPU_STAT(exit_validity) },
	{ "exit_stop_request", VCPU_STAT(exit_stop_request) },
	{ "exit_external_request", VCPU_STAT(exit_external_request) },
	{ "exit_external_interrupt", VCPU_STAT(exit_external_interrupt) },
	{ "exit_instruction", VCPU_STAT(exit_instruction) },
	{ "exit_program_interruption", VCPU_STAT(exit_program_interruption) },
	{ "exit_instr_and_program_int", VCPU_STAT(exit_instr_and_program) },
	{ "instruction_lctlg", VCPU_STAT(instruction_lctlg) },
	{ "instruction_lctl", VCPU_STAT(instruction_lctl) },
	{ "deliver_emergency_signal", VCPU_STAT(deliver_emergency_signal) },
	{ "deliver_service_signal", VCPU_STAT(deliver_service_signal) },
	{ "deliver_virtio_interrupt", VCPU_STAT(deliver_virtio_interrupt) },
	{ "deliver_stop_signal", VCPU_STAT(deliver_stop_signal) },
	{ "deliver_prefix_signal", VCPU_STAT(deliver_prefix_signal) },
	{ "deliver_restart_signal", VCPU_STAT(deliver_restart_signal) },
	{ "deliver_program_interruption", VCPU_STAT(deliver_program_int) },
	{ "exit_wait_state", VCPU_STAT(exit_wait_state) },
	{ "instruction_stidp", VCPU_STAT(instruction_stidp) },
	{ "instruction_spx", VCPU_STAT(instruction_spx) },
	{ "instruction_stpx", VCPU_STAT(instruction_stpx) },
	{ "instruction_stap", VCPU_STAT(instruction_stap) },
	{ "instruction_storage_key", VCPU_STAT(instruction_storage_key) },
	{ "instruction_stsch", VCPU_STAT(instruction_stsch) },
	{ "instruction_chsc", VCPU_STAT(instruction_chsc) },
	{ "instruction_stsi", VCPU_STAT(instruction_stsi) },
	{ "instruction_stfl", VCPU_STAT(instruction_stfl) },
	{ "instruction_sigp_sense", VCPU_STAT(instruction_sigp_sense) },
	{ "instruction_sigp_emergency", VCPU_STAT(instruction_sigp_emergency) },
	{ "instruction_sigp_stop", VCPU_STAT(instruction_sigp_stop) },
	{ "instruction_sigp_set_arch", VCPU_STAT(instruction_sigp_arch) },
	{ "instruction_sigp_set_prefix", VCPU_STAT(instruction_sigp_prefix) },
	{ "instruction_sigp_restart", VCPU_STAT(instruction_sigp_restart) },
	{ "diagnose_44", VCPU_STAT(diagnose_44) },
	{ NULL }
};

static unsigned long long *facilities;

/* Section: not file related */
int kvm_arch_hardware_enable(void *garbage)
{
	/* every s390 is virtualization enabled ;-) */
	return 0;
}

void kvm_arch_hardware_disable(void *garbage)
{
}

int kvm_arch_hardware_setup(void)
{
	return 0;
}

void kvm_arch_hardware_unsetup(void)
{
}

void kvm_arch_check_processor_compat(void *rtn)
{
}

int kvm_arch_init(void *opaque)
{
	return 0;
}

void kvm_arch_exit(void)
{
}

/* Section: device related */
long kvm_arch_dev_ioctl(struct file *filp,
			unsigned int ioctl, unsigned long arg)
{
	if (ioctl == KVM_S390_ENABLE_SIE)
		return s390_enable_sie();
	return -EINVAL;
}

int kvm_dev_ioctl_check_extension(long ext)
{
	int r;

	switch (ext) {
	case KVM_CAP_S390_PSW:
		r = 1;
		break;
	default:
		r = 0;
	}
	return r;
}

/* Section: vm related */
/*
 * Get (and clear) the dirty memory log for a memory slot.
 */
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
			       struct kvm_dirty_log *log)
{
	return 0;
}

long kvm_arch_vm_ioctl(struct file *filp,
		       unsigned int ioctl, unsigned long arg)
{
	struct kvm *kvm = filp->private_data;
	void __user *argp = (void __user *)arg;
	int r;

	switch (ioctl) {
	case KVM_S390_INTERRUPT: {
		struct kvm_s390_interrupt s390int;

		r = -EFAULT;
		if (copy_from_user(&s390int, argp, sizeof(s390int)))
			break;
		r = kvm_s390_inject_vm(kvm, &s390int);
		break;
	}
	default:
		r = -ENOTTY;
	}

	return r;
}

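/*
 * VM creation: s390_enable_sie() prepares the current address space for
 * SIE, then the system control area (SCA) shared by all vcpus of this
 * guest is allocated and an s390 debug feature log is registered for
 * the VM_EVENT traces.
 */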
struct kvm *kvm_arch_create_vm(void)
{
	struct kvm *kvm;
	int rc;
	char debug_name[16];

	rc = s390_enable_sie();
	if (rc)
		goto out_nokvm;

	rc = -ENOMEM;
	kvm = kzalloc(sizeof(struct kvm), GFP_KERNEL);
	if (!kvm)
		goto out_nokvm;

	kvm->arch.sca = (struct sca_block *) get_zeroed_page(GFP_KERNEL);
	if (!kvm->arch.sca)
		goto out_nosca;

	sprintf(debug_name, "kvm-%u", current->pid);

	kvm->arch.dbf = debug_register(debug_name, 8, 2, 8 * sizeof(long));
	if (!kvm->arch.dbf)
		goto out_nodbf;

	spin_lock_init(&kvm->arch.float_int.lock);
	INIT_LIST_HEAD(&kvm->arch.float_int.list);

	debug_register_view(kvm->arch.dbf, &debug_sprintf_view);
	VM_EVENT(kvm, 3, "%s", "vm created");

	return kvm;
out_nodbf:
	free_page((unsigned long)(kvm->arch.sca));
out_nosca:
	kfree(kvm);
out_nokvm:
	return ERR_PTR(rc);
}

void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{
	VCPU_EVENT(vcpu, 3, "%s", "free cpu");
	if (vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda ==
	    (__u64) vcpu->arch.sie_block)
		vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda = 0;
	smp_mb();
	free_page((unsigned long)(vcpu->arch.sie_block));
	kvm_vcpu_uninit(vcpu);
	kfree(vcpu);
}

static void kvm_free_vcpus(struct kvm *kvm)
{
	unsigned int i;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(i, vcpu, kvm)
		kvm_arch_vcpu_destroy(vcpu);

	mutex_lock(&kvm->lock);
	for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
		kvm->vcpus[i] = NULL;

	atomic_set(&kvm->online_vcpus, 0);
	mutex_unlock(&kvm->lock);
}

void kvm_arch_sync_events(struct kvm *kvm)
{
}

void kvm_arch_destroy_vm(struct kvm *kvm)
{
	kvm_free_vcpus(kvm);
	kvm_free_physmem(kvm);
	free_page((unsigned long)(kvm->arch.sca));
	debug_unregister(kvm->arch.dbf);
	cleanup_srcu_struct(&kvm->srcu);
	kfree(kvm);
}

/* Section: vcpu related */
int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
{
	return 0;
}

void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
{
	/* Nothing to do */
}

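/*
 * vcpu load/put swap the floating point and access registers between
 * host and guest; the guest FPC is masked to its valid bits before it
 * is loaded.
 */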
void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	save_fp_regs(&vcpu->arch.host_fpregs);
	save_access_regs(vcpu->arch.host_acrs);
	vcpu->arch.guest_fpregs.fpc &= FPC_VALID_MASK;
	restore_fp_regs(&vcpu->arch.guest_fpregs);
	restore_access_regs(vcpu->arch.guest_acrs);
}

void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
	save_fp_regs(&vcpu->arch.guest_fpregs);
	save_access_regs(vcpu->arch.guest_acrs);
	restore_fp_regs(&vcpu->arch.host_fpregs);
	restore_access_regs(vcpu->arch.host_acrs);
}

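/*
 * PSW, prefix, CPU timer and clock comparator are cleared; control
 * registers 0 and 14 get their non-zero architected reset values
 * (interruption and machine-check subclass masks).
 */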
static void kvm_s390_vcpu_initial_reset(struct kvm_vcpu *vcpu)
{
	/* this equals initial cpu reset in pop, but we don't switch to ESA */
	vcpu->arch.sie_block->gpsw.mask = 0UL;
	vcpu->arch.sie_block->gpsw.addr = 0UL;
	vcpu->arch.sie_block->prefix = 0UL;
	vcpu->arch.sie_block->ihcpu = 0xffff;
	vcpu->arch.sie_block->cputm = 0UL;
	vcpu->arch.sie_block->ckc = 0UL;
	vcpu->arch.sie_block->todpr = 0;
	memset(vcpu->arch.sie_block->gcr, 0, 16 * sizeof(__u64));
	vcpu->arch.sie_block->gcr[0] = 0xE0UL;
	vcpu->arch.sie_block->gcr[14] = 0xC2000000UL;
	vcpu->arch.guest_fpregs.fpc = 0;
	asm volatile("lfpc %0" : : "Q" (vcpu->arch.guest_fpregs.fpc));
	vcpu->arch.sie_block->gbea = 1;
}

int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
{
	atomic_set(&vcpu->arch.sie_block->cpuflags, CPUSTAT_ZARCH);
	set_bit(KVM_REQ_MMU_RELOAD, &vcpu->requests);
	vcpu->arch.sie_block->ecb = 2;
	vcpu->arch.sie_block->eca = 0xC1002001U;
	vcpu->arch.sie_block->fac = (int) (long) facilities;
	hrtimer_init(&vcpu->arch.ckc_timer, CLOCK_REALTIME, HRTIMER_MODE_ABS);
	tasklet_init(&vcpu->arch.tasklet, kvm_s390_tasklet,
		     (unsigned long) vcpu);
	vcpu->arch.ckc_timer.function = kvm_s390_idle_wakeup;
	get_cpu_id(&vcpu->arch.cpu_id);
	vcpu->arch.cpu_id.version = 0xff;
	return 0;
}

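/*
 * vcpu creation: the SIE control block gets its own zeroed page, the
 * vcpu is hooked into the VM-wide SCA (sda entry plus the split
 * scaoh/scaol origin), and the local interrupt structure is wired up
 * to the VM's floating interrupt structure.
 */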
struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
				      unsigned int id)
{
	struct kvm_vcpu *vcpu = kzalloc(sizeof(struct kvm_vcpu), GFP_KERNEL);
	int rc = -ENOMEM;

	if (!vcpu)
		goto out_nomem;

	vcpu->arch.sie_block = (struct kvm_s390_sie_block *)
					get_zeroed_page(GFP_KERNEL);

	if (!vcpu->arch.sie_block)
		goto out_free_cpu;

	vcpu->arch.sie_block->icpua = id;
	BUG_ON(!kvm->arch.sca);
	if (!kvm->arch.sca->cpu[id].sda)
		kvm->arch.sca->cpu[id].sda = (__u64) vcpu->arch.sie_block;
	vcpu->arch.sie_block->scaoh = (__u32)(((__u64)kvm->arch.sca) >> 32);
	vcpu->arch.sie_block->scaol = (__u32)(__u64)kvm->arch.sca;

	spin_lock_init(&vcpu->arch.local_int.lock);
	INIT_LIST_HEAD(&vcpu->arch.local_int.list);
	vcpu->arch.local_int.float_int = &kvm->arch.float_int;
	spin_lock(&kvm->arch.float_int.lock);
	kvm->arch.float_int.local_int[id] = &vcpu->arch.local_int;
	init_waitqueue_head(&vcpu->arch.local_int.wq);
	vcpu->arch.local_int.cpuflags = &vcpu->arch.sie_block->cpuflags;
	spin_unlock(&kvm->arch.float_int.lock);

	rc = kvm_vcpu_init(vcpu, kvm, id);
	if (rc)
		goto out_free_sie_block;
	VM_EVENT(kvm, 3, "create cpu %d at %p, sie block at %p", id, vcpu,
		 vcpu->arch.sie_block);

	return vcpu;
out_free_sie_block:
	free_page((unsigned long)(vcpu->arch.sie_block));
out_free_cpu:
	kfree(vcpu);
out_nomem:
	return ERR_PTR(rc);
}

int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
{
	/* kvm common code refers to this, but never calls it */
	BUG();
	return 0;
}

static int kvm_arch_vcpu_ioctl_initial_reset(struct kvm_vcpu *vcpu)
{
	kvm_s390_vcpu_initial_reset(vcpu);
	return 0;
}

int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	memcpy(&vcpu->arch.guest_gprs, &regs->gprs, sizeof(regs->gprs));
	return 0;
}

int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	memcpy(&regs->gprs, &vcpu->arch.guest_gprs, sizeof(regs->gprs));
	return 0;
}

int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	memcpy(&vcpu->arch.guest_acrs, &sregs->acrs, sizeof(sregs->acrs));
	memcpy(&vcpu->arch.sie_block->gcr, &sregs->crs, sizeof(sregs->crs));
	return 0;
}

int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	memcpy(&sregs->acrs, &vcpu->arch.guest_acrs, sizeof(sregs->acrs));
	memcpy(&sregs->crs, &vcpu->arch.sie_block->gcr, sizeof(sregs->crs));
	return 0;
}

int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	memcpy(&vcpu->arch.guest_fpregs.fprs, &fpu->fprs, sizeof(fpu->fprs));
	vcpu->arch.guest_fpregs.fpc = fpu->fpc;
	return 0;
}

int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	memcpy(&fpu->fprs, &vcpu->arch.guest_fpregs.fprs, sizeof(fpu->fprs));
	fpu->fpc = vcpu->arch.guest_fpregs.fpc;
	return 0;
}

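/*
 * The initial PSW can only be replaced while the vcpu is not running;
 * the new value is staged in kvm_run and loaded into the SIE block on
 * the next KVM_RUN.
 */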
static int kvm_arch_vcpu_ioctl_set_initial_psw(struct kvm_vcpu *vcpu, psw_t psw)
{
	int rc = 0;

	if (atomic_read(&vcpu->arch.sie_block->cpuflags) & CPUSTAT_RUNNING)
		rc = -EBUSY;
	else {
		vcpu->run->psw_mask = psw.mask;
		vcpu->run->psw_addr = psw.addr;
	}
	return rc;
}

int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
				  struct kvm_translation *tr)
{
	return -EINVAL; /* not implemented yet */
}

int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
					struct kvm_guest_debug *dbg)
{
	return -EINVAL; /* not implemented yet */
}

int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	return -EINVAL; /* not implemented yet */
}

int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	return -EINVAL; /* not implemented yet */
}

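/*
 * One round trip into SIE: take care of pending host work (reschedule,
 * machine check), deliver pending guest interrupts, then enter the
 * guest via sie64a(). A non-zero return from sie64a() indicates a fault
 * in the sie instruction itself; the guest then gets an addressing
 * exception injected.
 */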
static void __vcpu_run(struct kvm_vcpu *vcpu)
{
	memcpy(&vcpu->arch.sie_block->gg14, &vcpu->arch.guest_gprs[14], 16);

	if (need_resched())
		schedule();

	if (test_thread_flag(TIF_MCCK_PENDING))
		s390_handle_mcck();

	kvm_s390_deliver_pending_interrupts(vcpu);

	vcpu->arch.sie_block->icptcode = 0;
	local_irq_disable();
	kvm_guest_enter();
	local_irq_enable();
	VCPU_EVENT(vcpu, 6, "entering sie flags %x",
		   atomic_read(&vcpu->arch.sie_block->cpuflags));
	if (sie64a(vcpu->arch.sie_block, vcpu->arch.guest_gprs)) {
		VCPU_EVENT(vcpu, 3, "%s", "fault in sie instruction");
		kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
	}
	VCPU_EVENT(vcpu, 6, "exit sie icptcode %d",
		   vcpu->arch.sie_block->icptcode);
	local_irq_disable();
	kvm_guest_exit();
	local_irq_enable();

	memcpy(&vcpu->arch.guest_gprs[14], &vcpu->arch.sie_block->gg14, 16);
}

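/*
 * Main run loop: run the guest and handle intercepts in the kernel
 * until a signal is pending or a handler asks for userspace.
 * -EOPNOTSUPP means the intercept must be completed in userspace
 * (KVM_EXIT_S390_SIEIC), -EREMOTE means the handler already prepared
 * kvm_run, and SIE_INTERCEPT_RERUNVCPU restarts the loop after a
 * memory slot update.
 */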
int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	int rc;
	sigset_t sigsaved;

rerun_vcpu:
	if (vcpu->requests)
		if (test_and_clear_bit(KVM_REQ_MMU_RELOAD, &vcpu->requests))
			kvm_s390_vcpu_set_mem(vcpu);

	/* verify that memory has been registered */
	if (!vcpu->arch.sie_block->gmslm) {
		vcpu_put(vcpu);
		VCPU_EVENT(vcpu, 3, "%s", "no memory registered to run vcpu");
		return -EINVAL;
	}

	if (vcpu->sigset_active)
		sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);

	atomic_set_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);

	BUG_ON(vcpu->kvm->arch.float_int.local_int[vcpu->vcpu_id] == NULL);

	switch (kvm_run->exit_reason) {
	case KVM_EXIT_S390_SIEIC:
	case KVM_EXIT_UNKNOWN:
	case KVM_EXIT_INTR:
	case KVM_EXIT_S390_RESET:
		break;
	default:
		BUG();
	}

	vcpu->arch.sie_block->gpsw.mask = kvm_run->psw_mask;
	vcpu->arch.sie_block->gpsw.addr = kvm_run->psw_addr;

	might_fault();

	do {
		__vcpu_run(vcpu);
		rc = kvm_handle_sie_intercept(vcpu);
	} while (!signal_pending(current) && !rc);

	if (rc == SIE_INTERCEPT_RERUNVCPU)
		goto rerun_vcpu;

	if (signal_pending(current) && !rc) {
		kvm_run->exit_reason = KVM_EXIT_INTR;
		rc = -EINTR;
	}

	if (rc == -EOPNOTSUPP) {
		/* intercept cannot be handled in-kernel, prepare kvm-run */
		kvm_run->exit_reason = KVM_EXIT_S390_SIEIC;
		kvm_run->s390_sieic.icptcode = vcpu->arch.sie_block->icptcode;
		kvm_run->s390_sieic.ipa = vcpu->arch.sie_block->ipa;
		kvm_run->s390_sieic.ipb = vcpu->arch.sie_block->ipb;
		rc = 0;
	}

	if (rc == -EREMOTE) {
		/* intercept was handled, but userspace support is needed
		 * kvm_run has been prepared by the handler */
		rc = 0;
	}

	kvm_run->psw_mask = vcpu->arch.sie_block->gpsw.mask;
	kvm_run->psw_addr = vcpu->arch.sie_block->gpsw.addr;

	if (vcpu->sigset_active)
		sigprocmask(SIG_SETMASK, &sigsaved, NULL);

	vcpu->stat.exit_userspace++;
	return rc;
}

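/*
 * Helper for store status: writes either via the guest's prefixed
 * address space or to guest absolute storage, depending on how the
 * caller resolved the save area address.
 */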
static int __guestcopy(struct kvm_vcpu *vcpu, u64 guestdest, const void *from,
		       unsigned long n, int prefix)
{
	if (prefix)
		return copy_to_guest(vcpu, guestdest, from, n);
	else
		return copy_to_guest_absolute(vcpu, guestdest, from, n);
}

/*
 * store status at address
 * we have two special cases:
 * KVM_S390_STORE_STATUS_NOADDR: -> 0x1200 on 64 bit
 * KVM_S390_STORE_STATUS_PREFIXED: -> prefix
 */
static int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
{
	const unsigned char archmode = 1;
	int prefix;

	if (addr == KVM_S390_STORE_STATUS_NOADDR) {
		if (copy_to_guest_absolute(vcpu, 163ul, &archmode, 1))
			return -EFAULT;
		addr = SAVE_AREA_BASE;
		prefix = 0;
	} else if (addr == KVM_S390_STORE_STATUS_PREFIXED) {
		if (copy_to_guest(vcpu, 163ul, &archmode, 1))
			return -EFAULT;
		addr = SAVE_AREA_BASE;
		prefix = 1;
	} else
		prefix = 0;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, fp_regs),
			vcpu->arch.guest_fpregs.fprs, 128, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, gp_regs),
			vcpu->arch.guest_gprs, 128, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, psw),
			&vcpu->arch.sie_block->gpsw, 16, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, pref_reg),
			&vcpu->arch.sie_block->prefix, 4, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu,
			addr + offsetof(struct save_area, fp_ctrl_reg),
			&vcpu->arch.guest_fpregs.fpc, 4, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, tod_reg),
			&vcpu->arch.sie_block->todpr, 4, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, timer),
			&vcpu->arch.sie_block->cputm, 8, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, clk_cmp),
			&vcpu->arch.sie_block->ckc, 8, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, acc_regs),
			&vcpu->arch.guest_acrs, 64, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu,
			addr + offsetof(struct save_area, ctrl_regs),
			&vcpu->arch.sie_block->gcr, 128, prefix))
		return -EFAULT;
	return 0;
}

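/*
 * KVM_S390_INTERRUPT is handled before vcpu_load(): injection takes
 * its own locks and must not wait for the vcpu mutex, so interrupts
 * can be queued while the vcpu is running.
 */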
long kvm_arch_vcpu_ioctl(struct file *filp,
			 unsigned int ioctl, unsigned long arg)
{
	struct kvm_vcpu *vcpu = filp->private_data;
	void __user *argp = (void __user *)arg;
	long r;

	if (ioctl == KVM_S390_INTERRUPT) {
		struct kvm_s390_interrupt s390int;

		if (copy_from_user(&s390int, argp, sizeof(s390int)))
			return -EFAULT;
		return kvm_s390_inject_vcpu(vcpu, &s390int);
	}

	vcpu_load(vcpu);
	switch (ioctl) {
	case KVM_S390_STORE_STATUS:
		r = kvm_s390_vcpu_store_status(vcpu, arg);
		break;
	case KVM_S390_SET_INITIAL_PSW: {
		psw_t psw;

		r = -EFAULT;
		if (copy_from_user(&psw, argp, sizeof(psw)))
			break;
		r = kvm_arch_vcpu_ioctl_set_initial_psw(vcpu, psw);
		break;
	}
	case KVM_S390_INITIAL_RESET:
		r = kvm_arch_vcpu_ioctl_initial_reset(vcpu);
		break;
	default:
		r = -EINVAL;
	}
	vcpu_put(vcpu);
	return r;
}

/* Section: memory related */
int kvm_arch_prepare_memory_region(struct kvm *kvm,
				   struct kvm_memory_slot *memslot,
				   struct kvm_memory_slot old,
				   struct kvm_userspace_memory_region *mem,
				   int user_alloc)
{
	/* A few sanity checks. We can have exactly one memory slot, which
	   has to start at guest physical zero, has to be located at a page
	   boundary in userland, and has to end at a page boundary. The
	   memory in userland may be fragmented into various different vmas.
	   It is okay to mmap() and munmap() in this slot at any time after
	   this call. */

	if (mem->slot)
		return -EINVAL;

	if (mem->guest_phys_addr)
		return -EINVAL;

	if (mem->userspace_addr & (PAGE_SIZE - 1))
		return -EINVAL;

	if (mem->memory_size & (PAGE_SIZE - 1))
		return -EINVAL;

	if (!user_alloc)
		return -EINVAL;

	return 0;
}

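/*
 * Committing a slot requires every vcpu to reload its SIE memory
 * setup; vcpus that do not yet have KVM_REQ_MMU_RELOAD set are kicked
 * out of SIE with a stop request that reloads instead of stopping.
 */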
void kvm_arch_commit_memory_region(struct kvm *kvm,
				   struct kvm_userspace_memory_region *mem,
				   struct kvm_memory_slot old,
				   int user_alloc)
{
	int i;
	struct kvm_vcpu *vcpu;

	/* request update of sie control block for all available vcpus */
	kvm_for_each_vcpu(i, vcpu, kvm) {
		if (test_and_set_bit(KVM_REQ_MMU_RELOAD, &vcpu->requests))
			continue;
		kvm_s390_inject_sigp_stop(vcpu, ACTION_RELOADVCPU_ON_STOP);
	}
}

void kvm_arch_flush_shadow(struct kvm *kvm)
{
}

gfn_t unalias_gfn(struct kvm *kvm, gfn_t gfn)
{
	return gfn;
}

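/*
 * The facility list lives in a zeroed GFP_DMA page: the SIE block's
 * fac field holds only a 32-bit address, so the page must reside
 * below 2 GB.
 */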
static int __init kvm_s390_init(void)
{
	int ret;
	ret = kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
	if (ret)
		return ret;

	/*
	 * guests can ask for up to 255+1 double words, we need a full page
	 * to hold the maximum number of facilities. On the other hand, we
	 * only set facilities that are known to work in KVM.
	 */
	facilities = (unsigned long long *) get_zeroed_page(GFP_KERNEL|GFP_DMA);
	if (!facilities) {
		kvm_exit();
		return -ENOMEM;
	}
	stfle(facilities, 1);
	facilities[0] &= 0xff00fff3f0700000ULL;
	return 0;
}

static void __exit kvm_s390_exit(void)
{
	free_page((unsigned long) facilities);
	kvm_exit();
}

module_init(kvm_s390_init);
module_exit(kvm_s390_exit);