/*
 * hosting zSeries kernel virtual machines
 *
 * Copyright IBM Corp. 2008, 2009
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (version 2 only)
 * as published by the Free Software Foundation.
 *
 *    Author(s): Carsten Otte <cotte@de.ibm.com>
 *               Christian Borntraeger <borntraeger@de.ibm.com>
 *               Heiko Carstens <heiko.carstens@de.ibm.com>
 *               Christian Ehrhardt <ehrhardt@de.ibm.com>
 *               Jason J. Herne <jjherne@us.ibm.com>
 */

#include <linux/compiler.h>
#include <linux/err.h>
#include <linux/fs.h>
#include <linux/hrtimer.h>
#include <linux/init.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/timer.h>
#include <asm/asm-offsets.h>
#include <asm/lowcore.h>
#include <asm/pgtable.h>
#include <asm/nmi.h>
#include <asm/switch_to.h>
#include <asm/facility.h>
#include <asm/sclp.h>
#include "kvm-s390.h"
#include "gaccess.h"

#define CREATE_TRACE_POINTS
#include "trace.h"
#include "trace-s390.h"

#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU

struct kvm_stats_debugfs_item debugfs_entries[] = {
	{ "userspace_handled", VCPU_STAT(exit_userspace) },
	{ "exit_null", VCPU_STAT(exit_null) },
	{ "exit_validity", VCPU_STAT(exit_validity) },
	{ "exit_stop_request", VCPU_STAT(exit_stop_request) },
	{ "exit_external_request", VCPU_STAT(exit_external_request) },
	{ "exit_external_interrupt", VCPU_STAT(exit_external_interrupt) },
	{ "exit_instruction", VCPU_STAT(exit_instruction) },
	{ "exit_program_interruption", VCPU_STAT(exit_program_interruption) },
	{ "exit_instr_and_program_int", VCPU_STAT(exit_instr_and_program) },
	{ "instruction_lctlg", VCPU_STAT(instruction_lctlg) },
	{ "instruction_lctl", VCPU_STAT(instruction_lctl) },
	{ "deliver_emergency_signal", VCPU_STAT(deliver_emergency_signal) },
	{ "deliver_external_call", VCPU_STAT(deliver_external_call) },
	{ "deliver_service_signal", VCPU_STAT(deliver_service_signal) },
	{ "deliver_virtio_interrupt", VCPU_STAT(deliver_virtio_interrupt) },
	{ "deliver_stop_signal", VCPU_STAT(deliver_stop_signal) },
	{ "deliver_prefix_signal", VCPU_STAT(deliver_prefix_signal) },
	{ "deliver_restart_signal", VCPU_STAT(deliver_restart_signal) },
	{ "deliver_program_interruption", VCPU_STAT(deliver_program_int) },
	{ "exit_wait_state", VCPU_STAT(exit_wait_state) },
	{ "instruction_pfmf", VCPU_STAT(instruction_pfmf) },
	{ "instruction_stidp", VCPU_STAT(instruction_stidp) },
	{ "instruction_spx", VCPU_STAT(instruction_spx) },
	{ "instruction_stpx", VCPU_STAT(instruction_stpx) },
	{ "instruction_stap", VCPU_STAT(instruction_stap) },
	{ "instruction_storage_key", VCPU_STAT(instruction_storage_key) },
	{ "instruction_stsch", VCPU_STAT(instruction_stsch) },
	{ "instruction_chsc", VCPU_STAT(instruction_chsc) },
	{ "instruction_essa", VCPU_STAT(instruction_essa) },
	{ "instruction_stsi", VCPU_STAT(instruction_stsi) },
	{ "instruction_stfl", VCPU_STAT(instruction_stfl) },
	{ "instruction_tprot", VCPU_STAT(instruction_tprot) },
	{ "instruction_sigp_sense", VCPU_STAT(instruction_sigp_sense) },
	{ "instruction_sigp_sense_running", VCPU_STAT(instruction_sigp_sense_running) },
	{ "instruction_sigp_external_call", VCPU_STAT(instruction_sigp_external_call) },
	{ "instruction_sigp_emergency", VCPU_STAT(instruction_sigp_emergency) },
	{ "instruction_sigp_stop", VCPU_STAT(instruction_sigp_stop) },
	{ "instruction_sigp_set_arch", VCPU_STAT(instruction_sigp_arch) },
	{ "instruction_sigp_set_prefix", VCPU_STAT(instruction_sigp_prefix) },
	{ "instruction_sigp_restart", VCPU_STAT(instruction_sigp_restart) },
	{ "diagnose_10", VCPU_STAT(diagnose_10) },
	{ "diagnose_44", VCPU_STAT(diagnose_44) },
	{ "diagnose_9c", VCPU_STAT(diagnose_9c) },
	{ NULL }
};

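/*
 * Facility bits that are forwarded to guests; allocated and filled in
 * kvm_s390_init() below with the subset of host facilities that KVM
 * is known to handle.
 */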
unsigned long *vfacilities;
static struct gmap_notifier gmap_notifier;

/* test availability of vfacility */
static inline int test_vfacility(unsigned long nr)
{
	return __test_facility(nr, (void *) vfacilities);
}

/* Section: not file related */
int kvm_arch_hardware_enable(void *garbage)
{
	/* every s390 is virtualization enabled ;-) */
	return 0;
}

void kvm_arch_hardware_disable(void *garbage)
{
}

static void kvm_gmap_notifier(struct gmap *gmap, unsigned long address);

int kvm_arch_hardware_setup(void)
{
	gmap_notifier.notifier_call = kvm_gmap_notifier;
	gmap_register_ipte_notifier(&gmap_notifier);
	return 0;
}

void kvm_arch_hardware_unsetup(void)
{
	gmap_unregister_ipte_notifier(&gmap_notifier);
}

void kvm_arch_check_processor_compat(void *rtn)
{
}

int kvm_arch_init(void *opaque)
{
	return 0;
}

void kvm_arch_exit(void)
{
}

/* Section: device related */
long kvm_arch_dev_ioctl(struct file *filp,
			unsigned int ioctl, unsigned long arg)
{
	if (ioctl == KVM_S390_ENABLE_SIE)
		return s390_enable_sie();
	return -EINVAL;
}

int kvm_dev_ioctl_check_extension(long ext)
{
	int r;

	switch (ext) {
	case KVM_CAP_S390_PSW:
	case KVM_CAP_S390_GMAP:
	case KVM_CAP_SYNC_MMU:
#ifdef CONFIG_KVM_S390_UCONTROL
	case KVM_CAP_S390_UCONTROL:
#endif
	case KVM_CAP_ASYNC_PF:
	case KVM_CAP_SYNC_REGS:
	case KVM_CAP_ONE_REG:
	case KVM_CAP_ENABLE_CAP:
	case KVM_CAP_S390_CSS_SUPPORT:
	case KVM_CAP_IOEVENTFD:
	case KVM_CAP_DEVICE_CTRL:
	case KVM_CAP_ENABLE_CAP_VM:
	case KVM_CAP_VM_ATTRIBUTES:
		r = 1;
		break;
	case KVM_CAP_NR_VCPUS:
	case KVM_CAP_MAX_VCPUS:
		r = KVM_MAX_VCPUS;
		break;
	case KVM_CAP_NR_MEMSLOTS:
		r = KVM_USER_MEM_SLOTS;
		break;
	case KVM_CAP_S390_COW:
		r = MACHINE_HAS_ESOP;
		break;
	default:
		r = 0;
	}
	return r;
}

static void kvm_s390_sync_dirty_log(struct kvm *kvm,
				    struct kvm_memory_slot *memslot)
{
	gfn_t cur_gfn, last_gfn;
	unsigned long address;
	struct gmap *gmap = kvm->arch.gmap;

	down_read(&gmap->mm->mmap_sem);
	/* Loop over all guest pages; last_gfn is the first gfn past the slot */
	last_gfn = memslot->base_gfn + memslot->npages;
	for (cur_gfn = memslot->base_gfn; cur_gfn < last_gfn; cur_gfn++) {
		address = gfn_to_hva_memslot(memslot, cur_gfn);

		if (gmap_test_and_clear_dirty(address, gmap))
			mark_page_dirty(kvm, cur_gfn);
	}
	up_read(&gmap->mm->mmap_sem);
}

/* Section: vm related */
/*
 * Get (and clear) the dirty memory log for a memory slot.
 */
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
			       struct kvm_dirty_log *log)
{
	int r;
	unsigned long n;
	struct kvm_memory_slot *memslot;
	int is_dirty = 0;

	mutex_lock(&kvm->slots_lock);

	r = -EINVAL;
	if (log->slot >= KVM_USER_MEM_SLOTS)
		goto out;

	memslot = id_to_memslot(kvm->memslots, log->slot);
	r = -ENOENT;
	if (!memslot->dirty_bitmap)
		goto out;

	kvm_s390_sync_dirty_log(kvm, memslot);
	r = kvm_get_dirty_log(kvm, log, &is_dirty);
	if (r)
		goto out;

	/* Clear the dirty log */
	if (is_dirty) {
		n = kvm_dirty_bitmap_bytes(memslot);
		memset(memslot->dirty_bitmap, 0, n);
	}
	r = 0;
out:
	mutex_unlock(&kvm->slots_lock);
	return r;
}

static int kvm_vm_ioctl_enable_cap(struct kvm *kvm, struct kvm_enable_cap *cap)
{
	int r;

	if (cap->flags)
		return -EINVAL;

	switch (cap->cap) {
	case KVM_CAP_S390_IRQCHIP:
		kvm->arch.use_irqchip = 1;
		r = 0;
		break;
	default:
		r = -EINVAL;
		break;
	}
	return r;
}

static int kvm_s390_vm_set_attr(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	switch (attr->group) {
	default:
		ret = -ENXIO;
		break;
	}

	return ret;
}

static int kvm_s390_vm_get_attr(struct kvm *kvm, struct kvm_device_attr *attr)
{
	return -ENXIO;
}

static int kvm_s390_vm_has_attr(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	switch (attr->group) {
	default:
		ret = -ENXIO;
		break;
	}

	return ret;
}

long kvm_arch_vm_ioctl(struct file *filp,
		       unsigned int ioctl, unsigned long arg)
{
	struct kvm *kvm = filp->private_data;
	void __user *argp = (void __user *)arg;
	struct kvm_device_attr attr;
	int r;

	switch (ioctl) {
	case KVM_S390_INTERRUPT: {
		struct kvm_s390_interrupt s390int;

		r = -EFAULT;
		if (copy_from_user(&s390int, argp, sizeof(s390int)))
			break;
		r = kvm_s390_inject_vm(kvm, &s390int);
		break;
	}
	case KVM_ENABLE_CAP: {
		struct kvm_enable_cap cap;

		r = -EFAULT;
		if (copy_from_user(&cap, argp, sizeof(cap)))
			break;
		r = kvm_vm_ioctl_enable_cap(kvm, &cap);
		break;
	}
	case KVM_CREATE_IRQCHIP: {
		struct kvm_irq_routing_entry routing;

		r = -EINVAL;
		if (kvm->arch.use_irqchip) {
			/* Set up dummy routing. */
			memset(&routing, 0, sizeof(routing));
			kvm_set_irq_routing(kvm, &routing, 0, 0);
			r = 0;
		}
		break;
	}
	case KVM_SET_DEVICE_ATTR: {
		r = -EFAULT;
		if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
			break;
		r = kvm_s390_vm_set_attr(kvm, &attr);
		break;
	}
	case KVM_GET_DEVICE_ATTR: {
		r = -EFAULT;
		if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
			break;
		r = kvm_s390_vm_get_attr(kvm, &attr);
		break;
	}
	case KVM_HAS_DEVICE_ATTR: {
		r = -EFAULT;
		if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
			break;
		r = kvm_s390_vm_has_attr(kvm, &attr);
		break;
	}
	default:
		r = -ENOTTY;
	}

	return r;
}

int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
{
	int rc;
	char debug_name[16];
	static unsigned long sca_offset;

	rc = -EINVAL;
#ifdef CONFIG_KVM_S390_UCONTROL
	if (type & ~KVM_VM_S390_UCONTROL)
		goto out_err;
	if ((type & KVM_VM_S390_UCONTROL) && (!capable(CAP_SYS_ADMIN)))
		goto out_err;
#else
	if (type)
		goto out_err;
#endif

	rc = s390_enable_sie();
	if (rc)
		goto out_err;

	rc = -ENOMEM;

	kvm->arch.sca = (struct sca_block *) get_zeroed_page(GFP_KERNEL);
	if (!kvm->arch.sca)
		goto out_err;
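	/*
	 * Stagger each VM's SCA within its page in 16-byte steps; the 0x7f0
	 * mask keeps the block inside the page. Presumably this spreads the
	 * hardware-accessed SCAs of different VMs over distinct cache lines.
	 */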
	spin_lock(&kvm_lock);
	sca_offset = (sca_offset + 16) & 0x7f0;
	kvm->arch.sca = (struct sca_block *) ((char *) kvm->arch.sca + sca_offset);
	spin_unlock(&kvm_lock);

	sprintf(debug_name, "kvm-%u", current->pid);

	kvm->arch.dbf = debug_register(debug_name, 8, 2, 8 * sizeof(long));
	if (!kvm->arch.dbf)
		goto out_nodbf;

	spin_lock_init(&kvm->arch.float_int.lock);
	INIT_LIST_HEAD(&kvm->arch.float_int.list);

	debug_register_view(kvm->arch.dbf, &debug_sprintf_view);
	VM_EVENT(kvm, 3, "%s", "vm created");

	if (type & KVM_VM_S390_UCONTROL) {
		kvm->arch.gmap = NULL;
	} else {
		kvm->arch.gmap = gmap_alloc(current->mm);
		if (!kvm->arch.gmap)
			goto out_nogmap;
		kvm->arch.gmap->private = kvm;
		kvm->arch.gmap->pfault_enabled = 0;
	}

	kvm->arch.css_support = 0;
	kvm->arch.use_irqchip = 0;

	return 0;
out_nogmap:
	debug_unregister(kvm->arch.dbf);
out_nodbf:
	free_page((unsigned long)(kvm->arch.sca));
out_err:
	return rc;
}

void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{
	VCPU_EVENT(vcpu, 3, "%s", "free cpu");
	trace_kvm_s390_destroy_vcpu(vcpu->vcpu_id);
	kvm_clear_async_pf_completion_queue(vcpu);
	if (!kvm_is_ucontrol(vcpu->kvm)) {
		clear_bit(63 - vcpu->vcpu_id,
			  (unsigned long *) &vcpu->kvm->arch.sca->mcn);
		if (vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda ==
		    (__u64) vcpu->arch.sie_block)
			vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda = 0;
	}
	smp_mb();

	if (kvm_is_ucontrol(vcpu->kvm))
		gmap_free(vcpu->arch.gmap);

	if (kvm_s390_cmma_enabled(vcpu->kvm))
		kvm_s390_vcpu_unsetup_cmma(vcpu);
	free_page((unsigned long)(vcpu->arch.sie_block));

	kvm_vcpu_uninit(vcpu);
	kmem_cache_free(kvm_vcpu_cache, vcpu);
}

static void kvm_free_vcpus(struct kvm *kvm)
{
	unsigned int i;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(i, vcpu, kvm)
		kvm_arch_vcpu_destroy(vcpu);

	mutex_lock(&kvm->lock);
	for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
		kvm->vcpus[i] = NULL;

	atomic_set(&kvm->online_vcpus, 0);
	mutex_unlock(&kvm->lock);
}

void kvm_arch_sync_events(struct kvm *kvm)
{
}

void kvm_arch_destroy_vm(struct kvm *kvm)
{
	kvm_free_vcpus(kvm);
	free_page((unsigned long)(kvm->arch.sca));
	debug_unregister(kvm->arch.dbf);
	if (!kvm_is_ucontrol(kvm))
		gmap_free(kvm->arch.gmap);
	kvm_s390_destroy_adapters(kvm);
}

/* Section: vcpu related */
int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
{
	vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
	kvm_clear_async_pf_completion_queue(vcpu);
	if (kvm_is_ucontrol(vcpu->kvm)) {
		vcpu->arch.gmap = gmap_alloc(current->mm);
		if (!vcpu->arch.gmap)
			return -ENOMEM;
		vcpu->arch.gmap->private = vcpu->kvm;
		return 0;
	}

	vcpu->arch.gmap = vcpu->kvm->arch.gmap;
	vcpu->run->kvm_valid_regs = KVM_SYNC_PREFIX |
				    KVM_SYNC_GPRS |
				    KVM_SYNC_ACRS |
				    KVM_SYNC_CRS;
	return 0;
}

void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
{
	/* Nothing to do */
}

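/*
 * Floating-point and access registers are switched lazily: load/put swap
 * guest and host copies, so while the vcpu is loaded the guest values live
 * in the real registers and the host values in vcpu->arch.host_*.
 */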
void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	save_fp_ctl(&vcpu->arch.host_fpregs.fpc);
	save_fp_regs(vcpu->arch.host_fpregs.fprs);
	save_access_regs(vcpu->arch.host_acrs);
	restore_fp_ctl(&vcpu->arch.guest_fpregs.fpc);
	restore_fp_regs(vcpu->arch.guest_fpregs.fprs);
	restore_access_regs(vcpu->run->s.regs.acrs);
	gmap_enable(vcpu->arch.gmap);
	atomic_set_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
}

void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
	atomic_clear_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
	gmap_disable(vcpu->arch.gmap);
	save_fp_ctl(&vcpu->arch.guest_fpregs.fpc);
	save_fp_regs(vcpu->arch.guest_fpregs.fprs);
	save_access_regs(vcpu->run->s.regs.acrs);
	restore_fp_ctl(&vcpu->arch.host_fpregs.fpc);
	restore_fp_regs(vcpu->arch.host_fpregs.fprs);
	restore_access_regs(vcpu->arch.host_acrs);
}

static void kvm_s390_vcpu_initial_reset(struct kvm_vcpu *vcpu)
{
	/* this equals initial cpu reset in pop, but we don't switch to ESA */
	vcpu->arch.sie_block->gpsw.mask = 0UL;
	vcpu->arch.sie_block->gpsw.addr = 0UL;
	kvm_s390_set_prefix(vcpu, 0);
	vcpu->arch.sie_block->cputm = 0UL;
	vcpu->arch.sie_block->ckc = 0UL;
	vcpu->arch.sie_block->todpr = 0;
	memset(vcpu->arch.sie_block->gcr, 0, 16 * sizeof(__u64));
	vcpu->arch.sie_block->gcr[0] = 0xE0UL;
	vcpu->arch.sie_block->gcr[14] = 0xC2000000UL;
	vcpu->arch.guest_fpregs.fpc = 0;
	asm volatile("lfpc %0" : : "Q" (vcpu->arch.guest_fpregs.fpc));
	vcpu->arch.sie_block->gbea = 1;
	vcpu->arch.sie_block->pp = 0;
	vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
	kvm_clear_async_pf_completion_queue(vcpu);
	atomic_set_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);
	kvm_s390_clear_local_irqs(vcpu);
}

int kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
{
	return 0;
}

void kvm_s390_vcpu_unsetup_cmma(struct kvm_vcpu *vcpu)
{
	free_page(vcpu->arch.sie_block->cbrlo);
	vcpu->arch.sie_block->cbrlo = 0;
}

int kvm_s390_vcpu_setup_cmma(struct kvm_vcpu *vcpu)
{
	vcpu->arch.sie_block->cbrlo = get_zeroed_page(GFP_KERNEL);
	if (!vcpu->arch.sie_block->cbrlo)
		return -ENOMEM;

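	/*
	 * cbrlo is the buffer SIE fills with block-usage indications for the
	 * ESSA instruction. Later kernels name these magic bits ECB2_CMMA
	 * (0x80, enable CMMA interpretation) and ECB2_PFMFI (0x08, PFMF
	 * interpretation, kept off while CMMA is active).
	 */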
	vcpu->arch.sie_block->ecb2 |= 0x80;
	vcpu->arch.sie_block->ecb2 &= ~0x08;
	return 0;
}

int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
{
	int rc = 0;

	atomic_set(&vcpu->arch.sie_block->cpuflags, CPUSTAT_ZARCH |
						    CPUSTAT_SM |
						    CPUSTAT_STOPPED |
						    CPUSTAT_GED);
	vcpu->arch.sie_block->ecb = 6;
	if (test_vfacility(50) && test_vfacility(73))
		vcpu->arch.sie_block->ecb |= 0x10;

	vcpu->arch.sie_block->ecb2 = 8;
	vcpu->arch.sie_block->eca = 0xC1002001U;
	vcpu->arch.sie_block->fac = (int) (long) vfacilities;
	vcpu->arch.sie_block->ictl |= ICTL_ISKE | ICTL_SSKE | ICTL_RRBE;
	if (kvm_s390_cmma_enabled(vcpu->kvm)) {
		rc = kvm_s390_vcpu_setup_cmma(vcpu);
		if (rc)
			return rc;
	}
	hrtimer_init(&vcpu->arch.ckc_timer, CLOCK_REALTIME, HRTIMER_MODE_ABS);
	tasklet_init(&vcpu->arch.tasklet, kvm_s390_tasklet,
		     (unsigned long) vcpu);
	vcpu->arch.ckc_timer.function = kvm_s390_idle_wakeup;
	get_cpu_id(&vcpu->arch.cpu_id);
	vcpu->arch.cpu_id.version = 0xff;
	return rc;
}

struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
				      unsigned int id)
{
	struct kvm_vcpu *vcpu;
	struct sie_page *sie_page;
	int rc = -EINVAL;

	if (id >= KVM_MAX_VCPUS)
		goto out;

	rc = -ENOMEM;

	vcpu = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
	if (!vcpu)
		goto out;

	sie_page = (struct sie_page *) get_zeroed_page(GFP_KERNEL);
	if (!sie_page)
		goto out_free_cpu;

	vcpu->arch.sie_block = &sie_page->sie_block;
	vcpu->arch.sie_block->itdba = (unsigned long) &sie_page->itdb;

	vcpu->arch.sie_block->icpua = id;
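	/*
	 * Hook this CPU into the system control area shared with the
	 * hardware: sda takes the SIE block address, scaoh/scaol the high
	 * and low halves of the SCA address, and mcn appears to be the
	 * bitmask of valid entries.
	 */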
	if (!kvm_is_ucontrol(kvm)) {
		if (!kvm->arch.sca) {
			WARN_ON_ONCE(1);
			goto out_free_cpu;
		}
		if (!kvm->arch.sca->cpu[id].sda)
			kvm->arch.sca->cpu[id].sda =
				(__u64) vcpu->arch.sie_block;
		vcpu->arch.sie_block->scaoh =
			(__u32)(((__u64)kvm->arch.sca) >> 32);
		vcpu->arch.sie_block->scaol = (__u32)(__u64)kvm->arch.sca;
		set_bit(63 - id, (unsigned long *) &kvm->arch.sca->mcn);
	}

	spin_lock_init(&vcpu->arch.local_int.lock);
	INIT_LIST_HEAD(&vcpu->arch.local_int.list);
	vcpu->arch.local_int.float_int = &kvm->arch.float_int;
	vcpu->arch.local_int.wq = &vcpu->wq;
	vcpu->arch.local_int.cpuflags = &vcpu->arch.sie_block->cpuflags;

	rc = kvm_vcpu_init(vcpu, kvm, id);
	if (rc)
		goto out_free_sie_block;
	VM_EVENT(kvm, 3, "create cpu %d at %p, sie block at %p", id, vcpu,
		 vcpu->arch.sie_block);
	trace_kvm_s390_create_vcpu(id, vcpu, vcpu->arch.sie_block);

	return vcpu;
out_free_sie_block:
	free_page((unsigned long)(vcpu->arch.sie_block));
out_free_cpu:
	kmem_cache_free(kvm_vcpu_cache, vcpu);
out:
	return ERR_PTR(rc);
}

int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
{
	return kvm_cpu_has_interrupt(vcpu);
}

void s390_vcpu_block(struct kvm_vcpu *vcpu)
{
	atomic_set_mask(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
}

void s390_vcpu_unblock(struct kvm_vcpu *vcpu)
{
	atomic_clear_mask(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
}

/*
 * Kick a guest cpu out of SIE and wait until SIE is not running.
 * If the CPU is not running (e.g. waiting as idle) the function will
 * return immediately.
 */
void exit_sie(struct kvm_vcpu *vcpu)
{
	atomic_set_mask(CPUSTAT_STOP_INT, &vcpu->arch.sie_block->cpuflags);
	while (vcpu->arch.sie_block->prog0c & PROG_IN_SIE)
		cpu_relax();
}

/* Kick a guest cpu out of SIE and prevent SIE-reentry */
void exit_sie_sync(struct kvm_vcpu *vcpu)
{
	s390_vcpu_block(vcpu);
	exit_sie(vcpu);
}

static void kvm_gmap_notifier(struct gmap *gmap, unsigned long address)
{
	int i;
	struct kvm *kvm = gmap->private;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(i, vcpu, kvm) {
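		/*
		 * The prefix area covers two consecutive 4K pages; masking
		 * out bit 0x1000 of the notified address lets one compare
		 * match either page.
		 */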
		/* match against both prefix pages */
		if (vcpu->arch.sie_block->prefix == (address & ~0x1000UL)) {
			VCPU_EVENT(vcpu, 2, "gmap notifier for %lx", address);
			kvm_make_request(KVM_REQ_MMU_RELOAD, vcpu);
			exit_sie_sync(vcpu);
		}
	}
}

int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
{
	/* kvm common code refers to this, but never calls it */
	BUG();
	return 0;
}

static int kvm_arch_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu,
					   struct kvm_one_reg *reg)
{
	int r = -EINVAL;

	switch (reg->id) {
	case KVM_REG_S390_TODPR:
		r = put_user(vcpu->arch.sie_block->todpr,
			     (u32 __user *)reg->addr);
		break;
	case KVM_REG_S390_EPOCHDIFF:
		r = put_user(vcpu->arch.sie_block->epoch,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_CPU_TIMER:
		r = put_user(vcpu->arch.sie_block->cputm,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_CLOCK_COMP:
		r = put_user(vcpu->arch.sie_block->ckc,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PFTOKEN:
		r = put_user(vcpu->arch.pfault_token,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PFCOMPARE:
		r = put_user(vcpu->arch.pfault_compare,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PFSELECT:
		r = put_user(vcpu->arch.pfault_select,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PP:
		r = put_user(vcpu->arch.sie_block->pp,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_GBEA:
		r = put_user(vcpu->arch.sie_block->gbea,
			     (u64 __user *)reg->addr);
		break;
	default:
		break;
	}

	return r;
}

static int kvm_arch_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu,
					   struct kvm_one_reg *reg)
{
	int r = -EINVAL;

	switch (reg->id) {
	case KVM_REG_S390_TODPR:
		r = get_user(vcpu->arch.sie_block->todpr,
			     (u32 __user *)reg->addr);
		break;
	case KVM_REG_S390_EPOCHDIFF:
		r = get_user(vcpu->arch.sie_block->epoch,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_CPU_TIMER:
		r = get_user(vcpu->arch.sie_block->cputm,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_CLOCK_COMP:
		r = get_user(vcpu->arch.sie_block->ckc,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PFTOKEN:
		r = get_user(vcpu->arch.pfault_token,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PFCOMPARE:
		r = get_user(vcpu->arch.pfault_compare,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PFSELECT:
		r = get_user(vcpu->arch.pfault_select,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PP:
		r = get_user(vcpu->arch.sie_block->pp,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_GBEA:
		r = get_user(vcpu->arch.sie_block->gbea,
			     (u64 __user *)reg->addr);
		break;
	default:
		break;
	}

	return r;
}

static int kvm_arch_vcpu_ioctl_initial_reset(struct kvm_vcpu *vcpu)
{
	kvm_s390_vcpu_initial_reset(vcpu);
	return 0;
}

int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	memcpy(&vcpu->run->s.regs.gprs, &regs->gprs, sizeof(regs->gprs));
	return 0;
}

int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	memcpy(&regs->gprs, &vcpu->run->s.regs.gprs, sizeof(regs->gprs));
	return 0;
}

int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	memcpy(&vcpu->run->s.regs.acrs, &sregs->acrs, sizeof(sregs->acrs));
	memcpy(&vcpu->arch.sie_block->gcr, &sregs->crs, sizeof(sregs->crs));
	restore_access_regs(vcpu->run->s.regs.acrs);
	return 0;
}

int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	memcpy(&sregs->acrs, &vcpu->run->s.regs.acrs, sizeof(sregs->acrs));
	memcpy(&sregs->crs, &vcpu->arch.sie_block->gcr, sizeof(sregs->crs));
	return 0;
}

int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	if (test_fp_ctl(fpu->fpc))
		return -EINVAL;
	memcpy(&vcpu->arch.guest_fpregs.fprs, &fpu->fprs, sizeof(fpu->fprs));
	vcpu->arch.guest_fpregs.fpc = fpu->fpc;
	restore_fp_ctl(&vcpu->arch.guest_fpregs.fpc);
	restore_fp_regs(vcpu->arch.guest_fpregs.fprs);
	return 0;
}

int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	memcpy(&fpu->fprs, &vcpu->arch.guest_fpregs.fprs, sizeof(fpu->fprs));
	fpu->fpc = vcpu->arch.guest_fpregs.fpc;
	return 0;
}

static int kvm_arch_vcpu_ioctl_set_initial_psw(struct kvm_vcpu *vcpu, psw_t psw)
{
	int rc = 0;

	if (!(atomic_read(&vcpu->arch.sie_block->cpuflags) & CPUSTAT_STOPPED))
		rc = -EBUSY;
	else {
		vcpu->run->psw_mask = psw.mask;
		vcpu->run->psw_addr = psw.addr;
	}
	return rc;
}

int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
				  struct kvm_translation *tr)
{
	return -EINVAL; /* not implemented yet */
}

int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
					struct kvm_guest_debug *dbg)
{
	return -EINVAL; /* not implemented yet */
}

int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	return -EINVAL; /* not implemented yet */
}

int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	return -EINVAL; /* not implemented yet */
}

bool kvm_s390_cmma_enabled(struct kvm *kvm)
{
	if (!MACHINE_IS_LPAR)
		return false;
	/* only enable for z10 and later */
	if (!MACHINE_HAS_EDAT1)
		return false;
	if (!kvm->arch.use_cmma)
		return false;
	return true;
}

static int kvm_s390_handle_requests(struct kvm_vcpu *vcpu)
{
	/*
	 * We use MMU_RELOAD just to re-arm the ipte notifier for the
	 * guest prefix page. gmap_ipte_notify will wait on the ptl lock.
	 * This ensures that the ipte instruction for this request has
	 * already finished. We might race against a second unmapper that
	 * wants to set the blocking bit. Let's just retry the request loop.
	 */
	while (kvm_check_request(KVM_REQ_MMU_RELOAD, vcpu)) {
		int rc;

		rc = gmap_ipte_notify(vcpu->arch.gmap,
				      vcpu->arch.sie_block->prefix,
				      PAGE_SIZE * 2);
		if (rc)
			return rc;
		s390_vcpu_unblock(vcpu);
	}
	return 0;
}

static long kvm_arch_fault_in_sync(struct kvm_vcpu *vcpu)
{
	long rc;
	hva_t fault = gmap_fault(current->thread.gmap_addr, vcpu->arch.gmap);
	struct mm_struct *mm = current->mm;

	down_read(&mm->mmap_sem);
	rc = get_user_pages(current, mm, fault, 1, 1, 0, NULL, NULL);
	up_read(&mm->mmap_sem);
	return rc;
}

static void __kvm_inject_pfault_token(struct kvm_vcpu *vcpu, bool start_token,
				      unsigned long token)
{
	struct kvm_s390_interrupt inti;

	inti.parm64 = token;
	if (start_token) {
		inti.type = KVM_S390_INT_PFAULT_INIT;
		WARN_ON_ONCE(kvm_s390_inject_vcpu(vcpu, &inti));
	} else {
		inti.type = KVM_S390_INT_PFAULT_DONE;
		WARN_ON_ONCE(kvm_s390_inject_vm(vcpu->kvm, &inti));
	}
}

void kvm_arch_async_page_not_present(struct kvm_vcpu *vcpu,
				     struct kvm_async_pf *work)
{
	trace_kvm_s390_pfault_init(vcpu, work->arch.pfault_token);
	__kvm_inject_pfault_token(vcpu, true, work->arch.pfault_token);
}

void kvm_arch_async_page_present(struct kvm_vcpu *vcpu,
				 struct kvm_async_pf *work)
{
	trace_kvm_s390_pfault_done(vcpu, work->arch.pfault_token);
	__kvm_inject_pfault_token(vcpu, false, work->arch.pfault_token);
}

void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu,
			       struct kvm_async_pf *work)
{
	/* s390 will always inject the page directly */
}

bool kvm_arch_can_inject_async_page_present(struct kvm_vcpu *vcpu)
{
	/*
	 * s390 will always inject the page directly,
	 * but we still want check_async_completion to cleanup
	 */
	return true;
}

static int kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu)
{
	hva_t hva;
	struct kvm_arch_async_pf arch;
	int rc;

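	/*
	 * Hand the fault to the async-pf machinery only if the guest set up
	 * pfault (valid token), the current PSW matches the select/compare
	 * masks it registered, external interrupts can be delivered (the
	 * 0x200 bit in CR0 appears to be the relevant subclass mask), and
	 * no other interrupt is already pending.
	 */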
	if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
		return 0;
	if ((vcpu->arch.sie_block->gpsw.mask & vcpu->arch.pfault_select) !=
	    vcpu->arch.pfault_compare)
		return 0;
	if (psw_extint_disabled(vcpu))
		return 0;
	if (kvm_cpu_has_interrupt(vcpu))
		return 0;
	if (!(vcpu->arch.sie_block->gcr[0] & 0x200ul))
		return 0;
	if (!vcpu->arch.gmap->pfault_enabled)
		return 0;

	hva = gmap_fault(current->thread.gmap_addr, vcpu->arch.gmap);
	if (copy_from_guest(vcpu, &arch.pfault_token, vcpu->arch.pfault_token, 8))
		return 0;

	rc = kvm_setup_async_pf(vcpu, current->thread.gmap_addr, hva, &arch);
	return rc;
}

static int vcpu_pre_run(struct kvm_vcpu *vcpu)
{
	int rc, cpuflags;

	/*
	 * On s390 notifications for arriving pages will be delivered directly
	 * to the guest but the house keeping for completed pfaults is
	 * handled outside the worker.
	 */
	kvm_check_async_pf_completion(vcpu);

	memcpy(&vcpu->arch.sie_block->gg14, &vcpu->run->s.regs.gprs[14], 16);

	if (need_resched())
		schedule();

	if (test_thread_flag(TIF_MCCK_PENDING))
		s390_handle_mcck();

	if (!kvm_is_ucontrol(vcpu->kvm))
		kvm_s390_deliver_pending_interrupts(vcpu);

	rc = kvm_s390_handle_requests(vcpu);
	if (rc)
		return rc;

	vcpu->arch.sie_block->icptcode = 0;
	cpuflags = atomic_read(&vcpu->arch.sie_block->cpuflags);
	VCPU_EVENT(vcpu, 6, "entering sie flags %x", cpuflags);
	trace_kvm_s390_sie_enter(vcpu, cpuflags);

	return 0;
}

static int vcpu_post_run(struct kvm_vcpu *vcpu, int exit_reason)
{
	int rc = -1;

	VCPU_EVENT(vcpu, 6, "exit sie icptcode %d",
		   vcpu->arch.sie_block->icptcode);
	trace_kvm_s390_sie_exit(vcpu, vcpu->arch.sie_block->icptcode);

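	/*
	 * A non-negative exit_reason means sie64a returned regularly;
	 * negative values indicate a host-side fault while running SIE,
	 * which the branches below try to resolve.
	 */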
	if (exit_reason >= 0) {
		rc = 0;
	} else if (kvm_is_ucontrol(vcpu->kvm)) {
		vcpu->run->exit_reason = KVM_EXIT_S390_UCONTROL;
		vcpu->run->s390_ucontrol.trans_exc_code =
						current->thread.gmap_addr;
		vcpu->run->s390_ucontrol.pgm_code = 0x10;
		rc = -EREMOTE;

	} else if (current->thread.gmap_pfault) {
		trace_kvm_s390_major_guest_pfault(vcpu);
		current->thread.gmap_pfault = 0;
		if (kvm_arch_setup_async_pf(vcpu) ||
		    (kvm_arch_fault_in_sync(vcpu) >= 0))
			rc = 0;
	}

	if (rc == -1) {
		VCPU_EVENT(vcpu, 3, "%s", "fault in sie instruction");
		trace_kvm_s390_sie_fault(vcpu);
		rc = kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
	}

	memcpy(&vcpu->run->s.regs.gprs[14], &vcpu->arch.sie_block->gg14, 16);

	if (rc == 0) {
		if (kvm_is_ucontrol(vcpu->kvm))
			/* Don't exit for host interrupts. */
			rc = vcpu->arch.sie_block->icptcode ? -EOPNOTSUPP : 0;
		else
			rc = kvm_handle_sie_intercept(vcpu);
	}

	return rc;
}

static int __vcpu_run(struct kvm_vcpu *vcpu)
{
	int rc, exit_reason;

	/*
	 * We try to hold kvm->srcu during most of vcpu_run (except when run-
	 * ning the guest), so that memslots (and other stuff) are protected
	 */
	vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);

	do {
		rc = vcpu_pre_run(vcpu);
		if (rc)
			break;

		srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
		/*
		 * As PF_VCPU will be used in the fault handler, there must
		 * be no uaccess between guest_enter and guest_exit.
		 */
		preempt_disable();
		kvm_guest_enter();
		preempt_enable();
		exit_reason = sie64a(vcpu->arch.sie_block,
				     vcpu->run->s.regs.gprs);
		kvm_guest_exit();
		vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);

		rc = vcpu_post_run(vcpu, exit_reason);
	} while (!signal_pending(current) && !rc);

	srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
	return rc;
}

int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	int rc;
	sigset_t sigsaved;

	if (vcpu->sigset_active)
		sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);

	atomic_clear_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);

	switch (kvm_run->exit_reason) {
	case KVM_EXIT_S390_SIEIC:
	case KVM_EXIT_UNKNOWN:
	case KVM_EXIT_INTR:
	case KVM_EXIT_S390_RESET:
	case KVM_EXIT_S390_UCONTROL:
	case KVM_EXIT_S390_TSCH:
		break;
	default:
		BUG();
	}

	vcpu->arch.sie_block->gpsw.mask = kvm_run->psw_mask;
	vcpu->arch.sie_block->gpsw.addr = kvm_run->psw_addr;
	if (kvm_run->kvm_dirty_regs & KVM_SYNC_PREFIX) {
		kvm_run->kvm_dirty_regs &= ~KVM_SYNC_PREFIX;
		kvm_s390_set_prefix(vcpu, kvm_run->s.regs.prefix);
	}
	if (kvm_run->kvm_dirty_regs & KVM_SYNC_CRS) {
		kvm_run->kvm_dirty_regs &= ~KVM_SYNC_CRS;
		memcpy(&vcpu->arch.sie_block->gcr, &kvm_run->s.regs.crs, 128);
		kvm_s390_set_prefix(vcpu, kvm_run->s.regs.prefix);
	}

	might_fault();
	rc = __vcpu_run(vcpu);

	if (signal_pending(current) && !rc) {
		kvm_run->exit_reason = KVM_EXIT_INTR;
		rc = -EINTR;
	}

	if (rc == -EOPNOTSUPP) {
		/* intercept cannot be handled in-kernel, prepare kvm-run */
		kvm_run->exit_reason = KVM_EXIT_S390_SIEIC;
		kvm_run->s390_sieic.icptcode = vcpu->arch.sie_block->icptcode;
		kvm_run->s390_sieic.ipa = vcpu->arch.sie_block->ipa;
		kvm_run->s390_sieic.ipb = vcpu->arch.sie_block->ipb;
		rc = 0;
	}

	if (rc == -EREMOTE) {
		/* intercept was handled, but userspace support is needed;
		 * kvm_run has been prepared by the handler */
		rc = 0;
	}

	kvm_run->psw_mask = vcpu->arch.sie_block->gpsw.mask;
	kvm_run->psw_addr = vcpu->arch.sie_block->gpsw.addr;
	kvm_run->s.regs.prefix = vcpu->arch.sie_block->prefix;
	memcpy(&kvm_run->s.regs.crs, &vcpu->arch.sie_block->gcr, 128);

	if (vcpu->sigset_active)
		sigprocmask(SIG_SETMASK, &sigsaved, NULL);

	vcpu->stat.exit_userspace++;
	return rc;
}

static int __guestcopy(struct kvm_vcpu *vcpu, u64 guestdest, void *from,
		       unsigned long n, int prefix)
{
	if (prefix)
		return copy_to_guest(vcpu, guestdest, from, n);
	else
		return copy_to_guest_absolute(vcpu, guestdest, from, n);
}

/*
 * store status at address
 * we have two special cases:
 * KVM_S390_STORE_STATUS_NOADDR: -> 0x1200 on 64 bit
 * KVM_S390_STORE_STATUS_PREFIXED: -> prefix
 */
int kvm_s390_store_status_unloaded(struct kvm_vcpu *vcpu, unsigned long addr)
{
	unsigned char archmode = 1;
	int prefix;
	u64 clkcomp;

	if (addr == KVM_S390_STORE_STATUS_NOADDR) {
		if (copy_to_guest_absolute(vcpu, 163ul, &archmode, 1))
			return -EFAULT;
		addr = SAVE_AREA_BASE;
		prefix = 0;
	} else if (addr == KVM_S390_STORE_STATUS_PREFIXED) {
		if (copy_to_guest(vcpu, 163ul, &archmode, 1))
			return -EFAULT;
		addr = SAVE_AREA_BASE;
		prefix = 1;
	} else
		prefix = 0;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, fp_regs),
			vcpu->arch.guest_fpregs.fprs, 128, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, gp_regs),
			vcpu->run->s.regs.gprs, 128, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, psw),
			&vcpu->arch.sie_block->gpsw, 16, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, pref_reg),
			&vcpu->arch.sie_block->prefix, 4, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu,
			addr + offsetof(struct save_area, fp_ctrl_reg),
			&vcpu->arch.guest_fpregs.fpc, 4, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, tod_reg),
			&vcpu->arch.sie_block->todpr, 4, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, timer),
			&vcpu->arch.sie_block->cputm, 8, prefix))
		return -EFAULT;

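	/* The save area apparently keeps the clock comparator shifted right
	 * by eight bits, matching the hardware store-status format. */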
	clkcomp = vcpu->arch.sie_block->ckc >> 8;
	if (__guestcopy(vcpu, addr + offsetof(struct save_area, clk_cmp),
			&clkcomp, 8, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu, addr + offsetof(struct save_area, acc_regs),
			&vcpu->run->s.regs.acrs, 64, prefix))
		return -EFAULT;

	if (__guestcopy(vcpu,
			addr + offsetof(struct save_area, ctrl_regs),
			&vcpu->arch.sie_block->gcr, 128, prefix))
		return -EFAULT;
	return 0;
}

int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
{
	/*
	 * The guest FPRS and ACRS are in the host FPRS/ACRS due to the lazy
	 * copying in vcpu load/put. Let's update our copies before we save
	 * it into the save area.
	 */
	save_fp_ctl(&vcpu->arch.guest_fpregs.fpc);
	save_fp_regs(vcpu->arch.guest_fpregs.fprs);
	save_access_regs(vcpu->run->s.regs.acrs);

	return kvm_s390_store_status_unloaded(vcpu, addr);
}

static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
				     struct kvm_enable_cap *cap)
{
	int r;

	if (cap->flags)
		return -EINVAL;

	switch (cap->cap) {
	case KVM_CAP_S390_CSS_SUPPORT:
		if (!vcpu->kvm->arch.css_support) {
			vcpu->kvm->arch.css_support = 1;
			trace_kvm_s390_enable_css(vcpu->kvm);
		}
		r = 0;
		break;
	default:
		r = -EINVAL;
		break;
	}
	return r;
}

long kvm_arch_vcpu_ioctl(struct file *filp,
			 unsigned int ioctl, unsigned long arg)
{
	struct kvm_vcpu *vcpu = filp->private_data;
	void __user *argp = (void __user *)arg;
	int idx;
	long r;

	switch (ioctl) {
	case KVM_S390_INTERRUPT: {
		struct kvm_s390_interrupt s390int;

		r = -EFAULT;
		if (copy_from_user(&s390int, argp, sizeof(s390int)))
			break;
		r = kvm_s390_inject_vcpu(vcpu, &s390int);
		break;
	}
	case KVM_S390_STORE_STATUS:
		idx = srcu_read_lock(&vcpu->kvm->srcu);
		r = kvm_s390_vcpu_store_status(vcpu, arg);
		srcu_read_unlock(&vcpu->kvm->srcu, idx);
		break;
	case KVM_S390_SET_INITIAL_PSW: {
		psw_t psw;

		r = -EFAULT;
		if (copy_from_user(&psw, argp, sizeof(psw)))
			break;
		r = kvm_arch_vcpu_ioctl_set_initial_psw(vcpu, psw);
		break;
	}
	case KVM_S390_INITIAL_RESET:
		r = kvm_arch_vcpu_ioctl_initial_reset(vcpu);
		break;
	case KVM_SET_ONE_REG:
	case KVM_GET_ONE_REG: {
		struct kvm_one_reg reg;

		r = -EFAULT;
		if (copy_from_user(&reg, argp, sizeof(reg)))
			break;
		if (ioctl == KVM_SET_ONE_REG)
			r = kvm_arch_vcpu_ioctl_set_one_reg(vcpu, &reg);
		else
			r = kvm_arch_vcpu_ioctl_get_one_reg(vcpu, &reg);
		break;
	}
#ifdef CONFIG_KVM_S390_UCONTROL
	case KVM_S390_UCAS_MAP: {
		struct kvm_s390_ucas_mapping ucasmap;

		if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
			r = -EFAULT;
			break;
		}

		if (!kvm_is_ucontrol(vcpu->kvm)) {
			r = -EINVAL;
			break;
		}

		r = gmap_map_segment(vcpu->arch.gmap, ucasmap.user_addr,
				     ucasmap.vcpu_addr, ucasmap.length);
		break;
	}
	case KVM_S390_UCAS_UNMAP: {
		struct kvm_s390_ucas_mapping ucasmap;

		if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
			r = -EFAULT;
			break;
		}

		if (!kvm_is_ucontrol(vcpu->kvm)) {
			r = -EINVAL;
			break;
		}

		r = gmap_unmap_segment(vcpu->arch.gmap, ucasmap.vcpu_addr,
				       ucasmap.length);
		break;
	}
#endif
	case KVM_S390_VCPU_FAULT: {
		r = gmap_fault(arg, vcpu->arch.gmap);
		if (!IS_ERR_VALUE(r))
			r = 0;
		break;
	}
	case KVM_ENABLE_CAP: {
		struct kvm_enable_cap cap;

		r = -EFAULT;
		if (copy_from_user(&cap, argp, sizeof(cap)))
			break;
		r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap);
		break;
	}
	default:
		r = -ENOTTY;
	}
	return r;
}

int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
{
#ifdef CONFIG_KVM_S390_UCONTROL
	if ((vmf->pgoff == KVM_S390_SIE_PAGE_OFFSET)
		 && (kvm_is_ucontrol(vcpu->kvm))) {
		vmf->page = virt_to_page(vcpu->arch.sie_block);
		get_page(vmf->page);
		return 0;
	}
#endif
	return VM_FAULT_SIGBUS;
}

void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free,
			   struct kvm_memory_slot *dont)
{
}

int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
			    unsigned long npages)
{
	return 0;
}

void kvm_arch_memslots_updated(struct kvm *kvm)
{
}

/* Section: memory related */
int kvm_arch_prepare_memory_region(struct kvm *kvm,
				   struct kvm_memory_slot *memslot,
				   struct kvm_userspace_memory_region *mem,
				   enum kvm_mr_change change)
{
	/* A few sanity checks. We can have memory slots which have to be
	   located/ended at a segment boundary (1MB). The memory in userland is
	   ok to be fragmented into various different vmas. It is okay to mmap()
	   and munmap() stuff in this slot after doing this call at any time */

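	/* 0xfffff is 1MB - 1: both start address and size must be 1MB aligned. */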
	if (mem->userspace_addr & 0xffffful)
		return -EINVAL;

	if (mem->memory_size & 0xffffful)
		return -EINVAL;

	return 0;
}

void kvm_arch_commit_memory_region(struct kvm *kvm,
				   struct kvm_userspace_memory_region *mem,
				   const struct kvm_memory_slot *old,
				   enum kvm_mr_change change)
{
	int rc;

	/* If the basics of the memslot do not change, we do not want
	 * to update the gmap. Every update causes several unnecessary
	 * segment translation exceptions. This is usually handled just
	 * fine by the normal fault handler + gmap, but it will also
	 * cause faults on the prefix page of running guest CPUs.
	 */
	if (old->userspace_addr == mem->userspace_addr &&
	    old->base_gfn * PAGE_SIZE == mem->guest_phys_addr &&
	    old->npages * PAGE_SIZE == mem->memory_size)
		return;

	rc = gmap_map_segment(kvm->arch.gmap, mem->userspace_addr,
			      mem->guest_phys_addr, mem->memory_size);
	if (rc)
		printk(KERN_WARNING "kvm-s390: failed to commit memory region\n");
	return;
}

void kvm_arch_flush_shadow_all(struct kvm *kvm)
{
}

void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
				   struct kvm_memory_slot *slot)
{
}

static int __init kvm_s390_init(void)
{
	int ret;

	ret = kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
	if (ret)
		return ret;

	/*
	 * guests can ask for up to 255+1 double words, we need a full page
	 * to hold the maximum amount of facilities. On the other hand, we
	 * only set facilities that are known to work in KVM.
	 */
	vfacilities = (unsigned long *) get_zeroed_page(GFP_KERNEL|GFP_DMA);
	if (!vfacilities) {
		kvm_exit();
		return -ENOMEM;
	}
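	/*
	 * Copy the first two doublewords of the host facility list, then
	 * apply the whitelist masks below so that only facilities KVM can
	 * virtualize are reported to guests.
	 */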
	memcpy(vfacilities, S390_lowcore.stfle_fac_list, 16);
	vfacilities[0] &= 0xff82fff3f4fc2000UL;
	vfacilities[1] &= 0x005c000000000000UL;
	return 0;
}

static void __exit kvm_s390_exit(void)
{
	free_page((unsigned long) vfacilities);
	kvm_exit();
}

module_init(kvm_s390_init);
module_exit(kvm_s390_exit);

/*
 * Enable autoloading of the kvm module.
 * Note that we add the module alias here instead of virt/kvm/kvm_main.c
 * since x86 takes a different approach.
 */
#include <linux/miscdevice.h>
MODULE_ALIAS_MISCDEV(KVM_MINOR);
MODULE_ALIAS("devname:kvm");