]> git.proxmox.com Git - mirror_ubuntu-artful-kernel.git/blame - arch/s390/kvm/kvm-s390.c
KVM: s390: introduce post handlers for STSI
[mirror_ubuntu-artful-kernel.git] / arch / s390 / kvm / kvm-s390.c
CommitLineData
b0c632db 1/*
a53c8fab 2 * hosting zSeries kernel virtual machines
b0c632db 3 *
a53c8fab 4 * Copyright IBM Corp. 2008, 2009
b0c632db
HC
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License (version 2 only)
8 * as published by the Free Software Foundation.
9 *
10 * Author(s): Carsten Otte <cotte@de.ibm.com>
11 * Christian Borntraeger <borntraeger@de.ibm.com>
12 * Heiko Carstens <heiko.carstens@de.ibm.com>
628eb9b8 13 * Christian Ehrhardt <ehrhardt@de.ibm.com>
15f36ebd 14 * Jason J. Herne <jjherne@us.ibm.com>
b0c632db
HC
15 */
16
17#include <linux/compiler.h>
18#include <linux/err.h>
19#include <linux/fs.h>
ca872302 20#include <linux/hrtimer.h>
b0c632db
HC
21#include <linux/init.h>
22#include <linux/kvm.h>
23#include <linux/kvm_host.h>
24#include <linux/module.h>
a374e892 25#include <linux/random.h>
b0c632db 26#include <linux/slab.h>
ba5c1e9b 27#include <linux/timer.h>
41408c28 28#include <linux/vmalloc.h>
cbb870c8 29#include <asm/asm-offsets.h>
b0c632db
HC
30#include <asm/lowcore.h>
31#include <asm/pgtable.h>
f5daba1d 32#include <asm/nmi.h>
a0616cde 33#include <asm/switch_to.h>
1526bf9c 34#include <asm/sclp.h>
8f2abe6a 35#include "kvm-s390.h"
b0c632db
HC
36#include "gaccess.h"
37
5786fffa
CH
38#define CREATE_TRACE_POINTS
39#include "trace.h"
ade38c31 40#include "trace-s390.h"
5786fffa 41
41408c28
TH
42#define MEM_OP_MAX_SIZE 65536 /* Maximum transfer size for KVM_S390_MEM_OP */
43
b0c632db
HC
44#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU
45
46struct kvm_stats_debugfs_item debugfs_entries[] = {
47 { "userspace_handled", VCPU_STAT(exit_userspace) },
0eaeafa1 48 { "exit_null", VCPU_STAT(exit_null) },
8f2abe6a
CB
49 { "exit_validity", VCPU_STAT(exit_validity) },
50 { "exit_stop_request", VCPU_STAT(exit_stop_request) },
51 { "exit_external_request", VCPU_STAT(exit_external_request) },
52 { "exit_external_interrupt", VCPU_STAT(exit_external_interrupt) },
ba5c1e9b
CO
53 { "exit_instruction", VCPU_STAT(exit_instruction) },
54 { "exit_program_interruption", VCPU_STAT(exit_program_interruption) },
55 { "exit_instr_and_program_int", VCPU_STAT(exit_instr_and_program) },
f7819512 56 { "halt_successful_poll", VCPU_STAT(halt_successful_poll) },
ce2e4f0b 57 { "halt_wakeup", VCPU_STAT(halt_wakeup) },
f5e10b09 58 { "instruction_lctlg", VCPU_STAT(instruction_lctlg) },
ba5c1e9b 59 { "instruction_lctl", VCPU_STAT(instruction_lctl) },
aba07508
DH
60 { "instruction_stctl", VCPU_STAT(instruction_stctl) },
61 { "instruction_stctg", VCPU_STAT(instruction_stctg) },
ba5c1e9b 62 { "deliver_emergency_signal", VCPU_STAT(deliver_emergency_signal) },
7697e71f 63 { "deliver_external_call", VCPU_STAT(deliver_external_call) },
ba5c1e9b
CO
64 { "deliver_service_signal", VCPU_STAT(deliver_service_signal) },
65 { "deliver_virtio_interrupt", VCPU_STAT(deliver_virtio_interrupt) },
66 { "deliver_stop_signal", VCPU_STAT(deliver_stop_signal) },
67 { "deliver_prefix_signal", VCPU_STAT(deliver_prefix_signal) },
68 { "deliver_restart_signal", VCPU_STAT(deliver_restart_signal) },
69 { "deliver_program_interruption", VCPU_STAT(deliver_program_int) },
70 { "exit_wait_state", VCPU_STAT(exit_wait_state) },
69d0d3a3 71 { "instruction_pfmf", VCPU_STAT(instruction_pfmf) },
453423dc
CB
72 { "instruction_stidp", VCPU_STAT(instruction_stidp) },
73 { "instruction_spx", VCPU_STAT(instruction_spx) },
74 { "instruction_stpx", VCPU_STAT(instruction_stpx) },
75 { "instruction_stap", VCPU_STAT(instruction_stap) },
76 { "instruction_storage_key", VCPU_STAT(instruction_storage_key) },
8a242234 77 { "instruction_ipte_interlock", VCPU_STAT(instruction_ipte_interlock) },
453423dc
CB
78 { "instruction_stsch", VCPU_STAT(instruction_stsch) },
79 { "instruction_chsc", VCPU_STAT(instruction_chsc) },
b31288fa 80 { "instruction_essa", VCPU_STAT(instruction_essa) },
453423dc
CB
81 { "instruction_stsi", VCPU_STAT(instruction_stsi) },
82 { "instruction_stfl", VCPU_STAT(instruction_stfl) },
bb25b9ba 83 { "instruction_tprot", VCPU_STAT(instruction_tprot) },
5288fbf0 84 { "instruction_sigp_sense", VCPU_STAT(instruction_sigp_sense) },
bd59d3a4 85 { "instruction_sigp_sense_running", VCPU_STAT(instruction_sigp_sense_running) },
7697e71f 86 { "instruction_sigp_external_call", VCPU_STAT(instruction_sigp_external_call) },
5288fbf0 87 { "instruction_sigp_emergency", VCPU_STAT(instruction_sigp_emergency) },
42cb0c9f
DH
88 { "instruction_sigp_cond_emergency", VCPU_STAT(instruction_sigp_cond_emergency) },
89 { "instruction_sigp_start", VCPU_STAT(instruction_sigp_start) },
5288fbf0 90 { "instruction_sigp_stop", VCPU_STAT(instruction_sigp_stop) },
42cb0c9f
DH
91 { "instruction_sigp_stop_store_status", VCPU_STAT(instruction_sigp_stop_store_status) },
92 { "instruction_sigp_store_status", VCPU_STAT(instruction_sigp_store_status) },
cd7b4b61 93 { "instruction_sigp_store_adtl_status", VCPU_STAT(instruction_sigp_store_adtl_status) },
5288fbf0
CB
94 { "instruction_sigp_set_arch", VCPU_STAT(instruction_sigp_arch) },
95 { "instruction_sigp_set_prefix", VCPU_STAT(instruction_sigp_prefix) },
96 { "instruction_sigp_restart", VCPU_STAT(instruction_sigp_restart) },
42cb0c9f
DH
97 { "instruction_sigp_cpu_reset", VCPU_STAT(instruction_sigp_cpu_reset) },
98 { "instruction_sigp_init_cpu_reset", VCPU_STAT(instruction_sigp_init_cpu_reset) },
99 { "instruction_sigp_unknown", VCPU_STAT(instruction_sigp_unknown) },
388186bc 100 { "diagnose_10", VCPU_STAT(diagnose_10) },
e28acfea 101 { "diagnose_44", VCPU_STAT(diagnose_44) },
41628d33 102 { "diagnose_9c", VCPU_STAT(diagnose_9c) },
b0c632db
HC
103 { NULL }
104};
105
9d8d5786
MM
106/* upper facilities limit for kvm */
107unsigned long kvm_s390_fac_list_mask[] = {
108 0xff82fffbf4fc2000UL,
109 0x005c000000000000UL,
13211ea7 110 0x4000000000000000UL,
9d8d5786 111};
b0c632db 112
9d8d5786 113unsigned long kvm_s390_fac_list_mask_size(void)
78c4b59f 114{
9d8d5786
MM
115 BUILD_BUG_ON(ARRAY_SIZE(kvm_s390_fac_list_mask) > S390_ARCH_FAC_MASK_SIZE_U64);
116 return ARRAY_SIZE(kvm_s390_fac_list_mask);
78c4b59f
MM
117}
118
9d8d5786
MM
119static struct gmap_notifier gmap_notifier;
120
b0c632db 121/* Section: not file related */
13a34e06 122int kvm_arch_hardware_enable(void)
b0c632db
HC
123{
124 /* every s390 is virtualization enabled ;-) */
10474ae8 125 return 0;
b0c632db
HC
126}
127
2c70fe44
CB
128static void kvm_gmap_notifier(struct gmap *gmap, unsigned long address);
129
b0c632db
HC
130int kvm_arch_hardware_setup(void)
131{
2c70fe44
CB
132 gmap_notifier.notifier_call = kvm_gmap_notifier;
133 gmap_register_ipte_notifier(&gmap_notifier);
b0c632db
HC
134 return 0;
135}
136
137void kvm_arch_hardware_unsetup(void)
138{
2c70fe44 139 gmap_unregister_ipte_notifier(&gmap_notifier);
b0c632db
HC
140}
141
b0c632db
HC
142int kvm_arch_init(void *opaque)
143{
84877d93
CH
144 /* Register floating interrupt controller interface. */
145 return kvm_register_device_ops(&kvm_flic_ops, KVM_DEV_TYPE_FLIC);
b0c632db
HC
146}
147
b0c632db
HC
148/* Section: device related */
149long kvm_arch_dev_ioctl(struct file *filp,
150 unsigned int ioctl, unsigned long arg)
151{
152 if (ioctl == KVM_S390_ENABLE_SIE)
153 return s390_enable_sie();
154 return -EINVAL;
155}
156
784aa3d7 157int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
b0c632db 158{
d7b0b5eb
CO
159 int r;
160
2bd0ac4e 161 switch (ext) {
d7b0b5eb 162 case KVM_CAP_S390_PSW:
b6cf8788 163 case KVM_CAP_S390_GMAP:
52e16b18 164 case KVM_CAP_SYNC_MMU:
1efd0f59
CO
165#ifdef CONFIG_KVM_S390_UCONTROL
166 case KVM_CAP_S390_UCONTROL:
167#endif
3c038e6b 168 case KVM_CAP_ASYNC_PF:
60b413c9 169 case KVM_CAP_SYNC_REGS:
14eebd91 170 case KVM_CAP_ONE_REG:
d6712df9 171 case KVM_CAP_ENABLE_CAP:
fa6b7fe9 172 case KVM_CAP_S390_CSS_SUPPORT:
ebc32262 173 case KVM_CAP_IRQFD:
10ccaa1e 174 case KVM_CAP_IOEVENTFD:
c05c4186 175 case KVM_CAP_DEVICE_CTRL:
d938dc55 176 case KVM_CAP_ENABLE_CAP_VM:
78599d90 177 case KVM_CAP_S390_IRQCHIP:
f2061656 178 case KVM_CAP_VM_ATTRIBUTES:
6352e4d2 179 case KVM_CAP_MP_STATE:
2444b352 180 case KVM_CAP_S390_USER_SIGP:
e44fc8c9 181 case KVM_CAP_S390_USER_STSI:
d7b0b5eb
CO
182 r = 1;
183 break;
41408c28
TH
184 case KVM_CAP_S390_MEM_OP:
185 r = MEM_OP_MAX_SIZE;
186 break;
e726b1bd
CB
187 case KVM_CAP_NR_VCPUS:
188 case KVM_CAP_MAX_VCPUS:
189 r = KVM_MAX_VCPUS;
190 break;
e1e2e605
NW
191 case KVM_CAP_NR_MEMSLOTS:
192 r = KVM_USER_MEM_SLOTS;
193 break;
1526bf9c 194 case KVM_CAP_S390_COW:
abf09bed 195 r = MACHINE_HAS_ESOP;
1526bf9c 196 break;
68c55750
EF
197 case KVM_CAP_S390_VECTOR_REGISTERS:
198 r = MACHINE_HAS_VX;
199 break;
2bd0ac4e 200 default:
d7b0b5eb 201 r = 0;
2bd0ac4e 202 }
d7b0b5eb 203 return r;
b0c632db
HC
204}
205
15f36ebd
JH
206static void kvm_s390_sync_dirty_log(struct kvm *kvm,
207 struct kvm_memory_slot *memslot)
208{
209 gfn_t cur_gfn, last_gfn;
210 unsigned long address;
211 struct gmap *gmap = kvm->arch.gmap;
212
213 down_read(&gmap->mm->mmap_sem);
214 /* Loop over all guest pages */
215 last_gfn = memslot->base_gfn + memslot->npages;
216 for (cur_gfn = memslot->base_gfn; cur_gfn <= last_gfn; cur_gfn++) {
217 address = gfn_to_hva_memslot(memslot, cur_gfn);
218
219 if (gmap_test_and_clear_dirty(address, gmap))
220 mark_page_dirty(kvm, cur_gfn);
221 }
222 up_read(&gmap->mm->mmap_sem);
223}
224
b0c632db
HC
225/* Section: vm related */
226/*
227 * Get (and clear) the dirty memory log for a memory slot.
228 */
229int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
230 struct kvm_dirty_log *log)
231{
15f36ebd
JH
232 int r;
233 unsigned long n;
234 struct kvm_memory_slot *memslot;
235 int is_dirty = 0;
236
237 mutex_lock(&kvm->slots_lock);
238
239 r = -EINVAL;
240 if (log->slot >= KVM_USER_MEM_SLOTS)
241 goto out;
242
243 memslot = id_to_memslot(kvm->memslots, log->slot);
244 r = -ENOENT;
245 if (!memslot->dirty_bitmap)
246 goto out;
247
248 kvm_s390_sync_dirty_log(kvm, memslot);
249 r = kvm_get_dirty_log(kvm, log, &is_dirty);
250 if (r)
251 goto out;
252
253 /* Clear the dirty log */
254 if (is_dirty) {
255 n = kvm_dirty_bitmap_bytes(memslot);
256 memset(memslot->dirty_bitmap, 0, n);
257 }
258 r = 0;
259out:
260 mutex_unlock(&kvm->slots_lock);
261 return r;
b0c632db
HC
262}
263
d938dc55
CH
264static int kvm_vm_ioctl_enable_cap(struct kvm *kvm, struct kvm_enable_cap *cap)
265{
266 int r;
267
268 if (cap->flags)
269 return -EINVAL;
270
271 switch (cap->cap) {
84223598
CH
272 case KVM_CAP_S390_IRQCHIP:
273 kvm->arch.use_irqchip = 1;
274 r = 0;
275 break;
2444b352
DH
276 case KVM_CAP_S390_USER_SIGP:
277 kvm->arch.user_sigp = 1;
278 r = 0;
279 break;
68c55750
EF
280 case KVM_CAP_S390_VECTOR_REGISTERS:
281 kvm->arch.use_vectors = MACHINE_HAS_VX;
282 r = MACHINE_HAS_VX ? 0 : -EINVAL;
283 break;
e44fc8c9
ET
284 case KVM_CAP_S390_USER_STSI:
285 kvm->arch.user_stsi = 1;
286 r = 0;
287 break;
d938dc55
CH
288 default:
289 r = -EINVAL;
290 break;
291 }
292 return r;
293}
294
8c0a7ce6
DD
295static int kvm_s390_get_mem_control(struct kvm *kvm, struct kvm_device_attr *attr)
296{
297 int ret;
298
299 switch (attr->attr) {
300 case KVM_S390_VM_MEM_LIMIT_SIZE:
301 ret = 0;
302 if (put_user(kvm->arch.gmap->asce_end, (u64 __user *)attr->addr))
303 ret = -EFAULT;
304 break;
305 default:
306 ret = -ENXIO;
307 break;
308 }
309 return ret;
310}
311
312static int kvm_s390_set_mem_control(struct kvm *kvm, struct kvm_device_attr *attr)
4f718eab
DD
313{
314 int ret;
315 unsigned int idx;
316 switch (attr->attr) {
317 case KVM_S390_VM_MEM_ENABLE_CMMA:
318 ret = -EBUSY;
319 mutex_lock(&kvm->lock);
320 if (atomic_read(&kvm->online_vcpus) == 0) {
321 kvm->arch.use_cmma = 1;
322 ret = 0;
323 }
324 mutex_unlock(&kvm->lock);
325 break;
326 case KVM_S390_VM_MEM_CLR_CMMA:
327 mutex_lock(&kvm->lock);
328 idx = srcu_read_lock(&kvm->srcu);
a13cff31 329 s390_reset_cmma(kvm->arch.gmap->mm);
4f718eab
DD
330 srcu_read_unlock(&kvm->srcu, idx);
331 mutex_unlock(&kvm->lock);
332 ret = 0;
333 break;
8c0a7ce6
DD
334 case KVM_S390_VM_MEM_LIMIT_SIZE: {
335 unsigned long new_limit;
336
337 if (kvm_is_ucontrol(kvm))
338 return -EINVAL;
339
340 if (get_user(new_limit, (u64 __user *)attr->addr))
341 return -EFAULT;
342
343 if (new_limit > kvm->arch.gmap->asce_end)
344 return -E2BIG;
345
346 ret = -EBUSY;
347 mutex_lock(&kvm->lock);
348 if (atomic_read(&kvm->online_vcpus) == 0) {
349 /* gmap_alloc will round the limit up */
350 struct gmap *new = gmap_alloc(current->mm, new_limit);
351
352 if (!new) {
353 ret = -ENOMEM;
354 } else {
355 gmap_free(kvm->arch.gmap);
356 new->private = kvm;
357 kvm->arch.gmap = new;
358 ret = 0;
359 }
360 }
361 mutex_unlock(&kvm->lock);
362 break;
363 }
4f718eab
DD
364 default:
365 ret = -ENXIO;
366 break;
367 }
368 return ret;
369}
370
a374e892
TK
371static void kvm_s390_vcpu_crypto_setup(struct kvm_vcpu *vcpu);
372
373static int kvm_s390_vm_set_crypto(struct kvm *kvm, struct kvm_device_attr *attr)
374{
375 struct kvm_vcpu *vcpu;
376 int i;
377
9d8d5786 378 if (!test_kvm_facility(kvm, 76))
a374e892
TK
379 return -EINVAL;
380
381 mutex_lock(&kvm->lock);
382 switch (attr->attr) {
383 case KVM_S390_VM_CRYPTO_ENABLE_AES_KW:
384 get_random_bytes(
385 kvm->arch.crypto.crycb->aes_wrapping_key_mask,
386 sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
387 kvm->arch.crypto.aes_kw = 1;
388 break;
389 case KVM_S390_VM_CRYPTO_ENABLE_DEA_KW:
390 get_random_bytes(
391 kvm->arch.crypto.crycb->dea_wrapping_key_mask,
392 sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));
393 kvm->arch.crypto.dea_kw = 1;
394 break;
395 case KVM_S390_VM_CRYPTO_DISABLE_AES_KW:
396 kvm->arch.crypto.aes_kw = 0;
397 memset(kvm->arch.crypto.crycb->aes_wrapping_key_mask, 0,
398 sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
399 break;
400 case KVM_S390_VM_CRYPTO_DISABLE_DEA_KW:
401 kvm->arch.crypto.dea_kw = 0;
402 memset(kvm->arch.crypto.crycb->dea_wrapping_key_mask, 0,
403 sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));
404 break;
405 default:
406 mutex_unlock(&kvm->lock);
407 return -ENXIO;
408 }
409
410 kvm_for_each_vcpu(i, vcpu, kvm) {
411 kvm_s390_vcpu_crypto_setup(vcpu);
412 exit_sie(vcpu);
413 }
414 mutex_unlock(&kvm->lock);
415 return 0;
416}
417
72f25020
JH
418static int kvm_s390_set_tod_high(struct kvm *kvm, struct kvm_device_attr *attr)
419{
420 u8 gtod_high;
421
422 if (copy_from_user(&gtod_high, (void __user *)attr->addr,
423 sizeof(gtod_high)))
424 return -EFAULT;
425
426 if (gtod_high != 0)
427 return -EINVAL;
428
429 return 0;
430}
431
432static int kvm_s390_set_tod_low(struct kvm *kvm, struct kvm_device_attr *attr)
433{
434 struct kvm_vcpu *cur_vcpu;
435 unsigned int vcpu_idx;
436 u64 host_tod, gtod;
437 int r;
438
439 if (copy_from_user(&gtod, (void __user *)attr->addr, sizeof(gtod)))
440 return -EFAULT;
441
442 r = store_tod_clock(&host_tod);
443 if (r)
444 return r;
445
446 mutex_lock(&kvm->lock);
447 kvm->arch.epoch = gtod - host_tod;
448 kvm_for_each_vcpu(vcpu_idx, cur_vcpu, kvm) {
449 cur_vcpu->arch.sie_block->epoch = kvm->arch.epoch;
450 exit_sie(cur_vcpu);
451 }
452 mutex_unlock(&kvm->lock);
453 return 0;
454}
455
456static int kvm_s390_set_tod(struct kvm *kvm, struct kvm_device_attr *attr)
457{
458 int ret;
459
460 if (attr->flags)
461 return -EINVAL;
462
463 switch (attr->attr) {
464 case KVM_S390_VM_TOD_HIGH:
465 ret = kvm_s390_set_tod_high(kvm, attr);
466 break;
467 case KVM_S390_VM_TOD_LOW:
468 ret = kvm_s390_set_tod_low(kvm, attr);
469 break;
470 default:
471 ret = -ENXIO;
472 break;
473 }
474 return ret;
475}
476
477static int kvm_s390_get_tod_high(struct kvm *kvm, struct kvm_device_attr *attr)
478{
479 u8 gtod_high = 0;
480
481 if (copy_to_user((void __user *)attr->addr, &gtod_high,
482 sizeof(gtod_high)))
483 return -EFAULT;
484
485 return 0;
486}
487
488static int kvm_s390_get_tod_low(struct kvm *kvm, struct kvm_device_attr *attr)
489{
490 u64 host_tod, gtod;
491 int r;
492
493 r = store_tod_clock(&host_tod);
494 if (r)
495 return r;
496
497 gtod = host_tod + kvm->arch.epoch;
498 if (copy_to_user((void __user *)attr->addr, &gtod, sizeof(gtod)))
499 return -EFAULT;
500
501 return 0;
502}
503
504static int kvm_s390_get_tod(struct kvm *kvm, struct kvm_device_attr *attr)
505{
506 int ret;
507
508 if (attr->flags)
509 return -EINVAL;
510
511 switch (attr->attr) {
512 case KVM_S390_VM_TOD_HIGH:
513 ret = kvm_s390_get_tod_high(kvm, attr);
514 break;
515 case KVM_S390_VM_TOD_LOW:
516 ret = kvm_s390_get_tod_low(kvm, attr);
517 break;
518 default:
519 ret = -ENXIO;
520 break;
521 }
522 return ret;
523}
524
658b6eda
MM
525static int kvm_s390_set_processor(struct kvm *kvm, struct kvm_device_attr *attr)
526{
527 struct kvm_s390_vm_cpu_processor *proc;
528 int ret = 0;
529
530 mutex_lock(&kvm->lock);
531 if (atomic_read(&kvm->online_vcpus)) {
532 ret = -EBUSY;
533 goto out;
534 }
535 proc = kzalloc(sizeof(*proc), GFP_KERNEL);
536 if (!proc) {
537 ret = -ENOMEM;
538 goto out;
539 }
540 if (!copy_from_user(proc, (void __user *)attr->addr,
541 sizeof(*proc))) {
542 memcpy(&kvm->arch.model.cpu_id, &proc->cpuid,
543 sizeof(struct cpuid));
544 kvm->arch.model.ibc = proc->ibc;
981467c9 545 memcpy(kvm->arch.model.fac->list, proc->fac_list,
658b6eda
MM
546 S390_ARCH_FAC_LIST_SIZE_BYTE);
547 } else
548 ret = -EFAULT;
549 kfree(proc);
550out:
551 mutex_unlock(&kvm->lock);
552 return ret;
553}
554
555static int kvm_s390_set_cpu_model(struct kvm *kvm, struct kvm_device_attr *attr)
556{
557 int ret = -ENXIO;
558
559 switch (attr->attr) {
560 case KVM_S390_VM_CPU_PROCESSOR:
561 ret = kvm_s390_set_processor(kvm, attr);
562 break;
563 }
564 return ret;
565}
566
567static int kvm_s390_get_processor(struct kvm *kvm, struct kvm_device_attr *attr)
568{
569 struct kvm_s390_vm_cpu_processor *proc;
570 int ret = 0;
571
572 proc = kzalloc(sizeof(*proc), GFP_KERNEL);
573 if (!proc) {
574 ret = -ENOMEM;
575 goto out;
576 }
577 memcpy(&proc->cpuid, &kvm->arch.model.cpu_id, sizeof(struct cpuid));
578 proc->ibc = kvm->arch.model.ibc;
981467c9 579 memcpy(&proc->fac_list, kvm->arch.model.fac->list, S390_ARCH_FAC_LIST_SIZE_BYTE);
658b6eda
MM
580 if (copy_to_user((void __user *)attr->addr, proc, sizeof(*proc)))
581 ret = -EFAULT;
582 kfree(proc);
583out:
584 return ret;
585}
586
587static int kvm_s390_get_machine(struct kvm *kvm, struct kvm_device_attr *attr)
588{
589 struct kvm_s390_vm_cpu_machine *mach;
590 int ret = 0;
591
592 mach = kzalloc(sizeof(*mach), GFP_KERNEL);
593 if (!mach) {
594 ret = -ENOMEM;
595 goto out;
596 }
597 get_cpu_id((struct cpuid *) &mach->cpuid);
598 mach->ibc = sclp_get_ibc();
981467c9
MM
599 memcpy(&mach->fac_mask, kvm->arch.model.fac->mask,
600 S390_ARCH_FAC_LIST_SIZE_BYTE);
658b6eda 601 memcpy((unsigned long *)&mach->fac_list, S390_lowcore.stfle_fac_list,
94422ee8 602 S390_ARCH_FAC_LIST_SIZE_BYTE);
658b6eda
MM
603 if (copy_to_user((void __user *)attr->addr, mach, sizeof(*mach)))
604 ret = -EFAULT;
605 kfree(mach);
606out:
607 return ret;
608}
609
610static int kvm_s390_get_cpu_model(struct kvm *kvm, struct kvm_device_attr *attr)
611{
612 int ret = -ENXIO;
613
614 switch (attr->attr) {
615 case KVM_S390_VM_CPU_PROCESSOR:
616 ret = kvm_s390_get_processor(kvm, attr);
617 break;
618 case KVM_S390_VM_CPU_MACHINE:
619 ret = kvm_s390_get_machine(kvm, attr);
620 break;
621 }
622 return ret;
623}
624
f2061656
DD
625static int kvm_s390_vm_set_attr(struct kvm *kvm, struct kvm_device_attr *attr)
626{
627 int ret;
628
629 switch (attr->group) {
4f718eab 630 case KVM_S390_VM_MEM_CTRL:
8c0a7ce6 631 ret = kvm_s390_set_mem_control(kvm, attr);
4f718eab 632 break;
72f25020
JH
633 case KVM_S390_VM_TOD:
634 ret = kvm_s390_set_tod(kvm, attr);
635 break;
658b6eda
MM
636 case KVM_S390_VM_CPU_MODEL:
637 ret = kvm_s390_set_cpu_model(kvm, attr);
638 break;
a374e892
TK
639 case KVM_S390_VM_CRYPTO:
640 ret = kvm_s390_vm_set_crypto(kvm, attr);
641 break;
f2061656
DD
642 default:
643 ret = -ENXIO;
644 break;
645 }
646
647 return ret;
648}
649
650static int kvm_s390_vm_get_attr(struct kvm *kvm, struct kvm_device_attr *attr)
651{
8c0a7ce6
DD
652 int ret;
653
654 switch (attr->group) {
655 case KVM_S390_VM_MEM_CTRL:
656 ret = kvm_s390_get_mem_control(kvm, attr);
657 break;
72f25020
JH
658 case KVM_S390_VM_TOD:
659 ret = kvm_s390_get_tod(kvm, attr);
660 break;
658b6eda
MM
661 case KVM_S390_VM_CPU_MODEL:
662 ret = kvm_s390_get_cpu_model(kvm, attr);
663 break;
8c0a7ce6
DD
664 default:
665 ret = -ENXIO;
666 break;
667 }
668
669 return ret;
f2061656
DD
670}
671
672static int kvm_s390_vm_has_attr(struct kvm *kvm, struct kvm_device_attr *attr)
673{
674 int ret;
675
676 switch (attr->group) {
4f718eab
DD
677 case KVM_S390_VM_MEM_CTRL:
678 switch (attr->attr) {
679 case KVM_S390_VM_MEM_ENABLE_CMMA:
680 case KVM_S390_VM_MEM_CLR_CMMA:
8c0a7ce6 681 case KVM_S390_VM_MEM_LIMIT_SIZE:
4f718eab
DD
682 ret = 0;
683 break;
684 default:
685 ret = -ENXIO;
686 break;
687 }
688 break;
72f25020
JH
689 case KVM_S390_VM_TOD:
690 switch (attr->attr) {
691 case KVM_S390_VM_TOD_LOW:
692 case KVM_S390_VM_TOD_HIGH:
693 ret = 0;
694 break;
695 default:
696 ret = -ENXIO;
697 break;
698 }
699 break;
658b6eda
MM
700 case KVM_S390_VM_CPU_MODEL:
701 switch (attr->attr) {
702 case KVM_S390_VM_CPU_PROCESSOR:
703 case KVM_S390_VM_CPU_MACHINE:
704 ret = 0;
705 break;
706 default:
707 ret = -ENXIO;
708 break;
709 }
710 break;
a374e892
TK
711 case KVM_S390_VM_CRYPTO:
712 switch (attr->attr) {
713 case KVM_S390_VM_CRYPTO_ENABLE_AES_KW:
714 case KVM_S390_VM_CRYPTO_ENABLE_DEA_KW:
715 case KVM_S390_VM_CRYPTO_DISABLE_AES_KW:
716 case KVM_S390_VM_CRYPTO_DISABLE_DEA_KW:
717 ret = 0;
718 break;
719 default:
720 ret = -ENXIO;
721 break;
722 }
723 break;
f2061656
DD
724 default:
725 ret = -ENXIO;
726 break;
727 }
728
729 return ret;
730}
731
b0c632db
HC
732long kvm_arch_vm_ioctl(struct file *filp,
733 unsigned int ioctl, unsigned long arg)
734{
735 struct kvm *kvm = filp->private_data;
736 void __user *argp = (void __user *)arg;
f2061656 737 struct kvm_device_attr attr;
b0c632db
HC
738 int r;
739
740 switch (ioctl) {
ba5c1e9b
CO
741 case KVM_S390_INTERRUPT: {
742 struct kvm_s390_interrupt s390int;
743
744 r = -EFAULT;
745 if (copy_from_user(&s390int, argp, sizeof(s390int)))
746 break;
747 r = kvm_s390_inject_vm(kvm, &s390int);
748 break;
749 }
d938dc55
CH
750 case KVM_ENABLE_CAP: {
751 struct kvm_enable_cap cap;
752 r = -EFAULT;
753 if (copy_from_user(&cap, argp, sizeof(cap)))
754 break;
755 r = kvm_vm_ioctl_enable_cap(kvm, &cap);
756 break;
757 }
84223598
CH
758 case KVM_CREATE_IRQCHIP: {
759 struct kvm_irq_routing_entry routing;
760
761 r = -EINVAL;
762 if (kvm->arch.use_irqchip) {
763 /* Set up dummy routing. */
764 memset(&routing, 0, sizeof(routing));
765 kvm_set_irq_routing(kvm, &routing, 0, 0);
766 r = 0;
767 }
768 break;
769 }
f2061656
DD
770 case KVM_SET_DEVICE_ATTR: {
771 r = -EFAULT;
772 if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
773 break;
774 r = kvm_s390_vm_set_attr(kvm, &attr);
775 break;
776 }
777 case KVM_GET_DEVICE_ATTR: {
778 r = -EFAULT;
779 if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
780 break;
781 r = kvm_s390_vm_get_attr(kvm, &attr);
782 break;
783 }
784 case KVM_HAS_DEVICE_ATTR: {
785 r = -EFAULT;
786 if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
787 break;
788 r = kvm_s390_vm_has_attr(kvm, &attr);
789 break;
790 }
b0c632db 791 default:
367e1319 792 r = -ENOTTY;
b0c632db
HC
793 }
794
795 return r;
796}
797
45c9b47c
TK
798static int kvm_s390_query_ap_config(u8 *config)
799{
800 u32 fcn_code = 0x04000000UL;
86044c8c 801 u32 cc = 0;
45c9b47c 802
86044c8c 803 memset(config, 0, 128);
45c9b47c
TK
804 asm volatile(
805 "lgr 0,%1\n"
806 "lgr 2,%2\n"
807 ".long 0xb2af0000\n" /* PQAP(QCI) */
86044c8c 808 "0: ipm %0\n"
45c9b47c 809 "srl %0,28\n"
86044c8c
CB
810 "1:\n"
811 EX_TABLE(0b, 1b)
812 : "+r" (cc)
45c9b47c
TK
813 : "r" (fcn_code), "r" (config)
814 : "cc", "0", "2", "memory"
815 );
816
817 return cc;
818}
819
820static int kvm_s390_apxa_installed(void)
821{
822 u8 config[128];
823 int cc;
824
825 if (test_facility(2) && test_facility(12)) {
826 cc = kvm_s390_query_ap_config(config);
827
828 if (cc)
829 pr_err("PQAP(QCI) failed with cc=%d", cc);
830 else
831 return config[0] & 0x40;
832 }
833
834 return 0;
835}
836
837static void kvm_s390_set_crycb_format(struct kvm *kvm)
838{
839 kvm->arch.crypto.crycbd = (__u32)(unsigned long) kvm->arch.crypto.crycb;
840
841 if (kvm_s390_apxa_installed())
842 kvm->arch.crypto.crycbd |= CRYCB_FORMAT2;
843 else
844 kvm->arch.crypto.crycbd |= CRYCB_FORMAT1;
845}
846
9d8d5786
MM
847static void kvm_s390_get_cpu_id(struct cpuid *cpu_id)
848{
849 get_cpu_id(cpu_id);
850 cpu_id->version = 0xff;
851}
852
5102ee87
TK
853static int kvm_s390_crypto_init(struct kvm *kvm)
854{
9d8d5786 855 if (!test_kvm_facility(kvm, 76))
5102ee87
TK
856 return 0;
857
858 kvm->arch.crypto.crycb = kzalloc(sizeof(*kvm->arch.crypto.crycb),
859 GFP_KERNEL | GFP_DMA);
860 if (!kvm->arch.crypto.crycb)
861 return -ENOMEM;
862
45c9b47c 863 kvm_s390_set_crycb_format(kvm);
5102ee87 864
ed6f76b4
TK
865 /* Enable AES/DEA protected key functions by default */
866 kvm->arch.crypto.aes_kw = 1;
867 kvm->arch.crypto.dea_kw = 1;
868 get_random_bytes(kvm->arch.crypto.crycb->aes_wrapping_key_mask,
869 sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
870 get_random_bytes(kvm->arch.crypto.crycb->dea_wrapping_key_mask,
871 sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));
a374e892 872
5102ee87
TK
873 return 0;
874}
875
e08b9637 876int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
b0c632db 877{
9d8d5786 878 int i, rc;
b0c632db 879 char debug_name[16];
f6c137ff 880 static unsigned long sca_offset;
b0c632db 881
e08b9637
CO
882 rc = -EINVAL;
883#ifdef CONFIG_KVM_S390_UCONTROL
884 if (type & ~KVM_VM_S390_UCONTROL)
885 goto out_err;
886 if ((type & KVM_VM_S390_UCONTROL) && (!capable(CAP_SYS_ADMIN)))
887 goto out_err;
888#else
889 if (type)
890 goto out_err;
891#endif
892
b0c632db
HC
893 rc = s390_enable_sie();
894 if (rc)
d89f5eff 895 goto out_err;
b0c632db 896
b290411a
CO
897 rc = -ENOMEM;
898
b0c632db
HC
899 kvm->arch.sca = (struct sca_block *) get_zeroed_page(GFP_KERNEL);
900 if (!kvm->arch.sca)
d89f5eff 901 goto out_err;
f6c137ff
CB
902 spin_lock(&kvm_lock);
903 sca_offset = (sca_offset + 16) & 0x7f0;
904 kvm->arch.sca = (struct sca_block *) ((char *) kvm->arch.sca + sca_offset);
905 spin_unlock(&kvm_lock);
b0c632db
HC
906
907 sprintf(debug_name, "kvm-%u", current->pid);
908
909 kvm->arch.dbf = debug_register(debug_name, 8, 2, 8 * sizeof(long));
910 if (!kvm->arch.dbf)
40f5b735 911 goto out_err;
b0c632db 912
9d8d5786
MM
913 /*
914 * The architectural maximum amount of facilities is 16 kbit. To store
915 * this amount, 2 kbyte of memory is required. Thus we need a full
981467c9
MM
916 * page to hold the guest facility list (arch.model.fac->list) and the
917 * facility mask (arch.model.fac->mask). Its address size has to be
9d8d5786
MM
918 * 31 bits and word aligned.
919 */
920 kvm->arch.model.fac =
981467c9 921 (struct kvm_s390_fac *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
9d8d5786 922 if (!kvm->arch.model.fac)
40f5b735 923 goto out_err;
9d8d5786 924
fb5bf93f 925 /* Populate the facility mask initially. */
981467c9 926 memcpy(kvm->arch.model.fac->mask, S390_lowcore.stfle_fac_list,
94422ee8 927 S390_ARCH_FAC_LIST_SIZE_BYTE);
9d8d5786
MM
928 for (i = 0; i < S390_ARCH_FAC_LIST_SIZE_U64; i++) {
929 if (i < kvm_s390_fac_list_mask_size())
981467c9 930 kvm->arch.model.fac->mask[i] &= kvm_s390_fac_list_mask[i];
9d8d5786 931 else
981467c9 932 kvm->arch.model.fac->mask[i] = 0UL;
9d8d5786
MM
933 }
934
981467c9
MM
935 /* Populate the facility list initially. */
936 memcpy(kvm->arch.model.fac->list, kvm->arch.model.fac->mask,
937 S390_ARCH_FAC_LIST_SIZE_BYTE);
938
9d8d5786 939 kvm_s390_get_cpu_id(&kvm->arch.model.cpu_id);
658b6eda 940 kvm->arch.model.ibc = sclp_get_ibc() & 0x0fff;
9d8d5786 941
5102ee87 942 if (kvm_s390_crypto_init(kvm) < 0)
40f5b735 943 goto out_err;
5102ee87 944
ba5c1e9b
CO
945 spin_lock_init(&kvm->arch.float_int.lock);
946 INIT_LIST_HEAD(&kvm->arch.float_int.list);
8a242234 947 init_waitqueue_head(&kvm->arch.ipte_wq);
a6b7e459 948 mutex_init(&kvm->arch.ipte_mutex);
ba5c1e9b 949
b0c632db
HC
950 debug_register_view(kvm->arch.dbf, &debug_sprintf_view);
951 VM_EVENT(kvm, 3, "%s", "vm created");
952
e08b9637
CO
953 if (type & KVM_VM_S390_UCONTROL) {
954 kvm->arch.gmap = NULL;
955 } else {
0349985a 956 kvm->arch.gmap = gmap_alloc(current->mm, (1UL << 44) - 1);
e08b9637 957 if (!kvm->arch.gmap)
40f5b735 958 goto out_err;
2c70fe44 959 kvm->arch.gmap->private = kvm;
24eb3a82 960 kvm->arch.gmap->pfault_enabled = 0;
e08b9637 961 }
fa6b7fe9
CH
962
963 kvm->arch.css_support = 0;
84223598 964 kvm->arch.use_irqchip = 0;
68c55750 965 kvm->arch.use_vectors = 0;
72f25020 966 kvm->arch.epoch = 0;
fa6b7fe9 967
8ad35755
DH
968 spin_lock_init(&kvm->arch.start_stop_lock);
969
d89f5eff 970 return 0;
40f5b735 971out_err:
5102ee87 972 kfree(kvm->arch.crypto.crycb);
9d8d5786 973 free_page((unsigned long)kvm->arch.model.fac);
598841ca 974 debug_unregister(kvm->arch.dbf);
b0c632db 975 free_page((unsigned long)(kvm->arch.sca));
d89f5eff 976 return rc;
b0c632db
HC
977}
978
d329c035
CB
979void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
980{
981 VCPU_EVENT(vcpu, 3, "%s", "free cpu");
ade38c31 982 trace_kvm_s390_destroy_vcpu(vcpu->vcpu_id);
67335e63 983 kvm_s390_clear_local_irqs(vcpu);
3c038e6b 984 kvm_clear_async_pf_completion_queue(vcpu);
58f9460b
CO
985 if (!kvm_is_ucontrol(vcpu->kvm)) {
986 clear_bit(63 - vcpu->vcpu_id,
987 (unsigned long *) &vcpu->kvm->arch.sca->mcn);
988 if (vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda ==
989 (__u64) vcpu->arch.sie_block)
990 vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda = 0;
991 }
abf4a71e 992 smp_mb();
27e0393f
CO
993
994 if (kvm_is_ucontrol(vcpu->kvm))
995 gmap_free(vcpu->arch.gmap);
996
b31605c1
DD
997 if (kvm_s390_cmma_enabled(vcpu->kvm))
998 kvm_s390_vcpu_unsetup_cmma(vcpu);
d329c035 999 free_page((unsigned long)(vcpu->arch.sie_block));
b31288fa 1000
6692cef3 1001 kvm_vcpu_uninit(vcpu);
b110feaf 1002 kmem_cache_free(kvm_vcpu_cache, vcpu);
d329c035
CB
1003}
1004
1005static void kvm_free_vcpus(struct kvm *kvm)
1006{
1007 unsigned int i;
988a2cae 1008 struct kvm_vcpu *vcpu;
d329c035 1009
988a2cae
GN
1010 kvm_for_each_vcpu(i, vcpu, kvm)
1011 kvm_arch_vcpu_destroy(vcpu);
1012
1013 mutex_lock(&kvm->lock);
1014 for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
1015 kvm->vcpus[i] = NULL;
1016
1017 atomic_set(&kvm->online_vcpus, 0);
1018 mutex_unlock(&kvm->lock);
d329c035
CB
1019}
1020
b0c632db
HC
1021void kvm_arch_destroy_vm(struct kvm *kvm)
1022{
d329c035 1023 kvm_free_vcpus(kvm);
9d8d5786 1024 free_page((unsigned long)kvm->arch.model.fac);
b0c632db 1025 free_page((unsigned long)(kvm->arch.sca));
d329c035 1026 debug_unregister(kvm->arch.dbf);
5102ee87 1027 kfree(kvm->arch.crypto.crycb);
27e0393f
CO
1028 if (!kvm_is_ucontrol(kvm))
1029 gmap_free(kvm->arch.gmap);
841b91c5 1030 kvm_s390_destroy_adapters(kvm);
67335e63 1031 kvm_s390_clear_float_irqs(kvm);
b0c632db
HC
1032}
1033
1034/* Section: vcpu related */
dafd032a
DD
1035static int __kvm_ucontrol_vcpu_init(struct kvm_vcpu *vcpu)
1036{
1037 vcpu->arch.gmap = gmap_alloc(current->mm, -1UL);
1038 if (!vcpu->arch.gmap)
1039 return -ENOMEM;
1040 vcpu->arch.gmap->private = vcpu->kvm;
1041
1042 return 0;
1043}
1044
b0c632db
HC
1045int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
1046{
3c038e6b
DD
1047 vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
1048 kvm_clear_async_pf_completion_queue(vcpu);
59674c1a
CB
1049 vcpu->run->kvm_valid_regs = KVM_SYNC_PREFIX |
1050 KVM_SYNC_GPRS |
9eed0735 1051 KVM_SYNC_ACRS |
b028ee3e
DH
1052 KVM_SYNC_CRS |
1053 KVM_SYNC_ARCH0 |
1054 KVM_SYNC_PFAULT;
68c55750
EF
1055 if (test_kvm_facility(vcpu->kvm, 129))
1056 vcpu->run->kvm_valid_regs |= KVM_SYNC_VRS;
dafd032a
DD
1057
1058 if (kvm_is_ucontrol(vcpu->kvm))
1059 return __kvm_ucontrol_vcpu_init(vcpu);
1060
b0c632db
HC
1061 return 0;
1062}
1063
b0c632db
HC
1064void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
1065{
4725c860 1066 save_fp_ctl(&vcpu->arch.host_fpregs.fpc);
68c55750
EF
1067 if (vcpu->kvm->arch.use_vectors)
1068 save_vx_regs((__vector128 *)&vcpu->arch.host_vregs->vrs);
1069 else
1070 save_fp_regs(vcpu->arch.host_fpregs.fprs);
b0c632db 1071 save_access_regs(vcpu->arch.host_acrs);
68c55750
EF
1072 if (vcpu->kvm->arch.use_vectors) {
1073 restore_fp_ctl(&vcpu->run->s.regs.fpc);
1074 restore_vx_regs((__vector128 *)&vcpu->run->s.regs.vrs);
1075 } else {
1076 restore_fp_ctl(&vcpu->arch.guest_fpregs.fpc);
1077 restore_fp_regs(vcpu->arch.guest_fpregs.fprs);
1078 }
59674c1a 1079 restore_access_regs(vcpu->run->s.regs.acrs);
480e5926 1080 gmap_enable(vcpu->arch.gmap);
9e6dabef 1081 atomic_set_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
b0c632db
HC
1082}
1083
1084void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
1085{
9e6dabef 1086 atomic_clear_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
480e5926 1087 gmap_disable(vcpu->arch.gmap);
68c55750
EF
1088 if (vcpu->kvm->arch.use_vectors) {
1089 save_fp_ctl(&vcpu->run->s.regs.fpc);
1090 save_vx_regs((__vector128 *)&vcpu->run->s.regs.vrs);
1091 } else {
1092 save_fp_ctl(&vcpu->arch.guest_fpregs.fpc);
1093 save_fp_regs(vcpu->arch.guest_fpregs.fprs);
1094 }
59674c1a 1095 save_access_regs(vcpu->run->s.regs.acrs);
4725c860 1096 restore_fp_ctl(&vcpu->arch.host_fpregs.fpc);
68c55750
EF
1097 if (vcpu->kvm->arch.use_vectors)
1098 restore_vx_regs((__vector128 *)&vcpu->arch.host_vregs->vrs);
1099 else
1100 restore_fp_regs(vcpu->arch.host_fpregs.fprs);
b0c632db
HC
1101 restore_access_regs(vcpu->arch.host_acrs);
1102}
1103
1104static void kvm_s390_vcpu_initial_reset(struct kvm_vcpu *vcpu)
1105{
1106 /* this equals initial cpu reset in pop, but we don't switch to ESA */
1107 vcpu->arch.sie_block->gpsw.mask = 0UL;
1108 vcpu->arch.sie_block->gpsw.addr = 0UL;
8d26cf7b 1109 kvm_s390_set_prefix(vcpu, 0);
b0c632db
HC
1110 vcpu->arch.sie_block->cputm = 0UL;
1111 vcpu->arch.sie_block->ckc = 0UL;
1112 vcpu->arch.sie_block->todpr = 0;
1113 memset(vcpu->arch.sie_block->gcr, 0, 16 * sizeof(__u64));
1114 vcpu->arch.sie_block->gcr[0] = 0xE0UL;
1115 vcpu->arch.sie_block->gcr[14] = 0xC2000000UL;
1116 vcpu->arch.guest_fpregs.fpc = 0;
1117 asm volatile("lfpc %0" : : "Q" (vcpu->arch.guest_fpregs.fpc));
1118 vcpu->arch.sie_block->gbea = 1;
672550fb 1119 vcpu->arch.sie_block->pp = 0;
3c038e6b
DD
1120 vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
1121 kvm_clear_async_pf_completion_queue(vcpu);
6352e4d2
DH
1122 if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm))
1123 kvm_s390_vcpu_stop(vcpu);
2ed10cc1 1124 kvm_s390_clear_local_irqs(vcpu);
b0c632db
HC
1125}
1126
31928aa5 1127void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
42897d86 1128{
72f25020
JH
1129 mutex_lock(&vcpu->kvm->lock);
1130 vcpu->arch.sie_block->epoch = vcpu->kvm->arch.epoch;
1131 mutex_unlock(&vcpu->kvm->lock);
dafd032a
DD
1132 if (!kvm_is_ucontrol(vcpu->kvm))
1133 vcpu->arch.gmap = vcpu->kvm->arch.gmap;
42897d86
MT
1134}
1135
5102ee87
TK
1136static void kvm_s390_vcpu_crypto_setup(struct kvm_vcpu *vcpu)
1137{
9d8d5786 1138 if (!test_kvm_facility(vcpu->kvm, 76))
5102ee87
TK
1139 return;
1140
a374e892
TK
1141 vcpu->arch.sie_block->ecb3 &= ~(ECB3_AES | ECB3_DEA);
1142
1143 if (vcpu->kvm->arch.crypto.aes_kw)
1144 vcpu->arch.sie_block->ecb3 |= ECB3_AES;
1145 if (vcpu->kvm->arch.crypto.dea_kw)
1146 vcpu->arch.sie_block->ecb3 |= ECB3_DEA;
1147
5102ee87
TK
1148 vcpu->arch.sie_block->crycbd = vcpu->kvm->arch.crypto.crycbd;
1149}
1150
b31605c1
DD
1151void kvm_s390_vcpu_unsetup_cmma(struct kvm_vcpu *vcpu)
1152{
1153 free_page(vcpu->arch.sie_block->cbrlo);
1154 vcpu->arch.sie_block->cbrlo = 0;
1155}
1156
1157int kvm_s390_vcpu_setup_cmma(struct kvm_vcpu *vcpu)
1158{
1159 vcpu->arch.sie_block->cbrlo = get_zeroed_page(GFP_KERNEL);
1160 if (!vcpu->arch.sie_block->cbrlo)
1161 return -ENOMEM;
1162
1163 vcpu->arch.sie_block->ecb2 |= 0x80;
1164 vcpu->arch.sie_block->ecb2 &= ~0x08;
1165 return 0;
1166}
1167
91520f1a
MM
1168static void kvm_s390_vcpu_setup_model(struct kvm_vcpu *vcpu)
1169{
1170 struct kvm_s390_cpu_model *model = &vcpu->kvm->arch.model;
1171
1172 vcpu->arch.cpu_id = model->cpu_id;
1173 vcpu->arch.sie_block->ibc = model->ibc;
1174 vcpu->arch.sie_block->fac = (int) (long) model->fac->list;
1175}
1176
b0c632db
HC
1177int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
1178{
b31605c1 1179 int rc = 0;
b31288fa 1180
9e6dabef
CH
1181 atomic_set(&vcpu->arch.sie_block->cpuflags, CPUSTAT_ZARCH |
1182 CPUSTAT_SM |
69d0d3a3
CB
1183 CPUSTAT_STOPPED |
1184 CPUSTAT_GED);
91520f1a
MM
1185 kvm_s390_vcpu_setup_model(vcpu);
1186
fc34531d 1187 vcpu->arch.sie_block->ecb = 6;
9d8d5786 1188 if (test_kvm_facility(vcpu->kvm, 50) && test_kvm_facility(vcpu->kvm, 73))
7feb6bb8
MM
1189 vcpu->arch.sie_block->ecb |= 0x10;
1190
69d0d3a3 1191 vcpu->arch.sie_block->ecb2 = 8;
ea5f4969 1192 vcpu->arch.sie_block->eca = 0xC1002000U;
217a4406
HC
1193 if (sclp_has_siif())
1194 vcpu->arch.sie_block->eca |= 1;
ea5f4969
DH
1195 if (sclp_has_sigpif())
1196 vcpu->arch.sie_block->eca |= 0x10000000U;
13211ea7
EF
1197 if (vcpu->kvm->arch.use_vectors) {
1198 vcpu->arch.sie_block->eca |= 0x00020000;
1199 vcpu->arch.sie_block->ecd |= 0x20000000;
1200 }
492d8642 1201 vcpu->arch.sie_block->ictl |= ICTL_ISKE | ICTL_SSKE | ICTL_RRBE;
5a5e6536 1202
b31605c1
DD
1203 if (kvm_s390_cmma_enabled(vcpu->kvm)) {
1204 rc = kvm_s390_vcpu_setup_cmma(vcpu);
1205 if (rc)
1206 return rc;
b31288fa 1207 }
0ac96caf 1208 hrtimer_init(&vcpu->arch.ckc_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
ca872302 1209 vcpu->arch.ckc_timer.function = kvm_s390_idle_wakeup;
9d8d5786 1210
5102ee87
TK
1211 kvm_s390_vcpu_crypto_setup(vcpu);
1212
b31605c1 1213 return rc;
b0c632db
HC
1214}
1215
1216struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
1217 unsigned int id)
1218{
4d47555a 1219 struct kvm_vcpu *vcpu;
7feb6bb8 1220 struct sie_page *sie_page;
4d47555a
CO
1221 int rc = -EINVAL;
1222
1223 if (id >= KVM_MAX_VCPUS)
1224 goto out;
1225
1226 rc = -ENOMEM;
b0c632db 1227
b110feaf 1228 vcpu = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
b0c632db 1229 if (!vcpu)
4d47555a 1230 goto out;
b0c632db 1231
7feb6bb8
MM
1232 sie_page = (struct sie_page *) get_zeroed_page(GFP_KERNEL);
1233 if (!sie_page)
b0c632db
HC
1234 goto out_free_cpu;
1235
7feb6bb8
MM
1236 vcpu->arch.sie_block = &sie_page->sie_block;
1237 vcpu->arch.sie_block->itdba = (unsigned long) &sie_page->itdb;
68c55750 1238 vcpu->arch.host_vregs = &sie_page->vregs;
7feb6bb8 1239
b0c632db 1240 vcpu->arch.sie_block->icpua = id;
58f9460b
CO
1241 if (!kvm_is_ucontrol(kvm)) {
1242 if (!kvm->arch.sca) {
1243 WARN_ON_ONCE(1);
1244 goto out_free_cpu;
1245 }
1246 if (!kvm->arch.sca->cpu[id].sda)
1247 kvm->arch.sca->cpu[id].sda =
1248 (__u64) vcpu->arch.sie_block;
1249 vcpu->arch.sie_block->scaoh =
1250 (__u32)(((__u64)kvm->arch.sca) >> 32);
1251 vcpu->arch.sie_block->scaol = (__u32)(__u64)kvm->arch.sca;
1252 set_bit(63 - id, (unsigned long *) &kvm->arch.sca->mcn);
1253 }
b0c632db 1254
ba5c1e9b 1255 spin_lock_init(&vcpu->arch.local_int.lock);
ba5c1e9b 1256 vcpu->arch.local_int.float_int = &kvm->arch.float_int;
d0321a24 1257 vcpu->arch.local_int.wq = &vcpu->wq;
5288fbf0 1258 vcpu->arch.local_int.cpuflags = &vcpu->arch.sie_block->cpuflags;
ba5c1e9b 1259
b0c632db
HC
1260 rc = kvm_vcpu_init(vcpu, kvm, id);
1261 if (rc)
7b06bf2f 1262 goto out_free_sie_block;
b0c632db
HC
1263 VM_EVENT(kvm, 3, "create cpu %d at %p, sie block at %p", id, vcpu,
1264 vcpu->arch.sie_block);
ade38c31 1265 trace_kvm_s390_create_vcpu(id, vcpu, vcpu->arch.sie_block);
b0c632db 1266
b0c632db 1267 return vcpu;
7b06bf2f
WY
1268out_free_sie_block:
1269 free_page((unsigned long)(vcpu->arch.sie_block));
b0c632db 1270out_free_cpu:
b110feaf 1271 kmem_cache_free(kvm_vcpu_cache, vcpu);
4d47555a 1272out:
b0c632db
HC
1273 return ERR_PTR(rc);
1274}
1275
b0c632db
HC
1276int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
1277{
9a022067 1278 return kvm_s390_vcpu_has_irq(vcpu, 0);
b0c632db
HC
1279}
1280
49b99e1e
CB
1281void s390_vcpu_block(struct kvm_vcpu *vcpu)
1282{
1283 atomic_set_mask(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
1284}
1285
1286void s390_vcpu_unblock(struct kvm_vcpu *vcpu)
1287{
1288 atomic_clear_mask(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
1289}
1290
1291/*
1292 * Kick a guest cpu out of SIE and wait until SIE is not running.
1293 * If the CPU is not running (e.g. waiting as idle) the function will
1294 * return immediately. */
1295void exit_sie(struct kvm_vcpu *vcpu)
1296{
1297 atomic_set_mask(CPUSTAT_STOP_INT, &vcpu->arch.sie_block->cpuflags);
1298 while (vcpu->arch.sie_block->prog0c & PROG_IN_SIE)
1299 cpu_relax();
1300}
1301
1302/* Kick a guest cpu out of SIE and prevent SIE-reentry */
1303void exit_sie_sync(struct kvm_vcpu *vcpu)
1304{
1305 s390_vcpu_block(vcpu);
1306 exit_sie(vcpu);
1307}
1308
2c70fe44
CB
1309static void kvm_gmap_notifier(struct gmap *gmap, unsigned long address)
1310{
1311 int i;
1312 struct kvm *kvm = gmap->private;
1313 struct kvm_vcpu *vcpu;
1314
1315 kvm_for_each_vcpu(i, vcpu, kvm) {
1316 /* match against both prefix pages */
fda902cb 1317 if (kvm_s390_get_prefix(vcpu) == (address & ~0x1000UL)) {
2c70fe44
CB
1318 VCPU_EVENT(vcpu, 2, "gmap notifier for %lx", address);
1319 kvm_make_request(KVM_REQ_MMU_RELOAD, vcpu);
1320 exit_sie_sync(vcpu);
1321 }
1322 }
1323}
1324
b6d33834
CD
1325int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
1326{
1327 /* kvm common code refers to this, but never calls it */
1328 BUG();
1329 return 0;
1330}
1331
14eebd91
CO
1332static int kvm_arch_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu,
1333 struct kvm_one_reg *reg)
1334{
1335 int r = -EINVAL;
1336
1337 switch (reg->id) {
29b7c71b
CO
1338 case KVM_REG_S390_TODPR:
1339 r = put_user(vcpu->arch.sie_block->todpr,
1340 (u32 __user *)reg->addr);
1341 break;
1342 case KVM_REG_S390_EPOCHDIFF:
1343 r = put_user(vcpu->arch.sie_block->epoch,
1344 (u64 __user *)reg->addr);
1345 break;
46a6dd1c
J
1346 case KVM_REG_S390_CPU_TIMER:
1347 r = put_user(vcpu->arch.sie_block->cputm,
1348 (u64 __user *)reg->addr);
1349 break;
1350 case KVM_REG_S390_CLOCK_COMP:
1351 r = put_user(vcpu->arch.sie_block->ckc,
1352 (u64 __user *)reg->addr);
1353 break;
536336c2
DD
1354 case KVM_REG_S390_PFTOKEN:
1355 r = put_user(vcpu->arch.pfault_token,
1356 (u64 __user *)reg->addr);
1357 break;
1358 case KVM_REG_S390_PFCOMPARE:
1359 r = put_user(vcpu->arch.pfault_compare,
1360 (u64 __user *)reg->addr);
1361 break;
1362 case KVM_REG_S390_PFSELECT:
1363 r = put_user(vcpu->arch.pfault_select,
1364 (u64 __user *)reg->addr);
1365 break;
672550fb
CB
1366 case KVM_REG_S390_PP:
1367 r = put_user(vcpu->arch.sie_block->pp,
1368 (u64 __user *)reg->addr);
1369 break;
afa45ff5
CB
1370 case KVM_REG_S390_GBEA:
1371 r = put_user(vcpu->arch.sie_block->gbea,
1372 (u64 __user *)reg->addr);
1373 break;
14eebd91
CO
1374 default:
1375 break;
1376 }
1377
1378 return r;
1379}
1380
1381static int kvm_arch_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu,
1382 struct kvm_one_reg *reg)
1383{
1384 int r = -EINVAL;
1385
1386 switch (reg->id) {
29b7c71b
CO
1387 case KVM_REG_S390_TODPR:
1388 r = get_user(vcpu->arch.sie_block->todpr,
1389 (u32 __user *)reg->addr);
1390 break;
1391 case KVM_REG_S390_EPOCHDIFF:
1392 r = get_user(vcpu->arch.sie_block->epoch,
1393 (u64 __user *)reg->addr);
1394 break;
46a6dd1c
J
1395 case KVM_REG_S390_CPU_TIMER:
1396 r = get_user(vcpu->arch.sie_block->cputm,
1397 (u64 __user *)reg->addr);
1398 break;
1399 case KVM_REG_S390_CLOCK_COMP:
1400 r = get_user(vcpu->arch.sie_block->ckc,
1401 (u64 __user *)reg->addr);
1402 break;
536336c2
DD
1403 case KVM_REG_S390_PFTOKEN:
1404 r = get_user(vcpu->arch.pfault_token,
1405 (u64 __user *)reg->addr);
9fbd8082
DH
1406 if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
1407 kvm_clear_async_pf_completion_queue(vcpu);
536336c2
DD
1408 break;
1409 case KVM_REG_S390_PFCOMPARE:
1410 r = get_user(vcpu->arch.pfault_compare,
1411 (u64 __user *)reg->addr);
1412 break;
1413 case KVM_REG_S390_PFSELECT:
1414 r = get_user(vcpu->arch.pfault_select,
1415 (u64 __user *)reg->addr);
1416 break;
672550fb
CB
1417 case KVM_REG_S390_PP:
1418 r = get_user(vcpu->arch.sie_block->pp,
1419 (u64 __user *)reg->addr);
1420 break;
afa45ff5
CB
1421 case KVM_REG_S390_GBEA:
1422 r = get_user(vcpu->arch.sie_block->gbea,
1423 (u64 __user *)reg->addr);
1424 break;
14eebd91
CO
1425 default:
1426 break;
1427 }
1428
1429 return r;
1430}
b6d33834 1431
b0c632db
HC
1432static int kvm_arch_vcpu_ioctl_initial_reset(struct kvm_vcpu *vcpu)
1433{
b0c632db 1434 kvm_s390_vcpu_initial_reset(vcpu);
b0c632db
HC
1435 return 0;
1436}
1437
1438int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
1439{
5a32c1af 1440 memcpy(&vcpu->run->s.regs.gprs, &regs->gprs, sizeof(regs->gprs));
b0c632db
HC
1441 return 0;
1442}
1443
1444int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
1445{
5a32c1af 1446 memcpy(&regs->gprs, &vcpu->run->s.regs.gprs, sizeof(regs->gprs));
b0c632db
HC
1447 return 0;
1448}
1449
1450int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
1451 struct kvm_sregs *sregs)
1452{
59674c1a 1453 memcpy(&vcpu->run->s.regs.acrs, &sregs->acrs, sizeof(sregs->acrs));
b0c632db 1454 memcpy(&vcpu->arch.sie_block->gcr, &sregs->crs, sizeof(sregs->crs));
59674c1a 1455 restore_access_regs(vcpu->run->s.regs.acrs);
b0c632db
HC
1456 return 0;
1457}
1458
1459int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
1460 struct kvm_sregs *sregs)
1461{
59674c1a 1462 memcpy(&sregs->acrs, &vcpu->run->s.regs.acrs, sizeof(sregs->acrs));
b0c632db 1463 memcpy(&sregs->crs, &vcpu->arch.sie_block->gcr, sizeof(sregs->crs));
b0c632db
HC
1464 return 0;
1465}
1466
1467int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
1468{
4725c860
MS
1469 if (test_fp_ctl(fpu->fpc))
1470 return -EINVAL;
b0c632db 1471 memcpy(&vcpu->arch.guest_fpregs.fprs, &fpu->fprs, sizeof(fpu->fprs));
4725c860
MS
1472 vcpu->arch.guest_fpregs.fpc = fpu->fpc;
1473 restore_fp_ctl(&vcpu->arch.guest_fpregs.fpc);
1474 restore_fp_regs(vcpu->arch.guest_fpregs.fprs);
b0c632db
HC
1475 return 0;
1476}
1477
1478int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
1479{
b0c632db
HC
1480 memcpy(&fpu->fprs, &vcpu->arch.guest_fpregs.fprs, sizeof(fpu->fprs));
1481 fpu->fpc = vcpu->arch.guest_fpregs.fpc;
b0c632db
HC
1482 return 0;
1483}
1484
1485static int kvm_arch_vcpu_ioctl_set_initial_psw(struct kvm_vcpu *vcpu, psw_t psw)
1486{
1487 int rc = 0;
1488
7a42fdc2 1489 if (!is_vcpu_stopped(vcpu))
b0c632db 1490 rc = -EBUSY;
d7b0b5eb
CO
1491 else {
1492 vcpu->run->psw_mask = psw.mask;
1493 vcpu->run->psw_addr = psw.addr;
1494 }
b0c632db
HC
1495 return rc;
1496}
1497
1498int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
1499 struct kvm_translation *tr)
1500{
1501 return -EINVAL; /* not implemented yet */
1502}
1503
27291e21
DH
1504#define VALID_GUESTDBG_FLAGS (KVM_GUESTDBG_SINGLESTEP | \
1505 KVM_GUESTDBG_USE_HW_BP | \
1506 KVM_GUESTDBG_ENABLE)
1507
d0bfb940
JK
1508int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
1509 struct kvm_guest_debug *dbg)
b0c632db 1510{
27291e21
DH
1511 int rc = 0;
1512
1513 vcpu->guest_debug = 0;
1514 kvm_s390_clear_bp_data(vcpu);
1515
2de3bfc2 1516 if (dbg->control & ~VALID_GUESTDBG_FLAGS)
27291e21
DH
1517 return -EINVAL;
1518
1519 if (dbg->control & KVM_GUESTDBG_ENABLE) {
1520 vcpu->guest_debug = dbg->control;
1521 /* enforce guest PER */
1522 atomic_set_mask(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags);
1523
1524 if (dbg->control & KVM_GUESTDBG_USE_HW_BP)
1525 rc = kvm_s390_import_bp_data(vcpu, dbg);
1526 } else {
1527 atomic_clear_mask(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags);
1528 vcpu->arch.guestdbg.last_bp = 0;
1529 }
1530
1531 if (rc) {
1532 vcpu->guest_debug = 0;
1533 kvm_s390_clear_bp_data(vcpu);
1534 atomic_clear_mask(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags);
1535 }
1536
1537 return rc;
b0c632db
HC
1538}
1539
62d9f0db
MT
1540int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
1541 struct kvm_mp_state *mp_state)
1542{
6352e4d2
DH
1543 /* CHECK_STOP and LOAD are not supported yet */
1544 return is_vcpu_stopped(vcpu) ? KVM_MP_STATE_STOPPED :
1545 KVM_MP_STATE_OPERATING;
62d9f0db
MT
1546}
1547
1548int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
1549 struct kvm_mp_state *mp_state)
1550{
6352e4d2
DH
1551 int rc = 0;
1552
1553 /* user space knows about this interface - let it control the state */
1554 vcpu->kvm->arch.user_cpu_state_ctrl = 1;
1555
1556 switch (mp_state->mp_state) {
1557 case KVM_MP_STATE_STOPPED:
1558 kvm_s390_vcpu_stop(vcpu);
1559 break;
1560 case KVM_MP_STATE_OPERATING:
1561 kvm_s390_vcpu_start(vcpu);
1562 break;
1563 case KVM_MP_STATE_LOAD:
1564 case KVM_MP_STATE_CHECK_STOP:
1565 /* fall through - CHECK_STOP and LOAD are not supported yet */
1566 default:
1567 rc = -ENXIO;
1568 }
1569
1570 return rc;
62d9f0db
MT
1571}
1572
b31605c1
DD
1573bool kvm_s390_cmma_enabled(struct kvm *kvm)
1574{
1575 if (!MACHINE_IS_LPAR)
1576 return false;
1577 /* only enable for z10 and later */
1578 if (!MACHINE_HAS_EDAT1)
1579 return false;
1580 if (!kvm->arch.use_cmma)
1581 return false;
1582 return true;
1583}
1584
8ad35755
DH
1585static bool ibs_enabled(struct kvm_vcpu *vcpu)
1586{
1587 return atomic_read(&vcpu->arch.sie_block->cpuflags) & CPUSTAT_IBS;
1588}
1589
2c70fe44
CB
1590static int kvm_s390_handle_requests(struct kvm_vcpu *vcpu)
1591{
8ad35755
DH
1592retry:
1593 s390_vcpu_unblock(vcpu);
2c70fe44
CB
1594 /*
1595 * We use MMU_RELOAD just to re-arm the ipte notifier for the
1596 * guest prefix page. gmap_ipte_notify will wait on the ptl lock.
1597 * This ensures that the ipte instruction for this request has
1598 * already finished. We might race against a second unmapper that
1599 * wants to set the blocking bit. Lets just retry the request loop.
1600 */
8ad35755 1601 if (kvm_check_request(KVM_REQ_MMU_RELOAD, vcpu)) {
2c70fe44
CB
1602 int rc;
1603 rc = gmap_ipte_notify(vcpu->arch.gmap,
fda902cb 1604 kvm_s390_get_prefix(vcpu),
2c70fe44
CB
1605 PAGE_SIZE * 2);
1606 if (rc)
1607 return rc;
8ad35755 1608 goto retry;
2c70fe44 1609 }
8ad35755 1610
d3d692c8
DH
1611 if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu)) {
1612 vcpu->arch.sie_block->ihcpu = 0xffff;
1613 goto retry;
1614 }
1615
8ad35755
DH
1616 if (kvm_check_request(KVM_REQ_ENABLE_IBS, vcpu)) {
1617 if (!ibs_enabled(vcpu)) {
1618 trace_kvm_s390_enable_disable_ibs(vcpu->vcpu_id, 1);
1619 atomic_set_mask(CPUSTAT_IBS,
1620 &vcpu->arch.sie_block->cpuflags);
1621 }
1622 goto retry;
2c70fe44 1623 }
8ad35755
DH
1624
1625 if (kvm_check_request(KVM_REQ_DISABLE_IBS, vcpu)) {
1626 if (ibs_enabled(vcpu)) {
1627 trace_kvm_s390_enable_disable_ibs(vcpu->vcpu_id, 0);
1628 atomic_clear_mask(CPUSTAT_IBS,
1629 &vcpu->arch.sie_block->cpuflags);
1630 }
1631 goto retry;
1632 }
1633
0759d068
DH
1634 /* nothing to do, just clear the request */
1635 clear_bit(KVM_REQ_UNHALT, &vcpu->requests);
1636
2c70fe44
CB
1637 return 0;
1638}
1639
fa576c58
TH
1640/**
1641 * kvm_arch_fault_in_page - fault-in guest page if necessary
1642 * @vcpu: The corresponding virtual cpu
1643 * @gpa: Guest physical address
1644 * @writable: Whether the page should be writable or not
1645 *
1646 * Make sure that a guest page has been faulted-in on the host.
1647 *
1648 * Return: Zero on success, negative error code otherwise.
1649 */
1650long kvm_arch_fault_in_page(struct kvm_vcpu *vcpu, gpa_t gpa, int writable)
24eb3a82 1651{
527e30b4
MS
1652 return gmap_fault(vcpu->arch.gmap, gpa,
1653 writable ? FAULT_FLAG_WRITE : 0);
24eb3a82
DD
1654}
1655
3c038e6b
DD
1656static void __kvm_inject_pfault_token(struct kvm_vcpu *vcpu, bool start_token,
1657 unsigned long token)
1658{
1659 struct kvm_s390_interrupt inti;
383d0b05 1660 struct kvm_s390_irq irq;
3c038e6b
DD
1661
1662 if (start_token) {
383d0b05
JF
1663 irq.u.ext.ext_params2 = token;
1664 irq.type = KVM_S390_INT_PFAULT_INIT;
1665 WARN_ON_ONCE(kvm_s390_inject_vcpu(vcpu, &irq));
3c038e6b
DD
1666 } else {
1667 inti.type = KVM_S390_INT_PFAULT_DONE;
383d0b05 1668 inti.parm64 = token;
3c038e6b
DD
1669 WARN_ON_ONCE(kvm_s390_inject_vm(vcpu->kvm, &inti));
1670 }
1671}
1672
1673void kvm_arch_async_page_not_present(struct kvm_vcpu *vcpu,
1674 struct kvm_async_pf *work)
1675{
1676 trace_kvm_s390_pfault_init(vcpu, work->arch.pfault_token);
1677 __kvm_inject_pfault_token(vcpu, true, work->arch.pfault_token);
1678}
1679
1680void kvm_arch_async_page_present(struct kvm_vcpu *vcpu,
1681 struct kvm_async_pf *work)
1682{
1683 trace_kvm_s390_pfault_done(vcpu, work->arch.pfault_token);
1684 __kvm_inject_pfault_token(vcpu, false, work->arch.pfault_token);
1685}
1686
1687void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu,
1688 struct kvm_async_pf *work)
1689{
1690 /* s390 will always inject the page directly */
1691}
1692
1693bool kvm_arch_can_inject_async_page_present(struct kvm_vcpu *vcpu)
1694{
1695 /*
1696 * s390 will always inject the page directly,
1697 * but we still want check_async_completion to cleanup
1698 */
1699 return true;
1700}
1701
1702static int kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu)
1703{
1704 hva_t hva;
1705 struct kvm_arch_async_pf arch;
1706 int rc;
1707
1708 if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
1709 return 0;
1710 if ((vcpu->arch.sie_block->gpsw.mask & vcpu->arch.pfault_select) !=
1711 vcpu->arch.pfault_compare)
1712 return 0;
1713 if (psw_extint_disabled(vcpu))
1714 return 0;
9a022067 1715 if (kvm_s390_vcpu_has_irq(vcpu, 0))
3c038e6b
DD
1716 return 0;
1717 if (!(vcpu->arch.sie_block->gcr[0] & 0x200ul))
1718 return 0;
1719 if (!vcpu->arch.gmap->pfault_enabled)
1720 return 0;
1721
81480cc1
HC
1722 hva = gfn_to_hva(vcpu->kvm, gpa_to_gfn(current->thread.gmap_addr));
1723 hva += current->thread.gmap_addr & ~PAGE_MASK;
1724 if (read_guest_real(vcpu, vcpu->arch.pfault_token, &arch.pfault_token, 8))
3c038e6b
DD
1725 return 0;
1726
1727 rc = kvm_setup_async_pf(vcpu, current->thread.gmap_addr, hva, &arch);
1728 return rc;
1729}
1730
3fb4c40f 1731static int vcpu_pre_run(struct kvm_vcpu *vcpu)
b0c632db 1732{
3fb4c40f 1733 int rc, cpuflags;
e168bf8d 1734
3c038e6b
DD
1735 /*
1736 * On s390 notifications for arriving pages will be delivered directly
1737 * to the guest but the house keeping for completed pfaults is
1738 * handled outside the worker.
1739 */
1740 kvm_check_async_pf_completion(vcpu);
1741
5a32c1af 1742 memcpy(&vcpu->arch.sie_block->gg14, &vcpu->run->s.regs.gprs[14], 16);
b0c632db
HC
1743
1744 if (need_resched())
1745 schedule();
1746
d3a73acb 1747 if (test_cpu_flag(CIF_MCCK_PENDING))
71cde587
CB
1748 s390_handle_mcck();
1749
79395031
JF
1750 if (!kvm_is_ucontrol(vcpu->kvm)) {
1751 rc = kvm_s390_deliver_pending_interrupts(vcpu);
1752 if (rc)
1753 return rc;
1754 }
0ff31867 1755
2c70fe44
CB
1756 rc = kvm_s390_handle_requests(vcpu);
1757 if (rc)
1758 return rc;
1759
27291e21
DH
1760 if (guestdbg_enabled(vcpu)) {
1761 kvm_s390_backup_guest_per_regs(vcpu);
1762 kvm_s390_patch_guest_per_regs(vcpu);
1763 }
1764
b0c632db 1765 vcpu->arch.sie_block->icptcode = 0;
3fb4c40f
TH
1766 cpuflags = atomic_read(&vcpu->arch.sie_block->cpuflags);
1767 VCPU_EVENT(vcpu, 6, "entering sie flags %x", cpuflags);
1768 trace_kvm_s390_sie_enter(vcpu, cpuflags);
2b29a9fd 1769
3fb4c40f
TH
1770 return 0;
1771}
1772
492d8642
TH
1773static int vcpu_post_run_fault_in_sie(struct kvm_vcpu *vcpu)
1774{
1775 psw_t *psw = &vcpu->arch.sie_block->gpsw;
1776 u8 opcode;
1777 int rc;
1778
1779 VCPU_EVENT(vcpu, 3, "%s", "fault in sie instruction");
1780 trace_kvm_s390_sie_fault(vcpu);
1781
1782 /*
1783 * We want to inject an addressing exception, which is defined as a
1784 * suppressing or terminating exception. However, since we came here
1785 * by a DAT access exception, the PSW still points to the faulting
1786 * instruction since DAT exceptions are nullifying. So we've got
1787 * to look up the current opcode to get the length of the instruction
1788 * to be able to forward the PSW.
1789 */
8ae04b8f 1790 rc = read_guest(vcpu, psw->addr, 0, &opcode, 1);
492d8642
TH
1791 if (rc)
1792 return kvm_s390_inject_prog_cond(vcpu, rc);
1793 psw->addr = __rewind_psw(*psw, -insn_length(opcode));
1794
1795 return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
1796}
1797
3fb4c40f
TH
1798static int vcpu_post_run(struct kvm_vcpu *vcpu, int exit_reason)
1799{
24eb3a82 1800 int rc = -1;
2b29a9fd
DD
1801
1802 VCPU_EVENT(vcpu, 6, "exit sie icptcode %d",
1803 vcpu->arch.sie_block->icptcode);
1804 trace_kvm_s390_sie_exit(vcpu, vcpu->arch.sie_block->icptcode);
1805
27291e21
DH
1806 if (guestdbg_enabled(vcpu))
1807 kvm_s390_restore_guest_per_regs(vcpu);
1808
3fb4c40f 1809 if (exit_reason >= 0) {
7c470539 1810 rc = 0;
210b1607
TH
1811 } else if (kvm_is_ucontrol(vcpu->kvm)) {
1812 vcpu->run->exit_reason = KVM_EXIT_S390_UCONTROL;
1813 vcpu->run->s390_ucontrol.trans_exc_code =
1814 current->thread.gmap_addr;
1815 vcpu->run->s390_ucontrol.pgm_code = 0x10;
1816 rc = -EREMOTE;
24eb3a82
DD
1817
1818 } else if (current->thread.gmap_pfault) {
3c038e6b 1819 trace_kvm_s390_major_guest_pfault(vcpu);
24eb3a82 1820 current->thread.gmap_pfault = 0;
fa576c58 1821 if (kvm_arch_setup_async_pf(vcpu)) {
24eb3a82 1822 rc = 0;
fa576c58
TH
1823 } else {
1824 gpa_t gpa = current->thread.gmap_addr;
1825 rc = kvm_arch_fault_in_page(vcpu, gpa, 1);
1826 }
24eb3a82
DD
1827 }
1828
492d8642
TH
1829 if (rc == -1)
1830 rc = vcpu_post_run_fault_in_sie(vcpu);
b0c632db 1831
5a32c1af 1832 memcpy(&vcpu->run->s.regs.gprs[14], &vcpu->arch.sie_block->gg14, 16);
3fb4c40f 1833
a76ccff6
TH
1834 if (rc == 0) {
1835 if (kvm_is_ucontrol(vcpu->kvm))
2955c83f
CB
1836 /* Don't exit for host interrupts. */
1837 rc = vcpu->arch.sie_block->icptcode ? -EOPNOTSUPP : 0;
a76ccff6
TH
1838 else
1839 rc = kvm_handle_sie_intercept(vcpu);
1840 }
1841
3fb4c40f
TH
1842 return rc;
1843}
1844
1845static int __vcpu_run(struct kvm_vcpu *vcpu)
1846{
1847 int rc, exit_reason;
1848
800c1065
TH
1849 /*
1850 * We try to hold kvm->srcu during most of vcpu_run (except when run-
1851 * ning the guest), so that memslots (and other stuff) are protected
1852 */
1853 vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
1854
a76ccff6
TH
1855 do {
1856 rc = vcpu_pre_run(vcpu);
1857 if (rc)
1858 break;
3fb4c40f 1859
800c1065 1860 srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
a76ccff6
TH
1861 /*
1862 * As PF_VCPU will be used in fault handler, between
1863 * guest_enter and guest_exit should be no uaccess.
1864 */
1865 preempt_disable();
1866 kvm_guest_enter();
1867 preempt_enable();
1868 exit_reason = sie64a(vcpu->arch.sie_block,
1869 vcpu->run->s.regs.gprs);
1870 kvm_guest_exit();
800c1065 1871 vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
a76ccff6
TH
1872
1873 rc = vcpu_post_run(vcpu, exit_reason);
27291e21 1874 } while (!signal_pending(current) && !guestdbg_exit_pending(vcpu) && !rc);
3fb4c40f 1875
800c1065 1876 srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
e168bf8d 1877 return rc;
b0c632db
HC
1878}
1879
b028ee3e
DH
1880static void sync_regs(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
1881{
1882 vcpu->arch.sie_block->gpsw.mask = kvm_run->psw_mask;
1883 vcpu->arch.sie_block->gpsw.addr = kvm_run->psw_addr;
1884 if (kvm_run->kvm_dirty_regs & KVM_SYNC_PREFIX)
1885 kvm_s390_set_prefix(vcpu, kvm_run->s.regs.prefix);
1886 if (kvm_run->kvm_dirty_regs & KVM_SYNC_CRS) {
1887 memcpy(&vcpu->arch.sie_block->gcr, &kvm_run->s.regs.crs, 128);
d3d692c8
DH
1888 /* some control register changes require a tlb flush */
1889 kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
b028ee3e
DH
1890 }
1891 if (kvm_run->kvm_dirty_regs & KVM_SYNC_ARCH0) {
1892 vcpu->arch.sie_block->cputm = kvm_run->s.regs.cputm;
1893 vcpu->arch.sie_block->ckc = kvm_run->s.regs.ckc;
1894 vcpu->arch.sie_block->todpr = kvm_run->s.regs.todpr;
1895 vcpu->arch.sie_block->pp = kvm_run->s.regs.pp;
1896 vcpu->arch.sie_block->gbea = kvm_run->s.regs.gbea;
1897 }
1898 if (kvm_run->kvm_dirty_regs & KVM_SYNC_PFAULT) {
1899 vcpu->arch.pfault_token = kvm_run->s.regs.pft;
1900 vcpu->arch.pfault_select = kvm_run->s.regs.pfs;
1901 vcpu->arch.pfault_compare = kvm_run->s.regs.pfc;
9fbd8082
DH
1902 if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
1903 kvm_clear_async_pf_completion_queue(vcpu);
b028ee3e
DH
1904 }
1905 kvm_run->kvm_dirty_regs = 0;
1906}
1907
1908static void store_regs(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
1909{
1910 kvm_run->psw_mask = vcpu->arch.sie_block->gpsw.mask;
1911 kvm_run->psw_addr = vcpu->arch.sie_block->gpsw.addr;
1912 kvm_run->s.regs.prefix = kvm_s390_get_prefix(vcpu);
1913 memcpy(&kvm_run->s.regs.crs, &vcpu->arch.sie_block->gcr, 128);
1914 kvm_run->s.regs.cputm = vcpu->arch.sie_block->cputm;
1915 kvm_run->s.regs.ckc = vcpu->arch.sie_block->ckc;
1916 kvm_run->s.regs.todpr = vcpu->arch.sie_block->todpr;
1917 kvm_run->s.regs.pp = vcpu->arch.sie_block->pp;
1918 kvm_run->s.regs.gbea = vcpu->arch.sie_block->gbea;
1919 kvm_run->s.regs.pft = vcpu->arch.pfault_token;
1920 kvm_run->s.regs.pfs = vcpu->arch.pfault_select;
1921 kvm_run->s.regs.pfc = vcpu->arch.pfault_compare;
1922}
1923
b0c632db
HC
1924int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
1925{
8f2abe6a 1926 int rc;
b0c632db
HC
1927 sigset_t sigsaved;
1928
27291e21
DH
1929 if (guestdbg_exit_pending(vcpu)) {
1930 kvm_s390_prepare_debug_exit(vcpu);
1931 return 0;
1932 }
1933
b0c632db
HC
1934 if (vcpu->sigset_active)
1935 sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);
1936
6352e4d2
DH
1937 if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm)) {
1938 kvm_s390_vcpu_start(vcpu);
1939 } else if (is_vcpu_stopped(vcpu)) {
1940 pr_err_ratelimited("kvm-s390: can't run stopped vcpu %d\n",
1941 vcpu->vcpu_id);
1942 return -EINVAL;
1943 }
b0c632db 1944
b028ee3e 1945 sync_regs(vcpu, kvm_run);
d7b0b5eb 1946
dab4079d 1947 might_fault();
a76ccff6 1948 rc = __vcpu_run(vcpu);
9ace903d 1949
b1d16c49
CE
1950 if (signal_pending(current) && !rc) {
1951 kvm_run->exit_reason = KVM_EXIT_INTR;
8f2abe6a 1952 rc = -EINTR;
b1d16c49 1953 }
8f2abe6a 1954
27291e21
DH
1955 if (guestdbg_exit_pending(vcpu) && !rc) {
1956 kvm_s390_prepare_debug_exit(vcpu);
1957 rc = 0;
1958 }
1959
b8e660b8 1960 if (rc == -EOPNOTSUPP) {
8f2abe6a
CB
1961 /* intercept cannot be handled in-kernel, prepare kvm-run */
1962 kvm_run->exit_reason = KVM_EXIT_S390_SIEIC;
1963 kvm_run->s390_sieic.icptcode = vcpu->arch.sie_block->icptcode;
8f2abe6a
CB
1964 kvm_run->s390_sieic.ipa = vcpu->arch.sie_block->ipa;
1965 kvm_run->s390_sieic.ipb = vcpu->arch.sie_block->ipb;
1966 rc = 0;
1967 }
1968
1969 if (rc == -EREMOTE) {
1970 /* intercept was handled, but userspace support is needed
1971 * kvm_run has been prepared by the handler */
1972 rc = 0;
1973 }
b0c632db 1974
b028ee3e 1975 store_regs(vcpu, kvm_run);
d7b0b5eb 1976
b0c632db
HC
1977 if (vcpu->sigset_active)
1978 sigprocmask(SIG_SETMASK, &sigsaved, NULL);
1979
b0c632db 1980 vcpu->stat.exit_userspace++;
7e8e6ab4 1981 return rc;
b0c632db
HC
1982}
1983
b0c632db
HC
1984/*
1985 * store status at address
1986 * we use have two special cases:
1987 * KVM_S390_STORE_STATUS_NOADDR: -> 0x1200 on 64 bit
1988 * KVM_S390_STORE_STATUS_PREFIXED: -> prefix
1989 */
d0bce605 1990int kvm_s390_store_status_unloaded(struct kvm_vcpu *vcpu, unsigned long gpa)
b0c632db 1991{
092670cd 1992 unsigned char archmode = 1;
fda902cb 1993 unsigned int px;
178bd789 1994 u64 clkcomp;
d0bce605 1995 int rc;
b0c632db 1996
d0bce605
HC
1997 if (gpa == KVM_S390_STORE_STATUS_NOADDR) {
1998 if (write_guest_abs(vcpu, 163, &archmode, 1))
b0c632db 1999 return -EFAULT;
d0bce605
HC
2000 gpa = SAVE_AREA_BASE;
2001 } else if (gpa == KVM_S390_STORE_STATUS_PREFIXED) {
2002 if (write_guest_real(vcpu, 163, &archmode, 1))
b0c632db 2003 return -EFAULT;
d0bce605
HC
2004 gpa = kvm_s390_real_to_abs(vcpu, SAVE_AREA_BASE);
2005 }
2006 rc = write_guest_abs(vcpu, gpa + offsetof(struct save_area, fp_regs),
2007 vcpu->arch.guest_fpregs.fprs, 128);
2008 rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, gp_regs),
2009 vcpu->run->s.regs.gprs, 128);
2010 rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, psw),
2011 &vcpu->arch.sie_block->gpsw, 16);
fda902cb 2012 px = kvm_s390_get_prefix(vcpu);
d0bce605 2013 rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, pref_reg),
fda902cb 2014 &px, 4);
d0bce605
HC
2015 rc |= write_guest_abs(vcpu,
2016 gpa + offsetof(struct save_area, fp_ctrl_reg),
2017 &vcpu->arch.guest_fpregs.fpc, 4);
2018 rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, tod_reg),
2019 &vcpu->arch.sie_block->todpr, 4);
2020 rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, timer),
2021 &vcpu->arch.sie_block->cputm, 8);
178bd789 2022 clkcomp = vcpu->arch.sie_block->ckc >> 8;
d0bce605
HC
2023 rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, clk_cmp),
2024 &clkcomp, 8);
2025 rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, acc_regs),
2026 &vcpu->run->s.regs.acrs, 64);
2027 rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, ctrl_regs),
2028 &vcpu->arch.sie_block->gcr, 128);
2029 return rc ? -EFAULT : 0;
b0c632db
HC
2030}
2031
e879892c
TH
2032int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
2033{
2034 /*
2035 * The guest FPRS and ACRS are in the host FPRS/ACRS due to the lazy
2036 * copying in vcpu load/put. Lets update our copies before we save
2037 * it into the save area
2038 */
2039 save_fp_ctl(&vcpu->arch.guest_fpregs.fpc);
2040 save_fp_regs(vcpu->arch.guest_fpregs.fprs);
2041 save_access_regs(vcpu->run->s.regs.acrs);
2042
2043 return kvm_s390_store_status_unloaded(vcpu, addr);
2044}
2045
bc17de7c
EF
2046/*
2047 * store additional status at address
2048 */
2049int kvm_s390_store_adtl_status_unloaded(struct kvm_vcpu *vcpu,
2050 unsigned long gpa)
2051{
2052 /* Only bits 0-53 are used for address formation */
2053 if (!(gpa & ~0x3ff))
2054 return 0;
2055
2056 return write_guest_abs(vcpu, gpa & ~0x3ff,
2057 (void *)&vcpu->run->s.regs.vrs, 512);
2058}
2059
2060int kvm_s390_vcpu_store_adtl_status(struct kvm_vcpu *vcpu, unsigned long addr)
2061{
2062 if (!test_kvm_facility(vcpu->kvm, 129))
2063 return 0;
2064
2065 /*
2066 * The guest VXRS are in the host VXRs due to the lazy
2067 * copying in vcpu load/put. Let's update our copies before we save
2068 * it into the save area.
2069 */
2070 save_vx_regs((__vector128 *)&vcpu->run->s.regs.vrs);
2071
2072 return kvm_s390_store_adtl_status_unloaded(vcpu, addr);
2073}
2074
8ad35755
DH
2075static void __disable_ibs_on_vcpu(struct kvm_vcpu *vcpu)
2076{
2077 kvm_check_request(KVM_REQ_ENABLE_IBS, vcpu);
2078 kvm_make_request(KVM_REQ_DISABLE_IBS, vcpu);
2079 exit_sie_sync(vcpu);
2080}
2081
2082static void __disable_ibs_on_all_vcpus(struct kvm *kvm)
2083{
2084 unsigned int i;
2085 struct kvm_vcpu *vcpu;
2086
2087 kvm_for_each_vcpu(i, vcpu, kvm) {
2088 __disable_ibs_on_vcpu(vcpu);
2089 }
2090}
2091
2092static void __enable_ibs_on_vcpu(struct kvm_vcpu *vcpu)
2093{
2094 kvm_check_request(KVM_REQ_DISABLE_IBS, vcpu);
2095 kvm_make_request(KVM_REQ_ENABLE_IBS, vcpu);
2096 exit_sie_sync(vcpu);
2097}
2098
6852d7b6
DH
2099void kvm_s390_vcpu_start(struct kvm_vcpu *vcpu)
2100{
8ad35755
DH
2101 int i, online_vcpus, started_vcpus = 0;
2102
2103 if (!is_vcpu_stopped(vcpu))
2104 return;
2105
6852d7b6 2106 trace_kvm_s390_vcpu_start_stop(vcpu->vcpu_id, 1);
8ad35755 2107 /* Only one cpu at a time may enter/leave the STOPPED state. */
433b9ee4 2108 spin_lock(&vcpu->kvm->arch.start_stop_lock);
8ad35755
DH
2109 online_vcpus = atomic_read(&vcpu->kvm->online_vcpus);
2110
2111 for (i = 0; i < online_vcpus; i++) {
2112 if (!is_vcpu_stopped(vcpu->kvm->vcpus[i]))
2113 started_vcpus++;
2114 }
2115
2116 if (started_vcpus == 0) {
2117 /* we're the only active VCPU -> speed it up */
2118 __enable_ibs_on_vcpu(vcpu);
2119 } else if (started_vcpus == 1) {
2120 /*
2121 * As we are starting a second VCPU, we have to disable
2122 * the IBS facility on all VCPUs to remove potentially
2123 * oustanding ENABLE requests.
2124 */
2125 __disable_ibs_on_all_vcpus(vcpu->kvm);
2126 }
2127
6852d7b6 2128 atomic_clear_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);
8ad35755
DH
2129 /*
2130 * Another VCPU might have used IBS while we were offline.
2131 * Let's play safe and flush the VCPU at startup.
2132 */
d3d692c8 2133 kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
433b9ee4 2134 spin_unlock(&vcpu->kvm->arch.start_stop_lock);
8ad35755 2135 return;
6852d7b6
DH
2136}
2137
2138void kvm_s390_vcpu_stop(struct kvm_vcpu *vcpu)
2139{
8ad35755
DH
2140 int i, online_vcpus, started_vcpus = 0;
2141 struct kvm_vcpu *started_vcpu = NULL;
2142
2143 if (is_vcpu_stopped(vcpu))
2144 return;
2145
6852d7b6 2146 trace_kvm_s390_vcpu_start_stop(vcpu->vcpu_id, 0);
8ad35755 2147 /* Only one cpu at a time may enter/leave the STOPPED state. */
433b9ee4 2148 spin_lock(&vcpu->kvm->arch.start_stop_lock);
8ad35755
DH
2149 online_vcpus = atomic_read(&vcpu->kvm->online_vcpus);
2150
32f5ff63 2151 /* SIGP STOP and SIGP STOP AND STORE STATUS has been fully processed */
6cddd432 2152 kvm_s390_clear_stop_irq(vcpu);
32f5ff63 2153
6cddd432 2154 atomic_set_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);
8ad35755
DH
2155 __disable_ibs_on_vcpu(vcpu);
2156
2157 for (i = 0; i < online_vcpus; i++) {
2158 if (!is_vcpu_stopped(vcpu->kvm->vcpus[i])) {
2159 started_vcpus++;
2160 started_vcpu = vcpu->kvm->vcpus[i];
2161 }
2162 }
2163
2164 if (started_vcpus == 1) {
2165 /*
2166 * As we only have one VCPU left, we want to enable the
2167 * IBS facility for that VCPU to speed it up.
2168 */
2169 __enable_ibs_on_vcpu(started_vcpu);
2170 }
2171
433b9ee4 2172 spin_unlock(&vcpu->kvm->arch.start_stop_lock);
8ad35755 2173 return;
6852d7b6
DH
2174}
2175
d6712df9
CH
2176static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
2177 struct kvm_enable_cap *cap)
2178{
2179 int r;
2180
2181 if (cap->flags)
2182 return -EINVAL;
2183
2184 switch (cap->cap) {
fa6b7fe9
CH
2185 case KVM_CAP_S390_CSS_SUPPORT:
2186 if (!vcpu->kvm->arch.css_support) {
2187 vcpu->kvm->arch.css_support = 1;
2188 trace_kvm_s390_enable_css(vcpu->kvm);
2189 }
2190 r = 0;
2191 break;
d6712df9
CH
2192 default:
2193 r = -EINVAL;
2194 break;
2195 }
2196 return r;
2197}
2198
41408c28
TH
2199static long kvm_s390_guest_mem_op(struct kvm_vcpu *vcpu,
2200 struct kvm_s390_mem_op *mop)
2201{
2202 void __user *uaddr = (void __user *)mop->buf;
2203 void *tmpbuf = NULL;
2204 int r, srcu_idx;
2205 const u64 supported_flags = KVM_S390_MEMOP_F_INJECT_EXCEPTION
2206 | KVM_S390_MEMOP_F_CHECK_ONLY;
2207
2208 if (mop->flags & ~supported_flags)
2209 return -EINVAL;
2210
2211 if (mop->size > MEM_OP_MAX_SIZE)
2212 return -E2BIG;
2213
2214 if (!(mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY)) {
2215 tmpbuf = vmalloc(mop->size);
2216 if (!tmpbuf)
2217 return -ENOMEM;
2218 }
2219
2220 srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
2221
2222 switch (mop->op) {
2223 case KVM_S390_MEMOP_LOGICAL_READ:
2224 if (mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY) {
2225 r = check_gva_range(vcpu, mop->gaddr, mop->ar, mop->size, false);
2226 break;
2227 }
2228 r = read_guest(vcpu, mop->gaddr, mop->ar, tmpbuf, mop->size);
2229 if (r == 0) {
2230 if (copy_to_user(uaddr, tmpbuf, mop->size))
2231 r = -EFAULT;
2232 }
2233 break;
2234 case KVM_S390_MEMOP_LOGICAL_WRITE:
2235 if (mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY) {
2236 r = check_gva_range(vcpu, mop->gaddr, mop->ar, mop->size, true);
2237 break;
2238 }
2239 if (copy_from_user(tmpbuf, uaddr, mop->size)) {
2240 r = -EFAULT;
2241 break;
2242 }
2243 r = write_guest(vcpu, mop->gaddr, mop->ar, tmpbuf, mop->size);
2244 break;
2245 default:
2246 r = -EINVAL;
2247 }
2248
2249 srcu_read_unlock(&vcpu->kvm->srcu, srcu_idx);
2250
2251 if (r > 0 && (mop->flags & KVM_S390_MEMOP_F_INJECT_EXCEPTION) != 0)
2252 kvm_s390_inject_prog_irq(vcpu, &vcpu->arch.pgm);
2253
2254 vfree(tmpbuf);
2255 return r;
2256}
2257
b0c632db
HC
2258long kvm_arch_vcpu_ioctl(struct file *filp,
2259 unsigned int ioctl, unsigned long arg)
2260{
2261 struct kvm_vcpu *vcpu = filp->private_data;
2262 void __user *argp = (void __user *)arg;
800c1065 2263 int idx;
bc923cc9 2264 long r;
b0c632db 2265
93736624
AK
2266 switch (ioctl) {
2267 case KVM_S390_INTERRUPT: {
ba5c1e9b 2268 struct kvm_s390_interrupt s390int;
383d0b05 2269 struct kvm_s390_irq s390irq;
ba5c1e9b 2270
93736624 2271 r = -EFAULT;
ba5c1e9b 2272 if (copy_from_user(&s390int, argp, sizeof(s390int)))
93736624 2273 break;
383d0b05
JF
2274 if (s390int_to_s390irq(&s390int, &s390irq))
2275 return -EINVAL;
2276 r = kvm_s390_inject_vcpu(vcpu, &s390irq);
93736624 2277 break;
ba5c1e9b 2278 }
b0c632db 2279 case KVM_S390_STORE_STATUS:
800c1065 2280 idx = srcu_read_lock(&vcpu->kvm->srcu);
bc923cc9 2281 r = kvm_s390_vcpu_store_status(vcpu, arg);
800c1065 2282 srcu_read_unlock(&vcpu->kvm->srcu, idx);
bc923cc9 2283 break;
b0c632db
HC
2284 case KVM_S390_SET_INITIAL_PSW: {
2285 psw_t psw;
2286
bc923cc9 2287 r = -EFAULT;
b0c632db 2288 if (copy_from_user(&psw, argp, sizeof(psw)))
bc923cc9
AK
2289 break;
2290 r = kvm_arch_vcpu_ioctl_set_initial_psw(vcpu, psw);
2291 break;
b0c632db
HC
2292 }
2293 case KVM_S390_INITIAL_RESET:
bc923cc9
AK
2294 r = kvm_arch_vcpu_ioctl_initial_reset(vcpu);
2295 break;
14eebd91
CO
2296 case KVM_SET_ONE_REG:
2297 case KVM_GET_ONE_REG: {
2298 struct kvm_one_reg reg;
2299 r = -EFAULT;
2300 if (copy_from_user(&reg, argp, sizeof(reg)))
2301 break;
2302 if (ioctl == KVM_SET_ONE_REG)
2303 r = kvm_arch_vcpu_ioctl_set_one_reg(vcpu, &reg);
2304 else
2305 r = kvm_arch_vcpu_ioctl_get_one_reg(vcpu, &reg);
2306 break;
2307 }
27e0393f
CO
2308#ifdef CONFIG_KVM_S390_UCONTROL
2309 case KVM_S390_UCAS_MAP: {
2310 struct kvm_s390_ucas_mapping ucasmap;
2311
2312 if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
2313 r = -EFAULT;
2314 break;
2315 }
2316
2317 if (!kvm_is_ucontrol(vcpu->kvm)) {
2318 r = -EINVAL;
2319 break;
2320 }
2321
2322 r = gmap_map_segment(vcpu->arch.gmap, ucasmap.user_addr,
2323 ucasmap.vcpu_addr, ucasmap.length);
2324 break;
2325 }
2326 case KVM_S390_UCAS_UNMAP: {
2327 struct kvm_s390_ucas_mapping ucasmap;
2328
2329 if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
2330 r = -EFAULT;
2331 break;
2332 }
2333
2334 if (!kvm_is_ucontrol(vcpu->kvm)) {
2335 r = -EINVAL;
2336 break;
2337 }
2338
2339 r = gmap_unmap_segment(vcpu->arch.gmap, ucasmap.vcpu_addr,
2340 ucasmap.length);
2341 break;
2342 }
2343#endif
ccc7910f 2344 case KVM_S390_VCPU_FAULT: {
527e30b4 2345 r = gmap_fault(vcpu->arch.gmap, arg, 0);
ccc7910f
CO
2346 break;
2347 }
d6712df9
CH
2348 case KVM_ENABLE_CAP:
2349 {
2350 struct kvm_enable_cap cap;
2351 r = -EFAULT;
2352 if (copy_from_user(&cap, argp, sizeof(cap)))
2353 break;
2354 r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap);
2355 break;
2356 }
41408c28
TH
2357 case KVM_S390_MEM_OP: {
2358 struct kvm_s390_mem_op mem_op;
2359
2360 if (copy_from_user(&mem_op, argp, sizeof(mem_op)) == 0)
2361 r = kvm_s390_guest_mem_op(vcpu, &mem_op);
2362 else
2363 r = -EFAULT;
2364 break;
2365 }
b0c632db 2366 default:
3e6afcf1 2367 r = -ENOTTY;
b0c632db 2368 }
bc923cc9 2369 return r;
b0c632db
HC
2370}
2371
5b1c1493
CO
2372int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
2373{
2374#ifdef CONFIG_KVM_S390_UCONTROL
2375 if ((vmf->pgoff == KVM_S390_SIE_PAGE_OFFSET)
2376 && (kvm_is_ucontrol(vcpu->kvm))) {
2377 vmf->page = virt_to_page(vcpu->arch.sie_block);
2378 get_page(vmf->page);
2379 return 0;
2380 }
2381#endif
2382 return VM_FAULT_SIGBUS;
2383}
2384
5587027c
AK
2385int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
2386 unsigned long npages)
db3fe4eb
TY
2387{
2388 return 0;
2389}
2390
b0c632db 2391/* Section: memory related */
f7784b8e
MT
2392int kvm_arch_prepare_memory_region(struct kvm *kvm,
2393 struct kvm_memory_slot *memslot,
7b6195a9
TY
2394 struct kvm_userspace_memory_region *mem,
2395 enum kvm_mr_change change)
b0c632db 2396{
dd2887e7
NW
2397 /* A few sanity checks. We can have memory slots which have to be
2398 located/ended at a segment boundary (1MB). The memory in userland is
2399 ok to be fragmented into various different vmas. It is okay to mmap()
2400 and munmap() stuff in this slot after doing this call at any time */
b0c632db 2401
598841ca 2402 if (mem->userspace_addr & 0xffffful)
b0c632db
HC
2403 return -EINVAL;
2404
598841ca 2405 if (mem->memory_size & 0xffffful)
b0c632db
HC
2406 return -EINVAL;
2407
f7784b8e
MT
2408 return 0;
2409}
2410
2411void kvm_arch_commit_memory_region(struct kvm *kvm,
2412 struct kvm_userspace_memory_region *mem,
8482644a
TY
2413 const struct kvm_memory_slot *old,
2414 enum kvm_mr_change change)
f7784b8e 2415{
f7850c92 2416 int rc;
f7784b8e 2417
2cef4deb
CB
2418 /* If the basics of the memslot do not change, we do not want
2419 * to update the gmap. Every update causes several unnecessary
2420 * segment translation exceptions. This is usually handled just
2421 * fine by the normal fault handler + gmap, but it will also
2422 * cause faults on the prefix page of running guest CPUs.
2423 */
2424 if (old->userspace_addr == mem->userspace_addr &&
2425 old->base_gfn * PAGE_SIZE == mem->guest_phys_addr &&
2426 old->npages * PAGE_SIZE == mem->memory_size)
2427 return;
598841ca
CO
2428
2429 rc = gmap_map_segment(kvm->arch.gmap, mem->userspace_addr,
2430 mem->guest_phys_addr, mem->memory_size);
2431 if (rc)
f7850c92 2432 printk(KERN_WARNING "kvm-s390: failed to commit memory region\n");
598841ca 2433 return;
b0c632db
HC
2434}
2435
b0c632db
HC
2436static int __init kvm_s390_init(void)
2437{
9d8d5786 2438 return kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
b0c632db
HC
2439}
2440
2441static void __exit kvm_s390_exit(void)
2442{
2443 kvm_exit();
2444}
2445
2446module_init(kvm_s390_init);
2447module_exit(kvm_s390_exit);
566af940
CH
2448
2449/*
2450 * Enable autoloading of the kvm module.
2451 * Note that we add the module alias here instead of virt/kvm/kvm_main.c
2452 * since x86 takes a different approach.
2453 */
2454#include <linux/miscdevice.h>
2455MODULE_ALIAS_MISCDEV(KVM_MINOR);
2456MODULE_ALIAS("devname:kvm");