/*
 * hosting zSeries kernel virtual machines
 *
 * Copyright IBM Corp. 2008, 2009
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (version 2 only)
 * as published by the Free Software Foundation.
 *
 *    Author(s): Carsten Otte <cotte@de.ibm.com>
 *               Christian Borntraeger <borntraeger@de.ibm.com>
 *               Heiko Carstens <heiko.carstens@de.ibm.com>
 *               Christian Ehrhardt <ehrhardt@de.ibm.com>
 *               Jason J. Herne <jjherne@us.ibm.com>
 */

#include <linux/compiler.h>
#include <linux/err.h>
#include <linux/fs.h>
#include <linux/hrtimer.h>
#include <linux/init.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/timer.h>
#include <linux/vmalloc.h>
#include <asm/asm-offsets.h>
#include <asm/lowcore.h>
#include <asm/pgtable.h>
#include <asm/nmi.h>
#include <asm/switch_to.h>
#include <asm/sclp.h>
#include "kvm-s390.h"
#include "gaccess.h"

#define CREATE_TRACE_POINTS
#include "trace.h"
#include "trace-s390.h"

#define MEM_OP_MAX_SIZE 65536	/* Maximum transfer size for KVM_S390_MEM_OP */

#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU

struct kvm_stats_debugfs_item debugfs_entries[] = {
	{ "userspace_handled", VCPU_STAT(exit_userspace) },
	{ "exit_null", VCPU_STAT(exit_null) },
	{ "exit_validity", VCPU_STAT(exit_validity) },
	{ "exit_stop_request", VCPU_STAT(exit_stop_request) },
	{ "exit_external_request", VCPU_STAT(exit_external_request) },
	{ "exit_external_interrupt", VCPU_STAT(exit_external_interrupt) },
	{ "exit_instruction", VCPU_STAT(exit_instruction) },
	{ "exit_program_interruption", VCPU_STAT(exit_program_interruption) },
	{ "exit_instr_and_program_int", VCPU_STAT(exit_instr_and_program) },
	{ "halt_successful_poll", VCPU_STAT(halt_successful_poll) },
	{ "halt_wakeup", VCPU_STAT(halt_wakeup) },
	{ "instruction_lctlg", VCPU_STAT(instruction_lctlg) },
	{ "instruction_lctl", VCPU_STAT(instruction_lctl) },
	{ "instruction_stctl", VCPU_STAT(instruction_stctl) },
	{ "instruction_stctg", VCPU_STAT(instruction_stctg) },
	{ "deliver_emergency_signal", VCPU_STAT(deliver_emergency_signal) },
	{ "deliver_external_call", VCPU_STAT(deliver_external_call) },
	{ "deliver_service_signal", VCPU_STAT(deliver_service_signal) },
	{ "deliver_virtio_interrupt", VCPU_STAT(deliver_virtio_interrupt) },
	{ "deliver_stop_signal", VCPU_STAT(deliver_stop_signal) },
	{ "deliver_prefix_signal", VCPU_STAT(deliver_prefix_signal) },
	{ "deliver_restart_signal", VCPU_STAT(deliver_restart_signal) },
	{ "deliver_program_interruption", VCPU_STAT(deliver_program_int) },
	{ "exit_wait_state", VCPU_STAT(exit_wait_state) },
	{ "instruction_pfmf", VCPU_STAT(instruction_pfmf) },
	{ "instruction_stidp", VCPU_STAT(instruction_stidp) },
	{ "instruction_spx", VCPU_STAT(instruction_spx) },
	{ "instruction_stpx", VCPU_STAT(instruction_stpx) },
	{ "instruction_stap", VCPU_STAT(instruction_stap) },
	{ "instruction_storage_key", VCPU_STAT(instruction_storage_key) },
	{ "instruction_ipte_interlock", VCPU_STAT(instruction_ipte_interlock) },
	{ "instruction_stsch", VCPU_STAT(instruction_stsch) },
	{ "instruction_chsc", VCPU_STAT(instruction_chsc) },
	{ "instruction_essa", VCPU_STAT(instruction_essa) },
	{ "instruction_stsi", VCPU_STAT(instruction_stsi) },
	{ "instruction_stfl", VCPU_STAT(instruction_stfl) },
	{ "instruction_tprot", VCPU_STAT(instruction_tprot) },
	{ "instruction_sigp_sense", VCPU_STAT(instruction_sigp_sense) },
	{ "instruction_sigp_sense_running", VCPU_STAT(instruction_sigp_sense_running) },
	{ "instruction_sigp_external_call", VCPU_STAT(instruction_sigp_external_call) },
	{ "instruction_sigp_emergency", VCPU_STAT(instruction_sigp_emergency) },
	{ "instruction_sigp_cond_emergency", VCPU_STAT(instruction_sigp_cond_emergency) },
	{ "instruction_sigp_start", VCPU_STAT(instruction_sigp_start) },
	{ "instruction_sigp_stop", VCPU_STAT(instruction_sigp_stop) },
	{ "instruction_sigp_stop_store_status", VCPU_STAT(instruction_sigp_stop_store_status) },
	{ "instruction_sigp_store_status", VCPU_STAT(instruction_sigp_store_status) },
	{ "instruction_sigp_store_adtl_status", VCPU_STAT(instruction_sigp_store_adtl_status) },
	{ "instruction_sigp_set_arch", VCPU_STAT(instruction_sigp_arch) },
	{ "instruction_sigp_set_prefix", VCPU_STAT(instruction_sigp_prefix) },
	{ "instruction_sigp_restart", VCPU_STAT(instruction_sigp_restart) },
	{ "instruction_sigp_cpu_reset", VCPU_STAT(instruction_sigp_cpu_reset) },
	{ "instruction_sigp_init_cpu_reset", VCPU_STAT(instruction_sigp_init_cpu_reset) },
	{ "instruction_sigp_unknown", VCPU_STAT(instruction_sigp_unknown) },
	{ "diagnose_10", VCPU_STAT(diagnose_10) },
	{ "diagnose_44", VCPU_STAT(diagnose_44) },
	{ "diagnose_9c", VCPU_STAT(diagnose_9c) },
	{ NULL }
};

/* upper facilities limit for kvm */
unsigned long kvm_s390_fac_list_mask[] = {
	0xff82fffbf4fc2000UL,
	0x005c000000000000UL,
};
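
/*
 * Each doubleword above covers 64 STFLE facility bits, counted from the
 * most-significant bit down (facility 0 is the leftmost bit of word 0),
 * matching the layout STFLE stores. Facilities cleared in this mask are
 * never offered to a guest, regardless of what the host machine supports.
 */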

unsigned long kvm_s390_fac_list_mask_size(void)
{
	BUILD_BUG_ON(ARRAY_SIZE(kvm_s390_fac_list_mask) > S390_ARCH_FAC_MASK_SIZE_U64);
	return ARRAY_SIZE(kvm_s390_fac_list_mask);
}

static struct gmap_notifier gmap_notifier;

/* Section: not file related */
int kvm_arch_hardware_enable(void)
{
	/* every s390 is virtualization enabled ;-) */
	return 0;
}

static void kvm_gmap_notifier(struct gmap *gmap, unsigned long address);

int kvm_arch_hardware_setup(void)
{
	gmap_notifier.notifier_call = kvm_gmap_notifier;
	gmap_register_ipte_notifier(&gmap_notifier);
	return 0;
}

void kvm_arch_hardware_unsetup(void)
{
	gmap_unregister_ipte_notifier(&gmap_notifier);
}

int kvm_arch_init(void *opaque)
{
	/* Register floating interrupt controller interface. */
	return kvm_register_device_ops(&kvm_flic_ops, KVM_DEV_TYPE_FLIC);
}

/* Section: device related */
long kvm_arch_dev_ioctl(struct file *filp,
			unsigned int ioctl, unsigned long arg)
{
	if (ioctl == KVM_S390_ENABLE_SIE)
		return s390_enable_sie();
	return -EINVAL;
}

int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
{
	int r;

	switch (ext) {
	case KVM_CAP_S390_PSW:
	case KVM_CAP_S390_GMAP:
	case KVM_CAP_SYNC_MMU:
#ifdef CONFIG_KVM_S390_UCONTROL
	case KVM_CAP_S390_UCONTROL:
#endif
	case KVM_CAP_ASYNC_PF:
	case KVM_CAP_SYNC_REGS:
	case KVM_CAP_ONE_REG:
	case KVM_CAP_ENABLE_CAP:
	case KVM_CAP_S390_CSS_SUPPORT:
	case KVM_CAP_IRQFD:
	case KVM_CAP_IOEVENTFD:
	case KVM_CAP_DEVICE_CTRL:
	case KVM_CAP_ENABLE_CAP_VM:
	case KVM_CAP_S390_IRQCHIP:
	case KVM_CAP_VM_ATTRIBUTES:
	case KVM_CAP_MP_STATE:
	case KVM_CAP_S390_USER_SIGP:
	case KVM_CAP_S390_USER_STSI:
	case KVM_CAP_S390_SKEYS:
		r = 1;
		break;
	case KVM_CAP_S390_MEM_OP:
		r = MEM_OP_MAX_SIZE;
		break;
	case KVM_CAP_NR_VCPUS:
	case KVM_CAP_MAX_VCPUS:
		r = KVM_MAX_VCPUS;
		break;
	case KVM_CAP_NR_MEMSLOTS:
		r = KVM_USER_MEM_SLOTS;
		break;
	case KVM_CAP_S390_COW:
		r = MACHINE_HAS_ESOP;
		break;
	case KVM_CAP_S390_VECTOR_REGISTERS:
		r = MACHINE_HAS_VX;
		break;
	default:
		r = 0;
	}
	return r;
}
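
/*
 * Dirty-log flow on s390, in short: there is no write-protection based
 * tracking here. kvm_s390_sync_dirty_log() below walks every page of the
 * memslot, lets gmap_test_and_clear_dirty() look up (and reset) the
 * per-page dirty state in the host gmap, and funnels each dirty page
 * into the generic bitmap via mark_page_dirty().
 */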

static void kvm_s390_sync_dirty_log(struct kvm *kvm,
				struct kvm_memory_slot *memslot)
{
	gfn_t cur_gfn, last_gfn;
	unsigned long address;
	struct gmap *gmap = kvm->arch.gmap;

	down_read(&gmap->mm->mmap_sem);
	/* Loop over all guest pages */
	last_gfn = memslot->base_gfn + memslot->npages;
	for (cur_gfn = memslot->base_gfn; cur_gfn <= last_gfn; cur_gfn++) {
		address = gfn_to_hva_memslot(memslot, cur_gfn);

		if (gmap_test_and_clear_dirty(address, gmap))
			mark_page_dirty(kvm, cur_gfn);
	}
	up_read(&gmap->mm->mmap_sem);
}

/* Section: vm related */
/*
 * Get (and clear) the dirty memory log for a memory slot.
 */
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
			       struct kvm_dirty_log *log)
{
	int r;
	unsigned long n;
	struct kvm_memory_slot *memslot;
	int is_dirty = 0;

	mutex_lock(&kvm->slots_lock);

	r = -EINVAL;
	if (log->slot >= KVM_USER_MEM_SLOTS)
		goto out;

	memslot = id_to_memslot(kvm->memslots, log->slot);
	r = -ENOENT;
	if (!memslot->dirty_bitmap)
		goto out;

	kvm_s390_sync_dirty_log(kvm, memslot);
	r = kvm_get_dirty_log(kvm, log, &is_dirty);
	if (r)
		goto out;

	/* Clear the dirty log */
	if (is_dirty) {
		n = kvm_dirty_bitmap_bytes(memslot);
		memset(memslot->dirty_bitmap, 0, n);
	}
	r = 0;
out:
	mutex_unlock(&kvm->slots_lock);
	return r;
}

static int kvm_vm_ioctl_enable_cap(struct kvm *kvm, struct kvm_enable_cap *cap)
{
	int r;

	if (cap->flags)
		return -EINVAL;

	switch (cap->cap) {
	case KVM_CAP_S390_IRQCHIP:
		kvm->arch.use_irqchip = 1;
		r = 0;
		break;
	case KVM_CAP_S390_USER_SIGP:
		kvm->arch.user_sigp = 1;
		r = 0;
		break;
	case KVM_CAP_S390_VECTOR_REGISTERS:
		if (MACHINE_HAS_VX) {
			set_kvm_facility(kvm->arch.model.fac->mask, 129);
			set_kvm_facility(kvm->arch.model.fac->list, 129);
			r = 0;
		} else
			r = -EINVAL;
		break;
	case KVM_CAP_S390_USER_STSI:
		kvm->arch.user_stsi = 1;
		r = 0;
		break;
	default:
		r = -EINVAL;
		break;
	}
	return r;
}

static int kvm_s390_get_mem_control(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	switch (attr->attr) {
	case KVM_S390_VM_MEM_LIMIT_SIZE:
		ret = 0;
		if (put_user(kvm->arch.gmap->asce_end, (u64 __user *)attr->addr))
			ret = -EFAULT;
		break;
	default:
		ret = -ENXIO;
		break;
	}
	return ret;
}

static int kvm_s390_set_mem_control(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;
	unsigned int idx;
	switch (attr->attr) {
	case KVM_S390_VM_MEM_ENABLE_CMMA:
		ret = -EBUSY;
		mutex_lock(&kvm->lock);
		if (atomic_read(&kvm->online_vcpus) == 0) {
			kvm->arch.use_cmma = 1;
			ret = 0;
		}
		mutex_unlock(&kvm->lock);
		break;
	case KVM_S390_VM_MEM_CLR_CMMA:
		mutex_lock(&kvm->lock);
		idx = srcu_read_lock(&kvm->srcu);
		s390_reset_cmma(kvm->arch.gmap->mm);
		srcu_read_unlock(&kvm->srcu, idx);
		mutex_unlock(&kvm->lock);
		ret = 0;
		break;
	case KVM_S390_VM_MEM_LIMIT_SIZE: {
		unsigned long new_limit;

		if (kvm_is_ucontrol(kvm))
			return -EINVAL;

		if (get_user(new_limit, (u64 __user *)attr->addr))
			return -EFAULT;

		if (new_limit > kvm->arch.gmap->asce_end)
			return -E2BIG;

		ret = -EBUSY;
		mutex_lock(&kvm->lock);
		if (atomic_read(&kvm->online_vcpus) == 0) {
			/* gmap_alloc will round the limit up */
			struct gmap *new = gmap_alloc(current->mm, new_limit);

			if (!new) {
				ret = -ENOMEM;
			} else {
				gmap_free(kvm->arch.gmap);
				new->private = kvm;
				kvm->arch.gmap = new;
				ret = 0;
			}
		}
		mutex_unlock(&kvm->lock);
		break;
	}
	default:
		ret = -ENXIO;
		break;
	}
	return ret;
}

static void kvm_s390_vcpu_crypto_setup(struct kvm_vcpu *vcpu);

static int kvm_s390_vm_set_crypto(struct kvm *kvm, struct kvm_device_attr *attr)
{
	struct kvm_vcpu *vcpu;
	int i;

	if (!test_kvm_facility(kvm, 76))
		return -EINVAL;

	mutex_lock(&kvm->lock);
	switch (attr->attr) {
	case KVM_S390_VM_CRYPTO_ENABLE_AES_KW:
		get_random_bytes(
			kvm->arch.crypto.crycb->aes_wrapping_key_mask,
			sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
		kvm->arch.crypto.aes_kw = 1;
		break;
	case KVM_S390_VM_CRYPTO_ENABLE_DEA_KW:
		get_random_bytes(
			kvm->arch.crypto.crycb->dea_wrapping_key_mask,
			sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));
		kvm->arch.crypto.dea_kw = 1;
		break;
	case KVM_S390_VM_CRYPTO_DISABLE_AES_KW:
		kvm->arch.crypto.aes_kw = 0;
		memset(kvm->arch.crypto.crycb->aes_wrapping_key_mask, 0,
			sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
		break;
	case KVM_S390_VM_CRYPTO_DISABLE_DEA_KW:
		kvm->arch.crypto.dea_kw = 0;
		memset(kvm->arch.crypto.crycb->dea_wrapping_key_mask, 0,
			sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));
		break;
	default:
		mutex_unlock(&kvm->lock);
		return -ENXIO;
	}

	kvm_for_each_vcpu(i, vcpu, kvm) {
		kvm_s390_vcpu_crypto_setup(vcpu);
		exit_sie(vcpu);
	}
	mutex_unlock(&kvm->lock);
	return 0;
}
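
/*
 * Guest TOD clock handling: only the epoch difference (guest TOD minus
 * host TOD) is kept per VM. It is copied into every vcpu's SIE block,
 * where the hardware applies it whenever the guest reads its TOD, so no
 * interception is needed on the hot path.
 */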

static int kvm_s390_set_tod_high(struct kvm *kvm, struct kvm_device_attr *attr)
{
	u8 gtod_high;

	if (copy_from_user(&gtod_high, (void __user *)attr->addr,
			   sizeof(gtod_high)))
		return -EFAULT;

	if (gtod_high != 0)
		return -EINVAL;

	return 0;
}

static int kvm_s390_set_tod_low(struct kvm *kvm, struct kvm_device_attr *attr)
{
	struct kvm_vcpu *cur_vcpu;
	unsigned int vcpu_idx;
	u64 host_tod, gtod;
	int r;

	if (copy_from_user(&gtod, (void __user *)attr->addr, sizeof(gtod)))
		return -EFAULT;

	r = store_tod_clock(&host_tod);
	if (r)
		return r;

	mutex_lock(&kvm->lock);
	kvm->arch.epoch = gtod - host_tod;
	kvm_for_each_vcpu(vcpu_idx, cur_vcpu, kvm) {
		cur_vcpu->arch.sie_block->epoch = kvm->arch.epoch;
		exit_sie(cur_vcpu);
	}
	mutex_unlock(&kvm->lock);
	return 0;
}

static int kvm_s390_set_tod(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	if (attr->flags)
		return -EINVAL;

	switch (attr->attr) {
	case KVM_S390_VM_TOD_HIGH:
		ret = kvm_s390_set_tod_high(kvm, attr);
		break;
	case KVM_S390_VM_TOD_LOW:
		ret = kvm_s390_set_tod_low(kvm, attr);
		break;
	default:
		ret = -ENXIO;
		break;
	}
	return ret;
}

static int kvm_s390_get_tod_high(struct kvm *kvm, struct kvm_device_attr *attr)
{
	u8 gtod_high = 0;

	if (copy_to_user((void __user *)attr->addr, &gtod_high,
			 sizeof(gtod_high)))
		return -EFAULT;

	return 0;
}

static int kvm_s390_get_tod_low(struct kvm *kvm, struct kvm_device_attr *attr)
{
	u64 host_tod, gtod;
	int r;

	r = store_tod_clock(&host_tod);
	if (r)
		return r;

	gtod = host_tod + kvm->arch.epoch;
	if (copy_to_user((void __user *)attr->addr, &gtod, sizeof(gtod)))
		return -EFAULT;

	return 0;
}

static int kvm_s390_get_tod(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	if (attr->flags)
		return -EINVAL;

	switch (attr->attr) {
	case KVM_S390_VM_TOD_HIGH:
		ret = kvm_s390_get_tod_high(kvm, attr);
		break;
	case KVM_S390_VM_TOD_LOW:
		ret = kvm_s390_get_tod_low(kvm, attr);
		break;
	default:
		ret = -ENXIO;
		break;
	}
	return ret;
}
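
/*
 * The processor part of the CPU model (cpuid, ibc, facility list) can
 * only be replaced while no vcpus exist: kvm_s390_set_processor() bails
 * out with -EBUSY otherwise, since every vcpu copies the model into its
 * SIE block at creation time and would go stale.
 */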

static int kvm_s390_set_processor(struct kvm *kvm, struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_cpu_processor *proc;
	int ret = 0;

	mutex_lock(&kvm->lock);
	if (atomic_read(&kvm->online_vcpus)) {
		ret = -EBUSY;
		goto out;
	}
	proc = kzalloc(sizeof(*proc), GFP_KERNEL);
	if (!proc) {
		ret = -ENOMEM;
		goto out;
	}
	if (!copy_from_user(proc, (void __user *)attr->addr,
			    sizeof(*proc))) {
		memcpy(&kvm->arch.model.cpu_id, &proc->cpuid,
		       sizeof(struct cpuid));
		kvm->arch.model.ibc = proc->ibc;
		memcpy(kvm->arch.model.fac->list, proc->fac_list,
		       S390_ARCH_FAC_LIST_SIZE_BYTE);
	} else
		ret = -EFAULT;
	kfree(proc);
out:
	mutex_unlock(&kvm->lock);
	return ret;
}

static int kvm_s390_set_cpu_model(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret = -ENXIO;

	switch (attr->attr) {
	case KVM_S390_VM_CPU_PROCESSOR:
		ret = kvm_s390_set_processor(kvm, attr);
		break;
	}
	return ret;
}

static int kvm_s390_get_processor(struct kvm *kvm, struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_cpu_processor *proc;
	int ret = 0;

	proc = kzalloc(sizeof(*proc), GFP_KERNEL);
	if (!proc) {
		ret = -ENOMEM;
		goto out;
	}
	memcpy(&proc->cpuid, &kvm->arch.model.cpu_id, sizeof(struct cpuid));
	proc->ibc = kvm->arch.model.ibc;
	memcpy(&proc->fac_list, kvm->arch.model.fac->list, S390_ARCH_FAC_LIST_SIZE_BYTE);
	if (copy_to_user((void __user *)attr->addr, proc, sizeof(*proc)))
		ret = -EFAULT;
	kfree(proc);
out:
	return ret;
}

static int kvm_s390_get_machine(struct kvm *kvm, struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_cpu_machine *mach;
	int ret = 0;

	mach = kzalloc(sizeof(*mach), GFP_KERNEL);
	if (!mach) {
		ret = -ENOMEM;
		goto out;
	}
	get_cpu_id((struct cpuid *) &mach->cpuid);
	mach->ibc = sclp_get_ibc();
	memcpy(&mach->fac_mask, kvm->arch.model.fac->mask,
	       S390_ARCH_FAC_LIST_SIZE_BYTE);
	memcpy((unsigned long *)&mach->fac_list, S390_lowcore.stfle_fac_list,
	       S390_ARCH_FAC_LIST_SIZE_BYTE);
	if (copy_to_user((void __user *)attr->addr, mach, sizeof(*mach)))
		ret = -EFAULT;
	kfree(mach);
out:
	return ret;
}

static int kvm_s390_get_cpu_model(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret = -ENXIO;

	switch (attr->attr) {
	case KVM_S390_VM_CPU_PROCESSOR:
		ret = kvm_s390_get_processor(kvm, attr);
		break;
	case KVM_S390_VM_CPU_MACHINE:
		ret = kvm_s390_get_machine(kvm, attr);
		break;
	}
	return ret;
}

static int kvm_s390_vm_set_attr(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	switch (attr->group) {
	case KVM_S390_VM_MEM_CTRL:
		ret = kvm_s390_set_mem_control(kvm, attr);
		break;
	case KVM_S390_VM_TOD:
		ret = kvm_s390_set_tod(kvm, attr);
		break;
	case KVM_S390_VM_CPU_MODEL:
		ret = kvm_s390_set_cpu_model(kvm, attr);
		break;
	case KVM_S390_VM_CRYPTO:
		ret = kvm_s390_vm_set_crypto(kvm, attr);
		break;
	default:
		ret = -ENXIO;
		break;
	}

	return ret;
}

static int kvm_s390_vm_get_attr(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	switch (attr->group) {
	case KVM_S390_VM_MEM_CTRL:
		ret = kvm_s390_get_mem_control(kvm, attr);
		break;
	case KVM_S390_VM_TOD:
		ret = kvm_s390_get_tod(kvm, attr);
		break;
	case KVM_S390_VM_CPU_MODEL:
		ret = kvm_s390_get_cpu_model(kvm, attr);
		break;
	default:
		ret = -ENXIO;
		break;
	}

	return ret;
}
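
/*
 * The attribute handlers above and below are reached through the generic
 * KVM_{SET,GET,HAS}_DEVICE_ATTR ioctls on the VM fd, e.g. (hypothetical
 * userspace snippet):
 *
 *	struct kvm_device_attr attr = {
 *		.group = KVM_S390_VM_MEM_CTRL,
 *		.attr  = KVM_S390_VM_MEM_ENABLE_CMMA,
 *	};
 *	ioctl(vm_fd, KVM_SET_DEVICE_ATTR, &attr);
 */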

static int kvm_s390_vm_has_attr(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	switch (attr->group) {
	case KVM_S390_VM_MEM_CTRL:
		switch (attr->attr) {
		case KVM_S390_VM_MEM_ENABLE_CMMA:
		case KVM_S390_VM_MEM_CLR_CMMA:
		case KVM_S390_VM_MEM_LIMIT_SIZE:
			ret = 0;
			break;
		default:
			ret = -ENXIO;
			break;
		}
		break;
	case KVM_S390_VM_TOD:
		switch (attr->attr) {
		case KVM_S390_VM_TOD_LOW:
		case KVM_S390_VM_TOD_HIGH:
			ret = 0;
			break;
		default:
			ret = -ENXIO;
			break;
		}
		break;
	case KVM_S390_VM_CPU_MODEL:
		switch (attr->attr) {
		case KVM_S390_VM_CPU_PROCESSOR:
		case KVM_S390_VM_CPU_MACHINE:
			ret = 0;
			break;
		default:
			ret = -ENXIO;
			break;
		}
		break;
	case KVM_S390_VM_CRYPTO:
		switch (attr->attr) {
		case KVM_S390_VM_CRYPTO_ENABLE_AES_KW:
		case KVM_S390_VM_CRYPTO_ENABLE_DEA_KW:
		case KVM_S390_VM_CRYPTO_DISABLE_AES_KW:
		case KVM_S390_VM_CRYPTO_DISABLE_DEA_KW:
			ret = 0;
			break;
		default:
			ret = -ENXIO;
			break;
		}
		break;
	default:
		ret = -ENXIO;
		break;
	}

	return ret;
}
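
/*
 * Storage-key export/import: if the guest never used storage keys there
 * is nothing to transfer (KVM_S390_GET_SKEYS_NONE). Otherwise the keys
 * are staged in a kernel buffer (kmalloc with a vmalloc fallback) so the
 * gfn walk itself stays free of user-space accesses.
 */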

static long kvm_s390_get_skeys(struct kvm *kvm, struct kvm_s390_skeys *args)
{
	uint8_t *keys;
	uint64_t hva;
	unsigned long curkey;
	int i, r = 0;

	if (args->flags != 0)
		return -EINVAL;

	/* Is this guest using storage keys? */
	if (!mm_use_skey(current->mm))
		return KVM_S390_GET_SKEYS_NONE;

	/* Enforce sane limit on memory allocation */
	if (args->count < 1 || args->count > KVM_S390_SKEYS_MAX)
		return -EINVAL;

	keys = kmalloc_array(args->count, sizeof(uint8_t),
			     GFP_KERNEL | __GFP_NOWARN);
	if (!keys)
		keys = vmalloc(sizeof(uint8_t) * args->count);
	if (!keys)
		return -ENOMEM;

	for (i = 0; i < args->count; i++) {
		hva = gfn_to_hva(kvm, args->start_gfn + i);
		if (kvm_is_error_hva(hva)) {
			r = -EFAULT;
			goto out;
		}

		curkey = get_guest_storage_key(current->mm, hva);
		if (IS_ERR_VALUE(curkey)) {
			r = curkey;
			goto out;
		}
		keys[i] = curkey;
	}

	r = copy_to_user((uint8_t __user *)args->skeydata_addr, keys,
			 sizeof(uint8_t) * args->count);
	if (r)
		r = -EFAULT;
out:
	kvfree(keys);
	return r;
}

static long kvm_s390_set_skeys(struct kvm *kvm, struct kvm_s390_skeys *args)
{
	uint8_t *keys;
	uint64_t hva;
	int i, r = 0;

	if (args->flags != 0)
		return -EINVAL;

	/* Enforce sane limit on memory allocation */
	if (args->count < 1 || args->count > KVM_S390_SKEYS_MAX)
		return -EINVAL;

	keys = kmalloc_array(args->count, sizeof(uint8_t),
			     GFP_KERNEL | __GFP_NOWARN);
	if (!keys)
		keys = vmalloc(sizeof(uint8_t) * args->count);
	if (!keys)
		return -ENOMEM;

	r = copy_from_user(keys, (uint8_t __user *)args->skeydata_addr,
			   sizeof(uint8_t) * args->count);
	if (r) {
		r = -EFAULT;
		goto out;
	}

	/* Enable storage key handling for the guest */
	s390_enable_skey();

	for (i = 0; i < args->count; i++) {
		hva = gfn_to_hva(kvm, args->start_gfn + i);
		if (kvm_is_error_hva(hva)) {
			r = -EFAULT;
			goto out;
		}

		/* Lowest order bit is reserved */
		if (keys[i] & 0x01) {
			r = -EINVAL;
			goto out;
		}

		r = set_guest_storage_key(current->mm, hva,
					  (unsigned long)keys[i], 0);
		if (r)
			goto out;
	}
out:
	kvfree(keys);
	return r;
}

long kvm_arch_vm_ioctl(struct file *filp,
		       unsigned int ioctl, unsigned long arg)
{
	struct kvm *kvm = filp->private_data;
	void __user *argp = (void __user *)arg;
	struct kvm_device_attr attr;
	int r;

	switch (ioctl) {
	case KVM_S390_INTERRUPT: {
		struct kvm_s390_interrupt s390int;

		r = -EFAULT;
		if (copy_from_user(&s390int, argp, sizeof(s390int)))
			break;
		r = kvm_s390_inject_vm(kvm, &s390int);
		break;
	}
	case KVM_ENABLE_CAP: {
		struct kvm_enable_cap cap;
		r = -EFAULT;
		if (copy_from_user(&cap, argp, sizeof(cap)))
			break;
		r = kvm_vm_ioctl_enable_cap(kvm, &cap);
		break;
	}
	case KVM_CREATE_IRQCHIP: {
		struct kvm_irq_routing_entry routing;

		r = -EINVAL;
		if (kvm->arch.use_irqchip) {
			/* Set up dummy routing. */
			memset(&routing, 0, sizeof(routing));
			kvm_set_irq_routing(kvm, &routing, 0, 0);
			r = 0;
		}
		break;
	}
	case KVM_SET_DEVICE_ATTR: {
		r = -EFAULT;
		if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
			break;
		r = kvm_s390_vm_set_attr(kvm, &attr);
		break;
	}
	case KVM_GET_DEVICE_ATTR: {
		r = -EFAULT;
		if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
			break;
		r = kvm_s390_vm_get_attr(kvm, &attr);
		break;
	}
	case KVM_HAS_DEVICE_ATTR: {
		r = -EFAULT;
		if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
			break;
		r = kvm_s390_vm_has_attr(kvm, &attr);
		break;
	}
	case KVM_S390_GET_SKEYS: {
		struct kvm_s390_skeys args;

		r = -EFAULT;
		if (copy_from_user(&args, argp,
				   sizeof(struct kvm_s390_skeys)))
			break;
		r = kvm_s390_get_skeys(kvm, &args);
		break;
	}
	case KVM_S390_SET_SKEYS: {
		struct kvm_s390_skeys args;

		r = -EFAULT;
		if (copy_from_user(&args, argp,
				   sizeof(struct kvm_s390_skeys)))
			break;
		r = kvm_s390_set_skeys(kvm, &args);
		break;
	}
	default:
		r = -ENOTTY;
	}

	return r;
}
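
/*
 * A sketch of what the inline assembly below does: the QCI function code
 * (0x04000000) goes into GR0 and the address of the 128-byte config
 * block into GR2; ".long 0xb2af0000" encodes the PQAP instruction
 * itself. The EX_TABLE fixup skips the condition-code extraction when
 * PQAP raises an exception (no AP instructions available), so cc stays 0
 * and the zeroed config block simply reports no APXA.
 */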

static int kvm_s390_query_ap_config(u8 *config)
{
	u32 fcn_code = 0x04000000UL;
	u32 cc = 0;

	memset(config, 0, 128);
	asm volatile(
		"lgr 0,%1\n"
		"lgr 2,%2\n"
		".long 0xb2af0000\n"		/* PQAP(QCI) */
		"0: ipm %0\n"
		"srl %0,28\n"
		"1:\n"
		EX_TABLE(0b, 1b)
		: "+r" (cc)
		: "r" (fcn_code), "r" (config)
		: "cc", "0", "2", "memory"
	);

	return cc;
}

static int kvm_s390_apxa_installed(void)
{
	u8 config[128];
	int cc;

	if (test_facility(2) && test_facility(12)) {
		cc = kvm_s390_query_ap_config(config);

		if (cc)
			pr_err("PQAP(QCI) failed with cc=%d", cc);
		else
			return config[0] & 0x40;
	}

	return 0;
}

static void kvm_s390_set_crycb_format(struct kvm *kvm)
{
	kvm->arch.crypto.crycbd = (__u32)(unsigned long) kvm->arch.crypto.crycb;

	if (kvm_s390_apxa_installed())
		kvm->arch.crypto.crycbd |= CRYCB_FORMAT2;
	else
		kvm->arch.crypto.crycbd |= CRYCB_FORMAT1;
}

static void kvm_s390_get_cpu_id(struct cpuid *cpu_id)
{
	get_cpu_id(cpu_id);
	cpu_id->version = 0xff;
}

static int kvm_s390_crypto_init(struct kvm *kvm)
{
	if (!test_kvm_facility(kvm, 76))
		return 0;

	kvm->arch.crypto.crycb = kzalloc(sizeof(*kvm->arch.crypto.crycb),
					 GFP_KERNEL | GFP_DMA);
	if (!kvm->arch.crypto.crycb)
		return -ENOMEM;

	kvm_s390_set_crycb_format(kvm);

	/* Enable AES/DEA protected key functions by default */
	kvm->arch.crypto.aes_kw = 1;
	kvm->arch.crypto.dea_kw = 1;
	get_random_bytes(kvm->arch.crypto.crycb->aes_wrapping_key_mask,
			 sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
	get_random_bytes(kvm->arch.crypto.crycb->dea_wrapping_key_mask,
			 sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));

	return 0;
}
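
/*
 * Note on the sca_offset arithmetic in kvm_arch_init_vm() below: each VM
 * places its SCA at a different 16-byte offset within the allocated page
 * (cycling through 0x000..0x7f0), apparently so the heavily-accessed
 * control blocks of different VMs do not all compete for the same cache
 * lines.
 */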

int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
{
	int i, rc;
	char debug_name[16];
	static unsigned long sca_offset;

	rc = -EINVAL;
#ifdef CONFIG_KVM_S390_UCONTROL
	if (type & ~KVM_VM_S390_UCONTROL)
		goto out_err;
	if ((type & KVM_VM_S390_UCONTROL) && (!capable(CAP_SYS_ADMIN)))
		goto out_err;
#else
	if (type)
		goto out_err;
#endif

	rc = s390_enable_sie();
	if (rc)
		goto out_err;

	rc = -ENOMEM;

	kvm->arch.sca = (struct sca_block *) get_zeroed_page(GFP_KERNEL);
	if (!kvm->arch.sca)
		goto out_err;
	spin_lock(&kvm_lock);
	sca_offset = (sca_offset + 16) & 0x7f0;
	kvm->arch.sca = (struct sca_block *) ((char *) kvm->arch.sca + sca_offset);
	spin_unlock(&kvm_lock);

	sprintf(debug_name, "kvm-%u", current->pid);

	kvm->arch.dbf = debug_register(debug_name, 8, 2, 8 * sizeof(long));
	if (!kvm->arch.dbf)
		goto out_err;

	/*
	 * The architectural maximum number of facility bits is 16 kbit,
	 * which takes 2 kbyte to store. We therefore need a full page to
	 * hold the guest facility list (arch.model.fac->list) and the
	 * facility mask (arch.model.fac->mask). The page's address must
	 * fit into 31 bits and be word aligned.
	 */
	kvm->arch.model.fac =
		(struct kvm_s390_fac *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!kvm->arch.model.fac)
		goto out_err;

	/* Populate the facility mask initially. */
	memcpy(kvm->arch.model.fac->mask, S390_lowcore.stfle_fac_list,
	       S390_ARCH_FAC_LIST_SIZE_BYTE);
	for (i = 0; i < S390_ARCH_FAC_LIST_SIZE_U64; i++) {
		if (i < kvm_s390_fac_list_mask_size())
			kvm->arch.model.fac->mask[i] &= kvm_s390_fac_list_mask[i];
		else
			kvm->arch.model.fac->mask[i] = 0UL;
	}

	/* Populate the facility list initially. */
	memcpy(kvm->arch.model.fac->list, kvm->arch.model.fac->mask,
	       S390_ARCH_FAC_LIST_SIZE_BYTE);

	kvm_s390_get_cpu_id(&kvm->arch.model.cpu_id);
	kvm->arch.model.ibc = sclp_get_ibc() & 0x0fff;

	if (kvm_s390_crypto_init(kvm) < 0)
		goto out_err;

	spin_lock_init(&kvm->arch.float_int.lock);
	INIT_LIST_HEAD(&kvm->arch.float_int.list);
	init_waitqueue_head(&kvm->arch.ipte_wq);
	mutex_init(&kvm->arch.ipte_mutex);

	debug_register_view(kvm->arch.dbf, &debug_sprintf_view);
	VM_EVENT(kvm, 3, "%s", "vm created");

	if (type & KVM_VM_S390_UCONTROL) {
		kvm->arch.gmap = NULL;
	} else {
		kvm->arch.gmap = gmap_alloc(current->mm, (1UL << 44) - 1);
		if (!kvm->arch.gmap)
			goto out_err;
		kvm->arch.gmap->private = kvm;
		kvm->arch.gmap->pfault_enabled = 0;
	}

	kvm->arch.css_support = 0;
	kvm->arch.use_irqchip = 0;
	kvm->arch.epoch = 0;

	spin_lock_init(&kvm->arch.start_stop_lock);

	return 0;
out_err:
	kfree(kvm->arch.crypto.crycb);
	free_page((unsigned long)kvm->arch.model.fac);
	debug_unregister(kvm->arch.dbf);
	free_page((unsigned long)(kvm->arch.sca));
	return rc;
}

void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{
	VCPU_EVENT(vcpu, 3, "%s", "free cpu");
	trace_kvm_s390_destroy_vcpu(vcpu->vcpu_id);
	kvm_s390_clear_local_irqs(vcpu);
	kvm_clear_async_pf_completion_queue(vcpu);
	if (!kvm_is_ucontrol(vcpu->kvm)) {
		clear_bit(63 - vcpu->vcpu_id,
			  (unsigned long *) &vcpu->kvm->arch.sca->mcn);
		if (vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda ==
		    (__u64) vcpu->arch.sie_block)
			vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda = 0;
	}
	smp_mb();

	if (kvm_is_ucontrol(vcpu->kvm))
		gmap_free(vcpu->arch.gmap);

	if (kvm_s390_cmma_enabled(vcpu->kvm))
		kvm_s390_vcpu_unsetup_cmma(vcpu);
	free_page((unsigned long)(vcpu->arch.sie_block));

	kvm_vcpu_uninit(vcpu);
	kmem_cache_free(kvm_vcpu_cache, vcpu);
}

static void kvm_free_vcpus(struct kvm *kvm)
{
	unsigned int i;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(i, vcpu, kvm)
		kvm_arch_vcpu_destroy(vcpu);

	mutex_lock(&kvm->lock);
	for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
		kvm->vcpus[i] = NULL;

	atomic_set(&kvm->online_vcpus, 0);
	mutex_unlock(&kvm->lock);
}

void kvm_arch_destroy_vm(struct kvm *kvm)
{
	kvm_free_vcpus(kvm);
	free_page((unsigned long)kvm->arch.model.fac);
	free_page((unsigned long)(kvm->arch.sca));
	debug_unregister(kvm->arch.dbf);
	kfree(kvm->arch.crypto.crycb);
	if (!kvm_is_ucontrol(kvm))
		gmap_free(kvm->arch.gmap);
	kvm_s390_destroy_adapters(kvm);
	kvm_s390_clear_float_irqs(kvm);
}

/* Section: vcpu related */
static int __kvm_ucontrol_vcpu_init(struct kvm_vcpu *vcpu)
{
	vcpu->arch.gmap = gmap_alloc(current->mm, -1UL);
	if (!vcpu->arch.gmap)
		return -ENOMEM;
	vcpu->arch.gmap->private = vcpu->kvm;

	return 0;
}

int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
{
	vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
	kvm_clear_async_pf_completion_queue(vcpu);
	vcpu->run->kvm_valid_regs = KVM_SYNC_PREFIX |
				    KVM_SYNC_GPRS |
				    KVM_SYNC_ACRS |
				    KVM_SYNC_CRS |
				    KVM_SYNC_ARCH0 |
				    KVM_SYNC_PFAULT;
	if (test_kvm_facility(vcpu->kvm, 129))
		vcpu->run->kvm_valid_regs |= KVM_SYNC_VRS;

	if (kvm_is_ucontrol(vcpu->kvm))
		return __kvm_ucontrol_vcpu_init(vcpu);

	return 0;
}
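
/*
 * vcpu_load/vcpu_put below switch the FPU state between host and guest:
 * with facility 129 the full vector register set is saved and restored,
 * otherwise only the 16 FPRs plus the FP control register. Access
 * registers are switched unconditionally either way.
 */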

void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	save_fp_ctl(&vcpu->arch.host_fpregs.fpc);
	if (test_kvm_facility(vcpu->kvm, 129))
		save_vx_regs((__vector128 *)&vcpu->arch.host_vregs->vrs);
	else
		save_fp_regs(vcpu->arch.host_fpregs.fprs);
	save_access_regs(vcpu->arch.host_acrs);
	if (test_kvm_facility(vcpu->kvm, 129)) {
		restore_fp_ctl(&vcpu->run->s.regs.fpc);
		restore_vx_regs((__vector128 *)&vcpu->run->s.regs.vrs);
	} else {
		restore_fp_ctl(&vcpu->arch.guest_fpregs.fpc);
		restore_fp_regs(vcpu->arch.guest_fpregs.fprs);
	}
	restore_access_regs(vcpu->run->s.regs.acrs);
	gmap_enable(vcpu->arch.gmap);
	atomic_set_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
}

void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
	atomic_clear_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
	gmap_disable(vcpu->arch.gmap);
	if (test_kvm_facility(vcpu->kvm, 129)) {
		save_fp_ctl(&vcpu->run->s.regs.fpc);
		save_vx_regs((__vector128 *)&vcpu->run->s.regs.vrs);
	} else {
		save_fp_ctl(&vcpu->arch.guest_fpregs.fpc);
		save_fp_regs(vcpu->arch.guest_fpregs.fprs);
	}
	save_access_regs(vcpu->run->s.regs.acrs);
	restore_fp_ctl(&vcpu->arch.host_fpregs.fpc);
	if (test_kvm_facility(vcpu->kvm, 129))
		restore_vx_regs((__vector128 *)&vcpu->arch.host_vregs->vrs);
	else
		restore_fp_regs(vcpu->arch.host_fpregs.fprs);
	restore_access_regs(vcpu->arch.host_acrs);
}

static void kvm_s390_vcpu_initial_reset(struct kvm_vcpu *vcpu)
{
	/* this equals initial cpu reset in pop, but we don't switch to ESA */
	vcpu->arch.sie_block->gpsw.mask = 0UL;
	vcpu->arch.sie_block->gpsw.addr = 0UL;
	kvm_s390_set_prefix(vcpu, 0);
	vcpu->arch.sie_block->cputm = 0UL;
	vcpu->arch.sie_block->ckc = 0UL;
	vcpu->arch.sie_block->todpr = 0;
	memset(vcpu->arch.sie_block->gcr, 0, 16 * sizeof(__u64));
	vcpu->arch.sie_block->gcr[0] = 0xE0UL;
	vcpu->arch.sie_block->gcr[14] = 0xC2000000UL;
	vcpu->arch.guest_fpregs.fpc = 0;
	asm volatile("lfpc %0" : : "Q" (vcpu->arch.guest_fpregs.fpc));
	vcpu->arch.sie_block->gbea = 1;
	vcpu->arch.sie_block->pp = 0;
	vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
	kvm_clear_async_pf_completion_queue(vcpu);
	if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm))
		kvm_s390_vcpu_stop(vcpu);
	kvm_s390_clear_local_irqs(vcpu);
}

void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
{
	mutex_lock(&vcpu->kvm->lock);
	vcpu->arch.sie_block->epoch = vcpu->kvm->arch.epoch;
	mutex_unlock(&vcpu->kvm->lock);
	if (!kvm_is_ucontrol(vcpu->kvm))
		vcpu->arch.gmap = vcpu->kvm->arch.gmap;
}

static void kvm_s390_vcpu_crypto_setup(struct kvm_vcpu *vcpu)
{
	if (!test_kvm_facility(vcpu->kvm, 76))
		return;

	vcpu->arch.sie_block->ecb3 &= ~(ECB3_AES | ECB3_DEA);

	if (vcpu->kvm->arch.crypto.aes_kw)
		vcpu->arch.sie_block->ecb3 |= ECB3_AES;
	if (vcpu->kvm->arch.crypto.dea_kw)
		vcpu->arch.sie_block->ecb3 |= ECB3_DEA;

	vcpu->arch.sie_block->crycbd = vcpu->kvm->arch.crypto.crycbd;
}
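
/*
 * CMMA: cbrlo points at a zeroed page in which the machine logs the
 * guest blocks designated by ESSA. ecb2 bit 0x80 enables interpretation
 * of the CMMA instructions; clearing bit 0x08 takes back the PFMF
 * interpretation that kvm_arch_vcpu_setup() enables by default,
 * apparently so PFMF is intercepted again while CMMA is active.
 */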

void kvm_s390_vcpu_unsetup_cmma(struct kvm_vcpu *vcpu)
{
	free_page(vcpu->arch.sie_block->cbrlo);
	vcpu->arch.sie_block->cbrlo = 0;
}

int kvm_s390_vcpu_setup_cmma(struct kvm_vcpu *vcpu)
{
	vcpu->arch.sie_block->cbrlo = get_zeroed_page(GFP_KERNEL);
	if (!vcpu->arch.sie_block->cbrlo)
		return -ENOMEM;

	vcpu->arch.sie_block->ecb2 |= 0x80;
	vcpu->arch.sie_block->ecb2 &= ~0x08;
	return 0;
}

static void kvm_s390_vcpu_setup_model(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_cpu_model *model = &vcpu->kvm->arch.model;

	vcpu->arch.cpu_id = model->cpu_id;
	vcpu->arch.sie_block->ibc = model->ibc;
	vcpu->arch.sie_block->fac = (int) (long) model->fac->list;
}

int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
{
	int rc = 0;

	atomic_set(&vcpu->arch.sie_block->cpuflags, CPUSTAT_ZARCH |
						    CPUSTAT_SM |
						    CPUSTAT_STOPPED |
						    CPUSTAT_GED);
	kvm_s390_vcpu_setup_model(vcpu);

	vcpu->arch.sie_block->ecb = 6;
	if (test_kvm_facility(vcpu->kvm, 50) && test_kvm_facility(vcpu->kvm, 73))
		vcpu->arch.sie_block->ecb |= 0x10;

	vcpu->arch.sie_block->ecb2 = 8;
	vcpu->arch.sie_block->eca = 0xC1002000U;
	if (sclp_has_siif())
		vcpu->arch.sie_block->eca |= 1;
	if (sclp_has_sigpif())
		vcpu->arch.sie_block->eca |= 0x10000000U;
	if (test_kvm_facility(vcpu->kvm, 129)) {
		vcpu->arch.sie_block->eca |= 0x00020000;
		vcpu->arch.sie_block->ecd |= 0x20000000;
	}
	vcpu->arch.sie_block->ictl |= ICTL_ISKE | ICTL_SSKE | ICTL_RRBE;

	if (kvm_s390_cmma_enabled(vcpu->kvm)) {
		rc = kvm_s390_vcpu_setup_cmma(vcpu);
		if (rc)
			return rc;
	}
	hrtimer_init(&vcpu->arch.ckc_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	vcpu->arch.ckc_timer.function = kvm_s390_idle_wakeup;

	kvm_s390_vcpu_crypto_setup(vcpu);

	return rc;
}

struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
				      unsigned int id)
{
	struct kvm_vcpu *vcpu;
	struct sie_page *sie_page;
	int rc = -EINVAL;

	if (id >= KVM_MAX_VCPUS)
		goto out;

	rc = -ENOMEM;

	vcpu = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
	if (!vcpu)
		goto out;

	sie_page = (struct sie_page *) get_zeroed_page(GFP_KERNEL);
	if (!sie_page)
		goto out_free_cpu;

	vcpu->arch.sie_block = &sie_page->sie_block;
	vcpu->arch.sie_block->itdba = (unsigned long) &sie_page->itdb;
	vcpu->arch.host_vregs = &sie_page->vregs;

	vcpu->arch.sie_block->icpua = id;
	if (!kvm_is_ucontrol(kvm)) {
		if (!kvm->arch.sca) {
			WARN_ON_ONCE(1);
			goto out_free_cpu;
		}
		if (!kvm->arch.sca->cpu[id].sda)
			kvm->arch.sca->cpu[id].sda =
				(__u64) vcpu->arch.sie_block;
		vcpu->arch.sie_block->scaoh =
			(__u32)(((__u64)kvm->arch.sca) >> 32);
		vcpu->arch.sie_block->scaol = (__u32)(__u64)kvm->arch.sca;
		set_bit(63 - id, (unsigned long *) &kvm->arch.sca->mcn);
	}

	spin_lock_init(&vcpu->arch.local_int.lock);
	vcpu->arch.local_int.float_int = &kvm->arch.float_int;
	vcpu->arch.local_int.wq = &vcpu->wq;
	vcpu->arch.local_int.cpuflags = &vcpu->arch.sie_block->cpuflags;

	rc = kvm_vcpu_init(vcpu, kvm, id);
	if (rc)
		goto out_free_sie_block;
	VM_EVENT(kvm, 3, "create cpu %d at %p, sie block at %p", id, vcpu,
		 vcpu->arch.sie_block);
	trace_kvm_s390_create_vcpu(id, vcpu, vcpu->arch.sie_block);

	return vcpu;
out_free_sie_block:
	free_page((unsigned long)(vcpu->arch.sie_block));
out_free_cpu:
	kmem_cache_free(kvm_vcpu_cache, vcpu);
out:
	return ERR_PTR(rc);
}

int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
{
	return kvm_s390_vcpu_has_irq(vcpu, 0);
}

void s390_vcpu_block(struct kvm_vcpu *vcpu)
{
	atomic_set_mask(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
}

void s390_vcpu_unblock(struct kvm_vcpu *vcpu)
{
	atomic_clear_mask(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
}

/*
 * Kick a guest cpu out of SIE and wait until SIE is not running.
 * If the CPU is not running (e.g. waiting as idle) the function will
 * return immediately. */
void exit_sie(struct kvm_vcpu *vcpu)
{
	atomic_set_mask(CPUSTAT_STOP_INT, &vcpu->arch.sie_block->cpuflags);
	while (vcpu->arch.sie_block->prog0c & PROG_IN_SIE)
		cpu_relax();
}

/* Kick a guest cpu out of SIE and prevent SIE-reentry */
void exit_sie_sync(struct kvm_vcpu *vcpu)
{
	s390_vcpu_block(vcpu);
	exit_sie(vcpu);
}
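
/*
 * The gmap ipte notifier fires when a notified guest page's mapping is
 * about to change. A vcpu's prefix area spans two consecutive 4K pages,
 * hence the match against (address & ~0x1000UL) below; any hit kicks the
 * vcpu out of SIE and queues KVM_REQ_MMU_RELOAD to re-arm the notifier.
 */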

static void kvm_gmap_notifier(struct gmap *gmap, unsigned long address)
{
	int i;
	struct kvm *kvm = gmap->private;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(i, vcpu, kvm) {
		/* match against both prefix pages */
		if (kvm_s390_get_prefix(vcpu) == (address & ~0x1000UL)) {
			VCPU_EVENT(vcpu, 2, "gmap notifier for %lx", address);
			kvm_make_request(KVM_REQ_MMU_RELOAD, vcpu);
			exit_sie_sync(vcpu);
		}
	}
}

int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
{
	/* kvm common code refers to this, but never calls it */
	BUG();
	return 0;
}

static int kvm_arch_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu,
					   struct kvm_one_reg *reg)
{
	int r = -EINVAL;

	switch (reg->id) {
	case KVM_REG_S390_TODPR:
		r = put_user(vcpu->arch.sie_block->todpr,
			     (u32 __user *)reg->addr);
		break;
	case KVM_REG_S390_EPOCHDIFF:
		r = put_user(vcpu->arch.sie_block->epoch,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_CPU_TIMER:
		r = put_user(vcpu->arch.sie_block->cputm,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_CLOCK_COMP:
		r = put_user(vcpu->arch.sie_block->ckc,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PFTOKEN:
		r = put_user(vcpu->arch.pfault_token,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PFCOMPARE:
		r = put_user(vcpu->arch.pfault_compare,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PFSELECT:
		r = put_user(vcpu->arch.pfault_select,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PP:
		r = put_user(vcpu->arch.sie_block->pp,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_GBEA:
		r = put_user(vcpu->arch.sie_block->gbea,
			     (u64 __user *)reg->addr);
		break;
	default:
		break;
	}

	return r;
}

static int kvm_arch_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu,
					   struct kvm_one_reg *reg)
{
	int r = -EINVAL;

	switch (reg->id) {
	case KVM_REG_S390_TODPR:
		r = get_user(vcpu->arch.sie_block->todpr,
			     (u32 __user *)reg->addr);
		break;
	case KVM_REG_S390_EPOCHDIFF:
		r = get_user(vcpu->arch.sie_block->epoch,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_CPU_TIMER:
		r = get_user(vcpu->arch.sie_block->cputm,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_CLOCK_COMP:
		r = get_user(vcpu->arch.sie_block->ckc,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PFTOKEN:
		r = get_user(vcpu->arch.pfault_token,
			     (u64 __user *)reg->addr);
		if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
			kvm_clear_async_pf_completion_queue(vcpu);
		break;
	case KVM_REG_S390_PFCOMPARE:
		r = get_user(vcpu->arch.pfault_compare,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PFSELECT:
		r = get_user(vcpu->arch.pfault_select,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PP:
		r = get_user(vcpu->arch.sie_block->pp,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_GBEA:
		r = get_user(vcpu->arch.sie_block->gbea,
			     (u64 __user *)reg->addr);
		break;
	default:
		break;
	}

	return r;
}

static int kvm_arch_vcpu_ioctl_initial_reset(struct kvm_vcpu *vcpu)
{
	kvm_s390_vcpu_initial_reset(vcpu);
	return 0;
}

int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	memcpy(&vcpu->run->s.regs.gprs, &regs->gprs, sizeof(regs->gprs));
	return 0;
}

int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	memcpy(&regs->gprs, &vcpu->run->s.regs.gprs, sizeof(regs->gprs));
	return 0;
}

int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	memcpy(&vcpu->run->s.regs.acrs, &sregs->acrs, sizeof(sregs->acrs));
	memcpy(&vcpu->arch.sie_block->gcr, &sregs->crs, sizeof(sregs->crs));
	restore_access_regs(vcpu->run->s.regs.acrs);
	return 0;
}

int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	memcpy(&sregs->acrs, &vcpu->run->s.regs.acrs, sizeof(sregs->acrs));
	memcpy(&sregs->crs, &vcpu->arch.sie_block->gcr, sizeof(sregs->crs));
	return 0;
}

int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	if (test_fp_ctl(fpu->fpc))
		return -EINVAL;
	memcpy(&vcpu->arch.guest_fpregs.fprs, &fpu->fprs, sizeof(fpu->fprs));
	vcpu->arch.guest_fpregs.fpc = fpu->fpc;
	restore_fp_ctl(&vcpu->arch.guest_fpregs.fpc);
	restore_fp_regs(vcpu->arch.guest_fpregs.fprs);
	return 0;
}

int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	memcpy(&fpu->fprs, &vcpu->arch.guest_fpregs.fprs, sizeof(fpu->fprs));
	fpu->fpc = vcpu->arch.guest_fpregs.fpc;
	return 0;
}

static int kvm_arch_vcpu_ioctl_set_initial_psw(struct kvm_vcpu *vcpu, psw_t psw)
{
	int rc = 0;

	if (!is_vcpu_stopped(vcpu))
		rc = -EBUSY;
	else {
		vcpu->run->psw_mask = psw.mask;
		vcpu->run->psw_addr = psw.addr;
	}
	return rc;
}

int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
				  struct kvm_translation *tr)
{
	return -EINVAL; /* not implemented yet */
}

#define VALID_GUESTDBG_FLAGS (KVM_GUESTDBG_SINGLESTEP | \
			      KVM_GUESTDBG_USE_HW_BP | \
			      KVM_GUESTDBG_ENABLE)

int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
					struct kvm_guest_debug *dbg)
{
	int rc = 0;

	vcpu->guest_debug = 0;
	kvm_s390_clear_bp_data(vcpu);

	if (dbg->control & ~VALID_GUESTDBG_FLAGS)
		return -EINVAL;

	if (dbg->control & KVM_GUESTDBG_ENABLE) {
		vcpu->guest_debug = dbg->control;
		/* enforce guest PER */
		atomic_set_mask(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags);

		if (dbg->control & KVM_GUESTDBG_USE_HW_BP)
			rc = kvm_s390_import_bp_data(vcpu, dbg);
	} else {
		atomic_clear_mask(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags);
		vcpu->arch.guestdbg.last_bp = 0;
	}

	if (rc) {
		vcpu->guest_debug = 0;
		kvm_s390_clear_bp_data(vcpu);
		atomic_clear_mask(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags);
	}

	return rc;
}

int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	/* CHECK_STOP and LOAD are not supported yet */
	return is_vcpu_stopped(vcpu) ? KVM_MP_STATE_STOPPED :
				       KVM_MP_STATE_OPERATING;
}

int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	int rc = 0;

	/* user space knows about this interface - let it control the state */
	vcpu->kvm->arch.user_cpu_state_ctrl = 1;

	switch (mp_state->mp_state) {
	case KVM_MP_STATE_STOPPED:
		kvm_s390_vcpu_stop(vcpu);
		break;
	case KVM_MP_STATE_OPERATING:
		kvm_s390_vcpu_start(vcpu);
		break;
	case KVM_MP_STATE_LOAD:
	case KVM_MP_STATE_CHECK_STOP:
		/* fall through - CHECK_STOP and LOAD are not supported yet */
	default:
		rc = -ENXIO;
	}

	return rc;
}

bool kvm_s390_cmma_enabled(struct kvm *kvm)
{
	if (!MACHINE_IS_LPAR)
		return false;
	/* only enable for z10 and later */
	if (!MACHINE_HAS_EDAT1)
		return false;
	if (!kvm->arch.use_cmma)
		return false;
	return true;
}

static bool ibs_enabled(struct kvm_vcpu *vcpu)
{
	return atomic_read(&vcpu->arch.sie_block->cpuflags) & CPUSTAT_IBS;
}

static int kvm_s390_handle_requests(struct kvm_vcpu *vcpu)
{
retry:
	s390_vcpu_unblock(vcpu);
	/*
	 * We use MMU_RELOAD just to re-arm the ipte notifier for the
	 * guest prefix page. gmap_ipte_notify will wait on the ptl lock.
	 * This ensures that the ipte instruction for this request has
	 * already finished. We might race against a second unmapper that
	 * wants to set the blocking bit. Let's just retry the request loop.
	 */
	if (kvm_check_request(KVM_REQ_MMU_RELOAD, vcpu)) {
		int rc;
		rc = gmap_ipte_notify(vcpu->arch.gmap,
				      kvm_s390_get_prefix(vcpu),
				      PAGE_SIZE * 2);
		if (rc)
			return rc;
		goto retry;
	}

	if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu)) {
		vcpu->arch.sie_block->ihcpu = 0xffff;
		goto retry;
	}

	if (kvm_check_request(KVM_REQ_ENABLE_IBS, vcpu)) {
		if (!ibs_enabled(vcpu)) {
			trace_kvm_s390_enable_disable_ibs(vcpu->vcpu_id, 1);
			atomic_set_mask(CPUSTAT_IBS,
					&vcpu->arch.sie_block->cpuflags);
		}
		goto retry;
	}

	if (kvm_check_request(KVM_REQ_DISABLE_IBS, vcpu)) {
		if (ibs_enabled(vcpu)) {
			trace_kvm_s390_enable_disable_ibs(vcpu->vcpu_id, 0);
			atomic_clear_mask(CPUSTAT_IBS,
					  &vcpu->arch.sie_block->cpuflags);
		}
		goto retry;
	}

	/* nothing to do, just clear the request */
	clear_bit(KVM_REQ_UNHALT, &vcpu->requests);

	return 0;
}

/**
 * kvm_arch_fault_in_page - fault-in guest page if necessary
 * @vcpu: The corresponding virtual cpu
 * @gpa: Guest physical address
 * @writable: Whether the page should be writable or not
 *
 * Make sure that a guest page has been faulted-in on the host.
 *
 * Return: Zero on success, negative error code otherwise.
 */
long kvm_arch_fault_in_page(struct kvm_vcpu *vcpu, gpa_t gpa, int writable)
{
	return gmap_fault(vcpu->arch.gmap, gpa,
			  writable ? FAULT_FLAG_WRITE : 0);
}

static void __kvm_inject_pfault_token(struct kvm_vcpu *vcpu, bool start_token,
				      unsigned long token)
{
	struct kvm_s390_interrupt inti;
	struct kvm_s390_irq irq;

	if (start_token) {
		irq.u.ext.ext_params2 = token;
		irq.type = KVM_S390_INT_PFAULT_INIT;
		WARN_ON_ONCE(kvm_s390_inject_vcpu(vcpu, &irq));
	} else {
		inti.type = KVM_S390_INT_PFAULT_DONE;
		inti.parm64 = token;
		WARN_ON_ONCE(kvm_s390_inject_vm(vcpu->kvm, &inti));
	}
}

void kvm_arch_async_page_not_present(struct kvm_vcpu *vcpu,
				     struct kvm_async_pf *work)
{
	trace_kvm_s390_pfault_init(vcpu, work->arch.pfault_token);
	__kvm_inject_pfault_token(vcpu, true, work->arch.pfault_token);
}

void kvm_arch_async_page_present(struct kvm_vcpu *vcpu,
				 struct kvm_async_pf *work)
{
	trace_kvm_s390_pfault_done(vcpu, work->arch.pfault_token);
	__kvm_inject_pfault_token(vcpu, false, work->arch.pfault_token);
}

void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu,
			       struct kvm_async_pf *work)
{
	/* s390 will always inject the page directly */
}

bool kvm_arch_can_inject_async_page_present(struct kvm_vcpu *vcpu)
{
	/*
	 * s390 will always inject the page directly,
	 * but we still want check_async_completion to clean up
	 */
	return true;
}

static int kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu)
{
	hva_t hva;
	struct kvm_arch_async_pf arch;
	int rc;

	if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
		return 0;
	if ((vcpu->arch.sie_block->gpsw.mask & vcpu->arch.pfault_select) !=
	    vcpu->arch.pfault_compare)
		return 0;
	if (psw_extint_disabled(vcpu))
		return 0;
	if (kvm_s390_vcpu_has_irq(vcpu, 0))
		return 0;
	if (!(vcpu->arch.sie_block->gcr[0] & 0x200ul))
		return 0;
	if (!vcpu->arch.gmap->pfault_enabled)
		return 0;

	hva = gfn_to_hva(vcpu->kvm, gpa_to_gfn(current->thread.gmap_addr));
	hva += current->thread.gmap_addr & ~PAGE_MASK;
	if (read_guest_real(vcpu, vcpu->arch.pfault_token, &arch.pfault_token, 8))
		return 0;

	rc = kvm_setup_async_pf(vcpu, current->thread.gmap_addr, hva, &arch);
	return rc;
}

3fb4c40f 1856static int vcpu_pre_run(struct kvm_vcpu *vcpu)
b0c632db 1857{
3fb4c40f 1858 int rc, cpuflags;
e168bf8d 1859
3c038e6b
DD
1860 /*
 1861 * On s390, notifications for arriving pages will be delivered directly
 1862 * to the guest, but the housekeeping for completed pfaults is
 1863 * handled outside the worker.
1864 */
1865 kvm_check_async_pf_completion(vcpu);
1866
5a32c1af 1867 memcpy(&vcpu->arch.sie_block->gg14, &vcpu->run->s.regs.gprs[14], 16);
b0c632db
HC
1868
1869 if (need_resched())
1870 schedule();
1871
d3a73acb 1872 if (test_cpu_flag(CIF_MCCK_PENDING))
71cde587
CB
1873 s390_handle_mcck();
1874
79395031
JF
1875 if (!kvm_is_ucontrol(vcpu->kvm)) {
1876 rc = kvm_s390_deliver_pending_interrupts(vcpu);
1877 if (rc)
1878 return rc;
1879 }
0ff31867 1880
2c70fe44
CB
1881 rc = kvm_s390_handle_requests(vcpu);
1882 if (rc)
1883 return rc;
1884
27291e21
DH
1885 if (guestdbg_enabled(vcpu)) {
1886 kvm_s390_backup_guest_per_regs(vcpu);
1887 kvm_s390_patch_guest_per_regs(vcpu);
1888 }
1889
b0c632db 1890 vcpu->arch.sie_block->icptcode = 0;
3fb4c40f
TH
1891 cpuflags = atomic_read(&vcpu->arch.sie_block->cpuflags);
1892 VCPU_EVENT(vcpu, 6, "entering sie flags %x", cpuflags);
1893 trace_kvm_s390_sie_enter(vcpu, cpuflags);
2b29a9fd 1894
3fb4c40f
TH
1895 return 0;
1896}
1897
492d8642
TH
1898static int vcpu_post_run_fault_in_sie(struct kvm_vcpu *vcpu)
1899{
1900 psw_t *psw = &vcpu->arch.sie_block->gpsw;
1901 u8 opcode;
1902 int rc;
1903
1904 VCPU_EVENT(vcpu, 3, "%s", "fault in sie instruction");
1905 trace_kvm_s390_sie_fault(vcpu);
1906
1907 /*
1908 * We want to inject an addressing exception, which is defined as a
1909 * suppressing or terminating exception. However, since we came here
1910 * by a DAT access exception, the PSW still points to the faulting
1911 * instruction since DAT exceptions are nullifying. So we've got
1912 * to look up the current opcode to get the length of the instruction
1913 * to be able to forward the PSW.
1914 */
8ae04b8f 1915 rc = read_guest(vcpu, psw->addr, 0, &opcode, 1);
492d8642
TH
1916 if (rc)
1917 return kvm_s390_inject_prog_cond(vcpu, rc);
1918 psw->addr = __rewind_psw(*psw, -insn_length(opcode));
1919
1920 return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
1921}
1922
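The double negation above is easy to misread: __rewind_psw() with a negative length moves the PSW forward by one instruction. The forwarding works because the instruction length is architected in the two leftmost bits of the first opcode byte. A minimal sketch of that mapping (illustrative only, not the kernel's insn_length() helper):

/*
 * Sketch: architected mapping from the two leftmost opcode bits
 * to the s390 instruction length in bytes.
 */
static int sketch_insn_length(unsigned char opcode)
{
	switch (opcode >> 6) {
	case 0:
		return 2;	/* e.g. RR format */
	case 1:
	case 2:
		return 4;	/* e.g. RX, RS, SI formats */
	default:
		return 6;	/* e.g. SS, RIL formats */
	}
}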
3fb4c40f
TH
1923static int vcpu_post_run(struct kvm_vcpu *vcpu, int exit_reason)
1924{
24eb3a82 1925 int rc = -1;
2b29a9fd
DD
1926
1927 VCPU_EVENT(vcpu, 6, "exit sie icptcode %d",
1928 vcpu->arch.sie_block->icptcode);
1929 trace_kvm_s390_sie_exit(vcpu, vcpu->arch.sie_block->icptcode);
1930
27291e21
DH
1931 if (guestdbg_enabled(vcpu))
1932 kvm_s390_restore_guest_per_regs(vcpu);
1933
3fb4c40f 1934 if (exit_reason >= 0) {
7c470539 1935 rc = 0;
210b1607
TH
1936 } else if (kvm_is_ucontrol(vcpu->kvm)) {
1937 vcpu->run->exit_reason = KVM_EXIT_S390_UCONTROL;
1938 vcpu->run->s390_ucontrol.trans_exc_code =
1939 current->thread.gmap_addr;
1940 vcpu->run->s390_ucontrol.pgm_code = 0x10;
1941 rc = -EREMOTE;
24eb3a82
DD
1942
1943 } else if (current->thread.gmap_pfault) {
3c038e6b 1944 trace_kvm_s390_major_guest_pfault(vcpu);
24eb3a82 1945 current->thread.gmap_pfault = 0;
fa576c58 1946 if (kvm_arch_setup_async_pf(vcpu)) {
24eb3a82 1947 rc = 0;
fa576c58
TH
1948 } else {
1949 gpa_t gpa = current->thread.gmap_addr;
1950 rc = kvm_arch_fault_in_page(vcpu, gpa, 1);
1951 }
24eb3a82
DD
1952 }
1953
492d8642
TH
1954 if (rc == -1)
1955 rc = vcpu_post_run_fault_in_sie(vcpu);
b0c632db 1956
5a32c1af 1957 memcpy(&vcpu->run->s.regs.gprs[14], &vcpu->arch.sie_block->gg14, 16);
3fb4c40f 1958
a76ccff6
TH
1959 if (rc == 0) {
1960 if (kvm_is_ucontrol(vcpu->kvm))
2955c83f
CB
1961 /* Don't exit for host interrupts. */
1962 rc = vcpu->arch.sie_block->icptcode ? -EOPNOTSUPP : 0;
a76ccff6
TH
1963 else
1964 rc = kvm_handle_sie_intercept(vcpu);
1965 }
1966
3fb4c40f
TH
1967 return rc;
1968}
1969
1970static int __vcpu_run(struct kvm_vcpu *vcpu)
1971{
1972 int rc, exit_reason;
1973
800c1065
TH
1974 /*
 1975 * We try to hold kvm->srcu during most of vcpu_run (except when
 1976 * running the guest), so that memslots (and other stuff) are protected
1977 */
1978 vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
1979
a76ccff6
TH
1980 do {
1981 rc = vcpu_pre_run(vcpu);
1982 if (rc)
1983 break;
3fb4c40f 1984
800c1065 1985 srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
a76ccff6
TH
1986 /*
 1987 * As PF_VCPU will be used in the fault handler, there must be
 1988 * no uaccess between guest_enter and guest_exit.
1989 */
1990 preempt_disable();
1991 kvm_guest_enter();
1992 preempt_enable();
1993 exit_reason = sie64a(vcpu->arch.sie_block,
1994 vcpu->run->s.regs.gprs);
1995 kvm_guest_exit();
800c1065 1996 vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
a76ccff6
TH
1997
1998 rc = vcpu_post_run(vcpu, exit_reason);
27291e21 1999 } while (!signal_pending(current) && !guestdbg_exit_pending(vcpu) && !rc);
3fb4c40f 2000
800c1065 2001 srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
e168bf8d 2002 return rc;
b0c632db
HC
2003}
2004
b028ee3e
DH
2005static void sync_regs(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
2006{
2007 vcpu->arch.sie_block->gpsw.mask = kvm_run->psw_mask;
2008 vcpu->arch.sie_block->gpsw.addr = kvm_run->psw_addr;
2009 if (kvm_run->kvm_dirty_regs & KVM_SYNC_PREFIX)
2010 kvm_s390_set_prefix(vcpu, kvm_run->s.regs.prefix);
2011 if (kvm_run->kvm_dirty_regs & KVM_SYNC_CRS) {
2012 memcpy(&vcpu->arch.sie_block->gcr, &kvm_run->s.regs.crs, 128);
d3d692c8
DH
2013 /* some control register changes require a tlb flush */
2014 kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
b028ee3e
DH
2015 }
2016 if (kvm_run->kvm_dirty_regs & KVM_SYNC_ARCH0) {
2017 vcpu->arch.sie_block->cputm = kvm_run->s.regs.cputm;
2018 vcpu->arch.sie_block->ckc = kvm_run->s.regs.ckc;
2019 vcpu->arch.sie_block->todpr = kvm_run->s.regs.todpr;
2020 vcpu->arch.sie_block->pp = kvm_run->s.regs.pp;
2021 vcpu->arch.sie_block->gbea = kvm_run->s.regs.gbea;
2022 }
2023 if (kvm_run->kvm_dirty_regs & KVM_SYNC_PFAULT) {
2024 vcpu->arch.pfault_token = kvm_run->s.regs.pft;
2025 vcpu->arch.pfault_select = kvm_run->s.regs.pfs;
2026 vcpu->arch.pfault_compare = kvm_run->s.regs.pfc;
9fbd8082
DH
2027 if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
2028 kvm_clear_async_pf_completion_queue(vcpu);
b028ee3e
DH
2029 }
2030 kvm_run->kvm_dirty_regs = 0;
2031}
2032
2033static void store_regs(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
2034{
2035 kvm_run->psw_mask = vcpu->arch.sie_block->gpsw.mask;
2036 kvm_run->psw_addr = vcpu->arch.sie_block->gpsw.addr;
2037 kvm_run->s.regs.prefix = kvm_s390_get_prefix(vcpu);
2038 memcpy(&kvm_run->s.regs.crs, &vcpu->arch.sie_block->gcr, 128);
2039 kvm_run->s.regs.cputm = vcpu->arch.sie_block->cputm;
2040 kvm_run->s.regs.ckc = vcpu->arch.sie_block->ckc;
2041 kvm_run->s.regs.todpr = vcpu->arch.sie_block->todpr;
2042 kvm_run->s.regs.pp = vcpu->arch.sie_block->pp;
2043 kvm_run->s.regs.gbea = vcpu->arch.sie_block->gbea;
2044 kvm_run->s.regs.pft = vcpu->arch.pfault_token;
2045 kvm_run->s.regs.pfs = vcpu->arch.pfault_select;
2046 kvm_run->s.regs.pfc = vcpu->arch.pfault_compare;
2047}
2048
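For context, a minimal userspace sketch of the other end of this sync protocol: mark the registers you touched in kvm_run->kvm_dirty_regs before KVM_RUN so sync_regs() applies them, and read the copies that store_regs() wrote back afterwards. The vcpu fd and the mmap'ed kvm_run pointer are assumed to come from the usual KVM bring-up.

#include <linux/kvm.h>
#include <sys/ioctl.h>

/* Sketch: run the vcpu with an updated prefix register. */
static int run_with_prefix(int vcpu_fd, struct kvm_run *run, __u32 prefix)
{
	run->s.regs.prefix = prefix;
	run->kvm_dirty_regs |= KVM_SYNC_PREFIX;	/* consumed by sync_regs() */

	if (ioctl(vcpu_fd, KVM_RUN, 0) < 0)
		return -1;

	/* store_regs() has refreshed run->s.regs with the current state */
	return 0;
}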
b0c632db
HC
2049int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
2050{
8f2abe6a 2051 int rc;
b0c632db
HC
2052 sigset_t sigsaved;
2053
27291e21
DH
2054 if (guestdbg_exit_pending(vcpu)) {
2055 kvm_s390_prepare_debug_exit(vcpu);
2056 return 0;
2057 }
2058
b0c632db
HC
2059 if (vcpu->sigset_active)
2060 sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);
2061
6352e4d2
DH
2062 if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm)) {
2063 kvm_s390_vcpu_start(vcpu);
2064 } else if (is_vcpu_stopped(vcpu)) {
2065 pr_err_ratelimited("kvm-s390: can't run stopped vcpu %d\n",
2066 vcpu->vcpu_id);
2067 return -EINVAL;
2068 }
b0c632db 2069
b028ee3e 2070 sync_regs(vcpu, kvm_run);
d7b0b5eb 2071
dab4079d 2072 might_fault();
a76ccff6 2073 rc = __vcpu_run(vcpu);
9ace903d 2074
b1d16c49
CE
2075 if (signal_pending(current) && !rc) {
2076 kvm_run->exit_reason = KVM_EXIT_INTR;
8f2abe6a 2077 rc = -EINTR;
b1d16c49 2078 }
8f2abe6a 2079
27291e21
DH
2080 if (guestdbg_exit_pending(vcpu) && !rc) {
2081 kvm_s390_prepare_debug_exit(vcpu);
2082 rc = 0;
2083 }
2084
b8e660b8 2085 if (rc == -EOPNOTSUPP) {
8f2abe6a
CB
2086 /* intercept cannot be handled in-kernel, prepare kvm-run */
2087 kvm_run->exit_reason = KVM_EXIT_S390_SIEIC;
2088 kvm_run->s390_sieic.icptcode = vcpu->arch.sie_block->icptcode;
8f2abe6a
CB
2089 kvm_run->s390_sieic.ipa = vcpu->arch.sie_block->ipa;
2090 kvm_run->s390_sieic.ipb = vcpu->arch.sie_block->ipb;
2091 rc = 0;
2092 }
2093
2094 if (rc == -EREMOTE) {
 2095 /* intercept was handled, but userspace support is needed;
 2096 * kvm_run has been prepared by the handler */
2097 rc = 0;
2098 }
b0c632db 2099
b028ee3e 2100 store_regs(vcpu, kvm_run);
d7b0b5eb 2101
b0c632db
HC
2102 if (vcpu->sigset_active)
2103 sigprocmask(SIG_SETMASK, &sigsaved, NULL);
2104
b0c632db 2105 vcpu->stat.exit_userspace++;
7e8e6ab4 2106 return rc;
b0c632db
HC
2107}
2108
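To see where the -EOPNOTSUPP and -EREMOTE conversions above end up, here is a hedged sketch of a userspace dispatch loop consuming the prepared kvm_run; vcpu_fd and run are assumed to be set up, and handle_sieic() is a hypothetical handler:

#include <errno.h>
#include <linux/kvm.h>
#include <sys/ioctl.h>

/* Sketch: minimal vcpu run loop. */
static void run_loop(int vcpu_fd, struct kvm_run *run)
{
	for (;;) {
		if (ioctl(vcpu_fd, KVM_RUN, 0) < 0 && errno != EINTR)
			break;			/* real error */

		switch (run->exit_reason) {
		case KVM_EXIT_S390_SIEIC:	/* the -EOPNOTSUPP path */
			handle_sieic(run->s390_sieic.icptcode,
				     run->s390_sieic.ipa,
				     run->s390_sieic.ipb);
			break;
		case KVM_EXIT_INTR:		/* interrupted by a signal */
			break;
		default:
			break;			/* mmio, ucontrol, ... */
		}
	}
}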
b0c632db
HC
2109/*
2110 * store status at address
 2111 * we have two special cases:
2112 * KVM_S390_STORE_STATUS_NOADDR: -> 0x1200 on 64 bit
2113 * KVM_S390_STORE_STATUS_PREFIXED: -> prefix
2114 */
d0bce605 2115int kvm_s390_store_status_unloaded(struct kvm_vcpu *vcpu, unsigned long gpa)
b0c632db 2116{
092670cd 2117 unsigned char archmode = 1;
fda902cb 2118 unsigned int px;
178bd789 2119 u64 clkcomp;
d0bce605 2120 int rc;
b0c632db 2121
d0bce605
HC
2122 if (gpa == KVM_S390_STORE_STATUS_NOADDR) {
2123 if (write_guest_abs(vcpu, 163, &archmode, 1))
b0c632db 2124 return -EFAULT;
d0bce605
HC
2125 gpa = SAVE_AREA_BASE;
2126 } else if (gpa == KVM_S390_STORE_STATUS_PREFIXED) {
2127 if (write_guest_real(vcpu, 163, &archmode, 1))
b0c632db 2128 return -EFAULT;
d0bce605
HC
2129 gpa = kvm_s390_real_to_abs(vcpu, SAVE_AREA_BASE);
2130 }
2131 rc = write_guest_abs(vcpu, gpa + offsetof(struct save_area, fp_regs),
2132 vcpu->arch.guest_fpregs.fprs, 128);
2133 rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, gp_regs),
2134 vcpu->run->s.regs.gprs, 128);
2135 rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, psw),
2136 &vcpu->arch.sie_block->gpsw, 16);
fda902cb 2137 px = kvm_s390_get_prefix(vcpu);
d0bce605 2138 rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, pref_reg),
fda902cb 2139 &px, 4);
d0bce605
HC
2140 rc |= write_guest_abs(vcpu,
2141 gpa + offsetof(struct save_area, fp_ctrl_reg),
2142 &vcpu->arch.guest_fpregs.fpc, 4);
2143 rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, tod_reg),
2144 &vcpu->arch.sie_block->todpr, 4);
2145 rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, timer),
2146 &vcpu->arch.sie_block->cputm, 8);
178bd789 2147 clkcomp = vcpu->arch.sie_block->ckc >> 8;
d0bce605
HC
2148 rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, clk_cmp),
2149 &clkcomp, 8);
2150 rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, acc_regs),
2151 &vcpu->run->s.regs.acrs, 64);
2152 rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, ctrl_regs),
2153 &vcpu->arch.sie_block->gcr, 128);
2154 return rc ? -EFAULT : 0;
b0c632db
HC
2155}
2156
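The layout being filled in can be read off the write_guest_abs() calls above; each store targets offsetof(struct save_area, ...) with an explicit size. As a summary (sizes taken from the calls, padding fields of the architected save area omitted):

/*
 * Save area contents as stored above:
 *   fp_regs      128 bytes  (16 x u64 floating point registers)
 *   gp_regs      128 bytes  (16 x u64 general purpose registers)
 *   psw           16 bytes  (guest PSW)
 *   pref_reg       4 bytes  (prefix register)
 *   fp_ctrl_reg    4 bytes  (floating point control)
 *   tod_reg        4 bytes  (TOD programmable register)
 *   timer          8 bytes  (CPU timer)
 *   clk_cmp        8 bytes  (clock comparator, stored as ckc >> 8)
 *   acc_regs      64 bytes  (16 x u32 access registers)
 *   ctrl_regs    128 bytes  (16 x u64 control registers)
 */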
e879892c
TH
2157int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
2158{
2159 /*
2160 * The guest FPRS and ACRS are in the host FPRS/ACRS due to the lazy
 2161 * copying in vcpu load/put. Let's update our copies before we save
 2162 * them into the save area
2163 */
2164 save_fp_ctl(&vcpu->arch.guest_fpregs.fpc);
2165 save_fp_regs(vcpu->arch.guest_fpregs.fprs);
2166 save_access_regs(vcpu->run->s.regs.acrs);
2167
2168 return kvm_s390_store_status_unloaded(vcpu, addr);
2169}
2170
bc17de7c
EF
2171/*
2172 * store additional status at address
2173 */
2174int kvm_s390_store_adtl_status_unloaded(struct kvm_vcpu *vcpu,
2175 unsigned long gpa)
2176{
2177 /* Only bits 0-53 are used for address formation */
2178 if (!(gpa & ~0x3ff))
2179 return 0;
2180
2181 return write_guest_abs(vcpu, gpa & ~0x3ff,
2182 (void *)&vcpu->run->s.regs.vrs, 512);
2183}
2184
2185int kvm_s390_vcpu_store_adtl_status(struct kvm_vcpu *vcpu, unsigned long addr)
2186{
2187 if (!test_kvm_facility(vcpu->kvm, 129))
2188 return 0;
2189
2190 /*
 2191 * The guest VXRS are in the host VXRS due to the lazy
 2192 * copying in vcpu load/put. Let's update our copies before we save
 2193 * them into the save area.
2194 */
2195 save_vx_regs((__vector128 *)&vcpu->run->s.regs.vrs);
2196
2197 return kvm_s390_store_adtl_status_unloaded(vcpu, addr);
2198}
2199
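The ~0x3ff masking above mirrors the architected address formation: only bits 0-53 of the parameter contribute, so the 512-byte vector save area is always stored 1K-aligned. A small illustration with a hypothetical value:

/* Sketch: address formation for the additional-status area. */
unsigned long param = 0x12345;		/* hypothetical SIGP parameter */
unsigned long dest  = param & ~0x3ffUL;	/* -> 0x12000, 1K-aligned */
/* if (param & ~0x3ff) == 0, the handler returns success w/o storing */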
8ad35755
DH
2200static void __disable_ibs_on_vcpu(struct kvm_vcpu *vcpu)
2201{
2202 kvm_check_request(KVM_REQ_ENABLE_IBS, vcpu);
2203 kvm_make_request(KVM_REQ_DISABLE_IBS, vcpu);
2204 exit_sie_sync(vcpu);
2205}
2206
2207static void __disable_ibs_on_all_vcpus(struct kvm *kvm)
2208{
2209 unsigned int i;
2210 struct kvm_vcpu *vcpu;
2211
2212 kvm_for_each_vcpu(i, vcpu, kvm) {
2213 __disable_ibs_on_vcpu(vcpu);
2214 }
2215}
2216
2217static void __enable_ibs_on_vcpu(struct kvm_vcpu *vcpu)
2218{
2219 kvm_check_request(KVM_REQ_DISABLE_IBS, vcpu);
2220 kvm_make_request(KVM_REQ_ENABLE_IBS, vcpu);
2221 exit_sie_sync(vcpu);
2222}
2223
6852d7b6
DH
2224void kvm_s390_vcpu_start(struct kvm_vcpu *vcpu)
2225{
8ad35755
DH
2226 int i, online_vcpus, started_vcpus = 0;
2227
2228 if (!is_vcpu_stopped(vcpu))
2229 return;
2230
6852d7b6 2231 trace_kvm_s390_vcpu_start_stop(vcpu->vcpu_id, 1);
8ad35755 2232 /* Only one cpu at a time may enter/leave the STOPPED state. */
433b9ee4 2233 spin_lock(&vcpu->kvm->arch.start_stop_lock);
8ad35755
DH
2234 online_vcpus = atomic_read(&vcpu->kvm->online_vcpus);
2235
2236 for (i = 0; i < online_vcpus; i++) {
2237 if (!is_vcpu_stopped(vcpu->kvm->vcpus[i]))
2238 started_vcpus++;
2239 }
2240
2241 if (started_vcpus == 0) {
2242 /* we're the only active VCPU -> speed it up */
2243 __enable_ibs_on_vcpu(vcpu);
2244 } else if (started_vcpus == 1) {
2245 /*
2246 * As we are starting a second VCPU, we have to disable
2247 * the IBS facility on all VCPUs to remove potentially
 2248 * outstanding ENABLE requests.
2249 */
2250 __disable_ibs_on_all_vcpus(vcpu->kvm);
2251 }
2252
6852d7b6 2253 atomic_clear_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);
8ad35755
DH
2254 /*
2255 * Another VCPU might have used IBS while we were offline.
2256 * Let's play safe and flush the VCPU at startup.
2257 */
d3d692c8 2258 kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
433b9ee4 2259 spin_unlock(&vcpu->kvm->arch.start_stop_lock);
8ad35755 2260 return;
6852d7b6
DH
2261}
2262
2263void kvm_s390_vcpu_stop(struct kvm_vcpu *vcpu)
2264{
8ad35755
DH
2265 int i, online_vcpus, started_vcpus = 0;
2266 struct kvm_vcpu *started_vcpu = NULL;
2267
2268 if (is_vcpu_stopped(vcpu))
2269 return;
2270
6852d7b6 2271 trace_kvm_s390_vcpu_start_stop(vcpu->vcpu_id, 0);
8ad35755 2272 /* Only one cpu at a time may enter/leave the STOPPED state. */
433b9ee4 2273 spin_lock(&vcpu->kvm->arch.start_stop_lock);
8ad35755
DH
2274 online_vcpus = atomic_read(&vcpu->kvm->online_vcpus);
2275
32f5ff63 2276 /* SIGP STOP and SIGP STOP AND STORE STATUS have been fully processed */
6cddd432 2277 kvm_s390_clear_stop_irq(vcpu);
32f5ff63 2278
6cddd432 2279 atomic_set_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);
8ad35755
DH
2280 __disable_ibs_on_vcpu(vcpu);
2281
2282 for (i = 0; i < online_vcpus; i++) {
2283 if (!is_vcpu_stopped(vcpu->kvm->vcpus[i])) {
2284 started_vcpus++;
2285 started_vcpu = vcpu->kvm->vcpus[i];
2286 }
2287 }
2288
2289 if (started_vcpus == 1) {
2290 /*
2291 * As we only have one VCPU left, we want to enable the
2292 * IBS facility for that VCPU to speed it up.
2293 */
2294 __enable_ibs_on_vcpu(started_vcpu);
2295 }
2296
433b9ee4 2297 spin_unlock(&vcpu->kvm->arch.start_stop_lock);
8ad35755 2298 return;
6852d7b6
DH
2299}
2300
d6712df9
CH
2301static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
2302 struct kvm_enable_cap *cap)
2303{
2304 int r;
2305
2306 if (cap->flags)
2307 return -EINVAL;
2308
2309 switch (cap->cap) {
fa6b7fe9
CH
2310 case KVM_CAP_S390_CSS_SUPPORT:
2311 if (!vcpu->kvm->arch.css_support) {
2312 vcpu->kvm->arch.css_support = 1;
2313 trace_kvm_s390_enable_css(vcpu->kvm);
2314 }
2315 r = 0;
2316 break;
d6712df9
CH
2317 default:
2318 r = -EINVAL;
2319 break;
2320 }
2321 return r;
2322}
2323
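A hedged userspace sketch of driving this handler: flags must be zero or the ioctl fails with -EINVAL, and KVM_CAP_S390_CSS_SUPPORT is the only capability accepted here. The vcpu fd is assumed.

#include <string.h>
#include <linux/kvm.h>
#include <sys/ioctl.h>

/* Sketch: enable in-kernel channel subsystem support. */
static int enable_css(int vcpu_fd)
{
	struct kvm_enable_cap cap;

	memset(&cap, 0, sizeof(cap));	/* cap.flags must stay zero */
	cap.cap = KVM_CAP_S390_CSS_SUPPORT;
	return ioctl(vcpu_fd, KVM_ENABLE_CAP, &cap);
}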
41408c28
TH
2324static long kvm_s390_guest_mem_op(struct kvm_vcpu *vcpu,
2325 struct kvm_s390_mem_op *mop)
2326{
2327 void __user *uaddr = (void __user *)mop->buf;
2328 void *tmpbuf = NULL;
2329 int r, srcu_idx;
2330 const u64 supported_flags = KVM_S390_MEMOP_F_INJECT_EXCEPTION
2331 | KVM_S390_MEMOP_F_CHECK_ONLY;
2332
2333 if (mop->flags & ~supported_flags)
2334 return -EINVAL;
2335
2336 if (mop->size > MEM_OP_MAX_SIZE)
2337 return -E2BIG;
2338
2339 if (!(mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY)) {
2340 tmpbuf = vmalloc(mop->size);
2341 if (!tmpbuf)
2342 return -ENOMEM;
2343 }
2344
2345 srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
2346
2347 switch (mop->op) {
2348 case KVM_S390_MEMOP_LOGICAL_READ:
2349 if (mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY) {
2350 r = check_gva_range(vcpu, mop->gaddr, mop->ar, mop->size, false);
2351 break;
2352 }
2353 r = read_guest(vcpu, mop->gaddr, mop->ar, tmpbuf, mop->size);
2354 if (r == 0) {
2355 if (copy_to_user(uaddr, tmpbuf, mop->size))
2356 r = -EFAULT;
2357 }
2358 break;
2359 case KVM_S390_MEMOP_LOGICAL_WRITE:
2360 if (mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY) {
2361 r = check_gva_range(vcpu, mop->gaddr, mop->ar, mop->size, true);
2362 break;
2363 }
2364 if (copy_from_user(tmpbuf, uaddr, mop->size)) {
2365 r = -EFAULT;
2366 break;
2367 }
2368 r = write_guest(vcpu, mop->gaddr, mop->ar, tmpbuf, mop->size);
2369 break;
2370 default:
2371 r = -EINVAL;
2372 }
2373
2374 srcu_read_unlock(&vcpu->kvm->srcu, srcu_idx);
2375
2376 if (r > 0 && (mop->flags & KVM_S390_MEMOP_F_INJECT_EXCEPTION) != 0)
2377 kvm_s390_inject_prog_irq(vcpu, &vcpu->arch.pgm);
2378
2379 vfree(tmpbuf);
2380 return r;
2381}
2382
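For reference, a minimal userspace sketch of this memory operation interface; struct kvm_s390_mem_op comes from the uapi header, while the vcpu fd, address and buffer are assumptions. A positive return value is the program interruption code the access would have caused in the guest.

#include <string.h>
#include <linux/kvm.h>
#include <sys/ioctl.h>

/* Sketch: read 'len' bytes from guest logical address 'gaddr'. */
static int guest_read(int vcpu_fd, __u64 gaddr, void *buf, __u32 len)
{
	struct kvm_s390_mem_op op;

	memset(&op, 0, sizeof(op));
	op.gaddr = gaddr;
	op.size = len;		/* capped at 65536 by the kernel */
	op.op = KVM_S390_MEMOP_LOGICAL_READ;
	op.buf = (__u64)(unsigned long)buf;
	op.ar = 0;		/* access register 0 */
	/* op.flags may add KVM_S390_MEMOP_F_CHECK_ONLY or
	 * KVM_S390_MEMOP_F_INJECT_EXCEPTION, see above */
	return ioctl(vcpu_fd, KVM_S390_MEM_OP, &op);
}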
b0c632db
HC
2383long kvm_arch_vcpu_ioctl(struct file *filp,
2384 unsigned int ioctl, unsigned long arg)
2385{
2386 struct kvm_vcpu *vcpu = filp->private_data;
2387 void __user *argp = (void __user *)arg;
800c1065 2388 int idx;
bc923cc9 2389 long r;
b0c632db 2390
93736624
AK
2391 switch (ioctl) {
2392 case KVM_S390_INTERRUPT: {
ba5c1e9b 2393 struct kvm_s390_interrupt s390int;
383d0b05 2394 struct kvm_s390_irq s390irq;
ba5c1e9b 2395
93736624 2396 r = -EFAULT;
ba5c1e9b 2397 if (copy_from_user(&s390int, argp, sizeof(s390int)))
93736624 2398 break;
383d0b05
JF
2399 if (s390int_to_s390irq(&s390int, &s390irq))
2400 return -EINVAL;
2401 r = kvm_s390_inject_vcpu(vcpu, &s390irq);
93736624 2402 break;
ba5c1e9b 2403 }
b0c632db 2404 case KVM_S390_STORE_STATUS:
800c1065 2405 idx = srcu_read_lock(&vcpu->kvm->srcu);
bc923cc9 2406 r = kvm_s390_vcpu_store_status(vcpu, arg);
800c1065 2407 srcu_read_unlock(&vcpu->kvm->srcu, idx);
bc923cc9 2408 break;
b0c632db
HC
2409 case KVM_S390_SET_INITIAL_PSW: {
2410 psw_t psw;
2411
bc923cc9 2412 r = -EFAULT;
b0c632db 2413 if (copy_from_user(&psw, argp, sizeof(psw)))
bc923cc9
AK
2414 break;
2415 r = kvm_arch_vcpu_ioctl_set_initial_psw(vcpu, psw);
2416 break;
b0c632db
HC
2417 }
2418 case KVM_S390_INITIAL_RESET:
bc923cc9
AK
2419 r = kvm_arch_vcpu_ioctl_initial_reset(vcpu);
2420 break;
14eebd91
CO
2421 case KVM_SET_ONE_REG:
2422 case KVM_GET_ONE_REG: {
2423 struct kvm_one_reg reg;
2424 r = -EFAULT;
2425 if (copy_from_user(&reg, argp, sizeof(reg)))
2426 break;
2427 if (ioctl == KVM_SET_ONE_REG)
2428 r = kvm_arch_vcpu_ioctl_set_one_reg(vcpu, &reg);
2429 else
2430 r = kvm_arch_vcpu_ioctl_get_one_reg(vcpu, &reg);
2431 break;
2432 }
27e0393f
CO
2433#ifdef CONFIG_KVM_S390_UCONTROL
2434 case KVM_S390_UCAS_MAP: {
2435 struct kvm_s390_ucas_mapping ucasmap;
2436
2437 if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
2438 r = -EFAULT;
2439 break;
2440 }
2441
2442 if (!kvm_is_ucontrol(vcpu->kvm)) {
2443 r = -EINVAL;
2444 break;
2445 }
2446
2447 r = gmap_map_segment(vcpu->arch.gmap, ucasmap.user_addr,
2448 ucasmap.vcpu_addr, ucasmap.length);
2449 break;
2450 }
2451 case KVM_S390_UCAS_UNMAP: {
2452 struct kvm_s390_ucas_mapping ucasmap;
2453
2454 if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
2455 r = -EFAULT;
2456 break;
2457 }
2458
2459 if (!kvm_is_ucontrol(vcpu->kvm)) {
2460 r = -EINVAL;
2461 break;
2462 }
2463
2464 r = gmap_unmap_segment(vcpu->arch.gmap, ucasmap.vcpu_addr,
2465 ucasmap.length);
2466 break;
2467 }
2468#endif
ccc7910f 2469 case KVM_S390_VCPU_FAULT: {
527e30b4 2470 r = gmap_fault(vcpu->arch.gmap, arg, 0);
ccc7910f
CO
2471 break;
2472 }
d6712df9
CH
2473 case KVM_ENABLE_CAP:
2474 {
2475 struct kvm_enable_cap cap;
2476 r = -EFAULT;
2477 if (copy_from_user(&cap, argp, sizeof(cap)))
2478 break;
2479 r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap);
2480 break;
2481 }
41408c28
TH
2482 case KVM_S390_MEM_OP: {
2483 struct kvm_s390_mem_op mem_op;
2484
2485 if (copy_from_user(&mem_op, argp, sizeof(mem_op)) == 0)
2486 r = kvm_s390_guest_mem_op(vcpu, &mem_op);
2487 else
2488 r = -EFAULT;
2489 break;
2490 }
b0c632db 2491 default:
3e6afcf1 2492 r = -ENOTTY;
b0c632db 2493 }
bc923cc9 2494 return r;
b0c632db
HC
2495}
2496
5b1c1493
CO
2497int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
2498{
2499#ifdef CONFIG_KVM_S390_UCONTROL
2500 if ((vmf->pgoff == KVM_S390_SIE_PAGE_OFFSET)
2501 && (kvm_is_ucontrol(vcpu->kvm))) {
2502 vmf->page = virt_to_page(vcpu->arch.sie_block);
2503 get_page(vmf->page);
2504 return 0;
2505 }
2506#endif
2507 return VM_FAULT_SIGBUS;
2508}
2509
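The fault handler above backs exactly one special mapping: for user-controlled VMs, userspace may mmap the SIE control block through the vcpu fd. A hedged sketch, assuming the vcpu fd exists:

#include <sys/mman.h>
#include <unistd.h>
#include <linux/kvm.h>

/* Sketch: map the vcpu's SIE control block (ucontrol VMs only);
 * any other offset takes the VM_FAULT_SIGBUS path above. */
static void *map_sie_block(int vcpu_fd)
{
	long psize = sysconf(_SC_PAGESIZE);

	return mmap(NULL, psize, PROT_READ | PROT_WRITE, MAP_SHARED,
		    vcpu_fd, KVM_S390_SIE_PAGE_OFFSET * psize);
}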
5587027c
AK
2510int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
2511 unsigned long npages)
db3fe4eb
TY
2512{
2513 return 0;
2514}
2515
b0c632db 2516/* Section: memory related */
f7784b8e
MT
2517int kvm_arch_prepare_memory_region(struct kvm *kvm,
2518 struct kvm_memory_slot *memslot,
7b6195a9
TY
2519 struct kvm_userspace_memory_region *mem,
2520 enum kvm_mr_change change)
b0c632db 2521{
dd2887e7
NW
 2522 /* A few sanity checks. We can have memory slots which have to start
 2523 and end at a segment boundary (1MB). The memory in userland may be
 2524 fragmented into various different vmas. It is okay to mmap() and
 2525 munmap() in this slot at any time after this call */
b0c632db 2526
598841ca 2527 if (mem->userspace_addr & 0xffffful)
b0c632db
HC
2528 return -EINVAL;
2529
598841ca 2530 if (mem->memory_size & 0xffffful)
b0c632db
HC
2531 return -EINVAL;
2532
f7784b8e
MT
2533 return 0;
2534}
2535
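Translated to the userspace API, the checks above mean KVM_SET_USER_MEMORY_REGION on s390 only accepts userspace addresses and sizes that are multiples of the 1MB segment size. A hedged sketch, with the vm fd and backing mapping assumed:

#include <linux/kvm.h>
#include <sys/ioctl.h>

#define SEG_SIZE (1UL << 20)	/* 1MB segment */

/* Sketch: register a slot; size and userspace_addr must be multiples
 * of SEG_SIZE or the ioctl fails with -EINVAL per the checks above. */
static int add_slot(int vm_fd, __u64 gpa, void *host, __u64 size)
{
	struct kvm_userspace_memory_region mem = {
		.slot = 0,
		.guest_phys_addr = gpa,
		.memory_size = size,
		.userspace_addr = (__u64)(unsigned long)host,
	};

	return ioctl(vm_fd, KVM_SET_USER_MEMORY_REGION, &mem);
}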
2536void kvm_arch_commit_memory_region(struct kvm *kvm,
2537 struct kvm_userspace_memory_region *mem,
8482644a
TY
2538 const struct kvm_memory_slot *old,
2539 enum kvm_mr_change change)
f7784b8e 2540{
f7850c92 2541 int rc;
f7784b8e 2542
2cef4deb
CB
2543 /* If the basics of the memslot do not change, we do not want
2544 * to update the gmap. Every update causes several unnecessary
2545 * segment translation exceptions. This is usually handled just
2546 * fine by the normal fault handler + gmap, but it will also
2547 * cause faults on the prefix page of running guest CPUs.
2548 */
2549 if (old->userspace_addr == mem->userspace_addr &&
2550 old->base_gfn * PAGE_SIZE == mem->guest_phys_addr &&
2551 old->npages * PAGE_SIZE == mem->memory_size)
2552 return;
598841ca
CO
2553
2554 rc = gmap_map_segment(kvm->arch.gmap, mem->userspace_addr,
2555 mem->guest_phys_addr, mem->memory_size);
2556 if (rc)
f7850c92 2557 printk(KERN_WARNING "kvm-s390: failed to commit memory region\n");
598841ca 2558 return;
b0c632db
HC
2559}
2560
b0c632db
HC
2561static int __init kvm_s390_init(void)
2562{
9d8d5786 2563 return kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
b0c632db
HC
2564}
2565
2566static void __exit kvm_s390_exit(void)
2567{
2568 kvm_exit();
2569}
2570
2571module_init(kvm_s390_init);
2572module_exit(kvm_s390_exit);
566af940
CH
2573
2574/*
2575 * Enable autoloading of the kvm module.
2576 * Note that we add the module alias here instead of virt/kvm/kvm_main.c
2577 * since x86 takes a different approach.
2578 */
2579#include <linux/miscdevice.h>
2580MODULE_ALIAS_MISCDEV(KVM_MINOR);
2581MODULE_ALIAS("devname:kvm");