/*
 * hosting zSeries kernel virtual machines
 *
 * Copyright IBM Corp. 2008, 2009
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (version 2 only)
 * as published by the Free Software Foundation.
 *
 *    Author(s): Carsten Otte <cotte@de.ibm.com>
 *               Christian Borntraeger <borntraeger@de.ibm.com>
 *               Heiko Carstens <heiko.carstens@de.ibm.com>
 *               Christian Ehrhardt <ehrhardt@de.ibm.com>
 *               Jason J. Herne <jjherne@us.ibm.com>
 */

#include <linux/compiler.h>
#include <linux/err.h>
#include <linux/fs.h>
#include <linux/hrtimer.h>
#include <linux/init.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/timer.h>
#include <linux/vmalloc.h>
#include <asm/asm-offsets.h>
#include <asm/lowcore.h>
#include <asm/etr.h>
#include <asm/pgtable.h>
#include <asm/nmi.h>
#include <asm/switch_to.h>
#include <asm/isc.h>
#include <asm/sclp.h>
#include "kvm-s390.h"
#include "gaccess.h"

#define KMSG_COMPONENT "kvm-s390"
#undef pr_fmt
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#define CREATE_TRACE_POINTS
#include "trace.h"
#include "trace-s390.h"

#define MEM_OP_MAX_SIZE 65536	/* Maximum transfer size for KVM_S390_MEM_OP */
#define LOCAL_IRQS 32
#define VCPU_IRQS_MAX_BUF (sizeof(struct kvm_s390_irq) * \
			   (KVM_MAX_VCPUS + LOCAL_IRQS))

#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU

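/*
 * Per-VCPU event counters below are exported by the common KVM code as
 * debugfs statistics (one file per entry under the kvm debugfs directory).
 */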
struct kvm_stats_debugfs_item debugfs_entries[] = {
	{ "userspace_handled", VCPU_STAT(exit_userspace) },
	{ "exit_null", VCPU_STAT(exit_null) },
	{ "exit_validity", VCPU_STAT(exit_validity) },
	{ "exit_stop_request", VCPU_STAT(exit_stop_request) },
	{ "exit_external_request", VCPU_STAT(exit_external_request) },
	{ "exit_external_interrupt", VCPU_STAT(exit_external_interrupt) },
	{ "exit_instruction", VCPU_STAT(exit_instruction) },
	{ "exit_program_interruption", VCPU_STAT(exit_program_interruption) },
	{ "exit_instr_and_program_int", VCPU_STAT(exit_instr_and_program) },
	{ "halt_successful_poll", VCPU_STAT(halt_successful_poll) },
	{ "halt_wakeup", VCPU_STAT(halt_wakeup) },
	{ "instruction_lctlg", VCPU_STAT(instruction_lctlg) },
	{ "instruction_lctl", VCPU_STAT(instruction_lctl) },
	{ "instruction_stctl", VCPU_STAT(instruction_stctl) },
	{ "instruction_stctg", VCPU_STAT(instruction_stctg) },
	{ "deliver_emergency_signal", VCPU_STAT(deliver_emergency_signal) },
	{ "deliver_external_call", VCPU_STAT(deliver_external_call) },
	{ "deliver_service_signal", VCPU_STAT(deliver_service_signal) },
	{ "deliver_virtio_interrupt", VCPU_STAT(deliver_virtio_interrupt) },
	{ "deliver_stop_signal", VCPU_STAT(deliver_stop_signal) },
	{ "deliver_prefix_signal", VCPU_STAT(deliver_prefix_signal) },
	{ "deliver_restart_signal", VCPU_STAT(deliver_restart_signal) },
	{ "deliver_program_interruption", VCPU_STAT(deliver_program_int) },
	{ "exit_wait_state", VCPU_STAT(exit_wait_state) },
	{ "instruction_pfmf", VCPU_STAT(instruction_pfmf) },
	{ "instruction_stidp", VCPU_STAT(instruction_stidp) },
	{ "instruction_spx", VCPU_STAT(instruction_spx) },
	{ "instruction_stpx", VCPU_STAT(instruction_stpx) },
	{ "instruction_stap", VCPU_STAT(instruction_stap) },
	{ "instruction_storage_key", VCPU_STAT(instruction_storage_key) },
	{ "instruction_ipte_interlock", VCPU_STAT(instruction_ipte_interlock) },
	{ "instruction_stsch", VCPU_STAT(instruction_stsch) },
	{ "instruction_chsc", VCPU_STAT(instruction_chsc) },
	{ "instruction_essa", VCPU_STAT(instruction_essa) },
	{ "instruction_stsi", VCPU_STAT(instruction_stsi) },
	{ "instruction_stfl", VCPU_STAT(instruction_stfl) },
	{ "instruction_tprot", VCPU_STAT(instruction_tprot) },
	{ "instruction_sigp_sense", VCPU_STAT(instruction_sigp_sense) },
	{ "instruction_sigp_sense_running", VCPU_STAT(instruction_sigp_sense_running) },
	{ "instruction_sigp_external_call", VCPU_STAT(instruction_sigp_external_call) },
	{ "instruction_sigp_emergency", VCPU_STAT(instruction_sigp_emergency) },
	{ "instruction_sigp_cond_emergency", VCPU_STAT(instruction_sigp_cond_emergency) },
	{ "instruction_sigp_start", VCPU_STAT(instruction_sigp_start) },
	{ "instruction_sigp_stop", VCPU_STAT(instruction_sigp_stop) },
	{ "instruction_sigp_stop_store_status", VCPU_STAT(instruction_sigp_stop_store_status) },
	{ "instruction_sigp_store_status", VCPU_STAT(instruction_sigp_store_status) },
	{ "instruction_sigp_store_adtl_status", VCPU_STAT(instruction_sigp_store_adtl_status) },
	{ "instruction_sigp_set_arch", VCPU_STAT(instruction_sigp_arch) },
	{ "instruction_sigp_set_prefix", VCPU_STAT(instruction_sigp_prefix) },
	{ "instruction_sigp_restart", VCPU_STAT(instruction_sigp_restart) },
	{ "instruction_sigp_cpu_reset", VCPU_STAT(instruction_sigp_cpu_reset) },
	{ "instruction_sigp_init_cpu_reset", VCPU_STAT(instruction_sigp_init_cpu_reset) },
	{ "instruction_sigp_unknown", VCPU_STAT(instruction_sigp_unknown) },
	{ "diagnose_10", VCPU_STAT(diagnose_10) },
	{ "diagnose_44", VCPU_STAT(diagnose_44) },
	{ "diagnose_9c", VCPU_STAT(diagnose_9c) },
	{ "diagnose_258", VCPU_STAT(diagnose_258) },
	{ "diagnose_308", VCPU_STAT(diagnose_308) },
	{ "diagnose_500", VCPU_STAT(diagnose_500) },
	{ NULL }
};

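/*
 * Bits set in the mask below may be forwarded to guests from the host's
 * STFLE result; clear bits are always hidden from guests (see the mask
 * setup in kvm_arch_init_vm()).
 */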
/* upper facilities limit for kvm */
unsigned long kvm_s390_fac_list_mask[] = {
	0xffe6fffbfcfdfc40UL,
	0x005e800000000000UL,
};

unsigned long kvm_s390_fac_list_mask_size(void)
{
	BUILD_BUG_ON(ARRAY_SIZE(kvm_s390_fac_list_mask) > S390_ARCH_FAC_MASK_SIZE_U64);
	return ARRAY_SIZE(kvm_s390_fac_list_mask);
}

static struct gmap_notifier gmap_notifier;
debug_info_t *kvm_s390_dbf;

/* Section: not file related */
int kvm_arch_hardware_enable(void)
{
	/* every s390 is virtualization enabled ;-) */
	return 0;
}

static void kvm_gmap_notifier(struct gmap *gmap, unsigned long address);

/*
 * This callback is executed during stop_machine(). All CPUs are therefore
 * temporarily stopped. In order not to change guest behavior, we have to
 * disable preemption whenever we touch the epoch of kvm and the VCPUs,
 * so a CPU won't be stopped while calculating with the epoch.
 */
static int kvm_clock_sync(struct notifier_block *notifier, unsigned long val,
			  void *v)
{
	struct kvm *kvm;
	struct kvm_vcpu *vcpu;
	int i;
	unsigned long long *delta = v;

	list_for_each_entry(kvm, &vm_list, vm_list) {
		kvm->arch.epoch -= *delta;
		kvm_for_each_vcpu(i, vcpu, kvm) {
			vcpu->arch.sie_block->epoch -= *delta;
		}
	}
	return NOTIFY_OK;
}

static struct notifier_block kvm_clock_notifier = {
	.notifier_call = kvm_clock_sync,
};

int kvm_arch_hardware_setup(void)
{
	gmap_notifier.notifier_call = kvm_gmap_notifier;
	gmap_register_ipte_notifier(&gmap_notifier);
	atomic_notifier_chain_register(&s390_epoch_delta_notifier,
				       &kvm_clock_notifier);
	return 0;
}

void kvm_arch_hardware_unsetup(void)
{
	gmap_unregister_ipte_notifier(&gmap_notifier);
	atomic_notifier_chain_unregister(&s390_epoch_delta_notifier,
					 &kvm_clock_notifier);
}

int kvm_arch_init(void *opaque)
{
	kvm_s390_dbf = debug_register("kvm-trace", 32, 1, 7 * sizeof(long));
	if (!kvm_s390_dbf)
		return -ENOMEM;

	if (debug_register_view(kvm_s390_dbf, &debug_sprintf_view)) {
		debug_unregister(kvm_s390_dbf);
		return -ENOMEM;
	}

	/* Register floating interrupt controller interface. */
	return kvm_register_device_ops(&kvm_flic_ops, KVM_DEV_TYPE_FLIC);
}

void kvm_arch_exit(void)
{
	debug_unregister(kvm_s390_dbf);
}

/* Section: device related */
long kvm_arch_dev_ioctl(struct file *filp,
			unsigned int ioctl, unsigned long arg)
{
	if (ioctl == KVM_S390_ENABLE_SIE)
		return s390_enable_sie();
	return -EINVAL;
}

int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
{
	int r;

	switch (ext) {
	case KVM_CAP_S390_PSW:
	case KVM_CAP_S390_GMAP:
	case KVM_CAP_SYNC_MMU:
#ifdef CONFIG_KVM_S390_UCONTROL
	case KVM_CAP_S390_UCONTROL:
#endif
	case KVM_CAP_ASYNC_PF:
	case KVM_CAP_SYNC_REGS:
	case KVM_CAP_ONE_REG:
	case KVM_CAP_ENABLE_CAP:
	case KVM_CAP_S390_CSS_SUPPORT:
	case KVM_CAP_IOEVENTFD:
	case KVM_CAP_DEVICE_CTRL:
	case KVM_CAP_ENABLE_CAP_VM:
	case KVM_CAP_S390_IRQCHIP:
	case KVM_CAP_VM_ATTRIBUTES:
	case KVM_CAP_MP_STATE:
	case KVM_CAP_S390_INJECT_IRQ:
	case KVM_CAP_S390_USER_SIGP:
	case KVM_CAP_S390_USER_STSI:
	case KVM_CAP_S390_SKEYS:
	case KVM_CAP_S390_IRQ_STATE:
		r = 1;
		break;
	case KVM_CAP_S390_MEM_OP:
		r = MEM_OP_MAX_SIZE;
		break;
	case KVM_CAP_NR_VCPUS:
	case KVM_CAP_MAX_VCPUS:
		r = KVM_MAX_VCPUS;
		break;
	case KVM_CAP_NR_MEMSLOTS:
		r = KVM_USER_MEM_SLOTS;
		break;
	case KVM_CAP_S390_COW:
		r = MACHINE_HAS_ESOP;
		break;
	case KVM_CAP_S390_VECTOR_REGISTERS:
		r = MACHINE_HAS_VX;
		break;
	default:
		r = 0;
	}
	return r;
}

static void kvm_s390_sync_dirty_log(struct kvm *kvm,
				    struct kvm_memory_slot *memslot)
{
	gfn_t cur_gfn, last_gfn;
	unsigned long address;
	struct gmap *gmap = kvm->arch.gmap;

	down_read(&gmap->mm->mmap_sem);
	/* Loop over all guest pages (last_gfn is one past the last slot gfn) */
	last_gfn = memslot->base_gfn + memslot->npages;
	for (cur_gfn = memslot->base_gfn; cur_gfn < last_gfn; cur_gfn++) {
		address = gfn_to_hva_memslot(memslot, cur_gfn);

		if (gmap_test_and_clear_dirty(address, gmap))
			mark_page_dirty(kvm, cur_gfn);
	}
	up_read(&gmap->mm->mmap_sem);
}

/* Section: vm related */
/*
 * Get (and clear) the dirty memory log for a memory slot.
 */
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
			       struct kvm_dirty_log *log)
{
	int r;
	unsigned long n;
	struct kvm_memslots *slots;
	struct kvm_memory_slot *memslot;
	int is_dirty = 0;

	mutex_lock(&kvm->slots_lock);

	r = -EINVAL;
	if (log->slot >= KVM_USER_MEM_SLOTS)
		goto out;

	slots = kvm_memslots(kvm);
	memslot = id_to_memslot(slots, log->slot);
	r = -ENOENT;
	if (!memslot->dirty_bitmap)
		goto out;

	kvm_s390_sync_dirty_log(kvm, memslot);
	r = kvm_get_dirty_log(kvm, log, &is_dirty);
	if (r)
		goto out;

	/* Clear the dirty log */
	if (is_dirty) {
		n = kvm_dirty_bitmap_bytes(memslot);
		memset(memslot->dirty_bitmap, 0, n);
	}
	r = 0;
out:
	mutex_unlock(&kvm->slots_lock);
	return r;
}

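/*
 * Handler for the KVM_ENABLE_CAP vm ioctl.  A minimal (hypothetical)
 * userspace caller:
 *
 *	struct kvm_enable_cap cap = { .cap = KVM_CAP_S390_USER_SIGP };
 *	ioctl(vm_fd, KVM_ENABLE_CAP, &cap);
 */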
static int kvm_vm_ioctl_enable_cap(struct kvm *kvm, struct kvm_enable_cap *cap)
{
	int r;

	if (cap->flags)
		return -EINVAL;

	switch (cap->cap) {
	case KVM_CAP_S390_IRQCHIP:
		VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_IRQCHIP");
		kvm->arch.use_irqchip = 1;
		r = 0;
		break;
	case KVM_CAP_S390_USER_SIGP:
		VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_USER_SIGP");
		kvm->arch.user_sigp = 1;
		r = 0;
		break;
	case KVM_CAP_S390_VECTOR_REGISTERS:
		if (MACHINE_HAS_VX) {
			set_kvm_facility(kvm->arch.model.fac->mask, 129);
			set_kvm_facility(kvm->arch.model.fac->list, 129);
			r = 0;
		} else
			r = -EINVAL;
		VM_EVENT(kvm, 3, "ENABLE: CAP_S390_VECTOR_REGISTERS %s",
			 r ? "(not available)" : "(success)");
		break;
	case KVM_CAP_S390_USER_STSI:
		VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_USER_STSI");
		kvm->arch.user_stsi = 1;
		r = 0;
		break;
	default:
		r = -EINVAL;
		break;
	}
	return r;
}

static int kvm_s390_get_mem_control(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	switch (attr->attr) {
	case KVM_S390_VM_MEM_LIMIT_SIZE:
		ret = 0;
		VM_EVENT(kvm, 3, "QUERY: max guest memory: %lu bytes",
			 kvm->arch.gmap->asce_end);
		if (put_user(kvm->arch.gmap->asce_end, (u64 __user *)attr->addr))
			ret = -EFAULT;
		break;
	default:
		ret = -ENXIO;
		break;
	}
	return ret;
}

static int kvm_s390_set_mem_control(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;
	unsigned int idx;
	switch (attr->attr) {
	case KVM_S390_VM_MEM_ENABLE_CMMA:
		/* enable CMMA only for z10 and later (EDAT_1) */
		ret = -EINVAL;
		if (!MACHINE_IS_LPAR || !MACHINE_HAS_EDAT1)
			break;

		ret = -EBUSY;
		VM_EVENT(kvm, 3, "%s", "ENABLE: CMMA support");
		mutex_lock(&kvm->lock);
		if (atomic_read(&kvm->online_vcpus) == 0) {
			kvm->arch.use_cmma = 1;
			ret = 0;
		}
		mutex_unlock(&kvm->lock);
		break;
	case KVM_S390_VM_MEM_CLR_CMMA:
		ret = -EINVAL;
		if (!kvm->arch.use_cmma)
			break;

		VM_EVENT(kvm, 3, "%s", "RESET: CMMA states");
		mutex_lock(&kvm->lock);
		idx = srcu_read_lock(&kvm->srcu);
		s390_reset_cmma(kvm->arch.gmap->mm);
		srcu_read_unlock(&kvm->srcu, idx);
		mutex_unlock(&kvm->lock);
		ret = 0;
		break;
	case KVM_S390_VM_MEM_LIMIT_SIZE: {
		unsigned long new_limit;

		if (kvm_is_ucontrol(kvm))
			return -EINVAL;

		if (get_user(new_limit, (u64 __user *)attr->addr))
			return -EFAULT;

		if (new_limit > kvm->arch.gmap->asce_end)
			return -E2BIG;

		ret = -EBUSY;
		mutex_lock(&kvm->lock);
		if (atomic_read(&kvm->online_vcpus) == 0) {
			/* gmap_alloc will round the limit up */
			struct gmap *new = gmap_alloc(current->mm, new_limit);

			if (!new) {
				ret = -ENOMEM;
			} else {
				gmap_free(kvm->arch.gmap);
				new->private = kvm;
				kvm->arch.gmap = new;
				ret = 0;
			}
		}
		mutex_unlock(&kvm->lock);
		VM_EVENT(kvm, 3, "SET: max guest memory: %lu bytes", new_limit);
		break;
	}
	default:
		ret = -ENXIO;
		break;
	}
	return ret;
}

static void kvm_s390_vcpu_crypto_setup(struct kvm_vcpu *vcpu);

static int kvm_s390_vm_set_crypto(struct kvm *kvm, struct kvm_device_attr *attr)
{
	struct kvm_vcpu *vcpu;
	int i;

	if (!test_kvm_facility(kvm, 76))
		return -EINVAL;

	mutex_lock(&kvm->lock);
	switch (attr->attr) {
	case KVM_S390_VM_CRYPTO_ENABLE_AES_KW:
		get_random_bytes(
			kvm->arch.crypto.crycb->aes_wrapping_key_mask,
			sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
		kvm->arch.crypto.aes_kw = 1;
		VM_EVENT(kvm, 3, "%s", "ENABLE: AES keywrapping support");
		break;
	case KVM_S390_VM_CRYPTO_ENABLE_DEA_KW:
		get_random_bytes(
			kvm->arch.crypto.crycb->dea_wrapping_key_mask,
			sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));
		kvm->arch.crypto.dea_kw = 1;
		VM_EVENT(kvm, 3, "%s", "ENABLE: DEA keywrapping support");
		break;
	case KVM_S390_VM_CRYPTO_DISABLE_AES_KW:
		kvm->arch.crypto.aes_kw = 0;
		memset(kvm->arch.crypto.crycb->aes_wrapping_key_mask, 0,
			sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
		VM_EVENT(kvm, 3, "%s", "DISABLE: AES keywrapping support");
		break;
	case KVM_S390_VM_CRYPTO_DISABLE_DEA_KW:
		kvm->arch.crypto.dea_kw = 0;
		memset(kvm->arch.crypto.crycb->dea_wrapping_key_mask, 0,
			sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));
		VM_EVENT(kvm, 3, "%s", "DISABLE: DEA keywrapping support");
		break;
	default:
		mutex_unlock(&kvm->lock);
		return -ENXIO;
	}

	kvm_for_each_vcpu(i, vcpu, kvm) {
		kvm_s390_vcpu_crypto_setup(vcpu);
		exit_sie(vcpu);
	}
	mutex_unlock(&kvm->lock);
	return 0;
}

static int kvm_s390_set_tod_high(struct kvm *kvm, struct kvm_device_attr *attr)
{
	u8 gtod_high;

	if (copy_from_user(&gtod_high, (void __user *)attr->addr,
			   sizeof(gtod_high)))
		return -EFAULT;

	if (gtod_high != 0)
		return -EINVAL;
	VM_EVENT(kvm, 3, "SET: TOD extension: 0x%x\n", gtod_high);

	return 0;
}

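/*
 * The VM "epoch" below is the signed difference between the guest's TOD
 * clock and the host's; the hardware adds sie_block->epoch to the host
 * TOD whenever the guest reads the clock, so setting the guest TOD boils
 * down to storing gtod - host_tod into every VCPU's SIE block.
 */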
static int kvm_s390_set_tod_low(struct kvm *kvm, struct kvm_device_attr *attr)
{
	struct kvm_vcpu *cur_vcpu;
	unsigned int vcpu_idx;
	u64 host_tod, gtod;
	int r;

	if (copy_from_user(&gtod, (void __user *)attr->addr, sizeof(gtod)))
		return -EFAULT;

	r = store_tod_clock(&host_tod);
	if (r)
		return r;

	mutex_lock(&kvm->lock);
	preempt_disable();
	kvm->arch.epoch = gtod - host_tod;
	kvm_s390_vcpu_block_all(kvm);
	kvm_for_each_vcpu(vcpu_idx, cur_vcpu, kvm)
		cur_vcpu->arch.sie_block->epoch = kvm->arch.epoch;
	kvm_s390_vcpu_unblock_all(kvm);
	preempt_enable();
	mutex_unlock(&kvm->lock);
	VM_EVENT(kvm, 3, "SET: TOD base: 0x%llx\n", gtod);
	return 0;
}

static int kvm_s390_set_tod(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	if (attr->flags)
		return -EINVAL;

	switch (attr->attr) {
	case KVM_S390_VM_TOD_HIGH:
		ret = kvm_s390_set_tod_high(kvm, attr);
		break;
	case KVM_S390_VM_TOD_LOW:
		ret = kvm_s390_set_tod_low(kvm, attr);
		break;
	default:
		ret = -ENXIO;
		break;
	}
	return ret;
}

static int kvm_s390_get_tod_high(struct kvm *kvm, struct kvm_device_attr *attr)
{
	u8 gtod_high = 0;

	if (copy_to_user((void __user *)attr->addr, &gtod_high,
			 sizeof(gtod_high)))
		return -EFAULT;
	VM_EVENT(kvm, 3, "QUERY: TOD extension: 0x%x\n", gtod_high);

	return 0;
}

static int kvm_s390_get_tod_low(struct kvm *kvm, struct kvm_device_attr *attr)
{
	u64 host_tod, gtod;
	int r;

	r = store_tod_clock(&host_tod);
	if (r)
		return r;

	preempt_disable();
	gtod = host_tod + kvm->arch.epoch;
	preempt_enable();
	if (copy_to_user((void __user *)attr->addr, &gtod, sizeof(gtod)))
		return -EFAULT;
	VM_EVENT(kvm, 3, "QUERY: TOD base: 0x%llx\n", gtod);

	return 0;
}

static int kvm_s390_get_tod(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	if (attr->flags)
		return -EINVAL;

	switch (attr->attr) {
	case KVM_S390_VM_TOD_HIGH:
		ret = kvm_s390_get_tod_high(kvm, attr);
		break;
	case KVM_S390_VM_TOD_LOW:
		ret = kvm_s390_get_tod_low(kvm, attr);
		break;
	default:
		ret = -ENXIO;
		break;
	}
	return ret;
}

static int kvm_s390_set_processor(struct kvm *kvm, struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_cpu_processor *proc;
	int ret = 0;

	mutex_lock(&kvm->lock);
	if (atomic_read(&kvm->online_vcpus)) {
		ret = -EBUSY;
		goto out;
	}
	proc = kzalloc(sizeof(*proc), GFP_KERNEL);
	if (!proc) {
		ret = -ENOMEM;
		goto out;
	}
	if (!copy_from_user(proc, (void __user *)attr->addr,
			    sizeof(*proc))) {
		memcpy(&kvm->arch.model.cpu_id, &proc->cpuid,
		       sizeof(struct cpuid));
		kvm->arch.model.ibc = proc->ibc;
		memcpy(kvm->arch.model.fac->list, proc->fac_list,
		       S390_ARCH_FAC_LIST_SIZE_BYTE);
	} else
		ret = -EFAULT;
	kfree(proc);
out:
	mutex_unlock(&kvm->lock);
	return ret;
}

static int kvm_s390_set_cpu_model(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret = -ENXIO;

	switch (attr->attr) {
	case KVM_S390_VM_CPU_PROCESSOR:
		ret = kvm_s390_set_processor(kvm, attr);
		break;
	}
	return ret;
}

static int kvm_s390_get_processor(struct kvm *kvm, struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_cpu_processor *proc;
	int ret = 0;

	proc = kzalloc(sizeof(*proc), GFP_KERNEL);
	if (!proc) {
		ret = -ENOMEM;
		goto out;
	}
	memcpy(&proc->cpuid, &kvm->arch.model.cpu_id, sizeof(struct cpuid));
	proc->ibc = kvm->arch.model.ibc;
	memcpy(&proc->fac_list, kvm->arch.model.fac->list, S390_ARCH_FAC_LIST_SIZE_BYTE);
	if (copy_to_user((void __user *)attr->addr, proc, sizeof(*proc)))
		ret = -EFAULT;
	kfree(proc);
out:
	return ret;
}

static int kvm_s390_get_machine(struct kvm *kvm, struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_cpu_machine *mach;
	int ret = 0;

	mach = kzalloc(sizeof(*mach), GFP_KERNEL);
	if (!mach) {
		ret = -ENOMEM;
		goto out;
	}
	get_cpu_id((struct cpuid *) &mach->cpuid);
	mach->ibc = sclp.ibc;
	memcpy(&mach->fac_mask, kvm->arch.model.fac->mask,
	       S390_ARCH_FAC_LIST_SIZE_BYTE);
	memcpy((unsigned long *)&mach->fac_list, S390_lowcore.stfle_fac_list,
	       S390_ARCH_FAC_LIST_SIZE_BYTE);
	if (copy_to_user((void __user *)attr->addr, mach, sizeof(*mach)))
		ret = -EFAULT;
	kfree(mach);
out:
	return ret;
}

static int kvm_s390_get_cpu_model(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret = -ENXIO;

	switch (attr->attr) {
	case KVM_S390_VM_CPU_PROCESSOR:
		ret = kvm_s390_get_processor(kvm, attr);
		break;
	case KVM_S390_VM_CPU_MACHINE:
		ret = kvm_s390_get_machine(kvm, attr);
		break;
	}
	return ret;
}

static int kvm_s390_vm_set_attr(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	switch (attr->group) {
	case KVM_S390_VM_MEM_CTRL:
		ret = kvm_s390_set_mem_control(kvm, attr);
		break;
	case KVM_S390_VM_TOD:
		ret = kvm_s390_set_tod(kvm, attr);
		break;
	case KVM_S390_VM_CPU_MODEL:
		ret = kvm_s390_set_cpu_model(kvm, attr);
		break;
	case KVM_S390_VM_CRYPTO:
		ret = kvm_s390_vm_set_crypto(kvm, attr);
		break;
	default:
		ret = -ENXIO;
		break;
	}

	return ret;
}

static int kvm_s390_vm_get_attr(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	switch (attr->group) {
	case KVM_S390_VM_MEM_CTRL:
		ret = kvm_s390_get_mem_control(kvm, attr);
		break;
	case KVM_S390_VM_TOD:
		ret = kvm_s390_get_tod(kvm, attr);
		break;
	case KVM_S390_VM_CPU_MODEL:
		ret = kvm_s390_get_cpu_model(kvm, attr);
		break;
	default:
		ret = -ENXIO;
		break;
	}

	return ret;
}

static int kvm_s390_vm_has_attr(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	switch (attr->group) {
	case KVM_S390_VM_MEM_CTRL:
		switch (attr->attr) {
		case KVM_S390_VM_MEM_ENABLE_CMMA:
		case KVM_S390_VM_MEM_CLR_CMMA:
		case KVM_S390_VM_MEM_LIMIT_SIZE:
			ret = 0;
			break;
		default:
			ret = -ENXIO;
			break;
		}
		break;
	case KVM_S390_VM_TOD:
		switch (attr->attr) {
		case KVM_S390_VM_TOD_LOW:
		case KVM_S390_VM_TOD_HIGH:
			ret = 0;
			break;
		default:
			ret = -ENXIO;
			break;
		}
		break;
	case KVM_S390_VM_CPU_MODEL:
		switch (attr->attr) {
		case KVM_S390_VM_CPU_PROCESSOR:
		case KVM_S390_VM_CPU_MACHINE:
			ret = 0;
			break;
		default:
			ret = -ENXIO;
			break;
		}
		break;
	case KVM_S390_VM_CRYPTO:
		switch (attr->attr) {
		case KVM_S390_VM_CRYPTO_ENABLE_AES_KW:
		case KVM_S390_VM_CRYPTO_ENABLE_DEA_KW:
		case KVM_S390_VM_CRYPTO_DISABLE_AES_KW:
		case KVM_S390_VM_CRYPTO_DISABLE_DEA_KW:
			ret = 0;
			break;
		default:
			ret = -ENXIO;
			break;
		}
		break;
	default:
		ret = -ENXIO;
		break;
	}

	return ret;
}

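/*
 * The two helpers below back the KVM_S390_GET_SKEYS and KVM_S390_SET_SKEYS
 * vm ioctls.  A minimal (hypothetical) caller of the getter, with count in
 * 1..KVM_S390_SKEYS_MAX:
 *
 *	struct kvm_s390_skeys args = {
 *		.start_gfn = 0,
 *		.count = npages,
 *		.skeydata_addr = (__u64)(unsigned long)key_buf,
 *	};
 *	if (ioctl(vm_fd, KVM_S390_GET_SKEYS, &args) == KVM_S390_GET_SKEYS_NONE)
 *		handle_guest_without_storage_keys();
 */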
static long kvm_s390_get_skeys(struct kvm *kvm, struct kvm_s390_skeys *args)
{
	uint8_t *keys;
	uint64_t hva;
	unsigned long curkey;
	int i, r = 0;

	if (args->flags != 0)
		return -EINVAL;

	/* Is this guest using storage keys? */
	if (!mm_use_skey(current->mm))
		return KVM_S390_GET_SKEYS_NONE;

	/* Enforce sane limit on memory allocation */
	if (args->count < 1 || args->count > KVM_S390_SKEYS_MAX)
		return -EINVAL;

	keys = kmalloc_array(args->count, sizeof(uint8_t),
			     GFP_KERNEL | __GFP_NOWARN);
	if (!keys)
		keys = vmalloc(sizeof(uint8_t) * args->count);
	if (!keys)
		return -ENOMEM;

	for (i = 0; i < args->count; i++) {
		hva = gfn_to_hva(kvm, args->start_gfn + i);
		if (kvm_is_error_hva(hva)) {
			r = -EFAULT;
			goto out;
		}

		curkey = get_guest_storage_key(current->mm, hva);
		if (IS_ERR_VALUE(curkey)) {
			r = curkey;
			goto out;
		}
		keys[i] = curkey;
	}

	r = copy_to_user((uint8_t __user *)args->skeydata_addr, keys,
			 sizeof(uint8_t) * args->count);
	if (r)
		r = -EFAULT;
out:
	kvfree(keys);
	return r;
}

static long kvm_s390_set_skeys(struct kvm *kvm, struct kvm_s390_skeys *args)
{
	uint8_t *keys;
	uint64_t hva;
	int i, r = 0;

	if (args->flags != 0)
		return -EINVAL;

	/* Enforce sane limit on memory allocation */
	if (args->count < 1 || args->count > KVM_S390_SKEYS_MAX)
		return -EINVAL;

	keys = kmalloc_array(args->count, sizeof(uint8_t),
			     GFP_KERNEL | __GFP_NOWARN);
	if (!keys)
		keys = vmalloc(sizeof(uint8_t) * args->count);
	if (!keys)
		return -ENOMEM;

	r = copy_from_user(keys, (uint8_t __user *)args->skeydata_addr,
			   sizeof(uint8_t) * args->count);
	if (r) {
		r = -EFAULT;
		goto out;
	}

	/* Enable storage key handling for the guest */
	r = s390_enable_skey();
	if (r)
		goto out;

	for (i = 0; i < args->count; i++) {
		hva = gfn_to_hva(kvm, args->start_gfn + i);
		if (kvm_is_error_hva(hva)) {
			r = -EFAULT;
			goto out;
		}

		/* Lowest order bit is reserved */
		if (keys[i] & 0x01) {
			r = -EINVAL;
			goto out;
		}

		r = set_guest_storage_key(current->mm, hva,
					  (unsigned long)keys[i], 0);
		if (r)
			goto out;
	}
out:
	kvfree(keys);
	return r;
}

long kvm_arch_vm_ioctl(struct file *filp,
		       unsigned int ioctl, unsigned long arg)
{
	struct kvm *kvm = filp->private_data;
	void __user *argp = (void __user *)arg;
	struct kvm_device_attr attr;
	int r;

	switch (ioctl) {
	case KVM_S390_INTERRUPT: {
		struct kvm_s390_interrupt s390int;

		r = -EFAULT;
		if (copy_from_user(&s390int, argp, sizeof(s390int)))
			break;
		r = kvm_s390_inject_vm(kvm, &s390int);
		break;
	}
	case KVM_ENABLE_CAP: {
		struct kvm_enable_cap cap;
		r = -EFAULT;
		if (copy_from_user(&cap, argp, sizeof(cap)))
			break;
		r = kvm_vm_ioctl_enable_cap(kvm, &cap);
		break;
	}
	case KVM_CREATE_IRQCHIP: {
		struct kvm_irq_routing_entry routing;

		r = -EINVAL;
		if (kvm->arch.use_irqchip) {
			/* Set up dummy routing. */
			memset(&routing, 0, sizeof(routing));
			kvm_set_irq_routing(kvm, &routing, 0, 0);
			r = 0;
		}
		break;
	}
	case KVM_SET_DEVICE_ATTR: {
		r = -EFAULT;
		if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
			break;
		r = kvm_s390_vm_set_attr(kvm, &attr);
		break;
	}
	case KVM_GET_DEVICE_ATTR: {
		r = -EFAULT;
		if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
			break;
		r = kvm_s390_vm_get_attr(kvm, &attr);
		break;
	}
	case KVM_HAS_DEVICE_ATTR: {
		r = -EFAULT;
		if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
			break;
		r = kvm_s390_vm_has_attr(kvm, &attr);
		break;
	}
	case KVM_S390_GET_SKEYS: {
		struct kvm_s390_skeys args;

		r = -EFAULT;
		if (copy_from_user(&args, argp,
				   sizeof(struct kvm_s390_skeys)))
			break;
		r = kvm_s390_get_skeys(kvm, &args);
		break;
	}
	case KVM_S390_SET_SKEYS: {
		struct kvm_s390_skeys args;

		r = -EFAULT;
		if (copy_from_user(&args, argp,
				   sizeof(struct kvm_s390_skeys)))
			break;
		r = kvm_s390_set_skeys(kvm, &args);
		break;
	}
	default:
		r = -ENOTTY;
	}

	return r;
}

static int kvm_s390_query_ap_config(u8 *config)
{
	u32 fcn_code = 0x04000000UL;
	u32 cc = 0;

	memset(config, 0, 128);
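	/*
	 * PQAP with function code QCI stores 128 bytes of AP configuration
	 * information.  If the machine lacks the instruction, the EX_TABLE
	 * fixup below skips the condition-code extraction, cc stays 0, and
	 * the caller sees the zeroed buffer, i.e. no APXA.
	 */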
	asm volatile(
		"lgr 0,%1\n"
		"lgr 2,%2\n"
		".long 0xb2af0000\n"		/* PQAP(QCI) */
		"0: ipm %0\n"
		"srl %0,28\n"
		"1:\n"
		EX_TABLE(0b, 1b)
		: "+r" (cc)
		: "r" (fcn_code), "r" (config)
		: "cc", "0", "2", "memory"
	);

	return cc;
}

static int kvm_s390_apxa_installed(void)
{
	u8 config[128];
	int cc;

	if (test_facility(2) && test_facility(12)) {
		cc = kvm_s390_query_ap_config(config);

		if (cc)
			pr_err("PQAP(QCI) failed with cc=%d", cc);
		else
			return config[0] & 0x40;
	}

	return 0;
}

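/*
 * The CRYCB descriptor holds the 31-bit address of the crypto control
 * block together with format bits in the low bits: format 2 when APXA
 * is available, format 1 otherwise.
 */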
static void kvm_s390_set_crycb_format(struct kvm *kvm)
{
	kvm->arch.crypto.crycbd = (__u32)(unsigned long) kvm->arch.crypto.crycb;

	if (kvm_s390_apxa_installed())
		kvm->arch.crypto.crycbd |= CRYCB_FORMAT2;
	else
		kvm->arch.crypto.crycbd |= CRYCB_FORMAT1;
}

static void kvm_s390_get_cpu_id(struct cpuid *cpu_id)
{
	get_cpu_id(cpu_id);
	cpu_id->version = 0xff;
}

static int kvm_s390_crypto_init(struct kvm *kvm)
{
	if (!test_kvm_facility(kvm, 76))
		return 0;

	kvm->arch.crypto.crycb = kzalloc(sizeof(*kvm->arch.crypto.crycb),
					 GFP_KERNEL | GFP_DMA);
	if (!kvm->arch.crypto.crycb)
		return -ENOMEM;

	kvm_s390_set_crycb_format(kvm);

	/* Enable AES/DEA protected key functions by default */
	kvm->arch.crypto.aes_kw = 1;
	kvm->arch.crypto.dea_kw = 1;
	get_random_bytes(kvm->arch.crypto.crycb->aes_wrapping_key_mask,
			 sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
	get_random_bytes(kvm->arch.crypto.crycb->dea_wrapping_key_mask,
			 sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));

	return 0;
}

int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
{
	int i, rc;
	char debug_name[16];
	static unsigned long sca_offset;

	rc = -EINVAL;
#ifdef CONFIG_KVM_S390_UCONTROL
	if (type & ~KVM_VM_S390_UCONTROL)
		goto out_err;
	if ((type & KVM_VM_S390_UCONTROL) && (!capable(CAP_SYS_ADMIN)))
		goto out_err;
#else
	if (type)
		goto out_err;
#endif

	rc = s390_enable_sie();
	if (rc)
		goto out_err;

	rc = -ENOMEM;

	kvm->arch.sca = (struct sca_block *) get_zeroed_page(GFP_KERNEL);
	if (!kvm->arch.sca)
		goto out_err;
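	/*
	 * Stagger each VM's SCA within its page in 16-byte steps, wrapping
	 * after 2 KB, presumably so that not every guest's SCA sits at the
	 * same cache-line offset.
	 */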
	spin_lock(&kvm_lock);
	sca_offset = (sca_offset + 16) & 0x7f0;
	kvm->arch.sca = (struct sca_block *) ((char *) kvm->arch.sca + sca_offset);
	spin_unlock(&kvm_lock);

	sprintf(debug_name, "kvm-%u", current->pid);

	kvm->arch.dbf = debug_register(debug_name, 32, 1, 7 * sizeof(long));
	if (!kvm->arch.dbf)
		goto out_err;

	/*
	 * The architectural maximum amount of facilities is 16 kbit. To store
	 * this amount, 2 kbyte of memory is required. Thus we need a full
	 * page to hold the guest facility list (arch.model.fac->list) and the
	 * facility mask (arch.model.fac->mask). Its address size has to be
	 * 31 bits and word aligned.
	 */
	kvm->arch.model.fac =
		(struct kvm_s390_fac *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!kvm->arch.model.fac)
		goto out_err;

	/* Populate the facility mask initially. */
	memcpy(kvm->arch.model.fac->mask, S390_lowcore.stfle_fac_list,
	       S390_ARCH_FAC_LIST_SIZE_BYTE);
	for (i = 0; i < S390_ARCH_FAC_LIST_SIZE_U64; i++) {
		if (i < kvm_s390_fac_list_mask_size())
			kvm->arch.model.fac->mask[i] &= kvm_s390_fac_list_mask[i];
		else
			kvm->arch.model.fac->mask[i] = 0UL;
	}

	/* Populate the facility list initially. */
	memcpy(kvm->arch.model.fac->list, kvm->arch.model.fac->mask,
	       S390_ARCH_FAC_LIST_SIZE_BYTE);

	kvm_s390_get_cpu_id(&kvm->arch.model.cpu_id);
	kvm->arch.model.ibc = sclp.ibc & 0x0fff;

	if (kvm_s390_crypto_init(kvm) < 0)
		goto out_err;

	spin_lock_init(&kvm->arch.float_int.lock);
	for (i = 0; i < FIRQ_LIST_COUNT; i++)
		INIT_LIST_HEAD(&kvm->arch.float_int.lists[i]);
	init_waitqueue_head(&kvm->arch.ipte_wq);
	mutex_init(&kvm->arch.ipte_mutex);

	debug_register_view(kvm->arch.dbf, &debug_sprintf_view);
	VM_EVENT(kvm, 3, "vm created with type %lu", type);

	if (type & KVM_VM_S390_UCONTROL) {
		kvm->arch.gmap = NULL;
	} else {
		kvm->arch.gmap = gmap_alloc(current->mm, (1UL << 44) - 1);
		if (!kvm->arch.gmap)
			goto out_err;
		kvm->arch.gmap->private = kvm;
		kvm->arch.gmap->pfault_enabled = 0;
	}

	kvm->arch.css_support = 0;
	kvm->arch.use_irqchip = 0;
	kvm->arch.epoch = 0;

	spin_lock_init(&kvm->arch.start_stop_lock);
	KVM_EVENT(3, "vm 0x%p created by pid %u", kvm, current->pid);

	return 0;
out_err:
	kfree(kvm->arch.crypto.crycb);
	free_page((unsigned long)kvm->arch.model.fac);
	debug_unregister(kvm->arch.dbf);
	free_page((unsigned long)(kvm->arch.sca));
	KVM_EVENT(3, "creation of vm failed: %d", rc);
	return rc;
}

void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{
	VCPU_EVENT(vcpu, 3, "%s", "free cpu");
	trace_kvm_s390_destroy_vcpu(vcpu->vcpu_id);
	kvm_s390_clear_local_irqs(vcpu);
	kvm_clear_async_pf_completion_queue(vcpu);
	if (!kvm_is_ucontrol(vcpu->kvm)) {
		clear_bit(63 - vcpu->vcpu_id,
			  (unsigned long *) &vcpu->kvm->arch.sca->mcn);
		if (vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda ==
		    (__u64) vcpu->arch.sie_block)
			vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda = 0;
	}
	smp_mb();

	if (kvm_is_ucontrol(vcpu->kvm))
		gmap_free(vcpu->arch.gmap);

	if (vcpu->kvm->arch.use_cmma)
		kvm_s390_vcpu_unsetup_cmma(vcpu);
	free_page((unsigned long)(vcpu->arch.sie_block));

	kvm_vcpu_uninit(vcpu);
	kmem_cache_free(kvm_vcpu_cache, vcpu);
}

static void kvm_free_vcpus(struct kvm *kvm)
{
	unsigned int i;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(i, vcpu, kvm)
		kvm_arch_vcpu_destroy(vcpu);

	mutex_lock(&kvm->lock);
	for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
		kvm->vcpus[i] = NULL;

	atomic_set(&kvm->online_vcpus, 0);
	mutex_unlock(&kvm->lock);
}

void kvm_arch_destroy_vm(struct kvm *kvm)
{
	kvm_free_vcpus(kvm);
	free_page((unsigned long)kvm->arch.model.fac);
	free_page((unsigned long)(kvm->arch.sca));
	debug_unregister(kvm->arch.dbf);
	kfree(kvm->arch.crypto.crycb);
	if (!kvm_is_ucontrol(kvm))
		gmap_free(kvm->arch.gmap);
	kvm_s390_destroy_adapters(kvm);
	kvm_s390_clear_float_irqs(kvm);
	KVM_EVENT(3, "vm 0x%p destroyed", kvm);
}

/* Section: vcpu related */
static int __kvm_ucontrol_vcpu_init(struct kvm_vcpu *vcpu)
{
	vcpu->arch.gmap = gmap_alloc(current->mm, -1UL);
	if (!vcpu->arch.gmap)
		return -ENOMEM;
	vcpu->arch.gmap->private = vcpu->kvm;

	return 0;
}

int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
{
	vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
	kvm_clear_async_pf_completion_queue(vcpu);
	vcpu->run->kvm_valid_regs = KVM_SYNC_PREFIX |
				    KVM_SYNC_GPRS |
				    KVM_SYNC_ACRS |
				    KVM_SYNC_CRS |
				    KVM_SYNC_ARCH0 |
				    KVM_SYNC_PFAULT;
	if (test_kvm_facility(vcpu->kvm, 129))
		vcpu->run->kvm_valid_regs |= KVM_SYNC_VRS;

	if (kvm_is_ucontrol(vcpu->kvm))
		return __kvm_ucontrol_vcpu_init(vcpu);

	return 0;
}

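/*
 * kvm_arch_vcpu_load() and kvm_arch_vcpu_put() swap the FP/vector and
 * access register context between host and guest: with facility 129
 * (vector support) the full 128-bit vector registers are switched,
 * otherwise only the classic floating point registers.
 */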
void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	save_fp_ctl(&vcpu->arch.host_fpregs.fpc);
	if (test_kvm_facility(vcpu->kvm, 129))
		save_vx_regs((__vector128 *)&vcpu->arch.host_vregs->vrs);
	else
		save_fp_regs(vcpu->arch.host_fpregs.fprs);
	save_access_regs(vcpu->arch.host_acrs);
	if (test_kvm_facility(vcpu->kvm, 129)) {
		restore_fp_ctl(&vcpu->run->s.regs.fpc);
		restore_vx_regs((__vector128 *)&vcpu->run->s.regs.vrs);
	} else {
		restore_fp_ctl(&vcpu->arch.guest_fpregs.fpc);
		restore_fp_regs(vcpu->arch.guest_fpregs.fprs);
	}
	restore_access_regs(vcpu->run->s.regs.acrs);
	gmap_enable(vcpu->arch.gmap);
	atomic_set_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
}

void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
	atomic_clear_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
	gmap_disable(vcpu->arch.gmap);
	if (test_kvm_facility(vcpu->kvm, 129)) {
		save_fp_ctl(&vcpu->run->s.regs.fpc);
		save_vx_regs((__vector128 *)&vcpu->run->s.regs.vrs);
	} else {
		save_fp_ctl(&vcpu->arch.guest_fpregs.fpc);
		save_fp_regs(vcpu->arch.guest_fpregs.fprs);
	}
	save_access_regs(vcpu->run->s.regs.acrs);
	restore_fp_ctl(&vcpu->arch.host_fpregs.fpc);
	if (test_kvm_facility(vcpu->kvm, 129))
		restore_vx_regs((__vector128 *)&vcpu->arch.host_vregs->vrs);
	else
		restore_fp_regs(vcpu->arch.host_fpregs.fprs);
	restore_access_regs(vcpu->arch.host_acrs);
}

static void kvm_s390_vcpu_initial_reset(struct kvm_vcpu *vcpu)
{
	/* this equals initial cpu reset in pop, but we don't switch to ESA */
	vcpu->arch.sie_block->gpsw.mask = 0UL;
	vcpu->arch.sie_block->gpsw.addr = 0UL;
	kvm_s390_set_prefix(vcpu, 0);
	vcpu->arch.sie_block->cputm = 0UL;
	vcpu->arch.sie_block->ckc = 0UL;
	vcpu->arch.sie_block->todpr = 0;
	memset(vcpu->arch.sie_block->gcr, 0, 16 * sizeof(__u64));
	vcpu->arch.sie_block->gcr[0] = 0xE0UL;
	vcpu->arch.sie_block->gcr[14] = 0xC2000000UL;
	vcpu->arch.guest_fpregs.fpc = 0;
	asm volatile("lfpc %0" : : "Q" (vcpu->arch.guest_fpregs.fpc));
	vcpu->arch.sie_block->gbea = 1;
	vcpu->arch.sie_block->pp = 0;
	vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
	kvm_clear_async_pf_completion_queue(vcpu);
	if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm))
		kvm_s390_vcpu_stop(vcpu);
	kvm_s390_clear_local_irqs(vcpu);
}

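/*
 * A freshly created VCPU inherits the VM-wide TOD epoch.  Preemption is
 * disabled around the copy so that the stop_machine()-time clock sync
 * (see kvm_clock_sync()) cannot adjust the epoch in the middle of it.
 */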
31928aa5 1350void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
42897d86 1351{
72f25020 1352 mutex_lock(&vcpu->kvm->lock);
fdf03650 1353 preempt_disable();
72f25020 1354 vcpu->arch.sie_block->epoch = vcpu->kvm->arch.epoch;
fdf03650 1355 preempt_enable();
72f25020 1356 mutex_unlock(&vcpu->kvm->lock);
dafd032a
DD
1357 if (!kvm_is_ucontrol(vcpu->kvm))
1358 vcpu->arch.gmap = vcpu->kvm->arch.gmap;
42897d86
MT
1359}
1360
5102ee87
TK
1361static void kvm_s390_vcpu_crypto_setup(struct kvm_vcpu *vcpu)
1362{
9d8d5786 1363 if (!test_kvm_facility(vcpu->kvm, 76))
5102ee87
TK
1364 return;
1365
a374e892
TK
1366 vcpu->arch.sie_block->ecb3 &= ~(ECB3_AES | ECB3_DEA);
1367
1368 if (vcpu->kvm->arch.crypto.aes_kw)
1369 vcpu->arch.sie_block->ecb3 |= ECB3_AES;
1370 if (vcpu->kvm->arch.crypto.dea_kw)
1371 vcpu->arch.sie_block->ecb3 |= ECB3_DEA;
1372
5102ee87
TK
1373 vcpu->arch.sie_block->crycbd = vcpu->kvm->arch.crypto.crycbd;
1374}
1375
b31605c1
DD
1376void kvm_s390_vcpu_unsetup_cmma(struct kvm_vcpu *vcpu)
1377{
1378 free_page(vcpu->arch.sie_block->cbrlo);
1379 vcpu->arch.sie_block->cbrlo = 0;
1380}
1381
1382int kvm_s390_vcpu_setup_cmma(struct kvm_vcpu *vcpu)
1383{
1384 vcpu->arch.sie_block->cbrlo = get_zeroed_page(GFP_KERNEL);
1385 if (!vcpu->arch.sie_block->cbrlo)
1386 return -ENOMEM;
1387
1388 vcpu->arch.sie_block->ecb2 |= 0x80;
1389 vcpu->arch.sie_block->ecb2 &= ~0x08;
1390 return 0;
1391}
1392
91520f1a
MM
1393static void kvm_s390_vcpu_setup_model(struct kvm_vcpu *vcpu)
1394{
1395 struct kvm_s390_cpu_model *model = &vcpu->kvm->arch.model;
1396
1397 vcpu->arch.cpu_id = model->cpu_id;
1398 vcpu->arch.sie_block->ibc = model->ibc;
1399 vcpu->arch.sie_block->fac = (int) (long) model->fac->list;
1400}
1401
b0c632db
HC
1402int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
1403{
b31605c1 1404 int rc = 0;
b31288fa 1405
9e6dabef
CH
1406 atomic_set(&vcpu->arch.sie_block->cpuflags, CPUSTAT_ZARCH |
1407 CPUSTAT_SM |
a4a4f191
GH
1408 CPUSTAT_STOPPED);
1409
53df84f8
GH
1410 if (test_kvm_facility(vcpu->kvm, 78))
1411 atomic_set_mask(CPUSTAT_GED2, &vcpu->arch.sie_block->cpuflags);
1412 else if (test_kvm_facility(vcpu->kvm, 8))
a4a4f191
GH
1413 atomic_set_mask(CPUSTAT_GED, &vcpu->arch.sie_block->cpuflags);
1414
91520f1a
MM
1415 kvm_s390_vcpu_setup_model(vcpu);
1416
fc34531d 1417 vcpu->arch.sie_block->ecb = 6;
9d8d5786 1418 if (test_kvm_facility(vcpu->kvm, 50) && test_kvm_facility(vcpu->kvm, 73))
7feb6bb8
MM
1419 vcpu->arch.sie_block->ecb |= 0x10;
1420
69d0d3a3 1421 vcpu->arch.sie_block->ecb2 = 8;
ea5f4969 1422 vcpu->arch.sie_block->eca = 0xC1002000U;
37c5f6c8 1423 if (sclp.has_siif)
217a4406 1424 vcpu->arch.sie_block->eca |= 1;
37c5f6c8 1425 if (sclp.has_sigpif)
ea5f4969 1426 vcpu->arch.sie_block->eca |= 0x10000000U;
18280d8b 1427 if (test_kvm_facility(vcpu->kvm, 129)) {
13211ea7
EF
1428 vcpu->arch.sie_block->eca |= 0x00020000;
1429 vcpu->arch.sie_block->ecd |= 0x20000000;
1430 }
492d8642 1431 vcpu->arch.sie_block->ictl |= ICTL_ISKE | ICTL_SSKE | ICTL_RRBE;
5a5e6536 1432
e6db1d61 1433 if (vcpu->kvm->arch.use_cmma) {
b31605c1
DD
1434 rc = kvm_s390_vcpu_setup_cmma(vcpu);
1435 if (rc)
1436 return rc;
b31288fa 1437 }
0ac96caf 1438 hrtimer_init(&vcpu->arch.ckc_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
ca872302 1439 vcpu->arch.ckc_timer.function = kvm_s390_idle_wakeup;
9d8d5786 1440
5102ee87
TK
1441 kvm_s390_vcpu_crypto_setup(vcpu);
1442
b31605c1 1443 return rc;
b0c632db
HC
1444}
1445
1446struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
1447 unsigned int id)
1448{
4d47555a 1449 struct kvm_vcpu *vcpu;
7feb6bb8 1450 struct sie_page *sie_page;
4d47555a
CO
1451 int rc = -EINVAL;
1452
1453 if (id >= KVM_MAX_VCPUS)
1454 goto out;
1455
1456 rc = -ENOMEM;
b0c632db 1457
b110feaf 1458 vcpu = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
b0c632db 1459 if (!vcpu)
4d47555a 1460 goto out;
b0c632db 1461
7feb6bb8
MM
1462 sie_page = (struct sie_page *) get_zeroed_page(GFP_KERNEL);
1463 if (!sie_page)
b0c632db
HC
1464 goto out_free_cpu;
1465
7feb6bb8
MM
1466 vcpu->arch.sie_block = &sie_page->sie_block;
1467 vcpu->arch.sie_block->itdba = (unsigned long) &sie_page->itdb;
68c55750 1468 vcpu->arch.host_vregs = &sie_page->vregs;
7feb6bb8 1469
b0c632db 1470 vcpu->arch.sie_block->icpua = id;
58f9460b
CO
1471 if (!kvm_is_ucontrol(kvm)) {
1472 if (!kvm->arch.sca) {
1473 WARN_ON_ONCE(1);
1474 goto out_free_cpu;
1475 }
1476 if (!kvm->arch.sca->cpu[id].sda)
1477 kvm->arch.sca->cpu[id].sda =
1478 (__u64) vcpu->arch.sie_block;
1479 vcpu->arch.sie_block->scaoh =
1480 (__u32)(((__u64)kvm->arch.sca) >> 32);
1481 vcpu->arch.sie_block->scaol = (__u32)(__u64)kvm->arch.sca;
1482 set_bit(63 - id, (unsigned long *) &kvm->arch.sca->mcn);
1483 }
b0c632db 1484
ba5c1e9b 1485 spin_lock_init(&vcpu->arch.local_int.lock);
ba5c1e9b 1486 vcpu->arch.local_int.float_int = &kvm->arch.float_int;
d0321a24 1487 vcpu->arch.local_int.wq = &vcpu->wq;
5288fbf0 1488 vcpu->arch.local_int.cpuflags = &vcpu->arch.sie_block->cpuflags;
ba5c1e9b 1489
b0c632db
HC
1490 rc = kvm_vcpu_init(vcpu, kvm, id);
1491 if (rc)
7b06bf2f 1492 goto out_free_sie_block;
b0c632db
HC
1493 VM_EVENT(kvm, 3, "create cpu %d at %p, sie block at %p", id, vcpu,
1494 vcpu->arch.sie_block);
ade38c31 1495 trace_kvm_s390_create_vcpu(id, vcpu, vcpu->arch.sie_block);
b0c632db 1496
b0c632db 1497 return vcpu;
7b06bf2f
WY
1498out_free_sie_block:
1499 free_page((unsigned long)(vcpu->arch.sie_block));
b0c632db 1500out_free_cpu:
b110feaf 1501 kmem_cache_free(kvm_vcpu_cache, vcpu);
4d47555a 1502out:
b0c632db
HC
1503 return ERR_PTR(rc);
1504}
1505
b0c632db
HC
1506int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
1507{
9a022067 1508 return kvm_s390_vcpu_has_irq(vcpu, 0);
b0c632db
HC
1509}
1510
27406cd5 1511void kvm_s390_vcpu_block(struct kvm_vcpu *vcpu)
49b99e1e
CB
1512{
1513 atomic_set_mask(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
61a6df54 1514 exit_sie(vcpu);
49b99e1e
CB
1515}
1516
27406cd5 1517void kvm_s390_vcpu_unblock(struct kvm_vcpu *vcpu)
49b99e1e
CB
1518{
1519 atomic_clear_mask(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
1520}
1521
8e236546
CB
1522static void kvm_s390_vcpu_request(struct kvm_vcpu *vcpu)
1523{
1524 atomic_set_mask(PROG_REQUEST, &vcpu->arch.sie_block->prog20);
61a6df54 1525 exit_sie(vcpu);
8e236546
CB
1526}
1527
1528static void kvm_s390_vcpu_request_handled(struct kvm_vcpu *vcpu)
1529{
1530 atomic_clear_mask(PROG_REQUEST, &vcpu->arch.sie_block->prog20);
1531}
1532
49b99e1e
CB
1533/*
1534 * Kick a guest cpu out of SIE and wait until SIE is not running.
1535 * If the CPU is not running (e.g. waiting as idle) the function will
1536 * return immediately. */
1537void exit_sie(struct kvm_vcpu *vcpu)
1538{
1539 atomic_set_mask(CPUSTAT_STOP_INT, &vcpu->arch.sie_block->cpuflags);
1540 while (vcpu->arch.sie_block->prog0c & PROG_IN_SIE)
1541 cpu_relax();
1542}
1543
8e236546
CB
1544/* Kick a guest cpu out of SIE to process a request synchronously */
1545void kvm_s390_sync_request(int req, struct kvm_vcpu *vcpu)
49b99e1e 1546{
8e236546
CB
1547 kvm_make_request(req, vcpu);
1548 kvm_s390_vcpu_request(vcpu);
49b99e1e
CB
1549}
1550
2c70fe44
CB
1551static void kvm_gmap_notifier(struct gmap *gmap, unsigned long address)
1552{
1553 int i;
1554 struct kvm *kvm = gmap->private;
1555 struct kvm_vcpu *vcpu;
1556
1557 kvm_for_each_vcpu(i, vcpu, kvm) {
1558 /* match against both prefix pages */
fda902cb 1559 if (kvm_s390_get_prefix(vcpu) == (address & ~0x1000UL)) {
2c70fe44 1560 VCPU_EVENT(vcpu, 2, "gmap notifier for %lx", address);
8e236546 1561 kvm_s390_sync_request(KVM_REQ_MMU_RELOAD, vcpu);
2c70fe44
CB
1562 }
1563 }
1564}
1565
b6d33834
CD
1566int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
1567{
1568 /* kvm common code refers to this, but never calls it */
1569 BUG();
1570 return 0;
1571}
1572
14eebd91
CO
1573static int kvm_arch_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu,
1574 struct kvm_one_reg *reg)
1575{
1576 int r = -EINVAL;
1577
1578 switch (reg->id) {
29b7c71b
CO
1579 case KVM_REG_S390_TODPR:
1580 r = put_user(vcpu->arch.sie_block->todpr,
1581 (u32 __user *)reg->addr);
1582 break;
1583 case KVM_REG_S390_EPOCHDIFF:
1584 r = put_user(vcpu->arch.sie_block->epoch,
1585 (u64 __user *)reg->addr);
1586 break;
46a6dd1c
J
1587 case KVM_REG_S390_CPU_TIMER:
1588 r = put_user(vcpu->arch.sie_block->cputm,
1589 (u64 __user *)reg->addr);
1590 break;
1591 case KVM_REG_S390_CLOCK_COMP:
1592 r = put_user(vcpu->arch.sie_block->ckc,
1593 (u64 __user *)reg->addr);
1594 break;
536336c2
DD
1595 case KVM_REG_S390_PFTOKEN:
1596 r = put_user(vcpu->arch.pfault_token,
1597 (u64 __user *)reg->addr);
1598 break;
1599 case KVM_REG_S390_PFCOMPARE:
1600 r = put_user(vcpu->arch.pfault_compare,
1601 (u64 __user *)reg->addr);
1602 break;
1603 case KVM_REG_S390_PFSELECT:
1604 r = put_user(vcpu->arch.pfault_select,
1605 (u64 __user *)reg->addr);
1606 break;
672550fb
CB
1607 case KVM_REG_S390_PP:
1608 r = put_user(vcpu->arch.sie_block->pp,
1609 (u64 __user *)reg->addr);
1610 break;
afa45ff5
CB
1611 case KVM_REG_S390_GBEA:
1612 r = put_user(vcpu->arch.sie_block->gbea,
1613 (u64 __user *)reg->addr);
1614 break;
14eebd91
CO
1615 default:
1616 break;
1617 }
1618
1619 return r;
1620}
1621
1622static int kvm_arch_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu,
1623 struct kvm_one_reg *reg)
1624{
1625 int r = -EINVAL;
1626
1627 switch (reg->id) {
29b7c71b
CO
1628 case KVM_REG_S390_TODPR:
1629 r = get_user(vcpu->arch.sie_block->todpr,
1630 (u32 __user *)reg->addr);
1631 break;
1632 case KVM_REG_S390_EPOCHDIFF:
1633 r = get_user(vcpu->arch.sie_block->epoch,
1634 (u64 __user *)reg->addr);
1635 break;
46a6dd1c
J
1636 case KVM_REG_S390_CPU_TIMER:
1637 r = get_user(vcpu->arch.sie_block->cputm,
1638 (u64 __user *)reg->addr);
1639 break;
1640 case KVM_REG_S390_CLOCK_COMP:
1641 r = get_user(vcpu->arch.sie_block->ckc,
1642 (u64 __user *)reg->addr);
1643 break;
536336c2
DD
1644 case KVM_REG_S390_PFTOKEN:
1645 r = get_user(vcpu->arch.pfault_token,
1646 (u64 __user *)reg->addr);
9fbd8082
DH
1647 if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
1648 kvm_clear_async_pf_completion_queue(vcpu);
536336c2
DD
1649 break;
1650 case KVM_REG_S390_PFCOMPARE:
1651 r = get_user(vcpu->arch.pfault_compare,
1652 (u64 __user *)reg->addr);
1653 break;
1654 case KVM_REG_S390_PFSELECT:
1655 r = get_user(vcpu->arch.pfault_select,
1656 (u64 __user *)reg->addr);
1657 break;
672550fb
CB
1658 case KVM_REG_S390_PP:
1659 r = get_user(vcpu->arch.sie_block->pp,
1660 (u64 __user *)reg->addr);
1661 break;
afa45ff5
CB
1662 case KVM_REG_S390_GBEA:
1663 r = get_user(vcpu->arch.sie_block->gbea,
1664 (u64 __user *)reg->addr);
1665 break;
14eebd91
CO
1666 default:
1667 break;
1668 }
1669
1670 return r;
1671}
b6d33834 1672
b0c632db
HC
1673static int kvm_arch_vcpu_ioctl_initial_reset(struct kvm_vcpu *vcpu)
1674{
b0c632db 1675 kvm_s390_vcpu_initial_reset(vcpu);
b0c632db
HC
1676 return 0;
1677}
1678
1679int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
1680{
5a32c1af 1681 memcpy(&vcpu->run->s.regs.gprs, &regs->gprs, sizeof(regs->gprs));
b0c632db
HC
1682 return 0;
1683}
1684
1685int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
1686{
5a32c1af 1687 memcpy(&regs->gprs, &vcpu->run->s.regs.gprs, sizeof(regs->gprs));
b0c632db
HC
1688 return 0;
1689}
1690
1691int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
1692 struct kvm_sregs *sregs)
1693{
59674c1a 1694 memcpy(&vcpu->run->s.regs.acrs, &sregs->acrs, sizeof(sregs->acrs));
b0c632db 1695 memcpy(&vcpu->arch.sie_block->gcr, &sregs->crs, sizeof(sregs->crs));
59674c1a 1696 restore_access_regs(vcpu->run->s.regs.acrs);
b0c632db
HC
1697 return 0;
1698}
1699
1700int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
1701 struct kvm_sregs *sregs)
1702{
59674c1a 1703 memcpy(&sregs->acrs, &vcpu->run->s.regs.acrs, sizeof(sregs->acrs));
b0c632db 1704 memcpy(&sregs->crs, &vcpu->arch.sie_block->gcr, sizeof(sregs->crs));
b0c632db
HC
1705 return 0;
1706}
1707
1708int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
1709{
4725c860
MS
1710 if (test_fp_ctl(fpu->fpc))
1711 return -EINVAL;
b0c632db 1712 memcpy(&vcpu->arch.guest_fpregs.fprs, &fpu->fprs, sizeof(fpu->fprs));
4725c860
MS
1713 vcpu->arch.guest_fpregs.fpc = fpu->fpc;
1714 restore_fp_ctl(&vcpu->arch.guest_fpregs.fpc);
1715 restore_fp_regs(vcpu->arch.guest_fpregs.fprs);
b0c632db
HC
1716 return 0;
1717}
1718
1719int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
1720{
b0c632db
HC
1721 memcpy(&fpu->fprs, &vcpu->arch.guest_fpregs.fprs, sizeof(fpu->fprs));
1722 fpu->fpc = vcpu->arch.guest_fpregs.fpc;
b0c632db
HC
1723 return 0;
1724}
1725
1726static int kvm_arch_vcpu_ioctl_set_initial_psw(struct kvm_vcpu *vcpu, psw_t psw)
1727{
1728 int rc = 0;
1729
7a42fdc2 1730 if (!is_vcpu_stopped(vcpu))
b0c632db 1731 rc = -EBUSY;
d7b0b5eb
CO
1732 else {
1733 vcpu->run->psw_mask = psw.mask;
1734 vcpu->run->psw_addr = psw.addr;
1735 }
b0c632db
HC
1736 return rc;
1737}
1738
1739int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
1740 struct kvm_translation *tr)
1741{
1742 return -EINVAL; /* not implemented yet */
1743}
1744
27291e21
DH
1745#define VALID_GUESTDBG_FLAGS (KVM_GUESTDBG_SINGLESTEP | \
1746 KVM_GUESTDBG_USE_HW_BP | \
1747 KVM_GUESTDBG_ENABLE)
1748
d0bfb940
JK
1749int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
1750 struct kvm_guest_debug *dbg)
b0c632db 1751{
27291e21
DH
1752 int rc = 0;
1753
1754 vcpu->guest_debug = 0;
1755 kvm_s390_clear_bp_data(vcpu);
1756
2de3bfc2 1757 if (dbg->control & ~VALID_GUESTDBG_FLAGS)
27291e21
DH
1758 return -EINVAL;
1759
1760 if (dbg->control & KVM_GUESTDBG_ENABLE) {
1761 vcpu->guest_debug = dbg->control;
1762 /* enforce guest PER */
1763 atomic_set_mask(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags);
1764
1765 if (dbg->control & KVM_GUESTDBG_USE_HW_BP)
1766 rc = kvm_s390_import_bp_data(vcpu, dbg);
1767 } else {
1768 atomic_clear_mask(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags);
1769 vcpu->arch.guestdbg.last_bp = 0;
1770 }
1771
1772 if (rc) {
1773 vcpu->guest_debug = 0;
1774 kvm_s390_clear_bp_data(vcpu);
1775 atomic_clear_mask(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags);
1776 }
1777
1778 return rc;
b0c632db
HC
1779}
1780
int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	/* CHECK_STOP and LOAD are not supported yet */
	return is_vcpu_stopped(vcpu) ? KVM_MP_STATE_STOPPED :
				       KVM_MP_STATE_OPERATING;
}

int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	int rc = 0;

	/* user space knows about this interface - let it control the state */
	vcpu->kvm->arch.user_cpu_state_ctrl = 1;

	switch (mp_state->mp_state) {
	case KVM_MP_STATE_STOPPED:
		kvm_s390_vcpu_stop(vcpu);
		break;
	case KVM_MP_STATE_OPERATING:
		kvm_s390_vcpu_start(vcpu);
		break;
	case KVM_MP_STATE_LOAD:
	case KVM_MP_STATE_CHECK_STOP:
		/* fall through - CHECK_STOP and LOAD are not supported yet */
	default:
		rc = -ENXIO;
	}

	return rc;
}

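/*
 * Illustrative userspace sketch (not part of this file, error handling
 * elided): stopping a vcpu through the interface above.  Note that the
 * first KVM_SET_MP_STATE call hands start/stop control to userspace.
 *
 *	struct kvm_mp_state state = { .mp_state = KVM_MP_STATE_STOPPED };
 *
 *	ioctl(vcpu_fd, KVM_SET_MP_STATE, &state);
 *	ioctl(vcpu_fd, KVM_GET_MP_STATE, &state);	// reads it back
 */
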
static bool ibs_enabled(struct kvm_vcpu *vcpu)
{
	return atomic_read(&vcpu->arch.sie_block->cpuflags) & CPUSTAT_IBS;
}

static int kvm_s390_handle_requests(struct kvm_vcpu *vcpu)
{
	if (!vcpu->requests)
		return 0;
retry:
	kvm_s390_vcpu_request_handled(vcpu);
	/*
	 * We use MMU_RELOAD just to re-arm the ipte notifier for the
	 * guest prefix page. gmap_ipte_notify will wait on the ptl lock.
	 * This ensures that the ipte instruction for this request has
	 * already finished. We might race against a second unmapper that
	 * wants to set the blocking bit. Let's just retry the request loop.
	 */
	if (kvm_check_request(KVM_REQ_MMU_RELOAD, vcpu)) {
		int rc;
		rc = gmap_ipte_notify(vcpu->arch.gmap,
				      kvm_s390_get_prefix(vcpu),
				      PAGE_SIZE * 2);
		if (rc)
			return rc;
		goto retry;
	}

	if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu)) {
		vcpu->arch.sie_block->ihcpu = 0xffff;
		goto retry;
	}

	if (kvm_check_request(KVM_REQ_ENABLE_IBS, vcpu)) {
		if (!ibs_enabled(vcpu)) {
			trace_kvm_s390_enable_disable_ibs(vcpu->vcpu_id, 1);
			atomic_set_mask(CPUSTAT_IBS,
					&vcpu->arch.sie_block->cpuflags);
		}
		goto retry;
	}

	if (kvm_check_request(KVM_REQ_DISABLE_IBS, vcpu)) {
		if (ibs_enabled(vcpu)) {
			trace_kvm_s390_enable_disable_ibs(vcpu->vcpu_id, 0);
			atomic_clear_mask(CPUSTAT_IBS,
					  &vcpu->arch.sie_block->cpuflags);
		}
		goto retry;
	}

	/* nothing to do, just clear the request */
	clear_bit(KVM_REQ_UNHALT, &vcpu->requests);

	return 0;
}

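/*
 * A minimal sketch of the producer side of the request loop above, as
 * used elsewhere in this file: a request bit is set and, if needed,
 * the vcpu is kicked out of SIE so that kvm_s390_handle_requests()
 * runs before the next guest entry.
 *
 *	kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);	  // just set the bit
 *	kvm_s390_sync_request(KVM_REQ_DISABLE_IBS, vcpu); // set bit and kick
 */
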
/**
 * kvm_arch_fault_in_page - fault-in guest page if necessary
 * @vcpu: The corresponding virtual cpu
 * @gpa: Guest physical address
 * @writable: Whether the page should be writable or not
 *
 * Make sure that a guest page has been faulted-in on the host.
 *
 * Return: Zero on success, negative error code otherwise.
 */
long kvm_arch_fault_in_page(struct kvm_vcpu *vcpu, gpa_t gpa, int writable)
{
	return gmap_fault(vcpu->arch.gmap, gpa,
			  writable ? FAULT_FLAG_WRITE : 0);
}

static void __kvm_inject_pfault_token(struct kvm_vcpu *vcpu, bool start_token,
				      unsigned long token)
{
	struct kvm_s390_interrupt inti;
	struct kvm_s390_irq irq;

	if (start_token) {
		irq.u.ext.ext_params2 = token;
		irq.type = KVM_S390_INT_PFAULT_INIT;
		WARN_ON_ONCE(kvm_s390_inject_vcpu(vcpu, &irq));
	} else {
		inti.type = KVM_S390_INT_PFAULT_DONE;
		inti.parm64 = token;
		WARN_ON_ONCE(kvm_s390_inject_vm(vcpu->kvm, &inti));
	}
}

void kvm_arch_async_page_not_present(struct kvm_vcpu *vcpu,
				     struct kvm_async_pf *work)
{
	trace_kvm_s390_pfault_init(vcpu, work->arch.pfault_token);
	__kvm_inject_pfault_token(vcpu, true, work->arch.pfault_token);
}

void kvm_arch_async_page_present(struct kvm_vcpu *vcpu,
				 struct kvm_async_pf *work)
{
	trace_kvm_s390_pfault_done(vcpu, work->arch.pfault_token);
	__kvm_inject_pfault_token(vcpu, false, work->arch.pfault_token);
}

void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu,
			       struct kvm_async_pf *work)
{
	/* s390 will always inject the page directly */
}

bool kvm_arch_can_inject_async_page_present(struct kvm_vcpu *vcpu)
{
	/*
	 * s390 will always inject the page directly, but we still
	 * want check_async_completion to clean up.
	 */
	return true;
}

static int kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu)
{
	hva_t hva;
	struct kvm_arch_async_pf arch;
	int rc;

	if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
		return 0;
	if ((vcpu->arch.sie_block->gpsw.mask & vcpu->arch.pfault_select) !=
	    vcpu->arch.pfault_compare)
		return 0;
	if (psw_extint_disabled(vcpu))
		return 0;
	if (kvm_s390_vcpu_has_irq(vcpu, 0))
		return 0;
	if (!(vcpu->arch.sie_block->gcr[0] & 0x200ul))
		return 0;
	if (!vcpu->arch.gmap->pfault_enabled)
		return 0;

	hva = gfn_to_hva(vcpu->kvm, gpa_to_gfn(current->thread.gmap_addr));
	hva += current->thread.gmap_addr & ~PAGE_MASK;
	if (read_guest_real(vcpu, vcpu->arch.pfault_token, &arch.pfault_token, 8))
		return 0;

	rc = kvm_setup_async_pf(vcpu, current->thread.gmap_addr, hva, &arch);
	return rc;
}

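/*
 * For reference, a rough summary of the pfault handshake the functions
 * above participate in (details hedged; see the guest and host pfault
 * code for the authoritative sequence).  The guest registers a token
 * address via DIAG 0x258, and the control-register bit checked above
 * (gcr[0] & 0x200ul) gates delivery:
 *
 *	host page fault  -> inject PFAULT_INIT(token); guest reschedules
 *	host resolves the fault asynchronously in a worker
 *	resolution done  -> inject PFAULT_DONE(token); guest resumes task
 */
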
static int vcpu_pre_run(struct kvm_vcpu *vcpu)
{
	int rc, cpuflags;

	/*
	 * On s390 notifications for arriving pages will be delivered directly
	 * to the guest but the housekeeping for completed pfaults is
	 * handled outside the worker.
	 */
	kvm_check_async_pf_completion(vcpu);

	memcpy(&vcpu->arch.sie_block->gg14, &vcpu->run->s.regs.gprs[14], 16);

	if (need_resched())
		schedule();

	if (test_cpu_flag(CIF_MCCK_PENDING))
		s390_handle_mcck();

	if (!kvm_is_ucontrol(vcpu->kvm)) {
		rc = kvm_s390_deliver_pending_interrupts(vcpu);
		if (rc)
			return rc;
	}

	rc = kvm_s390_handle_requests(vcpu);
	if (rc)
		return rc;

	if (guestdbg_enabled(vcpu)) {
		kvm_s390_backup_guest_per_regs(vcpu);
		kvm_s390_patch_guest_per_regs(vcpu);
	}

	vcpu->arch.sie_block->icptcode = 0;
	cpuflags = atomic_read(&vcpu->arch.sie_block->cpuflags);
	VCPU_EVENT(vcpu, 6, "entering sie flags %x", cpuflags);
	trace_kvm_s390_sie_enter(vcpu, cpuflags);

	return 0;
}

static int vcpu_post_run_fault_in_sie(struct kvm_vcpu *vcpu)
{
	psw_t *psw = &vcpu->arch.sie_block->gpsw;
	u8 opcode;
	int rc;

	VCPU_EVENT(vcpu, 3, "%s", "fault in sie instruction");
	trace_kvm_s390_sie_fault(vcpu);

	/*
	 * We want to inject an addressing exception, which is defined as a
	 * suppressing or terminating exception. However, since we came here
	 * by a DAT access exception, the PSW still points to the faulting
	 * instruction since DAT exceptions are nullifying. So we've got
	 * to look up the current opcode to get the length of the instruction
	 * to be able to forward the PSW.
	 */
	rc = read_guest(vcpu, psw->addr, 0, &opcode, 1);
	if (rc)
		return kvm_s390_inject_prog_cond(vcpu, rc);
	psw->addr = __rewind_psw(*psw, -insn_length(opcode));

	return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
}

static int vcpu_post_run(struct kvm_vcpu *vcpu, int exit_reason)
{
	int rc = -1;

	VCPU_EVENT(vcpu, 6, "exit sie icptcode %d",
		   vcpu->arch.sie_block->icptcode);
	trace_kvm_s390_sie_exit(vcpu, vcpu->arch.sie_block->icptcode);

	if (guestdbg_enabled(vcpu))
		kvm_s390_restore_guest_per_regs(vcpu);

	if (exit_reason >= 0) {
		rc = 0;
	} else if (kvm_is_ucontrol(vcpu->kvm)) {
		vcpu->run->exit_reason = KVM_EXIT_S390_UCONTROL;
		vcpu->run->s390_ucontrol.trans_exc_code =
			current->thread.gmap_addr;
		vcpu->run->s390_ucontrol.pgm_code = 0x10;
		rc = -EREMOTE;

	} else if (current->thread.gmap_pfault) {
		trace_kvm_s390_major_guest_pfault(vcpu);
		current->thread.gmap_pfault = 0;
		if (kvm_arch_setup_async_pf(vcpu)) {
			rc = 0;
		} else {
			gpa_t gpa = current->thread.gmap_addr;
			rc = kvm_arch_fault_in_page(vcpu, gpa, 1);
		}
	}

	if (rc == -1)
		rc = vcpu_post_run_fault_in_sie(vcpu);

	memcpy(&vcpu->run->s.regs.gprs[14], &vcpu->arch.sie_block->gg14, 16);

	if (rc == 0) {
		if (kvm_is_ucontrol(vcpu->kvm))
			/* Don't exit for host interrupts. */
			rc = vcpu->arch.sie_block->icptcode ? -EOPNOTSUPP : 0;
		else
			rc = kvm_handle_sie_intercept(vcpu);
	}

	return rc;
}

static int __vcpu_run(struct kvm_vcpu *vcpu)
{
	int rc, exit_reason;

	/*
	 * We try to hold kvm->srcu during most of vcpu_run (except when
	 * running the guest), so that memslots (and other stuff) are
	 * protected.
	 */
	vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);

	do {
		rc = vcpu_pre_run(vcpu);
		if (rc)
			break;

		srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
		/*
		 * As PF_VCPU will be used in the fault handler, there
		 * should be no uaccess between guest_enter and guest_exit.
		 */
		local_irq_disable();
		__kvm_guest_enter();
		local_irq_enable();
		exit_reason = sie64a(vcpu->arch.sie_block,
				     vcpu->run->s.regs.gprs);
		local_irq_disable();
		__kvm_guest_exit();
		local_irq_enable();
		vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);

		rc = vcpu_post_run(vcpu, exit_reason);
	} while (!signal_pending(current) && !guestdbg_exit_pending(vcpu) && !rc);

	srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
	return rc;
}

static void sync_regs(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	vcpu->arch.sie_block->gpsw.mask = kvm_run->psw_mask;
	vcpu->arch.sie_block->gpsw.addr = kvm_run->psw_addr;
	if (kvm_run->kvm_dirty_regs & KVM_SYNC_PREFIX)
		kvm_s390_set_prefix(vcpu, kvm_run->s.regs.prefix);
	if (kvm_run->kvm_dirty_regs & KVM_SYNC_CRS) {
		memcpy(&vcpu->arch.sie_block->gcr, &kvm_run->s.regs.crs, 128);
		/* some control register changes require a tlb flush */
		kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
	}
	if (kvm_run->kvm_dirty_regs & KVM_SYNC_ARCH0) {
		vcpu->arch.sie_block->cputm = kvm_run->s.regs.cputm;
		vcpu->arch.sie_block->ckc = kvm_run->s.regs.ckc;
		vcpu->arch.sie_block->todpr = kvm_run->s.regs.todpr;
		vcpu->arch.sie_block->pp = kvm_run->s.regs.pp;
		vcpu->arch.sie_block->gbea = kvm_run->s.regs.gbea;
	}
	if (kvm_run->kvm_dirty_regs & KVM_SYNC_PFAULT) {
		vcpu->arch.pfault_token = kvm_run->s.regs.pft;
		vcpu->arch.pfault_select = kvm_run->s.regs.pfs;
		vcpu->arch.pfault_compare = kvm_run->s.regs.pfc;
		if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
			kvm_clear_async_pf_completion_queue(vcpu);
	}
	kvm_run->kvm_dirty_regs = 0;
}

static void store_regs(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	kvm_run->psw_mask = vcpu->arch.sie_block->gpsw.mask;
	kvm_run->psw_addr = vcpu->arch.sie_block->gpsw.addr;
	kvm_run->s.regs.prefix = kvm_s390_get_prefix(vcpu);
	memcpy(&kvm_run->s.regs.crs, &vcpu->arch.sie_block->gcr, 128);
	kvm_run->s.regs.cputm = vcpu->arch.sie_block->cputm;
	kvm_run->s.regs.ckc = vcpu->arch.sie_block->ckc;
	kvm_run->s.regs.todpr = vcpu->arch.sie_block->todpr;
	kvm_run->s.regs.pp = vcpu->arch.sie_block->pp;
	kvm_run->s.regs.gbea = vcpu->arch.sie_block->gbea;
	kvm_run->s.regs.pft = vcpu->arch.pfault_token;
	kvm_run->s.regs.pfs = vcpu->arch.pfault_select;
	kvm_run->s.regs.pfc = vcpu->arch.pfault_compare;
}

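/*
 * Illustrative userspace sketch (not part of this file): the register
 * synchronization above saves GET/SET_ONE_REG round trips.  Userspace
 * edits the shared kvm_run area and flags what changed; "run" is
 * assumed to be the mmap()ed kvm_run structure of a vcpu fd.
 *
 *	run->s.regs.prefix = new_prefix;	// new_prefix: assumed value
 *	run->kvm_dirty_regs |= KVM_SYNC_PREFIX;
 *	ioctl(vcpu_fd, KVM_RUN, 0);		// sync_regs() picks it up
 */
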
int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	int rc;
	sigset_t sigsaved;

	if (guestdbg_exit_pending(vcpu)) {
		kvm_s390_prepare_debug_exit(vcpu);
		return 0;
	}

	if (vcpu->sigset_active)
		sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);

	if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm)) {
		kvm_s390_vcpu_start(vcpu);
	} else if (is_vcpu_stopped(vcpu)) {
		pr_err_ratelimited("can't run stopped vcpu %d\n",
				   vcpu->vcpu_id);
		return -EINVAL;
	}

	sync_regs(vcpu, kvm_run);

	might_fault();
	rc = __vcpu_run(vcpu);

	if (signal_pending(current) && !rc) {
		kvm_run->exit_reason = KVM_EXIT_INTR;
		rc = -EINTR;
	}

	if (guestdbg_exit_pending(vcpu) && !rc) {
		kvm_s390_prepare_debug_exit(vcpu);
		rc = 0;
	}

	if (rc == -EOPNOTSUPP) {
		/* intercept cannot be handled in-kernel, prepare kvm-run */
		kvm_run->exit_reason = KVM_EXIT_S390_SIEIC;
		kvm_run->s390_sieic.icptcode = vcpu->arch.sie_block->icptcode;
		kvm_run->s390_sieic.ipa = vcpu->arch.sie_block->ipa;
		kvm_run->s390_sieic.ipb = vcpu->arch.sie_block->ipb;
		rc = 0;
	}

	if (rc == -EREMOTE) {
		/*
		 * Intercept was handled, but userspace support is needed;
		 * kvm_run has been prepared by the handler.
		 */
		rc = 0;
	}

	store_regs(vcpu, kvm_run);

	if (vcpu->sigset_active)
		sigprocmask(SIG_SETMASK, &sigsaved, NULL);

	vcpu->stat.exit_userspace++;
	return rc;
}

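/*
 * Illustrative userspace sketch (not part of this file, error handling
 * elided): the canonical run loop that drives the ioctl above.  "run"
 * is assumed to be the mmap()ed kvm_run area of the vcpu fd.
 *
 *	for (;;) {
 *		ioctl(vcpu_fd, KVM_RUN, 0);
 *		switch (run->exit_reason) {
 *		case KVM_EXIT_S390_SIEIC:
 *			// emulate the intercepted instruction in userspace
 *			break;
 *		case KVM_EXIT_INTR:
 *			continue;	// interrupted by a signal
 *		}
 *	}
 */
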
/*
 * store status at address
 * we have two special cases:
 * KVM_S390_STORE_STATUS_NOADDR: -> 0x1200 on 64 bit
 * KVM_S390_STORE_STATUS_PREFIXED: -> prefix
 */
int kvm_s390_store_status_unloaded(struct kvm_vcpu *vcpu, unsigned long gpa)
{
	unsigned char archmode = 1;
	unsigned int px;
	u64 clkcomp;
	int rc;

	if (gpa == KVM_S390_STORE_STATUS_NOADDR) {
		if (write_guest_abs(vcpu, 163, &archmode, 1))
			return -EFAULT;
		gpa = SAVE_AREA_BASE;
	} else if (gpa == KVM_S390_STORE_STATUS_PREFIXED) {
		if (write_guest_real(vcpu, 163, &archmode, 1))
			return -EFAULT;
		gpa = kvm_s390_real_to_abs(vcpu, SAVE_AREA_BASE);
	}
	rc = write_guest_abs(vcpu, gpa + offsetof(struct save_area, fp_regs),
			     vcpu->arch.guest_fpregs.fprs, 128);
	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, gp_regs),
			      vcpu->run->s.regs.gprs, 128);
	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, psw),
			      &vcpu->arch.sie_block->gpsw, 16);
	px = kvm_s390_get_prefix(vcpu);
	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, pref_reg),
			      &px, 4);
	rc |= write_guest_abs(vcpu,
			      gpa + offsetof(struct save_area, fp_ctrl_reg),
			      &vcpu->arch.guest_fpregs.fpc, 4);
	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, tod_reg),
			      &vcpu->arch.sie_block->todpr, 4);
	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, timer),
			      &vcpu->arch.sie_block->cputm, 8);
	clkcomp = vcpu->arch.sie_block->ckc >> 8;
	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, clk_cmp),
			      &clkcomp, 8);
	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, acc_regs),
			      &vcpu->run->s.regs.acrs, 64);
	rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, ctrl_regs),
			      &vcpu->arch.sie_block->gcr, 128);
	return rc ? -EFAULT : 0;
}

int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
{
	/*
	 * The guest FPRS and ACRS are in the host FPRS/ACRS due to the lazy
	 * copying in vcpu load/put. Let's update our copies before we save
	 * them into the save area.
	 */
	save_fp_ctl(&vcpu->arch.guest_fpregs.fpc);
	save_fp_regs(vcpu->arch.guest_fpregs.fprs);
	save_access_regs(vcpu->run->s.regs.acrs);

	return kvm_s390_store_status_unloaded(vcpu, addr);
}

/*
 * store additional status at address
 */
int kvm_s390_store_adtl_status_unloaded(struct kvm_vcpu *vcpu,
					unsigned long gpa)
{
	/* Only bits 0-53 are used for address formation */
	if (!(gpa & ~0x3ff))
		return 0;

	return write_guest_abs(vcpu, gpa & ~0x3ff,
			       (void *)&vcpu->run->s.regs.vrs, 512);
}

int kvm_s390_vcpu_store_adtl_status(struct kvm_vcpu *vcpu, unsigned long addr)
{
	if (!test_kvm_facility(vcpu->kvm, 129))
		return 0;

	/*
	 * The guest VXRS are in the host VXRS due to the lazy
	 * copying in vcpu load/put. Let's update our copies before we save
	 * them into the save area.
	 */
	save_vx_regs((__vector128 *)&vcpu->run->s.regs.vrs);

	return kvm_s390_store_adtl_status_unloaded(vcpu, addr);
}

static void __disable_ibs_on_vcpu(struct kvm_vcpu *vcpu)
{
	kvm_check_request(KVM_REQ_ENABLE_IBS, vcpu);
	kvm_s390_sync_request(KVM_REQ_DISABLE_IBS, vcpu);
}

static void __disable_ibs_on_all_vcpus(struct kvm *kvm)
{
	unsigned int i;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(i, vcpu, kvm) {
		__disable_ibs_on_vcpu(vcpu);
	}
}

static void __enable_ibs_on_vcpu(struct kvm_vcpu *vcpu)
{
	kvm_check_request(KVM_REQ_DISABLE_IBS, vcpu);
	kvm_s390_sync_request(KVM_REQ_ENABLE_IBS, vcpu);
}

void kvm_s390_vcpu_start(struct kvm_vcpu *vcpu)
{
	int i, online_vcpus, started_vcpus = 0;

	if (!is_vcpu_stopped(vcpu))
		return;

	trace_kvm_s390_vcpu_start_stop(vcpu->vcpu_id, 1);
	/* Only one cpu at a time may enter/leave the STOPPED state. */
	spin_lock(&vcpu->kvm->arch.start_stop_lock);
	online_vcpus = atomic_read(&vcpu->kvm->online_vcpus);

	for (i = 0; i < online_vcpus; i++) {
		if (!is_vcpu_stopped(vcpu->kvm->vcpus[i]))
			started_vcpus++;
	}

	if (started_vcpus == 0) {
		/* we're the only active VCPU -> speed it up */
		__enable_ibs_on_vcpu(vcpu);
	} else if (started_vcpus == 1) {
		/*
		 * As we are starting a second VCPU, we have to disable
		 * the IBS facility on all VCPUs to remove potentially
		 * outstanding ENABLE requests.
		 */
		__disable_ibs_on_all_vcpus(vcpu->kvm);
	}

	atomic_clear_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);
	/*
	 * Another VCPU might have used IBS while we were offline.
	 * Let's play safe and flush the VCPU at startup.
	 */
	kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
	spin_unlock(&vcpu->kvm->arch.start_stop_lock);
	return;
}

void kvm_s390_vcpu_stop(struct kvm_vcpu *vcpu)
{
	int i, online_vcpus, started_vcpus = 0;
	struct kvm_vcpu *started_vcpu = NULL;

	if (is_vcpu_stopped(vcpu))
		return;

	trace_kvm_s390_vcpu_start_stop(vcpu->vcpu_id, 0);
	/* Only one cpu at a time may enter/leave the STOPPED state. */
	spin_lock(&vcpu->kvm->arch.start_stop_lock);
	online_vcpus = atomic_read(&vcpu->kvm->online_vcpus);

	/* SIGP STOP and SIGP STOP AND STORE STATUS have been fully processed */
	kvm_s390_clear_stop_irq(vcpu);

	atomic_set_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);
	__disable_ibs_on_vcpu(vcpu);

	for (i = 0; i < online_vcpus; i++) {
		if (!is_vcpu_stopped(vcpu->kvm->vcpus[i])) {
			started_vcpus++;
			started_vcpu = vcpu->kvm->vcpus[i];
		}
	}

	if (started_vcpus == 1) {
		/*
		 * As we only have one VCPU left, we want to enable the
		 * IBS facility for that VCPU to speed it up.
		 */
		__enable_ibs_on_vcpu(started_vcpu);
	}

	spin_unlock(&vcpu->kvm->arch.start_stop_lock);
	return;
}

static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
				     struct kvm_enable_cap *cap)
{
	int r;

	if (cap->flags)
		return -EINVAL;

	switch (cap->cap) {
	case KVM_CAP_S390_CSS_SUPPORT:
		if (!vcpu->kvm->arch.css_support) {
			vcpu->kvm->arch.css_support = 1;
			VM_EVENT(vcpu->kvm, 3, "%s", "ENABLE: CSS support");
			trace_kvm_s390_enable_css(vcpu->kvm);
		}
		r = 0;
		break;
	default:
		r = -EINVAL;
		break;
	}
	return r;
}

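/*
 * Illustrative userspace sketch (not part of this file, error handling
 * elided): enabling the CSS support capability handled above.
 *
 *	struct kvm_enable_cap cap = {
 *		.cap = KVM_CAP_S390_CSS_SUPPORT,
 *	};
 *
 *	ioctl(vcpu_fd, KVM_ENABLE_CAP, &cap);
 */
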
static long kvm_s390_guest_mem_op(struct kvm_vcpu *vcpu,
				  struct kvm_s390_mem_op *mop)
{
	void __user *uaddr = (void __user *)mop->buf;
	void *tmpbuf = NULL;
	int r, srcu_idx;
	const u64 supported_flags = KVM_S390_MEMOP_F_INJECT_EXCEPTION
				    | KVM_S390_MEMOP_F_CHECK_ONLY;

	if (mop->flags & ~supported_flags)
		return -EINVAL;

	if (mop->size > MEM_OP_MAX_SIZE)
		return -E2BIG;

	if (!(mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY)) {
		tmpbuf = vmalloc(mop->size);
		if (!tmpbuf)
			return -ENOMEM;
	}

	srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);

	switch (mop->op) {
	case KVM_S390_MEMOP_LOGICAL_READ:
		if (mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY) {
			r = check_gva_range(vcpu, mop->gaddr, mop->ar, mop->size, false);
			break;
		}
		r = read_guest(vcpu, mop->gaddr, mop->ar, tmpbuf, mop->size);
		if (r == 0) {
			if (copy_to_user(uaddr, tmpbuf, mop->size))
				r = -EFAULT;
		}
		break;
	case KVM_S390_MEMOP_LOGICAL_WRITE:
		if (mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY) {
			r = check_gva_range(vcpu, mop->gaddr, mop->ar, mop->size, true);
			break;
		}
		if (copy_from_user(tmpbuf, uaddr, mop->size)) {
			r = -EFAULT;
			break;
		}
		r = write_guest(vcpu, mop->gaddr, mop->ar, tmpbuf, mop->size);
		break;
	default:
		r = -EINVAL;
	}

	srcu_read_unlock(&vcpu->kvm->srcu, srcu_idx);

	if (r > 0 && (mop->flags & KVM_S390_MEMOP_F_INJECT_EXCEPTION) != 0)
		kvm_s390_inject_prog_irq(vcpu, &vcpu->arch.pgm);

	vfree(tmpbuf);
	return r;
}

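/*
 * Illustrative userspace sketch (not part of this file, error handling
 * elided): reading 256 bytes from a guest logical address through the
 * handler above.  The address and access register are example values.
 *
 *	unsigned char buf[256];
 *	struct kvm_s390_mem_op mop = {
 *		.gaddr = 0x2000,		// assumed guest logical address
 *		.buf = (unsigned long)buf,
 *		.size = sizeof(buf),
 *		.op = KVM_S390_MEMOP_LOGICAL_READ,
 *		.ar = 0,			// access register 0
 *	};
 *
 *	ioctl(vcpu_fd, KVM_S390_MEM_OP, &mop);
 */
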
long kvm_arch_vcpu_ioctl(struct file *filp,
			 unsigned int ioctl, unsigned long arg)
{
	struct kvm_vcpu *vcpu = filp->private_data;
	void __user *argp = (void __user *)arg;
	int idx;
	long r;

	switch (ioctl) {
	case KVM_S390_IRQ: {
		struct kvm_s390_irq s390irq;

		r = -EFAULT;
		if (copy_from_user(&s390irq, argp, sizeof(s390irq)))
			break;
		r = kvm_s390_inject_vcpu(vcpu, &s390irq);
		break;
	}
	case KVM_S390_INTERRUPT: {
		struct kvm_s390_interrupt s390int;
		struct kvm_s390_irq s390irq;

		r = -EFAULT;
		if (copy_from_user(&s390int, argp, sizeof(s390int)))
			break;
		if (s390int_to_s390irq(&s390int, &s390irq))
			return -EINVAL;
		r = kvm_s390_inject_vcpu(vcpu, &s390irq);
		break;
	}
	case KVM_S390_STORE_STATUS:
		idx = srcu_read_lock(&vcpu->kvm->srcu);
		r = kvm_s390_vcpu_store_status(vcpu, arg);
		srcu_read_unlock(&vcpu->kvm->srcu, idx);
		break;
	case KVM_S390_SET_INITIAL_PSW: {
		psw_t psw;

		r = -EFAULT;
		if (copy_from_user(&psw, argp, sizeof(psw)))
			break;
		r = kvm_arch_vcpu_ioctl_set_initial_psw(vcpu, psw);
		break;
	}
	case KVM_S390_INITIAL_RESET:
		r = kvm_arch_vcpu_ioctl_initial_reset(vcpu);
		break;
	case KVM_SET_ONE_REG:
	case KVM_GET_ONE_REG: {
		struct kvm_one_reg reg;
		r = -EFAULT;
		if (copy_from_user(&reg, argp, sizeof(reg)))
			break;
		if (ioctl == KVM_SET_ONE_REG)
			r = kvm_arch_vcpu_ioctl_set_one_reg(vcpu, &reg);
		else
			r = kvm_arch_vcpu_ioctl_get_one_reg(vcpu, &reg);
		break;
	}
#ifdef CONFIG_KVM_S390_UCONTROL
	case KVM_S390_UCAS_MAP: {
		struct kvm_s390_ucas_mapping ucasmap;

		if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
			r = -EFAULT;
			break;
		}

		if (!kvm_is_ucontrol(vcpu->kvm)) {
			r = -EINVAL;
			break;
		}

		r = gmap_map_segment(vcpu->arch.gmap, ucasmap.user_addr,
				     ucasmap.vcpu_addr, ucasmap.length);
		break;
	}
	case KVM_S390_UCAS_UNMAP: {
		struct kvm_s390_ucas_mapping ucasmap;

		if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
			r = -EFAULT;
			break;
		}

		if (!kvm_is_ucontrol(vcpu->kvm)) {
			r = -EINVAL;
			break;
		}

		r = gmap_unmap_segment(vcpu->arch.gmap, ucasmap.vcpu_addr,
				       ucasmap.length);
		break;
	}
#endif
	case KVM_S390_VCPU_FAULT: {
		r = gmap_fault(vcpu->arch.gmap, arg, 0);
		break;
	}
	case KVM_ENABLE_CAP:
	{
		struct kvm_enable_cap cap;
		r = -EFAULT;
		if (copy_from_user(&cap, argp, sizeof(cap)))
			break;
		r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap);
		break;
	}
	case KVM_S390_MEM_OP: {
		struct kvm_s390_mem_op mem_op;

		if (copy_from_user(&mem_op, argp, sizeof(mem_op)) == 0)
			r = kvm_s390_guest_mem_op(vcpu, &mem_op);
		else
			r = -EFAULT;
		break;
	}
	case KVM_S390_SET_IRQ_STATE: {
		struct kvm_s390_irq_state irq_state;

		r = -EFAULT;
		if (copy_from_user(&irq_state, argp, sizeof(irq_state)))
			break;
		if (irq_state.len > VCPU_IRQS_MAX_BUF ||
		    irq_state.len == 0 ||
		    irq_state.len % sizeof(struct kvm_s390_irq) > 0) {
			r = -EINVAL;
			break;
		}
		r = kvm_s390_set_irq_state(vcpu,
					   (void __user *) irq_state.buf,
					   irq_state.len);
		break;
	}
	case KVM_S390_GET_IRQ_STATE: {
		struct kvm_s390_irq_state irq_state;

		r = -EFAULT;
		if (copy_from_user(&irq_state, argp, sizeof(irq_state)))
			break;
		if (irq_state.len == 0) {
			r = -EINVAL;
			break;
		}
		r = kvm_s390_get_irq_state(vcpu,
					   (__u8 __user *) irq_state.buf,
					   irq_state.len);
		break;
	}
	default:
		r = -ENOTTY;
	}
	return r;
}

int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
{
#ifdef CONFIG_KVM_S390_UCONTROL
	if ((vmf->pgoff == KVM_S390_SIE_PAGE_OFFSET)
		 && (kvm_is_ucontrol(vcpu->kvm))) {
		vmf->page = virt_to_page(vcpu->arch.sie_block);
		get_page(vmf->page);
		return 0;
	}
#endif
	return VM_FAULT_SIGBUS;
}

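/*
 * Illustrative userspace sketch (not part of this file): for ucontrol
 * VMs the fault handler above exposes the SIE control block, which
 * userspace can map at the documented page offset (sketch assumes 4K
 * pages; error handling elided).
 *
 *	void *sie_block = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
 *			       MAP_SHARED, vcpu_fd,
 *			       KVM_S390_SIE_PAGE_OFFSET * 4096);
 */
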
int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
			    unsigned long npages)
{
	return 0;
}

/* Section: memory related */
int kvm_arch_prepare_memory_region(struct kvm *kvm,
				   struct kvm_memory_slot *memslot,
				   const struct kvm_userspace_memory_region *mem,
				   enum kvm_mr_change change)
{
	/*
	 * A few sanity checks. Memory slots have to start and end on a
	 * segment boundary (1MB). The memory in userland may be fragmented
	 * into various different vmas. It is okay to mmap() and munmap()
	 * stuff in this slot after doing this call at any time.
	 */

	if (mem->userspace_addr & 0xffffful)
		return -EINVAL;

	if (mem->memory_size & 0xffffful)
		return -EINVAL;

	return 0;
}

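/*
 * Illustrative userspace sketch (not part of this file, error handling
 * elided): registering guest memory that satisfies the 1MB alignment
 * checks above.  "vm_fd" is assumed to come from KVM_CREATE_VM and
 * "backing" to be a 1MB-aligned mmap()ed buffer.
 *
 *	struct kvm_userspace_memory_region mem = {
 *		.slot = 0,
 *		.guest_phys_addr = 0,
 *		.memory_size = 256 << 20,		// multiple of 1MB
 *		.userspace_addr = (unsigned long)backing,
 *	};
 *
 *	ioctl(vm_fd, KVM_SET_USER_MEMORY_REGION, &mem);
 */
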
void kvm_arch_commit_memory_region(struct kvm *kvm,
				   const struct kvm_userspace_memory_region *mem,
				   const struct kvm_memory_slot *old,
				   const struct kvm_memory_slot *new,
				   enum kvm_mr_change change)
{
	int rc;

	/*
	 * If the basics of the memslot do not change, we do not want
	 * to update the gmap. Every update causes several unnecessary
	 * segment translation exceptions. This is usually handled just
	 * fine by the normal fault handler + gmap, but it will also
	 * cause faults on the prefix page of running guest CPUs.
	 */
	if (old->userspace_addr == mem->userspace_addr &&
	    old->base_gfn * PAGE_SIZE == mem->guest_phys_addr &&
	    old->npages * PAGE_SIZE == mem->memory_size)
		return;

	rc = gmap_map_segment(kvm->arch.gmap, mem->userspace_addr,
			      mem->guest_phys_addr, mem->memory_size);
	if (rc)
		pr_warn("failed to commit memory region\n");
	return;
}

static int __init kvm_s390_init(void)
{
	return kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
}

static void __exit kvm_s390_exit(void)
{
	kvm_exit();
}

module_init(kvm_s390_init);
module_exit(kvm_s390_exit);

/*
 * Enable autoloading of the kvm module.
 * Note that we add the module alias here instead of virt/kvm/kvm_main.c
 * since x86 takes a different approach.
 */
#include <linux/miscdevice.h>
MODULE_ALIAS_MISCDEV(KVM_MINOR);
MODULE_ALIAS("devname:kvm");