/*
 * hosting zSeries kernel virtual machines
 *
 * Copyright IBM Corp. 2008, 2009
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (version 2 only)
 * as published by the Free Software Foundation.
 *
 *    Author(s): Carsten Otte <cotte@de.ibm.com>
 *               Christian Borntraeger <borntraeger@de.ibm.com>
 *               Heiko Carstens <heiko.carstens@de.ibm.com>
 *               Christian Ehrhardt <ehrhardt@de.ibm.com>
 *               Jason J. Herne <jjherne@us.ibm.com>
 */

#include <linux/compiler.h>
#include <linux/err.h>
#include <linux/fs.h>
#include <linux/hrtimer.h>
#include <linux/init.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/timer.h>
#include <linux/vmalloc.h>
#include <asm/asm-offsets.h>
#include <asm/lowcore.h>
#include <asm/etr.h>
#include <asm/pgtable.h>
#include <asm/nmi.h>
#include <asm/switch_to.h>
#include <asm/isc.h>
#include <asm/sclp.h>
#include "kvm-s390.h"
#include "gaccess.h"

#define KMSG_COMPONENT "kvm-s390"
#undef pr_fmt
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#define CREATE_TRACE_POINTS
#include "trace.h"
#include "trace-s390.h"

#define MEM_OP_MAX_SIZE 65536   /* Maximum transfer size for KVM_S390_MEM_OP */
#define LOCAL_IRQS 32
#define VCPU_IRQS_MAX_BUF (sizeof(struct kvm_s390_irq) * \
                           (KVM_MAX_VCPUS + LOCAL_IRQS))

#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU

struct kvm_stats_debugfs_item debugfs_entries[] = {
        { "userspace_handled", VCPU_STAT(exit_userspace) },
        { "exit_null", VCPU_STAT(exit_null) },
        { "exit_validity", VCPU_STAT(exit_validity) },
        { "exit_stop_request", VCPU_STAT(exit_stop_request) },
        { "exit_external_request", VCPU_STAT(exit_external_request) },
        { "exit_external_interrupt", VCPU_STAT(exit_external_interrupt) },
        { "exit_instruction", VCPU_STAT(exit_instruction) },
        { "exit_program_interruption", VCPU_STAT(exit_program_interruption) },
        { "exit_instr_and_program_int", VCPU_STAT(exit_instr_and_program) },
        { "halt_successful_poll", VCPU_STAT(halt_successful_poll) },
        { "halt_attempted_poll", VCPU_STAT(halt_attempted_poll) },
        { "halt_wakeup", VCPU_STAT(halt_wakeup) },
        { "instruction_lctlg", VCPU_STAT(instruction_lctlg) },
        { "instruction_lctl", VCPU_STAT(instruction_lctl) },
        { "instruction_stctl", VCPU_STAT(instruction_stctl) },
        { "instruction_stctg", VCPU_STAT(instruction_stctg) },
        { "deliver_emergency_signal", VCPU_STAT(deliver_emergency_signal) },
        { "deliver_external_call", VCPU_STAT(deliver_external_call) },
        { "deliver_service_signal", VCPU_STAT(deliver_service_signal) },
        { "deliver_virtio_interrupt", VCPU_STAT(deliver_virtio_interrupt) },
        { "deliver_stop_signal", VCPU_STAT(deliver_stop_signal) },
        { "deliver_prefix_signal", VCPU_STAT(deliver_prefix_signal) },
        { "deliver_restart_signal", VCPU_STAT(deliver_restart_signal) },
        { "deliver_program_interruption", VCPU_STAT(deliver_program_int) },
        { "exit_wait_state", VCPU_STAT(exit_wait_state) },
        { "instruction_pfmf", VCPU_STAT(instruction_pfmf) },
        { "instruction_stidp", VCPU_STAT(instruction_stidp) },
        { "instruction_spx", VCPU_STAT(instruction_spx) },
        { "instruction_stpx", VCPU_STAT(instruction_stpx) },
        { "instruction_stap", VCPU_STAT(instruction_stap) },
        { "instruction_storage_key", VCPU_STAT(instruction_storage_key) },
        { "instruction_ipte_interlock", VCPU_STAT(instruction_ipte_interlock) },
        { "instruction_stsch", VCPU_STAT(instruction_stsch) },
        { "instruction_chsc", VCPU_STAT(instruction_chsc) },
        { "instruction_essa", VCPU_STAT(instruction_essa) },
        { "instruction_stsi", VCPU_STAT(instruction_stsi) },
        { "instruction_stfl", VCPU_STAT(instruction_stfl) },
        { "instruction_tprot", VCPU_STAT(instruction_tprot) },
        { "instruction_sigp_sense", VCPU_STAT(instruction_sigp_sense) },
        { "instruction_sigp_sense_running", VCPU_STAT(instruction_sigp_sense_running) },
        { "instruction_sigp_external_call", VCPU_STAT(instruction_sigp_external_call) },
        { "instruction_sigp_emergency", VCPU_STAT(instruction_sigp_emergency) },
        { "instruction_sigp_cond_emergency", VCPU_STAT(instruction_sigp_cond_emergency) },
        { "instruction_sigp_start", VCPU_STAT(instruction_sigp_start) },
        { "instruction_sigp_stop", VCPU_STAT(instruction_sigp_stop) },
        { "instruction_sigp_stop_store_status", VCPU_STAT(instruction_sigp_stop_store_status) },
        { "instruction_sigp_store_status", VCPU_STAT(instruction_sigp_store_status) },
        { "instruction_sigp_store_adtl_status", VCPU_STAT(instruction_sigp_store_adtl_status) },
        { "instruction_sigp_set_arch", VCPU_STAT(instruction_sigp_arch) },
        { "instruction_sigp_set_prefix", VCPU_STAT(instruction_sigp_prefix) },
        { "instruction_sigp_restart", VCPU_STAT(instruction_sigp_restart) },
        { "instruction_sigp_cpu_reset", VCPU_STAT(instruction_sigp_cpu_reset) },
        { "instruction_sigp_init_cpu_reset", VCPU_STAT(instruction_sigp_init_cpu_reset) },
        { "instruction_sigp_unknown", VCPU_STAT(instruction_sigp_unknown) },
        { "diagnose_10", VCPU_STAT(diagnose_10) },
        { "diagnose_44", VCPU_STAT(diagnose_44) },
        { "diagnose_9c", VCPU_STAT(diagnose_9c) },
        { "diagnose_258", VCPU_STAT(diagnose_258) },
        { "diagnose_308", VCPU_STAT(diagnose_308) },
        { "diagnose_500", VCPU_STAT(diagnose_500) },
        { NULL }
};

/* upper facilities limit for kvm */
unsigned long kvm_s390_fac_list_mask[] = {
        0xffe6fffbfcfdfc40UL,
        0x005e800000000000UL,
};

unsigned long kvm_s390_fac_list_mask_size(void)
{
        BUILD_BUG_ON(ARRAY_SIZE(kvm_s390_fac_list_mask) > S390_ARCH_FAC_MASK_SIZE_U64);
        return ARRAY_SIZE(kvm_s390_fac_list_mask);
}

static struct gmap_notifier gmap_notifier;
debug_info_t *kvm_s390_dbf;

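/*
 * Illustrative sketch (not from the original source): facility bit n
 * lives in 64-bit word n / 64 of a facility list, counted from the most
 * significant bit of each word, so a hypothetical helper to test a bit
 * in the mask above could look like:
 *
 *      static inline int fac_bit_set(unsigned long *mask, int nr)
 *      {
 *              return (mask[nr >> 6] >> (63 - (nr & 63))) & 1;
 *      }
 *
 * e.g. facility 76 (MSA-3, checked via test_kvm_facility(kvm, 76)
 * further down) sits in word 1 at bit position 12 from the left.
 */
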
/* Section: not file related */
int kvm_arch_hardware_enable(void)
{
        /* every s390 is virtualization enabled ;-) */
        return 0;
}

static void kvm_gmap_notifier(struct gmap *gmap, unsigned long address);

/*
 * This callback is executed during stop_machine(). All CPUs are therefore
 * temporarily stopped. In order not to change guest behavior, we have to
 * disable preemption whenever we touch the epoch of kvm and the VCPUs,
 * so a CPU won't be stopped while calculating with the epoch.
 */
static int kvm_clock_sync(struct notifier_block *notifier, unsigned long val,
                          void *v)
{
        struct kvm *kvm;
        struct kvm_vcpu *vcpu;
        int i;
        unsigned long long *delta = v;

        list_for_each_entry(kvm, &vm_list, vm_list) {
                kvm->arch.epoch -= *delta;
                kvm_for_each_vcpu(i, vcpu, kvm) {
                        vcpu->arch.sie_block->epoch -= *delta;
                }
        }
        return NOTIFY_OK;
}

static struct notifier_block kvm_clock_notifier = {
        .notifier_call = kvm_clock_sync,
};
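
/*
 * Note added for clarity (not part of the original file): the guest TOD
 * clock is derived as
 *
 *      guest_tod = host_tod + epoch
 *
 * so when stop_machine() applies a clock delta to the host TOD, the
 * callback above subtracts the same delta from every epoch field and
 * the guest-visible clock value stays unchanged.
 */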

int kvm_arch_hardware_setup(void)
{
        gmap_notifier.notifier_call = kvm_gmap_notifier;
        gmap_register_ipte_notifier(&gmap_notifier);
        atomic_notifier_chain_register(&s390_epoch_delta_notifier,
                                       &kvm_clock_notifier);
        return 0;
}

void kvm_arch_hardware_unsetup(void)
{
        gmap_unregister_ipte_notifier(&gmap_notifier);
        atomic_notifier_chain_unregister(&s390_epoch_delta_notifier,
                                         &kvm_clock_notifier);
}

int kvm_arch_init(void *opaque)
{
        kvm_s390_dbf = debug_register("kvm-trace", 32, 1, 7 * sizeof(long));
        if (!kvm_s390_dbf)
                return -ENOMEM;

        if (debug_register_view(kvm_s390_dbf, &debug_sprintf_view)) {
                debug_unregister(kvm_s390_dbf);
                return -ENOMEM;
        }

        /* Register floating interrupt controller interface. */
        return kvm_register_device_ops(&kvm_flic_ops, KVM_DEV_TYPE_FLIC);
}

void kvm_arch_exit(void)
{
        debug_unregister(kvm_s390_dbf);
}

/* Section: device related */
long kvm_arch_dev_ioctl(struct file *filp,
                        unsigned int ioctl, unsigned long arg)
{
        if (ioctl == KVM_S390_ENABLE_SIE)
                return s390_enable_sie();
        return -EINVAL;
}
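
/*
 * Illustrative userspace sketch (an assumption for this note, not code
 * from this file): the device ioctl above is issued on the /dev/kvm fd,
 * e.g.
 *
 *      int kvm_fd = open("/dev/kvm", O_RDWR);
 *      ioctl(kvm_fd, KVM_S390_ENABLE_SIE, 0);
 */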

int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
{
        int r;

        switch (ext) {
        case KVM_CAP_S390_PSW:
        case KVM_CAP_S390_GMAP:
        case KVM_CAP_SYNC_MMU:
#ifdef CONFIG_KVM_S390_UCONTROL
        case KVM_CAP_S390_UCONTROL:
#endif
        case KVM_CAP_ASYNC_PF:
        case KVM_CAP_SYNC_REGS:
        case KVM_CAP_ONE_REG:
        case KVM_CAP_ENABLE_CAP:
        case KVM_CAP_S390_CSS_SUPPORT:
        case KVM_CAP_IOEVENTFD:
        case KVM_CAP_DEVICE_CTRL:
        case KVM_CAP_ENABLE_CAP_VM:
        case KVM_CAP_S390_IRQCHIP:
        case KVM_CAP_VM_ATTRIBUTES:
        case KVM_CAP_MP_STATE:
        case KVM_CAP_S390_INJECT_IRQ:
        case KVM_CAP_S390_USER_SIGP:
        case KVM_CAP_S390_USER_STSI:
        case KVM_CAP_S390_SKEYS:
        case KVM_CAP_S390_IRQ_STATE:
                r = 1;
                break;
        case KVM_CAP_S390_MEM_OP:
                r = MEM_OP_MAX_SIZE;
                break;
        case KVM_CAP_NR_VCPUS:
        case KVM_CAP_MAX_VCPUS:
                r = KVM_MAX_VCPUS;
                break;
        case KVM_CAP_NR_MEMSLOTS:
                r = KVM_USER_MEM_SLOTS;
                break;
        case KVM_CAP_S390_COW:
                r = MACHINE_HAS_ESOP;
                break;
        case KVM_CAP_S390_VECTOR_REGISTERS:
                r = MACHINE_HAS_VX;
                break;
        default:
                r = 0;
        }
        return r;
}

static void kvm_s390_sync_dirty_log(struct kvm *kvm,
                                    struct kvm_memory_slot *memslot)
{
        gfn_t cur_gfn, last_gfn;
        unsigned long address;
        struct gmap *gmap = kvm->arch.gmap;

        down_read(&gmap->mm->mmap_sem);
        /* Loop over all guest pages */
        last_gfn = memslot->base_gfn + memslot->npages;
        for (cur_gfn = memslot->base_gfn; cur_gfn <= last_gfn; cur_gfn++) {
                address = gfn_to_hva_memslot(memslot, cur_gfn);

                if (gmap_test_and_clear_dirty(address, gmap))
                        mark_page_dirty(kvm, cur_gfn);
        }
        up_read(&gmap->mm->mmap_sem);
}

/* Section: vm related */
/*
 * Get (and clear) the dirty memory log for a memory slot.
 */
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
                               struct kvm_dirty_log *log)
{
        int r;
        unsigned long n;
        struct kvm_memslots *slots;
        struct kvm_memory_slot *memslot;
        int is_dirty = 0;

        mutex_lock(&kvm->slots_lock);

        r = -EINVAL;
        if (log->slot >= KVM_USER_MEM_SLOTS)
                goto out;

        slots = kvm_memslots(kvm);
        memslot = id_to_memslot(slots, log->slot);
        r = -ENOENT;
        if (!memslot->dirty_bitmap)
                goto out;

        kvm_s390_sync_dirty_log(kvm, memslot);
        r = kvm_get_dirty_log(kvm, log, &is_dirty);
        if (r)
                goto out;

        /* Clear the dirty log */
        if (is_dirty) {
                n = kvm_dirty_bitmap_bytes(memslot);
                memset(memslot->dirty_bitmap, 0, n);
        }
        r = 0;
out:
        mutex_unlock(&kvm->slots_lock);
        return r;
}
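
/*
 * Illustrative userspace sketch (assumption, not code from this file):
 * harvesting the dirty map computed above for memory slot 0 on the VM fd:
 *
 *      struct kvm_dirty_log log = { .slot = 0 };
 *      log.dirty_bitmap = bitmap;      // caller-allocated buffer
 *      ioctl(vm_fd, KVM_GET_DIRTY_LOG, &log);
 */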

static int kvm_vm_ioctl_enable_cap(struct kvm *kvm, struct kvm_enable_cap *cap)
{
        int r;

        if (cap->flags)
                return -EINVAL;

        switch (cap->cap) {
        case KVM_CAP_S390_IRQCHIP:
                VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_IRQCHIP");
                kvm->arch.use_irqchip = 1;
                r = 0;
                break;
        case KVM_CAP_S390_USER_SIGP:
                VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_USER_SIGP");
                kvm->arch.user_sigp = 1;
                r = 0;
                break;
        case KVM_CAP_S390_VECTOR_REGISTERS:
                mutex_lock(&kvm->lock);
                if (atomic_read(&kvm->online_vcpus)) {
                        r = -EBUSY;
                } else if (MACHINE_HAS_VX) {
                        set_kvm_facility(kvm->arch.model.fac->mask, 129);
                        set_kvm_facility(kvm->arch.model.fac->list, 129);
                        r = 0;
                } else
                        r = -EINVAL;
                mutex_unlock(&kvm->lock);
                VM_EVENT(kvm, 3, "ENABLE: CAP_S390_VECTOR_REGISTERS %s",
                         r ? "(not available)" : "(success)");
                break;
        case KVM_CAP_S390_USER_STSI:
                VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_USER_STSI");
                kvm->arch.user_stsi = 1;
                r = 0;
                break;
        default:
                r = -EINVAL;
                break;
        }
        return r;
}
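
/*
 * Illustrative userspace sketch (assumption): enabling one of the VM
 * capabilities handled above, here the user-controlled SIGP interface:
 *
 *      struct kvm_enable_cap cap = { .cap = KVM_CAP_S390_USER_SIGP };
 *      ioctl(vm_fd, KVM_ENABLE_CAP, &cap);
 */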

static int kvm_s390_get_mem_control(struct kvm *kvm, struct kvm_device_attr *attr)
{
        int ret;

        switch (attr->attr) {
        case KVM_S390_VM_MEM_LIMIT_SIZE:
                ret = 0;
                VM_EVENT(kvm, 3, "QUERY: max guest memory: %lu bytes",
                         kvm->arch.gmap->asce_end);
                if (put_user(kvm->arch.gmap->asce_end, (u64 __user *)attr->addr))
                        ret = -EFAULT;
                break;
        default:
                ret = -ENXIO;
                break;
        }
        return ret;
}

static int kvm_s390_set_mem_control(struct kvm *kvm, struct kvm_device_attr *attr)
{
        int ret;
        unsigned int idx;

        switch (attr->attr) {
        case KVM_S390_VM_MEM_ENABLE_CMMA:
                /* enable CMMA only for z10 and later (EDAT_1) */
                ret = -EINVAL;
                if (!MACHINE_IS_LPAR || !MACHINE_HAS_EDAT1)
                        break;

                ret = -EBUSY;
                VM_EVENT(kvm, 3, "%s", "ENABLE: CMMA support");
                mutex_lock(&kvm->lock);
                if (atomic_read(&kvm->online_vcpus) == 0) {
                        kvm->arch.use_cmma = 1;
                        ret = 0;
                }
                mutex_unlock(&kvm->lock);
                break;
        case KVM_S390_VM_MEM_CLR_CMMA:
                ret = -EINVAL;
                if (!kvm->arch.use_cmma)
                        break;

                VM_EVENT(kvm, 3, "%s", "RESET: CMMA states");
                mutex_lock(&kvm->lock);
                idx = srcu_read_lock(&kvm->srcu);
                s390_reset_cmma(kvm->arch.gmap->mm);
                srcu_read_unlock(&kvm->srcu, idx);
                mutex_unlock(&kvm->lock);
                ret = 0;
                break;
        case KVM_S390_VM_MEM_LIMIT_SIZE: {
                unsigned long new_limit;

                if (kvm_is_ucontrol(kvm))
                        return -EINVAL;

                if (get_user(new_limit, (u64 __user *)attr->addr))
                        return -EFAULT;

                if (new_limit > kvm->arch.gmap->asce_end)
                        return -E2BIG;

                ret = -EBUSY;
                mutex_lock(&kvm->lock);
                if (atomic_read(&kvm->online_vcpus) == 0) {
                        /* gmap_alloc will round the limit up */
                        struct gmap *new = gmap_alloc(current->mm, new_limit);

                        if (!new) {
                                ret = -ENOMEM;
                        } else {
                                gmap_free(kvm->arch.gmap);
                                new->private = kvm;
                                kvm->arch.gmap = new;
                                ret = 0;
                        }
                }
                mutex_unlock(&kvm->lock);
                VM_EVENT(kvm, 3, "SET: max guest memory: %lu bytes", new_limit);
                break;
        }
        default:
                ret = -ENXIO;
                break;
        }
        return ret;
}
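
/*
 * Illustrative userspace sketch (assumption): driving the
 * KVM_S390_VM_MEM_LIMIT_SIZE path above through the attribute interface:
 *
 *      __u64 limit = 1UL << 31;        // example value
 *      struct kvm_device_attr attr = {
 *              .group = KVM_S390_VM_MEM_CTRL,
 *              .attr  = KVM_S390_VM_MEM_LIMIT_SIZE,
 *              .addr  = (__u64)&limit,
 *      };
 *      ioctl(vm_fd, KVM_SET_DEVICE_ATTR, &attr);
 */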

static void kvm_s390_vcpu_crypto_setup(struct kvm_vcpu *vcpu);

static int kvm_s390_vm_set_crypto(struct kvm *kvm, struct kvm_device_attr *attr)
{
        struct kvm_vcpu *vcpu;
        int i;

        if (!test_kvm_facility(kvm, 76))
                return -EINVAL;

        mutex_lock(&kvm->lock);
        switch (attr->attr) {
        case KVM_S390_VM_CRYPTO_ENABLE_AES_KW:
                get_random_bytes(
                        kvm->arch.crypto.crycb->aes_wrapping_key_mask,
                        sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
                kvm->arch.crypto.aes_kw = 1;
                VM_EVENT(kvm, 3, "%s", "ENABLE: AES keywrapping support");
                break;
        case KVM_S390_VM_CRYPTO_ENABLE_DEA_KW:
                get_random_bytes(
                        kvm->arch.crypto.crycb->dea_wrapping_key_mask,
                        sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));
                kvm->arch.crypto.dea_kw = 1;
                VM_EVENT(kvm, 3, "%s", "ENABLE: DEA keywrapping support");
                break;
        case KVM_S390_VM_CRYPTO_DISABLE_AES_KW:
                kvm->arch.crypto.aes_kw = 0;
                memset(kvm->arch.crypto.crycb->aes_wrapping_key_mask, 0,
                       sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
                VM_EVENT(kvm, 3, "%s", "DISABLE: AES keywrapping support");
                break;
        case KVM_S390_VM_CRYPTO_DISABLE_DEA_KW:
                kvm->arch.crypto.dea_kw = 0;
                memset(kvm->arch.crypto.crycb->dea_wrapping_key_mask, 0,
                       sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));
                VM_EVENT(kvm, 3, "%s", "DISABLE: DEA keywrapping support");
                break;
        default:
                mutex_unlock(&kvm->lock);
                return -ENXIO;
        }

        kvm_for_each_vcpu(i, vcpu, kvm) {
                kvm_s390_vcpu_crypto_setup(vcpu);
                exit_sie(vcpu);
        }
        mutex_unlock(&kvm->lock);
        return 0;
}

static int kvm_s390_set_tod_high(struct kvm *kvm, struct kvm_device_attr *attr)
{
        u8 gtod_high;

        if (copy_from_user(&gtod_high, (void __user *)attr->addr,
                           sizeof(gtod_high)))
                return -EFAULT;

        if (gtod_high != 0)
                return -EINVAL;
        VM_EVENT(kvm, 3, "SET: TOD extension: 0x%x", gtod_high);

        return 0;
}

static int kvm_s390_set_tod_low(struct kvm *kvm, struct kvm_device_attr *attr)
{
        u64 gtod;

        if (copy_from_user(&gtod, (void __user *)attr->addr, sizeof(gtod)))
                return -EFAULT;

        kvm_s390_set_tod_clock(kvm, gtod);
        VM_EVENT(kvm, 3, "SET: TOD base: 0x%llx", gtod);
        return 0;
}

static int kvm_s390_set_tod(struct kvm *kvm, struct kvm_device_attr *attr)
{
        int ret;

        if (attr->flags)
                return -EINVAL;

        switch (attr->attr) {
        case KVM_S390_VM_TOD_HIGH:
                ret = kvm_s390_set_tod_high(kvm, attr);
                break;
        case KVM_S390_VM_TOD_LOW:
                ret = kvm_s390_set_tod_low(kvm, attr);
                break;
        default:
                ret = -ENXIO;
                break;
        }
        return ret;
}

static int kvm_s390_get_tod_high(struct kvm *kvm, struct kvm_device_attr *attr)
{
        u8 gtod_high = 0;

        if (copy_to_user((void __user *)attr->addr, &gtod_high,
                         sizeof(gtod_high)))
                return -EFAULT;
        VM_EVENT(kvm, 3, "QUERY: TOD extension: 0x%x", gtod_high);

        return 0;
}

static int kvm_s390_get_tod_low(struct kvm *kvm, struct kvm_device_attr *attr)
{
        u64 gtod;

        gtod = kvm_s390_get_tod_clock_fast(kvm);
        if (copy_to_user((void __user *)attr->addr, &gtod, sizeof(gtod)))
                return -EFAULT;
        VM_EVENT(kvm, 3, "QUERY: TOD base: 0x%llx", gtod);

        return 0;
}

static int kvm_s390_get_tod(struct kvm *kvm, struct kvm_device_attr *attr)
{
        int ret;

        if (attr->flags)
                return -EINVAL;

        switch (attr->attr) {
        case KVM_S390_VM_TOD_HIGH:
                ret = kvm_s390_get_tod_high(kvm, attr);
                break;
        case KVM_S390_VM_TOD_LOW:
                ret = kvm_s390_get_tod_low(kvm, attr);
                break;
        default:
                ret = -ENXIO;
                break;
        }
        return ret;
}
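
/*
 * Illustrative userspace sketch (assumption): reading the guest TOD base
 * through the attribute handlers above:
 *
 *      __u64 tod;
 *      struct kvm_device_attr attr = {
 *              .group = KVM_S390_VM_TOD,
 *              .attr  = KVM_S390_VM_TOD_LOW,
 *              .addr  = (__u64)&tod,
 *      };
 *      ioctl(vm_fd, KVM_GET_DEVICE_ATTR, &attr);
 */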

static int kvm_s390_set_processor(struct kvm *kvm, struct kvm_device_attr *attr)
{
        struct kvm_s390_vm_cpu_processor *proc;
        int ret = 0;

        mutex_lock(&kvm->lock);
        if (atomic_read(&kvm->online_vcpus)) {
                ret = -EBUSY;
                goto out;
        }
        proc = kzalloc(sizeof(*proc), GFP_KERNEL);
        if (!proc) {
                ret = -ENOMEM;
                goto out;
        }
        if (!copy_from_user(proc, (void __user *)attr->addr,
                            sizeof(*proc))) {
                memcpy(&kvm->arch.model.cpu_id, &proc->cpuid,
                       sizeof(struct cpuid));
                kvm->arch.model.ibc = proc->ibc;
                memcpy(kvm->arch.model.fac->list, proc->fac_list,
                       S390_ARCH_FAC_LIST_SIZE_BYTE);
        } else
                ret = -EFAULT;
        kfree(proc);
out:
        mutex_unlock(&kvm->lock);
        return ret;
}

static int kvm_s390_set_cpu_model(struct kvm *kvm, struct kvm_device_attr *attr)
{
        int ret = -ENXIO;

        switch (attr->attr) {
        case KVM_S390_VM_CPU_PROCESSOR:
                ret = kvm_s390_set_processor(kvm, attr);
                break;
        }
        return ret;
}

static int kvm_s390_get_processor(struct kvm *kvm, struct kvm_device_attr *attr)
{
        struct kvm_s390_vm_cpu_processor *proc;
        int ret = 0;

        proc = kzalloc(sizeof(*proc), GFP_KERNEL);
        if (!proc) {
                ret = -ENOMEM;
                goto out;
        }
        memcpy(&proc->cpuid, &kvm->arch.model.cpu_id, sizeof(struct cpuid));
        proc->ibc = kvm->arch.model.ibc;
        memcpy(&proc->fac_list, kvm->arch.model.fac->list, S390_ARCH_FAC_LIST_SIZE_BYTE);
        if (copy_to_user((void __user *)attr->addr, proc, sizeof(*proc)))
                ret = -EFAULT;
        kfree(proc);
out:
        return ret;
}

static int kvm_s390_get_machine(struct kvm *kvm, struct kvm_device_attr *attr)
{
        struct kvm_s390_vm_cpu_machine *mach;
        int ret = 0;

        mach = kzalloc(sizeof(*mach), GFP_KERNEL);
        if (!mach) {
                ret = -ENOMEM;
                goto out;
        }
        get_cpu_id((struct cpuid *) &mach->cpuid);
        mach->ibc = sclp.ibc;
        memcpy(&mach->fac_mask, kvm->arch.model.fac->mask,
               S390_ARCH_FAC_LIST_SIZE_BYTE);
        memcpy((unsigned long *)&mach->fac_list, S390_lowcore.stfle_fac_list,
               S390_ARCH_FAC_LIST_SIZE_BYTE);
        if (copy_to_user((void __user *)attr->addr, mach, sizeof(*mach)))
                ret = -EFAULT;
        kfree(mach);
out:
        return ret;
}

static int kvm_s390_get_cpu_model(struct kvm *kvm, struct kvm_device_attr *attr)
{
        int ret = -ENXIO;

        switch (attr->attr) {
        case KVM_S390_VM_CPU_PROCESSOR:
                ret = kvm_s390_get_processor(kvm, attr);
                break;
        case KVM_S390_VM_CPU_MACHINE:
                ret = kvm_s390_get_machine(kvm, attr);
                break;
        }
        return ret;
}

static int kvm_s390_vm_set_attr(struct kvm *kvm, struct kvm_device_attr *attr)
{
        int ret;

        switch (attr->group) {
        case KVM_S390_VM_MEM_CTRL:
                ret = kvm_s390_set_mem_control(kvm, attr);
                break;
        case KVM_S390_VM_TOD:
                ret = kvm_s390_set_tod(kvm, attr);
                break;
        case KVM_S390_VM_CPU_MODEL:
                ret = kvm_s390_set_cpu_model(kvm, attr);
                break;
        case KVM_S390_VM_CRYPTO:
                ret = kvm_s390_vm_set_crypto(kvm, attr);
                break;
        default:
                ret = -ENXIO;
                break;
        }

        return ret;
}

static int kvm_s390_vm_get_attr(struct kvm *kvm, struct kvm_device_attr *attr)
{
        int ret;

        switch (attr->group) {
        case KVM_S390_VM_MEM_CTRL:
                ret = kvm_s390_get_mem_control(kvm, attr);
                break;
        case KVM_S390_VM_TOD:
                ret = kvm_s390_get_tod(kvm, attr);
                break;
        case KVM_S390_VM_CPU_MODEL:
                ret = kvm_s390_get_cpu_model(kvm, attr);
                break;
        default:
                ret = -ENXIO;
                break;
        }

        return ret;
}

static int kvm_s390_vm_has_attr(struct kvm *kvm, struct kvm_device_attr *attr)
{
        int ret;

        switch (attr->group) {
        case KVM_S390_VM_MEM_CTRL:
                switch (attr->attr) {
                case KVM_S390_VM_MEM_ENABLE_CMMA:
                case KVM_S390_VM_MEM_CLR_CMMA:
                case KVM_S390_VM_MEM_LIMIT_SIZE:
                        ret = 0;
                        break;
                default:
                        ret = -ENXIO;
                        break;
                }
                break;
        case KVM_S390_VM_TOD:
                switch (attr->attr) {
                case KVM_S390_VM_TOD_LOW:
                case KVM_S390_VM_TOD_HIGH:
                        ret = 0;
                        break;
                default:
                        ret = -ENXIO;
                        break;
                }
                break;
        case KVM_S390_VM_CPU_MODEL:
                switch (attr->attr) {
                case KVM_S390_VM_CPU_PROCESSOR:
                case KVM_S390_VM_CPU_MACHINE:
                        ret = 0;
                        break;
                default:
                        ret = -ENXIO;
                        break;
                }
                break;
        case KVM_S390_VM_CRYPTO:
                switch (attr->attr) {
                case KVM_S390_VM_CRYPTO_ENABLE_AES_KW:
                case KVM_S390_VM_CRYPTO_ENABLE_DEA_KW:
                case KVM_S390_VM_CRYPTO_DISABLE_AES_KW:
                case KVM_S390_VM_CRYPTO_DISABLE_DEA_KW:
                        ret = 0;
                        break;
                default:
                        ret = -ENXIO;
                        break;
                }
                break;
        default:
                ret = -ENXIO;
                break;
        }

        return ret;
}

static long kvm_s390_get_skeys(struct kvm *kvm, struct kvm_s390_skeys *args)
{
        uint8_t *keys;
        uint64_t hva;
        unsigned long curkey;
        int i, r = 0;

        if (args->flags != 0)
                return -EINVAL;

        /* Is this guest using storage keys? */
        if (!mm_use_skey(current->mm))
                return KVM_S390_GET_SKEYS_NONE;

        /* Enforce sane limit on memory allocation */
        if (args->count < 1 || args->count > KVM_S390_SKEYS_MAX)
                return -EINVAL;

        keys = kmalloc_array(args->count, sizeof(uint8_t),
                             GFP_KERNEL | __GFP_NOWARN);
        if (!keys)
                keys = vmalloc(sizeof(uint8_t) * args->count);
        if (!keys)
                return -ENOMEM;

        for (i = 0; i < args->count; i++) {
                hva = gfn_to_hva(kvm, args->start_gfn + i);
                if (kvm_is_error_hva(hva)) {
                        r = -EFAULT;
                        goto out;
                }

                curkey = get_guest_storage_key(current->mm, hva);
                if (IS_ERR_VALUE(curkey)) {
                        r = curkey;
                        goto out;
                }
                keys[i] = curkey;
        }

        r = copy_to_user((uint8_t __user *)args->skeydata_addr, keys,
                         sizeof(uint8_t) * args->count);
        if (r)
                r = -EFAULT;
out:
        kvfree(keys);
        return r;
}

static long kvm_s390_set_skeys(struct kvm *kvm, struct kvm_s390_skeys *args)
{
        uint8_t *keys;
        uint64_t hva;
        int i, r = 0;

        if (args->flags != 0)
                return -EINVAL;

        /* Enforce sane limit on memory allocation */
        if (args->count < 1 || args->count > KVM_S390_SKEYS_MAX)
                return -EINVAL;

        keys = kmalloc_array(args->count, sizeof(uint8_t),
                             GFP_KERNEL | __GFP_NOWARN);
        if (!keys)
                keys = vmalloc(sizeof(uint8_t) * args->count);
        if (!keys)
                return -ENOMEM;

        r = copy_from_user(keys, (uint8_t __user *)args->skeydata_addr,
                           sizeof(uint8_t) * args->count);
        if (r) {
                r = -EFAULT;
                goto out;
        }

        /* Enable storage key handling for the guest */
        r = s390_enable_skey();
        if (r)
                goto out;

        for (i = 0; i < args->count; i++) {
                hva = gfn_to_hva(kvm, args->start_gfn + i);
                if (kvm_is_error_hva(hva)) {
                        r = -EFAULT;
                        goto out;
                }

                /* Lowest order bit is reserved */
                if (keys[i] & 0x01) {
                        r = -EINVAL;
                        goto out;
                }

                r = set_guest_storage_key(current->mm, hva,
                                          (unsigned long)keys[i], 0);
                if (r)
                        goto out;
        }
out:
        kvfree(keys);
        return r;
}
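
/*
 * Illustrative userspace sketch (assumption): fetching the storage keys
 * of the first 256 guest pages via the handler above:
 *
 *      __u8 keys[256];
 *      struct kvm_s390_skeys args = {
 *              .start_gfn = 0,
 *              .count = 256,
 *              .skeydata_addr = (__u64)keys,
 *      };
 *      ioctl(vm_fd, KVM_S390_GET_SKEYS, &args);
 */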

long kvm_arch_vm_ioctl(struct file *filp,
                       unsigned int ioctl, unsigned long arg)
{
        struct kvm *kvm = filp->private_data;
        void __user *argp = (void __user *)arg;
        struct kvm_device_attr attr;
        int r;

        switch (ioctl) {
        case KVM_S390_INTERRUPT: {
                struct kvm_s390_interrupt s390int;

                r = -EFAULT;
                if (copy_from_user(&s390int, argp, sizeof(s390int)))
                        break;
                r = kvm_s390_inject_vm(kvm, &s390int);
                break;
        }
        case KVM_ENABLE_CAP: {
                struct kvm_enable_cap cap;

                r = -EFAULT;
                if (copy_from_user(&cap, argp, sizeof(cap)))
                        break;
                r = kvm_vm_ioctl_enable_cap(kvm, &cap);
                break;
        }
        case KVM_CREATE_IRQCHIP: {
                struct kvm_irq_routing_entry routing;

                r = -EINVAL;
                if (kvm->arch.use_irqchip) {
                        /* Set up dummy routing. */
                        memset(&routing, 0, sizeof(routing));
                        r = kvm_set_irq_routing(kvm, &routing, 0, 0);
                }
                break;
        }
        case KVM_SET_DEVICE_ATTR: {
                r = -EFAULT;
                if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
                        break;
                r = kvm_s390_vm_set_attr(kvm, &attr);
                break;
        }
        case KVM_GET_DEVICE_ATTR: {
                r = -EFAULT;
                if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
                        break;
                r = kvm_s390_vm_get_attr(kvm, &attr);
                break;
        }
        case KVM_HAS_DEVICE_ATTR: {
                r = -EFAULT;
                if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
                        break;
                r = kvm_s390_vm_has_attr(kvm, &attr);
                break;
        }
        case KVM_S390_GET_SKEYS: {
                struct kvm_s390_skeys args;

                r = -EFAULT;
                if (copy_from_user(&args, argp,
                                   sizeof(struct kvm_s390_skeys)))
                        break;
                r = kvm_s390_get_skeys(kvm, &args);
                break;
        }
        case KVM_S390_SET_SKEYS: {
                struct kvm_s390_skeys args;

                r = -EFAULT;
                if (copy_from_user(&args, argp,
                                   sizeof(struct kvm_s390_skeys)))
                        break;
                r = kvm_s390_set_skeys(kvm, &args);
                break;
        }
        default:
                r = -ENOTTY;
        }

        return r;
}

static int kvm_s390_query_ap_config(u8 *config)
{
        u32 fcn_code = 0x04000000UL;
        u32 cc = 0;

        memset(config, 0, 128);
        asm volatile(
                "lgr 0,%1\n"
                "lgr 2,%2\n"
                ".long 0xb2af0000\n"            /* PQAP(QCI) */
                "0: ipm %0\n"
                "srl %0,28\n"
                "1:\n"
                EX_TABLE(0b, 1b)
                : "+r" (cc)
                : "r" (fcn_code), "r" (config)
                : "cc", "0", "2", "memory"
        );

        return cc;
}
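
/*
 * Note on the inline assembly above (added for clarity): PQAP(QCI) takes
 * the function code 0x04000000 in register 0 and the address of a
 * 128-byte status buffer in register 2; ".long 0xb2af0000" is the PQAP
 * opcode itself, ipm/srl extract the resulting condition code, and the
 * EX_TABLE entry lets a machine without AP instructions resume at label
 * 1, leaving cc at its initial 0 and the buffer zeroed.
 */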

static int kvm_s390_apxa_installed(void)
{
        u8 config[128];
        int cc;

        if (test_facility(2) && test_facility(12)) {
                cc = kvm_s390_query_ap_config(config);

                if (cc)
                        pr_err("PQAP(QCI) failed with cc=%d", cc);
                else
                        return config[0] & 0x40;
        }

        return 0;
}

static void kvm_s390_set_crycb_format(struct kvm *kvm)
{
        kvm->arch.crypto.crycbd = (__u32)(unsigned long) kvm->arch.crypto.crycb;

        if (kvm_s390_apxa_installed())
                kvm->arch.crypto.crycbd |= CRYCB_FORMAT2;
        else
                kvm->arch.crypto.crycbd |= CRYCB_FORMAT1;
}

static void kvm_s390_get_cpu_id(struct cpuid *cpu_id)
{
        get_cpu_id(cpu_id);
        cpu_id->version = 0xff;
}

static int kvm_s390_crypto_init(struct kvm *kvm)
{
        if (!test_kvm_facility(kvm, 76))
                return 0;

        kvm->arch.crypto.crycb = kzalloc(sizeof(*kvm->arch.crypto.crycb),
                                         GFP_KERNEL | GFP_DMA);
        if (!kvm->arch.crypto.crycb)
                return -ENOMEM;

        kvm_s390_set_crycb_format(kvm);

        /* Enable AES/DEA protected key functions by default */
        kvm->arch.crypto.aes_kw = 1;
        kvm->arch.crypto.dea_kw = 1;
        get_random_bytes(kvm->arch.crypto.crycb->aes_wrapping_key_mask,
                         sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
        get_random_bytes(kvm->arch.crypto.crycb->dea_wrapping_key_mask,
                         sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));

        return 0;
}

int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
{
        int i, rc;
        char debug_name[16];
        static unsigned long sca_offset;

        rc = -EINVAL;
#ifdef CONFIG_KVM_S390_UCONTROL
        if (type & ~KVM_VM_S390_UCONTROL)
                goto out_err;
        if ((type & KVM_VM_S390_UCONTROL) && (!capable(CAP_SYS_ADMIN)))
                goto out_err;
#else
        if (type)
                goto out_err;
#endif

        rc = s390_enable_sie();
        if (rc)
                goto out_err;

        rc = -ENOMEM;

        kvm->arch.sca = (struct sca_block *) get_zeroed_page(GFP_KERNEL);
        if (!kvm->arch.sca)
                goto out_err;
        spin_lock(&kvm_lock);
        sca_offset += 16;
        if (sca_offset + sizeof(struct sca_block) > PAGE_SIZE)
                sca_offset = 0;
        kvm->arch.sca = (struct sca_block *) ((char *) kvm->arch.sca + sca_offset);
        spin_unlock(&kvm_lock);

        sprintf(debug_name, "kvm-%u", current->pid);

        kvm->arch.dbf = debug_register(debug_name, 32, 1, 7 * sizeof(long));
        if (!kvm->arch.dbf)
                goto out_err;

        /*
         * The architectural maximum amount of facilities is 16 kbit. To store
         * this amount, 2 kbyte of memory is required. Thus we need a full
         * page to hold the guest facility list (arch.model.fac->list) and the
         * facility mask (arch.model.fac->mask). Its address size has to be
         * 31 bits and word aligned.
         */
        kvm->arch.model.fac =
                (struct kvm_s390_fac *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
        if (!kvm->arch.model.fac)
                goto out_err;

        /* Populate the facility mask initially. */
        memcpy(kvm->arch.model.fac->mask, S390_lowcore.stfle_fac_list,
               S390_ARCH_FAC_LIST_SIZE_BYTE);
        for (i = 0; i < S390_ARCH_FAC_LIST_SIZE_U64; i++) {
                if (i < kvm_s390_fac_list_mask_size())
                        kvm->arch.model.fac->mask[i] &= kvm_s390_fac_list_mask[i];
                else
                        kvm->arch.model.fac->mask[i] = 0UL;
        }

        /* Populate the facility list initially. */
        memcpy(kvm->arch.model.fac->list, kvm->arch.model.fac->mask,
               S390_ARCH_FAC_LIST_SIZE_BYTE);

        kvm_s390_get_cpu_id(&kvm->arch.model.cpu_id);
        kvm->arch.model.ibc = sclp.ibc & 0x0fff;

        if (kvm_s390_crypto_init(kvm) < 0)
                goto out_err;

        spin_lock_init(&kvm->arch.float_int.lock);
        for (i = 0; i < FIRQ_LIST_COUNT; i++)
                INIT_LIST_HEAD(&kvm->arch.float_int.lists[i]);
        init_waitqueue_head(&kvm->arch.ipte_wq);
        mutex_init(&kvm->arch.ipte_mutex);

        debug_register_view(kvm->arch.dbf, &debug_sprintf_view);
        VM_EVENT(kvm, 3, "vm created with type %lu", type);

        if (type & KVM_VM_S390_UCONTROL) {
                kvm->arch.gmap = NULL;
        } else {
                kvm->arch.gmap = gmap_alloc(current->mm, (1UL << 44) - 1);
                if (!kvm->arch.gmap)
                        goto out_err;
                kvm->arch.gmap->private = kvm;
                kvm->arch.gmap->pfault_enabled = 0;
        }

        kvm->arch.css_support = 0;
        kvm->arch.use_irqchip = 0;
        kvm->arch.epoch = 0;

        spin_lock_init(&kvm->arch.start_stop_lock);
        KVM_EVENT(3, "vm 0x%p created by pid %u", kvm, current->pid);

        return 0;
out_err:
        kfree(kvm->arch.crypto.crycb);
        free_page((unsigned long)kvm->arch.model.fac);
        debug_unregister(kvm->arch.dbf);
        free_page((unsigned long)(kvm->arch.sca));
        KVM_EVENT(3, "creation of vm failed: %d", rc);
        return rc;
}

void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{
        VCPU_EVENT(vcpu, 3, "%s", "free cpu");
        trace_kvm_s390_destroy_vcpu(vcpu->vcpu_id);
        kvm_s390_clear_local_irqs(vcpu);
        kvm_clear_async_pf_completion_queue(vcpu);
        if (!kvm_is_ucontrol(vcpu->kvm)) {
                clear_bit(63 - vcpu->vcpu_id,
                          (unsigned long *) &vcpu->kvm->arch.sca->mcn);
                if (vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda ==
                    (__u64) vcpu->arch.sie_block)
                        vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda = 0;
        }
        smp_mb();

        if (kvm_is_ucontrol(vcpu->kvm))
                gmap_free(vcpu->arch.gmap);

        if (vcpu->kvm->arch.use_cmma)
                kvm_s390_vcpu_unsetup_cmma(vcpu);
        free_page((unsigned long)(vcpu->arch.sie_block));

        kvm_vcpu_uninit(vcpu);
        kmem_cache_free(kvm_vcpu_cache, vcpu);
}

static void kvm_free_vcpus(struct kvm *kvm)
{
        unsigned int i;
        struct kvm_vcpu *vcpu;

        kvm_for_each_vcpu(i, vcpu, kvm)
                kvm_arch_vcpu_destroy(vcpu);

        mutex_lock(&kvm->lock);
        for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
                kvm->vcpus[i] = NULL;

        atomic_set(&kvm->online_vcpus, 0);
        mutex_unlock(&kvm->lock);
}

void kvm_arch_destroy_vm(struct kvm *kvm)
{
        kvm_free_vcpus(kvm);
        free_page((unsigned long)kvm->arch.model.fac);
        free_page((unsigned long)(kvm->arch.sca));
        debug_unregister(kvm->arch.dbf);
        kfree(kvm->arch.crypto.crycb);
        if (!kvm_is_ucontrol(kvm))
                gmap_free(kvm->arch.gmap);
        kvm_s390_destroy_adapters(kvm);
        kvm_s390_clear_float_irqs(kvm);
        KVM_EVENT(3, "vm 0x%p destroyed", kvm);
}

/* Section: vcpu related */
static int __kvm_ucontrol_vcpu_init(struct kvm_vcpu *vcpu)
{
        vcpu->arch.gmap = gmap_alloc(current->mm, -1UL);
        if (!vcpu->arch.gmap)
                return -ENOMEM;
        vcpu->arch.gmap->private = vcpu->kvm;

        return 0;
}

int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
{
        vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
        kvm_clear_async_pf_completion_queue(vcpu);
        vcpu->run->kvm_valid_regs = KVM_SYNC_PREFIX |
                                    KVM_SYNC_GPRS |
                                    KVM_SYNC_ACRS |
                                    KVM_SYNC_CRS |
                                    KVM_SYNC_ARCH0 |
                                    KVM_SYNC_PFAULT;
        if (test_kvm_facility(vcpu->kvm, 129))
                vcpu->run->kvm_valid_regs |= KVM_SYNC_VRS;

        if (kvm_is_ucontrol(vcpu->kvm))
                return __kvm_ucontrol_vcpu_init(vcpu);

        return 0;
}

/*
 * Backs up the current FP/VX register save area on a particular
 * destination. Used to switch between different register save
 * areas.
 */
static inline void save_fpu_to(struct fpu *dst)
{
        dst->fpc = current->thread.fpu.fpc;
        dst->regs = current->thread.fpu.regs;
}

/*
 * Switches the FP/VX register save area from which to lazy
 * restore register contents.
 */
static inline void load_fpu_from(struct fpu *from)
{
        current->thread.fpu.fpc = from->fpc;
        current->thread.fpu.regs = from->regs;
}

void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
        /* Save host register state */
        save_fpu_regs();
        save_fpu_to(&vcpu->arch.host_fpregs);

        if (test_kvm_facility(vcpu->kvm, 129)) {
                current->thread.fpu.fpc = vcpu->run->s.regs.fpc;
                /*
                 * Use the register save area in the SIE-control block
                 * for register restore and save in kvm_arch_vcpu_put()
                 */
                current->thread.fpu.vxrs =
                        (__vector128 *)&vcpu->run->s.regs.vrs;
        } else
                load_fpu_from(&vcpu->arch.guest_fpregs);

        if (test_fp_ctl(current->thread.fpu.fpc))
                /* User space provided an invalid FPC, let's clear it */
                current->thread.fpu.fpc = 0;

        save_access_regs(vcpu->arch.host_acrs);
        restore_access_regs(vcpu->run->s.regs.acrs);
        gmap_enable(vcpu->arch.gmap);
        atomic_or(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
}

void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
        atomic_andnot(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
        gmap_disable(vcpu->arch.gmap);

        save_fpu_regs();

        if (test_kvm_facility(vcpu->kvm, 129))
                /*
                 * kvm_arch_vcpu_load() set up the register save area to
                 * the &vcpu->run->s.regs.vrs and, thus, the vector registers
                 * are already saved. Only the floating-point control must be
                 * copied.
                 */
                vcpu->run->s.regs.fpc = current->thread.fpu.fpc;
        else
                save_fpu_to(&vcpu->arch.guest_fpregs);
        load_fpu_from(&vcpu->arch.host_fpregs);

        save_access_regs(vcpu->run->s.regs.acrs);
        restore_access_regs(vcpu->arch.host_acrs);
}
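
/*
 * Note added for clarity (not part of the original file): the two
 * helpers above implement a lazy switch of current->thread.fpu between
 * host and guest save areas, so the actual register load/store is left
 * to the common s390 FPU code.  With the vector facility (129) the
 * guest area is redirected to vcpu->run->s.regs.vrs, which is why only
 * the floating-point control word needs explicit copying on put.
 */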

static void kvm_s390_vcpu_initial_reset(struct kvm_vcpu *vcpu)
{
        /* this equals initial cpu reset in pop, but we don't switch to ESA */
        vcpu->arch.sie_block->gpsw.mask = 0UL;
        vcpu->arch.sie_block->gpsw.addr = 0UL;
        kvm_s390_set_prefix(vcpu, 0);
        vcpu->arch.sie_block->cputm = 0UL;
        vcpu->arch.sie_block->ckc = 0UL;
        vcpu->arch.sie_block->todpr = 0;
        memset(vcpu->arch.sie_block->gcr, 0, 16 * sizeof(__u64));
        vcpu->arch.sie_block->gcr[0] = 0xE0UL;
        vcpu->arch.sie_block->gcr[14] = 0xC2000000UL;
        vcpu->arch.guest_fpregs.fpc = 0;
        asm volatile("lfpc %0" : : "Q" (vcpu->arch.guest_fpregs.fpc));
        vcpu->arch.sie_block->gbea = 1;
        vcpu->arch.sie_block->pp = 0;
        vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
        kvm_clear_async_pf_completion_queue(vcpu);
        if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm))
                kvm_s390_vcpu_stop(vcpu);
        kvm_s390_clear_local_irqs(vcpu);
}

void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
{
        mutex_lock(&vcpu->kvm->lock);
        preempt_disable();
        vcpu->arch.sie_block->epoch = vcpu->kvm->arch.epoch;
        preempt_enable();
        mutex_unlock(&vcpu->kvm->lock);
        if (!kvm_is_ucontrol(vcpu->kvm))
                vcpu->arch.gmap = vcpu->kvm->arch.gmap;
}

static void kvm_s390_vcpu_crypto_setup(struct kvm_vcpu *vcpu)
{
        if (!test_kvm_facility(vcpu->kvm, 76))
                return;

        vcpu->arch.sie_block->ecb3 &= ~(ECB3_AES | ECB3_DEA);

        if (vcpu->kvm->arch.crypto.aes_kw)
                vcpu->arch.sie_block->ecb3 |= ECB3_AES;
        if (vcpu->kvm->arch.crypto.dea_kw)
                vcpu->arch.sie_block->ecb3 |= ECB3_DEA;

        vcpu->arch.sie_block->crycbd = vcpu->kvm->arch.crypto.crycbd;
}

void kvm_s390_vcpu_unsetup_cmma(struct kvm_vcpu *vcpu)
{
        free_page(vcpu->arch.sie_block->cbrlo);
        vcpu->arch.sie_block->cbrlo = 0;
}

int kvm_s390_vcpu_setup_cmma(struct kvm_vcpu *vcpu)
{
        vcpu->arch.sie_block->cbrlo = get_zeroed_page(GFP_KERNEL);
        if (!vcpu->arch.sie_block->cbrlo)
                return -ENOMEM;

        vcpu->arch.sie_block->ecb2 |= 0x80;
        vcpu->arch.sie_block->ecb2 &= ~0x08;
        return 0;
}

static void kvm_s390_vcpu_setup_model(struct kvm_vcpu *vcpu)
{
        struct kvm_s390_cpu_model *model = &vcpu->kvm->arch.model;

        vcpu->arch.cpu_id = model->cpu_id;
        vcpu->arch.sie_block->ibc = model->ibc;
        vcpu->arch.sie_block->fac = (int) (long) model->fac->list;
}

int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
{
        int rc = 0;

        atomic_set(&vcpu->arch.sie_block->cpuflags, CPUSTAT_ZARCH |
                                                    CPUSTAT_SM |
                                                    CPUSTAT_STOPPED);

        if (test_kvm_facility(vcpu->kvm, 78))
                atomic_or(CPUSTAT_GED2, &vcpu->arch.sie_block->cpuflags);
        else if (test_kvm_facility(vcpu->kvm, 8))
                atomic_or(CPUSTAT_GED, &vcpu->arch.sie_block->cpuflags);

        kvm_s390_vcpu_setup_model(vcpu);

        vcpu->arch.sie_block->ecb = 6;
        if (test_kvm_facility(vcpu->kvm, 50) && test_kvm_facility(vcpu->kvm, 73))
                vcpu->arch.sie_block->ecb |= 0x10;

        vcpu->arch.sie_block->ecb2 = 8;
        vcpu->arch.sie_block->eca = 0xC1002000U;
        if (sclp.has_siif)
                vcpu->arch.sie_block->eca |= 1;
        if (sclp.has_sigpif)
                vcpu->arch.sie_block->eca |= 0x10000000U;
        if (test_kvm_facility(vcpu->kvm, 129)) {
                vcpu->arch.sie_block->eca |= 0x00020000;
                vcpu->arch.sie_block->ecd |= 0x20000000;
        }
        vcpu->arch.sie_block->ictl |= ICTL_ISKE | ICTL_SSKE | ICTL_RRBE;

        if (vcpu->kvm->arch.use_cmma) {
                rc = kvm_s390_vcpu_setup_cmma(vcpu);
                if (rc)
                        return rc;
        }
        hrtimer_init(&vcpu->arch.ckc_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
        vcpu->arch.ckc_timer.function = kvm_s390_idle_wakeup;

        kvm_s390_vcpu_crypto_setup(vcpu);

        return rc;
}

struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
                                      unsigned int id)
{
        struct kvm_vcpu *vcpu;
        struct sie_page *sie_page;
        int rc = -EINVAL;

        if (id >= KVM_MAX_VCPUS)
                goto out;

        rc = -ENOMEM;

        vcpu = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
        if (!vcpu)
                goto out;

        sie_page = (struct sie_page *) get_zeroed_page(GFP_KERNEL);
        if (!sie_page)
                goto out_free_cpu;

        vcpu->arch.sie_block = &sie_page->sie_block;
        vcpu->arch.sie_block->itdba = (unsigned long) &sie_page->itdb;

        vcpu->arch.sie_block->icpua = id;
        if (!kvm_is_ucontrol(kvm)) {
                if (!kvm->arch.sca) {
                        WARN_ON_ONCE(1);
                        goto out_free_cpu;
                }
                if (!kvm->arch.sca->cpu[id].sda)
                        kvm->arch.sca->cpu[id].sda =
                                (__u64) vcpu->arch.sie_block;
                vcpu->arch.sie_block->scaoh =
                        (__u32)(((__u64)kvm->arch.sca) >> 32);
                vcpu->arch.sie_block->scaol = (__u32)(__u64)kvm->arch.sca;
                set_bit(63 - id, (unsigned long *) &kvm->arch.sca->mcn);
        }

        spin_lock_init(&vcpu->arch.local_int.lock);
        vcpu->arch.local_int.float_int = &kvm->arch.float_int;
        vcpu->arch.local_int.wq = &vcpu->wq;
        vcpu->arch.local_int.cpuflags = &vcpu->arch.sie_block->cpuflags;

        /*
         * Allocate a save area for floating-point registers. If the vector
         * extension is available, register contents are saved in the SIE
         * control block. The allocated save area is still required in
         * particular places, for example, in kvm_s390_vcpu_store_status().
         */
        vcpu->arch.guest_fpregs.fprs = kzalloc(sizeof(freg_t) * __NUM_FPRS,
                                               GFP_KERNEL);
        if (!vcpu->arch.guest_fpregs.fprs) {
                rc = -ENOMEM;
                goto out_free_sie_block;
        }

        rc = kvm_vcpu_init(vcpu, kvm, id);
        if (rc)
                goto out_free_sie_block;
        VM_EVENT(kvm, 3, "create cpu %d at %p, sie block at %p", id, vcpu,
                 vcpu->arch.sie_block);
        trace_kvm_s390_create_vcpu(id, vcpu, vcpu->arch.sie_block);

        return vcpu;
out_free_sie_block:
        free_page((unsigned long)(vcpu->arch.sie_block));
out_free_cpu:
        kmem_cache_free(kvm_vcpu_cache, vcpu);
out:
        return ERR_PTR(rc);
}

int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
{
        return kvm_s390_vcpu_has_irq(vcpu, 0);
}

void kvm_s390_vcpu_block(struct kvm_vcpu *vcpu)
{
        atomic_or(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
        exit_sie(vcpu);
}

void kvm_s390_vcpu_unblock(struct kvm_vcpu *vcpu)
{
        atomic_andnot(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
}

static void kvm_s390_vcpu_request(struct kvm_vcpu *vcpu)
{
        atomic_or(PROG_REQUEST, &vcpu->arch.sie_block->prog20);
        exit_sie(vcpu);
}

static void kvm_s390_vcpu_request_handled(struct kvm_vcpu *vcpu)
{
        atomic_andnot(PROG_REQUEST, &vcpu->arch.sie_block->prog20);
}

/*
 * Kick a guest cpu out of SIE and wait until SIE is not running.
 * If the CPU is not running (e.g. waiting as idle) the function will
 * return immediately.
 */
void exit_sie(struct kvm_vcpu *vcpu)
{
        atomic_or(CPUSTAT_STOP_INT, &vcpu->arch.sie_block->cpuflags);
        while (vcpu->arch.sie_block->prog0c & PROG_IN_SIE)
                cpu_relax();
}

/* Kick a guest cpu out of SIE to process a request synchronously */
void kvm_s390_sync_request(int req, struct kvm_vcpu *vcpu)
{
        kvm_make_request(req, vcpu);
        kvm_s390_vcpu_request(vcpu);
}
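
/*
 * Note added for clarity (not part of the original file): the helpers
 * above cooperate in two steps.  PROG_BLOCK_SIE/PROG_REQUEST in prog20
 * keep the vcpu from re-entering SIE, CPUSTAT_STOP_INT kicks it out of
 * SIE, and the busy-wait on PROG_IN_SIE in prog0c guarantees the vcpu
 * really left guest context before the caller proceeds.
 */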

static void kvm_gmap_notifier(struct gmap *gmap, unsigned long address)
{
        int i;
        struct kvm *kvm = gmap->private;
        struct kvm_vcpu *vcpu;

        kvm_for_each_vcpu(i, vcpu, kvm) {
                /* match against both prefix pages */
                if (kvm_s390_get_prefix(vcpu) == (address & ~0x1000UL)) {
                        VCPU_EVENT(vcpu, 2, "gmap notifier for %lx", address);
                        kvm_s390_sync_request(KVM_REQ_MMU_RELOAD, vcpu);
                }
        }
}

int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
{
        /* kvm common code refers to this, but never calls it */
        BUG();
        return 0;
}

static int kvm_arch_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu,
                                           struct kvm_one_reg *reg)
{
        int r = -EINVAL;

        switch (reg->id) {
        case KVM_REG_S390_TODPR:
                r = put_user(vcpu->arch.sie_block->todpr,
                             (u32 __user *)reg->addr);
                break;
        case KVM_REG_S390_EPOCHDIFF:
                r = put_user(vcpu->arch.sie_block->epoch,
                             (u64 __user *)reg->addr);
                break;
        case KVM_REG_S390_CPU_TIMER:
                r = put_user(vcpu->arch.sie_block->cputm,
                             (u64 __user *)reg->addr);
                break;
        case KVM_REG_S390_CLOCK_COMP:
                r = put_user(vcpu->arch.sie_block->ckc,
                             (u64 __user *)reg->addr);
                break;
        case KVM_REG_S390_PFTOKEN:
                r = put_user(vcpu->arch.pfault_token,
                             (u64 __user *)reg->addr);
                break;
        case KVM_REG_S390_PFCOMPARE:
                r = put_user(vcpu->arch.pfault_compare,
                             (u64 __user *)reg->addr);
                break;
        case KVM_REG_S390_PFSELECT:
                r = put_user(vcpu->arch.pfault_select,
                             (u64 __user *)reg->addr);
                break;
        case KVM_REG_S390_PP:
                r = put_user(vcpu->arch.sie_block->pp,
                             (u64 __user *)reg->addr);
                break;
        case KVM_REG_S390_GBEA:
                r = put_user(vcpu->arch.sie_block->gbea,
                             (u64 __user *)reg->addr);
                break;
        default:
                break;
        }

        return r;
}

static int kvm_arch_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu,
                                           struct kvm_one_reg *reg)
{
        int r = -EINVAL;

        switch (reg->id) {
        case KVM_REG_S390_TODPR:
                r = get_user(vcpu->arch.sie_block->todpr,
                             (u32 __user *)reg->addr);
                break;
        case KVM_REG_S390_EPOCHDIFF:
                r = get_user(vcpu->arch.sie_block->epoch,
                             (u64 __user *)reg->addr);
                break;
        case KVM_REG_S390_CPU_TIMER:
                r = get_user(vcpu->arch.sie_block->cputm,
                             (u64 __user *)reg->addr);
                break;
        case KVM_REG_S390_CLOCK_COMP:
                r = get_user(vcpu->arch.sie_block->ckc,
                             (u64 __user *)reg->addr);
                break;
        case KVM_REG_S390_PFTOKEN:
                r = get_user(vcpu->arch.pfault_token,
                             (u64 __user *)reg->addr);
                if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
                        kvm_clear_async_pf_completion_queue(vcpu);
                break;
        case KVM_REG_S390_PFCOMPARE:
                r = get_user(vcpu->arch.pfault_compare,
                             (u64 __user *)reg->addr);
                break;
        case KVM_REG_S390_PFSELECT:
                r = get_user(vcpu->arch.pfault_select,
                             (u64 __user *)reg->addr);
                break;
        case KVM_REG_S390_PP:
                r = get_user(vcpu->arch.sie_block->pp,
                             (u64 __user *)reg->addr);
                break;
        case KVM_REG_S390_GBEA:
                r = get_user(vcpu->arch.sie_block->gbea,
                             (u64 __user *)reg->addr);
                break;
        default:
                break;
        }

        return r;
}
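
/*
 * Illustrative userspace sketch (assumption): setting one of the
 * registers handled above through the one-reg interface:
 *
 *      __u64 epochdiff = 0;
 *      struct kvm_one_reg reg = {
 *              .id   = KVM_REG_S390_EPOCHDIFF,
 *              .addr = (__u64)&epochdiff,
 *      };
 *      ioctl(vcpu_fd, KVM_SET_ONE_REG, &reg);
 */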

static int kvm_arch_vcpu_ioctl_initial_reset(struct kvm_vcpu *vcpu)
{
        kvm_s390_vcpu_initial_reset(vcpu);
        return 0;
}

int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
        memcpy(&vcpu->run->s.regs.gprs, &regs->gprs, sizeof(regs->gprs));
        return 0;
}

int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
        memcpy(&regs->gprs, &vcpu->run->s.regs.gprs, sizeof(regs->gprs));
        return 0;
}

int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
                                  struct kvm_sregs *sregs)
{
        memcpy(&vcpu->run->s.regs.acrs, &sregs->acrs, sizeof(sregs->acrs));
        memcpy(&vcpu->arch.sie_block->gcr, &sregs->crs, sizeof(sregs->crs));
        restore_access_regs(vcpu->run->s.regs.acrs);
        return 0;
}

int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
                                  struct kvm_sregs *sregs)
{
        memcpy(&sregs->acrs, &vcpu->run->s.regs.acrs, sizeof(sregs->acrs));
        memcpy(&sregs->crs, &vcpu->arch.sie_block->gcr, sizeof(sregs->crs));
        return 0;
}

int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
        if (test_fp_ctl(fpu->fpc))
                return -EINVAL;
        memcpy(vcpu->arch.guest_fpregs.fprs, &fpu->fprs, sizeof(fpu->fprs));
        vcpu->arch.guest_fpregs.fpc = fpu->fpc;
        save_fpu_regs();
        load_fpu_from(&vcpu->arch.guest_fpregs);
        return 0;
}

int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
        memcpy(&fpu->fprs, vcpu->arch.guest_fpregs.fprs, sizeof(fpu->fprs));
        fpu->fpc = vcpu->arch.guest_fpregs.fpc;
        return 0;
}

static int kvm_arch_vcpu_ioctl_set_initial_psw(struct kvm_vcpu *vcpu, psw_t psw)
{
        int rc = 0;

        if (!is_vcpu_stopped(vcpu))
                rc = -EBUSY;
        else {
                vcpu->run->psw_mask = psw.mask;
                vcpu->run->psw_addr = psw.addr;
        }
        return rc;
}

int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
                                  struct kvm_translation *tr)
{
        return -EINVAL; /* not implemented yet */
}

#define VALID_GUESTDBG_FLAGS (KVM_GUESTDBG_SINGLESTEP | \
                              KVM_GUESTDBG_USE_HW_BP | \
                              KVM_GUESTDBG_ENABLE)

int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
                                        struct kvm_guest_debug *dbg)
{
        int rc = 0;

        vcpu->guest_debug = 0;
        kvm_s390_clear_bp_data(vcpu);

        if (dbg->control & ~VALID_GUESTDBG_FLAGS)
                return -EINVAL;

        if (dbg->control & KVM_GUESTDBG_ENABLE) {
                vcpu->guest_debug = dbg->control;
                /* enforce guest PER */
                atomic_or(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags);

                if (dbg->control & KVM_GUESTDBG_USE_HW_BP)
                        rc = kvm_s390_import_bp_data(vcpu, dbg);
        } else {
                atomic_andnot(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags);
                vcpu->arch.guestdbg.last_bp = 0;
        }

        if (rc) {
                vcpu->guest_debug = 0;
                kvm_s390_clear_bp_data(vcpu);
                atomic_andnot(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags);
        }

        return rc;
}
1807
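/*
 * Userspace sketch (illustrative): enabling single-stepping; the flags
 * must stay within VALID_GUESTDBG_FLAGS or the ioctl fails with -EINVAL.
 *
 *      struct kvm_guest_debug dbg = {
 *              .control = KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_SINGLESTEP,
 *      };
 *
 *      ioctl(vcpu_fd, KVM_SET_GUEST_DEBUG, &dbg);
 */
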
int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
                                    struct kvm_mp_state *mp_state)
{
        /* CHECK_STOP and LOAD are not supported yet */
        return is_vcpu_stopped(vcpu) ? KVM_MP_STATE_STOPPED :
                                       KVM_MP_STATE_OPERATING;
}

int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
                                    struct kvm_mp_state *mp_state)
{
        int rc = 0;

        /* user space knows about this interface - let it control the state */
        vcpu->kvm->arch.user_cpu_state_ctrl = 1;

        switch (mp_state->mp_state) {
        case KVM_MP_STATE_STOPPED:
                kvm_s390_vcpu_stop(vcpu);
                break;
        case KVM_MP_STATE_OPERATING:
                kvm_s390_vcpu_start(vcpu);
                break;
        case KVM_MP_STATE_LOAD:
        case KVM_MP_STATE_CHECK_STOP:
                /* fall through - CHECK_STOP and LOAD are not supported yet */
        default:
                rc = -ENXIO;
        }

        return rc;
}

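/*
 * Userspace sketch (illustrative): once user space uses this interface
 * it owns the CPU state, so stopping a VCPU is simply:
 *
 *      struct kvm_mp_state mp_state = {
 *              .mp_state = KVM_MP_STATE_STOPPED,
 *      };
 *
 *      ioctl(vcpu_fd, KVM_SET_MP_STATE, &mp_state);
 */
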
static bool ibs_enabled(struct kvm_vcpu *vcpu)
{
        return atomic_read(&vcpu->arch.sie_block->cpuflags) & CPUSTAT_IBS;
}

static int kvm_s390_handle_requests(struct kvm_vcpu *vcpu)
{
retry:
        kvm_s390_vcpu_request_handled(vcpu);
        if (!vcpu->requests)
                return 0;
        /*
         * We use MMU_RELOAD just to re-arm the ipte notifier for the
         * guest prefix page. gmap_ipte_notify will wait on the ptl lock.
         * This ensures that the ipte instruction for this request has
         * already finished. We might race against a second unmapper that
         * wants to set the blocking bit. Let's just retry the request loop.
         */
        if (kvm_check_request(KVM_REQ_MMU_RELOAD, vcpu)) {
                int rc;
                rc = gmap_ipte_notify(vcpu->arch.gmap,
                                      kvm_s390_get_prefix(vcpu),
                                      PAGE_SIZE * 2);
                if (rc)
                        return rc;
                goto retry;
        }

        if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu)) {
                vcpu->arch.sie_block->ihcpu = 0xffff;
                goto retry;
        }

        if (kvm_check_request(KVM_REQ_ENABLE_IBS, vcpu)) {
                if (!ibs_enabled(vcpu)) {
                        trace_kvm_s390_enable_disable_ibs(vcpu->vcpu_id, 1);
                        atomic_or(CPUSTAT_IBS,
                                  &vcpu->arch.sie_block->cpuflags);
                }
                goto retry;
        }

        if (kvm_check_request(KVM_REQ_DISABLE_IBS, vcpu)) {
                if (ibs_enabled(vcpu)) {
                        trace_kvm_s390_enable_disable_ibs(vcpu->vcpu_id, 0);
                        atomic_andnot(CPUSTAT_IBS,
                                      &vcpu->arch.sie_block->cpuflags);
                }
                goto retry;
        }

        /* nothing to do, just clear the request */
        clear_bit(KVM_REQ_UNHALT, &vcpu->requests);

        return 0;
}

void kvm_s390_set_tod_clock(struct kvm *kvm, u64 tod)
{
        struct kvm_vcpu *vcpu;
        int i;

        mutex_lock(&kvm->lock);
        preempt_disable();
        kvm->arch.epoch = tod - get_tod_clock();
        kvm_s390_vcpu_block_all(kvm);
        kvm_for_each_vcpu(i, vcpu, kvm)
                vcpu->arch.sie_block->epoch = kvm->arch.epoch;
        kvm_s390_vcpu_unblock_all(kvm);
        preempt_enable();
        mutex_unlock(&kvm->lock);
}
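
/*
 * Worked example (values invented): if the requested guest TOD is
 * 0x2000 while get_tod_clock() returns 0x1800, the stored epoch is
 * 0x800. SIE adds that delta to the host clock, so the guest reads
 * 0x1800 + 0x800 = 0x2000.
 */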

/**
 * kvm_arch_fault_in_page - fault-in guest page if necessary
 * @vcpu: The corresponding virtual cpu
 * @gpa: Guest physical address
 * @writable: Whether the page should be writable or not
 *
 * Make sure that a guest page has been faulted-in on the host.
 *
 * Return: Zero on success, negative error code otherwise.
 */
long kvm_arch_fault_in_page(struct kvm_vcpu *vcpu, gpa_t gpa, int writable)
{
        return gmap_fault(vcpu->arch.gmap, gpa,
                          writable ? FAULT_FLAG_WRITE : 0);
}

static void __kvm_inject_pfault_token(struct kvm_vcpu *vcpu, bool start_token,
                                      unsigned long token)
{
        struct kvm_s390_interrupt inti;
        struct kvm_s390_irq irq;

        if (start_token) {
                irq.u.ext.ext_params2 = token;
                irq.type = KVM_S390_INT_PFAULT_INIT;
                WARN_ON_ONCE(kvm_s390_inject_vcpu(vcpu, &irq));
        } else {
                inti.type = KVM_S390_INT_PFAULT_DONE;
                inti.parm64 = token;
                WARN_ON_ONCE(kvm_s390_inject_vm(vcpu->kvm, &inti));
        }
}

void kvm_arch_async_page_not_present(struct kvm_vcpu *vcpu,
                                     struct kvm_async_pf *work)
{
        trace_kvm_s390_pfault_init(vcpu, work->arch.pfault_token);
        __kvm_inject_pfault_token(vcpu, true, work->arch.pfault_token);
}

void kvm_arch_async_page_present(struct kvm_vcpu *vcpu,
                                 struct kvm_async_pf *work)
{
        trace_kvm_s390_pfault_done(vcpu, work->arch.pfault_token);
        __kvm_inject_pfault_token(vcpu, false, work->arch.pfault_token);
}

void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu,
                               struct kvm_async_pf *work)
{
        /* s390 will always inject the page directly */
}

bool kvm_arch_can_inject_async_page_present(struct kvm_vcpu *vcpu)
{
        /*
         * s390 will always inject the page directly, but we still want
         * kvm_check_async_pf_completion() to clean up.
         */
        return true;
}

static int kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu)
{
        hva_t hva;
        struct kvm_arch_async_pf arch;
        int rc;

        if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
                return 0;
        if ((vcpu->arch.sie_block->gpsw.mask & vcpu->arch.pfault_select) !=
            vcpu->arch.pfault_compare)
                return 0;
        if (psw_extint_disabled(vcpu))
                return 0;
        if (kvm_s390_vcpu_has_irq(vcpu, 0))
                return 0;
        if (!(vcpu->arch.sie_block->gcr[0] & 0x200ul))
                return 0;
        if (!vcpu->arch.gmap->pfault_enabled)
                return 0;

        hva = gfn_to_hva(vcpu->kvm, gpa_to_gfn(current->thread.gmap_addr));
        hva += current->thread.gmap_addr & ~PAGE_MASK;
        if (read_guest_real(vcpu, vcpu->arch.pfault_token, &arch.pfault_token, 8))
                return 0;

        rc = kvm_setup_async_pf(vcpu, current->thread.gmap_addr, hva, &arch);
        return rc;
}

static int vcpu_pre_run(struct kvm_vcpu *vcpu)
{
        int rc, cpuflags;

        /*
         * On s390 notifications for arriving pages will be delivered directly
         * to the guest but the housekeeping for completed pfaults is
         * handled outside the worker.
         */
        kvm_check_async_pf_completion(vcpu);

        memcpy(&vcpu->arch.sie_block->gg14, &vcpu->run->s.regs.gprs[14], 16);

        if (need_resched())
                schedule();

        if (test_cpu_flag(CIF_MCCK_PENDING))
                s390_handle_mcck();

        if (!kvm_is_ucontrol(vcpu->kvm)) {
                rc = kvm_s390_deliver_pending_interrupts(vcpu);
                if (rc)
                        return rc;
        }

        rc = kvm_s390_handle_requests(vcpu);
        if (rc)
                return rc;

        if (guestdbg_enabled(vcpu)) {
                kvm_s390_backup_guest_per_regs(vcpu);
                kvm_s390_patch_guest_per_regs(vcpu);
        }

        vcpu->arch.sie_block->icptcode = 0;
        cpuflags = atomic_read(&vcpu->arch.sie_block->cpuflags);
        VCPU_EVENT(vcpu, 6, "entering sie flags %x", cpuflags);
        trace_kvm_s390_sie_enter(vcpu, cpuflags);

        return 0;
}

static int vcpu_post_run_fault_in_sie(struct kvm_vcpu *vcpu)
{
        psw_t *psw = &vcpu->arch.sie_block->gpsw;
        u8 opcode;
        int rc;

        VCPU_EVENT(vcpu, 3, "%s", "fault in sie instruction");
        trace_kvm_s390_sie_fault(vcpu);

        /*
         * We want to inject an addressing exception, which is defined as a
         * suppressing or terminating exception. However, since we came here
         * by a DAT access exception, the PSW still points to the faulting
         * instruction since DAT exceptions are nullifying. So we've got
         * to look up the current opcode to get the length of the instruction
         * to be able to forward the PSW.
         */
        rc = read_guest(vcpu, psw->addr, 0, &opcode, 1);
        if (rc)
                return kvm_s390_inject_prog_cond(vcpu, rc);
        psw->addr = __rewind_psw(*psw, -insn_length(opcode));

        return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
}
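
/*
 * Worked example (hypothetical values): the s390 instruction length is
 * encoded in the two leftmost opcode bits (00 -> 2 bytes, 01/10 -> 4,
 * 11 -> 6). For opcode 0xb2 those bits are 10, so insn_length() returns
 * 4 and __rewind_psw(*psw, -4) moves psw->addr forward past the
 * nullified instruction before the addressing exception is injected.
 */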

static int vcpu_post_run(struct kvm_vcpu *vcpu, int exit_reason)
{
        int rc = -1;

        VCPU_EVENT(vcpu, 6, "exit sie icptcode %d",
                   vcpu->arch.sie_block->icptcode);
        trace_kvm_s390_sie_exit(vcpu, vcpu->arch.sie_block->icptcode);

        if (guestdbg_enabled(vcpu))
                kvm_s390_restore_guest_per_regs(vcpu);

        if (exit_reason >= 0) {
                rc = 0;
        } else if (kvm_is_ucontrol(vcpu->kvm)) {
                vcpu->run->exit_reason = KVM_EXIT_S390_UCONTROL;
                vcpu->run->s390_ucontrol.trans_exc_code =
                                                current->thread.gmap_addr;
                vcpu->run->s390_ucontrol.pgm_code = 0x10;
                rc = -EREMOTE;

        } else if (current->thread.gmap_pfault) {
                trace_kvm_s390_major_guest_pfault(vcpu);
                current->thread.gmap_pfault = 0;
                if (kvm_arch_setup_async_pf(vcpu)) {
                        rc = 0;
                } else {
                        gpa_t gpa = current->thread.gmap_addr;
                        rc = kvm_arch_fault_in_page(vcpu, gpa, 1);
                }
        }

        if (rc == -1)
                rc = vcpu_post_run_fault_in_sie(vcpu);

        memcpy(&vcpu->run->s.regs.gprs[14], &vcpu->arch.sie_block->gg14, 16);

        if (rc == 0) {
                if (kvm_is_ucontrol(vcpu->kvm))
                        /* Don't exit for host interrupts. */
                        rc = vcpu->arch.sie_block->icptcode ? -EOPNOTSUPP : 0;
                else
                        rc = kvm_handle_sie_intercept(vcpu);
        }

        return rc;
}

static int __vcpu_run(struct kvm_vcpu *vcpu)
{
        int rc, exit_reason;

        /*
         * We try to hold kvm->srcu during most of vcpu_run (except when
         * running the guest), so that memslots (and other stuff) are
         * protected.
         */
        vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);

        do {
                rc = vcpu_pre_run(vcpu);
                if (rc)
                        break;

                srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
                /*
                 * As PF_VCPU will be used in the fault handler, there
                 * should be no uaccess between guest_enter and guest_exit.
                 */
                local_irq_disable();
                __kvm_guest_enter();
                local_irq_enable();
                exit_reason = sie64a(vcpu->arch.sie_block,
                                     vcpu->run->s.regs.gprs);
                local_irq_disable();
                __kvm_guest_exit();
                local_irq_enable();
                vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);

                rc = vcpu_post_run(vcpu, exit_reason);
        } while (!signal_pending(current) && !guestdbg_exit_pending(vcpu) && !rc);

        srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
        return rc;
}

static void sync_regs(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
        vcpu->arch.sie_block->gpsw.mask = kvm_run->psw_mask;
        vcpu->arch.sie_block->gpsw.addr = kvm_run->psw_addr;
        if (kvm_run->kvm_dirty_regs & KVM_SYNC_PREFIX)
                kvm_s390_set_prefix(vcpu, kvm_run->s.regs.prefix);
        if (kvm_run->kvm_dirty_regs & KVM_SYNC_CRS) {
                memcpy(&vcpu->arch.sie_block->gcr, &kvm_run->s.regs.crs, 128);
                /* some control register changes require a tlb flush */
                kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
        }
        if (kvm_run->kvm_dirty_regs & KVM_SYNC_ARCH0) {
                vcpu->arch.sie_block->cputm = kvm_run->s.regs.cputm;
                vcpu->arch.sie_block->ckc = kvm_run->s.regs.ckc;
                vcpu->arch.sie_block->todpr = kvm_run->s.regs.todpr;
                vcpu->arch.sie_block->pp = kvm_run->s.regs.pp;
                vcpu->arch.sie_block->gbea = kvm_run->s.regs.gbea;
        }
        if (kvm_run->kvm_dirty_regs & KVM_SYNC_PFAULT) {
                vcpu->arch.pfault_token = kvm_run->s.regs.pft;
                vcpu->arch.pfault_select = kvm_run->s.regs.pfs;
                vcpu->arch.pfault_compare = kvm_run->s.regs.pfc;
                if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
                        kvm_clear_async_pf_completion_queue(vcpu);
        }
        kvm_run->kvm_dirty_regs = 0;
}

static void store_regs(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
        kvm_run->psw_mask = vcpu->arch.sie_block->gpsw.mask;
        kvm_run->psw_addr = vcpu->arch.sie_block->gpsw.addr;
        kvm_run->s.regs.prefix = kvm_s390_get_prefix(vcpu);
        memcpy(&kvm_run->s.regs.crs, &vcpu->arch.sie_block->gcr, 128);
        kvm_run->s.regs.cputm = vcpu->arch.sie_block->cputm;
        kvm_run->s.regs.ckc = vcpu->arch.sie_block->ckc;
        kvm_run->s.regs.todpr = vcpu->arch.sie_block->todpr;
        kvm_run->s.regs.pp = vcpu->arch.sie_block->pp;
        kvm_run->s.regs.gbea = vcpu->arch.sie_block->gbea;
        kvm_run->s.regs.pft = vcpu->arch.pfault_token;
        kvm_run->s.regs.pfs = vcpu->arch.pfault_select;
        kvm_run->s.regs.pfc = vcpu->arch.pfault_compare;
}

int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
        int rc;
        sigset_t sigsaved;

        if (guestdbg_exit_pending(vcpu)) {
                kvm_s390_prepare_debug_exit(vcpu);
                return 0;
        }

        if (vcpu->sigset_active)
                sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);

        if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm)) {
                kvm_s390_vcpu_start(vcpu);
        } else if (is_vcpu_stopped(vcpu)) {
                pr_err_ratelimited("can't run stopped vcpu %d\n",
                                   vcpu->vcpu_id);
                return -EINVAL;
        }

        sync_regs(vcpu, kvm_run);

        might_fault();
        rc = __vcpu_run(vcpu);

        if (signal_pending(current) && !rc) {
                kvm_run->exit_reason = KVM_EXIT_INTR;
                rc = -EINTR;
        }

        if (guestdbg_exit_pending(vcpu) && !rc) {
                kvm_s390_prepare_debug_exit(vcpu);
                rc = 0;
        }

        if (rc == -EOPNOTSUPP) {
                /* intercept cannot be handled in-kernel, prepare kvm-run */
                kvm_run->exit_reason = KVM_EXIT_S390_SIEIC;
                kvm_run->s390_sieic.icptcode = vcpu->arch.sie_block->icptcode;
                kvm_run->s390_sieic.ipa = vcpu->arch.sie_block->ipa;
                kvm_run->s390_sieic.ipb = vcpu->arch.sie_block->ipb;
                rc = 0;
        }

        if (rc == -EREMOTE) {
                /*
                 * The intercept was handled but requires userspace support;
                 * kvm_run has already been prepared by the handler.
                 */
                rc = 0;
        }

        store_regs(vcpu, kvm_run);

        if (vcpu->sigset_active)
                sigprocmask(SIG_SETMASK, &sigsaved, NULL);

        vcpu->stat.exit_userspace++;
        return rc;
}

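/*
 * Userspace sketch (illustrative): the canonical run loop. "run" is
 * assumed to be the mmap()ed struct kvm_run of this VCPU, with the
 * size obtained via KVM_GET_VCPU_MMAP_SIZE.
 *
 *      struct kvm_run *run = mmap(NULL, mmap_size, PROT_READ | PROT_WRITE,
 *                                 MAP_SHARED, vcpu_fd, 0);
 *
 *      while (ioctl(vcpu_fd, KVM_RUN, 0) == 0) {
 *              if (run->exit_reason == KVM_EXIT_S390_SIEIC)
 *                      handle_intercept(run);  // hypothetical helper
 *      }
 */
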
/*
 * store status at address
 * we have two special cases:
 * KVM_S390_STORE_STATUS_NOADDR: -> 0x1200 on 64 bit
 * KVM_S390_STORE_STATUS_PREFIXED: -> prefix
 */
int kvm_s390_store_status_unloaded(struct kvm_vcpu *vcpu, unsigned long gpa)
{
        unsigned char archmode = 1;
        unsigned int px;
        u64 clkcomp;
        int rc;

        if (gpa == KVM_S390_STORE_STATUS_NOADDR) {
                if (write_guest_abs(vcpu, 163, &archmode, 1))
                        return -EFAULT;
                gpa = SAVE_AREA_BASE;
        } else if (gpa == KVM_S390_STORE_STATUS_PREFIXED) {
                if (write_guest_real(vcpu, 163, &archmode, 1))
                        return -EFAULT;
                gpa = kvm_s390_real_to_abs(vcpu, SAVE_AREA_BASE);
        }
        rc = write_guest_abs(vcpu, gpa + offsetof(struct save_area, fp_regs),
                             vcpu->arch.guest_fpregs.fprs, 128);
        rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, gp_regs),
                              vcpu->run->s.regs.gprs, 128);
        rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, psw),
                              &vcpu->arch.sie_block->gpsw, 16);
        px = kvm_s390_get_prefix(vcpu);
        rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, pref_reg),
                              &px, 4);
        rc |= write_guest_abs(vcpu,
                              gpa + offsetof(struct save_area, fp_ctrl_reg),
                              &vcpu->arch.guest_fpregs.fpc, 4);
        rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, tod_reg),
                              &vcpu->arch.sie_block->todpr, 4);
        rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, timer),
                              &vcpu->arch.sie_block->cputm, 8);
        clkcomp = vcpu->arch.sie_block->ckc >> 8;
        rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, clk_cmp),
                              &clkcomp, 8);
        rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, acc_regs),
                              &vcpu->run->s.regs.acrs, 64);
        rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, ctrl_regs),
                              &vcpu->arch.sie_block->gcr, 128);
        return rc ? -EFAULT : 0;
}

int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
{
        /*
         * The guest FPRS and ACRS are in the host FPRS/ACRS due to the lazy
         * copying in vcpu load/put. Let's update our copies before we save
         * them into the save area.
         */
        save_fpu_regs();
        if (test_kvm_facility(vcpu->kvm, 129)) {
                /*
                 * If the vector extension is available, the vector registers
                 * which overlap with the floating-point registers are saved
                 * in the SIE-control block. Hence, extract the floating-point
                 * registers and the FPC value and store them in the
                 * guest_fpregs structure.
                 */
                vcpu->arch.guest_fpregs.fpc = current->thread.fpu.fpc;
                convert_vx_to_fp(vcpu->arch.guest_fpregs.fprs,
                                 current->thread.fpu.vxrs);
        } else
                save_fpu_to(&vcpu->arch.guest_fpregs);
        save_access_regs(vcpu->run->s.regs.acrs);

        return kvm_s390_store_status_unloaded(vcpu, addr);
}
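
/*
 * Userspace sketch (illustrative): the address is passed as the raw
 * ioctl argument; the two magic values from the comment above select
 * the fixed save area or the prefix area.
 *
 *      ioctl(vcpu_fd, KVM_S390_STORE_STATUS, KVM_S390_STORE_STATUS_PREFIXED);
 */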

/*
 * store additional status at address
 */
int kvm_s390_store_adtl_status_unloaded(struct kvm_vcpu *vcpu,
                                        unsigned long gpa)
{
        /* Only bits 0-53 are used for address formation */
        if (!(gpa & ~0x3ff))
                return 0;

        return write_guest_abs(vcpu, gpa & ~0x3ff,
                               (void *)&vcpu->run->s.regs.vrs, 512);
}

int kvm_s390_vcpu_store_adtl_status(struct kvm_vcpu *vcpu, unsigned long addr)
{
        if (!test_kvm_facility(vcpu->kvm, 129))
                return 0;

        /*
         * The guest VXRS are in the host VXRS due to the lazy copying
         * in vcpu load/put. We can simply call save_fpu_regs() to save
         * the current register state because we are in the middle of
         * a load/put cycle.
         *
         * Let's update our copies before we save them into the save area.
         */
        save_fpu_regs();

        return kvm_s390_store_adtl_status_unloaded(vcpu, addr);
}

static void __disable_ibs_on_vcpu(struct kvm_vcpu *vcpu)
{
        kvm_check_request(KVM_REQ_ENABLE_IBS, vcpu);
        kvm_s390_sync_request(KVM_REQ_DISABLE_IBS, vcpu);
}

static void __disable_ibs_on_all_vcpus(struct kvm *kvm)
{
        unsigned int i;
        struct kvm_vcpu *vcpu;

        kvm_for_each_vcpu(i, vcpu, kvm) {
                __disable_ibs_on_vcpu(vcpu);
        }
}

static void __enable_ibs_on_vcpu(struct kvm_vcpu *vcpu)
{
        kvm_check_request(KVM_REQ_DISABLE_IBS, vcpu);
        kvm_s390_sync_request(KVM_REQ_ENABLE_IBS, vcpu);
}

void kvm_s390_vcpu_start(struct kvm_vcpu *vcpu)
{
        int i, online_vcpus, started_vcpus = 0;

        if (!is_vcpu_stopped(vcpu))
                return;

        trace_kvm_s390_vcpu_start_stop(vcpu->vcpu_id, 1);
        /* Only one cpu at a time may enter/leave the STOPPED state. */
        spin_lock(&vcpu->kvm->arch.start_stop_lock);
        online_vcpus = atomic_read(&vcpu->kvm->online_vcpus);

        for (i = 0; i < online_vcpus; i++) {
                if (!is_vcpu_stopped(vcpu->kvm->vcpus[i]))
                        started_vcpus++;
        }

        if (started_vcpus == 0) {
                /* we're the only active VCPU -> speed it up */
                __enable_ibs_on_vcpu(vcpu);
        } else if (started_vcpus == 1) {
                /*
                 * As we are starting a second VCPU, we have to disable
                 * the IBS facility on all VCPUs to remove potentially
                 * outstanding ENABLE requests.
                 */
                __disable_ibs_on_all_vcpus(vcpu->kvm);
        }

        atomic_andnot(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);
        /*
         * Another VCPU might have used IBS while we were offline.
         * Let's play safe and flush the VCPU at startup.
         */
        kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
        spin_unlock(&vcpu->kvm->arch.start_stop_lock);
        return;
}

void kvm_s390_vcpu_stop(struct kvm_vcpu *vcpu)
{
        int i, online_vcpus, started_vcpus = 0;
        struct kvm_vcpu *started_vcpu = NULL;

        if (is_vcpu_stopped(vcpu))
                return;

        trace_kvm_s390_vcpu_start_stop(vcpu->vcpu_id, 0);
        /* Only one cpu at a time may enter/leave the STOPPED state. */
        spin_lock(&vcpu->kvm->arch.start_stop_lock);
        online_vcpus = atomic_read(&vcpu->kvm->online_vcpus);

        /* SIGP STOP and SIGP STOP AND STORE STATUS have been fully processed */
        kvm_s390_clear_stop_irq(vcpu);

        atomic_or(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);
        __disable_ibs_on_vcpu(vcpu);

        for (i = 0; i < online_vcpus; i++) {
                if (!is_vcpu_stopped(vcpu->kvm->vcpus[i])) {
                        started_vcpus++;
                        started_vcpu = vcpu->kvm->vcpus[i];
                }
        }

        if (started_vcpus == 1) {
                /*
                 * As we only have one VCPU left, we want to enable the
                 * IBS facility for that VCPU to speed it up.
                 */
                __enable_ibs_on_vcpu(started_vcpu);
        }

        spin_unlock(&vcpu->kvm->arch.start_stop_lock);
        return;
}

static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
                                     struct kvm_enable_cap *cap)
{
        int r;

        if (cap->flags)
                return -EINVAL;

        switch (cap->cap) {
        case KVM_CAP_S390_CSS_SUPPORT:
                if (!vcpu->kvm->arch.css_support) {
                        vcpu->kvm->arch.css_support = 1;
                        VM_EVENT(vcpu->kvm, 3, "%s", "ENABLE: CSS support");
                        trace_kvm_s390_enable_css(vcpu->kvm);
                }
                r = 0;
                break;
        default:
                r = -EINVAL;
                break;
        }
        return r;
}
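
/*
 * Userspace sketch (illustrative): enabling channel subsystem support;
 * cap->flags must be zero, which the designated initializer guarantees.
 *
 *      struct kvm_enable_cap cap = {
 *              .cap = KVM_CAP_S390_CSS_SUPPORT,
 *      };
 *
 *      ioctl(vcpu_fd, KVM_ENABLE_CAP, &cap);
 */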

static long kvm_s390_guest_mem_op(struct kvm_vcpu *vcpu,
                                  struct kvm_s390_mem_op *mop)
{
        void __user *uaddr = (void __user *)mop->buf;
        void *tmpbuf = NULL;
        int r, srcu_idx;
        const u64 supported_flags = KVM_S390_MEMOP_F_INJECT_EXCEPTION
                                    | KVM_S390_MEMOP_F_CHECK_ONLY;

        if (mop->flags & ~supported_flags)
                return -EINVAL;

        if (mop->size > MEM_OP_MAX_SIZE)
                return -E2BIG;

        if (!(mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY)) {
                tmpbuf = vmalloc(mop->size);
                if (!tmpbuf)
                        return -ENOMEM;
        }

        srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);

        switch (mop->op) {
        case KVM_S390_MEMOP_LOGICAL_READ:
                if (mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY) {
                        r = check_gva_range(vcpu, mop->gaddr, mop->ar, mop->size, false);
                        break;
                }
                r = read_guest(vcpu, mop->gaddr, mop->ar, tmpbuf, mop->size);
                if (r == 0) {
                        if (copy_to_user(uaddr, tmpbuf, mop->size))
                                r = -EFAULT;
                }
                break;
        case KVM_S390_MEMOP_LOGICAL_WRITE:
                if (mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY) {
                        r = check_gva_range(vcpu, mop->gaddr, mop->ar, mop->size, true);
                        break;
                }
                if (copy_from_user(tmpbuf, uaddr, mop->size)) {
                        r = -EFAULT;
                        break;
                }
                r = write_guest(vcpu, mop->gaddr, mop->ar, tmpbuf, mop->size);
                break;
        default:
                r = -EINVAL;
        }

        srcu_read_unlock(&vcpu->kvm->srcu, srcu_idx);

        if (r > 0 && (mop->flags & KVM_S390_MEMOP_F_INJECT_EXCEPTION) != 0)
                kvm_s390_inject_prog_irq(vcpu, &vcpu->arch.pgm);

        vfree(tmpbuf);
        return r;
}
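
/*
 * Userspace sketch (illustrative): reading 256 bytes from guest logical
 * address 0x1000 through access register 0.
 *
 *      __u8 buf[256];
 *      struct kvm_s390_mem_op op = {
 *              .gaddr = 0x1000,
 *              .size  = sizeof(buf),
 *              .op    = KVM_S390_MEMOP_LOGICAL_READ,
 *              .buf   = (__u64)(unsigned long)buf,
 *              .ar    = 0,
 *      };
 *
 *      ioctl(vcpu_fd, KVM_S390_MEM_OP, &op);
 */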

long kvm_arch_vcpu_ioctl(struct file *filp,
                         unsigned int ioctl, unsigned long arg)
{
        struct kvm_vcpu *vcpu = filp->private_data;
        void __user *argp = (void __user *)arg;
        int idx;
        long r;

        switch (ioctl) {
        case KVM_S390_IRQ: {
                struct kvm_s390_irq s390irq;

                r = -EFAULT;
                if (copy_from_user(&s390irq, argp, sizeof(s390irq)))
                        break;
                r = kvm_s390_inject_vcpu(vcpu, &s390irq);
                break;
        }
        case KVM_S390_INTERRUPT: {
                struct kvm_s390_interrupt s390int;
                struct kvm_s390_irq s390irq;

                r = -EFAULT;
                if (copy_from_user(&s390int, argp, sizeof(s390int)))
                        break;
                if (s390int_to_s390irq(&s390int, &s390irq))
                        return -EINVAL;
                r = kvm_s390_inject_vcpu(vcpu, &s390irq);
                break;
        }
        case KVM_S390_STORE_STATUS:
                idx = srcu_read_lock(&vcpu->kvm->srcu);
                r = kvm_s390_vcpu_store_status(vcpu, arg);
                srcu_read_unlock(&vcpu->kvm->srcu, idx);
                break;
        case KVM_S390_SET_INITIAL_PSW: {
                psw_t psw;

                r = -EFAULT;
                if (copy_from_user(&psw, argp, sizeof(psw)))
                        break;
                r = kvm_arch_vcpu_ioctl_set_initial_psw(vcpu, psw);
                break;
        }
        case KVM_S390_INITIAL_RESET:
                r = kvm_arch_vcpu_ioctl_initial_reset(vcpu);
                break;
        case KVM_SET_ONE_REG:
        case KVM_GET_ONE_REG: {
                struct kvm_one_reg reg;
                r = -EFAULT;
                if (copy_from_user(&reg, argp, sizeof(reg)))
                        break;
                if (ioctl == KVM_SET_ONE_REG)
                        r = kvm_arch_vcpu_ioctl_set_one_reg(vcpu, &reg);
                else
                        r = kvm_arch_vcpu_ioctl_get_one_reg(vcpu, &reg);
                break;
        }
#ifdef CONFIG_KVM_S390_UCONTROL
        case KVM_S390_UCAS_MAP: {
                struct kvm_s390_ucas_mapping ucasmap;

                if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
                        r = -EFAULT;
                        break;
                }

                if (!kvm_is_ucontrol(vcpu->kvm)) {
                        r = -EINVAL;
                        break;
                }

                r = gmap_map_segment(vcpu->arch.gmap, ucasmap.user_addr,
                                     ucasmap.vcpu_addr, ucasmap.length);
                break;
        }
        case KVM_S390_UCAS_UNMAP: {
                struct kvm_s390_ucas_mapping ucasmap;

                if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
                        r = -EFAULT;
                        break;
                }

                if (!kvm_is_ucontrol(vcpu->kvm)) {
                        r = -EINVAL;
                        break;
                }

                r = gmap_unmap_segment(vcpu->arch.gmap, ucasmap.vcpu_addr,
                                       ucasmap.length);
                break;
        }
#endif
        case KVM_S390_VCPU_FAULT: {
                r = gmap_fault(vcpu->arch.gmap, arg, 0);
                break;
        }
        case KVM_ENABLE_CAP:
        {
                struct kvm_enable_cap cap;
                r = -EFAULT;
                if (copy_from_user(&cap, argp, sizeof(cap)))
                        break;
                r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap);
                break;
        }
        case KVM_S390_MEM_OP: {
                struct kvm_s390_mem_op mem_op;

                if (copy_from_user(&mem_op, argp, sizeof(mem_op)) == 0)
                        r = kvm_s390_guest_mem_op(vcpu, &mem_op);
                else
                        r = -EFAULT;
                break;
        }
        case KVM_S390_SET_IRQ_STATE: {
                struct kvm_s390_irq_state irq_state;

                r = -EFAULT;
                if (copy_from_user(&irq_state, argp, sizeof(irq_state)))
                        break;
                if (irq_state.len > VCPU_IRQS_MAX_BUF ||
                    irq_state.len == 0 ||
                    irq_state.len % sizeof(struct kvm_s390_irq) > 0) {
                        r = -EINVAL;
                        break;
                }
                r = kvm_s390_set_irq_state(vcpu,
                                           (void __user *) irq_state.buf,
                                           irq_state.len);
                break;
        }
        case KVM_S390_GET_IRQ_STATE: {
                struct kvm_s390_irq_state irq_state;

                r = -EFAULT;
                if (copy_from_user(&irq_state, argp, sizeof(irq_state)))
                        break;
                if (irq_state.len == 0) {
                        r = -EINVAL;
                        break;
                }
                r = kvm_s390_get_irq_state(vcpu,
                                           (__u8 __user *) irq_state.buf,
                                           irq_state.len);
                break;
        }
        default:
                r = -ENOTTY;
        }
        return r;
}
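
/*
 * Userspace sketch (illustrative): injecting an emergency signal from
 * a (hypothetical) source CPU address 1 via KVM_S390_IRQ.
 *
 *      struct kvm_s390_irq irq = {
 *              .type = KVM_S390_INT_EMERGENCY,
 *              .u.emerg.code = 1,
 *      };
 *
 *      ioctl(vcpu_fd, KVM_S390_IRQ, &irq);
 */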

int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
{
#ifdef CONFIG_KVM_S390_UCONTROL
        if ((vmf->pgoff == KVM_S390_SIE_PAGE_OFFSET)
                         && (kvm_is_ucontrol(vcpu->kvm))) {
                vmf->page = virt_to_page(vcpu->arch.sie_block);
                get_page(vmf->page);
                return 0;
        }
#endif
        return VM_FAULT_SIGBUS;
}

int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
                            unsigned long npages)
{
        return 0;
}

/* Section: memory related */
int kvm_arch_prepare_memory_region(struct kvm *kvm,
                                   struct kvm_memory_slot *memslot,
                                   const struct kvm_userspace_memory_region *mem,
                                   enum kvm_mr_change change)
{
        /*
         * A few sanity checks. Memory slots have to start and end on a
         * segment boundary (1 MB). The memory in userland may be
         * fragmented into various different vmas, and it is okay to
         * mmap() and munmap() stuff in this slot at any time after
         * doing this call.
         */
        if (mem->userspace_addr & 0xffffful)
                return -EINVAL;

        if (mem->memory_size & 0xffffful)
                return -EINVAL;

        return 0;
}

void kvm_arch_commit_memory_region(struct kvm *kvm,
                                   const struct kvm_userspace_memory_region *mem,
                                   const struct kvm_memory_slot *old,
                                   const struct kvm_memory_slot *new,
                                   enum kvm_mr_change change)
{
        int rc;

        /*
         * If the basics of the memslot do not change, we do not want
         * to update the gmap. Every update causes several unnecessary
         * segment translation exceptions. This is usually handled just
         * fine by the normal fault handler + gmap, but it will also
         * cause faults on the prefix page of running guest CPUs.
         */
        if (old->userspace_addr == mem->userspace_addr &&
            old->base_gfn * PAGE_SIZE == mem->guest_phys_addr &&
            old->npages * PAGE_SIZE == mem->memory_size)
                return;

        rc = gmap_map_segment(kvm->arch.gmap, mem->userspace_addr,
                              mem->guest_phys_addr, mem->memory_size);
        if (rc)
                pr_warn("failed to commit memory region\n");
        return;
}
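
/*
 * Userspace sketch (illustrative): a slot that satisfies the 1 MB
 * alignment checks above. "host_mem" is assumed to be a 1 MB aligned
 * allocation (e.g. from mmap()); the ioctl goes to the VM fd.
 *
 *      struct kvm_userspace_memory_region mem = {
 *              .slot            = 0,
 *              .guest_phys_addr = 0,
 *              .memory_size     = 1UL << 20,
 *              .userspace_addr  = (__u64)(unsigned long)host_mem,
 *      };
 *
 *      ioctl(vm_fd, KVM_SET_USER_MEMORY_REGION, &mem);
 */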

static int __init kvm_s390_init(void)
{
        return kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
}

static void __exit kvm_s390_exit(void)
{
        kvm_exit();
}

module_init(kvm_s390_init);
module_exit(kvm_s390_exit);

/*
 * Enable autoloading of the kvm module.
 * Note that we add the module alias here instead of virt/kvm/kvm_main.c
 * since x86 takes a different approach.
 */
#include <linux/miscdevice.h>
MODULE_ALIAS_MISCDEV(KVM_MINOR);
MODULE_ALIAS("devname:kvm");