]> git.proxmox.com Git - mirror_ubuntu-jammy-kernel.git/blame - arch/s390/kvm/kvm-s390.c
KVM: s390: switch to get_tod_clock() and fix STP sync races
[mirror_ubuntu-jammy-kernel.git] / arch / s390 / kvm / kvm-s390.c
CommitLineData
b0c632db 1/*
a53c8fab 2 * hosting zSeries kernel virtual machines
b0c632db 3 *
a53c8fab 4 * Copyright IBM Corp. 2008, 2009
b0c632db
HC
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License (version 2 only)
8 * as published by the Free Software Foundation.
9 *
10 * Author(s): Carsten Otte <cotte@de.ibm.com>
11 * Christian Borntraeger <borntraeger@de.ibm.com>
12 * Heiko Carstens <heiko.carstens@de.ibm.com>
628eb9b8 13 * Christian Ehrhardt <ehrhardt@de.ibm.com>
15f36ebd 14 * Jason J. Herne <jjherne@us.ibm.com>
b0c632db
HC
15 */
16
17#include <linux/compiler.h>
18#include <linux/err.h>
19#include <linux/fs.h>
ca872302 20#include <linux/hrtimer.h>
b0c632db
HC
21#include <linux/init.h>
22#include <linux/kvm.h>
23#include <linux/kvm_host.h>
24#include <linux/module.h>
a374e892 25#include <linux/random.h>
b0c632db 26#include <linux/slab.h>
ba5c1e9b 27#include <linux/timer.h>
41408c28 28#include <linux/vmalloc.h>
cbb870c8 29#include <asm/asm-offsets.h>
b0c632db 30#include <asm/lowcore.h>
fdf03650 31#include <asm/etr.h>
b0c632db 32#include <asm/pgtable.h>
f5daba1d 33#include <asm/nmi.h>
a0616cde 34#include <asm/switch_to.h>
6d3da241 35#include <asm/isc.h>
1526bf9c 36#include <asm/sclp.h>
8f2abe6a 37#include "kvm-s390.h"
b0c632db
HC
38#include "gaccess.h"
39
ea2cdd27
DH
40#define KMSG_COMPONENT "kvm-s390"
41#undef pr_fmt
42#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
43
5786fffa
CH
44#define CREATE_TRACE_POINTS
45#include "trace.h"
ade38c31 46#include "trace-s390.h"
5786fffa 47
41408c28 48#define MEM_OP_MAX_SIZE 65536 /* Maximum transfer size for KVM_S390_MEM_OP */
816c7667
JF
49#define LOCAL_IRQS 32
50#define VCPU_IRQS_MAX_BUF (sizeof(struct kvm_s390_irq) * \
51 (KVM_MAX_VCPUS + LOCAL_IRQS))
41408c28 52
b0c632db
HC
53#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU
54
55struct kvm_stats_debugfs_item debugfs_entries[] = {
56 { "userspace_handled", VCPU_STAT(exit_userspace) },
0eaeafa1 57 { "exit_null", VCPU_STAT(exit_null) },
8f2abe6a
CB
58 { "exit_validity", VCPU_STAT(exit_validity) },
59 { "exit_stop_request", VCPU_STAT(exit_stop_request) },
60 { "exit_external_request", VCPU_STAT(exit_external_request) },
61 { "exit_external_interrupt", VCPU_STAT(exit_external_interrupt) },
ba5c1e9b
CO
62 { "exit_instruction", VCPU_STAT(exit_instruction) },
63 { "exit_program_interruption", VCPU_STAT(exit_program_interruption) },
64 { "exit_instr_and_program_int", VCPU_STAT(exit_instr_and_program) },
f7819512 65 { "halt_successful_poll", VCPU_STAT(halt_successful_poll) },
62bea5bf 66 { "halt_attempted_poll", VCPU_STAT(halt_attempted_poll) },
ce2e4f0b 67 { "halt_wakeup", VCPU_STAT(halt_wakeup) },
f5e10b09 68 { "instruction_lctlg", VCPU_STAT(instruction_lctlg) },
ba5c1e9b 69 { "instruction_lctl", VCPU_STAT(instruction_lctl) },
aba07508
DH
70 { "instruction_stctl", VCPU_STAT(instruction_stctl) },
71 { "instruction_stctg", VCPU_STAT(instruction_stctg) },
ba5c1e9b 72 { "deliver_emergency_signal", VCPU_STAT(deliver_emergency_signal) },
7697e71f 73 { "deliver_external_call", VCPU_STAT(deliver_external_call) },
ba5c1e9b
CO
74 { "deliver_service_signal", VCPU_STAT(deliver_service_signal) },
75 { "deliver_virtio_interrupt", VCPU_STAT(deliver_virtio_interrupt) },
76 { "deliver_stop_signal", VCPU_STAT(deliver_stop_signal) },
77 { "deliver_prefix_signal", VCPU_STAT(deliver_prefix_signal) },
78 { "deliver_restart_signal", VCPU_STAT(deliver_restart_signal) },
79 { "deliver_program_interruption", VCPU_STAT(deliver_program_int) },
80 { "exit_wait_state", VCPU_STAT(exit_wait_state) },
69d0d3a3 81 { "instruction_pfmf", VCPU_STAT(instruction_pfmf) },
453423dc
CB
82 { "instruction_stidp", VCPU_STAT(instruction_stidp) },
83 { "instruction_spx", VCPU_STAT(instruction_spx) },
84 { "instruction_stpx", VCPU_STAT(instruction_stpx) },
85 { "instruction_stap", VCPU_STAT(instruction_stap) },
86 { "instruction_storage_key", VCPU_STAT(instruction_storage_key) },
8a242234 87 { "instruction_ipte_interlock", VCPU_STAT(instruction_ipte_interlock) },
453423dc
CB
88 { "instruction_stsch", VCPU_STAT(instruction_stsch) },
89 { "instruction_chsc", VCPU_STAT(instruction_chsc) },
b31288fa 90 { "instruction_essa", VCPU_STAT(instruction_essa) },
453423dc
CB
91 { "instruction_stsi", VCPU_STAT(instruction_stsi) },
92 { "instruction_stfl", VCPU_STAT(instruction_stfl) },
bb25b9ba 93 { "instruction_tprot", VCPU_STAT(instruction_tprot) },
5288fbf0 94 { "instruction_sigp_sense", VCPU_STAT(instruction_sigp_sense) },
bd59d3a4 95 { "instruction_sigp_sense_running", VCPU_STAT(instruction_sigp_sense_running) },
7697e71f 96 { "instruction_sigp_external_call", VCPU_STAT(instruction_sigp_external_call) },
5288fbf0 97 { "instruction_sigp_emergency", VCPU_STAT(instruction_sigp_emergency) },
42cb0c9f
DH
98 { "instruction_sigp_cond_emergency", VCPU_STAT(instruction_sigp_cond_emergency) },
99 { "instruction_sigp_start", VCPU_STAT(instruction_sigp_start) },
5288fbf0 100 { "instruction_sigp_stop", VCPU_STAT(instruction_sigp_stop) },
42cb0c9f
DH
101 { "instruction_sigp_stop_store_status", VCPU_STAT(instruction_sigp_stop_store_status) },
102 { "instruction_sigp_store_status", VCPU_STAT(instruction_sigp_store_status) },
cd7b4b61 103 { "instruction_sigp_store_adtl_status", VCPU_STAT(instruction_sigp_store_adtl_status) },
5288fbf0
CB
104 { "instruction_sigp_set_arch", VCPU_STAT(instruction_sigp_arch) },
105 { "instruction_sigp_set_prefix", VCPU_STAT(instruction_sigp_prefix) },
106 { "instruction_sigp_restart", VCPU_STAT(instruction_sigp_restart) },
42cb0c9f
DH
107 { "instruction_sigp_cpu_reset", VCPU_STAT(instruction_sigp_cpu_reset) },
108 { "instruction_sigp_init_cpu_reset", VCPU_STAT(instruction_sigp_init_cpu_reset) },
109 { "instruction_sigp_unknown", VCPU_STAT(instruction_sigp_unknown) },
388186bc 110 { "diagnose_10", VCPU_STAT(diagnose_10) },
e28acfea 111 { "diagnose_44", VCPU_STAT(diagnose_44) },
41628d33 112 { "diagnose_9c", VCPU_STAT(diagnose_9c) },
175a5c9e
CB
113 { "diagnose_258", VCPU_STAT(diagnose_258) },
114 { "diagnose_308", VCPU_STAT(diagnose_308) },
115 { "diagnose_500", VCPU_STAT(diagnose_500) },
b0c632db
HC
116 { NULL }
117};
118
9d8d5786
MM
119/* upper facilities limit for kvm */
120unsigned long kvm_s390_fac_list_mask[] = {
a3ed8dae 121 0xffe6fffbfcfdfc40UL,
53df84f8 122 0x005e800000000000UL,
9d8d5786 123};
b0c632db 124
9d8d5786 125unsigned long kvm_s390_fac_list_mask_size(void)
78c4b59f 126{
9d8d5786
MM
127 BUILD_BUG_ON(ARRAY_SIZE(kvm_s390_fac_list_mask) > S390_ARCH_FAC_MASK_SIZE_U64);
128 return ARRAY_SIZE(kvm_s390_fac_list_mask);
78c4b59f
MM
129}
130
9d8d5786 131static struct gmap_notifier gmap_notifier;
78f26131 132debug_info_t *kvm_s390_dbf;
9d8d5786 133
b0c632db 134/* Section: not file related */
13a34e06 135int kvm_arch_hardware_enable(void)
b0c632db
HC
136{
137 /* every s390 is virtualization enabled ;-) */
10474ae8 138 return 0;
b0c632db
HC
139}
140
2c70fe44
CB
141static void kvm_gmap_notifier(struct gmap *gmap, unsigned long address);
142
fdf03650
FZ
143/*
144 * This callback is executed during stop_machine(). All CPUs are therefore
145 * temporarily stopped. In order not to change guest behavior, we have to
146 * disable preemption whenever we touch the epoch of kvm and the VCPUs,
147 * so a CPU won't be stopped while calculating with the epoch.
148 */
149static int kvm_clock_sync(struct notifier_block *notifier, unsigned long val,
150 void *v)
151{
152 struct kvm *kvm;
153 struct kvm_vcpu *vcpu;
154 int i;
155 unsigned long long *delta = v;
156
157 list_for_each_entry(kvm, &vm_list, vm_list) {
158 kvm->arch.epoch -= *delta;
159 kvm_for_each_vcpu(i, vcpu, kvm) {
160 vcpu->arch.sie_block->epoch -= *delta;
161 }
162 }
163 return NOTIFY_OK;
164}
165
166static struct notifier_block kvm_clock_notifier = {
167 .notifier_call = kvm_clock_sync,
168};
169
b0c632db
HC
170int kvm_arch_hardware_setup(void)
171{
2c70fe44
CB
172 gmap_notifier.notifier_call = kvm_gmap_notifier;
173 gmap_register_ipte_notifier(&gmap_notifier);
fdf03650
FZ
174 atomic_notifier_chain_register(&s390_epoch_delta_notifier,
175 &kvm_clock_notifier);
b0c632db
HC
176 return 0;
177}
178
179void kvm_arch_hardware_unsetup(void)
180{
2c70fe44 181 gmap_unregister_ipte_notifier(&gmap_notifier);
fdf03650
FZ
182 atomic_notifier_chain_unregister(&s390_epoch_delta_notifier,
183 &kvm_clock_notifier);
b0c632db
HC
184}
185
b0c632db
HC
186int kvm_arch_init(void *opaque)
187{
78f26131
CB
188 kvm_s390_dbf = debug_register("kvm-trace", 32, 1, 7 * sizeof(long));
189 if (!kvm_s390_dbf)
190 return -ENOMEM;
191
192 if (debug_register_view(kvm_s390_dbf, &debug_sprintf_view)) {
193 debug_unregister(kvm_s390_dbf);
194 return -ENOMEM;
195 }
196
84877d93
CH
197 /* Register floating interrupt controller interface. */
198 return kvm_register_device_ops(&kvm_flic_ops, KVM_DEV_TYPE_FLIC);
b0c632db
HC
199}
200
78f26131
CB
201void kvm_arch_exit(void)
202{
203 debug_unregister(kvm_s390_dbf);
204}
205
b0c632db
HC
206/* Section: device related */
207long kvm_arch_dev_ioctl(struct file *filp,
208 unsigned int ioctl, unsigned long arg)
209{
210 if (ioctl == KVM_S390_ENABLE_SIE)
211 return s390_enable_sie();
212 return -EINVAL;
213}
214
784aa3d7 215int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
b0c632db 216{
d7b0b5eb
CO
217 int r;
218
2bd0ac4e 219 switch (ext) {
d7b0b5eb 220 case KVM_CAP_S390_PSW:
b6cf8788 221 case KVM_CAP_S390_GMAP:
52e16b18 222 case KVM_CAP_SYNC_MMU:
1efd0f59
CO
223#ifdef CONFIG_KVM_S390_UCONTROL
224 case KVM_CAP_S390_UCONTROL:
225#endif
3c038e6b 226 case KVM_CAP_ASYNC_PF:
60b413c9 227 case KVM_CAP_SYNC_REGS:
14eebd91 228 case KVM_CAP_ONE_REG:
d6712df9 229 case KVM_CAP_ENABLE_CAP:
fa6b7fe9 230 case KVM_CAP_S390_CSS_SUPPORT:
10ccaa1e 231 case KVM_CAP_IOEVENTFD:
c05c4186 232 case KVM_CAP_DEVICE_CTRL:
d938dc55 233 case KVM_CAP_ENABLE_CAP_VM:
78599d90 234 case KVM_CAP_S390_IRQCHIP:
f2061656 235 case KVM_CAP_VM_ATTRIBUTES:
6352e4d2 236 case KVM_CAP_MP_STATE:
47b43c52 237 case KVM_CAP_S390_INJECT_IRQ:
2444b352 238 case KVM_CAP_S390_USER_SIGP:
e44fc8c9 239 case KVM_CAP_S390_USER_STSI:
30ee2a98 240 case KVM_CAP_S390_SKEYS:
816c7667 241 case KVM_CAP_S390_IRQ_STATE:
d7b0b5eb
CO
242 r = 1;
243 break;
41408c28
TH
244 case KVM_CAP_S390_MEM_OP:
245 r = MEM_OP_MAX_SIZE;
246 break;
e726b1bd
CB
247 case KVM_CAP_NR_VCPUS:
248 case KVM_CAP_MAX_VCPUS:
249 r = KVM_MAX_VCPUS;
250 break;
e1e2e605
NW
251 case KVM_CAP_NR_MEMSLOTS:
252 r = KVM_USER_MEM_SLOTS;
253 break;
1526bf9c 254 case KVM_CAP_S390_COW:
abf09bed 255 r = MACHINE_HAS_ESOP;
1526bf9c 256 break;
68c55750
EF
257 case KVM_CAP_S390_VECTOR_REGISTERS:
258 r = MACHINE_HAS_VX;
259 break;
2bd0ac4e 260 default:
d7b0b5eb 261 r = 0;
2bd0ac4e 262 }
d7b0b5eb 263 return r;
b0c632db
HC
264}
265
15f36ebd
JH
266static void kvm_s390_sync_dirty_log(struct kvm *kvm,
267 struct kvm_memory_slot *memslot)
268{
269 gfn_t cur_gfn, last_gfn;
270 unsigned long address;
271 struct gmap *gmap = kvm->arch.gmap;
272
273 down_read(&gmap->mm->mmap_sem);
274 /* Loop over all guest pages */
275 last_gfn = memslot->base_gfn + memslot->npages;
276 for (cur_gfn = memslot->base_gfn; cur_gfn <= last_gfn; cur_gfn++) {
277 address = gfn_to_hva_memslot(memslot, cur_gfn);
278
279 if (gmap_test_and_clear_dirty(address, gmap))
280 mark_page_dirty(kvm, cur_gfn);
281 }
282 up_read(&gmap->mm->mmap_sem);
283}
284
b0c632db
HC
285/* Section: vm related */
286/*
287 * Get (and clear) the dirty memory log for a memory slot.
288 */
289int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
290 struct kvm_dirty_log *log)
291{
15f36ebd
JH
292 int r;
293 unsigned long n;
9f6b8029 294 struct kvm_memslots *slots;
15f36ebd
JH
295 struct kvm_memory_slot *memslot;
296 int is_dirty = 0;
297
298 mutex_lock(&kvm->slots_lock);
299
300 r = -EINVAL;
301 if (log->slot >= KVM_USER_MEM_SLOTS)
302 goto out;
303
9f6b8029
PB
304 slots = kvm_memslots(kvm);
305 memslot = id_to_memslot(slots, log->slot);
15f36ebd
JH
306 r = -ENOENT;
307 if (!memslot->dirty_bitmap)
308 goto out;
309
310 kvm_s390_sync_dirty_log(kvm, memslot);
311 r = kvm_get_dirty_log(kvm, log, &is_dirty);
312 if (r)
313 goto out;
314
315 /* Clear the dirty log */
316 if (is_dirty) {
317 n = kvm_dirty_bitmap_bytes(memslot);
318 memset(memslot->dirty_bitmap, 0, n);
319 }
320 r = 0;
321out:
322 mutex_unlock(&kvm->slots_lock);
323 return r;
b0c632db
HC
324}
325
d938dc55
CH
326static int kvm_vm_ioctl_enable_cap(struct kvm *kvm, struct kvm_enable_cap *cap)
327{
328 int r;
329
330 if (cap->flags)
331 return -EINVAL;
332
333 switch (cap->cap) {
84223598 334 case KVM_CAP_S390_IRQCHIP:
c92ea7b9 335 VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_IRQCHIP");
84223598
CH
336 kvm->arch.use_irqchip = 1;
337 r = 0;
338 break;
2444b352 339 case KVM_CAP_S390_USER_SIGP:
c92ea7b9 340 VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_USER_SIGP");
2444b352
DH
341 kvm->arch.user_sigp = 1;
342 r = 0;
343 break;
68c55750 344 case KVM_CAP_S390_VECTOR_REGISTERS:
18280d8b
MM
345 if (MACHINE_HAS_VX) {
346 set_kvm_facility(kvm->arch.model.fac->mask, 129);
347 set_kvm_facility(kvm->arch.model.fac->list, 129);
348 r = 0;
349 } else
350 r = -EINVAL;
c92ea7b9
CB
351 VM_EVENT(kvm, 3, "ENABLE: CAP_S390_VECTOR_REGISTERS %s",
352 r ? "(not available)" : "(success)");
68c55750 353 break;
e44fc8c9 354 case KVM_CAP_S390_USER_STSI:
c92ea7b9 355 VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_USER_STSI");
e44fc8c9
ET
356 kvm->arch.user_stsi = 1;
357 r = 0;
358 break;
d938dc55
CH
359 default:
360 r = -EINVAL;
361 break;
362 }
363 return r;
364}
365
8c0a7ce6
DD
366static int kvm_s390_get_mem_control(struct kvm *kvm, struct kvm_device_attr *attr)
367{
368 int ret;
369
370 switch (attr->attr) {
371 case KVM_S390_VM_MEM_LIMIT_SIZE:
372 ret = 0;
c92ea7b9
CB
373 VM_EVENT(kvm, 3, "QUERY: max guest memory: %lu bytes",
374 kvm->arch.gmap->asce_end);
8c0a7ce6
DD
375 if (put_user(kvm->arch.gmap->asce_end, (u64 __user *)attr->addr))
376 ret = -EFAULT;
377 break;
378 default:
379 ret = -ENXIO;
380 break;
381 }
382 return ret;
383}
384
385static int kvm_s390_set_mem_control(struct kvm *kvm, struct kvm_device_attr *attr)
4f718eab
DD
386{
387 int ret;
388 unsigned int idx;
389 switch (attr->attr) {
390 case KVM_S390_VM_MEM_ENABLE_CMMA:
e6db1d61
DD
391 /* enable CMMA only for z10 and later (EDAT_1) */
392 ret = -EINVAL;
393 if (!MACHINE_IS_LPAR || !MACHINE_HAS_EDAT1)
394 break;
395
4f718eab 396 ret = -EBUSY;
c92ea7b9 397 VM_EVENT(kvm, 3, "%s", "ENABLE: CMMA support");
4f718eab
DD
398 mutex_lock(&kvm->lock);
399 if (atomic_read(&kvm->online_vcpus) == 0) {
400 kvm->arch.use_cmma = 1;
401 ret = 0;
402 }
403 mutex_unlock(&kvm->lock);
404 break;
405 case KVM_S390_VM_MEM_CLR_CMMA:
c3489155
DD
406 ret = -EINVAL;
407 if (!kvm->arch.use_cmma)
408 break;
409
c92ea7b9 410 VM_EVENT(kvm, 3, "%s", "RESET: CMMA states");
4f718eab
DD
411 mutex_lock(&kvm->lock);
412 idx = srcu_read_lock(&kvm->srcu);
a13cff31 413 s390_reset_cmma(kvm->arch.gmap->mm);
4f718eab
DD
414 srcu_read_unlock(&kvm->srcu, idx);
415 mutex_unlock(&kvm->lock);
416 ret = 0;
417 break;
8c0a7ce6
DD
418 case KVM_S390_VM_MEM_LIMIT_SIZE: {
419 unsigned long new_limit;
420
421 if (kvm_is_ucontrol(kvm))
422 return -EINVAL;
423
424 if (get_user(new_limit, (u64 __user *)attr->addr))
425 return -EFAULT;
426
427 if (new_limit > kvm->arch.gmap->asce_end)
428 return -E2BIG;
429
430 ret = -EBUSY;
431 mutex_lock(&kvm->lock);
432 if (atomic_read(&kvm->online_vcpus) == 0) {
433 /* gmap_alloc will round the limit up */
434 struct gmap *new = gmap_alloc(current->mm, new_limit);
435
436 if (!new) {
437 ret = -ENOMEM;
438 } else {
439 gmap_free(kvm->arch.gmap);
440 new->private = kvm;
441 kvm->arch.gmap = new;
442 ret = 0;
443 }
444 }
445 mutex_unlock(&kvm->lock);
c92ea7b9 446 VM_EVENT(kvm, 3, "SET: max guest memory: %lu bytes", new_limit);
8c0a7ce6
DD
447 break;
448 }
4f718eab
DD
449 default:
450 ret = -ENXIO;
451 break;
452 }
453 return ret;
454}
455
a374e892
TK
456static void kvm_s390_vcpu_crypto_setup(struct kvm_vcpu *vcpu);
457
458static int kvm_s390_vm_set_crypto(struct kvm *kvm, struct kvm_device_attr *attr)
459{
460 struct kvm_vcpu *vcpu;
461 int i;
462
9d8d5786 463 if (!test_kvm_facility(kvm, 76))
a374e892
TK
464 return -EINVAL;
465
466 mutex_lock(&kvm->lock);
467 switch (attr->attr) {
468 case KVM_S390_VM_CRYPTO_ENABLE_AES_KW:
469 get_random_bytes(
470 kvm->arch.crypto.crycb->aes_wrapping_key_mask,
471 sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
472 kvm->arch.crypto.aes_kw = 1;
c92ea7b9 473 VM_EVENT(kvm, 3, "%s", "ENABLE: AES keywrapping support");
a374e892
TK
474 break;
475 case KVM_S390_VM_CRYPTO_ENABLE_DEA_KW:
476 get_random_bytes(
477 kvm->arch.crypto.crycb->dea_wrapping_key_mask,
478 sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));
479 kvm->arch.crypto.dea_kw = 1;
c92ea7b9 480 VM_EVENT(kvm, 3, "%s", "ENABLE: DEA keywrapping support");
a374e892
TK
481 break;
482 case KVM_S390_VM_CRYPTO_DISABLE_AES_KW:
483 kvm->arch.crypto.aes_kw = 0;
484 memset(kvm->arch.crypto.crycb->aes_wrapping_key_mask, 0,
485 sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
c92ea7b9 486 VM_EVENT(kvm, 3, "%s", "DISABLE: AES keywrapping support");
a374e892
TK
487 break;
488 case KVM_S390_VM_CRYPTO_DISABLE_DEA_KW:
489 kvm->arch.crypto.dea_kw = 0;
490 memset(kvm->arch.crypto.crycb->dea_wrapping_key_mask, 0,
491 sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));
c92ea7b9 492 VM_EVENT(kvm, 3, "%s", "DISABLE: DEA keywrapping support");
a374e892
TK
493 break;
494 default:
495 mutex_unlock(&kvm->lock);
496 return -ENXIO;
497 }
498
499 kvm_for_each_vcpu(i, vcpu, kvm) {
500 kvm_s390_vcpu_crypto_setup(vcpu);
501 exit_sie(vcpu);
502 }
503 mutex_unlock(&kvm->lock);
504 return 0;
505}
506
72f25020
JH
507static int kvm_s390_set_tod_high(struct kvm *kvm, struct kvm_device_attr *attr)
508{
509 u8 gtod_high;
510
511 if (copy_from_user(&gtod_high, (void __user *)attr->addr,
512 sizeof(gtod_high)))
513 return -EFAULT;
514
515 if (gtod_high != 0)
516 return -EINVAL;
c92ea7b9 517 VM_EVENT(kvm, 3, "SET: TOD extension: 0x%x\n", gtod_high);
72f25020
JH
518
519 return 0;
520}
521
522static int kvm_s390_set_tod_low(struct kvm *kvm, struct kvm_device_attr *attr)
523{
524 struct kvm_vcpu *cur_vcpu;
525 unsigned int vcpu_idx;
5a3d883a 526 u64 gtod;
72f25020
JH
527
528 if (copy_from_user(&gtod, (void __user *)attr->addr, sizeof(gtod)))
529 return -EFAULT;
530
72f25020 531 mutex_lock(&kvm->lock);
fdf03650 532 preempt_disable();
5a3d883a 533 kvm->arch.epoch = gtod - get_tod_clock();
27406cd5
CB
534 kvm_s390_vcpu_block_all(kvm);
535 kvm_for_each_vcpu(vcpu_idx, cur_vcpu, kvm)
72f25020 536 cur_vcpu->arch.sie_block->epoch = kvm->arch.epoch;
27406cd5 537 kvm_s390_vcpu_unblock_all(kvm);
fdf03650 538 preempt_enable();
72f25020 539 mutex_unlock(&kvm->lock);
c92ea7b9 540 VM_EVENT(kvm, 3, "SET: TOD base: 0x%llx\n", gtod);
72f25020
JH
541 return 0;
542}
543
544static int kvm_s390_set_tod(struct kvm *kvm, struct kvm_device_attr *attr)
545{
546 int ret;
547
548 if (attr->flags)
549 return -EINVAL;
550
551 switch (attr->attr) {
552 case KVM_S390_VM_TOD_HIGH:
553 ret = kvm_s390_set_tod_high(kvm, attr);
554 break;
555 case KVM_S390_VM_TOD_LOW:
556 ret = kvm_s390_set_tod_low(kvm, attr);
557 break;
558 default:
559 ret = -ENXIO;
560 break;
561 }
562 return ret;
563}
564
565static int kvm_s390_get_tod_high(struct kvm *kvm, struct kvm_device_attr *attr)
566{
567 u8 gtod_high = 0;
568
569 if (copy_to_user((void __user *)attr->addr, &gtod_high,
570 sizeof(gtod_high)))
571 return -EFAULT;
c92ea7b9 572 VM_EVENT(kvm, 3, "QUERY: TOD extension: 0x%x\n", gtod_high);
72f25020
JH
573
574 return 0;
575}
576
577static int kvm_s390_get_tod_low(struct kvm *kvm, struct kvm_device_attr *attr)
578{
5a3d883a 579 u64 gtod;
72f25020 580
fdf03650 581 preempt_disable();
5a3d883a 582 gtod = get_tod_clock() + kvm->arch.epoch;
fdf03650 583 preempt_enable();
72f25020
JH
584 if (copy_to_user((void __user *)attr->addr, &gtod, sizeof(gtod)))
585 return -EFAULT;
c92ea7b9 586 VM_EVENT(kvm, 3, "QUERY: TOD base: 0x%llx\n", gtod);
72f25020
JH
587
588 return 0;
589}
590
591static int kvm_s390_get_tod(struct kvm *kvm, struct kvm_device_attr *attr)
592{
593 int ret;
594
595 if (attr->flags)
596 return -EINVAL;
597
598 switch (attr->attr) {
599 case KVM_S390_VM_TOD_HIGH:
600 ret = kvm_s390_get_tod_high(kvm, attr);
601 break;
602 case KVM_S390_VM_TOD_LOW:
603 ret = kvm_s390_get_tod_low(kvm, attr);
604 break;
605 default:
606 ret = -ENXIO;
607 break;
608 }
609 return ret;
610}
611
658b6eda
MM
612static int kvm_s390_set_processor(struct kvm *kvm, struct kvm_device_attr *attr)
613{
614 struct kvm_s390_vm_cpu_processor *proc;
615 int ret = 0;
616
617 mutex_lock(&kvm->lock);
618 if (atomic_read(&kvm->online_vcpus)) {
619 ret = -EBUSY;
620 goto out;
621 }
622 proc = kzalloc(sizeof(*proc), GFP_KERNEL);
623 if (!proc) {
624 ret = -ENOMEM;
625 goto out;
626 }
627 if (!copy_from_user(proc, (void __user *)attr->addr,
628 sizeof(*proc))) {
629 memcpy(&kvm->arch.model.cpu_id, &proc->cpuid,
630 sizeof(struct cpuid));
631 kvm->arch.model.ibc = proc->ibc;
981467c9 632 memcpy(kvm->arch.model.fac->list, proc->fac_list,
658b6eda
MM
633 S390_ARCH_FAC_LIST_SIZE_BYTE);
634 } else
635 ret = -EFAULT;
636 kfree(proc);
637out:
638 mutex_unlock(&kvm->lock);
639 return ret;
640}
641
642static int kvm_s390_set_cpu_model(struct kvm *kvm, struct kvm_device_attr *attr)
643{
644 int ret = -ENXIO;
645
646 switch (attr->attr) {
647 case KVM_S390_VM_CPU_PROCESSOR:
648 ret = kvm_s390_set_processor(kvm, attr);
649 break;
650 }
651 return ret;
652}
653
654static int kvm_s390_get_processor(struct kvm *kvm, struct kvm_device_attr *attr)
655{
656 struct kvm_s390_vm_cpu_processor *proc;
657 int ret = 0;
658
659 proc = kzalloc(sizeof(*proc), GFP_KERNEL);
660 if (!proc) {
661 ret = -ENOMEM;
662 goto out;
663 }
664 memcpy(&proc->cpuid, &kvm->arch.model.cpu_id, sizeof(struct cpuid));
665 proc->ibc = kvm->arch.model.ibc;
981467c9 666 memcpy(&proc->fac_list, kvm->arch.model.fac->list, S390_ARCH_FAC_LIST_SIZE_BYTE);
658b6eda
MM
667 if (copy_to_user((void __user *)attr->addr, proc, sizeof(*proc)))
668 ret = -EFAULT;
669 kfree(proc);
670out:
671 return ret;
672}
673
674static int kvm_s390_get_machine(struct kvm *kvm, struct kvm_device_attr *attr)
675{
676 struct kvm_s390_vm_cpu_machine *mach;
677 int ret = 0;
678
679 mach = kzalloc(sizeof(*mach), GFP_KERNEL);
680 if (!mach) {
681 ret = -ENOMEM;
682 goto out;
683 }
684 get_cpu_id((struct cpuid *) &mach->cpuid);
37c5f6c8 685 mach->ibc = sclp.ibc;
981467c9
MM
686 memcpy(&mach->fac_mask, kvm->arch.model.fac->mask,
687 S390_ARCH_FAC_LIST_SIZE_BYTE);
658b6eda 688 memcpy((unsigned long *)&mach->fac_list, S390_lowcore.stfle_fac_list,
94422ee8 689 S390_ARCH_FAC_LIST_SIZE_BYTE);
658b6eda
MM
690 if (copy_to_user((void __user *)attr->addr, mach, sizeof(*mach)))
691 ret = -EFAULT;
692 kfree(mach);
693out:
694 return ret;
695}
696
697static int kvm_s390_get_cpu_model(struct kvm *kvm, struct kvm_device_attr *attr)
698{
699 int ret = -ENXIO;
700
701 switch (attr->attr) {
702 case KVM_S390_VM_CPU_PROCESSOR:
703 ret = kvm_s390_get_processor(kvm, attr);
704 break;
705 case KVM_S390_VM_CPU_MACHINE:
706 ret = kvm_s390_get_machine(kvm, attr);
707 break;
708 }
709 return ret;
710}
711
f2061656
DD
712static int kvm_s390_vm_set_attr(struct kvm *kvm, struct kvm_device_attr *attr)
713{
714 int ret;
715
716 switch (attr->group) {
4f718eab 717 case KVM_S390_VM_MEM_CTRL:
8c0a7ce6 718 ret = kvm_s390_set_mem_control(kvm, attr);
4f718eab 719 break;
72f25020
JH
720 case KVM_S390_VM_TOD:
721 ret = kvm_s390_set_tod(kvm, attr);
722 break;
658b6eda
MM
723 case KVM_S390_VM_CPU_MODEL:
724 ret = kvm_s390_set_cpu_model(kvm, attr);
725 break;
a374e892
TK
726 case KVM_S390_VM_CRYPTO:
727 ret = kvm_s390_vm_set_crypto(kvm, attr);
728 break;
f2061656
DD
729 default:
730 ret = -ENXIO;
731 break;
732 }
733
734 return ret;
735}
736
737static int kvm_s390_vm_get_attr(struct kvm *kvm, struct kvm_device_attr *attr)
738{
8c0a7ce6
DD
739 int ret;
740
741 switch (attr->group) {
742 case KVM_S390_VM_MEM_CTRL:
743 ret = kvm_s390_get_mem_control(kvm, attr);
744 break;
72f25020
JH
745 case KVM_S390_VM_TOD:
746 ret = kvm_s390_get_tod(kvm, attr);
747 break;
658b6eda
MM
748 case KVM_S390_VM_CPU_MODEL:
749 ret = kvm_s390_get_cpu_model(kvm, attr);
750 break;
8c0a7ce6
DD
751 default:
752 ret = -ENXIO;
753 break;
754 }
755
756 return ret;
f2061656
DD
757}
758
759static int kvm_s390_vm_has_attr(struct kvm *kvm, struct kvm_device_attr *attr)
760{
761 int ret;
762
763 switch (attr->group) {
4f718eab
DD
764 case KVM_S390_VM_MEM_CTRL:
765 switch (attr->attr) {
766 case KVM_S390_VM_MEM_ENABLE_CMMA:
767 case KVM_S390_VM_MEM_CLR_CMMA:
8c0a7ce6 768 case KVM_S390_VM_MEM_LIMIT_SIZE:
4f718eab
DD
769 ret = 0;
770 break;
771 default:
772 ret = -ENXIO;
773 break;
774 }
775 break;
72f25020
JH
776 case KVM_S390_VM_TOD:
777 switch (attr->attr) {
778 case KVM_S390_VM_TOD_LOW:
779 case KVM_S390_VM_TOD_HIGH:
780 ret = 0;
781 break;
782 default:
783 ret = -ENXIO;
784 break;
785 }
786 break;
658b6eda
MM
787 case KVM_S390_VM_CPU_MODEL:
788 switch (attr->attr) {
789 case KVM_S390_VM_CPU_PROCESSOR:
790 case KVM_S390_VM_CPU_MACHINE:
791 ret = 0;
792 break;
793 default:
794 ret = -ENXIO;
795 break;
796 }
797 break;
a374e892
TK
798 case KVM_S390_VM_CRYPTO:
799 switch (attr->attr) {
800 case KVM_S390_VM_CRYPTO_ENABLE_AES_KW:
801 case KVM_S390_VM_CRYPTO_ENABLE_DEA_KW:
802 case KVM_S390_VM_CRYPTO_DISABLE_AES_KW:
803 case KVM_S390_VM_CRYPTO_DISABLE_DEA_KW:
804 ret = 0;
805 break;
806 default:
807 ret = -ENXIO;
808 break;
809 }
810 break;
f2061656
DD
811 default:
812 ret = -ENXIO;
813 break;
814 }
815
816 return ret;
817}
818
30ee2a98
JH
819static long kvm_s390_get_skeys(struct kvm *kvm, struct kvm_s390_skeys *args)
820{
821 uint8_t *keys;
822 uint64_t hva;
823 unsigned long curkey;
824 int i, r = 0;
825
826 if (args->flags != 0)
827 return -EINVAL;
828
829 /* Is this guest using storage keys? */
830 if (!mm_use_skey(current->mm))
831 return KVM_S390_GET_SKEYS_NONE;
832
833 /* Enforce sane limit on memory allocation */
834 if (args->count < 1 || args->count > KVM_S390_SKEYS_MAX)
835 return -EINVAL;
836
837 keys = kmalloc_array(args->count, sizeof(uint8_t),
838 GFP_KERNEL | __GFP_NOWARN);
839 if (!keys)
840 keys = vmalloc(sizeof(uint8_t) * args->count);
841 if (!keys)
842 return -ENOMEM;
843
844 for (i = 0; i < args->count; i++) {
845 hva = gfn_to_hva(kvm, args->start_gfn + i);
846 if (kvm_is_error_hva(hva)) {
847 r = -EFAULT;
848 goto out;
849 }
850
851 curkey = get_guest_storage_key(current->mm, hva);
852 if (IS_ERR_VALUE(curkey)) {
853 r = curkey;
854 goto out;
855 }
856 keys[i] = curkey;
857 }
858
859 r = copy_to_user((uint8_t __user *)args->skeydata_addr, keys,
860 sizeof(uint8_t) * args->count);
861 if (r)
862 r = -EFAULT;
863out:
864 kvfree(keys);
865 return r;
866}
867
868static long kvm_s390_set_skeys(struct kvm *kvm, struct kvm_s390_skeys *args)
869{
870 uint8_t *keys;
871 uint64_t hva;
872 int i, r = 0;
873
874 if (args->flags != 0)
875 return -EINVAL;
876
877 /* Enforce sane limit on memory allocation */
878 if (args->count < 1 || args->count > KVM_S390_SKEYS_MAX)
879 return -EINVAL;
880
881 keys = kmalloc_array(args->count, sizeof(uint8_t),
882 GFP_KERNEL | __GFP_NOWARN);
883 if (!keys)
884 keys = vmalloc(sizeof(uint8_t) * args->count);
885 if (!keys)
886 return -ENOMEM;
887
888 r = copy_from_user(keys, (uint8_t __user *)args->skeydata_addr,
889 sizeof(uint8_t) * args->count);
890 if (r) {
891 r = -EFAULT;
892 goto out;
893 }
894
895 /* Enable storage key handling for the guest */
14d4a425
DD
896 r = s390_enable_skey();
897 if (r)
898 goto out;
30ee2a98
JH
899
900 for (i = 0; i < args->count; i++) {
901 hva = gfn_to_hva(kvm, args->start_gfn + i);
902 if (kvm_is_error_hva(hva)) {
903 r = -EFAULT;
904 goto out;
905 }
906
907 /* Lowest order bit is reserved */
908 if (keys[i] & 0x01) {
909 r = -EINVAL;
910 goto out;
911 }
912
913 r = set_guest_storage_key(current->mm, hva,
914 (unsigned long)keys[i], 0);
915 if (r)
916 goto out;
917 }
918out:
919 kvfree(keys);
920 return r;
921}
922
b0c632db
HC
923long kvm_arch_vm_ioctl(struct file *filp,
924 unsigned int ioctl, unsigned long arg)
925{
926 struct kvm *kvm = filp->private_data;
927 void __user *argp = (void __user *)arg;
f2061656 928 struct kvm_device_attr attr;
b0c632db
HC
929 int r;
930
931 switch (ioctl) {
ba5c1e9b
CO
932 case KVM_S390_INTERRUPT: {
933 struct kvm_s390_interrupt s390int;
934
935 r = -EFAULT;
936 if (copy_from_user(&s390int, argp, sizeof(s390int)))
937 break;
938 r = kvm_s390_inject_vm(kvm, &s390int);
939 break;
940 }
d938dc55
CH
941 case KVM_ENABLE_CAP: {
942 struct kvm_enable_cap cap;
943 r = -EFAULT;
944 if (copy_from_user(&cap, argp, sizeof(cap)))
945 break;
946 r = kvm_vm_ioctl_enable_cap(kvm, &cap);
947 break;
948 }
84223598
CH
949 case KVM_CREATE_IRQCHIP: {
950 struct kvm_irq_routing_entry routing;
951
952 r = -EINVAL;
953 if (kvm->arch.use_irqchip) {
954 /* Set up dummy routing. */
955 memset(&routing, 0, sizeof(routing));
152b2839 956 r = kvm_set_irq_routing(kvm, &routing, 0, 0);
84223598
CH
957 }
958 break;
959 }
f2061656
DD
960 case KVM_SET_DEVICE_ATTR: {
961 r = -EFAULT;
962 if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
963 break;
964 r = kvm_s390_vm_set_attr(kvm, &attr);
965 break;
966 }
967 case KVM_GET_DEVICE_ATTR: {
968 r = -EFAULT;
969 if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
970 break;
971 r = kvm_s390_vm_get_attr(kvm, &attr);
972 break;
973 }
974 case KVM_HAS_DEVICE_ATTR: {
975 r = -EFAULT;
976 if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
977 break;
978 r = kvm_s390_vm_has_attr(kvm, &attr);
979 break;
980 }
30ee2a98
JH
981 case KVM_S390_GET_SKEYS: {
982 struct kvm_s390_skeys args;
983
984 r = -EFAULT;
985 if (copy_from_user(&args, argp,
986 sizeof(struct kvm_s390_skeys)))
987 break;
988 r = kvm_s390_get_skeys(kvm, &args);
989 break;
990 }
991 case KVM_S390_SET_SKEYS: {
992 struct kvm_s390_skeys args;
993
994 r = -EFAULT;
995 if (copy_from_user(&args, argp,
996 sizeof(struct kvm_s390_skeys)))
997 break;
998 r = kvm_s390_set_skeys(kvm, &args);
999 break;
1000 }
b0c632db 1001 default:
367e1319 1002 r = -ENOTTY;
b0c632db
HC
1003 }
1004
1005 return r;
1006}
1007
45c9b47c
TK
1008static int kvm_s390_query_ap_config(u8 *config)
1009{
1010 u32 fcn_code = 0x04000000UL;
86044c8c 1011 u32 cc = 0;
45c9b47c 1012
86044c8c 1013 memset(config, 0, 128);
45c9b47c
TK
1014 asm volatile(
1015 "lgr 0,%1\n"
1016 "lgr 2,%2\n"
1017 ".long 0xb2af0000\n" /* PQAP(QCI) */
86044c8c 1018 "0: ipm %0\n"
45c9b47c 1019 "srl %0,28\n"
86044c8c
CB
1020 "1:\n"
1021 EX_TABLE(0b, 1b)
1022 : "+r" (cc)
45c9b47c
TK
1023 : "r" (fcn_code), "r" (config)
1024 : "cc", "0", "2", "memory"
1025 );
1026
1027 return cc;
1028}
1029
1030static int kvm_s390_apxa_installed(void)
1031{
1032 u8 config[128];
1033 int cc;
1034
1035 if (test_facility(2) && test_facility(12)) {
1036 cc = kvm_s390_query_ap_config(config);
1037
1038 if (cc)
1039 pr_err("PQAP(QCI) failed with cc=%d", cc);
1040 else
1041 return config[0] & 0x40;
1042 }
1043
1044 return 0;
1045}
1046
1047static void kvm_s390_set_crycb_format(struct kvm *kvm)
1048{
1049 kvm->arch.crypto.crycbd = (__u32)(unsigned long) kvm->arch.crypto.crycb;
1050
1051 if (kvm_s390_apxa_installed())
1052 kvm->arch.crypto.crycbd |= CRYCB_FORMAT2;
1053 else
1054 kvm->arch.crypto.crycbd |= CRYCB_FORMAT1;
1055}
1056
9d8d5786
MM
1057static void kvm_s390_get_cpu_id(struct cpuid *cpu_id)
1058{
1059 get_cpu_id(cpu_id);
1060 cpu_id->version = 0xff;
1061}
1062
5102ee87
TK
1063static int kvm_s390_crypto_init(struct kvm *kvm)
1064{
9d8d5786 1065 if (!test_kvm_facility(kvm, 76))
5102ee87
TK
1066 return 0;
1067
1068 kvm->arch.crypto.crycb = kzalloc(sizeof(*kvm->arch.crypto.crycb),
1069 GFP_KERNEL | GFP_DMA);
1070 if (!kvm->arch.crypto.crycb)
1071 return -ENOMEM;
1072
45c9b47c 1073 kvm_s390_set_crycb_format(kvm);
5102ee87 1074
ed6f76b4
TK
1075 /* Enable AES/DEA protected key functions by default */
1076 kvm->arch.crypto.aes_kw = 1;
1077 kvm->arch.crypto.dea_kw = 1;
1078 get_random_bytes(kvm->arch.crypto.crycb->aes_wrapping_key_mask,
1079 sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
1080 get_random_bytes(kvm->arch.crypto.crycb->dea_wrapping_key_mask,
1081 sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));
a374e892 1082
5102ee87
TK
1083 return 0;
1084}
1085
e08b9637 1086int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
b0c632db 1087{
9d8d5786 1088 int i, rc;
b0c632db 1089 char debug_name[16];
f6c137ff 1090 static unsigned long sca_offset;
b0c632db 1091
e08b9637
CO
1092 rc = -EINVAL;
1093#ifdef CONFIG_KVM_S390_UCONTROL
1094 if (type & ~KVM_VM_S390_UCONTROL)
1095 goto out_err;
1096 if ((type & KVM_VM_S390_UCONTROL) && (!capable(CAP_SYS_ADMIN)))
1097 goto out_err;
1098#else
1099 if (type)
1100 goto out_err;
1101#endif
1102
b0c632db
HC
1103 rc = s390_enable_sie();
1104 if (rc)
d89f5eff 1105 goto out_err;
b0c632db 1106
b290411a
CO
1107 rc = -ENOMEM;
1108
b0c632db
HC
1109 kvm->arch.sca = (struct sca_block *) get_zeroed_page(GFP_KERNEL);
1110 if (!kvm->arch.sca)
d89f5eff 1111 goto out_err;
f6c137ff
CB
1112 spin_lock(&kvm_lock);
1113 sca_offset = (sca_offset + 16) & 0x7f0;
1114 kvm->arch.sca = (struct sca_block *) ((char *) kvm->arch.sca + sca_offset);
1115 spin_unlock(&kvm_lock);
b0c632db
HC
1116
1117 sprintf(debug_name, "kvm-%u", current->pid);
1118
1cb9cf72 1119 kvm->arch.dbf = debug_register(debug_name, 32, 1, 7 * sizeof(long));
b0c632db 1120 if (!kvm->arch.dbf)
40f5b735 1121 goto out_err;
b0c632db 1122
9d8d5786
MM
1123 /*
1124 * The architectural maximum amount of facilities is 16 kbit. To store
1125 * this amount, 2 kbyte of memory is required. Thus we need a full
981467c9
MM
1126 * page to hold the guest facility list (arch.model.fac->list) and the
1127 * facility mask (arch.model.fac->mask). Its address size has to be
9d8d5786
MM
1128 * 31 bits and word aligned.
1129 */
1130 kvm->arch.model.fac =
981467c9 1131 (struct kvm_s390_fac *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
9d8d5786 1132 if (!kvm->arch.model.fac)
40f5b735 1133 goto out_err;
9d8d5786 1134
fb5bf93f 1135 /* Populate the facility mask initially. */
981467c9 1136 memcpy(kvm->arch.model.fac->mask, S390_lowcore.stfle_fac_list,
94422ee8 1137 S390_ARCH_FAC_LIST_SIZE_BYTE);
9d8d5786
MM
1138 for (i = 0; i < S390_ARCH_FAC_LIST_SIZE_U64; i++) {
1139 if (i < kvm_s390_fac_list_mask_size())
981467c9 1140 kvm->arch.model.fac->mask[i] &= kvm_s390_fac_list_mask[i];
9d8d5786 1141 else
981467c9 1142 kvm->arch.model.fac->mask[i] = 0UL;
9d8d5786
MM
1143 }
1144
981467c9
MM
1145 /* Populate the facility list initially. */
1146 memcpy(kvm->arch.model.fac->list, kvm->arch.model.fac->mask,
1147 S390_ARCH_FAC_LIST_SIZE_BYTE);
1148
9d8d5786 1149 kvm_s390_get_cpu_id(&kvm->arch.model.cpu_id);
37c5f6c8 1150 kvm->arch.model.ibc = sclp.ibc & 0x0fff;
9d8d5786 1151
5102ee87 1152 if (kvm_s390_crypto_init(kvm) < 0)
40f5b735 1153 goto out_err;
5102ee87 1154
ba5c1e9b 1155 spin_lock_init(&kvm->arch.float_int.lock);
6d3da241
JF
1156 for (i = 0; i < FIRQ_LIST_COUNT; i++)
1157 INIT_LIST_HEAD(&kvm->arch.float_int.lists[i]);
8a242234 1158 init_waitqueue_head(&kvm->arch.ipte_wq);
a6b7e459 1159 mutex_init(&kvm->arch.ipte_mutex);
ba5c1e9b 1160
b0c632db 1161 debug_register_view(kvm->arch.dbf, &debug_sprintf_view);
78f26131 1162 VM_EVENT(kvm, 3, "vm created with type %lu", type);
b0c632db 1163
e08b9637
CO
1164 if (type & KVM_VM_S390_UCONTROL) {
1165 kvm->arch.gmap = NULL;
1166 } else {
0349985a 1167 kvm->arch.gmap = gmap_alloc(current->mm, (1UL << 44) - 1);
e08b9637 1168 if (!kvm->arch.gmap)
40f5b735 1169 goto out_err;
2c70fe44 1170 kvm->arch.gmap->private = kvm;
24eb3a82 1171 kvm->arch.gmap->pfault_enabled = 0;
e08b9637 1172 }
fa6b7fe9
CH
1173
1174 kvm->arch.css_support = 0;
84223598 1175 kvm->arch.use_irqchip = 0;
72f25020 1176 kvm->arch.epoch = 0;
fa6b7fe9 1177
8ad35755 1178 spin_lock_init(&kvm->arch.start_stop_lock);
78f26131 1179 KVM_EVENT(3, "vm 0x%p created by pid %u", kvm, current->pid);
8ad35755 1180
d89f5eff 1181 return 0;
40f5b735 1182out_err:
5102ee87 1183 kfree(kvm->arch.crypto.crycb);
9d8d5786 1184 free_page((unsigned long)kvm->arch.model.fac);
598841ca 1185 debug_unregister(kvm->arch.dbf);
b0c632db 1186 free_page((unsigned long)(kvm->arch.sca));
78f26131 1187 KVM_EVENT(3, "creation of vm failed: %d", rc);
d89f5eff 1188 return rc;
b0c632db
HC
1189}
1190
d329c035
CB
1191void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
1192{
1193 VCPU_EVENT(vcpu, 3, "%s", "free cpu");
ade38c31 1194 trace_kvm_s390_destroy_vcpu(vcpu->vcpu_id);
67335e63 1195 kvm_s390_clear_local_irqs(vcpu);
3c038e6b 1196 kvm_clear_async_pf_completion_queue(vcpu);
58f9460b
CO
1197 if (!kvm_is_ucontrol(vcpu->kvm)) {
1198 clear_bit(63 - vcpu->vcpu_id,
1199 (unsigned long *) &vcpu->kvm->arch.sca->mcn);
1200 if (vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda ==
1201 (__u64) vcpu->arch.sie_block)
1202 vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda = 0;
1203 }
abf4a71e 1204 smp_mb();
27e0393f
CO
1205
1206 if (kvm_is_ucontrol(vcpu->kvm))
1207 gmap_free(vcpu->arch.gmap);
1208
e6db1d61 1209 if (vcpu->kvm->arch.use_cmma)
b31605c1 1210 kvm_s390_vcpu_unsetup_cmma(vcpu);
d329c035 1211 free_page((unsigned long)(vcpu->arch.sie_block));
b31288fa 1212
6692cef3 1213 kvm_vcpu_uninit(vcpu);
b110feaf 1214 kmem_cache_free(kvm_vcpu_cache, vcpu);
d329c035
CB
1215}
1216
1217static void kvm_free_vcpus(struct kvm *kvm)
1218{
1219 unsigned int i;
988a2cae 1220 struct kvm_vcpu *vcpu;
d329c035 1221
988a2cae
GN
1222 kvm_for_each_vcpu(i, vcpu, kvm)
1223 kvm_arch_vcpu_destroy(vcpu);
1224
1225 mutex_lock(&kvm->lock);
1226 for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
1227 kvm->vcpus[i] = NULL;
1228
1229 atomic_set(&kvm->online_vcpus, 0);
1230 mutex_unlock(&kvm->lock);
d329c035
CB
1231}
1232
b0c632db
HC
1233void kvm_arch_destroy_vm(struct kvm *kvm)
1234{
d329c035 1235 kvm_free_vcpus(kvm);
9d8d5786 1236 free_page((unsigned long)kvm->arch.model.fac);
b0c632db 1237 free_page((unsigned long)(kvm->arch.sca));
d329c035 1238 debug_unregister(kvm->arch.dbf);
5102ee87 1239 kfree(kvm->arch.crypto.crycb);
27e0393f
CO
1240 if (!kvm_is_ucontrol(kvm))
1241 gmap_free(kvm->arch.gmap);
841b91c5 1242 kvm_s390_destroy_adapters(kvm);
67335e63 1243 kvm_s390_clear_float_irqs(kvm);
78f26131 1244 KVM_EVENT(3, "vm 0x%p destroyed", kvm);
b0c632db
HC
1245}
1246
1247/* Section: vcpu related */
dafd032a
DD
1248static int __kvm_ucontrol_vcpu_init(struct kvm_vcpu *vcpu)
1249{
1250 vcpu->arch.gmap = gmap_alloc(current->mm, -1UL);
1251 if (!vcpu->arch.gmap)
1252 return -ENOMEM;
1253 vcpu->arch.gmap->private = vcpu->kvm;
1254
1255 return 0;
1256}
1257
b0c632db
HC
1258int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
1259{
3c038e6b
DD
1260 vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
1261 kvm_clear_async_pf_completion_queue(vcpu);
59674c1a
CB
1262 vcpu->run->kvm_valid_regs = KVM_SYNC_PREFIX |
1263 KVM_SYNC_GPRS |
9eed0735 1264 KVM_SYNC_ACRS |
b028ee3e
DH
1265 KVM_SYNC_CRS |
1266 KVM_SYNC_ARCH0 |
1267 KVM_SYNC_PFAULT;
68c55750
EF
1268 if (test_kvm_facility(vcpu->kvm, 129))
1269 vcpu->run->kvm_valid_regs |= KVM_SYNC_VRS;
dafd032a
DD
1270
1271 if (kvm_is_ucontrol(vcpu->kvm))
1272 return __kvm_ucontrol_vcpu_init(vcpu);
1273
b0c632db
HC
1274 return 0;
1275}
1276
9977e886
HB
1277/*
1278 * Backs up the current FP/VX register save area on a particular
1279 * destination. Used to switch between different register save
1280 * areas.
1281 */
1282static inline void save_fpu_to(struct fpu *dst)
1283{
1284 dst->fpc = current->thread.fpu.fpc;
1285 dst->flags = current->thread.fpu.flags;
1286 dst->regs = current->thread.fpu.regs;
1287}
1288
1289/*
1290 * Switches the FP/VX register save area from which to lazy
1291 * restore register contents.
1292 */
1293static inline void load_fpu_from(struct fpu *from)
1294{
1295 current->thread.fpu.fpc = from->fpc;
1296 current->thread.fpu.flags = from->flags;
1297 current->thread.fpu.regs = from->regs;
1298}
1299
b0c632db
HC
1300void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
1301{
9977e886 1302 /* Save host register state */
d0164ee2 1303 save_fpu_regs();
9977e886 1304 save_fpu_to(&vcpu->arch.host_fpregs);
96b2d7a8 1305
18280d8b 1306 if (test_kvm_facility(vcpu->kvm, 129)) {
9977e886
HB
1307 current->thread.fpu.fpc = vcpu->run->s.regs.fpc;
1308 current->thread.fpu.flags = FPU_USE_VX;
1309 /*
1310 * Use the register save area in the SIE-control block
1311 * for register restore and save in kvm_arch_vcpu_put()
1312 */
1313 current->thread.fpu.vxrs =
1314 (__vector128 *)&vcpu->run->s.regs.vrs;
1315 /* Always enable the vector extension for KVM */
1316 __ctl_set_vx();
1317 } else
1318 load_fpu_from(&vcpu->arch.guest_fpregs);
1319
1320 if (test_fp_ctl(current->thread.fpu.fpc))
96b2d7a8 1321 /* User space provided an invalid FPC, let's clear it */
9977e886
HB
1322 current->thread.fpu.fpc = 0;
1323
1324 save_access_regs(vcpu->arch.host_acrs);
59674c1a 1325 restore_access_regs(vcpu->run->s.regs.acrs);
480e5926 1326 gmap_enable(vcpu->arch.gmap);
805de8f4 1327 atomic_or(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
b0c632db
HC
1328}
1329
1330void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
1331{
805de8f4 1332 atomic_andnot(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
480e5926 1333 gmap_disable(vcpu->arch.gmap);
9977e886 1334
d0164ee2 1335 save_fpu_regs();
9977e886 1336
18280d8b 1337 if (test_kvm_facility(vcpu->kvm, 129))
9977e886
HB
1338 /*
1339 * kvm_arch_vcpu_load() set up the register save area to
1340 * the &vcpu->run->s.regs.vrs and, thus, the vector registers
1341 * are already saved. Only the floating-point control must be
1342 * copied.
1343 */
1344 vcpu->run->s.regs.fpc = current->thread.fpu.fpc;
68c55750 1345 else
9977e886
HB
1346 save_fpu_to(&vcpu->arch.guest_fpregs);
1347 load_fpu_from(&vcpu->arch.host_fpregs);
1348
1349 save_access_regs(vcpu->run->s.regs.acrs);
b0c632db
HC
1350 restore_access_regs(vcpu->arch.host_acrs);
1351}
1352
1353static void kvm_s390_vcpu_initial_reset(struct kvm_vcpu *vcpu)
1354{
1355 /* this equals initial cpu reset in pop, but we don't switch to ESA */
1356 vcpu->arch.sie_block->gpsw.mask = 0UL;
1357 vcpu->arch.sie_block->gpsw.addr = 0UL;
8d26cf7b 1358 kvm_s390_set_prefix(vcpu, 0);
b0c632db
HC
1359 vcpu->arch.sie_block->cputm = 0UL;
1360 vcpu->arch.sie_block->ckc = 0UL;
1361 vcpu->arch.sie_block->todpr = 0;
1362 memset(vcpu->arch.sie_block->gcr, 0, 16 * sizeof(__u64));
1363 vcpu->arch.sie_block->gcr[0] = 0xE0UL;
1364 vcpu->arch.sie_block->gcr[14] = 0xC2000000UL;
1365 vcpu->arch.guest_fpregs.fpc = 0;
1366 asm volatile("lfpc %0" : : "Q" (vcpu->arch.guest_fpregs.fpc));
1367 vcpu->arch.sie_block->gbea = 1;
672550fb 1368 vcpu->arch.sie_block->pp = 0;
3c038e6b
DD
1369 vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
1370 kvm_clear_async_pf_completion_queue(vcpu);
6352e4d2
DH
1371 if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm))
1372 kvm_s390_vcpu_stop(vcpu);
2ed10cc1 1373 kvm_s390_clear_local_irqs(vcpu);
b0c632db
HC
1374}
1375
31928aa5 1376void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
42897d86 1377{
72f25020 1378 mutex_lock(&vcpu->kvm->lock);
fdf03650 1379 preempt_disable();
72f25020 1380 vcpu->arch.sie_block->epoch = vcpu->kvm->arch.epoch;
fdf03650 1381 preempt_enable();
72f25020 1382 mutex_unlock(&vcpu->kvm->lock);
dafd032a
DD
1383 if (!kvm_is_ucontrol(vcpu->kvm))
1384 vcpu->arch.gmap = vcpu->kvm->arch.gmap;
42897d86
MT
1385}
1386
5102ee87
TK
1387static void kvm_s390_vcpu_crypto_setup(struct kvm_vcpu *vcpu)
1388{
9d8d5786 1389 if (!test_kvm_facility(vcpu->kvm, 76))
5102ee87
TK
1390 return;
1391
a374e892
TK
1392 vcpu->arch.sie_block->ecb3 &= ~(ECB3_AES | ECB3_DEA);
1393
1394 if (vcpu->kvm->arch.crypto.aes_kw)
1395 vcpu->arch.sie_block->ecb3 |= ECB3_AES;
1396 if (vcpu->kvm->arch.crypto.dea_kw)
1397 vcpu->arch.sie_block->ecb3 |= ECB3_DEA;
1398
5102ee87
TK
1399 vcpu->arch.sie_block->crycbd = vcpu->kvm->arch.crypto.crycbd;
1400}
1401
b31605c1
DD
1402void kvm_s390_vcpu_unsetup_cmma(struct kvm_vcpu *vcpu)
1403{
1404 free_page(vcpu->arch.sie_block->cbrlo);
1405 vcpu->arch.sie_block->cbrlo = 0;
1406}
1407
1408int kvm_s390_vcpu_setup_cmma(struct kvm_vcpu *vcpu)
1409{
1410 vcpu->arch.sie_block->cbrlo = get_zeroed_page(GFP_KERNEL);
1411 if (!vcpu->arch.sie_block->cbrlo)
1412 return -ENOMEM;
1413
1414 vcpu->arch.sie_block->ecb2 |= 0x80;
1415 vcpu->arch.sie_block->ecb2 &= ~0x08;
1416 return 0;
1417}
1418
91520f1a
MM
1419static void kvm_s390_vcpu_setup_model(struct kvm_vcpu *vcpu)
1420{
1421 struct kvm_s390_cpu_model *model = &vcpu->kvm->arch.model;
1422
1423 vcpu->arch.cpu_id = model->cpu_id;
1424 vcpu->arch.sie_block->ibc = model->ibc;
1425 vcpu->arch.sie_block->fac = (int) (long) model->fac->list;
1426}
1427
b0c632db
HC
1428int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
1429{
b31605c1 1430 int rc = 0;
b31288fa 1431
9e6dabef
CH
1432 atomic_set(&vcpu->arch.sie_block->cpuflags, CPUSTAT_ZARCH |
1433 CPUSTAT_SM |
a4a4f191
GH
1434 CPUSTAT_STOPPED);
1435
53df84f8 1436 if (test_kvm_facility(vcpu->kvm, 78))
805de8f4 1437 atomic_or(CPUSTAT_GED2, &vcpu->arch.sie_block->cpuflags);
53df84f8 1438 else if (test_kvm_facility(vcpu->kvm, 8))
805de8f4 1439 atomic_or(CPUSTAT_GED, &vcpu->arch.sie_block->cpuflags);
a4a4f191 1440
91520f1a
MM
1441 kvm_s390_vcpu_setup_model(vcpu);
1442
fc34531d 1443 vcpu->arch.sie_block->ecb = 6;
9d8d5786 1444 if (test_kvm_facility(vcpu->kvm, 50) && test_kvm_facility(vcpu->kvm, 73))
7feb6bb8
MM
1445 vcpu->arch.sie_block->ecb |= 0x10;
1446
69d0d3a3 1447 vcpu->arch.sie_block->ecb2 = 8;
ea5f4969 1448 vcpu->arch.sie_block->eca = 0xC1002000U;
37c5f6c8 1449 if (sclp.has_siif)
217a4406 1450 vcpu->arch.sie_block->eca |= 1;
37c5f6c8 1451 if (sclp.has_sigpif)
ea5f4969 1452 vcpu->arch.sie_block->eca |= 0x10000000U;
18280d8b 1453 if (test_kvm_facility(vcpu->kvm, 129)) {
13211ea7
EF
1454 vcpu->arch.sie_block->eca |= 0x00020000;
1455 vcpu->arch.sie_block->ecd |= 0x20000000;
1456 }
492d8642 1457 vcpu->arch.sie_block->ictl |= ICTL_ISKE | ICTL_SSKE | ICTL_RRBE;
5a5e6536 1458
e6db1d61 1459 if (vcpu->kvm->arch.use_cmma) {
b31605c1
DD
1460 rc = kvm_s390_vcpu_setup_cmma(vcpu);
1461 if (rc)
1462 return rc;
b31288fa 1463 }
0ac96caf 1464 hrtimer_init(&vcpu->arch.ckc_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
ca872302 1465 vcpu->arch.ckc_timer.function = kvm_s390_idle_wakeup;
9d8d5786 1466
5102ee87
TK
1467 kvm_s390_vcpu_crypto_setup(vcpu);
1468
b31605c1 1469 return rc;
b0c632db
HC
1470}
1471
1472struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
1473 unsigned int id)
1474{
4d47555a 1475 struct kvm_vcpu *vcpu;
7feb6bb8 1476 struct sie_page *sie_page;
4d47555a
CO
1477 int rc = -EINVAL;
1478
1479 if (id >= KVM_MAX_VCPUS)
1480 goto out;
1481
1482 rc = -ENOMEM;
b0c632db 1483
b110feaf 1484 vcpu = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
b0c632db 1485 if (!vcpu)
4d47555a 1486 goto out;
b0c632db 1487
7feb6bb8
MM
1488 sie_page = (struct sie_page *) get_zeroed_page(GFP_KERNEL);
1489 if (!sie_page)
b0c632db
HC
1490 goto out_free_cpu;
1491
7feb6bb8
MM
1492 vcpu->arch.sie_block = &sie_page->sie_block;
1493 vcpu->arch.sie_block->itdba = (unsigned long) &sie_page->itdb;
1494
b0c632db 1495 vcpu->arch.sie_block->icpua = id;
58f9460b
CO
1496 if (!kvm_is_ucontrol(kvm)) {
1497 if (!kvm->arch.sca) {
1498 WARN_ON_ONCE(1);
1499 goto out_free_cpu;
1500 }
1501 if (!kvm->arch.sca->cpu[id].sda)
1502 kvm->arch.sca->cpu[id].sda =
1503 (__u64) vcpu->arch.sie_block;
1504 vcpu->arch.sie_block->scaoh =
1505 (__u32)(((__u64)kvm->arch.sca) >> 32);
1506 vcpu->arch.sie_block->scaol = (__u32)(__u64)kvm->arch.sca;
1507 set_bit(63 - id, (unsigned long *) &kvm->arch.sca->mcn);
1508 }
b0c632db 1509
ba5c1e9b 1510 spin_lock_init(&vcpu->arch.local_int.lock);
ba5c1e9b 1511 vcpu->arch.local_int.float_int = &kvm->arch.float_int;
d0321a24 1512 vcpu->arch.local_int.wq = &vcpu->wq;
5288fbf0 1513 vcpu->arch.local_int.cpuflags = &vcpu->arch.sie_block->cpuflags;
ba5c1e9b 1514
9977e886
HB
1515 /*
1516 * Allocate a save area for floating-point registers. If the vector
1517 * extension is available, register contents are saved in the SIE
1518 * control block. The allocated save area is still required in
1519 * particular places, for example, in kvm_s390_vcpu_store_status().
1520 */
1521 vcpu->arch.guest_fpregs.fprs = kzalloc(sizeof(freg_t) * __NUM_FPRS,
1522 GFP_KERNEL);
1523 if (!vcpu->arch.guest_fpregs.fprs) {
1524 rc = -ENOMEM;
1525 goto out_free_sie_block;
1526 }
1527
b0c632db
HC
1528 rc = kvm_vcpu_init(vcpu, kvm, id);
1529 if (rc)
7b06bf2f 1530 goto out_free_sie_block;
b0c632db
HC
1531 VM_EVENT(kvm, 3, "create cpu %d at %p, sie block at %p", id, vcpu,
1532 vcpu->arch.sie_block);
ade38c31 1533 trace_kvm_s390_create_vcpu(id, vcpu, vcpu->arch.sie_block);
b0c632db 1534
b0c632db 1535 return vcpu;
7b06bf2f
WY
1536out_free_sie_block:
1537 free_page((unsigned long)(vcpu->arch.sie_block));
b0c632db 1538out_free_cpu:
b110feaf 1539 kmem_cache_free(kvm_vcpu_cache, vcpu);
4d47555a 1540out:
b0c632db
HC
1541 return ERR_PTR(rc);
1542}
1543
b0c632db
HC
1544int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
1545{
9a022067 1546 return kvm_s390_vcpu_has_irq(vcpu, 0);
b0c632db
HC
1547}
1548
27406cd5 1549void kvm_s390_vcpu_block(struct kvm_vcpu *vcpu)
49b99e1e 1550{
805de8f4 1551 atomic_or(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
61a6df54 1552 exit_sie(vcpu);
49b99e1e
CB
1553}
1554
27406cd5 1555void kvm_s390_vcpu_unblock(struct kvm_vcpu *vcpu)
49b99e1e 1556{
805de8f4 1557 atomic_andnot(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
49b99e1e
CB
1558}
1559
8e236546
CB
1560static void kvm_s390_vcpu_request(struct kvm_vcpu *vcpu)
1561{
805de8f4 1562 atomic_or(PROG_REQUEST, &vcpu->arch.sie_block->prog20);
61a6df54 1563 exit_sie(vcpu);
8e236546
CB
1564}
1565
1566static void kvm_s390_vcpu_request_handled(struct kvm_vcpu *vcpu)
1567{
9bf9fde2 1568 atomic_andnot(PROG_REQUEST, &vcpu->arch.sie_block->prog20);
8e236546
CB
1569}
1570
49b99e1e
CB
1571/*
1572 * Kick a guest cpu out of SIE and wait until SIE is not running.
1573 * If the CPU is not running (e.g. waiting as idle) the function will
1574 * return immediately. */
1575void exit_sie(struct kvm_vcpu *vcpu)
1576{
805de8f4 1577 atomic_or(CPUSTAT_STOP_INT, &vcpu->arch.sie_block->cpuflags);
49b99e1e
CB
1578 while (vcpu->arch.sie_block->prog0c & PROG_IN_SIE)
1579 cpu_relax();
1580}
1581
8e236546
CB
1582/* Kick a guest cpu out of SIE to process a request synchronously */
1583void kvm_s390_sync_request(int req, struct kvm_vcpu *vcpu)
49b99e1e 1584{
8e236546
CB
1585 kvm_make_request(req, vcpu);
1586 kvm_s390_vcpu_request(vcpu);
49b99e1e
CB
1587}
1588
2c70fe44
CB
1589static void kvm_gmap_notifier(struct gmap *gmap, unsigned long address)
1590{
1591 int i;
1592 struct kvm *kvm = gmap->private;
1593 struct kvm_vcpu *vcpu;
1594
1595 kvm_for_each_vcpu(i, vcpu, kvm) {
1596 /* match against both prefix pages */
fda902cb 1597 if (kvm_s390_get_prefix(vcpu) == (address & ~0x1000UL)) {
2c70fe44 1598 VCPU_EVENT(vcpu, 2, "gmap notifier for %lx", address);
8e236546 1599 kvm_s390_sync_request(KVM_REQ_MMU_RELOAD, vcpu);
2c70fe44
CB
1600 }
1601 }
1602}
1603
b6d33834
CD
1604int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
1605{
1606 /* kvm common code refers to this, but never calls it */
1607 BUG();
1608 return 0;
1609}
1610
14eebd91
CO
1611static int kvm_arch_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu,
1612 struct kvm_one_reg *reg)
1613{
1614 int r = -EINVAL;
1615
1616 switch (reg->id) {
29b7c71b
CO
1617 case KVM_REG_S390_TODPR:
1618 r = put_user(vcpu->arch.sie_block->todpr,
1619 (u32 __user *)reg->addr);
1620 break;
1621 case KVM_REG_S390_EPOCHDIFF:
1622 r = put_user(vcpu->arch.sie_block->epoch,
1623 (u64 __user *)reg->addr);
1624 break;
46a6dd1c
J
1625 case KVM_REG_S390_CPU_TIMER:
1626 r = put_user(vcpu->arch.sie_block->cputm,
1627 (u64 __user *)reg->addr);
1628 break;
1629 case KVM_REG_S390_CLOCK_COMP:
1630 r = put_user(vcpu->arch.sie_block->ckc,
1631 (u64 __user *)reg->addr);
1632 break;
536336c2
DD
1633 case KVM_REG_S390_PFTOKEN:
1634 r = put_user(vcpu->arch.pfault_token,
1635 (u64 __user *)reg->addr);
1636 break;
1637 case KVM_REG_S390_PFCOMPARE:
1638 r = put_user(vcpu->arch.pfault_compare,
1639 (u64 __user *)reg->addr);
1640 break;
1641 case KVM_REG_S390_PFSELECT:
1642 r = put_user(vcpu->arch.pfault_select,
1643 (u64 __user *)reg->addr);
1644 break;
672550fb
CB
1645 case KVM_REG_S390_PP:
1646 r = put_user(vcpu->arch.sie_block->pp,
1647 (u64 __user *)reg->addr);
1648 break;
afa45ff5
CB
1649 case KVM_REG_S390_GBEA:
1650 r = put_user(vcpu->arch.sie_block->gbea,
1651 (u64 __user *)reg->addr);
1652 break;
14eebd91
CO
1653 default:
1654 break;
1655 }
1656
1657 return r;
1658}
1659
1660static int kvm_arch_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu,
1661 struct kvm_one_reg *reg)
1662{
1663 int r = -EINVAL;
1664
1665 switch (reg->id) {
29b7c71b
CO
1666 case KVM_REG_S390_TODPR:
1667 r = get_user(vcpu->arch.sie_block->todpr,
1668 (u32 __user *)reg->addr);
1669 break;
1670 case KVM_REG_S390_EPOCHDIFF:
1671 r = get_user(vcpu->arch.sie_block->epoch,
1672 (u64 __user *)reg->addr);
1673 break;
46a6dd1c
J
1674 case KVM_REG_S390_CPU_TIMER:
1675 r = get_user(vcpu->arch.sie_block->cputm,
1676 (u64 __user *)reg->addr);
1677 break;
1678 case KVM_REG_S390_CLOCK_COMP:
1679 r = get_user(vcpu->arch.sie_block->ckc,
1680 (u64 __user *)reg->addr);
1681 break;
536336c2
DD
1682 case KVM_REG_S390_PFTOKEN:
1683 r = get_user(vcpu->arch.pfault_token,
1684 (u64 __user *)reg->addr);
9fbd8082
DH
1685 if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
1686 kvm_clear_async_pf_completion_queue(vcpu);
536336c2
DD
1687 break;
1688 case KVM_REG_S390_PFCOMPARE:
1689 r = get_user(vcpu->arch.pfault_compare,
1690 (u64 __user *)reg->addr);
1691 break;
1692 case KVM_REG_S390_PFSELECT:
1693 r = get_user(vcpu->arch.pfault_select,
1694 (u64 __user *)reg->addr);
1695 break;
672550fb
CB
1696 case KVM_REG_S390_PP:
1697 r = get_user(vcpu->arch.sie_block->pp,
1698 (u64 __user *)reg->addr);
1699 break;
afa45ff5
CB
1700 case KVM_REG_S390_GBEA:
1701 r = get_user(vcpu->arch.sie_block->gbea,
1702 (u64 __user *)reg->addr);
1703 break;
14eebd91
CO
1704 default:
1705 break;
1706 }
1707
1708 return r;
1709}
b6d33834 1710
b0c632db
HC
1711static int kvm_arch_vcpu_ioctl_initial_reset(struct kvm_vcpu *vcpu)
1712{
b0c632db 1713 kvm_s390_vcpu_initial_reset(vcpu);
b0c632db
HC
1714 return 0;
1715}
1716
1717int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
1718{
5a32c1af 1719 memcpy(&vcpu->run->s.regs.gprs, &regs->gprs, sizeof(regs->gprs));
b0c632db
HC
1720 return 0;
1721}
1722
1723int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
1724{
5a32c1af 1725 memcpy(&regs->gprs, &vcpu->run->s.regs.gprs, sizeof(regs->gprs));
b0c632db
HC
1726 return 0;
1727}
1728
1729int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
1730 struct kvm_sregs *sregs)
1731{
59674c1a 1732 memcpy(&vcpu->run->s.regs.acrs, &sregs->acrs, sizeof(sregs->acrs));
b0c632db 1733 memcpy(&vcpu->arch.sie_block->gcr, &sregs->crs, sizeof(sregs->crs));
59674c1a 1734 restore_access_regs(vcpu->run->s.regs.acrs);
b0c632db
HC
1735 return 0;
1736}
1737
1738int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
1739 struct kvm_sregs *sregs)
1740{
59674c1a 1741 memcpy(&sregs->acrs, &vcpu->run->s.regs.acrs, sizeof(sregs->acrs));
b0c632db 1742 memcpy(&sregs->crs, &vcpu->arch.sie_block->gcr, sizeof(sregs->crs));
b0c632db
HC
1743 return 0;
1744}
1745
1746int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
1747{
4725c860
MS
1748 if (test_fp_ctl(fpu->fpc))
1749 return -EINVAL;
9977e886 1750 memcpy(vcpu->arch.guest_fpregs.fprs, &fpu->fprs, sizeof(fpu->fprs));
4725c860 1751 vcpu->arch.guest_fpregs.fpc = fpu->fpc;
d0164ee2 1752 save_fpu_regs();
9977e886 1753 load_fpu_from(&vcpu->arch.guest_fpregs);
b0c632db
HC
1754 return 0;
1755}
1756
1757int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
1758{
9977e886 1759 memcpy(&fpu->fprs, vcpu->arch.guest_fpregs.fprs, sizeof(fpu->fprs));
b0c632db 1760 fpu->fpc = vcpu->arch.guest_fpregs.fpc;
b0c632db
HC
1761 return 0;
1762}
1763
1764static int kvm_arch_vcpu_ioctl_set_initial_psw(struct kvm_vcpu *vcpu, psw_t psw)
1765{
1766 int rc = 0;
1767
7a42fdc2 1768 if (!is_vcpu_stopped(vcpu))
b0c632db 1769 rc = -EBUSY;
d7b0b5eb
CO
1770 else {
1771 vcpu->run->psw_mask = psw.mask;
1772 vcpu->run->psw_addr = psw.addr;
1773 }
b0c632db
HC
1774 return rc;
1775}
1776
1777int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
1778 struct kvm_translation *tr)
1779{
1780 return -EINVAL; /* not implemented yet */
1781}
1782
27291e21
DH
1783#define VALID_GUESTDBG_FLAGS (KVM_GUESTDBG_SINGLESTEP | \
1784 KVM_GUESTDBG_USE_HW_BP | \
1785 KVM_GUESTDBG_ENABLE)
1786
d0bfb940
JK
1787int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
1788 struct kvm_guest_debug *dbg)
b0c632db 1789{
27291e21
DH
1790 int rc = 0;
1791
1792 vcpu->guest_debug = 0;
1793 kvm_s390_clear_bp_data(vcpu);
1794
2de3bfc2 1795 if (dbg->control & ~VALID_GUESTDBG_FLAGS)
27291e21
DH
1796 return -EINVAL;
1797
1798 if (dbg->control & KVM_GUESTDBG_ENABLE) {
1799 vcpu->guest_debug = dbg->control;
1800 /* enforce guest PER */
805de8f4 1801 atomic_or(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags);
27291e21
DH
1802
1803 if (dbg->control & KVM_GUESTDBG_USE_HW_BP)
1804 rc = kvm_s390_import_bp_data(vcpu, dbg);
1805 } else {
805de8f4 1806 atomic_andnot(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags);
27291e21
DH
1807 vcpu->arch.guestdbg.last_bp = 0;
1808 }
1809
1810 if (rc) {
1811 vcpu->guest_debug = 0;
1812 kvm_s390_clear_bp_data(vcpu);
805de8f4 1813 atomic_andnot(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags);
27291e21
DH
1814 }
1815
1816 return rc;
b0c632db
HC
1817}
1818
62d9f0db
MT
1819int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
1820 struct kvm_mp_state *mp_state)
1821{
6352e4d2
DH
1822 /* CHECK_STOP and LOAD are not supported yet */
1823 return is_vcpu_stopped(vcpu) ? KVM_MP_STATE_STOPPED :
1824 KVM_MP_STATE_OPERATING;
62d9f0db
MT
1825}
1826
1827int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
1828 struct kvm_mp_state *mp_state)
1829{
6352e4d2
DH
1830 int rc = 0;
1831
1832 /* user space knows about this interface - let it control the state */
1833 vcpu->kvm->arch.user_cpu_state_ctrl = 1;
1834
1835 switch (mp_state->mp_state) {
1836 case KVM_MP_STATE_STOPPED:
1837 kvm_s390_vcpu_stop(vcpu);
1838 break;
1839 case KVM_MP_STATE_OPERATING:
1840 kvm_s390_vcpu_start(vcpu);
1841 break;
1842 case KVM_MP_STATE_LOAD:
1843 case KVM_MP_STATE_CHECK_STOP:
1844 /* fall through - CHECK_STOP and LOAD are not supported yet */
1845 default:
1846 rc = -ENXIO;
1847 }
1848
1849 return rc;
62d9f0db
MT
1850}
1851
8ad35755
DH
1852static bool ibs_enabled(struct kvm_vcpu *vcpu)
1853{
1854 return atomic_read(&vcpu->arch.sie_block->cpuflags) & CPUSTAT_IBS;
1855}
1856
2c70fe44
CB
1857static int kvm_s390_handle_requests(struct kvm_vcpu *vcpu)
1858{
8ad35755 1859retry:
8e236546 1860 kvm_s390_vcpu_request_handled(vcpu);
586b7ccd
CB
1861 if (!vcpu->requests)
1862 return 0;
2c70fe44
CB
1863 /*
1864 * We use MMU_RELOAD just to re-arm the ipte notifier for the
1865 * guest prefix page. gmap_ipte_notify will wait on the ptl lock.
1866 * This ensures that the ipte instruction for this request has
1867 * already finished. We might race against a second unmapper that
1868 * wants to set the blocking bit. Lets just retry the request loop.
1869 */
8ad35755 1870 if (kvm_check_request(KVM_REQ_MMU_RELOAD, vcpu)) {
2c70fe44
CB
1871 int rc;
1872 rc = gmap_ipte_notify(vcpu->arch.gmap,
fda902cb 1873 kvm_s390_get_prefix(vcpu),
2c70fe44
CB
1874 PAGE_SIZE * 2);
1875 if (rc)
1876 return rc;
8ad35755 1877 goto retry;
2c70fe44 1878 }
8ad35755 1879
d3d692c8
DH
1880 if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu)) {
1881 vcpu->arch.sie_block->ihcpu = 0xffff;
1882 goto retry;
1883 }
1884
8ad35755
DH
1885 if (kvm_check_request(KVM_REQ_ENABLE_IBS, vcpu)) {
1886 if (!ibs_enabled(vcpu)) {
1887 trace_kvm_s390_enable_disable_ibs(vcpu->vcpu_id, 1);
805de8f4 1888 atomic_or(CPUSTAT_IBS,
8ad35755
DH
1889 &vcpu->arch.sie_block->cpuflags);
1890 }
1891 goto retry;
2c70fe44 1892 }
8ad35755
DH
1893
1894 if (kvm_check_request(KVM_REQ_DISABLE_IBS, vcpu)) {
1895 if (ibs_enabled(vcpu)) {
1896 trace_kvm_s390_enable_disable_ibs(vcpu->vcpu_id, 0);
805de8f4 1897 atomic_andnot(CPUSTAT_IBS,
8ad35755
DH
1898 &vcpu->arch.sie_block->cpuflags);
1899 }
1900 goto retry;
1901 }
1902
0759d068
DH
1903 /* nothing to do, just clear the request */
1904 clear_bit(KVM_REQ_UNHALT, &vcpu->requests);
1905
2c70fe44
CB
1906 return 0;
1907}
1908
fa576c58
TH
1909/**
1910 * kvm_arch_fault_in_page - fault-in guest page if necessary
1911 * @vcpu: The corresponding virtual cpu
1912 * @gpa: Guest physical address
1913 * @writable: Whether the page should be writable or not
1914 *
1915 * Make sure that a guest page has been faulted-in on the host.
1916 *
1917 * Return: Zero on success, negative error code otherwise.
1918 */
1919long kvm_arch_fault_in_page(struct kvm_vcpu *vcpu, gpa_t gpa, int writable)
24eb3a82 1920{
527e30b4
MS
1921 return gmap_fault(vcpu->arch.gmap, gpa,
1922 writable ? FAULT_FLAG_WRITE : 0);
24eb3a82
DD
1923}
1924
3c038e6b
DD
1925static void __kvm_inject_pfault_token(struct kvm_vcpu *vcpu, bool start_token,
1926 unsigned long token)
1927{
1928 struct kvm_s390_interrupt inti;
383d0b05 1929 struct kvm_s390_irq irq;
3c038e6b
DD
1930
1931 if (start_token) {
383d0b05
JF
1932 irq.u.ext.ext_params2 = token;
1933 irq.type = KVM_S390_INT_PFAULT_INIT;
1934 WARN_ON_ONCE(kvm_s390_inject_vcpu(vcpu, &irq));
3c038e6b
DD
1935 } else {
1936 inti.type = KVM_S390_INT_PFAULT_DONE;
383d0b05 1937 inti.parm64 = token;
3c038e6b
DD
1938 WARN_ON_ONCE(kvm_s390_inject_vm(vcpu->kvm, &inti));
1939 }
1940}
1941
1942void kvm_arch_async_page_not_present(struct kvm_vcpu *vcpu,
1943 struct kvm_async_pf *work)
1944{
1945 trace_kvm_s390_pfault_init(vcpu, work->arch.pfault_token);
1946 __kvm_inject_pfault_token(vcpu, true, work->arch.pfault_token);
1947}
1948
1949void kvm_arch_async_page_present(struct kvm_vcpu *vcpu,
1950 struct kvm_async_pf *work)
1951{
1952 trace_kvm_s390_pfault_done(vcpu, work->arch.pfault_token);
1953 __kvm_inject_pfault_token(vcpu, false, work->arch.pfault_token);
1954}
1955
1956void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu,
1957 struct kvm_async_pf *work)
1958{
1959 /* s390 will always inject the page directly */
1960}
1961
1962bool kvm_arch_can_inject_async_page_present(struct kvm_vcpu *vcpu)
1963{
1964 /*
1965 * s390 will always inject the page directly,
1966 * but we still want check_async_completion to cleanup
1967 */
1968 return true;
1969}
1970
1971static int kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu)
1972{
1973 hva_t hva;
1974 struct kvm_arch_async_pf arch;
1975 int rc;
1976
1977 if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
1978 return 0;
1979 if ((vcpu->arch.sie_block->gpsw.mask & vcpu->arch.pfault_select) !=
1980 vcpu->arch.pfault_compare)
1981 return 0;
1982 if (psw_extint_disabled(vcpu))
1983 return 0;
9a022067 1984 if (kvm_s390_vcpu_has_irq(vcpu, 0))
3c038e6b
DD
1985 return 0;
1986 if (!(vcpu->arch.sie_block->gcr[0] & 0x200ul))
1987 return 0;
1988 if (!vcpu->arch.gmap->pfault_enabled)
1989 return 0;
1990
81480cc1
HC
1991 hva = gfn_to_hva(vcpu->kvm, gpa_to_gfn(current->thread.gmap_addr));
1992 hva += current->thread.gmap_addr & ~PAGE_MASK;
1993 if (read_guest_real(vcpu, vcpu->arch.pfault_token, &arch.pfault_token, 8))
3c038e6b
DD
1994 return 0;
1995
1996 rc = kvm_setup_async_pf(vcpu, current->thread.gmap_addr, hva, &arch);
1997 return rc;
1998}
1999
3fb4c40f 2000static int vcpu_pre_run(struct kvm_vcpu *vcpu)
b0c632db 2001{
3fb4c40f 2002 int rc, cpuflags;
e168bf8d 2003
3c038e6b
DD
2004 /*
2005 * On s390 notifications for arriving pages will be delivered directly
2006 * to the guest but the house keeping for completed pfaults is
2007 * handled outside the worker.
2008 */
2009 kvm_check_async_pf_completion(vcpu);
2010
5a32c1af 2011 memcpy(&vcpu->arch.sie_block->gg14, &vcpu->run->s.regs.gprs[14], 16);
b0c632db
HC
2012
2013 if (need_resched())
2014 schedule();
2015
d3a73acb 2016 if (test_cpu_flag(CIF_MCCK_PENDING))
71cde587
CB
2017 s390_handle_mcck();
2018
79395031
JF
2019 if (!kvm_is_ucontrol(vcpu->kvm)) {
2020 rc = kvm_s390_deliver_pending_interrupts(vcpu);
2021 if (rc)
2022 return rc;
2023 }
0ff31867 2024
2c70fe44
CB
2025 rc = kvm_s390_handle_requests(vcpu);
2026 if (rc)
2027 return rc;
2028
27291e21
DH
2029 if (guestdbg_enabled(vcpu)) {
2030 kvm_s390_backup_guest_per_regs(vcpu);
2031 kvm_s390_patch_guest_per_regs(vcpu);
2032 }
2033
b0c632db 2034 vcpu->arch.sie_block->icptcode = 0;
3fb4c40f
TH
2035 cpuflags = atomic_read(&vcpu->arch.sie_block->cpuflags);
2036 VCPU_EVENT(vcpu, 6, "entering sie flags %x", cpuflags);
2037 trace_kvm_s390_sie_enter(vcpu, cpuflags);
2b29a9fd 2038
3fb4c40f
TH
2039 return 0;
2040}
2041
492d8642
TH
2042static int vcpu_post_run_fault_in_sie(struct kvm_vcpu *vcpu)
2043{
2044 psw_t *psw = &vcpu->arch.sie_block->gpsw;
2045 u8 opcode;
2046 int rc;
2047
2048 VCPU_EVENT(vcpu, 3, "%s", "fault in sie instruction");
2049 trace_kvm_s390_sie_fault(vcpu);
2050
2051 /*
2052 * We want to inject an addressing exception, which is defined as a
2053 * suppressing or terminating exception. However, since we came here
2054 * by a DAT access exception, the PSW still points to the faulting
2055 * instruction since DAT exceptions are nullifying. So we've got
2056 * to look up the current opcode to get the length of the instruction
2057 * to be able to forward the PSW.
2058 */
8ae04b8f 2059 rc = read_guest(vcpu, psw->addr, 0, &opcode, 1);
492d8642
TH
2060 if (rc)
2061 return kvm_s390_inject_prog_cond(vcpu, rc);
2062 psw->addr = __rewind_psw(*psw, -insn_length(opcode));
2063
2064 return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
2065}
2066
3fb4c40f
TH
2067static int vcpu_post_run(struct kvm_vcpu *vcpu, int exit_reason)
2068{
24eb3a82 2069 int rc = -1;
2b29a9fd
DD
2070
2071 VCPU_EVENT(vcpu, 6, "exit sie icptcode %d",
2072 vcpu->arch.sie_block->icptcode);
2073 trace_kvm_s390_sie_exit(vcpu, vcpu->arch.sie_block->icptcode);
2074
27291e21
DH
2075 if (guestdbg_enabled(vcpu))
2076 kvm_s390_restore_guest_per_regs(vcpu);
2077
3fb4c40f 2078 if (exit_reason >= 0) {
7c470539 2079 rc = 0;
210b1607
TH
2080 } else if (kvm_is_ucontrol(vcpu->kvm)) {
2081 vcpu->run->exit_reason = KVM_EXIT_S390_UCONTROL;
2082 vcpu->run->s390_ucontrol.trans_exc_code =
2083 current->thread.gmap_addr;
2084 vcpu->run->s390_ucontrol.pgm_code = 0x10;
2085 rc = -EREMOTE;
24eb3a82
DD
2086
2087 } else if (current->thread.gmap_pfault) {
3c038e6b 2088 trace_kvm_s390_major_guest_pfault(vcpu);
24eb3a82 2089 current->thread.gmap_pfault = 0;
fa576c58 2090 if (kvm_arch_setup_async_pf(vcpu)) {
24eb3a82 2091 rc = 0;
fa576c58
TH
2092 } else {
2093 gpa_t gpa = current->thread.gmap_addr;
2094 rc = kvm_arch_fault_in_page(vcpu, gpa, 1);
2095 }
24eb3a82
DD
2096 }
2097
492d8642
TH
2098 if (rc == -1)
2099 rc = vcpu_post_run_fault_in_sie(vcpu);
b0c632db 2100
5a32c1af 2101 memcpy(&vcpu->run->s.regs.gprs[14], &vcpu->arch.sie_block->gg14, 16);
3fb4c40f 2102
a76ccff6
TH
2103 if (rc == 0) {
2104 if (kvm_is_ucontrol(vcpu->kvm))
2955c83f
CB
2105 /* Don't exit for host interrupts. */
2106 rc = vcpu->arch.sie_block->icptcode ? -EOPNOTSUPP : 0;
a76ccff6
TH
2107 else
2108 rc = kvm_handle_sie_intercept(vcpu);
2109 }
2110
3fb4c40f
TH
2111 return rc;
2112}
2113
2114static int __vcpu_run(struct kvm_vcpu *vcpu)
2115{
2116 int rc, exit_reason;
2117
800c1065
TH
2118 /*
2119 * We try to hold kvm->srcu during most of vcpu_run (except when run-
2120 * ning the guest), so that memslots (and other stuff) are protected
2121 */
2122 vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
2123
a76ccff6
TH
2124 do {
2125 rc = vcpu_pre_run(vcpu);
2126 if (rc)
2127 break;
3fb4c40f 2128
800c1065 2129 srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
a76ccff6
TH
2130 /*
2131 * As PF_VCPU will be used in fault handler, between
2132 * guest_enter and guest_exit should be no uaccess.
2133 */
0097d12e
CB
2134 local_irq_disable();
2135 __kvm_guest_enter();
2136 local_irq_enable();
a76ccff6
TH
2137 exit_reason = sie64a(vcpu->arch.sie_block,
2138 vcpu->run->s.regs.gprs);
0097d12e
CB
2139 local_irq_disable();
2140 __kvm_guest_exit();
2141 local_irq_enable();
800c1065 2142 vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
a76ccff6
TH
2143
2144 rc = vcpu_post_run(vcpu, exit_reason);
27291e21 2145 } while (!signal_pending(current) && !guestdbg_exit_pending(vcpu) && !rc);
3fb4c40f 2146
800c1065 2147 srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
e168bf8d 2148 return rc;
b0c632db
HC
2149}
2150
b028ee3e
DH
2151static void sync_regs(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
2152{
2153 vcpu->arch.sie_block->gpsw.mask = kvm_run->psw_mask;
2154 vcpu->arch.sie_block->gpsw.addr = kvm_run->psw_addr;
2155 if (kvm_run->kvm_dirty_regs & KVM_SYNC_PREFIX)
2156 kvm_s390_set_prefix(vcpu, kvm_run->s.regs.prefix);
2157 if (kvm_run->kvm_dirty_regs & KVM_SYNC_CRS) {
2158 memcpy(&vcpu->arch.sie_block->gcr, &kvm_run->s.regs.crs, 128);
d3d692c8
DH
2159 /* some control register changes require a tlb flush */
2160 kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
b028ee3e
DH
2161 }
2162 if (kvm_run->kvm_dirty_regs & KVM_SYNC_ARCH0) {
2163 vcpu->arch.sie_block->cputm = kvm_run->s.regs.cputm;
2164 vcpu->arch.sie_block->ckc = kvm_run->s.regs.ckc;
2165 vcpu->arch.sie_block->todpr = kvm_run->s.regs.todpr;
2166 vcpu->arch.sie_block->pp = kvm_run->s.regs.pp;
2167 vcpu->arch.sie_block->gbea = kvm_run->s.regs.gbea;
2168 }
2169 if (kvm_run->kvm_dirty_regs & KVM_SYNC_PFAULT) {
2170 vcpu->arch.pfault_token = kvm_run->s.regs.pft;
2171 vcpu->arch.pfault_select = kvm_run->s.regs.pfs;
2172 vcpu->arch.pfault_compare = kvm_run->s.regs.pfc;
9fbd8082
DH
2173 if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
2174 kvm_clear_async_pf_completion_queue(vcpu);
b028ee3e
DH
2175 }
2176 kvm_run->kvm_dirty_regs = 0;
2177}
2178
2179static void store_regs(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
2180{
2181 kvm_run->psw_mask = vcpu->arch.sie_block->gpsw.mask;
2182 kvm_run->psw_addr = vcpu->arch.sie_block->gpsw.addr;
2183 kvm_run->s.regs.prefix = kvm_s390_get_prefix(vcpu);
2184 memcpy(&kvm_run->s.regs.crs, &vcpu->arch.sie_block->gcr, 128);
2185 kvm_run->s.regs.cputm = vcpu->arch.sie_block->cputm;
2186 kvm_run->s.regs.ckc = vcpu->arch.sie_block->ckc;
2187 kvm_run->s.regs.todpr = vcpu->arch.sie_block->todpr;
2188 kvm_run->s.regs.pp = vcpu->arch.sie_block->pp;
2189 kvm_run->s.regs.gbea = vcpu->arch.sie_block->gbea;
2190 kvm_run->s.regs.pft = vcpu->arch.pfault_token;
2191 kvm_run->s.regs.pfs = vcpu->arch.pfault_select;
2192 kvm_run->s.regs.pfc = vcpu->arch.pfault_compare;
2193}
2194
b0c632db
HC
2195int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
2196{
8f2abe6a 2197 int rc;
b0c632db
HC
2198 sigset_t sigsaved;
2199
27291e21
DH
2200 if (guestdbg_exit_pending(vcpu)) {
2201 kvm_s390_prepare_debug_exit(vcpu);
2202 return 0;
2203 }
2204
b0c632db
HC
2205 if (vcpu->sigset_active)
2206 sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);
2207
6352e4d2
DH
2208 if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm)) {
2209 kvm_s390_vcpu_start(vcpu);
2210 } else if (is_vcpu_stopped(vcpu)) {
ea2cdd27 2211 pr_err_ratelimited("can't run stopped vcpu %d\n",
6352e4d2
DH
2212 vcpu->vcpu_id);
2213 return -EINVAL;
2214 }
b0c632db 2215
b028ee3e 2216 sync_regs(vcpu, kvm_run);
d7b0b5eb 2217
dab4079d 2218 might_fault();
a76ccff6 2219 rc = __vcpu_run(vcpu);
9ace903d 2220
b1d16c49
CE
2221 if (signal_pending(current) && !rc) {
2222 kvm_run->exit_reason = KVM_EXIT_INTR;
8f2abe6a 2223 rc = -EINTR;
b1d16c49 2224 }
8f2abe6a 2225
27291e21
DH
2226 if (guestdbg_exit_pending(vcpu) && !rc) {
2227 kvm_s390_prepare_debug_exit(vcpu);
2228 rc = 0;
2229 }
2230
b8e660b8 2231 if (rc == -EOPNOTSUPP) {
8f2abe6a
CB
2232 /* intercept cannot be handled in-kernel, prepare kvm-run */
2233 kvm_run->exit_reason = KVM_EXIT_S390_SIEIC;
2234 kvm_run->s390_sieic.icptcode = vcpu->arch.sie_block->icptcode;
8f2abe6a
CB
2235 kvm_run->s390_sieic.ipa = vcpu->arch.sie_block->ipa;
2236 kvm_run->s390_sieic.ipb = vcpu->arch.sie_block->ipb;
2237 rc = 0;
2238 }
2239
2240 if (rc == -EREMOTE) {
2241 /* intercept was handled, but userspace support is needed
2242 * kvm_run has been prepared by the handler */
2243 rc = 0;
2244 }
b0c632db 2245
b028ee3e 2246 store_regs(vcpu, kvm_run);
d7b0b5eb 2247
b0c632db
HC
2248 if (vcpu->sigset_active)
2249 sigprocmask(SIG_SETMASK, &sigsaved, NULL);
2250
b0c632db 2251 vcpu->stat.exit_userspace++;
7e8e6ab4 2252 return rc;
b0c632db
HC
2253}
2254
b0c632db
HC
2255/*
2256 * store status at address
2257 * we use have two special cases:
2258 * KVM_S390_STORE_STATUS_NOADDR: -> 0x1200 on 64 bit
2259 * KVM_S390_STORE_STATUS_PREFIXED: -> prefix
2260 */
d0bce605 2261int kvm_s390_store_status_unloaded(struct kvm_vcpu *vcpu, unsigned long gpa)
b0c632db 2262{
092670cd 2263 unsigned char archmode = 1;
fda902cb 2264 unsigned int px;
178bd789 2265 u64 clkcomp;
d0bce605 2266 int rc;
b0c632db 2267
d0bce605
HC
2268 if (gpa == KVM_S390_STORE_STATUS_NOADDR) {
2269 if (write_guest_abs(vcpu, 163, &archmode, 1))
b0c632db 2270 return -EFAULT;
d0bce605
HC
2271 gpa = SAVE_AREA_BASE;
2272 } else if (gpa == KVM_S390_STORE_STATUS_PREFIXED) {
2273 if (write_guest_real(vcpu, 163, &archmode, 1))
b0c632db 2274 return -EFAULT;
d0bce605
HC
2275 gpa = kvm_s390_real_to_abs(vcpu, SAVE_AREA_BASE);
2276 }
2277 rc = write_guest_abs(vcpu, gpa + offsetof(struct save_area, fp_regs),
2278 vcpu->arch.guest_fpregs.fprs, 128);
2279 rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, gp_regs),
2280 vcpu->run->s.regs.gprs, 128);
2281 rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, psw),
2282 &vcpu->arch.sie_block->gpsw, 16);
fda902cb 2283 px = kvm_s390_get_prefix(vcpu);
d0bce605 2284 rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, pref_reg),
fda902cb 2285 &px, 4);
d0bce605
HC
2286 rc |= write_guest_abs(vcpu,
2287 gpa + offsetof(struct save_area, fp_ctrl_reg),
2288 &vcpu->arch.guest_fpregs.fpc, 4);
2289 rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, tod_reg),
2290 &vcpu->arch.sie_block->todpr, 4);
2291 rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, timer),
2292 &vcpu->arch.sie_block->cputm, 8);
178bd789 2293 clkcomp = vcpu->arch.sie_block->ckc >> 8;
d0bce605
HC
2294 rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, clk_cmp),
2295 &clkcomp, 8);
2296 rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, acc_regs),
2297 &vcpu->run->s.regs.acrs, 64);
2298 rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, ctrl_regs),
2299 &vcpu->arch.sie_block->gcr, 128);
2300 return rc ? -EFAULT : 0;
b0c632db
HC
2301}
2302
e879892c
TH
2303int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
2304{
2305 /*
2306 * The guest FPRS and ACRS are in the host FPRS/ACRS due to the lazy
2307 * copying in vcpu load/put. Lets update our copies before we save
2308 * it into the save area
2309 */
d0164ee2 2310 save_fpu_regs();
9977e886
HB
2311 if (test_kvm_facility(vcpu->kvm, 129)) {
2312 /*
2313 * If the vector extension is available, the vector registers
2314 * which overlaps with floating-point registers are saved in
2315 * the SIE-control block. Hence, extract the floating-point
2316 * registers and the FPC value and store them in the
2317 * guest_fpregs structure.
2318 */
2319 WARN_ON(!is_vx_task(current)); /* XXX remove later */
2320 vcpu->arch.guest_fpregs.fpc = current->thread.fpu.fpc;
2321 convert_vx_to_fp(vcpu->arch.guest_fpregs.fprs,
2322 current->thread.fpu.vxrs);
2323 } else
2324 save_fpu_to(&vcpu->arch.guest_fpregs);
e879892c
TH
2325 save_access_regs(vcpu->run->s.regs.acrs);
2326
2327 return kvm_s390_store_status_unloaded(vcpu, addr);
2328}
2329
bc17de7c
EF
2330/*
2331 * store additional status at address
2332 */
2333int kvm_s390_store_adtl_status_unloaded(struct kvm_vcpu *vcpu,
2334 unsigned long gpa)
2335{
2336 /* Only bits 0-53 are used for address formation */
2337 if (!(gpa & ~0x3ff))
2338 return 0;
2339
2340 return write_guest_abs(vcpu, gpa & ~0x3ff,
2341 (void *)&vcpu->run->s.regs.vrs, 512);
2342}
2343
2344int kvm_s390_vcpu_store_adtl_status(struct kvm_vcpu *vcpu, unsigned long addr)
2345{
2346 if (!test_kvm_facility(vcpu->kvm, 129))
2347 return 0;
2348
2349 /*
2350 * The guest VXRS are in the host VXRs due to the lazy
9977e886
HB
2351 * copying in vcpu load/put. We can simply call save_fpu_regs()
2352 * to save the current register state because we are in the
2353 * middle of a load/put cycle.
2354 *
2355 * Let's update our copies before we save it into the save area.
bc17de7c 2356 */
d0164ee2 2357 save_fpu_regs();
bc17de7c
EF
2358
2359 return kvm_s390_store_adtl_status_unloaded(vcpu, addr);
2360}
2361
8ad35755
DH
2362static void __disable_ibs_on_vcpu(struct kvm_vcpu *vcpu)
2363{
2364 kvm_check_request(KVM_REQ_ENABLE_IBS, vcpu);
8e236546 2365 kvm_s390_sync_request(KVM_REQ_DISABLE_IBS, vcpu);
8ad35755
DH
2366}
2367
2368static void __disable_ibs_on_all_vcpus(struct kvm *kvm)
2369{
2370 unsigned int i;
2371 struct kvm_vcpu *vcpu;
2372
2373 kvm_for_each_vcpu(i, vcpu, kvm) {
2374 __disable_ibs_on_vcpu(vcpu);
2375 }
2376}
2377
2378static void __enable_ibs_on_vcpu(struct kvm_vcpu *vcpu)
2379{
2380 kvm_check_request(KVM_REQ_DISABLE_IBS, vcpu);
8e236546 2381 kvm_s390_sync_request(KVM_REQ_ENABLE_IBS, vcpu);
8ad35755
DH
2382}
2383
6852d7b6
DH
2384void kvm_s390_vcpu_start(struct kvm_vcpu *vcpu)
2385{
8ad35755
DH
2386 int i, online_vcpus, started_vcpus = 0;
2387
2388 if (!is_vcpu_stopped(vcpu))
2389 return;
2390
6852d7b6 2391 trace_kvm_s390_vcpu_start_stop(vcpu->vcpu_id, 1);
8ad35755 2392 /* Only one cpu at a time may enter/leave the STOPPED state. */
433b9ee4 2393 spin_lock(&vcpu->kvm->arch.start_stop_lock);
8ad35755
DH
2394 online_vcpus = atomic_read(&vcpu->kvm->online_vcpus);
2395
2396 for (i = 0; i < online_vcpus; i++) {
2397 if (!is_vcpu_stopped(vcpu->kvm->vcpus[i]))
2398 started_vcpus++;
2399 }
2400
2401 if (started_vcpus == 0) {
2402 /* we're the only active VCPU -> speed it up */
2403 __enable_ibs_on_vcpu(vcpu);
2404 } else if (started_vcpus == 1) {
2405 /*
2406 * As we are starting a second VCPU, we have to disable
2407 * the IBS facility on all VCPUs to remove potentially
2408 * oustanding ENABLE requests.
2409 */
2410 __disable_ibs_on_all_vcpus(vcpu->kvm);
2411 }
2412
805de8f4 2413 atomic_andnot(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);
8ad35755
DH
2414 /*
2415 * Another VCPU might have used IBS while we were offline.
2416 * Let's play safe and flush the VCPU at startup.
2417 */
d3d692c8 2418 kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
433b9ee4 2419 spin_unlock(&vcpu->kvm->arch.start_stop_lock);
8ad35755 2420 return;
6852d7b6
DH
2421}
2422
2423void kvm_s390_vcpu_stop(struct kvm_vcpu *vcpu)
2424{
8ad35755
DH
2425 int i, online_vcpus, started_vcpus = 0;
2426 struct kvm_vcpu *started_vcpu = NULL;
2427
2428 if (is_vcpu_stopped(vcpu))
2429 return;
2430
6852d7b6 2431 trace_kvm_s390_vcpu_start_stop(vcpu->vcpu_id, 0);
8ad35755 2432 /* Only one cpu at a time may enter/leave the STOPPED state. */
433b9ee4 2433 spin_lock(&vcpu->kvm->arch.start_stop_lock);
8ad35755
DH
2434 online_vcpus = atomic_read(&vcpu->kvm->online_vcpus);
2435
32f5ff63 2436 /* SIGP STOP and SIGP STOP AND STORE STATUS has been fully processed */
6cddd432 2437 kvm_s390_clear_stop_irq(vcpu);
32f5ff63 2438
805de8f4 2439 atomic_or(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);
8ad35755
DH
2440 __disable_ibs_on_vcpu(vcpu);
2441
2442 for (i = 0; i < online_vcpus; i++) {
2443 if (!is_vcpu_stopped(vcpu->kvm->vcpus[i])) {
2444 started_vcpus++;
2445 started_vcpu = vcpu->kvm->vcpus[i];
2446 }
2447 }
2448
2449 if (started_vcpus == 1) {
2450 /*
2451 * As we only have one VCPU left, we want to enable the
2452 * IBS facility for that VCPU to speed it up.
2453 */
2454 __enable_ibs_on_vcpu(started_vcpu);
2455 }
2456
433b9ee4 2457 spin_unlock(&vcpu->kvm->arch.start_stop_lock);
8ad35755 2458 return;
6852d7b6
DH
2459}
2460
d6712df9
CH
2461static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
2462 struct kvm_enable_cap *cap)
2463{
2464 int r;
2465
2466 if (cap->flags)
2467 return -EINVAL;
2468
2469 switch (cap->cap) {
fa6b7fe9
CH
2470 case KVM_CAP_S390_CSS_SUPPORT:
2471 if (!vcpu->kvm->arch.css_support) {
2472 vcpu->kvm->arch.css_support = 1;
c92ea7b9 2473 VM_EVENT(vcpu->kvm, 3, "%s", "ENABLE: CSS support");
fa6b7fe9
CH
2474 trace_kvm_s390_enable_css(vcpu->kvm);
2475 }
2476 r = 0;
2477 break;
d6712df9
CH
2478 default:
2479 r = -EINVAL;
2480 break;
2481 }
2482 return r;
2483}
2484
41408c28
TH
2485static long kvm_s390_guest_mem_op(struct kvm_vcpu *vcpu,
2486 struct kvm_s390_mem_op *mop)
2487{
2488 void __user *uaddr = (void __user *)mop->buf;
2489 void *tmpbuf = NULL;
2490 int r, srcu_idx;
2491 const u64 supported_flags = KVM_S390_MEMOP_F_INJECT_EXCEPTION
2492 | KVM_S390_MEMOP_F_CHECK_ONLY;
2493
2494 if (mop->flags & ~supported_flags)
2495 return -EINVAL;
2496
2497 if (mop->size > MEM_OP_MAX_SIZE)
2498 return -E2BIG;
2499
2500 if (!(mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY)) {
2501 tmpbuf = vmalloc(mop->size);
2502 if (!tmpbuf)
2503 return -ENOMEM;
2504 }
2505
2506 srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
2507
2508 switch (mop->op) {
2509 case KVM_S390_MEMOP_LOGICAL_READ:
2510 if (mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY) {
2511 r = check_gva_range(vcpu, mop->gaddr, mop->ar, mop->size, false);
2512 break;
2513 }
2514 r = read_guest(vcpu, mop->gaddr, mop->ar, tmpbuf, mop->size);
2515 if (r == 0) {
2516 if (copy_to_user(uaddr, tmpbuf, mop->size))
2517 r = -EFAULT;
2518 }
2519 break;
2520 case KVM_S390_MEMOP_LOGICAL_WRITE:
2521 if (mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY) {
2522 r = check_gva_range(vcpu, mop->gaddr, mop->ar, mop->size, true);
2523 break;
2524 }
2525 if (copy_from_user(tmpbuf, uaddr, mop->size)) {
2526 r = -EFAULT;
2527 break;
2528 }
2529 r = write_guest(vcpu, mop->gaddr, mop->ar, tmpbuf, mop->size);
2530 break;
2531 default:
2532 r = -EINVAL;
2533 }
2534
2535 srcu_read_unlock(&vcpu->kvm->srcu, srcu_idx);
2536
2537 if (r > 0 && (mop->flags & KVM_S390_MEMOP_F_INJECT_EXCEPTION) != 0)
2538 kvm_s390_inject_prog_irq(vcpu, &vcpu->arch.pgm);
2539
2540 vfree(tmpbuf);
2541 return r;
2542}
2543
b0c632db
HC
2544long kvm_arch_vcpu_ioctl(struct file *filp,
2545 unsigned int ioctl, unsigned long arg)
2546{
2547 struct kvm_vcpu *vcpu = filp->private_data;
2548 void __user *argp = (void __user *)arg;
800c1065 2549 int idx;
bc923cc9 2550 long r;
b0c632db 2551
93736624 2552 switch (ioctl) {
47b43c52
JF
2553 case KVM_S390_IRQ: {
2554 struct kvm_s390_irq s390irq;
2555
2556 r = -EFAULT;
2557 if (copy_from_user(&s390irq, argp, sizeof(s390irq)))
2558 break;
2559 r = kvm_s390_inject_vcpu(vcpu, &s390irq);
2560 break;
2561 }
93736624 2562 case KVM_S390_INTERRUPT: {
ba5c1e9b 2563 struct kvm_s390_interrupt s390int;
383d0b05 2564 struct kvm_s390_irq s390irq;
ba5c1e9b 2565
93736624 2566 r = -EFAULT;
ba5c1e9b 2567 if (copy_from_user(&s390int, argp, sizeof(s390int)))
93736624 2568 break;
383d0b05
JF
2569 if (s390int_to_s390irq(&s390int, &s390irq))
2570 return -EINVAL;
2571 r = kvm_s390_inject_vcpu(vcpu, &s390irq);
93736624 2572 break;
ba5c1e9b 2573 }
b0c632db 2574 case KVM_S390_STORE_STATUS:
800c1065 2575 idx = srcu_read_lock(&vcpu->kvm->srcu);
bc923cc9 2576 r = kvm_s390_vcpu_store_status(vcpu, arg);
800c1065 2577 srcu_read_unlock(&vcpu->kvm->srcu, idx);
bc923cc9 2578 break;
b0c632db
HC
2579 case KVM_S390_SET_INITIAL_PSW: {
2580 psw_t psw;
2581
bc923cc9 2582 r = -EFAULT;
b0c632db 2583 if (copy_from_user(&psw, argp, sizeof(psw)))
bc923cc9
AK
2584 break;
2585 r = kvm_arch_vcpu_ioctl_set_initial_psw(vcpu, psw);
2586 break;
b0c632db
HC
2587 }
2588 case KVM_S390_INITIAL_RESET:
bc923cc9
AK
2589 r = kvm_arch_vcpu_ioctl_initial_reset(vcpu);
2590 break;
14eebd91
CO
2591 case KVM_SET_ONE_REG:
2592 case KVM_GET_ONE_REG: {
2593 struct kvm_one_reg reg;
2594 r = -EFAULT;
2595 if (copy_from_user(&reg, argp, sizeof(reg)))
2596 break;
2597 if (ioctl == KVM_SET_ONE_REG)
2598 r = kvm_arch_vcpu_ioctl_set_one_reg(vcpu, &reg);
2599 else
2600 r = kvm_arch_vcpu_ioctl_get_one_reg(vcpu, &reg);
2601 break;
2602 }
27e0393f
CO
2603#ifdef CONFIG_KVM_S390_UCONTROL
2604 case KVM_S390_UCAS_MAP: {
2605 struct kvm_s390_ucas_mapping ucasmap;
2606
2607 if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
2608 r = -EFAULT;
2609 break;
2610 }
2611
2612 if (!kvm_is_ucontrol(vcpu->kvm)) {
2613 r = -EINVAL;
2614 break;
2615 }
2616
2617 r = gmap_map_segment(vcpu->arch.gmap, ucasmap.user_addr,
2618 ucasmap.vcpu_addr, ucasmap.length);
2619 break;
2620 }
2621 case KVM_S390_UCAS_UNMAP: {
2622 struct kvm_s390_ucas_mapping ucasmap;
2623
2624 if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
2625 r = -EFAULT;
2626 break;
2627 }
2628
2629 if (!kvm_is_ucontrol(vcpu->kvm)) {
2630 r = -EINVAL;
2631 break;
2632 }
2633
2634 r = gmap_unmap_segment(vcpu->arch.gmap, ucasmap.vcpu_addr,
2635 ucasmap.length);
2636 break;
2637 }
2638#endif
ccc7910f 2639 case KVM_S390_VCPU_FAULT: {
527e30b4 2640 r = gmap_fault(vcpu->arch.gmap, arg, 0);
ccc7910f
CO
2641 break;
2642 }
d6712df9
CH
2643 case KVM_ENABLE_CAP:
2644 {
2645 struct kvm_enable_cap cap;
2646 r = -EFAULT;
2647 if (copy_from_user(&cap, argp, sizeof(cap)))
2648 break;
2649 r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap);
2650 break;
2651 }
41408c28
TH
2652 case KVM_S390_MEM_OP: {
2653 struct kvm_s390_mem_op mem_op;
2654
2655 if (copy_from_user(&mem_op, argp, sizeof(mem_op)) == 0)
2656 r = kvm_s390_guest_mem_op(vcpu, &mem_op);
2657 else
2658 r = -EFAULT;
2659 break;
2660 }
816c7667
JF
2661 case KVM_S390_SET_IRQ_STATE: {
2662 struct kvm_s390_irq_state irq_state;
2663
2664 r = -EFAULT;
2665 if (copy_from_user(&irq_state, argp, sizeof(irq_state)))
2666 break;
2667 if (irq_state.len > VCPU_IRQS_MAX_BUF ||
2668 irq_state.len == 0 ||
2669 irq_state.len % sizeof(struct kvm_s390_irq) > 0) {
2670 r = -EINVAL;
2671 break;
2672 }
2673 r = kvm_s390_set_irq_state(vcpu,
2674 (void __user *) irq_state.buf,
2675 irq_state.len);
2676 break;
2677 }
2678 case KVM_S390_GET_IRQ_STATE: {
2679 struct kvm_s390_irq_state irq_state;
2680
2681 r = -EFAULT;
2682 if (copy_from_user(&irq_state, argp, sizeof(irq_state)))
2683 break;
2684 if (irq_state.len == 0) {
2685 r = -EINVAL;
2686 break;
2687 }
2688 r = kvm_s390_get_irq_state(vcpu,
2689 (__u8 __user *) irq_state.buf,
2690 irq_state.len);
2691 break;
2692 }
b0c632db 2693 default:
3e6afcf1 2694 r = -ENOTTY;
b0c632db 2695 }
bc923cc9 2696 return r;
b0c632db
HC
2697}
2698
5b1c1493
CO
2699int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
2700{
2701#ifdef CONFIG_KVM_S390_UCONTROL
2702 if ((vmf->pgoff == KVM_S390_SIE_PAGE_OFFSET)
2703 && (kvm_is_ucontrol(vcpu->kvm))) {
2704 vmf->page = virt_to_page(vcpu->arch.sie_block);
2705 get_page(vmf->page);
2706 return 0;
2707 }
2708#endif
2709 return VM_FAULT_SIGBUS;
2710}
2711
5587027c
AK
2712int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
2713 unsigned long npages)
db3fe4eb
TY
2714{
2715 return 0;
2716}
2717
b0c632db 2718/* Section: memory related */
f7784b8e
MT
2719int kvm_arch_prepare_memory_region(struct kvm *kvm,
2720 struct kvm_memory_slot *memslot,
09170a49 2721 const struct kvm_userspace_memory_region *mem,
7b6195a9 2722 enum kvm_mr_change change)
b0c632db 2723{
dd2887e7
NW
2724 /* A few sanity checks. We can have memory slots which have to be
2725 located/ended at a segment boundary (1MB). The memory in userland is
2726 ok to be fragmented into various different vmas. It is okay to mmap()
2727 and munmap() stuff in this slot after doing this call at any time */
b0c632db 2728
598841ca 2729 if (mem->userspace_addr & 0xffffful)
b0c632db
HC
2730 return -EINVAL;
2731
598841ca 2732 if (mem->memory_size & 0xffffful)
b0c632db
HC
2733 return -EINVAL;
2734
f7784b8e
MT
2735 return 0;
2736}
2737
2738void kvm_arch_commit_memory_region(struct kvm *kvm,
09170a49 2739 const struct kvm_userspace_memory_region *mem,
8482644a 2740 const struct kvm_memory_slot *old,
f36f3f28 2741 const struct kvm_memory_slot *new,
8482644a 2742 enum kvm_mr_change change)
f7784b8e 2743{
f7850c92 2744 int rc;
f7784b8e 2745
2cef4deb
CB
2746 /* If the basics of the memslot do not change, we do not want
2747 * to update the gmap. Every update causes several unnecessary
2748 * segment translation exceptions. This is usually handled just
2749 * fine by the normal fault handler + gmap, but it will also
2750 * cause faults on the prefix page of running guest CPUs.
2751 */
2752 if (old->userspace_addr == mem->userspace_addr &&
2753 old->base_gfn * PAGE_SIZE == mem->guest_phys_addr &&
2754 old->npages * PAGE_SIZE == mem->memory_size)
2755 return;
598841ca
CO
2756
2757 rc = gmap_map_segment(kvm->arch.gmap, mem->userspace_addr,
2758 mem->guest_phys_addr, mem->memory_size);
2759 if (rc)
ea2cdd27 2760 pr_warn("failed to commit memory region\n");
598841ca 2761 return;
b0c632db
HC
2762}
2763
b0c632db
HC
2764static int __init kvm_s390_init(void)
2765{
9d8d5786 2766 return kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
b0c632db
HC
2767}
2768
2769static void __exit kvm_s390_exit(void)
2770{
2771 kvm_exit();
2772}
2773
2774module_init(kvm_s390_init);
2775module_exit(kvm_s390_exit);
566af940
CH
2776
2777/*
2778 * Enable autoloading of the kvm module.
2779 * Note that we add the module alias here instead of virt/kvm/kvm_main.c
2780 * since x86 takes a different approach.
2781 */
2782#include <linux/miscdevice.h>
2783MODULE_ALIAS_MISCDEV(KVM_MINOR);
2784MODULE_ALIAS("devname:kvm");