/*
 * hosting zSeries kernel virtual machines
 *
 * Copyright IBM Corp. 2008, 2009
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (version 2 only)
 * as published by the Free Software Foundation.
 *
 * Author(s): Carsten Otte <cotte@de.ibm.com>
 *            Christian Borntraeger <borntraeger@de.ibm.com>
 *            Heiko Carstens <heiko.carstens@de.ibm.com>
 *            Christian Ehrhardt <ehrhardt@de.ibm.com>
 *            Jason J. Herne <jjherne@us.ibm.com>
 */

#include <linux/compiler.h>
#include <linux/err.h>
#include <linux/fs.h>
#include <linux/hrtimer.h>
#include <linux/init.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/mman.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/timer.h>
#include <linux/vmalloc.h>
#include <linux/bitmap.h>
#include <asm/asm-offsets.h>
#include <asm/lowcore.h>
#include <asm/etr.h>
#include <asm/pgtable.h>
#include <asm/gmap.h>
#include <asm/nmi.h>
#include <asm/switch_to.h>
#include <asm/isc.h>
#include <asm/sclp.h>
#include <asm/cpacf.h>
#include "kvm-s390.h"
#include "gaccess.h"

#define KMSG_COMPONENT "kvm-s390"
#undef pr_fmt
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#define CREATE_TRACE_POINTS
#include "trace.h"
#include "trace-s390.h"

#define MEM_OP_MAX_SIZE 65536	/* Maximum transfer size for KVM_S390_MEM_OP */
#define LOCAL_IRQS 32
#define VCPU_IRQS_MAX_BUF (sizeof(struct kvm_s390_irq) * \
			   (KVM_MAX_VCPUS + LOCAL_IRQS))

#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU

struct kvm_stats_debugfs_item debugfs_entries[] = {
	{ "userspace_handled", VCPU_STAT(exit_userspace) },
	{ "exit_null", VCPU_STAT(exit_null) },
	{ "exit_validity", VCPU_STAT(exit_validity) },
	{ "exit_stop_request", VCPU_STAT(exit_stop_request) },
	{ "exit_external_request", VCPU_STAT(exit_external_request) },
	{ "exit_external_interrupt", VCPU_STAT(exit_external_interrupt) },
	{ "exit_instruction", VCPU_STAT(exit_instruction) },
	{ "exit_program_interruption", VCPU_STAT(exit_program_interruption) },
	{ "exit_instr_and_program_int", VCPU_STAT(exit_instr_and_program) },
	{ "exit_operation_exception", VCPU_STAT(exit_operation_exception) },
	{ "halt_successful_poll", VCPU_STAT(halt_successful_poll) },
	{ "halt_attempted_poll", VCPU_STAT(halt_attempted_poll) },
	{ "halt_poll_invalid", VCPU_STAT(halt_poll_invalid) },
	{ "halt_wakeup", VCPU_STAT(halt_wakeup) },
	{ "instruction_lctlg", VCPU_STAT(instruction_lctlg) },
	{ "instruction_lctl", VCPU_STAT(instruction_lctl) },
	{ "instruction_stctl", VCPU_STAT(instruction_stctl) },
	{ "instruction_stctg", VCPU_STAT(instruction_stctg) },
	{ "deliver_emergency_signal", VCPU_STAT(deliver_emergency_signal) },
	{ "deliver_external_call", VCPU_STAT(deliver_external_call) },
	{ "deliver_service_signal", VCPU_STAT(deliver_service_signal) },
	{ "deliver_virtio_interrupt", VCPU_STAT(deliver_virtio_interrupt) },
	{ "deliver_stop_signal", VCPU_STAT(deliver_stop_signal) },
	{ "deliver_prefix_signal", VCPU_STAT(deliver_prefix_signal) },
	{ "deliver_restart_signal", VCPU_STAT(deliver_restart_signal) },
	{ "deliver_program_interruption", VCPU_STAT(deliver_program_int) },
	{ "exit_wait_state", VCPU_STAT(exit_wait_state) },
	{ "instruction_pfmf", VCPU_STAT(instruction_pfmf) },
	{ "instruction_stidp", VCPU_STAT(instruction_stidp) },
	{ "instruction_spx", VCPU_STAT(instruction_spx) },
	{ "instruction_stpx", VCPU_STAT(instruction_stpx) },
	{ "instruction_stap", VCPU_STAT(instruction_stap) },
	{ "instruction_storage_key", VCPU_STAT(instruction_storage_key) },
	{ "instruction_ipte_interlock", VCPU_STAT(instruction_ipte_interlock) },
	{ "instruction_stsch", VCPU_STAT(instruction_stsch) },
	{ "instruction_chsc", VCPU_STAT(instruction_chsc) },
	{ "instruction_essa", VCPU_STAT(instruction_essa) },
	{ "instruction_stsi", VCPU_STAT(instruction_stsi) },
	{ "instruction_stfl", VCPU_STAT(instruction_stfl) },
	{ "instruction_tprot", VCPU_STAT(instruction_tprot) },
	{ "instruction_sthyi", VCPU_STAT(instruction_sthyi) },
	{ "instruction_sie", VCPU_STAT(instruction_sie) },
	{ "instruction_sigp_sense", VCPU_STAT(instruction_sigp_sense) },
	{ "instruction_sigp_sense_running", VCPU_STAT(instruction_sigp_sense_running) },
	{ "instruction_sigp_external_call", VCPU_STAT(instruction_sigp_external_call) },
	{ "instruction_sigp_emergency", VCPU_STAT(instruction_sigp_emergency) },
	{ "instruction_sigp_cond_emergency", VCPU_STAT(instruction_sigp_cond_emergency) },
	{ "instruction_sigp_start", VCPU_STAT(instruction_sigp_start) },
	{ "instruction_sigp_stop", VCPU_STAT(instruction_sigp_stop) },
	{ "instruction_sigp_stop_store_status", VCPU_STAT(instruction_sigp_stop_store_status) },
	{ "instruction_sigp_store_status", VCPU_STAT(instruction_sigp_store_status) },
	{ "instruction_sigp_store_adtl_status", VCPU_STAT(instruction_sigp_store_adtl_status) },
	{ "instruction_sigp_set_arch", VCPU_STAT(instruction_sigp_arch) },
	{ "instruction_sigp_set_prefix", VCPU_STAT(instruction_sigp_prefix) },
	{ "instruction_sigp_restart", VCPU_STAT(instruction_sigp_restart) },
	{ "instruction_sigp_cpu_reset", VCPU_STAT(instruction_sigp_cpu_reset) },
	{ "instruction_sigp_init_cpu_reset", VCPU_STAT(instruction_sigp_init_cpu_reset) },
	{ "instruction_sigp_unknown", VCPU_STAT(instruction_sigp_unknown) },
	{ "diagnose_10", VCPU_STAT(diagnose_10) },
	{ "diagnose_44", VCPU_STAT(diagnose_44) },
	{ "diagnose_9c", VCPU_STAT(diagnose_9c) },
	{ "diagnose_258", VCPU_STAT(diagnose_258) },
	{ "diagnose_308", VCPU_STAT(diagnose_308) },
	{ "diagnose_500", VCPU_STAT(diagnose_500) },
	{ NULL }
};

/* upper facilities limit for kvm */
unsigned long kvm_s390_fac_list_mask[16] = {
	0xffe6000000000000UL,
	0x005e000000000000UL,
};

unsigned long kvm_s390_fac_list_mask_size(void)
{
	BUILD_BUG_ON(ARRAY_SIZE(kvm_s390_fac_list_mask) > S390_ARCH_FAC_MASK_SIZE_U64);
	return ARRAY_SIZE(kvm_s390_fac_list_mask);
}

/* available cpu features supported by kvm */
static DECLARE_BITMAP(kvm_s390_available_cpu_feat, KVM_S390_VM_CPU_FEAT_NR_BITS);
/* available subfunctions indicated via query / "test bit" */
static struct kvm_s390_vm_cpu_subfunc kvm_s390_available_subfunc;

static struct gmap_notifier gmap_notifier;
static struct gmap_notifier vsie_gmap_notifier;
debug_info_t *kvm_s390_dbf;

/* Section: not file related */
int kvm_arch_hardware_enable(void)
{
	/* every s390 is virtualization enabled ;-) */
	return 0;
}

static void kvm_gmap_notifier(struct gmap *gmap, unsigned long start,
			      unsigned long end);

/*
 * This callback is executed during stop_machine(). All CPUs are therefore
 * temporarily stopped. In order not to change guest behavior, we have to
 * disable preemption whenever we touch the epoch of kvm and the VCPUs,
 * so a CPU won't be stopped while calculating with the epoch.
 */
static int kvm_clock_sync(struct notifier_block *notifier, unsigned long val,
			  void *v)
{
	struct kvm *kvm;
	struct kvm_vcpu *vcpu;
	int i;
	unsigned long long *delta = v;

	list_for_each_entry(kvm, &vm_list, vm_list) {
		kvm->arch.epoch -= *delta;
		kvm_for_each_vcpu(i, vcpu, kvm) {
			vcpu->arch.sie_block->epoch -= *delta;
			if (vcpu->arch.cputm_enabled)
				vcpu->arch.cputm_start += *delta;
		}
	}
	return NOTIFY_OK;
}

static struct notifier_block kvm_clock_notifier = {
	.notifier_call = kvm_clock_sync,
};

int kvm_arch_hardware_setup(void)
{
	gmap_notifier.notifier_call = kvm_gmap_notifier;
	gmap_register_pte_notifier(&gmap_notifier);
	vsie_gmap_notifier.notifier_call = kvm_s390_vsie_gmap_notifier;
	gmap_register_pte_notifier(&vsie_gmap_notifier);
	atomic_notifier_chain_register(&s390_epoch_delta_notifier,
				       &kvm_clock_notifier);
	return 0;
}

void kvm_arch_hardware_unsetup(void)
{
	gmap_unregister_pte_notifier(&gmap_notifier);
	gmap_unregister_pte_notifier(&vsie_gmap_notifier);
	atomic_notifier_chain_unregister(&s390_epoch_delta_notifier,
					 &kvm_clock_notifier);
}

static void allow_cpu_feat(unsigned long nr)
{
	set_bit_inv(nr, kvm_s390_available_cpu_feat);
}

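/*
 * PLO ("perform locked operation") with bit 0x100 set in r0 executes the
 * "test bit" query instead of a real operation; condition code 0 then
 * indicates that the numbered PLO subfunction is available.
 */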
static inline int plo_test_bit(unsigned char nr)
{
	register unsigned long r0 asm("0") = (unsigned long) nr | 0x100;
	int cc = 3; /* subfunction not available */

	asm volatile(
		/* Parameter registers are ignored for "test bit" */
		"	plo	0,0,0,0(0)\n"
		"	ipm	%0\n"
		"	srl	%0,28\n"
		: "=d" (cc)
		: "d" (r0)
		: "cc");
	return cc == 0;
}

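/*
 * Probe the host for the optional facilities and crypto/PLO subfunctions
 * that KVM can offer to guests; the results are later reported to user
 * space via the KVM_S390_VM_CPU_MACHINE* attributes.
 */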
static void kvm_s390_cpu_feat_init(void)
{
	int i;

	for (i = 0; i < 256; ++i) {
		if (plo_test_bit(i))
			kvm_s390_available_subfunc.plo[i >> 3] |= 0x80 >> (i & 7);
	}

	if (test_facility(28)) /* TOD-clock steering */
		etr_ptff(kvm_s390_available_subfunc.ptff, ETR_PTFF_QAF);

	if (test_facility(17)) { /* MSA */
		__cpacf_query(CPACF_KMAC, kvm_s390_available_subfunc.kmac);
		__cpacf_query(CPACF_KMC, kvm_s390_available_subfunc.kmc);
		__cpacf_query(CPACF_KM, kvm_s390_available_subfunc.km);
		__cpacf_query(CPACF_KIMD, kvm_s390_available_subfunc.kimd);
		__cpacf_query(CPACF_KLMD, kvm_s390_available_subfunc.klmd);
	}
	if (test_facility(76)) /* MSA3 */
		__cpacf_query(CPACF_PCKMO, kvm_s390_available_subfunc.pckmo);
	if (test_facility(77)) { /* MSA4 */
		__cpacf_query(CPACF_KMCTR, kvm_s390_available_subfunc.kmctr);
		__cpacf_query(CPACF_KMF, kvm_s390_available_subfunc.kmf);
		__cpacf_query(CPACF_KMO, kvm_s390_available_subfunc.kmo);
		__cpacf_query(CPACF_PCC, kvm_s390_available_subfunc.pcc);
	}
	if (test_facility(57)) /* MSA5 */
		__cpacf_query(CPACF_PPNO, kvm_s390_available_subfunc.ppno);

	if (MACHINE_HAS_ESOP)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_ESOP);
	/*
	 * We need SIE support, ESOP (PROT_READ protection for gmap_shadow),
	 * 64bit SCAO (SCA passthrough) and IDTE (for gmap_shadow unshadowing).
	 */
	if (!sclp.has_sief2 || !MACHINE_HAS_ESOP || !sclp.has_64bscao ||
	    !test_facility(3))
		return;
	allow_cpu_feat(KVM_S390_VM_CPU_FEAT_SIEF2);
	if (sclp.has_64bscao)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_64BSCAO);
}

int kvm_arch_init(void *opaque)
{
	kvm_s390_dbf = debug_register("kvm-trace", 32, 1, 7 * sizeof(long));
	if (!kvm_s390_dbf)
		return -ENOMEM;

	if (debug_register_view(kvm_s390_dbf, &debug_sprintf_view)) {
		debug_unregister(kvm_s390_dbf);
		return -ENOMEM;
	}

	kvm_s390_cpu_feat_init();

	/* Register floating interrupt controller interface. */
	return kvm_register_device_ops(&kvm_flic_ops, KVM_DEV_TYPE_FLIC);
}

void kvm_arch_exit(void)
{
	debug_unregister(kvm_s390_dbf);
}

/* Section: device related */
long kvm_arch_dev_ioctl(struct file *filp,
			unsigned int ioctl, unsigned long arg)
{
	if (ioctl == KVM_S390_ENABLE_SIE)
		return s390_enable_sie();
	return -EINVAL;
}

int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
{
	int r;

	switch (ext) {
	case KVM_CAP_S390_PSW:
	case KVM_CAP_S390_GMAP:
	case KVM_CAP_SYNC_MMU:
#ifdef CONFIG_KVM_S390_UCONTROL
	case KVM_CAP_S390_UCONTROL:
#endif
	case KVM_CAP_ASYNC_PF:
	case KVM_CAP_SYNC_REGS:
	case KVM_CAP_ONE_REG:
	case KVM_CAP_ENABLE_CAP:
	case KVM_CAP_S390_CSS_SUPPORT:
	case KVM_CAP_IOEVENTFD:
	case KVM_CAP_DEVICE_CTRL:
	case KVM_CAP_ENABLE_CAP_VM:
	case KVM_CAP_S390_IRQCHIP:
	case KVM_CAP_VM_ATTRIBUTES:
	case KVM_CAP_MP_STATE:
	case KVM_CAP_S390_INJECT_IRQ:
	case KVM_CAP_S390_USER_SIGP:
	case KVM_CAP_S390_USER_STSI:
	case KVM_CAP_S390_SKEYS:
	case KVM_CAP_S390_IRQ_STATE:
		r = 1;
		break;
	case KVM_CAP_S390_MEM_OP:
		r = MEM_OP_MAX_SIZE;
		break;
	case KVM_CAP_NR_VCPUS:
	case KVM_CAP_MAX_VCPUS:
		r = KVM_S390_BSCA_CPU_SLOTS;
		if (sclp.has_esca && sclp.has_64bscao)
			r = KVM_S390_ESCA_CPU_SLOTS;
		break;
	case KVM_CAP_NR_MEMSLOTS:
		r = KVM_USER_MEM_SLOTS;
		break;
	case KVM_CAP_S390_COW:
		r = MACHINE_HAS_ESOP;
		break;
	case KVM_CAP_S390_VECTOR_REGISTERS:
		r = MACHINE_HAS_VX;
		break;
	case KVM_CAP_S390_RI:
		r = test_facility(64);
		break;
	default:
		r = 0;
	}
	return r;
}

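/*
 * Transfer the dirty state of guest pages, tracked by the gmap in the
 * host page tables, into the memslot's dirty bitmap so that
 * KVM_GET_DIRTY_LOG can report it.
 */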
static void kvm_s390_sync_dirty_log(struct kvm *kvm,
				    struct kvm_memory_slot *memslot)
{
	gfn_t cur_gfn, last_gfn;
	unsigned long address;
	struct gmap *gmap = kvm->arch.gmap;

	/* Loop over all guest pages */
	last_gfn = memslot->base_gfn + memslot->npages;
	for (cur_gfn = memslot->base_gfn; cur_gfn <= last_gfn; cur_gfn++) {
		address = gfn_to_hva_memslot(memslot, cur_gfn);

		if (test_and_clear_guest_dirty(gmap->mm, address))
			mark_page_dirty(kvm, cur_gfn);
		if (fatal_signal_pending(current))
			return;
		cond_resched();
	}
}

/* Section: vm related */
static void sca_del_vcpu(struct kvm_vcpu *vcpu);

/*
 * Get (and clear) the dirty memory log for a memory slot.
 */
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
			       struct kvm_dirty_log *log)
{
	int r;
	unsigned long n;
	struct kvm_memslots *slots;
	struct kvm_memory_slot *memslot;
	int is_dirty = 0;

	mutex_lock(&kvm->slots_lock);

	r = -EINVAL;
	if (log->slot >= KVM_USER_MEM_SLOTS)
		goto out;

	slots = kvm_memslots(kvm);
	memslot = id_to_memslot(slots, log->slot);
	r = -ENOENT;
	if (!memslot->dirty_bitmap)
		goto out;

	kvm_s390_sync_dirty_log(kvm, memslot);
	r = kvm_get_dirty_log(kvm, log, &is_dirty);
	if (r)
		goto out;

	/* Clear the dirty log */
	if (is_dirty) {
		n = kvm_dirty_bitmap_bytes(memslot);
		memset(memslot->dirty_bitmap, 0, n);
	}
	r = 0;
out:
	mutex_unlock(&kvm->slots_lock);
	return r;
}

static int kvm_vm_ioctl_enable_cap(struct kvm *kvm, struct kvm_enable_cap *cap)
{
	int r;

	if (cap->flags)
		return -EINVAL;

	switch (cap->cap) {
	case KVM_CAP_S390_IRQCHIP:
		VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_IRQCHIP");
		kvm->arch.use_irqchip = 1;
		r = 0;
		break;
	case KVM_CAP_S390_USER_SIGP:
		VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_USER_SIGP");
		kvm->arch.user_sigp = 1;
		r = 0;
		break;
	case KVM_CAP_S390_VECTOR_REGISTERS:
		mutex_lock(&kvm->lock);
		if (kvm->created_vcpus) {
			r = -EBUSY;
		} else if (MACHINE_HAS_VX) {
			set_kvm_facility(kvm->arch.model.fac_mask, 129);
			set_kvm_facility(kvm->arch.model.fac_list, 129);
			r = 0;
		} else
			r = -EINVAL;
		mutex_unlock(&kvm->lock);
		VM_EVENT(kvm, 3, "ENABLE: CAP_S390_VECTOR_REGISTERS %s",
			 r ? "(not available)" : "(success)");
		break;
	case KVM_CAP_S390_RI:
		r = -EINVAL;
		mutex_lock(&kvm->lock);
		if (kvm->created_vcpus) {
			r = -EBUSY;
		} else if (test_facility(64)) {
			set_kvm_facility(kvm->arch.model.fac_mask, 64);
			set_kvm_facility(kvm->arch.model.fac_list, 64);
			r = 0;
		}
		mutex_unlock(&kvm->lock);
		VM_EVENT(kvm, 3, "ENABLE: CAP_S390_RI %s",
			 r ? "(not available)" : "(success)");
		break;
	case KVM_CAP_S390_USER_STSI:
		VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_USER_STSI");
		kvm->arch.user_stsi = 1;
		r = 0;
		break;
	default:
		r = -EINVAL;
		break;
	}
	return r;
}

static int kvm_s390_get_mem_control(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	switch (attr->attr) {
	case KVM_S390_VM_MEM_LIMIT_SIZE:
		ret = 0;
		VM_EVENT(kvm, 3, "QUERY: max guest memory: %lu bytes",
			 kvm->arch.mem_limit);
		if (put_user(kvm->arch.mem_limit, (u64 __user *)attr->addr))
			ret = -EFAULT;
		break;
	default:
		ret = -ENXIO;
		break;
	}
	return ret;
}

static int kvm_s390_set_mem_control(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;
	unsigned int idx;

	switch (attr->attr) {
	case KVM_S390_VM_MEM_ENABLE_CMMA:
		ret = -ENXIO;
		if (!sclp.has_cmma)
			break;

		ret = -EBUSY;
		VM_EVENT(kvm, 3, "%s", "ENABLE: CMMA support");
		mutex_lock(&kvm->lock);
		if (!kvm->created_vcpus) {
			kvm->arch.use_cmma = 1;
			ret = 0;
		}
		mutex_unlock(&kvm->lock);
		break;
	case KVM_S390_VM_MEM_CLR_CMMA:
		ret = -ENXIO;
		if (!sclp.has_cmma)
			break;
		ret = -EINVAL;
		if (!kvm->arch.use_cmma)
			break;

		VM_EVENT(kvm, 3, "%s", "RESET: CMMA states");
		mutex_lock(&kvm->lock);
		idx = srcu_read_lock(&kvm->srcu);
		s390_reset_cmma(kvm->arch.gmap->mm);
		srcu_read_unlock(&kvm->srcu, idx);
		mutex_unlock(&kvm->lock);
		ret = 0;
		break;
	case KVM_S390_VM_MEM_LIMIT_SIZE: {
		unsigned long new_limit;

		if (kvm_is_ucontrol(kvm))
			return -EINVAL;

		if (get_user(new_limit, (u64 __user *)attr->addr))
			return -EFAULT;

		if (kvm->arch.mem_limit != KVM_S390_NO_MEM_LIMIT &&
		    new_limit > kvm->arch.mem_limit)
			return -E2BIG;

		if (!new_limit)
			return -EINVAL;

		/* gmap_create takes last usable address */
		if (new_limit != KVM_S390_NO_MEM_LIMIT)
			new_limit -= 1;

		ret = -EBUSY;
		mutex_lock(&kvm->lock);
		if (!kvm->created_vcpus) {
			/* gmap_create will round the limit up */
			struct gmap *new = gmap_create(current->mm, new_limit);

			if (!new) {
				ret = -ENOMEM;
			} else {
				gmap_remove(kvm->arch.gmap);
				new->private = kvm;
				kvm->arch.gmap = new;
				ret = 0;
			}
		}
		mutex_unlock(&kvm->lock);
		VM_EVENT(kvm, 3, "SET: max guest address: %lu", new_limit);
		VM_EVENT(kvm, 3, "New guest asce: 0x%pK",
			 (void *) kvm->arch.gmap->asce);
		break;
	}
	default:
		ret = -ENXIO;
		break;
	}
	return ret;
}

static void kvm_s390_vcpu_crypto_setup(struct kvm_vcpu *vcpu);

static int kvm_s390_vm_set_crypto(struct kvm *kvm, struct kvm_device_attr *attr)
{
	struct kvm_vcpu *vcpu;
	int i;

	if (!test_kvm_facility(kvm, 76))
		return -EINVAL;

	mutex_lock(&kvm->lock);
	switch (attr->attr) {
	case KVM_S390_VM_CRYPTO_ENABLE_AES_KW:
		get_random_bytes(
			kvm->arch.crypto.crycb->aes_wrapping_key_mask,
			sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
		kvm->arch.crypto.aes_kw = 1;
		VM_EVENT(kvm, 3, "%s", "ENABLE: AES keywrapping support");
		break;
	case KVM_S390_VM_CRYPTO_ENABLE_DEA_KW:
		get_random_bytes(
			kvm->arch.crypto.crycb->dea_wrapping_key_mask,
			sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));
		kvm->arch.crypto.dea_kw = 1;
		VM_EVENT(kvm, 3, "%s", "ENABLE: DEA keywrapping support");
		break;
	case KVM_S390_VM_CRYPTO_DISABLE_AES_KW:
		kvm->arch.crypto.aes_kw = 0;
		memset(kvm->arch.crypto.crycb->aes_wrapping_key_mask, 0,
			sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
		VM_EVENT(kvm, 3, "%s", "DISABLE: AES keywrapping support");
		break;
	case KVM_S390_VM_CRYPTO_DISABLE_DEA_KW:
		kvm->arch.crypto.dea_kw = 0;
		memset(kvm->arch.crypto.crycb->dea_wrapping_key_mask, 0,
			sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));
		VM_EVENT(kvm, 3, "%s", "DISABLE: DEA keywrapping support");
		break;
	default:
		mutex_unlock(&kvm->lock);
		return -ENXIO;
	}

	kvm_for_each_vcpu(i, vcpu, kvm) {
		kvm_s390_vcpu_crypto_setup(vcpu);
		exit_sie(vcpu);
	}
	mutex_unlock(&kvm->lock);
	return 0;
}

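/*
 * The guest TOD clock is exposed as the KVM_S390_VM_TOD attribute group.
 * A hypothetical user-space sketch (names taken from the uapi headers):
 *
 *	struct kvm_device_attr attr = {
 *		.group = KVM_S390_VM_TOD,
 *		.attr  = KVM_S390_VM_TOD_LOW,
 *		.addr  = (__u64) &tod,
 *	};
 *	ioctl(vm_fd, KVM_SET_DEVICE_ATTR, &attr);
 */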
static int kvm_s390_set_tod_high(struct kvm *kvm, struct kvm_device_attr *attr)
{
	u8 gtod_high;

	if (copy_from_user(&gtod_high, (void __user *)attr->addr,
			   sizeof(gtod_high)))
		return -EFAULT;

	if (gtod_high != 0)
		return -EINVAL;
	VM_EVENT(kvm, 3, "SET: TOD extension: 0x%x", gtod_high);

	return 0;
}

static int kvm_s390_set_tod_low(struct kvm *kvm, struct kvm_device_attr *attr)
{
	u64 gtod;

	if (copy_from_user(&gtod, (void __user *)attr->addr, sizeof(gtod)))
		return -EFAULT;

	kvm_s390_set_tod_clock(kvm, gtod);
	VM_EVENT(kvm, 3, "SET: TOD base: 0x%llx", gtod);
	return 0;
}

static int kvm_s390_set_tod(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	if (attr->flags)
		return -EINVAL;

	switch (attr->attr) {
	case KVM_S390_VM_TOD_HIGH:
		ret = kvm_s390_set_tod_high(kvm, attr);
		break;
	case KVM_S390_VM_TOD_LOW:
		ret = kvm_s390_set_tod_low(kvm, attr);
		break;
	default:
		ret = -ENXIO;
		break;
	}
	return ret;
}

static int kvm_s390_get_tod_high(struct kvm *kvm, struct kvm_device_attr *attr)
{
	u8 gtod_high = 0;

	if (copy_to_user((void __user *)attr->addr, &gtod_high,
			 sizeof(gtod_high)))
		return -EFAULT;
	VM_EVENT(kvm, 3, "QUERY: TOD extension: 0x%x", gtod_high);

	return 0;
}

static int kvm_s390_get_tod_low(struct kvm *kvm, struct kvm_device_attr *attr)
{
	u64 gtod;

	gtod = kvm_s390_get_tod_clock_fast(kvm);
	if (copy_to_user((void __user *)attr->addr, &gtod, sizeof(gtod)))
		return -EFAULT;
	VM_EVENT(kvm, 3, "QUERY: TOD base: 0x%llx", gtod);

	return 0;
}

static int kvm_s390_get_tod(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	if (attr->flags)
		return -EINVAL;

	switch (attr->attr) {
	case KVM_S390_VM_TOD_HIGH:
		ret = kvm_s390_get_tod_high(kvm, attr);
		break;
	case KVM_S390_VM_TOD_LOW:
		ret = kvm_s390_get_tod_low(kvm, attr);
		break;
	default:
		ret = -ENXIO;
		break;
	}
	return ret;
}

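/*
 * The requested instruction blocking control (IBC) is clamped to the
 * host's supported range: values above the unblocked IBC are lowered to
 * it, values below the lowest IBC are raised to it.
 */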
static int kvm_s390_set_processor(struct kvm *kvm, struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_cpu_processor *proc;
	u16 lowest_ibc, unblocked_ibc;
	int ret = 0;

	mutex_lock(&kvm->lock);
	if (kvm->created_vcpus) {
		ret = -EBUSY;
		goto out;
	}
	proc = kzalloc(sizeof(*proc), GFP_KERNEL);
	if (!proc) {
		ret = -ENOMEM;
		goto out;
	}
	if (!copy_from_user(proc, (void __user *)attr->addr,
			    sizeof(*proc))) {
		kvm->arch.model.cpuid = proc->cpuid;
		lowest_ibc = sclp.ibc >> 16 & 0xfff;
		unblocked_ibc = sclp.ibc & 0xfff;
		if (lowest_ibc) {
			if (proc->ibc > unblocked_ibc)
				kvm->arch.model.ibc = unblocked_ibc;
			else if (proc->ibc < lowest_ibc)
				kvm->arch.model.ibc = lowest_ibc;
			else
				kvm->arch.model.ibc = proc->ibc;
		}
		memcpy(kvm->arch.model.fac_list, proc->fac_list,
		       S390_ARCH_FAC_LIST_SIZE_BYTE);
	} else
		ret = -EFAULT;
	kfree(proc);
out:
	mutex_unlock(&kvm->lock);
	return ret;
}

static int kvm_s390_set_processor_feat(struct kvm *kvm,
				       struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_cpu_feat data;
	int ret = -EBUSY;

	if (copy_from_user(&data, (void __user *)attr->addr, sizeof(data)))
		return -EFAULT;
	if (!bitmap_subset((unsigned long *) data.feat,
			   kvm_s390_available_cpu_feat,
			   KVM_S390_VM_CPU_FEAT_NR_BITS))
		return -EINVAL;

	mutex_lock(&kvm->lock);
	if (!atomic_read(&kvm->online_vcpus)) {
		bitmap_copy(kvm->arch.cpu_feat, (unsigned long *) data.feat,
			    KVM_S390_VM_CPU_FEAT_NR_BITS);
		ret = 0;
	}
	mutex_unlock(&kvm->lock);
	return ret;
}

static int kvm_s390_set_processor_subfunc(struct kvm *kvm,
					  struct kvm_device_attr *attr)
{
	/*
	 * Once supported by kernel + hw, we have to store the subfunctions
	 * in kvm->arch and remember that user space configured them.
	 */
	return -ENXIO;
}

static int kvm_s390_set_cpu_model(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret = -ENXIO;

	switch (attr->attr) {
	case KVM_S390_VM_CPU_PROCESSOR:
		ret = kvm_s390_set_processor(kvm, attr);
		break;
	case KVM_S390_VM_CPU_PROCESSOR_FEAT:
		ret = kvm_s390_set_processor_feat(kvm, attr);
		break;
	case KVM_S390_VM_CPU_PROCESSOR_SUBFUNC:
		ret = kvm_s390_set_processor_subfunc(kvm, attr);
		break;
	}
	return ret;
}

static int kvm_s390_get_processor(struct kvm *kvm, struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_cpu_processor *proc;
	int ret = 0;

	proc = kzalloc(sizeof(*proc), GFP_KERNEL);
	if (!proc) {
		ret = -ENOMEM;
		goto out;
	}
	proc->cpuid = kvm->arch.model.cpuid;
	proc->ibc = kvm->arch.model.ibc;
	memcpy(&proc->fac_list, kvm->arch.model.fac_list,
	       S390_ARCH_FAC_LIST_SIZE_BYTE);
	if (copy_to_user((void __user *)attr->addr, proc, sizeof(*proc)))
		ret = -EFAULT;
	kfree(proc);
out:
	return ret;
}

static int kvm_s390_get_machine(struct kvm *kvm, struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_cpu_machine *mach;
	int ret = 0;

	mach = kzalloc(sizeof(*mach), GFP_KERNEL);
	if (!mach) {
		ret = -ENOMEM;
		goto out;
	}
	get_cpu_id((struct cpuid *) &mach->cpuid);
	mach->ibc = sclp.ibc;
	memcpy(&mach->fac_mask, kvm->arch.model.fac_mask,
	       S390_ARCH_FAC_LIST_SIZE_BYTE);
	memcpy((unsigned long *)&mach->fac_list, S390_lowcore.stfle_fac_list,
	       S390_ARCH_FAC_LIST_SIZE_BYTE);
	if (copy_to_user((void __user *)attr->addr, mach, sizeof(*mach)))
		ret = -EFAULT;
	kfree(mach);
out:
	return ret;
}

static int kvm_s390_get_processor_feat(struct kvm *kvm,
				       struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_cpu_feat data;

	bitmap_copy((unsigned long *) data.feat, kvm->arch.cpu_feat,
		    KVM_S390_VM_CPU_FEAT_NR_BITS);
	if (copy_to_user((void __user *)attr->addr, &data, sizeof(data)))
		return -EFAULT;
	return 0;
}

static int kvm_s390_get_machine_feat(struct kvm *kvm,
				     struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_cpu_feat data;

	bitmap_copy((unsigned long *) data.feat,
		    kvm_s390_available_cpu_feat,
		    KVM_S390_VM_CPU_FEAT_NR_BITS);
	if (copy_to_user((void __user *)attr->addr, &data, sizeof(data)))
		return -EFAULT;
	return 0;
}

static int kvm_s390_get_processor_subfunc(struct kvm *kvm,
					  struct kvm_device_attr *attr)
{
	/*
	 * Once we can actually configure subfunctions (kernel + hw support),
	 * we have to check if they were already set by user space, if so copy
	 * them from kvm->arch.
	 */
	return -ENXIO;
}

static int kvm_s390_get_machine_subfunc(struct kvm *kvm,
					struct kvm_device_attr *attr)
{
	if (copy_to_user((void __user *)attr->addr, &kvm_s390_available_subfunc,
			 sizeof(struct kvm_s390_vm_cpu_subfunc)))
		return -EFAULT;
	return 0;
}

static int kvm_s390_get_cpu_model(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret = -ENXIO;

	switch (attr->attr) {
	case KVM_S390_VM_CPU_PROCESSOR:
		ret = kvm_s390_get_processor(kvm, attr);
		break;
	case KVM_S390_VM_CPU_MACHINE:
		ret = kvm_s390_get_machine(kvm, attr);
		break;
	case KVM_S390_VM_CPU_PROCESSOR_FEAT:
		ret = kvm_s390_get_processor_feat(kvm, attr);
		break;
	case KVM_S390_VM_CPU_MACHINE_FEAT:
		ret = kvm_s390_get_machine_feat(kvm, attr);
		break;
	case KVM_S390_VM_CPU_PROCESSOR_SUBFUNC:
		ret = kvm_s390_get_processor_subfunc(kvm, attr);
		break;
	case KVM_S390_VM_CPU_MACHINE_SUBFUNC:
		ret = kvm_s390_get_machine_subfunc(kvm, attr);
		break;
	}
	return ret;
}

static int kvm_s390_vm_set_attr(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	switch (attr->group) {
	case KVM_S390_VM_MEM_CTRL:
		ret = kvm_s390_set_mem_control(kvm, attr);
		break;
	case KVM_S390_VM_TOD:
		ret = kvm_s390_set_tod(kvm, attr);
		break;
	case KVM_S390_VM_CPU_MODEL:
		ret = kvm_s390_set_cpu_model(kvm, attr);
		break;
	case KVM_S390_VM_CRYPTO:
		ret = kvm_s390_vm_set_crypto(kvm, attr);
		break;
	default:
		ret = -ENXIO;
		break;
	}

	return ret;
}

static int kvm_s390_vm_get_attr(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	switch (attr->group) {
	case KVM_S390_VM_MEM_CTRL:
		ret = kvm_s390_get_mem_control(kvm, attr);
		break;
	case KVM_S390_VM_TOD:
		ret = kvm_s390_get_tod(kvm, attr);
		break;
	case KVM_S390_VM_CPU_MODEL:
		ret = kvm_s390_get_cpu_model(kvm, attr);
		break;
	default:
		ret = -ENXIO;
		break;
	}

	return ret;
}

static int kvm_s390_vm_has_attr(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	switch (attr->group) {
	case KVM_S390_VM_MEM_CTRL:
		switch (attr->attr) {
		case KVM_S390_VM_MEM_ENABLE_CMMA:
		case KVM_S390_VM_MEM_CLR_CMMA:
			ret = sclp.has_cmma ? 0 : -ENXIO;
			break;
		case KVM_S390_VM_MEM_LIMIT_SIZE:
			ret = 0;
			break;
		default:
			ret = -ENXIO;
			break;
		}
		break;
	case KVM_S390_VM_TOD:
		switch (attr->attr) {
		case KVM_S390_VM_TOD_LOW:
		case KVM_S390_VM_TOD_HIGH:
			ret = 0;
			break;
		default:
			ret = -ENXIO;
			break;
		}
		break;
	case KVM_S390_VM_CPU_MODEL:
		switch (attr->attr) {
		case KVM_S390_VM_CPU_PROCESSOR:
		case KVM_S390_VM_CPU_MACHINE:
		case KVM_S390_VM_CPU_PROCESSOR_FEAT:
		case KVM_S390_VM_CPU_MACHINE_FEAT:
		case KVM_S390_VM_CPU_MACHINE_SUBFUNC:
			ret = 0;
			break;
		/* configuring subfunctions is not supported yet */
		case KVM_S390_VM_CPU_PROCESSOR_SUBFUNC:
		default:
			ret = -ENXIO;
			break;
		}
		break;
	case KVM_S390_VM_CRYPTO:
		switch (attr->attr) {
		case KVM_S390_VM_CRYPTO_ENABLE_AES_KW:
		case KVM_S390_VM_CRYPTO_ENABLE_DEA_KW:
		case KVM_S390_VM_CRYPTO_DISABLE_AES_KW:
		case KVM_S390_VM_CRYPTO_DISABLE_DEA_KW:
			ret = 0;
			break;
		default:
			ret = -ENXIO;
			break;
		}
		break;
	default:
		ret = -ENXIO;
		break;
	}

	return ret;
}

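/*
 * Read the guest's storage keys into a user-supplied buffer. A
 * hypothetical user-space sketch (names taken from the uapi headers):
 *
 *	struct kvm_s390_skeys args = {
 *		.start_gfn = 0,
 *		.count = npages,
 *		.skeydata_addr = (__u64) buf,
 *	};
 *	ioctl(vm_fd, KVM_S390_GET_SKEYS, &args);
 *
 * Returns KVM_S390_GET_SKEYS_NONE when the guest does not use storage
 * keys at all.
 */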
static long kvm_s390_get_skeys(struct kvm *kvm, struct kvm_s390_skeys *args)
{
	uint8_t *keys;
	uint64_t hva;
	int i, r = 0;

	if (args->flags != 0)
		return -EINVAL;

	/* Is this guest using storage keys? */
	if (!mm_use_skey(current->mm))
		return KVM_S390_GET_SKEYS_NONE;

	/* Enforce sane limit on memory allocation */
	if (args->count < 1 || args->count > KVM_S390_SKEYS_MAX)
		return -EINVAL;

	keys = kmalloc_array(args->count, sizeof(uint8_t),
			     GFP_KERNEL | __GFP_NOWARN);
	if (!keys)
		keys = vmalloc(sizeof(uint8_t) * args->count);
	if (!keys)
		return -ENOMEM;

	down_read(&current->mm->mmap_sem);
	for (i = 0; i < args->count; i++) {
		hva = gfn_to_hva(kvm, args->start_gfn + i);
		if (kvm_is_error_hva(hva)) {
			r = -EFAULT;
			break;
		}

		r = get_guest_storage_key(current->mm, hva, &keys[i]);
		if (r)
			break;
	}
	up_read(&current->mm->mmap_sem);

	if (!r) {
		r = copy_to_user((uint8_t __user *)args->skeydata_addr, keys,
				 sizeof(uint8_t) * args->count);
		if (r)
			r = -EFAULT;
	}

	kvfree(keys);
	return r;
}

static long kvm_s390_set_skeys(struct kvm *kvm, struct kvm_s390_skeys *args)
{
	uint8_t *keys;
	uint64_t hva;
	int i, r = 0;

	if (args->flags != 0)
		return -EINVAL;

	/* Enforce sane limit on memory allocation */
	if (args->count < 1 || args->count > KVM_S390_SKEYS_MAX)
		return -EINVAL;

	keys = kmalloc_array(args->count, sizeof(uint8_t),
			     GFP_KERNEL | __GFP_NOWARN);
	if (!keys)
		keys = vmalloc(sizeof(uint8_t) * args->count);
	if (!keys)
		return -ENOMEM;

	r = copy_from_user(keys, (uint8_t __user *)args->skeydata_addr,
			   sizeof(uint8_t) * args->count);
	if (r) {
		r = -EFAULT;
		goto out;
	}

	/* Enable storage key handling for the guest */
	r = s390_enable_skey();
	if (r)
		goto out;

	down_read(&current->mm->mmap_sem);
	for (i = 0; i < args->count; i++) {
		hva = gfn_to_hva(kvm, args->start_gfn + i);
		if (kvm_is_error_hva(hva)) {
			r = -EFAULT;
			break;
		}

		/* Lowest order bit is reserved */
		if (keys[i] & 0x01) {
			r = -EINVAL;
			break;
		}

		r = set_guest_storage_key(current->mm, hva, keys[i], 0);
		if (r)
			break;
	}
	up_read(&current->mm->mmap_sem);
out:
	kvfree(keys);
	return r;
}

long kvm_arch_vm_ioctl(struct file *filp,
		       unsigned int ioctl, unsigned long arg)
{
	struct kvm *kvm = filp->private_data;
	void __user *argp = (void __user *)arg;
	struct kvm_device_attr attr;
	int r;

	switch (ioctl) {
	case KVM_S390_INTERRUPT: {
		struct kvm_s390_interrupt s390int;

		r = -EFAULT;
		if (copy_from_user(&s390int, argp, sizeof(s390int)))
			break;
		r = kvm_s390_inject_vm(kvm, &s390int);
		break;
	}
	case KVM_ENABLE_CAP: {
		struct kvm_enable_cap cap;

		r = -EFAULT;
		if (copy_from_user(&cap, argp, sizeof(cap)))
			break;
		r = kvm_vm_ioctl_enable_cap(kvm, &cap);
		break;
	}
	case KVM_CREATE_IRQCHIP: {
		struct kvm_irq_routing_entry routing;

		r = -EINVAL;
		if (kvm->arch.use_irqchip) {
			/* Set up dummy routing. */
			memset(&routing, 0, sizeof(routing));
			r = kvm_set_irq_routing(kvm, &routing, 0, 0);
		}
		break;
	}
	case KVM_SET_DEVICE_ATTR: {
		r = -EFAULT;
		if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
			break;
		r = kvm_s390_vm_set_attr(kvm, &attr);
		break;
	}
	case KVM_GET_DEVICE_ATTR: {
		r = -EFAULT;
		if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
			break;
		r = kvm_s390_vm_get_attr(kvm, &attr);
		break;
	}
	case KVM_HAS_DEVICE_ATTR: {
		r = -EFAULT;
		if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
			break;
		r = kvm_s390_vm_has_attr(kvm, &attr);
		break;
	}
	case KVM_S390_GET_SKEYS: {
		struct kvm_s390_skeys args;

		r = -EFAULT;
		if (copy_from_user(&args, argp,
				   sizeof(struct kvm_s390_skeys)))
			break;
		r = kvm_s390_get_skeys(kvm, &args);
		break;
	}
	case KVM_S390_SET_SKEYS: {
		struct kvm_s390_skeys args;

		r = -EFAULT;
		if (copy_from_user(&args, argp,
				   sizeof(struct kvm_s390_skeys)))
			break;
		r = kvm_s390_set_skeys(kvm, &args);
		break;
	}
	default:
		r = -ENOTTY;
	}

	return r;
}

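/*
 * Query the adjunct processor (AP) configuration via PQAP(QCI). The
 * EX_TABLE entry makes the probe safe on machines without AP
 * instructions: on an exception the zeroed config simply reports no
 * capabilities.
 */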
static int kvm_s390_query_ap_config(u8 *config)
{
	u32 fcn_code = 0x04000000UL;
	u32 cc = 0;

	memset(config, 0, 128);
	asm volatile(
		"lgr 0,%1\n"
		"lgr 2,%2\n"
		".long 0xb2af0000\n"		/* PQAP(QCI) */
		"0: ipm %0\n"
		"srl %0,28\n"
		"1:\n"
		EX_TABLE(0b, 1b)
		: "+r" (cc)
		: "r" (fcn_code), "r" (config)
		: "cc", "0", "2", "memory"
	);

	return cc;
}

static int kvm_s390_apxa_installed(void)
{
	u8 config[128];
	int cc;

	if (test_facility(12)) {
		cc = kvm_s390_query_ap_config(config);

		if (cc)
			pr_err("PQAP(QCI) failed with cc=%d", cc);
		else
			return config[0] & 0x40;
	}

	return 0;
}

static void kvm_s390_set_crycb_format(struct kvm *kvm)
{
	kvm->arch.crypto.crycbd = (__u32)(unsigned long) kvm->arch.crypto.crycb;

	if (kvm_s390_apxa_installed())
		kvm->arch.crypto.crycbd |= CRYCB_FORMAT2;
	else
		kvm->arch.crypto.crycbd |= CRYCB_FORMAT1;
}

static u64 kvm_s390_get_initial_cpuid(void)
{
	struct cpuid cpuid;

	get_cpu_id(&cpuid);
	cpuid.version = 0xff;
	return *((u64 *) &cpuid);
}

static void kvm_s390_crypto_init(struct kvm *kvm)
{
	if (!test_kvm_facility(kvm, 76))
		return;

	kvm->arch.crypto.crycb = &kvm->arch.sie_page2->crycb;
	kvm_s390_set_crycb_format(kvm);

	/* Enable AES/DEA protected key functions by default */
	kvm->arch.crypto.aes_kw = 1;
	kvm->arch.crypto.dea_kw = 1;
	get_random_bytes(kvm->arch.crypto.crycb->aes_wrapping_key_mask,
			 sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
	get_random_bytes(kvm->arch.crypto.crycb->dea_wrapping_key_mask,
			 sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));
}

static void sca_dispose(struct kvm *kvm)
{
	if (kvm->arch.use_esca)
		free_pages_exact(kvm->arch.sca, sizeof(struct esca_block));
	else
		free_page((unsigned long)(kvm->arch.sca));
	kvm->arch.sca = NULL;
}

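/*
 * VM creation. Note the sca_offset logic below: consecutive VMs get
 * staggered SCA offsets within their page, presumably so that the SCAs
 * of different VMs do not all contend for the same cache lines; the
 * limit check keeps the block inside the page.
 */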
int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
{
	gfp_t alloc_flags = GFP_KERNEL;
	int i, rc;
	char debug_name[16];
	static unsigned long sca_offset;

	rc = -EINVAL;
#ifdef CONFIG_KVM_S390_UCONTROL
	if (type & ~KVM_VM_S390_UCONTROL)
		goto out_err;
	if ((type & KVM_VM_S390_UCONTROL) && (!capable(CAP_SYS_ADMIN)))
		goto out_err;
#else
	if (type)
		goto out_err;
#endif

	rc = s390_enable_sie();
	if (rc)
		goto out_err;

	rc = -ENOMEM;

	ratelimit_state_init(&kvm->arch.sthyi_limit, 5 * HZ, 500);

	kvm->arch.use_esca = 0; /* start with basic SCA */
	if (!sclp.has_64bscao)
		alloc_flags |= GFP_DMA;
	rwlock_init(&kvm->arch.sca_lock);
	kvm->arch.sca = (struct bsca_block *) get_zeroed_page(alloc_flags);
	if (!kvm->arch.sca)
		goto out_err;
	spin_lock(&kvm_lock);
	sca_offset += 16;
	if (sca_offset + sizeof(struct bsca_block) > PAGE_SIZE)
		sca_offset = 0;
	kvm->arch.sca = (struct bsca_block *)
			((char *) kvm->arch.sca + sca_offset);
	spin_unlock(&kvm_lock);

	sprintf(debug_name, "kvm-%u", current->pid);

	kvm->arch.dbf = debug_register(debug_name, 32, 1, 7 * sizeof(long));
	if (!kvm->arch.dbf)
		goto out_err;

	kvm->arch.sie_page2 =
	     (struct sie_page2 *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!kvm->arch.sie_page2)
		goto out_err;

	/* Populate the facility mask initially. */
	memcpy(kvm->arch.model.fac_mask, S390_lowcore.stfle_fac_list,
	       S390_ARCH_FAC_LIST_SIZE_BYTE);
	for (i = 0; i < S390_ARCH_FAC_LIST_SIZE_U64; i++) {
		if (i < kvm_s390_fac_list_mask_size())
			kvm->arch.model.fac_mask[i] &= kvm_s390_fac_list_mask[i];
		else
			kvm->arch.model.fac_mask[i] = 0UL;
	}

	/* Populate the facility list initially. */
	kvm->arch.model.fac_list = kvm->arch.sie_page2->fac_list;
	memcpy(kvm->arch.model.fac_list, kvm->arch.model.fac_mask,
	       S390_ARCH_FAC_LIST_SIZE_BYTE);

	set_kvm_facility(kvm->arch.model.fac_mask, 74);
	set_kvm_facility(kvm->arch.model.fac_list, 74);

	kvm->arch.model.cpuid = kvm_s390_get_initial_cpuid();
	kvm->arch.model.ibc = sclp.ibc & 0x0fff;

	kvm_s390_crypto_init(kvm);

	spin_lock_init(&kvm->arch.float_int.lock);
	for (i = 0; i < FIRQ_LIST_COUNT; i++)
		INIT_LIST_HEAD(&kvm->arch.float_int.lists[i]);
	init_waitqueue_head(&kvm->arch.ipte_wq);
	mutex_init(&kvm->arch.ipte_mutex);

	debug_register_view(kvm->arch.dbf, &debug_sprintf_view);
	VM_EVENT(kvm, 3, "vm created with type %lu", type);

	if (type & KVM_VM_S390_UCONTROL) {
		kvm->arch.gmap = NULL;
		kvm->arch.mem_limit = KVM_S390_NO_MEM_LIMIT;
	} else {
		if (sclp.hamax == U64_MAX)
			kvm->arch.mem_limit = TASK_MAX_SIZE;
		else
			kvm->arch.mem_limit = min_t(unsigned long, TASK_MAX_SIZE,
						    sclp.hamax + 1);
		kvm->arch.gmap = gmap_create(current->mm, kvm->arch.mem_limit - 1);
		if (!kvm->arch.gmap)
			goto out_err;
		kvm->arch.gmap->private = kvm;
		kvm->arch.gmap->pfault_enabled = 0;
	}

	kvm->arch.css_support = 0;
	kvm->arch.use_irqchip = 0;
	kvm->arch.epoch = 0;

	spin_lock_init(&kvm->arch.start_stop_lock);
	kvm_s390_vsie_init(kvm);
	KVM_EVENT(3, "vm 0x%pK created by pid %u", kvm, current->pid);

	return 0;
out_err:
	free_page((unsigned long)kvm->arch.sie_page2);
	debug_unregister(kvm->arch.dbf);
	sca_dispose(kvm);
	KVM_EVENT(3, "creation of vm failed: %d", rc);
	return rc;
}

void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{
	VCPU_EVENT(vcpu, 3, "%s", "free cpu");
	trace_kvm_s390_destroy_vcpu(vcpu->vcpu_id);
	kvm_s390_clear_local_irqs(vcpu);
	kvm_clear_async_pf_completion_queue(vcpu);
	if (!kvm_is_ucontrol(vcpu->kvm))
		sca_del_vcpu(vcpu);

	if (kvm_is_ucontrol(vcpu->kvm))
		gmap_remove(vcpu->arch.gmap);

	if (vcpu->kvm->arch.use_cmma)
		kvm_s390_vcpu_unsetup_cmma(vcpu);
	free_page((unsigned long)(vcpu->arch.sie_block));

	kvm_vcpu_uninit(vcpu);
	kmem_cache_free(kvm_vcpu_cache, vcpu);
}

static void kvm_free_vcpus(struct kvm *kvm)
{
	unsigned int i;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(i, vcpu, kvm)
		kvm_arch_vcpu_destroy(vcpu);

	mutex_lock(&kvm->lock);
	for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
		kvm->vcpus[i] = NULL;

	atomic_set(&kvm->online_vcpus, 0);
	mutex_unlock(&kvm->lock);
}

void kvm_arch_destroy_vm(struct kvm *kvm)
{
	kvm_free_vcpus(kvm);
	sca_dispose(kvm);
	debug_unregister(kvm->arch.dbf);
	free_page((unsigned long)kvm->arch.sie_page2);
	if (!kvm_is_ucontrol(kvm))
		gmap_remove(kvm->arch.gmap);
	kvm_s390_destroy_adapters(kvm);
	kvm_s390_clear_float_irqs(kvm);
	kvm_s390_vsie_destroy(kvm);
	KVM_EVENT(3, "vm 0x%pK destroyed", kvm);
}

/* Section: vcpu related */
static int __kvm_ucontrol_vcpu_init(struct kvm_vcpu *vcpu)
{
	vcpu->arch.gmap = gmap_create(current->mm, -1UL);
	if (!vcpu->arch.gmap)
		return -ENOMEM;
	vcpu->arch.gmap->private = vcpu->kvm;

	return 0;
}

static void sca_del_vcpu(struct kvm_vcpu *vcpu)
{
	read_lock(&vcpu->kvm->arch.sca_lock);
	if (vcpu->kvm->arch.use_esca) {
		struct esca_block *sca = vcpu->kvm->arch.sca;

		clear_bit_inv(vcpu->vcpu_id, (unsigned long *) sca->mcn);
		sca->cpu[vcpu->vcpu_id].sda = 0;
	} else {
		struct bsca_block *sca = vcpu->kvm->arch.sca;

		clear_bit_inv(vcpu->vcpu_id, (unsigned long *) &sca->mcn);
		sca->cpu[vcpu->vcpu_id].sda = 0;
	}
	read_unlock(&vcpu->kvm->arch.sca_lock);
}

static void sca_add_vcpu(struct kvm_vcpu *vcpu)
{
	read_lock(&vcpu->kvm->arch.sca_lock);
	if (vcpu->kvm->arch.use_esca) {
		struct esca_block *sca = vcpu->kvm->arch.sca;

		sca->cpu[vcpu->vcpu_id].sda = (__u64) vcpu->arch.sie_block;
		vcpu->arch.sie_block->scaoh = (__u32)(((__u64)sca) >> 32);
		vcpu->arch.sie_block->scaol = (__u32)(__u64)sca & ~0x3fU;
		vcpu->arch.sie_block->ecb2 |= 0x04U;
		set_bit_inv(vcpu->vcpu_id, (unsigned long *) sca->mcn);
	} else {
		struct bsca_block *sca = vcpu->kvm->arch.sca;

		sca->cpu[vcpu->vcpu_id].sda = (__u64) vcpu->arch.sie_block;
		vcpu->arch.sie_block->scaoh = (__u32)(((__u64)sca) >> 32);
		vcpu->arch.sie_block->scaol = (__u32)(__u64)sca;
		set_bit_inv(vcpu->vcpu_id, (unsigned long *) &sca->mcn);
	}
	read_unlock(&vcpu->kvm->arch.sca_lock);
}

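/*
 * The system control area (SCA) holds the SIE block pointers and
 * machine-check notification bits for all vCPUs of a VM. A basic SCA
 * covers KVM_S390_BSCA_CPU_SLOTS vCPUs; when more are created, the VM
 * is migrated to the larger extended SCA by the routines below.
 */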
/* Basic SCA to Extended SCA data copy routines */
static inline void sca_copy_entry(struct esca_entry *d, struct bsca_entry *s)
{
	d->sda = s->sda;
	d->sigp_ctrl.c = s->sigp_ctrl.c;
	d->sigp_ctrl.scn = s->sigp_ctrl.scn;
}

static void sca_copy_b_to_e(struct esca_block *d, struct bsca_block *s)
{
	int i;

	d->ipte_control = s->ipte_control;
	d->mcn[0] = s->mcn;
	for (i = 0; i < KVM_S390_BSCA_CPU_SLOTS; i++)
		sca_copy_entry(&d->cpu[i], &s->cpu[i]);
}

static int sca_switch_to_extended(struct kvm *kvm)
{
	struct bsca_block *old_sca = kvm->arch.sca;
	struct esca_block *new_sca;
	struct kvm_vcpu *vcpu;
	unsigned int vcpu_idx;
	u32 scaol, scaoh;

	new_sca = alloc_pages_exact(sizeof(*new_sca), GFP_KERNEL|__GFP_ZERO);
	if (!new_sca)
		return -ENOMEM;

	scaoh = (u32)((u64)(new_sca) >> 32);
	scaol = (u32)(u64)(new_sca) & ~0x3fU;

	kvm_s390_vcpu_block_all(kvm);
	write_lock(&kvm->arch.sca_lock);

	sca_copy_b_to_e(new_sca, old_sca);

	kvm_for_each_vcpu(vcpu_idx, vcpu, kvm) {
		vcpu->arch.sie_block->scaoh = scaoh;
		vcpu->arch.sie_block->scaol = scaol;
		vcpu->arch.sie_block->ecb2 |= 0x04U;
	}
	kvm->arch.sca = new_sca;
	kvm->arch.use_esca = 1;

	write_unlock(&kvm->arch.sca_lock);
	kvm_s390_vcpu_unblock_all(kvm);

	free_page((unsigned long)old_sca);

	VM_EVENT(kvm, 2, "Switched to ESCA (0x%pK -> 0x%pK)",
		 old_sca, kvm->arch.sca);
	return 0;
}

static int sca_can_add_vcpu(struct kvm *kvm, unsigned int id)
{
	int rc;

	if (id < KVM_S390_BSCA_CPU_SLOTS)
		return true;
	if (!sclp.has_esca || !sclp.has_64bscao)
		return false;

	mutex_lock(&kvm->lock);
	rc = kvm->arch.use_esca ? 0 : sca_switch_to_extended(kvm);
	mutex_unlock(&kvm->lock);

	return rc == 0 && id < KVM_S390_ESCA_CPU_SLOTS;
}

int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
{
	vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
	kvm_clear_async_pf_completion_queue(vcpu);
	vcpu->run->kvm_valid_regs = KVM_SYNC_PREFIX |
				    KVM_SYNC_GPRS |
				    KVM_SYNC_ACRS |
				    KVM_SYNC_CRS |
				    KVM_SYNC_ARCH0 |
				    KVM_SYNC_PFAULT;
	if (test_kvm_facility(vcpu->kvm, 64))
		vcpu->run->kvm_valid_regs |= KVM_SYNC_RICCB;
	/* fprs can be synchronized via vrs, even if the guest has no vx. With
	 * MACHINE_HAS_VX, (load|store)_fpu_regs() will work with vrs format.
	 */
	if (MACHINE_HAS_VX)
		vcpu->run->kvm_valid_regs |= KVM_SYNC_VRS;
	else
		vcpu->run->kvm_valid_regs |= KVM_SYNC_FPRS;

	if (kvm_is_ucontrol(vcpu->kvm))
		return __kvm_ucontrol_vcpu_init(vcpu);

	return 0;
}

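/*
 * Guest CPU timer accounting: while accounting is enabled, the host TOD
 * delta since cputm_start is lazily subtracted from the SIE block's
 * cputm. A raw seqcount lets other threads read a consistent timer
 * value in kvm_s390_get_cpu_timer() without taking a lock.
 */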
/* needs disabled preemption to protect from TOD sync and vcpu_load/put */
static void __start_cpu_timer_accounting(struct kvm_vcpu *vcpu)
{
	WARN_ON_ONCE(vcpu->arch.cputm_start != 0);
	raw_write_seqcount_begin(&vcpu->arch.cputm_seqcount);
	vcpu->arch.cputm_start = get_tod_clock_fast();
	raw_write_seqcount_end(&vcpu->arch.cputm_seqcount);
}

/* needs disabled preemption to protect from TOD sync and vcpu_load/put */
static void __stop_cpu_timer_accounting(struct kvm_vcpu *vcpu)
{
	WARN_ON_ONCE(vcpu->arch.cputm_start == 0);
	raw_write_seqcount_begin(&vcpu->arch.cputm_seqcount);
	vcpu->arch.sie_block->cputm -= get_tod_clock_fast() - vcpu->arch.cputm_start;
	vcpu->arch.cputm_start = 0;
	raw_write_seqcount_end(&vcpu->arch.cputm_seqcount);
}

/* needs disabled preemption to protect from TOD sync and vcpu_load/put */
static void __enable_cpu_timer_accounting(struct kvm_vcpu *vcpu)
{
	WARN_ON_ONCE(vcpu->arch.cputm_enabled);
	vcpu->arch.cputm_enabled = true;
	__start_cpu_timer_accounting(vcpu);
}

/* needs disabled preemption to protect from TOD sync and vcpu_load/put */
static void __disable_cpu_timer_accounting(struct kvm_vcpu *vcpu)
{
	WARN_ON_ONCE(!vcpu->arch.cputm_enabled);
	__stop_cpu_timer_accounting(vcpu);
	vcpu->arch.cputm_enabled = false;
}

static void enable_cpu_timer_accounting(struct kvm_vcpu *vcpu)
{
	preempt_disable(); /* protect from TOD sync and vcpu_load/put */
	__enable_cpu_timer_accounting(vcpu);
	preempt_enable();
}

static void disable_cpu_timer_accounting(struct kvm_vcpu *vcpu)
{
	preempt_disable(); /* protect from TOD sync and vcpu_load/put */
	__disable_cpu_timer_accounting(vcpu);
	preempt_enable();
}

/* set the cpu timer - may only be called from the VCPU thread itself */
void kvm_s390_set_cpu_timer(struct kvm_vcpu *vcpu, __u64 cputm)
{
	preempt_disable(); /* protect from TOD sync and vcpu_load/put */
	raw_write_seqcount_begin(&vcpu->arch.cputm_seqcount);
	if (vcpu->arch.cputm_enabled)
		vcpu->arch.cputm_start = get_tod_clock_fast();
	vcpu->arch.sie_block->cputm = cputm;
	raw_write_seqcount_end(&vcpu->arch.cputm_seqcount);
	preempt_enable();
}

/* update and get the cpu timer - can also be called from other VCPU threads */
__u64 kvm_s390_get_cpu_timer(struct kvm_vcpu *vcpu)
{
	unsigned int seq;
	__u64 value;

	if (unlikely(!vcpu->arch.cputm_enabled))
		return vcpu->arch.sie_block->cputm;

	preempt_disable(); /* protect from TOD sync and vcpu_load/put */
	do {
		seq = raw_read_seqcount(&vcpu->arch.cputm_seqcount);
		/*
		 * If the writer would ever execute a read in the critical
		 * section, e.g. in irq context, we have a deadlock.
		 */
		WARN_ON_ONCE((seq & 1) && smp_processor_id() == vcpu->cpu);
		value = vcpu->arch.sie_block->cputm;
		/* if cputm_start is 0, accounting is being started/stopped */
		if (likely(vcpu->arch.cputm_start))
			value -= get_tod_clock_fast() - vcpu->arch.cputm_start;
	} while (read_seqcount_retry(&vcpu->arch.cputm_seqcount, seq & ~1));
	preempt_enable();
	return value;
}

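/*
 * vcpu_load/put switch between host and guest register context: the
 * lazy FPU/vector state and the access registers are swapped, and the
 * vCPU's gmap becomes the active guest address space.
 */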
void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	/* Save host register state */
	save_fpu_regs();
	vcpu->arch.host_fpregs.fpc = current->thread.fpu.fpc;
	vcpu->arch.host_fpregs.regs = current->thread.fpu.regs;

	if (MACHINE_HAS_VX)
		current->thread.fpu.regs = vcpu->run->s.regs.vrs;
	else
		current->thread.fpu.regs = vcpu->run->s.regs.fprs;
	current->thread.fpu.fpc = vcpu->run->s.regs.fpc;
	if (test_fp_ctl(current->thread.fpu.fpc))
		/* User space provided an invalid FPC, let's clear it */
		current->thread.fpu.fpc = 0;

	save_access_regs(vcpu->arch.host_acrs);
	restore_access_regs(vcpu->run->s.regs.acrs);
	gmap_enable(vcpu->arch.enabled_gmap);
	atomic_or(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
	if (vcpu->arch.cputm_enabled && !is_vcpu_idle(vcpu))
		__start_cpu_timer_accounting(vcpu);
	vcpu->cpu = cpu;
}

void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
	vcpu->cpu = -1;
	if (vcpu->arch.cputm_enabled && !is_vcpu_idle(vcpu))
		__stop_cpu_timer_accounting(vcpu);
	atomic_andnot(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
	vcpu->arch.enabled_gmap = gmap_get_enabled();
	gmap_disable(vcpu->arch.enabled_gmap);

	/* Save guest register state */
	save_fpu_regs();
	vcpu->run->s.regs.fpc = current->thread.fpu.fpc;

	/* Restore host register state */
	current->thread.fpu.fpc = vcpu->arch.host_fpregs.fpc;
	current->thread.fpu.regs = vcpu->arch.host_fpregs.regs;

	save_access_regs(vcpu->run->s.regs.acrs);
	restore_access_regs(vcpu->arch.host_acrs);
}

1767static void kvm_s390_vcpu_initial_reset(struct kvm_vcpu *vcpu)
1768{
 1769 /* this equals initial cpu reset in POP, but we don't switch to ESA */
1770 vcpu->arch.sie_block->gpsw.mask = 0UL;
1771 vcpu->arch.sie_block->gpsw.addr = 0UL;
8d26cf7b 1772 kvm_s390_set_prefix(vcpu, 0);
4287f247 1773 kvm_s390_set_cpu_timer(vcpu, 0);
b0c632db
HC
1774 vcpu->arch.sie_block->ckc = 0UL;
1775 vcpu->arch.sie_block->todpr = 0;
1776 memset(vcpu->arch.sie_block->gcr, 0, 16 * sizeof(__u64));
1777 vcpu->arch.sie_block->gcr[0] = 0xE0UL;
1778 vcpu->arch.sie_block->gcr[14] = 0xC2000000UL;
9abc2a08
DH
1779 /* make sure the new fpc will be lazily loaded */
1780 save_fpu_regs();
1781 current->thread.fpu.fpc = 0;
b0c632db 1782 vcpu->arch.sie_block->gbea = 1;
672550fb 1783 vcpu->arch.sie_block->pp = 0;
3c038e6b
DD
1784 vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
1785 kvm_clear_async_pf_completion_queue(vcpu);
6352e4d2
DH
1786 if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm))
1787 kvm_s390_vcpu_stop(vcpu);
2ed10cc1 1788 kvm_s390_clear_local_irqs(vcpu);
b0c632db
HC
1789}
1790
31928aa5 1791void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
42897d86 1792{
72f25020 1793 mutex_lock(&vcpu->kvm->lock);
fdf03650 1794 preempt_disable();
72f25020 1795 vcpu->arch.sie_block->epoch = vcpu->kvm->arch.epoch;
fdf03650 1796 preempt_enable();
72f25020 1797 mutex_unlock(&vcpu->kvm->lock);
25508824 1798 if (!kvm_is_ucontrol(vcpu->kvm)) {
dafd032a 1799 vcpu->arch.gmap = vcpu->kvm->arch.gmap;
eaa78f34 1800 sca_add_vcpu(vcpu);
25508824 1801 }
37d9df98
DH
1802 /* make vcpu_load load the right gmap on the first trigger */
1803 vcpu->arch.enabled_gmap = vcpu->arch.gmap;
42897d86
MT
1804}
1805
5102ee87
TK
1806static void kvm_s390_vcpu_crypto_setup(struct kvm_vcpu *vcpu)
1807{
9d8d5786 1808 if (!test_kvm_facility(vcpu->kvm, 76))
5102ee87
TK
1809 return;
1810
a374e892
TK
1811 vcpu->arch.sie_block->ecb3 &= ~(ECB3_AES | ECB3_DEA);
1812
1813 if (vcpu->kvm->arch.crypto.aes_kw)
1814 vcpu->arch.sie_block->ecb3 |= ECB3_AES;
1815 if (vcpu->kvm->arch.crypto.dea_kw)
1816 vcpu->arch.sie_block->ecb3 |= ECB3_DEA;
1817
5102ee87
TK
1818 vcpu->arch.sie_block->crycbd = vcpu->kvm->arch.crypto.crycbd;
1819}
1820
b31605c1
DD
1821void kvm_s390_vcpu_unsetup_cmma(struct kvm_vcpu *vcpu)
1822{
1823 free_page(vcpu->arch.sie_block->cbrlo);
1824 vcpu->arch.sie_block->cbrlo = 0;
1825}
1826
1827int kvm_s390_vcpu_setup_cmma(struct kvm_vcpu *vcpu)
1828{
1829 vcpu->arch.sie_block->cbrlo = get_zeroed_page(GFP_KERNEL);
1830 if (!vcpu->arch.sie_block->cbrlo)
1831 return -ENOMEM;
1832
1833 vcpu->arch.sie_block->ecb2 |= 0x80;
1834 vcpu->arch.sie_block->ecb2 &= ~0x08;
1835 return 0;
1836}
1837
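/*
 * kvm_s390_vcpu_setup_cmma() is only reached once userspace has switched on
 * collaborative memory management for the VM. A minimal userspace sketch of
 * that step, assuming the KVM_S390_VM_MEM_CTRL/KVM_S390_VM_MEM_ENABLE_CMMA
 * VM attribute; vm_fd is a placeholder for a fd from KVM_CREATE_VM.
 */
#include <linux/kvm.h>
#include <sys/ioctl.h>

static int enable_cmma(int vm_fd)
{
	struct kvm_device_attr attr = {
		.group = KVM_S390_VM_MEM_CTRL,
		.attr  = KVM_S390_VM_MEM_ENABLE_CMMA,
	};

	return ioctl(vm_fd, KVM_SET_DEVICE_ATTR, &attr);
}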
91520f1a
MM
1838static void kvm_s390_vcpu_setup_model(struct kvm_vcpu *vcpu)
1839{
1840 struct kvm_s390_cpu_model *model = &vcpu->kvm->arch.model;
1841
91520f1a 1842 vcpu->arch.sie_block->ibc = model->ibc;
80bc79dc 1843 if (test_kvm_facility(vcpu->kvm, 7))
c54f0d6a 1844 vcpu->arch.sie_block->fac = (u32)(u64) model->fac_list;
91520f1a
MM
1845}
1846
b0c632db
HC
1847int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
1848{
b31605c1 1849 int rc = 0;
b31288fa 1850
9e6dabef
CH
1851 atomic_set(&vcpu->arch.sie_block->cpuflags, CPUSTAT_ZARCH |
1852 CPUSTAT_SM |
a4a4f191
GH
1853 CPUSTAT_STOPPED);
1854
53df84f8 1855 if (test_kvm_facility(vcpu->kvm, 78))
805de8f4 1856 atomic_or(CPUSTAT_GED2, &vcpu->arch.sie_block->cpuflags);
53df84f8 1857 else if (test_kvm_facility(vcpu->kvm, 8))
805de8f4 1858 atomic_or(CPUSTAT_GED, &vcpu->arch.sie_block->cpuflags);
a4a4f191 1859
91520f1a
MM
1860 kvm_s390_vcpu_setup_model(vcpu);
1861
bdab09f3
DH
1862 /* pgste_set_pte has special handling for !MACHINE_HAS_ESOP */
1863 if (MACHINE_HAS_ESOP)
1864 vcpu->arch.sie_block->ecb |= 0x02;
bd50e8ec
DH
1865 if (test_kvm_facility(vcpu->kvm, 9))
1866 vcpu->arch.sie_block->ecb |= 0x04;
f597d24e 1867 if (test_kvm_facility(vcpu->kvm, 73))
7feb6bb8
MM
1868 vcpu->arch.sie_block->ecb |= 0x10;
1869
873b425e 1870 if (test_kvm_facility(vcpu->kvm, 8) && sclp.has_pfmfi)
d6af0b49 1871 vcpu->arch.sie_block->ecb2 |= 0x08;
48ee7d3a
DH
1872 vcpu->arch.sie_block->eca = 0x1002000U;
1873 if (sclp.has_cei)
1874 vcpu->arch.sie_block->eca |= 0x80000000U;
11ad65b7
DH
1875 if (sclp.has_ib)
1876 vcpu->arch.sie_block->eca |= 0x40000000U;
37c5f6c8 1877 if (sclp.has_siif)
217a4406 1878 vcpu->arch.sie_block->eca |= 1;
37c5f6c8 1879 if (sclp.has_sigpif)
ea5f4969 1880 vcpu->arch.sie_block->eca |= 0x10000000U;
c6e5f166
FZ
1881 if (test_kvm_facility(vcpu->kvm, 64))
1882 vcpu->arch.sie_block->ecb3 |= 0x01;
18280d8b 1883 if (test_kvm_facility(vcpu->kvm, 129)) {
13211ea7
EF
1884 vcpu->arch.sie_block->eca |= 0x00020000;
1885 vcpu->arch.sie_block->ecd |= 0x20000000;
1886 }
c6e5f166 1887 vcpu->arch.sie_block->riccbd = (unsigned long) &vcpu->run->s.regs.riccb;
492d8642 1888 vcpu->arch.sie_block->ictl |= ICTL_ISKE | ICTL_SSKE | ICTL_RRBE;
95ca2cb5
JF
1889 if (test_kvm_facility(vcpu->kvm, 74))
1890 vcpu->arch.sie_block->ictl |= ICTL_OPEREXC;
5a5e6536 1891
e6db1d61 1892 if (vcpu->kvm->arch.use_cmma) {
b31605c1
DD
1893 rc = kvm_s390_vcpu_setup_cmma(vcpu);
1894 if (rc)
1895 return rc;
b31288fa 1896 }
0ac96caf 1897 hrtimer_init(&vcpu->arch.ckc_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
ca872302 1898 vcpu->arch.ckc_timer.function = kvm_s390_idle_wakeup;
9d8d5786 1899
5102ee87
TK
1900 kvm_s390_vcpu_crypto_setup(vcpu);
1901
b31605c1 1902 return rc;
b0c632db
HC
1903}
1904
1905struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
1906 unsigned int id)
1907{
4d47555a 1908 struct kvm_vcpu *vcpu;
7feb6bb8 1909 struct sie_page *sie_page;
4d47555a
CO
1910 int rc = -EINVAL;
1911
4215825e 1912 if (!kvm_is_ucontrol(kvm) && !sca_can_add_vcpu(kvm, id))
4d47555a
CO
1913 goto out;
1914
1915 rc = -ENOMEM;
b0c632db 1916
b110feaf 1917 vcpu = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
b0c632db 1918 if (!vcpu)
4d47555a 1919 goto out;
b0c632db 1920
7feb6bb8
MM
1921 sie_page = (struct sie_page *) get_zeroed_page(GFP_KERNEL);
1922 if (!sie_page)
b0c632db
HC
1923 goto out_free_cpu;
1924
7feb6bb8
MM
1925 vcpu->arch.sie_block = &sie_page->sie_block;
1926 vcpu->arch.sie_block->itdba = (unsigned long) &sie_page->itdb;
1927
efed1104
DH
1928 /* the real guest size will always be smaller than msl */
1929 vcpu->arch.sie_block->mso = 0;
1930 vcpu->arch.sie_block->msl = sclp.hamax;
1931
b0c632db 1932 vcpu->arch.sie_block->icpua = id;
ba5c1e9b 1933 spin_lock_init(&vcpu->arch.local_int.lock);
ba5c1e9b 1934 vcpu->arch.local_int.float_int = &kvm->arch.float_int;
d0321a24 1935 vcpu->arch.local_int.wq = &vcpu->wq;
5288fbf0 1936 vcpu->arch.local_int.cpuflags = &vcpu->arch.sie_block->cpuflags;
9c23a131 1937 seqcount_init(&vcpu->arch.cputm_seqcount);
ba5c1e9b 1938
b0c632db
HC
1939 rc = kvm_vcpu_init(vcpu, kvm, id);
1940 if (rc)
9abc2a08 1941 goto out_free_sie_block;
8335713a 1942 VM_EVENT(kvm, 3, "create cpu %d at 0x%pK, sie block at 0x%pK", id, vcpu,
b0c632db 1943 vcpu->arch.sie_block);
ade38c31 1944 trace_kvm_s390_create_vcpu(id, vcpu, vcpu->arch.sie_block);
b0c632db 1945
b0c632db 1946 return vcpu;
7b06bf2f
WY
1947out_free_sie_block:
1948 free_page((unsigned long)(vcpu->arch.sie_block));
b0c632db 1949out_free_cpu:
b110feaf 1950 kmem_cache_free(kvm_vcpu_cache, vcpu);
4d47555a 1951out:
b0c632db
HC
1952 return ERR_PTR(rc);
1953}
1954
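/*
 * kvm_arch_vcpu_create() is entered from the generic KVM_CREATE_VCPU ioctl.
 * A minimal userspace sketch of that call chain (error handling elided):
 */
#include <fcntl.h>
#include <linux/kvm.h>
#include <sys/ioctl.h>

static int create_first_vcpu(void)
{
	int kvm_fd = open("/dev/kvm", O_RDWR);
	int vm_fd = ioctl(kvm_fd, KVM_CREATE_VM, 0);

	return ioctl(vm_fd, KVM_CREATE_VCPU, 0);	/* vcpu id 0 */
}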
b0c632db
HC
1955int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
1956{
9a022067 1957 return kvm_s390_vcpu_has_irq(vcpu, 0);
b0c632db
HC
1958}
1959
27406cd5 1960void kvm_s390_vcpu_block(struct kvm_vcpu *vcpu)
49b99e1e 1961{
805de8f4 1962 atomic_or(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
61a6df54 1963 exit_sie(vcpu);
49b99e1e
CB
1964}
1965
27406cd5 1966void kvm_s390_vcpu_unblock(struct kvm_vcpu *vcpu)
49b99e1e 1967{
805de8f4 1968 atomic_andnot(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
49b99e1e
CB
1969}
1970
8e236546
CB
1971static void kvm_s390_vcpu_request(struct kvm_vcpu *vcpu)
1972{
805de8f4 1973 atomic_or(PROG_REQUEST, &vcpu->arch.sie_block->prog20);
61a6df54 1974 exit_sie(vcpu);
8e236546
CB
1975}
1976
1977static void kvm_s390_vcpu_request_handled(struct kvm_vcpu *vcpu)
1978{
9bf9fde2 1979 atomic_andnot(PROG_REQUEST, &vcpu->arch.sie_block->prog20);
8e236546
CB
1980}
1981
49b99e1e
CB
1982/*
1983 * Kick a guest cpu out of SIE and wait until SIE is not running.
1984 * If the CPU is not running (e.g. waiting as idle) the function will
1985 * return immediately. */
1986void exit_sie(struct kvm_vcpu *vcpu)
1987{
805de8f4 1988 atomic_or(CPUSTAT_STOP_INT, &vcpu->arch.sie_block->cpuflags);
49b99e1e
CB
1989 while (vcpu->arch.sie_block->prog0c & PROG_IN_SIE)
1990 cpu_relax();
1991}
1992
8e236546
CB
1993/* Kick a guest cpu out of SIE to process a request synchronously */
1994void kvm_s390_sync_request(int req, struct kvm_vcpu *vcpu)
49b99e1e 1995{
8e236546
CB
1996 kvm_make_request(req, vcpu);
1997 kvm_s390_vcpu_request(vcpu);
49b99e1e
CB
1998}
1999
414d3b07
MS
2000static void kvm_gmap_notifier(struct gmap *gmap, unsigned long start,
2001 unsigned long end)
2c70fe44 2002{
2c70fe44
CB
2003 struct kvm *kvm = gmap->private;
2004 struct kvm_vcpu *vcpu;
414d3b07
MS
2005 unsigned long prefix;
2006 int i;
2c70fe44 2007
65d0b0d4
DH
2008 if (gmap_is_shadow(gmap))
2009 return;
414d3b07
MS
2010 if (start >= 1UL << 31)
2011 /* We are only interested in prefix pages */
2012 return;
2c70fe44
CB
2013 kvm_for_each_vcpu(i, vcpu, kvm) {
2014 /* match against both prefix pages */
414d3b07
MS
2015 prefix = kvm_s390_get_prefix(vcpu);
2016 if (prefix <= end && start <= prefix + 2*PAGE_SIZE - 1) {
2017 VCPU_EVENT(vcpu, 2, "gmap notifier for %lx-%lx",
2018 start, end);
8e236546 2019 kvm_s390_sync_request(KVM_REQ_MMU_RELOAD, vcpu);
2c70fe44
CB
2020 }
2021 }
2022}
2023
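/*
 * The condition in kvm_gmap_notifier() is a plain interval-overlap test
 * between the notified range and the two 4K prefix pages, both treated as
 * inclusive ranges. The same test in isolation (illustrative helper):
 */
static bool range_hits_prefix(unsigned long start, unsigned long end,
			      unsigned long prefix)
{
	return prefix <= end && start <= prefix + 2 * PAGE_SIZE - 1;
}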
b6d33834
CD
2024int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
2025{
2026 /* kvm common code refers to this, but never calls it */
2027 BUG();
2028 return 0;
2029}
2030
14eebd91
CO
2031static int kvm_arch_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu,
2032 struct kvm_one_reg *reg)
2033{
2034 int r = -EINVAL;
2035
2036 switch (reg->id) {
29b7c71b
CO
2037 case KVM_REG_S390_TODPR:
2038 r = put_user(vcpu->arch.sie_block->todpr,
2039 (u32 __user *)reg->addr);
2040 break;
2041 case KVM_REG_S390_EPOCHDIFF:
2042 r = put_user(vcpu->arch.sie_block->epoch,
2043 (u64 __user *)reg->addr);
2044 break;
46a6dd1c 2045 case KVM_REG_S390_CPU_TIMER:
4287f247 2046 r = put_user(kvm_s390_get_cpu_timer(vcpu),
46a6dd1c
J
2047 (u64 __user *)reg->addr);
2048 break;
2049 case KVM_REG_S390_CLOCK_COMP:
2050 r = put_user(vcpu->arch.sie_block->ckc,
2051 (u64 __user *)reg->addr);
2052 break;
536336c2
DD
2053 case KVM_REG_S390_PFTOKEN:
2054 r = put_user(vcpu->arch.pfault_token,
2055 (u64 __user *)reg->addr);
2056 break;
2057 case KVM_REG_S390_PFCOMPARE:
2058 r = put_user(vcpu->arch.pfault_compare,
2059 (u64 __user *)reg->addr);
2060 break;
2061 case KVM_REG_S390_PFSELECT:
2062 r = put_user(vcpu->arch.pfault_select,
2063 (u64 __user *)reg->addr);
2064 break;
672550fb
CB
2065 case KVM_REG_S390_PP:
2066 r = put_user(vcpu->arch.sie_block->pp,
2067 (u64 __user *)reg->addr);
2068 break;
afa45ff5
CB
2069 case KVM_REG_S390_GBEA:
2070 r = put_user(vcpu->arch.sie_block->gbea,
2071 (u64 __user *)reg->addr);
2072 break;
14eebd91
CO
2073 default:
2074 break;
2075 }
2076
2077 return r;
2078}
2079
2080static int kvm_arch_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu,
2081 struct kvm_one_reg *reg)
2082{
2083 int r = -EINVAL;
4287f247 2084 __u64 val;
14eebd91
CO
2085
2086 switch (reg->id) {
29b7c71b
CO
2087 case KVM_REG_S390_TODPR:
2088 r = get_user(vcpu->arch.sie_block->todpr,
2089 (u32 __user *)reg->addr);
2090 break;
2091 case KVM_REG_S390_EPOCHDIFF:
2092 r = get_user(vcpu->arch.sie_block->epoch,
2093 (u64 __user *)reg->addr);
2094 break;
46a6dd1c 2095 case KVM_REG_S390_CPU_TIMER:
4287f247
DH
2096 r = get_user(val, (u64 __user *)reg->addr);
2097 if (!r)
2098 kvm_s390_set_cpu_timer(vcpu, val);
46a6dd1c
J
2099 break;
2100 case KVM_REG_S390_CLOCK_COMP:
2101 r = get_user(vcpu->arch.sie_block->ckc,
2102 (u64 __user *)reg->addr);
2103 break;
536336c2
DD
2104 case KVM_REG_S390_PFTOKEN:
2105 r = get_user(vcpu->arch.pfault_token,
2106 (u64 __user *)reg->addr);
9fbd8082
DH
2107 if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
2108 kvm_clear_async_pf_completion_queue(vcpu);
536336c2
DD
2109 break;
2110 case KVM_REG_S390_PFCOMPARE:
2111 r = get_user(vcpu->arch.pfault_compare,
2112 (u64 __user *)reg->addr);
2113 break;
2114 case KVM_REG_S390_PFSELECT:
2115 r = get_user(vcpu->arch.pfault_select,
2116 (u64 __user *)reg->addr);
2117 break;
672550fb
CB
2118 case KVM_REG_S390_PP:
2119 r = get_user(vcpu->arch.sie_block->pp,
2120 (u64 __user *)reg->addr);
2121 break;
afa45ff5
CB
2122 case KVM_REG_S390_GBEA:
2123 r = get_user(vcpu->arch.sie_block->gbea,
2124 (u64 __user *)reg->addr);
2125 break;
14eebd91
CO
2126 default:
2127 break;
2128 }
2129
2130 return r;
2131}
b6d33834 2132
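/*
 * The register accessors above serve the generic KVM_GET/SET_ONE_REG
 * ioctls. A userspace sketch reading the CPU timer; vcpu_fd is a
 * placeholder for a fd from KVM_CREATE_VCPU.
 */
#include <linux/kvm.h>
#include <stdint.h>
#include <sys/ioctl.h>

static int get_cpu_timer(int vcpu_fd, uint64_t *val)
{
	struct kvm_one_reg reg = {
		.id   = KVM_REG_S390_CPU_TIMER,
		.addr = (uint64_t)(unsigned long)val,
	};

	return ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);
}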
b0c632db
HC
2133static int kvm_arch_vcpu_ioctl_initial_reset(struct kvm_vcpu *vcpu)
2134{
b0c632db 2135 kvm_s390_vcpu_initial_reset(vcpu);
b0c632db
HC
2136 return 0;
2137}
2138
2139int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
2140{
5a32c1af 2141 memcpy(&vcpu->run->s.regs.gprs, &regs->gprs, sizeof(regs->gprs));
b0c632db
HC
2142 return 0;
2143}
2144
2145int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
2146{
5a32c1af 2147 memcpy(&regs->gprs, &vcpu->run->s.regs.gprs, sizeof(regs->gprs));
b0c632db
HC
2148 return 0;
2149}
2150
2151int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
2152 struct kvm_sregs *sregs)
2153{
59674c1a 2154 memcpy(&vcpu->run->s.regs.acrs, &sregs->acrs, sizeof(sregs->acrs));
b0c632db 2155 memcpy(&vcpu->arch.sie_block->gcr, &sregs->crs, sizeof(sregs->crs));
59674c1a 2156 restore_access_regs(vcpu->run->s.regs.acrs);
b0c632db
HC
2157 return 0;
2158}
2159
2160int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
2161 struct kvm_sregs *sregs)
2162{
59674c1a 2163 memcpy(&sregs->acrs, &vcpu->run->s.regs.acrs, sizeof(sregs->acrs));
b0c632db 2164 memcpy(&sregs->crs, &vcpu->arch.sie_block->gcr, sizeof(sregs->crs));
b0c632db
HC
2165 return 0;
2166}
2167
2168int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
2169{
9abc2a08
DH
2170 /* make sure the new values will be lazily loaded */
2171 save_fpu_regs();
4725c860
MS
2172 if (test_fp_ctl(fpu->fpc))
2173 return -EINVAL;
9abc2a08
DH
2174 current->thread.fpu.fpc = fpu->fpc;
2175 if (MACHINE_HAS_VX)
2176 convert_fp_to_vx(current->thread.fpu.vxrs, (freg_t *)fpu->fprs);
2177 else
2178 memcpy(current->thread.fpu.fprs, &fpu->fprs, sizeof(fpu->fprs));
b0c632db
HC
2179 return 0;
2180}
2181
2182int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
2183{
9abc2a08
DH
2184 /* make sure we have the latest values */
2185 save_fpu_regs();
2186 if (MACHINE_HAS_VX)
2187 convert_vx_to_fp((freg_t *)fpu->fprs, current->thread.fpu.vxrs);
2188 else
2189 memcpy(fpu->fprs, current->thread.fpu.fprs, sizeof(fpu->fprs));
2190 fpu->fpc = current->thread.fpu.fpc;
b0c632db
HC
2191 return 0;
2192}
2193
2194static int kvm_arch_vcpu_ioctl_set_initial_psw(struct kvm_vcpu *vcpu, psw_t psw)
2195{
2196 int rc = 0;
2197
7a42fdc2 2198 if (!is_vcpu_stopped(vcpu))
b0c632db 2199 rc = -EBUSY;
d7b0b5eb
CO
2200 else {
2201 vcpu->run->psw_mask = psw.mask;
2202 vcpu->run->psw_addr = psw.addr;
2203 }
b0c632db
HC
2204 return rc;
2205}
2206
2207int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
2208 struct kvm_translation *tr)
2209{
2210 return -EINVAL; /* not implemented yet */
2211}
2212
27291e21
DH
2213#define VALID_GUESTDBG_FLAGS (KVM_GUESTDBG_SINGLESTEP | \
2214 KVM_GUESTDBG_USE_HW_BP | \
2215 KVM_GUESTDBG_ENABLE)
2216
d0bfb940
JK
2217int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
2218 struct kvm_guest_debug *dbg)
b0c632db 2219{
27291e21
DH
2220 int rc = 0;
2221
2222 vcpu->guest_debug = 0;
2223 kvm_s390_clear_bp_data(vcpu);
2224
2de3bfc2 2225 if (dbg->control & ~VALID_GUESTDBG_FLAGS)
27291e21 2226 return -EINVAL;
89b5b4de
DH
2227 if (!sclp.has_gpere)
2228 return -EINVAL;
27291e21
DH
2229
2230 if (dbg->control & KVM_GUESTDBG_ENABLE) {
2231 vcpu->guest_debug = dbg->control;
2232 /* enforce guest PER */
805de8f4 2233 atomic_or(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags);
27291e21
DH
2234
2235 if (dbg->control & KVM_GUESTDBG_USE_HW_BP)
2236 rc = kvm_s390_import_bp_data(vcpu, dbg);
2237 } else {
805de8f4 2238 atomic_andnot(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags);
27291e21
DH
2239 vcpu->arch.guestdbg.last_bp = 0;
2240 }
2241
2242 if (rc) {
2243 vcpu->guest_debug = 0;
2244 kvm_s390_clear_bp_data(vcpu);
805de8f4 2245 atomic_andnot(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags);
27291e21
DH
2246 }
2247
2248 return rc;
b0c632db
HC
2249}
2250
62d9f0db
MT
2251int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
2252 struct kvm_mp_state *mp_state)
2253{
6352e4d2
DH
2254 /* CHECK_STOP and LOAD are not supported yet */
2255 return is_vcpu_stopped(vcpu) ? KVM_MP_STATE_STOPPED :
2256 KVM_MP_STATE_OPERATING;
62d9f0db
MT
2257}
2258
2259int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
2260 struct kvm_mp_state *mp_state)
2261{
6352e4d2
DH
2262 int rc = 0;
2263
2264 /* user space knows about this interface - let it control the state */
2265 vcpu->kvm->arch.user_cpu_state_ctrl = 1;
2266
2267 switch (mp_state->mp_state) {
2268 case KVM_MP_STATE_STOPPED:
2269 kvm_s390_vcpu_stop(vcpu);
2270 break;
2271 case KVM_MP_STATE_OPERATING:
2272 kvm_s390_vcpu_start(vcpu);
2273 break;
2274 case KVM_MP_STATE_LOAD:
2275 case KVM_MP_STATE_CHECK_STOP:
2276 /* fall through - CHECK_STOP and LOAD are not supported yet */
2277 default:
2278 rc = -ENXIO;
2279 }
2280
2281 return rc;
62d9f0db
MT
2282}
2283
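/*
 * Note that the first KVM_SET_MP_STATE call permanently hands CPU state
 * control to userspace (user_cpu_state_ctrl above). A userspace sketch of
 * restarting a stopped VCPU; vcpu_fd is a placeholder.
 */
#include <linux/kvm.h>
#include <sys/ioctl.h>

static int start_vcpu(int vcpu_fd)
{
	struct kvm_mp_state state = { .mp_state = KVM_MP_STATE_OPERATING };

	return ioctl(vcpu_fd, KVM_SET_MP_STATE, &state);
}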
8ad35755
DH
2284static bool ibs_enabled(struct kvm_vcpu *vcpu)
2285{
2286 return atomic_read(&vcpu->arch.sie_block->cpuflags) & CPUSTAT_IBS;
2287}
2288
2c70fe44
CB
2289static int kvm_s390_handle_requests(struct kvm_vcpu *vcpu)
2290{
8ad35755 2291retry:
8e236546 2292 kvm_s390_vcpu_request_handled(vcpu);
586b7ccd
CB
2293 if (!vcpu->requests)
2294 return 0;
2c70fe44
CB
2295 /*
2296 * We use MMU_RELOAD just to re-arm the ipte notifier for the
b2d73b2a 2297 * guest prefix page. gmap_mprotect_notify will wait on the ptl lock.
2c70fe44
CB
2298 * This ensures that the ipte instruction for this request has
2299 * already finished. We might race against a second unmapper that
 2300 * wants to set the blocking bit. Let's just retry the request loop.
2301 */
8ad35755 2302 if (kvm_check_request(KVM_REQ_MMU_RELOAD, vcpu)) {
2c70fe44 2303 int rc;
b2d73b2a
MS
2304 rc = gmap_mprotect_notify(vcpu->arch.gmap,
2305 kvm_s390_get_prefix(vcpu),
2306 PAGE_SIZE * 2, PROT_WRITE);
2c70fe44
CB
2307 if (rc)
2308 return rc;
8ad35755 2309 goto retry;
2c70fe44 2310 }
8ad35755 2311
d3d692c8
DH
2312 if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu)) {
2313 vcpu->arch.sie_block->ihcpu = 0xffff;
2314 goto retry;
2315 }
2316
8ad35755
DH
2317 if (kvm_check_request(KVM_REQ_ENABLE_IBS, vcpu)) {
2318 if (!ibs_enabled(vcpu)) {
2319 trace_kvm_s390_enable_disable_ibs(vcpu->vcpu_id, 1);
805de8f4 2320 atomic_or(CPUSTAT_IBS,
8ad35755
DH
2321 &vcpu->arch.sie_block->cpuflags);
2322 }
2323 goto retry;
2c70fe44 2324 }
8ad35755
DH
2325
2326 if (kvm_check_request(KVM_REQ_DISABLE_IBS, vcpu)) {
2327 if (ibs_enabled(vcpu)) {
2328 trace_kvm_s390_enable_disable_ibs(vcpu->vcpu_id, 0);
805de8f4 2329 atomic_andnot(CPUSTAT_IBS,
8ad35755
DH
2330 &vcpu->arch.sie_block->cpuflags);
2331 }
2332 goto retry;
2333 }
2334
0759d068
DH
2335 /* nothing to do, just clear the request */
2336 clear_bit(KVM_REQ_UNHALT, &vcpu->requests);
2337
2c70fe44
CB
2338 return 0;
2339}
2340
25ed1675
DH
2341void kvm_s390_set_tod_clock(struct kvm *kvm, u64 tod)
2342{
2343 struct kvm_vcpu *vcpu;
2344 int i;
2345
2346 mutex_lock(&kvm->lock);
2347 preempt_disable();
2348 kvm->arch.epoch = tod - get_tod_clock();
2349 kvm_s390_vcpu_block_all(kvm);
2350 kvm_for_each_vcpu(i, vcpu, kvm)
2351 vcpu->arch.sie_block->epoch = kvm->arch.epoch;
2352 kvm_s390_vcpu_unblock_all(kvm);
2353 preempt_enable();
2354 mutex_unlock(&kvm->lock);
2355}
2356
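/*
 * The stored epoch is the signed difference between the requested guest TOD
 * and the host TOD at set time; SIE applies the same bias on every entry,
 * so the guest clock keeps ticking from 'tod'. In miniature (illustrative):
 *
 *	epoch     = tod - host_tod_at_set;
 *	guest_tod = host_tod_now + epoch
 *	          = tod + (host_tod_now - host_tod_at_set);
 */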
fa576c58
TH
2357/**
2358 * kvm_arch_fault_in_page - fault-in guest page if necessary
2359 * @vcpu: The corresponding virtual cpu
2360 * @gpa: Guest physical address
2361 * @writable: Whether the page should be writable or not
2362 *
2363 * Make sure that a guest page has been faulted-in on the host.
2364 *
2365 * Return: Zero on success, negative error code otherwise.
2366 */
2367long kvm_arch_fault_in_page(struct kvm_vcpu *vcpu, gpa_t gpa, int writable)
24eb3a82 2368{
527e30b4
MS
2369 return gmap_fault(vcpu->arch.gmap, gpa,
2370 writable ? FAULT_FLAG_WRITE : 0);
24eb3a82
DD
2371}
2372
3c038e6b
DD
2373static void __kvm_inject_pfault_token(struct kvm_vcpu *vcpu, bool start_token,
2374 unsigned long token)
2375{
2376 struct kvm_s390_interrupt inti;
383d0b05 2377 struct kvm_s390_irq irq;
3c038e6b
DD
2378
2379 if (start_token) {
383d0b05
JF
2380 irq.u.ext.ext_params2 = token;
2381 irq.type = KVM_S390_INT_PFAULT_INIT;
2382 WARN_ON_ONCE(kvm_s390_inject_vcpu(vcpu, &irq));
3c038e6b
DD
2383 } else {
2384 inti.type = KVM_S390_INT_PFAULT_DONE;
383d0b05 2385 inti.parm64 = token;
3c038e6b
DD
2386 WARN_ON_ONCE(kvm_s390_inject_vm(vcpu->kvm, &inti));
2387 }
2388}
2389
2390void kvm_arch_async_page_not_present(struct kvm_vcpu *vcpu,
2391 struct kvm_async_pf *work)
2392{
2393 trace_kvm_s390_pfault_init(vcpu, work->arch.pfault_token);
2394 __kvm_inject_pfault_token(vcpu, true, work->arch.pfault_token);
2395}
2396
2397void kvm_arch_async_page_present(struct kvm_vcpu *vcpu,
2398 struct kvm_async_pf *work)
2399{
2400 trace_kvm_s390_pfault_done(vcpu, work->arch.pfault_token);
2401 __kvm_inject_pfault_token(vcpu, false, work->arch.pfault_token);
2402}
2403
2404void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu,
2405 struct kvm_async_pf *work)
2406{
2407 /* s390 will always inject the page directly */
2408}
2409
2410bool kvm_arch_can_inject_async_page_present(struct kvm_vcpu *vcpu)
2411{
2412 /*
2413 * s390 will always inject the page directly,
 2414 * but we still want check_async_completion to clean up
2415 */
2416 return true;
2417}
2418
2419static int kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu)
2420{
2421 hva_t hva;
2422 struct kvm_arch_async_pf arch;
2423 int rc;
2424
2425 if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
2426 return 0;
2427 if ((vcpu->arch.sie_block->gpsw.mask & vcpu->arch.pfault_select) !=
2428 vcpu->arch.pfault_compare)
2429 return 0;
2430 if (psw_extint_disabled(vcpu))
2431 return 0;
9a022067 2432 if (kvm_s390_vcpu_has_irq(vcpu, 0))
3c038e6b
DD
2433 return 0;
2434 if (!(vcpu->arch.sie_block->gcr[0] & 0x200ul))
2435 return 0;
2436 if (!vcpu->arch.gmap->pfault_enabled)
2437 return 0;
2438
81480cc1
HC
2439 hva = gfn_to_hva(vcpu->kvm, gpa_to_gfn(current->thread.gmap_addr));
2440 hva += current->thread.gmap_addr & ~PAGE_MASK;
2441 if (read_guest_real(vcpu, vcpu->arch.pfault_token, &arch.pfault_token, 8))
3c038e6b
DD
2442 return 0;
2443
2444 rc = kvm_setup_async_pf(vcpu, current->thread.gmap_addr, hva, &arch);
2445 return rc;
2446}
2447
3fb4c40f 2448static int vcpu_pre_run(struct kvm_vcpu *vcpu)
b0c632db 2449{
3fb4c40f 2450 int rc, cpuflags;
e168bf8d 2451
3c038e6b
DD
2452 /*
2453 * On s390 notifications for arriving pages will be delivered directly
 2454 * to the guest but the housekeeping for completed pfaults is
2455 * handled outside the worker.
2456 */
2457 kvm_check_async_pf_completion(vcpu);
2458
7ec7c8c7
CB
2459 vcpu->arch.sie_block->gg14 = vcpu->run->s.regs.gprs[14];
2460 vcpu->arch.sie_block->gg15 = vcpu->run->s.regs.gprs[15];
b0c632db
HC
2461
2462 if (need_resched())
2463 schedule();
2464
d3a73acb 2465 if (test_cpu_flag(CIF_MCCK_PENDING))
71cde587
CB
2466 s390_handle_mcck();
2467
79395031
JF
2468 if (!kvm_is_ucontrol(vcpu->kvm)) {
2469 rc = kvm_s390_deliver_pending_interrupts(vcpu);
2470 if (rc)
2471 return rc;
2472 }
0ff31867 2473
2c70fe44
CB
2474 rc = kvm_s390_handle_requests(vcpu);
2475 if (rc)
2476 return rc;
2477
27291e21
DH
2478 if (guestdbg_enabled(vcpu)) {
2479 kvm_s390_backup_guest_per_regs(vcpu);
2480 kvm_s390_patch_guest_per_regs(vcpu);
2481 }
2482
b0c632db 2483 vcpu->arch.sie_block->icptcode = 0;
3fb4c40f
TH
2484 cpuflags = atomic_read(&vcpu->arch.sie_block->cpuflags);
2485 VCPU_EVENT(vcpu, 6, "entering sie flags %x", cpuflags);
2486 trace_kvm_s390_sie_enter(vcpu, cpuflags);
2b29a9fd 2487
3fb4c40f
TH
2488 return 0;
2489}
2490
492d8642
TH
2491static int vcpu_post_run_fault_in_sie(struct kvm_vcpu *vcpu)
2492{
56317920
DH
2493 struct kvm_s390_pgm_info pgm_info = {
2494 .code = PGM_ADDRESSING,
2495 };
2496 u8 opcode, ilen;
492d8642
TH
2497 int rc;
2498
2499 VCPU_EVENT(vcpu, 3, "%s", "fault in sie instruction");
2500 trace_kvm_s390_sie_fault(vcpu);
2501
2502 /*
2503 * We want to inject an addressing exception, which is defined as a
2504 * suppressing or terminating exception. However, since we came here
2505 * by a DAT access exception, the PSW still points to the faulting
2506 * instruction since DAT exceptions are nullifying. So we've got
2507 * to look up the current opcode to get the length of the instruction
2508 * to be able to forward the PSW.
2509 */
65977322 2510 rc = read_guest_instr(vcpu, &opcode, 1);
56317920 2511 ilen = insn_length(opcode);
9b0d721a
DH
2512 if (rc < 0) {
2513 return rc;
2514 } else if (rc) {
2515 /* Instruction-Fetching Exceptions - we can't detect the ilen.
2516 * Forward by arbitrary ilc, injection will take care of
2517 * nullification if necessary.
2518 */
2519 pgm_info = vcpu->arch.pgm;
2520 ilen = 4;
2521 }
56317920
DH
2522 pgm_info.flags = ilen | KVM_S390_PGM_FLAGS_ILC_VALID;
2523 kvm_s390_forward_psw(vcpu, ilen);
2524 return kvm_s390_inject_prog_irq(vcpu, &pgm_info);
492d8642
TH
2525}
2526
3fb4c40f
TH
2527static int vcpu_post_run(struct kvm_vcpu *vcpu, int exit_reason)
2528{
2b29a9fd
DD
2529 VCPU_EVENT(vcpu, 6, "exit sie icptcode %d",
2530 vcpu->arch.sie_block->icptcode);
2531 trace_kvm_s390_sie_exit(vcpu, vcpu->arch.sie_block->icptcode);
2532
27291e21
DH
2533 if (guestdbg_enabled(vcpu))
2534 kvm_s390_restore_guest_per_regs(vcpu);
2535
7ec7c8c7
CB
2536 vcpu->run->s.regs.gprs[14] = vcpu->arch.sie_block->gg14;
2537 vcpu->run->s.regs.gprs[15] = vcpu->arch.sie_block->gg15;
71f116bf
DH
2538
2539 if (vcpu->arch.sie_block->icptcode > 0) {
2540 int rc = kvm_handle_sie_intercept(vcpu);
2541
2542 if (rc != -EOPNOTSUPP)
2543 return rc;
2544 vcpu->run->exit_reason = KVM_EXIT_S390_SIEIC;
2545 vcpu->run->s390_sieic.icptcode = vcpu->arch.sie_block->icptcode;
2546 vcpu->run->s390_sieic.ipa = vcpu->arch.sie_block->ipa;
2547 vcpu->run->s390_sieic.ipb = vcpu->arch.sie_block->ipb;
2548 return -EREMOTE;
2549 } else if (exit_reason != -EFAULT) {
2550 vcpu->stat.exit_null++;
2551 return 0;
210b1607
TH
2552 } else if (kvm_is_ucontrol(vcpu->kvm)) {
2553 vcpu->run->exit_reason = KVM_EXIT_S390_UCONTROL;
2554 vcpu->run->s390_ucontrol.trans_exc_code =
2555 current->thread.gmap_addr;
2556 vcpu->run->s390_ucontrol.pgm_code = 0x10;
71f116bf 2557 return -EREMOTE;
24eb3a82 2558 } else if (current->thread.gmap_pfault) {
3c038e6b 2559 trace_kvm_s390_major_guest_pfault(vcpu);
24eb3a82 2560 current->thread.gmap_pfault = 0;
71f116bf
DH
2561 if (kvm_arch_setup_async_pf(vcpu))
2562 return 0;
2563 return kvm_arch_fault_in_page(vcpu, current->thread.gmap_addr, 1);
a76ccff6 2564 }
71f116bf 2565 return vcpu_post_run_fault_in_sie(vcpu);
3fb4c40f
TH
2566}
2567
2568static int __vcpu_run(struct kvm_vcpu *vcpu)
2569{
2570 int rc, exit_reason;
2571
800c1065
TH
2572 /*
2573 * We try to hold kvm->srcu during most of vcpu_run (except when run-
2574 * ning the guest), so that memslots (and other stuff) are protected
2575 */
2576 vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
2577
a76ccff6
TH
2578 do {
2579 rc = vcpu_pre_run(vcpu);
2580 if (rc)
2581 break;
3fb4c40f 2582
800c1065 2583 srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
a76ccff6
TH
2584 /*
2585 * As PF_VCPU will be used in fault handler, between
2586 * guest_enter and guest_exit should be no uaccess.
2587 */
0097d12e
CB
2588 local_irq_disable();
2589 __kvm_guest_enter();
db0758b2 2590 __disable_cpu_timer_accounting(vcpu);
0097d12e 2591 local_irq_enable();
a76ccff6
TH
2592 exit_reason = sie64a(vcpu->arch.sie_block,
2593 vcpu->run->s.regs.gprs);
0097d12e 2594 local_irq_disable();
db0758b2 2595 __enable_cpu_timer_accounting(vcpu);
0097d12e
CB
2596 __kvm_guest_exit();
2597 local_irq_enable();
800c1065 2598 vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
a76ccff6
TH
2599
2600 rc = vcpu_post_run(vcpu, exit_reason);
27291e21 2601 } while (!signal_pending(current) && !guestdbg_exit_pending(vcpu) && !rc);
3fb4c40f 2602
800c1065 2603 srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
e168bf8d 2604 return rc;
b0c632db
HC
2605}
2606
b028ee3e
DH
2607static void sync_regs(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
2608{
2609 vcpu->arch.sie_block->gpsw.mask = kvm_run->psw_mask;
2610 vcpu->arch.sie_block->gpsw.addr = kvm_run->psw_addr;
2611 if (kvm_run->kvm_dirty_regs & KVM_SYNC_PREFIX)
2612 kvm_s390_set_prefix(vcpu, kvm_run->s.regs.prefix);
2613 if (kvm_run->kvm_dirty_regs & KVM_SYNC_CRS) {
2614 memcpy(&vcpu->arch.sie_block->gcr, &kvm_run->s.regs.crs, 128);
d3d692c8
DH
2615 /* some control register changes require a tlb flush */
2616 kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
b028ee3e
DH
2617 }
2618 if (kvm_run->kvm_dirty_regs & KVM_SYNC_ARCH0) {
4287f247 2619 kvm_s390_set_cpu_timer(vcpu, kvm_run->s.regs.cputm);
b028ee3e
DH
2620 vcpu->arch.sie_block->ckc = kvm_run->s.regs.ckc;
2621 vcpu->arch.sie_block->todpr = kvm_run->s.regs.todpr;
2622 vcpu->arch.sie_block->pp = kvm_run->s.regs.pp;
2623 vcpu->arch.sie_block->gbea = kvm_run->s.regs.gbea;
2624 }
2625 if (kvm_run->kvm_dirty_regs & KVM_SYNC_PFAULT) {
2626 vcpu->arch.pfault_token = kvm_run->s.regs.pft;
2627 vcpu->arch.pfault_select = kvm_run->s.regs.pfs;
2628 vcpu->arch.pfault_compare = kvm_run->s.regs.pfc;
9fbd8082
DH
2629 if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
2630 kvm_clear_async_pf_completion_queue(vcpu);
b028ee3e
DH
2631 }
2632 kvm_run->kvm_dirty_regs = 0;
2633}
2634
2635static void store_regs(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
2636{
2637 kvm_run->psw_mask = vcpu->arch.sie_block->gpsw.mask;
2638 kvm_run->psw_addr = vcpu->arch.sie_block->gpsw.addr;
2639 kvm_run->s.regs.prefix = kvm_s390_get_prefix(vcpu);
2640 memcpy(&kvm_run->s.regs.crs, &vcpu->arch.sie_block->gcr, 128);
4287f247 2641 kvm_run->s.regs.cputm = kvm_s390_get_cpu_timer(vcpu);
b028ee3e
DH
2642 kvm_run->s.regs.ckc = vcpu->arch.sie_block->ckc;
2643 kvm_run->s.regs.todpr = vcpu->arch.sie_block->todpr;
2644 kvm_run->s.regs.pp = vcpu->arch.sie_block->pp;
2645 kvm_run->s.regs.gbea = vcpu->arch.sie_block->gbea;
2646 kvm_run->s.regs.pft = vcpu->arch.pfault_token;
2647 kvm_run->s.regs.pfs = vcpu->arch.pfault_select;
2648 kvm_run->s.regs.pfc = vcpu->arch.pfault_compare;
2649}
2650
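/*
 * sync_regs()/store_regs() shuttle the kvm_run register block in and out
 * around each KVM_RUN. A userspace sketch updating the prefix register via
 * the dirty-regs protocol; vcpu_fd and run (the mmap'ed kvm_run) are
 * placeholders.
 */
#include <linux/kvm.h>
#include <sys/ioctl.h>

static int set_prefix_and_run(int vcpu_fd, struct kvm_run *run, __u32 prefix)
{
	run->s.regs.prefix = prefix;
	run->kvm_dirty_regs |= KVM_SYNC_PREFIX;	/* consumed by sync_regs() */

	return ioctl(vcpu_fd, KVM_RUN, 0);
}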
b0c632db
HC
2651int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
2652{
8f2abe6a 2653 int rc;
b0c632db
HC
2654 sigset_t sigsaved;
2655
27291e21
DH
2656 if (guestdbg_exit_pending(vcpu)) {
2657 kvm_s390_prepare_debug_exit(vcpu);
2658 return 0;
2659 }
2660
b0c632db
HC
2661 if (vcpu->sigset_active)
2662 sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);
2663
6352e4d2
DH
2664 if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm)) {
2665 kvm_s390_vcpu_start(vcpu);
2666 } else if (is_vcpu_stopped(vcpu)) {
ea2cdd27 2667 pr_err_ratelimited("can't run stopped vcpu %d\n",
6352e4d2
DH
2668 vcpu->vcpu_id);
2669 return -EINVAL;
2670 }
b0c632db 2671
b028ee3e 2672 sync_regs(vcpu, kvm_run);
db0758b2 2673 enable_cpu_timer_accounting(vcpu);
d7b0b5eb 2674
dab4079d 2675 might_fault();
a76ccff6 2676 rc = __vcpu_run(vcpu);
9ace903d 2677
b1d16c49
CE
2678 if (signal_pending(current) && !rc) {
2679 kvm_run->exit_reason = KVM_EXIT_INTR;
8f2abe6a 2680 rc = -EINTR;
b1d16c49 2681 }
8f2abe6a 2682
27291e21
DH
2683 if (guestdbg_exit_pending(vcpu) && !rc) {
2684 kvm_s390_prepare_debug_exit(vcpu);
2685 rc = 0;
2686 }
2687
8f2abe6a 2688 if (rc == -EREMOTE) {
71f116bf 2689 /* userspace support is needed, kvm_run has been prepared */
8f2abe6a
CB
2690 rc = 0;
2691 }
b0c632db 2692
db0758b2 2693 disable_cpu_timer_accounting(vcpu);
b028ee3e 2694 store_regs(vcpu, kvm_run);
d7b0b5eb 2695
b0c632db
HC
2696 if (vcpu->sigset_active)
2697 sigprocmask(SIG_SETMASK, &sigsaved, NULL);
2698
b0c632db 2699 vcpu->stat.exit_userspace++;
7e8e6ab4 2700 return rc;
b0c632db
HC
2701}
2702
b0c632db
HC
2703/*
2704 * store status at address
 2705 * we have two special cases:
2706 * KVM_S390_STORE_STATUS_NOADDR: -> 0x1200 on 64 bit
2707 * KVM_S390_STORE_STATUS_PREFIXED: -> prefix
2708 */
d0bce605 2709int kvm_s390_store_status_unloaded(struct kvm_vcpu *vcpu, unsigned long gpa)
b0c632db 2710{
092670cd 2711 unsigned char archmode = 1;
9abc2a08 2712 freg_t fprs[NUM_FPRS];
fda902cb 2713 unsigned int px;
4287f247 2714 u64 clkcomp, cputm;
d0bce605 2715 int rc;
b0c632db 2716
d9a3a09a 2717 px = kvm_s390_get_prefix(vcpu);
d0bce605
HC
2718 if (gpa == KVM_S390_STORE_STATUS_NOADDR) {
2719 if (write_guest_abs(vcpu, 163, &archmode, 1))
b0c632db 2720 return -EFAULT;
d9a3a09a 2721 gpa = 0;
d0bce605
HC
2722 } else if (gpa == KVM_S390_STORE_STATUS_PREFIXED) {
2723 if (write_guest_real(vcpu, 163, &archmode, 1))
b0c632db 2724 return -EFAULT;
d9a3a09a
MS
2725 gpa = px;
2726 } else
2727 gpa -= __LC_FPREGS_SAVE_AREA;
9abc2a08
DH
2728
2729 /* manually convert vector registers if necessary */
2730 if (MACHINE_HAS_VX) {
9522b37f 2731 convert_vx_to_fp(fprs, (__vector128 *) vcpu->run->s.regs.vrs);
9abc2a08
DH
2732 rc = write_guest_abs(vcpu, gpa + __LC_FPREGS_SAVE_AREA,
2733 fprs, 128);
2734 } else {
2735 rc = write_guest_abs(vcpu, gpa + __LC_FPREGS_SAVE_AREA,
6fd8e67d 2736 vcpu->run->s.regs.fprs, 128);
9abc2a08 2737 }
d9a3a09a 2738 rc |= write_guest_abs(vcpu, gpa + __LC_GPREGS_SAVE_AREA,
d0bce605 2739 vcpu->run->s.regs.gprs, 128);
d9a3a09a 2740 rc |= write_guest_abs(vcpu, gpa + __LC_PSW_SAVE_AREA,
d0bce605 2741 &vcpu->arch.sie_block->gpsw, 16);
d9a3a09a 2742 rc |= write_guest_abs(vcpu, gpa + __LC_PREFIX_SAVE_AREA,
fda902cb 2743 &px, 4);
d9a3a09a 2744 rc |= write_guest_abs(vcpu, gpa + __LC_FP_CREG_SAVE_AREA,
9abc2a08 2745 &vcpu->run->s.regs.fpc, 4);
d9a3a09a 2746 rc |= write_guest_abs(vcpu, gpa + __LC_TOD_PROGREG_SAVE_AREA,
d0bce605 2747 &vcpu->arch.sie_block->todpr, 4);
4287f247 2748 cputm = kvm_s390_get_cpu_timer(vcpu);
d9a3a09a 2749 rc |= write_guest_abs(vcpu, gpa + __LC_CPU_TIMER_SAVE_AREA,
4287f247 2750 &cputm, 8);
178bd789 2751 clkcomp = vcpu->arch.sie_block->ckc >> 8;
d9a3a09a 2752 rc |= write_guest_abs(vcpu, gpa + __LC_CLOCK_COMP_SAVE_AREA,
d0bce605 2753 &clkcomp, 8);
d9a3a09a 2754 rc |= write_guest_abs(vcpu, gpa + __LC_AREGS_SAVE_AREA,
d0bce605 2755 &vcpu->run->s.regs.acrs, 64);
d9a3a09a 2756 rc |= write_guest_abs(vcpu, gpa + __LC_CREGS_SAVE_AREA,
d0bce605
HC
2757 &vcpu->arch.sie_block->gcr, 128);
2758 return rc ? -EFAULT : 0;
b0c632db
HC
2759}
2760
e879892c
TH
2761int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
2762{
2763 /*
2764 * The guest FPRS and ACRS are in the host FPRS/ACRS due to the lazy
 2765 * copying in vcpu load/put. Let's update our copies before we save
 2766 * them into the save area.
2767 */
d0164ee2 2768 save_fpu_regs();
9abc2a08 2769 vcpu->run->s.regs.fpc = current->thread.fpu.fpc;
e879892c
TH
2770 save_access_regs(vcpu->run->s.regs.acrs);
2771
2772 return kvm_s390_store_status_unloaded(vcpu, addr);
2773}
2774
bc17de7c
EF
2775/*
2776 * store additional status at address
2777 */
2778int kvm_s390_store_adtl_status_unloaded(struct kvm_vcpu *vcpu,
2779 unsigned long gpa)
2780{
2781 /* Only bits 0-53 are used for address formation */
2782 if (!(gpa & ~0x3ff))
2783 return 0;
2784
2785 return write_guest_abs(vcpu, gpa & ~0x3ff,
2786 (void *)&vcpu->run->s.regs.vrs, 512);
2787}
2788
2789int kvm_s390_vcpu_store_adtl_status(struct kvm_vcpu *vcpu, unsigned long addr)
2790{
2791 if (!test_kvm_facility(vcpu->kvm, 129))
2792 return 0;
2793
2794 /*
 2795 * The guest VXRS are in the host VXRS due to the lazy
9977e886
HB
2796 * copying in vcpu load/put. We can simply call save_fpu_regs()
2797 * to save the current register state because we are in the
2798 * middle of a load/put cycle.
2799 *
2800 * Let's update our copies before we save it into the save area.
bc17de7c 2801 */
d0164ee2 2802 save_fpu_regs();
bc17de7c
EF
2803
2804 return kvm_s390_store_adtl_status_unloaded(vcpu, addr);
2805}
2806
8ad35755
DH
2807static void __disable_ibs_on_vcpu(struct kvm_vcpu *vcpu)
2808{
2809 kvm_check_request(KVM_REQ_ENABLE_IBS, vcpu);
8e236546 2810 kvm_s390_sync_request(KVM_REQ_DISABLE_IBS, vcpu);
8ad35755
DH
2811}
2812
2813static void __disable_ibs_on_all_vcpus(struct kvm *kvm)
2814{
2815 unsigned int i;
2816 struct kvm_vcpu *vcpu;
2817
2818 kvm_for_each_vcpu(i, vcpu, kvm) {
2819 __disable_ibs_on_vcpu(vcpu);
2820 }
2821}
2822
2823static void __enable_ibs_on_vcpu(struct kvm_vcpu *vcpu)
2824{
09a400e7
DH
2825 if (!sclp.has_ibs)
2826 return;
8ad35755 2827 kvm_check_request(KVM_REQ_DISABLE_IBS, vcpu);
8e236546 2828 kvm_s390_sync_request(KVM_REQ_ENABLE_IBS, vcpu);
8ad35755
DH
2829}
2830
6852d7b6
DH
2831void kvm_s390_vcpu_start(struct kvm_vcpu *vcpu)
2832{
8ad35755
DH
2833 int i, online_vcpus, started_vcpus = 0;
2834
2835 if (!is_vcpu_stopped(vcpu))
2836 return;
2837
6852d7b6 2838 trace_kvm_s390_vcpu_start_stop(vcpu->vcpu_id, 1);
8ad35755 2839 /* Only one cpu at a time may enter/leave the STOPPED state. */
433b9ee4 2840 spin_lock(&vcpu->kvm->arch.start_stop_lock);
8ad35755
DH
2841 online_vcpus = atomic_read(&vcpu->kvm->online_vcpus);
2842
2843 for (i = 0; i < online_vcpus; i++) {
2844 if (!is_vcpu_stopped(vcpu->kvm->vcpus[i]))
2845 started_vcpus++;
2846 }
2847
2848 if (started_vcpus == 0) {
2849 /* we're the only active VCPU -> speed it up */
2850 __enable_ibs_on_vcpu(vcpu);
2851 } else if (started_vcpus == 1) {
2852 /*
2853 * As we are starting a second VCPU, we have to disable
2854 * the IBS facility on all VCPUs to remove potentially
 2855 * outstanding ENABLE requests.
2856 */
2857 __disable_ibs_on_all_vcpus(vcpu->kvm);
2858 }
2859
805de8f4 2860 atomic_andnot(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);
8ad35755
DH
2861 /*
2862 * Another VCPU might have used IBS while we were offline.
2863 * Let's play safe and flush the VCPU at startup.
2864 */
d3d692c8 2865 kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
433b9ee4 2866 spin_unlock(&vcpu->kvm->arch.start_stop_lock);
8ad35755 2867 return;
6852d7b6
DH
2868}
2869
2870void kvm_s390_vcpu_stop(struct kvm_vcpu *vcpu)
2871{
8ad35755
DH
2872 int i, online_vcpus, started_vcpus = 0;
2873 struct kvm_vcpu *started_vcpu = NULL;
2874
2875 if (is_vcpu_stopped(vcpu))
2876 return;
2877
6852d7b6 2878 trace_kvm_s390_vcpu_start_stop(vcpu->vcpu_id, 0);
8ad35755 2879 /* Only one cpu at a time may enter/leave the STOPPED state. */
433b9ee4 2880 spin_lock(&vcpu->kvm->arch.start_stop_lock);
8ad35755
DH
2881 online_vcpus = atomic_read(&vcpu->kvm->online_vcpus);
2882
32f5ff63 2883 /* SIGP STOP and SIGP STOP AND STORE STATUS has been fully processed */
6cddd432 2884 kvm_s390_clear_stop_irq(vcpu);
32f5ff63 2885
805de8f4 2886 atomic_or(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);
8ad35755
DH
2887 __disable_ibs_on_vcpu(vcpu);
2888
2889 for (i = 0; i < online_vcpus; i++) {
2890 if (!is_vcpu_stopped(vcpu->kvm->vcpus[i])) {
2891 started_vcpus++;
2892 started_vcpu = vcpu->kvm->vcpus[i];
2893 }
2894 }
2895
2896 if (started_vcpus == 1) {
2897 /*
2898 * As we only have one VCPU left, we want to enable the
2899 * IBS facility for that VCPU to speed it up.
2900 */
2901 __enable_ibs_on_vcpu(started_vcpu);
2902 }
2903
433b9ee4 2904 spin_unlock(&vcpu->kvm->arch.start_stop_lock);
8ad35755 2905 return;
6852d7b6
DH
2906}
2907
d6712df9
CH
2908static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
2909 struct kvm_enable_cap *cap)
2910{
2911 int r;
2912
2913 if (cap->flags)
2914 return -EINVAL;
2915
2916 switch (cap->cap) {
fa6b7fe9
CH
2917 case KVM_CAP_S390_CSS_SUPPORT:
2918 if (!vcpu->kvm->arch.css_support) {
2919 vcpu->kvm->arch.css_support = 1;
c92ea7b9 2920 VM_EVENT(vcpu->kvm, 3, "%s", "ENABLE: CSS support");
fa6b7fe9
CH
2921 trace_kvm_s390_enable_css(vcpu->kvm);
2922 }
2923 r = 0;
2924 break;
d6712df9
CH
2925 default:
2926 r = -EINVAL;
2927 break;
2928 }
2929 return r;
2930}
2931
41408c28
TH
2932static long kvm_s390_guest_mem_op(struct kvm_vcpu *vcpu,
2933 struct kvm_s390_mem_op *mop)
2934{
2935 void __user *uaddr = (void __user *)mop->buf;
2936 void *tmpbuf = NULL;
2937 int r, srcu_idx;
2938 const u64 supported_flags = KVM_S390_MEMOP_F_INJECT_EXCEPTION
2939 | KVM_S390_MEMOP_F_CHECK_ONLY;
2940
2941 if (mop->flags & ~supported_flags)
2942 return -EINVAL;
2943
2944 if (mop->size > MEM_OP_MAX_SIZE)
2945 return -E2BIG;
2946
2947 if (!(mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY)) {
2948 tmpbuf = vmalloc(mop->size);
2949 if (!tmpbuf)
2950 return -ENOMEM;
2951 }
2952
2953 srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
2954
2955 switch (mop->op) {
2956 case KVM_S390_MEMOP_LOGICAL_READ:
2957 if (mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY) {
92c96321
DH
2958 r = check_gva_range(vcpu, mop->gaddr, mop->ar,
2959 mop->size, GACC_FETCH);
41408c28
TH
2960 break;
2961 }
2962 r = read_guest(vcpu, mop->gaddr, mop->ar, tmpbuf, mop->size);
2963 if (r == 0) {
2964 if (copy_to_user(uaddr, tmpbuf, mop->size))
2965 r = -EFAULT;
2966 }
2967 break;
2968 case KVM_S390_MEMOP_LOGICAL_WRITE:
2969 if (mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY) {
92c96321
DH
2970 r = check_gva_range(vcpu, mop->gaddr, mop->ar,
2971 mop->size, GACC_STORE);
41408c28
TH
2972 break;
2973 }
2974 if (copy_from_user(tmpbuf, uaddr, mop->size)) {
2975 r = -EFAULT;
2976 break;
2977 }
2978 r = write_guest(vcpu, mop->gaddr, mop->ar, tmpbuf, mop->size);
2979 break;
2980 default:
2981 r = -EINVAL;
2982 }
2983
2984 srcu_read_unlock(&vcpu->kvm->srcu, srcu_idx);
2985
2986 if (r > 0 && (mop->flags & KVM_S390_MEMOP_F_INJECT_EXCEPTION) != 0)
2987 kvm_s390_inject_prog_irq(vcpu, &vcpu->arch.pgm);
2988
2989 vfree(tmpbuf);
2990 return r;
2991}
2992
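/*
 * A userspace sketch of driving kvm_s390_guest_mem_op(): read 'len' bytes
 * from a guest logical address. vcpu_fd is a placeholder; error handling
 * is elided.
 */
#include <linux/kvm.h>
#include <stdint.h>
#include <sys/ioctl.h>

static int guest_read(int vcpu_fd, uint64_t gaddr, void *buf, uint32_t len)
{
	struct kvm_s390_mem_op op = {
		.gaddr = gaddr,
		.size  = len,
		.op    = KVM_S390_MEMOP_LOGICAL_READ,
		.buf   = (uint64_t)(unsigned long)buf,
		.ar    = 0,	/* address space of access register 0 */
	};

	return ioctl(vcpu_fd, KVM_S390_MEM_OP, &op);
}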
b0c632db
HC
2993long kvm_arch_vcpu_ioctl(struct file *filp,
2994 unsigned int ioctl, unsigned long arg)
2995{
2996 struct kvm_vcpu *vcpu = filp->private_data;
2997 void __user *argp = (void __user *)arg;
800c1065 2998 int idx;
bc923cc9 2999 long r;
b0c632db 3000
93736624 3001 switch (ioctl) {
47b43c52
JF
3002 case KVM_S390_IRQ: {
3003 struct kvm_s390_irq s390irq;
3004
3005 r = -EFAULT;
3006 if (copy_from_user(&s390irq, argp, sizeof(s390irq)))
3007 break;
3008 r = kvm_s390_inject_vcpu(vcpu, &s390irq);
3009 break;
3010 }
93736624 3011 case KVM_S390_INTERRUPT: {
ba5c1e9b 3012 struct kvm_s390_interrupt s390int;
383d0b05 3013 struct kvm_s390_irq s390irq;
ba5c1e9b 3014
93736624 3015 r = -EFAULT;
ba5c1e9b 3016 if (copy_from_user(&s390int, argp, sizeof(s390int)))
93736624 3017 break;
383d0b05
JF
3018 if (s390int_to_s390irq(&s390int, &s390irq))
3019 return -EINVAL;
3020 r = kvm_s390_inject_vcpu(vcpu, &s390irq);
93736624 3021 break;
ba5c1e9b 3022 }
b0c632db 3023 case KVM_S390_STORE_STATUS:
800c1065 3024 idx = srcu_read_lock(&vcpu->kvm->srcu);
bc923cc9 3025 r = kvm_s390_vcpu_store_status(vcpu, arg);
800c1065 3026 srcu_read_unlock(&vcpu->kvm->srcu, idx);
bc923cc9 3027 break;
b0c632db
HC
3028 case KVM_S390_SET_INITIAL_PSW: {
3029 psw_t psw;
3030
bc923cc9 3031 r = -EFAULT;
b0c632db 3032 if (copy_from_user(&psw, argp, sizeof(psw)))
bc923cc9
AK
3033 break;
3034 r = kvm_arch_vcpu_ioctl_set_initial_psw(vcpu, psw);
3035 break;
b0c632db
HC
3036 }
3037 case KVM_S390_INITIAL_RESET:
bc923cc9
AK
3038 r = kvm_arch_vcpu_ioctl_initial_reset(vcpu);
3039 break;
14eebd91
CO
3040 case KVM_SET_ONE_REG:
3041 case KVM_GET_ONE_REG: {
3042 struct kvm_one_reg reg;
3043 r = -EFAULT;
3044 if (copy_from_user(&reg, argp, sizeof(reg)))
3045 break;
3046 if (ioctl == KVM_SET_ONE_REG)
3047 r = kvm_arch_vcpu_ioctl_set_one_reg(vcpu, &reg);
3048 else
3049 r = kvm_arch_vcpu_ioctl_get_one_reg(vcpu, &reg);
3050 break;
3051 }
27e0393f
CO
3052#ifdef CONFIG_KVM_S390_UCONTROL
3053 case KVM_S390_UCAS_MAP: {
3054 struct kvm_s390_ucas_mapping ucasmap;
3055
3056 if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
3057 r = -EFAULT;
3058 break;
3059 }
3060
3061 if (!kvm_is_ucontrol(vcpu->kvm)) {
3062 r = -EINVAL;
3063 break;
3064 }
3065
3066 r = gmap_map_segment(vcpu->arch.gmap, ucasmap.user_addr,
3067 ucasmap.vcpu_addr, ucasmap.length);
3068 break;
3069 }
3070 case KVM_S390_UCAS_UNMAP: {
3071 struct kvm_s390_ucas_mapping ucasmap;
3072
3073 if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
3074 r = -EFAULT;
3075 break;
3076 }
3077
3078 if (!kvm_is_ucontrol(vcpu->kvm)) {
3079 r = -EINVAL;
3080 break;
3081 }
3082
3083 r = gmap_unmap_segment(vcpu->arch.gmap, ucasmap.vcpu_addr,
3084 ucasmap.length);
3085 break;
3086 }
3087#endif
ccc7910f 3088 case KVM_S390_VCPU_FAULT: {
527e30b4 3089 r = gmap_fault(vcpu->arch.gmap, arg, 0);
ccc7910f
CO
3090 break;
3091 }
d6712df9
CH
3092 case KVM_ENABLE_CAP:
3093 {
3094 struct kvm_enable_cap cap;
3095 r = -EFAULT;
3096 if (copy_from_user(&cap, argp, sizeof(cap)))
3097 break;
3098 r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap);
3099 break;
3100 }
41408c28
TH
3101 case KVM_S390_MEM_OP: {
3102 struct kvm_s390_mem_op mem_op;
3103
3104 if (copy_from_user(&mem_op, argp, sizeof(mem_op)) == 0)
3105 r = kvm_s390_guest_mem_op(vcpu, &mem_op);
3106 else
3107 r = -EFAULT;
3108 break;
3109 }
816c7667
JF
3110 case KVM_S390_SET_IRQ_STATE: {
3111 struct kvm_s390_irq_state irq_state;
3112
3113 r = -EFAULT;
3114 if (copy_from_user(&irq_state, argp, sizeof(irq_state)))
3115 break;
3116 if (irq_state.len > VCPU_IRQS_MAX_BUF ||
3117 irq_state.len == 0 ||
3118 irq_state.len % sizeof(struct kvm_s390_irq) > 0) {
3119 r = -EINVAL;
3120 break;
3121 }
3122 r = kvm_s390_set_irq_state(vcpu,
3123 (void __user *) irq_state.buf,
3124 irq_state.len);
3125 break;
3126 }
3127 case KVM_S390_GET_IRQ_STATE: {
3128 struct kvm_s390_irq_state irq_state;
3129
3130 r = -EFAULT;
3131 if (copy_from_user(&irq_state, argp, sizeof(irq_state)))
3132 break;
3133 if (irq_state.len == 0) {
3134 r = -EINVAL;
3135 break;
3136 }
3137 r = kvm_s390_get_irq_state(vcpu,
3138 (__u8 __user *) irq_state.buf,
3139 irq_state.len);
3140 break;
3141 }
b0c632db 3142 default:
3e6afcf1 3143 r = -ENOTTY;
b0c632db 3144 }
bc923cc9 3145 return r;
b0c632db
HC
3146}
3147
5b1c1493
CO
3148int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
3149{
3150#ifdef CONFIG_KVM_S390_UCONTROL
3151 if ((vmf->pgoff == KVM_S390_SIE_PAGE_OFFSET)
3152 && (kvm_is_ucontrol(vcpu->kvm))) {
3153 vmf->page = virt_to_page(vcpu->arch.sie_block);
3154 get_page(vmf->page);
3155 return 0;
3156 }
3157#endif
3158 return VM_FAULT_SIGBUS;
3159}
3160
5587027c
AK
3161int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
3162 unsigned long npages)
db3fe4eb
TY
3163{
3164 return 0;
3165}
3166
b0c632db 3167/* Section: memory related */
f7784b8e
MT
3168int kvm_arch_prepare_memory_region(struct kvm *kvm,
3169 struct kvm_memory_slot *memslot,
09170a49 3170 const struct kvm_userspace_memory_region *mem,
7b6195a9 3171 enum kvm_mr_change change)
b0c632db 3172{
dd2887e7
NW
 3173 /* A few sanity checks. Memory slots have to start and end on a
 3174 segment boundary (1 MB). The memory in userland may be fragmented
 3175 into various different vmas. It is okay to mmap() and munmap() stuff
 3176 in this slot after doing this call at any time */
b0c632db 3177
598841ca 3178 if (mem->userspace_addr & 0xffffful)
b0c632db
HC
3179 return -EINVAL;
3180
598841ca 3181 if (mem->memory_size & 0xffffful)
b0c632db
HC
3182 return -EINVAL;
3183
a3a92c31
DD
3184 if (mem->guest_phys_addr + mem->memory_size > kvm->arch.mem_limit)
3185 return -EINVAL;
3186
f7784b8e
MT
3187 return 0;
3188}
3189
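/*
 * A userspace sketch of a memslot that satisfies the 1 MB alignment checks
 * above; vm_fd is a placeholder and host_mem must itself be 1 MB aligned,
 * e.g. from a suitably aligned mmap().
 */
#include <linux/kvm.h>
#include <sys/ioctl.h>

static int add_ram(int vm_fd, __u64 guest_phys, __u64 size, void *host_mem)
{
	struct kvm_userspace_memory_region mr = {
		.slot            = 0,
		.guest_phys_addr = guest_phys,	/* multiple of 1 MB */
		.memory_size     = size,	/* multiple of 1 MB */
		.userspace_addr  = (__u64)(unsigned long)host_mem,
	};

	return ioctl(vm_fd, KVM_SET_USER_MEMORY_REGION, &mr);
}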
3190void kvm_arch_commit_memory_region(struct kvm *kvm,
09170a49 3191 const struct kvm_userspace_memory_region *mem,
8482644a 3192 const struct kvm_memory_slot *old,
f36f3f28 3193 const struct kvm_memory_slot *new,
8482644a 3194 enum kvm_mr_change change)
f7784b8e 3195{
f7850c92 3196 int rc;
f7784b8e 3197
2cef4deb
CB
3198 /* If the basics of the memslot do not change, we do not want
3199 * to update the gmap. Every update causes several unnecessary
3200 * segment translation exceptions. This is usually handled just
3201 * fine by the normal fault handler + gmap, but it will also
3202 * cause faults on the prefix page of running guest CPUs.
3203 */
3204 if (old->userspace_addr == mem->userspace_addr &&
3205 old->base_gfn * PAGE_SIZE == mem->guest_phys_addr &&
3206 old->npages * PAGE_SIZE == mem->memory_size)
3207 return;
598841ca
CO
3208
3209 rc = gmap_map_segment(kvm->arch.gmap, mem->userspace_addr,
3210 mem->guest_phys_addr, mem->memory_size);
3211 if (rc)
ea2cdd27 3212 pr_warn("failed to commit memory region\n");
598841ca 3213 return;
b0c632db
HC
3214}
3215
60a37709
AY
3216static inline unsigned long nonhyp_mask(int i)
3217{
3218 unsigned int nonhyp_fai = (sclp.hmfai << i * 2) >> 30;
3219
3220 return 0x0000ffffffffffffUL >> (nonhyp_fai << 4);
3221}
3222
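/*
 * nonhyp_mask() extracts the i-th two-bit field of sclp.hmfai and widens it
 * to a 16-bit-granular mask over the 48 reported facility bits. Worked
 * example for field 0 with hmfai = 0x40000000 (field value 1):
 *
 *	fai  = (0x40000000u << 0) >> 30;		// 1
 *	mask = 0x0000ffffffffffffUL >> (1 << 4);	// 0x00000000ffffffff
 */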
3491caf2
CB
3223void kvm_arch_vcpu_block_finish(struct kvm_vcpu *vcpu)
3224{
3225 vcpu->valid_wakeup = false;
3226}
3227
b0c632db
HC
3228static int __init kvm_s390_init(void)
3229{
60a37709
AY
3230 int i;
3231
07197fd0
DH
3232 if (!sclp.has_sief2) {
3233 pr_info("SIE not available\n");
3234 return -ENODEV;
3235 }
3236
60a37709
AY
3237 for (i = 0; i < 16; i++)
3238 kvm_s390_fac_list_mask[i] |=
3239 S390_lowcore.stfle_fac_list[i] & nonhyp_mask(i);
3240
9d8d5786 3241 return kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
b0c632db
HC
3242}
3243
3244static void __exit kvm_s390_exit(void)
3245{
3246 kvm_exit();
3247}
3248
3249module_init(kvm_s390_init);
3250module_exit(kvm_s390_exit);
566af940
CH
3251
3252/*
3253 * Enable autoloading of the kvm module.
3254 * Note that we add the module alias here instead of virt/kvm/kvm_main.c
3255 * since x86 takes a different approach.
3256 */
3257#include <linux/miscdevice.h>
3258MODULE_ALIAS_MISCDEV(KVM_MINOR);
3259MODULE_ALIAS("devname:kvm");