// SPDX-License-Identifier: GPL-2.0
/*
 * hosting IBM Z kernel virtual machines (s390x)
 *
 * Copyright IBM Corp. 2008, 2018
 *
 * Author(s): Carsten Otte <cotte@de.ibm.com>
 *            Christian Borntraeger <borntraeger@de.ibm.com>
 *            Heiko Carstens <heiko.carstens@de.ibm.com>
 *            Christian Ehrhardt <ehrhardt@de.ibm.com>
 *            Jason J. Herne <jjherne@us.ibm.com>
 */

#include <linux/compiler.h>
#include <linux/err.h>
#include <linux/fs.h>
#include <linux/hrtimer.h>
#include <linux/init.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/mman.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/timer.h>
#include <linux/vmalloc.h>
#include <linux/bitmap.h>
#include <linux/sched/signal.h>
#include <linux/string.h>

#include <asm/asm-offsets.h>
#include <asm/lowcore.h>
#include <asm/stp.h>
#include <asm/pgtable.h>
#include <asm/gmap.h>
#include <asm/nmi.h>
#include <asm/switch_to.h>
#include <asm/isc.h>
#include <asm/sclp.h>
#include <asm/cpacf.h>
#include <asm/timex.h>
#include <asm/ap.h>
#include "kvm-s390.h"
#include "gaccess.h"

#define KMSG_COMPONENT "kvm-s390"
#undef pr_fmt
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#define CREATE_TRACE_POINTS
#include "trace.h"
#include "trace-s390.h"

#define MEM_OP_MAX_SIZE 65536	/* Maximum transfer size for KVM_S390_MEM_OP */
#define LOCAL_IRQS 32
#define VCPU_IRQS_MAX_BUF (sizeof(struct kvm_s390_irq) * \
			   (KVM_MAX_VCPUS + LOCAL_IRQS))

#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU

struct kvm_stats_debugfs_item debugfs_entries[] = {
	{ "userspace_handled", VCPU_STAT(exit_userspace) },
	{ "exit_null", VCPU_STAT(exit_null) },
	{ "exit_validity", VCPU_STAT(exit_validity) },
	{ "exit_stop_request", VCPU_STAT(exit_stop_request) },
	{ "exit_external_request", VCPU_STAT(exit_external_request) },
	{ "exit_external_interrupt", VCPU_STAT(exit_external_interrupt) },
	{ "exit_instruction", VCPU_STAT(exit_instruction) },
	{ "exit_pei", VCPU_STAT(exit_pei) },
	{ "exit_program_interruption", VCPU_STAT(exit_program_interruption) },
	{ "exit_instr_and_program_int", VCPU_STAT(exit_instr_and_program) },
	{ "exit_operation_exception", VCPU_STAT(exit_operation_exception) },
	{ "halt_successful_poll", VCPU_STAT(halt_successful_poll) },
	{ "halt_attempted_poll", VCPU_STAT(halt_attempted_poll) },
	{ "halt_poll_invalid", VCPU_STAT(halt_poll_invalid) },
	{ "halt_wakeup", VCPU_STAT(halt_wakeup) },
	{ "instruction_lctlg", VCPU_STAT(instruction_lctlg) },
	{ "instruction_lctl", VCPU_STAT(instruction_lctl) },
	{ "instruction_stctl", VCPU_STAT(instruction_stctl) },
	{ "instruction_stctg", VCPU_STAT(instruction_stctg) },
	{ "deliver_emergency_signal", VCPU_STAT(deliver_emergency_signal) },
	{ "deliver_external_call", VCPU_STAT(deliver_external_call) },
	{ "deliver_service_signal", VCPU_STAT(deliver_service_signal) },
	{ "deliver_virtio_interrupt", VCPU_STAT(deliver_virtio_interrupt) },
	{ "deliver_stop_signal", VCPU_STAT(deliver_stop_signal) },
	{ "deliver_prefix_signal", VCPU_STAT(deliver_prefix_signal) },
	{ "deliver_restart_signal", VCPU_STAT(deliver_restart_signal) },
	{ "deliver_program_interruption", VCPU_STAT(deliver_program_int) },
	{ "exit_wait_state", VCPU_STAT(exit_wait_state) },
	{ "instruction_epsw", VCPU_STAT(instruction_epsw) },
	{ "instruction_gs", VCPU_STAT(instruction_gs) },
	{ "instruction_io_other", VCPU_STAT(instruction_io_other) },
	{ "instruction_lpsw", VCPU_STAT(instruction_lpsw) },
	{ "instruction_lpswe", VCPU_STAT(instruction_lpswe) },
	{ "instruction_pfmf", VCPU_STAT(instruction_pfmf) },
	{ "instruction_ptff", VCPU_STAT(instruction_ptff) },
	{ "instruction_stidp", VCPU_STAT(instruction_stidp) },
	{ "instruction_sck", VCPU_STAT(instruction_sck) },
	{ "instruction_sckpf", VCPU_STAT(instruction_sckpf) },
	{ "instruction_spx", VCPU_STAT(instruction_spx) },
	{ "instruction_stpx", VCPU_STAT(instruction_stpx) },
	{ "instruction_stap", VCPU_STAT(instruction_stap) },
	{ "instruction_iske", VCPU_STAT(instruction_iske) },
	{ "instruction_ri", VCPU_STAT(instruction_ri) },
	{ "instruction_rrbe", VCPU_STAT(instruction_rrbe) },
	{ "instruction_sske", VCPU_STAT(instruction_sske) },
	{ "instruction_ipte_interlock", VCPU_STAT(instruction_ipte_interlock) },
	{ "instruction_essa", VCPU_STAT(instruction_essa) },
	{ "instruction_stsi", VCPU_STAT(instruction_stsi) },
	{ "instruction_stfl", VCPU_STAT(instruction_stfl) },
	{ "instruction_tb", VCPU_STAT(instruction_tb) },
	{ "instruction_tpi", VCPU_STAT(instruction_tpi) },
	{ "instruction_tprot", VCPU_STAT(instruction_tprot) },
	{ "instruction_tsch", VCPU_STAT(instruction_tsch) },
	{ "instruction_sthyi", VCPU_STAT(instruction_sthyi) },
	{ "instruction_sie", VCPU_STAT(instruction_sie) },
	{ "instruction_sigp_sense", VCPU_STAT(instruction_sigp_sense) },
	{ "instruction_sigp_sense_running", VCPU_STAT(instruction_sigp_sense_running) },
	{ "instruction_sigp_external_call", VCPU_STAT(instruction_sigp_external_call) },
	{ "instruction_sigp_emergency", VCPU_STAT(instruction_sigp_emergency) },
	{ "instruction_sigp_cond_emergency", VCPU_STAT(instruction_sigp_cond_emergency) },
	{ "instruction_sigp_start", VCPU_STAT(instruction_sigp_start) },
	{ "instruction_sigp_stop", VCPU_STAT(instruction_sigp_stop) },
	{ "instruction_sigp_stop_store_status", VCPU_STAT(instruction_sigp_stop_store_status) },
	{ "instruction_sigp_store_status", VCPU_STAT(instruction_sigp_store_status) },
	{ "instruction_sigp_store_adtl_status", VCPU_STAT(instruction_sigp_store_adtl_status) },
	{ "instruction_sigp_set_arch", VCPU_STAT(instruction_sigp_arch) },
	{ "instruction_sigp_set_prefix", VCPU_STAT(instruction_sigp_prefix) },
	{ "instruction_sigp_restart", VCPU_STAT(instruction_sigp_restart) },
	{ "instruction_sigp_cpu_reset", VCPU_STAT(instruction_sigp_cpu_reset) },
	{ "instruction_sigp_init_cpu_reset", VCPU_STAT(instruction_sigp_init_cpu_reset) },
	{ "instruction_sigp_unknown", VCPU_STAT(instruction_sigp_unknown) },
	{ "instruction_diag_10", VCPU_STAT(diagnose_10) },
	{ "instruction_diag_44", VCPU_STAT(diagnose_44) },
	{ "instruction_diag_9c", VCPU_STAT(diagnose_9c) },
	{ "instruction_diag_258", VCPU_STAT(diagnose_258) },
	{ "instruction_diag_308", VCPU_STAT(diagnose_308) },
	{ "instruction_diag_500", VCPU_STAT(diagnose_500) },
	{ "instruction_diag_other", VCPU_STAT(diagnose_other) },
	{ NULL }
};

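/*
 * Annotation: this layout is assumed to mirror the 16-byte result of
 * STORE CLOCK EXTENDED (STCKE) - one epoch-index byte followed by the
 * 64-bit TOD value; the trailing bits (including the programmable field)
 * are unused here. With the multiple-epoch facility (139) the epoch index
 * extends the TOD clock beyond its 64-bit wraparound.
 */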
struct kvm_s390_tod_clock_ext {
	__u8 epoch_idx;
	__u64 tod;
	__u8 reserved[7];
} __packed;

/* allow nested virtualization in KVM (if enabled by user space) */
static int nested;
module_param(nested, int, S_IRUGO);
MODULE_PARM_DESC(nested, "Nested virtualization support");

/*
 * For now we handle at most 16 double words as this is what the s390 base
 * kernel handles and stores in the prefix page. If we ever need to go
 * beyond this, it requires code changes, but the external uapi can stay.
 */
#define SIZE_INTERNAL 16

/*
 * Base feature mask that defines the default mask for facilities. Consists
 * of the defines in FACILITIES_KVM and the non-hypervisor-managed bits.
 */
static unsigned long kvm_s390_fac_base[SIZE_INTERNAL] = { FACILITIES_KVM };
/*
 * Extended feature mask. Consists of the defines in FACILITIES_KVM_CPUMODEL
 * and defines the facilities that can be enabled via a cpu model.
 */
static unsigned long kvm_s390_fac_ext[SIZE_INTERNAL] = { FACILITIES_KVM_CPUMODEL };

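/*
 * Note: each bit in these arrays corresponds to an STFLE facility bit. A
 * facility is only visible to a guest if it ends up set in both the per-VM
 * facility mask and facility list (see test_kvm_facility()), e.g. bit 129
 * for the vector facility.
 */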
static unsigned long kvm_s390_fac_size(void)
{
	BUILD_BUG_ON(SIZE_INTERNAL > S390_ARCH_FAC_MASK_SIZE_U64);
	BUILD_BUG_ON(SIZE_INTERNAL > S390_ARCH_FAC_LIST_SIZE_U64);
	BUILD_BUG_ON(SIZE_INTERNAL * sizeof(unsigned long) >
		     sizeof(S390_lowcore.stfle_fac_list));

	return SIZE_INTERNAL;
}

/* available cpu features supported by kvm */
static DECLARE_BITMAP(kvm_s390_available_cpu_feat, KVM_S390_VM_CPU_FEAT_NR_BITS);
/* available subfunctions indicated via query / "test bit" */
static struct kvm_s390_vm_cpu_subfunc kvm_s390_available_subfunc;

static struct gmap_notifier gmap_notifier;
static struct gmap_notifier vsie_gmap_notifier;
debug_info_t *kvm_s390_dbf;

/* Section: not file related */
int kvm_arch_hardware_enable(void)
{
	/* every s390 is virtualization enabled ;-) */
	return 0;
}

static void kvm_gmap_notifier(struct gmap *gmap, unsigned long start,
			      unsigned long end);

static void kvm_clock_sync_scb(struct kvm_s390_sie_block *scb, u64 delta)
{
	u8 delta_idx = 0;

	/*
	 * The TOD jumps by delta, we have to compensate this by adding
	 * -delta to the epoch.
	 */
	delta = -delta;

	/* sign-extension - we're adding to signed values below */
	if ((s64)delta < 0)
		delta_idx = -1;

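	/*
	 * Add delta to the 72-bit epoch (epdx:epoch): delta_idx propagates
	 * the sign into the high byte, and the unsigned compare below
	 * detects a carry out of the low 64 bits after the addition.
	 */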
	scb->epoch += delta;
	if (scb->ecd & ECD_MEF) {
		scb->epdx += delta_idx;
		if (scb->epoch < delta)
			scb->epdx += 1;
	}
}

/*
 * This callback is executed during stop_machine(). All CPUs are therefore
 * temporarily stopped. In order not to change guest behavior, we have to
 * disable preemption whenever we touch the epoch of kvm and the VCPUs,
 * so a CPU won't be stopped while calculating with the epoch.
 */
static int kvm_clock_sync(struct notifier_block *notifier, unsigned long val,
			  void *v)
{
	struct kvm *kvm;
	struct kvm_vcpu *vcpu;
	int i;
	unsigned long long *delta = v;

	list_for_each_entry(kvm, &vm_list, vm_list) {
		kvm_for_each_vcpu(i, vcpu, kvm) {
			kvm_clock_sync_scb(vcpu->arch.sie_block, *delta);
			if (i == 0) {
				kvm->arch.epoch = vcpu->arch.sie_block->epoch;
				kvm->arch.epdx = vcpu->arch.sie_block->epdx;
			}
			if (vcpu->arch.cputm_enabled)
				vcpu->arch.cputm_start += *delta;
			if (vcpu->arch.vsie_block)
				kvm_clock_sync_scb(vcpu->arch.vsie_block,
						   *delta);
		}
	}
	return NOTIFY_OK;
}

static struct notifier_block kvm_clock_notifier = {
	.notifier_call = kvm_clock_sync,
};

int kvm_arch_hardware_setup(void)
{
	gmap_notifier.notifier_call = kvm_gmap_notifier;
	gmap_register_pte_notifier(&gmap_notifier);
	vsie_gmap_notifier.notifier_call = kvm_s390_vsie_gmap_notifier;
	gmap_register_pte_notifier(&vsie_gmap_notifier);
	atomic_notifier_chain_register(&s390_epoch_delta_notifier,
				       &kvm_clock_notifier);
	return 0;
}

void kvm_arch_hardware_unsetup(void)
{
	gmap_unregister_pte_notifier(&gmap_notifier);
	gmap_unregister_pte_notifier(&vsie_gmap_notifier);
	atomic_notifier_chain_unregister(&s390_epoch_delta_notifier,
					 &kvm_clock_notifier);
}

static void allow_cpu_feat(unsigned long nr)
{
	set_bit_inv(nr, kvm_s390_available_cpu_feat);
}

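/*
 * PERFORM LOCKED OPERATION (PLO) query: adding 0x100 on top of the
 * function code in GR0 selects the "test bit" mode, which only asks
 * whether that function code is implemented; condition code 0 means it is.
 */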
static inline int plo_test_bit(unsigned char nr)
{
	register unsigned long r0 asm("0") = (unsigned long) nr | 0x100;
	int cc;

	asm volatile(
		/* Parameter registers are ignored for "test bit" */
		"	plo	0,0,0,0(0)\n"
		"	ipm	%0\n"
		"	srl	%0,28\n"
		: "=d" (cc)
		: "d" (r0)
		: "cc");
	return cc == 0;
}

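/*
 * Generic query for 32-bit opcodes that follow the usual query convention:
 * function code 0 in GR0 selects the query function, and GR1 points to the
 * parameter block that receives the bitmap of available functions.
 */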
static inline void __insn32_query(unsigned int opcode, u8 query[32])
{
	register unsigned long r0 asm("0") = 0;	/* query function */
	register unsigned long r1 asm("1") = (unsigned long) query;

	asm volatile(
		/* Parameter regs are ignored */
		"	.insn	rrf,%[opc] << 16,2,4,6,0\n"
		: "=m" (*query)
		: "d" (r0), "a" (r1), [opc] "i" (opcode)
		: "cc");
}

#define INSN_SORTL 0xb938
#define INSN_DFLTCC 0xb939

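/*
 * Probe the host once at module load: query the available PLO, PTFF and
 * CPACF subfunctions as well as the SIE facilities, recording what may
 * later be offered to guests via the CPU model interface.
 */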
static void kvm_s390_cpu_feat_init(void)
{
	int i;

	for (i = 0; i < 256; ++i) {
		if (plo_test_bit(i))
			kvm_s390_available_subfunc.plo[i >> 3] |= 0x80 >> (i & 7);
	}

	if (test_facility(28)) /* TOD-clock steering */
		ptff(kvm_s390_available_subfunc.ptff,
		     sizeof(kvm_s390_available_subfunc.ptff),
		     PTFF_QAF);

	if (test_facility(17)) { /* MSA */
		__cpacf_query(CPACF_KMAC, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.kmac);
		__cpacf_query(CPACF_KMC, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.kmc);
		__cpacf_query(CPACF_KM, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.km);
		__cpacf_query(CPACF_KIMD, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.kimd);
		__cpacf_query(CPACF_KLMD, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.klmd);
	}
	if (test_facility(76)) /* MSA3 */
		__cpacf_query(CPACF_PCKMO, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.pckmo);
	if (test_facility(77)) { /* MSA4 */
		__cpacf_query(CPACF_KMCTR, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.kmctr);
		__cpacf_query(CPACF_KMF, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.kmf);
		__cpacf_query(CPACF_KMO, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.kmo);
		__cpacf_query(CPACF_PCC, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.pcc);
	}
	if (test_facility(57)) /* MSA5 */
		__cpacf_query(CPACF_PRNO, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.ppno);

	if (test_facility(146)) /* MSA8 */
		__cpacf_query(CPACF_KMA, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.kma);

	if (test_facility(155)) /* MSA9 */
		__cpacf_query(CPACF_KDSA, (cpacf_mask_t *)
			      kvm_s390_available_subfunc.kdsa);

	if (test_facility(150)) /* SORTL */
		__insn32_query(INSN_SORTL, kvm_s390_available_subfunc.sortl);

	if (test_facility(151)) /* DFLTCC */
		__insn32_query(INSN_DFLTCC, kvm_s390_available_subfunc.dfltcc);

	if (MACHINE_HAS_ESOP)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_ESOP);
	/*
	 * We need SIE support, ESOP (PROT_READ protection for gmap_shadow),
	 * 64bit SCAO (SCA passthrough) and IDTE (for gmap_shadow unshadowing).
	 */
	if (!sclp.has_sief2 || !MACHINE_HAS_ESOP || !sclp.has_64bscao ||
	    !test_facility(3) || !nested)
		return;
	allow_cpu_feat(KVM_S390_VM_CPU_FEAT_SIEF2);
	if (sclp.has_64bscao)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_64BSCAO);
	if (sclp.has_siif)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_SIIF);
	if (sclp.has_gpere)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_GPERE);
	if (sclp.has_gsls)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_GSLS);
	if (sclp.has_ib)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_IB);
	if (sclp.has_cei)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_CEI);
	if (sclp.has_ibs)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_IBS);
	if (sclp.has_kss)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_KSS);
	/*
	 * KVM_S390_VM_CPU_FEAT_SKEY: Wrong shadow of PTE.I bits will make
	 * all skey handling functions read/set the skey from the PGSTE
	 * instead of the real storage key.
	 *
	 * KVM_S390_VM_CPU_FEAT_CMMA: Wrong shadow of PTE.I bits will make
	 * pages being detected as preserved although they are resident.
	 *
	 * KVM_S390_VM_CPU_FEAT_PFMFI: Wrong shadow of PTE.I bits will
	 * have the same effect as for KVM_S390_VM_CPU_FEAT_SKEY.
	 *
	 * For KVM_S390_VM_CPU_FEAT_SKEY, KVM_S390_VM_CPU_FEAT_CMMA and
	 * KVM_S390_VM_CPU_FEAT_PFMFI, all PTE.I and PGSTE bits have to be
	 * correctly shadowed. We can do that for the PGSTE but not for PTE.I.
	 *
	 * KVM_S390_VM_CPU_FEAT_SIGPIF: Wrong SCB addresses in the SCA. We
	 * cannot easily shadow the SCA because of the ipte lock.
	 */
}

int kvm_arch_init(void *opaque)
{
	int rc;

	kvm_s390_dbf = debug_register("kvm-trace", 32, 1, 7 * sizeof(long));
	if (!kvm_s390_dbf)
		return -ENOMEM;

	if (debug_register_view(kvm_s390_dbf, &debug_sprintf_view)) {
		rc = -ENOMEM;
		goto out_debug_unreg;
	}

	kvm_s390_cpu_feat_init();

	/* Register floating interrupt controller interface. */
	rc = kvm_register_device_ops(&kvm_flic_ops, KVM_DEV_TYPE_FLIC);
	if (rc) {
		pr_err("Failed to register FLIC rc=%d\n", rc);
		goto out_debug_unreg;
	}
	return 0;

out_debug_unreg:
	debug_unregister(kvm_s390_dbf);
	return rc;
}

void kvm_arch_exit(void)
{
	debug_unregister(kvm_s390_dbf);
}

/* Section: device related */
long kvm_arch_dev_ioctl(struct file *filp,
			unsigned int ioctl, unsigned long arg)
{
	if (ioctl == KVM_S390_ENABLE_SIE)
		return s390_enable_sie();
	return -EINVAL;
}

int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
{
	int r;

	switch (ext) {
	case KVM_CAP_S390_PSW:
	case KVM_CAP_S390_GMAP:
	case KVM_CAP_SYNC_MMU:
#ifdef CONFIG_KVM_S390_UCONTROL
	case KVM_CAP_S390_UCONTROL:
#endif
	case KVM_CAP_ASYNC_PF:
	case KVM_CAP_SYNC_REGS:
	case KVM_CAP_ONE_REG:
	case KVM_CAP_ENABLE_CAP:
	case KVM_CAP_S390_CSS_SUPPORT:
	case KVM_CAP_IOEVENTFD:
	case KVM_CAP_DEVICE_CTRL:
	case KVM_CAP_ENABLE_CAP_VM:
	case KVM_CAP_S390_IRQCHIP:
	case KVM_CAP_VM_ATTRIBUTES:
	case KVM_CAP_MP_STATE:
	case KVM_CAP_IMMEDIATE_EXIT:
	case KVM_CAP_S390_INJECT_IRQ:
	case KVM_CAP_S390_USER_SIGP:
	case KVM_CAP_S390_USER_STSI:
	case KVM_CAP_S390_SKEYS:
	case KVM_CAP_S390_IRQ_STATE:
	case KVM_CAP_S390_USER_INSTR0:
	case KVM_CAP_S390_CMMA_MIGRATION:
	case KVM_CAP_S390_AIS:
	case KVM_CAP_S390_AIS_MIGRATION:
		r = 1;
		break;
	case KVM_CAP_S390_MEM_OP:
		r = MEM_OP_MAX_SIZE;
		break;
	case KVM_CAP_NR_VCPUS:
	case KVM_CAP_MAX_VCPUS:
	case KVM_CAP_MAX_VCPU_ID:
		r = KVM_S390_BSCA_CPU_SLOTS;
		if (!kvm_s390_use_sca_entries())
			r = KVM_MAX_VCPUS;
		else if (sclp.has_esca && sclp.has_64bscao)
			r = KVM_S390_ESCA_CPU_SLOTS;
		break;
	case KVM_CAP_NR_MEMSLOTS:
		r = KVM_USER_MEM_SLOTS;
		break;
	case KVM_CAP_S390_COW:
		r = MACHINE_HAS_ESOP;
		break;
	case KVM_CAP_S390_VECTOR_REGISTERS:
		r = MACHINE_HAS_VX;
		break;
	case KVM_CAP_S390_RI:
		r = test_facility(64);
		break;
	case KVM_CAP_S390_GS:
		r = test_facility(133);
		break;
	case KVM_CAP_S390_BPB:
		r = test_facility(82);
		break;
	default:
		r = 0;
	}
	return r;
}

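/*
 * Transfer the per-page dirty state from the host page tables of the gmap
 * into KVM's dirty bitmap for this memslot. Slots can be huge, so check
 * for fatal signals and reschedule between pages.
 */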
static void kvm_s390_sync_dirty_log(struct kvm *kvm,
				    struct kvm_memory_slot *memslot)
{
	gfn_t cur_gfn, last_gfn;
	unsigned long address;
	struct gmap *gmap = kvm->arch.gmap;

	/* Loop over all guest pages */
	last_gfn = memslot->base_gfn + memslot->npages;
	for (cur_gfn = memslot->base_gfn; cur_gfn <= last_gfn; cur_gfn++) {
		address = gfn_to_hva_memslot(memslot, cur_gfn);

		if (test_and_clear_guest_dirty(gmap->mm, address))
			mark_page_dirty(kvm, cur_gfn);
		if (fatal_signal_pending(current))
			return;
		cond_resched();
	}
}

/* Section: vm related */
static void sca_del_vcpu(struct kvm_vcpu *vcpu);

/*
 * Get (and clear) the dirty memory log for a memory slot.
 */
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
			       struct kvm_dirty_log *log)
{
	int r;
	unsigned long n;
	struct kvm_memslots *slots;
	struct kvm_memory_slot *memslot;
	int is_dirty = 0;

	if (kvm_is_ucontrol(kvm))
		return -EINVAL;

	mutex_lock(&kvm->slots_lock);

	r = -EINVAL;
	if (log->slot >= KVM_USER_MEM_SLOTS)
		goto out;

	slots = kvm_memslots(kvm);
	memslot = id_to_memslot(slots, log->slot);
	r = -ENOENT;
	if (!memslot->dirty_bitmap)
		goto out;

	kvm_s390_sync_dirty_log(kvm, memslot);
	r = kvm_get_dirty_log(kvm, log, &is_dirty);
	if (r)
		goto out;

	/* Clear the dirty log */
	if (is_dirty) {
		n = kvm_dirty_bitmap_bytes(memslot);
		memset(memslot->dirty_bitmap, 0, n);
	}
	r = 0;
out:
	mutex_unlock(&kvm->slots_lock);
	return r;
}

static void icpt_operexc_on_all_vcpus(struct kvm *kvm)
{
	unsigned int i;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(i, vcpu, kvm) {
		kvm_s390_sync_request(KVM_REQ_ICPT_OPEREXC, vcpu);
	}
}

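/*
 * Capabilities that map to facility bits (vector, RI, AIS, GS) flip those
 * bits in the VM's CPU model, so they can only be enabled before the first
 * vCPU exists; hence the created_vcpus checks under kvm->lock below.
 */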
static int kvm_vm_ioctl_enable_cap(struct kvm *kvm, struct kvm_enable_cap *cap)
{
	int r;

	if (cap->flags)
		return -EINVAL;

	switch (cap->cap) {
	case KVM_CAP_S390_IRQCHIP:
		VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_IRQCHIP");
		kvm->arch.use_irqchip = 1;
		r = 0;
		break;
	case KVM_CAP_S390_USER_SIGP:
		VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_USER_SIGP");
		kvm->arch.user_sigp = 1;
		r = 0;
		break;
	case KVM_CAP_S390_VECTOR_REGISTERS:
		mutex_lock(&kvm->lock);
		if (kvm->created_vcpus) {
			r = -EBUSY;
		} else if (MACHINE_HAS_VX) {
			set_kvm_facility(kvm->arch.model.fac_mask, 129);
			set_kvm_facility(kvm->arch.model.fac_list, 129);
			if (test_facility(134)) {
				set_kvm_facility(kvm->arch.model.fac_mask, 134);
				set_kvm_facility(kvm->arch.model.fac_list, 134);
			}
			if (test_facility(135)) {
				set_kvm_facility(kvm->arch.model.fac_mask, 135);
				set_kvm_facility(kvm->arch.model.fac_list, 135);
			}
			if (test_facility(148)) {
				set_kvm_facility(kvm->arch.model.fac_mask, 148);
				set_kvm_facility(kvm->arch.model.fac_list, 148);
			}
			if (test_facility(152)) {
				set_kvm_facility(kvm->arch.model.fac_mask, 152);
				set_kvm_facility(kvm->arch.model.fac_list, 152);
			}
			r = 0;
		} else
			r = -EINVAL;
		mutex_unlock(&kvm->lock);
		VM_EVENT(kvm, 3, "ENABLE: CAP_S390_VECTOR_REGISTERS %s",
			 r ? "(not available)" : "(success)");
		break;
	case KVM_CAP_S390_RI:
		r = -EINVAL;
		mutex_lock(&kvm->lock);
		if (kvm->created_vcpus) {
			r = -EBUSY;
		} else if (test_facility(64)) {
			set_kvm_facility(kvm->arch.model.fac_mask, 64);
			set_kvm_facility(kvm->arch.model.fac_list, 64);
			r = 0;
		}
		mutex_unlock(&kvm->lock);
		VM_EVENT(kvm, 3, "ENABLE: CAP_S390_RI %s",
			 r ? "(not available)" : "(success)");
		break;
	case KVM_CAP_S390_AIS:
		mutex_lock(&kvm->lock);
		if (kvm->created_vcpus) {
			r = -EBUSY;
		} else {
			set_kvm_facility(kvm->arch.model.fac_mask, 72);
			set_kvm_facility(kvm->arch.model.fac_list, 72);
			r = 0;
		}
		mutex_unlock(&kvm->lock);
		VM_EVENT(kvm, 3, "ENABLE: AIS %s",
			 r ? "(not available)" : "(success)");
		break;
	case KVM_CAP_S390_GS:
		r = -EINVAL;
		mutex_lock(&kvm->lock);
		if (kvm->created_vcpus) {
			r = -EBUSY;
		} else if (test_facility(133)) {
			set_kvm_facility(kvm->arch.model.fac_mask, 133);
			set_kvm_facility(kvm->arch.model.fac_list, 133);
			r = 0;
		}
		mutex_unlock(&kvm->lock);
		VM_EVENT(kvm, 3, "ENABLE: CAP_S390_GS %s",
			 r ? "(not available)" : "(success)");
		break;
	case KVM_CAP_S390_USER_STSI:
		VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_USER_STSI");
		kvm->arch.user_stsi = 1;
		r = 0;
		break;
	case KVM_CAP_S390_USER_INSTR0:
		VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_USER_INSTR0");
		kvm->arch.user_instr0 = 1;
		icpt_operexc_on_all_vcpus(kvm);
		r = 0;
		break;
	default:
		r = -EINVAL;
		break;
	}
	return r;
}

static int kvm_s390_get_mem_control(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	switch (attr->attr) {
	case KVM_S390_VM_MEM_LIMIT_SIZE:
		ret = 0;
		VM_EVENT(kvm, 3, "QUERY: max guest memory: %lu bytes",
			 kvm->arch.mem_limit);
		if (put_user(kvm->arch.mem_limit, (u64 __user *)attr->addr))
			ret = -EFAULT;
		break;
	default:
		ret = -ENXIO;
		break;
	}
	return ret;
}

static int kvm_s390_set_mem_control(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;
	unsigned int idx;

	switch (attr->attr) {
	case KVM_S390_VM_MEM_ENABLE_CMMA:
		ret = -ENXIO;
		if (!sclp.has_cmma)
			break;

		ret = -EBUSY;
		VM_EVENT(kvm, 3, "%s", "ENABLE: CMMA support");
		mutex_lock(&kvm->lock);
		if (!kvm->created_vcpus) {
			kvm->arch.use_cmma = 1;
			ret = 0;
		}
		mutex_unlock(&kvm->lock);
		break;
	case KVM_S390_VM_MEM_CLR_CMMA:
		ret = -ENXIO;
		if (!sclp.has_cmma)
			break;
		ret = -EINVAL;
		if (!kvm->arch.use_cmma)
			break;

		VM_EVENT(kvm, 3, "%s", "RESET: CMMA states");
		mutex_lock(&kvm->lock);
		idx = srcu_read_lock(&kvm->srcu);
		s390_reset_cmma(kvm->arch.gmap->mm);
		srcu_read_unlock(&kvm->srcu, idx);
		mutex_unlock(&kvm->lock);
		ret = 0;
		break;
	case KVM_S390_VM_MEM_LIMIT_SIZE: {
		unsigned long new_limit;

		if (kvm_is_ucontrol(kvm))
			return -EINVAL;

		if (get_user(new_limit, (u64 __user *)attr->addr))
			return -EFAULT;

		if (kvm->arch.mem_limit != KVM_S390_NO_MEM_LIMIT &&
		    new_limit > kvm->arch.mem_limit)
			return -E2BIG;

		if (!new_limit)
			return -EINVAL;

		/* gmap_create takes last usable address */
		if (new_limit != KVM_S390_NO_MEM_LIMIT)
			new_limit -= 1;

		ret = -EBUSY;
		mutex_lock(&kvm->lock);
		if (!kvm->created_vcpus) {
			/* gmap_create will round the limit up */
			struct gmap *new = gmap_create(current->mm, new_limit);

			if (!new) {
				ret = -ENOMEM;
			} else {
				gmap_remove(kvm->arch.gmap);
				new->private = kvm;
				kvm->arch.gmap = new;
				ret = 0;
			}
		}
		mutex_unlock(&kvm->lock);
		VM_EVENT(kvm, 3, "SET: max guest address: %lu", new_limit);
		VM_EVENT(kvm, 3, "New guest asce: 0x%pK",
			 (void *) kvm->arch.gmap->asce);
		break;
	}
	default:
		ret = -ENXIO;
		break;
	}
	return ret;
}

static void kvm_s390_vcpu_crypto_setup(struct kvm_vcpu *vcpu);

void kvm_s390_vcpu_crypto_reset_all(struct kvm *kvm)
{
	struct kvm_vcpu *vcpu;
	int i;

	kvm_s390_vcpu_block_all(kvm);

	kvm_for_each_vcpu(i, vcpu, kvm) {
		kvm_s390_vcpu_crypto_setup(vcpu);
		/* recreate the shadow crycb by leaving the VSIE handler */
		kvm_s390_sync_request(KVM_REQ_VSIE_RESTART, vcpu);
	}

	kvm_s390_vcpu_unblock_all(kvm);
}

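/*
 * Enabling AES/DEA key wrapping draws a fresh random wrapping key mask
 * into the CRYCB; disabling it zeroes the mask again. In all cases the
 * vCPUs are reset afterwards so the (shadow) CRYCB state stays in sync.
 */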
static int kvm_s390_vm_set_crypto(struct kvm *kvm, struct kvm_device_attr *attr)
{
	mutex_lock(&kvm->lock);
	switch (attr->attr) {
	case KVM_S390_VM_CRYPTO_ENABLE_AES_KW:
		if (!test_kvm_facility(kvm, 76)) {
			mutex_unlock(&kvm->lock);
			return -EINVAL;
		}
		get_random_bytes(
			kvm->arch.crypto.crycb->aes_wrapping_key_mask,
			sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
		kvm->arch.crypto.aes_kw = 1;
		VM_EVENT(kvm, 3, "%s", "ENABLE: AES keywrapping support");
		break;
	case KVM_S390_VM_CRYPTO_ENABLE_DEA_KW:
		if (!test_kvm_facility(kvm, 76)) {
			mutex_unlock(&kvm->lock);
			return -EINVAL;
		}
		get_random_bytes(
			kvm->arch.crypto.crycb->dea_wrapping_key_mask,
			sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));
		kvm->arch.crypto.dea_kw = 1;
		VM_EVENT(kvm, 3, "%s", "ENABLE: DEA keywrapping support");
		break;
	case KVM_S390_VM_CRYPTO_DISABLE_AES_KW:
		if (!test_kvm_facility(kvm, 76)) {
			mutex_unlock(&kvm->lock);
			return -EINVAL;
		}
		kvm->arch.crypto.aes_kw = 0;
		memset(kvm->arch.crypto.crycb->aes_wrapping_key_mask, 0,
			sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
		VM_EVENT(kvm, 3, "%s", "DISABLE: AES keywrapping support");
		break;
	case KVM_S390_VM_CRYPTO_DISABLE_DEA_KW:
		if (!test_kvm_facility(kvm, 76)) {
			mutex_unlock(&kvm->lock);
			return -EINVAL;
		}
		kvm->arch.crypto.dea_kw = 0;
		memset(kvm->arch.crypto.crycb->dea_wrapping_key_mask, 0,
			sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));
		VM_EVENT(kvm, 3, "%s", "DISABLE: DEA keywrapping support");
		break;
	case KVM_S390_VM_CRYPTO_ENABLE_APIE:
		if (!ap_instructions_available()) {
			mutex_unlock(&kvm->lock);
			return -EOPNOTSUPP;
		}
		kvm->arch.crypto.apie = 1;
		break;
	case KVM_S390_VM_CRYPTO_DISABLE_APIE:
		if (!ap_instructions_available()) {
			mutex_unlock(&kvm->lock);
			return -EOPNOTSUPP;
		}
		kvm->arch.crypto.apie = 0;
		break;
	default:
		mutex_unlock(&kvm->lock);
		return -ENXIO;
	}

	kvm_s390_vcpu_crypto_reset_all(kvm);
	mutex_unlock(&kvm->lock);
	return 0;
}

static void kvm_s390_sync_request_broadcast(struct kvm *kvm, int req)
{
	int cx;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(cx, vcpu, kvm)
		kvm_s390_sync_request(req, vcpu);
}

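/*
 * The PGSTE bitmap used by migration mode tracks one bit per guest page,
 * rounded up to full longs. For example, a 4 GiB guest has 2^20 4 KiB
 * pages, so its bitmap costs 2^20 / 8 bytes = 128 KiB of vmalloc'ed memory.
 */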
/*
 * Must be called with kvm->srcu held to avoid races on memslots, and with
 * kvm->slots_lock to avoid races with ourselves and kvm_s390_vm_stop_migration.
 */
static int kvm_s390_vm_start_migration(struct kvm *kvm)
{
	struct kvm_s390_migration_state *mgs;
	struct kvm_memory_slot *ms;
	/* should be the only one */
	struct kvm_memslots *slots;
	unsigned long ram_pages;
	int slotnr;

	/* migration mode already enabled */
	if (kvm->arch.migration_state)
		return 0;

	slots = kvm_memslots(kvm);
	if (!slots || !slots->used_slots)
		return -EINVAL;

	mgs = kzalloc(sizeof(*mgs), GFP_KERNEL);
	if (!mgs)
		return -ENOMEM;
	kvm->arch.migration_state = mgs;

	if (kvm->arch.use_cmma) {
		/*
		 * Get the first slot. They are reverse sorted by base_gfn, so
		 * the first slot is also the one at the end of the address
		 * space. We have verified above that at least one slot is
		 * present.
		 */
		ms = slots->memslots;
		/* round up so we only use full longs */
		ram_pages = roundup(ms->base_gfn + ms->npages, BITS_PER_LONG);
		/* allocate enough bytes to store all the bits */
		mgs->pgste_bitmap = vmalloc(ram_pages / 8);
		if (!mgs->pgste_bitmap) {
			kfree(mgs);
			kvm->arch.migration_state = NULL;
			return -ENOMEM;
		}

		mgs->bitmap_size = ram_pages;
		atomic64_set(&mgs->dirty_pages, ram_pages);
		/* mark all the pages in active slots as dirty */
		for (slotnr = 0; slotnr < slots->used_slots; slotnr++) {
			ms = slots->memslots + slotnr;
			bitmap_set(mgs->pgste_bitmap, ms->base_gfn, ms->npages);
		}

		kvm_s390_sync_request_broadcast(kvm, KVM_REQ_START_MIGRATION);
	}
	return 0;
}

/*
 * Must be called with kvm->slots_lock to avoid races with ourselves and
 * kvm_s390_vm_start_migration.
 */
static int kvm_s390_vm_stop_migration(struct kvm *kvm)
{
	struct kvm_s390_migration_state *mgs;

	/* migration mode already disabled */
	if (!kvm->arch.migration_state)
		return 0;
	mgs = kvm->arch.migration_state;
	kvm->arch.migration_state = NULL;

	if (kvm->arch.use_cmma) {
		kvm_s390_sync_request_broadcast(kvm, KVM_REQ_STOP_MIGRATION);
		/* We have to wait for the essa emulation to finish */
		synchronize_srcu(&kvm->srcu);
		vfree(mgs->pgste_bitmap);
	}
	kfree(mgs);
	return 0;
}

static int kvm_s390_vm_set_migration(struct kvm *kvm,
				     struct kvm_device_attr *attr)
{
	int res = -ENXIO;

	mutex_lock(&kvm->slots_lock);
	switch (attr->attr) {
	case KVM_S390_VM_MIGRATION_START:
		res = kvm_s390_vm_start_migration(kvm);
		break;
	case KVM_S390_VM_MIGRATION_STOP:
		res = kvm_s390_vm_stop_migration(kvm);
		break;
	default:
		break;
	}
	mutex_unlock(&kvm->slots_lock);

	return res;
}

static int kvm_s390_vm_get_migration(struct kvm *kvm,
				     struct kvm_device_attr *attr)
{
	u64 mig = (kvm->arch.migration_state != NULL);

	if (attr->attr != KVM_S390_VM_MIGRATION_STATUS)
		return -ENXIO;

	if (copy_to_user((void __user *)attr->addr, &mig, sizeof(mig)))
		return -EFAULT;
	return 0;
}

static int kvm_s390_set_tod_ext(struct kvm *kvm, struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_tod_clock gtod;

	if (copy_from_user(&gtod, (void __user *)attr->addr, sizeof(gtod)))
		return -EFAULT;

	if (!test_kvm_facility(kvm, 139) && gtod.epoch_idx)
		return -EINVAL;
	kvm_s390_set_tod_clock(kvm, &gtod);

	VM_EVENT(kvm, 3, "SET: TOD extension: 0x%x, TOD base: 0x%llx",
		 gtod.epoch_idx, gtod.tod);

	return 0;
}

static int kvm_s390_set_tod_high(struct kvm *kvm, struct kvm_device_attr *attr)
{
	u8 gtod_high;

	if (copy_from_user(&gtod_high, (void __user *)attr->addr,
			   sizeof(gtod_high)))
		return -EFAULT;

	if (gtod_high != 0)
		return -EINVAL;
	VM_EVENT(kvm, 3, "SET: TOD extension: 0x%x", gtod_high);

	return 0;
}

static int kvm_s390_set_tod_low(struct kvm *kvm, struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_tod_clock gtod = { 0 };

	if (copy_from_user(&gtod.tod, (void __user *)attr->addr,
			   sizeof(gtod.tod)))
		return -EFAULT;

	kvm_s390_set_tod_clock(kvm, &gtod);
	VM_EVENT(kvm, 3, "SET: TOD base: 0x%llx", gtod.tod);
	return 0;
}

static int kvm_s390_set_tod(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	if (attr->flags)
		return -EINVAL;

	switch (attr->attr) {
	case KVM_S390_VM_TOD_EXT:
		ret = kvm_s390_set_tod_ext(kvm, attr);
		break;
	case KVM_S390_VM_TOD_HIGH:
		ret = kvm_s390_set_tod_high(kvm, attr);
		break;
	case KVM_S390_VM_TOD_LOW:
		ret = kvm_s390_set_tod_low(kvm, attr);
		break;
	default:
		ret = -ENXIO;
		break;
	}
	return ret;
}

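/*
 * The guest view of the TOD clock is host TOD + guest epoch. If that
 * 64-bit addition wraps around, carry into the epoch index so that the
 * combined 72-bit value stays consistent.
 */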
static void kvm_s390_get_tod_clock_ext(struct kvm *kvm,
				       struct kvm_s390_vm_tod_clock *gtod)
{
	struct kvm_s390_tod_clock_ext htod;

	preempt_disable();

	get_tod_clock_ext((char *)&htod);

	gtod->tod = htod.tod + kvm->arch.epoch;
	gtod->epoch_idx = htod.epoch_idx + kvm->arch.epdx;

	if (gtod->tod < htod.tod)
		gtod->epoch_idx += 1;

	preempt_enable();
}

static int kvm_s390_get_tod_ext(struct kvm *kvm, struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_tod_clock gtod;

	memset(&gtod, 0, sizeof(gtod));

	if (test_kvm_facility(kvm, 139))
		kvm_s390_get_tod_clock_ext(kvm, &gtod);
	else
		gtod.tod = kvm_s390_get_tod_clock_fast(kvm);

	if (copy_to_user((void __user *)attr->addr, &gtod, sizeof(gtod)))
		return -EFAULT;

	VM_EVENT(kvm, 3, "QUERY: TOD extension: 0x%x, TOD base: 0x%llx",
		 gtod.epoch_idx, gtod.tod);
	return 0;
}

static int kvm_s390_get_tod_high(struct kvm *kvm, struct kvm_device_attr *attr)
{
	u8 gtod_high = 0;

	if (copy_to_user((void __user *)attr->addr, &gtod_high,
			 sizeof(gtod_high)))
		return -EFAULT;
	VM_EVENT(kvm, 3, "QUERY: TOD extension: 0x%x", gtod_high);

	return 0;
}

static int kvm_s390_get_tod_low(struct kvm *kvm, struct kvm_device_attr *attr)
{
	u64 gtod;

	gtod = kvm_s390_get_tod_clock_fast(kvm);
	if (copy_to_user((void __user *)attr->addr, &gtod, sizeof(gtod)))
		return -EFAULT;
	VM_EVENT(kvm, 3, "QUERY: TOD base: 0x%llx", gtod);

	return 0;
}

static int kvm_s390_get_tod(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	if (attr->flags)
		return -EINVAL;

	switch (attr->attr) {
	case KVM_S390_VM_TOD_EXT:
		ret = kvm_s390_get_tod_ext(kvm, attr);
		break;
	case KVM_S390_VM_TOD_HIGH:
		ret = kvm_s390_get_tod_high(kvm, attr);
		break;
	case KVM_S390_VM_TOD_LOW:
		ret = kvm_s390_get_tod_low(kvm, attr);
		break;
	default:
		ret = -ENXIO;
		break;
	}
	return ret;
}

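/*
 * The IBC (instruction blocking control) value makes the guest behave like
 * an older machine level. Per the masking below, sclp.ibc carries the
 * lowest supported level in bits 16-27 and the highest unblocked level in
 * bits 0-11; the requested value is clamped into that range.
 */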
static int kvm_s390_set_processor(struct kvm *kvm, struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_cpu_processor *proc;
	u16 lowest_ibc, unblocked_ibc;
	int ret = 0;

	mutex_lock(&kvm->lock);
	if (kvm->created_vcpus) {
		ret = -EBUSY;
		goto out;
	}
	proc = kzalloc(sizeof(*proc), GFP_KERNEL);
	if (!proc) {
		ret = -ENOMEM;
		goto out;
	}
	if (!copy_from_user(proc, (void __user *)attr->addr,
			    sizeof(*proc))) {
		kvm->arch.model.cpuid = proc->cpuid;
		lowest_ibc = sclp.ibc >> 16 & 0xfff;
		unblocked_ibc = sclp.ibc & 0xfff;
		if (lowest_ibc && proc->ibc) {
			if (proc->ibc > unblocked_ibc)
				kvm->arch.model.ibc = unblocked_ibc;
			else if (proc->ibc < lowest_ibc)
				kvm->arch.model.ibc = lowest_ibc;
			else
				kvm->arch.model.ibc = proc->ibc;
		}
		memcpy(kvm->arch.model.fac_list, proc->fac_list,
		       S390_ARCH_FAC_LIST_SIZE_BYTE);
		VM_EVENT(kvm, 3, "SET: guest ibc: 0x%4.4x, guest cpuid: 0x%16.16llx",
			 kvm->arch.model.ibc,
			 kvm->arch.model.cpuid);
		VM_EVENT(kvm, 3, "SET: guest faclist: 0x%16.16llx.%16.16llx.%16.16llx",
			 kvm->arch.model.fac_list[0],
			 kvm->arch.model.fac_list[1],
			 kvm->arch.model.fac_list[2]);
	} else
		ret = -EFAULT;
	kfree(proc);
out:
	mutex_unlock(&kvm->lock);
	return ret;
}

static int kvm_s390_set_processor_feat(struct kvm *kvm,
				       struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_cpu_feat data;
	int ret = -EBUSY;

	if (copy_from_user(&data, (void __user *)attr->addr, sizeof(data)))
		return -EFAULT;
	if (!bitmap_subset((unsigned long *) data.feat,
			   kvm_s390_available_cpu_feat,
			   KVM_S390_VM_CPU_FEAT_NR_BITS))
		return -EINVAL;

	mutex_lock(&kvm->lock);
	if (!kvm->created_vcpus) {
		bitmap_copy(kvm->arch.cpu_feat, (unsigned long *) data.feat,
			    KVM_S390_VM_CPU_FEAT_NR_BITS);
		ret = 0;
	}
	mutex_unlock(&kvm->lock);
	return ret;
}

static int kvm_s390_set_processor_subfunc(struct kvm *kvm,
					  struct kvm_device_attr *attr)
{
	mutex_lock(&kvm->lock);
	if (kvm->created_vcpus) {
		mutex_unlock(&kvm->lock);
		return -EBUSY;
	}

	if (copy_from_user(&kvm->arch.model.subfuncs, (void __user *)attr->addr,
			   sizeof(struct kvm_s390_vm_cpu_subfunc))) {
		mutex_unlock(&kvm->lock);
		return -EFAULT;
	}
	mutex_unlock(&kvm->lock);

	VM_EVENT(kvm, 3, "SET: guest PLO subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[1],
		 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[2],
		 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[3]);
	VM_EVENT(kvm, 3, "SET: guest PTFF subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.ptff)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.ptff)[1]);
	VM_EVENT(kvm, 3, "SET: guest KMAC subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmac)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmac)[1]);
	VM_EVENT(kvm, 3, "SET: guest KMC subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmc)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmc)[1]);
	VM_EVENT(kvm, 3, "SET: guest KM subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.km)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.km)[1]);
	VM_EVENT(kvm, 3, "SET: guest KIMD subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.kimd)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.kimd)[1]);
	VM_EVENT(kvm, 3, "SET: guest KLMD subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.klmd)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.klmd)[1]);
	VM_EVENT(kvm, 3, "SET: guest PCKMO subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.pckmo)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.pckmo)[1]);
	VM_EVENT(kvm, 3, "SET: guest KMCTR subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmctr)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmctr)[1]);
	VM_EVENT(kvm, 3, "SET: guest KMF subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmf)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmf)[1]);
	VM_EVENT(kvm, 3, "SET: guest KMO subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmo)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmo)[1]);
	VM_EVENT(kvm, 3, "SET: guest PCC subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.pcc)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.pcc)[1]);
	VM_EVENT(kvm, 3, "SET: guest PPNO subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.ppno)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.ppno)[1]);
	VM_EVENT(kvm, 3, "SET: guest KMA subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.kma)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.kma)[1]);
	VM_EVENT(kvm, 3, "SET: guest KDSA subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.kdsa)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.kdsa)[1]);
	VM_EVENT(kvm, 3, "SET: guest SORTL subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[1],
		 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[2],
		 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[3]);
	VM_EVENT(kvm, 3, "SET: guest DFLTCC subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[1],
		 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[2],
		 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[3]);

	return 0;
}

static int kvm_s390_set_cpu_model(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret = -ENXIO;

	switch (attr->attr) {
	case KVM_S390_VM_CPU_PROCESSOR:
		ret = kvm_s390_set_processor(kvm, attr);
		break;
	case KVM_S390_VM_CPU_PROCESSOR_FEAT:
		ret = kvm_s390_set_processor_feat(kvm, attr);
		break;
	case KVM_S390_VM_CPU_PROCESSOR_SUBFUNC:
		ret = kvm_s390_set_processor_subfunc(kvm, attr);
		break;
	}
	return ret;
}

static int kvm_s390_get_processor(struct kvm *kvm, struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_cpu_processor *proc;
	int ret = 0;

	proc = kzalloc(sizeof(*proc), GFP_KERNEL);
	if (!proc) {
		ret = -ENOMEM;
		goto out;
	}
	proc->cpuid = kvm->arch.model.cpuid;
	proc->ibc = kvm->arch.model.ibc;
	memcpy(&proc->fac_list, kvm->arch.model.fac_list,
	       S390_ARCH_FAC_LIST_SIZE_BYTE);
	VM_EVENT(kvm, 3, "GET: guest ibc: 0x%4.4x, guest cpuid: 0x%16.16llx",
		 kvm->arch.model.ibc,
		 kvm->arch.model.cpuid);
	VM_EVENT(kvm, 3, "GET: guest faclist: 0x%16.16llx.%16.16llx.%16.16llx",
		 kvm->arch.model.fac_list[0],
		 kvm->arch.model.fac_list[1],
		 kvm->arch.model.fac_list[2]);
	if (copy_to_user((void __user *)attr->addr, proc, sizeof(*proc)))
		ret = -EFAULT;
	kfree(proc);
out:
	return ret;
}

static int kvm_s390_get_machine(struct kvm *kvm, struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_cpu_machine *mach;
	int ret = 0;

	mach = kzalloc(sizeof(*mach), GFP_KERNEL);
	if (!mach) {
		ret = -ENOMEM;
		goto out;
	}
	get_cpu_id((struct cpuid *) &mach->cpuid);
	mach->ibc = sclp.ibc;
	memcpy(&mach->fac_mask, kvm->arch.model.fac_mask,
	       S390_ARCH_FAC_LIST_SIZE_BYTE);
	memcpy((unsigned long *)&mach->fac_list, S390_lowcore.stfle_fac_list,
	       sizeof(S390_lowcore.stfle_fac_list));
	VM_EVENT(kvm, 3, "GET: host ibc: 0x%4.4x, host cpuid: 0x%16.16llx",
		 kvm->arch.model.ibc,
		 kvm->arch.model.cpuid);
	VM_EVENT(kvm, 3, "GET: host facmask: 0x%16.16llx.%16.16llx.%16.16llx",
		 mach->fac_mask[0],
		 mach->fac_mask[1],
		 mach->fac_mask[2]);
	VM_EVENT(kvm, 3, "GET: host faclist: 0x%16.16llx.%16.16llx.%16.16llx",
		 mach->fac_list[0],
		 mach->fac_list[1],
		 mach->fac_list[2]);
	if (copy_to_user((void __user *)attr->addr, mach, sizeof(*mach)))
		ret = -EFAULT;
	kfree(mach);
out:
	return ret;
}

static int kvm_s390_get_processor_feat(struct kvm *kvm,
				       struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_cpu_feat data;

	bitmap_copy((unsigned long *) data.feat, kvm->arch.cpu_feat,
		    KVM_S390_VM_CPU_FEAT_NR_BITS);
	if (copy_to_user((void __user *)attr->addr, &data, sizeof(data)))
		return -EFAULT;
	return 0;
}

static int kvm_s390_get_machine_feat(struct kvm *kvm,
				     struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_cpu_feat data;

	bitmap_copy((unsigned long *) data.feat,
		    kvm_s390_available_cpu_feat,
		    KVM_S390_VM_CPU_FEAT_NR_BITS);
	if (copy_to_user((void __user *)attr->addr, &data, sizeof(data)))
		return -EFAULT;
	return 0;
}

static int kvm_s390_get_processor_subfunc(struct kvm *kvm,
					  struct kvm_device_attr *attr)
{
	if (copy_to_user((void __user *)attr->addr, &kvm->arch.model.subfuncs,
			 sizeof(struct kvm_s390_vm_cpu_subfunc)))
		return -EFAULT;

	VM_EVENT(kvm, 3, "GET: guest PLO subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[1],
		 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[2],
		 ((unsigned long *) &kvm->arch.model.subfuncs.plo)[3]);
	VM_EVENT(kvm, 3, "GET: guest PTFF subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.ptff)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.ptff)[1]);
	VM_EVENT(kvm, 3, "GET: guest KMAC subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmac)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmac)[1]);
	VM_EVENT(kvm, 3, "GET: guest KMC subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmc)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmc)[1]);
	VM_EVENT(kvm, 3, "GET: guest KM subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.km)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.km)[1]);
	VM_EVENT(kvm, 3, "GET: guest KIMD subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.kimd)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.kimd)[1]);
	VM_EVENT(kvm, 3, "GET: guest KLMD subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.klmd)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.klmd)[1]);
	VM_EVENT(kvm, 3, "GET: guest PCKMO subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.pckmo)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.pckmo)[1]);
	VM_EVENT(kvm, 3, "GET: guest KMCTR subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmctr)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmctr)[1]);
	VM_EVENT(kvm, 3, "GET: guest KMF subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmf)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmf)[1]);
	VM_EVENT(kvm, 3, "GET: guest KMO subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmo)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.kmo)[1]);
	VM_EVENT(kvm, 3, "GET: guest PCC subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.pcc)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.pcc)[1]);
	VM_EVENT(kvm, 3, "GET: guest PPNO subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.ppno)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.ppno)[1]);
	VM_EVENT(kvm, 3, "GET: guest KMA subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.kma)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.kma)[1]);
	VM_EVENT(kvm, 3, "GET: guest KDSA subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.kdsa)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.kdsa)[1]);
	VM_EVENT(kvm, 3, "GET: guest SORTL subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[1],
		 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[2],
		 ((unsigned long *) &kvm->arch.model.subfuncs.sortl)[3]);
	VM_EVENT(kvm, 3, "GET: guest DFLTCC subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[0],
		 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[1],
		 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[2],
		 ((unsigned long *) &kvm->arch.model.subfuncs.dfltcc)[3]);

	return 0;
}

static int kvm_s390_get_machine_subfunc(struct kvm *kvm,
					struct kvm_device_attr *attr)
{
	if (copy_to_user((void __user *)attr->addr, &kvm_s390_available_subfunc,
			 sizeof(struct kvm_s390_vm_cpu_subfunc)))
		return -EFAULT;

	VM_EVENT(kvm, 3, "GET: host PLO subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm_s390_available_subfunc.plo)[0],
		 ((unsigned long *) &kvm_s390_available_subfunc.plo)[1],
		 ((unsigned long *) &kvm_s390_available_subfunc.plo)[2],
		 ((unsigned long *) &kvm_s390_available_subfunc.plo)[3]);
	VM_EVENT(kvm, 3, "GET: host PTFF subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm_s390_available_subfunc.ptff)[0],
		 ((unsigned long *) &kvm_s390_available_subfunc.ptff)[1]);
	VM_EVENT(kvm, 3, "GET: host KMAC subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm_s390_available_subfunc.kmac)[0],
		 ((unsigned long *) &kvm_s390_available_subfunc.kmac)[1]);
	VM_EVENT(kvm, 3, "GET: host KMC subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm_s390_available_subfunc.kmc)[0],
		 ((unsigned long *) &kvm_s390_available_subfunc.kmc)[1]);
	VM_EVENT(kvm, 3, "GET: host KM subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm_s390_available_subfunc.km)[0],
		 ((unsigned long *) &kvm_s390_available_subfunc.km)[1]);
	VM_EVENT(kvm, 3, "GET: host KIMD subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm_s390_available_subfunc.kimd)[0],
		 ((unsigned long *) &kvm_s390_available_subfunc.kimd)[1]);
	VM_EVENT(kvm, 3, "GET: host KLMD subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm_s390_available_subfunc.klmd)[0],
		 ((unsigned long *) &kvm_s390_available_subfunc.klmd)[1]);
	VM_EVENT(kvm, 3, "GET: host PCKMO subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm_s390_available_subfunc.pckmo)[0],
		 ((unsigned long *) &kvm_s390_available_subfunc.pckmo)[1]);
	VM_EVENT(kvm, 3, "GET: host KMCTR subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm_s390_available_subfunc.kmctr)[0],
		 ((unsigned long *) &kvm_s390_available_subfunc.kmctr)[1]);
	VM_EVENT(kvm, 3, "GET: host KMF subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm_s390_available_subfunc.kmf)[0],
		 ((unsigned long *) &kvm_s390_available_subfunc.kmf)[1]);
	VM_EVENT(kvm, 3, "GET: host KMO subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm_s390_available_subfunc.kmo)[0],
		 ((unsigned long *) &kvm_s390_available_subfunc.kmo)[1]);
	VM_EVENT(kvm, 3, "GET: host PCC subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm_s390_available_subfunc.pcc)[0],
		 ((unsigned long *) &kvm_s390_available_subfunc.pcc)[1]);
	VM_EVENT(kvm, 3, "GET: host PPNO subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm_s390_available_subfunc.ppno)[0],
		 ((unsigned long *) &kvm_s390_available_subfunc.ppno)[1]);
	VM_EVENT(kvm, 3, "GET: host KMA subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm_s390_available_subfunc.kma)[0],
		 ((unsigned long *) &kvm_s390_available_subfunc.kma)[1]);
	VM_EVENT(kvm, 3, "GET: host KDSA subfunc 0x%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm_s390_available_subfunc.kdsa)[0],
		 ((unsigned long *) &kvm_s390_available_subfunc.kdsa)[1]);
	VM_EVENT(kvm, 3, "GET: host SORTL subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm_s390_available_subfunc.sortl)[0],
		 ((unsigned long *) &kvm_s390_available_subfunc.sortl)[1],
		 ((unsigned long *) &kvm_s390_available_subfunc.sortl)[2],
		 ((unsigned long *) &kvm_s390_available_subfunc.sortl)[3]);
	VM_EVENT(kvm, 3, "GET: host DFLTCC subfunc 0x%16.16lx.%16.16lx.%16.16lx.%16.16lx",
		 ((unsigned long *) &kvm_s390_available_subfunc.dfltcc)[0],
		 ((unsigned long *) &kvm_s390_available_subfunc.dfltcc)[1],
		 ((unsigned long *) &kvm_s390_available_subfunc.dfltcc)[2],
		 ((unsigned long *) &kvm_s390_available_subfunc.dfltcc)[3]);

	return 0;
}

658b6eda
MM
1569static int kvm_s390_get_cpu_model(struct kvm *kvm, struct kvm_device_attr *attr)
1570{
1571 int ret = -ENXIO;
1572
1573 switch (attr->attr) {
1574 case KVM_S390_VM_CPU_PROCESSOR:
1575 ret = kvm_s390_get_processor(kvm, attr);
1576 break;
1577 case KVM_S390_VM_CPU_MACHINE:
1578 ret = kvm_s390_get_machine(kvm, attr);
1579 break;
15c9705f
DH
1580 case KVM_S390_VM_CPU_PROCESSOR_FEAT:
1581 ret = kvm_s390_get_processor_feat(kvm, attr);
1582 break;
1583 case KVM_S390_VM_CPU_MACHINE_FEAT:
1584 ret = kvm_s390_get_machine_feat(kvm, attr);
1585 break;
0a763c78
DH
1586 case KVM_S390_VM_CPU_PROCESSOR_SUBFUNC:
1587 ret = kvm_s390_get_processor_subfunc(kvm, attr);
1588 break;
1589 case KVM_S390_VM_CPU_MACHINE_SUBFUNC:
1590 ret = kvm_s390_get_machine_subfunc(kvm, attr);
1591 break;
658b6eda
MM
1592 }
1593 return ret;
1594}
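/*
 * A minimal userspace sketch (not part of this file) of driving the
 * getters above through KVM_GET_DEVICE_ATTR. The VM file descriptor
 * "vmfd", the helper name and the reduced error handling are
 * assumptions for illustration; the structures and ioctl numbers are
 * the ones from the UAPI header <linux/kvm.h>.
 */
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

static int get_machine_model(int vmfd, struct kvm_s390_vm_cpu_machine *mach)
{
	struct kvm_device_attr attr = {
		.group = KVM_S390_VM_CPU_MODEL,
		.attr  = KVM_S390_VM_CPU_MACHINE,
		.addr  = (__u64)(unsigned long)mach,
	};

	/* lands in kvm_s390_get_machine() via the switch above */
	if (ioctl(vmfd, KVM_GET_DEVICE_ATTR, &attr) < 0) {
		perror("KVM_GET_DEVICE_ATTR");
		return -1;
	}
	return 0;
}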
1595
f2061656
DD
1596static int kvm_s390_vm_set_attr(struct kvm *kvm, struct kvm_device_attr *attr)
1597{
1598 int ret;
1599
1600 switch (attr->group) {
4f718eab 1601 case KVM_S390_VM_MEM_CTRL:
8c0a7ce6 1602 ret = kvm_s390_set_mem_control(kvm, attr);
4f718eab 1603 break;
72f25020
JH
1604 case KVM_S390_VM_TOD:
1605 ret = kvm_s390_set_tod(kvm, attr);
1606 break;
658b6eda
MM
1607 case KVM_S390_VM_CPU_MODEL:
1608 ret = kvm_s390_set_cpu_model(kvm, attr);
1609 break;
a374e892
TK
1610 case KVM_S390_VM_CRYPTO:
1611 ret = kvm_s390_vm_set_crypto(kvm, attr);
1612 break;
190df4a2
CI
1613 case KVM_S390_VM_MIGRATION:
1614 ret = kvm_s390_vm_set_migration(kvm, attr);
1615 break;
f2061656
DD
1616 default:
1617 ret = -ENXIO;
1618 break;
1619 }
1620
1621 return ret;
1622}
1623
1624static int kvm_s390_vm_get_attr(struct kvm *kvm, struct kvm_device_attr *attr)
1625{
8c0a7ce6
DD
1626 int ret;
1627
1628 switch (attr->group) {
1629 case KVM_S390_VM_MEM_CTRL:
1630 ret = kvm_s390_get_mem_control(kvm, attr);
1631 break;
72f25020
JH
1632 case KVM_S390_VM_TOD:
1633 ret = kvm_s390_get_tod(kvm, attr);
1634 break;
658b6eda
MM
1635 case KVM_S390_VM_CPU_MODEL:
1636 ret = kvm_s390_get_cpu_model(kvm, attr);
1637 break;
190df4a2
CI
1638 case KVM_S390_VM_MIGRATION:
1639 ret = kvm_s390_vm_get_migration(kvm, attr);
1640 break;
8c0a7ce6
DD
1641 default:
1642 ret = -ENXIO;
1643 break;
1644 }
1645
1646 return ret;
f2061656
DD
1647}
1648
1649static int kvm_s390_vm_has_attr(struct kvm *kvm, struct kvm_device_attr *attr)
1650{
1651 int ret;
1652
1653 switch (attr->group) {
4f718eab
DD
1654 case KVM_S390_VM_MEM_CTRL:
1655 switch (attr->attr) {
1656 case KVM_S390_VM_MEM_ENABLE_CMMA:
1657 case KVM_S390_VM_MEM_CLR_CMMA:
f9cbd9b0
DH
1658 ret = sclp.has_cmma ? 0 : -ENXIO;
1659 break;
8c0a7ce6 1660 case KVM_S390_VM_MEM_LIMIT_SIZE:
4f718eab
DD
1661 ret = 0;
1662 break;
1663 default:
1664 ret = -ENXIO;
1665 break;
1666 }
1667 break;
72f25020
JH
1668 case KVM_S390_VM_TOD:
1669 switch (attr->attr) {
1670 case KVM_S390_VM_TOD_LOW:
1671 case KVM_S390_VM_TOD_HIGH:
1672 ret = 0;
1673 break;
1674 default:
1675 ret = -ENXIO;
1676 break;
1677 }
1678 break;
658b6eda
MM
1679 case KVM_S390_VM_CPU_MODEL:
1680 switch (attr->attr) {
1681 case KVM_S390_VM_CPU_PROCESSOR:
1682 case KVM_S390_VM_CPU_MACHINE:
15c9705f
DH
1683 case KVM_S390_VM_CPU_PROCESSOR_FEAT:
1684 case KVM_S390_VM_CPU_MACHINE_FEAT:
0a763c78 1685 case KVM_S390_VM_CPU_MACHINE_SUBFUNC:
658e657d 1686 case KVM_S390_VM_CPU_PROCESSOR_SUBFUNC:
658b6eda
MM
1687 ret = 0;
1688 break;
1689 default:
1690 ret = -ENXIO;
1691 break;
1692 }
1693 break;
a374e892
TK
1694 case KVM_S390_VM_CRYPTO:
1695 switch (attr->attr) {
1696 case KVM_S390_VM_CRYPTO_ENABLE_AES_KW:
1697 case KVM_S390_VM_CRYPTO_ENABLE_DEA_KW:
1698 case KVM_S390_VM_CRYPTO_DISABLE_AES_KW:
1699 case KVM_S390_VM_CRYPTO_DISABLE_DEA_KW:
1700 ret = 0;
1701 break;
bec73b14
TK
1702 case KVM_S390_VM_CRYPTO_ENABLE_APIE:
1703 case KVM_S390_VM_CRYPTO_DISABLE_APIE:
1704 ret = ap_instructions_available() ? 0 : -ENXIO;
1705 break;
a374e892
TK
1706 default:
1707 ret = -ENXIO;
1708 break;
1709 }
1710 break;
190df4a2
CI
1711 case KVM_S390_VM_MIGRATION:
1712 ret = 0;
1713 break;
f2061656
DD
1714 default:
1715 ret = -ENXIO;
1716 break;
1717 }
1718
1719 return ret;
1720}
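/*
 * A sketch, under the same assumptions and includes as the example
 * after kvm_s390_get_cpu_model(), of probing an attribute before
 * using it: KVM_HAS_DEVICE_ATTR ends up in kvm_s390_vm_has_attr()
 * above and succeeds only for the supported cases listed there.
 */
static int vm_attr_supported(int vmfd, __u32 group, __u64 attr_nr)
{
	struct kvm_device_attr attr = {
		.group = group,
		.attr  = attr_nr,
	};

	return ioctl(vmfd, KVM_HAS_DEVICE_ATTR, &attr) == 0;
}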
1721
30ee2a98
JH
1722static long kvm_s390_get_skeys(struct kvm *kvm, struct kvm_s390_skeys *args)
1723{
1724 uint8_t *keys;
1725 uint64_t hva;
4f899147 1726 int srcu_idx, i, r = 0;
30ee2a98
JH
1727
1728 if (args->flags != 0)
1729 return -EINVAL;
1730
1731 /* Is this guest using storage keys? */
1732 if (!mm_use_skey(current->mm))
1733 return KVM_S390_GET_SKEYS_NONE;
1734
1735 /* Enforce sane limit on memory allocation */
1736 if (args->count < 1 || args->count > KVM_S390_SKEYS_MAX)
1737 return -EINVAL;
1738
752ade68 1739 keys = kvmalloc_array(args->count, sizeof(uint8_t), GFP_KERNEL);
30ee2a98
JH
1740 if (!keys)
1741 return -ENOMEM;
1742
d3ed1cee 1743 down_read(&current->mm->mmap_sem);
4f899147 1744 srcu_idx = srcu_read_lock(&kvm->srcu);
30ee2a98
JH
1745 for (i = 0; i < args->count; i++) {
1746 hva = gfn_to_hva(kvm, args->start_gfn + i);
1747 if (kvm_is_error_hva(hva)) {
1748 r = -EFAULT;
d3ed1cee 1749 break;
30ee2a98
JH
1750 }
1751
154c8c19
DH
1752 r = get_guest_storage_key(current->mm, hva, &keys[i]);
1753 if (r)
d3ed1cee 1754 break;
30ee2a98 1755 }
4f899147 1756 srcu_read_unlock(&kvm->srcu, srcu_idx);
d3ed1cee
MS
1757 up_read(&current->mm->mmap_sem);
1758
1759 if (!r) {
1760 r = copy_to_user((uint8_t __user *)args->skeydata_addr, keys,
1761 sizeof(uint8_t) * args->count);
1762 if (r)
1763 r = -EFAULT;
30ee2a98
JH
1764 }
1765
30ee2a98
JH
1766 kvfree(keys);
1767 return r;
1768}
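/*
 * A hypothetical userspace sketch of reading guest storage keys with
 * KVM_S390_GET_SKEYS (same assumptions as the earlier examples). A
 * return value of KVM_S390_GET_SKEYS_NONE means the guest does not
 * use storage keys and the buffer is left untouched.
 */
#include <stdint.h>

static int read_skeys(int vmfd, uint64_t start_gfn, uint64_t count,
		      uint8_t *buf)
{
	struct kvm_s390_skeys args = {
		.start_gfn     = start_gfn,
		.count         = count,
		.skeydata_addr = (__u64)(unsigned long)buf,
	};

	return ioctl(vmfd, KVM_S390_GET_SKEYS, &args);
}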
1769
1770static long kvm_s390_set_skeys(struct kvm *kvm, struct kvm_s390_skeys *args)
1771{
1772 uint8_t *keys;
1773 uint64_t hva;
4f899147 1774 int srcu_idx, i, r = 0;
30ee2a98
JH
1775
1776 if (args->flags != 0)
1777 return -EINVAL;
1778
1779 /* Enforce sane limit on memory allocation */
1780 if (args->count < 1 || args->count > KVM_S390_SKEYS_MAX)
1781 return -EINVAL;
1782
752ade68 1783 keys = kvmalloc_array(args->count, sizeof(uint8_t), GFP_KERNEL);
30ee2a98
JH
1784 if (!keys)
1785 return -ENOMEM;
1786
1787 r = copy_from_user(keys, (uint8_t __user *)args->skeydata_addr,
1788 sizeof(uint8_t) * args->count);
1789 if (r) {
1790 r = -EFAULT;
1791 goto out;
1792 }
1793
1794 /* Enable storage key handling for the guest */
14d4a425
DD
1795 r = s390_enable_skey();
1796 if (r)
1797 goto out;
30ee2a98 1798
d3ed1cee 1799 down_read(&current->mm->mmap_sem);
4f899147 1800 srcu_idx = srcu_read_lock(&kvm->srcu);
30ee2a98
JH
1801 for (i = 0; i < args->count; i++) {
1802 hva = gfn_to_hva(kvm, args->start_gfn + i);
1803 if (kvm_is_error_hva(hva)) {
1804 r = -EFAULT;
d3ed1cee 1805 break;
30ee2a98
JH
1806 }
1807
1808 /* Lowest order bit is reserved */
1809 if (keys[i] & 0x01) {
1810 r = -EINVAL;
d3ed1cee 1811 break;
30ee2a98
JH
1812 }
1813
fe69eabf 1814 r = set_guest_storage_key(current->mm, hva, keys[i], 0);
30ee2a98 1815 if (r)
d3ed1cee 1816 break;
30ee2a98 1817 }
4f899147 1818 srcu_read_unlock(&kvm->srcu, srcu_idx);
d3ed1cee 1819 up_read(&current->mm->mmap_sem);
30ee2a98
JH
1820out:
1821 kvfree(keys);
1822 return r;
1823}
1824
4036e387
CI
1825/*
1826 * Base address and length must be sent at the start of each block, therefore
1827 * it's cheaper to send some clean data, as long as it's less than the size of
1828 * two longs.
1829 */
1830#define KVM_S390_MAX_BIT_DISTANCE (2 * sizeof(void *))
1831/* for consistency */
1832#define KVM_S390_CMMA_SIZE_MAX ((u32)KVM_S390_SKEYS_MAX)
1833
1834/*
1835 * This function searches for the next page with dirty CMMA attributes, and
1836 * saves the attributes in the buffer up to either the end of the buffer or
1837 * until a block of at least KVM_S390_MAX_BIT_DISTANCE clean bits is found;
1838 * no trailing clean bytes are saved.
1839 * If no dirty bits were found, or if CMMA was not enabled or used, the
1840 * output buffer will indicate a length of 0.
1841 */
1842static int kvm_s390_get_cmma_bits(struct kvm *kvm,
1843 struct kvm_s390_cmma_log *args)
1844{
1845 struct kvm_s390_migration_state *s = kvm->arch.migration_state;
1846 unsigned long bufsize, hva, pgstev, i, next, cur;
1847 int srcu_idx, peek, r = 0, rr;
1848 u8 *res;
1849
1850 cur = args->start_gfn;
1851 i = next = pgstev = 0;
1852
1853 if (unlikely(!kvm->arch.use_cmma))
1854 return -ENXIO;
1855 /* Invalid/unsupported flags were specified */
1856 if (args->flags & ~KVM_S390_CMMA_PEEK)
1857 return -EINVAL;
1858 /* Migration mode query, and we are not doing a migration */
1859 peek = !!(args->flags & KVM_S390_CMMA_PEEK);
1860 if (!peek && !s)
1861 return -EINVAL;
1862 /* CMMA is disabled or was not used, or the buffer has length zero */
1863 bufsize = min(args->count, KVM_S390_CMMA_SIZE_MAX);
1864 if (!bufsize || !kvm->mm->context.use_cmma) {
1865 memset(args, 0, sizeof(*args));
1866 return 0;
1867 }
1868
1869 if (!peek) {
1870 /* We are not peeking, and there are no dirty pages */
1871 if (!atomic64_read(&s->dirty_pages)) {
1872 memset(args, 0, sizeof(*args));
1873 return 0;
1874 }
1875 cur = find_next_bit(s->pgste_bitmap, s->bitmap_size,
1876 args->start_gfn);
1877 if (cur >= s->bitmap_size) /* nothing found, loop back */
1878 cur = find_next_bit(s->pgste_bitmap, s->bitmap_size, 0);
1879 if (cur >= s->bitmap_size) { /* again! (very unlikely) */
1880 memset(args, 0, sizeof(*args));
1881 return 0;
1882 }
1883 next = find_next_bit(s->pgste_bitmap, s->bitmap_size, cur + 1);
1884 }
1885
1886 res = vmalloc(bufsize);
1887 if (!res)
1888 return -ENOMEM;
1889
1890 args->start_gfn = cur;
1891
1892 down_read(&kvm->mm->mmap_sem);
1893 srcu_idx = srcu_read_lock(&kvm->srcu);
1894 while (i < bufsize) {
1895 hva = gfn_to_hva(kvm, cur);
1896 if (kvm_is_error_hva(hva)) {
1897 r = -EFAULT;
1898 break;
1899 }
1900 /* decrement only if we actually flipped the bit to 0 */
1901 if (!peek && test_and_clear_bit(cur, s->pgste_bitmap))
1902 atomic64_dec(&s->dirty_pages);
1903 r = get_pgste(kvm->mm, hva, &pgstev);
1904 if (r < 0)
1905 pgstev = 0;
1906 /* save the value */
1bab1c02 1907 res[i++] = (pgstev >> 24) & 0x43;
4036e387
CI
1908 /*
1909 * If the next bit is too far away, stop.
1910 * If we reached the previous "next", find the next one.
1911 */
1912 if (!peek) {
1913 if (next > cur + KVM_S390_MAX_BIT_DISTANCE)
1914 break;
1915 if (cur == next)
1916 next = find_next_bit(s->pgste_bitmap,
1917 s->bitmap_size, cur + 1);
1918 /* reached the end of the bitmap or of the buffer, stop */
1919 if ((next >= s->bitmap_size) ||
1920 (next >= args->start_gfn + bufsize))
1921 break;
1922 }
1923 cur++;
1924 }
1925 srcu_read_unlock(&kvm->srcu, srcu_idx);
1926 up_read(&kvm->mm->mmap_sem);
1927 args->count = i;
1928 args->remaining = s ? atomic64_read(&s->dirty_pages) : 0;
1929
1930 rr = copy_to_user((void __user *)args->values, res, args->count);
1931 if (rr)
1932 r = -EFAULT;
1933
1934 vfree(res);
1935 return r;
1936}
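/*
 * A sketch of how a migration tool might drain the dirty CMMA values
 * through KVM_S390_GET_CMMA_BITS (same userspace assumptions as
 * above; the 4 KiB buffer size is arbitrary). On return, start_gfn
 * and count describe the values actually delivered and "remaining"
 * holds the number of dirty pages still left.
 */
static void drain_cmma(int vmfd)
{
	uint8_t values[4096];
	struct kvm_s390_cmma_log log = {
		.start_gfn = 0,
		.count     = sizeof(values),
		.flags     = 0,
		.values    = (__u64)(unsigned long)values,
	};

	do {
		if (ioctl(vmfd, KVM_S390_GET_CMMA_BITS, &log) < 0)
			break;
		/* ... forward log.count bytes for gfns from log.start_gfn ... */
		log.start_gfn += log.count;
		log.count = sizeof(values);
	} while (log.remaining);
}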
1937
1938/*
1939 * This function sets the CMMA attributes for the given pages. If the input
1940 * buffer has zero length, no action is taken, otherwise the attributes are
1941 * set and the mm->context.use_cmma flag is set.
1942 */
1943static int kvm_s390_set_cmma_bits(struct kvm *kvm,
1944 const struct kvm_s390_cmma_log *args)
1945{
1946 unsigned long hva, mask, pgstev, i;
1947 uint8_t *bits;
1948 int srcu_idx, r = 0;
1949
1950 mask = args->mask;
1951
1952 if (!kvm->arch.use_cmma)
1953 return -ENXIO;
1954 /* invalid/unsupported flags */
1955 if (args->flags != 0)
1956 return -EINVAL;
1957 /* Enforce sane limit on memory allocation */
1958 if (args->count > KVM_S390_CMMA_SIZE_MAX)
1959 return -EINVAL;
1960 /* Nothing to do */
1961 if (args->count == 0)
1962 return 0;
1963
1964 bits = vmalloc(sizeof(*bits) * args->count);
1965 if (!bits)
1966 return -ENOMEM;
1967
1968 r = copy_from_user(bits, (void __user *)args->values, args->count);
1969 if (r) {
1970 r = -EFAULT;
1971 goto out;
1972 }
1973
1974 down_read(&kvm->mm->mmap_sem);
1975 srcu_idx = srcu_read_lock(&kvm->srcu);
1976 for (i = 0; i < args->count; i++) {
1977 hva = gfn_to_hva(kvm, args->start_gfn + i);
1978 if (kvm_is_error_hva(hva)) {
1979 r = -EFAULT;
1980 break;
1981 }
1982
1983 pgstev = bits[i];
1984 pgstev = pgstev << 24;
1bab1c02 1985 mask &= _PGSTE_GPS_USAGE_MASK | _PGSTE_GPS_NODAT;
4036e387
CI
1986 set_pgste_bits(kvm->mm, hva, mask, pgstev);
1987 }
1988 srcu_read_unlock(&kvm->srcu, srcu_idx);
1989 up_read(&kvm->mm->mmap_sem);
1990
1991 if (!kvm->mm->context.use_cmma) {
1992 down_write(&kvm->mm->mmap_sem);
1993 kvm->mm->context.use_cmma = 1;
1994 up_write(&kvm->mm->mmap_sem);
1995 }
1996out:
1997 vfree(bits);
1998 return r;
1999}
2000
b0c632db
HC
2001long kvm_arch_vm_ioctl(struct file *filp,
2002 unsigned int ioctl, unsigned long arg)
2003{
2004 struct kvm *kvm = filp->private_data;
2005 void __user *argp = (void __user *)arg;
f2061656 2006 struct kvm_device_attr attr;
b0c632db
HC
2007 int r;
2008
2009 switch (ioctl) {
ba5c1e9b
CO
2010 case KVM_S390_INTERRUPT: {
2011 struct kvm_s390_interrupt s390int;
2012
2013 r = -EFAULT;
2014 if (copy_from_user(&s390int, argp, sizeof(s390int)))
2015 break;
2016 r = kvm_s390_inject_vm(kvm, &s390int);
2017 break;
2018 }
d938dc55
CH
2019 case KVM_ENABLE_CAP: {
2020 struct kvm_enable_cap cap;
2021 r = -EFAULT;
2022 if (copy_from_user(&cap, argp, sizeof(cap)))
2023 break;
2024 r = kvm_vm_ioctl_enable_cap(kvm, &cap);
2025 break;
2026 }
84223598
CH
2027 case KVM_CREATE_IRQCHIP: {
2028 struct kvm_irq_routing_entry routing;
2029
2030 r = -EINVAL;
2031 if (kvm->arch.use_irqchip) {
2032 /* Set up dummy routing. */
2033 memset(&routing, 0, sizeof(routing));
152b2839 2034 r = kvm_set_irq_routing(kvm, &routing, 0, 0);
84223598
CH
2035 }
2036 break;
2037 }
f2061656
DD
2038 case KVM_SET_DEVICE_ATTR: {
2039 r = -EFAULT;
2040 if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
2041 break;
2042 r = kvm_s390_vm_set_attr(kvm, &attr);
2043 break;
2044 }
2045 case KVM_GET_DEVICE_ATTR: {
2046 r = -EFAULT;
2047 if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
2048 break;
2049 r = kvm_s390_vm_get_attr(kvm, &attr);
2050 break;
2051 }
2052 case KVM_HAS_DEVICE_ATTR: {
2053 r = -EFAULT;
2054 if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
2055 break;
2056 r = kvm_s390_vm_has_attr(kvm, &attr);
2057 break;
2058 }
30ee2a98
JH
2059 case KVM_S390_GET_SKEYS: {
2060 struct kvm_s390_skeys args;
2061
2062 r = -EFAULT;
2063 if (copy_from_user(&args, argp,
2064 sizeof(struct kvm_s390_skeys)))
2065 break;
2066 r = kvm_s390_get_skeys(kvm, &args);
2067 break;
2068 }
2069 case KVM_S390_SET_SKEYS: {
2070 struct kvm_s390_skeys args;
2071
2072 r = -EFAULT;
2073 if (copy_from_user(&args, argp,
2074 sizeof(struct kvm_s390_skeys)))
2075 break;
2076 r = kvm_s390_set_skeys(kvm, &args);
2077 break;
2078 }
4036e387
CI
2079 case KVM_S390_GET_CMMA_BITS: {
2080 struct kvm_s390_cmma_log args;
2081
2082 r = -EFAULT;
2083 if (copy_from_user(&args, argp, sizeof(args)))
2084 break;
1de1ea7e 2085 mutex_lock(&kvm->slots_lock);
4036e387 2086 r = kvm_s390_get_cmma_bits(kvm, &args);
1de1ea7e 2087 mutex_unlock(&kvm->slots_lock);
4036e387
CI
2088 if (!r) {
2089 r = copy_to_user(argp, &args, sizeof(args));
2090 if (r)
2091 r = -EFAULT;
2092 }
2093 break;
2094 }
2095 case KVM_S390_SET_CMMA_BITS: {
2096 struct kvm_s390_cmma_log args;
2097
2098 r = -EFAULT;
2099 if (copy_from_user(&args, argp, sizeof(args)))
2100 break;
1de1ea7e 2101 mutex_lock(&kvm->slots_lock);
4036e387 2102 r = kvm_s390_set_cmma_bits(kvm, &args);
1de1ea7e 2103 mutex_unlock(&kvm->slots_lock);
4036e387
CI
2104 break;
2105 }
b0c632db 2106 default:
367e1319 2107 r = -ENOTTY;
b0c632db
HC
2108 }
2109
2110 return r;
2111}
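/*
 * One of the simpler paths through the dispatcher above, sketched
 * from userspace (same assumptions as the earlier examples):
 * injecting a floating service-signal interrupt; the parameter
 * value is purely illustrative.
 */
static int inject_service(int vmfd)
{
	struct kvm_s390_interrupt s390int = {
		.type = KVM_S390_INT_SERVICE,
		.parm = 0x1004,
	};

	return ioctl(vmfd, KVM_S390_INTERRUPT, &s390int);
}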
2112
45c9b47c
TK
2113static int kvm_s390_apxa_installed(void)
2114{
5f131c12 2115 struct ap_config_info info;
45c9b47c 2116
5f131c12
TK
2117 if (ap_instructions_available()) {
2118 if (ap_qci(&info) == 0)
2119 return info.apxa;
45c9b47c
TK
2120 }
2121
2122 return 0;
2123}
2124
5f131c12
TK
2125/*
2126 * The format of the crypto control block (CRYCB) is specified in the 3 low
2127 * order bits of the CRYCB designation (CRYCBD) field as follows:
2128 * Format 0: Neither the message security assist extension 3 (MSAX3) nor the
2129 * AP extended addressing (APXA) facility is installed.
2130 * Format 1: The APXA facility is not installed but the MSAX3 facility is.
2131 * Format 2: Both the APXA and MSAX3 facilities are installed.
2132 */
45c9b47c
TK
2133static void kvm_s390_set_crycb_format(struct kvm *kvm)
2134{
2135 kvm->arch.crypto.crycbd = (__u32)(unsigned long) kvm->arch.crypto.crycb;
2136
5f131c12
TK
2137 /* Clear the CRYCB format bits - i.e., set format 0 by default */
2138 kvm->arch.crypto.crycbd &= ~(CRYCB_FORMAT_MASK);
2139
2140 /* Check whether MSAX3 is installed */
2141 if (!test_kvm_facility(kvm, 76))
2142 return;
2143
45c9b47c
TK
2144 if (kvm_s390_apxa_installed())
2145 kvm->arch.crypto.crycbd |= CRYCB_FORMAT2;
2146 else
2147 kvm->arch.crypto.crycbd |= CRYCB_FORMAT1;
2148}
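/*
 * For reference, a trivial sketch (not used anywhere in this file)
 * of reading the format back out of the CRYCB designation set up by
 * kvm_s390_set_crycb_format() above; CRYCB_FORMAT_MASK covers the
 * format bits described in the preceding comment.
 */
static inline __u32 kvm_s390_crycb_fmt(struct kvm *kvm)
{
	return kvm->arch.crypto.crycbd & CRYCB_FORMAT_MASK;
}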
2149
57c2cb90
PM
2150void kvm_arch_crypto_set_masks(struct kvm *kvm, unsigned long *apm,
2151 unsigned long *aqm, unsigned long *adm)
2152{
2153 struct kvm_s390_crypto_cb *crycb = kvm->arch.crypto.crycb;
2154
2155 mutex_lock(&kvm->lock);
2156 kvm_s390_vcpu_block_all(kvm);
2157
2158 switch (kvm->arch.crypto.crycbd & CRYCB_FORMAT_MASK) {
2159 case CRYCB_FORMAT2: /* APCB1 uses 256 bits */
2160 memcpy(crycb->apcb1.apm, apm, 32);
2161 VM_EVENT(kvm, 3, "SET CRYCB: apm %016lx %016lx %016lx %016lx",
2162 apm[0], apm[1], apm[2], apm[3]);
2163 memcpy(crycb->apcb1.aqm, aqm, 32);
2164 VM_EVENT(kvm, 3, "SET CRYCB: aqm %016lx %016lx %016lx %016lx",
2165 aqm[0], aqm[1], aqm[2], aqm[3]);
2166 memcpy(crycb->apcb1.adm, adm, 32);
2167 VM_EVENT(kvm, 3, "SET CRYCB: adm %016lx %016lx %016lx %016lx",
2168 adm[0], adm[1], adm[2], adm[3]);
2169 break;
2170 case CRYCB_FORMAT1:
2171 case CRYCB_FORMAT0: /* Fall through - both use APCB0 */
2172 memcpy(crycb->apcb0.apm, apm, 8);
2173 memcpy(crycb->apcb0.aqm, aqm, 2);
2174 memcpy(crycb->apcb0.adm, adm, 2);
2175 VM_EVENT(kvm, 3, "SET CRYCB: apm %016lx aqm %04x adm %04x",
2176 apm[0], *((unsigned short *)aqm),
2177 *((unsigned short *)adm));
2178 break;
2179 default: /* Cannot happen */
2180 break;
2181 }
2182
2183 /* recreate the shadow crycb for each vcpu */
2184 kvm_s390_sync_request_broadcast(kvm, KVM_REQ_VSIE_RESTART);
2185 kvm_s390_vcpu_unblock_all(kvm);
2186 mutex_unlock(&kvm->lock);
2187}
2188EXPORT_SYMBOL_GPL(kvm_arch_crypto_set_masks);
2189
dcf2a7e1
TK
2190void kvm_arch_crypto_clear_masks(struct kvm *kvm)
2191{
2192 mutex_lock(&kvm->lock);
2193 kvm_s390_vcpu_block_all(kvm);
2194
2195 memset(&kvm->arch.crypto.crycb->apcb0, 0,
2196 sizeof(kvm->arch.crypto.crycb->apcb0));
2197 memset(&kvm->arch.crypto.crycb->apcb1, 0,
2198 sizeof(kvm->arch.crypto.crycb->apcb1));
2199
57c2cb90 2200 VM_EVENT(kvm, 3, "%s", "CLR CRYCB:");
7331634f
PM
2201 /* recreate the shadow crycb for each vcpu */
2202 kvm_s390_sync_request_broadcast(kvm, KVM_REQ_VSIE_RESTART);
dcf2a7e1
TK
2203 kvm_s390_vcpu_unblock_all(kvm);
2204 mutex_unlock(&kvm->lock);
2205}
2206EXPORT_SYMBOL_GPL(kvm_arch_crypto_clear_masks);
2207
9bb0ec09 2208static u64 kvm_s390_get_initial_cpuid(void)
9d8d5786 2209{
9bb0ec09
DH
2210 struct cpuid cpuid;
2211
2212 get_cpu_id(&cpuid);
2213 cpuid.version = 0xff;
2214 return *((u64 *) &cpuid);
9d8d5786
MM
2215}
2216
c54f0d6a 2217static void kvm_s390_crypto_init(struct kvm *kvm)
5102ee87 2218{
c54f0d6a 2219 kvm->arch.crypto.crycb = &kvm->arch.sie_page2->crycb;
45c9b47c 2220 kvm_s390_set_crycb_format(kvm);
5102ee87 2221
5f131c12
TK
2222 if (!test_kvm_facility(kvm, 76))
2223 return;
2224
ed6f76b4
TK
2225 /* Enable AES/DEA protected key functions by default */
2226 kvm->arch.crypto.aes_kw = 1;
2227 kvm->arch.crypto.dea_kw = 1;
2228 get_random_bytes(kvm->arch.crypto.crycb->aes_wrapping_key_mask,
2229 sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
2230 get_random_bytes(kvm->arch.crypto.crycb->dea_wrapping_key_mask,
2231 sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));
5102ee87
TK
2232}
2233
7d43bafc
ED
2234static void sca_dispose(struct kvm *kvm)
2235{
2236 if (kvm->arch.use_esca)
5e044315 2237 free_pages_exact(kvm->arch.sca, sizeof(struct esca_block));
7d43bafc
ED
2238 else
2239 free_page((unsigned long)(kvm->arch.sca));
2240 kvm->arch.sca = NULL;
2241}
2242
e08b9637 2243int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
b0c632db 2244{
76a6dd72 2245 gfp_t alloc_flags = GFP_KERNEL;
9d8d5786 2246 int i, rc;
b0c632db 2247 char debug_name[16];
f6c137ff 2248 static unsigned long sca_offset;
b0c632db 2249
e08b9637
CO
2250 rc = -EINVAL;
2251#ifdef CONFIG_KVM_S390_UCONTROL
2252 if (type & ~KVM_VM_S390_UCONTROL)
2253 goto out_err;
2254 if ((type & KVM_VM_S390_UCONTROL) && (!capable(CAP_SYS_ADMIN)))
2255 goto out_err;
2256#else
2257 if (type)
2258 goto out_err;
2259#endif
2260
b0c632db
HC
2261 rc = s390_enable_sie();
2262 if (rc)
d89f5eff 2263 goto out_err;
b0c632db 2264
b290411a
CO
2265 rc = -ENOMEM;
2266
7d43bafc 2267 kvm->arch.use_esca = 0; /* start with basic SCA */
76a6dd72
DH
2268 if (!sclp.has_64bscao)
2269 alloc_flags |= GFP_DMA;
5e044315 2270 rwlock_init(&kvm->arch.sca_lock);
76a6dd72 2271 kvm->arch.sca = (struct bsca_block *) get_zeroed_page(alloc_flags);
b0c632db 2272 if (!kvm->arch.sca)
d89f5eff 2273 goto out_err;
34d52a52 2274 mutex_lock(&kvm_lock);
c5c2c393 2275 sca_offset += 16;
bc784cce 2276 if (sca_offset + sizeof(struct bsca_block) > PAGE_SIZE)
c5c2c393 2277 sca_offset = 0;
bc784cce
ED
2278 kvm->arch.sca = (struct bsca_block *)
2279 ((char *) kvm->arch.sca + sca_offset);
34d52a52 2280 mutex_unlock(&kvm_lock);
b0c632db
HC
2281
2282 sprintf(debug_name, "kvm-%u", current->pid);
2283
1cb9cf72 2284 kvm->arch.dbf = debug_register(debug_name, 32, 1, 7 * sizeof(long));
b0c632db 2285 if (!kvm->arch.dbf)
40f5b735 2286 goto out_err;
b0c632db 2287
c54f0d6a
DH
2288 kvm->arch.sie_page2 =
2289 (struct sie_page2 *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
2290 if (!kvm->arch.sie_page2)
40f5b735 2291 goto out_err;
9d8d5786 2292
c54f0d6a 2293 kvm->arch.model.fac_list = kvm->arch.sie_page2->fac_list;
c936f04c
CB
2294
2295 for (i = 0; i < kvm_s390_fac_size(); i++) {
2296 kvm->arch.model.fac_mask[i] = S390_lowcore.stfle_fac_list[i] &
2297 (kvm_s390_fac_base[i] |
2298 kvm_s390_fac_ext[i]);
2299 kvm->arch.model.fac_list[i] = S390_lowcore.stfle_fac_list[i] &
2300 kvm_s390_fac_base[i];
2301 }
658e657d 2302 kvm->arch.model.subfuncs = kvm_s390_available_subfunc;
981467c9 2303
1935222d
DH
2304 /* we are always in czam mode - even on pre-z14 machines */
2305 set_kvm_facility(kvm->arch.model.fac_mask, 138);
2306 set_kvm_facility(kvm->arch.model.fac_list, 138);
2307 /* we emulate STHYI in kvm */
95ca2cb5
JF
2308 set_kvm_facility(kvm->arch.model.fac_mask, 74);
2309 set_kvm_facility(kvm->arch.model.fac_list, 74);
1bab1c02
CI
2310 if (MACHINE_HAS_TLB_GUEST) {
2311 set_kvm_facility(kvm->arch.model.fac_mask, 147);
2312 set_kvm_facility(kvm->arch.model.fac_list, 147);
2313 }
95ca2cb5 2314
9bb0ec09 2315 kvm->arch.model.cpuid = kvm_s390_get_initial_cpuid();
37c5f6c8 2316 kvm->arch.model.ibc = sclp.ibc & 0x0fff;
9d8d5786 2317
c54f0d6a 2318 kvm_s390_crypto_init(kvm);
5102ee87 2319
51978393
FL
2320 mutex_init(&kvm->arch.float_int.ais_lock);
2321 kvm->arch.float_int.simm = 0;
2322 kvm->arch.float_int.nimm = 0;
ba5c1e9b 2323 spin_lock_init(&kvm->arch.float_int.lock);
6d3da241
JF
2324 for (i = 0; i < FIRQ_LIST_COUNT; i++)
2325 INIT_LIST_HEAD(&kvm->arch.float_int.lists[i]);
8a242234 2326 init_waitqueue_head(&kvm->arch.ipte_wq);
a6b7e459 2327 mutex_init(&kvm->arch.ipte_mutex);
ba5c1e9b 2328
b0c632db 2329 debug_register_view(kvm->arch.dbf, &debug_sprintf_view);
78f26131 2330 VM_EVENT(kvm, 3, "vm created with type %lu", type);
b0c632db 2331
e08b9637
CO
2332 if (type & KVM_VM_S390_UCONTROL) {
2333 kvm->arch.gmap = NULL;
a3a92c31 2334 kvm->arch.mem_limit = KVM_S390_NO_MEM_LIMIT;
e08b9637 2335 } else {
32e6b236 2336 if (sclp.hamax == U64_MAX)
ee71d16d 2337 kvm->arch.mem_limit = TASK_SIZE_MAX;
32e6b236 2338 else
ee71d16d 2339 kvm->arch.mem_limit = min_t(unsigned long, TASK_SIZE_MAX,
32e6b236 2340 sclp.hamax + 1);
6ea427bb 2341 kvm->arch.gmap = gmap_create(current->mm, kvm->arch.mem_limit - 1);
e08b9637 2342 if (!kvm->arch.gmap)
40f5b735 2343 goto out_err;
2c70fe44 2344 kvm->arch.gmap->private = kvm;
24eb3a82 2345 kvm->arch.gmap->pfault_enabled = 0;
e08b9637 2346 }
fa6b7fe9
CH
2347
2348 kvm->arch.css_support = 0;
84223598 2349 kvm->arch.use_irqchip = 0;
72f25020 2350 kvm->arch.epoch = 0;
fa6b7fe9 2351
8ad35755 2352 spin_lock_init(&kvm->arch.start_stop_lock);
a3508fbe 2353 kvm_s390_vsie_init(kvm);
8335713a 2354 KVM_EVENT(3, "vm 0x%pK created by pid %u", kvm, current->pid);
8ad35755 2355
d89f5eff 2356 return 0;
40f5b735 2357out_err:
c54f0d6a 2358 free_page((unsigned long)kvm->arch.sie_page2);
598841ca 2359 debug_unregister(kvm->arch.dbf);
7d43bafc 2360 sca_dispose(kvm);
78f26131 2361 KVM_EVENT(3, "creation of vm failed: %d", rc);
d89f5eff 2362 return rc;
b0c632db
HC
2363}
2364
235539b4
LC
2365bool kvm_arch_has_vcpu_debugfs(void)
2366{
2367 return false;
2368}
2369
2370int kvm_arch_create_vcpu_debugfs(struct kvm_vcpu *vcpu)
2371{
2372 return 0;
2373}
2374
d329c035
CB
2375void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
2376{
2377 VCPU_EVENT(vcpu, 3, "%s", "free cpu");
ade38c31 2378 trace_kvm_s390_destroy_vcpu(vcpu->vcpu_id);
67335e63 2379 kvm_s390_clear_local_irqs(vcpu);
3c038e6b 2380 kvm_clear_async_pf_completion_queue(vcpu);
bc784cce 2381 if (!kvm_is_ucontrol(vcpu->kvm))
a6e2f683 2382 sca_del_vcpu(vcpu);
27e0393f
CO
2383
2384 if (kvm_is_ucontrol(vcpu->kvm))
6ea427bb 2385 gmap_remove(vcpu->arch.gmap);
27e0393f 2386
e6db1d61 2387 if (vcpu->kvm->arch.use_cmma)
b31605c1 2388 kvm_s390_vcpu_unsetup_cmma(vcpu);
d329c035 2389 free_page((unsigned long)(vcpu->arch.sie_block));
b31288fa 2390
6692cef3 2391 kvm_vcpu_uninit(vcpu);
b110feaf 2392 kmem_cache_free(kvm_vcpu_cache, vcpu);
d329c035
CB
2393}
2394
2395static void kvm_free_vcpus(struct kvm *kvm)
2396{
2397 unsigned int i;
988a2cae 2398 struct kvm_vcpu *vcpu;
d329c035 2399
988a2cae
GN
2400 kvm_for_each_vcpu(i, vcpu, kvm)
2401 kvm_arch_vcpu_destroy(vcpu);
2402
2403 mutex_lock(&kvm->lock);
2404 for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
2405 kvm->vcpus[i] = NULL;
2406
2407 atomic_set(&kvm->online_vcpus, 0);
2408 mutex_unlock(&kvm->lock);
d329c035
CB
2409}
2410
b0c632db
HC
2411void kvm_arch_destroy_vm(struct kvm *kvm)
2412{
d329c035 2413 kvm_free_vcpus(kvm);
7d43bafc 2414 sca_dispose(kvm);
d329c035 2415 debug_unregister(kvm->arch.dbf);
c54f0d6a 2416 free_page((unsigned long)kvm->arch.sie_page2);
27e0393f 2417 if (!kvm_is_ucontrol(kvm))
6ea427bb 2418 gmap_remove(kvm->arch.gmap);
841b91c5 2419 kvm_s390_destroy_adapters(kvm);
67335e63 2420 kvm_s390_clear_float_irqs(kvm);
a3508fbe 2421 kvm_s390_vsie_destroy(kvm);
190df4a2
CI
2422 if (kvm->arch.migration_state) {
2423 vfree(kvm->arch.migration_state->pgste_bitmap);
2424 kfree(kvm->arch.migration_state);
2425 }
8335713a 2426 KVM_EVENT(3, "vm 0x%pK destroyed", kvm);
b0c632db
HC
2427}
2428
2429/* Section: vcpu related */
dafd032a
DD
2430static int __kvm_ucontrol_vcpu_init(struct kvm_vcpu *vcpu)
2431{
6ea427bb 2432 vcpu->arch.gmap = gmap_create(current->mm, -1UL);
dafd032a
DD
2433 if (!vcpu->arch.gmap)
2434 return -ENOMEM;
2435 vcpu->arch.gmap->private = vcpu->kvm;
2436
2437 return 0;
2438}
2439
a6e2f683
ED
2440static void sca_del_vcpu(struct kvm_vcpu *vcpu)
2441{
a6940674
DH
2442 if (!kvm_s390_use_sca_entries())
2443 return;
5e044315 2444 read_lock(&vcpu->kvm->arch.sca_lock);
7d43bafc
ED
2445 if (vcpu->kvm->arch.use_esca) {
2446 struct esca_block *sca = vcpu->kvm->arch.sca;
a6e2f683 2447
7d43bafc 2448 clear_bit_inv(vcpu->vcpu_id, (unsigned long *) sca->mcn);
10ce32d5 2449 sca->cpu[vcpu->vcpu_id].sda = 0;
7d43bafc
ED
2450 } else {
2451 struct bsca_block *sca = vcpu->kvm->arch.sca;
2452
2453 clear_bit_inv(vcpu->vcpu_id, (unsigned long *) &sca->mcn);
10ce32d5 2454 sca->cpu[vcpu->vcpu_id].sda = 0;
7d43bafc 2455 }
5e044315 2456 read_unlock(&vcpu->kvm->arch.sca_lock);
a6e2f683
ED
2457}
2458
eaa78f34 2459static void sca_add_vcpu(struct kvm_vcpu *vcpu)
a6e2f683 2460{
a6940674
DH
2461 if (!kvm_s390_use_sca_entries()) {
2462 struct bsca_block *sca = vcpu->kvm->arch.sca;
2463
2464 /* we still need the basic sca for the ipte control */
2465 vcpu->arch.sie_block->scaoh = (__u32)(((__u64)sca) >> 32);
2466 vcpu->arch.sie_block->scaol = (__u32)(__u64)sca;
c3ece90c 2467 return;
a6940674 2468 }
eaa78f34
DH
2469 read_lock(&vcpu->kvm->arch.sca_lock);
2470 if (vcpu->kvm->arch.use_esca) {
2471 struct esca_block *sca = vcpu->kvm->arch.sca;
7d43bafc 2472
eaa78f34 2473 sca->cpu[vcpu->vcpu_id].sda = (__u64) vcpu->arch.sie_block;
7d43bafc
ED
2474 vcpu->arch.sie_block->scaoh = (__u32)(((__u64)sca) >> 32);
2475 vcpu->arch.sie_block->scaol = (__u32)(__u64)sca & ~0x3fU;
0c9d8683 2476 vcpu->arch.sie_block->ecb2 |= ECB2_ESCA;
eaa78f34 2477 set_bit_inv(vcpu->vcpu_id, (unsigned long *) sca->mcn);
7d43bafc 2478 } else {
eaa78f34 2479 struct bsca_block *sca = vcpu->kvm->arch.sca;
a6e2f683 2480
eaa78f34 2481 sca->cpu[vcpu->vcpu_id].sda = (__u64) vcpu->arch.sie_block;
7d43bafc
ED
2482 vcpu->arch.sie_block->scaoh = (__u32)(((__u64)sca) >> 32);
2483 vcpu->arch.sie_block->scaol = (__u32)(__u64)sca;
eaa78f34 2484 set_bit_inv(vcpu->vcpu_id, (unsigned long *) &sca->mcn);
7d43bafc 2485 }
eaa78f34 2486 read_unlock(&vcpu->kvm->arch.sca_lock);
5e044315
ED
2487}
2488
2489/* Basic SCA to Extended SCA data copy routines */
2490static inline void sca_copy_entry(struct esca_entry *d, struct bsca_entry *s)
2491{
2492 d->sda = s->sda;
2493 d->sigp_ctrl.c = s->sigp_ctrl.c;
2494 d->sigp_ctrl.scn = s->sigp_ctrl.scn;
2495}
2496
2497static void sca_copy_b_to_e(struct esca_block *d, struct bsca_block *s)
2498{
2499 int i;
2500
2501 d->ipte_control = s->ipte_control;
2502 d->mcn[0] = s->mcn;
2503 for (i = 0; i < KVM_S390_BSCA_CPU_SLOTS; i++)
2504 sca_copy_entry(&d->cpu[i], &s->cpu[i]);
2505}
2506
2507static int sca_switch_to_extended(struct kvm *kvm)
2508{
2509 struct bsca_block *old_sca = kvm->arch.sca;
2510 struct esca_block *new_sca;
2511 struct kvm_vcpu *vcpu;
2512 unsigned int vcpu_idx;
2513 u32 scaol, scaoh;
2514
2515 new_sca = alloc_pages_exact(sizeof(*new_sca), GFP_KERNEL|__GFP_ZERO);
2516 if (!new_sca)
2517 return -ENOMEM;
2518
2519 scaoh = (u32)((u64)(new_sca) >> 32);
2520 scaol = (u32)(u64)(new_sca) & ~0x3fU;
2521
2522 kvm_s390_vcpu_block_all(kvm);
2523 write_lock(&kvm->arch.sca_lock);
2524
2525 sca_copy_b_to_e(new_sca, old_sca);
2526
2527 kvm_for_each_vcpu(vcpu_idx, vcpu, kvm) {
2528 vcpu->arch.sie_block->scaoh = scaoh;
2529 vcpu->arch.sie_block->scaol = scaol;
0c9d8683 2530 vcpu->arch.sie_block->ecb2 |= ECB2_ESCA;
5e044315
ED
2531 }
2532 kvm->arch.sca = new_sca;
2533 kvm->arch.use_esca = 1;
2534
2535 write_unlock(&kvm->arch.sca_lock);
2536 kvm_s390_vcpu_unblock_all(kvm);
2537
2538 free_page((unsigned long)old_sca);
2539
8335713a
CB
2540 VM_EVENT(kvm, 2, "Switched to ESCA (0x%pK -> 0x%pK)",
2541 old_sca, kvm->arch.sca);
5e044315 2542 return 0;
a6e2f683
ED
2543}
2544
2545static int sca_can_add_vcpu(struct kvm *kvm, unsigned int id)
2546{
5e044315
ED
2547 int rc;
2548
a6940674
DH
2549 if (!kvm_s390_use_sca_entries()) {
2550 if (id < KVM_MAX_VCPUS)
2551 return true;
2552 return false;
2553 }
5e044315
ED
2554 if (id < KVM_S390_BSCA_CPU_SLOTS)
2555 return true;
76a6dd72 2556 if (!sclp.has_esca || !sclp.has_64bscao)
5e044315
ED
2557 return false;
2558
2559 mutex_lock(&kvm->lock);
2560 rc = kvm->arch.use_esca ? 0 : sca_switch_to_extended(kvm);
2561 mutex_unlock(&kvm->lock);
2562
2563 return rc == 0 && id < KVM_S390_ESCA_CPU_SLOTS;
a6e2f683
ED
2564}
2565
b0c632db
HC
2566int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
2567{
3c038e6b
DD
2568 vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
2569 kvm_clear_async_pf_completion_queue(vcpu);
59674c1a
CB
2570 vcpu->run->kvm_valid_regs = KVM_SYNC_PREFIX |
2571 KVM_SYNC_GPRS |
9eed0735 2572 KVM_SYNC_ACRS |
b028ee3e
DH
2573 KVM_SYNC_CRS |
2574 KVM_SYNC_ARCH0 |
2575 KVM_SYNC_PFAULT;
75a4615c 2576 kvm_s390_set_prefix(vcpu, 0);
c6e5f166
FZ
2577 if (test_kvm_facility(vcpu->kvm, 64))
2578 vcpu->run->kvm_valid_regs |= KVM_SYNC_RICCB;
35b3fde6
CB
2579 if (test_kvm_facility(vcpu->kvm, 82))
2580 vcpu->run->kvm_valid_regs |= KVM_SYNC_BPBC;
4e0b1ab7
FZ
2581 if (test_kvm_facility(vcpu->kvm, 133))
2582 vcpu->run->kvm_valid_regs |= KVM_SYNC_GSCB;
012d8745
CB
2583 if (test_kvm_facility(vcpu->kvm, 156))
2584 vcpu->run->kvm_valid_regs |= KVM_SYNC_ETOKEN;
f6aa6dc4
DH
2585 /* fprs can be synchronized via vrs, even if the guest has no vx. With
2586 * MACHINE_HAS_VX, (load|store)_fpu_regs() will work with vrs format.
2587 */
2588 if (MACHINE_HAS_VX)
68c55750 2589 vcpu->run->kvm_valid_regs |= KVM_SYNC_VRS;
6fd8e67d
DH
2590 else
2591 vcpu->run->kvm_valid_regs |= KVM_SYNC_FPRS;
dafd032a
DD
2592
2593 if (kvm_is_ucontrol(vcpu->kvm))
2594 return __kvm_ucontrol_vcpu_init(vcpu);
2595
b0c632db
HC
2596 return 0;
2597}
2598
db0758b2
DH
2599/* needs disabled preemption to protect from TOD sync and vcpu_load/put */
2600static void __start_cpu_timer_accounting(struct kvm_vcpu *vcpu)
2601{
2602 WARN_ON_ONCE(vcpu->arch.cputm_start != 0);
9c23a131 2603 raw_write_seqcount_begin(&vcpu->arch.cputm_seqcount);
db0758b2 2604 vcpu->arch.cputm_start = get_tod_clock_fast();
9c23a131 2605 raw_write_seqcount_end(&vcpu->arch.cputm_seqcount);
db0758b2
DH
2606}
2607
2608/* needs disabled preemption to protect from TOD sync and vcpu_load/put */
2609static void __stop_cpu_timer_accounting(struct kvm_vcpu *vcpu)
2610{
2611 WARN_ON_ONCE(vcpu->arch.cputm_start == 0);
9c23a131 2612 raw_write_seqcount_begin(&vcpu->arch.cputm_seqcount);
db0758b2
DH
2613 vcpu->arch.sie_block->cputm -= get_tod_clock_fast() - vcpu->arch.cputm_start;
2614 vcpu->arch.cputm_start = 0;
9c23a131 2615 raw_write_seqcount_end(&vcpu->arch.cputm_seqcount);
db0758b2
DH
2616}
2617
2618/* needs disabled preemption to protect from TOD sync and vcpu_load/put */
2619static void __enable_cpu_timer_accounting(struct kvm_vcpu *vcpu)
2620{
2621 WARN_ON_ONCE(vcpu->arch.cputm_enabled);
2622 vcpu->arch.cputm_enabled = true;
2623 __start_cpu_timer_accounting(vcpu);
2624}
2625
2626/* needs disabled preemption to protect from TOD sync and vcpu_load/put */
2627static void __disable_cpu_timer_accounting(struct kvm_vcpu *vcpu)
2628{
2629 WARN_ON_ONCE(!vcpu->arch.cputm_enabled);
2630 __stop_cpu_timer_accounting(vcpu);
2631 vcpu->arch.cputm_enabled = false;
2632}
2633
2634static void enable_cpu_timer_accounting(struct kvm_vcpu *vcpu)
2635{
2636 preempt_disable(); /* protect from TOD sync and vcpu_load/put */
2637 __enable_cpu_timer_accounting(vcpu);
2638 preempt_enable();
2639}
2640
2641static void disable_cpu_timer_accounting(struct kvm_vcpu *vcpu)
2642{
2643 preempt_disable(); /* protect from TOD sync and vcpu_load/put */
2644 __disable_cpu_timer_accounting(vcpu);
2645 preempt_enable();
2646}
2647
4287f247
DH
2648/* set the cpu timer - may only be called from the VCPU thread itself */
2649void kvm_s390_set_cpu_timer(struct kvm_vcpu *vcpu, __u64 cputm)
2650{
db0758b2 2651 preempt_disable(); /* protect from TOD sync and vcpu_load/put */
9c23a131 2652 raw_write_seqcount_begin(&vcpu->arch.cputm_seqcount);
db0758b2
DH
2653 if (vcpu->arch.cputm_enabled)
2654 vcpu->arch.cputm_start = get_tod_clock_fast();
4287f247 2655 vcpu->arch.sie_block->cputm = cputm;
9c23a131 2656 raw_write_seqcount_end(&vcpu->arch.cputm_seqcount);
db0758b2 2657 preempt_enable();
4287f247
DH
2658}
2659
db0758b2 2660/* update and get the cpu timer - can also be called from other VCPU threads */
4287f247
DH
2661__u64 kvm_s390_get_cpu_timer(struct kvm_vcpu *vcpu)
2662{
9c23a131 2663 unsigned int seq;
db0758b2 2664 __u64 value;
db0758b2
DH
2665
2666 if (unlikely(!vcpu->arch.cputm_enabled))
2667 return vcpu->arch.sie_block->cputm;
2668
9c23a131
DH
2669 preempt_disable(); /* protect from TOD sync and vcpu_load/put */
2670 do {
2671 seq = raw_read_seqcount(&vcpu->arch.cputm_seqcount);
2672 /*
2673 * If the writer would ever execute a read in the critical
2674 * section, e.g. in irq context, we have a deadlock.
2675 */
2676 WARN_ON_ONCE((seq & 1) && smp_processor_id() == vcpu->cpu);
2677 value = vcpu->arch.sie_block->cputm;
2678 /* if cputm_start is 0, accounting is being started/stopped */
2679 if (likely(vcpu->arch.cputm_start))
2680 value -= get_tod_clock_fast() - vcpu->arch.cputm_start;
2681 } while (read_seqcount_retry(&vcpu->arch.cputm_seqcount, seq & ~1));
2682 preempt_enable();
db0758b2 2683 return value;
4287f247
DH
2684}
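/*
 * The retry loop above is the generic seqcount reader pattern; a
 * stripped-down sketch of the reader side, assuming <linux/seqlock.h>
 * (names illustrative):
 */
static __u64 read_consistent(const seqcount_t *sc, const __u64 *value)
{
	unsigned int seq;
	__u64 v;

	do {
		seq = raw_read_seqcount(sc);	/* may observe an odd seq */
		v = READ_ONCE(*value);
	} while (read_seqcount_retry(sc, seq & ~1));

	return v;
}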
2685
b0c632db
HC
2686void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
2687{
9977e886 2688
37d9df98 2689 gmap_enable(vcpu->arch.enabled_gmap);
805de8f4 2690 atomic_or(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
5ebda316 2691 if (vcpu->arch.cputm_enabled && !is_vcpu_idle(vcpu))
db0758b2 2692 __start_cpu_timer_accounting(vcpu);
01a745ac 2693 vcpu->cpu = cpu;
b0c632db
HC
2694}
2695
2696void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
2697{
01a745ac 2698 vcpu->cpu = -1;
5ebda316 2699 if (vcpu->arch.cputm_enabled && !is_vcpu_idle(vcpu))
db0758b2 2700 __stop_cpu_timer_accounting(vcpu);
805de8f4 2701 atomic_andnot(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
37d9df98
DH
2702 vcpu->arch.enabled_gmap = gmap_get_enabled();
2703 gmap_disable(vcpu->arch.enabled_gmap);
9977e886 2704
b0c632db
HC
2705}
2706
2707static void kvm_s390_vcpu_initial_reset(struct kvm_vcpu *vcpu)
2708{
2709 /* this equals initial cpu reset in POP, but we don't switch to ESA */
2710 vcpu->arch.sie_block->gpsw.mask = 0UL;
2711 vcpu->arch.sie_block->gpsw.addr = 0UL;
8d26cf7b 2712 kvm_s390_set_prefix(vcpu, 0);
4287f247 2713 kvm_s390_set_cpu_timer(vcpu, 0);
b0c632db
HC
2714 vcpu->arch.sie_block->ckc = 0UL;
2715 vcpu->arch.sie_block->todpr = 0;
2716 memset(vcpu->arch.sie_block->gcr, 0, 16 * sizeof(__u64));
2717 vcpu->arch.sie_block->gcr[0] = 0xE0UL;
2718 vcpu->arch.sie_block->gcr[14] = 0xC2000000UL;
bd45c812 2719 vcpu->run->s.regs.fpc = 0;
b0c632db 2720 vcpu->arch.sie_block->gbea = 1;
672550fb 2721 vcpu->arch.sie_block->pp = 0;
35b3fde6 2722 vcpu->arch.sie_block->fpf &= ~FPF_BPBC;
3c038e6b
DD
2723 vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
2724 kvm_clear_async_pf_completion_queue(vcpu);
6352e4d2
DH
2725 if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm))
2726 kvm_s390_vcpu_stop(vcpu);
2ed10cc1 2727 kvm_s390_clear_local_irqs(vcpu);
b0c632db
HC
2728}
2729
31928aa5 2730void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
42897d86 2731{
72f25020 2732 mutex_lock(&vcpu->kvm->lock);
fdf03650 2733 preempt_disable();
72f25020 2734 vcpu->arch.sie_block->epoch = vcpu->kvm->arch.epoch;
f96b92a1 2735 vcpu->arch.sie_block->epdx = vcpu->kvm->arch.epdx;
fdf03650 2736 preempt_enable();
72f25020 2737 mutex_unlock(&vcpu->kvm->lock);
25508824 2738 if (!kvm_is_ucontrol(vcpu->kvm)) {
dafd032a 2739 vcpu->arch.gmap = vcpu->kvm->arch.gmap;
eaa78f34 2740 sca_add_vcpu(vcpu);
25508824 2741 }
6502a34c
DH
2742 if (test_kvm_facility(vcpu->kvm, 74) || vcpu->kvm->arch.user_instr0)
2743 vcpu->arch.sie_block->ictl |= ICTL_OPEREXC;
37d9df98
DH
2744 /* make vcpu_load load the right gmap on the first trigger */
2745 vcpu->arch.enabled_gmap = vcpu->arch.gmap;
42897d86
MT
2746}
2747
049e9859
CB
2748static bool kvm_has_pckmo_subfunc(struct kvm *kvm, unsigned long nr)
2749{
2750 if (test_bit_inv(nr, (unsigned long *)&kvm->arch.model.subfuncs.pckmo) &&
2751 test_bit_inv(nr, (unsigned long *)&kvm_s390_available_subfunc.pckmo))
2752 return true;
2753 return false;
2754}
2755
2756static bool kvm_has_pckmo_ecc(struct kvm *kvm)
2757{
2758 /* At least one ECC subfunction must be present */
2759 return kvm_has_pckmo_subfunc(kvm, 32) ||
2760 kvm_has_pckmo_subfunc(kvm, 33) ||
2761 kvm_has_pckmo_subfunc(kvm, 34) ||
2762 kvm_has_pckmo_subfunc(kvm, 40) ||
2763 kvm_has_pckmo_subfunc(kvm, 41);
2764
2765}
2766
5102ee87
TK
2767static void kvm_s390_vcpu_crypto_setup(struct kvm_vcpu *vcpu)
2768{
5f131c12
TK
2769 /*
2770 * If the AP instructions are not being interpreted and the MSAX3
2771 * facility is not configured for the guest, there is nothing to set up.
2772 */
2773 if (!vcpu->kvm->arch.crypto.apie && !test_kvm_facility(vcpu->kvm, 76))
5102ee87
TK
2774 return;
2775
5f131c12 2776 vcpu->arch.sie_block->crycbd = vcpu->kvm->arch.crypto.crycbd;
a374e892 2777 vcpu->arch.sie_block->ecb3 &= ~(ECB3_AES | ECB3_DEA);
bec73b14 2778 vcpu->arch.sie_block->eca &= ~ECA_APIE;
049e9859 2779 vcpu->arch.sie_block->ecd &= ~ECD_ECC;
a374e892 2780
5f131c12
TK
2781 if (vcpu->kvm->arch.crypto.apie)
2782 vcpu->arch.sie_block->eca |= ECA_APIE;
2783
2784 /* Set up protected key support */
049e9859 2785 if (vcpu->kvm->arch.crypto.aes_kw) {
a374e892 2786 vcpu->arch.sie_block->ecb3 |= ECB3_AES;
049e9859
CB
2787 /* ECC is also wrapped with the AES key */
2788 if (kvm_has_pckmo_ecc(vcpu->kvm))
2789 vcpu->arch.sie_block->ecd |= ECD_ECC;
2790 }
2791
a374e892
TK
2792 if (vcpu->kvm->arch.crypto.dea_kw)
2793 vcpu->arch.sie_block->ecb3 |= ECB3_DEA;
5102ee87
TK
2794}
2795
b31605c1
DD
2796void kvm_s390_vcpu_unsetup_cmma(struct kvm_vcpu *vcpu)
2797{
2798 free_page(vcpu->arch.sie_block->cbrlo);
2799 vcpu->arch.sie_block->cbrlo = 0;
2800}
2801
2802int kvm_s390_vcpu_setup_cmma(struct kvm_vcpu *vcpu)
2803{
2804 vcpu->arch.sie_block->cbrlo = get_zeroed_page(GFP_KERNEL);
2805 if (!vcpu->arch.sie_block->cbrlo)
2806 return -ENOMEM;
2807
0c9d8683 2808 vcpu->arch.sie_block->ecb2 &= ~ECB2_PFMFI;
b31605c1
DD
2809 return 0;
2810}
2811
91520f1a
MM
2812static void kvm_s390_vcpu_setup_model(struct kvm_vcpu *vcpu)
2813{
2814 struct kvm_s390_cpu_model *model = &vcpu->kvm->arch.model;
2815
91520f1a 2816 vcpu->arch.sie_block->ibc = model->ibc;
80bc79dc 2817 if (test_kvm_facility(vcpu->kvm, 7))
c54f0d6a 2818 vcpu->arch.sie_block->fac = (u32)(u64) model->fac_list;
91520f1a
MM
2819}
2820
b0c632db
HC
2821int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
2822{
b31605c1 2823 int rc = 0;
b31288fa 2824
9e6dabef
CH
2825 atomic_set(&vcpu->arch.sie_block->cpuflags, CPUSTAT_ZARCH |
2826 CPUSTAT_SM |
a4a4f191
GH
2827 CPUSTAT_STOPPED);
2828
53df84f8 2829 if (test_kvm_facility(vcpu->kvm, 78))
805de8f4 2830 atomic_or(CPUSTAT_GED2, &vcpu->arch.sie_block->cpuflags);
53df84f8 2831 else if (test_kvm_facility(vcpu->kvm, 8))
805de8f4 2832 atomic_or(CPUSTAT_GED, &vcpu->arch.sie_block->cpuflags);
a4a4f191 2833
91520f1a
MM
2834 kvm_s390_vcpu_setup_model(vcpu);
2835
bdab09f3
DH
2836 /* pgste_set_pte has special handling for !MACHINE_HAS_ESOP */
2837 if (MACHINE_HAS_ESOP)
0c9d8683 2838 vcpu->arch.sie_block->ecb |= ECB_HOSTPROTINT;
bd50e8ec 2839 if (test_kvm_facility(vcpu->kvm, 9))
0c9d8683 2840 vcpu->arch.sie_block->ecb |= ECB_SRSI;
f597d24e 2841 if (test_kvm_facility(vcpu->kvm, 73))
0c9d8683 2842 vcpu->arch.sie_block->ecb |= ECB_TE;
7feb6bb8 2843
873b425e 2844 if (test_kvm_facility(vcpu->kvm, 8) && sclp.has_pfmfi)
0c9d8683 2845 vcpu->arch.sie_block->ecb2 |= ECB2_PFMFI;
cd1836f5 2846 if (test_kvm_facility(vcpu->kvm, 130))
0c9d8683
DH
2847 vcpu->arch.sie_block->ecb2 |= ECB2_IEP;
2848 vcpu->arch.sie_block->eca = ECA_MVPGI | ECA_PROTEXCI;
48ee7d3a 2849 if (sclp.has_cei)
0c9d8683 2850 vcpu->arch.sie_block->eca |= ECA_CEI;
11ad65b7 2851 if (sclp.has_ib)
0c9d8683 2852 vcpu->arch.sie_block->eca |= ECA_IB;
37c5f6c8 2853 if (sclp.has_siif)
0c9d8683 2854 vcpu->arch.sie_block->eca |= ECA_SII;
37c5f6c8 2855 if (sclp.has_sigpif)
0c9d8683 2856 vcpu->arch.sie_block->eca |= ECA_SIGPI;
18280d8b 2857 if (test_kvm_facility(vcpu->kvm, 129)) {
0c9d8683
DH
2858 vcpu->arch.sie_block->eca |= ECA_VX;
2859 vcpu->arch.sie_block->ecd |= ECD_HOSTREGMGMT;
13211ea7 2860 }
8fa1696e
CW
2861 if (test_kvm_facility(vcpu->kvm, 139))
2862 vcpu->arch.sie_block->ecd |= ECD_MEF;
012d8745
CB
2863 if (test_kvm_facility(vcpu->kvm, 156))
2864 vcpu->arch.sie_block->ecd |= ECD_ETOKENF;
4e0b1ab7
FZ
2865 vcpu->arch.sie_block->sdnxo = ((unsigned long) &vcpu->run->s.regs.sdnx)
2866 | SDNXC;
c6e5f166 2867 vcpu->arch.sie_block->riccbd = (unsigned long) &vcpu->run->s.regs.riccb;
730cd632
FA
2868
2869 if (sclp.has_kss)
2870 atomic_or(CPUSTAT_KSS, &vcpu->arch.sie_block->cpuflags);
2871 else
2872 vcpu->arch.sie_block->ictl |= ICTL_ISKE | ICTL_SSKE | ICTL_RRBE;
5a5e6536 2873
e6db1d61 2874 if (vcpu->kvm->arch.use_cmma) {
b31605c1
DD
2875 rc = kvm_s390_vcpu_setup_cmma(vcpu);
2876 if (rc)
2877 return rc;
b31288fa 2878 }
0ac96caf 2879 hrtimer_init(&vcpu->arch.ckc_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
ca872302 2880 vcpu->arch.ckc_timer.function = kvm_s390_idle_wakeup;
9d8d5786 2881
5102ee87
TK
2882 kvm_s390_vcpu_crypto_setup(vcpu);
2883
b31605c1 2884 return rc;
b0c632db
HC
2885}
2886
2887struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
2888 unsigned int id)
2889{
4d47555a 2890 struct kvm_vcpu *vcpu;
7feb6bb8 2891 struct sie_page *sie_page;
4d47555a
CO
2892 int rc = -EINVAL;
2893
4215825e 2894 if (!kvm_is_ucontrol(kvm) && !sca_can_add_vcpu(kvm, id))
4d47555a
CO
2895 goto out;
2896
2897 rc = -ENOMEM;
b0c632db 2898
b110feaf 2899 vcpu = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
b0c632db 2900 if (!vcpu)
4d47555a 2901 goto out;
b0c632db 2902
da72ca4d 2903 BUILD_BUG_ON(sizeof(struct sie_page) != 4096);
7feb6bb8
MM
2904 sie_page = (struct sie_page *) get_zeroed_page(GFP_KERNEL);
2905 if (!sie_page)
b0c632db
HC
2906 goto out_free_cpu;
2907
7feb6bb8
MM
2908 vcpu->arch.sie_block = &sie_page->sie_block;
2909 vcpu->arch.sie_block->itdba = (unsigned long) &sie_page->itdb;
2910
efed1104
DH
2911 /* the real guest size will always be smaller than msl */
2912 vcpu->arch.sie_block->mso = 0;
2913 vcpu->arch.sie_block->msl = sclp.hamax;
2914
b0c632db 2915 vcpu->arch.sie_block->icpua = id;
ba5c1e9b 2916 spin_lock_init(&vcpu->arch.local_int.lock);
ba5c1e9b 2917 vcpu->arch.local_int.float_int = &kvm->arch.float_int;
d0321a24 2918 vcpu->arch.local_int.wq = &vcpu->wq;
5288fbf0 2919 vcpu->arch.local_int.cpuflags = &vcpu->arch.sie_block->cpuflags;
9c23a131 2920 seqcount_init(&vcpu->arch.cputm_seqcount);
ba5c1e9b 2921
b0c632db
HC
2922 rc = kvm_vcpu_init(vcpu, kvm, id);
2923 if (rc)
9abc2a08 2924 goto out_free_sie_block;
8335713a 2925 VM_EVENT(kvm, 3, "create cpu %d at 0x%pK, sie block at 0x%pK", id, vcpu,
b0c632db 2926 vcpu->arch.sie_block);
ade38c31 2927 trace_kvm_s390_create_vcpu(id, vcpu, vcpu->arch.sie_block);
b0c632db 2928
b0c632db 2929 return vcpu;
7b06bf2f
WY
2930out_free_sie_block:
2931 free_page((unsigned long)(vcpu->arch.sie_block));
b0c632db 2932out_free_cpu:
b110feaf 2933 kmem_cache_free(kvm_vcpu_cache, vcpu);
4d47555a 2934out:
b0c632db
HC
2935 return ERR_PTR(rc);
2936}
2937
b0c632db
HC
2938int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
2939{
9a022067 2940 return kvm_s390_vcpu_has_irq(vcpu, 0);
b0c632db
HC
2941}
2942
199b5763
LM
2943bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu)
2944{
0546c63d 2945 return !(vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE);
199b5763
LM
2946}
2947
27406cd5 2948void kvm_s390_vcpu_block(struct kvm_vcpu *vcpu)
49b99e1e 2949{
805de8f4 2950 atomic_or(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
61a6df54 2951 exit_sie(vcpu);
49b99e1e
CB
2952}
2953
27406cd5 2954void kvm_s390_vcpu_unblock(struct kvm_vcpu *vcpu)
49b99e1e 2955{
805de8f4 2956 atomic_andnot(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
49b99e1e
CB
2957}
2958
8e236546
CB
2959static void kvm_s390_vcpu_request(struct kvm_vcpu *vcpu)
2960{
805de8f4 2961 atomic_or(PROG_REQUEST, &vcpu->arch.sie_block->prog20);
61a6df54 2962 exit_sie(vcpu);
8e236546
CB
2963}
2964
687e04e6
DH
2965bool kvm_s390_vcpu_sie_inhibited(struct kvm_vcpu *vcpu)
2966{
2967 return atomic_read(&vcpu->arch.sie_block->prog20) &
2968 (PROG_BLOCK_SIE | PROG_REQUEST);
2969}
2970
8e236546
CB
2971static void kvm_s390_vcpu_request_handled(struct kvm_vcpu *vcpu)
2972{
9bf9fde2 2973 atomic_andnot(PROG_REQUEST, &vcpu->arch.sie_block->prog20);
8e236546
CB
2974}
2975
49b99e1e 2976/*
687e04e6 2977 * Kick a guest cpu out of (v)SIE and wait until (v)SIE is not running.
49b99e1e
CB
2978 * If the CPU is not running (e.g. waiting while idle) the function will
2979 * return immediately. */
2980void exit_sie(struct kvm_vcpu *vcpu)
2981{
805de8f4 2982 atomic_or(CPUSTAT_STOP_INT, &vcpu->arch.sie_block->cpuflags);
687e04e6 2983 kvm_s390_vsie_kick(vcpu);
49b99e1e
CB
2984 while (vcpu->arch.sie_block->prog0c & PROG_IN_SIE)
2985 cpu_relax();
2986}
2987
8e236546
CB
2988/* Kick a guest cpu out of SIE to process a request synchronously */
2989void kvm_s390_sync_request(int req, struct kvm_vcpu *vcpu)
49b99e1e 2990{
8e236546
CB
2991 kvm_make_request(req, vcpu);
2992 kvm_s390_vcpu_request(vcpu);
49b99e1e
CB
2993}
2994
414d3b07
MS
2995static void kvm_gmap_notifier(struct gmap *gmap, unsigned long start,
2996 unsigned long end)
2c70fe44 2997{
2c70fe44
CB
2998 struct kvm *kvm = gmap->private;
2999 struct kvm_vcpu *vcpu;
414d3b07
MS
3000 unsigned long prefix;
3001 int i;
2c70fe44 3002
65d0b0d4
DH
3003 if (gmap_is_shadow(gmap))
3004 return;
414d3b07
MS
3005 if (start >= 1UL << 31)
3006 /* We are only interested in prefix pages */
3007 return;
2c70fe44
CB
3008 kvm_for_each_vcpu(i, vcpu, kvm) {
3009 /* match against both prefix pages */
414d3b07
MS
3010 prefix = kvm_s390_get_prefix(vcpu);
3011 if (prefix <= end && start <= prefix + 2*PAGE_SIZE - 1) {
3012 VCPU_EVENT(vcpu, 2, "gmap notifier for %lx-%lx",
3013 start, end);
8e236546 3014 kvm_s390_sync_request(KVM_REQ_MMU_RELOAD, vcpu);
2c70fe44
CB
3015 }
3016 }
3017}
3018
b6d33834
CD
3019int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
3020{
3021 /* kvm common code refers to this, but never calls it */
3022 BUG();
3023 return 0;
3024}
3025
14eebd91
CO
3026static int kvm_arch_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu,
3027 struct kvm_one_reg *reg)
3028{
3029 int r = -EINVAL;
3030
3031 switch (reg->id) {
29b7c71b
CO
3032 case KVM_REG_S390_TODPR:
3033 r = put_user(vcpu->arch.sie_block->todpr,
3034 (u32 __user *)reg->addr);
3035 break;
3036 case KVM_REG_S390_EPOCHDIFF:
3037 r = put_user(vcpu->arch.sie_block->epoch,
3038 (u64 __user *)reg->addr);
3039 break;
46a6dd1c 3040 case KVM_REG_S390_CPU_TIMER:
4287f247 3041 r = put_user(kvm_s390_get_cpu_timer(vcpu),
46a6dd1c
J
3042 (u64 __user *)reg->addr);
3043 break;
3044 case KVM_REG_S390_CLOCK_COMP:
3045 r = put_user(vcpu->arch.sie_block->ckc,
3046 (u64 __user *)reg->addr);
3047 break;
536336c2
DD
3048 case KVM_REG_S390_PFTOKEN:
3049 r = put_user(vcpu->arch.pfault_token,
3050 (u64 __user *)reg->addr);
3051 break;
3052 case KVM_REG_S390_PFCOMPARE:
3053 r = put_user(vcpu->arch.pfault_compare,
3054 (u64 __user *)reg->addr);
3055 break;
3056 case KVM_REG_S390_PFSELECT:
3057 r = put_user(vcpu->arch.pfault_select,
3058 (u64 __user *)reg->addr);
3059 break;
672550fb
CB
3060 case KVM_REG_S390_PP:
3061 r = put_user(vcpu->arch.sie_block->pp,
3062 (u64 __user *)reg->addr);
3063 break;
afa45ff5
CB
3064 case KVM_REG_S390_GBEA:
3065 r = put_user(vcpu->arch.sie_block->gbea,
3066 (u64 __user *)reg->addr);
3067 break;
14eebd91
CO
3068 default:
3069 break;
3070 }
3071
3072 return r;
3073}
3074
3075static int kvm_arch_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu,
3076 struct kvm_one_reg *reg)
3077{
3078 int r = -EINVAL;
4287f247 3079 __u64 val;
14eebd91
CO
3080
3081 switch (reg->id) {
29b7c71b
CO
3082 case KVM_REG_S390_TODPR:
3083 r = get_user(vcpu->arch.sie_block->todpr,
3084 (u32 __user *)reg->addr);
3085 break;
3086 case KVM_REG_S390_EPOCHDIFF:
3087 r = get_user(vcpu->arch.sie_block->epoch,
3088 (u64 __user *)reg->addr);
3089 break;
46a6dd1c 3090 case KVM_REG_S390_CPU_TIMER:
4287f247
DH
3091 r = get_user(val, (u64 __user *)reg->addr);
3092 if (!r)
3093 kvm_s390_set_cpu_timer(vcpu, val);
46a6dd1c
J
3094 break;
3095 case KVM_REG_S390_CLOCK_COMP:
3096 r = get_user(vcpu->arch.sie_block->ckc,
3097 (u64 __user *)reg->addr);
3098 break;
536336c2
DD
3099 case KVM_REG_S390_PFTOKEN:
3100 r = get_user(vcpu->arch.pfault_token,
3101 (u64 __user *)reg->addr);
9fbd8082
DH
3102 if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
3103 kvm_clear_async_pf_completion_queue(vcpu);
536336c2
DD
3104 break;
3105 case KVM_REG_S390_PFCOMPARE:
3106 r = get_user(vcpu->arch.pfault_compare,
3107 (u64 __user *)reg->addr);
3108 break;
3109 case KVM_REG_S390_PFSELECT:
3110 r = get_user(vcpu->arch.pfault_select,
3111 (u64 __user *)reg->addr);
3112 break;
672550fb
CB
3113 case KVM_REG_S390_PP:
3114 r = get_user(vcpu->arch.sie_block->pp,
3115 (u64 __user *)reg->addr);
3116 break;
afa45ff5
CB
3117 case KVM_REG_S390_GBEA:
3118 r = get_user(vcpu->arch.sie_block->gbea,
3119 (u64 __user *)reg->addr);
3120 break;
14eebd91
CO
3121 default:
3122 break;
3123 }
3124
3125 return r;
3126}
b6d33834 3127
b0c632db
HC
3128static int kvm_arch_vcpu_ioctl_initial_reset(struct kvm_vcpu *vcpu)
3129{
b0c632db 3130 kvm_s390_vcpu_initial_reset(vcpu);
b0c632db
HC
3131 return 0;
3132}
3133
3134int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
3135{
5a32c1af 3136 memcpy(&vcpu->run->s.regs.gprs, &regs->gprs, sizeof(regs->gprs));
b0c632db
HC
3137 return 0;
3138}
3139
3140int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
3141{
5a32c1af 3142 memcpy(&regs->gprs, &vcpu->run->s.regs.gprs, sizeof(regs->gprs));
b0c632db
HC
3143 return 0;
3144}
3145
3146int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
3147 struct kvm_sregs *sregs)
3148{
59674c1a 3149 memcpy(&vcpu->run->s.regs.acrs, &sregs->acrs, sizeof(sregs->acrs));
b0c632db 3150 memcpy(&vcpu->arch.sie_block->gcr, &sregs->crs, sizeof(sregs->crs));
b0c632db
HC
3151 return 0;
3152}
3153
3154int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
3155 struct kvm_sregs *sregs)
3156{
59674c1a 3157 memcpy(&sregs->acrs, &vcpu->run->s.regs.acrs, sizeof(sregs->acrs));
b0c632db 3158 memcpy(&sregs->crs, &vcpu->arch.sie_block->gcr, sizeof(sregs->crs));
b0c632db
HC
3159 return 0;
3160}
3161
3162int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
3163{
4725c860
MS
3164 if (test_fp_ctl(fpu->fpc))
3165 return -EINVAL;
e1788bb9 3166 vcpu->run->s.regs.fpc = fpu->fpc;
9abc2a08 3167 if (MACHINE_HAS_VX)
a7d4b8f2
DH
3168 convert_fp_to_vx((__vector128 *) vcpu->run->s.regs.vrs,
3169 (freg_t *) fpu->fprs);
9abc2a08 3170 else
a7d4b8f2 3171 memcpy(vcpu->run->s.regs.fprs, &fpu->fprs, sizeof(fpu->fprs));
b0c632db
HC
3172 return 0;
3173}
3174
3175int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
3176{
9abc2a08
DH
3177 /* make sure we have the latest values */
3178 save_fpu_regs();
3179 if (MACHINE_HAS_VX)
a7d4b8f2
DH
3180 convert_vx_to_fp((freg_t *) fpu->fprs,
3181 (__vector128 *) vcpu->run->s.regs.vrs);
9abc2a08 3182 else
a7d4b8f2 3183 memcpy(fpu->fprs, vcpu->run->s.regs.fprs, sizeof(fpu->fprs));
e1788bb9 3184 fpu->fpc = vcpu->run->s.regs.fpc;
b0c632db
HC
3185 return 0;
3186}
3187
3188static int kvm_arch_vcpu_ioctl_set_initial_psw(struct kvm_vcpu *vcpu, psw_t psw)
3189{
3190 int rc = 0;
3191
7a42fdc2 3192 if (!is_vcpu_stopped(vcpu))
b0c632db 3193 rc = -EBUSY;
d7b0b5eb
CO
3194 else {
3195 vcpu->run->psw_mask = psw.mask;
3196 vcpu->run->psw_addr = psw.addr;
3197 }
b0c632db
HC
3198 return rc;
3199}
3200
3201int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
3202 struct kvm_translation *tr)
3203{
3204 return -EINVAL; /* not implemented yet */
3205}
3206
27291e21
DH
3207#define VALID_GUESTDBG_FLAGS (KVM_GUESTDBG_SINGLESTEP | \
3208 KVM_GUESTDBG_USE_HW_BP | \
3209 KVM_GUESTDBG_ENABLE)
3210
d0bfb940
JK
3211int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
3212 struct kvm_guest_debug *dbg)
b0c632db 3213{
27291e21
DH
3214 int rc = 0;
3215
3216 vcpu->guest_debug = 0;
3217 kvm_s390_clear_bp_data(vcpu);
3218
2de3bfc2 3219 if (dbg->control & ~VALID_GUESTDBG_FLAGS)
27291e21 3220 return -EINVAL;
89b5b4de
DH
3221 if (!sclp.has_gpere)
3222 return -EINVAL;
27291e21
DH
3223
3224 if (dbg->control & KVM_GUESTDBG_ENABLE) {
3225 vcpu->guest_debug = dbg->control;
3226 /* enforce guest PER */
805de8f4 3227 atomic_or(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags);
27291e21
DH
3228
3229 if (dbg->control & KVM_GUESTDBG_USE_HW_BP)
3230 rc = kvm_s390_import_bp_data(vcpu, dbg);
3231 } else {
805de8f4 3232 atomic_andnot(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags);
27291e21
DH
3233 vcpu->arch.guestdbg.last_bp = 0;
3234 }
3235
3236 if (rc) {
3237 vcpu->guest_debug = 0;
3238 kvm_s390_clear_bp_data(vcpu);
805de8f4 3239 atomic_andnot(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags);
27291e21
DH
3240 }
3241
3242 return rc;
b0c632db
HC
3243}
3244
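As the VALID_GUESTDBG_FLAGS check above shows, only ENABLE, SINGLESTEP and hardware breakpoints are accepted. A sketch of enabling single-stepping from userspace, assuming an open vcpu descriptor vcpu_fd:

#include <linux/kvm.h>
#include <sys/ioctl.h>

/* Hypothetical helper: single-step the guest on an open vcpu fd. */
static int enable_singlestep(int vcpu_fd)
{
	struct kvm_guest_debug dbg = {
		.control = KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_SINGLESTEP,
	};

	/* fails with -EINVAL when sclp.has_gpere is not set */
	return ioctl(vcpu_fd, KVM_SET_GUEST_DEBUG, &dbg);
}
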
62d9f0db
MT
3245int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
3246 struct kvm_mp_state *mp_state)
3247{
6352e4d2
DH
3248 /* CHECK_STOP and LOAD are not supported yet */
3249 return is_vcpu_stopped(vcpu) ? KVM_MP_STATE_STOPPED :
3250 KVM_MP_STATE_OPERATING;
62d9f0db
MT
3251}
3252
3253int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
3254 struct kvm_mp_state *mp_state)
3255{
6352e4d2
DH
3256 int rc = 0;
3257
3258 /* user space knows about this interface - let it control the state */
3259 vcpu->kvm->arch.user_cpu_state_ctrl = 1;
3260
3261 switch (mp_state->mp_state) {
3262 case KVM_MP_STATE_STOPPED:
3263 kvm_s390_vcpu_stop(vcpu);
3264 break;
3265 case KVM_MP_STATE_OPERATING:
3266 kvm_s390_vcpu_start(vcpu);
3267 break;
3268 case KVM_MP_STATE_LOAD:
3269 case KVM_MP_STATE_CHECK_STOP:
3270 /* fall through - CHECK_STOP and LOAD are not supported yet */
3271 default:
3272 rc = -ENXIO;
3273 }
3274
3275 return rc;
62d9f0db
MT
3276}
3277
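A corresponding userspace sketch; note that the first KVM_SET_MP_STATE call flips user_cpu_state_ctrl above, so the kernel stops managing the vcpu state on its own (vcpu_fd assumed, helper name made up):

#include <linux/kvm.h>
#include <sys/ioctl.h>

/* Hypothetical helper: put a vcpu into the STOPPED state. */
static int stop_vcpu(int vcpu_fd)
{
	struct kvm_mp_state mp = { .mp_state = KVM_MP_STATE_STOPPED };

	/* KVM_MP_STATE_LOAD / KVM_MP_STATE_CHECK_STOP would get -ENXIO */
	return ioctl(vcpu_fd, KVM_SET_MP_STATE, &mp);
}
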
8ad35755
DH
3278static bool ibs_enabled(struct kvm_vcpu *vcpu)
3279{
3280 return atomic_read(&vcpu->arch.sie_block->cpuflags) & CPUSTAT_IBS;
3281}
3282
2c70fe44
CB
3283static int kvm_s390_handle_requests(struct kvm_vcpu *vcpu)
3284{
8ad35755 3285retry:
8e236546 3286 kvm_s390_vcpu_request_handled(vcpu);
2fa6e1e1 3287 if (!kvm_request_pending(vcpu))
586b7ccd 3288 return 0;
2c70fe44
CB
3289 /*
3290 * We use MMU_RELOAD just to re-arm the ipte notifier for the
b2d73b2a 3291 * guest prefix page. gmap_mprotect_notify will wait on the ptl lock.
2c70fe44
CB
3292 * This ensures that the ipte instruction for this request has
3293 * already finished. We might race against a second unmapper that
3295 * wants to set the blocking bit. Let's just retry the request loop.
3295 */
8ad35755 3296 if (kvm_check_request(KVM_REQ_MMU_RELOAD, vcpu)) {
2c70fe44 3297 int rc;
b2d73b2a
MS
3298 rc = gmap_mprotect_notify(vcpu->arch.gmap,
3299 kvm_s390_get_prefix(vcpu),
3300 PAGE_SIZE * 2, PROT_WRITE);
aca411a4
JN
3301 if (rc) {
3302 kvm_make_request(KVM_REQ_MMU_RELOAD, vcpu);
2c70fe44 3303 return rc;
aca411a4 3304 }
8ad35755 3305 goto retry;
2c70fe44 3306 }
8ad35755 3307
d3d692c8
DH
3308 if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu)) {
3309 vcpu->arch.sie_block->ihcpu = 0xffff;
3310 goto retry;
3311 }
3312
8ad35755
DH
3313 if (kvm_check_request(KVM_REQ_ENABLE_IBS, vcpu)) {
3314 if (!ibs_enabled(vcpu)) {
3315 trace_kvm_s390_enable_disable_ibs(vcpu->vcpu_id, 1);
805de8f4 3316 atomic_or(CPUSTAT_IBS,
8ad35755
DH
3317 &vcpu->arch.sie_block->cpuflags);
3318 }
3319 goto retry;
2c70fe44 3320 }
8ad35755
DH
3321
3322 if (kvm_check_request(KVM_REQ_DISABLE_IBS, vcpu)) {
3323 if (ibs_enabled(vcpu)) {
3324 trace_kvm_s390_enable_disable_ibs(vcpu->vcpu_id, 0);
805de8f4 3325 atomic_andnot(CPUSTAT_IBS,
8ad35755
DH
3326 &vcpu->arch.sie_block->cpuflags);
3327 }
3328 goto retry;
3329 }
3330
6502a34c
DH
3331 if (kvm_check_request(KVM_REQ_ICPT_OPEREXC, vcpu)) {
3332 vcpu->arch.sie_block->ictl |= ICTL_OPEREXC;
3333 goto retry;
3334 }
3335
190df4a2
CI
3336 if (kvm_check_request(KVM_REQ_START_MIGRATION, vcpu)) {
3337 /*
3338 * Disable CMMA virtualization; we will emulate the ESSA
3339 * instruction manually, in order to provide additional
3340 * functionality needed for live migration.
3341 */
3342 vcpu->arch.sie_block->ecb2 &= ~ECB2_CMMA;
3343 goto retry;
3344 }
3345
3346 if (kvm_check_request(KVM_REQ_STOP_MIGRATION, vcpu)) {
3347 /*
3348 * Re-enable CMMA virtualization if CMMA is available and
3349 * was used.
3350 */
3351 if ((vcpu->kvm->arch.use_cmma) &&
3352 (vcpu->kvm->mm->context.use_cmma))
3353 vcpu->arch.sie_block->ecb2 |= ECB2_CMMA;
3354 goto retry;
3355 }
3356
0759d068 3357 /* nothing to do, just clear the request */
72875d8a 3358 kvm_clear_request(KVM_REQ_UNHALT, vcpu);
b6787f53
DH
3359 /* we left the vsie handler, nothing to do, just clear the request */
3360 kvm_clear_request(KVM_REQ_VSIE_RESTART, vcpu);
0759d068 3361
2c70fe44
CB
3362 return 0;
3363}
3364
e5a63654
DH
3365void kvm_s390_set_tod_clock(struct kvm *kvm,
3366 const struct kvm_s390_vm_tod_clock *gtod)
8fa1696e
CW
3367{
3368 struct kvm_vcpu *vcpu;
3369 struct kvm_s390_tod_clock_ext htod;
3370 int i;
3371
3372 mutex_lock(&kvm->lock);
3373 preempt_disable();
3374
3375 get_tod_clock_ext((char *)&htod);
3376
3377 kvm->arch.epoch = gtod->tod - htod.tod;
e5a63654
DH
3378 kvm->arch.epdx = 0;
3379 if (test_kvm_facility(kvm, 139)) {
3380 kvm->arch.epdx = gtod->epoch_idx - htod.epoch_idx;
3381 if (kvm->arch.epoch > gtod->tod)
3382 kvm->arch.epdx -= 1;
3383 }
8fa1696e
CW
3384
3385 kvm_s390_vcpu_block_all(kvm);
3386 kvm_for_each_vcpu(i, vcpu, kvm) {
3387 vcpu->arch.sie_block->epoch = kvm->arch.epoch;
3388 vcpu->arch.sie_block->epdx = kvm->arch.epdx;
3389 }
3390
3391 kvm_s390_vcpu_unblock_all(kvm);
25ed1675
DH
3392 preempt_enable();
3393 mutex_unlock(&kvm->lock);
3394}
3395
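The epoch computation above is a multi-word subtraction: the low 64 bits (epoch) wrap modulo 2^64, and a borrow into the epoch index is detected by comparing the wrapped result against the minuend. A standalone sketch of the same arithmetic, with illustrative types:

#include <stdint.h>

/* Illustrative stand-in for the TOD clock plus its epoch index. */
struct tod_ext { uint8_t epoch_idx; uint64_t tod; };

static void compute_guest_epoch(const struct tod_ext *gtod,
				const struct tod_ext *htod,
				uint64_t *epoch, uint8_t *epdx)
{
	*epoch = gtod->tod - htod->tod;		/* wraps modulo 2^64 */
	*epdx = gtod->epoch_idx - htod->epoch_idx;
	if (*epoch > gtod->tod)			/* low half wrapped: borrow */
		*epdx -= 1;
}
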
fa576c58
TH
3396/**
3397 * kvm_arch_fault_in_page - fault-in guest page if necessary
3398 * @vcpu: The corresponding virtual cpu
3399 * @gpa: Guest physical address
3400 * @writable: Whether the page should be writable or not
3401 *
3402 * Make sure that a guest page has been faulted-in on the host.
3403 *
3404 * Return: Zero on success, negative error code otherwise.
3405 */
3406long kvm_arch_fault_in_page(struct kvm_vcpu *vcpu, gpa_t gpa, int writable)
24eb3a82 3407{
527e30b4
MS
3408 return gmap_fault(vcpu->arch.gmap, gpa,
3409 writable ? FAULT_FLAG_WRITE : 0);
24eb3a82
DD
3410}
3411
3c038e6b
DD
3412static void __kvm_inject_pfault_token(struct kvm_vcpu *vcpu, bool start_token,
3413 unsigned long token)
3414{
3415 struct kvm_s390_interrupt inti;
383d0b05 3416 struct kvm_s390_irq irq;
3c038e6b
DD
3417
3418 if (start_token) {
383d0b05
JF
3419 irq.u.ext.ext_params2 = token;
3420 irq.type = KVM_S390_INT_PFAULT_INIT;
3421 WARN_ON_ONCE(kvm_s390_inject_vcpu(vcpu, &irq));
3c038e6b
DD
3422 } else {
3423 inti.type = KVM_S390_INT_PFAULT_DONE;
383d0b05 3424 inti.parm64 = token;
3c038e6b
DD
3425 WARN_ON_ONCE(kvm_s390_inject_vm(vcpu->kvm, &inti));
3426 }
3427}
3428
3429void kvm_arch_async_page_not_present(struct kvm_vcpu *vcpu,
3430 struct kvm_async_pf *work)
3431{
3432 trace_kvm_s390_pfault_init(vcpu, work->arch.pfault_token);
3433 __kvm_inject_pfault_token(vcpu, true, work->arch.pfault_token);
3434}
3435
3436void kvm_arch_async_page_present(struct kvm_vcpu *vcpu,
3437 struct kvm_async_pf *work)
3438{
3439 trace_kvm_s390_pfault_done(vcpu, work->arch.pfault_token);
3440 __kvm_inject_pfault_token(vcpu, false, work->arch.pfault_token);
3441}
3442
3443void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu,
3444 struct kvm_async_pf *work)
3445{
3446 /* s390 will always inject the page directly */
3447}
3448
3449bool kvm_arch_can_inject_async_page_present(struct kvm_vcpu *vcpu)
3450{
3451 /*
3452 * s390 will always inject the page directly,
3453 * but we still want check_async_completion to clean up
3454 */
3455 return true;
3456}
3457
3458static int kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu)
3459{
3460 hva_t hva;
3461 struct kvm_arch_async_pf arch;
3462 int rc;
3463
3464 if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
3465 return 0;
3466 if ((vcpu->arch.sie_block->gpsw.mask & vcpu->arch.pfault_select) !=
3467 vcpu->arch.pfault_compare)
3468 return 0;
3469 if (psw_extint_disabled(vcpu))
3470 return 0;
9a022067 3471 if (kvm_s390_vcpu_has_irq(vcpu, 0))
3c038e6b
DD
3472 return 0;
3473 if (!(vcpu->arch.sie_block->gcr[0] & 0x200ul))
3474 return 0;
3475 if (!vcpu->arch.gmap->pfault_enabled)
3476 return 0;
3477
81480cc1
HC
3478 hva = gfn_to_hva(vcpu->kvm, gpa_to_gfn(current->thread.gmap_addr));
3479 hva += current->thread.gmap_addr & ~PAGE_MASK;
3480 if (read_guest_real(vcpu, vcpu->arch.pfault_token, &arch.pfault_token, 8))
3c038e6b
DD
3481 return 0;
3482
3483 rc = kvm_setup_async_pf(vcpu, current->thread.gmap_addr, hva, &arch);
3484 return rc;
3485}
3486
3fb4c40f 3487static int vcpu_pre_run(struct kvm_vcpu *vcpu)
b0c632db 3488{
3fb4c40f 3489 int rc, cpuflags;
e168bf8d 3490
3c038e6b
DD
3491 /*
3492 * On s390 notifications for arriving pages will be delivered directly
3493 * to the guest but the housekeeping for completed pfaults is
3494 * handled outside the worker.
3495 */
3496 kvm_check_async_pf_completion(vcpu);
3497
7ec7c8c7
CB
3498 vcpu->arch.sie_block->gg14 = vcpu->run->s.regs.gprs[14];
3499 vcpu->arch.sie_block->gg15 = vcpu->run->s.regs.gprs[15];
b0c632db
HC
3500
3501 if (need_resched())
3502 schedule();
3503
d3a73acb 3504 if (test_cpu_flag(CIF_MCCK_PENDING))
71cde587
CB
3505 s390_handle_mcck();
3506
79395031
JF
3507 if (!kvm_is_ucontrol(vcpu->kvm)) {
3508 rc = kvm_s390_deliver_pending_interrupts(vcpu);
3509 if (rc)
3510 return rc;
3511 }
0ff31867 3512
2c70fe44
CB
3513 rc = kvm_s390_handle_requests(vcpu);
3514 if (rc)
3515 return rc;
3516
27291e21
DH
3517 if (guestdbg_enabled(vcpu)) {
3518 kvm_s390_backup_guest_per_regs(vcpu);
3519 kvm_s390_patch_guest_per_regs(vcpu);
3520 }
3521
b0c632db 3522 vcpu->arch.sie_block->icptcode = 0;
3fb4c40f
TH
3523 cpuflags = atomic_read(&vcpu->arch.sie_block->cpuflags);
3524 VCPU_EVENT(vcpu, 6, "entering sie flags %x", cpuflags);
3525 trace_kvm_s390_sie_enter(vcpu, cpuflags);
2b29a9fd 3526
3fb4c40f
TH
3527 return 0;
3528}
3529
492d8642
TH
3530static int vcpu_post_run_fault_in_sie(struct kvm_vcpu *vcpu)
3531{
56317920
DH
3532 struct kvm_s390_pgm_info pgm_info = {
3533 .code = PGM_ADDRESSING,
3534 };
3535 u8 opcode, ilen;
492d8642
TH
3536 int rc;
3537
3538 VCPU_EVENT(vcpu, 3, "%s", "fault in sie instruction");
3539 trace_kvm_s390_sie_fault(vcpu);
3540
3541 /*
3542 * We want to inject an addressing exception, which is defined as a
3543 * suppressing or terminating exception. However, since we came here
3544 * by a DAT access exception, the PSW still points to the faulting
3545 * instruction since DAT exceptions are nullifying. So we've got
3546 * to look up the current opcode to get the length of the instruction
3547 * to be able to forward the PSW.
3548 */
3fa8cad7 3549 rc = read_guest_instr(vcpu, vcpu->arch.sie_block->gpsw.addr, &opcode, 1);
56317920 3550 ilen = insn_length(opcode);
9b0d721a
DH
3551 if (rc < 0) {
3552 return rc;
3553 } else if (rc) {
3554 /* Instruction-Fetching Exceptions - we can't detect the ilen.
3555 * Forward by arbitrary ilc, injection will take care of
3556 * nullification if necessary.
3557 */
3558 pgm_info = vcpu->arch.pgm;
3559 ilen = 4;
3560 }
56317920
DH
3561 pgm_info.flags = ilen | KVM_S390_PGM_FLAGS_ILC_VALID;
3562 kvm_s390_forward_psw(vcpu, ilen);
3563 return kvm_s390_inject_prog_irq(vcpu, &pgm_info);
492d8642
TH
3564}
3565
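The insn_length() call works because s390 encodes the instruction length in the two most significant bits of the first opcode byte: 00 means 2 bytes, 01 and 10 mean 4 bytes, 11 means 6 bytes. An equivalent table-based sketch:

/* Sketch: instruction length from the first opcode byte. */
static unsigned int insn_length_sketch(unsigned char opcode)
{
	/* opcode >> 6 yields 0..3; map 0->2, 1->4, 2->4, 3->6 */
	static const unsigned int len[4] = { 2, 4, 4, 6 };

	return len[opcode >> 6];
}
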
3fb4c40f
TH
3566static int vcpu_post_run(struct kvm_vcpu *vcpu, int exit_reason)
3567{
4d62fcc0
QH
3568 struct mcck_volatile_info *mcck_info;
3569 struct sie_page *sie_page;
3570
2b29a9fd
DD
3571 VCPU_EVENT(vcpu, 6, "exit sie icptcode %d",
3572 vcpu->arch.sie_block->icptcode);
3573 trace_kvm_s390_sie_exit(vcpu, vcpu->arch.sie_block->icptcode);
3574
27291e21
DH
3575 if (guestdbg_enabled(vcpu))
3576 kvm_s390_restore_guest_per_regs(vcpu);
3577
7ec7c8c7
CB
3578 vcpu->run->s.regs.gprs[14] = vcpu->arch.sie_block->gg14;
3579 vcpu->run->s.regs.gprs[15] = vcpu->arch.sie_block->gg15;
71f116bf 3580
4d62fcc0
QH
3581 if (exit_reason == -EINTR) {
3582 VCPU_EVENT(vcpu, 3, "%s", "machine check");
3583 sie_page = container_of(vcpu->arch.sie_block,
3584 struct sie_page, sie_block);
3585 mcck_info = &sie_page->mcck_info;
3586 kvm_s390_reinject_machine_check(vcpu, mcck_info);
3587 return 0;
3588 }
3589
71f116bf
DH
3590 if (vcpu->arch.sie_block->icptcode > 0) {
3591 int rc = kvm_handle_sie_intercept(vcpu);
3592
3593 if (rc != -EOPNOTSUPP)
3594 return rc;
3595 vcpu->run->exit_reason = KVM_EXIT_S390_SIEIC;
3596 vcpu->run->s390_sieic.icptcode = vcpu->arch.sie_block->icptcode;
3597 vcpu->run->s390_sieic.ipa = vcpu->arch.sie_block->ipa;
3598 vcpu->run->s390_sieic.ipb = vcpu->arch.sie_block->ipb;
3599 return -EREMOTE;
3600 } else if (exit_reason != -EFAULT) {
3601 vcpu->stat.exit_null++;
3602 return 0;
210b1607
TH
3603 } else if (kvm_is_ucontrol(vcpu->kvm)) {
3604 vcpu->run->exit_reason = KVM_EXIT_S390_UCONTROL;
3605 vcpu->run->s390_ucontrol.trans_exc_code =
3606 current->thread.gmap_addr;
3607 vcpu->run->s390_ucontrol.pgm_code = 0x10;
71f116bf 3608 return -EREMOTE;
24eb3a82 3609 } else if (current->thread.gmap_pfault) {
3c038e6b 3610 trace_kvm_s390_major_guest_pfault(vcpu);
24eb3a82 3611 current->thread.gmap_pfault = 0;
71f116bf
DH
3612 if (kvm_arch_setup_async_pf(vcpu))
3613 return 0;
3614 return kvm_arch_fault_in_page(vcpu, current->thread.gmap_addr, 1);
a76ccff6 3615 }
71f116bf 3616 return vcpu_post_run_fault_in_sie(vcpu);
3fb4c40f
TH
3617}
3618
3619static int __vcpu_run(struct kvm_vcpu *vcpu)
3620{
3621 int rc, exit_reason;
3622
800c1065
TH
3623 /*
3624 * We try to hold kvm->srcu during most of vcpu_run (except when run-
3625 * ning the guest), so that memslots (and other stuff) are protected
3626 */
3627 vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
3628
a76ccff6
TH
3629 do {
3630 rc = vcpu_pre_run(vcpu);
3631 if (rc)
3632 break;
3fb4c40f 3633
800c1065 3634 srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
a76ccff6
TH
3635 /*
3636 * As PF_VCPU will be used in the fault handler, there must be
3637 * no uaccess between guest_enter and guest_exit.
3638 */
0097d12e 3639 local_irq_disable();
6edaa530 3640 guest_enter_irqoff();
db0758b2 3641 __disable_cpu_timer_accounting(vcpu);
0097d12e 3642 local_irq_enable();
a76ccff6
TH
3643 exit_reason = sie64a(vcpu->arch.sie_block,
3644 vcpu->run->s.regs.gprs);
0097d12e 3645 local_irq_disable();
db0758b2 3646 __enable_cpu_timer_accounting(vcpu);
6edaa530 3647 guest_exit_irqoff();
0097d12e 3648 local_irq_enable();
800c1065 3649 vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
a76ccff6
TH
3650
3651 rc = vcpu_post_run(vcpu, exit_reason);
27291e21 3652 } while (!signal_pending(current) && !guestdbg_exit_pending(vcpu) && !rc);
3fb4c40f 3653
800c1065 3654 srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
e168bf8d 3655 return rc;
b0c632db
HC
3656}
3657
b028ee3e
DH
3658static void sync_regs(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
3659{
4d5f2c04 3660 struct runtime_instr_cb *riccb;
4e0b1ab7 3661 struct gs_cb *gscb;
4d5f2c04
CB
3662
3663 riccb = (struct runtime_instr_cb *) &kvm_run->s.regs.riccb;
4e0b1ab7 3664 gscb = (struct gs_cb *) &kvm_run->s.regs.gscb;
b028ee3e
DH
3665 vcpu->arch.sie_block->gpsw.mask = kvm_run->psw_mask;
3666 vcpu->arch.sie_block->gpsw.addr = kvm_run->psw_addr;
3667 if (kvm_run->kvm_dirty_regs & KVM_SYNC_PREFIX)
3668 kvm_s390_set_prefix(vcpu, kvm_run->s.regs.prefix);
3669 if (kvm_run->kvm_dirty_regs & KVM_SYNC_CRS) {
3670 memcpy(&vcpu->arch.sie_block->gcr, &kvm_run->s.regs.crs, 128);
d3d692c8
DH
3671 /* some control register changes require a tlb flush */
3672 kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
b028ee3e
DH
3673 }
3674 if (kvm_run->kvm_dirty_regs & KVM_SYNC_ARCH0) {
4287f247 3675 kvm_s390_set_cpu_timer(vcpu, kvm_run->s.regs.cputm);
b028ee3e
DH
3676 vcpu->arch.sie_block->ckc = kvm_run->s.regs.ckc;
3677 vcpu->arch.sie_block->todpr = kvm_run->s.regs.todpr;
3678 vcpu->arch.sie_block->pp = kvm_run->s.regs.pp;
3679 vcpu->arch.sie_block->gbea = kvm_run->s.regs.gbea;
3680 }
3681 if (kvm_run->kvm_dirty_regs & KVM_SYNC_PFAULT) {
3682 vcpu->arch.pfault_token = kvm_run->s.regs.pft;
3683 vcpu->arch.pfault_select = kvm_run->s.regs.pfs;
3684 vcpu->arch.pfault_compare = kvm_run->s.regs.pfc;
9fbd8082
DH
3685 if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
3686 kvm_clear_async_pf_completion_queue(vcpu);
b028ee3e 3687 }
80cd8763
FZ
3688 /*
3689 * If userspace sets the riccb (e.g. after migration) to a valid state,
3690 * we should enable RI here instead of doing the lazy enablement.
3691 */
3692 if ((kvm_run->kvm_dirty_regs & KVM_SYNC_RICCB) &&
4d5f2c04 3693 test_kvm_facility(vcpu->kvm, 64) &&
bb59c2da 3694 riccb->v &&
0c9d8683 3695 !(vcpu->arch.sie_block->ecb3 & ECB3_RI)) {
4d5f2c04 3696 VCPU_EVENT(vcpu, 3, "%s", "ENABLE: RI (sync_regs)");
0c9d8683 3697 vcpu->arch.sie_block->ecb3 |= ECB3_RI;
80cd8763 3698 }
4e0b1ab7
FZ
3699 /*
3700 * If userspace sets the gscb (e.g. after migration) to non-zero,
3701 * we should enable GS here instead of doing the lazy enablement.
3702 */
3703 if ((kvm_run->kvm_dirty_regs & KVM_SYNC_GSCB) &&
3704 test_kvm_facility(vcpu->kvm, 133) &&
3705 gscb->gssm &&
3706 !vcpu->arch.gs_enabled) {
3707 VCPU_EVENT(vcpu, 3, "%s", "ENABLE: GS (sync_regs)");
3708 vcpu->arch.sie_block->ecb |= ECB_GS;
3709 vcpu->arch.sie_block->ecd |= ECD_HOSTREGMGMT;
3710 vcpu->arch.gs_enabled = 1;
80cd8763 3711 }
35b3fde6
CB
3712 if ((kvm_run->kvm_dirty_regs & KVM_SYNC_BPBC) &&
3713 test_kvm_facility(vcpu->kvm, 82)) {
3714 vcpu->arch.sie_block->fpf &= ~FPF_BPBC;
3715 vcpu->arch.sie_block->fpf |= kvm_run->s.regs.bpbc ? FPF_BPBC : 0;
3716 }
31d8b8d4
CB
3717 save_access_regs(vcpu->arch.host_acrs);
3718 restore_access_regs(vcpu->run->s.regs.acrs);
e1788bb9
CB
3719 /* save host (userspace) fprs/vrs */
3720 save_fpu_regs();
3721 vcpu->arch.host_fpregs.fpc = current->thread.fpu.fpc;
3722 vcpu->arch.host_fpregs.regs = current->thread.fpu.regs;
3723 if (MACHINE_HAS_VX)
3724 current->thread.fpu.regs = vcpu->run->s.regs.vrs;
3725 else
3726 current->thread.fpu.regs = vcpu->run->s.regs.fprs;
3727 current->thread.fpu.fpc = vcpu->run->s.regs.fpc;
3728 if (test_fp_ctl(current->thread.fpu.fpc))
3729 /* User space provided an invalid FPC, let's clear it */
3730 current->thread.fpu.fpc = 0;
4e0b1ab7
FZ
3731 if (MACHINE_HAS_GS) {
3732 preempt_disable();
3733 __ctl_set_bit(2, 4);
3734 if (current->thread.gs_cb) {
3735 vcpu->arch.host_gscb = current->thread.gs_cb;
3736 save_gs_cb(vcpu->arch.host_gscb);
3737 }
3738 if (vcpu->arch.gs_enabled) {
3739 current->thread.gs_cb = (struct gs_cb *)
3740 &vcpu->run->s.regs.gscb;
3741 restore_gs_cb(current->thread.gs_cb);
3742 }
3743 preempt_enable();
3744 }
012d8745 3745 /* SIE will load etoken directly from SDNX and therefore kvm_run */
80cd8763 3746
b028ee3e
DH
3747 kvm_run->kvm_dirty_regs = 0;
3748}
3749
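Userspace drives this path through the shared kvm_run area: it updates a field in run->s.regs and marks it in kvm_dirty_regs before the next KVM_RUN. A sketch for the prefix register (run is the mmap()ed struct kvm_run; new_prefix is an assumed variable):

/* Sketch: hand a new guest prefix to the kernel via sync_regs(). */
run->s.regs.prefix = new_prefix;
run->kvm_dirty_regs |= KVM_SYNC_PREFIX;
/* the next KVM_RUN ioctl applies it before entering SIE */
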
3750static void store_regs(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
3751{
3752 kvm_run->psw_mask = vcpu->arch.sie_block->gpsw.mask;
3753 kvm_run->psw_addr = vcpu->arch.sie_block->gpsw.addr;
3754 kvm_run->s.regs.prefix = kvm_s390_get_prefix(vcpu);
3755 memcpy(&kvm_run->s.regs.crs, &vcpu->arch.sie_block->gcr, 128);
4287f247 3756 kvm_run->s.regs.cputm = kvm_s390_get_cpu_timer(vcpu);
b028ee3e
DH
3757 kvm_run->s.regs.ckc = vcpu->arch.sie_block->ckc;
3758 kvm_run->s.regs.todpr = vcpu->arch.sie_block->todpr;
3759 kvm_run->s.regs.pp = vcpu->arch.sie_block->pp;
3760 kvm_run->s.regs.gbea = vcpu->arch.sie_block->gbea;
3761 kvm_run->s.regs.pft = vcpu->arch.pfault_token;
3762 kvm_run->s.regs.pfs = vcpu->arch.pfault_select;
3763 kvm_run->s.regs.pfc = vcpu->arch.pfault_compare;
35b3fde6 3764 kvm_run->s.regs.bpbc = (vcpu->arch.sie_block->fpf & FPF_BPBC) == FPF_BPBC;
31d8b8d4
CB
3765 save_access_regs(vcpu->run->s.regs.acrs);
3766 restore_access_regs(vcpu->arch.host_acrs);
e1788bb9
CB
3767 /* Save guest register state */
3768 save_fpu_regs();
3769 vcpu->run->s.regs.fpc = current->thread.fpu.fpc;
3770 /* Restore will be done lazily at return */
3771 current->thread.fpu.fpc = vcpu->arch.host_fpregs.fpc;
3772 current->thread.fpu.regs = vcpu->arch.host_fpregs.regs;
4e0b1ab7
FZ
3773 if (MACHINE_HAS_GS) {
3774 __ctl_set_bit(2, 4);
3775 if (vcpu->arch.gs_enabled)
3776 save_gs_cb(current->thread.gs_cb);
3777 preempt_disable();
3778 current->thread.gs_cb = vcpu->arch.host_gscb;
3779 restore_gs_cb(vcpu->arch.host_gscb);
3780 preempt_enable();
3781 if (!vcpu->arch.host_gscb)
3782 __ctl_clear_bit(2, 4);
3783 vcpu->arch.host_gscb = NULL;
3784 }
012d8745 3785 /* SIE will save etoken directly into SDNX and therefore kvm_run */
b028ee3e
DH
3786}
3787
b0c632db
HC
3788int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
3789{
8f2abe6a 3790 int rc;
b0c632db 3791
460df4c1
PB
3792 if (kvm_run->immediate_exit)
3793 return -EINTR;
3794
27291e21
DH
3795 if (guestdbg_exit_pending(vcpu)) {
3796 kvm_s390_prepare_debug_exit(vcpu);
3797 return 0;
3798 }
3799
20b7035c 3800 kvm_sigset_activate(vcpu);
b0c632db 3801
6352e4d2
DH
3802 if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm)) {
3803 kvm_s390_vcpu_start(vcpu);
3804 } else if (is_vcpu_stopped(vcpu)) {
ea2cdd27 3805 pr_err_ratelimited("can't run stopped vcpu %d\n",
6352e4d2
DH
3806 vcpu->vcpu_id);
3807 return -EINVAL;
3808 }
b0c632db 3809
b028ee3e 3810 sync_regs(vcpu, kvm_run);
db0758b2 3811 enable_cpu_timer_accounting(vcpu);
d7b0b5eb 3812
dab4079d 3813 might_fault();
a76ccff6 3814 rc = __vcpu_run(vcpu);
9ace903d 3815
b1d16c49
CE
3816 if (signal_pending(current) && !rc) {
3817 kvm_run->exit_reason = KVM_EXIT_INTR;
8f2abe6a 3818 rc = -EINTR;
b1d16c49 3819 }
8f2abe6a 3820
27291e21
DH
3821 if (guestdbg_exit_pending(vcpu) && !rc) {
3822 kvm_s390_prepare_debug_exit(vcpu);
3823 rc = 0;
3824 }
3825
8f2abe6a 3826 if (rc == -EREMOTE) {
71f116bf 3827 /* userspace support is needed, kvm_run has been prepared */
8f2abe6a
CB
3828 rc = 0;
3829 }
b0c632db 3830
db0758b2 3831 disable_cpu_timer_accounting(vcpu);
b028ee3e 3832 store_regs(vcpu, kvm_run);
d7b0b5eb 3833
20b7035c 3834 kvm_sigset_deactivate(vcpu);
b0c632db 3835
b0c632db 3836 vcpu->stat.exit_userspace++;
7e8e6ab4 3837 return rc;
b0c632db
HC
3838}
3839
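The usual userspace counterpart is a loop around the KVM_RUN ioctl that re-enters the guest until an exit needs attention. A minimal sketch, assuming vcpu_fd and the mmap()ed run area:

#include <errno.h>
#include <linux/kvm.h>
#include <sys/ioctl.h>

static void run_loop(int vcpu_fd, struct kvm_run *run)
{
	for (;;) {
		if (ioctl(vcpu_fd, KVM_RUN, 0) < 0) {
			if (errno == EINTR)	/* signal: exit_reason is KVM_EXIT_INTR */
				continue;
			return;
		}
		if (run->exit_reason == KVM_EXIT_S390_SIEIC) {
			/* -EREMOTE path above: icptcode/ipa/ipb are filled in */
			continue;	/* a real VMM would emulate here */
		}
		return;		/* hand other exits to the caller */
	}
}
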
b0c632db
HC
3840/*
3841 * store status at address
3842 * we have two special cases:
3843 * KVM_S390_STORE_STATUS_NOADDR: -> 0x1200 on 64 bit
3844 * KVM_S390_STORE_STATUS_PREFIXED: -> prefix
3845 */
d0bce605 3846int kvm_s390_store_status_unloaded(struct kvm_vcpu *vcpu, unsigned long gpa)
b0c632db 3847{
092670cd 3848 unsigned char archmode = 1;
9abc2a08 3849 freg_t fprs[NUM_FPRS];
fda902cb 3850 unsigned int px;
4287f247 3851 u64 clkcomp, cputm;
d0bce605 3852 int rc;
b0c632db 3853
d9a3a09a 3854 px = kvm_s390_get_prefix(vcpu);
d0bce605
HC
3855 if (gpa == KVM_S390_STORE_STATUS_NOADDR) {
3856 if (write_guest_abs(vcpu, 163, &archmode, 1))
b0c632db 3857 return -EFAULT;
d9a3a09a 3858 gpa = 0;
d0bce605
HC
3859 } else if (gpa == KVM_S390_STORE_STATUS_PREFIXED) {
3860 if (write_guest_real(vcpu, 163, &archmode, 1))
b0c632db 3861 return -EFAULT;
d9a3a09a
MS
3862 gpa = px;
3863 } else
3864 gpa -= __LC_FPREGS_SAVE_AREA;
9abc2a08
DH
3865
3866 /* manually convert vector registers if necessary */
3867 if (MACHINE_HAS_VX) {
9522b37f 3868 convert_vx_to_fp(fprs, (__vector128 *) vcpu->run->s.regs.vrs);
9abc2a08
DH
3869 rc = write_guest_abs(vcpu, gpa + __LC_FPREGS_SAVE_AREA,
3870 fprs, 128);
3871 } else {
3872 rc = write_guest_abs(vcpu, gpa + __LC_FPREGS_SAVE_AREA,
6fd8e67d 3873 vcpu->run->s.regs.fprs, 128);
9abc2a08 3874 }
d9a3a09a 3875 rc |= write_guest_abs(vcpu, gpa + __LC_GPREGS_SAVE_AREA,
d0bce605 3876 vcpu->run->s.regs.gprs, 128);
d9a3a09a 3877 rc |= write_guest_abs(vcpu, gpa + __LC_PSW_SAVE_AREA,
d0bce605 3878 &vcpu->arch.sie_block->gpsw, 16);
d9a3a09a 3879 rc |= write_guest_abs(vcpu, gpa + __LC_PREFIX_SAVE_AREA,
fda902cb 3880 &px, 4);
d9a3a09a 3881 rc |= write_guest_abs(vcpu, gpa + __LC_FP_CREG_SAVE_AREA,
9abc2a08 3882 &vcpu->run->s.regs.fpc, 4);
d9a3a09a 3883 rc |= write_guest_abs(vcpu, gpa + __LC_TOD_PROGREG_SAVE_AREA,
d0bce605 3884 &vcpu->arch.sie_block->todpr, 4);
4287f247 3885 cputm = kvm_s390_get_cpu_timer(vcpu);
d9a3a09a 3886 rc |= write_guest_abs(vcpu, gpa + __LC_CPU_TIMER_SAVE_AREA,
4287f247 3887 &cputm, 8);
178bd789 3888 clkcomp = vcpu->arch.sie_block->ckc >> 8;
d9a3a09a 3889 rc |= write_guest_abs(vcpu, gpa + __LC_CLOCK_COMP_SAVE_AREA,
d0bce605 3890 &clkcomp, 8);
d9a3a09a 3891 rc |= write_guest_abs(vcpu, gpa + __LC_AREGS_SAVE_AREA,
d0bce605 3892 &vcpu->run->s.regs.acrs, 64);
d9a3a09a 3893 rc |= write_guest_abs(vcpu, gpa + __LC_CREGS_SAVE_AREA,
d0bce605
HC
3894 &vcpu->arch.sie_block->gcr, 128);
3895 return rc ? -EFAULT : 0;
b0c632db
HC
3896}
3897
e879892c
TH
3898int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
3899{
3900 /*
3901 * The guest FPRS and ACRS are in the host FPRS/ACRS due to the lazy
31d8b8d4 3902 * switch in the run ioctl. Let's update our copies before we save
e879892c
TH
3903 * them into the save area.
3904 */
d0164ee2 3905 save_fpu_regs();
9abc2a08 3906 vcpu->run->s.regs.fpc = current->thread.fpu.fpc;
e879892c
TH
3907 save_access_regs(vcpu->run->s.regs.acrs);
3908
3909 return kvm_s390_store_status_unloaded(vcpu, addr);
3910}
3911
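Userspace reaches this through the KVM_S390_STORE_STATUS vcpu ioctl, which passes the guest address directly as the ioctl argument; KVM_S390_STORE_STATUS_NOADDR and KVM_S390_STORE_STATUS_PREFIXED select the two special cases listed above. A sketch (vcpu_fd assumed, helper name made up):

#include <linux/kvm.h>
#include <sys/ioctl.h>

/* Hypothetical helper: store the vcpu status into the prefix area. */
static int store_status_prefixed(int vcpu_fd)
{
	return ioctl(vcpu_fd, KVM_S390_STORE_STATUS,
		     KVM_S390_STORE_STATUS_PREFIXED);
}
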
8ad35755
DH
3912static void __disable_ibs_on_vcpu(struct kvm_vcpu *vcpu)
3913{
3914 kvm_check_request(KVM_REQ_ENABLE_IBS, vcpu);
8e236546 3915 kvm_s390_sync_request(KVM_REQ_DISABLE_IBS, vcpu);
8ad35755
DH
3916}
3917
3918static void __disable_ibs_on_all_vcpus(struct kvm *kvm)
3919{
3920 unsigned int i;
3921 struct kvm_vcpu *vcpu;
3922
3923 kvm_for_each_vcpu(i, vcpu, kvm) {
3924 __disable_ibs_on_vcpu(vcpu);
3925 }
3926}
3927
3928static void __enable_ibs_on_vcpu(struct kvm_vcpu *vcpu)
3929{
09a400e7
DH
3930 if (!sclp.has_ibs)
3931 return;
8ad35755 3932 kvm_check_request(KVM_REQ_DISABLE_IBS, vcpu);
8e236546 3933 kvm_s390_sync_request(KVM_REQ_ENABLE_IBS, vcpu);
8ad35755
DH
3934}
3935
6852d7b6
DH
3936void kvm_s390_vcpu_start(struct kvm_vcpu *vcpu)
3937{
8ad35755
DH
3938 int i, online_vcpus, started_vcpus = 0;
3939
3940 if (!is_vcpu_stopped(vcpu))
3941 return;
3942
6852d7b6 3943 trace_kvm_s390_vcpu_start_stop(vcpu->vcpu_id, 1);
8ad35755 3944 /* Only one cpu at a time may enter/leave the STOPPED state. */
433b9ee4 3945 spin_lock(&vcpu->kvm->arch.start_stop_lock);
8ad35755
DH
3946 online_vcpus = atomic_read(&vcpu->kvm->online_vcpus);
3947
3948 for (i = 0; i < online_vcpus; i++) {
3949 if (!is_vcpu_stopped(vcpu->kvm->vcpus[i]))
3950 started_vcpus++;
3951 }
3952
3953 if (started_vcpus == 0) {
3954 /* we're the only active VCPU -> speed it up */
3955 __enable_ibs_on_vcpu(vcpu);
3956 } else if (started_vcpus == 1) {
3957 /*
3958 * As we are starting a second VCPU, we have to disable
3959 * the IBS facility on all VCPUs to remove potentially
3960 * outstanding ENABLE requests.
3961 */
3962 __disable_ibs_on_all_vcpus(vcpu->kvm);
3963 }
3964
805de8f4 3965 atomic_andnot(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);
8ad35755
DH
3966 /*
3967 * Another VCPU might have used IBS while we were offline.
3968 * Let's play safe and flush the VCPU at startup.
3969 */
d3d692c8 3970 kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
433b9ee4 3971 spin_unlock(&vcpu->kvm->arch.start_stop_lock);
8ad35755 3972 return;
6852d7b6
DH
3973}
3974
3975void kvm_s390_vcpu_stop(struct kvm_vcpu *vcpu)
3976{
8ad35755
DH
3977 int i, online_vcpus, started_vcpus = 0;
3978 struct kvm_vcpu *started_vcpu = NULL;
3979
3980 if (is_vcpu_stopped(vcpu))
3981 return;
3982
6852d7b6 3983 trace_kvm_s390_vcpu_start_stop(vcpu->vcpu_id, 0);
8ad35755 3984 /* Only one cpu at a time may enter/leave the STOPPED state. */
433b9ee4 3985 spin_lock(&vcpu->kvm->arch.start_stop_lock);
8ad35755
DH
3986 online_vcpus = atomic_read(&vcpu->kvm->online_vcpus);
3987
32f5ff63 3988 /* SIGP STOP and SIGP STOP AND STORE STATUS has been fully processed */
6cddd432 3989 kvm_s390_clear_stop_irq(vcpu);
32f5ff63 3990
805de8f4 3991 atomic_or(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);
8ad35755
DH
3992 __disable_ibs_on_vcpu(vcpu);
3993
3994 for (i = 0; i < online_vcpus; i++) {
3995 if (!is_vcpu_stopped(vcpu->kvm->vcpus[i])) {
3996 started_vcpus++;
3997 started_vcpu = vcpu->kvm->vcpus[i];
3998 }
3999 }
4000
4001 if (started_vcpus == 1) {
4002 /*
4003 * As we only have one VCPU left, we want to enable the
4004 * IBS facility for that VCPU to speed it up.
4005 */
4006 __enable_ibs_on_vcpu(started_vcpu);
4007 }
4008
433b9ee4 4009 spin_unlock(&vcpu->kvm->arch.start_stop_lock);
8ad35755 4010 return;
6852d7b6
DH
4011}
4012
d6712df9
CH
4013static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
4014 struct kvm_enable_cap *cap)
4015{
4016 int r;
4017
4018 if (cap->flags)
4019 return -EINVAL;
4020
4021 switch (cap->cap) {
fa6b7fe9
CH
4022 case KVM_CAP_S390_CSS_SUPPORT:
4023 if (!vcpu->kvm->arch.css_support) {
4024 vcpu->kvm->arch.css_support = 1;
c92ea7b9 4025 VM_EVENT(vcpu->kvm, 3, "%s", "ENABLE: CSS support");
fa6b7fe9
CH
4026 trace_kvm_s390_enable_css(vcpu->kvm);
4027 }
4028 r = 0;
4029 break;
d6712df9
CH
4030 default:
4031 r = -EINVAL;
4032 break;
4033 }
4034 return r;
4035}
4036
41408c28
TH
4037static long kvm_s390_guest_mem_op(struct kvm_vcpu *vcpu,
4038 struct kvm_s390_mem_op *mop)
4039{
4040 void __user *uaddr = (void __user *)mop->buf;
4041 void *tmpbuf = NULL;
4042 int r, srcu_idx;
4043 const u64 supported_flags = KVM_S390_MEMOP_F_INJECT_EXCEPTION
4044 | KVM_S390_MEMOP_F_CHECK_ONLY;
4045
722964f2 4046 if (mop->flags & ~supported_flags || mop->ar >= NUM_ACRS || !mop->size)
41408c28
TH
4047 return -EINVAL;
4048
4049 if (mop->size > MEM_OP_MAX_SIZE)
4050 return -E2BIG;
4051
4052 if (!(mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY)) {
4053 tmpbuf = vmalloc(mop->size);
4054 if (!tmpbuf)
4055 return -ENOMEM;
4056 }
4057
4058 srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
4059
4060 switch (mop->op) {
4061 case KVM_S390_MEMOP_LOGICAL_READ:
4062 if (mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY) {
92c96321
DH
4063 r = check_gva_range(vcpu, mop->gaddr, mop->ar,
4064 mop->size, GACC_FETCH);
41408c28
TH
4065 break;
4066 }
4067 r = read_guest(vcpu, mop->gaddr, mop->ar, tmpbuf, mop->size);
4068 if (r == 0) {
4069 if (copy_to_user(uaddr, tmpbuf, mop->size))
4070 r = -EFAULT;
4071 }
4072 break;
4073 case KVM_S390_MEMOP_LOGICAL_WRITE:
4074 if (mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY) {
92c96321
DH
4075 r = check_gva_range(vcpu, mop->gaddr, mop->ar,
4076 mop->size, GACC_STORE);
41408c28
TH
4077 break;
4078 }
4079 if (copy_from_user(tmpbuf, uaddr, mop->size)) {
4080 r = -EFAULT;
4081 break;
4082 }
4083 r = write_guest(vcpu, mop->gaddr, mop->ar, tmpbuf, mop->size);
4084 break;
4085 default:
4086 r = -EINVAL;
4087 }
4088
4089 srcu_read_unlock(&vcpu->kvm->srcu, srcu_idx);
4090
4091 if (r > 0 && (mop->flags & KVM_S390_MEMOP_F_INJECT_EXCEPTION) != 0)
4092 kvm_s390_inject_prog_irq(vcpu, &vcpu->arch.pgm);
4093
4094 vfree(tmpbuf);
4095 return r;
4096}
4097
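The matching uapi structure is struct kvm_s390_mem_op. A userspace sketch that reads guest memory through the logical-address path (vcpu_fd assumed, helper name made up):

#include <linux/kvm.h>
#include <sys/ioctl.h>

/* Sketch: read len bytes at guest logical address gaddr into buf. */
static int read_guest_mem(int vcpu_fd, __u64 gaddr, void *buf, __u32 len)
{
	struct kvm_s390_mem_op mop = {
		.gaddr = gaddr,
		.buf = (__u64)(unsigned long)buf,
		.size = len,	/* capped by MEM_OP_MAX_SIZE (64k) */
		.op = KVM_S390_MEMOP_LOGICAL_READ,
		.ar = 0,	/* access register 0 */
	};

	return ioctl(vcpu_fd, KVM_S390_MEM_OP, &mop);
}
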
b0c632db
HC
4098long kvm_arch_vcpu_ioctl(struct file *filp,
4099 unsigned int ioctl, unsigned long arg)
4100{
4101 struct kvm_vcpu *vcpu = filp->private_data;
4102 void __user *argp = (void __user *)arg;
800c1065 4103 int idx;
bc923cc9 4104 long r;
b0c632db 4105
93736624 4106 switch (ioctl) {
47b43c52
JF
4107 case KVM_S390_IRQ: {
4108 struct kvm_s390_irq s390irq;
4109
4110 r = -EFAULT;
4111 if (copy_from_user(&s390irq, argp, sizeof(s390irq)))
4112 break;
4113 r = kvm_s390_inject_vcpu(vcpu, &s390irq);
4114 break;
4115 }
93736624 4116 case KVM_S390_INTERRUPT: {
ba5c1e9b 4117 struct kvm_s390_interrupt s390int;
895d4c61 4118 struct kvm_s390_irq s390irq = {};
ba5c1e9b 4119
93736624 4120 r = -EFAULT;
ba5c1e9b 4121 if (copy_from_user(&s390int, argp, sizeof(s390int)))
93736624 4122 break;
383d0b05
JF
4123 if (s390int_to_s390irq(&s390int, &s390irq))
4124 return -EINVAL;
4125 r = kvm_s390_inject_vcpu(vcpu, &s390irq);
93736624 4126 break;
ba5c1e9b 4127 }
b0c632db 4128 case KVM_S390_STORE_STATUS:
800c1065 4129 idx = srcu_read_lock(&vcpu->kvm->srcu);
bd45c812 4130 r = kvm_s390_store_status_unloaded(vcpu, arg);
800c1065 4131 srcu_read_unlock(&vcpu->kvm->srcu, idx);
bc923cc9 4132 break;
b0c632db
HC
4133 case KVM_S390_SET_INITIAL_PSW: {
4134 psw_t psw;
4135
bc923cc9 4136 r = -EFAULT;
b0c632db 4137 if (copy_from_user(&psw, argp, sizeof(psw)))
bc923cc9
AK
4138 break;
4139 r = kvm_arch_vcpu_ioctl_set_initial_psw(vcpu, psw);
4140 break;
b0c632db
HC
4141 }
4142 case KVM_S390_INITIAL_RESET:
bc923cc9
AK
4143 r = kvm_arch_vcpu_ioctl_initial_reset(vcpu);
4144 break;
14eebd91
CO
4145 case KVM_SET_ONE_REG:
4146 case KVM_GET_ONE_REG: {
4147 struct kvm_one_reg reg;
4148 r = -EFAULT;
4149 if (copy_from_user(&reg, argp, sizeof(reg)))
4150 break;
4151 if (ioctl == KVM_SET_ONE_REG)
4152 r = kvm_arch_vcpu_ioctl_set_one_reg(vcpu, &reg);
4153 else
4154 r = kvm_arch_vcpu_ioctl_get_one_reg(vcpu, &reg);
4155 break;
4156 }
27e0393f
CO
4157#ifdef CONFIG_KVM_S390_UCONTROL
4158 case KVM_S390_UCAS_MAP: {
4159 struct kvm_s390_ucas_mapping ucasmap;
4160
4161 if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
4162 r = -EFAULT;
4163 break;
4164 }
4165
4166 if (!kvm_is_ucontrol(vcpu->kvm)) {
4167 r = -EINVAL;
4168 break;
4169 }
4170
4171 r = gmap_map_segment(vcpu->arch.gmap, ucasmap.user_addr,
4172 ucasmap.vcpu_addr, ucasmap.length);
4173 break;
4174 }
4175 case KVM_S390_UCAS_UNMAP: {
4176 struct kvm_s390_ucas_mapping ucasmap;
4177
4178 if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
4179 r = -EFAULT;
4180 break;
4181 }
4182
4183 if (!kvm_is_ucontrol(vcpu->kvm)) {
4184 r = -EINVAL;
4185 break;
4186 }
4187
4188 r = gmap_unmap_segment(vcpu->arch.gmap, ucasmap.vcpu_addr,
4189 ucasmap.length);
4190 break;
4191 }
4192#endif
ccc7910f 4193 case KVM_S390_VCPU_FAULT: {
527e30b4 4194 r = gmap_fault(vcpu->arch.gmap, arg, 0);
ccc7910f
CO
4195 break;
4196 }
d6712df9
CH
4197 case KVM_ENABLE_CAP:
4198 {
4199 struct kvm_enable_cap cap;
4200 r = -EFAULT;
4201 if (copy_from_user(&cap, argp, sizeof(cap)))
4202 break;
4203 r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap);
4204 break;
4205 }
41408c28
TH
4206 case KVM_S390_MEM_OP: {
4207 struct kvm_s390_mem_op mem_op;
4208
4209 if (copy_from_user(&mem_op, argp, sizeof(mem_op)) == 0)
4210 r = kvm_s390_guest_mem_op(vcpu, &mem_op);
4211 else
4212 r = -EFAULT;
4213 break;
4214 }
816c7667
JF
4215 case KVM_S390_SET_IRQ_STATE: {
4216 struct kvm_s390_irq_state irq_state;
4217
4218 r = -EFAULT;
4219 if (copy_from_user(&irq_state, argp, sizeof(irq_state)))
4220 break;
4221 if (irq_state.len > VCPU_IRQS_MAX_BUF ||
4222 irq_state.len == 0 ||
4223 irq_state.len % sizeof(struct kvm_s390_irq) > 0) {
4224 r = -EINVAL;
4225 break;
4226 }
bb64da9a 4227 /* do not use irq_state.flags, it will break old QEMUs */
816c7667
JF
4228 r = kvm_s390_set_irq_state(vcpu,
4229 (void __user *) irq_state.buf,
4230 irq_state.len);
4231 break;
4232 }
4233 case KVM_S390_GET_IRQ_STATE: {
4234 struct kvm_s390_irq_state irq_state;
4235
4236 r = -EFAULT;
4237 if (copy_from_user(&irq_state, argp, sizeof(irq_state)))
4238 break;
4239 if (irq_state.len == 0) {
4240 r = -EINVAL;
4241 break;
4242 }
bb64da9a 4243 /* do not use irq_state.flags, it will break old QEMUs */
816c7667
JF
4244 r = kvm_s390_get_irq_state(vcpu,
4245 (__u8 __user *) irq_state.buf,
4246 irq_state.len);
4247 break;
4248 }
b0c632db 4249 default:
3e6afcf1 4250 r = -ENOTTY;
b0c632db 4251 }
bc923cc9 4252 return r;
b0c632db
HC
4253}
4254
5b1c1493
CO
4255int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
4256{
4257#ifdef CONFIG_KVM_S390_UCONTROL
4258 if ((vmf->pgoff == KVM_S390_SIE_PAGE_OFFSET)
4259 && (kvm_is_ucontrol(vcpu->kvm))) {
4260 vmf->page = virt_to_page(vcpu->arch.sie_block);
4261 get_page(vmf->page);
4262 return 0;
4263 }
4264#endif
4265 return VM_FAULT_SIGBUS;
4266}
4267
5587027c
AK
4268int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
4269 unsigned long npages)
db3fe4eb
TY
4270{
4271 return 0;
4272}
4273
b0c632db 4274/* Section: memory related */
f7784b8e
MT
4275int kvm_arch_prepare_memory_region(struct kvm *kvm,
4276 struct kvm_memory_slot *memslot,
09170a49 4277 const struct kvm_userspace_memory_region *mem,
7b6195a9 4278 enum kvm_mr_change change)
b0c632db 4279{
dd2887e7
NW
4280 /* A few sanity checks. Memory slots have to start and end on a
4281 segment boundary (1MB). The memory in userland may be fragmented
4282 into various different vmas. It is okay to mmap() and munmap()
4283 stuff in this slot after doing this call at any time */
b0c632db 4284
598841ca 4285 if (mem->userspace_addr & 0xffffful)
b0c632db
HC
4286 return -EINVAL;
4287
598841ca 4288 if (mem->memory_size & 0xffffful)
b0c632db
HC
4289 return -EINVAL;
4290
a3a92c31
DD
4291 if (mem->guest_phys_addr + mem->memory_size > kvm->arch.mem_limit)
4292 return -EINVAL;
4293
f7784b8e
MT
4294 return 0;
4295}
4296
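The alignment rules above apply to every slot registered with KVM_SET_USER_MEMORY_REGION: the userspace address and the size must both be multiples of the 1MB segment size. A sketch that satisfies them (vm_fd assumed, helper name made up):

#include <stdlib.h>
#include <linux/kvm.h>
#include <sys/ioctl.h>

#define SLOT_SIZE (16UL << 20)	/* 16MB: a multiple of the 1MB segment */

/* Sketch: back guest physical 0 with 1MB-aligned anonymous memory. */
static int add_main_memory(int vm_fd)
{
	struct kvm_userspace_memory_region mem = {
		.slot = 0,
		.guest_phys_addr = 0,
		.memory_size = SLOT_SIZE,
	};
	void *ram;

	if (posix_memalign(&ram, 1UL << 20, SLOT_SIZE))
		return -1;
	mem.userspace_addr = (unsigned long)ram;
	return ioctl(vm_fd, KVM_SET_USER_MEMORY_REGION, &mem);
}
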
4297void kvm_arch_commit_memory_region(struct kvm *kvm,
09170a49 4298 const struct kvm_userspace_memory_region *mem,
8482644a 4299 const struct kvm_memory_slot *old,
f36f3f28 4300 const struct kvm_memory_slot *new,
8482644a 4301 enum kvm_mr_change change)
f7784b8e 4302{
326f839f 4303 int rc = 0;
598841ca 4304
326f839f
CB
4305 switch (change) {
4306 case KVM_MR_DELETE:
4307 rc = gmap_unmap_segment(kvm->arch.gmap, old->base_gfn * PAGE_SIZE,
4308 old->npages * PAGE_SIZE);
4309 break;
4310 case KVM_MR_MOVE:
4311 rc = gmap_unmap_segment(kvm->arch.gmap, old->base_gfn * PAGE_SIZE,
4312 old->npages * PAGE_SIZE);
4313 if (rc)
4314 break;
4315 /* FALLTHROUGH */
4316 case KVM_MR_CREATE:
4317 rc = gmap_map_segment(kvm->arch.gmap, mem->userspace_addr,
4318 mem->guest_phys_addr, mem->memory_size);
4319 break;
4320 case KVM_MR_FLAGS_ONLY:
4321 break;
4322 default:
4323 WARN(1, "Unknown KVM MR CHANGE: %d\n", change);
4324 }
598841ca 4325 if (rc)
ea2cdd27 4326 pr_warn("failed to commit memory region\n");
598841ca 4327 return;
b0c632db
HC
4328}
4329
60a37709
AY
4330static inline unsigned long nonhyp_mask(int i)
4331{
4332 unsigned int nonhyp_fai = (sclp.hmfai << i * 2) >> 30;
4333
4334 return 0x0000ffffffffffffUL >> (nonhyp_fai << 4);
4335}
4336
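nonhyp_mask() extracts the 2-bit apportionment field i from sclp.hmfai and widens it into a mask over one 64-bit facility word: field value 0 keeps facility bits 16-63, 1 keeps 32-63, 2 keeps 48-63 and 3 keeps none (bit 0 being the most significant bit, per the facility-list convention). A worked instance with an assumed hmfai value:

/* Worked instance, assuming hmfai = 0x40000000 and i = 0. */
unsigned int nonhyp_fai = (0x40000000u << 0) >> 30;	/* == 1 */
unsigned long mask = 0x0000ffffffffffffUL >> (nonhyp_fai << 4);
/* mask == 0x00000000ffffffff: only facility bits 32-63 survive */
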
3491caf2
CB
4337void kvm_arch_vcpu_block_finish(struct kvm_vcpu *vcpu)
4338{
4339 vcpu->valid_wakeup = false;
4340}
4341
b0c632db
HC
4342static int __init kvm_s390_init(void)
4343{
60a37709
AY
4344 int i;
4345
07197fd0
DH
4346 if (!sclp.has_sief2) {
4347 pr_info("SIE not available\n");
4348 return -ENODEV;
4349 }
4350
60a37709 4351 for (i = 0; i < 16; i++)
c936f04c 4352 kvm_s390_fac_base[i] |=
60a37709
AY
4353 S390_lowcore.stfle_fac_list[i] & nonhyp_mask(i);
4354
9d8d5786 4355 return kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
b0c632db
HC
4356}
4357
4358static void __exit kvm_s390_exit(void)
4359{
4360 kvm_exit();
4361}
4362
4363module_init(kvm_s390_init);
4364module_exit(kvm_s390_exit);
566af940
CH
4365
4366/*
4367 * Enable autoloading of the kvm module.
4368 * Note that we add the module alias here instead of virt/kvm/kvm_main.c
4369 * since x86 takes a different approach.
4370 */
4371#include <linux/miscdevice.h>
4372MODULE_ALIAS_MISCDEV(KVM_MINOR);
4373MODULE_ALIAS("devname:kvm");