arch/s390/kvm/kvm-s390.c
/*
 * hosting zSeries kernel virtual machines
 *
 * Copyright IBM Corp. 2008, 2009
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (version 2 only)
 * as published by the Free Software Foundation.
 *
 * Author(s): Carsten Otte <cotte@de.ibm.com>
 *            Christian Borntraeger <borntraeger@de.ibm.com>
 *            Heiko Carstens <heiko.carstens@de.ibm.com>
 *            Christian Ehrhardt <ehrhardt@de.ibm.com>
 *            Jason J. Herne <jjherne@us.ibm.com>
 */

#include <linux/compiler.h>
#include <linux/err.h>
#include <linux/fs.h>
#include <linux/hrtimer.h>
#include <linux/init.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/timer.h>
#include <linux/vmalloc.h>
#include <linux/bitmap.h>
#include <asm/asm-offsets.h>
#include <asm/lowcore.h>
#include <asm/etr.h>
#include <asm/pgtable.h>
#include <asm/gmap.h>
#include <asm/nmi.h>
#include <asm/switch_to.h>
#include <asm/isc.h>
#include <asm/sclp.h>
#include <asm/cpacf.h>
#include "kvm-s390.h"
#include "gaccess.h"

#define KMSG_COMPONENT "kvm-s390"
#undef pr_fmt
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#define CREATE_TRACE_POINTS
#include "trace.h"
#include "trace-s390.h"

#define MEM_OP_MAX_SIZE 65536	/* Maximum transfer size for KVM_S390_MEM_OP */
#define LOCAL_IRQS 32
#define VCPU_IRQS_MAX_BUF (sizeof(struct kvm_s390_irq) * \
			   (KVM_MAX_VCPUS + LOCAL_IRQS))

#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU

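/*
 * Each entry below pairs a debugfs file name with the offsetof() of a
 * counter inside struct kvm_vcpu; the KVM_STAT_VCPU tag tells the
 * generic KVM debugfs code to resolve the offset per VCPU, so no
 * s390-specific read handlers are needed.
 */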
struct kvm_stats_debugfs_item debugfs_entries[] = {
	{ "userspace_handled", VCPU_STAT(exit_userspace) },
	{ "exit_null", VCPU_STAT(exit_null) },
	{ "exit_validity", VCPU_STAT(exit_validity) },
	{ "exit_stop_request", VCPU_STAT(exit_stop_request) },
	{ "exit_external_request", VCPU_STAT(exit_external_request) },
	{ "exit_external_interrupt", VCPU_STAT(exit_external_interrupt) },
	{ "exit_instruction", VCPU_STAT(exit_instruction) },
	{ "exit_program_interruption", VCPU_STAT(exit_program_interruption) },
	{ "exit_instr_and_program_int", VCPU_STAT(exit_instr_and_program) },
	{ "exit_operation_exception", VCPU_STAT(exit_operation_exception) },
	{ "halt_successful_poll", VCPU_STAT(halt_successful_poll) },
	{ "halt_attempted_poll", VCPU_STAT(halt_attempted_poll) },
	{ "halt_poll_invalid", VCPU_STAT(halt_poll_invalid) },
	{ "halt_wakeup", VCPU_STAT(halt_wakeup) },
	{ "instruction_lctlg", VCPU_STAT(instruction_lctlg) },
	{ "instruction_lctl", VCPU_STAT(instruction_lctl) },
	{ "instruction_stctl", VCPU_STAT(instruction_stctl) },
	{ "instruction_stctg", VCPU_STAT(instruction_stctg) },
	{ "deliver_emergency_signal", VCPU_STAT(deliver_emergency_signal) },
	{ "deliver_external_call", VCPU_STAT(deliver_external_call) },
	{ "deliver_service_signal", VCPU_STAT(deliver_service_signal) },
	{ "deliver_virtio_interrupt", VCPU_STAT(deliver_virtio_interrupt) },
	{ "deliver_stop_signal", VCPU_STAT(deliver_stop_signal) },
	{ "deliver_prefix_signal", VCPU_STAT(deliver_prefix_signal) },
	{ "deliver_restart_signal", VCPU_STAT(deliver_restart_signal) },
	{ "deliver_program_interruption", VCPU_STAT(deliver_program_int) },
	{ "exit_wait_state", VCPU_STAT(exit_wait_state) },
	{ "instruction_pfmf", VCPU_STAT(instruction_pfmf) },
	{ "instruction_stidp", VCPU_STAT(instruction_stidp) },
	{ "instruction_spx", VCPU_STAT(instruction_spx) },
	{ "instruction_stpx", VCPU_STAT(instruction_stpx) },
	{ "instruction_stap", VCPU_STAT(instruction_stap) },
	{ "instruction_storage_key", VCPU_STAT(instruction_storage_key) },
	{ "instruction_ipte_interlock", VCPU_STAT(instruction_ipte_interlock) },
	{ "instruction_stsch", VCPU_STAT(instruction_stsch) },
	{ "instruction_chsc", VCPU_STAT(instruction_chsc) },
	{ "instruction_essa", VCPU_STAT(instruction_essa) },
	{ "instruction_stsi", VCPU_STAT(instruction_stsi) },
	{ "instruction_stfl", VCPU_STAT(instruction_stfl) },
	{ "instruction_tprot", VCPU_STAT(instruction_tprot) },
	{ "instruction_sthyi", VCPU_STAT(instruction_sthyi) },
	{ "instruction_sigp_sense", VCPU_STAT(instruction_sigp_sense) },
	{ "instruction_sigp_sense_running", VCPU_STAT(instruction_sigp_sense_running) },
	{ "instruction_sigp_external_call", VCPU_STAT(instruction_sigp_external_call) },
	{ "instruction_sigp_emergency", VCPU_STAT(instruction_sigp_emergency) },
	{ "instruction_sigp_cond_emergency", VCPU_STAT(instruction_sigp_cond_emergency) },
	{ "instruction_sigp_start", VCPU_STAT(instruction_sigp_start) },
	{ "instruction_sigp_stop", VCPU_STAT(instruction_sigp_stop) },
	{ "instruction_sigp_stop_store_status", VCPU_STAT(instruction_sigp_stop_store_status) },
	{ "instruction_sigp_store_status", VCPU_STAT(instruction_sigp_store_status) },
	{ "instruction_sigp_store_adtl_status", VCPU_STAT(instruction_sigp_store_adtl_status) },
	{ "instruction_sigp_set_arch", VCPU_STAT(instruction_sigp_arch) },
	{ "instruction_sigp_set_prefix", VCPU_STAT(instruction_sigp_prefix) },
	{ "instruction_sigp_restart", VCPU_STAT(instruction_sigp_restart) },
	{ "instruction_sigp_cpu_reset", VCPU_STAT(instruction_sigp_cpu_reset) },
	{ "instruction_sigp_init_cpu_reset", VCPU_STAT(instruction_sigp_init_cpu_reset) },
	{ "instruction_sigp_unknown", VCPU_STAT(instruction_sigp_unknown) },
	{ "diagnose_10", VCPU_STAT(diagnose_10) },
	{ "diagnose_44", VCPU_STAT(diagnose_44) },
	{ "diagnose_9c", VCPU_STAT(diagnose_9c) },
	{ "diagnose_258", VCPU_STAT(diagnose_258) },
	{ "diagnose_308", VCPU_STAT(diagnose_308) },
	{ "diagnose_500", VCPU_STAT(diagnose_500) },
	{ NULL }
};

/* upper facilities limit for kvm */
unsigned long kvm_s390_fac_list_mask[16] = {
	0xffe6000000000000UL,
	0x005e000000000000UL,
};

unsigned long kvm_s390_fac_list_mask_size(void)
{
	BUILD_BUG_ON(ARRAY_SIZE(kvm_s390_fac_list_mask) > S390_ARCH_FAC_MASK_SIZE_U64);
	return ARRAY_SIZE(kvm_s390_fac_list_mask);
}

/* available cpu features supported by kvm */
static DECLARE_BITMAP(kvm_s390_available_cpu_feat, KVM_S390_VM_CPU_FEAT_NR_BITS);
/* available subfunctions indicated via query / "test bit" */
static struct kvm_s390_vm_cpu_subfunc kvm_s390_available_subfunc;

static struct gmap_notifier gmap_notifier;
debug_info_t *kvm_s390_dbf;

/* Section: not file related */
int kvm_arch_hardware_enable(void)
{
	/* every s390 is virtualization enabled ;-) */
	return 0;
}

static void kvm_gmap_notifier(struct gmap *gmap, unsigned long address);

/*
 * This callback is executed during stop_machine(). All CPUs are therefore
 * temporarily stopped. In order not to change guest behavior, we have to
 * disable preemption whenever we touch the epoch of kvm and the VCPUs,
 * so a CPU won't be stopped while calculating with the epoch.
 */
static int kvm_clock_sync(struct notifier_block *notifier, unsigned long val,
			  void *v)
{
	struct kvm *kvm;
	struct kvm_vcpu *vcpu;
	int i;
	unsigned long long *delta = v;

	list_for_each_entry(kvm, &vm_list, vm_list) {
		kvm->arch.epoch -= *delta;
		kvm_for_each_vcpu(i, vcpu, kvm) {
			vcpu->arch.sie_block->epoch -= *delta;
			if (vcpu->arch.cputm_enabled)
				vcpu->arch.cputm_start += *delta;
		}
	}
	return NOTIFY_OK;
}

static struct notifier_block kvm_clock_notifier = {
	.notifier_call = kvm_clock_sync,
};

int kvm_arch_hardware_setup(void)
{
	gmap_notifier.notifier_call = kvm_gmap_notifier;
	gmap_register_ipte_notifier(&gmap_notifier);
	atomic_notifier_chain_register(&s390_epoch_delta_notifier,
				       &kvm_clock_notifier);
	return 0;
}

void kvm_arch_hardware_unsetup(void)
{
	gmap_unregister_ipte_notifier(&gmap_notifier);
	atomic_notifier_chain_unregister(&s390_epoch_delta_notifier,
					 &kvm_clock_notifier);
}

static void allow_cpu_feat(unsigned long nr)
{
	set_bit_inv(nr, kvm_s390_available_cpu_feat);
}

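/*
 * Probe one PERFORM LOCKED OPERATION (PLO) subfunction. Bit 0x100 in
 * r0 selects the "test bit" query form, so the parameter registers are
 * ignored; condition code 0 means the subfunction named by the low
 * byte of r0 is installed.
 */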
static inline int plo_test_bit(unsigned char nr)
{
	register unsigned long r0 asm("0") = (unsigned long) nr | 0x100;
	int cc = 3; /* subfunction not available */

	asm volatile(
		/* Parameter registers are ignored for "test bit" */
		"	plo	0,0,0,0(0)\n"
		"	ipm	%0\n"
		"	srl	%0,28\n"
		: "=d" (cc)
		: "d" (r0)
		: "cc");
	return cc == 0;
}

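/*
 * Probe, once at module load, which optional subfunctions the host
 * provides: all 256 possible PLO subfunctions, the PTFF TOD-clock
 * steering functions, and the CPACF crypto queries gated by the MSA
 * facilities. The results seed what the cpu model interface can later
 * report to user space and offer to guests.
 */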
static void kvm_s390_cpu_feat_init(void)
{
	int i;

	for (i = 0; i < 256; ++i) {
		if (plo_test_bit(i))
			kvm_s390_available_subfunc.plo[i >> 3] |= 0x80 >> (i & 7);
	}

	if (test_facility(28)) /* TOD-clock steering */
		etr_ptff(kvm_s390_available_subfunc.ptff, ETR_PTFF_QAF);

	if (test_facility(17)) { /* MSA */
		__cpacf_query(CPACF_KMAC, kvm_s390_available_subfunc.kmac);
		__cpacf_query(CPACF_KMC, kvm_s390_available_subfunc.kmc);
		__cpacf_query(CPACF_KM, kvm_s390_available_subfunc.km);
		__cpacf_query(CPACF_KIMD, kvm_s390_available_subfunc.kimd);
		__cpacf_query(CPACF_KLMD, kvm_s390_available_subfunc.klmd);
	}
	if (test_facility(76)) /* MSA3 */
		__cpacf_query(CPACF_PCKMO, kvm_s390_available_subfunc.pckmo);
	if (test_facility(77)) { /* MSA4 */
		__cpacf_query(CPACF_KMCTR, kvm_s390_available_subfunc.kmctr);
		__cpacf_query(CPACF_KMF, kvm_s390_available_subfunc.kmf);
		__cpacf_query(CPACF_KMO, kvm_s390_available_subfunc.kmo);
		__cpacf_query(CPACF_PCC, kvm_s390_available_subfunc.pcc);
	}
	if (test_facility(57)) /* MSA5 */
		__cpacf_query(CPACF_PPNO, kvm_s390_available_subfunc.ppno);

	if (MACHINE_HAS_ESOP)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_ESOP);
}

int kvm_arch_init(void *opaque)
{
	kvm_s390_dbf = debug_register("kvm-trace", 32, 1, 7 * sizeof(long));
	if (!kvm_s390_dbf)
		return -ENOMEM;

	if (debug_register_view(kvm_s390_dbf, &debug_sprintf_view)) {
		debug_unregister(kvm_s390_dbf);
		return -ENOMEM;
	}

	kvm_s390_cpu_feat_init();

	/* Register floating interrupt controller interface. */
	return kvm_register_device_ops(&kvm_flic_ops, KVM_DEV_TYPE_FLIC);
}

void kvm_arch_exit(void)
{
	debug_unregister(kvm_s390_dbf);
}

/* Section: device related */
long kvm_arch_dev_ioctl(struct file *filp,
			unsigned int ioctl, unsigned long arg)
{
	if (ioctl == KVM_S390_ENABLE_SIE)
		return s390_enable_sie();
	return -EINVAL;
}

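/*
 * Report which KVM capabilities this host supports. Most are
 * unconditionally available; some depend on machine facilities (vector
 * registers, runtime instrumentation), and a few return a value
 * instead of a boolean, such as the maximum KVM_S390_MEM_OP transfer
 * size or the number of VCPU slots provided by the basic vs. the
 * extended SCA.
 */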
int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
{
	int r;

	switch (ext) {
	case KVM_CAP_S390_PSW:
	case KVM_CAP_S390_GMAP:
	case KVM_CAP_SYNC_MMU:
#ifdef CONFIG_KVM_S390_UCONTROL
	case KVM_CAP_S390_UCONTROL:
#endif
	case KVM_CAP_ASYNC_PF:
	case KVM_CAP_SYNC_REGS:
	case KVM_CAP_ONE_REG:
	case KVM_CAP_ENABLE_CAP:
	case KVM_CAP_S390_CSS_SUPPORT:
	case KVM_CAP_IOEVENTFD:
	case KVM_CAP_DEVICE_CTRL:
	case KVM_CAP_ENABLE_CAP_VM:
	case KVM_CAP_S390_IRQCHIP:
	case KVM_CAP_VM_ATTRIBUTES:
	case KVM_CAP_MP_STATE:
	case KVM_CAP_S390_INJECT_IRQ:
	case KVM_CAP_S390_USER_SIGP:
	case KVM_CAP_S390_USER_STSI:
	case KVM_CAP_S390_SKEYS:
	case KVM_CAP_S390_IRQ_STATE:
		r = 1;
		break;
	case KVM_CAP_S390_MEM_OP:
		r = MEM_OP_MAX_SIZE;
		break;
	case KVM_CAP_NR_VCPUS:
	case KVM_CAP_MAX_VCPUS:
		r = sclp.has_esca ? KVM_S390_ESCA_CPU_SLOTS
				  : KVM_S390_BSCA_CPU_SLOTS;
		break;
	case KVM_CAP_NR_MEMSLOTS:
		r = KVM_USER_MEM_SLOTS;
		break;
	case KVM_CAP_S390_COW:
		r = MACHINE_HAS_ESOP;
		break;
	case KVM_CAP_S390_VECTOR_REGISTERS:
		r = MACHINE_HAS_VX;
		break;
	case KVM_CAP_S390_RI:
		r = test_facility(64);
		break;
	default:
		r = 0;
	}
	return r;
}

static void kvm_s390_sync_dirty_log(struct kvm *kvm,
				    struct kvm_memory_slot *memslot)
{
	gfn_t cur_gfn, last_gfn;
	unsigned long address;
	struct gmap *gmap = kvm->arch.gmap;

	/* Loop over all guest pages */
	last_gfn = memslot->base_gfn + memslot->npages;
	for (cur_gfn = memslot->base_gfn; cur_gfn <= last_gfn; cur_gfn++) {
		address = gfn_to_hva_memslot(memslot, cur_gfn);

		if (test_and_clear_guest_dirty(gmap->mm, address))
			mark_page_dirty(kvm, cur_gfn);
		if (fatal_signal_pending(current))
			return;
		cond_resched();
	}
}

/* Section: vm related */
static void sca_del_vcpu(struct kvm_vcpu *vcpu);

/*
 * Get (and clear) the dirty memory log for a memory slot.
 */
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
			       struct kvm_dirty_log *log)
{
	int r;
	unsigned long n;
	struct kvm_memslots *slots;
	struct kvm_memory_slot *memslot;
	int is_dirty = 0;

	mutex_lock(&kvm->slots_lock);

	r = -EINVAL;
	if (log->slot >= KVM_USER_MEM_SLOTS)
		goto out;

	slots = kvm_memslots(kvm);
	memslot = id_to_memslot(slots, log->slot);
	r = -ENOENT;
	if (!memslot->dirty_bitmap)
		goto out;

	kvm_s390_sync_dirty_log(kvm, memslot);
	r = kvm_get_dirty_log(kvm, log, &is_dirty);
	if (r)
		goto out;

	/* Clear the dirty log */
	if (is_dirty) {
		n = kvm_dirty_bitmap_bytes(memslot);
		memset(memslot->dirty_bitmap, 0, n);
	}
	r = 0;
out:
	mutex_unlock(&kvm->slots_lock);
	return r;
}

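/*
 * Handle KVM_ENABLE_CAP on the VM fd. Capabilities that widen the
 * guest-visible facility list (vector registers, runtime
 * instrumentation) may only be enabled while no VCPU exists yet, hence
 * the online_vcpus check under kvm->lock.
 */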
static int kvm_vm_ioctl_enable_cap(struct kvm *kvm, struct kvm_enable_cap *cap)
{
	int r;

	if (cap->flags)
		return -EINVAL;

	switch (cap->cap) {
	case KVM_CAP_S390_IRQCHIP:
		VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_IRQCHIP");
		kvm->arch.use_irqchip = 1;
		r = 0;
		break;
	case KVM_CAP_S390_USER_SIGP:
		VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_USER_SIGP");
		kvm->arch.user_sigp = 1;
		r = 0;
		break;
	case KVM_CAP_S390_VECTOR_REGISTERS:
		mutex_lock(&kvm->lock);
		if (atomic_read(&kvm->online_vcpus)) {
			r = -EBUSY;
		} else if (MACHINE_HAS_VX) {
			set_kvm_facility(kvm->arch.model.fac_mask, 129);
			set_kvm_facility(kvm->arch.model.fac_list, 129);
			r = 0;
		} else
			r = -EINVAL;
		mutex_unlock(&kvm->lock);
		VM_EVENT(kvm, 3, "ENABLE: CAP_S390_VECTOR_REGISTERS %s",
			 r ? "(not available)" : "(success)");
		break;
	case KVM_CAP_S390_RI:
		r = -EINVAL;
		mutex_lock(&kvm->lock);
		if (atomic_read(&kvm->online_vcpus)) {
			r = -EBUSY;
		} else if (test_facility(64)) {
			set_kvm_facility(kvm->arch.model.fac_mask, 64);
			set_kvm_facility(kvm->arch.model.fac_list, 64);
			r = 0;
		}
		mutex_unlock(&kvm->lock);
		VM_EVENT(kvm, 3, "ENABLE: CAP_S390_RI %s",
			 r ? "(not available)" : "(success)");
		break;
	case KVM_CAP_S390_USER_STSI:
		VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_USER_STSI");
		kvm->arch.user_stsi = 1;
		r = 0;
		break;
	default:
		r = -EINVAL;
		break;
	}
	return r;
}

static int kvm_s390_get_mem_control(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	switch (attr->attr) {
	case KVM_S390_VM_MEM_LIMIT_SIZE:
		ret = 0;
		VM_EVENT(kvm, 3, "QUERY: max guest memory: %lu bytes",
			 kvm->arch.mem_limit);
		if (put_user(kvm->arch.mem_limit, (u64 __user *)attr->addr))
			ret = -EFAULT;
		break;
	default:
		ret = -ENXIO;
		break;
	}
	return ret;
}

static int kvm_s390_set_mem_control(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;
	unsigned int idx;

	switch (attr->attr) {
	case KVM_S390_VM_MEM_ENABLE_CMMA:
		/* enable CMMA only for z10 and later (EDAT_1) */
		ret = -EINVAL;
		if (!MACHINE_IS_LPAR || !MACHINE_HAS_EDAT1)
			break;

		ret = -EBUSY;
		VM_EVENT(kvm, 3, "%s", "ENABLE: CMMA support");
		mutex_lock(&kvm->lock);
		if (atomic_read(&kvm->online_vcpus) == 0) {
			kvm->arch.use_cmma = 1;
			ret = 0;
		}
		mutex_unlock(&kvm->lock);
		break;
	case KVM_S390_VM_MEM_CLR_CMMA:
		ret = -EINVAL;
		if (!kvm->arch.use_cmma)
			break;

		VM_EVENT(kvm, 3, "%s", "RESET: CMMA states");
		mutex_lock(&kvm->lock);
		idx = srcu_read_lock(&kvm->srcu);
		s390_reset_cmma(kvm->arch.gmap->mm);
		srcu_read_unlock(&kvm->srcu, idx);
		mutex_unlock(&kvm->lock);
		ret = 0;
		break;
	case KVM_S390_VM_MEM_LIMIT_SIZE: {
		unsigned long new_limit;

		if (kvm_is_ucontrol(kvm))
			return -EINVAL;

		if (get_user(new_limit, (u64 __user *)attr->addr))
			return -EFAULT;

		if (kvm->arch.mem_limit != KVM_S390_NO_MEM_LIMIT &&
		    new_limit > kvm->arch.mem_limit)
			return -E2BIG;

		if (!new_limit)
			return -EINVAL;

		/* gmap_alloc takes last usable address */
		if (new_limit != KVM_S390_NO_MEM_LIMIT)
			new_limit -= 1;

		ret = -EBUSY;
		mutex_lock(&kvm->lock);
		if (atomic_read(&kvm->online_vcpus) == 0) {
			/* gmap_alloc will round the limit up */
			struct gmap *new = gmap_alloc(current->mm, new_limit);

			if (!new) {
				ret = -ENOMEM;
			} else {
				gmap_free(kvm->arch.gmap);
				new->private = kvm;
				kvm->arch.gmap = new;
				ret = 0;
			}
		}
		mutex_unlock(&kvm->lock);
		VM_EVENT(kvm, 3, "SET: max guest address: %lu", new_limit);
		VM_EVENT(kvm, 3, "New guest asce: 0x%pK",
			 (void *) kvm->arch.gmap->asce);
		break;
	}
	default:
		ret = -ENXIO;
		break;
	}
	return ret;
}

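/*
 * Toggle AES/DEA protected-key wrapping for the whole VM. Enabling
 * generates fresh random wrapping-key material in the CRYCB, disabling
 * clears it; afterwards every VCPU's control block is updated and the
 * VCPU is kicked out of SIE so the change takes effect immediately.
 */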
static void kvm_s390_vcpu_crypto_setup(struct kvm_vcpu *vcpu);

static int kvm_s390_vm_set_crypto(struct kvm *kvm, struct kvm_device_attr *attr)
{
	struct kvm_vcpu *vcpu;
	int i;

	if (!test_kvm_facility(kvm, 76))
		return -EINVAL;

	mutex_lock(&kvm->lock);
	switch (attr->attr) {
	case KVM_S390_VM_CRYPTO_ENABLE_AES_KW:
		get_random_bytes(
			kvm->arch.crypto.crycb->aes_wrapping_key_mask,
			sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
		kvm->arch.crypto.aes_kw = 1;
		VM_EVENT(kvm, 3, "%s", "ENABLE: AES keywrapping support");
		break;
	case KVM_S390_VM_CRYPTO_ENABLE_DEA_KW:
		get_random_bytes(
			kvm->arch.crypto.crycb->dea_wrapping_key_mask,
			sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));
		kvm->arch.crypto.dea_kw = 1;
		VM_EVENT(kvm, 3, "%s", "ENABLE: DEA keywrapping support");
		break;
	case KVM_S390_VM_CRYPTO_DISABLE_AES_KW:
		kvm->arch.crypto.aes_kw = 0;
		memset(kvm->arch.crypto.crycb->aes_wrapping_key_mask, 0,
			sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
		VM_EVENT(kvm, 3, "%s", "DISABLE: AES keywrapping support");
		break;
	case KVM_S390_VM_CRYPTO_DISABLE_DEA_KW:
		kvm->arch.crypto.dea_kw = 0;
		memset(kvm->arch.crypto.crycb->dea_wrapping_key_mask, 0,
			sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));
		VM_EVENT(kvm, 3, "%s", "DISABLE: DEA keywrapping support");
		break;
	default:
		mutex_unlock(&kvm->lock);
		return -ENXIO;
	}

	kvm_for_each_vcpu(i, vcpu, kvm) {
		kvm_s390_vcpu_crypto_setup(vcpu);
		exit_sie(vcpu);
	}
	mutex_unlock(&kvm->lock);
	return 0;
}

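/*
 * The guest TOD clock is maintained as an offset (the epoch) relative
 * to the host TOD clock, which is why kvm_clock_sync() above must
 * adjust the epoch when the host clock is steered. Only the low 64
 * bits are settable through this interface; a nonzero high word is
 * rejected because the TOD-clock extension is not offered to guests.
 */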
static int kvm_s390_set_tod_high(struct kvm *kvm, struct kvm_device_attr *attr)
{
	u8 gtod_high;

	if (copy_from_user(&gtod_high, (void __user *)attr->addr,
			   sizeof(gtod_high)))
		return -EFAULT;

	if (gtod_high != 0)
		return -EINVAL;
	VM_EVENT(kvm, 3, "SET: TOD extension: 0x%x", gtod_high);

	return 0;
}

static int kvm_s390_set_tod_low(struct kvm *kvm, struct kvm_device_attr *attr)
{
	u64 gtod;

	if (copy_from_user(&gtod, (void __user *)attr->addr, sizeof(gtod)))
		return -EFAULT;

	kvm_s390_set_tod_clock(kvm, gtod);
	VM_EVENT(kvm, 3, "SET: TOD base: 0x%llx", gtod);
	return 0;
}

static int kvm_s390_set_tod(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	if (attr->flags)
		return -EINVAL;

	switch (attr->attr) {
	case KVM_S390_VM_TOD_HIGH:
		ret = kvm_s390_set_tod_high(kvm, attr);
		break;
	case KVM_S390_VM_TOD_LOW:
		ret = kvm_s390_set_tod_low(kvm, attr);
		break;
	default:
		ret = -ENXIO;
		break;
	}
	return ret;
}

static int kvm_s390_get_tod_high(struct kvm *kvm, struct kvm_device_attr *attr)
{
	u8 gtod_high = 0;

	if (copy_to_user((void __user *)attr->addr, &gtod_high,
			 sizeof(gtod_high)))
		return -EFAULT;
	VM_EVENT(kvm, 3, "QUERY: TOD extension: 0x%x", gtod_high);

	return 0;
}

static int kvm_s390_get_tod_low(struct kvm *kvm, struct kvm_device_attr *attr)
{
	u64 gtod;

	gtod = kvm_s390_get_tod_clock_fast(kvm);
	if (copy_to_user((void __user *)attr->addr, &gtod, sizeof(gtod)))
		return -EFAULT;
	VM_EVENT(kvm, 3, "QUERY: TOD base: 0x%llx", gtod);

	return 0;
}

static int kvm_s390_get_tod(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	if (attr->flags)
		return -EINVAL;

	switch (attr->attr) {
	case KVM_S390_VM_TOD_HIGH:
		ret = kvm_s390_get_tod_high(kvm, attr);
		break;
	case KVM_S390_VM_TOD_LOW:
		ret = kvm_s390_get_tod_low(kvm, attr);
		break;
	default:
		ret = -ENXIO;
		break;
	}
	return ret;
}

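/*
 * Userspace sets the guest CPU model here. The requested instruction
 * blocking control (IBC) value is clamped to the range the machine
 * reports via SCLP: values above the unblocked IBC are capped, values
 * below the lowest supported IBC are raised. Like the feature bitmap
 * below, this may only happen before the first VCPU is created.
 */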
static int kvm_s390_set_processor(struct kvm *kvm, struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_cpu_processor *proc;
	u16 lowest_ibc, unblocked_ibc;
	int ret = 0;

	mutex_lock(&kvm->lock);
	if (atomic_read(&kvm->online_vcpus)) {
		ret = -EBUSY;
		goto out;
	}
	proc = kzalloc(sizeof(*proc), GFP_KERNEL);
	if (!proc) {
		ret = -ENOMEM;
		goto out;
	}
	if (!copy_from_user(proc, (void __user *)attr->addr,
			    sizeof(*proc))) {
		kvm->arch.model.cpuid = proc->cpuid;
		lowest_ibc = sclp.ibc >> 16 & 0xfff;
		unblocked_ibc = sclp.ibc & 0xfff;
		if (lowest_ibc) {
			if (proc->ibc > unblocked_ibc)
				kvm->arch.model.ibc = unblocked_ibc;
			else if (proc->ibc < lowest_ibc)
				kvm->arch.model.ibc = lowest_ibc;
			else
				kvm->arch.model.ibc = proc->ibc;
		}
		memcpy(kvm->arch.model.fac_list, proc->fac_list,
		       S390_ARCH_FAC_LIST_SIZE_BYTE);
	} else
		ret = -EFAULT;
	kfree(proc);
out:
	mutex_unlock(&kvm->lock);
	return ret;
}

static int kvm_s390_set_processor_feat(struct kvm *kvm,
				       struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_cpu_feat data;
	int ret = -EBUSY;

	if (copy_from_user(&data, (void __user *)attr->addr, sizeof(data)))
		return -EFAULT;
	if (!bitmap_subset((unsigned long *) data.feat,
			   kvm_s390_available_cpu_feat,
			   KVM_S390_VM_CPU_FEAT_NR_BITS))
		return -EINVAL;

	mutex_lock(&kvm->lock);
	if (!atomic_read(&kvm->online_vcpus)) {
		bitmap_copy(kvm->arch.cpu_feat, (unsigned long *) data.feat,
			    KVM_S390_VM_CPU_FEAT_NR_BITS);
		ret = 0;
	}
	mutex_unlock(&kvm->lock);
	return ret;
}

static int kvm_s390_set_processor_subfunc(struct kvm *kvm,
					  struct kvm_device_attr *attr)
{
	/*
	 * Once supported by kernel + hw, we have to store the subfunctions
	 * in kvm->arch and remember that user space configured them.
	 */
	return -ENXIO;
}

static int kvm_s390_set_cpu_model(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret = -ENXIO;

	switch (attr->attr) {
	case KVM_S390_VM_CPU_PROCESSOR:
		ret = kvm_s390_set_processor(kvm, attr);
		break;
	case KVM_S390_VM_CPU_PROCESSOR_FEAT:
		ret = kvm_s390_set_processor_feat(kvm, attr);
		break;
	case KVM_S390_VM_CPU_PROCESSOR_SUBFUNC:
		ret = kvm_s390_set_processor_subfunc(kvm, attr);
		break;
	}
	return ret;
}

static int kvm_s390_get_processor(struct kvm *kvm, struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_cpu_processor *proc;
	int ret = 0;

	proc = kzalloc(sizeof(*proc), GFP_KERNEL);
	if (!proc) {
		ret = -ENOMEM;
		goto out;
	}
	proc->cpuid = kvm->arch.model.cpuid;
	proc->ibc = kvm->arch.model.ibc;
	memcpy(&proc->fac_list, kvm->arch.model.fac_list,
	       S390_ARCH_FAC_LIST_SIZE_BYTE);
	if (copy_to_user((void __user *)attr->addr, proc, sizeof(*proc)))
		ret = -EFAULT;
	kfree(proc);
out:
	return ret;
}

static int kvm_s390_get_machine(struct kvm *kvm, struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_cpu_machine *mach;
	int ret = 0;

	mach = kzalloc(sizeof(*mach), GFP_KERNEL);
	if (!mach) {
		ret = -ENOMEM;
		goto out;
	}
	get_cpu_id((struct cpuid *) &mach->cpuid);
	mach->ibc = sclp.ibc;
	memcpy(&mach->fac_mask, kvm->arch.model.fac_mask,
	       S390_ARCH_FAC_LIST_SIZE_BYTE);
	memcpy((unsigned long *)&mach->fac_list, S390_lowcore.stfle_fac_list,
	       S390_ARCH_FAC_LIST_SIZE_BYTE);
	if (copy_to_user((void __user *)attr->addr, mach, sizeof(*mach)))
		ret = -EFAULT;
	kfree(mach);
out:
	return ret;
}

static int kvm_s390_get_processor_feat(struct kvm *kvm,
				       struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_cpu_feat data;

	bitmap_copy((unsigned long *) data.feat, kvm->arch.cpu_feat,
		    KVM_S390_VM_CPU_FEAT_NR_BITS);
	if (copy_to_user((void __user *)attr->addr, &data, sizeof(data)))
		return -EFAULT;
	return 0;
}

static int kvm_s390_get_machine_feat(struct kvm *kvm,
				     struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_cpu_feat data;

	bitmap_copy((unsigned long *) data.feat,
		    kvm_s390_available_cpu_feat,
		    KVM_S390_VM_CPU_FEAT_NR_BITS);
	if (copy_to_user((void __user *)attr->addr, &data, sizeof(data)))
		return -EFAULT;
	return 0;
}

static int kvm_s390_get_processor_subfunc(struct kvm *kvm,
					  struct kvm_device_attr *attr)
{
	/*
	 * Once we can actually configure subfunctions (kernel + hw support),
	 * we have to check if they were already set by user space, if so copy
	 * them from kvm->arch.
	 */
	return -ENXIO;
}

static int kvm_s390_get_machine_subfunc(struct kvm *kvm,
					struct kvm_device_attr *attr)
{
	if (copy_to_user((void __user *)attr->addr, &kvm_s390_available_subfunc,
	    sizeof(struct kvm_s390_vm_cpu_subfunc)))
		return -EFAULT;
	return 0;
}

static int kvm_s390_get_cpu_model(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret = -ENXIO;

	switch (attr->attr) {
	case KVM_S390_VM_CPU_PROCESSOR:
		ret = kvm_s390_get_processor(kvm, attr);
		break;
	case KVM_S390_VM_CPU_MACHINE:
		ret = kvm_s390_get_machine(kvm, attr);
		break;
	case KVM_S390_VM_CPU_PROCESSOR_FEAT:
		ret = kvm_s390_get_processor_feat(kvm, attr);
		break;
	case KVM_S390_VM_CPU_MACHINE_FEAT:
		ret = kvm_s390_get_machine_feat(kvm, attr);
		break;
	case KVM_S390_VM_CPU_PROCESSOR_SUBFUNC:
		ret = kvm_s390_get_processor_subfunc(kvm, attr);
		break;
	case KVM_S390_VM_CPU_MACHINE_SUBFUNC:
		ret = kvm_s390_get_machine_subfunc(kvm, attr);
		break;
	}
	return ret;
}

static int kvm_s390_vm_set_attr(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	switch (attr->group) {
	case KVM_S390_VM_MEM_CTRL:
		ret = kvm_s390_set_mem_control(kvm, attr);
		break;
	case KVM_S390_VM_TOD:
		ret = kvm_s390_set_tod(kvm, attr);
		break;
	case KVM_S390_VM_CPU_MODEL:
		ret = kvm_s390_set_cpu_model(kvm, attr);
		break;
	case KVM_S390_VM_CRYPTO:
		ret = kvm_s390_vm_set_crypto(kvm, attr);
		break;
	default:
		ret = -ENXIO;
		break;
	}

	return ret;
}

static int kvm_s390_vm_get_attr(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	switch (attr->group) {
	case KVM_S390_VM_MEM_CTRL:
		ret = kvm_s390_get_mem_control(kvm, attr);
		break;
	case KVM_S390_VM_TOD:
		ret = kvm_s390_get_tod(kvm, attr);
		break;
	case KVM_S390_VM_CPU_MODEL:
		ret = kvm_s390_get_cpu_model(kvm, attr);
		break;
	default:
		ret = -ENXIO;
		break;
	}

	return ret;
}

static int kvm_s390_vm_has_attr(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	switch (attr->group) {
	case KVM_S390_VM_MEM_CTRL:
		switch (attr->attr) {
		case KVM_S390_VM_MEM_ENABLE_CMMA:
		case KVM_S390_VM_MEM_CLR_CMMA:
		case KVM_S390_VM_MEM_LIMIT_SIZE:
			ret = 0;
			break;
		default:
			ret = -ENXIO;
			break;
		}
		break;
	case KVM_S390_VM_TOD:
		switch (attr->attr) {
		case KVM_S390_VM_TOD_LOW:
		case KVM_S390_VM_TOD_HIGH:
			ret = 0;
			break;
		default:
			ret = -ENXIO;
			break;
		}
		break;
	case KVM_S390_VM_CPU_MODEL:
		switch (attr->attr) {
		case KVM_S390_VM_CPU_PROCESSOR:
		case KVM_S390_VM_CPU_MACHINE:
		case KVM_S390_VM_CPU_PROCESSOR_FEAT:
		case KVM_S390_VM_CPU_MACHINE_FEAT:
		case KVM_S390_VM_CPU_MACHINE_SUBFUNC:
			ret = 0;
			break;
		/* configuring subfunctions is not supported yet */
		case KVM_S390_VM_CPU_PROCESSOR_SUBFUNC:
		default:
			ret = -ENXIO;
			break;
		}
		break;
	case KVM_S390_VM_CRYPTO:
		switch (attr->attr) {
		case KVM_S390_VM_CRYPTO_ENABLE_AES_KW:
		case KVM_S390_VM_CRYPTO_ENABLE_DEA_KW:
		case KVM_S390_VM_CRYPTO_DISABLE_AES_KW:
		case KVM_S390_VM_CRYPTO_DISABLE_DEA_KW:
			ret = 0;
			break;
		default:
			ret = -ENXIO;
			break;
		}
		break;
	default:
		ret = -ENXIO;
		break;
	}

	return ret;
}

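/*
 * Export guest storage keys to user space (used, for example, for
 * migration). A caller fills struct kvm_s390_skeys with start_gfn,
 * count and a buffer address and issues KVM_S390_GET_SKEYS on the VM
 * fd; KVM_S390_GET_SKEYS_NONE signals that the guest never enabled
 * storage keys, so there is nothing to transfer.
 */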
static long kvm_s390_get_skeys(struct kvm *kvm, struct kvm_s390_skeys *args)
{
	uint8_t *keys;
	uint64_t hva;
	unsigned long curkey;
	int i, r = 0;

	if (args->flags != 0)
		return -EINVAL;

	/* Is this guest using storage keys? */
	if (!mm_use_skey(current->mm))
		return KVM_S390_GET_SKEYS_NONE;

	/* Enforce sane limit on memory allocation */
	if (args->count < 1 || args->count > KVM_S390_SKEYS_MAX)
		return -EINVAL;

	keys = kmalloc_array(args->count, sizeof(uint8_t),
			     GFP_KERNEL | __GFP_NOWARN);
	if (!keys)
		keys = vmalloc(sizeof(uint8_t) * args->count);
	if (!keys)
		return -ENOMEM;

	for (i = 0; i < args->count; i++) {
		hva = gfn_to_hva(kvm, args->start_gfn + i);
		if (kvm_is_error_hva(hva)) {
			r = -EFAULT;
			goto out;
		}

		curkey = get_guest_storage_key(current->mm, hva);
		if (IS_ERR_VALUE(curkey)) {
			r = curkey;
			goto out;
		}
		keys[i] = curkey;
	}

	r = copy_to_user((uint8_t __user *)args->skeydata_addr, keys,
			 sizeof(uint8_t) * args->count);
	if (r)
		r = -EFAULT;
out:
	kvfree(keys);
	return r;
}

static long kvm_s390_set_skeys(struct kvm *kvm, struct kvm_s390_skeys *args)
{
	uint8_t *keys;
	uint64_t hva;
	int i, r = 0;

	if (args->flags != 0)
		return -EINVAL;

	/* Enforce sane limit on memory allocation */
	if (args->count < 1 || args->count > KVM_S390_SKEYS_MAX)
		return -EINVAL;

	keys = kmalloc_array(args->count, sizeof(uint8_t),
			     GFP_KERNEL | __GFP_NOWARN);
	if (!keys)
		keys = vmalloc(sizeof(uint8_t) * args->count);
	if (!keys)
		return -ENOMEM;

	r = copy_from_user(keys, (uint8_t __user *)args->skeydata_addr,
			   sizeof(uint8_t) * args->count);
	if (r) {
		r = -EFAULT;
		goto out;
	}

	/* Enable storage key handling for the guest */
	r = s390_enable_skey();
	if (r)
		goto out;

	for (i = 0; i < args->count; i++) {
		hva = gfn_to_hva(kvm, args->start_gfn + i);
		if (kvm_is_error_hva(hva)) {
			r = -EFAULT;
			goto out;
		}

		/* Lowest order bit is reserved */
		if (keys[i] & 0x01) {
			r = -EINVAL;
			goto out;
		}

		r = set_guest_storage_key(current->mm, hva,
					  (unsigned long)keys[i], 0);
		if (r)
			goto out;
	}
out:
	kvfree(keys);
	return r;
}

long kvm_arch_vm_ioctl(struct file *filp,
		       unsigned int ioctl, unsigned long arg)
{
	struct kvm *kvm = filp->private_data;
	void __user *argp = (void __user *)arg;
	struct kvm_device_attr attr;
	int r;

	switch (ioctl) {
	case KVM_S390_INTERRUPT: {
		struct kvm_s390_interrupt s390int;

		r = -EFAULT;
		if (copy_from_user(&s390int, argp, sizeof(s390int)))
			break;
		r = kvm_s390_inject_vm(kvm, &s390int);
		break;
	}
	case KVM_ENABLE_CAP: {
		struct kvm_enable_cap cap;

		r = -EFAULT;
		if (copy_from_user(&cap, argp, sizeof(cap)))
			break;
		r = kvm_vm_ioctl_enable_cap(kvm, &cap);
		break;
	}
	case KVM_CREATE_IRQCHIP: {
		struct kvm_irq_routing_entry routing;

		r = -EINVAL;
		if (kvm->arch.use_irqchip) {
			/* Set up dummy routing. */
			memset(&routing, 0, sizeof(routing));
			r = kvm_set_irq_routing(kvm, &routing, 0, 0);
		}
		break;
	}
	case KVM_SET_DEVICE_ATTR: {
		r = -EFAULT;
		if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
			break;
		r = kvm_s390_vm_set_attr(kvm, &attr);
		break;
	}
	case KVM_GET_DEVICE_ATTR: {
		r = -EFAULT;
		if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
			break;
		r = kvm_s390_vm_get_attr(kvm, &attr);
		break;
	}
	case KVM_HAS_DEVICE_ATTR: {
		r = -EFAULT;
		if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
			break;
		r = kvm_s390_vm_has_attr(kvm, &attr);
		break;
	}
	case KVM_S390_GET_SKEYS: {
		struct kvm_s390_skeys args;

		r = -EFAULT;
		if (copy_from_user(&args, argp,
				   sizeof(struct kvm_s390_skeys)))
			break;
		r = kvm_s390_get_skeys(kvm, &args);
		break;
	}
	case KVM_S390_SET_SKEYS: {
		struct kvm_s390_skeys args;

		r = -EFAULT;
		if (copy_from_user(&args, argp,
				   sizeof(struct kvm_s390_skeys)))
			break;
		r = kvm_s390_set_skeys(kvm, &args);
		break;
	}
	default:
		r = -ENOTTY;
	}

	return r;
}

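/*
 * Query the AP (adjunct processor) configuration via PQAP(QCI), coded
 * as ".long 0xb2af0000" (presumably for assemblers that lack the
 * mnemonic). If the instruction is unavailable, the EX_TABLE fixup
 * skips the condition-code extraction, so cc keeps its initial value
 * of 0 and the zeroed config buffer simply reports no APXA.
 */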
static int kvm_s390_query_ap_config(u8 *config)
{
	u32 fcn_code = 0x04000000UL;
	u32 cc = 0;

	memset(config, 0, 128);
	asm volatile(
		"lgr 0,%1\n"
		"lgr 2,%2\n"
		".long 0xb2af0000\n"		/* PQAP(QCI) */
		"0: ipm %0\n"
		"srl %0,28\n"
		"1:\n"
		EX_TABLE(0b, 1b)
		: "+r" (cc)
		: "r" (fcn_code), "r" (config)
		: "cc", "0", "2", "memory"
	);

	return cc;
}

static int kvm_s390_apxa_installed(void)
{
	u8 config[128];
	int cc;

	if (test_facility(12)) {
		cc = kvm_s390_query_ap_config(config);

		if (cc)
			pr_err("PQAP(QCI) failed with cc=%d", cc);
		else
			return config[0] & 0x40;
	}

	return 0;
}

static void kvm_s390_set_crycb_format(struct kvm *kvm)
{
	kvm->arch.crypto.crycbd = (__u32)(unsigned long) kvm->arch.crypto.crycb;

	if (kvm_s390_apxa_installed())
		kvm->arch.crypto.crycbd |= CRYCB_FORMAT2;
	else
		kvm->arch.crypto.crycbd |= CRYCB_FORMAT1;
}

static u64 kvm_s390_get_initial_cpuid(void)
{
	struct cpuid cpuid;

	get_cpu_id(&cpuid);
	cpuid.version = 0xff;
	return *((u64 *) &cpuid);
}

static void kvm_s390_crypto_init(struct kvm *kvm)
{
	if (!test_kvm_facility(kvm, 76))
		return;

	kvm->arch.crypto.crycb = &kvm->arch.sie_page2->crycb;
	kvm_s390_set_crycb_format(kvm);

	/* Enable AES/DEA protected key functions by default */
	kvm->arch.crypto.aes_kw = 1;
	kvm->arch.crypto.dea_kw = 1;
	get_random_bytes(kvm->arch.crypto.crycb->aes_wrapping_key_mask,
			 sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
	get_random_bytes(kvm->arch.crypto.crycb->dea_wrapping_key_mask,
			 sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));
}

static void sca_dispose(struct kvm *kvm)
{
	if (kvm->arch.use_esca)
		free_pages_exact(kvm->arch.sca, sizeof(struct esca_block));
	else
		free_page((unsigned long)(kvm->arch.sca));
	kvm->arch.sca = NULL;
}

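/*
 * VM creation. Note the sca_offset scheme below: each new VM places
 * its basic SCA 16 bytes further into its page (serialized by
 * kvm_lock, wrapping before the block would cross a page boundary),
 * likely so that this heavily accessed structure does not start at the
 * same cache lines for every VM.
 */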
int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
{
	int i, rc;
	char debug_name[16];
	static unsigned long sca_offset;

	rc = -EINVAL;
#ifdef CONFIG_KVM_S390_UCONTROL
	if (type & ~KVM_VM_S390_UCONTROL)
		goto out_err;
	if ((type & KVM_VM_S390_UCONTROL) && (!capable(CAP_SYS_ADMIN)))
		goto out_err;
#else
	if (type)
		goto out_err;
#endif

	rc = s390_enable_sie();
	if (rc)
		goto out_err;

	rc = -ENOMEM;

	ratelimit_state_init(&kvm->arch.sthyi_limit, 5 * HZ, 500);

	kvm->arch.use_esca = 0; /* start with basic SCA */
	rwlock_init(&kvm->arch.sca_lock);
	kvm->arch.sca = (struct bsca_block *) get_zeroed_page(GFP_KERNEL);
	if (!kvm->arch.sca)
		goto out_err;
	spin_lock(&kvm_lock);
	sca_offset += 16;
	if (sca_offset + sizeof(struct bsca_block) > PAGE_SIZE)
		sca_offset = 0;
	kvm->arch.sca = (struct bsca_block *)
			((char *) kvm->arch.sca + sca_offset);
	spin_unlock(&kvm_lock);

	sprintf(debug_name, "kvm-%u", current->pid);

	kvm->arch.dbf = debug_register(debug_name, 32, 1, 7 * sizeof(long));
	if (!kvm->arch.dbf)
		goto out_err;

	kvm->arch.sie_page2 =
	     (struct sie_page2 *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!kvm->arch.sie_page2)
		goto out_err;

	/* Populate the facility mask initially. */
	memcpy(kvm->arch.model.fac_mask, S390_lowcore.stfle_fac_list,
	       S390_ARCH_FAC_LIST_SIZE_BYTE);
	for (i = 0; i < S390_ARCH_FAC_LIST_SIZE_U64; i++) {
		if (i < kvm_s390_fac_list_mask_size())
			kvm->arch.model.fac_mask[i] &= kvm_s390_fac_list_mask[i];
		else
			kvm->arch.model.fac_mask[i] = 0UL;
	}

	/* Populate the facility list initially. */
	kvm->arch.model.fac_list = kvm->arch.sie_page2->fac_list;
	memcpy(kvm->arch.model.fac_list, kvm->arch.model.fac_mask,
	       S390_ARCH_FAC_LIST_SIZE_BYTE);

	set_kvm_facility(kvm->arch.model.fac_mask, 74);
	set_kvm_facility(kvm->arch.model.fac_list, 74);

	kvm->arch.model.cpuid = kvm_s390_get_initial_cpuid();
	kvm->arch.model.ibc = sclp.ibc & 0x0fff;

	kvm_s390_crypto_init(kvm);

	spin_lock_init(&kvm->arch.float_int.lock);
	for (i = 0; i < FIRQ_LIST_COUNT; i++)
		INIT_LIST_HEAD(&kvm->arch.float_int.lists[i]);
	init_waitqueue_head(&kvm->arch.ipte_wq);
	mutex_init(&kvm->arch.ipte_mutex);

	debug_register_view(kvm->arch.dbf, &debug_sprintf_view);
	VM_EVENT(kvm, 3, "vm created with type %lu", type);

	if (type & KVM_VM_S390_UCONTROL) {
		kvm->arch.gmap = NULL;
		kvm->arch.mem_limit = KVM_S390_NO_MEM_LIMIT;
	} else {
		if (sclp.hamax == U64_MAX)
			kvm->arch.mem_limit = TASK_MAX_SIZE;
		else
			kvm->arch.mem_limit = min_t(unsigned long, TASK_MAX_SIZE,
						    sclp.hamax + 1);
		kvm->arch.gmap = gmap_alloc(current->mm, kvm->arch.mem_limit - 1);
		if (!kvm->arch.gmap)
			goto out_err;
		kvm->arch.gmap->private = kvm;
		kvm->arch.gmap->pfault_enabled = 0;
	}

	kvm->arch.css_support = 0;
	kvm->arch.use_irqchip = 0;
	kvm->arch.epoch = 0;

	spin_lock_init(&kvm->arch.start_stop_lock);
	KVM_EVENT(3, "vm 0x%pK created by pid %u", kvm, current->pid);

	return 0;
out_err:
	free_page((unsigned long)kvm->arch.sie_page2);
	debug_unregister(kvm->arch.dbf);
	sca_dispose(kvm);
	KVM_EVENT(3, "creation of vm failed: %d", rc);
	return rc;
}

void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{
	VCPU_EVENT(vcpu, 3, "%s", "free cpu");
	trace_kvm_s390_destroy_vcpu(vcpu->vcpu_id);
	kvm_s390_clear_local_irqs(vcpu);
	kvm_clear_async_pf_completion_queue(vcpu);
	if (!kvm_is_ucontrol(vcpu->kvm))
		sca_del_vcpu(vcpu);

	if (kvm_is_ucontrol(vcpu->kvm))
		gmap_free(vcpu->arch.gmap);

	if (vcpu->kvm->arch.use_cmma)
		kvm_s390_vcpu_unsetup_cmma(vcpu);
	free_page((unsigned long)(vcpu->arch.sie_block));

	kvm_vcpu_uninit(vcpu);
	kmem_cache_free(kvm_vcpu_cache, vcpu);
}

static void kvm_free_vcpus(struct kvm *kvm)
{
	unsigned int i;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(i, vcpu, kvm)
		kvm_arch_vcpu_destroy(vcpu);

	mutex_lock(&kvm->lock);
	for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
		kvm->vcpus[i] = NULL;

	atomic_set(&kvm->online_vcpus, 0);
	mutex_unlock(&kvm->lock);
}

void kvm_arch_destroy_vm(struct kvm *kvm)
{
	kvm_free_vcpus(kvm);
	sca_dispose(kvm);
	debug_unregister(kvm->arch.dbf);
	free_page((unsigned long)kvm->arch.sie_page2);
	if (!kvm_is_ucontrol(kvm))
		gmap_free(kvm->arch.gmap);
	kvm_s390_destroy_adapters(kvm);
	kvm_s390_clear_float_irqs(kvm);
	KVM_EVENT(3, "vm 0x%pK destroyed", kvm);
}

/* Section: vcpu related */
static int __kvm_ucontrol_vcpu_init(struct kvm_vcpu *vcpu)
{
	vcpu->arch.gmap = gmap_alloc(current->mm, -1UL);
	if (!vcpu->arch.gmap)
		return -ENOMEM;
	vcpu->arch.gmap->private = vcpu->kvm;

	return 0;
}

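/*
 * The SCA (system control area) holds one entry per VCPU plus the MCN
 * bitmask of valid entries. A basic SCA (bsca) covers
 * KVM_S390_BSCA_CPU_SLOTS VCPUs; when a higher VCPU id is requested
 * and SCLP signals ESCA support, sca_switch_to_extended() migrates the
 * VM to an extended SCA. Plain readers take sca_lock for reading; the
 * switch takes it for writing with all VCPUs blocked.
 */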
static void sca_del_vcpu(struct kvm_vcpu *vcpu)
{
	read_lock(&vcpu->kvm->arch.sca_lock);
	if (vcpu->kvm->arch.use_esca) {
		struct esca_block *sca = vcpu->kvm->arch.sca;

		clear_bit_inv(vcpu->vcpu_id, (unsigned long *) sca->mcn);
		sca->cpu[vcpu->vcpu_id].sda = 0;
	} else {
		struct bsca_block *sca = vcpu->kvm->arch.sca;

		clear_bit_inv(vcpu->vcpu_id, (unsigned long *) &sca->mcn);
		sca->cpu[vcpu->vcpu_id].sda = 0;
	}
	read_unlock(&vcpu->kvm->arch.sca_lock);
}

static void sca_add_vcpu(struct kvm_vcpu *vcpu)
{
	read_lock(&vcpu->kvm->arch.sca_lock);
	if (vcpu->kvm->arch.use_esca) {
		struct esca_block *sca = vcpu->kvm->arch.sca;

		sca->cpu[vcpu->vcpu_id].sda = (__u64) vcpu->arch.sie_block;
		vcpu->arch.sie_block->scaoh = (__u32)(((__u64)sca) >> 32);
		vcpu->arch.sie_block->scaol = (__u32)(__u64)sca & ~0x3fU;
		vcpu->arch.sie_block->ecb2 |= 0x04U;
		set_bit_inv(vcpu->vcpu_id, (unsigned long *) sca->mcn);
	} else {
		struct bsca_block *sca = vcpu->kvm->arch.sca;

		sca->cpu[vcpu->vcpu_id].sda = (__u64) vcpu->arch.sie_block;
		vcpu->arch.sie_block->scaoh = (__u32)(((__u64)sca) >> 32);
		vcpu->arch.sie_block->scaol = (__u32)(__u64)sca;
		set_bit_inv(vcpu->vcpu_id, (unsigned long *) &sca->mcn);
	}
	read_unlock(&vcpu->kvm->arch.sca_lock);
}

/* Basic SCA to Extended SCA data copy routines */
static inline void sca_copy_entry(struct esca_entry *d, struct bsca_entry *s)
{
	d->sda = s->sda;
	d->sigp_ctrl.c = s->sigp_ctrl.c;
	d->sigp_ctrl.scn = s->sigp_ctrl.scn;
}

static void sca_copy_b_to_e(struct esca_block *d, struct bsca_block *s)
{
	int i;

	d->ipte_control = s->ipte_control;
	d->mcn[0] = s->mcn;
	for (i = 0; i < KVM_S390_BSCA_CPU_SLOTS; i++)
		sca_copy_entry(&d->cpu[i], &s->cpu[i]);
}

static int sca_switch_to_extended(struct kvm *kvm)
{
	struct bsca_block *old_sca = kvm->arch.sca;
	struct esca_block *new_sca;
	struct kvm_vcpu *vcpu;
	unsigned int vcpu_idx;
	u32 scaol, scaoh;

	new_sca = alloc_pages_exact(sizeof(*new_sca), GFP_KERNEL|__GFP_ZERO);
	if (!new_sca)
		return -ENOMEM;

	scaoh = (u32)((u64)(new_sca) >> 32);
	scaol = (u32)(u64)(new_sca) & ~0x3fU;

	kvm_s390_vcpu_block_all(kvm);
	write_lock(&kvm->arch.sca_lock);

	sca_copy_b_to_e(new_sca, old_sca);

	kvm_for_each_vcpu(vcpu_idx, vcpu, kvm) {
		vcpu->arch.sie_block->scaoh = scaoh;
		vcpu->arch.sie_block->scaol = scaol;
		vcpu->arch.sie_block->ecb2 |= 0x04U;
	}
	kvm->arch.sca = new_sca;
	kvm->arch.use_esca = 1;

	write_unlock(&kvm->arch.sca_lock);
	kvm_s390_vcpu_unblock_all(kvm);

	free_page((unsigned long)old_sca);

	VM_EVENT(kvm, 2, "Switched to ESCA (0x%pK -> 0x%pK)",
		 old_sca, kvm->arch.sca);
	return 0;
}

static int sca_can_add_vcpu(struct kvm *kvm, unsigned int id)
{
	int rc;

	if (id < KVM_S390_BSCA_CPU_SLOTS)
		return true;
	if (!sclp.has_esca)
		return false;

	mutex_lock(&kvm->lock);
	rc = kvm->arch.use_esca ? 0 : sca_switch_to_extended(kvm);
	mutex_unlock(&kvm->lock);

	return rc == 0 && id < KVM_S390_ESCA_CPU_SLOTS;
}

int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
{
	vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
	kvm_clear_async_pf_completion_queue(vcpu);
	vcpu->run->kvm_valid_regs = KVM_SYNC_PREFIX |
				    KVM_SYNC_GPRS |
				    KVM_SYNC_ACRS |
				    KVM_SYNC_CRS |
				    KVM_SYNC_ARCH0 |
				    KVM_SYNC_PFAULT;
	if (test_kvm_facility(vcpu->kvm, 64))
		vcpu->run->kvm_valid_regs |= KVM_SYNC_RICCB;
	/* fprs can be synchronized via vrs, even if the guest has no vx. With
	 * MACHINE_HAS_VX, (load|store)_fpu_regs() will work with vrs format.
	 */
	if (MACHINE_HAS_VX)
		vcpu->run->kvm_valid_regs |= KVM_SYNC_VRS;
	else
		vcpu->run->kvm_valid_regs |= KVM_SYNC_FPRS;

	if (kvm_is_ucontrol(vcpu->kvm))
		return __kvm_ucontrol_vcpu_init(vcpu);

	return 0;
}

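/*
 * CPU timer accounting: while accounting runs, the value in the SIE
 * block is only a snapshot and the time elapsed since cputm_start must
 * be subtracted to get the current guest CPU timer. The seqcount lets
 * other threads read a consistent (cputm, cputm_start) pair locklessly
 * while the owning VCPU thread updates both with preemption disabled.
 */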
/* needs disabled preemption to protect from TOD sync and vcpu_load/put */
static void __start_cpu_timer_accounting(struct kvm_vcpu *vcpu)
{
	WARN_ON_ONCE(vcpu->arch.cputm_start != 0);
	raw_write_seqcount_begin(&vcpu->arch.cputm_seqcount);
	vcpu->arch.cputm_start = get_tod_clock_fast();
	raw_write_seqcount_end(&vcpu->arch.cputm_seqcount);
}

/* needs disabled preemption to protect from TOD sync and vcpu_load/put */
static void __stop_cpu_timer_accounting(struct kvm_vcpu *vcpu)
{
	WARN_ON_ONCE(vcpu->arch.cputm_start == 0);
	raw_write_seqcount_begin(&vcpu->arch.cputm_seqcount);
	vcpu->arch.sie_block->cputm -= get_tod_clock_fast() - vcpu->arch.cputm_start;
	vcpu->arch.cputm_start = 0;
	raw_write_seqcount_end(&vcpu->arch.cputm_seqcount);
}

/* needs disabled preemption to protect from TOD sync and vcpu_load/put */
static void __enable_cpu_timer_accounting(struct kvm_vcpu *vcpu)
{
	WARN_ON_ONCE(vcpu->arch.cputm_enabled);
	vcpu->arch.cputm_enabled = true;
	__start_cpu_timer_accounting(vcpu);
}

/* needs disabled preemption to protect from TOD sync and vcpu_load/put */
static void __disable_cpu_timer_accounting(struct kvm_vcpu *vcpu)
{
	WARN_ON_ONCE(!vcpu->arch.cputm_enabled);
	__stop_cpu_timer_accounting(vcpu);
	vcpu->arch.cputm_enabled = false;
}

static void enable_cpu_timer_accounting(struct kvm_vcpu *vcpu)
{
	preempt_disable(); /* protect from TOD sync and vcpu_load/put */
	__enable_cpu_timer_accounting(vcpu);
	preempt_enable();
}

static void disable_cpu_timer_accounting(struct kvm_vcpu *vcpu)
{
	preempt_disable(); /* protect from TOD sync and vcpu_load/put */
	__disable_cpu_timer_accounting(vcpu);
	preempt_enable();
}

/* set the cpu timer - may only be called from the VCPU thread itself */
void kvm_s390_set_cpu_timer(struct kvm_vcpu *vcpu, __u64 cputm)
{
	preempt_disable(); /* protect from TOD sync and vcpu_load/put */
	raw_write_seqcount_begin(&vcpu->arch.cputm_seqcount);
	if (vcpu->arch.cputm_enabled)
		vcpu->arch.cputm_start = get_tod_clock_fast();
	vcpu->arch.sie_block->cputm = cputm;
	raw_write_seqcount_end(&vcpu->arch.cputm_seqcount);
	preempt_enable();
}

/* update and get the cpu timer - can also be called from other VCPU threads */
__u64 kvm_s390_get_cpu_timer(struct kvm_vcpu *vcpu)
{
	unsigned int seq;
	__u64 value;

	if (unlikely(!vcpu->arch.cputm_enabled))
		return vcpu->arch.sie_block->cputm;

	preempt_disable(); /* protect from TOD sync and vcpu_load/put */
	do {
		seq = raw_read_seqcount(&vcpu->arch.cputm_seqcount);
		/*
		 * If the writer would ever execute a read in the critical
		 * section, e.g. in irq context, we have a deadlock.
		 */
		WARN_ON_ONCE((seq & 1) && smp_processor_id() == vcpu->cpu);
		value = vcpu->arch.sie_block->cputm;
		/* if cputm_start is 0, accounting is being started/stopped */
		if (likely(vcpu->arch.cputm_start))
			value -= get_tod_clock_fast() - vcpu->arch.cputm_start;
	} while (read_seqcount_retry(&vcpu->arch.cputm_seqcount, seq & ~1));
	preempt_enable();
	return value;
}

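/*
 * Loading a VCPU swaps FPU state by pointer rather than by copying:
 * after save_fpu_regs() has stashed the current contents,
 * current->thread.fpu.regs is simply pointed at the guest save area
 * (vector or floating-point format, depending on MACHINE_HAS_VX) and
 * the hardware registers are reloaded lazily on the way back out.
 */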
void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	/* Save host register state */
	save_fpu_regs();
	vcpu->arch.host_fpregs.fpc = current->thread.fpu.fpc;
	vcpu->arch.host_fpregs.regs = current->thread.fpu.regs;

	if (MACHINE_HAS_VX)
		current->thread.fpu.regs = vcpu->run->s.regs.vrs;
	else
		current->thread.fpu.regs = vcpu->run->s.regs.fprs;
	current->thread.fpu.fpc = vcpu->run->s.regs.fpc;
	if (test_fp_ctl(current->thread.fpu.fpc))
		/* User space provided an invalid FPC, let's clear it */
		current->thread.fpu.fpc = 0;

	save_access_regs(vcpu->arch.host_acrs);
	restore_access_regs(vcpu->run->s.regs.acrs);
	gmap_enable(vcpu->arch.gmap);
	atomic_or(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
	if (vcpu->arch.cputm_enabled && !is_vcpu_idle(vcpu))
		__start_cpu_timer_accounting(vcpu);
	vcpu->cpu = cpu;
}

void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
	vcpu->cpu = -1;
	if (vcpu->arch.cputm_enabled && !is_vcpu_idle(vcpu))
		__stop_cpu_timer_accounting(vcpu);
	atomic_andnot(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
	gmap_disable(vcpu->arch.gmap);

	/* Save guest register state */
	save_fpu_regs();
	vcpu->run->s.regs.fpc = current->thread.fpu.fpc;

	/* Restore host register state */
	current->thread.fpu.fpc = vcpu->arch.host_fpregs.fpc;
	current->thread.fpu.regs = vcpu->arch.host_fpregs.regs;

	save_access_regs(vcpu->run->s.regs.acrs);
	restore_access_regs(vcpu->arch.host_acrs);
}

static void kvm_s390_vcpu_initial_reset(struct kvm_vcpu *vcpu)
{
	/* this equals initial cpu reset in pop, but we don't switch to ESA */
	vcpu->arch.sie_block->gpsw.mask = 0UL;
	vcpu->arch.sie_block->gpsw.addr = 0UL;
	kvm_s390_set_prefix(vcpu, 0);
	kvm_s390_set_cpu_timer(vcpu, 0);
	vcpu->arch.sie_block->ckc = 0UL;
	vcpu->arch.sie_block->todpr = 0;
	memset(vcpu->arch.sie_block->gcr, 0, 16 * sizeof(__u64));
	vcpu->arch.sie_block->gcr[0] = 0xE0UL;
	vcpu->arch.sie_block->gcr[14] = 0xC2000000UL;
	/* make sure the new fpc will be lazily loaded */
	save_fpu_regs();
	current->thread.fpu.fpc = 0;
	vcpu->arch.sie_block->gbea = 1;
	vcpu->arch.sie_block->pp = 0;
	vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
	kvm_clear_async_pf_completion_queue(vcpu);
	if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm))
		kvm_s390_vcpu_stop(vcpu);
	kvm_s390_clear_local_irqs(vcpu);
}

72f25020 1764 mutex_lock(&vcpu->kvm->lock);
fdf03650 1765 preempt_disable();
72f25020 1766 vcpu->arch.sie_block->epoch = vcpu->kvm->arch.epoch;
fdf03650 1767 preempt_enable();
72f25020 1768 mutex_unlock(&vcpu->kvm->lock);
25508824 1769 if (!kvm_is_ucontrol(vcpu->kvm)) {
dafd032a 1770 vcpu->arch.gmap = vcpu->kvm->arch.gmap;
eaa78f34 1771 sca_add_vcpu(vcpu);
25508824
DH
1772 }
1773
42897d86
MT
1774}
1775
5102ee87
TK
1776static void kvm_s390_vcpu_crypto_setup(struct kvm_vcpu *vcpu)
1777{
9d8d5786 1778 if (!test_kvm_facility(vcpu->kvm, 76))
5102ee87
TK
1779 return;
1780
a374e892
TK
1781 vcpu->arch.sie_block->ecb3 &= ~(ECB3_AES | ECB3_DEA);
1782
1783 if (vcpu->kvm->arch.crypto.aes_kw)
1784 vcpu->arch.sie_block->ecb3 |= ECB3_AES;
1785 if (vcpu->kvm->arch.crypto.dea_kw)
1786 vcpu->arch.sie_block->ecb3 |= ECB3_DEA;
1787
5102ee87
TK
1788 vcpu->arch.sie_block->crycbd = vcpu->kvm->arch.crypto.crycbd;
1789}
1790
b31605c1
DD
1791void kvm_s390_vcpu_unsetup_cmma(struct kvm_vcpu *vcpu)
1792{
1793 free_page(vcpu->arch.sie_block->cbrlo);
1794 vcpu->arch.sie_block->cbrlo = 0;
1795}
1796
1797int kvm_s390_vcpu_setup_cmma(struct kvm_vcpu *vcpu)
1798{
1799 vcpu->arch.sie_block->cbrlo = get_zeroed_page(GFP_KERNEL);
1800 if (!vcpu->arch.sie_block->cbrlo)
1801 return -ENOMEM;
1802
1803 vcpu->arch.sie_block->ecb2 |= 0x80;
1804 vcpu->arch.sie_block->ecb2 &= ~0x08;
1805 return 0;
1806}
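/*
 * A note on the magic ECB2 bits above (names as given to them in later
 * kernel headers, stated here as an assumption): 0x80 enables CMMA
 * interpretation (ECB2_CMMA) and 0x08 controls PFMF interpretation
 * (ECB2_PFMFI).
 */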
1807
91520f1a
MM
1808static void kvm_s390_vcpu_setup_model(struct kvm_vcpu *vcpu)
1809{
1810 struct kvm_s390_cpu_model *model = &vcpu->kvm->arch.model;
1811
91520f1a 1812 vcpu->arch.sie_block->ibc = model->ibc;
80bc79dc 1813 if (test_kvm_facility(vcpu->kvm, 7))
c54f0d6a 1814 vcpu->arch.sie_block->fac = (u32)(u64) model->fac_list;
91520f1a
MM
1815}
1816
b0c632db
HC
1817int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
1818{
b31605c1 1819 int rc = 0;
b31288fa 1820
9e6dabef
CH
1821 atomic_set(&vcpu->arch.sie_block->cpuflags, CPUSTAT_ZARCH |
1822 CPUSTAT_SM |
a4a4f191
GH
1823 CPUSTAT_STOPPED);
1824
53df84f8 1825 if (test_kvm_facility(vcpu->kvm, 78))
805de8f4 1826 atomic_or(CPUSTAT_GED2, &vcpu->arch.sie_block->cpuflags);
53df84f8 1827 else if (test_kvm_facility(vcpu->kvm, 8))
805de8f4 1828 atomic_or(CPUSTAT_GED, &vcpu->arch.sie_block->cpuflags);
a4a4f191 1829
91520f1a
MM
1830 kvm_s390_vcpu_setup_model(vcpu);
1831
bd50e8ec
DH
1832 vcpu->arch.sie_block->ecb = 0x02;
1833 if (test_kvm_facility(vcpu->kvm, 9))
1834 vcpu->arch.sie_block->ecb |= 0x04;
9d8d5786 1835 if (test_kvm_facility(vcpu->kvm, 50) && test_kvm_facility(vcpu->kvm, 73))
7feb6bb8
MM
1836 vcpu->arch.sie_block->ecb |= 0x10;
1837
d6af0b49
DH
1838 if (test_kvm_facility(vcpu->kvm, 8))
1839 vcpu->arch.sie_block->ecb2 |= 0x08;
ea5f4969 1840 vcpu->arch.sie_block->eca = 0xC1002000U;
37c5f6c8 1841 if (sclp.has_siif)
217a4406 1842 vcpu->arch.sie_block->eca |= 1;
37c5f6c8 1843 if (sclp.has_sigpif)
ea5f4969 1844 vcpu->arch.sie_block->eca |= 0x10000000U;
c6e5f166
FZ
1845 if (test_kvm_facility(vcpu->kvm, 64))
1846 vcpu->arch.sie_block->ecb3 |= 0x01;
18280d8b 1847 if (test_kvm_facility(vcpu->kvm, 129)) {
13211ea7
EF
1848 vcpu->arch.sie_block->eca |= 0x00020000;
1849 vcpu->arch.sie_block->ecd |= 0x20000000;
1850 }
c6e5f166 1851 vcpu->arch.sie_block->riccbd = (unsigned long) &vcpu->run->s.regs.riccb;
492d8642 1852 vcpu->arch.sie_block->ictl |= ICTL_ISKE | ICTL_SSKE | ICTL_RRBE;
95ca2cb5
JF
1853 if (test_kvm_facility(vcpu->kvm, 74))
1854 vcpu->arch.sie_block->ictl |= ICTL_OPEREXC;
5a5e6536 1855
e6db1d61 1856 if (vcpu->kvm->arch.use_cmma) {
b31605c1
DD
1857 rc = kvm_s390_vcpu_setup_cmma(vcpu);
1858 if (rc)
1859 return rc;
b31288fa 1860 }
0ac96caf 1861 hrtimer_init(&vcpu->arch.ckc_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
ca872302 1862 vcpu->arch.ckc_timer.function = kvm_s390_idle_wakeup;
9d8d5786 1863
5102ee87
TK
1864 kvm_s390_vcpu_crypto_setup(vcpu);
1865
b31605c1 1866 return rc;
b0c632db
HC
1867}
1868
1869struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
1870 unsigned int id)
1871{
4d47555a 1872 struct kvm_vcpu *vcpu;
7feb6bb8 1873 struct sie_page *sie_page;
4d47555a
CO
1874 int rc = -EINVAL;
1875
4215825e 1876 if (!kvm_is_ucontrol(kvm) && !sca_can_add_vcpu(kvm, id))
4d47555a
CO
1877 goto out;
1878
1879 rc = -ENOMEM;
b0c632db 1880
b110feaf 1881 vcpu = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
b0c632db 1882 if (!vcpu)
4d47555a 1883 goto out;
b0c632db 1884
7feb6bb8
MM
1885 sie_page = (struct sie_page *) get_zeroed_page(GFP_KERNEL);
1886 if (!sie_page)
b0c632db
HC
1887 goto out_free_cpu;
1888
7feb6bb8
MM
1889 vcpu->arch.sie_block = &sie_page->sie_block;
1890 vcpu->arch.sie_block->itdba = (unsigned long) &sie_page->itdb;
1891
b0c632db 1892 vcpu->arch.sie_block->icpua = id;
ba5c1e9b 1893 spin_lock_init(&vcpu->arch.local_int.lock);
ba5c1e9b 1894 vcpu->arch.local_int.float_int = &kvm->arch.float_int;
d0321a24 1895 vcpu->arch.local_int.wq = &vcpu->wq;
5288fbf0 1896 vcpu->arch.local_int.cpuflags = &vcpu->arch.sie_block->cpuflags;
9c23a131 1897 seqcount_init(&vcpu->arch.cputm_seqcount);
ba5c1e9b 1898
b0c632db
HC
1899 rc = kvm_vcpu_init(vcpu, kvm, id);
1900 if (rc)
9abc2a08 1901 goto out_free_sie_block;
8335713a 1902 VM_EVENT(kvm, 3, "create cpu %d at 0x%pK, sie block at 0x%pK", id, vcpu,
b0c632db 1903 vcpu->arch.sie_block);
ade38c31 1904 trace_kvm_s390_create_vcpu(id, vcpu, vcpu->arch.sie_block);
b0c632db 1905
b0c632db 1906 return vcpu;
7b06bf2f
WY
1907out_free_sie_block:
1908 free_page((unsigned long)(vcpu->arch.sie_block));
b0c632db 1909out_free_cpu:
b110feaf 1910 kmem_cache_free(kvm_vcpu_cache, vcpu);
4d47555a 1911out:
b0c632db
HC
1912 return ERR_PTR(rc);
1913}
1914
b0c632db
HC
1915int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
1916{
9a022067 1917 return kvm_s390_vcpu_has_irq(vcpu, 0);
b0c632db
HC
1918}
1919
27406cd5 1920void kvm_s390_vcpu_block(struct kvm_vcpu *vcpu)
49b99e1e 1921{
805de8f4 1922 atomic_or(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
61a6df54 1923 exit_sie(vcpu);
49b99e1e
CB
1924}
1925
27406cd5 1926void kvm_s390_vcpu_unblock(struct kvm_vcpu *vcpu)
49b99e1e 1927{
805de8f4 1928 atomic_andnot(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
49b99e1e
CB
1929}
1930
8e236546
CB
1931static void kvm_s390_vcpu_request(struct kvm_vcpu *vcpu)
1932{
805de8f4 1933 atomic_or(PROG_REQUEST, &vcpu->arch.sie_block->prog20);
61a6df54 1934 exit_sie(vcpu);
8e236546
CB
1935}
1936
1937static void kvm_s390_vcpu_request_handled(struct kvm_vcpu *vcpu)
1938{
9bf9fde2 1939 atomic_andnot(PROG_REQUEST, &vcpu->arch.sie_block->prog20);
8e236546
CB
1940}
1941
49b99e1e
CB
1942/*
1943 * Kick a guest cpu out of SIE and wait until SIE is not running.
1944 * If the CPU is not running (e.g. waiting as idle) the function will
1945 * return immediately. */
1946void exit_sie(struct kvm_vcpu *vcpu)
1947{
805de8f4 1948 atomic_or(CPUSTAT_STOP_INT, &vcpu->arch.sie_block->cpuflags);
49b99e1e
CB
1949 while (vcpu->arch.sie_block->prog0c & PROG_IN_SIE)
1950 cpu_relax();
1951}
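/*
 * Typical caller pattern (cf. kvm_s390_vcpu_block() above): first set
 * a PROG_* bit in prog20 so the vcpu cannot reenter SIE, then kick it
 * and wait for it to leave:
 *
 *	atomic_or(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
 *	exit_sie(vcpu);
 */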
1952
8e236546
CB
1953/* Kick a guest cpu out of SIE to process a request synchronously */
1954void kvm_s390_sync_request(int req, struct kvm_vcpu *vcpu)
49b99e1e 1955{
8e236546
CB
1956 kvm_make_request(req, vcpu);
1957 kvm_s390_vcpu_request(vcpu);
49b99e1e
CB
1958}
1959
2c70fe44
CB
1960static void kvm_gmap_notifier(struct gmap *gmap, unsigned long address)
1961{
1962 int i;
1963 struct kvm *kvm = gmap->private;
1964 struct kvm_vcpu *vcpu;
1965
1966 kvm_for_each_vcpu(i, vcpu, kvm) {
1967 /* match against both prefix pages */
fda902cb 1968 if (kvm_s390_get_prefix(vcpu) == (address & ~0x1000UL)) {
2c70fe44 1969 VCPU_EVENT(vcpu, 2, "gmap notifier for %lx", address);
8e236546 1970 kvm_s390_sync_request(KVM_REQ_MMU_RELOAD, vcpu);
2c70fe44
CB
1971 }
1972 }
1973}
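/*
 * The guest prefix area covers two consecutive 4K pages (8KB), which
 * is why masking out bit 0x1000 of the notified address above lets a
 * single compare match a notification for either of the two pages.
 */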
1974
b6d33834
CD
1975int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
1976{
1977 /* kvm common code refers to this, but never calls it */
1978 BUG();
1979 return 0;
1980}
1981
14eebd91
CO
1982static int kvm_arch_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu,
1983 struct kvm_one_reg *reg)
1984{
1985 int r = -EINVAL;
1986
1987 switch (reg->id) {
29b7c71b
CO
1988 case KVM_REG_S390_TODPR:
1989 r = put_user(vcpu->arch.sie_block->todpr,
1990 (u32 __user *)reg->addr);
1991 break;
1992 case KVM_REG_S390_EPOCHDIFF:
1993 r = put_user(vcpu->arch.sie_block->epoch,
1994 (u64 __user *)reg->addr);
1995 break;
46a6dd1c 1996 case KVM_REG_S390_CPU_TIMER:
4287f247 1997 r = put_user(kvm_s390_get_cpu_timer(vcpu),
46a6dd1c
J
1998 (u64 __user *)reg->addr);
1999 break;
2000 case KVM_REG_S390_CLOCK_COMP:
2001 r = put_user(vcpu->arch.sie_block->ckc,
2002 (u64 __user *)reg->addr);
2003 break;
536336c2
DD
2004 case KVM_REG_S390_PFTOKEN:
2005 r = put_user(vcpu->arch.pfault_token,
2006 (u64 __user *)reg->addr);
2007 break;
2008 case KVM_REG_S390_PFCOMPARE:
2009 r = put_user(vcpu->arch.pfault_compare,
2010 (u64 __user *)reg->addr);
2011 break;
2012 case KVM_REG_S390_PFSELECT:
2013 r = put_user(vcpu->arch.pfault_select,
2014 (u64 __user *)reg->addr);
2015 break;
672550fb
CB
2016 case KVM_REG_S390_PP:
2017 r = put_user(vcpu->arch.sie_block->pp,
2018 (u64 __user *)reg->addr);
2019 break;
afa45ff5
CB
2020 case KVM_REG_S390_GBEA:
2021 r = put_user(vcpu->arch.sie_block->gbea,
2022 (u64 __user *)reg->addr);
2023 break;
14eebd91
CO
2024 default:
2025 break;
2026 }
2027
2028 return r;
2029}
2030
2031static int kvm_arch_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu,
2032 struct kvm_one_reg *reg)
2033{
2034 int r = -EINVAL;
4287f247 2035 __u64 val;
14eebd91
CO
2036
2037 switch (reg->id) {
29b7c71b
CO
2038 case KVM_REG_S390_TODPR:
2039 r = get_user(vcpu->arch.sie_block->todpr,
2040 (u32 __user *)reg->addr);
2041 break;
2042 case KVM_REG_S390_EPOCHDIFF:
2043 r = get_user(vcpu->arch.sie_block->epoch,
2044 (u64 __user *)reg->addr);
2045 break;
46a6dd1c 2046 case KVM_REG_S390_CPU_TIMER:
4287f247
DH
2047 r = get_user(val, (u64 __user *)reg->addr);
2048 if (!r)
2049 kvm_s390_set_cpu_timer(vcpu, val);
46a6dd1c
J
2050 break;
2051 case KVM_REG_S390_CLOCK_COMP:
2052 r = get_user(vcpu->arch.sie_block->ckc,
2053 (u64 __user *)reg->addr);
2054 break;
536336c2
DD
2055 case KVM_REG_S390_PFTOKEN:
2056 r = get_user(vcpu->arch.pfault_token,
2057 (u64 __user *)reg->addr);
9fbd8082
DH
2058 if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
2059 kvm_clear_async_pf_completion_queue(vcpu);
536336c2
DD
2060 break;
2061 case KVM_REG_S390_PFCOMPARE:
2062 r = get_user(vcpu->arch.pfault_compare,
2063 (u64 __user *)reg->addr);
2064 break;
2065 case KVM_REG_S390_PFSELECT:
2066 r = get_user(vcpu->arch.pfault_select,
2067 (u64 __user *)reg->addr);
2068 break;
672550fb
CB
2069 case KVM_REG_S390_PP:
2070 r = get_user(vcpu->arch.sie_block->pp,
2071 (u64 __user *)reg->addr);
2072 break;
afa45ff5
CB
2073 case KVM_REG_S390_GBEA:
2074 r = get_user(vcpu->arch.sie_block->gbea,
2075 (u64 __user *)reg->addr);
2076 break;
14eebd91
CO
2077 default:
2078 break;
2079 }
2080
2081 return r;
2082}
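/*
 * Hypothetical userspace use of the ONE_REG handlers above (a minimal
 * sketch; vcpu_fd is assumed to be an open vcpu file descriptor):
 *
 *	__u64 cputm;
 *	struct kvm_one_reg reg = {
 *		.id   = KVM_REG_S390_CPU_TIMER,
 *		.addr = (__u64)&cputm,
 *	};
 *	if (ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg) == 0)
 *		... cputm now holds the guest CPU timer value ...
 */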
b6d33834 2083
b0c632db
HC
2084static int kvm_arch_vcpu_ioctl_initial_reset(struct kvm_vcpu *vcpu)
2085{
b0c632db 2086 kvm_s390_vcpu_initial_reset(vcpu);
b0c632db
HC
2087 return 0;
2088}
2089
2090int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
2091{
5a32c1af 2092 memcpy(&vcpu->run->s.regs.gprs, &regs->gprs, sizeof(regs->gprs));
b0c632db
HC
2093 return 0;
2094}
2095
2096int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
2097{
5a32c1af 2098 memcpy(&regs->gprs, &vcpu->run->s.regs.gprs, sizeof(regs->gprs));
b0c632db
HC
2099 return 0;
2100}
2101
2102int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
2103 struct kvm_sregs *sregs)
2104{
59674c1a 2105 memcpy(&vcpu->run->s.regs.acrs, &sregs->acrs, sizeof(sregs->acrs));
b0c632db 2106 memcpy(&vcpu->arch.sie_block->gcr, &sregs->crs, sizeof(sregs->crs));
59674c1a 2107 restore_access_regs(vcpu->run->s.regs.acrs);
b0c632db
HC
2108 return 0;
2109}
2110
2111int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
2112 struct kvm_sregs *sregs)
2113{
59674c1a 2114 memcpy(&sregs->acrs, &vcpu->run->s.regs.acrs, sizeof(sregs->acrs));
b0c632db 2115 memcpy(&sregs->crs, &vcpu->arch.sie_block->gcr, sizeof(sregs->crs));
b0c632db
HC
2116 return 0;
2117}
2118
2119int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
2120{
9abc2a08
DH
2121 /* make sure the new values will be lazily loaded */
2122 save_fpu_regs();
4725c860
MS
2123 if (test_fp_ctl(fpu->fpc))
2124 return -EINVAL;
9abc2a08
DH
2125 current->thread.fpu.fpc = fpu->fpc;
2126 if (MACHINE_HAS_VX)
2127 convert_fp_to_vx(current->thread.fpu.vxrs, (freg_t *)fpu->fprs);
2128 else
2129 memcpy(current->thread.fpu.fprs, &fpu->fprs, sizeof(fpu->fprs));
b0c632db
HC
2130 return 0;
2131}
2132
2133int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
2134{
9abc2a08
DH
2135 /* make sure we have the latest values */
2136 save_fpu_regs();
2137 if (MACHINE_HAS_VX)
2138 convert_vx_to_fp((freg_t *)fpu->fprs, current->thread.fpu.vxrs);
2139 else
2140 memcpy(fpu->fprs, current->thread.fpu.fprs, sizeof(fpu->fprs));
2141 fpu->fpc = current->thread.fpu.fpc;
b0c632db
HC
2142 return 0;
2143}
2144
2145static int kvm_arch_vcpu_ioctl_set_initial_psw(struct kvm_vcpu *vcpu, psw_t psw)
2146{
2147 int rc = 0;
2148
7a42fdc2 2149 if (!is_vcpu_stopped(vcpu))
b0c632db 2150 rc = -EBUSY;
d7b0b5eb
CO
2151 else {
2152 vcpu->run->psw_mask = psw.mask;
2153 vcpu->run->psw_addr = psw.addr;
2154 }
b0c632db
HC
2155 return rc;
2156}
2157
2158int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
2159 struct kvm_translation *tr)
2160{
2161 return -EINVAL; /* not implemented yet */
2162}
2163
27291e21
DH
2164#define VALID_GUESTDBG_FLAGS (KVM_GUESTDBG_SINGLESTEP | \
2165 KVM_GUESTDBG_USE_HW_BP | \
2166 KVM_GUESTDBG_ENABLE)
2167
d0bfb940
JK
2168int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
2169 struct kvm_guest_debug *dbg)
b0c632db 2170{
27291e21
DH
2171 int rc = 0;
2172
2173 vcpu->guest_debug = 0;
2174 kvm_s390_clear_bp_data(vcpu);
2175
2de3bfc2 2176 if (dbg->control & ~VALID_GUESTDBG_FLAGS)
27291e21
DH
2177 return -EINVAL;
2178
2179 if (dbg->control & KVM_GUESTDBG_ENABLE) {
2180 vcpu->guest_debug = dbg->control;
2181 /* enforce guest PER */
805de8f4 2182 atomic_or(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags);
27291e21
DH
2183
2184 if (dbg->control & KVM_GUESTDBG_USE_HW_BP)
2185 rc = kvm_s390_import_bp_data(vcpu, dbg);
2186 } else {
805de8f4 2187 atomic_andnot(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags);
27291e21
DH
2188 vcpu->arch.guestdbg.last_bp = 0;
2189 }
2190
2191 if (rc) {
2192 vcpu->guest_debug = 0;
2193 kvm_s390_clear_bp_data(vcpu);
805de8f4 2194 atomic_andnot(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags);
27291e21
DH
2195 }
2196
2197 return rc;
b0c632db
HC
2198}
2199
62d9f0db
MT
2200int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
2201 struct kvm_mp_state *mp_state)
2202{
6352e4d2
DH
2203 /* CHECK_STOP and LOAD are not supported yet */
2204 return is_vcpu_stopped(vcpu) ? KVM_MP_STATE_STOPPED :
2205 KVM_MP_STATE_OPERATING;
62d9f0db
MT
2206}
2207
2208int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
2209 struct kvm_mp_state *mp_state)
2210{
6352e4d2
DH
2211 int rc = 0;
2212
2213 /* user space knows about this interface - let it control the state */
2214 vcpu->kvm->arch.user_cpu_state_ctrl = 1;
2215
2216 switch (mp_state->mp_state) {
2217 case KVM_MP_STATE_STOPPED:
2218 kvm_s390_vcpu_stop(vcpu);
2219 break;
2220 case KVM_MP_STATE_OPERATING:
2221 kvm_s390_vcpu_start(vcpu);
2222 break;
2223 case KVM_MP_STATE_LOAD:
2224 case KVM_MP_STATE_CHECK_STOP:
2225 /* fall through - CHECK_STOP and LOAD are not supported yet */
2226 default:
2227 rc = -ENXIO;
2228 }
2229
2230 return rc;
62d9f0db
MT
2231}
2232
8ad35755
DH
2233static bool ibs_enabled(struct kvm_vcpu *vcpu)
2234{
2235 return atomic_read(&vcpu->arch.sie_block->cpuflags) & CPUSTAT_IBS;
2236}
2237
2c70fe44
CB
2238static int kvm_s390_handle_requests(struct kvm_vcpu *vcpu)
2239{
8ad35755 2240retry:
8e236546 2241 kvm_s390_vcpu_request_handled(vcpu);
586b7ccd
CB
2242 if (!vcpu->requests)
2243 return 0;
2c70fe44
CB
2244 /*
2245 * We use MMU_RELOAD just to re-arm the ipte notifier for the
2246 * guest prefix page. gmap_ipte_notify will wait on the ptl lock.
2247 * This ensures that the ipte instruction for this request has
2248 * already finished. We might race against a second unmapper that
2249 * wants to set the blocking bit. Let's just retry the request loop.
2250 */
8ad35755 2251 if (kvm_check_request(KVM_REQ_MMU_RELOAD, vcpu)) {
2c70fe44
CB
2252 int rc;
2253 rc = gmap_ipte_notify(vcpu->arch.gmap,
fda902cb 2254 kvm_s390_get_prefix(vcpu),
2c70fe44
CB
2255 PAGE_SIZE * 2);
2256 if (rc)
2257 return rc;
8ad35755 2258 goto retry;
2c70fe44 2259 }
8ad35755 2260
d3d692c8
DH
2261 if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu)) {
2262 vcpu->arch.sie_block->ihcpu = 0xffff;
2263 goto retry;
2264 }
2265
8ad35755
DH
2266 if (kvm_check_request(KVM_REQ_ENABLE_IBS, vcpu)) {
2267 if (!ibs_enabled(vcpu)) {
2268 trace_kvm_s390_enable_disable_ibs(vcpu->vcpu_id, 1);
805de8f4 2269 atomic_or(CPUSTAT_IBS,
8ad35755
DH
2270 &vcpu->arch.sie_block->cpuflags);
2271 }
2272 goto retry;
2c70fe44 2273 }
8ad35755
DH
2274
2275 if (kvm_check_request(KVM_REQ_DISABLE_IBS, vcpu)) {
2276 if (ibs_enabled(vcpu)) {
2277 trace_kvm_s390_enable_disable_ibs(vcpu->vcpu_id, 0);
805de8f4 2278 atomic_andnot(CPUSTAT_IBS,
8ad35755
DH
2279 &vcpu->arch.sie_block->cpuflags);
2280 }
2281 goto retry;
2282 }
2283
0759d068
DH
2284 /* nothing to do, just clear the request */
2285 clear_bit(KVM_REQ_UNHALT, &vcpu->requests);
2286
2c70fe44
CB
2287 return 0;
2288}
2289
25ed1675
DH
2290void kvm_s390_set_tod_clock(struct kvm *kvm, u64 tod)
2291{
2292 struct kvm_vcpu *vcpu;
2293 int i;
2294
2295 mutex_lock(&kvm->lock);
2296 preempt_disable();
2297 kvm->arch.epoch = tod - get_tod_clock();
2298 kvm_s390_vcpu_block_all(kvm);
2299 kvm_for_each_vcpu(i, vcpu, kvm)
2300 vcpu->arch.sie_block->epoch = kvm->arch.epoch;
2301 kvm_s390_vcpu_unblock_all(kvm);
2302 preempt_enable();
2303 mutex_unlock(&kvm->lock);
2304}
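/*
 * kvm_s390_vcpu_block_all()/_unblock_all() used above boil down to (a
 * sketch; the real helpers live in kvm-s390.h):
 *
 *	kvm_for_each_vcpu(i, vcpu, kvm)
 *		kvm_s390_vcpu_block(vcpu);
 */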
2305
fa576c58
TH
2306/**
2307 * kvm_arch_fault_in_page - fault-in guest page if necessary
2308 * @vcpu: The corresponding virtual cpu
2309 * @gpa: Guest physical address
2310 * @writable: Whether the page should be writable or not
2311 *
2312 * Make sure that a guest page has been faulted-in on the host.
2313 *
2314 * Return: Zero on success, negative error code otherwise.
2315 */
2316long kvm_arch_fault_in_page(struct kvm_vcpu *vcpu, gpa_t gpa, int writable)
24eb3a82 2317{
527e30b4
MS
2318 return gmap_fault(vcpu->arch.gmap, gpa,
2319 writable ? FAULT_FLAG_WRITE : 0);
24eb3a82
DD
2320}
2321
3c038e6b
DD
2322static void __kvm_inject_pfault_token(struct kvm_vcpu *vcpu, bool start_token,
2323 unsigned long token)
2324{
2325 struct kvm_s390_interrupt inti;
383d0b05 2326 struct kvm_s390_irq irq;
3c038e6b
DD
2327
2328 if (start_token) {
383d0b05
JF
2329 irq.u.ext.ext_params2 = token;
2330 irq.type = KVM_S390_INT_PFAULT_INIT;
2331 WARN_ON_ONCE(kvm_s390_inject_vcpu(vcpu, &irq));
3c038e6b
DD
2332 } else {
2333 inti.type = KVM_S390_INT_PFAULT_DONE;
383d0b05 2334 inti.parm64 = token;
3c038e6b
DD
2335 WARN_ON_ONCE(kvm_s390_inject_vm(vcpu->kvm, &inti));
2336 }
2337}
2338
2339void kvm_arch_async_page_not_present(struct kvm_vcpu *vcpu,
2340 struct kvm_async_pf *work)
2341{
2342 trace_kvm_s390_pfault_init(vcpu, work->arch.pfault_token);
2343 __kvm_inject_pfault_token(vcpu, true, work->arch.pfault_token);
2344}
2345
2346void kvm_arch_async_page_present(struct kvm_vcpu *vcpu,
2347 struct kvm_async_pf *work)
2348{
2349 trace_kvm_s390_pfault_done(vcpu, work->arch.pfault_token);
2350 __kvm_inject_pfault_token(vcpu, false, work->arch.pfault_token);
2351}
2352
2353void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu,
2354 struct kvm_async_pf *work)
2355{
2356 /* s390 will always inject the page directly */
2357}
2358
2359bool kvm_arch_can_inject_async_page_present(struct kvm_vcpu *vcpu)
2360{
2361 /*
2362 * s390 will always inject the page directly,
2363 * but we still want check_async_completion to cleanup
2364 */
2365 return true;
2366}
2367
2368static int kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu)
2369{
2370 hva_t hva;
2371 struct kvm_arch_async_pf arch;
2372 int rc;
2373
2374 if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
2375 return 0;
2376 if ((vcpu->arch.sie_block->gpsw.mask & vcpu->arch.pfault_select) !=
2377 vcpu->arch.pfault_compare)
2378 return 0;
2379 if (psw_extint_disabled(vcpu))
2380 return 0;
9a022067 2381 if (kvm_s390_vcpu_has_irq(vcpu, 0))
3c038e6b
DD
2382 return 0;
2383 if (!(vcpu->arch.sie_block->gcr[0] & 0x200ul))
2384 return 0;
2385 if (!vcpu->arch.gmap->pfault_enabled)
2386 return 0;
2387
81480cc1
HC
2388 hva = gfn_to_hva(vcpu->kvm, gpa_to_gfn(current->thread.gmap_addr));
2389 hva += current->thread.gmap_addr & ~PAGE_MASK;
2390 if (read_guest_real(vcpu, vcpu->arch.pfault_token, &arch.pfault_token, 8))
3c038e6b
DD
2391 return 0;
2392
2393 rc = kvm_setup_async_pf(vcpu, current->thread.gmap_addr, hva, &arch);
2394 return rc;
2395}
2396
3fb4c40f 2397static int vcpu_pre_run(struct kvm_vcpu *vcpu)
b0c632db 2398{
3fb4c40f 2399 int rc, cpuflags;
e168bf8d 2400
3c038e6b
DD
2401 /*
2402 * On s390 notifications for arriving pages will be delivered directly
2403 * to the guest but the housekeeping for completed pfaults is
2404 * handled outside the worker.
2405 */
2406 kvm_check_async_pf_completion(vcpu);
2407
7ec7c8c7
CB
2408 vcpu->arch.sie_block->gg14 = vcpu->run->s.regs.gprs[14];
2409 vcpu->arch.sie_block->gg15 = vcpu->run->s.regs.gprs[15];
b0c632db
HC
2410
2411 if (need_resched())
2412 schedule();
2413
d3a73acb 2414 if (test_cpu_flag(CIF_MCCK_PENDING))
71cde587
CB
2415 s390_handle_mcck();
2416
79395031
JF
2417 if (!kvm_is_ucontrol(vcpu->kvm)) {
2418 rc = kvm_s390_deliver_pending_interrupts(vcpu);
2419 if (rc)
2420 return rc;
2421 }
0ff31867 2422
2c70fe44
CB
2423 rc = kvm_s390_handle_requests(vcpu);
2424 if (rc)
2425 return rc;
2426
27291e21
DH
2427 if (guestdbg_enabled(vcpu)) {
2428 kvm_s390_backup_guest_per_regs(vcpu);
2429 kvm_s390_patch_guest_per_regs(vcpu);
2430 }
2431
b0c632db 2432 vcpu->arch.sie_block->icptcode = 0;
3fb4c40f
TH
2433 cpuflags = atomic_read(&vcpu->arch.sie_block->cpuflags);
2434 VCPU_EVENT(vcpu, 6, "entering sie flags %x", cpuflags);
2435 trace_kvm_s390_sie_enter(vcpu, cpuflags);
2b29a9fd 2436
3fb4c40f
TH
2437 return 0;
2438}
2439
492d8642
TH
2440static int vcpu_post_run_fault_in_sie(struct kvm_vcpu *vcpu)
2441{
56317920
DH
2442 struct kvm_s390_pgm_info pgm_info = {
2443 .code = PGM_ADDRESSING,
2444 };
2445 u8 opcode, ilen;
492d8642
TH
2446 int rc;
2447
2448 VCPU_EVENT(vcpu, 3, "%s", "fault in sie instruction");
2449 trace_kvm_s390_sie_fault(vcpu);
2450
2451 /*
2452 * We want to inject an addressing exception, which is defined as a
2453 * suppressing or terminating exception. However, since we came here
2454 * by a DAT access exception, the PSW still points to the faulting
2455 * instruction since DAT exceptions are nullifying. So we've got
2456 * to look up the current opcode to get the length of the instruction
2457 * to be able to forward the PSW.
2458 */
65977322 2459 rc = read_guest_instr(vcpu, &opcode, 1);
56317920 2460 ilen = insn_length(opcode);
9b0d721a
DH
2461 if (rc < 0) {
2462 return rc;
2463 } else if (rc) {
2464 /* Instruction-Fetching Exceptions - we can't detect the ilen.
2465 * Forward by arbitrary ilc, injection will take care of
2466 * nullification if necessary.
2467 */
2468 pgm_info = vcpu->arch.pgm;
2469 ilen = 4;
2470 }
56317920
DH
2471 pgm_info.flags = ilen | KVM_S390_PGM_FLAGS_ILC_VALID;
2472 kvm_s390_forward_psw(vcpu, ilen);
2473 return kvm_s390_inject_prog_irq(vcpu, &pgm_info);
492d8642
TH
2474}
2475
3fb4c40f
TH
2476static int vcpu_post_run(struct kvm_vcpu *vcpu, int exit_reason)
2477{
2b29a9fd
DD
2478 VCPU_EVENT(vcpu, 6, "exit sie icptcode %d",
2479 vcpu->arch.sie_block->icptcode);
2480 trace_kvm_s390_sie_exit(vcpu, vcpu->arch.sie_block->icptcode);
2481
27291e21
DH
2482 if (guestdbg_enabled(vcpu))
2483 kvm_s390_restore_guest_per_regs(vcpu);
2484
7ec7c8c7
CB
2485 vcpu->run->s.regs.gprs[14] = vcpu->arch.sie_block->gg14;
2486 vcpu->run->s.regs.gprs[15] = vcpu->arch.sie_block->gg15;
71f116bf
DH
2487
2488 if (vcpu->arch.sie_block->icptcode > 0) {
2489 int rc = kvm_handle_sie_intercept(vcpu);
2490
2491 if (rc != -EOPNOTSUPP)
2492 return rc;
2493 vcpu->run->exit_reason = KVM_EXIT_S390_SIEIC;
2494 vcpu->run->s390_sieic.icptcode = vcpu->arch.sie_block->icptcode;
2495 vcpu->run->s390_sieic.ipa = vcpu->arch.sie_block->ipa;
2496 vcpu->run->s390_sieic.ipb = vcpu->arch.sie_block->ipb;
2497 return -EREMOTE;
2498 } else if (exit_reason != -EFAULT) {
2499 vcpu->stat.exit_null++;
2500 return 0;
210b1607
TH
2501 } else if (kvm_is_ucontrol(vcpu->kvm)) {
2502 vcpu->run->exit_reason = KVM_EXIT_S390_UCONTROL;
2503 vcpu->run->s390_ucontrol.trans_exc_code =
2504 current->thread.gmap_addr;
2505 vcpu->run->s390_ucontrol.pgm_code = 0x10;
71f116bf 2506 return -EREMOTE;
24eb3a82 2507 } else if (current->thread.gmap_pfault) {
3c038e6b 2508 trace_kvm_s390_major_guest_pfault(vcpu);
24eb3a82 2509 current->thread.gmap_pfault = 0;
71f116bf
DH
2510 if (kvm_arch_setup_async_pf(vcpu))
2511 return 0;
2512 return kvm_arch_fault_in_page(vcpu, current->thread.gmap_addr, 1);
a76ccff6 2513 }
71f116bf 2514 return vcpu_post_run_fault_in_sie(vcpu);
3fb4c40f
TH
2515}
2516
2517static int __vcpu_run(struct kvm_vcpu *vcpu)
2518{
2519 int rc, exit_reason;
2520
800c1065
TH
2521 /*
2522 * We try to hold kvm->srcu during most of vcpu_run (except when run-
2523 * ning the guest), so that memslots (and other stuff) are protected
2524 */
2525 vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
2526
a76ccff6
TH
2527 do {
2528 rc = vcpu_pre_run(vcpu);
2529 if (rc)
2530 break;
3fb4c40f 2531
800c1065 2532 srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
a76ccff6
TH
2533 /*
2534 * As PF_VCPU will be used in the fault handler, there must be
2535 * no uaccess between guest_enter and guest_exit.
2536 */
0097d12e
CB
2537 local_irq_disable();
2538 __kvm_guest_enter();
db0758b2 2539 __disable_cpu_timer_accounting(vcpu);
0097d12e 2540 local_irq_enable();
a76ccff6
TH
2541 exit_reason = sie64a(vcpu->arch.sie_block,
2542 vcpu->run->s.regs.gprs);
0097d12e 2543 local_irq_disable();
db0758b2 2544 __enable_cpu_timer_accounting(vcpu);
0097d12e
CB
2545 __kvm_guest_exit();
2546 local_irq_enable();
800c1065 2547 vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
a76ccff6
TH
2548
2549 rc = vcpu_post_run(vcpu, exit_reason);
27291e21 2550 } while (!signal_pending(current) && !guestdbg_exit_pending(vcpu) && !rc);
3fb4c40f 2551
800c1065 2552 srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
e168bf8d 2553 return rc;
b0c632db
HC
2554}
2555
b028ee3e
DH
2556static void sync_regs(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
2557{
2558 vcpu->arch.sie_block->gpsw.mask = kvm_run->psw_mask;
2559 vcpu->arch.sie_block->gpsw.addr = kvm_run->psw_addr;
2560 if (kvm_run->kvm_dirty_regs & KVM_SYNC_PREFIX)
2561 kvm_s390_set_prefix(vcpu, kvm_run->s.regs.prefix);
2562 if (kvm_run->kvm_dirty_regs & KVM_SYNC_CRS) {
2563 memcpy(&vcpu->arch.sie_block->gcr, &kvm_run->s.regs.crs, 128);
d3d692c8
DH
2564 /* some control register changes require a tlb flush */
2565 kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
b028ee3e
DH
2566 }
2567 if (kvm_run->kvm_dirty_regs & KVM_SYNC_ARCH0) {
4287f247 2568 kvm_s390_set_cpu_timer(vcpu, kvm_run->s.regs.cputm);
b028ee3e
DH
2569 vcpu->arch.sie_block->ckc = kvm_run->s.regs.ckc;
2570 vcpu->arch.sie_block->todpr = kvm_run->s.regs.todpr;
2571 vcpu->arch.sie_block->pp = kvm_run->s.regs.pp;
2572 vcpu->arch.sie_block->gbea = kvm_run->s.regs.gbea;
2573 }
2574 if (kvm_run->kvm_dirty_regs & KVM_SYNC_PFAULT) {
2575 vcpu->arch.pfault_token = kvm_run->s.regs.pft;
2576 vcpu->arch.pfault_select = kvm_run->s.regs.pfs;
2577 vcpu->arch.pfault_compare = kvm_run->s.regs.pfc;
9fbd8082
DH
2578 if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
2579 kvm_clear_async_pf_completion_queue(vcpu);
b028ee3e
DH
2580 }
2581 kvm_run->kvm_dirty_regs = 0;
2582}
2583
2584static void store_regs(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
2585{
2586 kvm_run->psw_mask = vcpu->arch.sie_block->gpsw.mask;
2587 kvm_run->psw_addr = vcpu->arch.sie_block->gpsw.addr;
2588 kvm_run->s.regs.prefix = kvm_s390_get_prefix(vcpu);
2589 memcpy(&kvm_run->s.regs.crs, &vcpu->arch.sie_block->gcr, 128);
4287f247 2590 kvm_run->s.regs.cputm = kvm_s390_get_cpu_timer(vcpu);
b028ee3e
DH
2591 kvm_run->s.regs.ckc = vcpu->arch.sie_block->ckc;
2592 kvm_run->s.regs.todpr = vcpu->arch.sie_block->todpr;
2593 kvm_run->s.regs.pp = vcpu->arch.sie_block->pp;
2594 kvm_run->s.regs.gbea = vcpu->arch.sie_block->gbea;
2595 kvm_run->s.regs.pft = vcpu->arch.pfault_token;
2596 kvm_run->s.regs.pfs = vcpu->arch.pfault_select;
2597 kvm_run->s.regs.pfc = vcpu->arch.pfault_compare;
2598}
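/*
 * Hypothetical userspace counterpart to the register sync above (a
 * sketch): before KVM_RUN, userspace marks the parts of
 * kvm_run->s.regs it modified via kvm_run->kvm_dirty_regs, e.g.:
 *
 *	run->s.regs.prefix = new_prefix;
 *	run->kvm_dirty_regs |= KVM_SYNC_PREFIX;
 *	ioctl(vcpu_fd, KVM_RUN, 0);
 */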
2599
b0c632db
HC
2600int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
2601{
8f2abe6a 2602 int rc;
b0c632db
HC
2603 sigset_t sigsaved;
2604
27291e21
DH
2605 if (guestdbg_exit_pending(vcpu)) {
2606 kvm_s390_prepare_debug_exit(vcpu);
2607 return 0;
2608 }
2609
b0c632db
HC
2610 if (vcpu->sigset_active)
2611 sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);
2612
6352e4d2
DH
2613 if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm)) {
2614 kvm_s390_vcpu_start(vcpu);
2615 } else if (is_vcpu_stopped(vcpu)) {
ea2cdd27 2616 pr_err_ratelimited("can't run stopped vcpu %d\n",
6352e4d2
DH
2617 vcpu->vcpu_id);
2618 return -EINVAL;
2619 }
b0c632db 2620
b028ee3e 2621 sync_regs(vcpu, kvm_run);
db0758b2 2622 enable_cpu_timer_accounting(vcpu);
d7b0b5eb 2623
dab4079d 2624 might_fault();
a76ccff6 2625 rc = __vcpu_run(vcpu);
9ace903d 2626
b1d16c49
CE
2627 if (signal_pending(current) && !rc) {
2628 kvm_run->exit_reason = KVM_EXIT_INTR;
8f2abe6a 2629 rc = -EINTR;
b1d16c49 2630 }
8f2abe6a 2631
27291e21
DH
2632 if (guestdbg_exit_pending(vcpu) && !rc) {
2633 kvm_s390_prepare_debug_exit(vcpu);
2634 rc = 0;
2635 }
2636
8f2abe6a 2637 if (rc == -EREMOTE) {
71f116bf 2638 /* userspace support is needed, kvm_run has been prepared */
8f2abe6a
CB
2639 rc = 0;
2640 }
b0c632db 2641
db0758b2 2642 disable_cpu_timer_accounting(vcpu);
b028ee3e 2643 store_regs(vcpu, kvm_run);
d7b0b5eb 2644
b0c632db
HC
2645 if (vcpu->sigset_active)
2646 sigprocmask(SIG_SETMASK, &sigsaved, NULL);
2647
b0c632db 2648 vcpu->stat.exit_userspace++;
7e8e6ab4 2649 return rc;
b0c632db
HC
2650}
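/*
 * A minimal userspace run loop against this entry point (a sketch;
 * vcpu_fd and the mmap'ed struct kvm_run are assumed to be set up):
 *
 *	for (;;) {
 *		if (ioctl(vcpu_fd, KVM_RUN, 0) < 0 && errno != EINTR)
 *			break;
 *		switch (run->exit_reason) {
 *		case KVM_EXIT_S390_SIEIC:
 *			... handle run->s390_sieic.icptcode ...
 *			break;
 *		case KVM_EXIT_INTR:
 *			continue;
 *		}
 *	}
 */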
2651
b0c632db
HC
2652/*
2653 * store status at address
2654 * we have two special cases:
2655 * KVM_S390_STORE_STATUS_NOADDR: -> 0x1200 on 64 bit
2656 * KVM_S390_STORE_STATUS_PREFIXED: -> prefix
2657 */
d0bce605 2658int kvm_s390_store_status_unloaded(struct kvm_vcpu *vcpu, unsigned long gpa)
b0c632db 2659{
092670cd 2660 unsigned char archmode = 1;
9abc2a08 2661 freg_t fprs[NUM_FPRS];
fda902cb 2662 unsigned int px;
4287f247 2663 u64 clkcomp, cputm;
d0bce605 2664 int rc;
b0c632db 2665
d9a3a09a 2666 px = kvm_s390_get_prefix(vcpu);
d0bce605
HC
2667 if (gpa == KVM_S390_STORE_STATUS_NOADDR) {
2668 if (write_guest_abs(vcpu, 163, &archmode, 1))
b0c632db 2669 return -EFAULT;
d9a3a09a 2670 gpa = 0;
d0bce605
HC
2671 } else if (gpa == KVM_S390_STORE_STATUS_PREFIXED) {
2672 if (write_guest_real(vcpu, 163, &archmode, 1))
b0c632db 2673 return -EFAULT;
d9a3a09a
MS
2674 gpa = px;
2675 } else
2676 gpa -= __LC_FPREGS_SAVE_AREA;
9abc2a08
DH
2677
2678 /* manually convert vector registers if necessary */
2679 if (MACHINE_HAS_VX) {
9522b37f 2680 convert_vx_to_fp(fprs, (__vector128 *) vcpu->run->s.regs.vrs);
9abc2a08
DH
2681 rc = write_guest_abs(vcpu, gpa + __LC_FPREGS_SAVE_AREA,
2682 fprs, 128);
2683 } else {
2684 rc = write_guest_abs(vcpu, gpa + __LC_FPREGS_SAVE_AREA,
6fd8e67d 2685 vcpu->run->s.regs.fprs, 128);
9abc2a08 2686 }
d9a3a09a 2687 rc |= write_guest_abs(vcpu, gpa + __LC_GPREGS_SAVE_AREA,
d0bce605 2688 vcpu->run->s.regs.gprs, 128);
d9a3a09a 2689 rc |= write_guest_abs(vcpu, gpa + __LC_PSW_SAVE_AREA,
d0bce605 2690 &vcpu->arch.sie_block->gpsw, 16);
d9a3a09a 2691 rc |= write_guest_abs(vcpu, gpa + __LC_PREFIX_SAVE_AREA,
fda902cb 2692 &px, 4);
d9a3a09a 2693 rc |= write_guest_abs(vcpu, gpa + __LC_FP_CREG_SAVE_AREA,
9abc2a08 2694 &vcpu->run->s.regs.fpc, 4);
d9a3a09a 2695 rc |= write_guest_abs(vcpu, gpa + __LC_TOD_PROGREG_SAVE_AREA,
d0bce605 2696 &vcpu->arch.sie_block->todpr, 4);
4287f247 2697 cputm = kvm_s390_get_cpu_timer(vcpu);
d9a3a09a 2698 rc |= write_guest_abs(vcpu, gpa + __LC_CPU_TIMER_SAVE_AREA,
4287f247 2699 &cputm, 8);
178bd789 2700 clkcomp = vcpu->arch.sie_block->ckc >> 8;
d9a3a09a 2701 rc |= write_guest_abs(vcpu, gpa + __LC_CLOCK_COMP_SAVE_AREA,
d0bce605 2702 &clkcomp, 8);
d9a3a09a 2703 rc |= write_guest_abs(vcpu, gpa + __LC_AREGS_SAVE_AREA,
d0bce605 2704 &vcpu->run->s.regs.acrs, 64);
d9a3a09a 2705 rc |= write_guest_abs(vcpu, gpa + __LC_CREGS_SAVE_AREA,
d0bce605
HC
2706 &vcpu->arch.sie_block->gcr, 128);
2707 return rc ? -EFAULT : 0;
b0c632db
HC
2708}
2709
e879892c
TH
2710int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
2711{
2712 /*
2713 * The guest FPRS and ACRS are in the host FPRS/ACRS due to the lazy
2714 * copying in vcpu load/put. Let's update our copies before we save
2715 * it into the save area
2716 */
d0164ee2 2717 save_fpu_regs();
9abc2a08 2718 vcpu->run->s.regs.fpc = current->thread.fpu.fpc;
e879892c
TH
2719 save_access_regs(vcpu->run->s.regs.acrs);
2720
2721 return kvm_s390_store_status_unloaded(vcpu, addr);
2722}
2723
bc17de7c
EF
2724/*
2725 * store additional status at address
2726 */
2727int kvm_s390_store_adtl_status_unloaded(struct kvm_vcpu *vcpu,
2728 unsigned long gpa)
2729{
2730 /* Only bits 0-53 are used for address formation */
2731 if (!(gpa & ~0x3ff))
2732 return 0;
2733
2734 return write_guest_abs(vcpu, gpa & ~0x3ff,
2735 (void *)&vcpu->run->s.regs.vrs, 512);
2736}
2737
2738int kvm_s390_vcpu_store_adtl_status(struct kvm_vcpu *vcpu, unsigned long addr)
2739{
2740 if (!test_kvm_facility(vcpu->kvm, 129))
2741 return 0;
2742
2743 /*
2744 * The guest VXRS are in the host VXRS due to the lazy
9977e886
HB
2745 * copying in vcpu load/put. We can simply call save_fpu_regs()
2746 * to save the current register state because we are in the
2747 * middle of a load/put cycle.
2748 *
2749 * Let's update our copies before we save it into the save area.
bc17de7c 2750 */
d0164ee2 2751 save_fpu_regs();
bc17de7c
EF
2752
2753 return kvm_s390_store_adtl_status_unloaded(vcpu, addr);
2754}
2755
8ad35755
DH
2756static void __disable_ibs_on_vcpu(struct kvm_vcpu *vcpu)
2757{
2758 kvm_check_request(KVM_REQ_ENABLE_IBS, vcpu);
8e236546 2759 kvm_s390_sync_request(KVM_REQ_DISABLE_IBS, vcpu);
8ad35755
DH
2760}
2761
2762static void __disable_ibs_on_all_vcpus(struct kvm *kvm)
2763{
2764 unsigned int i;
2765 struct kvm_vcpu *vcpu;
2766
2767 kvm_for_each_vcpu(i, vcpu, kvm) {
2768 __disable_ibs_on_vcpu(vcpu);
2769 }
2770}
2771
2772static void __enable_ibs_on_vcpu(struct kvm_vcpu *vcpu)
2773{
2774 kvm_check_request(KVM_REQ_DISABLE_IBS, vcpu);
8e236546 2775 kvm_s390_sync_request(KVM_REQ_ENABLE_IBS, vcpu);
8ad35755
DH
2776}
2777
6852d7b6
DH
2778void kvm_s390_vcpu_start(struct kvm_vcpu *vcpu)
2779{
8ad35755
DH
2780 int i, online_vcpus, started_vcpus = 0;
2781
2782 if (!is_vcpu_stopped(vcpu))
2783 return;
2784
6852d7b6 2785 trace_kvm_s390_vcpu_start_stop(vcpu->vcpu_id, 1);
8ad35755 2786 /* Only one cpu at a time may enter/leave the STOPPED state. */
433b9ee4 2787 spin_lock(&vcpu->kvm->arch.start_stop_lock);
8ad35755
DH
2788 online_vcpus = atomic_read(&vcpu->kvm->online_vcpus);
2789
2790 for (i = 0; i < online_vcpus; i++) {
2791 if (!is_vcpu_stopped(vcpu->kvm->vcpus[i]))
2792 started_vcpus++;
2793 }
2794
2795 if (started_vcpus == 0) {
2796 /* we're the only active VCPU -> speed it up */
2797 __enable_ibs_on_vcpu(vcpu);
2798 } else if (started_vcpus == 1) {
2799 /*
2800 * As we are starting a second VCPU, we have to disable
2801 * the IBS facility on all VCPUs to remove potentially
2802 * outstanding ENABLE requests.
2803 */
2804 __disable_ibs_on_all_vcpus(vcpu->kvm);
2805 }
2806
805de8f4 2807 atomic_andnot(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);
8ad35755
DH
2808 /*
2809 * Another VCPU might have used IBS while we were offline.
2810 * Let's play safe and flush the VCPU at startup.
2811 */
d3d692c8 2812 kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
433b9ee4 2813 spin_unlock(&vcpu->kvm->arch.start_stop_lock);
8ad35755 2814 return;
6852d7b6
DH
2815}
2816
2817void kvm_s390_vcpu_stop(struct kvm_vcpu *vcpu)
2818{
8ad35755
DH
2819 int i, online_vcpus, started_vcpus = 0;
2820 struct kvm_vcpu *started_vcpu = NULL;
2821
2822 if (is_vcpu_stopped(vcpu))
2823 return;
2824
6852d7b6 2825 trace_kvm_s390_vcpu_start_stop(vcpu->vcpu_id, 0);
8ad35755 2826 /* Only one cpu at a time may enter/leave the STOPPED state. */
433b9ee4 2827 spin_lock(&vcpu->kvm->arch.start_stop_lock);
8ad35755
DH
2828 online_vcpus = atomic_read(&vcpu->kvm->online_vcpus);
2829
32f5ff63 2830 /* SIGP STOP and SIGP STOP AND STORE STATUS has been fully processed */
6cddd432 2831 kvm_s390_clear_stop_irq(vcpu);
32f5ff63 2832
805de8f4 2833 atomic_or(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);
8ad35755
DH
2834 __disable_ibs_on_vcpu(vcpu);
2835
2836 for (i = 0; i < online_vcpus; i++) {
2837 if (!is_vcpu_stopped(vcpu->kvm->vcpus[i])) {
2838 started_vcpus++;
2839 started_vcpu = vcpu->kvm->vcpus[i];
2840 }
2841 }
2842
2843 if (started_vcpus == 1) {
2844 /*
2845 * As we only have one VCPU left, we want to enable the
2846 * IBS facility for that VCPU to speed it up.
2847 */
2848 __enable_ibs_on_vcpu(started_vcpu);
2849 }
2850
433b9ee4 2851 spin_unlock(&vcpu->kvm->arch.start_stop_lock);
8ad35755 2852 return;
6852d7b6
DH
2853}
2854
d6712df9
CH
2855static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
2856 struct kvm_enable_cap *cap)
2857{
2858 int r;
2859
2860 if (cap->flags)
2861 return -EINVAL;
2862
2863 switch (cap->cap) {
fa6b7fe9
CH
2864 case KVM_CAP_S390_CSS_SUPPORT:
2865 if (!vcpu->kvm->arch.css_support) {
2866 vcpu->kvm->arch.css_support = 1;
c92ea7b9 2867 VM_EVENT(vcpu->kvm, 3, "%s", "ENABLE: CSS support");
fa6b7fe9
CH
2868 trace_kvm_s390_enable_css(vcpu->kvm);
2869 }
2870 r = 0;
2871 break;
d6712df9
CH
2872 default:
2873 r = -EINVAL;
2874 break;
2875 }
2876 return r;
2877}
2878
41408c28
TH
2879static long kvm_s390_guest_mem_op(struct kvm_vcpu *vcpu,
2880 struct kvm_s390_mem_op *mop)
2881{
2882 void __user *uaddr = (void __user *)mop->buf;
2883 void *tmpbuf = NULL;
2884 int r, srcu_idx;
2885 const u64 supported_flags = KVM_S390_MEMOP_F_INJECT_EXCEPTION
2886 | KVM_S390_MEMOP_F_CHECK_ONLY;
2887
2888 if (mop->flags & ~supported_flags)
2889 return -EINVAL;
2890
2891 if (mop->size > MEM_OP_MAX_SIZE)
2892 return -E2BIG;
2893
2894 if (!(mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY)) {
2895 tmpbuf = vmalloc(mop->size);
2896 if (!tmpbuf)
2897 return -ENOMEM;
2898 }
2899
2900 srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
2901
2902 switch (mop->op) {
2903 case KVM_S390_MEMOP_LOGICAL_READ:
2904 if (mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY) {
92c96321
DH
2905 r = check_gva_range(vcpu, mop->gaddr, mop->ar,
2906 mop->size, GACC_FETCH);
41408c28
TH
2907 break;
2908 }
2909 r = read_guest(vcpu, mop->gaddr, mop->ar, tmpbuf, mop->size);
2910 if (r == 0) {
2911 if (copy_to_user(uaddr, tmpbuf, mop->size))
2912 r = -EFAULT;
2913 }
2914 break;
2915 case KVM_S390_MEMOP_LOGICAL_WRITE:
2916 if (mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY) {
92c96321
DH
2917 r = check_gva_range(vcpu, mop->gaddr, mop->ar,
2918 mop->size, GACC_STORE);
41408c28
TH
2919 break;
2920 }
2921 if (copy_from_user(tmpbuf, uaddr, mop->size)) {
2922 r = -EFAULT;
2923 break;
2924 }
2925 r = write_guest(vcpu, mop->gaddr, mop->ar, tmpbuf, mop->size);
2926 break;
2927 default:
2928 r = -EINVAL;
2929 }
2930
2931 srcu_read_unlock(&vcpu->kvm->srcu, srcu_idx);
2932
2933 if (r > 0 && (mop->flags & KVM_S390_MEMOP_F_INJECT_EXCEPTION) != 0)
2934 kvm_s390_inject_prog_irq(vcpu, &vcpu->arch.pgm);
2935
2936 vfree(tmpbuf);
2937 return r;
2938}
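/*
 * Hypothetical userspace invocation of the memop interface above (a
 * sketch; buf must provide at least size bytes):
 *
 *	struct kvm_s390_mem_op op = {
 *		.gaddr = guest_addr,
 *		.size  = len,
 *		.op    = KVM_S390_MEMOP_LOGICAL_READ,
 *		.buf   = (__u64)buf,
 *		.ar    = 0,
 *	};
 *	r = ioctl(vcpu_fd, KVM_S390_MEM_OP, &op);
 */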
2939
b0c632db
HC
2940long kvm_arch_vcpu_ioctl(struct file *filp,
2941 unsigned int ioctl, unsigned long arg)
2942{
2943 struct kvm_vcpu *vcpu = filp->private_data;
2944 void __user *argp = (void __user *)arg;
800c1065 2945 int idx;
bc923cc9 2946 long r;
b0c632db 2947
93736624 2948 switch (ioctl) {
47b43c52
JF
2949 case KVM_S390_IRQ: {
2950 struct kvm_s390_irq s390irq;
2951
2952 r = -EFAULT;
2953 if (copy_from_user(&s390irq, argp, sizeof(s390irq)))
2954 break;
2955 r = kvm_s390_inject_vcpu(vcpu, &s390irq);
2956 break;
2957 }
93736624 2958 case KVM_S390_INTERRUPT: {
ba5c1e9b 2959 struct kvm_s390_interrupt s390int;
383d0b05 2960 struct kvm_s390_irq s390irq;
ba5c1e9b 2961
93736624 2962 r = -EFAULT;
ba5c1e9b 2963 if (copy_from_user(&s390int, argp, sizeof(s390int)))
93736624 2964 break;
383d0b05
JF
2965 if (s390int_to_s390irq(&s390int, &s390irq))
2966 return -EINVAL;
2967 r = kvm_s390_inject_vcpu(vcpu, &s390irq);
93736624 2968 break;
ba5c1e9b 2969 }
b0c632db 2970 case KVM_S390_STORE_STATUS:
800c1065 2971 idx = srcu_read_lock(&vcpu->kvm->srcu);
bc923cc9 2972 r = kvm_s390_vcpu_store_status(vcpu, arg);
800c1065 2973 srcu_read_unlock(&vcpu->kvm->srcu, idx);
bc923cc9 2974 break;
b0c632db
HC
2975 case KVM_S390_SET_INITIAL_PSW: {
2976 psw_t psw;
2977
bc923cc9 2978 r = -EFAULT;
b0c632db 2979 if (copy_from_user(&psw, argp, sizeof(psw)))
bc923cc9
AK
2980 break;
2981 r = kvm_arch_vcpu_ioctl_set_initial_psw(vcpu, psw);
2982 break;
b0c632db
HC
2983 }
2984 case KVM_S390_INITIAL_RESET:
bc923cc9
AK
2985 r = kvm_arch_vcpu_ioctl_initial_reset(vcpu);
2986 break;
14eebd91
CO
2987 case KVM_SET_ONE_REG:
2988 case KVM_GET_ONE_REG: {
2989 struct kvm_one_reg reg;
2990 r = -EFAULT;
2991 if (copy_from_user(&reg, argp, sizeof(reg)))
2992 break;
2993 if (ioctl == KVM_SET_ONE_REG)
2994 r = kvm_arch_vcpu_ioctl_set_one_reg(vcpu, &reg);
2995 else
2996 r = kvm_arch_vcpu_ioctl_get_one_reg(vcpu, &reg);
2997 break;
2998 }
27e0393f
CO
2999#ifdef CONFIG_KVM_S390_UCONTROL
3000 case KVM_S390_UCAS_MAP: {
3001 struct kvm_s390_ucas_mapping ucasmap;
3002
3003 if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
3004 r = -EFAULT;
3005 break;
3006 }
3007
3008 if (!kvm_is_ucontrol(vcpu->kvm)) {
3009 r = -EINVAL;
3010 break;
3011 }
3012
3013 r = gmap_map_segment(vcpu->arch.gmap, ucasmap.user_addr,
3014 ucasmap.vcpu_addr, ucasmap.length);
3015 break;
3016 }
3017 case KVM_S390_UCAS_UNMAP: {
3018 struct kvm_s390_ucas_mapping ucasmap;
3019
3020 if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
3021 r = -EFAULT;
3022 break;
3023 }
3024
3025 if (!kvm_is_ucontrol(vcpu->kvm)) {
3026 r = -EINVAL;
3027 break;
3028 }
3029
3030 r = gmap_unmap_segment(vcpu->arch.gmap, ucasmap.vcpu_addr,
3031 ucasmap.length);
3032 break;
3033 }
3034#endif
ccc7910f 3035 case KVM_S390_VCPU_FAULT: {
527e30b4 3036 r = gmap_fault(vcpu->arch.gmap, arg, 0);
ccc7910f
CO
3037 break;
3038 }
d6712df9
CH
3039 case KVM_ENABLE_CAP:
3040 {
3041 struct kvm_enable_cap cap;
3042 r = -EFAULT;
3043 if (copy_from_user(&cap, argp, sizeof(cap)))
3044 break;
3045 r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap);
3046 break;
3047 }
41408c28
TH
3048 case KVM_S390_MEM_OP: {
3049 struct kvm_s390_mem_op mem_op;
3050
3051 if (copy_from_user(&mem_op, argp, sizeof(mem_op)) == 0)
3052 r = kvm_s390_guest_mem_op(vcpu, &mem_op);
3053 else
3054 r = -EFAULT;
3055 break;
3056 }
816c7667
JF
3057 case KVM_S390_SET_IRQ_STATE: {
3058 struct kvm_s390_irq_state irq_state;
3059
3060 r = -EFAULT;
3061 if (copy_from_user(&irq_state, argp, sizeof(irq_state)))
3062 break;
3063 if (irq_state.len > VCPU_IRQS_MAX_BUF ||
3064 irq_state.len == 0 ||
3065 irq_state.len % sizeof(struct kvm_s390_irq) > 0) {
3066 r = -EINVAL;
3067 break;
3068 }
3069 r = kvm_s390_set_irq_state(vcpu,
3070 (void __user *) irq_state.buf,
3071 irq_state.len);
3072 break;
3073 }
3074 case KVM_S390_GET_IRQ_STATE: {
3075 struct kvm_s390_irq_state irq_state;
3076
3077 r = -EFAULT;
3078 if (copy_from_user(&irq_state, argp, sizeof(irq_state)))
3079 break;
3080 if (irq_state.len == 0) {
3081 r = -EINVAL;
3082 break;
3083 }
3084 r = kvm_s390_get_irq_state(vcpu,
3085 (__u8 __user *) irq_state.buf,
3086 irq_state.len);
3087 break;
3088 }
b0c632db 3089 default:
3e6afcf1 3090 r = -ENOTTY;
b0c632db 3091 }
bc923cc9 3092 return r;
b0c632db
HC
3093}
3094
5b1c1493
CO
3095int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
3096{
3097#ifdef CONFIG_KVM_S390_UCONTROL
3098 if ((vmf->pgoff == KVM_S390_SIE_PAGE_OFFSET)
3099 && (kvm_is_ucontrol(vcpu->kvm))) {
3100 vmf->page = virt_to_page(vcpu->arch.sie_block);
3101 get_page(vmf->page);
3102 return 0;
3103 }
3104#endif
3105 return VM_FAULT_SIGBUS;
3106}
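/*
 * This fault handler backs mmap() on the vcpu fd for ucontrol VMs; a
 * hypothetical userspace mapping of the SIE block (a sketch, mmap
 * offsets are in bytes, KVM_S390_SIE_PAGE_OFFSET is in pages):
 *
 *	sie = mmap(NULL, getpagesize(), PROT_READ | PROT_WRITE,
 *		   MAP_SHARED, vcpu_fd,
 *		   KVM_S390_SIE_PAGE_OFFSET * getpagesize());
 */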
3107
5587027c
AK
3108int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
3109 unsigned long npages)
db3fe4eb
TY
3110{
3111 return 0;
3112}
3113
b0c632db 3114/* Section: memory related */
f7784b8e
MT
3115int kvm_arch_prepare_memory_region(struct kvm *kvm,
3116 struct kvm_memory_slot *memslot,
09170a49 3117 const struct kvm_userspace_memory_region *mem,
7b6195a9 3118 enum kvm_mr_change change)
b0c632db 3119{
dd2887e7
NW
3120 /* A few sanity checks. Memory slots have to start and end at a
3121 segment boundary (1MB). The memory in userland may be fragmented
3122 into various different vmas. It is okay to mmap() and munmap()
3123 stuff in this slot after doing this call at any time */
b0c632db 3124
598841ca 3125 if (mem->userspace_addr & 0xffffful)
b0c632db
HC
3126 return -EINVAL;
3127
598841ca 3128 if (mem->memory_size & 0xffffful)
b0c632db
HC
3129 return -EINVAL;
3130
a3a92c31
DD
3131 if (mem->guest_phys_addr + mem->memory_size > kvm->arch.mem_limit)
3132 return -EINVAL;
3133
f7784b8e
MT
3134 return 0;
3135}
3136
3137void kvm_arch_commit_memory_region(struct kvm *kvm,
09170a49 3138 const struct kvm_userspace_memory_region *mem,
8482644a 3139 const struct kvm_memory_slot *old,
f36f3f28 3140 const struct kvm_memory_slot *new,
8482644a 3141 enum kvm_mr_change change)
f7784b8e 3142{
f7850c92 3143 int rc;
f7784b8e 3144
2cef4deb
CB
3145 /* If the basics of the memslot do not change, we do not want
3146 * to update the gmap. Every update causes several unnecessary
3147 * segment translation exceptions. This is usually handled just
3148 * fine by the normal fault handler + gmap, but it will also
3149 * cause faults on the prefix page of running guest CPUs.
3150 */
3151 if (old->userspace_addr == mem->userspace_addr &&
3152 old->base_gfn * PAGE_SIZE == mem->guest_phys_addr &&
3153 old->npages * PAGE_SIZE == mem->memory_size)
3154 return;
598841ca
CO
3155
3156 rc = gmap_map_segment(kvm->arch.gmap, mem->userspace_addr,
3157 mem->guest_phys_addr, mem->memory_size);
3158 if (rc)
ea2cdd27 3159 pr_warn("failed to commit memory region\n");
598841ca 3160 return;
b0c632db
HC
3161}
3162
60a37709
AY
3163static inline unsigned long nonhyp_mask(int i)
3164{
3165 unsigned int nonhyp_fai = (sclp.hmfai << i * 2) >> 30;
3166
3167 return 0x0000ffffffffffffUL >> (nonhyp_fai << 4);
3168}
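/*
 * Worked example: each 2-bit field of sclp.hmfai (MSB first) selects
 * how much of one facility word stays visible, and the returned mask
 * is 0x0000ffffffffffffUL >> (nonhyp_fai * 16). For i == 0, nonhyp_fai
 * is the top two bits of hmfai:
 *	nonhyp_fai == 0 -> mask 0x0000ffffffffffffUL (keep all 48 bits)
 *	nonhyp_fai == 1 -> mask 0x00000000ffffffffUL
 *	nonhyp_fai == 3 -> mask 0x0000000000000000UL (keep none)
 */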
3169
3491caf2
CB
3170void kvm_arch_vcpu_block_finish(struct kvm_vcpu *vcpu)
3171{
3172 vcpu->valid_wakeup = false;
3173}
3174
b0c632db
HC
3175static int __init kvm_s390_init(void)
3176{
60a37709
AY
3177 int i;
3178
07197fd0
DH
3179 if (!sclp.has_sief2) {
3180 pr_info("SIE not available\n");
3181 return -ENODEV;
3182 }
3183
60a37709
AY
3184 for (i = 0; i < 16; i++)
3185 kvm_s390_fac_list_mask[i] |=
3186 S390_lowcore.stfle_fac_list[i] & nonhyp_mask(i);
3187
9d8d5786 3188 return kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
b0c632db
HC
3189}
3190
3191static void __exit kvm_s390_exit(void)
3192{
3193 kvm_exit();
3194}
3195
3196module_init(kvm_s390_init);
3197module_exit(kvm_s390_exit);
566af940
CH
3198
3199/*
3200 * Enable autoloading of the kvm module.
3201 * Note that we add the module alias here instead of virt/kvm/kvm_main.c
3202 * since x86 takes a different approach.
3203 */
3204#include <linux/miscdevice.h>
3205MODULE_ALIAS_MISCDEV(KVM_MINOR);
3206MODULE_ALIAS("devname:kvm");