/*
 * hosting zSeries kernel virtual machines
 *
 * Copyright IBM Corp. 2008, 2009
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (version 2 only)
 * as published by the Free Software Foundation.
 *
 *    Author(s): Carsten Otte <cotte@de.ibm.com>
 *               Christian Borntraeger <borntraeger@de.ibm.com>
 *               Heiko Carstens <heiko.carstens@de.ibm.com>
 *               Christian Ehrhardt <ehrhardt@de.ibm.com>
 *               Jason J. Herne <jjherne@us.ibm.com>
 */

#include <linux/compiler.h>
#include <linux/err.h>
#include <linux/fs.h>
#include <linux/hrtimer.h>
#include <linux/init.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/timer.h>
#include <linux/vmalloc.h>
#include <linux/bitmap.h>
#include <asm/asm-offsets.h>
#include <asm/lowcore.h>
#include <asm/etr.h>
#include <asm/pgtable.h>
#include <asm/gmap.h>
#include <asm/nmi.h>
#include <asm/switch_to.h>
#include <asm/isc.h>
#include <asm/sclp.h>
#include <asm/cpacf.h>
#include "kvm-s390.h"
#include "gaccess.h"

#define KMSG_COMPONENT "kvm-s390"
#undef pr_fmt
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#define CREATE_TRACE_POINTS
#include "trace.h"
#include "trace-s390.h"

#define MEM_OP_MAX_SIZE 65536	/* Maximum transfer size for KVM_S390_MEM_OP */
#define LOCAL_IRQS 32
#define VCPU_IRQS_MAX_BUF (sizeof(struct kvm_s390_irq) * \
                           (KVM_MAX_VCPUS + LOCAL_IRQS))

#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU

struct kvm_stats_debugfs_item debugfs_entries[] = {
        { "userspace_handled", VCPU_STAT(exit_userspace) },
        { "exit_null", VCPU_STAT(exit_null) },
        { "exit_validity", VCPU_STAT(exit_validity) },
        { "exit_stop_request", VCPU_STAT(exit_stop_request) },
        { "exit_external_request", VCPU_STAT(exit_external_request) },
        { "exit_external_interrupt", VCPU_STAT(exit_external_interrupt) },
        { "exit_instruction", VCPU_STAT(exit_instruction) },
        { "exit_program_interruption", VCPU_STAT(exit_program_interruption) },
        { "exit_instr_and_program_int", VCPU_STAT(exit_instr_and_program) },
        { "exit_operation_exception", VCPU_STAT(exit_operation_exception) },
        { "halt_successful_poll", VCPU_STAT(halt_successful_poll) },
        { "halt_attempted_poll", VCPU_STAT(halt_attempted_poll) },
        { "halt_poll_invalid", VCPU_STAT(halt_poll_invalid) },
        { "halt_wakeup", VCPU_STAT(halt_wakeup) },
        { "instruction_lctlg", VCPU_STAT(instruction_lctlg) },
        { "instruction_lctl", VCPU_STAT(instruction_lctl) },
        { "instruction_stctl", VCPU_STAT(instruction_stctl) },
        { "instruction_stctg", VCPU_STAT(instruction_stctg) },
        { "deliver_emergency_signal", VCPU_STAT(deliver_emergency_signal) },
        { "deliver_external_call", VCPU_STAT(deliver_external_call) },
        { "deliver_service_signal", VCPU_STAT(deliver_service_signal) },
        { "deliver_virtio_interrupt", VCPU_STAT(deliver_virtio_interrupt) },
        { "deliver_stop_signal", VCPU_STAT(deliver_stop_signal) },
        { "deliver_prefix_signal", VCPU_STAT(deliver_prefix_signal) },
        { "deliver_restart_signal", VCPU_STAT(deliver_restart_signal) },
        { "deliver_program_interruption", VCPU_STAT(deliver_program_int) },
        { "exit_wait_state", VCPU_STAT(exit_wait_state) },
        { "instruction_pfmf", VCPU_STAT(instruction_pfmf) },
        { "instruction_stidp", VCPU_STAT(instruction_stidp) },
        { "instruction_spx", VCPU_STAT(instruction_spx) },
        { "instruction_stpx", VCPU_STAT(instruction_stpx) },
        { "instruction_stap", VCPU_STAT(instruction_stap) },
        { "instruction_storage_key", VCPU_STAT(instruction_storage_key) },
        { "instruction_ipte_interlock", VCPU_STAT(instruction_ipte_interlock) },
        { "instruction_stsch", VCPU_STAT(instruction_stsch) },
        { "instruction_chsc", VCPU_STAT(instruction_chsc) },
        { "instruction_essa", VCPU_STAT(instruction_essa) },
        { "instruction_stsi", VCPU_STAT(instruction_stsi) },
        { "instruction_stfl", VCPU_STAT(instruction_stfl) },
        { "instruction_tprot", VCPU_STAT(instruction_tprot) },
        { "instruction_sthyi", VCPU_STAT(instruction_sthyi) },
        { "instruction_sigp_sense", VCPU_STAT(instruction_sigp_sense) },
        { "instruction_sigp_sense_running", VCPU_STAT(instruction_sigp_sense_running) },
        { "instruction_sigp_external_call", VCPU_STAT(instruction_sigp_external_call) },
        { "instruction_sigp_emergency", VCPU_STAT(instruction_sigp_emergency) },
        { "instruction_sigp_cond_emergency", VCPU_STAT(instruction_sigp_cond_emergency) },
        { "instruction_sigp_start", VCPU_STAT(instruction_sigp_start) },
        { "instruction_sigp_stop", VCPU_STAT(instruction_sigp_stop) },
        { "instruction_sigp_stop_store_status", VCPU_STAT(instruction_sigp_stop_store_status) },
        { "instruction_sigp_store_status", VCPU_STAT(instruction_sigp_store_status) },
        { "instruction_sigp_store_adtl_status", VCPU_STAT(instruction_sigp_store_adtl_status) },
        { "instruction_sigp_set_arch", VCPU_STAT(instruction_sigp_arch) },
        { "instruction_sigp_set_prefix", VCPU_STAT(instruction_sigp_prefix) },
        { "instruction_sigp_restart", VCPU_STAT(instruction_sigp_restart) },
        { "instruction_sigp_cpu_reset", VCPU_STAT(instruction_sigp_cpu_reset) },
        { "instruction_sigp_init_cpu_reset", VCPU_STAT(instruction_sigp_init_cpu_reset) },
        { "instruction_sigp_unknown", VCPU_STAT(instruction_sigp_unknown) },
        { "diagnose_10", VCPU_STAT(diagnose_10) },
        { "diagnose_44", VCPU_STAT(diagnose_44) },
        { "diagnose_9c", VCPU_STAT(diagnose_9c) },
        { "diagnose_258", VCPU_STAT(diagnose_258) },
        { "diagnose_308", VCPU_STAT(diagnose_308) },
        { "diagnose_500", VCPU_STAT(diagnose_500) },
        { NULL }
};

/* upper facilities limit for kvm */
unsigned long kvm_s390_fac_list_mask[16] = {
        0xffe6000000000000UL,
        0x005e000000000000UL,
};

unsigned long kvm_s390_fac_list_mask_size(void)
{
        BUILD_BUG_ON(ARRAY_SIZE(kvm_s390_fac_list_mask) > S390_ARCH_FAC_MASK_SIZE_U64);
        return ARRAY_SIZE(kvm_s390_fac_list_mask);
}

/* available cpu features supported by kvm */
static DECLARE_BITMAP(kvm_s390_available_cpu_feat, KVM_S390_VM_CPU_FEAT_NR_BITS);
/* available subfunctions indicated via query / "test bit" */
static struct kvm_s390_vm_cpu_subfunc kvm_s390_available_subfunc;

static struct gmap_notifier gmap_notifier;
debug_info_t *kvm_s390_dbf;

/* Section: not file related */
int kvm_arch_hardware_enable(void)
{
        /* every s390 is virtualization enabled ;-) */
        return 0;
}

static void kvm_gmap_notifier(struct gmap *gmap, unsigned long address);

/*
 * This callback is executed during stop_machine(). All CPUs are therefore
 * temporarily stopped. In order not to change guest behavior, we have to
 * disable preemption whenever we touch the epoch of kvm and the VCPUs,
 * so a CPU won't be stopped while calculating with the epoch.
 */
static int kvm_clock_sync(struct notifier_block *notifier, unsigned long val,
                          void *v)
{
        struct kvm *kvm;
        struct kvm_vcpu *vcpu;
        int i;
        unsigned long long *delta = v;

        list_for_each_entry(kvm, &vm_list, vm_list) {
                kvm->arch.epoch -= *delta;
                kvm_for_each_vcpu(i, vcpu, kvm) {
                        vcpu->arch.sie_block->epoch -= *delta;
                        if (vcpu->arch.cputm_enabled)
                                vcpu->arch.cputm_start += *delta;
                }
        }
        return NOTIFY_OK;
}

static struct notifier_block kvm_clock_notifier = {
        .notifier_call = kvm_clock_sync,
};

int kvm_arch_hardware_setup(void)
{
        gmap_notifier.notifier_call = kvm_gmap_notifier;
        gmap_register_ipte_notifier(&gmap_notifier);
        atomic_notifier_chain_register(&s390_epoch_delta_notifier,
                                       &kvm_clock_notifier);
        return 0;
}

void kvm_arch_hardware_unsetup(void)
{
        gmap_unregister_ipte_notifier(&gmap_notifier);
        atomic_notifier_chain_unregister(&s390_epoch_delta_notifier,
                                         &kvm_clock_notifier);
}

static void allow_cpu_feat(unsigned long nr)
{
        set_bit_inv(nr, kvm_s390_available_cpu_feat);
}

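/*
 * PERFORM LOCKED OPERATION (PLO) "test bit" query: with bit 0x100 set in
 * the function code, PLO only tests whether the subfunction given by the
 * low byte is installed; condition code 0 means it is.
 */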
static inline int plo_test_bit(unsigned char nr)
{
        register unsigned long r0 asm("0") = (unsigned long) nr | 0x100;
        int cc = 3; /* subfunction not available */

        asm volatile(
                /* Parameter registers are ignored for "test bit" */
                "       plo     0,0,0,0(0)\n"
                "       ipm     %0\n"
                "       srl     %0,28\n"
                : "=d" (cc)
                : "d" (r0)
                : "cc");
        return cc == 0;
}

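/*
 * Probe which optional CPU features and query-type subfunctions the host
 * provides; the results are later reported to user space through the
 * KVM_S390_VM_CPU_MODEL attributes.
 */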
static void kvm_s390_cpu_feat_init(void)
{
        int i;

        for (i = 0; i < 256; ++i) {
                if (plo_test_bit(i))
                        kvm_s390_available_subfunc.plo[i >> 3] |= 0x80 >> (i & 7);
        }

        if (test_facility(28)) /* TOD-clock steering */
                etr_ptff(kvm_s390_available_subfunc.ptff, ETR_PTFF_QAF);

        if (test_facility(17)) { /* MSA */
                __cpacf_query(CPACF_KMAC, kvm_s390_available_subfunc.kmac);
                __cpacf_query(CPACF_KMC, kvm_s390_available_subfunc.kmc);
                __cpacf_query(CPACF_KM, kvm_s390_available_subfunc.km);
                __cpacf_query(CPACF_KIMD, kvm_s390_available_subfunc.kimd);
                __cpacf_query(CPACF_KLMD, kvm_s390_available_subfunc.klmd);
        }
        if (test_facility(76)) /* MSA3 */
                __cpacf_query(CPACF_PCKMO, kvm_s390_available_subfunc.pckmo);
        if (test_facility(77)) { /* MSA4 */
                __cpacf_query(CPACF_KMCTR, kvm_s390_available_subfunc.kmctr);
                __cpacf_query(CPACF_KMF, kvm_s390_available_subfunc.kmf);
                __cpacf_query(CPACF_KMO, kvm_s390_available_subfunc.kmo);
                __cpacf_query(CPACF_PCC, kvm_s390_available_subfunc.pcc);
        }
        if (test_facility(57)) /* MSA5 */
                __cpacf_query(CPACF_PPNO, kvm_s390_available_subfunc.ppno);

        if (MACHINE_HAS_ESOP)
                allow_cpu_feat(KVM_S390_VM_CPU_FEAT_ESOP);
}

int kvm_arch_init(void *opaque)
{
        kvm_s390_dbf = debug_register("kvm-trace", 32, 1, 7 * sizeof(long));
        if (!kvm_s390_dbf)
                return -ENOMEM;

        if (debug_register_view(kvm_s390_dbf, &debug_sprintf_view)) {
                debug_unregister(kvm_s390_dbf);
                return -ENOMEM;
        }

        kvm_s390_cpu_feat_init();

        /* Register floating interrupt controller interface. */
        return kvm_register_device_ops(&kvm_flic_ops, KVM_DEV_TYPE_FLIC);
}

void kvm_arch_exit(void)
{
        debug_unregister(kvm_s390_dbf);
}

/* Section: device related */
long kvm_arch_dev_ioctl(struct file *filp,
                        unsigned int ioctl, unsigned long arg)
{
        if (ioctl == KVM_S390_ENABLE_SIE)
                return s390_enable_sie();
        return -EINVAL;
}

int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
{
        int r;

        switch (ext) {
        case KVM_CAP_S390_PSW:
        case KVM_CAP_S390_GMAP:
        case KVM_CAP_SYNC_MMU:
#ifdef CONFIG_KVM_S390_UCONTROL
        case KVM_CAP_S390_UCONTROL:
#endif
        case KVM_CAP_ASYNC_PF:
        case KVM_CAP_SYNC_REGS:
        case KVM_CAP_ONE_REG:
        case KVM_CAP_ENABLE_CAP:
        case KVM_CAP_S390_CSS_SUPPORT:
        case KVM_CAP_IOEVENTFD:
        case KVM_CAP_DEVICE_CTRL:
        case KVM_CAP_ENABLE_CAP_VM:
        case KVM_CAP_S390_IRQCHIP:
        case KVM_CAP_VM_ATTRIBUTES:
        case KVM_CAP_MP_STATE:
        case KVM_CAP_S390_INJECT_IRQ:
        case KVM_CAP_S390_USER_SIGP:
        case KVM_CAP_S390_USER_STSI:
        case KVM_CAP_S390_SKEYS:
        case KVM_CAP_S390_IRQ_STATE:
                r = 1;
                break;
        case KVM_CAP_S390_MEM_OP:
                r = MEM_OP_MAX_SIZE;
                break;
        case KVM_CAP_NR_VCPUS:
        case KVM_CAP_MAX_VCPUS:
                r = KVM_S390_BSCA_CPU_SLOTS;
                if (sclp.has_esca && sclp.has_64bscao)
                        r = KVM_S390_ESCA_CPU_SLOTS;
                break;
        case KVM_CAP_NR_MEMSLOTS:
                r = KVM_USER_MEM_SLOTS;
                break;
        case KVM_CAP_S390_COW:
                r = MACHINE_HAS_ESOP;
                break;
        case KVM_CAP_S390_VECTOR_REGISTERS:
                r = MACHINE_HAS_VX;
                break;
        case KVM_CAP_S390_RI:
                r = test_facility(64);
                break;
        default:
                r = 0;
        }
        return r;
}

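/*
 * Transfer the per-page dirty state from the gmap (guest address space)
 * into KVM's dirty bitmap for one memory slot.
 */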
static void kvm_s390_sync_dirty_log(struct kvm *kvm,
                                    struct kvm_memory_slot *memslot)
{
        gfn_t cur_gfn, last_gfn;
        unsigned long address;
        struct gmap *gmap = kvm->arch.gmap;

        /* Loop over all guest pages; last_gfn is the first gfn past the slot */
        last_gfn = memslot->base_gfn + memslot->npages;
        for (cur_gfn = memslot->base_gfn; cur_gfn < last_gfn; cur_gfn++) {
                address = gfn_to_hva_memslot(memslot, cur_gfn);

                if (test_and_clear_guest_dirty(gmap->mm, address))
                        mark_page_dirty(kvm, cur_gfn);
                if (fatal_signal_pending(current))
                        return;
                cond_resched();
        }
}

/* Section: vm related */
static void sca_del_vcpu(struct kvm_vcpu *vcpu);

/*
 * Get (and clear) the dirty memory log for a memory slot.
 */
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
                               struct kvm_dirty_log *log)
{
        int r;
        unsigned long n;
        struct kvm_memslots *slots;
        struct kvm_memory_slot *memslot;
        int is_dirty = 0;

        mutex_lock(&kvm->slots_lock);

        r = -EINVAL;
        if (log->slot >= KVM_USER_MEM_SLOTS)
                goto out;

        slots = kvm_memslots(kvm);
        memslot = id_to_memslot(slots, log->slot);
        r = -ENOENT;
        if (!memslot->dirty_bitmap)
                goto out;

        kvm_s390_sync_dirty_log(kvm, memslot);
        r = kvm_get_dirty_log(kvm, log, &is_dirty);
        if (r)
                goto out;

        /* Clear the dirty log */
        if (is_dirty) {
                n = kvm_dirty_bitmap_bytes(memslot);
                memset(memslot->dirty_bitmap, 0, n);
        }
        r = 0;
out:
        mutex_unlock(&kvm->slots_lock);
        return r;
}

static int kvm_vm_ioctl_enable_cap(struct kvm *kvm, struct kvm_enable_cap *cap)
{
        int r;

        if (cap->flags)
                return -EINVAL;

        switch (cap->cap) {
        case KVM_CAP_S390_IRQCHIP:
                VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_IRQCHIP");
                kvm->arch.use_irqchip = 1;
                r = 0;
                break;
        case KVM_CAP_S390_USER_SIGP:
                VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_USER_SIGP");
                kvm->arch.user_sigp = 1;
                r = 0;
                break;
        case KVM_CAP_S390_VECTOR_REGISTERS:
                mutex_lock(&kvm->lock);
                if (atomic_read(&kvm->online_vcpus)) {
                        r = -EBUSY;
                } else if (MACHINE_HAS_VX) {
                        set_kvm_facility(kvm->arch.model.fac_mask, 129);
                        set_kvm_facility(kvm->arch.model.fac_list, 129);
                        r = 0;
                } else
                        r = -EINVAL;
                mutex_unlock(&kvm->lock);
                VM_EVENT(kvm, 3, "ENABLE: CAP_S390_VECTOR_REGISTERS %s",
                         r ? "(not available)" : "(success)");
                break;
        case KVM_CAP_S390_RI:
                r = -EINVAL;
                mutex_lock(&kvm->lock);
                if (atomic_read(&kvm->online_vcpus)) {
                        r = -EBUSY;
                } else if (test_facility(64)) {
                        set_kvm_facility(kvm->arch.model.fac_mask, 64);
                        set_kvm_facility(kvm->arch.model.fac_list, 64);
                        r = 0;
                }
                mutex_unlock(&kvm->lock);
                VM_EVENT(kvm, 3, "ENABLE: CAP_S390_RI %s",
                         r ? "(not available)" : "(success)");
                break;
        case KVM_CAP_S390_USER_STSI:
                VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_USER_STSI");
                kvm->arch.user_stsi = 1;
                r = 0;
                break;
        default:
                r = -EINVAL;
                break;
        }
        return r;
}

static int kvm_s390_get_mem_control(struct kvm *kvm, struct kvm_device_attr *attr)
{
        int ret;

        switch (attr->attr) {
        case KVM_S390_VM_MEM_LIMIT_SIZE:
                ret = 0;
                VM_EVENT(kvm, 3, "QUERY: max guest memory: %lu bytes",
                         kvm->arch.mem_limit);
                if (put_user(kvm->arch.mem_limit, (u64 __user *)attr->addr))
                        ret = -EFAULT;
                break;
        default:
                ret = -ENXIO;
                break;
        }
        return ret;
}

static int kvm_s390_set_mem_control(struct kvm *kvm, struct kvm_device_attr *attr)
{
        int ret;
        unsigned int idx;

        switch (attr->attr) {
        case KVM_S390_VM_MEM_ENABLE_CMMA:
                /* enable CMMA only for z10 and later (EDAT_1) */
                ret = -EINVAL;
                if (!MACHINE_IS_LPAR || !MACHINE_HAS_EDAT1)
                        break;

                ret = -EBUSY;
                VM_EVENT(kvm, 3, "%s", "ENABLE: CMMA support");
                mutex_lock(&kvm->lock);
                if (atomic_read(&kvm->online_vcpus) == 0) {
                        kvm->arch.use_cmma = 1;
                        ret = 0;
                }
                mutex_unlock(&kvm->lock);
                break;
        case KVM_S390_VM_MEM_CLR_CMMA:
                ret = -EINVAL;
                if (!kvm->arch.use_cmma)
                        break;

                VM_EVENT(kvm, 3, "%s", "RESET: CMMA states");
                mutex_lock(&kvm->lock);
                idx = srcu_read_lock(&kvm->srcu);
                s390_reset_cmma(kvm->arch.gmap->mm);
                srcu_read_unlock(&kvm->srcu, idx);
                mutex_unlock(&kvm->lock);
                ret = 0;
                break;
        case KVM_S390_VM_MEM_LIMIT_SIZE: {
                unsigned long new_limit;

                if (kvm_is_ucontrol(kvm))
                        return -EINVAL;

                if (get_user(new_limit, (u64 __user *)attr->addr))
                        return -EFAULT;

                if (kvm->arch.mem_limit != KVM_S390_NO_MEM_LIMIT &&
                    new_limit > kvm->arch.mem_limit)
                        return -E2BIG;

                if (!new_limit)
                        return -EINVAL;

                /* gmap_alloc takes last usable address */
                if (new_limit != KVM_S390_NO_MEM_LIMIT)
                        new_limit -= 1;

                ret = -EBUSY;
                mutex_lock(&kvm->lock);
                if (atomic_read(&kvm->online_vcpus) == 0) {
                        /* gmap_alloc will round the limit up */
                        struct gmap *new = gmap_alloc(current->mm, new_limit);

                        if (!new) {
                                ret = -ENOMEM;
                        } else {
                                gmap_free(kvm->arch.gmap);
                                new->private = kvm;
                                kvm->arch.gmap = new;
                                ret = 0;
                        }
                }
                mutex_unlock(&kvm->lock);
                VM_EVENT(kvm, 3, "SET: max guest address: %lu", new_limit);
                VM_EVENT(kvm, 3, "New guest asce: 0x%pK",
                         (void *) kvm->arch.gmap->asce);
                break;
        }
        default:
                ret = -ENXIO;
                break;
        }
        return ret;
}

static void kvm_s390_vcpu_crypto_setup(struct kvm_vcpu *vcpu);

static int kvm_s390_vm_set_crypto(struct kvm *kvm, struct kvm_device_attr *attr)
{
        struct kvm_vcpu *vcpu;
        int i;

        if (!test_kvm_facility(kvm, 76))
                return -EINVAL;

        mutex_lock(&kvm->lock);
        switch (attr->attr) {
        case KVM_S390_VM_CRYPTO_ENABLE_AES_KW:
                get_random_bytes(
                        kvm->arch.crypto.crycb->aes_wrapping_key_mask,
                        sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
                kvm->arch.crypto.aes_kw = 1;
                VM_EVENT(kvm, 3, "%s", "ENABLE: AES keywrapping support");
                break;
        case KVM_S390_VM_CRYPTO_ENABLE_DEA_KW:
                get_random_bytes(
                        kvm->arch.crypto.crycb->dea_wrapping_key_mask,
                        sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));
                kvm->arch.crypto.dea_kw = 1;
                VM_EVENT(kvm, 3, "%s", "ENABLE: DEA keywrapping support");
                break;
        case KVM_S390_VM_CRYPTO_DISABLE_AES_KW:
                kvm->arch.crypto.aes_kw = 0;
                memset(kvm->arch.crypto.crycb->aes_wrapping_key_mask, 0,
                        sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
                VM_EVENT(kvm, 3, "%s", "DISABLE: AES keywrapping support");
                break;
        case KVM_S390_VM_CRYPTO_DISABLE_DEA_KW:
                kvm->arch.crypto.dea_kw = 0;
                memset(kvm->arch.crypto.crycb->dea_wrapping_key_mask, 0,
                        sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));
                VM_EVENT(kvm, 3, "%s", "DISABLE: DEA keywrapping support");
                break;
        default:
                mutex_unlock(&kvm->lock);
                return -ENXIO;
        }

        kvm_for_each_vcpu(i, vcpu, kvm) {
                kvm_s390_vcpu_crypto_setup(vcpu);
                exit_sie(vcpu);
        }
        mutex_unlock(&kvm->lock);
        return 0;
}

static int kvm_s390_set_tod_high(struct kvm *kvm, struct kvm_device_attr *attr)
{
        u8 gtod_high;

        if (copy_from_user(&gtod_high, (void __user *)attr->addr,
                           sizeof(gtod_high)))
                return -EFAULT;

        if (gtod_high != 0)
                return -EINVAL;
        VM_EVENT(kvm, 3, "SET: TOD extension: 0x%x", gtod_high);

        return 0;
}

static int kvm_s390_set_tod_low(struct kvm *kvm, struct kvm_device_attr *attr)
{
        u64 gtod;

        if (copy_from_user(&gtod, (void __user *)attr->addr, sizeof(gtod)))
                return -EFAULT;

        kvm_s390_set_tod_clock(kvm, gtod);
        VM_EVENT(kvm, 3, "SET: TOD base: 0x%llx", gtod);
        return 0;
}

static int kvm_s390_set_tod(struct kvm *kvm, struct kvm_device_attr *attr)
{
        int ret;

        if (attr->flags)
                return -EINVAL;

        switch (attr->attr) {
        case KVM_S390_VM_TOD_HIGH:
                ret = kvm_s390_set_tod_high(kvm, attr);
                break;
        case KVM_S390_VM_TOD_LOW:
                ret = kvm_s390_set_tod_low(kvm, attr);
                break;
        default:
                ret = -ENXIO;
                break;
        }
        return ret;
}

static int kvm_s390_get_tod_high(struct kvm *kvm, struct kvm_device_attr *attr)
{
        u8 gtod_high = 0;

        if (copy_to_user((void __user *)attr->addr, &gtod_high,
                         sizeof(gtod_high)))
                return -EFAULT;
        VM_EVENT(kvm, 3, "QUERY: TOD extension: 0x%x", gtod_high);

        return 0;
}

static int kvm_s390_get_tod_low(struct kvm *kvm, struct kvm_device_attr *attr)
{
        u64 gtod;

        gtod = kvm_s390_get_tod_clock_fast(kvm);
        if (copy_to_user((void __user *)attr->addr, &gtod, sizeof(gtod)))
                return -EFAULT;
        VM_EVENT(kvm, 3, "QUERY: TOD base: 0x%llx", gtod);

        return 0;
}

static int kvm_s390_get_tod(struct kvm *kvm, struct kvm_device_attr *attr)
{
        int ret;

        if (attr->flags)
                return -EINVAL;

        switch (attr->attr) {
        case KVM_S390_VM_TOD_HIGH:
                ret = kvm_s390_get_tod_high(kvm, attr);
                break;
        case KVM_S390_VM_TOD_LOW:
                ret = kvm_s390_get_tod_low(kvm, attr);
                break;
        default:
                ret = -ENXIO;
                break;
        }
        return ret;
}

static int kvm_s390_set_processor(struct kvm *kvm, struct kvm_device_attr *attr)
{
        struct kvm_s390_vm_cpu_processor *proc;
        u16 lowest_ibc, unblocked_ibc;
        int ret = 0;

        mutex_lock(&kvm->lock);
        if (atomic_read(&kvm->online_vcpus)) {
                ret = -EBUSY;
                goto out;
        }
        proc = kzalloc(sizeof(*proc), GFP_KERNEL);
        if (!proc) {
                ret = -ENOMEM;
                goto out;
        }
        if (!copy_from_user(proc, (void __user *)attr->addr,
                            sizeof(*proc))) {
                kvm->arch.model.cpuid = proc->cpuid;
                lowest_ibc = sclp.ibc >> 16 & 0xfff;
                unblocked_ibc = sclp.ibc & 0xfff;
                if (lowest_ibc) {
                        if (proc->ibc > unblocked_ibc)
                                kvm->arch.model.ibc = unblocked_ibc;
                        else if (proc->ibc < lowest_ibc)
                                kvm->arch.model.ibc = lowest_ibc;
                        else
                                kvm->arch.model.ibc = proc->ibc;
                }
                memcpy(kvm->arch.model.fac_list, proc->fac_list,
                       S390_ARCH_FAC_LIST_SIZE_BYTE);
        } else
                ret = -EFAULT;
        kfree(proc);
out:
        mutex_unlock(&kvm->lock);
        return ret;
}

static int kvm_s390_set_processor_feat(struct kvm *kvm,
                                       struct kvm_device_attr *attr)
{
        struct kvm_s390_vm_cpu_feat data;
        int ret = -EBUSY;

        if (copy_from_user(&data, (void __user *)attr->addr, sizeof(data)))
                return -EFAULT;
        if (!bitmap_subset((unsigned long *) data.feat,
                           kvm_s390_available_cpu_feat,
                           KVM_S390_VM_CPU_FEAT_NR_BITS))
                return -EINVAL;

        mutex_lock(&kvm->lock);
        if (!atomic_read(&kvm->online_vcpus)) {
                bitmap_copy(kvm->arch.cpu_feat, (unsigned long *) data.feat,
                            KVM_S390_VM_CPU_FEAT_NR_BITS);
                ret = 0;
        }
        mutex_unlock(&kvm->lock);
        return ret;
}

static int kvm_s390_set_processor_subfunc(struct kvm *kvm,
                                          struct kvm_device_attr *attr)
{
        /*
         * Once supported by kernel + hw, we have to store the subfunctions
         * in kvm->arch and remember that user space configured them.
         */
        return -ENXIO;
}

static int kvm_s390_set_cpu_model(struct kvm *kvm, struct kvm_device_attr *attr)
{
        int ret = -ENXIO;

        switch (attr->attr) {
        case KVM_S390_VM_CPU_PROCESSOR:
                ret = kvm_s390_set_processor(kvm, attr);
                break;
        case KVM_S390_VM_CPU_PROCESSOR_FEAT:
                ret = kvm_s390_set_processor_feat(kvm, attr);
                break;
        case KVM_S390_VM_CPU_PROCESSOR_SUBFUNC:
                ret = kvm_s390_set_processor_subfunc(kvm, attr);
                break;
        }
        return ret;
}

static int kvm_s390_get_processor(struct kvm *kvm, struct kvm_device_attr *attr)
{
        struct kvm_s390_vm_cpu_processor *proc;
        int ret = 0;

        proc = kzalloc(sizeof(*proc), GFP_KERNEL);
        if (!proc) {
                ret = -ENOMEM;
                goto out;
        }
        proc->cpuid = kvm->arch.model.cpuid;
        proc->ibc = kvm->arch.model.ibc;
        memcpy(&proc->fac_list, kvm->arch.model.fac_list,
               S390_ARCH_FAC_LIST_SIZE_BYTE);
        if (copy_to_user((void __user *)attr->addr, proc, sizeof(*proc)))
                ret = -EFAULT;
        kfree(proc);
out:
        return ret;
}

static int kvm_s390_get_machine(struct kvm *kvm, struct kvm_device_attr *attr)
{
        struct kvm_s390_vm_cpu_machine *mach;
        int ret = 0;

        mach = kzalloc(sizeof(*mach), GFP_KERNEL);
        if (!mach) {
                ret = -ENOMEM;
                goto out;
        }
        get_cpu_id((struct cpuid *) &mach->cpuid);
        mach->ibc = sclp.ibc;
        memcpy(&mach->fac_mask, kvm->arch.model.fac_mask,
               S390_ARCH_FAC_LIST_SIZE_BYTE);
        memcpy((unsigned long *)&mach->fac_list, S390_lowcore.stfle_fac_list,
               S390_ARCH_FAC_LIST_SIZE_BYTE);
        if (copy_to_user((void __user *)attr->addr, mach, sizeof(*mach)))
                ret = -EFAULT;
        kfree(mach);
out:
        return ret;
}

static int kvm_s390_get_processor_feat(struct kvm *kvm,
                                       struct kvm_device_attr *attr)
{
        struct kvm_s390_vm_cpu_feat data;

        bitmap_copy((unsigned long *) data.feat, kvm->arch.cpu_feat,
                    KVM_S390_VM_CPU_FEAT_NR_BITS);
        if (copy_to_user((void __user *)attr->addr, &data, sizeof(data)))
                return -EFAULT;
        return 0;
}

static int kvm_s390_get_machine_feat(struct kvm *kvm,
                                     struct kvm_device_attr *attr)
{
        struct kvm_s390_vm_cpu_feat data;

        bitmap_copy((unsigned long *) data.feat,
                    kvm_s390_available_cpu_feat,
                    KVM_S390_VM_CPU_FEAT_NR_BITS);
        if (copy_to_user((void __user *)attr->addr, &data, sizeof(data)))
                return -EFAULT;
        return 0;
}

static int kvm_s390_get_processor_subfunc(struct kvm *kvm,
                                          struct kvm_device_attr *attr)
{
        /*
         * Once we can actually configure subfunctions (kernel + hw support),
         * we have to check if they were already set by user space, if so copy
         * them from kvm->arch.
         */
        return -ENXIO;
}

static int kvm_s390_get_machine_subfunc(struct kvm *kvm,
                                        struct kvm_device_attr *attr)
{
        if (copy_to_user((void __user *)attr->addr, &kvm_s390_available_subfunc,
            sizeof(struct kvm_s390_vm_cpu_subfunc)))
                return -EFAULT;
        return 0;
}

static int kvm_s390_get_cpu_model(struct kvm *kvm, struct kvm_device_attr *attr)
{
        int ret = -ENXIO;

        switch (attr->attr) {
        case KVM_S390_VM_CPU_PROCESSOR:
                ret = kvm_s390_get_processor(kvm, attr);
                break;
        case KVM_S390_VM_CPU_MACHINE:
                ret = kvm_s390_get_machine(kvm, attr);
                break;
        case KVM_S390_VM_CPU_PROCESSOR_FEAT:
                ret = kvm_s390_get_processor_feat(kvm, attr);
                break;
        case KVM_S390_VM_CPU_MACHINE_FEAT:
                ret = kvm_s390_get_machine_feat(kvm, attr);
                break;
        case KVM_S390_VM_CPU_PROCESSOR_SUBFUNC:
                ret = kvm_s390_get_processor_subfunc(kvm, attr);
                break;
        case KVM_S390_VM_CPU_MACHINE_SUBFUNC:
                ret = kvm_s390_get_machine_subfunc(kvm, attr);
                break;
        }
        return ret;
}

static int kvm_s390_vm_set_attr(struct kvm *kvm, struct kvm_device_attr *attr)
{
        int ret;

        switch (attr->group) {
        case KVM_S390_VM_MEM_CTRL:
                ret = kvm_s390_set_mem_control(kvm, attr);
                break;
        case KVM_S390_VM_TOD:
                ret = kvm_s390_set_tod(kvm, attr);
                break;
        case KVM_S390_VM_CPU_MODEL:
                ret = kvm_s390_set_cpu_model(kvm, attr);
                break;
        case KVM_S390_VM_CRYPTO:
                ret = kvm_s390_vm_set_crypto(kvm, attr);
                break;
        default:
                ret = -ENXIO;
                break;
        }

        return ret;
}

static int kvm_s390_vm_get_attr(struct kvm *kvm, struct kvm_device_attr *attr)
{
        int ret;

        switch (attr->group) {
        case KVM_S390_VM_MEM_CTRL:
                ret = kvm_s390_get_mem_control(kvm, attr);
                break;
        case KVM_S390_VM_TOD:
                ret = kvm_s390_get_tod(kvm, attr);
                break;
        case KVM_S390_VM_CPU_MODEL:
                ret = kvm_s390_get_cpu_model(kvm, attr);
                break;
        default:
                ret = -ENXIO;
                break;
        }

        return ret;
}

static int kvm_s390_vm_has_attr(struct kvm *kvm, struct kvm_device_attr *attr)
{
        int ret;

        switch (attr->group) {
        case KVM_S390_VM_MEM_CTRL:
                switch (attr->attr) {
                case KVM_S390_VM_MEM_ENABLE_CMMA:
                case KVM_S390_VM_MEM_CLR_CMMA:
                case KVM_S390_VM_MEM_LIMIT_SIZE:
                        ret = 0;
                        break;
                default:
                        ret = -ENXIO;
                        break;
                }
                break;
        case KVM_S390_VM_TOD:
                switch (attr->attr) {
                case KVM_S390_VM_TOD_LOW:
                case KVM_S390_VM_TOD_HIGH:
                        ret = 0;
                        break;
                default:
                        ret = -ENXIO;
                        break;
                }
                break;
        case KVM_S390_VM_CPU_MODEL:
                switch (attr->attr) {
                case KVM_S390_VM_CPU_PROCESSOR:
                case KVM_S390_VM_CPU_MACHINE:
                case KVM_S390_VM_CPU_PROCESSOR_FEAT:
                case KVM_S390_VM_CPU_MACHINE_FEAT:
                case KVM_S390_VM_CPU_MACHINE_SUBFUNC:
                        ret = 0;
                        break;
                /* configuring subfunctions is not supported yet */
                case KVM_S390_VM_CPU_PROCESSOR_SUBFUNC:
                default:
                        ret = -ENXIO;
                        break;
                }
                break;
        case KVM_S390_VM_CRYPTO:
                switch (attr->attr) {
                case KVM_S390_VM_CRYPTO_ENABLE_AES_KW:
                case KVM_S390_VM_CRYPTO_ENABLE_DEA_KW:
                case KVM_S390_VM_CRYPTO_DISABLE_AES_KW:
                case KVM_S390_VM_CRYPTO_DISABLE_DEA_KW:
                        ret = 0;
                        break;
                default:
                        ret = -ENXIO;
                        break;
                }
                break;
        default:
                ret = -ENXIO;
                break;
        }

        return ret;
}

static long kvm_s390_get_skeys(struct kvm *kvm, struct kvm_s390_skeys *args)
{
        uint8_t *keys;
        uint64_t hva;
        unsigned long curkey;
        int i, r = 0;

        if (args->flags != 0)
                return -EINVAL;

        /* Is this guest using storage keys? */
        if (!mm_use_skey(current->mm))
                return KVM_S390_GET_SKEYS_NONE;

        /* Enforce sane limit on memory allocation */
        if (args->count < 1 || args->count > KVM_S390_SKEYS_MAX)
                return -EINVAL;

        keys = kmalloc_array(args->count, sizeof(uint8_t),
                             GFP_KERNEL | __GFP_NOWARN);
        if (!keys)
                keys = vmalloc(sizeof(uint8_t) * args->count);
        if (!keys)
                return -ENOMEM;

        for (i = 0; i < args->count; i++) {
                hva = gfn_to_hva(kvm, args->start_gfn + i);
                if (kvm_is_error_hva(hva)) {
                        r = -EFAULT;
                        goto out;
                }

                curkey = get_guest_storage_key(current->mm, hva);
                if (IS_ERR_VALUE(curkey)) {
                        r = curkey;
                        goto out;
                }
                keys[i] = curkey;
        }

        r = copy_to_user((uint8_t __user *)args->skeydata_addr, keys,
                         sizeof(uint8_t) * args->count);
        if (r)
                r = -EFAULT;
out:
        kvfree(keys);
        return r;
}

static long kvm_s390_set_skeys(struct kvm *kvm, struct kvm_s390_skeys *args)
{
        uint8_t *keys;
        uint64_t hva;
        int i, r = 0;

        if (args->flags != 0)
                return -EINVAL;

        /* Enforce sane limit on memory allocation */
        if (args->count < 1 || args->count > KVM_S390_SKEYS_MAX)
                return -EINVAL;

        keys = kmalloc_array(args->count, sizeof(uint8_t),
                             GFP_KERNEL | __GFP_NOWARN);
        if (!keys)
                keys = vmalloc(sizeof(uint8_t) * args->count);
        if (!keys)
                return -ENOMEM;

        r = copy_from_user(keys, (uint8_t __user *)args->skeydata_addr,
                           sizeof(uint8_t) * args->count);
        if (r) {
                r = -EFAULT;
                goto out;
        }

        /* Enable storage key handling for the guest */
        r = s390_enable_skey();
        if (r)
                goto out;

        for (i = 0; i < args->count; i++) {
                hva = gfn_to_hva(kvm, args->start_gfn + i);
                if (kvm_is_error_hva(hva)) {
                        r = -EFAULT;
                        goto out;
                }

                /* Lowest order bit is reserved */
                if (keys[i] & 0x01) {
                        r = -EINVAL;
                        goto out;
                }

                r = set_guest_storage_key(current->mm, hva,
                                          (unsigned long)keys[i], 0);
                if (r)
                        goto out;
        }
out:
        kvfree(keys);
        return r;
}

long kvm_arch_vm_ioctl(struct file *filp,
                       unsigned int ioctl, unsigned long arg)
{
        struct kvm *kvm = filp->private_data;
        void __user *argp = (void __user *)arg;
        struct kvm_device_attr attr;
        int r;

        switch (ioctl) {
        case KVM_S390_INTERRUPT: {
                struct kvm_s390_interrupt s390int;

                r = -EFAULT;
                if (copy_from_user(&s390int, argp, sizeof(s390int)))
                        break;
                r = kvm_s390_inject_vm(kvm, &s390int);
                break;
        }
        case KVM_ENABLE_CAP: {
                struct kvm_enable_cap cap;

                r = -EFAULT;
                if (copy_from_user(&cap, argp, sizeof(cap)))
                        break;
                r = kvm_vm_ioctl_enable_cap(kvm, &cap);
                break;
        }
        case KVM_CREATE_IRQCHIP: {
                struct kvm_irq_routing_entry routing;

                r = -EINVAL;
                if (kvm->arch.use_irqchip) {
                        /* Set up dummy routing. */
                        memset(&routing, 0, sizeof(routing));
                        r = kvm_set_irq_routing(kvm, &routing, 0, 0);
                }
                break;
        }
        case KVM_SET_DEVICE_ATTR: {
                r = -EFAULT;
                if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
                        break;
                r = kvm_s390_vm_set_attr(kvm, &attr);
                break;
        }
        case KVM_GET_DEVICE_ATTR: {
                r = -EFAULT;
                if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
                        break;
                r = kvm_s390_vm_get_attr(kvm, &attr);
                break;
        }
        case KVM_HAS_DEVICE_ATTR: {
                r = -EFAULT;
                if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
                        break;
                r = kvm_s390_vm_has_attr(kvm, &attr);
                break;
        }
        case KVM_S390_GET_SKEYS: {
                struct kvm_s390_skeys args;

                r = -EFAULT;
                if (copy_from_user(&args, argp,
                                   sizeof(struct kvm_s390_skeys)))
                        break;
                r = kvm_s390_get_skeys(kvm, &args);
                break;
        }
        case KVM_S390_SET_SKEYS: {
                struct kvm_s390_skeys args;

                r = -EFAULT;
                if (copy_from_user(&args, argp,
                                   sizeof(struct kvm_s390_skeys)))
                        break;
                r = kvm_s390_set_skeys(kvm, &args);
                break;
        }
        default:
                r = -ENOTTY;
        }

        return r;
}

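/*
 * PQAP(QCI): query the adjunct processor configuration. Function code
 * 0x04 in GR0 selects QCI, which stores 128 bytes of configuration
 * information at the address in GR2. If the machine has no AP
 * instructions, PQAP would trigger a program check; the exception table
 * entry then resumes at label 1 with cc still 0 and the buffer zeroed,
 * which the caller treats as "no APXA".
 */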
static int kvm_s390_query_ap_config(u8 *config)
{
        u32 fcn_code = 0x04000000UL;
        u32 cc = 0;

        memset(config, 0, 128);
        asm volatile(
                "lgr 0,%1\n"
                "lgr 2,%2\n"
                ".long 0xb2af0000\n"            /* PQAP(QCI) */
                "0: ipm %0\n"
                "srl %0,28\n"
                "1:\n"
                EX_TABLE(0b, 1b)
                : "+r" (cc)
                : "r" (fcn_code), "r" (config)
                : "cc", "0", "2", "memory"
        );

        return cc;
}

static int kvm_s390_apxa_installed(void)
{
        u8 config[128];
        int cc;

        if (test_facility(12)) {
                cc = kvm_s390_query_ap_config(config);

                if (cc)
                        pr_err("PQAP(QCI) failed with cc=%d", cc);
                else
                        return config[0] & 0x40;
        }

        return 0;
}

static void kvm_s390_set_crycb_format(struct kvm *kvm)
{
        kvm->arch.crypto.crycbd = (__u32)(unsigned long) kvm->arch.crypto.crycb;

        if (kvm_s390_apxa_installed())
                kvm->arch.crypto.crycbd |= CRYCB_FORMAT2;
        else
                kvm->arch.crypto.crycbd |= CRYCB_FORMAT1;
}

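/*
 * Derive the initial guest cpuid from the host cpuid, but force the
 * version byte to 0xff, the value traditionally seen by guests running
 * under a hypervisor such as z/VM.
 */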
static u64 kvm_s390_get_initial_cpuid(void)
{
        struct cpuid cpuid;

        get_cpu_id(&cpuid);
        cpuid.version = 0xff;
        return *((u64 *) &cpuid);
}

static void kvm_s390_crypto_init(struct kvm *kvm)
{
        if (!test_kvm_facility(kvm, 76))
                return;

        kvm->arch.crypto.crycb = &kvm->arch.sie_page2->crycb;
        kvm_s390_set_crycb_format(kvm);

        /* Enable AES/DEA protected key functions by default */
        kvm->arch.crypto.aes_kw = 1;
        kvm->arch.crypto.dea_kw = 1;
        get_random_bytes(kvm->arch.crypto.crycb->aes_wrapping_key_mask,
                         sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
        get_random_bytes(kvm->arch.crypto.crycb->dea_wrapping_key_mask,
                         sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));
}

static void sca_dispose(struct kvm *kvm)
{
        if (kvm->arch.use_esca)
                free_pages_exact(kvm->arch.sca, sizeof(struct esca_block));
        else
                free_page((unsigned long)(kvm->arch.sca));
        kvm->arch.sca = NULL;
}

int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
{
        gfp_t alloc_flags = GFP_KERNEL;
        int i, rc;
        char debug_name[16];
        static unsigned long sca_offset;

        rc = -EINVAL;
#ifdef CONFIG_KVM_S390_UCONTROL
        if (type & ~KVM_VM_S390_UCONTROL)
                goto out_err;
        if ((type & KVM_VM_S390_UCONTROL) && (!capable(CAP_SYS_ADMIN)))
                goto out_err;
#else
        if (type)
                goto out_err;
#endif

        rc = s390_enable_sie();
        if (rc)
                goto out_err;

        rc = -ENOMEM;

        ratelimit_state_init(&kvm->arch.sthyi_limit, 5 * HZ, 500);

        kvm->arch.use_esca = 0; /* start with basic SCA */
        if (!sclp.has_64bscao)
                alloc_flags |= GFP_DMA;
        rwlock_init(&kvm->arch.sca_lock);
        kvm->arch.sca = (struct bsca_block *) get_zeroed_page(alloc_flags);
        if (!kvm->arch.sca)
                goto out_err;
        spin_lock(&kvm_lock);
        /*
         * Stagger the SCA within its page; the SCAs of different VMs then
         * start at different offsets and do not map to the same cache lines.
         */
        sca_offset += 16;
        if (sca_offset + sizeof(struct bsca_block) > PAGE_SIZE)
                sca_offset = 0;
        kvm->arch.sca = (struct bsca_block *)
                        ((char *) kvm->arch.sca + sca_offset);
        spin_unlock(&kvm_lock);

        sprintf(debug_name, "kvm-%u", current->pid);

        kvm->arch.dbf = debug_register(debug_name, 32, 1, 7 * sizeof(long));
        if (!kvm->arch.dbf)
                goto out_err;

        kvm->arch.sie_page2 =
             (struct sie_page2 *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
        if (!kvm->arch.sie_page2)
                goto out_err;

        /* Populate the facility mask initially. */
        memcpy(kvm->arch.model.fac_mask, S390_lowcore.stfle_fac_list,
               S390_ARCH_FAC_LIST_SIZE_BYTE);
        for (i = 0; i < S390_ARCH_FAC_LIST_SIZE_U64; i++) {
                if (i < kvm_s390_fac_list_mask_size())
                        kvm->arch.model.fac_mask[i] &= kvm_s390_fac_list_mask[i];
                else
                        kvm->arch.model.fac_mask[i] = 0UL;
        }

        /* Populate the facility list initially. */
        kvm->arch.model.fac_list = kvm->arch.sie_page2->fac_list;
        memcpy(kvm->arch.model.fac_list, kvm->arch.model.fac_mask,
               S390_ARCH_FAC_LIST_SIZE_BYTE);

        set_kvm_facility(kvm->arch.model.fac_mask, 74);
        set_kvm_facility(kvm->arch.model.fac_list, 74);

        kvm->arch.model.cpuid = kvm_s390_get_initial_cpuid();
        kvm->arch.model.ibc = sclp.ibc & 0x0fff;

        kvm_s390_crypto_init(kvm);

        spin_lock_init(&kvm->arch.float_int.lock);
        for (i = 0; i < FIRQ_LIST_COUNT; i++)
                INIT_LIST_HEAD(&kvm->arch.float_int.lists[i]);
        init_waitqueue_head(&kvm->arch.ipte_wq);
        mutex_init(&kvm->arch.ipte_mutex);

        debug_register_view(kvm->arch.dbf, &debug_sprintf_view);
        VM_EVENT(kvm, 3, "vm created with type %lu", type);

        if (type & KVM_VM_S390_UCONTROL) {
                kvm->arch.gmap = NULL;
                kvm->arch.mem_limit = KVM_S390_NO_MEM_LIMIT;
        } else {
                if (sclp.hamax == U64_MAX)
                        kvm->arch.mem_limit = TASK_MAX_SIZE;
                else
                        kvm->arch.mem_limit = min_t(unsigned long, TASK_MAX_SIZE,
                                                    sclp.hamax + 1);
                kvm->arch.gmap = gmap_alloc(current->mm, kvm->arch.mem_limit - 1);
                if (!kvm->arch.gmap)
                        goto out_err;
                kvm->arch.gmap->private = kvm;
                kvm->arch.gmap->pfault_enabled = 0;
        }

        kvm->arch.css_support = 0;
        kvm->arch.use_irqchip = 0;
        kvm->arch.epoch = 0;

        spin_lock_init(&kvm->arch.start_stop_lock);
        KVM_EVENT(3, "vm 0x%pK created by pid %u", kvm, current->pid);

        return 0;
out_err:
        free_page((unsigned long)kvm->arch.sie_page2);
        debug_unregister(kvm->arch.dbf);
        sca_dispose(kvm);
        KVM_EVENT(3, "creation of vm failed: %d", rc);
        return rc;
}

void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{
        VCPU_EVENT(vcpu, 3, "%s", "free cpu");
        trace_kvm_s390_destroy_vcpu(vcpu->vcpu_id);
        kvm_s390_clear_local_irqs(vcpu);
        kvm_clear_async_pf_completion_queue(vcpu);
        if (!kvm_is_ucontrol(vcpu->kvm))
                sca_del_vcpu(vcpu);

        if (kvm_is_ucontrol(vcpu->kvm))
                gmap_free(vcpu->arch.gmap);

        if (vcpu->kvm->arch.use_cmma)
                kvm_s390_vcpu_unsetup_cmma(vcpu);
        free_page((unsigned long)(vcpu->arch.sie_block));

        kvm_vcpu_uninit(vcpu);
        kmem_cache_free(kvm_vcpu_cache, vcpu);
}

static void kvm_free_vcpus(struct kvm *kvm)
{
        unsigned int i;
        struct kvm_vcpu *vcpu;

        kvm_for_each_vcpu(i, vcpu, kvm)
                kvm_arch_vcpu_destroy(vcpu);

        mutex_lock(&kvm->lock);
        for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
                kvm->vcpus[i] = NULL;

        atomic_set(&kvm->online_vcpus, 0);
        mutex_unlock(&kvm->lock);
}

void kvm_arch_destroy_vm(struct kvm *kvm)
{
        kvm_free_vcpus(kvm);
        sca_dispose(kvm);
        debug_unregister(kvm->arch.dbf);
        free_page((unsigned long)kvm->arch.sie_page2);
        if (!kvm_is_ucontrol(kvm))
                gmap_free(kvm->arch.gmap);
        kvm_s390_destroy_adapters(kvm);
        kvm_s390_clear_float_irqs(kvm);
        KVM_EVENT(3, "vm 0x%pK destroyed", kvm);
}

/* Section: vcpu related */
static int __kvm_ucontrol_vcpu_init(struct kvm_vcpu *vcpu)
{
        vcpu->arch.gmap = gmap_alloc(current->mm, -1UL);
        if (!vcpu->arch.gmap)
                return -ENOMEM;
        vcpu->arch.gmap->private = vcpu->kvm;

        return 0;
}

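/*
 * The SCA (system control area) is shared by all VCPUs of a VM and is
 * consulted by the hardware, e.g. for SIGP interpretation. Entries for
 * individual VCPUs may be changed under the read lock; exchanging the
 * SCA as a whole (basic to extended) takes the write lock, see
 * sca_switch_to_extended().
 */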
static void sca_del_vcpu(struct kvm_vcpu *vcpu)
{
        read_lock(&vcpu->kvm->arch.sca_lock);
        if (vcpu->kvm->arch.use_esca) {
                struct esca_block *sca = vcpu->kvm->arch.sca;

                clear_bit_inv(vcpu->vcpu_id, (unsigned long *) sca->mcn);
                sca->cpu[vcpu->vcpu_id].sda = 0;
        } else {
                struct bsca_block *sca = vcpu->kvm->arch.sca;

                clear_bit_inv(vcpu->vcpu_id, (unsigned long *) &sca->mcn);
                sca->cpu[vcpu->vcpu_id].sda = 0;
        }
        read_unlock(&vcpu->kvm->arch.sca_lock);
}

static void sca_add_vcpu(struct kvm_vcpu *vcpu)
{
        read_lock(&vcpu->kvm->arch.sca_lock);
        if (vcpu->kvm->arch.use_esca) {
                struct esca_block *sca = vcpu->kvm->arch.sca;

                sca->cpu[vcpu->vcpu_id].sda = (__u64) vcpu->arch.sie_block;
                vcpu->arch.sie_block->scaoh = (__u32)(((__u64)sca) >> 32);
                vcpu->arch.sie_block->scaol = (__u32)(__u64)sca & ~0x3fU;
                vcpu->arch.sie_block->ecb2 |= 0x04U;
                set_bit_inv(vcpu->vcpu_id, (unsigned long *) sca->mcn);
        } else {
                struct bsca_block *sca = vcpu->kvm->arch.sca;

                sca->cpu[vcpu->vcpu_id].sda = (__u64) vcpu->arch.sie_block;
                vcpu->arch.sie_block->scaoh = (__u32)(((__u64)sca) >> 32);
                vcpu->arch.sie_block->scaol = (__u32)(__u64)sca;
                set_bit_inv(vcpu->vcpu_id, (unsigned long *) &sca->mcn);
        }
        read_unlock(&vcpu->kvm->arch.sca_lock);
}

/* Basic SCA to Extended SCA data copy routines */
static inline void sca_copy_entry(struct esca_entry *d, struct bsca_entry *s)
{
        d->sda = s->sda;
        d->sigp_ctrl.c = s->sigp_ctrl.c;
        d->sigp_ctrl.scn = s->sigp_ctrl.scn;
}

static void sca_copy_b_to_e(struct esca_block *d, struct bsca_block *s)
{
        int i;

        d->ipte_control = s->ipte_control;
        d->mcn[0] = s->mcn;
        for (i = 0; i < KVM_S390_BSCA_CPU_SLOTS; i++)
                sca_copy_entry(&d->cpu[i], &s->cpu[i]);
}

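/*
 * Replace the basic SCA by an extended SCA, which provides room for more
 * than KVM_S390_BSCA_CPU_SLOTS VCPUs. All VCPUs are blocked and the SCA
 * lock is taken for writing, so no VCPU can run in SIE while the SCA
 * origin (scaoh/scaol) in each SIE control block is rewritten.
 */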
static int sca_switch_to_extended(struct kvm *kvm)
{
        struct bsca_block *old_sca = kvm->arch.sca;
        struct esca_block *new_sca;
        struct kvm_vcpu *vcpu;
        unsigned int vcpu_idx;
        u32 scaol, scaoh;

        new_sca = alloc_pages_exact(sizeof(*new_sca), GFP_KERNEL|__GFP_ZERO);
        if (!new_sca)
                return -ENOMEM;

        scaoh = (u32)((u64)(new_sca) >> 32);
        scaol = (u32)(u64)(new_sca) & ~0x3fU;

        kvm_s390_vcpu_block_all(kvm);
        write_lock(&kvm->arch.sca_lock);

        sca_copy_b_to_e(new_sca, old_sca);

        kvm_for_each_vcpu(vcpu_idx, vcpu, kvm) {
                vcpu->arch.sie_block->scaoh = scaoh;
                vcpu->arch.sie_block->scaol = scaol;
                vcpu->arch.sie_block->ecb2 |= 0x04U;
        }
        kvm->arch.sca = new_sca;
        kvm->arch.use_esca = 1;

        write_unlock(&kvm->arch.sca_lock);
        kvm_s390_vcpu_unblock_all(kvm);

        free_page((unsigned long)old_sca);

        VM_EVENT(kvm, 2, "Switched to ESCA (0x%pK -> 0x%pK)",
                 old_sca, kvm->arch.sca);
        return 0;
}

static int sca_can_add_vcpu(struct kvm *kvm, unsigned int id)
{
        int rc;

        if (id < KVM_S390_BSCA_CPU_SLOTS)
                return true;
        if (!sclp.has_esca || !sclp.has_64bscao)
                return false;

        mutex_lock(&kvm->lock);
        rc = kvm->arch.use_esca ? 0 : sca_switch_to_extended(kvm);
        mutex_unlock(&kvm->lock);

        return rc == 0 && id < KVM_S390_ESCA_CPU_SLOTS;
}

int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
{
        vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
        kvm_clear_async_pf_completion_queue(vcpu);
        vcpu->run->kvm_valid_regs = KVM_SYNC_PREFIX |
                                    KVM_SYNC_GPRS |
                                    KVM_SYNC_ACRS |
                                    KVM_SYNC_CRS |
                                    KVM_SYNC_ARCH0 |
                                    KVM_SYNC_PFAULT;
        if (test_kvm_facility(vcpu->kvm, 64))
                vcpu->run->kvm_valid_regs |= KVM_SYNC_RICCB;
        /* fprs can be synchronized via vrs, even if the guest has no vx. With
         * MACHINE_HAS_VX, (load|store)_fpu_regs() will work with vrs format.
         */
        if (MACHINE_HAS_VX)
                vcpu->run->kvm_valid_regs |= KVM_SYNC_VRS;
        else
                vcpu->run->kvm_valid_regs |= KVM_SYNC_FPRS;

        if (kvm_is_ucontrol(vcpu->kvm))
                return __kvm_ucontrol_vcpu_init(vcpu);

        return 0;
}

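/*
 * While a VCPU runs in SIE, the hardware steps the guest CPU timer in the
 * SIE control block. The helpers below additionally account host time
 * spent on behalf of the VCPU: cputm_start holds the TOD clock value at
 * which host-side accounting began, and cputm_seqcount allows other
 * threads to read a consistent timer value without locking.
 */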
/* needs disabled preemption to protect from TOD sync and vcpu_load/put */
static void __start_cpu_timer_accounting(struct kvm_vcpu *vcpu)
{
        WARN_ON_ONCE(vcpu->arch.cputm_start != 0);
        raw_write_seqcount_begin(&vcpu->arch.cputm_seqcount);
        vcpu->arch.cputm_start = get_tod_clock_fast();
        raw_write_seqcount_end(&vcpu->arch.cputm_seqcount);
}

/* needs disabled preemption to protect from TOD sync and vcpu_load/put */
static void __stop_cpu_timer_accounting(struct kvm_vcpu *vcpu)
{
        WARN_ON_ONCE(vcpu->arch.cputm_start == 0);
        raw_write_seqcount_begin(&vcpu->arch.cputm_seqcount);
        vcpu->arch.sie_block->cputm -= get_tod_clock_fast() - vcpu->arch.cputm_start;
        vcpu->arch.cputm_start = 0;
        raw_write_seqcount_end(&vcpu->arch.cputm_seqcount);
}

/* needs disabled preemption to protect from TOD sync and vcpu_load/put */
static void __enable_cpu_timer_accounting(struct kvm_vcpu *vcpu)
{
        WARN_ON_ONCE(vcpu->arch.cputm_enabled);
        vcpu->arch.cputm_enabled = true;
        __start_cpu_timer_accounting(vcpu);
}

/* needs disabled preemption to protect from TOD sync and vcpu_load/put */
static void __disable_cpu_timer_accounting(struct kvm_vcpu *vcpu)
{
        WARN_ON_ONCE(!vcpu->arch.cputm_enabled);
        __stop_cpu_timer_accounting(vcpu);
        vcpu->arch.cputm_enabled = false;
}

static void enable_cpu_timer_accounting(struct kvm_vcpu *vcpu)
{
        preempt_disable(); /* protect from TOD sync and vcpu_load/put */
        __enable_cpu_timer_accounting(vcpu);
        preempt_enable();
}

static void disable_cpu_timer_accounting(struct kvm_vcpu *vcpu)
{
        preempt_disable(); /* protect from TOD sync and vcpu_load/put */
        __disable_cpu_timer_accounting(vcpu);
        preempt_enable();
}

/* set the cpu timer - may only be called from the VCPU thread itself */
void kvm_s390_set_cpu_timer(struct kvm_vcpu *vcpu, __u64 cputm)
{
        preempt_disable(); /* protect from TOD sync and vcpu_load/put */
        raw_write_seqcount_begin(&vcpu->arch.cputm_seqcount);
        if (vcpu->arch.cputm_enabled)
                vcpu->arch.cputm_start = get_tod_clock_fast();
        vcpu->arch.sie_block->cputm = cputm;
        raw_write_seqcount_end(&vcpu->arch.cputm_seqcount);
        preempt_enable();
}

/* update and get the cpu timer - can also be called from other VCPU threads */
__u64 kvm_s390_get_cpu_timer(struct kvm_vcpu *vcpu)
{
        unsigned int seq;
        __u64 value;

        if (unlikely(!vcpu->arch.cputm_enabled))
                return vcpu->arch.sie_block->cputm;

        preempt_disable(); /* protect from TOD sync and vcpu_load/put */
        do {
                seq = raw_read_seqcount(&vcpu->arch.cputm_seqcount);
                /*
                 * If the writer would ever execute a read in the critical
                 * section, e.g. in irq context, we have a deadlock.
                 */
                WARN_ON_ONCE((seq & 1) && smp_processor_id() == vcpu->cpu);
                value = vcpu->arch.sie_block->cputm;
                /* if cputm_start is 0, accounting is being started/stopped */
                if (likely(vcpu->arch.cputm_start))
                        value -= get_tod_clock_fast() - vcpu->arch.cputm_start;
        } while (read_seqcount_retry(&vcpu->arch.cputm_seqcount, seq & ~1));
        preempt_enable();
        return value;
}

void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
        /* Save host register state */
        save_fpu_regs();
        vcpu->arch.host_fpregs.fpc = current->thread.fpu.fpc;
        vcpu->arch.host_fpregs.regs = current->thread.fpu.regs;

        if (MACHINE_HAS_VX)
                current->thread.fpu.regs = vcpu->run->s.regs.vrs;
        else
                current->thread.fpu.regs = vcpu->run->s.regs.fprs;
        current->thread.fpu.fpc = vcpu->run->s.regs.fpc;
        if (test_fp_ctl(current->thread.fpu.fpc))
                /* User space provided an invalid FPC, let's clear it */
                current->thread.fpu.fpc = 0;

        save_access_regs(vcpu->arch.host_acrs);
        restore_access_regs(vcpu->run->s.regs.acrs);
        gmap_enable(vcpu->arch.gmap);
        atomic_or(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
        if (vcpu->arch.cputm_enabled && !is_vcpu_idle(vcpu))
                __start_cpu_timer_accounting(vcpu);
        vcpu->cpu = cpu;
}

void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
        vcpu->cpu = -1;
        if (vcpu->arch.cputm_enabled && !is_vcpu_idle(vcpu))
                __stop_cpu_timer_accounting(vcpu);
        atomic_andnot(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
        gmap_disable(vcpu->arch.gmap);

        /* Save guest register state */
        save_fpu_regs();
        vcpu->run->s.regs.fpc = current->thread.fpu.fpc;

        /* Restore host register state */
        current->thread.fpu.fpc = vcpu->arch.host_fpregs.fpc;
        current->thread.fpu.regs = vcpu->arch.host_fpregs.regs;

        save_access_regs(vcpu->run->s.regs.acrs);
        restore_access_regs(vcpu->arch.host_acrs);
}

static void kvm_s390_vcpu_initial_reset(struct kvm_vcpu *vcpu)
{
        /* this equals initial cpu reset in pop, but we don't switch to ESA */
        vcpu->arch.sie_block->gpsw.mask = 0UL;
        vcpu->arch.sie_block->gpsw.addr = 0UL;
        kvm_s390_set_prefix(vcpu, 0);
        kvm_s390_set_cpu_timer(vcpu, 0);
        vcpu->arch.sie_block->ckc = 0UL;
        vcpu->arch.sie_block->todpr = 0;
        memset(vcpu->arch.sie_block->gcr, 0, 16 * sizeof(__u64));
        vcpu->arch.sie_block->gcr[0] = 0xE0UL;
        vcpu->arch.sie_block->gcr[14] = 0xC2000000UL;
        /* make sure the new fpc will be lazily loaded */
        save_fpu_regs();
        current->thread.fpu.fpc = 0;
        vcpu->arch.sie_block->gbea = 1;
        vcpu->arch.sie_block->pp = 0;
        vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
        kvm_clear_async_pf_completion_queue(vcpu);
        if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm))
                kvm_s390_vcpu_stop(vcpu);
        kvm_s390_clear_local_irqs(vcpu);
}

31928aa5 1766void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
42897d86 1767{
72f25020 1768 mutex_lock(&vcpu->kvm->lock);
fdf03650 1769 preempt_disable();
72f25020 1770 vcpu->arch.sie_block->epoch = vcpu->kvm->arch.epoch;
fdf03650 1771 preempt_enable();
72f25020 1772 mutex_unlock(&vcpu->kvm->lock);
25508824 1773 if (!kvm_is_ucontrol(vcpu->kvm)) {
dafd032a 1774 vcpu->arch.gmap = vcpu->kvm->arch.gmap;
eaa78f34 1775 sca_add_vcpu(vcpu);
25508824
DH
1776 }
1777
42897d86
MT
1778}
1779
5102ee87
TK
1780static void kvm_s390_vcpu_crypto_setup(struct kvm_vcpu *vcpu)
1781{
9d8d5786 1782 if (!test_kvm_facility(vcpu->kvm, 76))
5102ee87
TK
1783 return;
1784
a374e892
TK
1785 vcpu->arch.sie_block->ecb3 &= ~(ECB3_AES | ECB3_DEA);
1786
1787 if (vcpu->kvm->arch.crypto.aes_kw)
1788 vcpu->arch.sie_block->ecb3 |= ECB3_AES;
1789 if (vcpu->kvm->arch.crypto.dea_kw)
1790 vcpu->arch.sie_block->ecb3 |= ECB3_DEA;
1791
5102ee87
TK
1792 vcpu->arch.sie_block->crycbd = vcpu->kvm->arch.crypto.crycbd;
1793}
1794
b31605c1
DD
1795void kvm_s390_vcpu_unsetup_cmma(struct kvm_vcpu *vcpu)
1796{
1797 free_page(vcpu->arch.sie_block->cbrlo);
1798 vcpu->arch.sie_block->cbrlo = 0;
1799}
1800
1801int kvm_s390_vcpu_setup_cmma(struct kvm_vcpu *vcpu)
1802{
1803 vcpu->arch.sie_block->cbrlo = get_zeroed_page(GFP_KERNEL);
1804 if (!vcpu->arch.sie_block->cbrlo)
1805 return -ENOMEM;
1806
1807 vcpu->arch.sie_block->ecb2 |= 0x80;
1808 vcpu->arch.sie_block->ecb2 &= ~0x08;
1809 return 0;
1810}
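/*
 * Informative note on the magic numbers above, using names from later
 * kernel trees (an assumption, not this tree's vocabulary): ecb2 bit
 * 0x80 is ECB2_CMMA, collaborative-memory-management interpretation,
 * and bit 0x08 is ECB2_PFMFI, PFMF interpretation, which is cleared
 * while CMMA is in use.
 */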
1811
91520f1a
MM
1812static void kvm_s390_vcpu_setup_model(struct kvm_vcpu *vcpu)
1813{
1814 struct kvm_s390_cpu_model *model = &vcpu->kvm->arch.model;
1815
91520f1a 1816 vcpu->arch.sie_block->ibc = model->ibc;
80bc79dc 1817 if (test_kvm_facility(vcpu->kvm, 7))
c54f0d6a 1818 vcpu->arch.sie_block->fac = (u32)(u64) model->fac_list;
91520f1a
MM
1819}
1820
b0c632db
HC
1821int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
1822{
b31605c1 1823 int rc = 0;
b31288fa 1824
9e6dabef
CH
1825 atomic_set(&vcpu->arch.sie_block->cpuflags, CPUSTAT_ZARCH |
1826 CPUSTAT_SM |
a4a4f191
GH
1827 CPUSTAT_STOPPED);
1828
53df84f8 1829 if (test_kvm_facility(vcpu->kvm, 78))
805de8f4 1830 atomic_or(CPUSTAT_GED2, &vcpu->arch.sie_block->cpuflags);
53df84f8 1831 else if (test_kvm_facility(vcpu->kvm, 8))
805de8f4 1832 atomic_or(CPUSTAT_GED, &vcpu->arch.sie_block->cpuflags);
a4a4f191 1833
91520f1a
MM
1834 kvm_s390_vcpu_setup_model(vcpu);
1835
bd50e8ec
DH
1836 vcpu->arch.sie_block->ecb = 0x02;
1837 if (test_kvm_facility(vcpu->kvm, 9))
1838 vcpu->arch.sie_block->ecb |= 0x04;
9d8d5786 1839 if (test_kvm_facility(vcpu->kvm, 50) && test_kvm_facility(vcpu->kvm, 73))
7feb6bb8
MM
1840 vcpu->arch.sie_block->ecb |= 0x10;
1841
d6af0b49
DH
1842 if (test_kvm_facility(vcpu->kvm, 8))
1843 vcpu->arch.sie_block->ecb2 |= 0x08;
ea5f4969 1844 vcpu->arch.sie_block->eca = 0xC1002000U;
37c5f6c8 1845 if (sclp.has_siif)
217a4406 1846 vcpu->arch.sie_block->eca |= 1;
37c5f6c8 1847 if (sclp.has_sigpif)
ea5f4969 1848 vcpu->arch.sie_block->eca |= 0x10000000U;
c6e5f166
FZ
1849 if (test_kvm_facility(vcpu->kvm, 64))
1850 vcpu->arch.sie_block->ecb3 |= 0x01;
18280d8b 1851 if (test_kvm_facility(vcpu->kvm, 129)) {
13211ea7
EF
1852 vcpu->arch.sie_block->eca |= 0x00020000;
1853 vcpu->arch.sie_block->ecd |= 0x20000000;
1854 }
c6e5f166 1855 vcpu->arch.sie_block->riccbd = (unsigned long) &vcpu->run->s.regs.riccb;
492d8642 1856 vcpu->arch.sie_block->ictl |= ICTL_ISKE | ICTL_SSKE | ICTL_RRBE;
95ca2cb5
JF
1857 if (test_kvm_facility(vcpu->kvm, 74))
1858 vcpu->arch.sie_block->ictl |= ICTL_OPEREXC;
5a5e6536 1859
e6db1d61 1860 if (vcpu->kvm->arch.use_cmma) {
b31605c1
DD
1861 rc = kvm_s390_vcpu_setup_cmma(vcpu);
1862 if (rc)
1863 return rc;
b31288fa 1864 }
0ac96caf 1865 hrtimer_init(&vcpu->arch.ckc_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
ca872302 1866 vcpu->arch.ckc_timer.function = kvm_s390_idle_wakeup;
9d8d5786 1867
5102ee87
TK
1868 kvm_s390_vcpu_crypto_setup(vcpu);
1869
b31605c1 1870 return rc;
b0c632db
HC
1871}
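/*
 * Informative decoder for the control-block constants above, again
 * using names from later kernel trees (treat as an assumption): ecb
 * 0x02 = ECB_HOSTPROTINT, 0x04 = ECB_SRSI, 0x10 = ECB_TE; eca
 * 0xC1002000U = ECA_CEI | ECA_IB | ECA_MVPGI | ECA_PROTEXCI, 1 =
 * ECA_SII, 0x10000000 = ECA_SIGPI, 0x00020000 = ECA_VX (paired with
 * ecd 0x20000000 for vector support); ecb3 0x01 enables runtime
 * instrumentation, whose control block riccbd points to.
 */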
1872
1873struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
1874 unsigned int id)
1875{
4d47555a 1876 struct kvm_vcpu *vcpu;
7feb6bb8 1877 struct sie_page *sie_page;
4d47555a
CO
1878 int rc = -EINVAL;
1879
4215825e 1880 if (!kvm_is_ucontrol(kvm) && !sca_can_add_vcpu(kvm, id))
4d47555a
CO
1881 goto out;
1882
1883 rc = -ENOMEM;
b0c632db 1884
b110feaf 1885 vcpu = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
b0c632db 1886 if (!vcpu)
4d47555a 1887 goto out;
b0c632db 1888
7feb6bb8
MM
1889 sie_page = (struct sie_page *) get_zeroed_page(GFP_KERNEL);
1890 if (!sie_page)
b0c632db
HC
1891 goto out_free_cpu;
1892
7feb6bb8
MM
1893 vcpu->arch.sie_block = &sie_page->sie_block;
1894 vcpu->arch.sie_block->itdba = (unsigned long) &sie_page->itdb;
1895
b0c632db 1896 vcpu->arch.sie_block->icpua = id;
ba5c1e9b 1897 spin_lock_init(&vcpu->arch.local_int.lock);
ba5c1e9b 1898 vcpu->arch.local_int.float_int = &kvm->arch.float_int;
d0321a24 1899 vcpu->arch.local_int.wq = &vcpu->wq;
5288fbf0 1900 vcpu->arch.local_int.cpuflags = &vcpu->arch.sie_block->cpuflags;
9c23a131 1901 seqcount_init(&vcpu->arch.cputm_seqcount);
ba5c1e9b 1902
b0c632db
HC
1903 rc = kvm_vcpu_init(vcpu, kvm, id);
1904 if (rc)
9abc2a08 1905 goto out_free_sie_block;
8335713a 1906 VM_EVENT(kvm, 3, "create cpu %d at 0x%pK, sie block at 0x%pK", id, vcpu,
b0c632db 1907 vcpu->arch.sie_block);
ade38c31 1908 trace_kvm_s390_create_vcpu(id, vcpu, vcpu->arch.sie_block);
b0c632db 1909
b0c632db 1910 return vcpu;
7b06bf2f
WY
1911out_free_sie_block:
1912 free_page((unsigned long)(vcpu->arch.sie_block));
b0c632db 1913out_free_cpu:
b110feaf 1914 kmem_cache_free(kvm_vcpu_cache, vcpu);
4d47555a 1915out:
b0c632db
HC
1916 return ERR_PTR(rc);
1917}
1918
b0c632db
HC
1919int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
1920{
9a022067 1921 return kvm_s390_vcpu_has_irq(vcpu, 0);
b0c632db
HC
1922}
1923
27406cd5 1924void kvm_s390_vcpu_block(struct kvm_vcpu *vcpu)
49b99e1e 1925{
805de8f4 1926 atomic_or(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
61a6df54 1927 exit_sie(vcpu);
49b99e1e
CB
1928}
1929
27406cd5 1930void kvm_s390_vcpu_unblock(struct kvm_vcpu *vcpu)
49b99e1e 1931{
805de8f4 1932 atomic_andnot(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
49b99e1e
CB
1933}
1934
8e236546
CB
1935static void kvm_s390_vcpu_request(struct kvm_vcpu *vcpu)
1936{
805de8f4 1937 atomic_or(PROG_REQUEST, &vcpu->arch.sie_block->prog20);
61a6df54 1938 exit_sie(vcpu);
8e236546
CB
1939}
1940
1941static void kvm_s390_vcpu_request_handled(struct kvm_vcpu *vcpu)
1942{
9bf9fde2 1943 atomic_andnot(PROG_REQUEST, &vcpu->arch.sie_block->prog20);
8e236546
CB
1944}
1945
49b99e1e
CB
1946/*
1947 * Kick a guest cpu out of SIE and wait until SIE is not running.
1948 * If the CPU is not running (e.g. waiting as idle) the function will
1949 * return immediately. */
1950void exit_sie(struct kvm_vcpu *vcpu)
1951{
805de8f4 1952 atomic_or(CPUSTAT_STOP_INT, &vcpu->arch.sie_block->cpuflags);
49b99e1e
CB
1953 while (vcpu->arch.sie_block->prog0c & PROG_IN_SIE)
1954 cpu_relax();
1955}
1956
8e236546
CB
1957/* Kick a guest cpu out of SIE to process a request synchronously */
1958void kvm_s390_sync_request(int req, struct kvm_vcpu *vcpu)
49b99e1e 1959{
8e236546
CB
1960 kvm_make_request(req, vcpu);
1961 kvm_s390_vcpu_request(vcpu);
49b99e1e
CB
1962}
1963
2c70fe44
CB
1964static void kvm_gmap_notifier(struct gmap *gmap, unsigned long address)
1965{
1966 int i;
1967 struct kvm *kvm = gmap->private;
1968 struct kvm_vcpu *vcpu;
1969
1970 kvm_for_each_vcpu(i, vcpu, kvm) {
1971 /* match against both prefix pages */
fda902cb 1972 if (kvm_s390_get_prefix(vcpu) == (address & ~0x1000UL)) {
2c70fe44 1973 VCPU_EVENT(vcpu, 2, "gmap notifier for %lx", address);
8e236546 1974 kvm_s390_sync_request(KVM_REQ_MMU_RELOAD, vcpu);
2c70fe44
CB
1975 }
1976 }
1977}
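/*
 * Illustrative note: the prefix area spans two consecutive 4K pages,
 * prefix and prefix + 0x1000. Clearing bit 0x1000 of the notified
 * address above lets one comparison match either page.
 */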
1978
b6d33834
CD
1979int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
1980{
1981 /* kvm common code refers to this, but never calls it */
1982 BUG();
1983 return 0;
1984}
1985
14eebd91
CO
1986static int kvm_arch_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu,
1987 struct kvm_one_reg *reg)
1988{
1989 int r = -EINVAL;
1990
1991 switch (reg->id) {
29b7c71b
CO
1992 case KVM_REG_S390_TODPR:
1993 r = put_user(vcpu->arch.sie_block->todpr,
1994 (u32 __user *)reg->addr);
1995 break;
1996 case KVM_REG_S390_EPOCHDIFF:
1997 r = put_user(vcpu->arch.sie_block->epoch,
1998 (u64 __user *)reg->addr);
1999 break;
46a6dd1c 2000 case KVM_REG_S390_CPU_TIMER:
4287f247 2001 r = put_user(kvm_s390_get_cpu_timer(vcpu),
46a6dd1c
J
2002 (u64 __user *)reg->addr);
2003 break;
2004 case KVM_REG_S390_CLOCK_COMP:
2005 r = put_user(vcpu->arch.sie_block->ckc,
2006 (u64 __user *)reg->addr);
2007 break;
536336c2
DD
2008 case KVM_REG_S390_PFTOKEN:
2009 r = put_user(vcpu->arch.pfault_token,
2010 (u64 __user *)reg->addr);
2011 break;
2012 case KVM_REG_S390_PFCOMPARE:
2013 r = put_user(vcpu->arch.pfault_compare,
2014 (u64 __user *)reg->addr);
2015 break;
2016 case KVM_REG_S390_PFSELECT:
2017 r = put_user(vcpu->arch.pfault_select,
2018 (u64 __user *)reg->addr);
2019 break;
672550fb
CB
2020 case KVM_REG_S390_PP:
2021 r = put_user(vcpu->arch.sie_block->pp,
2022 (u64 __user *)reg->addr);
2023 break;
afa45ff5
CB
2024 case KVM_REG_S390_GBEA:
2025 r = put_user(vcpu->arch.sie_block->gbea,
2026 (u64 __user *)reg->addr);
2027 break;
14eebd91
CO
2028 default:
2029 break;
2030 }
2031
2032 return r;
2033}
2034
2035static int kvm_arch_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu,
2036 struct kvm_one_reg *reg)
2037{
2038 int r = -EINVAL;
4287f247 2039 __u64 val;
14eebd91
CO
2040
2041 switch (reg->id) {
29b7c71b
CO
2042 case KVM_REG_S390_TODPR:
2043 r = get_user(vcpu->arch.sie_block->todpr,
2044 (u32 __user *)reg->addr);
2045 break;
2046 case KVM_REG_S390_EPOCHDIFF:
2047 r = get_user(vcpu->arch.sie_block->epoch,
2048 (u64 __user *)reg->addr);
2049 break;
46a6dd1c 2050 case KVM_REG_S390_CPU_TIMER:
4287f247
DH
2051 r = get_user(val, (u64 __user *)reg->addr);
2052 if (!r)
2053 kvm_s390_set_cpu_timer(vcpu, val);
46a6dd1c
J
2054 break;
2055 case KVM_REG_S390_CLOCK_COMP:
2056 r = get_user(vcpu->arch.sie_block->ckc,
2057 (u64 __user *)reg->addr);
2058 break;
536336c2
DD
2059 case KVM_REG_S390_PFTOKEN:
2060 r = get_user(vcpu->arch.pfault_token,
2061 (u64 __user *)reg->addr);
9fbd8082
DH
2062 if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
2063 kvm_clear_async_pf_completion_queue(vcpu);
536336c2
DD
2064 break;
2065 case KVM_REG_S390_PFCOMPARE:
2066 r = get_user(vcpu->arch.pfault_compare,
2067 (u64 __user *)reg->addr);
2068 break;
2069 case KVM_REG_S390_PFSELECT:
2070 r = get_user(vcpu->arch.pfault_select,
2071 (u64 __user *)reg->addr);
2072 break;
672550fb
CB
2073 case KVM_REG_S390_PP:
2074 r = get_user(vcpu->arch.sie_block->pp,
2075 (u64 __user *)reg->addr);
2076 break;
afa45ff5
CB
2077 case KVM_REG_S390_GBEA:
2078 r = get_user(vcpu->arch.sie_block->gbea,
2079 (u64 __user *)reg->addr);
2080 break;
14eebd91
CO
2081 default:
2082 break;
2083 }
2084
2085 return r;
2086}
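/*
 * Illustrative userspace counterpart of the ONE_REG handlers above
 * (a sketch; vcpu_fd is an assumed, already-open vcpu file descriptor):
 *
 *	#include <linux/kvm.h>
 *	#include <sys/ioctl.h>
 *
 *	__u64 cputm;
 *	struct kvm_one_reg reg = {
 *		.id   = KVM_REG_S390_CPU_TIMER,
 *		.addr = (__u64)&cputm,
 *	};
 *	ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);	// fills cputm on success
 *
 * KVM_SET_ONE_REG works the same way in the other direction and, for
 * the CPU timer, ends up in kvm_s390_set_cpu_timer().
 */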
b6d33834 2087
b0c632db
HC
2088static int kvm_arch_vcpu_ioctl_initial_reset(struct kvm_vcpu *vcpu)
2089{
b0c632db 2090 kvm_s390_vcpu_initial_reset(vcpu);
b0c632db
HC
2091 return 0;
2092}
2093
2094int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
2095{
5a32c1af 2096 memcpy(&vcpu->run->s.regs.gprs, &regs->gprs, sizeof(regs->gprs));
b0c632db
HC
2097 return 0;
2098}
2099
2100int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
2101{
5a32c1af 2102 memcpy(&regs->gprs, &vcpu->run->s.regs.gprs, sizeof(regs->gprs));
b0c632db
HC
2103 return 0;
2104}
2105
2106int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
2107 struct kvm_sregs *sregs)
2108{
59674c1a 2109 memcpy(&vcpu->run->s.regs.acrs, &sregs->acrs, sizeof(sregs->acrs));
b0c632db 2110 memcpy(&vcpu->arch.sie_block->gcr, &sregs->crs, sizeof(sregs->crs));
59674c1a 2111 restore_access_regs(vcpu->run->s.regs.acrs);
b0c632db
HC
2112 return 0;
2113}
2114
2115int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
2116 struct kvm_sregs *sregs)
2117{
59674c1a 2118 memcpy(&sregs->acrs, &vcpu->run->s.regs.acrs, sizeof(sregs->acrs));
b0c632db 2119 memcpy(&sregs->crs, &vcpu->arch.sie_block->gcr, sizeof(sregs->crs));
b0c632db
HC
2120 return 0;
2121}
2122
2123int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
2124{
9abc2a08
DH
2125 /* make sure the new values will be lazily loaded */
2126 save_fpu_regs();
4725c860
MS
2127 if (test_fp_ctl(fpu->fpc))
2128 return -EINVAL;
9abc2a08
DH
2129 current->thread.fpu.fpc = fpu->fpc;
2130 if (MACHINE_HAS_VX)
2131 convert_fp_to_vx(current->thread.fpu.vxrs, (freg_t *)fpu->fprs);
2132 else
2133 memcpy(current->thread.fpu.fprs, &fpu->fprs, sizeof(fpu->fprs));
b0c632db
HC
2134 return 0;
2135}
2136
2137int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
2138{
9abc2a08
DH
2139 /* make sure we have the latest values */
2140 save_fpu_regs();
2141 if (MACHINE_HAS_VX)
2142 convert_vx_to_fp((freg_t *)fpu->fprs, current->thread.fpu.vxrs);
2143 else
2144 memcpy(fpu->fprs, current->thread.fpu.fprs, sizeof(fpu->fprs));
2145 fpu->fpc = current->thread.fpu.fpc;
b0c632db
HC
2146 return 0;
2147}
2148
2149static int kvm_arch_vcpu_ioctl_set_initial_psw(struct kvm_vcpu *vcpu, psw_t psw)
2150{
2151 int rc = 0;
2152
7a42fdc2 2153 if (!is_vcpu_stopped(vcpu))
b0c632db 2154 rc = -EBUSY;
d7b0b5eb
CO
2155 else {
2156 vcpu->run->psw_mask = psw.mask;
2157 vcpu->run->psw_addr = psw.addr;
2158 }
b0c632db
HC
2159 return rc;
2160}
2161
2162int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
2163 struct kvm_translation *tr)
2164{
2165 return -EINVAL; /* not implemented yet */
2166}
2167
27291e21
DH
2168#define VALID_GUESTDBG_FLAGS (KVM_GUESTDBG_SINGLESTEP | \
2169 KVM_GUESTDBG_USE_HW_BP | \
2170 KVM_GUESTDBG_ENABLE)
2171
d0bfb940
JK
2172int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
2173 struct kvm_guest_debug *dbg)
b0c632db 2174{
27291e21
DH
2175 int rc = 0;
2176
2177 vcpu->guest_debug = 0;
2178 kvm_s390_clear_bp_data(vcpu);
2179
2de3bfc2 2180 if (dbg->control & ~VALID_GUESTDBG_FLAGS)
27291e21
DH
2181 return -EINVAL;
2182
2183 if (dbg->control & KVM_GUESTDBG_ENABLE) {
2184 vcpu->guest_debug = dbg->control;
2185 /* enforce guest PER */
805de8f4 2186 atomic_or(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags);
27291e21
DH
2187
2188 if (dbg->control & KVM_GUESTDBG_USE_HW_BP)
2189 rc = kvm_s390_import_bp_data(vcpu, dbg);
2190 } else {
805de8f4 2191 atomic_andnot(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags);
27291e21
DH
2192 vcpu->arch.guestdbg.last_bp = 0;
2193 }
2194
2195 if (rc) {
2196 vcpu->guest_debug = 0;
2197 kvm_s390_clear_bp_data(vcpu);
805de8f4 2198 atomic_andnot(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags);
27291e21
DH
2199 }
2200
2201 return rc;
b0c632db
HC
2202}
2203
62d9f0db
MT
2204int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
2205 struct kvm_mp_state *mp_state)
2206{
6352e4d2
DH
2207 /* CHECK_STOP and LOAD are not supported yet */
2208 return is_vcpu_stopped(vcpu) ? KVM_MP_STATE_STOPPED :
2209 KVM_MP_STATE_OPERATING;
62d9f0db
MT
2210}
2211
2212int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
2213 struct kvm_mp_state *mp_state)
2214{
6352e4d2
DH
2215 int rc = 0;
2216
2217 /* user space knows about this interface - let it control the state */
2218 vcpu->kvm->arch.user_cpu_state_ctrl = 1;
2219
2220 switch (mp_state->mp_state) {
2221 case KVM_MP_STATE_STOPPED:
2222 kvm_s390_vcpu_stop(vcpu);
2223 break;
2224 case KVM_MP_STATE_OPERATING:
2225 kvm_s390_vcpu_start(vcpu);
2226 break;
2227 case KVM_MP_STATE_LOAD:
2228 case KVM_MP_STATE_CHECK_STOP:
2229 /* fall through - CHECK_STOP and LOAD are not supported yet */
2230 default:
2231 rc = -ENXIO;
2232 }
2233
2234 return rc;
62d9f0db
MT
2235}
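/*
 * Illustrative userspace use of the mp_state interface above (vcpu_fd
 * is an assumed open vcpu file descriptor):
 *
 *	struct kvm_mp_state st = { .mp_state = KVM_MP_STATE_STOPPED };
 *	ioctl(vcpu_fd, KVM_SET_MP_STATE, &st);
 *
 * Note the side effect spelled out above: the first KVM_SET_MP_STATE
 * marks the VM as user-controlled, after which KVM_RUN no longer
 * auto-starts a stopped vcpu.
 */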
2236
8ad35755
DH
2237static bool ibs_enabled(struct kvm_vcpu *vcpu)
2238{
2239 return atomic_read(&vcpu->arch.sie_block->cpuflags) & CPUSTAT_IBS;
2240}
2241
2c70fe44
CB
2242static int kvm_s390_handle_requests(struct kvm_vcpu *vcpu)
2243{
8ad35755 2244retry:
8e236546 2245 kvm_s390_vcpu_request_handled(vcpu);
586b7ccd
CB
2246 if (!vcpu->requests)
2247 return 0;
2c70fe44
CB
2248 /*
2249 * We use MMU_RELOAD just to re-arm the ipte notifier for the
2250 * guest prefix page. gmap_ipte_notify will wait on the ptl lock.
2251 * This ensures that the ipte instruction for this request has
2252 * already finished. We might race against a second unmapper that
 2254	 * wants to set the blocking bit. Let's just retry the request loop.
2254 */
8ad35755 2255 if (kvm_check_request(KVM_REQ_MMU_RELOAD, vcpu)) {
2c70fe44
CB
2256 int rc;
2257 rc = gmap_ipte_notify(vcpu->arch.gmap,
fda902cb 2258 kvm_s390_get_prefix(vcpu),
2c70fe44
CB
2259 PAGE_SIZE * 2);
2260 if (rc)
2261 return rc;
8ad35755 2262 goto retry;
2c70fe44 2263 }
8ad35755 2264
d3d692c8
DH
2265 if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu)) {
2266 vcpu->arch.sie_block->ihcpu = 0xffff;
2267 goto retry;
2268 }
2269
8ad35755
DH
2270 if (kvm_check_request(KVM_REQ_ENABLE_IBS, vcpu)) {
2271 if (!ibs_enabled(vcpu)) {
2272 trace_kvm_s390_enable_disable_ibs(vcpu->vcpu_id, 1);
805de8f4 2273 atomic_or(CPUSTAT_IBS,
8ad35755
DH
2274 &vcpu->arch.sie_block->cpuflags);
2275 }
2276 goto retry;
2c70fe44 2277 }
8ad35755
DH
2278
2279 if (kvm_check_request(KVM_REQ_DISABLE_IBS, vcpu)) {
2280 if (ibs_enabled(vcpu)) {
2281 trace_kvm_s390_enable_disable_ibs(vcpu->vcpu_id, 0);
805de8f4 2282 atomic_andnot(CPUSTAT_IBS,
8ad35755
DH
2283 &vcpu->arch.sie_block->cpuflags);
2284 }
2285 goto retry;
2286 }
2287
0759d068
DH
2288 /* nothing to do, just clear the request */
2289 clear_bit(KVM_REQ_UNHALT, &vcpu->requests);
2290
2c70fe44
CB
2291 return 0;
2292}
2293
25ed1675
DH
2294void kvm_s390_set_tod_clock(struct kvm *kvm, u64 tod)
2295{
2296 struct kvm_vcpu *vcpu;
2297 int i;
2298
2299 mutex_lock(&kvm->lock);
2300 preempt_disable();
2301 kvm->arch.epoch = tod - get_tod_clock();
2302 kvm_s390_vcpu_block_all(kvm);
2303 kvm_for_each_vcpu(i, vcpu, kvm)
2304 vcpu->arch.sie_block->epoch = kvm->arch.epoch;
2305 kvm_s390_vcpu_unblock_all(kvm);
2306 preempt_enable();
2307 mutex_unlock(&kvm->lock);
2308}
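/*
 * Worked example for the epoch arithmetic above (values chosen purely
 * for illustration): the SIE epoch is the delta the machine adds to the
 * host TOD whenever the guest reads the clock, i.e.
 * guest_tod = host_tod + epoch. If the host TOD currently reads 0x1000
 * and userspace requests a guest TOD of 0x4000, the loop stores
 * epoch = 0x4000 - 0x1000 = 0x3000 into every SIE block.
 */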
2309
fa576c58
TH
2310/**
2311 * kvm_arch_fault_in_page - fault-in guest page if necessary
2312 * @vcpu: The corresponding virtual cpu
2313 * @gpa: Guest physical address
2314 * @writable: Whether the page should be writable or not
2315 *
2316 * Make sure that a guest page has been faulted-in on the host.
2317 *
2318 * Return: Zero on success, negative error code otherwise.
2319 */
2320long kvm_arch_fault_in_page(struct kvm_vcpu *vcpu, gpa_t gpa, int writable)
24eb3a82 2321{
527e30b4
MS
2322 return gmap_fault(vcpu->arch.gmap, gpa,
2323 writable ? FAULT_FLAG_WRITE : 0);
24eb3a82
DD
2324}
2325
3c038e6b
DD
2326static void __kvm_inject_pfault_token(struct kvm_vcpu *vcpu, bool start_token,
2327 unsigned long token)
2328{
2329 struct kvm_s390_interrupt inti;
383d0b05 2330 struct kvm_s390_irq irq;
3c038e6b
DD
2331
2332 if (start_token) {
383d0b05
JF
2333 irq.u.ext.ext_params2 = token;
2334 irq.type = KVM_S390_INT_PFAULT_INIT;
2335 WARN_ON_ONCE(kvm_s390_inject_vcpu(vcpu, &irq));
3c038e6b
DD
2336 } else {
2337 inti.type = KVM_S390_INT_PFAULT_DONE;
383d0b05 2338 inti.parm64 = token;
3c038e6b
DD
2339 WARN_ON_ONCE(kvm_s390_inject_vm(vcpu->kvm, &inti));
2340 }
2341}
2342
2343void kvm_arch_async_page_not_present(struct kvm_vcpu *vcpu,
2344 struct kvm_async_pf *work)
2345{
2346 trace_kvm_s390_pfault_init(vcpu, work->arch.pfault_token);
2347 __kvm_inject_pfault_token(vcpu, true, work->arch.pfault_token);
2348}
2349
2350void kvm_arch_async_page_present(struct kvm_vcpu *vcpu,
2351 struct kvm_async_pf *work)
2352{
2353 trace_kvm_s390_pfault_done(vcpu, work->arch.pfault_token);
2354 __kvm_inject_pfault_token(vcpu, false, work->arch.pfault_token);
2355}
2356
2357void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu,
2358 struct kvm_async_pf *work)
2359{
2360 /* s390 will always inject the page directly */
2361}
2362
2363bool kvm_arch_can_inject_async_page_present(struct kvm_vcpu *vcpu)
2364{
2365 /*
2366 * s390 will always inject the page directly,
 2367	 * but we still want check_async_completion to clean up
2368 */
2369 return true;
2370}
2371
2372static int kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu)
2373{
2374 hva_t hva;
2375 struct kvm_arch_async_pf arch;
2376 int rc;
2377
2378 if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
2379 return 0;
2380 if ((vcpu->arch.sie_block->gpsw.mask & vcpu->arch.pfault_select) !=
2381 vcpu->arch.pfault_compare)
2382 return 0;
2383 if (psw_extint_disabled(vcpu))
2384 return 0;
9a022067 2385 if (kvm_s390_vcpu_has_irq(vcpu, 0))
3c038e6b
DD
2386 return 0;
2387 if (!(vcpu->arch.sie_block->gcr[0] & 0x200ul))
2388 return 0;
2389 if (!vcpu->arch.gmap->pfault_enabled)
2390 return 0;
2391
81480cc1
HC
2392 hva = gfn_to_hva(vcpu->kvm, gpa_to_gfn(current->thread.gmap_addr));
2393 hva += current->thread.gmap_addr & ~PAGE_MASK;
2394 if (read_guest_real(vcpu, vcpu->arch.pfault_token, &arch.pfault_token, 8))
3c038e6b
DD
2395 return 0;
2396
2397 rc = kvm_setup_async_pf(vcpu, current->thread.gmap_addr, hva, &arch);
2398 return rc;
2399}
2400
3fb4c40f 2401static int vcpu_pre_run(struct kvm_vcpu *vcpu)
b0c632db 2402{
3fb4c40f 2403 int rc, cpuflags;
e168bf8d 2404
3c038e6b
DD
2405 /*
2406 * On s390 notifications for arriving pages will be delivered directly
 2407	 * to the guest, but the housekeeping for completed pfaults is
2408 * handled outside the worker.
2409 */
2410 kvm_check_async_pf_completion(vcpu);
2411
7ec7c8c7
CB
2412 vcpu->arch.sie_block->gg14 = vcpu->run->s.regs.gprs[14];
2413 vcpu->arch.sie_block->gg15 = vcpu->run->s.regs.gprs[15];
b0c632db
HC
2414
2415 if (need_resched())
2416 schedule();
2417
d3a73acb 2418 if (test_cpu_flag(CIF_MCCK_PENDING))
71cde587
CB
2419 s390_handle_mcck();
2420
79395031
JF
2421 if (!kvm_is_ucontrol(vcpu->kvm)) {
2422 rc = kvm_s390_deliver_pending_interrupts(vcpu);
2423 if (rc)
2424 return rc;
2425 }
0ff31867 2426
2c70fe44
CB
2427 rc = kvm_s390_handle_requests(vcpu);
2428 if (rc)
2429 return rc;
2430
27291e21
DH
2431 if (guestdbg_enabled(vcpu)) {
2432 kvm_s390_backup_guest_per_regs(vcpu);
2433 kvm_s390_patch_guest_per_regs(vcpu);
2434 }
2435
b0c632db 2436 vcpu->arch.sie_block->icptcode = 0;
3fb4c40f
TH
2437 cpuflags = atomic_read(&vcpu->arch.sie_block->cpuflags);
2438 VCPU_EVENT(vcpu, 6, "entering sie flags %x", cpuflags);
2439 trace_kvm_s390_sie_enter(vcpu, cpuflags);
2b29a9fd 2440
3fb4c40f
TH
2441 return 0;
2442}
2443
492d8642
TH
2444static int vcpu_post_run_fault_in_sie(struct kvm_vcpu *vcpu)
2445{
56317920
DH
2446 struct kvm_s390_pgm_info pgm_info = {
2447 .code = PGM_ADDRESSING,
2448 };
2449 u8 opcode, ilen;
492d8642
TH
2450 int rc;
2451
2452 VCPU_EVENT(vcpu, 3, "%s", "fault in sie instruction");
2453 trace_kvm_s390_sie_fault(vcpu);
2454
2455 /*
2456 * We want to inject an addressing exception, which is defined as a
2457 * suppressing or terminating exception. However, since we came here
2458 * by a DAT access exception, the PSW still points to the faulting
2459 * instruction since DAT exceptions are nullifying. So we've got
2460 * to look up the current opcode to get the length of the instruction
2461 * to be able to forward the PSW.
2462 */
65977322 2463 rc = read_guest_instr(vcpu, &opcode, 1);
56317920 2464 ilen = insn_length(opcode);
9b0d721a
DH
2465 if (rc < 0) {
2466 return rc;
2467 } else if (rc) {
2468 /* Instruction-Fetching Exceptions - we can't detect the ilen.
2469 * Forward by arbitrary ilc, injection will take care of
2470 * nullification if necessary.
2471 */
2472 pgm_info = vcpu->arch.pgm;
2473 ilen = 4;
2474 }
56317920
DH
2475 pgm_info.flags = ilen | KVM_S390_PGM_FLAGS_ILC_VALID;
2476 kvm_s390_forward_psw(vcpu, ilen);
2477 return kvm_s390_inject_prog_irq(vcpu, &pgm_info);
492d8642
TH
2478}
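/*
 * Worked example for the forwarding above (addresses illustrative): if
 * the guest faulted on a 4-byte instruction at 0x2000, the nullifying
 * DAT exception left the PSW at 0x2000. kvm_s390_forward_psw() first
 * advances it by ilen to 0x2004, so the injected addressing exception
 * then behaves as the suppressing exception the architecture defines.
 */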
2479
3fb4c40f
TH
2480static int vcpu_post_run(struct kvm_vcpu *vcpu, int exit_reason)
2481{
2b29a9fd
DD
2482 VCPU_EVENT(vcpu, 6, "exit sie icptcode %d",
2483 vcpu->arch.sie_block->icptcode);
2484 trace_kvm_s390_sie_exit(vcpu, vcpu->arch.sie_block->icptcode);
2485
27291e21
DH
2486 if (guestdbg_enabled(vcpu))
2487 kvm_s390_restore_guest_per_regs(vcpu);
2488
7ec7c8c7
CB
2489 vcpu->run->s.regs.gprs[14] = vcpu->arch.sie_block->gg14;
2490 vcpu->run->s.regs.gprs[15] = vcpu->arch.sie_block->gg15;
71f116bf
DH
2491
2492 if (vcpu->arch.sie_block->icptcode > 0) {
2493 int rc = kvm_handle_sie_intercept(vcpu);
2494
2495 if (rc != -EOPNOTSUPP)
2496 return rc;
2497 vcpu->run->exit_reason = KVM_EXIT_S390_SIEIC;
2498 vcpu->run->s390_sieic.icptcode = vcpu->arch.sie_block->icptcode;
2499 vcpu->run->s390_sieic.ipa = vcpu->arch.sie_block->ipa;
2500 vcpu->run->s390_sieic.ipb = vcpu->arch.sie_block->ipb;
2501 return -EREMOTE;
2502 } else if (exit_reason != -EFAULT) {
2503 vcpu->stat.exit_null++;
2504 return 0;
210b1607
TH
2505 } else if (kvm_is_ucontrol(vcpu->kvm)) {
2506 vcpu->run->exit_reason = KVM_EXIT_S390_UCONTROL;
2507 vcpu->run->s390_ucontrol.trans_exc_code =
2508 current->thread.gmap_addr;
2509 vcpu->run->s390_ucontrol.pgm_code = 0x10;
71f116bf 2510 return -EREMOTE;
24eb3a82 2511 } else if (current->thread.gmap_pfault) {
3c038e6b 2512 trace_kvm_s390_major_guest_pfault(vcpu);
24eb3a82 2513 current->thread.gmap_pfault = 0;
71f116bf
DH
2514 if (kvm_arch_setup_async_pf(vcpu))
2515 return 0;
2516 return kvm_arch_fault_in_page(vcpu, current->thread.gmap_addr, 1);
a76ccff6 2517 }
71f116bf 2518 return vcpu_post_run_fault_in_sie(vcpu);
3fb4c40f
TH
2519}
2520
2521static int __vcpu_run(struct kvm_vcpu *vcpu)
2522{
2523 int rc, exit_reason;
2524
800c1065
TH
2525 /*
2526 * We try to hold kvm->srcu during most of vcpu_run (except when run-
2527 * ning the guest), so that memslots (and other stuff) are protected
2528 */
2529 vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
2530
a76ccff6
TH
2531 do {
2532 rc = vcpu_pre_run(vcpu);
2533 if (rc)
2534 break;
3fb4c40f 2535
800c1065 2536 srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
a76ccff6
TH
2537 /*
 2538	 * As PF_VCPU will be used in the fault handler, there must be
 2539	 * no uaccess between guest_enter and guest_exit.
2540 */
0097d12e
CB
2541 local_irq_disable();
2542 __kvm_guest_enter();
db0758b2 2543 __disable_cpu_timer_accounting(vcpu);
0097d12e 2544 local_irq_enable();
a76ccff6
TH
2545 exit_reason = sie64a(vcpu->arch.sie_block,
2546 vcpu->run->s.regs.gprs);
0097d12e 2547 local_irq_disable();
db0758b2 2548 __enable_cpu_timer_accounting(vcpu);
0097d12e
CB
2549 __kvm_guest_exit();
2550 local_irq_enable();
800c1065 2551 vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
a76ccff6
TH
2552
2553 rc = vcpu_post_run(vcpu, exit_reason);
27291e21 2554 } while (!signal_pending(current) && !guestdbg_exit_pending(vcpu) && !rc);
3fb4c40f 2555
800c1065 2556 srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
e168bf8d 2557 return rc;
b0c632db
HC
2558}
2559
b028ee3e
DH
2560static void sync_regs(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
2561{
2562 vcpu->arch.sie_block->gpsw.mask = kvm_run->psw_mask;
2563 vcpu->arch.sie_block->gpsw.addr = kvm_run->psw_addr;
2564 if (kvm_run->kvm_dirty_regs & KVM_SYNC_PREFIX)
2565 kvm_s390_set_prefix(vcpu, kvm_run->s.regs.prefix);
2566 if (kvm_run->kvm_dirty_regs & KVM_SYNC_CRS) {
2567 memcpy(&vcpu->arch.sie_block->gcr, &kvm_run->s.regs.crs, 128);
d3d692c8
DH
2568 /* some control register changes require a tlb flush */
2569 kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
b028ee3e
DH
2570 }
2571 if (kvm_run->kvm_dirty_regs & KVM_SYNC_ARCH0) {
4287f247 2572 kvm_s390_set_cpu_timer(vcpu, kvm_run->s.regs.cputm);
b028ee3e
DH
2573 vcpu->arch.sie_block->ckc = kvm_run->s.regs.ckc;
2574 vcpu->arch.sie_block->todpr = kvm_run->s.regs.todpr;
2575 vcpu->arch.sie_block->pp = kvm_run->s.regs.pp;
2576 vcpu->arch.sie_block->gbea = kvm_run->s.regs.gbea;
2577 }
2578 if (kvm_run->kvm_dirty_regs & KVM_SYNC_PFAULT) {
2579 vcpu->arch.pfault_token = kvm_run->s.regs.pft;
2580 vcpu->arch.pfault_select = kvm_run->s.regs.pfs;
2581 vcpu->arch.pfault_compare = kvm_run->s.regs.pfc;
9fbd8082
DH
2582 if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
2583 kvm_clear_async_pf_completion_queue(vcpu);
b028ee3e
DH
2584 }
2585 kvm_run->kvm_dirty_regs = 0;
2586}
2587
2588static void store_regs(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
2589{
2590 kvm_run->psw_mask = vcpu->arch.sie_block->gpsw.mask;
2591 kvm_run->psw_addr = vcpu->arch.sie_block->gpsw.addr;
2592 kvm_run->s.regs.prefix = kvm_s390_get_prefix(vcpu);
2593 memcpy(&kvm_run->s.regs.crs, &vcpu->arch.sie_block->gcr, 128);
4287f247 2594 kvm_run->s.regs.cputm = kvm_s390_get_cpu_timer(vcpu);
b028ee3e
DH
2595 kvm_run->s.regs.ckc = vcpu->arch.sie_block->ckc;
2596 kvm_run->s.regs.todpr = vcpu->arch.sie_block->todpr;
2597 kvm_run->s.regs.pp = vcpu->arch.sie_block->pp;
2598 kvm_run->s.regs.gbea = vcpu->arch.sie_block->gbea;
2599 kvm_run->s.regs.pft = vcpu->arch.pfault_token;
2600 kvm_run->s.regs.pfs = vcpu->arch.pfault_select;
2601 kvm_run->s.regs.pfc = vcpu->arch.pfault_compare;
2602}
2603
b0c632db
HC
2604int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
2605{
8f2abe6a 2606 int rc;
b0c632db
HC
2607 sigset_t sigsaved;
2608
27291e21
DH
2609 if (guestdbg_exit_pending(vcpu)) {
2610 kvm_s390_prepare_debug_exit(vcpu);
2611 return 0;
2612 }
2613
b0c632db
HC
2614 if (vcpu->sigset_active)
2615 sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);
2616
6352e4d2
DH
2617 if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm)) {
2618 kvm_s390_vcpu_start(vcpu);
2619 } else if (is_vcpu_stopped(vcpu)) {
ea2cdd27 2620 pr_err_ratelimited("can't run stopped vcpu %d\n",
6352e4d2
DH
2621 vcpu->vcpu_id);
2622 return -EINVAL;
2623 }
b0c632db 2624
b028ee3e 2625 sync_regs(vcpu, kvm_run);
db0758b2 2626 enable_cpu_timer_accounting(vcpu);
d7b0b5eb 2627
dab4079d 2628 might_fault();
a76ccff6 2629 rc = __vcpu_run(vcpu);
9ace903d 2630
b1d16c49
CE
2631 if (signal_pending(current) && !rc) {
2632 kvm_run->exit_reason = KVM_EXIT_INTR;
8f2abe6a 2633 rc = -EINTR;
b1d16c49 2634 }
8f2abe6a 2635
27291e21
DH
2636 if (guestdbg_exit_pending(vcpu) && !rc) {
2637 kvm_s390_prepare_debug_exit(vcpu);
2638 rc = 0;
2639 }
2640
8f2abe6a 2641 if (rc == -EREMOTE) {
71f116bf 2642 /* userspace support is needed, kvm_run has been prepared */
8f2abe6a
CB
2643 rc = 0;
2644 }
b0c632db 2645
db0758b2 2646 disable_cpu_timer_accounting(vcpu);
b028ee3e 2647 store_regs(vcpu, kvm_run);
d7b0b5eb 2648
b0c632db
HC
2649 if (vcpu->sigset_active)
2650 sigprocmask(SIG_SETMASK, &sigsaved, NULL);
2651
b0c632db 2652 vcpu->stat.exit_userspace++;
7e8e6ab4 2653 return rc;
b0c632db
HC
2654}
2655
b0c632db
HC
2656/*
2657 * store status at address
 2658	 * we have two special cases:
2659 * KVM_S390_STORE_STATUS_NOADDR: -> 0x1200 on 64 bit
2660 * KVM_S390_STORE_STATUS_PREFIXED: -> prefix
2661 */
d0bce605 2662int kvm_s390_store_status_unloaded(struct kvm_vcpu *vcpu, unsigned long gpa)
b0c632db 2663{
092670cd 2664 unsigned char archmode = 1;
9abc2a08 2665 freg_t fprs[NUM_FPRS];
fda902cb 2666 unsigned int px;
4287f247 2667 u64 clkcomp, cputm;
d0bce605 2668 int rc;
b0c632db 2669
d9a3a09a 2670 px = kvm_s390_get_prefix(vcpu);
d0bce605
HC
2671 if (gpa == KVM_S390_STORE_STATUS_NOADDR) {
2672 if (write_guest_abs(vcpu, 163, &archmode, 1))
b0c632db 2673 return -EFAULT;
d9a3a09a 2674 gpa = 0;
d0bce605
HC
2675 } else if (gpa == KVM_S390_STORE_STATUS_PREFIXED) {
2676 if (write_guest_real(vcpu, 163, &archmode, 1))
b0c632db 2677 return -EFAULT;
d9a3a09a
MS
2678 gpa = px;
2679 } else
2680 gpa -= __LC_FPREGS_SAVE_AREA;
9abc2a08
DH
2681
2682 /* manually convert vector registers if necessary */
2683 if (MACHINE_HAS_VX) {
9522b37f 2684 convert_vx_to_fp(fprs, (__vector128 *) vcpu->run->s.regs.vrs);
9abc2a08
DH
2685 rc = write_guest_abs(vcpu, gpa + __LC_FPREGS_SAVE_AREA,
2686 fprs, 128);
2687 } else {
2688 rc = write_guest_abs(vcpu, gpa + __LC_FPREGS_SAVE_AREA,
6fd8e67d 2689 vcpu->run->s.regs.fprs, 128);
9abc2a08 2690 }
d9a3a09a 2691 rc |= write_guest_abs(vcpu, gpa + __LC_GPREGS_SAVE_AREA,
d0bce605 2692 vcpu->run->s.regs.gprs, 128);
d9a3a09a 2693 rc |= write_guest_abs(vcpu, gpa + __LC_PSW_SAVE_AREA,
d0bce605 2694 &vcpu->arch.sie_block->gpsw, 16);
d9a3a09a 2695 rc |= write_guest_abs(vcpu, gpa + __LC_PREFIX_SAVE_AREA,
fda902cb 2696 &px, 4);
d9a3a09a 2697 rc |= write_guest_abs(vcpu, gpa + __LC_FP_CREG_SAVE_AREA,
9abc2a08 2698 &vcpu->run->s.regs.fpc, 4);
d9a3a09a 2699 rc |= write_guest_abs(vcpu, gpa + __LC_TOD_PROGREG_SAVE_AREA,
d0bce605 2700 &vcpu->arch.sie_block->todpr, 4);
4287f247 2701 cputm = kvm_s390_get_cpu_timer(vcpu);
d9a3a09a 2702 rc |= write_guest_abs(vcpu, gpa + __LC_CPU_TIMER_SAVE_AREA,
4287f247 2703 &cputm, 8);
178bd789 2704 clkcomp = vcpu->arch.sie_block->ckc >> 8;
d9a3a09a 2705 rc |= write_guest_abs(vcpu, gpa + __LC_CLOCK_COMP_SAVE_AREA,
d0bce605 2706 &clkcomp, 8);
d9a3a09a 2707 rc |= write_guest_abs(vcpu, gpa + __LC_AREGS_SAVE_AREA,
d0bce605 2708 &vcpu->run->s.regs.acrs, 64);
d9a3a09a 2709 rc |= write_guest_abs(vcpu, gpa + __LC_CREGS_SAVE_AREA,
d0bce605
HC
2710 &vcpu->arch.sie_block->gcr, 128);
2711 return rc ? -EFAULT : 0;
b0c632db
HC
2712}
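/*
 * Layout note (informative): the __LC_*_SAVE_AREA offsets are the
 * architected absolute lowcore addresses of the status fields, with the
 * floating point save area at 0x1200 - hence "0x1200 on 64 bit" in the
 * comment above. With KVM_S390_STORE_STATUS_NOADDR, gpa stays 0 and
 * every field lands in its lowcore slot; an explicit address is rebased
 * by __LC_FPREGS_SAVE_AREA first, so the same offsets produce one
 * contiguous save area starting at that address.
 */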
2713
e879892c
TH
2714int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
2715{
2716 /*
2717 * The guest FPRS and ACRS are in the host FPRS/ACRS due to the lazy
 2718	 * copying in vcpu load/put. Let's update our copies before we save
 2719	 * them into the save area.
2720 */
d0164ee2 2721 save_fpu_regs();
9abc2a08 2722 vcpu->run->s.regs.fpc = current->thread.fpu.fpc;
e879892c
TH
2723 save_access_regs(vcpu->run->s.regs.acrs);
2724
2725 return kvm_s390_store_status_unloaded(vcpu, addr);
2726}
2727
bc17de7c
EF
2728/*
2729 * store additional status at address
2730 */
2731int kvm_s390_store_adtl_status_unloaded(struct kvm_vcpu *vcpu,
2732 unsigned long gpa)
2733{
2734 /* Only bits 0-53 are used for address formation */
2735 if (!(gpa & ~0x3ff))
2736 return 0;
2737
2738 return write_guest_abs(vcpu, gpa & ~0x3ff,
2739 (void *)&vcpu->run->s.regs.vrs, 512);
2740}
2741
2742int kvm_s390_vcpu_store_adtl_status(struct kvm_vcpu *vcpu, unsigned long addr)
2743{
2744 if (!test_kvm_facility(vcpu->kvm, 129))
2745 return 0;
2746
2747 /*
 2748	 * The guest VXRS are in the host VXRS due to the lazy
9977e886
HB
2749 * copying in vcpu load/put. We can simply call save_fpu_regs()
2750 * to save the current register state because we are in the
2751 * middle of a load/put cycle.
2752 *
2753 * Let's update our copies before we save it into the save area.
bc17de7c 2754 */
d0164ee2 2755 save_fpu_regs();
bc17de7c
EF
2756
2757 return kvm_s390_store_adtl_status_unloaded(vcpu, addr);
2758}
2759
8ad35755
DH
2760static void __disable_ibs_on_vcpu(struct kvm_vcpu *vcpu)
2761{
2762 kvm_check_request(KVM_REQ_ENABLE_IBS, vcpu);
8e236546 2763 kvm_s390_sync_request(KVM_REQ_DISABLE_IBS, vcpu);
8ad35755
DH
2764}
2765
2766static void __disable_ibs_on_all_vcpus(struct kvm *kvm)
2767{
2768 unsigned int i;
2769 struct kvm_vcpu *vcpu;
2770
2771 kvm_for_each_vcpu(i, vcpu, kvm) {
2772 __disable_ibs_on_vcpu(vcpu);
2773 }
2774}
2775
2776static void __enable_ibs_on_vcpu(struct kvm_vcpu *vcpu)
2777{
2778 kvm_check_request(KVM_REQ_DISABLE_IBS, vcpu);
8e236546 2779 kvm_s390_sync_request(KVM_REQ_ENABLE_IBS, vcpu);
8ad35755
DH
2780}
2781
6852d7b6
DH
2782void kvm_s390_vcpu_start(struct kvm_vcpu *vcpu)
2783{
8ad35755
DH
2784 int i, online_vcpus, started_vcpus = 0;
2785
2786 if (!is_vcpu_stopped(vcpu))
2787 return;
2788
6852d7b6 2789 trace_kvm_s390_vcpu_start_stop(vcpu->vcpu_id, 1);
8ad35755 2790 /* Only one cpu at a time may enter/leave the STOPPED state. */
433b9ee4 2791 spin_lock(&vcpu->kvm->arch.start_stop_lock);
8ad35755
DH
2792 online_vcpus = atomic_read(&vcpu->kvm->online_vcpus);
2793
2794 for (i = 0; i < online_vcpus; i++) {
2795 if (!is_vcpu_stopped(vcpu->kvm->vcpus[i]))
2796 started_vcpus++;
2797 }
2798
2799 if (started_vcpus == 0) {
2800 /* we're the only active VCPU -> speed it up */
2801 __enable_ibs_on_vcpu(vcpu);
2802 } else if (started_vcpus == 1) {
2803 /*
2804 * As we are starting a second VCPU, we have to disable
2805 * the IBS facility on all VCPUs to remove potentially
 2806	 * outstanding ENABLE requests.
2807 */
2808 __disable_ibs_on_all_vcpus(vcpu->kvm);
2809 }
2810
805de8f4 2811 atomic_andnot(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);
8ad35755
DH
2812 /*
2813 * Another VCPU might have used IBS while we were offline.
2814 * Let's play safe and flush the VCPU at startup.
2815 */
d3d692c8 2816 kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
433b9ee4 2817 spin_unlock(&vcpu->kvm->arch.start_stop_lock);
8ad35755 2818 return;
6852d7b6
DH
2819}
2820
2821void kvm_s390_vcpu_stop(struct kvm_vcpu *vcpu)
2822{
8ad35755
DH
2823 int i, online_vcpus, started_vcpus = 0;
2824 struct kvm_vcpu *started_vcpu = NULL;
2825
2826 if (is_vcpu_stopped(vcpu))
2827 return;
2828
6852d7b6 2829 trace_kvm_s390_vcpu_start_stop(vcpu->vcpu_id, 0);
8ad35755 2830 /* Only one cpu at a time may enter/leave the STOPPED state. */
433b9ee4 2831 spin_lock(&vcpu->kvm->arch.start_stop_lock);
8ad35755
DH
2832 online_vcpus = atomic_read(&vcpu->kvm->online_vcpus);
2833
32f5ff63 2834 /* SIGP STOP and SIGP STOP AND STORE STATUS has been fully processed */
6cddd432 2835 kvm_s390_clear_stop_irq(vcpu);
32f5ff63 2836
805de8f4 2837 atomic_or(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);
8ad35755
DH
2838 __disable_ibs_on_vcpu(vcpu);
2839
2840 for (i = 0; i < online_vcpus; i++) {
2841 if (!is_vcpu_stopped(vcpu->kvm->vcpus[i])) {
2842 started_vcpus++;
2843 started_vcpu = vcpu->kvm->vcpus[i];
2844 }
2845 }
2846
2847 if (started_vcpus == 1) {
2848 /*
2849 * As we only have one VCPU left, we want to enable the
2850 * IBS facility for that VCPU to speed it up.
2851 */
2852 __enable_ibs_on_vcpu(started_vcpu);
2853 }
2854
433b9ee4 2855 spin_unlock(&vcpu->kvm->arch.start_stop_lock);
8ad35755 2856 return;
6852d7b6
DH
2857}
2858
d6712df9
CH
2859static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
2860 struct kvm_enable_cap *cap)
2861{
2862 int r;
2863
2864 if (cap->flags)
2865 return -EINVAL;
2866
2867 switch (cap->cap) {
fa6b7fe9
CH
2868 case KVM_CAP_S390_CSS_SUPPORT:
2869 if (!vcpu->kvm->arch.css_support) {
2870 vcpu->kvm->arch.css_support = 1;
c92ea7b9 2871 VM_EVENT(vcpu->kvm, 3, "%s", "ENABLE: CSS support");
fa6b7fe9
CH
2872 trace_kvm_s390_enable_css(vcpu->kvm);
2873 }
2874 r = 0;
2875 break;
d6712df9
CH
2876 default:
2877 r = -EINVAL;
2878 break;
2879 }
2880 return r;
2881}
2882
41408c28
TH
2883static long kvm_s390_guest_mem_op(struct kvm_vcpu *vcpu,
2884 struct kvm_s390_mem_op *mop)
2885{
2886 void __user *uaddr = (void __user *)mop->buf;
2887 void *tmpbuf = NULL;
2888 int r, srcu_idx;
2889 const u64 supported_flags = KVM_S390_MEMOP_F_INJECT_EXCEPTION
2890 | KVM_S390_MEMOP_F_CHECK_ONLY;
2891
2892 if (mop->flags & ~supported_flags)
2893 return -EINVAL;
2894
2895 if (mop->size > MEM_OP_MAX_SIZE)
2896 return -E2BIG;
2897
2898 if (!(mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY)) {
2899 tmpbuf = vmalloc(mop->size);
2900 if (!tmpbuf)
2901 return -ENOMEM;
2902 }
2903
2904 srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
2905
2906 switch (mop->op) {
2907 case KVM_S390_MEMOP_LOGICAL_READ:
2908 if (mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY) {
92c96321
DH
2909 r = check_gva_range(vcpu, mop->gaddr, mop->ar,
2910 mop->size, GACC_FETCH);
41408c28
TH
2911 break;
2912 }
2913 r = read_guest(vcpu, mop->gaddr, mop->ar, tmpbuf, mop->size);
2914 if (r == 0) {
2915 if (copy_to_user(uaddr, tmpbuf, mop->size))
2916 r = -EFAULT;
2917 }
2918 break;
2919 case KVM_S390_MEMOP_LOGICAL_WRITE:
2920 if (mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY) {
92c96321
DH
2921 r = check_gva_range(vcpu, mop->gaddr, mop->ar,
2922 mop->size, GACC_STORE);
41408c28
TH
2923 break;
2924 }
2925 if (copy_from_user(tmpbuf, uaddr, mop->size)) {
2926 r = -EFAULT;
2927 break;
2928 }
2929 r = write_guest(vcpu, mop->gaddr, mop->ar, tmpbuf, mop->size);
2930 break;
2931 default:
2932 r = -EINVAL;
2933 }
2934
2935 srcu_read_unlock(&vcpu->kvm->srcu, srcu_idx);
2936
2937 if (r > 0 && (mop->flags & KVM_S390_MEMOP_F_INJECT_EXCEPTION) != 0)
2938 kvm_s390_inject_prog_irq(vcpu, &vcpu->arch.pgm);
2939
2940 vfree(tmpbuf);
2941 return r;
2942}
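/*
 * Illustrative userspace invocation of the handler above (a sketch;
 * vcpu_fd and the guest address are assumptions):
 *
 *	__u8 data[256];
 *	struct kvm_s390_mem_op op = {
 *		.gaddr = 0x10000,
 *		.size  = sizeof(data),
 *		.op    = KVM_S390_MEMOP_LOGICAL_READ,
 *		.buf   = (__u64)data,
 *		.ar    = 0,
 *	};
 *	int r = ioctl(vcpu_fd, KVM_S390_MEM_OP, &op);
 *
 * A positive return value is a program-interruption code from address
 * translation; with KVM_S390_MEMOP_F_INJECT_EXCEPTION set it is also
 * injected into the guest, as the tail of the function shows.
 */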
2943
b0c632db
HC
2944long kvm_arch_vcpu_ioctl(struct file *filp,
2945 unsigned int ioctl, unsigned long arg)
2946{
2947 struct kvm_vcpu *vcpu = filp->private_data;
2948 void __user *argp = (void __user *)arg;
800c1065 2949 int idx;
bc923cc9 2950 long r;
b0c632db 2951
93736624 2952 switch (ioctl) {
47b43c52
JF
2953 case KVM_S390_IRQ: {
2954 struct kvm_s390_irq s390irq;
2955
2956 r = -EFAULT;
2957 if (copy_from_user(&s390irq, argp, sizeof(s390irq)))
2958 break;
2959 r = kvm_s390_inject_vcpu(vcpu, &s390irq);
2960 break;
2961 }
93736624 2962 case KVM_S390_INTERRUPT: {
ba5c1e9b 2963 struct kvm_s390_interrupt s390int;
383d0b05 2964 struct kvm_s390_irq s390irq;
ba5c1e9b 2965
93736624 2966 r = -EFAULT;
ba5c1e9b 2967 if (copy_from_user(&s390int, argp, sizeof(s390int)))
93736624 2968 break;
383d0b05
JF
2969 if (s390int_to_s390irq(&s390int, &s390irq))
2970 return -EINVAL;
2971 r = kvm_s390_inject_vcpu(vcpu, &s390irq);
93736624 2972 break;
ba5c1e9b 2973 }
b0c632db 2974 case KVM_S390_STORE_STATUS:
800c1065 2975 idx = srcu_read_lock(&vcpu->kvm->srcu);
bc923cc9 2976 r = kvm_s390_vcpu_store_status(vcpu, arg);
800c1065 2977 srcu_read_unlock(&vcpu->kvm->srcu, idx);
bc923cc9 2978 break;
b0c632db
HC
2979 case KVM_S390_SET_INITIAL_PSW: {
2980 psw_t psw;
2981
bc923cc9 2982 r = -EFAULT;
b0c632db 2983 if (copy_from_user(&psw, argp, sizeof(psw)))
bc923cc9
AK
2984 break;
2985 r = kvm_arch_vcpu_ioctl_set_initial_psw(vcpu, psw);
2986 break;
b0c632db
HC
2987 }
2988 case KVM_S390_INITIAL_RESET:
bc923cc9
AK
2989 r = kvm_arch_vcpu_ioctl_initial_reset(vcpu);
2990 break;
14eebd91
CO
2991 case KVM_SET_ONE_REG:
2992 case KVM_GET_ONE_REG: {
2993 struct kvm_one_reg reg;
2994 r = -EFAULT;
2995 if (copy_from_user(&reg, argp, sizeof(reg)))
2996 break;
2997 if (ioctl == KVM_SET_ONE_REG)
2998 r = kvm_arch_vcpu_ioctl_set_one_reg(vcpu, &reg);
2999 else
3000 r = kvm_arch_vcpu_ioctl_get_one_reg(vcpu, &reg);
3001 break;
3002 }
27e0393f
CO
3003#ifdef CONFIG_KVM_S390_UCONTROL
3004 case KVM_S390_UCAS_MAP: {
3005 struct kvm_s390_ucas_mapping ucasmap;
3006
3007 if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
3008 r = -EFAULT;
3009 break;
3010 }
3011
3012 if (!kvm_is_ucontrol(vcpu->kvm)) {
3013 r = -EINVAL;
3014 break;
3015 }
3016
3017 r = gmap_map_segment(vcpu->arch.gmap, ucasmap.user_addr,
3018 ucasmap.vcpu_addr, ucasmap.length);
3019 break;
3020 }
3021 case KVM_S390_UCAS_UNMAP: {
3022 struct kvm_s390_ucas_mapping ucasmap;
3023
3024 if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
3025 r = -EFAULT;
3026 break;
3027 }
3028
3029 if (!kvm_is_ucontrol(vcpu->kvm)) {
3030 r = -EINVAL;
3031 break;
3032 }
3033
3034 r = gmap_unmap_segment(vcpu->arch.gmap, ucasmap.vcpu_addr,
3035 ucasmap.length);
3036 break;
3037 }
3038#endif
ccc7910f 3039 case KVM_S390_VCPU_FAULT: {
527e30b4 3040 r = gmap_fault(vcpu->arch.gmap, arg, 0);
ccc7910f
CO
3041 break;
3042 }
d6712df9
CH
3043 case KVM_ENABLE_CAP:
3044 {
3045 struct kvm_enable_cap cap;
3046 r = -EFAULT;
3047 if (copy_from_user(&cap, argp, sizeof(cap)))
3048 break;
3049 r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap);
3050 break;
3051 }
41408c28
TH
3052 case KVM_S390_MEM_OP: {
3053 struct kvm_s390_mem_op mem_op;
3054
3055 if (copy_from_user(&mem_op, argp, sizeof(mem_op)) == 0)
3056 r = kvm_s390_guest_mem_op(vcpu, &mem_op);
3057 else
3058 r = -EFAULT;
3059 break;
3060 }
816c7667
JF
3061 case KVM_S390_SET_IRQ_STATE: {
3062 struct kvm_s390_irq_state irq_state;
3063
3064 r = -EFAULT;
3065 if (copy_from_user(&irq_state, argp, sizeof(irq_state)))
3066 break;
3067 if (irq_state.len > VCPU_IRQS_MAX_BUF ||
3068 irq_state.len == 0 ||
3069 irq_state.len % sizeof(struct kvm_s390_irq) > 0) {
3070 r = -EINVAL;
3071 break;
3072 }
3073 r = kvm_s390_set_irq_state(vcpu,
3074 (void __user *) irq_state.buf,
3075 irq_state.len);
3076 break;
3077 }
3078 case KVM_S390_GET_IRQ_STATE: {
3079 struct kvm_s390_irq_state irq_state;
3080
3081 r = -EFAULT;
3082 if (copy_from_user(&irq_state, argp, sizeof(irq_state)))
3083 break;
3084 if (irq_state.len == 0) {
3085 r = -EINVAL;
3086 break;
3087 }
3088 r = kvm_s390_get_irq_state(vcpu,
3089 (__u8 __user *) irq_state.buf,
3090 irq_state.len);
3091 break;
3092 }
b0c632db 3093 default:
3e6afcf1 3094 r = -ENOTTY;
b0c632db 3095 }
bc923cc9 3096 return r;
b0c632db
HC
3097}
3098
5b1c1493
CO
3099int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
3100{
3101#ifdef CONFIG_KVM_S390_UCONTROL
3102 if ((vmf->pgoff == KVM_S390_SIE_PAGE_OFFSET)
3103 && (kvm_is_ucontrol(vcpu->kvm))) {
3104 vmf->page = virt_to_page(vcpu->arch.sie_block);
3105 get_page(vmf->page);
3106 return 0;
3107 }
3108#endif
3109 return VM_FAULT_SIGBUS;
3110}
3111
5587027c
AK
3112int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
3113 unsigned long npages)
db3fe4eb
TY
3114{
3115 return 0;
3116}
3117
b0c632db 3118/* Section: memory related */
f7784b8e
MT
3119int kvm_arch_prepare_memory_region(struct kvm *kvm,
3120 struct kvm_memory_slot *memslot,
09170a49 3121 const struct kvm_userspace_memory_region *mem,
7b6195a9 3122 enum kvm_mr_change change)
b0c632db 3123{
dd2887e7
NW
 3124	/* A few sanity checks. Memory slots have to start and end at a
 3125	   segment boundary (1 MB). The memory in userland may be fragmented
 3126	   into various different vmas. It is okay to mmap() and munmap()
 3127	   parts of this slot at any time after this call. */
b0c632db 3128
598841ca 3129 if (mem->userspace_addr & 0xffffful)
b0c632db
HC
3130 return -EINVAL;
3131
598841ca 3132 if (mem->memory_size & 0xffffful)
b0c632db
HC
3133 return -EINVAL;
3134
a3a92c31
DD
3135 if (mem->guest_phys_addr + mem->memory_size > kvm->arch.mem_limit)
3136 return -EINVAL;
3137
f7784b8e
MT
3138 return 0;
3139}
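/*
 * Illustrative slot that passes the checks above (names and sizes are
 * assumptions): userspace_addr and memory_size must be 1 MB aligned,
 * and guest_phys_addr + memory_size must stay below
 * kvm->arch.mem_limit.
 *
 *	struct kvm_userspace_memory_region mem = {
 *		.slot            = 0,
 *		.guest_phys_addr = 0,
 *		.memory_size     = 256 << 20,		// 256 MB, 1 MB aligned
 *		.userspace_addr  = (__u64)backing,	// 1 MB aligned mapping
 *	};
 *	ioctl(vm_fd, KVM_SET_USER_MEMORY_REGION, &mem);
 */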
3140
3141void kvm_arch_commit_memory_region(struct kvm *kvm,
09170a49 3142 const struct kvm_userspace_memory_region *mem,
8482644a 3143 const struct kvm_memory_slot *old,
f36f3f28 3144 const struct kvm_memory_slot *new,
8482644a 3145 enum kvm_mr_change change)
f7784b8e 3146{
f7850c92 3147 int rc;
f7784b8e 3148
2cef4deb
CB
3149 /* If the basics of the memslot do not change, we do not want
3150 * to update the gmap. Every update causes several unnecessary
3151 * segment translation exceptions. This is usually handled just
3152 * fine by the normal fault handler + gmap, but it will also
3153 * cause faults on the prefix page of running guest CPUs.
3154 */
3155 if (old->userspace_addr == mem->userspace_addr &&
3156 old->base_gfn * PAGE_SIZE == mem->guest_phys_addr &&
3157 old->npages * PAGE_SIZE == mem->memory_size)
3158 return;
598841ca
CO
3159
3160 rc = gmap_map_segment(kvm->arch.gmap, mem->userspace_addr,
3161 mem->guest_phys_addr, mem->memory_size);
3162 if (rc)
ea2cdd27 3163 pr_warn("failed to commit memory region\n");
598841ca 3164 return;
b0c632db
HC
3165}
3166
60a37709
AY
3167static inline unsigned long nonhyp_mask(int i)
3168{
3169 unsigned int nonhyp_fai = (sclp.hmfai << i * 2) >> 30;
3170
3171 return 0x0000ffffffffffffUL >> (nonhyp_fai << 4);
3172}
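/*
 * Worked example (values illustrative): hmfai holds sixteen 2-bit
 * fields, most significant first. For i = 0 and sclp.hmfai =
 * 0x40000000, nonhyp_fai = (0x40000000 << 0) >> 30 = 1, so the function
 * returns 0x0000ffffffffffffUL >> 16 = 0x00000000ffffffffUL - i.e. the
 * top 16 * (nonhyp_fai + 1) facility bits of stfle word i are masked
 * off and not added to the KVM facility mask below.
 */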
3173
3491caf2
CB
3174void kvm_arch_vcpu_block_finish(struct kvm_vcpu *vcpu)
3175{
3176 vcpu->valid_wakeup = false;
3177}
3178
b0c632db
HC
3179static int __init kvm_s390_init(void)
3180{
60a37709
AY
3181 int i;
3182
07197fd0
DH
3183 if (!sclp.has_sief2) {
3184 pr_info("SIE not available\n");
3185 return -ENODEV;
3186 }
3187
60a37709
AY
3188 for (i = 0; i < 16; i++)
3189 kvm_s390_fac_list_mask[i] |=
3190 S390_lowcore.stfle_fac_list[i] & nonhyp_mask(i);
3191
9d8d5786 3192 return kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
b0c632db
HC
3193}
3194
3195static void __exit kvm_s390_exit(void)
3196{
3197 kvm_exit();
3198}
3199
3200module_init(kvm_s390_init);
3201module_exit(kvm_s390_exit);
566af940
CH
3202
3203/*
3204 * Enable autoloading of the kvm module.
3205 * Note that we add the module alias here instead of virt/kvm/kvm_main.c
3206 * since x86 takes a different approach.
3207 */
3208#include <linux/miscdevice.h>
3209MODULE_ALIAS_MISCDEV(KVM_MINOR);
3210MODULE_ALIAS("devname:kvm");