/*
 * hosting zSeries kernel virtual machines
 *
 * Copyright IBM Corp. 2008, 2009
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (version 2 only)
 * as published by the Free Software Foundation.
 *
 *    Author(s): Carsten Otte <cotte@de.ibm.com>
 *               Christian Borntraeger <borntraeger@de.ibm.com>
 *               Heiko Carstens <heiko.carstens@de.ibm.com>
 *               Christian Ehrhardt <ehrhardt@de.ibm.com>
 *               Jason J. Herne <jjherne@us.ibm.com>
 */

#include <linux/compiler.h>
#include <linux/err.h>
#include <linux/fs.h>
#include <linux/hrtimer.h>
#include <linux/init.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/timer.h>
#include <linux/vmalloc.h>
#include <linux/bitmap.h>
#include <asm/asm-offsets.h>
#include <asm/lowcore.h>
#include <asm/etr.h>
#include <asm/pgtable.h>
#include <asm/gmap.h>
#include <asm/nmi.h>
#include <asm/switch_to.h>
#include <asm/isc.h>
#include <asm/sclp.h>
#include <asm/cpacf.h>
#include "kvm-s390.h"
#include "gaccess.h"

#define KMSG_COMPONENT "kvm-s390"
#undef pr_fmt
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#define CREATE_TRACE_POINTS
#include "trace.h"
#include "trace-s390.h"

#define MEM_OP_MAX_SIZE 65536	/* Maximum transfer size for KVM_S390_MEM_OP */
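/*
 * Worst-case buffer for one VCPU's pending local interrupts, presumably
 * sized as one slot per possible emergency-signal source CPU plus one per
 * remaining local interrupt type.
 */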
#define LOCAL_IRQS 32
#define VCPU_IRQS_MAX_BUF (sizeof(struct kvm_s390_irq) * \
			   (KVM_MAX_VCPUS + LOCAL_IRQS))

#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU

struct kvm_stats_debugfs_item debugfs_entries[] = {
	{ "userspace_handled", VCPU_STAT(exit_userspace) },
	{ "exit_null", VCPU_STAT(exit_null) },
	{ "exit_validity", VCPU_STAT(exit_validity) },
	{ "exit_stop_request", VCPU_STAT(exit_stop_request) },
	{ "exit_external_request", VCPU_STAT(exit_external_request) },
	{ "exit_external_interrupt", VCPU_STAT(exit_external_interrupt) },
	{ "exit_instruction", VCPU_STAT(exit_instruction) },
	{ "exit_program_interruption", VCPU_STAT(exit_program_interruption) },
	{ "exit_instr_and_program_int", VCPU_STAT(exit_instr_and_program) },
	{ "exit_operation_exception", VCPU_STAT(exit_operation_exception) },
	{ "halt_successful_poll", VCPU_STAT(halt_successful_poll) },
	{ "halt_attempted_poll", VCPU_STAT(halt_attempted_poll) },
	{ "halt_poll_invalid", VCPU_STAT(halt_poll_invalid) },
	{ "halt_wakeup", VCPU_STAT(halt_wakeup) },
	{ "instruction_lctlg", VCPU_STAT(instruction_lctlg) },
	{ "instruction_lctl", VCPU_STAT(instruction_lctl) },
	{ "instruction_stctl", VCPU_STAT(instruction_stctl) },
	{ "instruction_stctg", VCPU_STAT(instruction_stctg) },
	{ "deliver_emergency_signal", VCPU_STAT(deliver_emergency_signal) },
	{ "deliver_external_call", VCPU_STAT(deliver_external_call) },
	{ "deliver_service_signal", VCPU_STAT(deliver_service_signal) },
	{ "deliver_virtio_interrupt", VCPU_STAT(deliver_virtio_interrupt) },
	{ "deliver_stop_signal", VCPU_STAT(deliver_stop_signal) },
	{ "deliver_prefix_signal", VCPU_STAT(deliver_prefix_signal) },
	{ "deliver_restart_signal", VCPU_STAT(deliver_restart_signal) },
	{ "deliver_program_interruption", VCPU_STAT(deliver_program_int) },
	{ "exit_wait_state", VCPU_STAT(exit_wait_state) },
	{ "instruction_pfmf", VCPU_STAT(instruction_pfmf) },
	{ "instruction_stidp", VCPU_STAT(instruction_stidp) },
	{ "instruction_spx", VCPU_STAT(instruction_spx) },
	{ "instruction_stpx", VCPU_STAT(instruction_stpx) },
	{ "instruction_stap", VCPU_STAT(instruction_stap) },
	{ "instruction_storage_key", VCPU_STAT(instruction_storage_key) },
	{ "instruction_ipte_interlock", VCPU_STAT(instruction_ipte_interlock) },
	{ "instruction_stsch", VCPU_STAT(instruction_stsch) },
	{ "instruction_chsc", VCPU_STAT(instruction_chsc) },
	{ "instruction_essa", VCPU_STAT(instruction_essa) },
	{ "instruction_stsi", VCPU_STAT(instruction_stsi) },
	{ "instruction_stfl", VCPU_STAT(instruction_stfl) },
	{ "instruction_tprot", VCPU_STAT(instruction_tprot) },
	{ "instruction_sthyi", VCPU_STAT(instruction_sthyi) },
	{ "instruction_sigp_sense", VCPU_STAT(instruction_sigp_sense) },
	{ "instruction_sigp_sense_running", VCPU_STAT(instruction_sigp_sense_running) },
	{ "instruction_sigp_external_call", VCPU_STAT(instruction_sigp_external_call) },
	{ "instruction_sigp_emergency", VCPU_STAT(instruction_sigp_emergency) },
	{ "instruction_sigp_cond_emergency", VCPU_STAT(instruction_sigp_cond_emergency) },
	{ "instruction_sigp_start", VCPU_STAT(instruction_sigp_start) },
	{ "instruction_sigp_stop", VCPU_STAT(instruction_sigp_stop) },
	{ "instruction_sigp_stop_store_status", VCPU_STAT(instruction_sigp_stop_store_status) },
	{ "instruction_sigp_store_status", VCPU_STAT(instruction_sigp_store_status) },
	{ "instruction_sigp_store_adtl_status", VCPU_STAT(instruction_sigp_store_adtl_status) },
	{ "instruction_sigp_set_arch", VCPU_STAT(instruction_sigp_arch) },
	{ "instruction_sigp_set_prefix", VCPU_STAT(instruction_sigp_prefix) },
	{ "instruction_sigp_restart", VCPU_STAT(instruction_sigp_restart) },
	{ "instruction_sigp_cpu_reset", VCPU_STAT(instruction_sigp_cpu_reset) },
	{ "instruction_sigp_init_cpu_reset", VCPU_STAT(instruction_sigp_init_cpu_reset) },
	{ "instruction_sigp_unknown", VCPU_STAT(instruction_sigp_unknown) },
	{ "diagnose_10", VCPU_STAT(diagnose_10) },
	{ "diagnose_44", VCPU_STAT(diagnose_44) },
	{ "diagnose_9c", VCPU_STAT(diagnose_9c) },
	{ "diagnose_258", VCPU_STAT(diagnose_258) },
	{ "diagnose_308", VCPU_STAT(diagnose_308) },
	{ "diagnose_500", VCPU_STAT(diagnose_500) },
	{ NULL }
};

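/*
 * This mask caps the facility bits that can ever be offered to a guest: it
 * is ANDed with the host's STFLE facility list when a VM is created, with
 * a few emulated facilities (e.g. 74) force-enabled again afterwards.
 */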
/* upper facilities limit for kvm */
unsigned long kvm_s390_fac_list_mask[16] = {
	0xffe6000000000000UL,
	0x005e000000000000UL,
};

unsigned long kvm_s390_fac_list_mask_size(void)
{
	BUILD_BUG_ON(ARRAY_SIZE(kvm_s390_fac_list_mask) > S390_ARCH_FAC_MASK_SIZE_U64);
	return ARRAY_SIZE(kvm_s390_fac_list_mask);
}

/* available cpu features supported by kvm */
static DECLARE_BITMAP(kvm_s390_available_cpu_feat, KVM_S390_VM_CPU_FEAT_NR_BITS);
/* available subfunctions indicated via query / "test bit" */
static struct kvm_s390_vm_cpu_subfunc kvm_s390_available_subfunc;

static struct gmap_notifier gmap_notifier;
debug_info_t *kvm_s390_dbf;

/* Section: not file related */
int kvm_arch_hardware_enable(void)
{
	/* every s390 is virtualization enabled ;-) */
	return 0;
}

static void kvm_gmap_notifier(struct gmap *gmap, unsigned long address);

/*
 * This callback is executed during stop_machine(). All CPUs are therefore
 * temporarily stopped. In order not to change guest behavior, we have to
 * disable preemption whenever we touch the epoch of kvm and the VCPUs,
 * so a CPU won't be stopped while calculating with the epoch.
 */
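/*
 * Note: the guest-visible TOD is the host TOD plus the epoch, so
 * subtracting the step delta from each epoch keeps guest time stable
 * across the host clock adjustment.
 */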
static int kvm_clock_sync(struct notifier_block *notifier, unsigned long val,
			  void *v)
{
	struct kvm *kvm;
	struct kvm_vcpu *vcpu;
	int i;
	unsigned long long *delta = v;

	list_for_each_entry(kvm, &vm_list, vm_list) {
		kvm->arch.epoch -= *delta;
		kvm_for_each_vcpu(i, vcpu, kvm) {
			vcpu->arch.sie_block->epoch -= *delta;
			if (vcpu->arch.cputm_enabled)
				vcpu->arch.cputm_start += *delta;
		}
	}
	return NOTIFY_OK;
}

static struct notifier_block kvm_clock_notifier = {
	.notifier_call = kvm_clock_sync,
};

int kvm_arch_hardware_setup(void)
{
	gmap_notifier.notifier_call = kvm_gmap_notifier;
	gmap_register_ipte_notifier(&gmap_notifier);
	atomic_notifier_chain_register(&s390_epoch_delta_notifier,
				       &kvm_clock_notifier);
	return 0;
}

void kvm_arch_hardware_unsetup(void)
{
	gmap_unregister_ipte_notifier(&gmap_notifier);
	atomic_notifier_chain_unregister(&s390_epoch_delta_notifier,
					 &kvm_clock_notifier);
}

static void allow_cpu_feat(unsigned long nr)
{
	set_bit_inv(nr, kvm_s390_available_cpu_feat);
}

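/*
 * PERFORM LOCKED OPERATION: bit 0x100 in the function code turns PLO into
 * a "test bit" query that merely reports, via the condition code extracted
 * with ipm/srl, whether function code nr is installed.
 */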
static inline int plo_test_bit(unsigned char nr)
{
	register unsigned long r0 asm("0") = (unsigned long) nr | 0x100;
	int cc = 3; /* subfunction not available */

	asm volatile(
		/* Parameter registers are ignored for "test bit" */
		"	plo	0,0,0,0(0)\n"
		"	ipm	%0\n"
		"	srl	%0,28\n"
		: "=d" (cc)
		: "d" (r0)
		: "cc");
	return cc == 0;
}

static void kvm_s390_cpu_feat_init(void)
{
	int i;

	for (i = 0; i < 256; ++i) {
		if (plo_test_bit(i))
			kvm_s390_available_subfunc.plo[i >> 3] |= 0x80 >> (i & 7);
	}

	if (test_facility(28)) /* TOD-clock steering */
		etr_ptff(kvm_s390_available_subfunc.ptff, ETR_PTFF_QAF);

	if (test_facility(17)) { /* MSA */
		__cpacf_query(CPACF_KMAC, kvm_s390_available_subfunc.kmac);
		__cpacf_query(CPACF_KMC, kvm_s390_available_subfunc.kmc);
		__cpacf_query(CPACF_KM, kvm_s390_available_subfunc.km);
		__cpacf_query(CPACF_KIMD, kvm_s390_available_subfunc.kimd);
		__cpacf_query(CPACF_KLMD, kvm_s390_available_subfunc.klmd);
	}
	if (test_facility(76)) /* MSA3 */
		__cpacf_query(CPACF_PCKMO, kvm_s390_available_subfunc.pckmo);
	if (test_facility(77)) { /* MSA4 */
		__cpacf_query(CPACF_KMCTR, kvm_s390_available_subfunc.kmctr);
		__cpacf_query(CPACF_KMF, kvm_s390_available_subfunc.kmf);
		__cpacf_query(CPACF_KMO, kvm_s390_available_subfunc.kmo);
		__cpacf_query(CPACF_PCC, kvm_s390_available_subfunc.pcc);
	}
	if (test_facility(57)) /* MSA5 */
		__cpacf_query(CPACF_PPNO, kvm_s390_available_subfunc.ppno);

	if (MACHINE_HAS_ESOP)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_ESOP);
}

int kvm_arch_init(void *opaque)
{
	kvm_s390_dbf = debug_register("kvm-trace", 32, 1, 7 * sizeof(long));
	if (!kvm_s390_dbf)
		return -ENOMEM;

	if (debug_register_view(kvm_s390_dbf, &debug_sprintf_view)) {
		debug_unregister(kvm_s390_dbf);
		return -ENOMEM;
	}

	kvm_s390_cpu_feat_init();

	/* Register floating interrupt controller interface. */
	return kvm_register_device_ops(&kvm_flic_ops, KVM_DEV_TYPE_FLIC);
}

void kvm_arch_exit(void)
{
	debug_unregister(kvm_s390_dbf);
}

/* Section: device related */
long kvm_arch_dev_ioctl(struct file *filp,
			unsigned int ioctl, unsigned long arg)
{
	if (ioctl == KVM_S390_ENABLE_SIE)
		return s390_enable_sie();
	return -EINVAL;
}

int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
{
	int r;

	switch (ext) {
	case KVM_CAP_S390_PSW:
	case KVM_CAP_S390_GMAP:
	case KVM_CAP_SYNC_MMU:
#ifdef CONFIG_KVM_S390_UCONTROL
	case KVM_CAP_S390_UCONTROL:
#endif
	case KVM_CAP_ASYNC_PF:
	case KVM_CAP_SYNC_REGS:
	case KVM_CAP_ONE_REG:
	case KVM_CAP_ENABLE_CAP:
	case KVM_CAP_S390_CSS_SUPPORT:
	case KVM_CAP_IOEVENTFD:
	case KVM_CAP_DEVICE_CTRL:
	case KVM_CAP_ENABLE_CAP_VM:
	case KVM_CAP_S390_IRQCHIP:
	case KVM_CAP_VM_ATTRIBUTES:
	case KVM_CAP_MP_STATE:
	case KVM_CAP_S390_INJECT_IRQ:
	case KVM_CAP_S390_USER_SIGP:
	case KVM_CAP_S390_USER_STSI:
	case KVM_CAP_S390_SKEYS:
	case KVM_CAP_S390_IRQ_STATE:
		r = 1;
		break;
	case KVM_CAP_S390_MEM_OP:
		r = MEM_OP_MAX_SIZE;
		break;
	case KVM_CAP_NR_VCPUS:
	case KVM_CAP_MAX_VCPUS:
		r = KVM_S390_BSCA_CPU_SLOTS;
		if (sclp.has_esca && sclp.has_64bscao)
			r = KVM_S390_ESCA_CPU_SLOTS;
		break;
	case KVM_CAP_NR_MEMSLOTS:
		r = KVM_USER_MEM_SLOTS;
		break;
	case KVM_CAP_S390_COW:
		r = MACHINE_HAS_ESOP;
		break;
	case KVM_CAP_S390_VECTOR_REGISTERS:
		r = MACHINE_HAS_VX;
		break;
	case KVM_CAP_S390_RI:
		r = test_facility(64);
		break;
	default:
		r = 0;
	}
	return r;
}

static void kvm_s390_sync_dirty_log(struct kvm *kvm,
				    struct kvm_memory_slot *memslot)
{
	gfn_t cur_gfn, last_gfn;
	unsigned long address;
	struct gmap *gmap = kvm->arch.gmap;

	/* Loop over all guest pages */
	last_gfn = memslot->base_gfn + memslot->npages;
	for (cur_gfn = memslot->base_gfn; cur_gfn <= last_gfn; cur_gfn++) {
		address = gfn_to_hva_memslot(memslot, cur_gfn);

		if (test_and_clear_guest_dirty(gmap->mm, address))
			mark_page_dirty(kvm, cur_gfn);
		if (fatal_signal_pending(current))
			return;
		cond_resched();
	}
}

/* Section: vm related */
static void sca_del_vcpu(struct kvm_vcpu *vcpu);

/*
 * Get (and clear) the dirty memory log for a memory slot.
 */
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
			       struct kvm_dirty_log *log)
{
	int r;
	unsigned long n;
	struct kvm_memslots *slots;
	struct kvm_memory_slot *memslot;
	int is_dirty = 0;

	mutex_lock(&kvm->slots_lock);

	r = -EINVAL;
	if (log->slot >= KVM_USER_MEM_SLOTS)
		goto out;

	slots = kvm_memslots(kvm);
	memslot = id_to_memslot(slots, log->slot);
	r = -ENOENT;
	if (!memslot->dirty_bitmap)
		goto out;

	kvm_s390_sync_dirty_log(kvm, memslot);
	r = kvm_get_dirty_log(kvm, log, &is_dirty);
	if (r)
		goto out;

	/* Clear the dirty log */
	if (is_dirty) {
		n = kvm_dirty_bitmap_bytes(memslot);
		memset(memslot->dirty_bitmap, 0, n);
	}
	r = 0;
out:
	mutex_unlock(&kvm->slots_lock);
	return r;
}

static int kvm_vm_ioctl_enable_cap(struct kvm *kvm, struct kvm_enable_cap *cap)
{
	int r;

	if (cap->flags)
		return -EINVAL;

	switch (cap->cap) {
	case KVM_CAP_S390_IRQCHIP:
		VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_IRQCHIP");
		kvm->arch.use_irqchip = 1;
		r = 0;
		break;
	case KVM_CAP_S390_USER_SIGP:
		VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_USER_SIGP");
		kvm->arch.user_sigp = 1;
		r = 0;
		break;
	case KVM_CAP_S390_VECTOR_REGISTERS:
		mutex_lock(&kvm->lock);
		if (atomic_read(&kvm->online_vcpus)) {
			r = -EBUSY;
		} else if (MACHINE_HAS_VX) {
			set_kvm_facility(kvm->arch.model.fac_mask, 129);
			set_kvm_facility(kvm->arch.model.fac_list, 129);
			r = 0;
		} else
			r = -EINVAL;
		mutex_unlock(&kvm->lock);
		VM_EVENT(kvm, 3, "ENABLE: CAP_S390_VECTOR_REGISTERS %s",
			 r ? "(not available)" : "(success)");
		break;
	case KVM_CAP_S390_RI:
		r = -EINVAL;
		mutex_lock(&kvm->lock);
		if (atomic_read(&kvm->online_vcpus)) {
			r = -EBUSY;
		} else if (test_facility(64)) {
			set_kvm_facility(kvm->arch.model.fac_mask, 64);
			set_kvm_facility(kvm->arch.model.fac_list, 64);
			r = 0;
		}
		mutex_unlock(&kvm->lock);
		VM_EVENT(kvm, 3, "ENABLE: CAP_S390_RI %s",
			 r ? "(not available)" : "(success)");
		break;
	case KVM_CAP_S390_USER_STSI:
		VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_USER_STSI");
		kvm->arch.user_stsi = 1;
		r = 0;
		break;
	default:
		r = -EINVAL;
		break;
	}
	return r;
}

static int kvm_s390_get_mem_control(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	switch (attr->attr) {
	case KVM_S390_VM_MEM_LIMIT_SIZE:
		ret = 0;
		VM_EVENT(kvm, 3, "QUERY: max guest memory: %lu bytes",
			 kvm->arch.mem_limit);
		if (put_user(kvm->arch.mem_limit, (u64 __user *)attr->addr))
			ret = -EFAULT;
		break;
	default:
		ret = -ENXIO;
		break;
	}
	return ret;
}

static int kvm_s390_set_mem_control(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;
	unsigned int idx;

	switch (attr->attr) {
	case KVM_S390_VM_MEM_ENABLE_CMMA:
		ret = -EINVAL;
		if (!sclp.has_cmma)
			break;

		ret = -EBUSY;
		VM_EVENT(kvm, 3, "%s", "ENABLE: CMMA support");
		mutex_lock(&kvm->lock);
		if (atomic_read(&kvm->online_vcpus) == 0) {
			kvm->arch.use_cmma = 1;
			ret = 0;
		}
		mutex_unlock(&kvm->lock);
		break;
	case KVM_S390_VM_MEM_CLR_CMMA:
		ret = -EINVAL;
		if (!kvm->arch.use_cmma)
			break;

		VM_EVENT(kvm, 3, "%s", "RESET: CMMA states");
		mutex_lock(&kvm->lock);
		idx = srcu_read_lock(&kvm->srcu);
		s390_reset_cmma(kvm->arch.gmap->mm);
		srcu_read_unlock(&kvm->srcu, idx);
		mutex_unlock(&kvm->lock);
		ret = 0;
		break;
	case KVM_S390_VM_MEM_LIMIT_SIZE: {
		unsigned long new_limit;

		if (kvm_is_ucontrol(kvm))
			return -EINVAL;

		if (get_user(new_limit, (u64 __user *)attr->addr))
			return -EFAULT;

		if (kvm->arch.mem_limit != KVM_S390_NO_MEM_LIMIT &&
		    new_limit > kvm->arch.mem_limit)
			return -E2BIG;

		if (!new_limit)
			return -EINVAL;

		/* gmap_alloc takes last usable address */
		if (new_limit != KVM_S390_NO_MEM_LIMIT)
			new_limit -= 1;

		ret = -EBUSY;
		mutex_lock(&kvm->lock);
		if (atomic_read(&kvm->online_vcpus) == 0) {
			/* gmap_alloc will round the limit up */
			struct gmap *new = gmap_alloc(current->mm, new_limit);

			if (!new) {
				ret = -ENOMEM;
			} else {
				gmap_free(kvm->arch.gmap);
				new->private = kvm;
				kvm->arch.gmap = new;
				ret = 0;
			}
		}
		mutex_unlock(&kvm->lock);
		VM_EVENT(kvm, 3, "SET: max guest address: %lu", new_limit);
		VM_EVENT(kvm, 3, "New guest asce: 0x%pK",
			 (void *) kvm->arch.gmap->asce);
		break;
	}
	default:
		ret = -ENXIO;
		break;
	}
	return ret;
}

static void kvm_s390_vcpu_crypto_setup(struct kvm_vcpu *vcpu);

static int kvm_s390_vm_set_crypto(struct kvm *kvm, struct kvm_device_attr *attr)
{
	struct kvm_vcpu *vcpu;
	int i;

	if (!test_kvm_facility(kvm, 76))
		return -EINVAL;

	mutex_lock(&kvm->lock);
	switch (attr->attr) {
	case KVM_S390_VM_CRYPTO_ENABLE_AES_KW:
		get_random_bytes(
			kvm->arch.crypto.crycb->aes_wrapping_key_mask,
			sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
		kvm->arch.crypto.aes_kw = 1;
		VM_EVENT(kvm, 3, "%s", "ENABLE: AES keywrapping support");
		break;
	case KVM_S390_VM_CRYPTO_ENABLE_DEA_KW:
		get_random_bytes(
			kvm->arch.crypto.crycb->dea_wrapping_key_mask,
			sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));
		kvm->arch.crypto.dea_kw = 1;
		VM_EVENT(kvm, 3, "%s", "ENABLE: DEA keywrapping support");
		break;
	case KVM_S390_VM_CRYPTO_DISABLE_AES_KW:
		kvm->arch.crypto.aes_kw = 0;
		memset(kvm->arch.crypto.crycb->aes_wrapping_key_mask, 0,
			sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
		VM_EVENT(kvm, 3, "%s", "DISABLE: AES keywrapping support");
		break;
	case KVM_S390_VM_CRYPTO_DISABLE_DEA_KW:
		kvm->arch.crypto.dea_kw = 0;
		memset(kvm->arch.crypto.crycb->dea_wrapping_key_mask, 0,
			sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));
		VM_EVENT(kvm, 3, "%s", "DISABLE: DEA keywrapping support");
		break;
	default:
		mutex_unlock(&kvm->lock);
		return -ENXIO;
	}

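	/* propagate the new wrapping keys and kick every VCPU out of SIE */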
	kvm_for_each_vcpu(i, vcpu, kvm) {
		kvm_s390_vcpu_crypto_setup(vcpu);
		exit_sie(vcpu);
	}
	mutex_unlock(&kvm->lock);
	return 0;
}

static int kvm_s390_set_tod_high(struct kvm *kvm, struct kvm_device_attr *attr)
{
	u8 gtod_high;

	if (copy_from_user(&gtod_high, (void __user *)attr->addr,
			   sizeof(gtod_high)))
		return -EFAULT;

	if (gtod_high != 0)
		return -EINVAL;
	VM_EVENT(kvm, 3, "SET: TOD extension: 0x%x", gtod_high);

	return 0;
}

static int kvm_s390_set_tod_low(struct kvm *kvm, struct kvm_device_attr *attr)
{
	u64 gtod;

	if (copy_from_user(&gtod, (void __user *)attr->addr, sizeof(gtod)))
		return -EFAULT;

	kvm_s390_set_tod_clock(kvm, gtod);
	VM_EVENT(kvm, 3, "SET: TOD base: 0x%llx", gtod);
	return 0;
}

static int kvm_s390_set_tod(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	if (attr->flags)
		return -EINVAL;

	switch (attr->attr) {
	case KVM_S390_VM_TOD_HIGH:
		ret = kvm_s390_set_tod_high(kvm, attr);
		break;
	case KVM_S390_VM_TOD_LOW:
		ret = kvm_s390_set_tod_low(kvm, attr);
		break;
	default:
		ret = -ENXIO;
		break;
	}
	return ret;
}

static int kvm_s390_get_tod_high(struct kvm *kvm, struct kvm_device_attr *attr)
{
	u8 gtod_high = 0;

	if (copy_to_user((void __user *)attr->addr, &gtod_high,
			 sizeof(gtod_high)))
		return -EFAULT;
	VM_EVENT(kvm, 3, "QUERY: TOD extension: 0x%x", gtod_high);

	return 0;
}

static int kvm_s390_get_tod_low(struct kvm *kvm, struct kvm_device_attr *attr)
{
	u64 gtod;

	gtod = kvm_s390_get_tod_clock_fast(kvm);
	if (copy_to_user((void __user *)attr->addr, &gtod, sizeof(gtod)))
		return -EFAULT;
	VM_EVENT(kvm, 3, "QUERY: TOD base: 0x%llx", gtod);

	return 0;
}

static int kvm_s390_get_tod(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	if (attr->flags)
		return -EINVAL;

	switch (attr->attr) {
	case KVM_S390_VM_TOD_HIGH:
		ret = kvm_s390_get_tod_high(kvm, attr);
		break;
	case KVM_S390_VM_TOD_LOW:
		ret = kvm_s390_get_tod_low(kvm, attr);
		break;
	default:
		ret = -ENXIO;
		break;
	}
	return ret;
}

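/*
 * The IBC value requested by user space is clamped to what the machine
 * reports via SCLP: at most the unblocked IBC and at least the lowest
 * supported IBC, when the machine reports a lowest IBC at all.
 */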
static int kvm_s390_set_processor(struct kvm *kvm, struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_cpu_processor *proc;
	u16 lowest_ibc, unblocked_ibc;
	int ret = 0;

	mutex_lock(&kvm->lock);
	if (atomic_read(&kvm->online_vcpus)) {
		ret = -EBUSY;
		goto out;
	}
	proc = kzalloc(sizeof(*proc), GFP_KERNEL);
	if (!proc) {
		ret = -ENOMEM;
		goto out;
	}
	if (!copy_from_user(proc, (void __user *)attr->addr,
			    sizeof(*proc))) {
		kvm->arch.model.cpuid = proc->cpuid;
		lowest_ibc = sclp.ibc >> 16 & 0xfff;
		unblocked_ibc = sclp.ibc & 0xfff;
		if (lowest_ibc) {
			if (proc->ibc > unblocked_ibc)
				kvm->arch.model.ibc = unblocked_ibc;
			else if (proc->ibc < lowest_ibc)
				kvm->arch.model.ibc = lowest_ibc;
			else
				kvm->arch.model.ibc = proc->ibc;
		}
		memcpy(kvm->arch.model.fac_list, proc->fac_list,
		       S390_ARCH_FAC_LIST_SIZE_BYTE);
	} else
		ret = -EFAULT;
	kfree(proc);
out:
	mutex_unlock(&kvm->lock);
	return ret;
}

static int kvm_s390_set_processor_feat(struct kvm *kvm,
				       struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_cpu_feat data;
	int ret = -EBUSY;

	if (copy_from_user(&data, (void __user *)attr->addr, sizeof(data)))
		return -EFAULT;
	if (!bitmap_subset((unsigned long *) data.feat,
			   kvm_s390_available_cpu_feat,
			   KVM_S390_VM_CPU_FEAT_NR_BITS))
		return -EINVAL;

	mutex_lock(&kvm->lock);
	if (!atomic_read(&kvm->online_vcpus)) {
		bitmap_copy(kvm->arch.cpu_feat, (unsigned long *) data.feat,
			    KVM_S390_VM_CPU_FEAT_NR_BITS);
		ret = 0;
	}
	mutex_unlock(&kvm->lock);
	return ret;
}

static int kvm_s390_set_processor_subfunc(struct kvm *kvm,
					  struct kvm_device_attr *attr)
{
	/*
	 * Once supported by kernel + hw, we have to store the subfunctions
	 * in kvm->arch and remember that user space configured them.
	 */
	return -ENXIO;
}

static int kvm_s390_set_cpu_model(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret = -ENXIO;

	switch (attr->attr) {
	case KVM_S390_VM_CPU_PROCESSOR:
		ret = kvm_s390_set_processor(kvm, attr);
		break;
	case KVM_S390_VM_CPU_PROCESSOR_FEAT:
		ret = kvm_s390_set_processor_feat(kvm, attr);
		break;
	case KVM_S390_VM_CPU_PROCESSOR_SUBFUNC:
		ret = kvm_s390_set_processor_subfunc(kvm, attr);
		break;
	}
	return ret;
}

static int kvm_s390_get_processor(struct kvm *kvm, struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_cpu_processor *proc;
	int ret = 0;

	proc = kzalloc(sizeof(*proc), GFP_KERNEL);
	if (!proc) {
		ret = -ENOMEM;
		goto out;
	}
	proc->cpuid = kvm->arch.model.cpuid;
	proc->ibc = kvm->arch.model.ibc;
	memcpy(&proc->fac_list, kvm->arch.model.fac_list,
	       S390_ARCH_FAC_LIST_SIZE_BYTE);
	if (copy_to_user((void __user *)attr->addr, proc, sizeof(*proc)))
		ret = -EFAULT;
	kfree(proc);
out:
	return ret;
}

static int kvm_s390_get_machine(struct kvm *kvm, struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_cpu_machine *mach;
	int ret = 0;

	mach = kzalloc(sizeof(*mach), GFP_KERNEL);
	if (!mach) {
		ret = -ENOMEM;
		goto out;
	}
	get_cpu_id((struct cpuid *) &mach->cpuid);
	mach->ibc = sclp.ibc;
	memcpy(&mach->fac_mask, kvm->arch.model.fac_mask,
	       S390_ARCH_FAC_LIST_SIZE_BYTE);
	memcpy((unsigned long *)&mach->fac_list, S390_lowcore.stfle_fac_list,
	       S390_ARCH_FAC_LIST_SIZE_BYTE);
	if (copy_to_user((void __user *)attr->addr, mach, sizeof(*mach)))
		ret = -EFAULT;
	kfree(mach);
out:
	return ret;
}

static int kvm_s390_get_processor_feat(struct kvm *kvm,
				       struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_cpu_feat data;

	bitmap_copy((unsigned long *) data.feat, kvm->arch.cpu_feat,
		    KVM_S390_VM_CPU_FEAT_NR_BITS);
	if (copy_to_user((void __user *)attr->addr, &data, sizeof(data)))
		return -EFAULT;
	return 0;
}

static int kvm_s390_get_machine_feat(struct kvm *kvm,
				     struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_cpu_feat data;

	bitmap_copy((unsigned long *) data.feat,
		    kvm_s390_available_cpu_feat,
		    KVM_S390_VM_CPU_FEAT_NR_BITS);
	if (copy_to_user((void __user *)attr->addr, &data, sizeof(data)))
		return -EFAULT;
	return 0;
}

static int kvm_s390_get_processor_subfunc(struct kvm *kvm,
					  struct kvm_device_attr *attr)
{
	/*
	 * Once we can actually configure subfunctions (kernel + hw support),
	 * we have to check if they were already set by user space, if so copy
	 * them from kvm->arch.
	 */
	return -ENXIO;
}

static int kvm_s390_get_machine_subfunc(struct kvm *kvm,
					struct kvm_device_attr *attr)
{
	if (copy_to_user((void __user *)attr->addr, &kvm_s390_available_subfunc,
			 sizeof(struct kvm_s390_vm_cpu_subfunc)))
		return -EFAULT;
	return 0;
}

static int kvm_s390_get_cpu_model(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret = -ENXIO;

	switch (attr->attr) {
	case KVM_S390_VM_CPU_PROCESSOR:
		ret = kvm_s390_get_processor(kvm, attr);
		break;
	case KVM_S390_VM_CPU_MACHINE:
		ret = kvm_s390_get_machine(kvm, attr);
		break;
	case KVM_S390_VM_CPU_PROCESSOR_FEAT:
		ret = kvm_s390_get_processor_feat(kvm, attr);
		break;
	case KVM_S390_VM_CPU_MACHINE_FEAT:
		ret = kvm_s390_get_machine_feat(kvm, attr);
		break;
	case KVM_S390_VM_CPU_PROCESSOR_SUBFUNC:
		ret = kvm_s390_get_processor_subfunc(kvm, attr);
		break;
	case KVM_S390_VM_CPU_MACHINE_SUBFUNC:
		ret = kvm_s390_get_machine_subfunc(kvm, attr);
		break;
	}
	return ret;
}

static int kvm_s390_vm_set_attr(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	switch (attr->group) {
	case KVM_S390_VM_MEM_CTRL:
		ret = kvm_s390_set_mem_control(kvm, attr);
		break;
	case KVM_S390_VM_TOD:
		ret = kvm_s390_set_tod(kvm, attr);
		break;
	case KVM_S390_VM_CPU_MODEL:
		ret = kvm_s390_set_cpu_model(kvm, attr);
		break;
	case KVM_S390_VM_CRYPTO:
		ret = kvm_s390_vm_set_crypto(kvm, attr);
		break;
	default:
		ret = -ENXIO;
		break;
	}

	return ret;
}

static int kvm_s390_vm_get_attr(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	switch (attr->group) {
	case KVM_S390_VM_MEM_CTRL:
		ret = kvm_s390_get_mem_control(kvm, attr);
		break;
	case KVM_S390_VM_TOD:
		ret = kvm_s390_get_tod(kvm, attr);
		break;
	case KVM_S390_VM_CPU_MODEL:
		ret = kvm_s390_get_cpu_model(kvm, attr);
		break;
	default:
		ret = -ENXIO;
		break;
	}

	return ret;
}

static int kvm_s390_vm_has_attr(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	switch (attr->group) {
	case KVM_S390_VM_MEM_CTRL:
		switch (attr->attr) {
		case KVM_S390_VM_MEM_ENABLE_CMMA:
		case KVM_S390_VM_MEM_CLR_CMMA:
		case KVM_S390_VM_MEM_LIMIT_SIZE:
			ret = 0;
			break;
		default:
			ret = -ENXIO;
			break;
		}
		break;
	case KVM_S390_VM_TOD:
		switch (attr->attr) {
		case KVM_S390_VM_TOD_LOW:
		case KVM_S390_VM_TOD_HIGH:
			ret = 0;
			break;
		default:
			ret = -ENXIO;
			break;
		}
		break;
	case KVM_S390_VM_CPU_MODEL:
		switch (attr->attr) {
		case KVM_S390_VM_CPU_PROCESSOR:
		case KVM_S390_VM_CPU_MACHINE:
		case KVM_S390_VM_CPU_PROCESSOR_FEAT:
		case KVM_S390_VM_CPU_MACHINE_FEAT:
		case KVM_S390_VM_CPU_MACHINE_SUBFUNC:
			ret = 0;
			break;
		/* configuring subfunctions is not supported yet */
		case KVM_S390_VM_CPU_PROCESSOR_SUBFUNC:
		default:
			ret = -ENXIO;
			break;
		}
		break;
	case KVM_S390_VM_CRYPTO:
		switch (attr->attr) {
		case KVM_S390_VM_CRYPTO_ENABLE_AES_KW:
		case KVM_S390_VM_CRYPTO_ENABLE_DEA_KW:
		case KVM_S390_VM_CRYPTO_DISABLE_AES_KW:
		case KVM_S390_VM_CRYPTO_DISABLE_DEA_KW:
			ret = 0;
			break;
		default:
			ret = -ENXIO;
			break;
		}
		break;
	default:
		ret = -ENXIO;
		break;
	}

	return ret;
}

static long kvm_s390_get_skeys(struct kvm *kvm, struct kvm_s390_skeys *args)
{
	uint8_t *keys;
	uint64_t hva;
	unsigned long curkey;
	int i, r = 0;

	if (args->flags != 0)
		return -EINVAL;

	/* Is this guest using storage keys? */
	if (!mm_use_skey(current->mm))
		return KVM_S390_GET_SKEYS_NONE;

	/* Enforce sane limit on memory allocation */
	if (args->count < 1 || args->count > KVM_S390_SKEYS_MAX)
		return -EINVAL;

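	/* try a physically contiguous buffer first, fall back to vmalloc */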
	keys = kmalloc_array(args->count, sizeof(uint8_t),
			     GFP_KERNEL | __GFP_NOWARN);
	if (!keys)
		keys = vmalloc(sizeof(uint8_t) * args->count);
	if (!keys)
		return -ENOMEM;

	for (i = 0; i < args->count; i++) {
		hva = gfn_to_hva(kvm, args->start_gfn + i);
		if (kvm_is_error_hva(hva)) {
			r = -EFAULT;
			goto out;
		}

		curkey = get_guest_storage_key(current->mm, hva);
		if (IS_ERR_VALUE(curkey)) {
			r = curkey;
			goto out;
		}
		keys[i] = curkey;
	}

	r = copy_to_user((uint8_t __user *)args->skeydata_addr, keys,
			 sizeof(uint8_t) * args->count);
	if (r)
		r = -EFAULT;
out:
	kvfree(keys);
	return r;
}

static long kvm_s390_set_skeys(struct kvm *kvm, struct kvm_s390_skeys *args)
{
	uint8_t *keys;
	uint64_t hva;
	int i, r = 0;

	if (args->flags != 0)
		return -EINVAL;

	/* Enforce sane limit on memory allocation */
	if (args->count < 1 || args->count > KVM_S390_SKEYS_MAX)
		return -EINVAL;

	keys = kmalloc_array(args->count, sizeof(uint8_t),
			     GFP_KERNEL | __GFP_NOWARN);
	if (!keys)
		keys = vmalloc(sizeof(uint8_t) * args->count);
	if (!keys)
		return -ENOMEM;

	r = copy_from_user(keys, (uint8_t __user *)args->skeydata_addr,
			   sizeof(uint8_t) * args->count);
	if (r) {
		r = -EFAULT;
		goto out;
	}

	/* Enable storage key handling for the guest */
	r = s390_enable_skey();
	if (r)
		goto out;

	for (i = 0; i < args->count; i++) {
		hva = gfn_to_hva(kvm, args->start_gfn + i);
		if (kvm_is_error_hva(hva)) {
			r = -EFAULT;
			goto out;
		}

		/* Lowest order bit is reserved */
		if (keys[i] & 0x01) {
			r = -EINVAL;
			goto out;
		}

		r = set_guest_storage_key(current->mm, hva,
					  (unsigned long)keys[i], 0);
		if (r)
			goto out;
	}
out:
	kvfree(keys);
	return r;
}

long kvm_arch_vm_ioctl(struct file *filp,
		       unsigned int ioctl, unsigned long arg)
{
	struct kvm *kvm = filp->private_data;
	void __user *argp = (void __user *)arg;
	struct kvm_device_attr attr;
	int r;

	switch (ioctl) {
	case KVM_S390_INTERRUPT: {
		struct kvm_s390_interrupt s390int;

		r = -EFAULT;
		if (copy_from_user(&s390int, argp, sizeof(s390int)))
			break;
		r = kvm_s390_inject_vm(kvm, &s390int);
		break;
	}
	case KVM_ENABLE_CAP: {
		struct kvm_enable_cap cap;

		r = -EFAULT;
		if (copy_from_user(&cap, argp, sizeof(cap)))
			break;
		r = kvm_vm_ioctl_enable_cap(kvm, &cap);
		break;
	}
	case KVM_CREATE_IRQCHIP: {
		struct kvm_irq_routing_entry routing;

		r = -EINVAL;
		if (kvm->arch.use_irqchip) {
			/* Set up dummy routing. */
			memset(&routing, 0, sizeof(routing));
			r = kvm_set_irq_routing(kvm, &routing, 0, 0);
		}
		break;
	}
	case KVM_SET_DEVICE_ATTR: {
		r = -EFAULT;
		if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
			break;
		r = kvm_s390_vm_set_attr(kvm, &attr);
		break;
	}
	case KVM_GET_DEVICE_ATTR: {
		r = -EFAULT;
		if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
			break;
		r = kvm_s390_vm_get_attr(kvm, &attr);
		break;
	}
	case KVM_HAS_DEVICE_ATTR: {
		r = -EFAULT;
		if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
			break;
		r = kvm_s390_vm_has_attr(kvm, &attr);
		break;
	}
	case KVM_S390_GET_SKEYS: {
		struct kvm_s390_skeys args;

		r = -EFAULT;
		if (copy_from_user(&args, argp,
				   sizeof(struct kvm_s390_skeys)))
			break;
		r = kvm_s390_get_skeys(kvm, &args);
		break;
	}
	case KVM_S390_SET_SKEYS: {
		struct kvm_s390_skeys args;

		r = -EFAULT;
		if (copy_from_user(&args, argp,
				   sizeof(struct kvm_s390_skeys)))
			break;
		r = kvm_s390_set_skeys(kvm, &args);
		break;
	}
	default:
		r = -ENOTTY;
	}

	return r;
}

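/*
 * Query the AP configuration: PQAP with the QCI function code, hand-coded
 * as ".long 0xb2af0000". On machines without the QCI subfunction the
 * instruction faults; the exception table entry then skips the ipm, so the
 * caller sees cc == 0 together with the all-zero config block.
 */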
static int kvm_s390_query_ap_config(u8 *config)
{
	u32 fcn_code = 0x04000000UL;
	u32 cc = 0;

	memset(config, 0, 128);
	asm volatile(
		"lgr 0,%1\n"
		"lgr 2,%2\n"
		".long 0xb2af0000\n"		/* PQAP(QCI) */
		"0: ipm %0\n"
		"srl %0,28\n"
		"1:\n"
		EX_TABLE(0b, 1b)
		: "+r" (cc)
		: "r" (fcn_code), "r" (config)
		: "cc", "0", "2", "memory"
	);

	return cc;
}

static int kvm_s390_apxa_installed(void)
{
	u8 config[128];
	int cc;

	if (test_facility(12)) {
		cc = kvm_s390_query_ap_config(config);

		if (cc)
			pr_err("PQAP(QCI) failed with cc=%d", cc);
		else
			return config[0] & 0x40;
	}

	return 0;
}

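/*
 * The crycb designation below is the block's address with the format
 * encoded in the low bits; the crycb is assumed to be sufficiently
 * aligned that those bits are otherwise zero.
 */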
static void kvm_s390_set_crycb_format(struct kvm *kvm)
{
	kvm->arch.crypto.crycbd = (__u32)(unsigned long) kvm->arch.crypto.crycb;

	if (kvm_s390_apxa_installed())
		kvm->arch.crypto.crycbd |= CRYCB_FORMAT2;
	else
		kvm->arch.crypto.crycbd |= CRYCB_FORMAT1;
}

static u64 kvm_s390_get_initial_cpuid(void)
{
	struct cpuid cpuid;

	get_cpu_id(&cpuid);
	cpuid.version = 0xff;
	return *((u64 *) &cpuid);
}

static void kvm_s390_crypto_init(struct kvm *kvm)
{
	if (!test_kvm_facility(kvm, 76))
		return;

	kvm->arch.crypto.crycb = &kvm->arch.sie_page2->crycb;
	kvm_s390_set_crycb_format(kvm);

	/* Enable AES/DEA protected key functions by default */
	kvm->arch.crypto.aes_kw = 1;
	kvm->arch.crypto.dea_kw = 1;
	get_random_bytes(kvm->arch.crypto.crycb->aes_wrapping_key_mask,
			 sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
	get_random_bytes(kvm->arch.crypto.crycb->dea_wrapping_key_mask,
			 sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));
}

static void sca_dispose(struct kvm *kvm)
{
	if (kvm->arch.use_esca)
		free_pages_exact(kvm->arch.sca, sizeof(struct esca_block));
	else
		free_page((unsigned long)(kvm->arch.sca));
	kvm->arch.sca = NULL;
}

int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
{
	gfp_t alloc_flags = GFP_KERNEL;
	int i, rc;
	char debug_name[16];
	static unsigned long sca_offset;

	rc = -EINVAL;
#ifdef CONFIG_KVM_S390_UCONTROL
	if (type & ~KVM_VM_S390_UCONTROL)
		goto out_err;
	if ((type & KVM_VM_S390_UCONTROL) && (!capable(CAP_SYS_ADMIN)))
		goto out_err;
#else
	if (type)
		goto out_err;
#endif

	rc = s390_enable_sie();
	if (rc)
		goto out_err;

	rc = -ENOMEM;

	ratelimit_state_init(&kvm->arch.sthyi_limit, 5 * HZ, 500);

	kvm->arch.use_esca = 0; /* start with basic SCA */
	if (!sclp.has_64bscao)
		alloc_flags |= GFP_DMA;
	rwlock_init(&kvm->arch.sca_lock);
	kvm->arch.sca = (struct bsca_block *) get_zeroed_page(alloc_flags);
	if (!kvm->arch.sca)
		goto out_err;
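	/*
	 * Stagger each new VM's basic SCA within its page in 16-byte steps,
	 * presumably so that the SCAs of many VMs do not all hit the same
	 * cache lines.
	 */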
	spin_lock(&kvm_lock);
	sca_offset += 16;
	if (sca_offset + sizeof(struct bsca_block) > PAGE_SIZE)
		sca_offset = 0;
	kvm->arch.sca = (struct bsca_block *)
			((char *) kvm->arch.sca + sca_offset);
	spin_unlock(&kvm_lock);

	sprintf(debug_name, "kvm-%u", current->pid);

	kvm->arch.dbf = debug_register(debug_name, 32, 1, 7 * sizeof(long));
	if (!kvm->arch.dbf)
		goto out_err;

	kvm->arch.sie_page2 =
	     (struct sie_page2 *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!kvm->arch.sie_page2)
		goto out_err;

	/* Populate the facility mask initially. */
	memcpy(kvm->arch.model.fac_mask, S390_lowcore.stfle_fac_list,
	       S390_ARCH_FAC_LIST_SIZE_BYTE);
	for (i = 0; i < S390_ARCH_FAC_LIST_SIZE_U64; i++) {
		if (i < kvm_s390_fac_list_mask_size())
			kvm->arch.model.fac_mask[i] &= kvm_s390_fac_list_mask[i];
		else
			kvm->arch.model.fac_mask[i] = 0UL;
	}

	/* Populate the facility list initially. */
	kvm->arch.model.fac_list = kvm->arch.sie_page2->fac_list;
	memcpy(kvm->arch.model.fac_list, kvm->arch.model.fac_mask,
	       S390_ARCH_FAC_LIST_SIZE_BYTE);

	set_kvm_facility(kvm->arch.model.fac_mask, 74);
	set_kvm_facility(kvm->arch.model.fac_list, 74);

	kvm->arch.model.cpuid = kvm_s390_get_initial_cpuid();
	kvm->arch.model.ibc = sclp.ibc & 0x0fff;

	kvm_s390_crypto_init(kvm);

	spin_lock_init(&kvm->arch.float_int.lock);
	for (i = 0; i < FIRQ_LIST_COUNT; i++)
		INIT_LIST_HEAD(&kvm->arch.float_int.lists[i]);
	init_waitqueue_head(&kvm->arch.ipte_wq);
	mutex_init(&kvm->arch.ipte_mutex);

	debug_register_view(kvm->arch.dbf, &debug_sprintf_view);
	VM_EVENT(kvm, 3, "vm created with type %lu", type);

	if (type & KVM_VM_S390_UCONTROL) {
		kvm->arch.gmap = NULL;
		kvm->arch.mem_limit = KVM_S390_NO_MEM_LIMIT;
	} else {
		if (sclp.hamax == U64_MAX)
			kvm->arch.mem_limit = TASK_MAX_SIZE;
		else
			kvm->arch.mem_limit = min_t(unsigned long, TASK_MAX_SIZE,
						    sclp.hamax + 1);
		kvm->arch.gmap = gmap_alloc(current->mm, kvm->arch.mem_limit - 1);
		if (!kvm->arch.gmap)
			goto out_err;
		kvm->arch.gmap->private = kvm;
		kvm->arch.gmap->pfault_enabled = 0;
	}

	kvm->arch.css_support = 0;
	kvm->arch.use_irqchip = 0;
	kvm->arch.epoch = 0;

	spin_lock_init(&kvm->arch.start_stop_lock);
	KVM_EVENT(3, "vm 0x%pK created by pid %u", kvm, current->pid);

	return 0;
out_err:
	free_page((unsigned long)kvm->arch.sie_page2);
	debug_unregister(kvm->arch.dbf);
	sca_dispose(kvm);
	KVM_EVENT(3, "creation of vm failed: %d", rc);
	return rc;
}

void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{
	VCPU_EVENT(vcpu, 3, "%s", "free cpu");
	trace_kvm_s390_destroy_vcpu(vcpu->vcpu_id);
	kvm_s390_clear_local_irqs(vcpu);
	kvm_clear_async_pf_completion_queue(vcpu);
	if (!kvm_is_ucontrol(vcpu->kvm))
		sca_del_vcpu(vcpu);

	if (kvm_is_ucontrol(vcpu->kvm))
		gmap_free(vcpu->arch.gmap);

	if (vcpu->kvm->arch.use_cmma)
		kvm_s390_vcpu_unsetup_cmma(vcpu);
	free_page((unsigned long)(vcpu->arch.sie_block));

	kvm_vcpu_uninit(vcpu);
	kmem_cache_free(kvm_vcpu_cache, vcpu);
}

static void kvm_free_vcpus(struct kvm *kvm)
{
	unsigned int i;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(i, vcpu, kvm)
		kvm_arch_vcpu_destroy(vcpu);

	mutex_lock(&kvm->lock);
	for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
		kvm->vcpus[i] = NULL;

	atomic_set(&kvm->online_vcpus, 0);
	mutex_unlock(&kvm->lock);
}

void kvm_arch_destroy_vm(struct kvm *kvm)
{
	kvm_free_vcpus(kvm);
	sca_dispose(kvm);
	debug_unregister(kvm->arch.dbf);
	free_page((unsigned long)kvm->arch.sie_page2);
	if (!kvm_is_ucontrol(kvm))
		gmap_free(kvm->arch.gmap);
	kvm_s390_destroy_adapters(kvm);
	kvm_s390_clear_float_irqs(kvm);
	KVM_EVENT(3, "vm 0x%pK destroyed", kvm);
}

/* Section: vcpu related */
static int __kvm_ucontrol_vcpu_init(struct kvm_vcpu *vcpu)
{
	vcpu->arch.gmap = gmap_alloc(current->mm, -1UL);
	if (!vcpu->arch.gmap)
		return -ENOMEM;
	vcpu->arch.gmap->private = vcpu->kvm;

	return 0;
}

static void sca_del_vcpu(struct kvm_vcpu *vcpu)
{
	read_lock(&vcpu->kvm->arch.sca_lock);
	if (vcpu->kvm->arch.use_esca) {
		struct esca_block *sca = vcpu->kvm->arch.sca;

		clear_bit_inv(vcpu->vcpu_id, (unsigned long *) sca->mcn);
		sca->cpu[vcpu->vcpu_id].sda = 0;
	} else {
		struct bsca_block *sca = vcpu->kvm->arch.sca;

		clear_bit_inv(vcpu->vcpu_id, (unsigned long *) &sca->mcn);
		sca->cpu[vcpu->vcpu_id].sda = 0;
	}
	read_unlock(&vcpu->kvm->arch.sca_lock);
}

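/*
 * Wire a VCPU into the basic or extended SCA. For the extended format the
 * origin must be 64-byte aligned (hence the ~0x3f mask) and ecb2 bit 0x04
 * tells SIE to interpret the SCA in the extended layout.
 */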
static void sca_add_vcpu(struct kvm_vcpu *vcpu)
{
	read_lock(&vcpu->kvm->arch.sca_lock);
	if (vcpu->kvm->arch.use_esca) {
		struct esca_block *sca = vcpu->kvm->arch.sca;

		sca->cpu[vcpu->vcpu_id].sda = (__u64) vcpu->arch.sie_block;
		vcpu->arch.sie_block->scaoh = (__u32)(((__u64)sca) >> 32);
		vcpu->arch.sie_block->scaol = (__u32)(__u64)sca & ~0x3fU;
		vcpu->arch.sie_block->ecb2 |= 0x04U;
		set_bit_inv(vcpu->vcpu_id, (unsigned long *) sca->mcn);
	} else {
		struct bsca_block *sca = vcpu->kvm->arch.sca;

		sca->cpu[vcpu->vcpu_id].sda = (__u64) vcpu->arch.sie_block;
		vcpu->arch.sie_block->scaoh = (__u32)(((__u64)sca) >> 32);
		vcpu->arch.sie_block->scaol = (__u32)(__u64)sca;
		set_bit_inv(vcpu->vcpu_id, (unsigned long *) &sca->mcn);
	}
	read_unlock(&vcpu->kvm->arch.sca_lock);
}

/* Basic SCA to Extended SCA data copy routines */
static inline void sca_copy_entry(struct esca_entry *d, struct bsca_entry *s)
{
	d->sda = s->sda;
	d->sigp_ctrl.c = s->sigp_ctrl.c;
	d->sigp_ctrl.scn = s->sigp_ctrl.scn;
}

static void sca_copy_b_to_e(struct esca_block *d, struct bsca_block *s)
{
	int i;

	d->ipte_control = s->ipte_control;
	d->mcn[0] = s->mcn;
	for (i = 0; i < KVM_S390_BSCA_CPU_SLOTS; i++)
		sca_copy_entry(&d->cpu[i], &s->cpu[i]);
}

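/*
 * Live upgrade from the basic to the extended SCA: every VCPU is blocked
 * while the entries are copied and each sie_block is repointed, so no CPU
 * can run on a half-switched SCA.
 */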
static int sca_switch_to_extended(struct kvm *kvm)
{
	struct bsca_block *old_sca = kvm->arch.sca;
	struct esca_block *new_sca;
	struct kvm_vcpu *vcpu;
	unsigned int vcpu_idx;
	u32 scaol, scaoh;

	new_sca = alloc_pages_exact(sizeof(*new_sca), GFP_KERNEL|__GFP_ZERO);
	if (!new_sca)
		return -ENOMEM;

	scaoh = (u32)((u64)(new_sca) >> 32);
	scaol = (u32)(u64)(new_sca) & ~0x3fU;

	kvm_s390_vcpu_block_all(kvm);
	write_lock(&kvm->arch.sca_lock);

	sca_copy_b_to_e(new_sca, old_sca);

	kvm_for_each_vcpu(vcpu_idx, vcpu, kvm) {
		vcpu->arch.sie_block->scaoh = scaoh;
		vcpu->arch.sie_block->scaol = scaol;
		vcpu->arch.sie_block->ecb2 |= 0x04U;
	}
	kvm->arch.sca = new_sca;
	kvm->arch.use_esca = 1;

	write_unlock(&kvm->arch.sca_lock);
	kvm_s390_vcpu_unblock_all(kvm);

	free_page((unsigned long)old_sca);

	VM_EVENT(kvm, 2, "Switched to ESCA (0x%pK -> 0x%pK)",
		 old_sca, kvm->arch.sca);
	return 0;
}

static int sca_can_add_vcpu(struct kvm *kvm, unsigned int id)
{
	int rc;

	if (id < KVM_S390_BSCA_CPU_SLOTS)
		return true;
	if (!sclp.has_esca || !sclp.has_64bscao)
		return false;

	mutex_lock(&kvm->lock);
	rc = kvm->arch.use_esca ? 0 : sca_switch_to_extended(kvm);
	mutex_unlock(&kvm->lock);

	return rc == 0 && id < KVM_S390_ESCA_CPU_SLOTS;
}

int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
{
	vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
	kvm_clear_async_pf_completion_queue(vcpu);
	vcpu->run->kvm_valid_regs = KVM_SYNC_PREFIX |
				    KVM_SYNC_GPRS |
				    KVM_SYNC_ACRS |
				    KVM_SYNC_CRS |
				    KVM_SYNC_ARCH0 |
				    KVM_SYNC_PFAULT;
	if (test_kvm_facility(vcpu->kvm, 64))
		vcpu->run->kvm_valid_regs |= KVM_SYNC_RICCB;
	/* fprs can be synchronized via vrs, even if the guest has no vx. With
	 * MACHINE_HAS_VX, (load|store)_fpu_regs() will work with vrs format.
	 */
	if (MACHINE_HAS_VX)
		vcpu->run->kvm_valid_regs |= KVM_SYNC_VRS;
	else
		vcpu->run->kvm_valid_regs |= KVM_SYNC_FPRS;

	if (kvm_is_ucontrol(vcpu->kvm))
		return __kvm_ucontrol_vcpu_init(vcpu);

	return 0;
}

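/*
 * CPU-timer accounting: while enabled, cputm_start holds the TOD value at
 * which the guest CPU timer last started ticking; the seqcount lets other
 * threads read a consistent, up-to-date timer value without locking.
 */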
/* needs disabled preemption to protect from TOD sync and vcpu_load/put */
static void __start_cpu_timer_accounting(struct kvm_vcpu *vcpu)
{
	WARN_ON_ONCE(vcpu->arch.cputm_start != 0);
	raw_write_seqcount_begin(&vcpu->arch.cputm_seqcount);
	vcpu->arch.cputm_start = get_tod_clock_fast();
	raw_write_seqcount_end(&vcpu->arch.cputm_seqcount);
}

/* needs disabled preemption to protect from TOD sync and vcpu_load/put */
static void __stop_cpu_timer_accounting(struct kvm_vcpu *vcpu)
{
	WARN_ON_ONCE(vcpu->arch.cputm_start == 0);
	raw_write_seqcount_begin(&vcpu->arch.cputm_seqcount);
	vcpu->arch.sie_block->cputm -= get_tod_clock_fast() - vcpu->arch.cputm_start;
	vcpu->arch.cputm_start = 0;
	raw_write_seqcount_end(&vcpu->arch.cputm_seqcount);
}

/* needs disabled preemption to protect from TOD sync and vcpu_load/put */
static void __enable_cpu_timer_accounting(struct kvm_vcpu *vcpu)
{
	WARN_ON_ONCE(vcpu->arch.cputm_enabled);
	vcpu->arch.cputm_enabled = true;
	__start_cpu_timer_accounting(vcpu);
}

/* needs disabled preemption to protect from TOD sync and vcpu_load/put */
static void __disable_cpu_timer_accounting(struct kvm_vcpu *vcpu)
{
	WARN_ON_ONCE(!vcpu->arch.cputm_enabled);
	__stop_cpu_timer_accounting(vcpu);
	vcpu->arch.cputm_enabled = false;
}

static void enable_cpu_timer_accounting(struct kvm_vcpu *vcpu)
{
	preempt_disable(); /* protect from TOD sync and vcpu_load/put */
	__enable_cpu_timer_accounting(vcpu);
	preempt_enable();
}

static void disable_cpu_timer_accounting(struct kvm_vcpu *vcpu)
{
	preempt_disable(); /* protect from TOD sync and vcpu_load/put */
	__disable_cpu_timer_accounting(vcpu);
	preempt_enable();
}

/* set the cpu timer - may only be called from the VCPU thread itself */
void kvm_s390_set_cpu_timer(struct kvm_vcpu *vcpu, __u64 cputm)
{
	preempt_disable(); /* protect from TOD sync and vcpu_load/put */
	raw_write_seqcount_begin(&vcpu->arch.cputm_seqcount);
	if (vcpu->arch.cputm_enabled)
		vcpu->arch.cputm_start = get_tod_clock_fast();
	vcpu->arch.sie_block->cputm = cputm;
	raw_write_seqcount_end(&vcpu->arch.cputm_seqcount);
	preempt_enable();
}

/* update and get the cpu timer - can also be called from other VCPU threads */
__u64 kvm_s390_get_cpu_timer(struct kvm_vcpu *vcpu)
{
	unsigned int seq;
	__u64 value;

	if (unlikely(!vcpu->arch.cputm_enabled))
		return vcpu->arch.sie_block->cputm;

	preempt_disable(); /* protect from TOD sync and vcpu_load/put */
	do {
		seq = raw_read_seqcount(&vcpu->arch.cputm_seqcount);
		/*
		 * If the writer would ever execute a read in the critical
		 * section, e.g. in irq context, we have a deadlock.
		 */
		WARN_ON_ONCE((seq & 1) && smp_processor_id() == vcpu->cpu);
		value = vcpu->arch.sie_block->cputm;
		/* if cputm_start is 0, accounting is being started/stopped */
		if (likely(vcpu->arch.cputm_start))
			value -= get_tod_clock_fast() - vcpu->arch.cputm_start;
	} while (read_seqcount_retry(&vcpu->arch.cputm_seqcount, seq & ~1));
	preempt_enable();
	return value;
}

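/*
 * Note that current->thread.fpu.regs is repointed at the guest register
 * save area below, so the kernel's lazy FPU/vector machinery operates
 * directly on guest state while the VCPU is loaded.
 */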
void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	/* Save host register state */
	save_fpu_regs();
	vcpu->arch.host_fpregs.fpc = current->thread.fpu.fpc;
	vcpu->arch.host_fpregs.regs = current->thread.fpu.regs;

	if (MACHINE_HAS_VX)
		current->thread.fpu.regs = vcpu->run->s.regs.vrs;
	else
		current->thread.fpu.regs = vcpu->run->s.regs.fprs;
	current->thread.fpu.fpc = vcpu->run->s.regs.fpc;
	if (test_fp_ctl(current->thread.fpu.fpc))
		/* User space provided an invalid FPC, let's clear it */
		current->thread.fpu.fpc = 0;

	save_access_regs(vcpu->arch.host_acrs);
	restore_access_regs(vcpu->run->s.regs.acrs);
	gmap_enable(vcpu->arch.gmap);
	atomic_or(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
	if (vcpu->arch.cputm_enabled && !is_vcpu_idle(vcpu))
		__start_cpu_timer_accounting(vcpu);
	vcpu->cpu = cpu;
}

void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
	vcpu->cpu = -1;
	if (vcpu->arch.cputm_enabled && !is_vcpu_idle(vcpu))
		__stop_cpu_timer_accounting(vcpu);
	atomic_andnot(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
	gmap_disable(vcpu->arch.gmap);

	/* Save guest register state */
	save_fpu_regs();
	vcpu->run->s.regs.fpc = current->thread.fpu.fpc;

	/* Restore host register state */
	current->thread.fpu.fpc = vcpu->arch.host_fpregs.fpc;
	current->thread.fpu.regs = vcpu->arch.host_fpregs.regs;

	save_access_regs(vcpu->run->s.regs.acrs);
	restore_access_regs(vcpu->arch.host_acrs);
}

static void kvm_s390_vcpu_initial_reset(struct kvm_vcpu *vcpu)
{
	/* this equals initial cpu reset in pop, but we don't switch to ESA */
	vcpu->arch.sie_block->gpsw.mask = 0UL;
	vcpu->arch.sie_block->gpsw.addr = 0UL;
	kvm_s390_set_prefix(vcpu, 0);
	kvm_s390_set_cpu_timer(vcpu, 0);
	vcpu->arch.sie_block->ckc = 0UL;
	vcpu->arch.sie_block->todpr = 0;
	memset(vcpu->arch.sie_block->gcr, 0, 16 * sizeof(__u64));
	vcpu->arch.sie_block->gcr[0] = 0xE0UL;
	vcpu->arch.sie_block->gcr[14] = 0xC2000000UL;
	/* make sure the new fpc will be lazily loaded */
	save_fpu_regs();
	current->thread.fpu.fpc = 0;
	vcpu->arch.sie_block->gbea = 1;
	vcpu->arch.sie_block->pp = 0;
	vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
	kvm_clear_async_pf_completion_queue(vcpu);
	if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm))
		kvm_s390_vcpu_stop(vcpu);
	kvm_s390_clear_local_irqs(vcpu);
}

void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
{
	mutex_lock(&vcpu->kvm->lock);
	preempt_disable();
	vcpu->arch.sie_block->epoch = vcpu->kvm->arch.epoch;
	preempt_enable();
	mutex_unlock(&vcpu->kvm->lock);
	if (!kvm_is_ucontrol(vcpu->kvm)) {
		vcpu->arch.gmap = vcpu->kvm->arch.gmap;
		sca_add_vcpu(vcpu);
	}
}

5102ee87
TK
1779static void kvm_s390_vcpu_crypto_setup(struct kvm_vcpu *vcpu)
1780{
9d8d5786 1781 if (!test_kvm_facility(vcpu->kvm, 76))
5102ee87
TK
1782 return;
1783
a374e892
TK
1784 vcpu->arch.sie_block->ecb3 &= ~(ECB3_AES | ECB3_DEA);
1785
1786 if (vcpu->kvm->arch.crypto.aes_kw)
1787 vcpu->arch.sie_block->ecb3 |= ECB3_AES;
1788 if (vcpu->kvm->arch.crypto.dea_kw)
1789 vcpu->arch.sie_block->ecb3 |= ECB3_DEA;
1790
5102ee87
TK
1791 vcpu->arch.sie_block->crycbd = vcpu->kvm->arch.crypto.crycbd;
1792}
1793
b31605c1
DD
1794void kvm_s390_vcpu_unsetup_cmma(struct kvm_vcpu *vcpu)
1795{
1796 free_page(vcpu->arch.sie_block->cbrlo);
1797 vcpu->arch.sie_block->cbrlo = 0;
1798}
1799
1800int kvm_s390_vcpu_setup_cmma(struct kvm_vcpu *vcpu)
1801{
1802 vcpu->arch.sie_block->cbrlo = get_zeroed_page(GFP_KERNEL);
1803 if (!vcpu->arch.sie_block->cbrlo)
1804 return -ENOMEM;
1805
1806 vcpu->arch.sie_block->ecb2 |= 0x80; /* enable CMMA interpretation */
1807 vcpu->arch.sie_block->ecb2 &= ~0x08; /* disable PFMF interpretation */
1808 return 0;
1809}
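
/*
 * Userspace view (sketch, error handling elided, hypothetical helper):
 * CMMA is switched on per VM - before any vcpu is created - via a VM
 * device attribute; the per-vcpu setup above then allocates the collection
 * buffer and flips the ECB2 bits.
 */
#include <linux/kvm.h>
#include <sys/ioctl.h>

static int example_enable_cmma(int vm_fd)
{
	struct kvm_device_attr attr = {
		.group = KVM_S390_VM_MEM_CTRL,
		.attr  = KVM_S390_VM_MEM_ENABLE_CMMA,
	};

	return ioctl(vm_fd, KVM_SET_DEVICE_ATTR, &attr);
}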
1810
91520f1a
MM
1811static void kvm_s390_vcpu_setup_model(struct kvm_vcpu *vcpu)
1812{
1813 struct kvm_s390_cpu_model *model = &vcpu->kvm->arch.model;
1814
91520f1a 1815 vcpu->arch.sie_block->ibc = model->ibc;
80bc79dc 1816 if (test_kvm_facility(vcpu->kvm, 7))
c54f0d6a 1817 vcpu->arch.sie_block->fac = (u32)(u64) model->fac_list;
91520f1a
MM
1818}
1819
b0c632db
HC
1820int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
1821{
b31605c1 1822 int rc = 0;
b31288fa 1823
9e6dabef
CH
1824 atomic_set(&vcpu->arch.sie_block->cpuflags, CPUSTAT_ZARCH |
1825 CPUSTAT_SM |
a4a4f191
GH
1826 CPUSTAT_STOPPED);
1827
53df84f8 1828 if (test_kvm_facility(vcpu->kvm, 78))
805de8f4 1829 atomic_or(CPUSTAT_GED2, &vcpu->arch.sie_block->cpuflags);
53df84f8 1830 else if (test_kvm_facility(vcpu->kvm, 8))
805de8f4 1831 atomic_or(CPUSTAT_GED, &vcpu->arch.sie_block->cpuflags);
a4a4f191 1832
91520f1a
MM
1833 kvm_s390_vcpu_setup_model(vcpu);
1834
bd50e8ec
DH
1835 vcpu->arch.sie_block->ecb = 0x02; /* host-protection interpretation */
1836 if (test_kvm_facility(vcpu->kvm, 9))
1837 vcpu->arch.sie_block->ecb |= 0x04;
9d8d5786 1838 if (test_kvm_facility(vcpu->kvm, 50) && test_kvm_facility(vcpu->kvm, 73))
7feb6bb8
MM
1839 vcpu->arch.sie_block->ecb |= 0x10; /* transactional execution */
1840
d6af0b49
DH
1841 if (test_kvm_facility(vcpu->kvm, 8))
1842 vcpu->arch.sie_block->ecb2 |= 0x08;
ea5f4969 1843 vcpu->arch.sie_block->eca = 0xC1002000U;
37c5f6c8 1844 if (sclp.has_siif)
217a4406 1845 vcpu->arch.sie_block->eca |= 1;
37c5f6c8 1846 if (sclp.has_sigpif)
ea5f4969 1847 vcpu->arch.sie_block->eca |= 0x10000000U;
c6e5f166
FZ
1848 if (test_kvm_facility(vcpu->kvm, 64))
1849 vcpu->arch.sie_block->ecb3 |= 0x01; /* runtime instrumentation */
18280d8b 1850 if (test_kvm_facility(vcpu->kvm, 129)) {
13211ea7
EF
1851 vcpu->arch.sie_block->eca |= 0x00020000;
1852 vcpu->arch.sie_block->ecd |= 0x20000000;
1853 }
c6e5f166 1854 vcpu->arch.sie_block->riccbd = (unsigned long) &vcpu->run->s.regs.riccb;
492d8642 1855 vcpu->arch.sie_block->ictl |= ICTL_ISKE | ICTL_SSKE | ICTL_RRBE;
95ca2cb5
JF
1856 if (test_kvm_facility(vcpu->kvm, 74))
1857 vcpu->arch.sie_block->ictl |= ICTL_OPEREXC;
5a5e6536 1858
e6db1d61 1859 if (vcpu->kvm->arch.use_cmma) {
b31605c1
DD
1860 rc = kvm_s390_vcpu_setup_cmma(vcpu);
1861 if (rc)
1862 return rc;
b31288fa 1863 }
0ac96caf 1864 hrtimer_init(&vcpu->arch.ckc_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
ca872302 1865 vcpu->arch.ckc_timer.function = kvm_s390_idle_wakeup;
9d8d5786 1866
5102ee87
TK
1867 kvm_s390_vcpu_crypto_setup(vcpu);
1868
b31605c1 1869 return rc;
b0c632db
HC
1870}
1871
1872struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
1873 unsigned int id)
1874{
4d47555a 1875 struct kvm_vcpu *vcpu;
7feb6bb8 1876 struct sie_page *sie_page;
4d47555a
CO
1877 int rc = -EINVAL;
1878
4215825e 1879 if (!kvm_is_ucontrol(kvm) && !sca_can_add_vcpu(kvm, id))
4d47555a
CO
1880 goto out;
1881
1882 rc = -ENOMEM;
b0c632db 1883
b110feaf 1884 vcpu = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
b0c632db 1885 if (!vcpu)
4d47555a 1886 goto out;
b0c632db 1887
7feb6bb8
MM
1888 sie_page = (struct sie_page *) get_zeroed_page(GFP_KERNEL);
1889 if (!sie_page)
b0c632db
HC
1890 goto out_free_cpu;
1891
7feb6bb8
MM
1892 vcpu->arch.sie_block = &sie_page->sie_block;
1893 vcpu->arch.sie_block->itdba = (unsigned long) &sie_page->itdb;
1894
b0c632db 1895 vcpu->arch.sie_block->icpua = id;
ba5c1e9b 1896 spin_lock_init(&vcpu->arch.local_int.lock);
ba5c1e9b 1897 vcpu->arch.local_int.float_int = &kvm->arch.float_int;
d0321a24 1898 vcpu->arch.local_int.wq = &vcpu->wq;
5288fbf0 1899 vcpu->arch.local_int.cpuflags = &vcpu->arch.sie_block->cpuflags;
9c23a131 1900 seqcount_init(&vcpu->arch.cputm_seqcount);
ba5c1e9b 1901
b0c632db
HC
1902 rc = kvm_vcpu_init(vcpu, kvm, id);
1903 if (rc)
9abc2a08 1904 goto out_free_sie_block;
8335713a 1905 VM_EVENT(kvm, 3, "create cpu %d at 0x%pK, sie block at 0x%pK", id, vcpu,
b0c632db 1906 vcpu->arch.sie_block);
ade38c31 1907 trace_kvm_s390_create_vcpu(id, vcpu, vcpu->arch.sie_block);
b0c632db 1908
b0c632db 1909 return vcpu;
7b06bf2f
WY
1910out_free_sie_block:
1911 free_page((unsigned long)(vcpu->arch.sie_block));
b0c632db 1912out_free_cpu:
b110feaf 1913 kmem_cache_free(kvm_vcpu_cache, vcpu);
4d47555a 1914out:
b0c632db
HC
1915 return ERR_PTR(rc);
1916}
1917
b0c632db
HC
1918int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
1919{
9a022067 1920 return kvm_s390_vcpu_has_irq(vcpu, 0);
b0c632db
HC
1921}
1922
27406cd5 1923void kvm_s390_vcpu_block(struct kvm_vcpu *vcpu)
49b99e1e 1924{
805de8f4 1925 atomic_or(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
61a6df54 1926 exit_sie(vcpu);
49b99e1e
CB
1927}
1928
27406cd5 1929void kvm_s390_vcpu_unblock(struct kvm_vcpu *vcpu)
49b99e1e 1930{
805de8f4 1931 atomic_andnot(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
49b99e1e
CB
1932}
1933
8e236546
CB
1934static void kvm_s390_vcpu_request(struct kvm_vcpu *vcpu)
1935{
805de8f4 1936 atomic_or(PROG_REQUEST, &vcpu->arch.sie_block->prog20);
61a6df54 1937 exit_sie(vcpu);
8e236546
CB
1938}
1939
1940static void kvm_s390_vcpu_request_handled(struct kvm_vcpu *vcpu)
1941{
9bf9fde2 1942 atomic_andnot(PROG_REQUEST, &vcpu->arch.sie_block->prog20);
8e236546
CB
1943}
1944
49b99e1e
CB
1945/*
1946 * Kick a guest cpu out of SIE and wait until SIE is not running.
1947 * If the CPU is not running (e.g. waiting while idle), the function
1948 * will return immediately. */
1949void exit_sie(struct kvm_vcpu *vcpu)
1950{
805de8f4 1951 atomic_or(CPUSTAT_STOP_INT, &vcpu->arch.sie_block->cpuflags);
49b99e1e
CB
1952 while (vcpu->arch.sie_block->prog0c & PROG_IN_SIE)
1953 cpu_relax();
1954}
1955
8e236546
CB
1956/* Kick a guest cpu out of SIE to process a request synchronously */
1957void kvm_s390_sync_request(int req, struct kvm_vcpu *vcpu)
49b99e1e 1958{
8e236546
CB
1959 kvm_make_request(req, vcpu);
1960 kvm_s390_vcpu_request(vcpu);
49b99e1e
CB
1961}
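
/*
 * Usage sketch (hypothetical helper): drive one request into every vcpu of
 * a VM synchronously - the same pattern used for KVM_REQ_ENABLE_IBS and
 * KVM_REQ_DISABLE_IBS further down in this file.
 */
static void example_sync_request_all(struct kvm *kvm, int req)
{
	int i;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(i, vcpu, kvm)
		kvm_s390_sync_request(req, vcpu);
}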
1962
2c70fe44
CB
1963static void kvm_gmap_notifier(struct gmap *gmap, unsigned long address)
1964{
1965 int i;
1966 struct kvm *kvm = gmap->private;
1967 struct kvm_vcpu *vcpu;
1968
1969 kvm_for_each_vcpu(i, vcpu, kvm) {
1970 /* match against both prefix pages */
fda902cb 1971 if (kvm_s390_get_prefix(vcpu) == (address & ~0x1000UL)) {
2c70fe44 1972 VCPU_EVENT(vcpu, 2, "gmap notifier for %lx", address);
8e236546 1973 kvm_s390_sync_request(KVM_REQ_MMU_RELOAD, vcpu);
2c70fe44
CB
1974 }
1975 }
1976}
1977
b6d33834
CD
1978int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
1979{
1980 /* kvm common code refers to this, but never calls it */
1981 BUG();
1982 return 0;
1983}
1984
14eebd91
CO
1985static int kvm_arch_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu,
1986 struct kvm_one_reg *reg)
1987{
1988 int r = -EINVAL;
1989
1990 switch (reg->id) {
29b7c71b
CO
1991 case KVM_REG_S390_TODPR:
1992 r = put_user(vcpu->arch.sie_block->todpr,
1993 (u32 __user *)reg->addr);
1994 break;
1995 case KVM_REG_S390_EPOCHDIFF:
1996 r = put_user(vcpu->arch.sie_block->epoch,
1997 (u64 __user *)reg->addr);
1998 break;
46a6dd1c 1999 case KVM_REG_S390_CPU_TIMER:
4287f247 2000 r = put_user(kvm_s390_get_cpu_timer(vcpu),
46a6dd1c
J
2001 (u64 __user *)reg->addr);
2002 break;
2003 case KVM_REG_S390_CLOCK_COMP:
2004 r = put_user(vcpu->arch.sie_block->ckc,
2005 (u64 __user *)reg->addr);
2006 break;
536336c2
DD
2007 case KVM_REG_S390_PFTOKEN:
2008 r = put_user(vcpu->arch.pfault_token,
2009 (u64 __user *)reg->addr);
2010 break;
2011 case KVM_REG_S390_PFCOMPARE:
2012 r = put_user(vcpu->arch.pfault_compare,
2013 (u64 __user *)reg->addr);
2014 break;
2015 case KVM_REG_S390_PFSELECT:
2016 r = put_user(vcpu->arch.pfault_select,
2017 (u64 __user *)reg->addr);
2018 break;
672550fb
CB
2019 case KVM_REG_S390_PP:
2020 r = put_user(vcpu->arch.sie_block->pp,
2021 (u64 __user *)reg->addr);
2022 break;
afa45ff5
CB
2023 case KVM_REG_S390_GBEA:
2024 r = put_user(vcpu->arch.sie_block->gbea,
2025 (u64 __user *)reg->addr);
2026 break;
14eebd91
CO
2027 default:
2028 break;
2029 }
2030
2031 return r;
2032}
2033
2034static int kvm_arch_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu,
2035 struct kvm_one_reg *reg)
2036{
2037 int r = -EINVAL;
4287f247 2038 __u64 val;
14eebd91
CO
2039
2040 switch (reg->id) {
29b7c71b
CO
2041 case KVM_REG_S390_TODPR:
2042 r = get_user(vcpu->arch.sie_block->todpr,
2043 (u32 __user *)reg->addr);
2044 break;
2045 case KVM_REG_S390_EPOCHDIFF:
2046 r = get_user(vcpu->arch.sie_block->epoch,
2047 (u64 __user *)reg->addr);
2048 break;
46a6dd1c 2049 case KVM_REG_S390_CPU_TIMER:
4287f247
DH
2050 r = get_user(val, (u64 __user *)reg->addr);
2051 if (!r)
2052 kvm_s390_set_cpu_timer(vcpu, val);
46a6dd1c
J
2053 break;
2054 case KVM_REG_S390_CLOCK_COMP:
2055 r = get_user(vcpu->arch.sie_block->ckc,
2056 (u64 __user *)reg->addr);
2057 break;
536336c2
DD
2058 case KVM_REG_S390_PFTOKEN:
2059 r = get_user(vcpu->arch.pfault_token,
2060 (u64 __user *)reg->addr);
9fbd8082
DH
2061 if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
2062 kvm_clear_async_pf_completion_queue(vcpu);
536336c2
DD
2063 break;
2064 case KVM_REG_S390_PFCOMPARE:
2065 r = get_user(vcpu->arch.pfault_compare,
2066 (u64 __user *)reg->addr);
2067 break;
2068 case KVM_REG_S390_PFSELECT:
2069 r = get_user(vcpu->arch.pfault_select,
2070 (u64 __user *)reg->addr);
2071 break;
672550fb
CB
2072 case KVM_REG_S390_PP:
2073 r = get_user(vcpu->arch.sie_block->pp,
2074 (u64 __user *)reg->addr);
2075 break;
afa45ff5
CB
2076 case KVM_REG_S390_GBEA:
2077 r = get_user(vcpu->arch.sie_block->gbea,
2078 (u64 __user *)reg->addr);
2079 break;
14eebd91
CO
2080 default:
2081 break;
2082 }
2083
2084 return r;
2085}
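
/*
 * Userspace sketch (error handling elided, hypothetical helper): reading
 * the guest CPU timer through KVM_GET_ONE_REG, the path served by the
 * handlers above.
 */
#include <linux/kvm.h>
#include <stdint.h>
#include <sys/ioctl.h>

static int example_get_cpu_timer(int vcpu_fd, uint64_t *value)
{
	struct kvm_one_reg reg = {
		.id   = KVM_REG_S390_CPU_TIMER,
		.addr = (uint64_t)value,	/* kernel writes the result here */
	};

	return ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);
}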
b6d33834 2086
b0c632db
HC
2087static int kvm_arch_vcpu_ioctl_initial_reset(struct kvm_vcpu *vcpu)
2088{
b0c632db 2089 kvm_s390_vcpu_initial_reset(vcpu);
b0c632db
HC
2090 return 0;
2091}
2092
2093int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
2094{
5a32c1af 2095 memcpy(&vcpu->run->s.regs.gprs, &regs->gprs, sizeof(regs->gprs));
b0c632db
HC
2096 return 0;
2097}
2098
2099int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
2100{
5a32c1af 2101 memcpy(&regs->gprs, &vcpu->run->s.regs.gprs, sizeof(regs->gprs));
b0c632db
HC
2102 return 0;
2103}
2104
2105int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
2106 struct kvm_sregs *sregs)
2107{
59674c1a 2108 memcpy(&vcpu->run->s.regs.acrs, &sregs->acrs, sizeof(sregs->acrs));
b0c632db 2109 memcpy(&vcpu->arch.sie_block->gcr, &sregs->crs, sizeof(sregs->crs));
59674c1a 2110 restore_access_regs(vcpu->run->s.regs.acrs);
b0c632db
HC
2111 return 0;
2112}
2113
2114int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
2115 struct kvm_sregs *sregs)
2116{
59674c1a 2117 memcpy(&sregs->acrs, &vcpu->run->s.regs.acrs, sizeof(sregs->acrs));
b0c632db 2118 memcpy(&sregs->crs, &vcpu->arch.sie_block->gcr, sizeof(sregs->crs));
b0c632db
HC
2119 return 0;
2120}
2121
2122int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
2123{
9abc2a08
DH
2124 /* make sure the new values will be lazily loaded */
2125 save_fpu_regs();
4725c860
MS
2126 if (test_fp_ctl(fpu->fpc))
2127 return -EINVAL;
9abc2a08
DH
2128 current->thread.fpu.fpc = fpu->fpc;
2129 if (MACHINE_HAS_VX)
2130 convert_fp_to_vx(current->thread.fpu.vxrs, (freg_t *)fpu->fprs);
2131 else
2132 memcpy(current->thread.fpu.fprs, &fpu->fprs, sizeof(fpu->fprs));
b0c632db
HC
2133 return 0;
2134}
2135
2136int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
2137{
9abc2a08
DH
2138 /* make sure we have the latest values */
2139 save_fpu_regs();
2140 if (MACHINE_HAS_VX)
2141 convert_vx_to_fp((freg_t *)fpu->fprs, current->thread.fpu.vxrs);
2142 else
2143 memcpy(fpu->fprs, current->thread.fpu.fprs, sizeof(fpu->fprs));
2144 fpu->fpc = current->thread.fpu.fpc;
b0c632db
HC
2145 return 0;
2146}
2147
2148static int kvm_arch_vcpu_ioctl_set_initial_psw(struct kvm_vcpu *vcpu, psw_t psw)
2149{
2150 int rc = 0;
2151
7a42fdc2 2152 if (!is_vcpu_stopped(vcpu))
b0c632db 2153 rc = -EBUSY;
d7b0b5eb
CO
2154 else {
2155 vcpu->run->psw_mask = psw.mask;
2156 vcpu->run->psw_addr = psw.addr;
2157 }
b0c632db
HC
2158 return rc;
2159}
2160
2161int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
2162 struct kvm_translation *tr)
2163{
2164 return -EINVAL; /* not implemented yet */
2165}
2166
27291e21
DH
2167#define VALID_GUESTDBG_FLAGS (KVM_GUESTDBG_SINGLESTEP | \
2168 KVM_GUESTDBG_USE_HW_BP | \
2169 KVM_GUESTDBG_ENABLE)
2170
d0bfb940
JK
2171int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
2172 struct kvm_guest_debug *dbg)
b0c632db 2173{
27291e21
DH
2174 int rc = 0;
2175
2176 vcpu->guest_debug = 0;
2177 kvm_s390_clear_bp_data(vcpu);
2178
2de3bfc2 2179 if (dbg->control & ~VALID_GUESTDBG_FLAGS)
27291e21 2180 return -EINVAL;
89b5b4de
DH
2181 if (!sclp.has_gpere)
2182 return -EINVAL;
27291e21
DH
2183
2184 if (dbg->control & KVM_GUESTDBG_ENABLE) {
2185 vcpu->guest_debug = dbg->control;
2186 /* enforce guest PER */
805de8f4 2187 atomic_or(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags);
27291e21
DH
2188
2189 if (dbg->control & KVM_GUESTDBG_USE_HW_BP)
2190 rc = kvm_s390_import_bp_data(vcpu, dbg);
2191 } else {
805de8f4 2192 atomic_andnot(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags);
27291e21
DH
2193 vcpu->arch.guestdbg.last_bp = 0;
2194 }
2195
2196 if (rc) {
2197 vcpu->guest_debug = 0;
2198 kvm_s390_clear_bp_data(vcpu);
805de8f4 2199 atomic_andnot(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags);
27291e21
DH
2200 }
2201
2202 return rc;
b0c632db
HC
2203}
2204
62d9f0db
MT
2205int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
2206 struct kvm_mp_state *mp_state)
2207{
6352e4d2
DH
2208 /* CHECK_STOP and LOAD are not supported yet */
2209 return is_vcpu_stopped(vcpu) ? KVM_MP_STATE_STOPPED :
2210 KVM_MP_STATE_OPERATING;
62d9f0db
MT
2211}
2212
2213int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
2214 struct kvm_mp_state *mp_state)
2215{
6352e4d2
DH
2216 int rc = 0;
2217
2218 /* user space knows about this interface - let it control the state */
2219 vcpu->kvm->arch.user_cpu_state_ctrl = 1;
2220
2221 switch (mp_state->mp_state) {
2222 case KVM_MP_STATE_STOPPED:
2223 kvm_s390_vcpu_stop(vcpu);
2224 break;
2225 case KVM_MP_STATE_OPERATING:
2226 kvm_s390_vcpu_start(vcpu);
2227 break;
2228 case KVM_MP_STATE_LOAD:
2229 case KVM_MP_STATE_CHECK_STOP:
2230 /* fall through - CHECK_STOP and LOAD are not supported yet */
2231 default:
2232 rc = -ENXIO;
2233 }
2234
2235 return rc;
62d9f0db
MT
2236}
2237
8ad35755
DH
2238static bool ibs_enabled(struct kvm_vcpu *vcpu)
2239{
2240 return atomic_read(&vcpu->arch.sie_block->cpuflags) & CPUSTAT_IBS;
2241}
2242
2c70fe44
CB
2243static int kvm_s390_handle_requests(struct kvm_vcpu *vcpu)
2244{
8ad35755 2245retry:
8e236546 2246 kvm_s390_vcpu_request_handled(vcpu);
586b7ccd
CB
2247 if (!vcpu->requests)
2248 return 0;
2c70fe44
CB
2249 /*
2250 * We use MMU_RELOAD just to re-arm the ipte notifier for the
2251 * guest prefix page. gmap_ipte_notify will wait on the ptl lock.
2252 * This ensures that the ipte instruction for this request has
2253 * already finished. We might race against a second unmapper that
2254 * wants to set the blocking bit. Let's just retry the request loop.
2255 */
8ad35755 2256 if (kvm_check_request(KVM_REQ_MMU_RELOAD, vcpu)) {
2c70fe44
CB
2257 int rc;
2258 rc = gmap_ipte_notify(vcpu->arch.gmap,
fda902cb 2259 kvm_s390_get_prefix(vcpu),
2c70fe44
CB
2260 PAGE_SIZE * 2);
2261 if (rc)
2262 return rc;
8ad35755 2263 goto retry;
2c70fe44 2264 }
8ad35755 2265
d3d692c8
DH
2266 if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu)) {
2267 vcpu->arch.sie_block->ihcpu = 0xffff;
2268 goto retry;
2269 }
2270
8ad35755
DH
2271 if (kvm_check_request(KVM_REQ_ENABLE_IBS, vcpu)) {
2272 if (!ibs_enabled(vcpu)) {
2273 trace_kvm_s390_enable_disable_ibs(vcpu->vcpu_id, 1);
805de8f4 2274 atomic_or(CPUSTAT_IBS,
8ad35755
DH
2275 &vcpu->arch.sie_block->cpuflags);
2276 }
2277 goto retry;
2c70fe44 2278 }
8ad35755
DH
2279
2280 if (kvm_check_request(KVM_REQ_DISABLE_IBS, vcpu)) {
2281 if (ibs_enabled(vcpu)) {
2282 trace_kvm_s390_enable_disable_ibs(vcpu->vcpu_id, 0);
805de8f4 2283 atomic_andnot(CPUSTAT_IBS,
8ad35755
DH
2284 &vcpu->arch.sie_block->cpuflags);
2285 }
2286 goto retry;
2287 }
2288
0759d068
DH
2289 /* nothing to do, just clear the request */
2290 clear_bit(KVM_REQ_UNHALT, &vcpu->requests);
2291
2c70fe44
CB
2292 return 0;
2293}
2294
25ed1675
DH
2295void kvm_s390_set_tod_clock(struct kvm *kvm, u64 tod)
2296{
2297 struct kvm_vcpu *vcpu;
2298 int i;
2299
2300 mutex_lock(&kvm->lock);
2301 preempt_disable();
2302 kvm->arch.epoch = tod - get_tod_clock();
2303 kvm_s390_vcpu_block_all(kvm);
2304 kvm_for_each_vcpu(i, vcpu, kvm)
2305 vcpu->arch.sie_block->epoch = kvm->arch.epoch;
2306 kvm_s390_vcpu_unblock_all(kvm);
2307 preempt_enable();
2308 mutex_unlock(&kvm->lock);
2309}
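
/*
 * Sketch of the epoch arithmetic above (hypothetical helper): SIE presents
 * the guest with guest_tod = host_tod + epoch, so storing
 * epoch = tod - get_tod_clock() makes every vcpu observe the requested TOD
 * value from this instant on.
 */
static inline u64 example_guest_tod(struct kvm *kvm)
{
	return get_tod_clock() + kvm->arch.epoch;	/* what the guest reads */
}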
2310
fa576c58
TH
2311/**
2312 * kvm_arch_fault_in_page - fault-in guest page if necessary
2313 * @vcpu: The corresponding virtual cpu
2314 * @gpa: Guest physical address
2315 * @writable: Whether the page should be writable or not
2316 *
2317 * Make sure that a guest page has been faulted-in on the host.
2318 *
2319 * Return: Zero on success, negative error code otherwise.
2320 */
2321long kvm_arch_fault_in_page(struct kvm_vcpu *vcpu, gpa_t gpa, int writable)
24eb3a82 2322{
527e30b4
MS
2323 return gmap_fault(vcpu->arch.gmap, gpa,
2324 writable ? FAULT_FLAG_WRITE : 0);
24eb3a82
DD
2325}
2326
3c038e6b
DD
2327static void __kvm_inject_pfault_token(struct kvm_vcpu *vcpu, bool start_token,
2328 unsigned long token)
2329{
2330 struct kvm_s390_interrupt inti;
383d0b05 2331 struct kvm_s390_irq irq;
3c038e6b
DD
2332
2333 if (start_token) {
383d0b05
JF
2334 irq.u.ext.ext_params2 = token;
2335 irq.type = KVM_S390_INT_PFAULT_INIT;
2336 WARN_ON_ONCE(kvm_s390_inject_vcpu(vcpu, &irq));
3c038e6b
DD
2337 } else {
2338 inti.type = KVM_S390_INT_PFAULT_DONE;
383d0b05 2339 inti.parm64 = token;
3c038e6b
DD
2340 WARN_ON_ONCE(kvm_s390_inject_vm(vcpu->kvm, &inti));
2341 }
2342}
2343
2344void kvm_arch_async_page_not_present(struct kvm_vcpu *vcpu,
2345 struct kvm_async_pf *work)
2346{
2347 trace_kvm_s390_pfault_init(vcpu, work->arch.pfault_token);
2348 __kvm_inject_pfault_token(vcpu, true, work->arch.pfault_token);
2349}
2350
2351void kvm_arch_async_page_present(struct kvm_vcpu *vcpu,
2352 struct kvm_async_pf *work)
2353{
2354 trace_kvm_s390_pfault_done(vcpu, work->arch.pfault_token);
2355 __kvm_inject_pfault_token(vcpu, false, work->arch.pfault_token);
2356}
2357
2358void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu,
2359 struct kvm_async_pf *work)
2360{
2361 /* s390 will always inject the page directly */
2362}
2363
2364bool kvm_arch_can_inject_async_page_present(struct kvm_vcpu *vcpu)
2365{
2366 /*
2367 * s390 will always inject the page directly,
2368 * but we still want check_async_completion to clean up
2369 */
2370 return true;
2371}
2372
2373static int kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu)
2374{
2375 hva_t hva;
2376 struct kvm_arch_async_pf arch;
2377 int rc;
2378
2379 if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
2380 return 0;
2381 if ((vcpu->arch.sie_block->gpsw.mask & vcpu->arch.pfault_select) !=
2382 vcpu->arch.pfault_compare)
2383 return 0;
2384 if (psw_extint_disabled(vcpu))
2385 return 0;
9a022067 2386 if (kvm_s390_vcpu_has_irq(vcpu, 0))
3c038e6b
DD
2387 return 0;
2388 if (!(vcpu->arch.sie_block->gcr[0] & 0x200ul))
2389 return 0;
2390 if (!vcpu->arch.gmap->pfault_enabled)
2391 return 0;
2392
81480cc1
HC
2393 hva = gfn_to_hva(vcpu->kvm, gpa_to_gfn(current->thread.gmap_addr));
2394 hva += current->thread.gmap_addr & ~PAGE_MASK;
2395 if (read_guest_real(vcpu, vcpu->arch.pfault_token, &arch.pfault_token, 8))
3c038e6b
DD
2396 return 0;
2397
2398 rc = kvm_setup_async_pf(vcpu, current->thread.gmap_addr, hva, &arch);
2399 return rc;
2400}
2401
3fb4c40f 2402static int vcpu_pre_run(struct kvm_vcpu *vcpu)
b0c632db 2403{
3fb4c40f 2404 int rc, cpuflags;
e168bf8d 2405
3c038e6b
DD
2406 /*
2407 * On s390 notifications for arriving pages will be delivered directly
2408 * to the guest, but the housekeeping for completed pfaults is
2409 * handled outside the worker.
2410 */
2411 kvm_check_async_pf_completion(vcpu);
2412
7ec7c8c7
CB
2413 vcpu->arch.sie_block->gg14 = vcpu->run->s.regs.gprs[14];
2414 vcpu->arch.sie_block->gg15 = vcpu->run->s.regs.gprs[15];
b0c632db
HC
2415
2416 if (need_resched())
2417 schedule();
2418
d3a73acb 2419 if (test_cpu_flag(CIF_MCCK_PENDING))
71cde587
CB
2420 s390_handle_mcck();
2421
79395031
JF
2422 if (!kvm_is_ucontrol(vcpu->kvm)) {
2423 rc = kvm_s390_deliver_pending_interrupts(vcpu);
2424 if (rc)
2425 return rc;
2426 }
0ff31867 2427
2c70fe44
CB
2428 rc = kvm_s390_handle_requests(vcpu);
2429 if (rc)
2430 return rc;
2431
27291e21
DH
2432 if (guestdbg_enabled(vcpu)) {
2433 kvm_s390_backup_guest_per_regs(vcpu);
2434 kvm_s390_patch_guest_per_regs(vcpu);
2435 }
2436
b0c632db 2437 vcpu->arch.sie_block->icptcode = 0;
3fb4c40f
TH
2438 cpuflags = atomic_read(&vcpu->arch.sie_block->cpuflags);
2439 VCPU_EVENT(vcpu, 6, "entering sie flags %x", cpuflags);
2440 trace_kvm_s390_sie_enter(vcpu, cpuflags);
2b29a9fd 2441
3fb4c40f
TH
2442 return 0;
2443}
2444
492d8642
TH
2445static int vcpu_post_run_fault_in_sie(struct kvm_vcpu *vcpu)
2446{
56317920
DH
2447 struct kvm_s390_pgm_info pgm_info = {
2448 .code = PGM_ADDRESSING,
2449 };
2450 u8 opcode, ilen;
492d8642
TH
2451 int rc;
2452
2453 VCPU_EVENT(vcpu, 3, "%s", "fault in sie instruction");
2454 trace_kvm_s390_sie_fault(vcpu);
2455
2456 /*
2457 * We want to inject an addressing exception, which is defined as a
2458 * suppressing or terminating exception. However, since we came here
2459 * by a DAT access exception, the PSW still points to the faulting
2460 * instruction since DAT exceptions are nullifying. So we've got
2461 * to look up the current opcode to get the length of the instruction
2462 * to be able to forward the PSW.
2463 */
65977322 2464 rc = read_guest_instr(vcpu, &opcode, 1);
56317920 2465 ilen = insn_length(opcode);
9b0d721a
DH
2466 if (rc < 0) {
2467 return rc;
2468 } else if (rc) {
2469 /* Instruction-Fetching Exceptions - we can't detect the ilen.
2470 * Forward by arbitrary ilc, injection will take care of
2471 * nullification if necessary.
2472 */
2473 pgm_info = vcpu->arch.pgm;
2474 ilen = 4;
2475 }
56317920
DH
2476 pgm_info.flags = ilen | KVM_S390_PGM_FLAGS_ILC_VALID;
2477 kvm_s390_forward_psw(vcpu, ilen);
2478 return kvm_s390_inject_prog_irq(vcpu, &pgm_info);
492d8642
TH
2479}
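
/*
 * Illustrative sketch (hypothetical name): s390 encodes the instruction
 * length in the two leftmost opcode bits - 00 means 2 bytes, 01 and 10
 * mean 4 bytes, 11 means 6 bytes - which is the mapping insn_length()
 * evaluates for the PSW forwarding above.
 */
static inline int example_insn_length(unsigned char opcode)
{
	switch (opcode >> 6) {
	case 0:
		return 2;
	case 3:
		return 6;
	default:
		return 4;
	}
}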
2480
3fb4c40f
TH
2481static int vcpu_post_run(struct kvm_vcpu *vcpu, int exit_reason)
2482{
2b29a9fd
DD
2483 VCPU_EVENT(vcpu, 6, "exit sie icptcode %d",
2484 vcpu->arch.sie_block->icptcode);
2485 trace_kvm_s390_sie_exit(vcpu, vcpu->arch.sie_block->icptcode);
2486
27291e21
DH
2487 if (guestdbg_enabled(vcpu))
2488 kvm_s390_restore_guest_per_regs(vcpu);
2489
7ec7c8c7
CB
2490 vcpu->run->s.regs.gprs[14] = vcpu->arch.sie_block->gg14;
2491 vcpu->run->s.regs.gprs[15] = vcpu->arch.sie_block->gg15;
71f116bf
DH
2492
2493 if (vcpu->arch.sie_block->icptcode > 0) {
2494 int rc = kvm_handle_sie_intercept(vcpu);
2495
2496 if (rc != -EOPNOTSUPP)
2497 return rc;
2498 vcpu->run->exit_reason = KVM_EXIT_S390_SIEIC;
2499 vcpu->run->s390_sieic.icptcode = vcpu->arch.sie_block->icptcode;
2500 vcpu->run->s390_sieic.ipa = vcpu->arch.sie_block->ipa;
2501 vcpu->run->s390_sieic.ipb = vcpu->arch.sie_block->ipb;
2502 return -EREMOTE;
2503 } else if (exit_reason != -EFAULT) {
2504 vcpu->stat.exit_null++;
2505 return 0;
210b1607
TH
2506 } else if (kvm_is_ucontrol(vcpu->kvm)) {
2507 vcpu->run->exit_reason = KVM_EXIT_S390_UCONTROL;
2508 vcpu->run->s390_ucontrol.trans_exc_code =
2509 current->thread.gmap_addr;
2510 vcpu->run->s390_ucontrol.pgm_code = 0x10;
71f116bf 2511 return -EREMOTE;
24eb3a82 2512 } else if (current->thread.gmap_pfault) {
3c038e6b 2513 trace_kvm_s390_major_guest_pfault(vcpu);
24eb3a82 2514 current->thread.gmap_pfault = 0;
71f116bf
DH
2515 if (kvm_arch_setup_async_pf(vcpu))
2516 return 0;
2517 return kvm_arch_fault_in_page(vcpu, current->thread.gmap_addr, 1);
a76ccff6 2518 }
71f116bf 2519 return vcpu_post_run_fault_in_sie(vcpu);
3fb4c40f
TH
2520}
2521
2522static int __vcpu_run(struct kvm_vcpu *vcpu)
2523{
2524 int rc, exit_reason;
2525
800c1065
TH
2526 /*
2527 * We try to hold kvm->srcu during most of vcpu_run (except when run-
2528 * ning the guest), so that memslots (and other stuff) are protected
2529 */
2530 vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
2531
a76ccff6
TH
2532 do {
2533 rc = vcpu_pre_run(vcpu);
2534 if (rc)
2535 break;
3fb4c40f 2536
800c1065 2537 srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
a76ccff6
TH
2538 /*
2539 * As PF_VCPU will be used in fault handler, between
2540 * guest_enter and guest_exit should be no uaccess.
2541 */
0097d12e
CB
2542 local_irq_disable();
2543 __kvm_guest_enter();
db0758b2 2544 __disable_cpu_timer_accounting(vcpu);
0097d12e 2545 local_irq_enable();
a76ccff6
TH
2546 exit_reason = sie64a(vcpu->arch.sie_block,
2547 vcpu->run->s.regs.gprs);
0097d12e 2548 local_irq_disable();
db0758b2 2549 __enable_cpu_timer_accounting(vcpu);
0097d12e
CB
2550 __kvm_guest_exit();
2551 local_irq_enable();
800c1065 2552 vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
a76ccff6
TH
2553
2554 rc = vcpu_post_run(vcpu, exit_reason);
27291e21 2555 } while (!signal_pending(current) && !guestdbg_exit_pending(vcpu) && !rc);
3fb4c40f 2556
800c1065 2557 srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
e168bf8d 2558 return rc;
b0c632db
HC
2559}
2560
b028ee3e
DH
2561static void sync_regs(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
2562{
2563 vcpu->arch.sie_block->gpsw.mask = kvm_run->psw_mask;
2564 vcpu->arch.sie_block->gpsw.addr = kvm_run->psw_addr;
2565 if (kvm_run->kvm_dirty_regs & KVM_SYNC_PREFIX)
2566 kvm_s390_set_prefix(vcpu, kvm_run->s.regs.prefix);
2567 if (kvm_run->kvm_dirty_regs & KVM_SYNC_CRS) {
2568 memcpy(&vcpu->arch.sie_block->gcr, &kvm_run->s.regs.crs, 128);
d3d692c8
DH
2569 /* some control register changes require a tlb flush */
2570 kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
b028ee3e
DH
2571 }
2572 if (kvm_run->kvm_dirty_regs & KVM_SYNC_ARCH0) {
4287f247 2573 kvm_s390_set_cpu_timer(vcpu, kvm_run->s.regs.cputm);
b028ee3e
DH
2574 vcpu->arch.sie_block->ckc = kvm_run->s.regs.ckc;
2575 vcpu->arch.sie_block->todpr = kvm_run->s.regs.todpr;
2576 vcpu->arch.sie_block->pp = kvm_run->s.regs.pp;
2577 vcpu->arch.sie_block->gbea = kvm_run->s.regs.gbea;
2578 }
2579 if (kvm_run->kvm_dirty_regs & KVM_SYNC_PFAULT) {
2580 vcpu->arch.pfault_token = kvm_run->s.regs.pft;
2581 vcpu->arch.pfault_select = kvm_run->s.regs.pfs;
2582 vcpu->arch.pfault_compare = kvm_run->s.regs.pfc;
9fbd8082
DH
2583 if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
2584 kvm_clear_async_pf_completion_queue(vcpu);
b028ee3e
DH
2585 }
2586 kvm_run->kvm_dirty_regs = 0;
2587}
2588
2589static void store_regs(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
2590{
2591 kvm_run->psw_mask = vcpu->arch.sie_block->gpsw.mask;
2592 kvm_run->psw_addr = vcpu->arch.sie_block->gpsw.addr;
2593 kvm_run->s.regs.prefix = kvm_s390_get_prefix(vcpu);
2594 memcpy(&kvm_run->s.regs.crs, &vcpu->arch.sie_block->gcr, 128);
4287f247 2595 kvm_run->s.regs.cputm = kvm_s390_get_cpu_timer(vcpu);
b028ee3e
DH
2596 kvm_run->s.regs.ckc = vcpu->arch.sie_block->ckc;
2597 kvm_run->s.regs.todpr = vcpu->arch.sie_block->todpr;
2598 kvm_run->s.regs.pp = vcpu->arch.sie_block->pp;
2599 kvm_run->s.regs.gbea = vcpu->arch.sie_block->gbea;
2600 kvm_run->s.regs.pft = vcpu->arch.pfault_token;
2601 kvm_run->s.regs.pfs = vcpu->arch.pfault_select;
2602 kvm_run->s.regs.pfc = vcpu->arch.pfault_compare;
2603}
2604
b0c632db
HC
2605int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
2606{
8f2abe6a 2607 int rc;
b0c632db
HC
2608 sigset_t sigsaved;
2609
27291e21
DH
2610 if (guestdbg_exit_pending(vcpu)) {
2611 kvm_s390_prepare_debug_exit(vcpu);
2612 return 0;
2613 }
2614
b0c632db
HC
2615 if (vcpu->sigset_active)
2616 sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);
2617
6352e4d2
DH
2618 if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm)) {
2619 kvm_s390_vcpu_start(vcpu);
2620 } else if (is_vcpu_stopped(vcpu)) {
ea2cdd27 2621 pr_err_ratelimited("can't run stopped vcpu %d\n",
6352e4d2
DH
2622 vcpu->vcpu_id);
2623 return -EINVAL;
2624 }
b0c632db 2625
b028ee3e 2626 sync_regs(vcpu, kvm_run);
db0758b2 2627 enable_cpu_timer_accounting(vcpu);
d7b0b5eb 2628
dab4079d 2629 might_fault();
a76ccff6 2630 rc = __vcpu_run(vcpu);
9ace903d 2631
b1d16c49
CE
2632 if (signal_pending(current) && !rc) {
2633 kvm_run->exit_reason = KVM_EXIT_INTR;
8f2abe6a 2634 rc = -EINTR;
b1d16c49 2635 }
8f2abe6a 2636
27291e21
DH
2637 if (guestdbg_exit_pending(vcpu) && !rc) {
2638 kvm_s390_prepare_debug_exit(vcpu);
2639 rc = 0;
2640 }
2641
8f2abe6a 2642 if (rc == -EREMOTE) {
71f116bf 2643 /* userspace support is needed, kvm_run has been prepared */
8f2abe6a
CB
2644 rc = 0;
2645 }
b0c632db 2646
db0758b2 2647 disable_cpu_timer_accounting(vcpu);
b028ee3e 2648 store_regs(vcpu, kvm_run);
d7b0b5eb 2649
b0c632db
HC
2650 if (vcpu->sigset_active)
2651 sigprocmask(SIG_SETMASK, &sigsaved, NULL);
2652
b0c632db 2653 vcpu->stat.exit_userspace++;
7e8e6ab4 2654 return rc;
b0c632db
HC
2655}
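
/*
 * Userspace sketch of the loop this ioctl serves (simplified, hypothetical
 * helper): 'run' is the vcpu's mmap()ed kvm_run communication area.
 */
#include <errno.h>
#include <linux/kvm.h>
#include <sys/ioctl.h>

static void example_run_loop(int vcpu_fd, struct kvm_run *run)
{
	for (;;) {
		if (ioctl(vcpu_fd, KVM_RUN, 0) < 0 && errno != EINTR)
			break;
		switch (run->exit_reason) {
		case KVM_EXIT_INTR:		/* interrupted by a signal */
			continue;
		case KVM_EXIT_S390_SIEIC:	/* intercept for userspace */
			/* inspect run->s390_sieic.icptcode/ipa/ipb here */
			continue;
		default:
			return;
		}
	}
}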
2656
b0c632db
HC
2657/*
2658 * store status at address
2659 * we have two special cases:
2660 * KVM_S390_STORE_STATUS_NOADDR: -> 0x1200 on 64 bit
2661 * KVM_S390_STORE_STATUS_PREFIXED: -> prefix
2662 */
d0bce605 2663int kvm_s390_store_status_unloaded(struct kvm_vcpu *vcpu, unsigned long gpa)
b0c632db 2664{
092670cd 2665 unsigned char archmode = 1;
9abc2a08 2666 freg_t fprs[NUM_FPRS];
fda902cb 2667 unsigned int px;
4287f247 2668 u64 clkcomp, cputm;
d0bce605 2669 int rc;
b0c632db 2670
d9a3a09a 2671 px = kvm_s390_get_prefix(vcpu);
d0bce605
HC
2672 if (gpa == KVM_S390_STORE_STATUS_NOADDR) {
2673 if (write_guest_abs(vcpu, 163, &archmode, 1))
b0c632db 2674 return -EFAULT;
d9a3a09a 2675 gpa = 0;
d0bce605
HC
2676 } else if (gpa == KVM_S390_STORE_STATUS_PREFIXED) {
2677 if (write_guest_real(vcpu, 163, &archmode, 1))
b0c632db 2678 return -EFAULT;
d9a3a09a
MS
2679 gpa = px;
2680 } else
2681 gpa -= __LC_FPREGS_SAVE_AREA;
9abc2a08
DH
2682
2683 /* manually convert vector registers if necessary */
2684 if (MACHINE_HAS_VX) {
9522b37f 2685 convert_vx_to_fp(fprs, (__vector128 *) vcpu->run->s.regs.vrs);
9abc2a08
DH
2686 rc = write_guest_abs(vcpu, gpa + __LC_FPREGS_SAVE_AREA,
2687 fprs, 128);
2688 } else {
2689 rc = write_guest_abs(vcpu, gpa + __LC_FPREGS_SAVE_AREA,
6fd8e67d 2690 vcpu->run->s.regs.fprs, 128);
9abc2a08 2691 }
d9a3a09a 2692 rc |= write_guest_abs(vcpu, gpa + __LC_GPREGS_SAVE_AREA,
d0bce605 2693 vcpu->run->s.regs.gprs, 128);
d9a3a09a 2694 rc |= write_guest_abs(vcpu, gpa + __LC_PSW_SAVE_AREA,
d0bce605 2695 &vcpu->arch.sie_block->gpsw, 16);
d9a3a09a 2696 rc |= write_guest_abs(vcpu, gpa + __LC_PREFIX_SAVE_AREA,
fda902cb 2697 &px, 4);
d9a3a09a 2698 rc |= write_guest_abs(vcpu, gpa + __LC_FP_CREG_SAVE_AREA,
9abc2a08 2699 &vcpu->run->s.regs.fpc, 4);
d9a3a09a 2700 rc |= write_guest_abs(vcpu, gpa + __LC_TOD_PROGREG_SAVE_AREA,
d0bce605 2701 &vcpu->arch.sie_block->todpr, 4);
4287f247 2702 cputm = kvm_s390_get_cpu_timer(vcpu);
d9a3a09a 2703 rc |= write_guest_abs(vcpu, gpa + __LC_CPU_TIMER_SAVE_AREA,
4287f247 2704 &cputm, 8);
178bd789 2705 clkcomp = vcpu->arch.sie_block->ckc >> 8;
d9a3a09a 2706 rc |= write_guest_abs(vcpu, gpa + __LC_CLOCK_COMP_SAVE_AREA,
d0bce605 2707 &clkcomp, 8);
d9a3a09a 2708 rc |= write_guest_abs(vcpu, gpa + __LC_AREGS_SAVE_AREA,
d0bce605 2709 &vcpu->run->s.regs.acrs, 64);
d9a3a09a 2710 rc |= write_guest_abs(vcpu, gpa + __LC_CREGS_SAVE_AREA,
d0bce605
HC
2711 &vcpu->arch.sie_block->gcr, 128);
2712 return rc ? -EFAULT : 0;
b0c632db
HC
2713}
2714
e879892c
TH
2715int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
2716{
2717 /*
2718 * The guest FPRS and ACRS are in the host FPRS/ACRS due to the lazy
2719 * copying in vcpu load/put. Let's update our copies before we save
2720 * them into the save area.
2721 */
d0164ee2 2722 save_fpu_regs();
9abc2a08 2723 vcpu->run->s.regs.fpc = current->thread.fpu.fpc;
e879892c
TH
2724 save_access_regs(vcpu->run->s.regs.acrs);
2725
2726 return kvm_s390_store_status_unloaded(vcpu, addr);
2727}
2728
bc17de7c
EF
2729/*
2730 * store additional status at address
2731 */
2732int kvm_s390_store_adtl_status_unloaded(struct kvm_vcpu *vcpu,
2733 unsigned long gpa)
2734{
2735 /* Only bits 0-53 are used for address formation */
2736 if (!(gpa & ~0x3ff))
2737 return 0;
2738
2739 return write_guest_abs(vcpu, gpa & ~0x3ff,
2740 (void *)&vcpu->run->s.regs.vrs, 512);
2741}
2742
2743int kvm_s390_vcpu_store_adtl_status(struct kvm_vcpu *vcpu, unsigned long addr)
2744{
2745 if (!test_kvm_facility(vcpu->kvm, 129))
2746 return 0;
2747
2748 /*
2749 * The guest VXRS are in the host VXRS due to the lazy
9977e886
HB
2750 * copying in vcpu load/put. We can simply call save_fpu_regs()
2751 * to save the current register state because we are in the
2752 * middle of a load/put cycle.
2753 *
2754 * Let's update our copies before we save it into the save area.
bc17de7c 2755 */
d0164ee2 2756 save_fpu_regs();
bc17de7c
EF
2757
2758 return kvm_s390_store_adtl_status_unloaded(vcpu, addr);
2759}
2760
8ad35755
DH
2761static void __disable_ibs_on_vcpu(struct kvm_vcpu *vcpu)
2762{
2763 kvm_check_request(KVM_REQ_ENABLE_IBS, vcpu);
8e236546 2764 kvm_s390_sync_request(KVM_REQ_DISABLE_IBS, vcpu);
8ad35755
DH
2765}
2766
2767static void __disable_ibs_on_all_vcpus(struct kvm *kvm)
2768{
2769 unsigned int i;
2770 struct kvm_vcpu *vcpu;
2771
2772 kvm_for_each_vcpu(i, vcpu, kvm) {
2773 __disable_ibs_on_vcpu(vcpu);
2774 }
2775}
2776
2777static void __enable_ibs_on_vcpu(struct kvm_vcpu *vcpu)
2778{
2779 kvm_check_request(KVM_REQ_DISABLE_IBS, vcpu);
8e236546 2780 kvm_s390_sync_request(KVM_REQ_ENABLE_IBS, vcpu);
8ad35755
DH
2781}
2782
6852d7b6
DH
2783void kvm_s390_vcpu_start(struct kvm_vcpu *vcpu)
2784{
8ad35755
DH
2785 int i, online_vcpus, started_vcpus = 0;
2786
2787 if (!is_vcpu_stopped(vcpu))
2788 return;
2789
6852d7b6 2790 trace_kvm_s390_vcpu_start_stop(vcpu->vcpu_id, 1);
8ad35755 2791 /* Only one cpu at a time may enter/leave the STOPPED state. */
433b9ee4 2792 spin_lock(&vcpu->kvm->arch.start_stop_lock);
8ad35755
DH
2793 online_vcpus = atomic_read(&vcpu->kvm->online_vcpus);
2794
2795 for (i = 0; i < online_vcpus; i++) {
2796 if (!is_vcpu_stopped(vcpu->kvm->vcpus[i]))
2797 started_vcpus++;
2798 }
2799
2800 if (started_vcpus == 0) {
2801 /* we're the only active VCPU -> speed it up */
2802 __enable_ibs_on_vcpu(vcpu);
2803 } else if (started_vcpus == 1) {
2804 /*
2805 * As we are starting a second VCPU, we have to disable
2806 * the IBS facility on all VCPUs to remove potentially
2807 * outstanding ENABLE requests.
2808 */
2809 __disable_ibs_on_all_vcpus(vcpu->kvm);
2810 }
2811
805de8f4 2812 atomic_andnot(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);
8ad35755
DH
2813 /*
2814 * Another VCPU might have used IBS while we were offline.
2815 * Let's play safe and flush the VCPU at startup.
2816 */
d3d692c8 2817 kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
433b9ee4 2818 spin_unlock(&vcpu->kvm->arch.start_stop_lock);
8ad35755 2819 return;
6852d7b6
DH
2820}
2821
2822void kvm_s390_vcpu_stop(struct kvm_vcpu *vcpu)
2823{
8ad35755
DH
2824 int i, online_vcpus, started_vcpus = 0;
2825 struct kvm_vcpu *started_vcpu = NULL;
2826
2827 if (is_vcpu_stopped(vcpu))
2828 return;
2829
6852d7b6 2830 trace_kvm_s390_vcpu_start_stop(vcpu->vcpu_id, 0);
8ad35755 2831 /* Only one cpu at a time may enter/leave the STOPPED state. */
433b9ee4 2832 spin_lock(&vcpu->kvm->arch.start_stop_lock);
8ad35755
DH
2833 online_vcpus = atomic_read(&vcpu->kvm->online_vcpus);
2834
32f5ff63 2835 /* SIGP STOP and SIGP STOP AND STORE STATUS has been fully processed */
6cddd432 2836 kvm_s390_clear_stop_irq(vcpu);
32f5ff63 2837
805de8f4 2838 atomic_or(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);
8ad35755
DH
2839 __disable_ibs_on_vcpu(vcpu);
2840
2841 for (i = 0; i < online_vcpus; i++) {
2842 if (!is_vcpu_stopped(vcpu->kvm->vcpus[i])) {
2843 started_vcpus++;
2844 started_vcpu = vcpu->kvm->vcpus[i];
2845 }
2846 }
2847
2848 if (started_vcpus == 1) {
2849 /*
2850 * As we only have one VCPU left, we want to enable the
2851 * IBS facility for that VCPU to speed it up.
2852 */
2853 __enable_ibs_on_vcpu(started_vcpu);
2854 }
2855
433b9ee4 2856 spin_unlock(&vcpu->kvm->arch.start_stop_lock);
8ad35755 2857 return;
6852d7b6
DH
2858}
2859
d6712df9
CH
2860static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
2861 struct kvm_enable_cap *cap)
2862{
2863 int r;
2864
2865 if (cap->flags)
2866 return -EINVAL;
2867
2868 switch (cap->cap) {
fa6b7fe9
CH
2869 case KVM_CAP_S390_CSS_SUPPORT:
2870 if (!vcpu->kvm->arch.css_support) {
2871 vcpu->kvm->arch.css_support = 1;
c92ea7b9 2872 VM_EVENT(vcpu->kvm, 3, "%s", "ENABLE: CSS support");
fa6b7fe9
CH
2873 trace_kvm_s390_enable_css(vcpu->kvm);
2874 }
2875 r = 0;
2876 break;
d6712df9
CH
2877 default:
2878 r = -EINVAL;
2879 break;
2880 }
2881 return r;
2882}
2883
41408c28
TH
2884static long kvm_s390_guest_mem_op(struct kvm_vcpu *vcpu,
2885 struct kvm_s390_mem_op *mop)
2886{
2887 void __user *uaddr = (void __user *)mop->buf;
2888 void *tmpbuf = NULL;
2889 int r, srcu_idx;
2890 const u64 supported_flags = KVM_S390_MEMOP_F_INJECT_EXCEPTION
2891 | KVM_S390_MEMOP_F_CHECK_ONLY;
2892
2893 if (mop->flags & ~supported_flags)
2894 return -EINVAL;
2895
2896 if (mop->size > MEM_OP_MAX_SIZE)
2897 return -E2BIG;
2898
2899 if (!(mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY)) {
2900 tmpbuf = vmalloc(mop->size);
2901 if (!tmpbuf)
2902 return -ENOMEM;
2903 }
2904
2905 srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
2906
2907 switch (mop->op) {
2908 case KVM_S390_MEMOP_LOGICAL_READ:
2909 if (mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY) {
92c96321
DH
2910 r = check_gva_range(vcpu, mop->gaddr, mop->ar,
2911 mop->size, GACC_FETCH);
41408c28
TH
2912 break;
2913 }
2914 r = read_guest(vcpu, mop->gaddr, mop->ar, tmpbuf, mop->size);
2915 if (r == 0) {
2916 if (copy_to_user(uaddr, tmpbuf, mop->size))
2917 r = -EFAULT;
2918 }
2919 break;
2920 case KVM_S390_MEMOP_LOGICAL_WRITE:
2921 if (mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY) {
92c96321
DH
2922 r = check_gva_range(vcpu, mop->gaddr, mop->ar,
2923 mop->size, GACC_STORE);
41408c28
TH
2924 break;
2925 }
2926 if (copy_from_user(tmpbuf, uaddr, mop->size)) {
2927 r = -EFAULT;
2928 break;
2929 }
2930 r = write_guest(vcpu, mop->gaddr, mop->ar, tmpbuf, mop->size);
2931 break;
2932 default:
2933 r = -EINVAL;
2934 }
2935
2936 srcu_read_unlock(&vcpu->kvm->srcu, srcu_idx);
2937
2938 if (r > 0 && (mop->flags & KVM_S390_MEMOP_F_INJECT_EXCEPTION) != 0)
2939 kvm_s390_inject_prog_irq(vcpu, &vcpu->arch.pgm);
2940
2941 vfree(tmpbuf);
2942 return r;
2943}
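
/*
 * Userspace sketch (error handling elided, hypothetical helper): a logical
 * read of guest memory through the vcpu KVM_S390_MEM_OP ioctl handled
 * above.
 */
#include <linux/kvm.h>
#include <stdint.h>
#include <sys/ioctl.h>

static int example_read_guest(int vcpu_fd, uint64_t gaddr, void *buf,
			      uint32_t len)
{
	struct kvm_s390_mem_op op = {
		.gaddr = gaddr,
		.size  = len,
		.op    = KVM_S390_MEMOP_LOGICAL_READ,
		.buf   = (uint64_t)buf,
		.ar    = 0,		/* address via access register 0 */
	};

	return ioctl(vcpu_fd, KVM_S390_MEM_OP, &op);
}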
2944
b0c632db
HC
2945long kvm_arch_vcpu_ioctl(struct file *filp,
2946 unsigned int ioctl, unsigned long arg)
2947{
2948 struct kvm_vcpu *vcpu = filp->private_data;
2949 void __user *argp = (void __user *)arg;
800c1065 2950 int idx;
bc923cc9 2951 long r;
b0c632db 2952
93736624 2953 switch (ioctl) {
47b43c52
JF
2954 case KVM_S390_IRQ: {
2955 struct kvm_s390_irq s390irq;
2956
2957 r = -EFAULT;
2958 if (copy_from_user(&s390irq, argp, sizeof(s390irq)))
2959 break;
2960 r = kvm_s390_inject_vcpu(vcpu, &s390irq);
2961 break;
2962 }
93736624 2963 case KVM_S390_INTERRUPT: {
ba5c1e9b 2964 struct kvm_s390_interrupt s390int;
383d0b05 2965 struct kvm_s390_irq s390irq;
ba5c1e9b 2966
93736624 2967 r = -EFAULT;
ba5c1e9b 2968 if (copy_from_user(&s390int, argp, sizeof(s390int)))
93736624 2969 break;
383d0b05
JF
2970 if (s390int_to_s390irq(&s390int, &s390irq))
2971 return -EINVAL;
2972 r = kvm_s390_inject_vcpu(vcpu, &s390irq);
93736624 2973 break;
ba5c1e9b 2974 }
b0c632db 2975 case KVM_S390_STORE_STATUS:
800c1065 2976 idx = srcu_read_lock(&vcpu->kvm->srcu);
bc923cc9 2977 r = kvm_s390_vcpu_store_status(vcpu, arg);
800c1065 2978 srcu_read_unlock(&vcpu->kvm->srcu, idx);
bc923cc9 2979 break;
b0c632db
HC
2980 case KVM_S390_SET_INITIAL_PSW: {
2981 psw_t psw;
2982
bc923cc9 2983 r = -EFAULT;
b0c632db 2984 if (copy_from_user(&psw, argp, sizeof(psw)))
bc923cc9
AK
2985 break;
2986 r = kvm_arch_vcpu_ioctl_set_initial_psw(vcpu, psw);
2987 break;
b0c632db
HC
2988 }
2989 case KVM_S390_INITIAL_RESET:
bc923cc9
AK
2990 r = kvm_arch_vcpu_ioctl_initial_reset(vcpu);
2991 break;
14eebd91
CO
2992 case KVM_SET_ONE_REG:
2993 case KVM_GET_ONE_REG: {
2994 struct kvm_one_reg reg;
2995 r = -EFAULT;
2996 if (copy_from_user(&reg, argp, sizeof(reg)))
2997 break;
2998 if (ioctl == KVM_SET_ONE_REG)
2999 r = kvm_arch_vcpu_ioctl_set_one_reg(vcpu, &reg);
3000 else
3001 r = kvm_arch_vcpu_ioctl_get_one_reg(vcpu, &reg);
3002 break;
3003 }
27e0393f
CO
3004#ifdef CONFIG_KVM_S390_UCONTROL
3005 case KVM_S390_UCAS_MAP: {
3006 struct kvm_s390_ucas_mapping ucasmap;
3007
3008 if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
3009 r = -EFAULT;
3010 break;
3011 }
3012
3013 if (!kvm_is_ucontrol(vcpu->kvm)) {
3014 r = -EINVAL;
3015 break;
3016 }
3017
3018 r = gmap_map_segment(vcpu->arch.gmap, ucasmap.user_addr,
3019 ucasmap.vcpu_addr, ucasmap.length);
3020 break;
3021 }
3022 case KVM_S390_UCAS_UNMAP: {
3023 struct kvm_s390_ucas_mapping ucasmap;
3024
3025 if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
3026 r = -EFAULT;
3027 break;
3028 }
3029
3030 if (!kvm_is_ucontrol(vcpu->kvm)) {
3031 r = -EINVAL;
3032 break;
3033 }
3034
3035 r = gmap_unmap_segment(vcpu->arch.gmap, ucasmap.vcpu_addr,
3036 ucasmap.length);
3037 break;
3038 }
3039#endif
ccc7910f 3040 case KVM_S390_VCPU_FAULT: {
527e30b4 3041 r = gmap_fault(vcpu->arch.gmap, arg, 0);
ccc7910f
CO
3042 break;
3043 }
d6712df9
CH
3044 case KVM_ENABLE_CAP:
3045 {
3046 struct kvm_enable_cap cap;
3047 r = -EFAULT;
3048 if (copy_from_user(&cap, argp, sizeof(cap)))
3049 break;
3050 r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap);
3051 break;
3052 }
41408c28
TH
3053 case KVM_S390_MEM_OP: {
3054 struct kvm_s390_mem_op mem_op;
3055
3056 if (copy_from_user(&mem_op, argp, sizeof(mem_op)) == 0)
3057 r = kvm_s390_guest_mem_op(vcpu, &mem_op);
3058 else
3059 r = -EFAULT;
3060 break;
3061 }
816c7667
JF
3062 case KVM_S390_SET_IRQ_STATE: {
3063 struct kvm_s390_irq_state irq_state;
3064
3065 r = -EFAULT;
3066 if (copy_from_user(&irq_state, argp, sizeof(irq_state)))
3067 break;
3068 if (irq_state.len > VCPU_IRQS_MAX_BUF ||
3069 irq_state.len == 0 ||
3070 irq_state.len % sizeof(struct kvm_s390_irq) > 0) {
3071 r = -EINVAL;
3072 break;
3073 }
3074 r = kvm_s390_set_irq_state(vcpu,
3075 (void __user *) irq_state.buf,
3076 irq_state.len);
3077 break;
3078 }
3079 case KVM_S390_GET_IRQ_STATE: {
3080 struct kvm_s390_irq_state irq_state;
3081
3082 r = -EFAULT;
3083 if (copy_from_user(&irq_state, argp, sizeof(irq_state)))
3084 break;
3085 if (irq_state.len == 0) {
3086 r = -EINVAL;
3087 break;
3088 }
3089 r = kvm_s390_get_irq_state(vcpu,
3090 (__u8 __user *) irq_state.buf,
3091 irq_state.len);
3092 break;
3093 }
b0c632db 3094 default:
3e6afcf1 3095 r = -ENOTTY;
b0c632db 3096 }
bc923cc9 3097 return r;
b0c632db
HC
3098}
3099
5b1c1493
CO
3100int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
3101{
3102#ifdef CONFIG_KVM_S390_UCONTROL
3103 if ((vmf->pgoff == KVM_S390_SIE_PAGE_OFFSET)
3104 && (kvm_is_ucontrol(vcpu->kvm))) {
3105 vmf->page = virt_to_page(vcpu->arch.sie_block);
3106 get_page(vmf->page);
3107 return 0;
3108 }
3109#endif
3110 return VM_FAULT_SIGBUS;
3111}
3112
5587027c
AK
3113int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
3114 unsigned long npages)
db3fe4eb
TY
3115{
3116 return 0;
3117}
3118
b0c632db 3119/* Section: memory related */
f7784b8e
MT
3120int kvm_arch_prepare_memory_region(struct kvm *kvm,
3121 struct kvm_memory_slot *memslot,
09170a49 3122 const struct kvm_userspace_memory_region *mem,
7b6195a9 3123 enum kvm_mr_change change)
b0c632db 3124{
dd2887e7
NW
3125 /* A few sanity checks. Memory slots have to start and end at a
3126 segment boundary (1 MB). The memory in userland may be fragmented
3127 into various different vmas. It is okay to mmap() and munmap()
3128 memory in this slot after doing this call, at any time */
b0c632db 3129
598841ca 3130 if (mem->userspace_addr & 0xffffful)
b0c632db
HC
3131 return -EINVAL;
3132
598841ca 3133 if (mem->memory_size & 0xffffful)
b0c632db
HC
3134 return -EINVAL;
3135
a3a92c31
DD
3136 if (mem->guest_phys_addr + mem->memory_size > kvm->arch.mem_limit)
3137 return -EINVAL;
3138
f7784b8e
MT
3139 return 0;
3140}
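
/*
 * Userspace sketch (hypothetical helper): a memslot registration that
 * passes the segment alignment checks above - both the host address and
 * the size must be multiples of 1 MB.
 */
#include <linux/kvm.h>
#include <stdint.h>
#include <sys/ioctl.h>

static int example_set_memslot(int vm_fd, void *host_mem, uint64_t size)
{
	struct kvm_userspace_memory_region mem = {
		.slot            = 0,
		.guest_phys_addr = 0,
		.memory_size     = size,		/* 1 MB multiple */
		.userspace_addr  = (uint64_t)host_mem,	/* 1 MB aligned */
	};

	return ioctl(vm_fd, KVM_SET_USER_MEMORY_REGION, &mem);
}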
3141
3142void kvm_arch_commit_memory_region(struct kvm *kvm,
09170a49 3143 const struct kvm_userspace_memory_region *mem,
8482644a 3144 const struct kvm_memory_slot *old,
f36f3f28 3145 const struct kvm_memory_slot *new,
8482644a 3146 enum kvm_mr_change change)
f7784b8e 3147{
f7850c92 3148 int rc;
f7784b8e 3149
2cef4deb
CB
3150 /* If the basics of the memslot do not change, we do not want
3151 * to update the gmap. Every update causes several unnecessary
3152 * segment translation exceptions. This is usually handled just
3153 * fine by the normal fault handler + gmap, but it will also
3154 * cause faults on the prefix page of running guest CPUs.
3155 */
3156 if (old->userspace_addr == mem->userspace_addr &&
3157 old->base_gfn * PAGE_SIZE == mem->guest_phys_addr &&
3158 old->npages * PAGE_SIZE == mem->memory_size)
3159 return;
598841ca
CO
3160
3161 rc = gmap_map_segment(kvm->arch.gmap, mem->userspace_addr,
3162 mem->guest_phys_addr, mem->memory_size);
3163 if (rc)
ea2cdd27 3164 pr_warn("failed to commit memory region\n");
598841ca 3165 return;
b0c632db
HC
3166}
3167
60a37709
AY
3168static inline unsigned long nonhyp_mask(int i)
3169{
3170 unsigned int nonhyp_fai = (sclp.hmfai << i * 2) >> 30;
3171
3172 return 0x0000ffffffffffffUL >> (nonhyp_fai << 4);
3173}
3174
3491caf2
CB
3175void kvm_arch_vcpu_block_finish(struct kvm_vcpu *vcpu)
3176{
3177 vcpu->valid_wakeup = false;
3178}
3179
b0c632db
HC
3180static int __init kvm_s390_init(void)
3181{
60a37709
AY
3182 int i;
3183
07197fd0
DH
3184 if (!sclp.has_sief2) {
3185 pr_info("SIE not available\n");
3186 return -ENODEV;
3187 }
3188
60a37709
AY
3189 for (i = 0; i < 16; i++)
3190 kvm_s390_fac_list_mask[i] |=
3191 S390_lowcore.stfle_fac_list[i] & nonhyp_mask(i);
3192
9d8d5786 3193 return kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
b0c632db
HC
3194}
3195
3196static void __exit kvm_s390_exit(void)
3197{
3198 kvm_exit();
3199}
3200
3201module_init(kvm_s390_init);
3202module_exit(kvm_s390_exit);
566af940
CH
3203
3204/*
3205 * Enable autoloading of the kvm module.
3206 * Note that we add the module alias here instead of virt/kvm/kvm_main.c
3207 * since x86 takes a different approach.
3208 */
3209#include <linux/miscdevice.h>
3210MODULE_ALIAS_MISCDEV(KVM_MINOR);
3211MODULE_ALIAS("devname:kvm");