arch/s390/kvm/kvm-s390.c
/*
 * hosting zSeries kernel virtual machines
 *
 * Copyright IBM Corp. 2008, 2009
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (version 2 only)
 * as published by the Free Software Foundation.
 *
 *    Author(s): Carsten Otte <cotte@de.ibm.com>
 *               Christian Borntraeger <borntraeger@de.ibm.com>
 *               Heiko Carstens <heiko.carstens@de.ibm.com>
 *               Christian Ehrhardt <ehrhardt@de.ibm.com>
 *               Jason J. Herne <jjherne@us.ibm.com>
 */

#include <linux/compiler.h>
#include <linux/err.h>
#include <linux/fs.h>
#include <linux/hrtimer.h>
#include <linux/init.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/mman.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/timer.h>
#include <linux/vmalloc.h>
#include <linux/bitmap.h>
#include <asm/asm-offsets.h>
#include <asm/lowcore.h>
#include <asm/etr.h>
#include <asm/pgtable.h>
#include <asm/gmap.h>
#include <asm/nmi.h>
#include <asm/switch_to.h>
#include <asm/isc.h>
#include <asm/sclp.h>
#include <asm/cpacf.h>
#include "kvm-s390.h"
#include "gaccess.h"

#define KMSG_COMPONENT "kvm-s390"
#undef pr_fmt
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#define CREATE_TRACE_POINTS
#include "trace.h"
#include "trace-s390.h"

#define MEM_OP_MAX_SIZE 65536	/* Maximum transfer size for KVM_S390_MEM_OP */
#define LOCAL_IRQS 32
#define VCPU_IRQS_MAX_BUF (sizeof(struct kvm_s390_irq) * \
			   (KVM_MAX_VCPUS + LOCAL_IRQS))

#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU

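/*
 * Per-vcpu event counters, exposed to user space via debugfs (typically
 * under /sys/kernel/debug/kvm). VCPU_STAT() records the offset of each
 * counter within struct kvm_vcpu so the generic KVM code can read it.
 */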
struct kvm_stats_debugfs_item debugfs_entries[] = {
	{ "userspace_handled", VCPU_STAT(exit_userspace) },
	{ "exit_null", VCPU_STAT(exit_null) },
	{ "exit_validity", VCPU_STAT(exit_validity) },
	{ "exit_stop_request", VCPU_STAT(exit_stop_request) },
	{ "exit_external_request", VCPU_STAT(exit_external_request) },
	{ "exit_external_interrupt", VCPU_STAT(exit_external_interrupt) },
	{ "exit_instruction", VCPU_STAT(exit_instruction) },
	{ "exit_program_interruption", VCPU_STAT(exit_program_interruption) },
	{ "exit_instr_and_program_int", VCPU_STAT(exit_instr_and_program) },
	{ "exit_operation_exception", VCPU_STAT(exit_operation_exception) },
	{ "halt_successful_poll", VCPU_STAT(halt_successful_poll) },
	{ "halt_attempted_poll", VCPU_STAT(halt_attempted_poll) },
	{ "halt_poll_invalid", VCPU_STAT(halt_poll_invalid) },
	{ "halt_wakeup", VCPU_STAT(halt_wakeup) },
	{ "instruction_lctlg", VCPU_STAT(instruction_lctlg) },
	{ "instruction_lctl", VCPU_STAT(instruction_lctl) },
	{ "instruction_stctl", VCPU_STAT(instruction_stctl) },
	{ "instruction_stctg", VCPU_STAT(instruction_stctg) },
	{ "deliver_emergency_signal", VCPU_STAT(deliver_emergency_signal) },
	{ "deliver_external_call", VCPU_STAT(deliver_external_call) },
	{ "deliver_service_signal", VCPU_STAT(deliver_service_signal) },
	{ "deliver_virtio_interrupt", VCPU_STAT(deliver_virtio_interrupt) },
	{ "deliver_stop_signal", VCPU_STAT(deliver_stop_signal) },
	{ "deliver_prefix_signal", VCPU_STAT(deliver_prefix_signal) },
	{ "deliver_restart_signal", VCPU_STAT(deliver_restart_signal) },
	{ "deliver_program_interruption", VCPU_STAT(deliver_program_int) },
	{ "exit_wait_state", VCPU_STAT(exit_wait_state) },
	{ "instruction_pfmf", VCPU_STAT(instruction_pfmf) },
	{ "instruction_stidp", VCPU_STAT(instruction_stidp) },
	{ "instruction_spx", VCPU_STAT(instruction_spx) },
	{ "instruction_stpx", VCPU_STAT(instruction_stpx) },
	{ "instruction_stap", VCPU_STAT(instruction_stap) },
	{ "instruction_storage_key", VCPU_STAT(instruction_storage_key) },
	{ "instruction_ipte_interlock", VCPU_STAT(instruction_ipte_interlock) },
	{ "instruction_stsch", VCPU_STAT(instruction_stsch) },
	{ "instruction_chsc", VCPU_STAT(instruction_chsc) },
	{ "instruction_essa", VCPU_STAT(instruction_essa) },
	{ "instruction_stsi", VCPU_STAT(instruction_stsi) },
	{ "instruction_stfl", VCPU_STAT(instruction_stfl) },
	{ "instruction_tprot", VCPU_STAT(instruction_tprot) },
	{ "instruction_sthyi", VCPU_STAT(instruction_sthyi) },
	{ "instruction_sie", VCPU_STAT(instruction_sie) },
	{ "instruction_sigp_sense", VCPU_STAT(instruction_sigp_sense) },
	{ "instruction_sigp_sense_running", VCPU_STAT(instruction_sigp_sense_running) },
	{ "instruction_sigp_external_call", VCPU_STAT(instruction_sigp_external_call) },
	{ "instruction_sigp_emergency", VCPU_STAT(instruction_sigp_emergency) },
	{ "instruction_sigp_cond_emergency", VCPU_STAT(instruction_sigp_cond_emergency) },
	{ "instruction_sigp_start", VCPU_STAT(instruction_sigp_start) },
	{ "instruction_sigp_stop", VCPU_STAT(instruction_sigp_stop) },
	{ "instruction_sigp_stop_store_status", VCPU_STAT(instruction_sigp_stop_store_status) },
	{ "instruction_sigp_store_status", VCPU_STAT(instruction_sigp_store_status) },
	{ "instruction_sigp_store_adtl_status", VCPU_STAT(instruction_sigp_store_adtl_status) },
	{ "instruction_sigp_set_arch", VCPU_STAT(instruction_sigp_arch) },
	{ "instruction_sigp_set_prefix", VCPU_STAT(instruction_sigp_prefix) },
	{ "instruction_sigp_restart", VCPU_STAT(instruction_sigp_restart) },
	{ "instruction_sigp_cpu_reset", VCPU_STAT(instruction_sigp_cpu_reset) },
	{ "instruction_sigp_init_cpu_reset", VCPU_STAT(instruction_sigp_init_cpu_reset) },
	{ "instruction_sigp_unknown", VCPU_STAT(instruction_sigp_unknown) },
	{ "diagnose_10", VCPU_STAT(diagnose_10) },
	{ "diagnose_44", VCPU_STAT(diagnose_44) },
	{ "diagnose_9c", VCPU_STAT(diagnose_9c) },
	{ "diagnose_258", VCPU_STAT(diagnose_258) },
	{ "diagnose_308", VCPU_STAT(diagnose_308) },
	{ "diagnose_500", VCPU_STAT(diagnose_500) },
	{ NULL }
};

/* upper facilities limit for kvm */
unsigned long kvm_s390_fac_list_mask[16] = {
	0xffe6000000000000UL,
	0x005e000000000000UL,
};

unsigned long kvm_s390_fac_list_mask_size(void)
{
	BUILD_BUG_ON(ARRAY_SIZE(kvm_s390_fac_list_mask) > S390_ARCH_FAC_MASK_SIZE_U64);
	return ARRAY_SIZE(kvm_s390_fac_list_mask);
}

/* available cpu features supported by kvm */
static DECLARE_BITMAP(kvm_s390_available_cpu_feat, KVM_S390_VM_CPU_FEAT_NR_BITS);
/* available subfunctions indicated via query / "test bit" */
static struct kvm_s390_vm_cpu_subfunc kvm_s390_available_subfunc;

static struct gmap_notifier gmap_notifier;
static struct gmap_notifier vsie_gmap_notifier;
debug_info_t *kvm_s390_dbf;

/* Section: not file related */
int kvm_arch_hardware_enable(void)
{
	/* every s390 is virtualization enabled ;-) */
	return 0;
}

static void kvm_gmap_notifier(struct gmap *gmap, unsigned long start,
			      unsigned long end);

/*
 * This callback is executed during stop_machine(). All CPUs are therefore
 * temporarily stopped. In order not to change guest behavior, we have to
 * disable preemption whenever we touch the epoch of kvm and the VCPUs,
 * so a CPU won't be stopped while calculating with the epoch.
 */
static int kvm_clock_sync(struct notifier_block *notifier, unsigned long val,
			  void *v)
{
	struct kvm *kvm;
	struct kvm_vcpu *vcpu;
	int i;
	unsigned long long *delta = v;

	list_for_each_entry(kvm, &vm_list, vm_list) {
		kvm->arch.epoch -= *delta;
		kvm_for_each_vcpu(i, vcpu, kvm) {
			vcpu->arch.sie_block->epoch -= *delta;
			if (vcpu->arch.cputm_enabled)
				vcpu->arch.cputm_start += *delta;
		}
	}
	return NOTIFY_OK;
}

static struct notifier_block kvm_clock_notifier = {
	.notifier_call = kvm_clock_sync,
};

int kvm_arch_hardware_setup(void)
{
	gmap_notifier.notifier_call = kvm_gmap_notifier;
	gmap_register_pte_notifier(&gmap_notifier);
	vsie_gmap_notifier.notifier_call = kvm_s390_vsie_gmap_notifier;
	gmap_register_pte_notifier(&vsie_gmap_notifier);
	atomic_notifier_chain_register(&s390_epoch_delta_notifier,
				       &kvm_clock_notifier);
	return 0;
}

void kvm_arch_hardware_unsetup(void)
{
	gmap_unregister_pte_notifier(&gmap_notifier);
	gmap_unregister_pte_notifier(&vsie_gmap_notifier);
	atomic_notifier_chain_unregister(&s390_epoch_delta_notifier,
					 &kvm_clock_notifier);
}

static void allow_cpu_feat(unsigned long nr)
{
	set_bit_inv(nr, kvm_s390_available_cpu_feat);
}

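/*
 * PERFORM LOCKED OPERATION queries a subfunction by executing it with the
 * "test bit" modifier (bit 0x100 set in the function code); condition code
 * 0 then means "subfunction installed". Parameter registers are ignored in
 * this mode, so 0,0,0,0(0) is safe to pass.
 */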
static inline int plo_test_bit(unsigned char nr)
{
	register unsigned long r0 asm("0") = (unsigned long) nr | 0x100;
	int cc = 3; /* subfunction not available */

	asm volatile(
		/* Parameter registers are ignored for "test bit" */
		"	plo	0,0,0,0(0)\n"
		"	ipm	%0\n"
		"	srl	%0,28\n"
		: "=d" (cc)
		: "d" (r0)
		: "cc");
	return cc == 0;
}

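/*
 * Probe which optional processor functions the host provides: the PLO
 * subfunction mask is built bit by bit via plo_test_bit(), while the
 * CPACF crypto subfunctions (KM, KMC, ...) are filled in with their
 * respective query functions, gated by the matching MSA facility bits.
 */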
static void kvm_s390_cpu_feat_init(void)
{
	int i;

	for (i = 0; i < 256; ++i) {
		if (plo_test_bit(i))
			kvm_s390_available_subfunc.plo[i >> 3] |= 0x80 >> (i & 7);
	}

	if (test_facility(28)) /* TOD-clock steering */
		etr_ptff(kvm_s390_available_subfunc.ptff, ETR_PTFF_QAF);

	if (test_facility(17)) { /* MSA */
		__cpacf_query(CPACF_KMAC, kvm_s390_available_subfunc.kmac);
		__cpacf_query(CPACF_KMC, kvm_s390_available_subfunc.kmc);
		__cpacf_query(CPACF_KM, kvm_s390_available_subfunc.km);
		__cpacf_query(CPACF_KIMD, kvm_s390_available_subfunc.kimd);
		__cpacf_query(CPACF_KLMD, kvm_s390_available_subfunc.klmd);
	}
	if (test_facility(76)) /* MSA3 */
		__cpacf_query(CPACF_PCKMO, kvm_s390_available_subfunc.pckmo);
	if (test_facility(77)) { /* MSA4 */
		__cpacf_query(CPACF_KMCTR, kvm_s390_available_subfunc.kmctr);
		__cpacf_query(CPACF_KMF, kvm_s390_available_subfunc.kmf);
		__cpacf_query(CPACF_KMO, kvm_s390_available_subfunc.kmo);
		__cpacf_query(CPACF_PCC, kvm_s390_available_subfunc.pcc);
	}
	if (test_facility(57)) /* MSA5 */
		__cpacf_query(CPACF_PPNO, kvm_s390_available_subfunc.ppno);

	if (MACHINE_HAS_ESOP)
		allow_cpu_feat(KVM_S390_VM_CPU_FEAT_ESOP);
	/*
	 * We need SIE support, ESOP (PROT_READ protection for gmap_shadow),
	 * 64bit SCAO (SCA passthrough) and IDTE (for gmap_shadow unshadowing).
	 */
	if (!sclp.has_sief2 || !MACHINE_HAS_ESOP || !sclp.has_64bscao ||
	    !test_facility(3))
		return;
	allow_cpu_feat(KVM_S390_VM_CPU_FEAT_SIEF2);
}

int kvm_arch_init(void *opaque)
{
	kvm_s390_dbf = debug_register("kvm-trace", 32, 1, 7 * sizeof(long));
	if (!kvm_s390_dbf)
		return -ENOMEM;

	if (debug_register_view(kvm_s390_dbf, &debug_sprintf_view)) {
		debug_unregister(kvm_s390_dbf);
		return -ENOMEM;
	}

	kvm_s390_cpu_feat_init();

	/* Register floating interrupt controller interface. */
	return kvm_register_device_ops(&kvm_flic_ops, KVM_DEV_TYPE_FLIC);
}

void kvm_arch_exit(void)
{
	debug_unregister(kvm_s390_dbf);
}

/* Section: device related */
long kvm_arch_dev_ioctl(struct file *filp,
			unsigned int ioctl, unsigned long arg)
{
	if (ioctl == KVM_S390_ENABLE_SIE)
		return s390_enable_sie();
	return -EINVAL;
}

int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
{
	int r;

	switch (ext) {
	case KVM_CAP_S390_PSW:
	case KVM_CAP_S390_GMAP:
	case KVM_CAP_SYNC_MMU:
#ifdef CONFIG_KVM_S390_UCONTROL
	case KVM_CAP_S390_UCONTROL:
#endif
	case KVM_CAP_ASYNC_PF:
	case KVM_CAP_SYNC_REGS:
	case KVM_CAP_ONE_REG:
	case KVM_CAP_ENABLE_CAP:
	case KVM_CAP_S390_CSS_SUPPORT:
	case KVM_CAP_IOEVENTFD:
	case KVM_CAP_DEVICE_CTRL:
	case KVM_CAP_ENABLE_CAP_VM:
	case KVM_CAP_S390_IRQCHIP:
	case KVM_CAP_VM_ATTRIBUTES:
	case KVM_CAP_MP_STATE:
	case KVM_CAP_S390_INJECT_IRQ:
	case KVM_CAP_S390_USER_SIGP:
	case KVM_CAP_S390_USER_STSI:
	case KVM_CAP_S390_SKEYS:
	case KVM_CAP_S390_IRQ_STATE:
		r = 1;
		break;
	case KVM_CAP_S390_MEM_OP:
		r = MEM_OP_MAX_SIZE;
		break;
	case KVM_CAP_NR_VCPUS:
	case KVM_CAP_MAX_VCPUS:
		r = KVM_S390_BSCA_CPU_SLOTS;
		if (sclp.has_esca && sclp.has_64bscao)
			r = KVM_S390_ESCA_CPU_SLOTS;
		break;
	case KVM_CAP_NR_MEMSLOTS:
		r = KVM_USER_MEM_SLOTS;
		break;
	case KVM_CAP_S390_COW:
		r = MACHINE_HAS_ESOP;
		break;
	case KVM_CAP_S390_VECTOR_REGISTERS:
		r = MACHINE_HAS_VX;
		break;
	case KVM_CAP_S390_RI:
		r = test_facility(64);
		break;
	default:
		r = 0;
	}
	return r;
}

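/*
 * Walk every guest page of the memslot and transfer the per-page dirty
 * state from the host page tables into KVM's dirty bitmap. The walk can
 * take a while, so reschedule regularly and bail out on fatal signals.
 */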
static void kvm_s390_sync_dirty_log(struct kvm *kvm,
				    struct kvm_memory_slot *memslot)
{
	gfn_t cur_gfn, last_gfn;
	unsigned long address;
	struct gmap *gmap = kvm->arch.gmap;

	/* Loop over all guest pages */
	last_gfn = memslot->base_gfn + memslot->npages;
	for (cur_gfn = memslot->base_gfn; cur_gfn <= last_gfn; cur_gfn++) {
		address = gfn_to_hva_memslot(memslot, cur_gfn);

		if (test_and_clear_guest_dirty(gmap->mm, address))
			mark_page_dirty(kvm, cur_gfn);
		if (fatal_signal_pending(current))
			return;
		cond_resched();
	}
}

/* Section: vm related */
static void sca_del_vcpu(struct kvm_vcpu *vcpu);

/*
 * Get (and clear) the dirty memory log for a memory slot.
 */
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
			       struct kvm_dirty_log *log)
{
	int r;
	unsigned long n;
	struct kvm_memslots *slots;
	struct kvm_memory_slot *memslot;
	int is_dirty = 0;

	mutex_lock(&kvm->slots_lock);

	r = -EINVAL;
	if (log->slot >= KVM_USER_MEM_SLOTS)
		goto out;

	slots = kvm_memslots(kvm);
	memslot = id_to_memslot(slots, log->slot);
	r = -ENOENT;
	if (!memslot->dirty_bitmap)
		goto out;

	kvm_s390_sync_dirty_log(kvm, memslot);
	r = kvm_get_dirty_log(kvm, log, &is_dirty);
	if (r)
		goto out;

	/* Clear the dirty log */
	if (is_dirty) {
		n = kvm_dirty_bitmap_bytes(memslot);
		memset(memslot->dirty_bitmap, 0, n);
	}
	r = 0;
out:
	mutex_unlock(&kvm->slots_lock);
	return r;
}

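/*
 * VM-wide capabilities toggled by user space. Capabilities that change the
 * facility lists (vector registers, runtime instrumentation) may only be
 * enabled while no VCPU has been created yet, hence the created_vcpus
 * check under kvm->lock.
 */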
static int kvm_vm_ioctl_enable_cap(struct kvm *kvm, struct kvm_enable_cap *cap)
{
	int r;

	if (cap->flags)
		return -EINVAL;

	switch (cap->cap) {
	case KVM_CAP_S390_IRQCHIP:
		VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_IRQCHIP");
		kvm->arch.use_irqchip = 1;
		r = 0;
		break;
	case KVM_CAP_S390_USER_SIGP:
		VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_USER_SIGP");
		kvm->arch.user_sigp = 1;
		r = 0;
		break;
	case KVM_CAP_S390_VECTOR_REGISTERS:
		mutex_lock(&kvm->lock);
		if (kvm->created_vcpus) {
			r = -EBUSY;
		} else if (MACHINE_HAS_VX) {
			set_kvm_facility(kvm->arch.model.fac_mask, 129);
			set_kvm_facility(kvm->arch.model.fac_list, 129);
			r = 0;
		} else
			r = -EINVAL;
		mutex_unlock(&kvm->lock);
		VM_EVENT(kvm, 3, "ENABLE: CAP_S390_VECTOR_REGISTERS %s",
			 r ? "(not available)" : "(success)");
		break;
	case KVM_CAP_S390_RI:
		r = -EINVAL;
		mutex_lock(&kvm->lock);
		if (kvm->created_vcpus) {
			r = -EBUSY;
		} else if (test_facility(64)) {
			set_kvm_facility(kvm->arch.model.fac_mask, 64);
			set_kvm_facility(kvm->arch.model.fac_list, 64);
			r = 0;
		}
		mutex_unlock(&kvm->lock);
		VM_EVENT(kvm, 3, "ENABLE: CAP_S390_RI %s",
			 r ? "(not available)" : "(success)");
		break;
	case KVM_CAP_S390_USER_STSI:
		VM_EVENT(kvm, 3, "%s", "ENABLE: CAP_S390_USER_STSI");
		kvm->arch.user_stsi = 1;
		r = 0;
		break;
	default:
		r = -EINVAL;
		break;
	}
	return r;
}

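/*
 * KVM_S390_VM_MEM_CTRL attribute group: query and configure guest memory
 * properties (CMMA collaborative memory management, the maximum guest
 * address). Changing the memory limit replaces the VM's gmap and is only
 * possible before the first VCPU exists.
 */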
static int kvm_s390_get_mem_control(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	switch (attr->attr) {
	case KVM_S390_VM_MEM_LIMIT_SIZE:
		ret = 0;
		VM_EVENT(kvm, 3, "QUERY: max guest memory: %lu bytes",
			 kvm->arch.mem_limit);
		if (put_user(kvm->arch.mem_limit, (u64 __user *)attr->addr))
			ret = -EFAULT;
		break;
	default:
		ret = -ENXIO;
		break;
	}
	return ret;
}

static int kvm_s390_set_mem_control(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;
	unsigned int idx;

	switch (attr->attr) {
	case KVM_S390_VM_MEM_ENABLE_CMMA:
		ret = -ENXIO;
		if (!sclp.has_cmma)
			break;

		ret = -EBUSY;
		VM_EVENT(kvm, 3, "%s", "ENABLE: CMMA support");
		mutex_lock(&kvm->lock);
		if (!kvm->created_vcpus) {
			kvm->arch.use_cmma = 1;
			ret = 0;
		}
		mutex_unlock(&kvm->lock);
		break;
	case KVM_S390_VM_MEM_CLR_CMMA:
		ret = -ENXIO;
		if (!sclp.has_cmma)
			break;
		ret = -EINVAL;
		if (!kvm->arch.use_cmma)
			break;

		VM_EVENT(kvm, 3, "%s", "RESET: CMMA states");
		mutex_lock(&kvm->lock);
		idx = srcu_read_lock(&kvm->srcu);
		s390_reset_cmma(kvm->arch.gmap->mm);
		srcu_read_unlock(&kvm->srcu, idx);
		mutex_unlock(&kvm->lock);
		ret = 0;
		break;
	case KVM_S390_VM_MEM_LIMIT_SIZE: {
		unsigned long new_limit;

		if (kvm_is_ucontrol(kvm))
			return -EINVAL;

		if (get_user(new_limit, (u64 __user *)attr->addr))
			return -EFAULT;

		if (kvm->arch.mem_limit != KVM_S390_NO_MEM_LIMIT &&
		    new_limit > kvm->arch.mem_limit)
			return -E2BIG;

		if (!new_limit)
			return -EINVAL;

		/* gmap_create takes last usable address */
		if (new_limit != KVM_S390_NO_MEM_LIMIT)
			new_limit -= 1;

		ret = -EBUSY;
		mutex_lock(&kvm->lock);
		if (!kvm->created_vcpus) {
			/* gmap_create will round the limit up */
			struct gmap *new = gmap_create(current->mm, new_limit);

			if (!new) {
				ret = -ENOMEM;
			} else {
				gmap_remove(kvm->arch.gmap);
				new->private = kvm;
				kvm->arch.gmap = new;
				ret = 0;
			}
		}
		mutex_unlock(&kvm->lock);
		VM_EVENT(kvm, 3, "SET: max guest address: %lu", new_limit);
		VM_EVENT(kvm, 3, "New guest asce: 0x%pK",
			 (void *) kvm->arch.gmap->asce);
		break;
	}
	default:
		ret = -ENXIO;
		break;
	}
	return ret;
}

static void kvm_s390_vcpu_crypto_setup(struct kvm_vcpu *vcpu);

static int kvm_s390_vm_set_crypto(struct kvm *kvm, struct kvm_device_attr *attr)
{
	struct kvm_vcpu *vcpu;
	int i;

	if (!test_kvm_facility(kvm, 76))
		return -EINVAL;

	mutex_lock(&kvm->lock);
	switch (attr->attr) {
	case KVM_S390_VM_CRYPTO_ENABLE_AES_KW:
		get_random_bytes(
			kvm->arch.crypto.crycb->aes_wrapping_key_mask,
			sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
		kvm->arch.crypto.aes_kw = 1;
		VM_EVENT(kvm, 3, "%s", "ENABLE: AES keywrapping support");
		break;
	case KVM_S390_VM_CRYPTO_ENABLE_DEA_KW:
		get_random_bytes(
			kvm->arch.crypto.crycb->dea_wrapping_key_mask,
			sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));
		kvm->arch.crypto.dea_kw = 1;
		VM_EVENT(kvm, 3, "%s", "ENABLE: DEA keywrapping support");
		break;
	case KVM_S390_VM_CRYPTO_DISABLE_AES_KW:
		kvm->arch.crypto.aes_kw = 0;
		memset(kvm->arch.crypto.crycb->aes_wrapping_key_mask, 0,
			sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
		VM_EVENT(kvm, 3, "%s", "DISABLE: AES keywrapping support");
		break;
	case KVM_S390_VM_CRYPTO_DISABLE_DEA_KW:
		kvm->arch.crypto.dea_kw = 0;
		memset(kvm->arch.crypto.crycb->dea_wrapping_key_mask, 0,
			sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));
		VM_EVENT(kvm, 3, "%s", "DISABLE: DEA keywrapping support");
		break;
	default:
		mutex_unlock(&kvm->lock);
		return -ENXIO;
	}

	kvm_for_each_vcpu(i, vcpu, kvm) {
		kvm_s390_vcpu_crypto_setup(vcpu);
		exit_sie(vcpu);
	}
	mutex_unlock(&kvm->lock);
	return 0;
}

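/*
 * KVM_S390_VM_TOD attribute group: the guest TOD clock is exposed as two
 * values, the 64-bit TOD base (KVM_S390_VM_TOD_LOW) and the epoch
 * extension byte (KVM_S390_VM_TOD_HIGH). Only a zero extension is
 * accepted here.
 */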
static int kvm_s390_set_tod_high(struct kvm *kvm, struct kvm_device_attr *attr)
{
	u8 gtod_high;

	if (copy_from_user(&gtod_high, (void __user *)attr->addr,
			   sizeof(gtod_high)))
		return -EFAULT;

	if (gtod_high != 0)
		return -EINVAL;
	VM_EVENT(kvm, 3, "SET: TOD extension: 0x%x", gtod_high);

	return 0;
}

static int kvm_s390_set_tod_low(struct kvm *kvm, struct kvm_device_attr *attr)
{
	u64 gtod;

	if (copy_from_user(&gtod, (void __user *)attr->addr, sizeof(gtod)))
		return -EFAULT;

	kvm_s390_set_tod_clock(kvm, gtod);
	VM_EVENT(kvm, 3, "SET: TOD base: 0x%llx", gtod);
	return 0;
}

static int kvm_s390_set_tod(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	if (attr->flags)
		return -EINVAL;

	switch (attr->attr) {
	case KVM_S390_VM_TOD_HIGH:
		ret = kvm_s390_set_tod_high(kvm, attr);
		break;
	case KVM_S390_VM_TOD_LOW:
		ret = kvm_s390_set_tod_low(kvm, attr);
		break;
	default:
		ret = -ENXIO;
		break;
	}
	return ret;
}

static int kvm_s390_get_tod_high(struct kvm *kvm, struct kvm_device_attr *attr)
{
	u8 gtod_high = 0;

	if (copy_to_user((void __user *)attr->addr, &gtod_high,
			 sizeof(gtod_high)))
		return -EFAULT;
	VM_EVENT(kvm, 3, "QUERY: TOD extension: 0x%x", gtod_high);

	return 0;
}

static int kvm_s390_get_tod_low(struct kvm *kvm, struct kvm_device_attr *attr)
{
	u64 gtod;

	gtod = kvm_s390_get_tod_clock_fast(kvm);
	if (copy_to_user((void __user *)attr->addr, &gtod, sizeof(gtod)))
		return -EFAULT;
	VM_EVENT(kvm, 3, "QUERY: TOD base: 0x%llx", gtod);

	return 0;
}

static int kvm_s390_get_tod(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	if (attr->flags)
		return -EINVAL;

	switch (attr->attr) {
	case KVM_S390_VM_TOD_HIGH:
		ret = kvm_s390_get_tod_high(kvm, attr);
		break;
	case KVM_S390_VM_TOD_LOW:
		ret = kvm_s390_get_tod_low(kvm, attr);
		break;
	default:
		ret = -ENXIO;
		break;
	}
	return ret;
}

static int kvm_s390_set_processor(struct kvm *kvm, struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_cpu_processor *proc;
	u16 lowest_ibc, unblocked_ibc;
	int ret = 0;

	mutex_lock(&kvm->lock);
	if (kvm->created_vcpus) {
		ret = -EBUSY;
		goto out;
	}
	proc = kzalloc(sizeof(*proc), GFP_KERNEL);
	if (!proc) {
		ret = -ENOMEM;
		goto out;
	}
	if (!copy_from_user(proc, (void __user *)attr->addr,
			    sizeof(*proc))) {
		kvm->arch.model.cpuid = proc->cpuid;
		lowest_ibc = sclp.ibc >> 16 & 0xfff;
		unblocked_ibc = sclp.ibc & 0xfff;
		if (lowest_ibc) {
			if (proc->ibc > unblocked_ibc)
				kvm->arch.model.ibc = unblocked_ibc;
			else if (proc->ibc < lowest_ibc)
				kvm->arch.model.ibc = lowest_ibc;
			else
				kvm->arch.model.ibc = proc->ibc;
		}
		memcpy(kvm->arch.model.fac_list, proc->fac_list,
		       S390_ARCH_FAC_LIST_SIZE_BYTE);
	} else
		ret = -EFAULT;
	kfree(proc);
out:
	mutex_unlock(&kvm->lock);
	return ret;
}

static int kvm_s390_set_processor_feat(struct kvm *kvm,
				       struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_cpu_feat data;
	int ret = -EBUSY;

	if (copy_from_user(&data, (void __user *)attr->addr, sizeof(data)))
		return -EFAULT;
	if (!bitmap_subset((unsigned long *) data.feat,
			   kvm_s390_available_cpu_feat,
			   KVM_S390_VM_CPU_FEAT_NR_BITS))
		return -EINVAL;

	mutex_lock(&kvm->lock);
	if (!atomic_read(&kvm->online_vcpus)) {
		bitmap_copy(kvm->arch.cpu_feat, (unsigned long *) data.feat,
			    KVM_S390_VM_CPU_FEAT_NR_BITS);
		ret = 0;
	}
	mutex_unlock(&kvm->lock);
	return ret;
}

static int kvm_s390_set_processor_subfunc(struct kvm *kvm,
					  struct kvm_device_attr *attr)
{
	/*
	 * Once supported by kernel + hw, we have to store the subfunctions
	 * in kvm->arch and remember that user space configured them.
	 */
	return -ENXIO;
}

static int kvm_s390_set_cpu_model(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret = -ENXIO;

	switch (attr->attr) {
	case KVM_S390_VM_CPU_PROCESSOR:
		ret = kvm_s390_set_processor(kvm, attr);
		break;
	case KVM_S390_VM_CPU_PROCESSOR_FEAT:
		ret = kvm_s390_set_processor_feat(kvm, attr);
		break;
	case KVM_S390_VM_CPU_PROCESSOR_SUBFUNC:
		ret = kvm_s390_set_processor_subfunc(kvm, attr);
		break;
	}
	return ret;
}

static int kvm_s390_get_processor(struct kvm *kvm, struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_cpu_processor *proc;
	int ret = 0;

	proc = kzalloc(sizeof(*proc), GFP_KERNEL);
	if (!proc) {
		ret = -ENOMEM;
		goto out;
	}
	proc->cpuid = kvm->arch.model.cpuid;
	proc->ibc = kvm->arch.model.ibc;
	memcpy(&proc->fac_list, kvm->arch.model.fac_list,
	       S390_ARCH_FAC_LIST_SIZE_BYTE);
	if (copy_to_user((void __user *)attr->addr, proc, sizeof(*proc)))
		ret = -EFAULT;
	kfree(proc);
out:
	return ret;
}

static int kvm_s390_get_machine(struct kvm *kvm, struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_cpu_machine *mach;
	int ret = 0;

	mach = kzalloc(sizeof(*mach), GFP_KERNEL);
	if (!mach) {
		ret = -ENOMEM;
		goto out;
	}
	get_cpu_id((struct cpuid *) &mach->cpuid);
	mach->ibc = sclp.ibc;
	memcpy(&mach->fac_mask, kvm->arch.model.fac_mask,
	       S390_ARCH_FAC_LIST_SIZE_BYTE);
	memcpy((unsigned long *)&mach->fac_list, S390_lowcore.stfle_fac_list,
	       S390_ARCH_FAC_LIST_SIZE_BYTE);
	if (copy_to_user((void __user *)attr->addr, mach, sizeof(*mach)))
		ret = -EFAULT;
	kfree(mach);
out:
	return ret;
}

static int kvm_s390_get_processor_feat(struct kvm *kvm,
				       struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_cpu_feat data;

	bitmap_copy((unsigned long *) data.feat, kvm->arch.cpu_feat,
		    KVM_S390_VM_CPU_FEAT_NR_BITS);
	if (copy_to_user((void __user *)attr->addr, &data, sizeof(data)))
		return -EFAULT;
	return 0;
}

static int kvm_s390_get_machine_feat(struct kvm *kvm,
				     struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_cpu_feat data;

	bitmap_copy((unsigned long *) data.feat,
		    kvm_s390_available_cpu_feat,
		    KVM_S390_VM_CPU_FEAT_NR_BITS);
	if (copy_to_user((void __user *)attr->addr, &data, sizeof(data)))
		return -EFAULT;
	return 0;
}

static int kvm_s390_get_processor_subfunc(struct kvm *kvm,
					  struct kvm_device_attr *attr)
{
	/*
	 * Once we can actually configure subfunctions (kernel + hw support),
	 * we have to check if they were already set by user space, if so copy
	 * them from kvm->arch.
	 */
	return -ENXIO;
}

static int kvm_s390_get_machine_subfunc(struct kvm *kvm,
					struct kvm_device_attr *attr)
{
	if (copy_to_user((void __user *)attr->addr, &kvm_s390_available_subfunc,
	    sizeof(struct kvm_s390_vm_cpu_subfunc)))
		return -EFAULT;
	return 0;
}

static int kvm_s390_get_cpu_model(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret = -ENXIO;

	switch (attr->attr) {
	case KVM_S390_VM_CPU_PROCESSOR:
		ret = kvm_s390_get_processor(kvm, attr);
		break;
	case KVM_S390_VM_CPU_MACHINE:
		ret = kvm_s390_get_machine(kvm, attr);
		break;
	case KVM_S390_VM_CPU_PROCESSOR_FEAT:
		ret = kvm_s390_get_processor_feat(kvm, attr);
		break;
	case KVM_S390_VM_CPU_MACHINE_FEAT:
		ret = kvm_s390_get_machine_feat(kvm, attr);
		break;
	case KVM_S390_VM_CPU_PROCESSOR_SUBFUNC:
		ret = kvm_s390_get_processor_subfunc(kvm, attr);
		break;
	case KVM_S390_VM_CPU_MACHINE_SUBFUNC:
		ret = kvm_s390_get_machine_subfunc(kvm, attr);
		break;
	}
	return ret;
}

static int kvm_s390_vm_set_attr(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	switch (attr->group) {
	case KVM_S390_VM_MEM_CTRL:
		ret = kvm_s390_set_mem_control(kvm, attr);
		break;
	case KVM_S390_VM_TOD:
		ret = kvm_s390_set_tod(kvm, attr);
		break;
	case KVM_S390_VM_CPU_MODEL:
		ret = kvm_s390_set_cpu_model(kvm, attr);
		break;
	case KVM_S390_VM_CRYPTO:
		ret = kvm_s390_vm_set_crypto(kvm, attr);
		break;
	default:
		ret = -ENXIO;
		break;
	}

	return ret;
}

static int kvm_s390_vm_get_attr(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	switch (attr->group) {
	case KVM_S390_VM_MEM_CTRL:
		ret = kvm_s390_get_mem_control(kvm, attr);
		break;
	case KVM_S390_VM_TOD:
		ret = kvm_s390_get_tod(kvm, attr);
		break;
	case KVM_S390_VM_CPU_MODEL:
		ret = kvm_s390_get_cpu_model(kvm, attr);
		break;
	default:
		ret = -ENXIO;
		break;
	}

	return ret;
}

static int kvm_s390_vm_has_attr(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	switch (attr->group) {
	case KVM_S390_VM_MEM_CTRL:
		switch (attr->attr) {
		case KVM_S390_VM_MEM_ENABLE_CMMA:
		case KVM_S390_VM_MEM_CLR_CMMA:
			ret = sclp.has_cmma ? 0 : -ENXIO;
			break;
		case KVM_S390_VM_MEM_LIMIT_SIZE:
			ret = 0;
			break;
		default:
			ret = -ENXIO;
			break;
		}
		break;
	case KVM_S390_VM_TOD:
		switch (attr->attr) {
		case KVM_S390_VM_TOD_LOW:
		case KVM_S390_VM_TOD_HIGH:
			ret = 0;
			break;
		default:
			ret = -ENXIO;
			break;
		}
		break;
	case KVM_S390_VM_CPU_MODEL:
		switch (attr->attr) {
		case KVM_S390_VM_CPU_PROCESSOR:
		case KVM_S390_VM_CPU_MACHINE:
		case KVM_S390_VM_CPU_PROCESSOR_FEAT:
		case KVM_S390_VM_CPU_MACHINE_FEAT:
		case KVM_S390_VM_CPU_MACHINE_SUBFUNC:
			ret = 0;
			break;
		/* configuring subfunctions is not supported yet */
		case KVM_S390_VM_CPU_PROCESSOR_SUBFUNC:
		default:
			ret = -ENXIO;
			break;
		}
		break;
	case KVM_S390_VM_CRYPTO:
		switch (attr->attr) {
		case KVM_S390_VM_CRYPTO_ENABLE_AES_KW:
		case KVM_S390_VM_CRYPTO_ENABLE_DEA_KW:
		case KVM_S390_VM_CRYPTO_DISABLE_AES_KW:
		case KVM_S390_VM_CRYPTO_DISABLE_DEA_KW:
			ret = 0;
			break;
		default:
			ret = -ENXIO;
			break;
		}
		break;
	default:
		ret = -ENXIO;
		break;
	}

	return ret;
}

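/*
 * Transfer guest storage keys to/from user space. If the guest never used
 * storage keys, GET reports KVM_S390_GET_SKEYS_NONE instead of forcing the
 * mm into storage-key mode; SET enables storage-key handling on demand via
 * s390_enable_skey().
 */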
static long kvm_s390_get_skeys(struct kvm *kvm, struct kvm_s390_skeys *args)
{
	uint8_t *keys;
	uint64_t hva;
	int i, r = 0;

	if (args->flags != 0)
		return -EINVAL;

	/* Is this guest using storage keys? */
	if (!mm_use_skey(current->mm))
		return KVM_S390_GET_SKEYS_NONE;

	/* Enforce sane limit on memory allocation */
	if (args->count < 1 || args->count > KVM_S390_SKEYS_MAX)
		return -EINVAL;

	keys = kmalloc_array(args->count, sizeof(uint8_t),
			     GFP_KERNEL | __GFP_NOWARN);
	if (!keys)
		keys = vmalloc(sizeof(uint8_t) * args->count);
	if (!keys)
		return -ENOMEM;

	down_read(&current->mm->mmap_sem);
	for (i = 0; i < args->count; i++) {
		hva = gfn_to_hva(kvm, args->start_gfn + i);
		if (kvm_is_error_hva(hva)) {
			r = -EFAULT;
			break;
		}

		r = get_guest_storage_key(current->mm, hva, &keys[i]);
		if (r)
			break;
	}
	up_read(&current->mm->mmap_sem);

	if (!r) {
		r = copy_to_user((uint8_t __user *)args->skeydata_addr, keys,
				 sizeof(uint8_t) * args->count);
		if (r)
			r = -EFAULT;
	}

	kvfree(keys);
	return r;
}

static long kvm_s390_set_skeys(struct kvm *kvm, struct kvm_s390_skeys *args)
{
	uint8_t *keys;
	uint64_t hva;
	int i, r = 0;

	if (args->flags != 0)
		return -EINVAL;

	/* Enforce sane limit on memory allocation */
	if (args->count < 1 || args->count > KVM_S390_SKEYS_MAX)
		return -EINVAL;

	keys = kmalloc_array(args->count, sizeof(uint8_t),
			     GFP_KERNEL | __GFP_NOWARN);
	if (!keys)
		keys = vmalloc(sizeof(uint8_t) * args->count);
	if (!keys)
		return -ENOMEM;

	r = copy_from_user(keys, (uint8_t __user *)args->skeydata_addr,
			   sizeof(uint8_t) * args->count);
	if (r) {
		r = -EFAULT;
		goto out;
	}

	/* Enable storage key handling for the guest */
	r = s390_enable_skey();
	if (r)
		goto out;

	down_read(&current->mm->mmap_sem);
	for (i = 0; i < args->count; i++) {
		hva = gfn_to_hva(kvm, args->start_gfn + i);
		if (kvm_is_error_hva(hva)) {
			r = -EFAULT;
			break;
		}

		/* Lowest order bit is reserved */
		if (keys[i] & 0x01) {
			r = -EINVAL;
			break;
		}

		r = set_guest_storage_key(current->mm, hva, keys[i], 0);
		if (r)
			break;
	}
	up_read(&current->mm->mmap_sem);
out:
	kvfree(keys);
	return r;
}

long kvm_arch_vm_ioctl(struct file *filp,
		       unsigned int ioctl, unsigned long arg)
{
	struct kvm *kvm = filp->private_data;
	void __user *argp = (void __user *)arg;
	struct kvm_device_attr attr;
	int r;

	switch (ioctl) {
	case KVM_S390_INTERRUPT: {
		struct kvm_s390_interrupt s390int;

		r = -EFAULT;
		if (copy_from_user(&s390int, argp, sizeof(s390int)))
			break;
		r = kvm_s390_inject_vm(kvm, &s390int);
		break;
	}
	case KVM_ENABLE_CAP: {
		struct kvm_enable_cap cap;
		r = -EFAULT;
		if (copy_from_user(&cap, argp, sizeof(cap)))
			break;
		r = kvm_vm_ioctl_enable_cap(kvm, &cap);
		break;
	}
	case KVM_CREATE_IRQCHIP: {
		struct kvm_irq_routing_entry routing;

		r = -EINVAL;
		if (kvm->arch.use_irqchip) {
			/* Set up dummy routing. */
			memset(&routing, 0, sizeof(routing));
			r = kvm_set_irq_routing(kvm, &routing, 0, 0);
		}
		break;
	}
	case KVM_SET_DEVICE_ATTR: {
		r = -EFAULT;
		if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
			break;
		r = kvm_s390_vm_set_attr(kvm, &attr);
		break;
	}
	case KVM_GET_DEVICE_ATTR: {
		r = -EFAULT;
		if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
			break;
		r = kvm_s390_vm_get_attr(kvm, &attr);
		break;
	}
	case KVM_HAS_DEVICE_ATTR: {
		r = -EFAULT;
		if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
			break;
		r = kvm_s390_vm_has_attr(kvm, &attr);
		break;
	}
	case KVM_S390_GET_SKEYS: {
		struct kvm_s390_skeys args;

		r = -EFAULT;
		if (copy_from_user(&args, argp,
				   sizeof(struct kvm_s390_skeys)))
			break;
		r = kvm_s390_get_skeys(kvm, &args);
		break;
	}
	case KVM_S390_SET_SKEYS: {
		struct kvm_s390_skeys args;

		r = -EFAULT;
		if (copy_from_user(&args, argp,
				   sizeof(struct kvm_s390_skeys)))
			break;
		r = kvm_s390_set_skeys(kvm, &args);
		break;
	}
	default:
		r = -ENOTTY;
	}

	return r;
}

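/*
 * Query the Adjunct Processor configuration. The .long 0xb2af0000 below is
 * the PQAP instruction with the QCI (query configuration information)
 * function code; it fills a 128-byte block from which the APXA facility
 * bit (config[0] & 0x40) is extracted to pick the crycb format. The whole
 * query is gated by facility 12.
 */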
static int kvm_s390_query_ap_config(u8 *config)
{
	u32 fcn_code = 0x04000000UL;
	u32 cc = 0;

	memset(config, 0, 128);
	asm volatile(
		"lgr 0,%1\n"
		"lgr 2,%2\n"
		".long 0xb2af0000\n"		/* PQAP(QCI) */
		"0: ipm %0\n"
		"srl %0,28\n"
		"1:\n"
		EX_TABLE(0b, 1b)
		: "+r" (cc)
		: "r" (fcn_code), "r" (config)
		: "cc", "0", "2", "memory"
	);

	return cc;
}

static int kvm_s390_apxa_installed(void)
{
	u8 config[128];
	int cc;

	if (test_facility(12)) {
		cc = kvm_s390_query_ap_config(config);

		if (cc)
			pr_err("PQAP(QCI) failed with cc=%d", cc);
		else
			return config[0] & 0x40;
	}

	return 0;
}

static void kvm_s390_set_crycb_format(struct kvm *kvm)
{
	kvm->arch.crypto.crycbd = (__u32)(unsigned long) kvm->arch.crypto.crycb;

	if (kvm_s390_apxa_installed())
		kvm->arch.crypto.crycbd |= CRYCB_FORMAT2;
	else
		kvm->arch.crypto.crycbd |= CRYCB_FORMAT1;
}

static u64 kvm_s390_get_initial_cpuid(void)
{
	struct cpuid cpuid;

	get_cpu_id(&cpuid);
	cpuid.version = 0xff;
	return *((u64 *) &cpuid);
}

static void kvm_s390_crypto_init(struct kvm *kvm)
{
	if (!test_kvm_facility(kvm, 76))
		return;

	kvm->arch.crypto.crycb = &kvm->arch.sie_page2->crycb;
	kvm_s390_set_crycb_format(kvm);

	/* Enable AES/DEA protected key functions by default */
	kvm->arch.crypto.aes_kw = 1;
	kvm->arch.crypto.dea_kw = 1;
	get_random_bytes(kvm->arch.crypto.crycb->aes_wrapping_key_mask,
			 sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
	get_random_bytes(kvm->arch.crypto.crycb->dea_wrapping_key_mask,
			 sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));
}

static void sca_dispose(struct kvm *kvm)
{
	if (kvm->arch.use_esca)
		free_pages_exact(kvm->arch.sca, sizeof(struct esca_block));
	else
		free_page((unsigned long)(kvm->arch.sca));
	kvm->arch.sca = NULL;
}

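/*
 * VM creation. The basic SCA (system control area) must stay addressable
 * by 31-bit SIE fields, so it is allocated with GFP_DMA unless the machine
 * supports 64-bit SCA origins; sca_offset staggers the SCAs of different
 * VMs within a page, presumably to spread cache-line pressure among VMs.
 */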
int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
{
	gfp_t alloc_flags = GFP_KERNEL;
	int i, rc;
	char debug_name[16];
	static unsigned long sca_offset;

	rc = -EINVAL;
#ifdef CONFIG_KVM_S390_UCONTROL
	if (type & ~KVM_VM_S390_UCONTROL)
		goto out_err;
	if ((type & KVM_VM_S390_UCONTROL) && (!capable(CAP_SYS_ADMIN)))
		goto out_err;
#else
	if (type)
		goto out_err;
#endif

	rc = s390_enable_sie();
	if (rc)
		goto out_err;

	rc = -ENOMEM;

	ratelimit_state_init(&kvm->arch.sthyi_limit, 5 * HZ, 500);

	kvm->arch.use_esca = 0; /* start with basic SCA */
	if (!sclp.has_64bscao)
		alloc_flags |= GFP_DMA;
	rwlock_init(&kvm->arch.sca_lock);
	kvm->arch.sca = (struct bsca_block *) get_zeroed_page(alloc_flags);
	if (!kvm->arch.sca)
		goto out_err;
	spin_lock(&kvm_lock);
	sca_offset += 16;
	if (sca_offset + sizeof(struct bsca_block) > PAGE_SIZE)
		sca_offset = 0;
	kvm->arch.sca = (struct bsca_block *)
			((char *) kvm->arch.sca + sca_offset);
	spin_unlock(&kvm_lock);

	sprintf(debug_name, "kvm-%u", current->pid);

	kvm->arch.dbf = debug_register(debug_name, 32, 1, 7 * sizeof(long));
	if (!kvm->arch.dbf)
		goto out_err;

	kvm->arch.sie_page2 =
	     (struct sie_page2 *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!kvm->arch.sie_page2)
		goto out_err;

	/* Populate the facility mask initially. */
	memcpy(kvm->arch.model.fac_mask, S390_lowcore.stfle_fac_list,
	       S390_ARCH_FAC_LIST_SIZE_BYTE);
	for (i = 0; i < S390_ARCH_FAC_LIST_SIZE_U64; i++) {
		if (i < kvm_s390_fac_list_mask_size())
			kvm->arch.model.fac_mask[i] &= kvm_s390_fac_list_mask[i];
		else
			kvm->arch.model.fac_mask[i] = 0UL;
	}

	/* Populate the facility list initially. */
	kvm->arch.model.fac_list = kvm->arch.sie_page2->fac_list;
	memcpy(kvm->arch.model.fac_list, kvm->arch.model.fac_mask,
	       S390_ARCH_FAC_LIST_SIZE_BYTE);

	set_kvm_facility(kvm->arch.model.fac_mask, 74);
	set_kvm_facility(kvm->arch.model.fac_list, 74);

	kvm->arch.model.cpuid = kvm_s390_get_initial_cpuid();
	kvm->arch.model.ibc = sclp.ibc & 0x0fff;

	kvm_s390_crypto_init(kvm);

	spin_lock_init(&kvm->arch.float_int.lock);
	for (i = 0; i < FIRQ_LIST_COUNT; i++)
		INIT_LIST_HEAD(&kvm->arch.float_int.lists[i]);
	init_waitqueue_head(&kvm->arch.ipte_wq);
	mutex_init(&kvm->arch.ipte_mutex);

	debug_register_view(kvm->arch.dbf, &debug_sprintf_view);
	VM_EVENT(kvm, 3, "vm created with type %lu", type);

	if (type & KVM_VM_S390_UCONTROL) {
		kvm->arch.gmap = NULL;
		kvm->arch.mem_limit = KVM_S390_NO_MEM_LIMIT;
	} else {
		if (sclp.hamax == U64_MAX)
			kvm->arch.mem_limit = TASK_MAX_SIZE;
		else
			kvm->arch.mem_limit = min_t(unsigned long, TASK_MAX_SIZE,
						    sclp.hamax + 1);
		kvm->arch.gmap = gmap_create(current->mm, kvm->arch.mem_limit - 1);
		if (!kvm->arch.gmap)
			goto out_err;
		kvm->arch.gmap->private = kvm;
		kvm->arch.gmap->pfault_enabled = 0;
	}

	kvm->arch.css_support = 0;
	kvm->arch.use_irqchip = 0;
	kvm->arch.epoch = 0;

	spin_lock_init(&kvm->arch.start_stop_lock);
	kvm_s390_vsie_init(kvm);
	KVM_EVENT(3, "vm 0x%pK created by pid %u", kvm, current->pid);

	return 0;
out_err:
	free_page((unsigned long)kvm->arch.sie_page2);
	debug_unregister(kvm->arch.dbf);
	sca_dispose(kvm);
	KVM_EVENT(3, "creation of vm failed: %d", rc);
	return rc;
}

void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{
	VCPU_EVENT(vcpu, 3, "%s", "free cpu");
	trace_kvm_s390_destroy_vcpu(vcpu->vcpu_id);
	kvm_s390_clear_local_irqs(vcpu);
	kvm_clear_async_pf_completion_queue(vcpu);
	if (!kvm_is_ucontrol(vcpu->kvm))
		sca_del_vcpu(vcpu);

	if (kvm_is_ucontrol(vcpu->kvm))
		gmap_remove(vcpu->arch.gmap);

	if (vcpu->kvm->arch.use_cmma)
		kvm_s390_vcpu_unsetup_cmma(vcpu);
	free_page((unsigned long)(vcpu->arch.sie_block));

	kvm_vcpu_uninit(vcpu);
	kmem_cache_free(kvm_vcpu_cache, vcpu);
}

static void kvm_free_vcpus(struct kvm *kvm)
{
	unsigned int i;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(i, vcpu, kvm)
		kvm_arch_vcpu_destroy(vcpu);

	mutex_lock(&kvm->lock);
	for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
		kvm->vcpus[i] = NULL;

	atomic_set(&kvm->online_vcpus, 0);
	mutex_unlock(&kvm->lock);
}

void kvm_arch_destroy_vm(struct kvm *kvm)
{
	kvm_free_vcpus(kvm);
	sca_dispose(kvm);
	debug_unregister(kvm->arch.dbf);
	free_page((unsigned long)kvm->arch.sie_page2);
	if (!kvm_is_ucontrol(kvm))
		gmap_remove(kvm->arch.gmap);
	kvm_s390_destroy_adapters(kvm);
	kvm_s390_clear_float_irqs(kvm);
	kvm_s390_vsie_destroy(kvm);
	KVM_EVENT(3, "vm 0x%pK destroyed", kvm);
}

/* Section: vcpu related */
static int __kvm_ucontrol_vcpu_init(struct kvm_vcpu *vcpu)
{
	vcpu->arch.gmap = gmap_create(current->mm, -1UL);
	if (!vcpu->arch.gmap)
		return -ENOMEM;
	vcpu->arch.gmap->private = vcpu->kvm;

	return 0;
}

static void sca_del_vcpu(struct kvm_vcpu *vcpu)
{
	read_lock(&vcpu->kvm->arch.sca_lock);
	if (vcpu->kvm->arch.use_esca) {
		struct esca_block *sca = vcpu->kvm->arch.sca;

		clear_bit_inv(vcpu->vcpu_id, (unsigned long *) sca->mcn);
		sca->cpu[vcpu->vcpu_id].sda = 0;
	} else {
		struct bsca_block *sca = vcpu->kvm->arch.sca;

		clear_bit_inv(vcpu->vcpu_id, (unsigned long *) &sca->mcn);
		sca->cpu[vcpu->vcpu_id].sda = 0;
	}
	read_unlock(&vcpu->kvm->arch.sca_lock);
}

static void sca_add_vcpu(struct kvm_vcpu *vcpu)
{
	read_lock(&vcpu->kvm->arch.sca_lock);
	if (vcpu->kvm->arch.use_esca) {
		struct esca_block *sca = vcpu->kvm->arch.sca;

		sca->cpu[vcpu->vcpu_id].sda = (__u64) vcpu->arch.sie_block;
		vcpu->arch.sie_block->scaoh = (__u32)(((__u64)sca) >> 32);
		vcpu->arch.sie_block->scaol = (__u32)(__u64)sca & ~0x3fU;
		vcpu->arch.sie_block->ecb2 |= 0x04U;
		set_bit_inv(vcpu->vcpu_id, (unsigned long *) sca->mcn);
	} else {
		struct bsca_block *sca = vcpu->kvm->arch.sca;

		sca->cpu[vcpu->vcpu_id].sda = (__u64) vcpu->arch.sie_block;
		vcpu->arch.sie_block->scaoh = (__u32)(((__u64)sca) >> 32);
		vcpu->arch.sie_block->scaol = (__u32)(__u64)sca;
		set_bit_inv(vcpu->vcpu_id, (unsigned long *) &sca->mcn);
	}
	read_unlock(&vcpu->kvm->arch.sca_lock);
}

/* Basic SCA to Extended SCA data copy routines */
static inline void sca_copy_entry(struct esca_entry *d, struct bsca_entry *s)
{
	d->sda = s->sda;
	d->sigp_ctrl.c = s->sigp_ctrl.c;
	d->sigp_ctrl.scn = s->sigp_ctrl.scn;
}

static void sca_copy_b_to_e(struct esca_block *d, struct bsca_block *s)
{
	int i;

	d->ipte_control = s->ipte_control;
	d->mcn[0] = s->mcn;
	for (i = 0; i < KVM_S390_BSCA_CPU_SLOTS; i++)
		sca_copy_entry(&d->cpu[i], &s->cpu[i]);
}

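/*
 * Upgrade from the basic SCA (up to KVM_S390_BSCA_CPU_SLOTS VCPUs) to the
 * extended SCA. All VCPUs are blocked and the sca_lock is taken for
 * writing while the new block is populated and every SIE control block is
 * repointed, so no VCPU can enter SIE with a stale SCA origin.
 */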
static int sca_switch_to_extended(struct kvm *kvm)
{
	struct bsca_block *old_sca = kvm->arch.sca;
	struct esca_block *new_sca;
	struct kvm_vcpu *vcpu;
	unsigned int vcpu_idx;
	u32 scaol, scaoh;

	new_sca = alloc_pages_exact(sizeof(*new_sca), GFP_KERNEL|__GFP_ZERO);
	if (!new_sca)
		return -ENOMEM;

	scaoh = (u32)((u64)(new_sca) >> 32);
	scaol = (u32)(u64)(new_sca) & ~0x3fU;

	kvm_s390_vcpu_block_all(kvm);
	write_lock(&kvm->arch.sca_lock);

	sca_copy_b_to_e(new_sca, old_sca);

	kvm_for_each_vcpu(vcpu_idx, vcpu, kvm) {
		vcpu->arch.sie_block->scaoh = scaoh;
		vcpu->arch.sie_block->scaol = scaol;
		vcpu->arch.sie_block->ecb2 |= 0x04U;
	}
	kvm->arch.sca = new_sca;
	kvm->arch.use_esca = 1;

	write_unlock(&kvm->arch.sca_lock);
	kvm_s390_vcpu_unblock_all(kvm);

	free_page((unsigned long)old_sca);

	VM_EVENT(kvm, 2, "Switched to ESCA (0x%pK -> 0x%pK)",
		 old_sca, kvm->arch.sca);
	return 0;
}

static int sca_can_add_vcpu(struct kvm *kvm, unsigned int id)
{
	int rc;

	if (id < KVM_S390_BSCA_CPU_SLOTS)
		return true;
	if (!sclp.has_esca || !sclp.has_64bscao)
		return false;

	mutex_lock(&kvm->lock);
	rc = kvm->arch.use_esca ? 0 : sca_switch_to_extended(kvm);
	mutex_unlock(&kvm->lock);

	return rc == 0 && id < KVM_S390_ESCA_CPU_SLOTS;
}

int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
{
	vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
	kvm_clear_async_pf_completion_queue(vcpu);
	vcpu->run->kvm_valid_regs = KVM_SYNC_PREFIX |
				    KVM_SYNC_GPRS |
				    KVM_SYNC_ACRS |
				    KVM_SYNC_CRS |
				    KVM_SYNC_ARCH0 |
				    KVM_SYNC_PFAULT;
	if (test_kvm_facility(vcpu->kvm, 64))
		vcpu->run->kvm_valid_regs |= KVM_SYNC_RICCB;
	/* fprs can be synchronized via vrs, even if the guest has no vx. With
	 * MACHINE_HAS_VX, (load|store)_fpu_regs() will work with vrs format.
	 */
	if (MACHINE_HAS_VX)
		vcpu->run->kvm_valid_regs |= KVM_SYNC_VRS;
	else
		vcpu->run->kvm_valid_regs |= KVM_SYNC_FPRS;

	if (kvm_is_ucontrol(vcpu->kvm))
		return __kvm_ucontrol_vcpu_init(vcpu);

	return 0;
}

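/*
 * Guest CPU timer accounting: while accounting is enabled, the TOD time
 * elapsed since cputm_start still has to be subtracted from the value
 * stored in the SIE block. The seqcount lets other VCPU threads read a
 * consistent (start, value) pair without taking a lock; writers only run
 * with preemption disabled on the VCPU thread itself.
 */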
/* needs disabled preemption to protect from TOD sync and vcpu_load/put */
static void __start_cpu_timer_accounting(struct kvm_vcpu *vcpu)
{
	WARN_ON_ONCE(vcpu->arch.cputm_start != 0);
	raw_write_seqcount_begin(&vcpu->arch.cputm_seqcount);
	vcpu->arch.cputm_start = get_tod_clock_fast();
	raw_write_seqcount_end(&vcpu->arch.cputm_seqcount);
}

/* needs disabled preemption to protect from TOD sync and vcpu_load/put */
static void __stop_cpu_timer_accounting(struct kvm_vcpu *vcpu)
{
	WARN_ON_ONCE(vcpu->arch.cputm_start == 0);
	raw_write_seqcount_begin(&vcpu->arch.cputm_seqcount);
	vcpu->arch.sie_block->cputm -= get_tod_clock_fast() - vcpu->arch.cputm_start;
	vcpu->arch.cputm_start = 0;
	raw_write_seqcount_end(&vcpu->arch.cputm_seqcount);
}

/* needs disabled preemption to protect from TOD sync and vcpu_load/put */
static void __enable_cpu_timer_accounting(struct kvm_vcpu *vcpu)
{
	WARN_ON_ONCE(vcpu->arch.cputm_enabled);
	vcpu->arch.cputm_enabled = true;
	__start_cpu_timer_accounting(vcpu);
}

/* needs disabled preemption to protect from TOD sync and vcpu_load/put */
static void __disable_cpu_timer_accounting(struct kvm_vcpu *vcpu)
{
	WARN_ON_ONCE(!vcpu->arch.cputm_enabled);
	__stop_cpu_timer_accounting(vcpu);
	vcpu->arch.cputm_enabled = false;
}

static void enable_cpu_timer_accounting(struct kvm_vcpu *vcpu)
{
	preempt_disable(); /* protect from TOD sync and vcpu_load/put */
	__enable_cpu_timer_accounting(vcpu);
	preempt_enable();
}

static void disable_cpu_timer_accounting(struct kvm_vcpu *vcpu)
{
	preempt_disable(); /* protect from TOD sync and vcpu_load/put */
	__disable_cpu_timer_accounting(vcpu);
	preempt_enable();
}

/* set the cpu timer - may only be called from the VCPU thread itself */
void kvm_s390_set_cpu_timer(struct kvm_vcpu *vcpu, __u64 cputm)
{
	preempt_disable(); /* protect from TOD sync and vcpu_load/put */
	raw_write_seqcount_begin(&vcpu->arch.cputm_seqcount);
	if (vcpu->arch.cputm_enabled)
		vcpu->arch.cputm_start = get_tod_clock_fast();
	vcpu->arch.sie_block->cputm = cputm;
	raw_write_seqcount_end(&vcpu->arch.cputm_seqcount);
	preempt_enable();
}

/* update and get the cpu timer - can also be called from other VCPU threads */
__u64 kvm_s390_get_cpu_timer(struct kvm_vcpu *vcpu)
{
	unsigned int seq;
	__u64 value;

	if (unlikely(!vcpu->arch.cputm_enabled))
		return vcpu->arch.sie_block->cputm;

	preempt_disable(); /* protect from TOD sync and vcpu_load/put */
	do {
		seq = raw_read_seqcount(&vcpu->arch.cputm_seqcount);
		/*
		 * If the writer would ever execute a read in the critical
		 * section, e.g. in irq context, we have a deadlock.
		 */
		WARN_ON_ONCE((seq & 1) && smp_processor_id() == vcpu->cpu);
		value = vcpu->arch.sie_block->cputm;
		/* if cputm_start is 0, accounting is being started/stopped */
		if (likely(vcpu->arch.cputm_start))
			value -= get_tod_clock_fast() - vcpu->arch.cputm_start;
	} while (read_seqcount_retry(&vcpu->arch.cputm_seqcount, seq & ~1));
	preempt_enable();
	return value;
}

void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	/* Save host register state */
	save_fpu_regs();
	vcpu->arch.host_fpregs.fpc = current->thread.fpu.fpc;
	vcpu->arch.host_fpregs.regs = current->thread.fpu.regs;

	if (MACHINE_HAS_VX)
		current->thread.fpu.regs = vcpu->run->s.regs.vrs;
	else
		current->thread.fpu.regs = vcpu->run->s.regs.fprs;
	current->thread.fpu.fpc = vcpu->run->s.regs.fpc;
	if (test_fp_ctl(current->thread.fpu.fpc))
		/* User space provided an invalid FPC, let's clear it */
		current->thread.fpu.fpc = 0;

	save_access_regs(vcpu->arch.host_acrs);
	restore_access_regs(vcpu->run->s.regs.acrs);
	gmap_enable(vcpu->arch.enabled_gmap);
	atomic_or(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
	if (vcpu->arch.cputm_enabled && !is_vcpu_idle(vcpu))
		__start_cpu_timer_accounting(vcpu);
	vcpu->cpu = cpu;
}

void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
	vcpu->cpu = -1;
	if (vcpu->arch.cputm_enabled && !is_vcpu_idle(vcpu))
		__stop_cpu_timer_accounting(vcpu);
	atomic_andnot(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
	vcpu->arch.enabled_gmap = gmap_get_enabled();
	gmap_disable(vcpu->arch.enabled_gmap);

	/* Save guest register state */
	save_fpu_regs();
	vcpu->run->s.regs.fpc = current->thread.fpu.fpc;

	/* Restore host register state */
	current->thread.fpu.fpc = vcpu->arch.host_fpregs.fpc;
	current->thread.fpu.regs = vcpu->arch.host_fpregs.regs;

	save_access_regs(vcpu->run->s.regs.acrs);
	restore_access_regs(vcpu->arch.host_acrs);
}

static void kvm_s390_vcpu_initial_reset(struct kvm_vcpu *vcpu)
{
	/* this equals initial cpu reset in pop, but we don't switch to ESA */
	vcpu->arch.sie_block->gpsw.mask = 0UL;
	vcpu->arch.sie_block->gpsw.addr = 0UL;
	kvm_s390_set_prefix(vcpu, 0);
	kvm_s390_set_cpu_timer(vcpu, 0);
	vcpu->arch.sie_block->ckc = 0UL;
	vcpu->arch.sie_block->todpr = 0;
	memset(vcpu->arch.sie_block->gcr, 0, 16 * sizeof(__u64));
	vcpu->arch.sie_block->gcr[0] = 0xE0UL;
	vcpu->arch.sie_block->gcr[14] = 0xC2000000UL;
	/* make sure the new fpc will be lazily loaded */
	save_fpu_regs();
	current->thread.fpu.fpc = 0;
	vcpu->arch.sie_block->gbea = 1;
	vcpu->arch.sie_block->pp = 0;
	vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
	kvm_clear_async_pf_completion_queue(vcpu);
	if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm))
		kvm_s390_vcpu_stop(vcpu);
	kvm_s390_clear_local_irqs(vcpu);
}

void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
{
	mutex_lock(&vcpu->kvm->lock);
	preempt_disable();
	vcpu->arch.sie_block->epoch = vcpu->kvm->arch.epoch;
	preempt_enable();
	mutex_unlock(&vcpu->kvm->lock);
	if (!kvm_is_ucontrol(vcpu->kvm)) {
		vcpu->arch.gmap = vcpu->kvm->arch.gmap;
		sca_add_vcpu(vcpu);
	}
	/* make vcpu_load load the right gmap on the first trigger */
	vcpu->arch.enabled_gmap = vcpu->arch.gmap;
}

static void kvm_s390_vcpu_crypto_setup(struct kvm_vcpu *vcpu)
{
	if (!test_kvm_facility(vcpu->kvm, 76))
		return;

	vcpu->arch.sie_block->ecb3 &= ~(ECB3_AES | ECB3_DEA);

	if (vcpu->kvm->arch.crypto.aes_kw)
		vcpu->arch.sie_block->ecb3 |= ECB3_AES;
	if (vcpu->kvm->arch.crypto.dea_kw)
		vcpu->arch.sie_block->ecb3 |= ECB3_DEA;

	vcpu->arch.sie_block->crycbd = vcpu->kvm->arch.crypto.crycbd;
}

void kvm_s390_vcpu_unsetup_cmma(struct kvm_vcpu *vcpu)
{
	free_page(vcpu->arch.sie_block->cbrlo);
	vcpu->arch.sie_block->cbrlo = 0;
}

int kvm_s390_vcpu_setup_cmma(struct kvm_vcpu *vcpu)
{
	vcpu->arch.sie_block->cbrlo = get_zeroed_page(GFP_KERNEL);
	if (!vcpu->arch.sie_block->cbrlo)
		return -ENOMEM;

	vcpu->arch.sie_block->ecb2 |= 0x80;
	vcpu->arch.sie_block->ecb2 &= ~0x08;
	return 0;
}

static void kvm_s390_vcpu_setup_model(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_cpu_model *model = &vcpu->kvm->arch.model;
1839
91520f1a 1840 vcpu->arch.sie_block->ibc = model->ibc;
80bc79dc 1841 if (test_kvm_facility(vcpu->kvm, 7))
c54f0d6a 1842 vcpu->arch.sie_block->fac = (u32)(u64) model->fac_list;
91520f1a
MM
1843}
1844
b0c632db
HC
1845int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
1846{
b31605c1 1847 int rc = 0;
b31288fa 1848
9e6dabef
CH
1849 atomic_set(&vcpu->arch.sie_block->cpuflags, CPUSTAT_ZARCH |
1850 CPUSTAT_SM |
a4a4f191
GH
1851 CPUSTAT_STOPPED);
1852
53df84f8 1853 if (test_kvm_facility(vcpu->kvm, 78))
805de8f4 1854 atomic_or(CPUSTAT_GED2, &vcpu->arch.sie_block->cpuflags);
53df84f8 1855 else if (test_kvm_facility(vcpu->kvm, 8))
805de8f4 1856 atomic_or(CPUSTAT_GED, &vcpu->arch.sie_block->cpuflags);
a4a4f191 1857
91520f1a
MM
1858 kvm_s390_vcpu_setup_model(vcpu);
1859
bdab09f3
DH
1860 /* pgste_set_pte has special handling for !MACHINE_HAS_ESOP */
1861 if (MACHINE_HAS_ESOP)
1862 vcpu->arch.sie_block->ecb |= 0x02;
bd50e8ec
DH
1863 if (test_kvm_facility(vcpu->kvm, 9))
1864 vcpu->arch.sie_block->ecb |= 0x04;
f597d24e 1865 if (test_kvm_facility(vcpu->kvm, 73))
7feb6bb8
MM
1866 vcpu->arch.sie_block->ecb |= 0x10;
1867
873b425e 1868 if (test_kvm_facility(vcpu->kvm, 8) && sclp.has_pfmfi)
d6af0b49 1869 vcpu->arch.sie_block->ecb2 |= 0x08;
48ee7d3a
DH
1870 vcpu->arch.sie_block->eca = 0x1002000U;
1871 if (sclp.has_cei)
1872 vcpu->arch.sie_block->eca |= 0x80000000U;
11ad65b7
DH
1873 if (sclp.has_ib)
1874 vcpu->arch.sie_block->eca |= 0x40000000U;
37c5f6c8 1875 if (sclp.has_siif)
217a4406 1876 vcpu->arch.sie_block->eca |= 1;
37c5f6c8 1877 if (sclp.has_sigpif)
ea5f4969 1878 vcpu->arch.sie_block->eca |= 0x10000000U;
c6e5f166
FZ
1879 if (test_kvm_facility(vcpu->kvm, 64))
1880 vcpu->arch.sie_block->ecb3 |= 0x01;
18280d8b 1881 if (test_kvm_facility(vcpu->kvm, 129)) {
13211ea7
EF
1882 vcpu->arch.sie_block->eca |= 0x00020000;
1883 vcpu->arch.sie_block->ecd |= 0x20000000;
1884 }
c6e5f166 1885 vcpu->arch.sie_block->riccbd = (unsigned long) &vcpu->run->s.regs.riccb;
492d8642 1886 vcpu->arch.sie_block->ictl |= ICTL_ISKE | ICTL_SSKE | ICTL_RRBE;
95ca2cb5
JF
1887 if (test_kvm_facility(vcpu->kvm, 74))
1888 vcpu->arch.sie_block->ictl |= ICTL_OPEREXC;
5a5e6536 1889
e6db1d61 1890 if (vcpu->kvm->arch.use_cmma) {
b31605c1
DD
1891 rc = kvm_s390_vcpu_setup_cmma(vcpu);
1892 if (rc)
1893 return rc;
b31288fa 1894 }
0ac96caf 1895 hrtimer_init(&vcpu->arch.ckc_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
ca872302 1896 vcpu->arch.ckc_timer.function = kvm_s390_idle_wakeup;
9d8d5786 1897
5102ee87
TK
1898 kvm_s390_vcpu_crypto_setup(vcpu);
1899
b31605c1 1900 return rc;
b0c632db
HC
1901}
1902
1903struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
1904 unsigned int id)
1905{
4d47555a 1906 struct kvm_vcpu *vcpu;
7feb6bb8 1907 struct sie_page *sie_page;
4d47555a
CO
1908 int rc = -EINVAL;
1909
4215825e 1910 if (!kvm_is_ucontrol(kvm) && !sca_can_add_vcpu(kvm, id))
4d47555a
CO
1911 goto out;
1912
1913 rc = -ENOMEM;
b0c632db 1914
b110feaf 1915 vcpu = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
b0c632db 1916 if (!vcpu)
4d47555a 1917 goto out;
b0c632db 1918
7feb6bb8
MM
1919 sie_page = (struct sie_page *) get_zeroed_page(GFP_KERNEL);
1920 if (!sie_page)
b0c632db
HC
1921 goto out_free_cpu;
1922
7feb6bb8
MM
1923 vcpu->arch.sie_block = &sie_page->sie_block;
1924 vcpu->arch.sie_block->itdba = (unsigned long) &sie_page->itdb;
1925
efed1104
DH
1926 /* the real guest size will always be smaller than msl */
1927 vcpu->arch.sie_block->mso = 0;
1928 vcpu->arch.sie_block->msl = sclp.hamax;
1929
b0c632db 1930 vcpu->arch.sie_block->icpua = id;
ba5c1e9b 1931 spin_lock_init(&vcpu->arch.local_int.lock);
ba5c1e9b 1932 vcpu->arch.local_int.float_int = &kvm->arch.float_int;
d0321a24 1933 vcpu->arch.local_int.wq = &vcpu->wq;
5288fbf0 1934 vcpu->arch.local_int.cpuflags = &vcpu->arch.sie_block->cpuflags;
9c23a131 1935 seqcount_init(&vcpu->arch.cputm_seqcount);
ba5c1e9b 1936
b0c632db
HC
1937 rc = kvm_vcpu_init(vcpu, kvm, id);
1938 if (rc)
9abc2a08 1939 goto out_free_sie_block;
8335713a 1940 VM_EVENT(kvm, 3, "create cpu %d at 0x%pK, sie block at 0x%pK", id, vcpu,
b0c632db 1941 vcpu->arch.sie_block);
ade38c31 1942 trace_kvm_s390_create_vcpu(id, vcpu, vcpu->arch.sie_block);
b0c632db 1943
b0c632db 1944 return vcpu;
7b06bf2f
WY
1945out_free_sie_block:
1946 free_page((unsigned long)(vcpu->arch.sie_block));
b0c632db 1947out_free_cpu:
b110feaf 1948 kmem_cache_free(kvm_vcpu_cache, vcpu);
4d47555a 1949out:
b0c632db
HC
1950 return ERR_PTR(rc);
1951}
1952
b0c632db
HC
1953int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
1954{
9a022067 1955 return kvm_s390_vcpu_has_irq(vcpu, 0);
b0c632db
HC
1956}
1957
27406cd5 1958void kvm_s390_vcpu_block(struct kvm_vcpu *vcpu)
49b99e1e 1959{
805de8f4 1960 atomic_or(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
61a6df54 1961 exit_sie(vcpu);
49b99e1e
CB
1962}
1963
27406cd5 1964void kvm_s390_vcpu_unblock(struct kvm_vcpu *vcpu)
49b99e1e 1965{
805de8f4 1966 atomic_andnot(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
49b99e1e
CB
1967}
1968
8e236546
CB
1969static void kvm_s390_vcpu_request(struct kvm_vcpu *vcpu)
1970{
805de8f4 1971 atomic_or(PROG_REQUEST, &vcpu->arch.sie_block->prog20);
61a6df54 1972 exit_sie(vcpu);
8e236546
CB
1973}
1974
1975static void kvm_s390_vcpu_request_handled(struct kvm_vcpu *vcpu)
1976{
9bf9fde2 1977 atomic_andnot(PROG_REQUEST, &vcpu->arch.sie_block->prog20);
8e236546
CB
1978}
1979
49b99e1e
CB
1980/*
1981 * Kick a guest cpu out of SIE and wait until SIE is not running.
1982 * If the CPU is not running (e.g. waiting as idle) the function will
1983 * return immediately. */
1984void exit_sie(struct kvm_vcpu *vcpu)
1985{
805de8f4 1986 atomic_or(CPUSTAT_STOP_INT, &vcpu->arch.sie_block->cpuflags);
49b99e1e
CB
1987 while (vcpu->arch.sie_block->prog0c & PROG_IN_SIE)
1988 cpu_relax();
1989}
1990
8e236546
CB
1991/* Kick a guest cpu out of SIE to process a request synchronously */
1992void kvm_s390_sync_request(int req, struct kvm_vcpu *vcpu)
49b99e1e 1993{
8e236546
CB
1994 kvm_make_request(req, vcpu);
1995 kvm_s390_vcpu_request(vcpu);
49b99e1e
CB
1996}
1997
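/*
 * Sketch of how the helpers above combine: a producer raises a request and
 * kicks the vcpu out of SIE; the vcpu thread later consumes it with
 * kvm_check_request() in kvm_s390_handle_requests(). "my_vcpu" is a
 * placeholder; kvm_gmap_notifier() below is the real in-file example,
 * using KVM_REQ_MMU_RELOAD.
 */
#if 0
static void example_flush_tlb_of(struct kvm_vcpu *my_vcpu)
{
	/* raise the request and force an exit from SIE */
	kvm_s390_sync_request(KVM_REQ_TLB_FLUSH, my_vcpu);
	/* the vcpu thread will see it via kvm_check_request() */
}
#endif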
414d3b07
MS
1998static void kvm_gmap_notifier(struct gmap *gmap, unsigned long start,
1999 unsigned long end)
2c70fe44 2000{
2c70fe44
CB
2001 struct kvm *kvm = gmap->private;
2002 struct kvm_vcpu *vcpu;
414d3b07
MS
2003 unsigned long prefix;
2004 int i;
2c70fe44 2005
65d0b0d4
DH
2006 if (gmap_is_shadow(gmap))
2007 return;
414d3b07
MS
2008 if (start >= 1UL << 31)
2009 /* We are only interested in prefix pages */
2010 return;
2c70fe44
CB
2011 kvm_for_each_vcpu(i, vcpu, kvm) {
2012 /* match against both prefix pages */
414d3b07
MS
2013 prefix = kvm_s390_get_prefix(vcpu);
2014 if (prefix <= end && start <= prefix + 2*PAGE_SIZE - 1) {
2015 VCPU_EVENT(vcpu, 2, "gmap notifier for %lx-%lx",
2016 start, end);
8e236546 2017 kvm_s390_sync_request(KVM_REQ_MMU_RELOAD, vcpu);
2c70fe44
CB
2018 }
2019 }
2020}
2021
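/*
 * Worked example for the overlap test above, assuming 4K pages: with a
 * prefix of 0x8000 the two prefix pages span 0x8000..0x9fff, so an
 * invalidation of start=0x9000, end=0x9fff satisfies both
 * "prefix <= end" (0x8000 <= 0x9fff) and
 * "start <= prefix + 2*PAGE_SIZE - 1" (0x9000 <= 0x9fff)
 * and therefore triggers the MMU reload request.
 */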
b6d33834
CD
2022int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
2023{
2024 /* kvm common code refers to this, but never calls it */
2025 BUG();
2026 return 0;
2027}
2028
14eebd91
CO
2029static int kvm_arch_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu,
2030 struct kvm_one_reg *reg)
2031{
2032 int r = -EINVAL;
2033
2034 switch (reg->id) {
29b7c71b
CO
2035 case KVM_REG_S390_TODPR:
2036 r = put_user(vcpu->arch.sie_block->todpr,
2037 (u32 __user *)reg->addr);
2038 break;
2039 case KVM_REG_S390_EPOCHDIFF:
2040 r = put_user(vcpu->arch.sie_block->epoch,
2041 (u64 __user *)reg->addr);
2042 break;
46a6dd1c 2043 case KVM_REG_S390_CPU_TIMER:
4287f247 2044 r = put_user(kvm_s390_get_cpu_timer(vcpu),
46a6dd1c
J
2045 (u64 __user *)reg->addr);
2046 break;
2047 case KVM_REG_S390_CLOCK_COMP:
2048 r = put_user(vcpu->arch.sie_block->ckc,
2049 (u64 __user *)reg->addr);
2050 break;
536336c2
DD
2051 case KVM_REG_S390_PFTOKEN:
2052 r = put_user(vcpu->arch.pfault_token,
2053 (u64 __user *)reg->addr);
2054 break;
2055 case KVM_REG_S390_PFCOMPARE:
2056 r = put_user(vcpu->arch.pfault_compare,
2057 (u64 __user *)reg->addr);
2058 break;
2059 case KVM_REG_S390_PFSELECT:
2060 r = put_user(vcpu->arch.pfault_select,
2061 (u64 __user *)reg->addr);
2062 break;
672550fb
CB
2063 case KVM_REG_S390_PP:
2064 r = put_user(vcpu->arch.sie_block->pp,
2065 (u64 __user *)reg->addr);
2066 break;
afa45ff5
CB
2067 case KVM_REG_S390_GBEA:
2068 r = put_user(vcpu->arch.sie_block->gbea,
2069 (u64 __user *)reg->addr);
2070 break;
14eebd91
CO
2071 default:
2072 break;
2073 }
2074
2075 return r;
2076}
2077
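/*
 * Userspace counterpart of the handler above, as a sketch: reading the
 * CPU timer through KVM_GET_ONE_REG on a vcpu fd. Assumes <sys/ioctl.h>
 * and <linux/kvm.h>; "vcpu_fd" and the function name are illustrative.
 */
#if 0
static int example_get_cpu_timer(int vcpu_fd, __u64 *cputm)
{
	struct kvm_one_reg reg = {
		.id   = KVM_REG_S390_CPU_TIMER,
		.addr = (__u64)(unsigned long)cputm,
	};

	return ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);	/* 0 on success */
}
#endif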
2078static int kvm_arch_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu,
2079 struct kvm_one_reg *reg)
2080{
2081 int r = -EINVAL;
4287f247 2082 __u64 val;
14eebd91
CO
2083
2084 switch (reg->id) {
29b7c71b
CO
2085 case KVM_REG_S390_TODPR:
2086 r = get_user(vcpu->arch.sie_block->todpr,
2087 (u32 __user *)reg->addr);
2088 break;
2089 case KVM_REG_S390_EPOCHDIFF:
2090 r = get_user(vcpu->arch.sie_block->epoch,
2091 (u64 __user *)reg->addr);
2092 break;
46a6dd1c 2093 case KVM_REG_S390_CPU_TIMER:
4287f247
DH
2094 r = get_user(val, (u64 __user *)reg->addr);
2095 if (!r)
2096 kvm_s390_set_cpu_timer(vcpu, val);
46a6dd1c
J
2097 break;
2098 case KVM_REG_S390_CLOCK_COMP:
2099 r = get_user(vcpu->arch.sie_block->ckc,
2100 (u64 __user *)reg->addr);
2101 break;
536336c2
DD
2102 case KVM_REG_S390_PFTOKEN:
2103 r = get_user(vcpu->arch.pfault_token,
2104 (u64 __user *)reg->addr);
9fbd8082
DH
2105 if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
2106 kvm_clear_async_pf_completion_queue(vcpu);
536336c2
DD
2107 break;
2108 case KVM_REG_S390_PFCOMPARE:
2109 r = get_user(vcpu->arch.pfault_compare,
2110 (u64 __user *)reg->addr);
2111 break;
2112 case KVM_REG_S390_PFSELECT:
2113 r = get_user(vcpu->arch.pfault_select,
2114 (u64 __user *)reg->addr);
2115 break;
672550fb
CB
2116 case KVM_REG_S390_PP:
2117 r = get_user(vcpu->arch.sie_block->pp,
2118 (u64 __user *)reg->addr);
2119 break;
afa45ff5
CB
2120 case KVM_REG_S390_GBEA:
2121 r = get_user(vcpu->arch.sie_block->gbea,
2122 (u64 __user *)reg->addr);
2123 break;
14eebd91
CO
2124 default:
2125 break;
2126 }
2127
2128 return r;
2129}
b6d33834 2130
b0c632db
HC
2131static int kvm_arch_vcpu_ioctl_initial_reset(struct kvm_vcpu *vcpu)
2132{
b0c632db 2133 kvm_s390_vcpu_initial_reset(vcpu);
b0c632db
HC
2134 return 0;
2135}
2136
2137int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
2138{
5a32c1af 2139 memcpy(&vcpu->run->s.regs.gprs, &regs->gprs, sizeof(regs->gprs));
b0c632db
HC
2140 return 0;
2141}
2142
2143int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
2144{
5a32c1af 2145 memcpy(&regs->gprs, &vcpu->run->s.regs.gprs, sizeof(regs->gprs));
b0c632db
HC
2146 return 0;
2147}
2148
2149int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
2150 struct kvm_sregs *sregs)
2151{
59674c1a 2152 memcpy(&vcpu->run->s.regs.acrs, &sregs->acrs, sizeof(sregs->acrs));
b0c632db 2153 memcpy(&vcpu->arch.sie_block->gcr, &sregs->crs, sizeof(sregs->crs));
59674c1a 2154 restore_access_regs(vcpu->run->s.regs.acrs);
b0c632db
HC
2155 return 0;
2156}
2157
2158int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
2159 struct kvm_sregs *sregs)
2160{
59674c1a 2161 memcpy(&sregs->acrs, &vcpu->run->s.regs.acrs, sizeof(sregs->acrs));
b0c632db 2162 memcpy(&sregs->crs, &vcpu->arch.sie_block->gcr, sizeof(sregs->crs));
b0c632db
HC
2163 return 0;
2164}
2165
2166int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
2167{
9abc2a08
DH
2168 /* make sure the new values will be lazily loaded */
2169 save_fpu_regs();
4725c860
MS
2170 if (test_fp_ctl(fpu->fpc))
2171 return -EINVAL;
9abc2a08
DH
2172 current->thread.fpu.fpc = fpu->fpc;
2173 if (MACHINE_HAS_VX)
2174 convert_fp_to_vx(current->thread.fpu.vxrs, (freg_t *)fpu->fprs);
2175 else
2176 memcpy(current->thread.fpu.fprs, &fpu->fprs, sizeof(fpu->fprs));
b0c632db
HC
2177 return 0;
2178}
2179
2180int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
2181{
9abc2a08
DH
2182 /* make sure we have the latest values */
2183 save_fpu_regs();
2184 if (MACHINE_HAS_VX)
2185 convert_vx_to_fp((freg_t *)fpu->fprs, current->thread.fpu.vxrs);
2186 else
2187 memcpy(fpu->fprs, current->thread.fpu.fprs, sizeof(fpu->fprs));
2188 fpu->fpc = current->thread.fpu.fpc;
b0c632db
HC
2189 return 0;
2190}
2191
2192static int kvm_arch_vcpu_ioctl_set_initial_psw(struct kvm_vcpu *vcpu, psw_t psw)
2193{
2194 int rc = 0;
2195
7a42fdc2 2196 if (!is_vcpu_stopped(vcpu))
b0c632db 2197 rc = -EBUSY;
d7b0b5eb
CO
2198 else {
2199 vcpu->run->psw_mask = psw.mask;
2200 vcpu->run->psw_addr = psw.addr;
2201 }
b0c632db
HC
2202 return rc;
2203}
2204
2205int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
2206 struct kvm_translation *tr)
2207{
2208 return -EINVAL; /* not implemented yet */
2209}
2210
27291e21
DH
2211#define VALID_GUESTDBG_FLAGS (KVM_GUESTDBG_SINGLESTEP | \
2212 KVM_GUESTDBG_USE_HW_BP | \
2213 KVM_GUESTDBG_ENABLE)
2214
d0bfb940
JK
2215int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
2216 struct kvm_guest_debug *dbg)
b0c632db 2217{
27291e21
DH
2218 int rc = 0;
2219
2220 vcpu->guest_debug = 0;
2221 kvm_s390_clear_bp_data(vcpu);
2222
2de3bfc2 2223 if (dbg->control & ~VALID_GUESTDBG_FLAGS)
27291e21 2224 return -EINVAL;
89b5b4de
DH
2225 if (!sclp.has_gpere)
2226 return -EINVAL;
27291e21
DH
2227
2228 if (dbg->control & KVM_GUESTDBG_ENABLE) {
2229 vcpu->guest_debug = dbg->control;
2230 /* enforce guest PER */
805de8f4 2231 atomic_or(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags);
27291e21
DH
2232
2233 if (dbg->control & KVM_GUESTDBG_USE_HW_BP)
2234 rc = kvm_s390_import_bp_data(vcpu, dbg);
2235 } else {
805de8f4 2236 atomic_andnot(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags);
27291e21
DH
2237 vcpu->arch.guestdbg.last_bp = 0;
2238 }
2239
2240 if (rc) {
2241 vcpu->guest_debug = 0;
2242 kvm_s390_clear_bp_data(vcpu);
805de8f4 2243 atomic_andnot(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags);
27291e21
DH
2244 }
2245
2246 return rc;
b0c632db
HC
2247}
2248
62d9f0db
MT
2249int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
2250 struct kvm_mp_state *mp_state)
2251{
6352e4d2
DH
2252 /* CHECK_STOP and LOAD are not supported yet */
2253 return is_vcpu_stopped(vcpu) ? KVM_MP_STATE_STOPPED :
2254 KVM_MP_STATE_OPERATING;
62d9f0db
MT
2255}
2256
2257int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
2258 struct kvm_mp_state *mp_state)
2259{
6352e4d2
DH
2260 int rc = 0;
2261
2262 /* user space knows about this interface - let it control the state */
2263 vcpu->kvm->arch.user_cpu_state_ctrl = 1;
2264
2265 switch (mp_state->mp_state) {
2266 case KVM_MP_STATE_STOPPED:
2267 kvm_s390_vcpu_stop(vcpu);
2268 break;
2269 case KVM_MP_STATE_OPERATING:
2270 kvm_s390_vcpu_start(vcpu);
2271 break;
2272 case KVM_MP_STATE_LOAD:
2273 case KVM_MP_STATE_CHECK_STOP:
2274 /* fall through - CHECK_STOP and LOAD are not supported yet */
2275 default:
2276 rc = -ENXIO;
2277 }
2278
2279 return rc;
62d9f0db
MT
2280}
2281
8ad35755
DH
2282static bool ibs_enabled(struct kvm_vcpu *vcpu)
2283{
2284 return atomic_read(&vcpu->arch.sie_block->cpuflags) & CPUSTAT_IBS;
2285}
2286
2c70fe44
CB
2287static int kvm_s390_handle_requests(struct kvm_vcpu *vcpu)
2288{
8ad35755 2289retry:
8e236546 2290 kvm_s390_vcpu_request_handled(vcpu);
586b7ccd
CB
2291 if (!vcpu->requests)
2292 return 0;
2c70fe44
CB
2293 /*
2294 * We use MMU_RELOAD just to re-arm the ipte notifier for the
b2d73b2a 2295 * guest prefix page. gmap_mprotect_notify will wait on the ptl lock.
2c70fe44
CB
2296 * This ensures that the ipte instruction for this request has
2297 * already finished. We might race against a second unmapper that
 2298 * wants to set the blocking bit. Let's just retry the request loop.
2299 */
8ad35755 2300 if (kvm_check_request(KVM_REQ_MMU_RELOAD, vcpu)) {
2c70fe44 2301 int rc;
b2d73b2a
MS
2302 rc = gmap_mprotect_notify(vcpu->arch.gmap,
2303 kvm_s390_get_prefix(vcpu),
2304 PAGE_SIZE * 2, PROT_WRITE);
2c70fe44
CB
2305 if (rc)
2306 return rc;
8ad35755 2307 goto retry;
2c70fe44 2308 }
8ad35755 2309
d3d692c8
DH
2310 if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu)) {
2311 vcpu->arch.sie_block->ihcpu = 0xffff;
2312 goto retry;
2313 }
2314
8ad35755
DH
2315 if (kvm_check_request(KVM_REQ_ENABLE_IBS, vcpu)) {
2316 if (!ibs_enabled(vcpu)) {
2317 trace_kvm_s390_enable_disable_ibs(vcpu->vcpu_id, 1);
805de8f4 2318 atomic_or(CPUSTAT_IBS,
8ad35755
DH
2319 &vcpu->arch.sie_block->cpuflags);
2320 }
2321 goto retry;
2c70fe44 2322 }
8ad35755
DH
2323
2324 if (kvm_check_request(KVM_REQ_DISABLE_IBS, vcpu)) {
2325 if (ibs_enabled(vcpu)) {
2326 trace_kvm_s390_enable_disable_ibs(vcpu->vcpu_id, 0);
805de8f4 2327 atomic_andnot(CPUSTAT_IBS,
8ad35755
DH
2328 &vcpu->arch.sie_block->cpuflags);
2329 }
2330 goto retry;
2331 }
2332
0759d068
DH
2333 /* nothing to do, just clear the request */
2334 clear_bit(KVM_REQ_UNHALT, &vcpu->requests);
2335
2c70fe44
CB
2336 return 0;
2337}
2338
25ed1675
DH
2339void kvm_s390_set_tod_clock(struct kvm *kvm, u64 tod)
2340{
2341 struct kvm_vcpu *vcpu;
2342 int i;
2343
2344 mutex_lock(&kvm->lock);
2345 preempt_disable();
2346 kvm->arch.epoch = tod - get_tod_clock();
2347 kvm_s390_vcpu_block_all(kvm);
2348 kvm_for_each_vcpu(i, vcpu, kvm)
2349 vcpu->arch.sie_block->epoch = kvm->arch.epoch;
2350 kvm_s390_vcpu_unblock_all(kvm);
2351 preempt_enable();
2352 mutex_unlock(&kvm->lock);
2353}
2354
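/*
 * The arithmetic above in one line: SIE presents the guest with
 * "host TOD + epoch", so storing "tod - get_tod_clock()" as the epoch
 * makes the guest observe exactly the requested value at the time of
 * the call:
 *
 *	guest_tod = get_tod_clock() + epoch
 *	          = get_tod_clock() + (tod - get_tod_clock())
 *	          = tod
 */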
fa576c58
TH
2355/**
2356 * kvm_arch_fault_in_page - fault-in guest page if necessary
2357 * @vcpu: The corresponding virtual cpu
2358 * @gpa: Guest physical address
2359 * @writable: Whether the page should be writable or not
2360 *
2361 * Make sure that a guest page has been faulted-in on the host.
2362 *
2363 * Return: Zero on success, negative error code otherwise.
2364 */
2365long kvm_arch_fault_in_page(struct kvm_vcpu *vcpu, gpa_t gpa, int writable)
24eb3a82 2366{
527e30b4
MS
2367 return gmap_fault(vcpu->arch.gmap, gpa,
2368 writable ? FAULT_FLAG_WRITE : 0);
24eb3a82
DD
2369}
2370
3c038e6b
DD
2371static void __kvm_inject_pfault_token(struct kvm_vcpu *vcpu, bool start_token,
2372 unsigned long token)
2373{
2374 struct kvm_s390_interrupt inti;
383d0b05 2375 struct kvm_s390_irq irq;
3c038e6b
DD
2376
2377 if (start_token) {
383d0b05
JF
2378 irq.u.ext.ext_params2 = token;
2379 irq.type = KVM_S390_INT_PFAULT_INIT;
2380 WARN_ON_ONCE(kvm_s390_inject_vcpu(vcpu, &irq));
3c038e6b
DD
2381 } else {
2382 inti.type = KVM_S390_INT_PFAULT_DONE;
383d0b05 2383 inti.parm64 = token;
3c038e6b
DD
2384 WARN_ON_ONCE(kvm_s390_inject_vm(vcpu->kvm, &inti));
2385 }
2386}
2387
2388void kvm_arch_async_page_not_present(struct kvm_vcpu *vcpu,
2389 struct kvm_async_pf *work)
2390{
2391 trace_kvm_s390_pfault_init(vcpu, work->arch.pfault_token);
2392 __kvm_inject_pfault_token(vcpu, true, work->arch.pfault_token);
2393}
2394
2395void kvm_arch_async_page_present(struct kvm_vcpu *vcpu,
2396 struct kvm_async_pf *work)
2397{
2398 trace_kvm_s390_pfault_done(vcpu, work->arch.pfault_token);
2399 __kvm_inject_pfault_token(vcpu, false, work->arch.pfault_token);
2400}
2401
2402void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu,
2403 struct kvm_async_pf *work)
2404{
2405 /* s390 will always inject the page directly */
2406}
2407
2408bool kvm_arch_can_inject_async_page_present(struct kvm_vcpu *vcpu)
2409{
2410 /*
2411 * s390 will always inject the page directly,
 2412 * but we still want check_async_completion to clean up
2413 */
2414 return true;
2415}
2416
2417static int kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu)
2418{
2419 hva_t hva;
2420 struct kvm_arch_async_pf arch;
2421 int rc;
2422
2423 if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
2424 return 0;
2425 if ((vcpu->arch.sie_block->gpsw.mask & vcpu->arch.pfault_select) !=
2426 vcpu->arch.pfault_compare)
2427 return 0;
2428 if (psw_extint_disabled(vcpu))
2429 return 0;
9a022067 2430 if (kvm_s390_vcpu_has_irq(vcpu, 0))
3c038e6b
DD
2431 return 0;
2432 if (!(vcpu->arch.sie_block->gcr[0] & 0x200ul))
2433 return 0;
2434 if (!vcpu->arch.gmap->pfault_enabled)
2435 return 0;
2436
81480cc1
HC
2437 hva = gfn_to_hva(vcpu->kvm, gpa_to_gfn(current->thread.gmap_addr));
2438 hva += current->thread.gmap_addr & ~PAGE_MASK;
2439 if (read_guest_real(vcpu, vcpu->arch.pfault_token, &arch.pfault_token, 8))
3c038e6b
DD
2440 return 0;
2441
2442 rc = kvm_setup_async_pf(vcpu, current->thread.gmap_addr, hva, &arch);
2443 return rc;
2444}
2445
3fb4c40f 2446static int vcpu_pre_run(struct kvm_vcpu *vcpu)
b0c632db 2447{
3fb4c40f 2448 int rc, cpuflags;
e168bf8d 2449
3c038e6b
DD
2450 /*
2451 * On s390 notifications for arriving pages will be delivered directly
 2452 * to the guest, but the housekeeping for completed pfaults is
2453 * handled outside the worker.
2454 */
2455 kvm_check_async_pf_completion(vcpu);
2456
7ec7c8c7
CB
2457 vcpu->arch.sie_block->gg14 = vcpu->run->s.regs.gprs[14];
2458 vcpu->arch.sie_block->gg15 = vcpu->run->s.regs.gprs[15];
b0c632db
HC
2459
2460 if (need_resched())
2461 schedule();
2462
d3a73acb 2463 if (test_cpu_flag(CIF_MCCK_PENDING))
71cde587
CB
2464 s390_handle_mcck();
2465
79395031
JF
2466 if (!kvm_is_ucontrol(vcpu->kvm)) {
2467 rc = kvm_s390_deliver_pending_interrupts(vcpu);
2468 if (rc)
2469 return rc;
2470 }
0ff31867 2471
2c70fe44
CB
2472 rc = kvm_s390_handle_requests(vcpu);
2473 if (rc)
2474 return rc;
2475
27291e21
DH
2476 if (guestdbg_enabled(vcpu)) {
2477 kvm_s390_backup_guest_per_regs(vcpu);
2478 kvm_s390_patch_guest_per_regs(vcpu);
2479 }
2480
b0c632db 2481 vcpu->arch.sie_block->icptcode = 0;
3fb4c40f
TH
2482 cpuflags = atomic_read(&vcpu->arch.sie_block->cpuflags);
2483 VCPU_EVENT(vcpu, 6, "entering sie flags %x", cpuflags);
2484 trace_kvm_s390_sie_enter(vcpu, cpuflags);
2b29a9fd 2485
3fb4c40f
TH
2486 return 0;
2487}
2488
492d8642
TH
2489static int vcpu_post_run_fault_in_sie(struct kvm_vcpu *vcpu)
2490{
56317920
DH
2491 struct kvm_s390_pgm_info pgm_info = {
2492 .code = PGM_ADDRESSING,
2493 };
2494 u8 opcode, ilen;
492d8642
TH
2495 int rc;
2496
2497 VCPU_EVENT(vcpu, 3, "%s", "fault in sie instruction");
2498 trace_kvm_s390_sie_fault(vcpu);
2499
2500 /*
2501 * We want to inject an addressing exception, which is defined as a
2502 * suppressing or terminating exception. However, since we came here
2503 * by a DAT access exception, the PSW still points to the faulting
2504 * instruction since DAT exceptions are nullifying. So we've got
2505 * to look up the current opcode to get the length of the instruction
2506 * to be able to forward the PSW.
2507 */
65977322 2508 rc = read_guest_instr(vcpu, &opcode, 1);
56317920 2509 ilen = insn_length(opcode);
9b0d721a
DH
2510 if (rc < 0) {
2511 return rc;
2512 } else if (rc) {
2513 /* Instruction-Fetching Exceptions - we can't detect the ilen.
2514 * Forward by arbitrary ilc, injection will take care of
2515 * nullification if necessary.
2516 */
2517 pgm_info = vcpu->arch.pgm;
2518 ilen = 4;
2519 }
56317920
DH
2520 pgm_info.flags = ilen | KVM_S390_PGM_FLAGS_ILC_VALID;
2521 kvm_s390_forward_psw(vcpu, ilen);
2522 return kvm_s390_inject_prog_irq(vcpu, &pgm_info);
492d8642
TH
2523}
2524
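/*
 * Background for the ilen lookup above: on s390 the two leftmost bits of
 * the first opcode byte encode the instruction length, which is what
 * insn_length() decodes. A minimal equivalent sketch (illustrative, not
 * the in-tree helper):
 */
#if 0
static int example_insn_length(u8 opcode)
{
	switch (opcode >> 6) {
	case 0:			/* 00 -> 2 bytes */
		return 2;
	case 1:			/* 01 -> 4 bytes */
	case 2:			/* 10 -> 4 bytes */
		return 4;
	default:		/* 11 -> 6 bytes */
		return 6;
	}
}
#endif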
3fb4c40f
TH
2525static int vcpu_post_run(struct kvm_vcpu *vcpu, int exit_reason)
2526{
2b29a9fd
DD
2527 VCPU_EVENT(vcpu, 6, "exit sie icptcode %d",
2528 vcpu->arch.sie_block->icptcode);
2529 trace_kvm_s390_sie_exit(vcpu, vcpu->arch.sie_block->icptcode);
2530
27291e21
DH
2531 if (guestdbg_enabled(vcpu))
2532 kvm_s390_restore_guest_per_regs(vcpu);
2533
7ec7c8c7
CB
2534 vcpu->run->s.regs.gprs[14] = vcpu->arch.sie_block->gg14;
2535 vcpu->run->s.regs.gprs[15] = vcpu->arch.sie_block->gg15;
71f116bf
DH
2536
2537 if (vcpu->arch.sie_block->icptcode > 0) {
2538 int rc = kvm_handle_sie_intercept(vcpu);
2539
2540 if (rc != -EOPNOTSUPP)
2541 return rc;
2542 vcpu->run->exit_reason = KVM_EXIT_S390_SIEIC;
2543 vcpu->run->s390_sieic.icptcode = vcpu->arch.sie_block->icptcode;
2544 vcpu->run->s390_sieic.ipa = vcpu->arch.sie_block->ipa;
2545 vcpu->run->s390_sieic.ipb = vcpu->arch.sie_block->ipb;
2546 return -EREMOTE;
2547 } else if (exit_reason != -EFAULT) {
2548 vcpu->stat.exit_null++;
2549 return 0;
210b1607
TH
2550 } else if (kvm_is_ucontrol(vcpu->kvm)) {
2551 vcpu->run->exit_reason = KVM_EXIT_S390_UCONTROL;
2552 vcpu->run->s390_ucontrol.trans_exc_code =
2553 current->thread.gmap_addr;
2554 vcpu->run->s390_ucontrol.pgm_code = 0x10;
71f116bf 2555 return -EREMOTE;
24eb3a82 2556 } else if (current->thread.gmap_pfault) {
3c038e6b 2557 trace_kvm_s390_major_guest_pfault(vcpu);
24eb3a82 2558 current->thread.gmap_pfault = 0;
71f116bf
DH
2559 if (kvm_arch_setup_async_pf(vcpu))
2560 return 0;
2561 return kvm_arch_fault_in_page(vcpu, current->thread.gmap_addr, 1);
a76ccff6 2562 }
71f116bf 2563 return vcpu_post_run_fault_in_sie(vcpu);
3fb4c40f
TH
2564}
2565
2566static int __vcpu_run(struct kvm_vcpu *vcpu)
2567{
2568 int rc, exit_reason;
2569
800c1065
TH
2570 /*
2571 * We try to hold kvm->srcu during most of vcpu_run (except when run-
2572 * ning the guest), so that memslots (and other stuff) are protected
2573 */
2574 vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
2575
a76ccff6
TH
2576 do {
2577 rc = vcpu_pre_run(vcpu);
2578 if (rc)
2579 break;
3fb4c40f 2580
800c1065 2581 srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
a76ccff6
TH
2582 /*
2583 * As PF_VCPU will be used in fault handler, between
2584 * guest_enter and guest_exit should be no uaccess.
2585 */
0097d12e
CB
2586 local_irq_disable();
2587 __kvm_guest_enter();
db0758b2 2588 __disable_cpu_timer_accounting(vcpu);
0097d12e 2589 local_irq_enable();
a76ccff6
TH
2590 exit_reason = sie64a(vcpu->arch.sie_block,
2591 vcpu->run->s.regs.gprs);
0097d12e 2592 local_irq_disable();
db0758b2 2593 __enable_cpu_timer_accounting(vcpu);
0097d12e
CB
2594 __kvm_guest_exit();
2595 local_irq_enable();
800c1065 2596 vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
a76ccff6
TH
2597
2598 rc = vcpu_post_run(vcpu, exit_reason);
27291e21 2599 } while (!signal_pending(current) && !guestdbg_exit_pending(vcpu) && !rc);
3fb4c40f 2600
800c1065 2601 srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
e168bf8d 2602 return rc;
b0c632db
HC
2603}
2604
b028ee3e
DH
2605static void sync_regs(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
2606{
2607 vcpu->arch.sie_block->gpsw.mask = kvm_run->psw_mask;
2608 vcpu->arch.sie_block->gpsw.addr = kvm_run->psw_addr;
2609 if (kvm_run->kvm_dirty_regs & KVM_SYNC_PREFIX)
2610 kvm_s390_set_prefix(vcpu, kvm_run->s.regs.prefix);
2611 if (kvm_run->kvm_dirty_regs & KVM_SYNC_CRS) {
2612 memcpy(&vcpu->arch.sie_block->gcr, &kvm_run->s.regs.crs, 128);
d3d692c8
DH
2613 /* some control register changes require a tlb flush */
2614 kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
b028ee3e
DH
2615 }
2616 if (kvm_run->kvm_dirty_regs & KVM_SYNC_ARCH0) {
4287f247 2617 kvm_s390_set_cpu_timer(vcpu, kvm_run->s.regs.cputm);
b028ee3e
DH
2618 vcpu->arch.sie_block->ckc = kvm_run->s.regs.ckc;
2619 vcpu->arch.sie_block->todpr = kvm_run->s.regs.todpr;
2620 vcpu->arch.sie_block->pp = kvm_run->s.regs.pp;
2621 vcpu->arch.sie_block->gbea = kvm_run->s.regs.gbea;
2622 }
2623 if (kvm_run->kvm_dirty_regs & KVM_SYNC_PFAULT) {
2624 vcpu->arch.pfault_token = kvm_run->s.regs.pft;
2625 vcpu->arch.pfault_select = kvm_run->s.regs.pfs;
2626 vcpu->arch.pfault_compare = kvm_run->s.regs.pfc;
9fbd8082
DH
2627 if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
2628 kvm_clear_async_pf_completion_queue(vcpu);
b028ee3e
DH
2629 }
2630 kvm_run->kvm_dirty_regs = 0;
2631}
2632
2633static void store_regs(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
2634{
2635 kvm_run->psw_mask = vcpu->arch.sie_block->gpsw.mask;
2636 kvm_run->psw_addr = vcpu->arch.sie_block->gpsw.addr;
2637 kvm_run->s.regs.prefix = kvm_s390_get_prefix(vcpu);
2638 memcpy(&kvm_run->s.regs.crs, &vcpu->arch.sie_block->gcr, 128);
4287f247 2639 kvm_run->s.regs.cputm = kvm_s390_get_cpu_timer(vcpu);
b028ee3e
DH
2640 kvm_run->s.regs.ckc = vcpu->arch.sie_block->ckc;
2641 kvm_run->s.regs.todpr = vcpu->arch.sie_block->todpr;
2642 kvm_run->s.regs.pp = vcpu->arch.sie_block->pp;
2643 kvm_run->s.regs.gbea = vcpu->arch.sie_block->gbea;
2644 kvm_run->s.regs.pft = vcpu->arch.pfault_token;
2645 kvm_run->s.regs.pfs = vcpu->arch.pfault_select;
2646 kvm_run->s.regs.pfc = vcpu->arch.pfault_compare;
2647}
2648
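/*
 * Userspace side of the sync_regs()/store_regs() pair above, as a
 * sketch: set a field in the shared kvm_run area, mark it dirty, then
 * call KVM_RUN. "run" is the mmap()ed kvm_run of a vcpu fd; the names
 * are illustrative.
 */
#if 0
static int example_set_prefix_and_run(int vcpu_fd, struct kvm_run *run,
				      __u32 prefix)
{
	run->s.regs.prefix = prefix;
	run->kvm_dirty_regs |= KVM_SYNC_PREFIX;	/* consumed by sync_regs() */
	return ioctl(vcpu_fd, KVM_RUN, 0);
}
#endif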
b0c632db
HC
2649int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
2650{
8f2abe6a 2651 int rc;
b0c632db
HC
2652 sigset_t sigsaved;
2653
27291e21
DH
2654 if (guestdbg_exit_pending(vcpu)) {
2655 kvm_s390_prepare_debug_exit(vcpu);
2656 return 0;
2657 }
2658
b0c632db
HC
2659 if (vcpu->sigset_active)
2660 sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);
2661
6352e4d2
DH
2662 if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm)) {
2663 kvm_s390_vcpu_start(vcpu);
2664 } else if (is_vcpu_stopped(vcpu)) {
ea2cdd27 2665 pr_err_ratelimited("can't run stopped vcpu %d\n",
6352e4d2
DH
2666 vcpu->vcpu_id);
2667 return -EINVAL;
2668 }
b0c632db 2669
b028ee3e 2670 sync_regs(vcpu, kvm_run);
db0758b2 2671 enable_cpu_timer_accounting(vcpu);
d7b0b5eb 2672
dab4079d 2673 might_fault();
a76ccff6 2674 rc = __vcpu_run(vcpu);
9ace903d 2675
b1d16c49
CE
2676 if (signal_pending(current) && !rc) {
2677 kvm_run->exit_reason = KVM_EXIT_INTR;
8f2abe6a 2678 rc = -EINTR;
b1d16c49 2679 }
8f2abe6a 2680
27291e21
DH
2681 if (guestdbg_exit_pending(vcpu) && !rc) {
2682 kvm_s390_prepare_debug_exit(vcpu);
2683 rc = 0;
2684 }
2685
8f2abe6a 2686 if (rc == -EREMOTE) {
71f116bf 2687 /* userspace support is needed, kvm_run has been prepared */
8f2abe6a
CB
2688 rc = 0;
2689 }
b0c632db 2690
db0758b2 2691 disable_cpu_timer_accounting(vcpu);
b028ee3e 2692 store_regs(vcpu, kvm_run);
d7b0b5eb 2693
b0c632db
HC
2694 if (vcpu->sigset_active)
2695 sigprocmask(SIG_SETMASK, &sigsaved, NULL);
2696
b0c632db 2697 vcpu->stat.exit_userspace++;
7e8e6ab4 2698 return rc;
b0c632db
HC
2699}
2700
b0c632db
HC
2701/*
2702 * store status at address
 2703 * we have two special cases:
2704 * KVM_S390_STORE_STATUS_NOADDR: -> 0x1200 on 64 bit
2705 * KVM_S390_STORE_STATUS_PREFIXED: -> prefix
2706 */
d0bce605 2707int kvm_s390_store_status_unloaded(struct kvm_vcpu *vcpu, unsigned long gpa)
b0c632db 2708{
092670cd 2709 unsigned char archmode = 1;
9abc2a08 2710 freg_t fprs[NUM_FPRS];
fda902cb 2711 unsigned int px;
4287f247 2712 u64 clkcomp, cputm;
d0bce605 2713 int rc;
b0c632db 2714
d9a3a09a 2715 px = kvm_s390_get_prefix(vcpu);
d0bce605
HC
2716 if (gpa == KVM_S390_STORE_STATUS_NOADDR) {
2717 if (write_guest_abs(vcpu, 163, &archmode, 1))
b0c632db 2718 return -EFAULT;
d9a3a09a 2719 gpa = 0;
d0bce605
HC
2720 } else if (gpa == KVM_S390_STORE_STATUS_PREFIXED) {
2721 if (write_guest_real(vcpu, 163, &archmode, 1))
b0c632db 2722 return -EFAULT;
d9a3a09a
MS
2723 gpa = px;
2724 } else
2725 gpa -= __LC_FPREGS_SAVE_AREA;
9abc2a08
DH
2726
2727 /* manually convert vector registers if necessary */
2728 if (MACHINE_HAS_VX) {
9522b37f 2729 convert_vx_to_fp(fprs, (__vector128 *) vcpu->run->s.regs.vrs);
9abc2a08
DH
2730 rc = write_guest_abs(vcpu, gpa + __LC_FPREGS_SAVE_AREA,
2731 fprs, 128);
2732 } else {
2733 rc = write_guest_abs(vcpu, gpa + __LC_FPREGS_SAVE_AREA,
6fd8e67d 2734 vcpu->run->s.regs.fprs, 128);
9abc2a08 2735 }
d9a3a09a 2736 rc |= write_guest_abs(vcpu, gpa + __LC_GPREGS_SAVE_AREA,
d0bce605 2737 vcpu->run->s.regs.gprs, 128);
d9a3a09a 2738 rc |= write_guest_abs(vcpu, gpa + __LC_PSW_SAVE_AREA,
d0bce605 2739 &vcpu->arch.sie_block->gpsw, 16);
d9a3a09a 2740 rc |= write_guest_abs(vcpu, gpa + __LC_PREFIX_SAVE_AREA,
fda902cb 2741 &px, 4);
d9a3a09a 2742 rc |= write_guest_abs(vcpu, gpa + __LC_FP_CREG_SAVE_AREA,
9abc2a08 2743 &vcpu->run->s.regs.fpc, 4);
d9a3a09a 2744 rc |= write_guest_abs(vcpu, gpa + __LC_TOD_PROGREG_SAVE_AREA,
d0bce605 2745 &vcpu->arch.sie_block->todpr, 4);
4287f247 2746 cputm = kvm_s390_get_cpu_timer(vcpu);
d9a3a09a 2747 rc |= write_guest_abs(vcpu, gpa + __LC_CPU_TIMER_SAVE_AREA,
4287f247 2748 &cputm, 8);
178bd789 2749 clkcomp = vcpu->arch.sie_block->ckc >> 8;
d9a3a09a 2750 rc |= write_guest_abs(vcpu, gpa + __LC_CLOCK_COMP_SAVE_AREA,
d0bce605 2751 &clkcomp, 8);
d9a3a09a 2752 rc |= write_guest_abs(vcpu, gpa + __LC_AREGS_SAVE_AREA,
d0bce605 2753 &vcpu->run->s.regs.acrs, 64);
d9a3a09a 2754 rc |= write_guest_abs(vcpu, gpa + __LC_CREGS_SAVE_AREA,
d0bce605
HC
2755 &vcpu->arch.sie_block->gcr, 128);
2756 return rc ? -EFAULT : 0;
b0c632db
HC
2757}
2758
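/*
 * Sketch of the ioctl that ends up here (see kvm_arch_vcpu_ioctl()
 * below): userspace passes either a real guest absolute address or one
 * of the two magic values handled above. "vcpu_fd" is illustrative.
 */
#if 0
static int example_store_status(int vcpu_fd)
{
	/* store at the prefix area instead of an explicit address */
	return ioctl(vcpu_fd, KVM_S390_STORE_STATUS,
		     KVM_S390_STORE_STATUS_PREFIXED);
}
#endif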
e879892c
TH
2759int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
2760{
2761 /*
2762 * The guest FPRS and ACRS are in the host FPRS/ACRS due to the lazy
 2763 * copying in vcpu load/put. Let's update our copies before we save
2764 * it into the save area
2765 */
d0164ee2 2766 save_fpu_regs();
9abc2a08 2767 vcpu->run->s.regs.fpc = current->thread.fpu.fpc;
e879892c
TH
2768 save_access_regs(vcpu->run->s.regs.acrs);
2769
2770 return kvm_s390_store_status_unloaded(vcpu, addr);
2771}
2772
bc17de7c
EF
2773/*
2774 * store additional status at address
2775 */
2776int kvm_s390_store_adtl_status_unloaded(struct kvm_vcpu *vcpu,
2777 unsigned long gpa)
2778{
2779 /* Only bits 0-53 are used for address formation */
2780 if (!(gpa & ~0x3ff))
2781 return 0;
2782
2783 return write_guest_abs(vcpu, gpa & ~0x3ff,
2784 (void *)&vcpu->run->s.regs.vrs, 512);
2785}
2786
2787int kvm_s390_vcpu_store_adtl_status(struct kvm_vcpu *vcpu, unsigned long addr)
2788{
2789 if (!test_kvm_facility(vcpu->kvm, 129))
2790 return 0;
2791
2792 /*
 2793 * The guest VXRs are in the host VXRs due to the lazy
9977e886
HB
2794 * copying in vcpu load/put. We can simply call save_fpu_regs()
2795 * to save the current register state because we are in the
2796 * middle of a load/put cycle.
2797 *
2798 * Let's update our copies before we save it into the save area.
bc17de7c 2799 */
d0164ee2 2800 save_fpu_regs();
bc17de7c
EF
2801
2802 return kvm_s390_store_adtl_status_unloaded(vcpu, addr);
2803}
2804
8ad35755
DH
2805static void __disable_ibs_on_vcpu(struct kvm_vcpu *vcpu)
2806{
2807 kvm_check_request(KVM_REQ_ENABLE_IBS, vcpu);
8e236546 2808 kvm_s390_sync_request(KVM_REQ_DISABLE_IBS, vcpu);
8ad35755
DH
2809}
2810
2811static void __disable_ibs_on_all_vcpus(struct kvm *kvm)
2812{
2813 unsigned int i;
2814 struct kvm_vcpu *vcpu;
2815
2816 kvm_for_each_vcpu(i, vcpu, kvm) {
2817 __disable_ibs_on_vcpu(vcpu);
2818 }
2819}
2820
2821static void __enable_ibs_on_vcpu(struct kvm_vcpu *vcpu)
2822{
09a400e7
DH
2823 if (!sclp.has_ibs)
2824 return;
8ad35755 2825 kvm_check_request(KVM_REQ_DISABLE_IBS, vcpu);
8e236546 2826 kvm_s390_sync_request(KVM_REQ_ENABLE_IBS, vcpu);
8ad35755
DH
2827}
2828
6852d7b6
DH
2829void kvm_s390_vcpu_start(struct kvm_vcpu *vcpu)
2830{
8ad35755
DH
2831 int i, online_vcpus, started_vcpus = 0;
2832
2833 if (!is_vcpu_stopped(vcpu))
2834 return;
2835
6852d7b6 2836 trace_kvm_s390_vcpu_start_stop(vcpu->vcpu_id, 1);
8ad35755 2837 /* Only one cpu at a time may enter/leave the STOPPED state. */
433b9ee4 2838 spin_lock(&vcpu->kvm->arch.start_stop_lock);
8ad35755
DH
2839 online_vcpus = atomic_read(&vcpu->kvm->online_vcpus);
2840
2841 for (i = 0; i < online_vcpus; i++) {
2842 if (!is_vcpu_stopped(vcpu->kvm->vcpus[i]))
2843 started_vcpus++;
2844 }
2845
2846 if (started_vcpus == 0) {
2847 /* we're the only active VCPU -> speed it up */
2848 __enable_ibs_on_vcpu(vcpu);
2849 } else if (started_vcpus == 1) {
2850 /*
2851 * As we are starting a second VCPU, we have to disable
2852 * the IBS facility on all VCPUs to remove potentially
 2853 * outstanding ENABLE requests.
2854 */
2855 __disable_ibs_on_all_vcpus(vcpu->kvm);
2856 }
2857
805de8f4 2858 atomic_andnot(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);
8ad35755
DH
2859 /*
2860 * Another VCPU might have used IBS while we were offline.
2861 * Let's play safe and flush the VCPU at startup.
2862 */
d3d692c8 2863 kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
433b9ee4 2864 spin_unlock(&vcpu->kvm->arch.start_stop_lock);
8ad35755 2865 return;
6852d7b6
DH
2866}
2867
2868void kvm_s390_vcpu_stop(struct kvm_vcpu *vcpu)
2869{
8ad35755
DH
2870 int i, online_vcpus, started_vcpus = 0;
2871 struct kvm_vcpu *started_vcpu = NULL;
2872
2873 if (is_vcpu_stopped(vcpu))
2874 return;
2875
6852d7b6 2876 trace_kvm_s390_vcpu_start_stop(vcpu->vcpu_id, 0);
8ad35755 2877 /* Only one cpu at a time may enter/leave the STOPPED state. */
433b9ee4 2878 spin_lock(&vcpu->kvm->arch.start_stop_lock);
8ad35755
DH
2879 online_vcpus = atomic_read(&vcpu->kvm->online_vcpus);
2880
32f5ff63 2881 /* SIGP STOP and SIGP STOP AND STORE STATUS have been fully processed */
6cddd432 2882 kvm_s390_clear_stop_irq(vcpu);
32f5ff63 2883
805de8f4 2884 atomic_or(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);
8ad35755
DH
2885 __disable_ibs_on_vcpu(vcpu);
2886
2887 for (i = 0; i < online_vcpus; i++) {
2888 if (!is_vcpu_stopped(vcpu->kvm->vcpus[i])) {
2889 started_vcpus++;
2890 started_vcpu = vcpu->kvm->vcpus[i];
2891 }
2892 }
2893
2894 if (started_vcpus == 1) {
2895 /*
2896 * As we only have one VCPU left, we want to enable the
2897 * IBS facility for that VCPU to speed it up.
2898 */
2899 __enable_ibs_on_vcpu(started_vcpu);
2900 }
2901
433b9ee4 2902 spin_unlock(&vcpu->kvm->arch.start_stop_lock);
8ad35755 2903 return;
6852d7b6
DH
2904}
2905
d6712df9
CH
2906static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
2907 struct kvm_enable_cap *cap)
2908{
2909 int r;
2910
2911 if (cap->flags)
2912 return -EINVAL;
2913
2914 switch (cap->cap) {
fa6b7fe9
CH
2915 case KVM_CAP_S390_CSS_SUPPORT:
2916 if (!vcpu->kvm->arch.css_support) {
2917 vcpu->kvm->arch.css_support = 1;
c92ea7b9 2918 VM_EVENT(vcpu->kvm, 3, "%s", "ENABLE: CSS support");
fa6b7fe9
CH
2919 trace_kvm_s390_enable_css(vcpu->kvm);
2920 }
2921 r = 0;
2922 break;
d6712df9
CH
2923 default:
2924 r = -EINVAL;
2925 break;
2926 }
2927 return r;
2928}
2929
41408c28
TH
2930static long kvm_s390_guest_mem_op(struct kvm_vcpu *vcpu,
2931 struct kvm_s390_mem_op *mop)
2932{
2933 void __user *uaddr = (void __user *)mop->buf;
2934 void *tmpbuf = NULL;
2935 int r, srcu_idx;
2936 const u64 supported_flags = KVM_S390_MEMOP_F_INJECT_EXCEPTION
2937 | KVM_S390_MEMOP_F_CHECK_ONLY;
2938
2939 if (mop->flags & ~supported_flags)
2940 return -EINVAL;
2941
2942 if (mop->size > MEM_OP_MAX_SIZE)
2943 return -E2BIG;
2944
2945 if (!(mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY)) {
2946 tmpbuf = vmalloc(mop->size);
2947 if (!tmpbuf)
2948 return -ENOMEM;
2949 }
2950
2951 srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
2952
2953 switch (mop->op) {
2954 case KVM_S390_MEMOP_LOGICAL_READ:
2955 if (mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY) {
92c96321
DH
2956 r = check_gva_range(vcpu, mop->gaddr, mop->ar,
2957 mop->size, GACC_FETCH);
41408c28
TH
2958 break;
2959 }
2960 r = read_guest(vcpu, mop->gaddr, mop->ar, tmpbuf, mop->size);
2961 if (r == 0) {
2962 if (copy_to_user(uaddr, tmpbuf, mop->size))
2963 r = -EFAULT;
2964 }
2965 break;
2966 case KVM_S390_MEMOP_LOGICAL_WRITE:
2967 if (mop->flags & KVM_S390_MEMOP_F_CHECK_ONLY) {
92c96321
DH
2968 r = check_gva_range(vcpu, mop->gaddr, mop->ar,
2969 mop->size, GACC_STORE);
41408c28
TH
2970 break;
2971 }
2972 if (copy_from_user(tmpbuf, uaddr, mop->size)) {
2973 r = -EFAULT;
2974 break;
2975 }
2976 r = write_guest(vcpu, mop->gaddr, mop->ar, tmpbuf, mop->size);
2977 break;
2978 default:
2979 r = -EINVAL;
2980 }
2981
2982 srcu_read_unlock(&vcpu->kvm->srcu, srcu_idx);
2983
2984 if (r > 0 && (mop->flags & KVM_S390_MEMOP_F_INJECT_EXCEPTION) != 0)
2985 kvm_s390_inject_prog_irq(vcpu, &vcpu->arch.pgm);
2986
2987 vfree(tmpbuf);
2988 return r;
2989}
2990
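/*
 * Userspace view of kvm_s390_guest_mem_op() above, as a sketch: read
 * guest memory through the logical-address path, including DAT
 * translation. Assumes <sys/ioctl.h> and <linux/kvm.h>; "vcpu_fd" and
 * the function name are illustrative.
 */
#if 0
static int example_read_guest(int vcpu_fd, __u64 gaddr, void *buf, __u32 len)
{
	struct kvm_s390_mem_op op = {
		.gaddr	= gaddr,
		.size	= len,
		.op	= KVM_S390_MEMOP_LOGICAL_READ,
		.buf	= (__u64)(unsigned long)buf,
		.ar	= 0,			/* access register 0 */
	};

	return ioctl(vcpu_fd, KVM_S390_MEM_OP, &op);
}
#endif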
b0c632db
HC
2991long kvm_arch_vcpu_ioctl(struct file *filp,
2992 unsigned int ioctl, unsigned long arg)
2993{
2994 struct kvm_vcpu *vcpu = filp->private_data;
2995 void __user *argp = (void __user *)arg;
800c1065 2996 int idx;
bc923cc9 2997 long r;
b0c632db 2998
93736624 2999 switch (ioctl) {
47b43c52
JF
3000 case KVM_S390_IRQ: {
3001 struct kvm_s390_irq s390irq;
3002
3003 r = -EFAULT;
3004 if (copy_from_user(&s390irq, argp, sizeof(s390irq)))
3005 break;
3006 r = kvm_s390_inject_vcpu(vcpu, &s390irq);
3007 break;
3008 }
93736624 3009 case KVM_S390_INTERRUPT: {
ba5c1e9b 3010 struct kvm_s390_interrupt s390int;
383d0b05 3011 struct kvm_s390_irq s390irq;
ba5c1e9b 3012
93736624 3013 r = -EFAULT;
ba5c1e9b 3014 if (copy_from_user(&s390int, argp, sizeof(s390int)))
93736624 3015 break;
383d0b05
JF
3016 if (s390int_to_s390irq(&s390int, &s390irq))
3017 return -EINVAL;
3018 r = kvm_s390_inject_vcpu(vcpu, &s390irq);
93736624 3019 break;
ba5c1e9b 3020 }
b0c632db 3021 case KVM_S390_STORE_STATUS:
800c1065 3022 idx = srcu_read_lock(&vcpu->kvm->srcu);
bc923cc9 3023 r = kvm_s390_vcpu_store_status(vcpu, arg);
800c1065 3024 srcu_read_unlock(&vcpu->kvm->srcu, idx);
bc923cc9 3025 break;
b0c632db
HC
3026 case KVM_S390_SET_INITIAL_PSW: {
3027 psw_t psw;
3028
bc923cc9 3029 r = -EFAULT;
b0c632db 3030 if (copy_from_user(&psw, argp, sizeof(psw)))
bc923cc9
AK
3031 break;
3032 r = kvm_arch_vcpu_ioctl_set_initial_psw(vcpu, psw);
3033 break;
b0c632db
HC
3034 }
3035 case KVM_S390_INITIAL_RESET:
bc923cc9
AK
3036 r = kvm_arch_vcpu_ioctl_initial_reset(vcpu);
3037 break;
14eebd91
CO
3038 case KVM_SET_ONE_REG:
3039 case KVM_GET_ONE_REG: {
3040 struct kvm_one_reg reg;
3041 r = -EFAULT;
3042 if (copy_from_user(&reg, argp, sizeof(reg)))
3043 break;
3044 if (ioctl == KVM_SET_ONE_REG)
3045 r = kvm_arch_vcpu_ioctl_set_one_reg(vcpu, &reg);
3046 else
3047 r = kvm_arch_vcpu_ioctl_get_one_reg(vcpu, &reg);
3048 break;
3049 }
27e0393f
CO
3050#ifdef CONFIG_KVM_S390_UCONTROL
3051 case KVM_S390_UCAS_MAP: {
3052 struct kvm_s390_ucas_mapping ucasmap;
3053
3054 if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
3055 r = -EFAULT;
3056 break;
3057 }
3058
3059 if (!kvm_is_ucontrol(vcpu->kvm)) {
3060 r = -EINVAL;
3061 break;
3062 }
3063
3064 r = gmap_map_segment(vcpu->arch.gmap, ucasmap.user_addr,
3065 ucasmap.vcpu_addr, ucasmap.length);
3066 break;
3067 }
3068 case KVM_S390_UCAS_UNMAP: {
3069 struct kvm_s390_ucas_mapping ucasmap;
3070
3071 if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
3072 r = -EFAULT;
3073 break;
3074 }
3075
3076 if (!kvm_is_ucontrol(vcpu->kvm)) {
3077 r = -EINVAL;
3078 break;
3079 }
3080
3081 r = gmap_unmap_segment(vcpu->arch.gmap, ucasmap.vcpu_addr,
3082 ucasmap.length);
3083 break;
3084 }
3085#endif
ccc7910f 3086 case KVM_S390_VCPU_FAULT: {
527e30b4 3087 r = gmap_fault(vcpu->arch.gmap, arg, 0);
ccc7910f
CO
3088 break;
3089 }
d6712df9
CH
3090 case KVM_ENABLE_CAP:
3091 {
3092 struct kvm_enable_cap cap;
3093 r = -EFAULT;
3094 if (copy_from_user(&cap, argp, sizeof(cap)))
3095 break;
3096 r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap);
3097 break;
3098 }
41408c28
TH
3099 case KVM_S390_MEM_OP: {
3100 struct kvm_s390_mem_op mem_op;
3101
3102 if (copy_from_user(&mem_op, argp, sizeof(mem_op)) == 0)
3103 r = kvm_s390_guest_mem_op(vcpu, &mem_op);
3104 else
3105 r = -EFAULT;
3106 break;
3107 }
816c7667
JF
3108 case KVM_S390_SET_IRQ_STATE: {
3109 struct kvm_s390_irq_state irq_state;
3110
3111 r = -EFAULT;
3112 if (copy_from_user(&irq_state, argp, sizeof(irq_state)))
3113 break;
3114 if (irq_state.len > VCPU_IRQS_MAX_BUF ||
3115 irq_state.len == 0 ||
3116 irq_state.len % sizeof(struct kvm_s390_irq) > 0) {
3117 r = -EINVAL;
3118 break;
3119 }
3120 r = kvm_s390_set_irq_state(vcpu,
3121 (void __user *) irq_state.buf,
3122 irq_state.len);
3123 break;
3124 }
3125 case KVM_S390_GET_IRQ_STATE: {
3126 struct kvm_s390_irq_state irq_state;
3127
3128 r = -EFAULT;
3129 if (copy_from_user(&irq_state, argp, sizeof(irq_state)))
3130 break;
3131 if (irq_state.len == 0) {
3132 r = -EINVAL;
3133 break;
3134 }
3135 r = kvm_s390_get_irq_state(vcpu,
3136 (__u8 __user *) irq_state.buf,
3137 irq_state.len);
3138 break;
3139 }
b0c632db 3140 default:
3e6afcf1 3141 r = -ENOTTY;
b0c632db 3142 }
bc923cc9 3143 return r;
b0c632db
HC
3144}
3145
5b1c1493
CO
3146int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
3147{
3148#ifdef CONFIG_KVM_S390_UCONTROL
3149 if ((vmf->pgoff == KVM_S390_SIE_PAGE_OFFSET)
3150 && (kvm_is_ucontrol(vcpu->kvm))) {
3151 vmf->page = virt_to_page(vcpu->arch.sie_block);
3152 get_page(vmf->page);
3153 return 0;
3154 }
3155#endif
3156 return VM_FAULT_SIGBUS;
3157}
3158
5587027c
AK
3159int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
3160 unsigned long npages)
db3fe4eb
TY
3161{
3162 return 0;
3163}
3164
b0c632db 3165/* Section: memory related */
f7784b8e
MT
3166int kvm_arch_prepare_memory_region(struct kvm *kvm,
3167 struct kvm_memory_slot *memslot,
09170a49 3168 const struct kvm_userspace_memory_region *mem,
7b6195a9 3169 enum kvm_mr_change change)
b0c632db 3170{
dd2887e7
NW
 3171 /* A few sanity checks. We can have memory slots which have to
 3172 start and end at a segment boundary (1MB). The memory in userland is
3173 ok to be fragmented into various different vmas. It is okay to mmap()
3174 and munmap() stuff in this slot after doing this call at any time */
b0c632db 3175
598841ca 3176 if (mem->userspace_addr & 0xffffful)
b0c632db
HC
3177 return -EINVAL;
3178
598841ca 3179 if (mem->memory_size & 0xffffful)
b0c632db
HC
3180 return -EINVAL;
3181
a3a92c31
DD
3182 if (mem->guest_phys_addr + mem->memory_size > kvm->arch.mem_limit)
3183 return -EINVAL;
3184
f7784b8e
MT
3185 return 0;
3186}
3187
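/*
 * Example of a memslot that passes the checks above: both the size and
 * the userspace address are 1MB aligned (plain mmap() only guarantees
 * page alignment, so the caller must align host_mem itself). The vm fd
 * and slot number are illustrative.
 */
#if 0
static int example_set_memslot(int vm_fd, void *host_mem)
{
	struct kvm_userspace_memory_region mem = {
		.slot		 = 0,
		.guest_phys_addr = 0,
		.memory_size	 = 0x100000,	/* 1 MB */
		.userspace_addr	 = (__u64)(unsigned long)host_mem,
	};

	return ioctl(vm_fd, KVM_SET_USER_MEMORY_REGION, &mem);
}
#endif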
3188void kvm_arch_commit_memory_region(struct kvm *kvm,
09170a49 3189 const struct kvm_userspace_memory_region *mem,
8482644a 3190 const struct kvm_memory_slot *old,
f36f3f28 3191 const struct kvm_memory_slot *new,
8482644a 3192 enum kvm_mr_change change)
f7784b8e 3193{
f7850c92 3194 int rc;
f7784b8e 3195
2cef4deb
CB
3196 /* If the basics of the memslot do not change, we do not want
3197 * to update the gmap. Every update causes several unnecessary
3198 * segment translation exceptions. This is usually handled just
3199 * fine by the normal fault handler + gmap, but it will also
3200 * cause faults on the prefix page of running guest CPUs.
3201 */
3202 if (old->userspace_addr == mem->userspace_addr &&
3203 old->base_gfn * PAGE_SIZE == mem->guest_phys_addr &&
3204 old->npages * PAGE_SIZE == mem->memory_size)
3205 return;
598841ca
CO
3206
3207 rc = gmap_map_segment(kvm->arch.gmap, mem->userspace_addr,
3208 mem->guest_phys_addr, mem->memory_size);
3209 if (rc)
ea2cdd27 3210 pr_warn("failed to commit memory region\n");
598841ca 3211 return;
b0c632db
HC
3212}
3213
60a37709
AY
3214static inline unsigned long nonhyp_mask(int i)
3215{
3216 unsigned int nonhyp_fai = (sclp.hmfai << i * 2) >> 30;
3217
3218 return 0x0000ffffffffffffUL >> (nonhyp_fai << 4);
3219}
3220
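/*
 * Worked example for nonhyp_mask() above: hmfai appears to hold 2-bit
 * fields, one per facility doubleword (hence the i * 2 shift for
 * doubleword i). For i = 0 the field is the two leftmost bits of hmfai;
 * a field value of 1 yields
 *	0x0000ffffffffffffUL >> (1 << 4) = 0x00000000ffffffffUL,
 * i.e. only the first 32 facility bits of that doubleword survive the
 * mask.
 */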
3491caf2
CB
3221void kvm_arch_vcpu_block_finish(struct kvm_vcpu *vcpu)
3222{
3223 vcpu->valid_wakeup = false;
3224}
3225
b0c632db
HC
3226static int __init kvm_s390_init(void)
3227{
60a37709
AY
3228 int i;
3229
07197fd0
DH
3230 if (!sclp.has_sief2) {
3231 pr_info("SIE not available\n");
3232 return -ENODEV;
3233 }
3234
60a37709
AY
3235 for (i = 0; i < 16; i++)
3236 kvm_s390_fac_list_mask[i] |=
3237 S390_lowcore.stfle_fac_list[i] & nonhyp_mask(i);
3238
9d8d5786 3239 return kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
b0c632db
HC
3240}
3241
3242static void __exit kvm_s390_exit(void)
3243{
3244 kvm_exit();
3245}
3246
3247module_init(kvm_s390_init);
3248module_exit(kvm_s390_exit);
566af940
CH
3249
3250/*
3251 * Enable autoloading of the kvm module.
3252 * Note that we add the module alias here instead of virt/kvm/kvm_main.c
3253 * since x86 takes a different approach.
3254 */
3255#include <linux/miscdevice.h>
3256MODULE_ALIAS_MISCDEV(KVM_MINOR);
3257MODULE_ALIAS("devname:kvm");