]> git.proxmox.com Git - mirror_qemu.git/blame - target/s390x/kvm.c
s390x/tcg: a CPU cannot switch state due to an interrupt
[mirror_qemu.git] / target / s390x / kvm.c
CommitLineData
0e60a699
AG
1/*
2 * QEMU S390x KVM implementation
3 *
4 * Copyright (c) 2009 Alexander Graf <agraf@suse.de>
ccb084d3 5 * Copyright IBM Corp. 2012
0e60a699
AG
6 *
7 * This library is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2 of the License, or (at your option) any later version.
11 *
12 * This library is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
16 *
ccb084d3
CB
17 * Contributions after 2012-10-29 are licensed under the terms of the
18 * GNU GPL, version 2 or (at your option) any later version.
19 *
20 * You should have received a copy of the GNU (Lesser) General Public
0e60a699
AG
21 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
22 */
23
#include "qemu/osdep.h"
#include <sys/ioctl.h>

#include <inttypes.h>
#include <linux/kvm.h>
#include <asm/ptrace.h>

#include "qemu-common.h"
#include "cpu.h"
#include "internal.h"
#include "kvm_s390x.h"
#include "qemu/error-report.h"
#include "qemu/timer.h"
#include "sysemu/sysemu.h"
#include "sysemu/hw_accel.h"
#include "hw/hw.h"
#include "sysemu/device_tree.h"
#include "qapi/qmp/qjson.h"
#include "exec/gdbstub.h"
#include "exec/address-spaces.h"
#include "trace.h"
#include "qapi-event.h"
#include "hw/s390x/s390-pci-inst.h"
#include "hw/s390x/s390-pci-bus.h"
#include "hw/s390x/ipl.h"
#include "hw/s390x/ebcdic.h"
#include "exec/memattrs.h"
#include "hw/s390x/s390-virtio-ccw.h"
#include "hw/s390x/s390-virtio-hcall.h"
0e60a699 52
08564ecd
DA
#ifndef DEBUG_KVM
#define DEBUG_KVM 0
#endif

/*
 * Debug printf that is compiled out (but still type-checked) when
 * DEBUG_KVM is 0.
 *
 * Note: no trailing semicolon after "while (0)" -- the caller supplies
 * it.  The original definition ended in "while (0);", which makes
 *     if (cond) DPRINTF("..."); else ...
 * a syntax error because the extra ';' terminates the if statement.
 */
#define DPRINTF(fmt, ...) do {                \
    if (DEBUG_KVM) {                          \
        fprintf(stderr, fmt, ## __VA_ARGS__); \
    }                                         \
} while (0)
62
2b147555
DD
/* Shorthand: query a KVM_S390_VM_MEM_CTRL sub-attribute on VM state @s. */
#define kvm_vm_check_mem_attr(s, attr) \
    kvm_vm_check_attr(s, KVM_S390_VM_MEM_CTRL, attr)

/* Instruction-parsing-area opcode groups seen on SIE intercepts. */
#define IPA0_DIAG                       0x8300
#define IPA0_SIGP                       0xae00
#define IPA0_B2                         0xb200
#define IPA0_B9                         0xb900
#define IPA0_EB                         0xeb00
#define IPA0_E3                         0xe300

/* Privileged-instruction function codes within the 0xb2xx group. */
#define PRIV_B2_SCLP_CALL               0x20
#define PRIV_B2_CSCH                    0x30
#define PRIV_B2_HSCH                    0x31
#define PRIV_B2_MSCH                    0x32
#define PRIV_B2_SSCH                    0x33
#define PRIV_B2_STSCH                   0x34
#define PRIV_B2_TSCH                    0x35
#define PRIV_B2_TPI                     0x36
#define PRIV_B2_SAL                     0x37
#define PRIV_B2_RSCH                    0x38
#define PRIV_B2_STCRW                   0x39
#define PRIV_B2_STCPS                   0x3a
#define PRIV_B2_RCHP                    0x3b
#define PRIV_B2_SCHM                    0x3c
#define PRIV_B2_CHSC                    0x5f
#define PRIV_B2_SIGA                    0x74
#define PRIV_B2_XSCH                    0x76

/* 0xebxx group (QDIO buffer state + PCI store block / interpretation ctl). */
#define PRIV_EB_SQBS                    0x8a
#define PRIV_EB_PCISTB                  0xd0
#define PRIV_EB_SIC                     0xd1

/* 0xb9xx group (QDIO extract buffer state + PCI load/store). */
#define PRIV_B9_EQBS                    0x9c
#define PRIV_B9_CLP                     0xa0
#define PRIV_B9_PCISTG                  0xd0
#define PRIV_B9_PCILG                   0xd2
#define PRIV_B9_RPCIT                   0xd3

/* 0xe3xx group (PCI function control / store). */
#define PRIV_E3_MPCIFC                  0xd0
#define PRIV_E3_STPCIFC                 0xd4

/* DIAGNOSE function codes handled in user space. */
#define DIAG_TIMEREVENT                 0x288
#define DIAG_IPL                        0x308
#define DIAG_KVM_HYPERCALL              0x500
#define DIAG_KVM_BREAKPOINT             0x501

/* SIE intercept reason codes (icptcode). */
#define ICPT_INSTRUCTION                0x04
#define ICPT_PROGRAM                    0x08
#define ICPT_EXT_INT                    0x14
#define ICPT_WAITPSW                    0x1c
#define ICPT_SOFT_INTERCEPT             0x24
#define ICPT_CPU_STOP                   0x28
#define ICPT_OPEREXC                    0x2c
#define ICPT_IO                         0x40

#define NR_LOCAL_IRQS 32
/*
 * Needs to be big enough to contain max_cpus emergency signals
 * and in addition NR_LOCAL_IRQS interrupts
 */
#define VCPU_IRQ_BUF_SIZE (sizeof(struct kvm_s390_irq) * \
                           (max_cpus + NR_LOCAL_IRQS))

/* Single pending hardware watchpoint reported back to the gdbstub. */
static CPUWatchpoint hw_watchpoint;
/*
 * We don't use a list because this structure is also used to transmit the
 * hardware breakpoints to the kernel.
 */
static struct kvm_hw_breakpoint *hw_breakpoints;
static int nb_hw_breakpoints;

const KVMCapabilityInfo kvm_arch_required_capabilities[] = {
    KVM_CAP_LAST_INFO
};

/* Serializes SIGP orders that must not race between vCPUs. */
static QemuMutex qemu_sigp_mutex;

/* Host KVM capabilities, probed once in kvm_arch_init(). */
static int cap_sync_regs;
static int cap_async_pf;
static int cap_mem_op;
static int cap_s390_irq;
static int cap_ri;
static int cap_gs;

/* Non-zero once CMMA (collaborative memory management) was enabled. */
static int active_cmma;

static void *legacy_s390_alloc(size_t size, uint64_t *align);
91138037 150
708f99c3 151static int kvm_s390_query_mem_limit(uint64_t *memory_limit)
a310b283
DD
152{
153 struct kvm_device_attr attr = {
154 .group = KVM_S390_VM_MEM_CTRL,
155 .attr = KVM_S390_VM_MEM_LIMIT_SIZE,
156 .addr = (uint64_t) memory_limit,
157 };
158
708f99c3 159 return kvm_vm_ioctl(kvm_state, KVM_GET_DEVICE_ATTR, &attr);
a310b283
DD
160}
161
/*
 * Request a new guest memory limit.  Reports the host-supported maximum in
 * *hw_limit; fails with -E2BIG if the host maximum is below @new_limit.
 * Returns 0 if the attribute is unsupported (nothing to do) or on success.
 */
int kvm_s390_set_mem_limit(uint64_t new_limit, uint64_t *hw_limit)
{
    int rc;

    struct kvm_device_attr attr = {
        .group = KVM_S390_VM_MEM_CTRL,
        .attr = KVM_S390_VM_MEM_LIMIT_SIZE,
        .addr = (uint64_t) &new_limit,
    };

    /* Older kernels do not expose the limit attribute at all. */
    if (!kvm_vm_check_mem_attr(kvm_state, KVM_S390_VM_MEM_LIMIT_SIZE)) {
        return 0;
    }

    rc = kvm_s390_query_mem_limit(hw_limit);
    if (rc) {
        return rc;
    } else if (*hw_limit < new_limit) {
        return -E2BIG;
    }

    return kvm_vm_ioctl(kvm_state, KVM_SET_DEVICE_ATTR, &attr);
}
185
03f47ee4
JF
186int kvm_s390_cmma_active(void)
187{
188 return active_cmma;
189}
190
07059eff
DH
/*
 * Lazily probe (once) whether the host supports both enabling and clearing
 * CMMA; the result is cached in a function-local static.
 */
static bool kvm_s390_cmma_available(void)
{
    static bool initialized, value;

    if (!initialized) {
        initialized = true;
        value = kvm_vm_check_mem_attr(kvm_state, KVM_S390_VM_MEM_ENABLE_CMMA) &&
                kvm_vm_check_mem_attr(kvm_state, KVM_S390_VM_MEM_CLR_CMMA);
    }
    return value;
}
202
/*
 * Clear all CMMA page-usage state in the kernel (e.g. on guest reset).
 * No-op unless CMMA was actually enabled; the result is only traced.
 */
void kvm_s390_cmma_reset(void)
{
    int rc;
    struct kvm_device_attr attr = {
        .group = KVM_S390_VM_MEM_CTRL,
        .attr = KVM_S390_VM_MEM_CLR_CMMA,
    };

    if (!kvm_s390_cmma_active()) {
        return;
    }

    rc = kvm_vm_ioctl(kvm_state, KVM_SET_DEVICE_ATTR, &attr);
    trace_kvm_clear_cmma(rc);
}
218
/*
 * Enable collaborative memory management in the kernel and record the
 * outcome in active_cmma.  Skipped (with a warning) when guest RAM is
 * backed by hugetlbfs (-mem-path), which CMMA cannot handle.
 */
static void kvm_s390_enable_cmma(void)
{
    int rc;
    struct kvm_device_attr attr = {
        .group = KVM_S390_VM_MEM_CTRL,
        .attr = KVM_S390_VM_MEM_ENABLE_CMMA,
    };

    if (mem_path) {
        warn_report("CMM will not be enabled because it is not "
                    "compatible with hugetlbfs.");
        return;
    }
    rc = kvm_vm_ioctl(kvm_state, KVM_SET_DEVICE_ATTR, &attr);
    active_cmma = !rc;   /* rc == 0 means CMMA is now on */
    trace_kvm_enable_cmma(rc);
}
236
2eb1cd07
TK
237static void kvm_s390_set_attr(uint64_t attr)
238{
239 struct kvm_device_attr attribute = {
240 .group = KVM_S390_VM_CRYPTO,
241 .attr = attr,
242 };
243
244 int ret = kvm_vm_ioctl(kvm_state, KVM_SET_DEVICE_ATTR, &attribute);
245
246 if (ret) {
247 error_report("Failed to set crypto device attribute %lu: %s",
248 attr, strerror(-ret));
249 }
250}
251
252static void kvm_s390_init_aes_kw(void)
253{
254 uint64_t attr = KVM_S390_VM_CRYPTO_DISABLE_AES_KW;
255
256 if (object_property_get_bool(OBJECT(qdev_get_machine()), "aes-key-wrap",
257 NULL)) {
258 attr = KVM_S390_VM_CRYPTO_ENABLE_AES_KW;
259 }
260
261 if (kvm_vm_check_attr(kvm_state, KVM_S390_VM_CRYPTO, attr)) {
262 kvm_s390_set_attr(attr);
263 }
264}
265
266static void kvm_s390_init_dea_kw(void)
267{
268 uint64_t attr = KVM_S390_VM_CRYPTO_DISABLE_DEA_KW;
269
270 if (object_property_get_bool(OBJECT(qdev_get_machine()), "dea-key-wrap",
271 NULL)) {
272 attr = KVM_S390_VM_CRYPTO_ENABLE_DEA_KW;
273 }
274
275 if (kvm_vm_check_attr(kvm_state, KVM_S390_VM_CRYPTO, attr)) {
276 kvm_s390_set_attr(attr);
277 }
278}
279
4ab72920 280void kvm_s390_crypto_reset(void)
2eb1cd07 281{
c85d21c7
DH
282 if (s390_has_feat(S390_FEAT_MSA_EXT_3)) {
283 kvm_s390_init_aes_kw();
284 kvm_s390_init_dea_kw();
285 }
2eb1cd07
TK
286}
287
/*
 * One-time architecture setup when the KVM accelerator is initialized:
 * probe capabilities, select the host CPU model as default, install the
 * legacy allocator for old kernels, and enable optional VM capabilities.
 */
int kvm_arch_init(MachineState *ms, KVMState *s)
{
    MachineClass *mc = MACHINE_GET_CLASS(ms);

    mc->default_cpu_type = S390_CPU_TYPE_NAME("host");
    cap_sync_regs = kvm_check_extension(s, KVM_CAP_SYNC_REGS);
    cap_async_pf = kvm_check_extension(s, KVM_CAP_ASYNC_PF);
    cap_mem_op = kvm_check_extension(s, KVM_CAP_S390_MEM_OP);
    cap_s390_irq = kvm_check_extension(s, KVM_CAP_S390_INJECT_IRQ);

    /* Very old kernels need specially-placed, writable guest memory. */
    if (!kvm_check_extension(s, KVM_CAP_S390_GMAP)
        || !kvm_check_extension(s, KVM_CAP_S390_COW)) {
        phys_mem_set_alloc(legacy_s390_alloc);
    }

    kvm_vm_enable_cap(s, KVM_CAP_S390_USER_SIGP, 0);
    kvm_vm_enable_cap(s, KVM_CAP_S390_VECTOR_REGISTERS, 0);
    kvm_vm_enable_cap(s, KVM_CAP_S390_USER_STSI, 0);
    if (ri_allowed()) {
        if (kvm_vm_enable_cap(s, KVM_CAP_S390_RI, 0) == 0) {
            cap_ri = 1;
        }
    }
    if (gs_allowed()) {
        if (kvm_vm_enable_cap(s, KVM_CAP_S390_GS, 0) == 0) {
            cap_gs = 1;
        }
    }

    /*
     * The migration interface for ais was introduced with kernel 4.13
     * but the capability itself had been active since 4.12. As migration
     * support is considered necessary let's disable ais in the 2.10
     * machine.
     */
    /* kvm_vm_enable_cap(s, KVM_CAP_S390_AIS, 0); */

    qemu_mutex_init(&qemu_sigp_mutex);

    return 0;
}
329
d525ffab
PB
/* No in-kernel irqchip to create on s390x; the FLIC device handles IRQs. */
int kvm_arch_irqchip_create(MachineState *ms, KVMState *s)
{
    return 0;
}
334
b164e48e
EH
335unsigned long kvm_arch_vcpu_id(CPUState *cpu)
336{
337 return cpu->cpu_index;
338}
339
/*
 * Per-vCPU setup: push the initial cpu_state to the kernel and allocate
 * the buffer used to save/restore pending local interrupts.
 */
int kvm_arch_init_vcpu(CPUState *cs)
{
    S390CPU *cpu = S390_CPU(cs);
    kvm_s390_set_cpu_state(cpu, cpu->env.cpu_state);
    cpu->irqstate = g_malloc0(VCPU_IRQ_BUF_SIZE);
    return 0;
}
347
/* Ask the kernel to perform the initial CPU reset for @cpu. */
void kvm_s390_reset_vcpu(S390CPU *cpu)
{
    CPUState *cs = CPU(cpu);

    /* The initial reset call is needed here to reset in-kernel
     * vcpu data that we can't access directly from QEMU
     * (i.e. with older kernels which don't support sync_regs/ONE_REG).
     * Before this ioctl cpu_synchronize_state() is called in common kvm
     * code (kvm-all) */
    if (kvm_vcpu_ioctl(cs, KVM_S390_INITIAL_RESET, NULL)) {
        error_report("Initial CPU reset failed on CPU %i", cs->cpu_index);
    }
}
361
fdb78ec0
DH
362static int can_sync_regs(CPUState *cs, int regs)
363{
364 return cap_sync_regs && (cs->kvm_run->kvm_valid_regs & regs) == regs;
365}
366
/*
 * Push QEMU's CPU state into the kernel.  Each register set is written
 * through the fast kvm_run sync-regs area when the kernel supports it,
 * falling back to the corresponding ioctl / ONE_REG interface otherwise.
 * @level: KVM_PUT_RUNTIME_STATE writes only the runtime-volatile subset.
 * Returns 0 on success or a negative errno from a failed ioctl.
 */
int kvm_arch_put_registers(CPUState *cs, int level)
{
    S390CPU *cpu = S390_CPU(cs);
    CPUS390XState *env = &cpu->env;
    struct kvm_sregs sregs;
    struct kvm_regs regs;
    struct kvm_fpu fpu = {};
    int r;
    int i;

    /* always save the PSW and the GPRS*/
    cs->kvm_run->psw_addr = env->psw.addr;
    cs->kvm_run->psw_mask = env->psw.mask;

    if (can_sync_regs(cs, KVM_SYNC_GPRS)) {
        for (i = 0; i < 16; i++) {
            cs->kvm_run->s.regs.gprs[i] = env->regs[i];
            cs->kvm_run->kvm_dirty_regs |= KVM_SYNC_GPRS;
        }
    } else {
        for (i = 0; i < 16; i++) {
            regs.gprs[i] = env->regs[i];
        }
        r = kvm_vcpu_ioctl(cs, KVM_SET_REGS, &regs);
        if (r < 0) {
            return r;
        }
    }

    /* Vector regs supersede FP regs; only one representation is pushed. */
    if (can_sync_regs(cs, KVM_SYNC_VRS)) {
        for (i = 0; i < 32; i++) {
            cs->kvm_run->s.regs.vrs[i][0] = env->vregs[i][0].ll;
            cs->kvm_run->s.regs.vrs[i][1] = env->vregs[i][1].ll;
        }
        cs->kvm_run->s.regs.fpc = env->fpc;
        cs->kvm_run->kvm_dirty_regs |= KVM_SYNC_VRS;
    } else if (can_sync_regs(cs, KVM_SYNC_FPRS)) {
        for (i = 0; i < 16; i++) {
            cs->kvm_run->s.regs.fprs[i] = get_freg(env, i)->ll;
        }
        cs->kvm_run->s.regs.fpc = env->fpc;
        cs->kvm_run->kvm_dirty_regs |= KVM_SYNC_FPRS;
    } else {
        /* Floating point */
        for (i = 0; i < 16; i++) {
            fpu.fprs[i] = get_freg(env, i)->ll;
        }
        fpu.fpc = env->fpc;

        r = kvm_vcpu_ioctl(cs, KVM_SET_FPU, &fpu);
        if (r < 0) {
            return r;
        }
    }

    /* Do we need to save more than that? */
    if (level == KVM_PUT_RUNTIME_STATE) {
        return 0;
    }

    if (can_sync_regs(cs, KVM_SYNC_ARCH0)) {
        cs->kvm_run->s.regs.cputm = env->cputm;
        cs->kvm_run->s.regs.ckc = env->ckc;
        cs->kvm_run->s.regs.todpr = env->todpr;
        cs->kvm_run->s.regs.gbea = env->gbea;
        cs->kvm_run->s.regs.pp = env->pp;
        cs->kvm_run->kvm_dirty_regs |= KVM_SYNC_ARCH0;
    } else {
        /*
         * These ONE_REGS are not protected by a capability. As they are only
         * necessary for migration we just trace a possible error, but don't
         * return with an error return code.
         */
        kvm_set_one_reg(cs, KVM_REG_S390_CPU_TIMER, &env->cputm);
        kvm_set_one_reg(cs, KVM_REG_S390_CLOCK_COMP, &env->ckc);
        kvm_set_one_reg(cs, KVM_REG_S390_TODPR, &env->todpr);
        kvm_set_one_reg(cs, KVM_REG_S390_GBEA, &env->gbea);
        kvm_set_one_reg(cs, KVM_REG_S390_PP, &env->pp);
    }

    if (can_sync_regs(cs, KVM_SYNC_RICCB)) {
        memcpy(cs->kvm_run->s.regs.riccb, env->riccb, 64);
        cs->kvm_run->kvm_dirty_regs |= KVM_SYNC_RICCB;
    }

    /* pfault parameters */
    if (can_sync_regs(cs, KVM_SYNC_PFAULT)) {
        cs->kvm_run->s.regs.pft = env->pfault_token;
        cs->kvm_run->s.regs.pfs = env->pfault_select;
        cs->kvm_run->s.regs.pfc = env->pfault_compare;
        cs->kvm_run->kvm_dirty_regs |= KVM_SYNC_PFAULT;
    } else if (cap_async_pf) {
        r = kvm_set_one_reg(cs, KVM_REG_S390_PFTOKEN, &env->pfault_token);
        if (r < 0) {
            return r;
        }
        r = kvm_set_one_reg(cs, KVM_REG_S390_PFCOMPARE, &env->pfault_compare);
        if (r < 0) {
            return r;
        }
        r = kvm_set_one_reg(cs, KVM_REG_S390_PFSELECT, &env->pfault_select);
        if (r < 0) {
            return r;
        }
    }

    /* access registers and control registers*/
    if (can_sync_regs(cs, KVM_SYNC_ACRS | KVM_SYNC_CRS)) {
        for (i = 0; i < 16; i++) {
            cs->kvm_run->s.regs.acrs[i] = env->aregs[i];
            cs->kvm_run->s.regs.crs[i] = env->cregs[i];
        }
        cs->kvm_run->kvm_dirty_regs |= KVM_SYNC_ACRS;
        cs->kvm_run->kvm_dirty_regs |= KVM_SYNC_CRS;
    } else {
        for (i = 0; i < 16; i++) {
            sregs.acrs[i] = env->aregs[i];
            sregs.crs[i] = env->cregs[i];
        }
        r = kvm_vcpu_ioctl(cs, KVM_SET_SREGS, &sregs);
        if (r < 0) {
            return r;
        }
    }

    if (can_sync_regs(cs, KVM_SYNC_GSCB)) {
        memcpy(cs->kvm_run->s.regs.gscb, env->gscb, 32);
        cs->kvm_run->kvm_dirty_regs |= KVM_SYNC_GSCB;
    }

    /* Finally the prefix */
    if (can_sync_regs(cs, KVM_SYNC_PREFIX)) {
        cs->kvm_run->s.regs.prefix = env->psa;
        cs->kvm_run->kvm_dirty_regs |= KVM_SYNC_PREFIX;
    } else {
        /* prefix is only supported via sync regs */
    }
    return 0;
}
506
/*
 * Mirror of kvm_arch_put_registers(): pull the kernel's CPU state into
 * QEMU's CPUS390XState, preferring the kvm_run sync-regs area and falling
 * back to ioctls / ONE_REG where a register set is not shared.
 * Returns 0 on success or a negative errno from a failed ioctl.
 */
int kvm_arch_get_registers(CPUState *cs)
{
    S390CPU *cpu = S390_CPU(cs);
    CPUS390XState *env = &cpu->env;
    struct kvm_sregs sregs;
    struct kvm_regs regs;
    struct kvm_fpu fpu;
    int i, r;

    /* get the PSW */
    env->psw.addr = cs->kvm_run->psw_addr;
    env->psw.mask = cs->kvm_run->psw_mask;

    /* the GPRS */
    if (can_sync_regs(cs, KVM_SYNC_GPRS)) {
        for (i = 0; i < 16; i++) {
            env->regs[i] = cs->kvm_run->s.regs.gprs[i];
        }
    } else {
        r = kvm_vcpu_ioctl(cs, KVM_GET_REGS, &regs);
        if (r < 0) {
            return r;
        }
        for (i = 0; i < 16; i++) {
            env->regs[i] = regs.gprs[i];
        }
    }

    /* The ACRS and CRS */
    if (can_sync_regs(cs, KVM_SYNC_ACRS | KVM_SYNC_CRS)) {
        for (i = 0; i < 16; i++) {
            env->aregs[i] = cs->kvm_run->s.regs.acrs[i];
            env->cregs[i] = cs->kvm_run->s.regs.crs[i];
        }
    } else {
        r = kvm_vcpu_ioctl(cs, KVM_GET_SREGS, &sregs);
        if (r < 0) {
            return r;
        }
        for (i = 0; i < 16; i++) {
            env->aregs[i] = sregs.acrs[i];
            env->cregs[i] = sregs.crs[i];
        }
    }

    /* Floating point and vector registers */
    if (can_sync_regs(cs, KVM_SYNC_VRS)) {
        for (i = 0; i < 32; i++) {
            env->vregs[i][0].ll = cs->kvm_run->s.regs.vrs[i][0];
            env->vregs[i][1].ll = cs->kvm_run->s.regs.vrs[i][1];
        }
        env->fpc = cs->kvm_run->s.regs.fpc;
    } else if (can_sync_regs(cs, KVM_SYNC_FPRS)) {
        for (i = 0; i < 16; i++) {
            get_freg(env, i)->ll = cs->kvm_run->s.regs.fprs[i];
        }
        env->fpc = cs->kvm_run->s.regs.fpc;
    } else {
        r = kvm_vcpu_ioctl(cs, KVM_GET_FPU, &fpu);
        if (r < 0) {
            return r;
        }
        for (i = 0; i < 16; i++) {
            get_freg(env, i)->ll = fpu.fprs[i];
        }
        env->fpc = fpu.fpc;
    }

    /* The prefix */
    if (can_sync_regs(cs, KVM_SYNC_PREFIX)) {
        env->psa = cs->kvm_run->s.regs.prefix;
    }

    if (can_sync_regs(cs, KVM_SYNC_ARCH0)) {
        env->cputm = cs->kvm_run->s.regs.cputm;
        env->ckc = cs->kvm_run->s.regs.ckc;
        env->todpr = cs->kvm_run->s.regs.todpr;
        env->gbea = cs->kvm_run->s.regs.gbea;
        env->pp = cs->kvm_run->s.regs.pp;
    } else {
        /*
         * These ONE_REGS are not protected by a capability. As they are only
         * necessary for migration we just trace a possible error, but don't
         * return with an error return code.
         */
        kvm_get_one_reg(cs, KVM_REG_S390_CPU_TIMER, &env->cputm);
        kvm_get_one_reg(cs, KVM_REG_S390_CLOCK_COMP, &env->ckc);
        kvm_get_one_reg(cs, KVM_REG_S390_TODPR, &env->todpr);
        kvm_get_one_reg(cs, KVM_REG_S390_GBEA, &env->gbea);
        kvm_get_one_reg(cs, KVM_REG_S390_PP, &env->pp);
    }

    if (can_sync_regs(cs, KVM_SYNC_RICCB)) {
        memcpy(env->riccb, cs->kvm_run->s.regs.riccb, 64);
    }

    if (can_sync_regs(cs, KVM_SYNC_GSCB)) {
        memcpy(env->gscb, cs->kvm_run->s.regs.gscb, 32);
    }

    /* pfault parameters */
    if (can_sync_regs(cs, KVM_SYNC_PFAULT)) {
        env->pfault_token = cs->kvm_run->s.regs.pft;
        env->pfault_select = cs->kvm_run->s.regs.pfs;
        env->pfault_compare = cs->kvm_run->s.regs.pfc;
    } else if (cap_async_pf) {
        r = kvm_get_one_reg(cs, KVM_REG_S390_PFTOKEN, &env->pfault_token);
        if (r < 0) {
            return r;
        }
        r = kvm_get_one_reg(cs, KVM_REG_S390_PFCOMPARE, &env->pfault_compare);
        if (r < 0) {
            return r;
        }
        r = kvm_get_one_reg(cs, KVM_REG_S390_PFSELECT, &env->pfault_select);
        if (r < 0) {
            return r;
        }
    }

    return 0;
}
629
3f9e59bb
JH
/*
 * Read the guest TOD clock (legacy, single-epoch interface).
 * @tod_high: output, high byte of the epoch.
 * @tod_low:  output, 64-bit TOD value.
 * Returns 0 on success, a negative errno otherwise.
 */
int kvm_s390_get_clock(uint8_t *tod_high, uint64_t *tod_low)
{
    int r;
    struct kvm_device_attr attr = {
        .group = KVM_S390_VM_TOD,
        .attr = KVM_S390_VM_TOD_LOW,
        .addr = (uint64_t)tod_low,
    };

    r = kvm_vm_ioctl(kvm_state, KVM_GET_DEVICE_ATTR, &attr);
    if (r) {
        return r;
    }

    /* Reuse the attr struct for the second, high-part query. */
    attr.attr = KVM_S390_VM_TOD_HIGH;
    attr.addr = (uint64_t)tod_high;
    return kvm_vm_ioctl(kvm_state, KVM_GET_DEVICE_ATTR, &attr);
}
648
7edd4a49 649int kvm_s390_get_clock_ext(uint8_t *tod_high, uint64_t *tod_low)
3f9e59bb
JH
650{
651 int r;
7edd4a49
CW
652 struct kvm_s390_vm_tod_clock gtod;
653 struct kvm_device_attr attr = {
654 .group = KVM_S390_VM_TOD,
655 .attr = KVM_S390_VM_TOD_EXT,
656 .addr = (uint64_t)&gtod,
657 };
658
659 r = kvm_vm_ioctl(kvm_state, KVM_GET_DEVICE_ATTR, &attr);
660 *tod_high = gtod.epoch_idx;
661 *tod_low = gtod.tod;
662
663 return r;
664}
3f9e59bb 665
7edd4a49
CW
/*
 * Set the guest TOD clock (legacy, single-epoch interface).
 * Writes the low part first, then the high byte; returns the first error.
 */
int kvm_s390_set_clock(uint8_t *tod_high, uint64_t *tod_low)
{
    int r;
    struct kvm_device_attr attr = {
        .group = KVM_S390_VM_TOD,
        .attr = KVM_S390_VM_TOD_LOW,
        .addr = (uint64_t)tod_low,
    };

    r = kvm_vm_ioctl(kvm_state, KVM_SET_DEVICE_ATTR, &attr);
    if (r) {
        return r;
    }

    attr.attr = KVM_S390_VM_TOD_HIGH;
    attr.addr = (uint64_t)tod_high;
    return kvm_vm_ioctl(kvm_state, KVM_SET_DEVICE_ATTR, &attr);
}
684
7edd4a49
CW
685int kvm_s390_set_clock_ext(uint8_t *tod_high, uint64_t *tod_low)
686{
687 struct kvm_s390_vm_tod_clock gtod = {
688 .epoch_idx = *tod_high,
689 .tod = *tod_low,
690 };
691 struct kvm_device_attr attr = {
692 .group = KVM_S390_VM_TOD,
693 .attr = KVM_S390_VM_TOD_EXT,
694 .addr = (uint64_t)&gtod,
695 };
696
697 return kvm_vm_ioctl(kvm_state, KVM_SET_DEVICE_ATTR, &attr);
698}
699
a9bcd1b8
TH
/**
 * kvm_s390_mem_op:
 * @addr: the logical start address in guest memory
 * @ar: the access register number
 * @hostbuf: buffer in host memory. NULL = do only checks w/o copying
 * @len: length that should be transferred
 * @is_write: true = write, false = read
 * Returns: 0 on success, non-zero if an exception or error occurred
 *
 * Use KVM ioctl to read/write from/to guest memory. An access exception
 * is injected into the vCPU in case of translation errors.
 */
int kvm_s390_mem_op(S390CPU *cpu, vaddr addr, uint8_t ar, void *hostbuf,
                    int len, bool is_write)
{
    struct kvm_s390_mem_op mem_op = {
        .gaddr = addr,
        .flags = KVM_S390_MEMOP_F_INJECT_EXCEPTION,
        .size = len,
        .op = is_write ? KVM_S390_MEMOP_LOGICAL_WRITE
                       : KVM_S390_MEMOP_LOGICAL_READ,
        .buf = (uint64_t)hostbuf,
        .ar = ar,
    };
    int ret;

    /* Fall back to -ENOSYS on kernels without KVM_CAP_S390_MEM_OP. */
    if (!cap_mem_op) {
        return -ENOSYS;
    }
    if (!hostbuf) {
        mem_op.flags |= KVM_S390_MEMOP_F_CHECK_ONLY;
    }

    ret = kvm_vcpu_ioctl(CPU(cpu), KVM_S390_MEM_OP, &mem_op);
    if (ret < 0) {
        error_printf("KVM_S390_MEM_OP failed: %s\n", strerror(-ret));
    }
    return ret;
}
739
fdec9918
CB
/*
 * Legacy layout for s390:
 * Older S390 KVM requires the topmost vma of the RAM to be
 * smaller than an system defined value, which is at least 256GB.
 * Larger systems have larger values. We put the guest between
 * the end of data segment (system break) and this value. We
 * use 32GB as a base to have enough room for the system break
 * to grow. We also have to use MAP parameters that avoid
 * read-only mapping of guest pages.
 */
static void *legacy_s390_alloc(size_t size, uint64_t *align)
{
    void *mem;

    /* NOTE(review): @align is unused here; MAP_FIXED pins the address. */
    mem = mmap((void *) 0x800000000ULL, size,
               PROT_EXEC|PROT_READ|PROT_WRITE,
               MAP_SHARED | MAP_ANONYMOUS | MAP_FIXED, -1, 0);
    return mem == MAP_FAILED ? NULL : mem;
}
759
b60fae32
DH
/* Software-breakpoint instruction pattern and its length, chosen once. */
static uint8_t const *sw_bp_inst;
static uint8_t sw_bp_ilen;

/*
 * Pick the sw-breakpoint encoding supported by the running kernel:
 * the all-zero 2-byte instruction on kernels with USER_INSTR0, the
 * 4-byte DIAG 501 otherwise.  Idempotent after the first call.
 */
static void determine_sw_breakpoint_instr(void)
{
    /* DIAG 501 is used for sw breakpoints with old kernels */
    static const uint8_t diag_501[] = {0x83, 0x24, 0x05, 0x01};
    /* Instruction 0x0000 is used for sw breakpoints with recent kernels */
    static const uint8_t instr_0x0000[] = {0x00, 0x00};

    if (sw_bp_inst) {
        return;
    }
    if (kvm_vm_enable_cap(kvm_state, KVM_CAP_S390_USER_INSTR0, 0)) {
        sw_bp_inst = diag_501;
        sw_bp_ilen = sizeof(diag_501);
        DPRINTF("KVM: will use 4-byte sw breakpoints.\n");
    } else {
        sw_bp_inst = instr_0x0000;
        sw_bp_ilen = sizeof(instr_0x0000);
        DPRINTF("KVM: will use 2-byte sw breakpoints.\n");
    }
}
8e4e86af 783
/*
 * Plant a software breakpoint: save the original bytes into bp->saved_insn
 * and overwrite them with the chosen breakpoint instruction.
 */
int kvm_arch_insert_sw_breakpoint(CPUState *cs, struct kvm_sw_breakpoint *bp)
{
    determine_sw_breakpoint_instr();

    if (cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&bp->saved_insn,
                            sw_bp_ilen, 0) ||
        cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)sw_bp_inst, sw_bp_ilen, 1)) {
        return -EINVAL;
    }
    return 0;
}
795
/*
 * Remove a software breakpoint: verify the breakpoint pattern is still in
 * place at bp->pc, then restore the saved original instruction bytes.
 */
int kvm_arch_remove_sw_breakpoint(CPUState *cs, struct kvm_sw_breakpoint *bp)
{
    uint8_t t[MAX_ILEN];

    if (cpu_memory_rw_debug(cs, bp->pc, t, sw_bp_ilen, 0)) {
        return -EINVAL;
    } else if (memcmp(t, sw_bp_inst, sw_bp_ilen)) {
        /* Guest memory no longer holds our breakpoint pattern. */
        return -EINVAL;
    } else if (cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&bp->saved_insn,
                                   sw_bp_ilen, 1)) {
        return -EINVAL;
    }

    return 0;
}
811
770a6379
DH
812static struct kvm_hw_breakpoint *find_hw_breakpoint(target_ulong addr,
813 int len, int type)
814{
815 int n;
816
817 for (n = 0; n < nb_hw_breakpoints; n++) {
818 if (hw_breakpoints[n].addr == addr && hw_breakpoints[n].type == type &&
819 (hw_breakpoints[n].len == len || len == -1)) {
820 return &hw_breakpoints[n];
821 }
822 }
823
824 return NULL;
825}
826
827static int insert_hw_breakpoint(target_ulong addr, int len, int type)
828{
829 int size;
830
831 if (find_hw_breakpoint(addr, len, type)) {
832 return -EEXIST;
833 }
834
835 size = (nb_hw_breakpoints + 1) * sizeof(struct kvm_hw_breakpoint);
836
837 if (!hw_breakpoints) {
838 nb_hw_breakpoints = 0;
839 hw_breakpoints = (struct kvm_hw_breakpoint *)g_try_malloc(size);
840 } else {
841 hw_breakpoints =
842 (struct kvm_hw_breakpoint *)g_try_realloc(hw_breakpoints, size);
843 }
844
845 if (!hw_breakpoints) {
846 nb_hw_breakpoints = 0;
847 return -ENOMEM;
848 }
849
850 hw_breakpoints[nb_hw_breakpoints].addr = addr;
851 hw_breakpoints[nb_hw_breakpoints].len = len;
852 hw_breakpoints[nb_hw_breakpoints].type = type;
853
854 nb_hw_breakpoints++;
855
856 return 0;
857}
858
8c012449
DH
859int kvm_arch_insert_hw_breakpoint(target_ulong addr,
860 target_ulong len, int type)
861{
770a6379
DH
862 switch (type) {
863 case GDB_BREAKPOINT_HW:
864 type = KVM_HW_BP;
865 break;
866 case GDB_WATCHPOINT_WRITE:
867 if (len < 1) {
868 return -EINVAL;
869 }
870 type = KVM_HW_WP_WRITE;
871 break;
872 default:
873 return -ENOSYS;
874 }
875 return insert_hw_breakpoint(addr, len, type);
8c012449
DH
876}
877
/*
 * Remove one hardware breakpoint and shrink the array, keeping it dense
 * (the kernel receives it as a contiguous buffer).
 */
int kvm_arch_remove_hw_breakpoint(target_ulong addr,
                                  target_ulong len, int type)
{
    int size;
    struct kvm_hw_breakpoint *bp = find_hw_breakpoint(addr, len, type);

    if (bp == NULL) {
        return -ENOENT;
    }

    nb_hw_breakpoints--;
    if (nb_hw_breakpoints > 0) {
        /*
         * In order to trim the array, move the last element to the position to
         * be removed - if necessary.
         */
        if (bp != &hw_breakpoints[nb_hw_breakpoints]) {
            *bp = hw_breakpoints[nb_hw_breakpoints];
        }
        size = nb_hw_breakpoints * sizeof(struct kvm_hw_breakpoint);
        hw_breakpoints =
             (struct kvm_hw_breakpoint *)g_realloc(hw_breakpoints, size);
    } else {
        g_free(hw_breakpoints);
        hw_breakpoints = NULL;
    }

    return 0;
}
907
908void kvm_arch_remove_all_hw_breakpoints(void)
909{
770a6379
DH
910 nb_hw_breakpoints = 0;
911 g_free(hw_breakpoints);
912 hw_breakpoints = NULL;
8c012449
DH
913}
914
/*
 * Populate the kvm_guest_debug structure with the current hardware
 * breakpoint list, refreshing each entry's physical address first.
 */
void kvm_arch_update_guest_debug(CPUState *cpu, struct kvm_guest_debug *dbg)
{
    int i;

    if (nb_hw_breakpoints > 0) {
        dbg->arch.nr_hw_bp = nb_hw_breakpoints;
        dbg->arch.hw_bp = hw_breakpoints;

        for (i = 0; i < nb_hw_breakpoints; ++i) {
            /* Guest mappings may have changed; re-translate each address. */
            hw_breakpoints[i].phys_addr = s390_cpu_get_phys_addr_debug(cpu,
                                                       hw_breakpoints[i].addr);
        }
        dbg->control |= KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_HW_BP;
    } else {
        dbg->arch.nr_hw_bp = 0;
        dbg->arch.hw_bp = NULL;
    }
}
933
/* Nothing to do before entering the guest on s390x. */
void kvm_arch_pre_run(CPUState *cpu, struct kvm_run *run)
{
}
937
/* No per-exit memory attributes to derive on s390x. */
MemTxAttrs kvm_arch_post_run(CPUState *cs, struct kvm_run *run)
{
    return MEMTXATTRS_UNSPECIFIED;
}
942
20d695a9 943int kvm_arch_process_async_events(CPUState *cs)
0af691d7 944{
225dc991 945 return cs->halted;
0af691d7
MT
946}
947
66ad0893
CH
/*
 * Convert the rich kvm_s390_irq representation into the legacy
 * kvm_s390_interrupt layout used by the old KVM_S390_INTERRUPT ioctl.
 * Returns -EINVAL for interrupt types the legacy ABI cannot express.
 */
static int s390_kvm_irq_to_interrupt(struct kvm_s390_irq *irq,
                                     struct kvm_s390_interrupt *interrupt)
{
    int r = 0;

    interrupt->type = irq->type;
    switch (irq->type) {
    case KVM_S390_INT_VIRTIO:
        interrupt->parm = irq->u.ext.ext_params;
        /* fall through */
    case KVM_S390_INT_PFAULT_INIT:
    case KVM_S390_INT_PFAULT_DONE:
        interrupt->parm64 = irq->u.ext.ext_params2;
        break;
    case KVM_S390_PROGRAM_INT:
        interrupt->parm = irq->u.pgm.code;
        break;
    case KVM_S390_SIGP_SET_PREFIX:
        interrupt->parm = irq->u.prefix.address;
        break;
    case KVM_S390_INT_SERVICE:
        interrupt->parm = irq->u.ext.ext_params;
        break;
    case KVM_S390_MCHK:
        interrupt->parm = irq->u.mchk.cr14;
        interrupt->parm64 = irq->u.mchk.mcic;
        break;
    case KVM_S390_INT_EXTERNAL_CALL:
        interrupt->parm = irq->u.extcall.code;
        break;
    case KVM_S390_INT_EMERGENCY:
        interrupt->parm = irq->u.emerg.code;
        break;
    case KVM_S390_SIGP_STOP:
    case KVM_S390_RESTART:
        break; /* These types have no parameters */
    case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
        /* I/O interrupts pack subchannel id/nr and parm/word pairs. */
        interrupt->parm = irq->u.io.subchannel_id << 16;
        interrupt->parm |= irq->u.io.subchannel_nr;
        interrupt->parm64 = (uint64_t)irq->u.io.io_int_parm << 32;
        interrupt->parm64 |= irq->u.io.io_int_word;
        break;
    default:
        r = -EINVAL;
        break;
    }
    return r;
}
996
/*
 * Inject a per-vCPU interrupt through the legacy KVM_S390_INTERRUPT
 * ioctl.  Any failure here is unrecoverable, so we exit hard.
 */
static void inject_vcpu_irq_legacy(CPUState *cs, struct kvm_s390_irq *irq)
{
    struct kvm_s390_interrupt kvmint = {};
    int r;

    r = s390_kvm_irq_to_interrupt(irq, &kvmint);
    if (r < 0) {
        fprintf(stderr, "%s called with bogus interrupt\n", __func__);
        exit(1);
    }

    r = kvm_vcpu_ioctl(cs, KVM_S390_INTERRUPT, &kvmint);
    if (r < 0) {
        fprintf(stderr, "KVM failed to inject interrupt\n");
        exit(1);
    }
}
1014
1191c949
JF
/*
 * Inject a per-vCPU interrupt, preferring the modern KVM_S390_IRQ ioctl
 * and falling back to the legacy interface on old kernels.  Injection
 * failure with the modern ioctl is fatal.
 */
void kvm_s390_vcpu_interrupt(S390CPU *cpu, struct kvm_s390_irq *irq)
{
    CPUState *cs = CPU(cpu);
    int r;

    if (cap_s390_irq) {
        r = kvm_vcpu_ioctl(cs, KVM_S390_IRQ, irq);
        if (!r) {
            return;
        }
        error_report("KVM failed to inject interrupt %llx", irq->type);
        exit(1);
    }

    inject_vcpu_irq_legacy(cs, irq);
}
1031
/*
 * Inject a floating (not vCPU-bound) interrupt via the legacy VM-level
 * KVM_S390_INTERRUPT ioctl; failures are fatal.
 * NOTE(review): the leading "__" makes this a reserved identifier in C;
 * renaming (together with its caller below) would be cleaner.
 */
static void __kvm_s390_floating_interrupt(struct kvm_s390_irq *irq)
{
    struct kvm_s390_interrupt kvmint = {};
    int r;

    r = s390_kvm_irq_to_interrupt(irq, &kvmint);
    if (r < 0) {
        fprintf(stderr, "%s called with bogus interrupt\n", __func__);
        exit(1);
    }

    r = kvm_vm_ioctl(kvm_state, KVM_S390_INTERRUPT, &kvmint);
    if (r < 0) {
        fprintf(stderr, "KVM failed to inject interrupt\n");
        exit(1);
    }
}
1049
bbd8bb8e
CH
/*
 * Inject a floating interrupt.  Try the FLIC device first; if the kernel
 * lacks FLIC support (-ENOSYS), remember that and use the legacy VM ioctl
 * for this and all subsequent floating interrupts.
 */
void kvm_s390_floating_interrupt(struct kvm_s390_irq *irq)
{
    static bool use_flic = true;
    int r;

    if (use_flic) {
        r = kvm_s390_inject_flic(irq);
        if (r == -ENOSYS) {
            /* no FLIC in this kernel - don't retry it again */
            use_flic = false;
        }
        if (!r) {
            return;
        }
    }
    __kvm_s390_floating_interrupt(irq);
}
1066
de13d216 1067void kvm_s390_service_interrupt(uint32_t parm)
0e60a699 1068{
de13d216
CH
1069 struct kvm_s390_irq irq = {
1070 .type = KVM_S390_INT_SERVICE,
1071 .u.ext.ext_params = parm,
1072 };
0e60a699 1073
de13d216 1074 kvm_s390_floating_interrupt(&irq);
79afc36d
CH
1075}
1076
e3cfd926 1077void kvm_s390_program_interrupt(S390CPU *cpu, uint16_t code)
0e60a699 1078{
de13d216
CH
1079 struct kvm_s390_irq irq = {
1080 .type = KVM_S390_PROGRAM_INT,
1081 .u.pgm.code = code,
1082 };
1083
1084 kvm_s390_vcpu_interrupt(cpu, &irq);
0e60a699
AG
1085}
1086
801cdd35
TH
/*
 * Inject an access-exception program interrupt.  @te_code is the
 * translation-exception identification; its two low-order bits also
 * provide the exception access id.
 */
void kvm_s390_access_exception(S390CPU *cpu, uint16_t code, uint64_t te_code)
{
    struct kvm_s390_irq irq = {
        .type = KVM_S390_PROGRAM_INT,
        .u.pgm.code = code,
        .u.pgm.trans_exc_code = te_code,
        .u.pgm.exc_access_id = te_code & 3,
    };

    kvm_s390_vcpu_interrupt(cpu, &irq);
}
1098
/*
 * Emulate the SERVICE CALL instruction: fetch the SCCB address and command
 * code from the registers encoded in ipbh0, run the SCLP emulation, and
 * either set the resulting condition code or inject a program interrupt
 * on error.  Always reports the instruction as handled (returns 0).
 */
static int kvm_sclp_service_call(S390CPU *cpu, struct kvm_run *run,
                                 uint16_t ipbh0)
{
    CPUS390XState *env = &cpu->env;
    uint64_t sccb;
    uint32_t code;
    int r = 0;

    cpu_synchronize_state(CPU(cpu));
    /* low nibble of ipbh0 selects the SCCB register, high nibble the code */
    sccb = env->regs[ipbh0 & 0xf];
    code = env->regs[(ipbh0 & 0xf0) >> 4];

    r = sclp_service_call(env, sccb, code);
    if (r < 0) {
        /* negative value is a (negated) program interruption code */
        kvm_s390_program_interrupt(cpu, -r);
    } else {
        setcc(cpu, r);
    }

    return 0;
}
1120
/*
 * Handle intercepted privileged instructions with opcode 0xb2xx — mostly
 * the channel-subsystem I/O instructions.  Returns 0 if handled, -1 to
 * make the caller inject an operation exception.
 */
static int handle_b2(S390CPU *cpu, struct kvm_run *run, uint8_t ipa1)
{
    CPUS390XState *env = &cpu->env;
    int rc = 0;
    uint16_t ipbh0 = (run->s390_sieic.ipb & 0xffff0000) >> 16;

    cpu_synchronize_state(CPU(cpu));

    switch (ipa1) {
    case PRIV_B2_XSCH:
        ioinst_handle_xsch(cpu, env->regs[1]);
        break;
    case PRIV_B2_CSCH:
        ioinst_handle_csch(cpu, env->regs[1]);
        break;
    case PRIV_B2_HSCH:
        ioinst_handle_hsch(cpu, env->regs[1]);
        break;
    case PRIV_B2_MSCH:
        ioinst_handle_msch(cpu, env->regs[1], run->s390_sieic.ipb);
        break;
    case PRIV_B2_SSCH:
        ioinst_handle_ssch(cpu, env->regs[1], run->s390_sieic.ipb);
        break;
    case PRIV_B2_STCRW:
        ioinst_handle_stcrw(cpu, run->s390_sieic.ipb);
        break;
    case PRIV_B2_STSCH:
        ioinst_handle_stsch(cpu, env->regs[1], run->s390_sieic.ipb);
        break;
    case PRIV_B2_TSCH:
        /* We should only get tsch via KVM_EXIT_S390_TSCH. */
        fprintf(stderr, "Spurious tsch intercept\n");
        break;
    case PRIV_B2_CHSC:
        ioinst_handle_chsc(cpu, run->s390_sieic.ipb);
        break;
    case PRIV_B2_TPI:
        /* This should have been handled by kvm already. */
        fprintf(stderr, "Spurious tpi intercept\n");
        break;
    case PRIV_B2_SCHM:
        ioinst_handle_schm(cpu, env->regs[1], env->regs[2],
                           run->s390_sieic.ipb);
        break;
    case PRIV_B2_RSCH:
        ioinst_handle_rsch(cpu, env->regs[1]);
        break;
    case PRIV_B2_RCHP:
        ioinst_handle_rchp(cpu, env->regs[1]);
        break;
    case PRIV_B2_STCPS:
        /* We do not provide this instruction, it is suppressed. */
        break;
    case PRIV_B2_SAL:
        ioinst_handle_sal(cpu, env->regs[1]);
        break;
    case PRIV_B2_SIGA:
        /* Not provided, set CC = 3 for subchannel not operational */
        setcc(cpu, 3);
        break;
    case PRIV_B2_SCLP_CALL:
        rc = kvm_sclp_service_call(cpu, run, ipbh0);
        break;
    default:
        rc = -1;
        DPRINTF("KVM: unhandled PRIV: 0xb2%x\n", ipa1);
        break;
    }

    return rc;
}
1193
6cb1e49d
AY
/*
 * Compute the effective address of an RXY-format instruction from the
 * intercepted ipa/ipb fields: base register, index register and 20-bit
 * signed displacement.  If @ar is non-NULL it receives the access
 * register number (== base register field).
 */
static uint64_t get_base_disp_rxy(S390CPU *cpu, struct kvm_run *run,
                                  uint8_t *ar)
{
    CPUS390XState *env = &cpu->env;
    uint32_t x2 = (run->s390_sieic.ipa & 0x000f);
    uint32_t base2 = run->s390_sieic.ipb >> 28;
    uint32_t disp2 = ((run->s390_sieic.ipb & 0x0fff0000) >> 16) +
                     ((run->s390_sieic.ipb & 0xff00) << 4);

    if (disp2 & 0x80000) {
        /* sign-extend the 20-bit displacement */
        disp2 += 0xfff00000;
    }
    if (ar) {
        *ar = base2;
    }

    /* register 0 means "no base/index", not the contents of r0 */
    return (base2 ? env->regs[base2] : 0) +
           (x2 ? env->regs[x2] : 0) + (long)(int)disp2;
}
1213
6cb1e49d
AY
/*
 * Compute the effective address of an RSY-format instruction (base
 * register plus 20-bit signed displacement, no index register).  If @ar
 * is non-NULL it receives the access register number.
 */
static uint64_t get_base_disp_rsy(S390CPU *cpu, struct kvm_run *run,
                                  uint8_t *ar)
{
    CPUS390XState *env = &cpu->env;
    uint32_t base2 = run->s390_sieic.ipb >> 28;
    uint32_t disp2 = ((run->s390_sieic.ipb & 0x0fff0000) >> 16) +
                     ((run->s390_sieic.ipb & 0xff00) << 4);

    if (disp2 & 0x80000) {
        /* sign-extend the 20-bit displacement */
        disp2 += 0xfff00000;
    }
    if (ar) {
        *ar = base2;
    }

    return (base2 ? env->regs[base2] : 0) + (long)(int)disp2;
}
1231
1232static int kvm_clp_service_call(S390CPU *cpu, struct kvm_run *run)
1233{
1234 uint8_t r2 = (run->s390_sieic.ipb & 0x000f0000) >> 16;
1235
42f865da
CH
1236 if (s390_has_feat(S390_FEAT_ZPCI)) {
1237 return clp_service_call(cpu, r2);
1238 } else {
1239 return -1;
1240 }
863f6f52
FB
1241}
1242
1243static int kvm_pcilg_service_call(S390CPU *cpu, struct kvm_run *run)
1244{
1245 uint8_t r1 = (run->s390_sieic.ipb & 0x00f00000) >> 20;
1246 uint8_t r2 = (run->s390_sieic.ipb & 0x000f0000) >> 16;
1247
42f865da
CH
1248 if (s390_has_feat(S390_FEAT_ZPCI)) {
1249 return pcilg_service_call(cpu, r1, r2);
1250 } else {
1251 return -1;
1252 }
863f6f52
FB
1253}
1254
1255static int kvm_pcistg_service_call(S390CPU *cpu, struct kvm_run *run)
1256{
1257 uint8_t r1 = (run->s390_sieic.ipb & 0x00f00000) >> 20;
1258 uint8_t r2 = (run->s390_sieic.ipb & 0x000f0000) >> 16;
1259
42f865da
CH
1260 if (s390_has_feat(S390_FEAT_ZPCI)) {
1261 return pcistg_service_call(cpu, r1, r2);
1262 } else {
1263 return -1;
1264 }
863f6f52
FB
1265}
1266
1267static int kvm_stpcifc_service_call(S390CPU *cpu, struct kvm_run *run)
1268{
1269 uint8_t r1 = (run->s390_sieic.ipa & 0x00f0) >> 4;
1270 uint64_t fiba;
6cb1e49d 1271 uint8_t ar;
863f6f52 1272
42f865da
CH
1273 if (s390_has_feat(S390_FEAT_ZPCI)) {
1274 cpu_synchronize_state(CPU(cpu));
1275 fiba = get_base_disp_rxy(cpu, run, &ar);
863f6f52 1276
42f865da
CH
1277 return stpcifc_service_call(cpu, r1, fiba, ar);
1278 } else {
1279 return -1;
1280 }
863f6f52
FB
1281}
1282
/*
 * Handle an intercepted SIC (set interruption controls) instruction:
 * extract the mode and interruption subclass from the registers and pass
 * them to the channel subsystem.  Errors are reported as program
 * interrupts; the instruction is always considered handled.
 */
static int kvm_sic_service_call(S390CPU *cpu, struct kvm_run *run)
{
    CPUS390XState *env = &cpu->env;
    uint8_t r1 = (run->s390_sieic.ipa & 0x00f0) >> 4;
    uint8_t r3 = run->s390_sieic.ipa & 0x000f;
    uint8_t isc;
    uint16_t mode;
    int r;

    cpu_synchronize_state(CPU(cpu));
    mode = env->regs[r1] & 0xffff;
    isc = (env->regs[r3] >> 27) & 0x7;
    r = css_do_sic(env, isc, mode);
    if (r) {
        /* negative value is a (negated) program interruption code */
        kvm_s390_program_interrupt(cpu, -r);
    }

    return 0;
}
1302
1303static int kvm_rpcit_service_call(S390CPU *cpu, struct kvm_run *run)
1304{
1305 uint8_t r1 = (run->s390_sieic.ipb & 0x00f00000) >> 20;
1306 uint8_t r2 = (run->s390_sieic.ipb & 0x000f0000) >> 16;
1307
42f865da
CH
1308 if (s390_has_feat(S390_FEAT_ZPCI)) {
1309 return rpcit_service_call(cpu, r1, r2);
1310 } else {
1311 return -1;
1312 }
863f6f52
FB
1313}
1314
1315static int kvm_pcistb_service_call(S390CPU *cpu, struct kvm_run *run)
1316{
1317 uint8_t r1 = (run->s390_sieic.ipa & 0x00f0) >> 4;
1318 uint8_t r3 = run->s390_sieic.ipa & 0x000f;
1319 uint64_t gaddr;
6cb1e49d 1320 uint8_t ar;
863f6f52 1321
42f865da
CH
1322 if (s390_has_feat(S390_FEAT_ZPCI)) {
1323 cpu_synchronize_state(CPU(cpu));
1324 gaddr = get_base_disp_rsy(cpu, run, &ar);
863f6f52 1325
42f865da
CH
1326 return pcistb_service_call(cpu, r1, r3, gaddr, ar);
1327 } else {
1328 return -1;
1329 }
863f6f52
FB
1330}
1331
1332static int kvm_mpcifc_service_call(S390CPU *cpu, struct kvm_run *run)
1333{
1334 uint8_t r1 = (run->s390_sieic.ipa & 0x00f0) >> 4;
1335 uint64_t fiba;
6cb1e49d 1336 uint8_t ar;
863f6f52 1337
42f865da
CH
1338 if (s390_has_feat(S390_FEAT_ZPCI)) {
1339 cpu_synchronize_state(CPU(cpu));
1340 fiba = get_base_disp_rxy(cpu, run, &ar);
863f6f52 1341
42f865da
CH
1342 return mpcifc_service_call(cpu, r1, fiba, ar);
1343 } else {
1344 return -1;
1345 }
863f6f52
FB
1346}
1347
/*
 * Handle intercepted privileged instructions with opcode 0xb9xx (zPCI
 * instructions and EQBS).  Returns 0 if handled, -1 to inject an
 * operation exception.
 */
static int handle_b9(S390CPU *cpu, struct kvm_run *run, uint8_t ipa1)
{
    int r = 0;

    switch (ipa1) {
    case PRIV_B9_CLP:
        r = kvm_clp_service_call(cpu, run);
        break;
    case PRIV_B9_PCISTG:
        r = kvm_pcistg_service_call(cpu, run);
        break;
    case PRIV_B9_PCILG:
        r = kvm_pcilg_service_call(cpu, run);
        break;
    case PRIV_B9_RPCIT:
        r = kvm_rpcit_service_call(cpu, run);
        break;
    case PRIV_B9_EQBS:
        /* just inject exception */
        r = -1;
        break;
    default:
        r = -1;
        DPRINTF("KVM: unhandled PRIV: 0xb9%x\n", ipa1);
        break;
    }

    return r;
}
1377
/*
 * Handle intercepted privileged instructions with opcode 0xebxx, selected
 * by the low byte of the ipb (PCISTB, SIC, SQBS).  Returns 0 if handled,
 * -1 to inject an operation exception.
 */
static int handle_eb(S390CPU *cpu, struct kvm_run *run, uint8_t ipbl)
{
    int r = 0;

    switch (ipbl) {
    case PRIV_EB_PCISTB:
        r = kvm_pcistb_service_call(cpu, run);
        break;
    case PRIV_EB_SIC:
        r = kvm_sic_service_call(cpu, run);
        break;
    case PRIV_EB_SQBS:
        /* just inject exception */
        r = -1;
        break;
    default:
        r = -1;
        DPRINTF("KVM: unhandled PRIV: 0xeb%x\n", ipbl);
        break;
    }

    return r;
}
1401
863f6f52
FB
/*
 * Handle intercepted privileged instructions with opcode 0xe3xx, selected
 * by the low byte of the ipb (MPCIFC, STPCIFC).  Returns 0 if handled,
 * -1 to inject an operation exception.
 */
static int handle_e3(S390CPU *cpu, struct kvm_run *run, uint8_t ipbl)
{
    int r = 0;

    switch (ipbl) {
    case PRIV_E3_MPCIFC:
        r = kvm_mpcifc_service_call(cpu, run);
        break;
    case PRIV_E3_STPCIFC:
        r = kvm_stpcifc_service_call(cpu, run);
        break;
    default:
        r = -1;
        DPRINTF("KVM: unhandled PRIV: 0xe3%x\n", ipbl);
        break;
    }

    return r;
}
1421
/*
 * Handle the s390 virtio hypercall diagnose.  An -EINVAL result from the
 * hypercall is converted into a specification exception for the guest and
 * reported as handled; other return values are passed on to the caller.
 */
static int handle_hypercall(S390CPU *cpu, struct kvm_run *run)
{
    CPUS390XState *env = &cpu->env;
    int ret;

    cpu_synchronize_state(CPU(cpu));
    ret = s390_virtio_hypercall(env);
    if (ret == -EINVAL) {
        kvm_s390_program_interrupt(cpu, PGM_SPECIFICATION);
        return 0;
    }

    return ret;
}
1436
8fc639af
XW
/*
 * Handle diagnose 0x288 (watchdog / timer event).  r1 and r3 are taken
 * from the instruction's register fields; an error injects a
 * specification exception.
 */
static void kvm_handle_diag_288(S390CPU *cpu, struct kvm_run *run)
{
    uint64_t r1, r3;
    int rc;

    cpu_synchronize_state(CPU(cpu));
    r1 = (run->s390_sieic.ipa & 0x00f0) >> 4;
    r3 = run->s390_sieic.ipa & 0x000f;
    rc = handle_diag_288(&cpu->env, r1, r3);
    if (rc) {
        kvm_s390_program_interrupt(cpu, PGM_SPECIFICATION);
    }
}
1450
268846ba
ED
/*
 * Handle diagnose 0x308 (IPL functions, e.g. reboot/load-clear).  The
 * register fields are decoded from the instruction; all error handling
 * happens inside handle_diag_308().
 */
static void kvm_handle_diag_308(S390CPU *cpu, struct kvm_run *run)
{
    uint64_t r1, r3;

    cpu_synchronize_state(CPU(cpu));
    r1 = (run->s390_sieic.ipa & 0x00f0) >> 4;
    r3 = run->s390_sieic.ipa & 0x000f;
    handle_diag_308(&cpu->env, r1, r3);
}
1460
b30f4dfb
DH
/*
 * Check whether the current PSW address corresponds to a gdbstub software
 * breakpoint (the PSW already points past the breakpoint instruction).
 * On a match, rewind the PSW and return EXCP_DEBUG; otherwise -ENOENT.
 */
static int handle_sw_breakpoint(S390CPU *cpu, struct kvm_run *run)
{
    CPUS390XState *env = &cpu->env;
    unsigned long pc;

    cpu_synchronize_state(CPU(cpu));

    pc = env->psw.addr - sw_bp_ilen;
    if (kvm_find_sw_breakpoint(CPU(cpu), pc)) {
        env->psw.addr = pc;
        return EXCP_DEBUG;
    }

    return -ENOENT;
}
1476
638129ff
CH
#define DIAG_KVM_CODE_MASK 0x000000000000ffff

/*
 * Dispatch an intercepted DIAGNOSE instruction by function code.
 * Unknown codes inject a specification exception.
 */
static int handle_diag(S390CPU *cpu, struct kvm_run *run, uint32_t ipb)
{
    int r = 0;
    uint16_t func_code;

    /*
     * For any diagnose call we support, bits 48-63 of the resulting
     * address specify the function code; the remainder is ignored.
     */
    func_code = decode_basedisp_rs(&cpu->env, ipb, NULL) & DIAG_KVM_CODE_MASK;
    switch (func_code) {
    case DIAG_TIMEREVENT:
        kvm_handle_diag_288(cpu, run);
        break;
    case DIAG_IPL:
        kvm_handle_diag_308(cpu, run);
        break;
    case DIAG_KVM_HYPERCALL:
        r = handle_hypercall(cpu, run);
        break;
    case DIAG_KVM_BREAKPOINT:
        r = handle_sw_breakpoint(cpu, run);
        break;
    default:
        DPRINTF("KVM: unknown DIAG: 0x%x\n", func_code);
        kvm_s390_program_interrupt(cpu, PGM_SPECIFICATION);
        break;
    }

    return r;
}
1510
/* Parameter block handed to the per-vcpu SIGP handlers via run_on_cpu(). */
typedef struct SigpInfo {
    uint64_t param;       /* order-specific parameter value */
    int cc;               /* resulting SIGP condition code */
    uint64_t *status_reg; /* register receiving status when cc says "stored" */
} SigpInfo;
1516
/*
 * Store @status into bits 32-63 of the caller's status register and set
 * the condition code to "status stored".
 */
static void set_sigp_status(SigpInfo *si, uint64_t status)
{
    *si->status_reg &= 0xffffffff00000000ULL;
    *si->status_reg |= status;
    si->cc = SIGP_CC_STATUS_STORED;
}
6e6ad8db 1523
/*
 * SIGP START handler, run on the target vcpu's thread: move a stopped cpu
 * to the operating state.  A cpu that is not stopped is left untouched;
 * either way the order is accepted.
 */
static void sigp_start(CPUState *cs, run_on_cpu_data arg)
{
    S390CPU *cpu = S390_CPU(cs);
    SigpInfo *si = arg.host_ptr;

    if (s390_cpu_get_state(cpu) != CPU_STATE_STOPPED) {
        si->cc = SIGP_CC_ORDER_CODE_ACCEPTED;
        return;
    }

    s390_cpu_set_state(CPU_STATE_OPERATING, cpu);
    si->cc = SIGP_CC_ORDER_CODE_ACCEPTED;
}
1537
/*
 * SIGP STOP handler, run on the target vcpu's thread.  A halted cpu
 * (disabled wait) can be stopped immediately; a running cpu gets a STOP
 * interrupt and records the pending order, to be completed at the stop
 * intercept.
 */
static void sigp_stop(CPUState *cs, run_on_cpu_data arg)
{
    S390CPU *cpu = S390_CPU(cs);
    SigpInfo *si = arg.host_ptr;
    struct kvm_s390_irq irq = {
        .type = KVM_S390_SIGP_STOP,
    };

    if (s390_cpu_get_state(cpu) != CPU_STATE_OPERATING) {
        si->cc = SIGP_CC_ORDER_CODE_ACCEPTED;
        return;
    }

    /* disabled wait - sleeping in user space */
    if (cs->halted) {
        s390_cpu_set_state(CPU_STATE_STOPPED, cpu);
    } else {
        /* execute the stop function */
        cpu->env.sigp_order = SIGP_STOP;
        kvm_s390_vcpu_interrupt(cpu, &irq);
    }
    si->cc = SIGP_CC_ORDER_CODE_ACCEPTED;
}
1561
62deb62d
FZ
#define ADTL_GS_OFFSET 1024 /* offset of GS data in adtl save area */
#define ADTL_GS_MIN_SIZE 2048 /* minimal size of adtl save area for GS */
/*
 * Write the additional-status save area (vector registers, and the
 * guarded-storage control block when the area is large enough) to guest
 * memory at @addr/@len.  Returns 0 on success, -EFAULT if the guest
 * memory cannot be mapped in one piece.
 */
static int do_store_adtl_status(S390CPU *cpu, hwaddr addr, hwaddr len)
{
    hwaddr save = len;
    void *mem;

    mem = cpu_physical_memory_map(addr, &save, 1);
    if (!mem) {
        return -EFAULT;
    }
    if (save != len) {
        /* mapping was truncated - not usable as one contiguous area */
        cpu_physical_memory_unmap(mem, len, 1, 0);
        return -EFAULT;
    }

    if (s390_has_feat(S390_FEAT_VECTOR)) {
        memcpy(mem, &cpu->env.vregs, 512);
    }
    if (s390_has_feat(S390_FEAT_GUARDED_STORAGE) && len >= ADTL_GS_MIN_SIZE) {
        memcpy(mem + ADTL_GS_OFFSET, &cpu->env.gscb, 32);
    }

    cpu_physical_memory_unmap(mem, len, 1, len);

    return 0;
}
1589
947a38bd
DH
/*
 * Architected 512-byte store-status save area layout; offsets are from
 * the start of the area (verified by the build-time assert below).
 */
struct sigp_save_area {
    uint64_t fprs[16]; /* 0x0000 */
    uint64_t grs[16]; /* 0x0080 */
    PSW psw; /* 0x0100 */
    uint8_t pad_0x0110[0x0118 - 0x0110]; /* 0x0110 */
    uint32_t prefix; /* 0x0118 */
    uint32_t fpc; /* 0x011c */
    uint8_t pad_0x0120[0x0124 - 0x0120]; /* 0x0120 */
    uint32_t todpr; /* 0x0124 */
    uint64_t cputm; /* 0x0128 */
    uint64_t ckc; /* 0x0130 */
    uint8_t pad_0x0138[0x0140 - 0x0138]; /* 0x0138 */
    uint32_t ars[16]; /* 0x0140 */
    uint64_t crs[16]; /* 0x0180 */
};
QEMU_BUILD_BUG_ON(sizeof(struct sigp_save_area) != 512);
1606
18ff9494 1607#define KVM_S390_STORE_STATUS_DEF_ADDR offsetof(LowCore, floating_pt_save_area)
18ff9494
DH
1608static int kvm_s390_store_status(S390CPU *cpu, hwaddr addr, bool store_arch)
1609{
1610 static const uint8_t ar_id = 1;
947a38bd
DH
1611 struct sigp_save_area *sa;
1612 hwaddr len = sizeof(*sa);
c498d8e3 1613 int i;
18ff9494 1614
947a38bd
DH
1615 sa = cpu_physical_memory_map(addr, &len, 1);
1616 if (!sa) {
18ff9494
DH
1617 return -EFAULT;
1618 }
947a38bd
DH
1619 if (len != sizeof(*sa)) {
1620 cpu_physical_memory_unmap(sa, len, 1, 0);
18ff9494
DH
1621 return -EFAULT;
1622 }
1623
1624 if (store_arch) {
1625 cpu_physical_memory_write(offsetof(LowCore, ar_access_id), &ar_id, 1);
1626 }
c498d8e3 1627 for (i = 0; i < 16; ++i) {
947a38bd
DH
1628 sa->fprs[i] = cpu_to_be64(get_freg(&cpu->env, i)->ll);
1629 }
1630 for (i = 0; i < 16; ++i) {
1631 sa->grs[i] = cpu_to_be64(cpu->env.regs[i]);
1632 }
1633 sa->psw.addr = cpu_to_be64(cpu->env.psw.addr);
1634 sa->psw.mask = cpu_to_be64(get_psw_mask(&cpu->env));
1635 sa->prefix = cpu_to_be32(cpu->env.psa);
1636 sa->fpc = cpu_to_be32(cpu->env.fpc);
1637 sa->todpr = cpu_to_be32(cpu->env.todpr);
1638 sa->cputm = cpu_to_be64(cpu->env.cputm);
1639 sa->ckc = cpu_to_be64(cpu->env.ckc >> 8);
1640 for (i = 0; i < 16; ++i) {
1641 sa->ars[i] = cpu_to_be32(cpu->env.aregs[i]);
1642 }
1643 for (i = 0; i < 16; ++i) {
1644 sa->ars[i] = cpu_to_be64(cpu->env.cregs[i]);
1645 }
18ff9494 1646
947a38bd 1647 cpu_physical_memory_unmap(sa, len, 1, len);
18ff9494
DH
1648
1649 return 0;
1650}
1651
/*
 * SIGP STOP AND STORE STATUS handler, run on the target vcpu's thread.
 * A running cpu gets a STOP interrupt with the store deferred to the stop
 * intercept; an already stopped cpu has its status stored immediately at
 * the default (lowcore) address.
 */
static void sigp_stop_and_store_status(CPUState *cs, run_on_cpu_data arg)
{
    S390CPU *cpu = S390_CPU(cs);
    SigpInfo *si = arg.host_ptr;
    struct kvm_s390_irq irq = {
        .type = KVM_S390_SIGP_STOP,
    };

    /* disabled wait - sleeping in user space */
    if (s390_cpu_get_state(cpu) == CPU_STATE_OPERATING && cs->halted) {
        s390_cpu_set_state(CPU_STATE_STOPPED, cpu);
    }

    switch (s390_cpu_get_state(cpu)) {
    case CPU_STATE_OPERATING:
        cpu->env.sigp_order = SIGP_STOP_STORE_STATUS;
        kvm_s390_vcpu_interrupt(cpu, &irq);
        /* store will be performed when handling the stop intercept */
        break;
    case CPU_STATE_STOPPED:
        /* already stopped, just store the status */
        cpu_synchronize_state(cs);
        kvm_s390_store_status(cpu, KVM_S390_STORE_STATUS_DEF_ADDR, true);
        break;
    }
    si->cc = SIGP_CC_ORDER_CODE_ACCEPTED;
}
1679
/*
 * SIGP STORE STATUS AT ADDRESS handler, run on the target vcpu's thread.
 * The target must be stopped; the 512-byte-aligned store address comes
 * from the order parameter.
 */
static void sigp_store_status_at_address(CPUState *cs, run_on_cpu_data arg)
{
    S390CPU *cpu = S390_CPU(cs);
    SigpInfo *si = arg.host_ptr;
    uint32_t address = si->param & 0x7ffffe00u;

    /* cpu has to be stopped */
    if (s390_cpu_get_state(cpu) != CPU_STATE_STOPPED) {
        set_sigp_status(si, SIGP_STAT_INCORRECT_STATE);
        return;
    }

    cpu_synchronize_state(cs);

    if (kvm_s390_store_status(cpu, address, false)) {
        set_sigp_status(si, SIGP_STAT_INVALID_PARAMETER);
        return;
    }
    si->cc = SIGP_CC_ORDER_CODE_ACCEPTED;
}
1700
#define ADTL_SAVE_LC_MASK 0xfUL
/*
 * SIGP STORE ADDITIONAL STATUS AT ADDRESS handler, run on the target
 * vcpu's thread.  The low nibble of the parameter is the length code (lc),
 * the rest the (length-aligned) address.  Validates facility availability,
 * cpu state, alignment and lc before storing.
 */
static void sigp_store_adtl_status(CPUState *cs, run_on_cpu_data arg)
{
    S390CPU *cpu = S390_CPU(cs);
    SigpInfo *si = arg.host_ptr;
    uint8_t lc = si->param & ADTL_SAVE_LC_MASK;
    hwaddr addr = si->param & ~ADTL_SAVE_LC_MASK;
    hwaddr len = 1UL << (lc ? lc : 10); /* lc 0 means 1024 bytes */

    if (!s390_has_feat(S390_FEAT_VECTOR) &&
        !s390_has_feat(S390_FEAT_GUARDED_STORAGE)) {
        set_sigp_status(si, SIGP_STAT_INVALID_ORDER);
        return;
    }

    /* cpu has to be stopped */
    if (s390_cpu_get_state(cpu) != CPU_STATE_STOPPED) {
        set_sigp_status(si, SIGP_STAT_INCORRECT_STATE);
        return;
    }

    /* address must be aligned to length */
    if (addr & (len - 1)) {
        set_sigp_status(si, SIGP_STAT_INVALID_PARAMETER);
        return;
    }

    /* no GS: only lc == 0 is valid */
    if (!s390_has_feat(S390_FEAT_GUARDED_STORAGE) &&
        lc != 0) {
        set_sigp_status(si, SIGP_STAT_INVALID_PARAMETER);
        return;
    }

    /* GS: 0, 10, 11, 12 are valid */
    if (s390_has_feat(S390_FEAT_GUARDED_STORAGE) &&
        lc != 0 &&
        lc != 10 &&
        lc != 11 &&
        lc != 12) {
        set_sigp_status(si, SIGP_STAT_INVALID_PARAMETER);
        return;
    }

    cpu_synchronize_state(cs);

    if (do_store_adtl_status(cpu, addr, len)) {
        set_sigp_status(si, SIGP_STAT_INVALID_PARAMETER);
        return;
    }
    si->cc = SIGP_CC_ORDER_CODE_ACCEPTED;
}
1753
/*
 * SIGP RESTART handler, run on the target vcpu's thread.  A stopped cpu
 * gets the restart interrupt delivered directly (before any other pending
 * interrupt) and is set operating; a running cpu gets a RESTART irq
 * queued via KVM.
 */
static void sigp_restart(CPUState *cs, run_on_cpu_data arg)
{
    S390CPU *cpu = S390_CPU(cs);
    SigpInfo *si = arg.host_ptr;
    struct kvm_s390_irq irq = {
        .type = KVM_S390_RESTART,
    };

    switch (s390_cpu_get_state(cpu)) {
    case CPU_STATE_STOPPED:
        /* the restart irq has to be delivered prior to any other pending irq */
        cpu_synchronize_state(cs);
        do_restart_interrupt(&cpu->env);
        s390_cpu_set_state(CPU_STATE_OPERATING, cpu);
        break;
    case CPU_STATE_OPERATING:
        kvm_s390_vcpu_interrupt(cpu, &irq);
        break;
    }
    si->cc = SIGP_CC_ORDER_CODE_ACCEPTED;
}
1775
/*
 * Restart @cpu as if a SIGP RESTART had been issued to it; the actual
 * work runs synchronously on the target vcpu's thread.  Always returns 0.
 */
int kvm_s390_cpu_restart(S390CPU *cpu)
{
    SigpInfo si = {};

    run_on_cpu(CPU(cpu), sigp_restart, RUN_ON_CPU_HOST_PTR(&si));
    DPRINTF("DONE: KVM cpu restart: %p\n", &cpu->env);
    return 0;
}
1784
/*
 * SIGP INITIAL CPU RESET handler, run on the target vcpu's thread:
 * perform the class's initial reset and push the new state into KVM.
 */
static void sigp_initial_cpu_reset(CPUState *cs, run_on_cpu_data arg)
{
    S390CPU *cpu = S390_CPU(cs);
    S390CPUClass *scc = S390_CPU_GET_CLASS(cpu);
    SigpInfo *si = arg.host_ptr;

    cpu_synchronize_state(cs);
    scc->initial_cpu_reset(cs);
    cpu_synchronize_post_reset(cs);
    si->cc = SIGP_CC_ORDER_CODE_ACCEPTED;
}
1796
/*
 * SIGP CPU RESET handler, run on the target vcpu's thread: perform the
 * (lighter) normal cpu reset and push the new state into KVM.
 */
static void sigp_cpu_reset(CPUState *cs, run_on_cpu_data arg)
{
    S390CPU *cpu = S390_CPU(cs);
    S390CPUClass *scc = S390_CPU_GET_CLASS(cpu);
    SigpInfo *si = arg.host_ptr;

    cpu_synchronize_state(cs);
    scc->cpu_reset(cs);
    cpu_synchronize_post_reset(cs);
    si->cc = SIGP_CC_ORDER_CODE_ACCEPTED;
}
1808
/*
 * SIGP SET PREFIX handler, run on the target vcpu's thread.  Validates
 * that the new prefix area is accessible and that the target cpu is
 * stopped before installing the new prefix (PSA).
 */
static void sigp_set_prefix(CPUState *cs, run_on_cpu_data arg)
{
    S390CPU *cpu = S390_CPU(cs);
    SigpInfo *si = arg.host_ptr;
    uint32_t addr = si->param & 0x7fffe000u;

    cpu_synchronize_state(cs);

    if (!address_space_access_valid(&address_space_memory, addr,
                                    sizeof(struct LowCore), false)) {
        set_sigp_status(si, SIGP_STAT_INVALID_PARAMETER);
        return;
    }

    /* cpu has to be stopped */
    if (s390_cpu_get_state(cpu) != CPU_STATE_STOPPED) {
        set_sigp_status(si, SIGP_STAT_INCORRECT_STATE);
        return;
    }

    cpu->env.psa = addr;
    cpu_synchronize_post_init(cs);
    si->cc = SIGP_CC_ORDER_CODE_ACCEPTED;
}
1833
/*
 * Dispatch a SIGP order that targets a single vcpu.  The actual work runs
 * synchronously on the target vcpu's thread via run_on_cpu(), which fills
 * in si.cc (and the status register for stored-status results).
 * Returns the resulting SIGP condition code.
 */
static int handle_sigp_single_dst(S390CPU *dst_cpu, uint8_t order,
                                  uint64_t param, uint64_t *status_reg)
{
    SigpInfo si = {
        .param = param,
        .status_reg = status_reg,
    };

    /* cpu available? */
    if (dst_cpu == NULL) {
        return SIGP_CC_NOT_OPERATIONAL;
    }

    /* only resets can break pending orders */
    if (dst_cpu->env.sigp_order != 0 &&
        order != SIGP_CPU_RESET &&
        order != SIGP_INITIAL_CPU_RESET) {
        return SIGP_CC_BUSY;
    }

    switch (order) {
    case SIGP_START:
        run_on_cpu(CPU(dst_cpu), sigp_start, RUN_ON_CPU_HOST_PTR(&si));
        break;
    case SIGP_STOP:
        run_on_cpu(CPU(dst_cpu), sigp_stop, RUN_ON_CPU_HOST_PTR(&si));
        break;
    case SIGP_RESTART:
        run_on_cpu(CPU(dst_cpu), sigp_restart, RUN_ON_CPU_HOST_PTR(&si));
        break;
    case SIGP_STOP_STORE_STATUS:
        run_on_cpu(CPU(dst_cpu), sigp_stop_and_store_status, RUN_ON_CPU_HOST_PTR(&si));
        break;
    case SIGP_STORE_STATUS_ADDR:
        run_on_cpu(CPU(dst_cpu), sigp_store_status_at_address, RUN_ON_CPU_HOST_PTR(&si));
        break;
    case SIGP_STORE_ADTL_STATUS:
        run_on_cpu(CPU(dst_cpu), sigp_store_adtl_status, RUN_ON_CPU_HOST_PTR(&si));
        break;
    case SIGP_SET_PREFIX:
        run_on_cpu(CPU(dst_cpu), sigp_set_prefix, RUN_ON_CPU_HOST_PTR(&si));
        break;
    case SIGP_INITIAL_CPU_RESET:
        run_on_cpu(CPU(dst_cpu), sigp_initial_cpu_reset, RUN_ON_CPU_HOST_PTR(&si));
        break;
    case SIGP_CPU_RESET:
        run_on_cpu(CPU(dst_cpu), sigp_cpu_reset, RUN_ON_CPU_HOST_PTR(&si));
        break;
    default:
        DPRINTF("KVM: unknown SIGP: 0x%x\n", order);
        set_sigp_status(&si, SIGP_STAT_INVALID_ORDER);
    }

    return si.cc;
}
1889
/*
 * SIGP SET ARCHITECTURE.  With the czam feature the machine is always in
 * z/Architecture mode, so the order is always rejected; the stored status
 * only differs depending on whether all other cpus are stopped.
 */
static int sigp_set_architecture(S390CPU *cpu, uint32_t param,
                                 uint64_t *status_reg)
{
    CPUState *cur_cs;
    S390CPU *cur_cpu;
    bool all_stopped = true;

    CPU_FOREACH(cur_cs) {
        cur_cpu = S390_CPU(cur_cs);

        if (cur_cpu == cpu) {
            /* the issuing cpu doesn't have to be stopped */
            continue;
        }
        if (s390_cpu_get_state(cur_cpu) != CPU_STATE_STOPPED) {
            all_stopped = false;
        }
    }

    *status_reg &= 0xffffffff00000000ULL;

    /* Reject set arch order, with czam we're always in z/Arch mode. */
    *status_reg |= (all_stopped ? SIGP_STAT_INVALID_PARAMETER :
                    SIGP_STAT_INCORRECT_STATE);
    return SIGP_CC_STATUS_STORED;
}
1915
/*
 * Handle an intercepted SIGP instruction: decode order code, parameter
 * and status register, and dispatch.  SIGP handling is globally
 * serialized; if another SIGP is in flight, report cc "busy".  A
 * non-negative result becomes the condition code, a negative one is an
 * error passed to the caller.
 */
static int handle_sigp(S390CPU *cpu, struct kvm_run *run, uint8_t ipa1)
{
    CPUS390XState *env = &cpu->env;
    const uint8_t r1 = ipa1 >> 4;
    const uint8_t r3 = ipa1 & 0x0f;
    int ret;
    uint8_t order;
    uint64_t *status_reg;
    uint64_t param;
    S390CPU *dst_cpu = NULL;

    cpu_synchronize_state(CPU(cpu));

    /* get order code */
    order = decode_basedisp_rs(env, run->s390_sieic.ipb, NULL)
        & SIGP_ORDER_MASK;
    status_reg = &env->regs[r1];
    /* odd r1: parameter is in r1 itself; even r1: in r1 + 1 */
    param = (r1 % 2) ? env->regs[r1] : env->regs[r1 + 1];

    if (qemu_mutex_trylock(&qemu_sigp_mutex)) {
        ret = SIGP_CC_BUSY;
        goto out;
    }

    switch (order) {
    case SIGP_SET_ARCH:
        ret = sigp_set_architecture(cpu, param, status_reg);
        break;
    default:
        /* all other sigp orders target a single vcpu */
        dst_cpu = s390_cpu_addr2state(env->regs[r3]);
        ret = handle_sigp_single_dst(dst_cpu, order, param, status_reg);
    }
    qemu_mutex_unlock(&qemu_sigp_mutex);

out:
    trace_kvm_sigp_finished(order, CPU(cpu)->cpu_index,
                            dst_cpu ? CPU(dst_cpu)->cpu_index : -1, ret);

    if (ret >= 0) {
        setcc(cpu, ret);
        return 0;
    }

    return ret;
}
1962
/*
 * Dispatch an intercepted instruction by its primary opcode byte(s).
 * Anything unhandled (r < 0) results in an operation exception being
 * injected into the guest and is reported as handled.
 */
static int handle_instruction(S390CPU *cpu, struct kvm_run *run)
{
    unsigned int ipa0 = (run->s390_sieic.ipa & 0xff00);
    uint8_t ipa1 = run->s390_sieic.ipa & 0x00ff;
    int r = -1;

    DPRINTF("handle_instruction 0x%x 0x%x\n",
            run->s390_sieic.ipa, run->s390_sieic.ipb);
    switch (ipa0) {
    case IPA0_B2:
        r = handle_b2(cpu, run, ipa1);
        break;
    case IPA0_B9:
        r = handle_b9(cpu, run, ipa1);
        break;
    case IPA0_EB:
        r = handle_eb(cpu, run, run->s390_sieic.ipb & 0xff);
        break;
    case IPA0_E3:
        r = handle_e3(cpu, run, run->s390_sieic.ipb & 0xff);
        break;
    case IPA0_DIAG:
        r = handle_diag(cpu, run, run->s390_sieic.ipb);
        break;
    case IPA0_SIGP:
        r = handle_sigp(cpu, run, ipa1);
        break;
    }

    if (r < 0) {
        r = 0;
        kvm_s390_program_interrupt(cpu, PGM_OPERATION);
    }

    return r;
}
1999
/*
 * A disabled-wait PSW with address 0xfff is the conventional "guest has
 * quiesced" signal; it triggers a shutdown instead of a panic.
 */
static bool is_special_wait_psw(CPUState *cs)
{
    /* signal quiesce */
    return cs->kvm_run->psw_addr == 0xfffUL;
}
2005
/*
 * Report an intercept we cannot recover from: log the new PSW found at
 * @pswoffset in the guest's lowcore, halt the cpu and declare a guest
 * panic.
 */
static void unmanageable_intercept(S390CPU *cpu, const char *str, int pswoffset)
{
    CPUState *cs = CPU(cpu);

    error_report("Unmanageable %s! CPU%i new PSW: 0x%016lx:%016lx",
                 str, cs->cpu_index, ldq_phys(cs->as, cpu->env.psa + pswoffset),
                 ldq_phys(cs->as, cpu->env.psa + pswoffset + 8));
    s390_cpu_halt(cpu);
    qemu_system_guest_panicked(NULL);
}
2016
/* try to detect pgm check loops */
static int handle_oper_loop(S390CPU *cpu, struct kvm_run *run)
{
    CPUState *cs = CPU(cpu);
    PSW oldpsw, newpsw;

    cpu_synchronize_state(cs);
    newpsw.mask = ldq_phys(cs->as, cpu->env.psa +
                           offsetof(LowCore, program_new_psw));
    newpsw.addr = ldq_phys(cs->as, cpu->env.psa +
                           offsetof(LowCore, program_new_psw) + 8);
    oldpsw.mask = run->psw_mask;
    oldpsw.addr = run->psw_addr;
    /*
     * Avoid endless loops of operation exceptions, if the pgm new
     * PSW will cause a new operation exception.
     * The heuristic checks if the pgm new psw is within 6 bytes before
     * the faulting psw address (with same DAT, AS settings) and the
     * new psw is not a wait psw and the fault was not triggered by
     * problem state. In that case go into crashed state.
     */

    if (oldpsw.addr - newpsw.addr <= 6 &&
        !(newpsw.mask & PSW_MASK_WAIT) &&
        !(oldpsw.mask & PSW_MASK_PSTATE) &&
        (newpsw.mask & PSW_MASK_ASC) == (oldpsw.mask & PSW_MASK_ASC) &&
        (newpsw.mask & PSW_MASK_DAT) == (oldpsw.mask & PSW_MASK_DAT)) {
        unmanageable_intercept(cpu, "operation exception loop",
                               offsetof(LowCore, program_new_psw));
        return EXCP_HALTED;
    }
    return 0;
}
2050
/*
 * Dispatch a SIE intercept (KVM_EXIT_S390_SIEIC) to the appropriate
 * handler.  Returns 0 (resume guest), an EXCP_* code, or exits QEMU for
 * intercepts that should never reach user space.
 */
static int handle_intercept(S390CPU *cpu)
{
    CPUState *cs = CPU(cpu);
    struct kvm_run *run = cs->kvm_run;
    int icpt_code = run->s390_sieic.icptcode;
    int r = 0;

    DPRINTF("intercept: 0x%x (at 0x%lx)\n", icpt_code,
            (long)cs->kvm_run->psw_addr);
    switch (icpt_code) {
    case ICPT_INSTRUCTION:
        r = handle_instruction(cpu, run);
        break;
    case ICPT_PROGRAM:
        /* program interrupts should be delivered in-kernel */
        unmanageable_intercept(cpu, "program interrupt",
                               offsetof(LowCore, program_new_psw));
        r = EXCP_HALTED;
        break;
    case ICPT_EXT_INT:
        unmanageable_intercept(cpu, "external interrupt",
                               offsetof(LowCore, external_new_psw));
        r = EXCP_HALTED;
        break;
    case ICPT_WAITPSW:
        /* disabled wait, since enabled wait is handled in kernel */
        cpu_synchronize_state(cs);
        if (s390_cpu_halt(cpu) == 0) {
            /* this was the last running CPU */
            if (is_special_wait_psw(cs)) {
                qemu_system_shutdown_request(SHUTDOWN_CAUSE_GUEST_SHUTDOWN);
            } else {
                qemu_system_guest_panicked(NULL);
            }
        }
        r = EXCP_HALTED;
        break;
    case ICPT_CPU_STOP:
        if (s390_cpu_set_state(CPU_STATE_STOPPED, cpu) == 0) {
            /* last CPU went down -> shut the guest down */
            qemu_system_shutdown_request(SHUTDOWN_CAUSE_GUEST_SHUTDOWN);
        }
        if (cpu->env.sigp_order == SIGP_STOP_STORE_STATUS) {
            kvm_s390_store_status(cpu, KVM_S390_STORE_STATUS_DEF_ADDR,
                                  true);
        }
        cpu->env.sigp_order = 0;
        r = EXCP_HALTED;
        break;
    case ICPT_OPEREXC:
        /* check for break points */
        r = handle_sw_breakpoint(cpu, run);
        if (r == -ENOENT) {
            /* Then check for potential pgm check loops */
            r = handle_oper_loop(cpu, run);
            if (r == 0) {
                /* genuine operation exception: reflect it to the guest */
                kvm_s390_program_interrupt(cpu, PGM_OPERATION);
            }
        }
        break;
    case ICPT_SOFT_INTERCEPT:
        fprintf(stderr, "KVM unimplemented icpt SOFT\n");
        exit(1);
        break;
    case ICPT_IO:
        fprintf(stderr, "KVM unimplemented icpt IO\n");
        exit(1);
        break;
    default:
        fprintf(stderr, "Unknown intercept code: %d\n", icpt_code);
        exit(1);
        break;
    }

    return r;
}
2124
09b99878
CH
/*
 * Handle a TEST SUBCHANNEL exit (KVM_EXIT_S390_TSCH).  If the
 * instruction handler fails and the kernel had already dequeued an I/O
 * interrupt for this subchannel, re-inject it so it is not lost.
 * Returns 0 on (possibly recovered) completion, <0 only on other errors.
 */
static int handle_tsch(S390CPU *cpu)
{
    CPUState *cs = CPU(cpu);
    struct kvm_run *run = cs->kvm_run;
    int ret;

    cpu_synchronize_state(cs);

    ret = ioinst_handle_tsch(cpu, cpu->env.regs[1], run->s390_tsch.ipb);
    if (ret < 0) {
        /*
         * Failure.
         * If an I/O interrupt had been dequeued, we have to reinject it.
         */
        if (run->s390_tsch.dequeued) {
            kvm_s390_io_interrupt(run->s390_tsch.subchannel_id,
                                  run->s390_tsch.subchannel_nr,
                                  run->s390_tsch.io_int_parm,
                                  run->s390_tsch.io_int_word);
        }
        ret = 0;
    }
    return ret;
}
2149
/*
 * Patch QEMU's own hypervisor level into the SYSIB 3.2.2 block that the
 * kernel placed at guest address @addr (access register @ar): shift the
 * extended-name stack, insert our machine name (EBCDIC short name +
 * UTF-8 extended name) and UUID, then write the block back.  Errors on
 * guest memory access are silently ignored (best effort).
 */
static void insert_stsi_3_2_2(S390CPU *cpu, __u64 addr, uint8_t ar)
{
    struct sysib_322 sysib;
    int del;

    if (s390_cpu_virt_mem_read(cpu, addr, ar, &sysib, sizeof(sysib))) {
        return;
    }
    /* Shift the stack of Extended Names to prepare for our own data */
    memmove(&sysib.ext_names[1], &sysib.ext_names[0],
            sizeof(sysib.ext_names[0]) * (sysib.count - 1));
    /* First virt level, that doesn't provide Ext Names delimits stack. It is
     * assumed it's not capable of managing Extended Names for lower levels.
     */
    for (del = 1; del < sysib.count; del++) {
        if (!sysib.vm[del].ext_name_encoding || !sysib.ext_names[del][0]) {
            break;
        }
    }
    if (del < sysib.count) {
        /* clear the stale entries below the delimiting level */
        memset(sysib.ext_names[del], 0,
               sizeof(sysib.ext_names[0]) * (sysib.count - del));
    }
    /* Insert short machine name in EBCDIC, padded with blanks */
    if (qemu_name) {
        memset(sysib.vm[0].name, 0x40, sizeof(sysib.vm[0].name));
        ebcdic_put(sysib.vm[0].name, qemu_name, MIN(sizeof(sysib.vm[0].name),
                                                    strlen(qemu_name)));
    }
    sysib.vm[0].ext_name_encoding = 2; /* 2 = UTF-8 */
    memset(sysib.ext_names[0], 0, sizeof(sysib.ext_names[0]));
    /* If hypervisor specifies zero Extended Name in STSI322 SYSIB, it's
     * considered by s390 as not capable of providing any Extended Name.
     * Therefore if no name was specified on qemu invocation, we go with the
     * same "KVMguest" default, which KVM has filled into short name field.
     */
    if (qemu_name) {
        /* NOTE(review): strncpy may leave the field unterminated when
         * qemu_name fills it entirely; the SYSIB field is fixed-width,
         * so this looks intentional — confirm against the architecture. */
        strncpy((char *)sysib.ext_names[0], qemu_name,
                sizeof(sysib.ext_names[0]));
    } else {
        strcpy((char *)sysib.ext_names[0], "KVMguest");
    }
    /* Insert UUID */
    memcpy(sysib.vm[0].uuid, &qemu_uuid, sizeof(sysib.vm[0].uuid));

    s390_cpu_virt_mem_write(cpu, addr, ar, &sysib, sizeof(sysib));
}
2197
2198static int handle_stsi(S390CPU *cpu)
2199{
2200 CPUState *cs = CPU(cpu);
2201 struct kvm_run *run = cs->kvm_run;
2202
2203 switch (run->s390_stsi.fc) {
2204 case 3:
2205 if (run->s390_stsi.sel1 != 2 || run->s390_stsi.sel2 != 2) {
2206 return 0;
2207 }
2208 /* Only sysib 3.2.2 needs post-handling for now. */
6cb1e49d 2209 insert_stsi_3_2_2(cpu, run->s390_stsi.addr, run->s390_stsi.ar);
f07177a5
ET
2210 return 0;
2211 default:
2212 return 0;
2213 }
2214}
2215
8c012449
DH
/*
 * Translate a KVM debug exit into gdbstub semantics: hardware
 * watchpoints, hardware breakpoints and single-step yield EXCP_DEBUG
 * when they match state QEMU installed; unknown exit types return
 * -ENOSYS.  Returning 0 lets the guest continue.
 */
static int kvm_arch_handle_debug_exit(S390CPU *cpu)
{
    CPUState *cs = CPU(cpu);
    struct kvm_run *run = cs->kvm_run;

    int ret = 0;
    struct kvm_debug_exit_arch *arch_info = &run->debug.arch;

    switch (arch_info->type) {
    case KVM_HW_WP_WRITE:
        if (find_hw_breakpoint(arch_info->addr, -1, arch_info->type)) {
            /* record the hit so the gdbstub can report it */
            cs->watchpoint_hit = &hw_watchpoint;
            hw_watchpoint.vaddr = arch_info->addr;
            hw_watchpoint.flags = BP_MEM_WRITE;
            ret = EXCP_DEBUG;
        }
        break;
    case KVM_HW_BP:
        if (find_hw_breakpoint(arch_info->addr, -1, arch_info->type)) {
            ret = EXCP_DEBUG;
        }
        break;
    case KVM_SINGLESTEP:
        if (cs->singlestep_enabled) {
            ret = EXCP_DEBUG;
        }
        break;
    default:
        ret = -ENOSYS;
    }

    return ret;
}
2249
/*
 * Top-level KVM exit dispatcher for s390x.  The iothread mutex (BQL) is
 * held across all handling.  A sub-handler result of 0 is translated to
 * EXCP_INTERRUPT so the main loop re-enters the guest.
 */
int kvm_arch_handle_exit(CPUState *cs, struct kvm_run *run)
{
    S390CPU *cpu = S390_CPU(cs);
    int ret = 0;

    qemu_mutex_lock_iothread();

    switch (run->exit_reason) {
    case KVM_EXIT_S390_SIEIC:
        ret = handle_intercept(cpu);
        break;
    case KVM_EXIT_S390_RESET:
        s390_reipl_request();
        break;
    case KVM_EXIT_S390_TSCH:
        ret = handle_tsch(cpu);
        break;
    case KVM_EXIT_S390_STSI:
        ret = handle_stsi(cpu);
        break;
    case KVM_EXIT_DEBUG:
        ret = kvm_arch_handle_debug_exit(cpu);
        break;
    default:
        /* unexpected exit reasons are logged but not fatal */
        fprintf(stderr, "Unknown KVM exit: %d\n", run->exit_reason);
        break;
    }
    qemu_mutex_unlock_iothread();

    if (ret == 0) {
        ret = EXCP_INTERRUPT;
    }
    return ret;
}
4513d923 2284
20d695a9 2285bool kvm_arch_stop_on_emulation_error(CPUState *cpu)
4513d923
GN
2286{
2287 return true;
2288}
a1b87fe0 2289
de13d216 2290void kvm_s390_io_interrupt(uint16_t subchannel_id,
09b99878
CH
2291 uint16_t subchannel_nr, uint32_t io_int_parm,
2292 uint32_t io_int_word)
2293{
de13d216
CH
2294 struct kvm_s390_irq irq = {
2295 .u.io.subchannel_id = subchannel_id,
2296 .u.io.subchannel_nr = subchannel_nr,
2297 .u.io.io_int_parm = io_int_parm,
2298 .u.io.io_int_word = io_int_word,
2299 };
09b99878 2300
7e749462 2301 if (io_int_word & IO_INT_WORD_AI) {
de13d216 2302 irq.type = KVM_S390_INT_IO(1, 0, 0, 0);
7e749462 2303 } else {
393ad2a4
CB
2304 irq.type = KVM_S390_INT_IO(0, (subchannel_id & 0xff00) >> 8,
2305 (subchannel_id & 0x0006),
2306 subchannel_nr);
7e749462 2307 }
de13d216 2308 kvm_s390_floating_interrupt(&irq);
09b99878
CH
2309}
2310
b080364a
CH
2311static uint64_t build_channel_report_mcic(void)
2312{
2313 uint64_t mcic;
2314
2315 /* subclass: indicate channel report pending */
2316 mcic = MCIC_SC_CP |
2317 /* subclass modifiers: none */
2318 /* storage errors: none */
2319 /* validity bits: no damage */
2320 MCIC_VB_WP | MCIC_VB_MS | MCIC_VB_PM | MCIC_VB_IA | MCIC_VB_FP |
2321 MCIC_VB_GR | MCIC_VB_CR | MCIC_VB_ST | MCIC_VB_AR | MCIC_VB_PR |
2322 MCIC_VB_FC | MCIC_VB_CT | MCIC_VB_CC;
7c72ac49 2323 if (s390_has_feat(S390_FEAT_VECTOR)) {
b080364a
CH
2324 mcic |= MCIC_VB_VR;
2325 }
62deb62d
FZ
2326 if (s390_has_feat(S390_FEAT_GUARDED_STORAGE)) {
2327 mcic |= MCIC_VB_GS;
2328 }
b080364a
CH
2329 return mcic;
2330}
2331
de13d216 2332void kvm_s390_crw_mchk(void)
09b99878 2333{
de13d216
CH
2334 struct kvm_s390_irq irq = {
2335 .type = KVM_S390_MCHK,
2336 .u.mchk.cr14 = 1 << 28,
b080364a 2337 .u.mchk.mcic = build_channel_report_mcic(),
de13d216
CH
2338 };
2339 kvm_s390_floating_interrupt(&irq);
09b99878
CH
2340}
2341
2342void kvm_s390_enable_css_support(S390CPU *cpu)
2343{
09b99878
CH
2344 int r;
2345
2346 /* Activate host kernel channel subsystem support. */
e080f0fd 2347 r = kvm_vcpu_enable_cap(CPU(cpu), KVM_CAP_S390_CSS_SUPPORT, 0);
09b99878
CH
2348 assert(r == 0);
2349}
48475e14
AK
2350
/*
 * Architecture hook: configure IRQ-routing related globals once KVM is
 * initialized.
 */
void kvm_arch_init_irq_routing(KVMState *s)
{
    /*
     * Note that while irqchip capabilities generally imply that cpustates
     * are handled in-kernel, it is not true for s390 (yet); therefore, we
     * have to override the common code kvm_halt_in_kernel_allowed setting.
     */
    if (kvm_check_extension(s, KVM_CAP_IRQ_ROUTING)) {
        kvm_gsi_routing_allowed = true;
        kvm_halt_in_kernel_allowed = false;
    }
}
b4436a0b 2363
cc3ac9c4
CH
2364int kvm_s390_assign_subch_ioeventfd(EventNotifier *notifier, uint32_t sch,
2365 int vq, bool assign)
b4436a0b
CH
2366{
2367 struct kvm_ioeventfd kick = {
2368 .flags = KVM_IOEVENTFD_FLAG_VIRTIO_CCW_NOTIFY |
2369 KVM_IOEVENTFD_FLAG_DATAMATCH,
cc3ac9c4 2370 .fd = event_notifier_get_fd(notifier),
b4436a0b
CH
2371 .datamatch = vq,
2372 .addr = sch,
2373 .len = 8,
2374 };
2375 if (!kvm_check_extension(kvm_state, KVM_CAP_IOEVENTFD)) {
2376 return -ENOSYS;
2377 }
2378 if (!assign) {
2379 kick.flags |= KVM_IOEVENTFD_FLAG_DEASSIGN;
2380 }
2381 return kvm_vm_ioctl(kvm_state, KVM_IOEVENTFD, &kick);
2382}
1def6656 2383
fba5f6fe 2384int kvm_s390_get_memslot_count(void)
1def6656 2385{
fba5f6fe 2386 return kvm_check_extension(kvm_state, KVM_CAP_NR_MEMSLOTS);
1def6656 2387}
c9e659c9 2388
9700230b
FZ
/* Return whether the runtime-instrumentation capability was enabled. */
int kvm_s390_get_ri(void)
{
    return cap_ri;
}
2393
62deb62d
FZ
/* Return whether the guarded-storage capability was enabled. */
int kvm_s390_get_gs(void)
{
    return cap_gs;
}
2398
c9e659c9
DH
/*
 * Propagate a QEMU CPU_STATE_* value to the kernel as the vcpu's
 * mp_state.  Invalid states are programming errors and abort; an ioctl
 * failure is traced and returned to the caller.
 */
int kvm_s390_set_cpu_state(S390CPU *cpu, uint8_t cpu_state)
{
    struct kvm_mp_state mp_state = {};
    int ret;

    /* the kvm part might not have been initialized yet */
    if (CPU(cpu)->kvm_state == NULL) {
        return 0;
    }

    switch (cpu_state) {
    case CPU_STATE_STOPPED:
        mp_state.mp_state = KVM_MP_STATE_STOPPED;
        break;
    case CPU_STATE_CHECK_STOP:
        mp_state.mp_state = KVM_MP_STATE_CHECK_STOP;
        break;
    case CPU_STATE_OPERATING:
        mp_state.mp_state = KVM_MP_STATE_OPERATING;
        break;
    case CPU_STATE_LOAD:
        mp_state.mp_state = KVM_MP_STATE_LOAD;
        break;
    default:
        error_report("Requested CPU state is not a valid S390 CPU state: %u",
                     cpu_state);
        exit(1);
    }

    ret = kvm_vcpu_ioctl(CPU(cpu), KVM_SET_MP_STATE, &mp_state);
    if (ret) {
        trace_kvm_failed_cpu_state_set(CPU(cpu)->cpu_index, cpu_state,
                                       strerror(-ret));
    }

    return ret;
}
9e03a040 2436
3cda44f7
JF
/*
 * Migration pre-save hook: snapshot the vcpu's pending interrupt state
 * into cpu->irqstate.  On failure (or missing kernel support) the saved
 * size is left/set to 0 so post_load will skip restoring; this is best
 * effort and only logs an error.
 */
void kvm_s390_vcpu_interrupt_pre_save(S390CPU *cpu)
{
    struct kvm_s390_irq_state irq_state;
    CPUState *cs = CPU(cpu);
    int32_t bytes;

    if (!kvm_check_extension(kvm_state, KVM_CAP_S390_IRQ_STATE)) {
        return;
    }

    irq_state.buf = (uint64_t) cpu->irqstate;
    irq_state.len = VCPU_IRQ_BUF_SIZE;

    /* the ioctl returns the number of bytes written on success */
    bytes = kvm_vcpu_ioctl(cs, KVM_S390_GET_IRQ_STATE, &irq_state);
    if (bytes < 0) {
        cpu->irqstate_saved_size = 0;
        error_report("Migration of interrupt state failed");
        return;
    }

    cpu->irqstate_saved_size = bytes;
}
2459
/*
 * Migration post-load hook: restore the interrupt state captured by
 * kvm_s390_vcpu_interrupt_post_load's counterpart (pre_save).  A zero
 * saved size means nothing to do; missing kernel support is -ENOSYS.
 */
int kvm_s390_vcpu_interrupt_post_load(S390CPU *cpu)
{
    CPUState *cs = CPU(cpu);
    struct kvm_s390_irq_state irq_state;
    int r;

    if (cpu->irqstate_saved_size == 0) {
        return 0;
    }

    if (!kvm_check_extension(kvm_state, KVM_CAP_S390_IRQ_STATE)) {
        return -ENOSYS;
    }

    irq_state.buf = (uint64_t) cpu->irqstate;
    irq_state.len = cpu->irqstate_saved_size;

    r = kvm_vcpu_ioctl(cs, KVM_S390_SET_IRQ_STATE, &irq_state);
    if (r) {
        error_report("Setting interrupt state failed %d", r);
    }
    return r;
}
2483
/*
 * Rewrite an MSI route for a zPCI device into an s390 adapter-interrupt
 * route: the MSI vector is taken from the low bits of @data and added
 * to the device's adapter indicator offset.  Returns -ENODEV when the
 * PCI device or its zPCI counterpart cannot be found.
 */
int kvm_arch_fixup_msi_route(struct kvm_irq_routing_entry *route,
                             uint64_t address, uint32_t data, PCIDevice *dev)
{
    S390PCIBusDevice *pbdev;
    uint32_t vec = data & ZPCI_MSI_VEC_MASK;

    if (!dev) {
        DPRINTF("add_msi_route no pci device\n");
        return -ENODEV;
    }

    pbdev = s390_pci_find_dev_by_target(s390_get_phb(), DEVICE(dev)->id);
    if (!pbdev) {
        DPRINTF("add_msi_route no zpci device\n");
        return -ENODEV;
    }

    route->type = KVM_IRQ_ROUTING_S390_ADAPTER;
    route->flags = 0;
    route->u.adapter.summary_addr = pbdev->routes.adapter.summary_addr;
    route->u.adapter.ind_addr = pbdev->routes.adapter.ind_addr;
    route->u.adapter.summary_offset = pbdev->routes.adapter.summary_offset;
    route->u.adapter.ind_offset = pbdev->routes.adapter.ind_offset + vec;
    route->u.adapter.adapter_id = pbdev->routes.adapter.adapter_id;
    return 0;
}
1850b6b7 2510
38d87493
PX
/* No post-processing is needed after adding an MSI route on s390x. */
int kvm_arch_add_msi_route_post(struct kvm_irq_routing_entry *route,
                                int vector, PCIDevice *dev)
{
    return 0;
}
2516
/* Nothing to clean up when a virq is released on s390x. */
int kvm_arch_release_virq_post(int virq)
{
    return 0;
}
2521
1850b6b7
EA
/* MSI data-to-GSI translation is not used on s390x; reaching this is a bug. */
int kvm_arch_msi_data_to_gsi(uint32_t data)
{
    abort();
}
3b84c25c 2526
3b84c25c
DH
/*
 * Query the crypto/PLO/PTFF etc. subfunction blocks supported by the
 * host machine and merge them into @features.  A subfunction block is
 * only consulted when the feature that unlocks its query instruction is
 * present.  Returns 0 or a negative ioctl error.
 */
static int query_cpu_subfunc(S390FeatBitmap features)
{
    struct kvm_s390_vm_cpu_subfunc prop;
    struct kvm_device_attr attr = {
        .group = KVM_S390_VM_CPU_MODEL,
        .attr = KVM_S390_VM_CPU_MACHINE_SUBFUNC,
        .addr = (uint64_t) &prop,
    };
    int rc;

    rc = kvm_vm_ioctl(kvm_state, KVM_GET_DEVICE_ATTR, &attr);
    if (rc) {
        return rc;
    }

    /*
     * We're going to add all subfunctions now, if the corresponding feature
     * is available that unlocks the query functions.
     */
    s390_add_from_feat_block(features, S390_FEAT_TYPE_PLO, prop.plo);
    if (test_bit(S390_FEAT_TOD_CLOCK_STEERING, features)) {
        s390_add_from_feat_block(features, S390_FEAT_TYPE_PTFF, prop.ptff);
    }
    if (test_bit(S390_FEAT_MSA, features)) {
        s390_add_from_feat_block(features, S390_FEAT_TYPE_KMAC, prop.kmac);
        s390_add_from_feat_block(features, S390_FEAT_TYPE_KMC, prop.kmc);
        s390_add_from_feat_block(features, S390_FEAT_TYPE_KM, prop.km);
        s390_add_from_feat_block(features, S390_FEAT_TYPE_KIMD, prop.kimd);
        s390_add_from_feat_block(features, S390_FEAT_TYPE_KLMD, prop.klmd);
    }
    if (test_bit(S390_FEAT_MSA_EXT_3, features)) {
        s390_add_from_feat_block(features, S390_FEAT_TYPE_PCKMO, prop.pckmo);
    }
    if (test_bit(S390_FEAT_MSA_EXT_4, features)) {
        s390_add_from_feat_block(features, S390_FEAT_TYPE_KMCTR, prop.kmctr);
        s390_add_from_feat_block(features, S390_FEAT_TYPE_KMF, prop.kmf);
        s390_add_from_feat_block(features, S390_FEAT_TYPE_KMO, prop.kmo);
        s390_add_from_feat_block(features, S390_FEAT_TYPE_PCC, prop.pcc);
    }
    if (test_bit(S390_FEAT_MSA_EXT_5, features)) {
        s390_add_from_feat_block(features, S390_FEAT_TYPE_PPNO, prop.ppno);
    }
    if (test_bit(S390_FEAT_MSA_EXT_8, features)) {
        s390_add_from_feat_block(features, S390_FEAT_TYPE_KMA, prop.kma);
    }
    return 0;
}
2574
/*
 * Mirror of query_cpu_subfunc(): convert the subfunction bits in
 * @features back into the KVM layout and configure the VM's processor
 * subfunctions.  Silently succeeds when the kernel lacks the attribute
 * (IBC will handle most of it then).
 */
static int configure_cpu_subfunc(const S390FeatBitmap features)
{
    struct kvm_s390_vm_cpu_subfunc prop = {};
    struct kvm_device_attr attr = {
        .group = KVM_S390_VM_CPU_MODEL,
        .attr = KVM_S390_VM_CPU_PROCESSOR_SUBFUNC,
        .addr = (uint64_t) &prop,
    };

    if (!kvm_vm_check_attr(kvm_state, KVM_S390_VM_CPU_MODEL,
                           KVM_S390_VM_CPU_PROCESSOR_SUBFUNC)) {
        /* hardware support might be missing, IBC will handle most of this */
        return 0;
    }

    s390_fill_feat_block(features, S390_FEAT_TYPE_PLO, prop.plo);
    if (test_bit(S390_FEAT_TOD_CLOCK_STEERING, features)) {
        s390_fill_feat_block(features, S390_FEAT_TYPE_PTFF, prop.ptff);
    }
    if (test_bit(S390_FEAT_MSA, features)) {
        s390_fill_feat_block(features, S390_FEAT_TYPE_KMAC, prop.kmac);
        s390_fill_feat_block(features, S390_FEAT_TYPE_KMC, prop.kmc);
        s390_fill_feat_block(features, S390_FEAT_TYPE_KM, prop.km);
        s390_fill_feat_block(features, S390_FEAT_TYPE_KIMD, prop.kimd);
        s390_fill_feat_block(features, S390_FEAT_TYPE_KLMD, prop.klmd);
    }
    if (test_bit(S390_FEAT_MSA_EXT_3, features)) {
        s390_fill_feat_block(features, S390_FEAT_TYPE_PCKMO, prop.pckmo);
    }
    if (test_bit(S390_FEAT_MSA_EXT_4, features)) {
        s390_fill_feat_block(features, S390_FEAT_TYPE_KMCTR, prop.kmctr);
        s390_fill_feat_block(features, S390_FEAT_TYPE_KMF, prop.kmf);
        s390_fill_feat_block(features, S390_FEAT_TYPE_KMO, prop.kmo);
        s390_fill_feat_block(features, S390_FEAT_TYPE_PCC, prop.pcc);
    }
    if (test_bit(S390_FEAT_MSA_EXT_5, features)) {
        s390_fill_feat_block(features, S390_FEAT_TYPE_PPNO, prop.ppno);
    }
    if (test_bit(S390_FEAT_MSA_EXT_8, features)) {
        s390_fill_feat_block(features, S390_FEAT_TYPE_KMA, prop.kma);
    }
    return kvm_vm_ioctl(kvm_state, KVM_SET_DEVICE_ATTR, &attr);
}
2618
/*
 * Mapping between KVM_S390_VM_CPU_FEAT_* bit numbers (as exposed by the
 * kernel) and QEMU's S390_FEAT_* enumeration.  Used by query_cpu_feat()
 * and configure_cpu_feat() to translate in both directions.
 */
static int kvm_to_feat[][2] = {
    { KVM_S390_VM_CPU_FEAT_ESOP, S390_FEAT_ESOP },
    { KVM_S390_VM_CPU_FEAT_SIEF2, S390_FEAT_SIE_F2 },
    { KVM_S390_VM_CPU_FEAT_64BSCAO , S390_FEAT_SIE_64BSCAO },
    { KVM_S390_VM_CPU_FEAT_SIIF, S390_FEAT_SIE_SIIF },
    { KVM_S390_VM_CPU_FEAT_GPERE, S390_FEAT_SIE_GPERE },
    { KVM_S390_VM_CPU_FEAT_GSLS, S390_FEAT_SIE_GSLS },
    { KVM_S390_VM_CPU_FEAT_IB, S390_FEAT_SIE_IB },
    { KVM_S390_VM_CPU_FEAT_CEI, S390_FEAT_SIE_CEI },
    { KVM_S390_VM_CPU_FEAT_IBS, S390_FEAT_SIE_IBS },
    { KVM_S390_VM_CPU_FEAT_SKEY, S390_FEAT_SIE_SKEY },
    { KVM_S390_VM_CPU_FEAT_CMMA, S390_FEAT_SIE_CMMA },
    { KVM_S390_VM_CPU_FEAT_PFMFI, S390_FEAT_SIE_PFMFI},
    { KVM_S390_VM_CPU_FEAT_SIGPIF, S390_FEAT_SIE_SIGPIF},
    { KVM_S390_VM_CPU_FEAT_KSS, S390_FEAT_SIE_KSS},
};
2635
2636static int query_cpu_feat(S390FeatBitmap features)
2637{
2638 struct kvm_s390_vm_cpu_feat prop;
2639 struct kvm_device_attr attr = {
2640 .group = KVM_S390_VM_CPU_MODEL,
2641 .attr = KVM_S390_VM_CPU_MACHINE_FEAT,
2642 .addr = (uint64_t) &prop,
2643 };
2644 int rc;
2645 int i;
2646
2647 rc = kvm_vm_ioctl(kvm_state, KVM_GET_DEVICE_ATTR, &attr);
2648 if (rc) {
2649 return rc;
2650 }
2651
2652 for (i = 0; i < ARRAY_SIZE(kvm_to_feat); i++) {
3d1cfc3c 2653 if (test_be_bit(kvm_to_feat[i][0], (uint8_t *) prop.feat)) {
3b84c25c
DH
2654 set_bit(kvm_to_feat[i][1], features);
2655 }
2656 }
2657 return 0;
2658}
2659
2660static int configure_cpu_feat(const S390FeatBitmap features)
2661{
2662 struct kvm_s390_vm_cpu_feat prop = {};
2663 struct kvm_device_attr attr = {
2664 .group = KVM_S390_VM_CPU_MODEL,
2665 .attr = KVM_S390_VM_CPU_PROCESSOR_FEAT,
2666 .addr = (uint64_t) &prop,
2667 };
2668 int i;
2669
2670 for (i = 0; i < ARRAY_SIZE(kvm_to_feat); i++) {
2671 if (test_bit(kvm_to_feat[i][1], features)) {
3d1cfc3c 2672 set_be_bit(kvm_to_feat[i][0], (uint8_t *) prop.feat);
3b84c25c
DH
2673 }
2674 }
2675 return kvm_vm_ioctl(kvm_state, KVM_SET_DEVICE_ATTR, &attr);
2676}
2677
2678bool kvm_s390_cpu_models_supported(void)
2679{
e73316d5 2680 if (!cpu_model_allowed()) {
34821036
DH
2681 /* compatibility machines interfere with the cpu model */
2682 return false;
2683 }
3b84c25c
DH
2684 return kvm_vm_check_attr(kvm_state, KVM_S390_VM_CPU_MODEL,
2685 KVM_S390_VM_CPU_MACHINE) &&
2686 kvm_vm_check_attr(kvm_state, KVM_S390_VM_CPU_MODEL,
2687 KVM_S390_VM_CPU_PROCESSOR) &&
2688 kvm_vm_check_attr(kvm_state, KVM_S390_VM_CPU_MODEL,
2689 KVM_S390_VM_CPU_MACHINE_FEAT) &&
2690 kvm_vm_check_attr(kvm_state, KVM_S390_VM_CPU_MODEL,
2691 KVM_S390_VM_CPU_PROCESSOR_FEAT) &&
2692 kvm_vm_check_attr(kvm_state, KVM_S390_VM_CPU_MODEL,
2693 KVM_S390_VM_CPU_MACHINE_SUBFUNC);
2694}
2695
/*
 * Determine the host CPU model as seen through KVM: query cpuid/IBC,
 * STFL(E) facilities, CPU features and subfunctions, add QEMU-emulated
 * features (zPCI, AEN), then match everything against QEMU's CPU
 * definition table.  On any failure @errp is set and @model is left
 * zeroed/partially filled.
 */
void kvm_s390_get_host_cpu_model(S390CPUModel *model, Error **errp)
{
    struct kvm_s390_vm_cpu_machine prop = {};
    struct kvm_device_attr attr = {
        .group = KVM_S390_VM_CPU_MODEL,
        .attr = KVM_S390_VM_CPU_MACHINE,
        .addr = (uint64_t) &prop,
    };
    uint16_t unblocked_ibc = 0, cpu_type = 0;
    int rc;

    memset(model, 0, sizeof(*model));

    if (!kvm_s390_cpu_models_supported()) {
        error_setg(errp, "KVM doesn't support CPU models");
        return;
    }

    /* query the basic cpu model properties */
    rc = kvm_vm_ioctl(kvm_state, KVM_GET_DEVICE_ATTR, &attr);
    if (rc) {
        error_setg(errp, "KVM: Error querying host CPU model: %d", rc);
        return;
    }

    cpu_type = cpuid_type(prop.cpuid);
    if (has_ibc(prop.ibc)) {
        model->lowest_ibc = lowest_ibc(prop.ibc);
        unblocked_ibc = unblocked_ibc(prop.ibc);
    }
    model->cpu_id = cpuid_id(prop.cpuid);
    model->cpu_id_format = cpuid_format(prop.cpuid);
    model->cpu_ver = 0xff;

    /* get supported cpu features indicated via STFL(E) */
    s390_add_from_feat_block(model->features, S390_FEAT_TYPE_STFL,
                             (uint8_t *) prop.fac_mask);
    /* dat-enhancement facility 2 has no bit but was introduced with stfle */
    if (test_bit(S390_FEAT_STFLE, model->features)) {
        set_bit(S390_FEAT_DAT_ENH_2, model->features);
    }
    /* get supported cpu features indicated e.g. via SCLP */
    rc = query_cpu_feat(model->features);
    if (rc) {
        error_setg(errp, "KVM: Error querying CPU features: %d", rc);
        return;
    }
    /* get supported cpu subfunctions indicated via query / test bit */
    rc = query_cpu_subfunc(model->features);
    if (rc) {
        error_setg(errp, "KVM: Error querying CPU subfunctions: %d", rc);
        return;
    }

    /* with cpu model support, CMM is only indicated if really available */
    if (kvm_s390_cmma_available()) {
        set_bit(S390_FEAT_CMM, model->features);
    } else {
        /* no cmm -> no cmm nt */
        clear_bit(S390_FEAT_CMM_NT, model->features);
    }

    /* We emulate a zPCI bus and AEN, therefore we don't need HW support */
    if (pci_available) {
        set_bit(S390_FEAT_ZPCI, model->features);
    }
    set_bit(S390_FEAT_ADAPTER_EVENT_NOTIFICATION, model->features);

    if (s390_known_cpu_type(cpu_type)) {
        /* we want the exact model, even if some features are missing */
        model->def = s390_find_cpu_def(cpu_type, ibc_gen(unblocked_ibc),
                                       ibc_ec_ga(unblocked_ibc), NULL);
    } else {
        /* model unknown, e.g. too new - search using features */
        model->def = s390_find_cpu_def(0, ibc_gen(unblocked_ibc),
                                       ibc_ec_ga(unblocked_ibc),
                                       model->features);
    }
    if (!model->def) {
        error_setg(errp, "KVM: host CPU model could not be identified");
        return;
    }
    /* strip of features that are not part of the maximum model */
    bitmap_and(model->features, model->features, model->def->full_feat,
               S390_FEAT_MAX);
}
2782
/*
 * Push the selected CPU model into KVM: cpuid, IBC, STFL(E) facility
 * list, CPU features and subfunctions.  @model == NULL means CPU models
 * are disabled (compatibility machines); then only CMMA is enabled if
 * available.  On failure @errp is set.
 */
void kvm_s390_apply_cpu_model(const S390CPUModel *model, Error **errp)
{
    struct kvm_s390_vm_cpu_processor prop  = {
        .fac_list = { 0 },
    };
    struct kvm_device_attr attr = {
        .group = KVM_S390_VM_CPU_MODEL,
        .attr = KVM_S390_VM_CPU_PROCESSOR,
        .addr = (uint64_t) &prop,
    };
    int rc;

    if (!model) {
        /* compatibility handling if cpu models are disabled */
        if (kvm_s390_cmma_available()) {
            kvm_s390_enable_cmma();
        }
        return;
    }
    if (!kvm_s390_cpu_models_supported()) {
        error_setg(errp, "KVM doesn't support CPU models");
        return;
    }
    prop.cpuid = s390_cpuid_from_cpu_model(model);
    prop.ibc = s390_ibc_from_cpu_model(model);
    /* configure cpu features indicated via STFL(e) */
    s390_fill_feat_block(model->features, S390_FEAT_TYPE_STFL,
                         (uint8_t *) prop.fac_list);
    rc = kvm_vm_ioctl(kvm_state, KVM_SET_DEVICE_ATTR, &attr);
    if (rc) {
        error_setg(errp, "KVM: Error configuring the CPU model: %d", rc);
        return;
    }
    /* configure cpu features indicated e.g. via SCLP */
    rc = configure_cpu_feat(model->features);
    if (rc) {
        error_setg(errp, "KVM: Error configuring CPU features: %d", rc);
        return;
    }
    /* configure cpu subfunctions indicated via query / test bit */
    rc = configure_cpu_subfunc(model->features);
    if (rc) {
        error_setg(errp, "KVM: Error configuring CPU subfunctions: %d", rc);
        return;
    }
    /* enable CMM via CMMA */
    if (test_bit(S390_FEAT_CMM, model->features)) {
        kvm_s390_enable_cmma();
    }
}