]> git.proxmox.com Git - mirror_qemu.git/blame - target-s390x/kvm.c
kvm: encapsulate HAS_DEVICE for vm attrs
[mirror_qemu.git] / target-s390x / kvm.c
CommitLineData
0e60a699
AG
1/*
2 * QEMU S390x KVM implementation
3 *
4 * Copyright (c) 2009 Alexander Graf <agraf@suse.de>
ccb084d3 5 * Copyright IBM Corp. 2012
0e60a699
AG
6 *
7 * This library is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2 of the License, or (at your option) any later version.
11 *
12 * This library is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
16 *
ccb084d3
CB
17 * Contributions after 2012-10-29 are licensed under the terms of the
18 * GNU GPL, version 2 or (at your option) any later version.
19 *
20 * You should have received a copy of the GNU (Lesser) General Public
0e60a699
AG
21 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
22 */
23
24#include <sys/types.h>
25#include <sys/ioctl.h>
26#include <sys/mman.h>
27
28#include <linux/kvm.h>
29#include <asm/ptrace.h>
30
31#include "qemu-common.h"
1de7afc9 32#include "qemu/timer.h"
9c17d615
PB
33#include "sysemu/sysemu.h"
34#include "sysemu/kvm.h"
4cb88c3c 35#include "hw/hw.h"
0e60a699 36#include "cpu.h"
9c17d615 37#include "sysemu/device_tree.h"
08eb8c85
CB
38#include "qapi/qmp/qjson.h"
39#include "monitor/monitor.h"
770a6379 40#include "exec/gdbstub.h"
18ff9494 41#include "exec/address-spaces.h"
860643bc 42#include "trace.h"
3a449690 43#include "qapi-event.h"
863f6f52 44#include "hw/s390x/s390-pci-inst.h"
9e03a040 45#include "hw/s390x/s390-pci-bus.h"
e91e972c 46#include "hw/s390x/ipl.h"
0e60a699
AG
47
48/* #define DEBUG_KVM */
49
50#ifdef DEBUG_KVM
e67137c6 51#define DPRINTF(fmt, ...) \
0e60a699
AG
52 do { fprintf(stderr, fmt, ## __VA_ARGS__); } while (0)
53#else
e67137c6 54#define DPRINTF(fmt, ...) \
0e60a699
AG
55 do { } while (0)
56#endif
57
58#define IPA0_DIAG 0x8300
59#define IPA0_SIGP 0xae00
09b99878
CH
60#define IPA0_B2 0xb200
61#define IPA0_B9 0xb900
62#define IPA0_EB 0xeb00
863f6f52 63#define IPA0_E3 0xe300
0e60a699 64
1eecf41b
FB
65#define PRIV_B2_SCLP_CALL 0x20
66#define PRIV_B2_CSCH 0x30
67#define PRIV_B2_HSCH 0x31
68#define PRIV_B2_MSCH 0x32
69#define PRIV_B2_SSCH 0x33
70#define PRIV_B2_STSCH 0x34
71#define PRIV_B2_TSCH 0x35
72#define PRIV_B2_TPI 0x36
73#define PRIV_B2_SAL 0x37
74#define PRIV_B2_RSCH 0x38
75#define PRIV_B2_STCRW 0x39
76#define PRIV_B2_STCPS 0x3a
77#define PRIV_B2_RCHP 0x3b
78#define PRIV_B2_SCHM 0x3c
79#define PRIV_B2_CHSC 0x5f
80#define PRIV_B2_SIGA 0x74
81#define PRIV_B2_XSCH 0x76
82
83#define PRIV_EB_SQBS 0x8a
863f6f52
FB
84#define PRIV_EB_PCISTB 0xd0
85#define PRIV_EB_SIC 0xd1
1eecf41b
FB
86
87#define PRIV_B9_EQBS 0x9c
863f6f52
FB
88#define PRIV_B9_CLP 0xa0
89#define PRIV_B9_PCISTG 0xd0
90#define PRIV_B9_PCILG 0xd2
91#define PRIV_B9_RPCIT 0xd3
92
93#define PRIV_E3_MPCIFC 0xd0
94#define PRIV_E3_STPCIFC 0xd4
1eecf41b 95
268846ba 96#define DIAG_IPL 0x308
0e60a699
AG
97#define DIAG_KVM_HYPERCALL 0x500
98#define DIAG_KVM_BREAKPOINT 0x501
99
0e60a699 100#define ICPT_INSTRUCTION 0x04
6449a41a 101#define ICPT_PROGRAM 0x08
a2689242 102#define ICPT_EXT_INT 0x14
0e60a699
AG
103#define ICPT_WAITPSW 0x1c
104#define ICPT_SOFT_INTERCEPT 0x24
105#define ICPT_CPU_STOP 0x28
106#define ICPT_IO 0x40
107
770a6379
DH
108static CPUWatchpoint hw_watchpoint;
109/*
110 * We don't use a list because this structure is also used to transmit the
111 * hardware breakpoints to the kernel.
112 */
113static struct kvm_hw_breakpoint *hw_breakpoints;
114static int nb_hw_breakpoints;
115
94a8d39a
JK
116const KVMCapabilityInfo kvm_arch_required_capabilities[] = {
117 KVM_CAP_LAST_INFO
118};
119
5b08b344 120static int cap_sync_regs;
819bd309 121static int cap_async_pf;
5b08b344 122
dc622deb 123static void *legacy_s390_alloc(size_t size, uint64_t *align);
91138037 124
a310b283
DD
/*
 * Probe (via KVM_HAS_DEVICE_ATTR) whether the kernel exposes the
 * KVM_S390_VM_MEM_LIMIT_SIZE attribute for this VM.
 * Returns non-zero when the attribute is supported.
 */
static int kvm_s390_supports_mem_limit(KVMState *s)
{
    struct kvm_device_attr attr = {
        .group = KVM_S390_VM_MEM_CTRL,
        .attr = KVM_S390_VM_MEM_LIMIT_SIZE,
    };

    return (kvm_vm_ioctl(s, KVM_HAS_DEVICE_ATTR, &attr) == 0);
}
134
/*
 * Read the current memory limit of the VM into *memory_limit.
 * Returns 0 on success or a negative errno from the ioctl.
 */
static int kvm_s390_query_mem_limit(KVMState *s, uint64_t *memory_limit)
{
    struct kvm_device_attr attr = {
        .group = KVM_S390_VM_MEM_CTRL,
        .attr = KVM_S390_VM_MEM_LIMIT_SIZE,
        /* the kernel fills in the value behind attr.addr */
        .addr = (uint64_t) memory_limit,
    };

    return kvm_vm_ioctl(s, KVM_GET_DEVICE_ATTR, &attr);
}
145
/*
 * Set the VM memory limit to new_limit, reporting the hardware/kernel
 * maximum in *hw_limit.
 *
 * Returns 0 when the attribute is unsupported (silently a no-op),
 * -E2BIG when new_limit exceeds the queried maximum, otherwise the
 * result of the SET ioctl.
 */
int kvm_s390_set_mem_limit(KVMState *s, uint64_t new_limit, uint64_t *hw_limit)
{
    int rc;

    struct kvm_device_attr attr = {
        .group = KVM_S390_VM_MEM_CTRL,
        .attr = KVM_S390_VM_MEM_LIMIT_SIZE,
        .addr = (uint64_t) &new_limit,
    };

    if (!kvm_s390_supports_mem_limit(s)) {
        return 0;
    }

    rc = kvm_s390_query_mem_limit(s, hw_limit);
    if (rc) {
        return rc;
    } else if (*hw_limit < new_limit) {
        return -E2BIG;
    }

    return kvm_vm_ioctl(s, KVM_SET_DEVICE_ATTR, &attr);
}
169
4cb88c3c
DD
/* Check whether the kernel supports clearing CMMA state (0 == supported). */
static int kvm_s390_check_clear_cmma(KVMState *s)
{
    struct kvm_device_attr attr = {
        .group = KVM_S390_VM_MEM_CTRL,
        .attr = KVM_S390_VM_MEM_CLR_CMMA,
    };

    return kvm_vm_ioctl(s, KVM_HAS_DEVICE_ATTR, &attr);
}
179
/* Check whether the kernel supports enabling CMMA (0 == supported). */
static int kvm_s390_check_enable_cmma(KVMState *s)
{
    struct kvm_device_attr attr = {
        .group = KVM_S390_VM_MEM_CTRL,
        .attr = KVM_S390_VM_MEM_ENABLE_CMMA,
    };

    return kvm_vm_ioctl(s, KVM_HAS_DEVICE_ATTR, &attr);
}
189
/*
 * Reset callback: ask the kernel to clear all CMMA (collaborative memory
 * management) per-page state. Errors are only traced, not propagated,
 * since this runs in the machine reset path.
 */
void kvm_s390_clear_cmma_callback(void *opaque)
{
    int rc;
    KVMState *s = opaque;
    struct kvm_device_attr attr = {
        .group = KVM_S390_VM_MEM_CTRL,
        .attr = KVM_S390_VM_MEM_CLR_CMMA,
    };

    rc = kvm_vm_ioctl(s, KVM_SET_DEVICE_ATTR, &attr);
    trace_kvm_clear_cmma(rc);
}
202
/*
 * Enable CMMA for this VM if the kernel supports both enabling and
 * clearing it; on success also register the clear callback so CMMA
 * state is wiped on machine reset. Failures are traced only.
 */
static void kvm_s390_enable_cmma(KVMState *s)
{
    int rc;
    struct kvm_device_attr attr = {
        .group = KVM_S390_VM_MEM_CTRL,
        .attr = KVM_S390_VM_MEM_ENABLE_CMMA,
    };

    /* both operations must be available, otherwise stay disabled */
    if (kvm_s390_check_enable_cmma(s) || kvm_s390_check_clear_cmma(s)) {
        return;
    }

    rc = kvm_vm_ioctl(s, KVM_SET_DEVICE_ATTR, &attr);
    if (!rc) {
        qemu_register_reset(kvm_s390_clear_cmma_callback, s);
    }
    trace_kvm_enable_cmma(rc);
}
221
/*
 * Architecture-specific KVM initialization for s390x:
 * cache capability flags, enable CMMA where available, install the
 * legacy allocator for old kernels, and enable user-space SIGP handling.
 */
int kvm_arch_init(MachineState *ms, KVMState *s)
{
    cap_sync_regs = kvm_check_extension(s, KVM_CAP_SYNC_REGS);
    cap_async_pf = kvm_check_extension(s, KVM_CAP_ASYNC_PF);

    if (kvm_check_extension(s, KVM_CAP_VM_ATTRIBUTES)) {
        kvm_s390_enable_cmma(s);
    }

    /* old kernels need guest memory in a special (low, shared) mapping */
    if (!kvm_check_extension(s, KVM_CAP_S390_GMAP)
        || !kvm_check_extension(s, KVM_CAP_S390_COW)) {
        phys_mem_set_alloc(legacy_s390_alloc);
    }

    /* best-effort: ignore failure on kernels without USER_SIGP */
    kvm_vm_enable_cap(s, KVM_CAP_S390_USER_SIGP, 0);

    return 0;
}
240
b164e48e
EH
/* The KVM vcpu id is simply the QEMU cpu index on s390x. */
unsigned long kvm_arch_vcpu_id(CPUState *cpu)
{
    return cpu->cpu_index;
}
245
/* Per-vcpu init: propagate the current QEMU cpu state to the kernel. */
int kvm_arch_init_vcpu(CPUState *cs)
{
    S390CPU *cpu = S390_CPU(cs);
    kvm_s390_set_cpu_state(cpu, cpu->env.cpu_state);
    return 0;
}
252
/* Issue the in-kernel initial CPU reset; a failure is only reported. */
void kvm_s390_reset_vcpu(S390CPU *cpu)
{
    CPUState *cs = CPU(cpu);

    /* The initial reset call is needed here to reset in-kernel
     * vcpu data that we can't access directly from QEMU
     * (i.e. with older kernels which don't support sync_regs/ONE_REG).
     * Before this ioctl cpu_synchronize_state() is called in common kvm
     * code (kvm-all) */
    if (kvm_vcpu_ioctl(cs, KVM_S390_INITIAL_RESET, NULL)) {
        error_report("Initial CPU reset failed on CPU %i", cs->cpu_index);
    }
}
266
fdb78ec0
DH
/* True iff all register sets in 'regs' can go through the sync_regs path. */
static int can_sync_regs(CPUState *cs, int regs)
{
    return cap_sync_regs && (cs->kvm_run->kvm_valid_regs & regs) == regs;
}
271
/*
 * Push QEMU's CPU state into the kernel. Fast-path register sets go
 * through the shared kvm_run sync_regs area when the kernel supports it
 * (see can_sync_regs()); otherwise the individual SET ioctls / ONE_REGs
 * are used. 'level' decides how much state to transfer: runtime state
 * only, or everything (for migration/reset).
 * Returns 0 on success, negative errno on ioctl failure.
 */
int kvm_arch_put_registers(CPUState *cs, int level)
{
    S390CPU *cpu = S390_CPU(cs);
    CPUS390XState *env = &cpu->env;
    struct kvm_sregs sregs;
    struct kvm_regs regs;
    struct kvm_fpu fpu = {};
    int r;
    int i;

    /* always save the PSW and the GPRS */
    cs->kvm_run->psw_addr = env->psw.addr;
    cs->kvm_run->psw_mask = env->psw.mask;

    if (can_sync_regs(cs, KVM_SYNC_GPRS)) {
        for (i = 0; i < 16; i++) {
            cs->kvm_run->s.regs.gprs[i] = env->regs[i];
            cs->kvm_run->kvm_dirty_regs |= KVM_SYNC_GPRS;
        }
    } else {
        for (i = 0; i < 16; i++) {
            regs.gprs[i] = env->regs[i];
        }
        r = kvm_vcpu_ioctl(cs, KVM_SET_REGS, &regs);
        if (r < 0) {
            return r;
        }
    }

    /* Floating point */
    for (i = 0; i < 16; i++) {
        fpu.fprs[i] = env->fregs[i].ll;
    }
    fpu.fpc = env->fpc;

    r = kvm_vcpu_ioctl(cs, KVM_SET_FPU, &fpu);
    if (r < 0) {
        return r;
    }

    /* Do we need to save more than that? */
    if (level == KVM_PUT_RUNTIME_STATE) {
        return 0;
    }

    /* timers, TOD programmable register, guest breaking-event address, PP */
    if (can_sync_regs(cs, KVM_SYNC_ARCH0)) {
        cs->kvm_run->s.regs.cputm = env->cputm;
        cs->kvm_run->s.regs.ckc = env->ckc;
        cs->kvm_run->s.regs.todpr = env->todpr;
        cs->kvm_run->s.regs.gbea = env->gbea;
        cs->kvm_run->s.regs.pp = env->pp;
        cs->kvm_run->kvm_dirty_regs |= KVM_SYNC_ARCH0;
    } else {
        /*
         * These ONE_REGS are not protected by a capability. As they are only
         * necessary for migration we just trace a possible error, but don't
         * return with an error return code.
         */
        kvm_set_one_reg(cs, KVM_REG_S390_CPU_TIMER, &env->cputm);
        kvm_set_one_reg(cs, KVM_REG_S390_CLOCK_COMP, &env->ckc);
        kvm_set_one_reg(cs, KVM_REG_S390_TODPR, &env->todpr);
        kvm_set_one_reg(cs, KVM_REG_S390_GBEA, &env->gbea);
        kvm_set_one_reg(cs, KVM_REG_S390_PP, &env->pp);
    }

    /* pfault parameters */
    if (can_sync_regs(cs, KVM_SYNC_PFAULT)) {
        cs->kvm_run->s.regs.pft = env->pfault_token;
        cs->kvm_run->s.regs.pfs = env->pfault_select;
        cs->kvm_run->s.regs.pfc = env->pfault_compare;
        cs->kvm_run->kvm_dirty_regs |= KVM_SYNC_PFAULT;
    } else if (cap_async_pf) {
        r = kvm_set_one_reg(cs, KVM_REG_S390_PFTOKEN, &env->pfault_token);
        if (r < 0) {
            return r;
        }
        r = kvm_set_one_reg(cs, KVM_REG_S390_PFCOMPARE, &env->pfault_compare);
        if (r < 0) {
            return r;
        }
        r = kvm_set_one_reg(cs, KVM_REG_S390_PFSELECT, &env->pfault_select);
        if (r < 0) {
            return r;
        }
    }

    /* access registers and control registers */
    if (can_sync_regs(cs, KVM_SYNC_ACRS | KVM_SYNC_CRS)) {
        for (i = 0; i < 16; i++) {
            cs->kvm_run->s.regs.acrs[i] = env->aregs[i];
            cs->kvm_run->s.regs.crs[i] = env->cregs[i];
        }
        cs->kvm_run->kvm_dirty_regs |= KVM_SYNC_ACRS;
        cs->kvm_run->kvm_dirty_regs |= KVM_SYNC_CRS;
    } else {
        for (i = 0; i < 16; i++) {
            sregs.acrs[i] = env->aregs[i];
            sregs.crs[i] = env->cregs[i];
        }
        r = kvm_vcpu_ioctl(cs, KVM_SET_SREGS, &sregs);
        if (r < 0) {
            return r;
        }
    }

    /* Finally the prefix */
    if (can_sync_regs(cs, KVM_SYNC_PREFIX)) {
        cs->kvm_run->s.regs.prefix = env->psa;
        cs->kvm_run->kvm_dirty_regs |= KVM_SYNC_PREFIX;
    } else {
        /* prefix is only supported via sync regs */
    }
    return 0;
}
386
/*
 * Mirror of kvm_arch_put_registers(): pull the current vcpu state out of
 * the kernel into env, preferring the sync_regs area over GET ioctls /
 * ONE_REGs where supported.
 * Returns 0 on success, negative errno on ioctl failure.
 */
int kvm_arch_get_registers(CPUState *cs)
{
    S390CPU *cpu = S390_CPU(cs);
    CPUS390XState *env = &cpu->env;
    struct kvm_sregs sregs;
    struct kvm_regs regs;
    struct kvm_fpu fpu;
    int i, r;

    /* get the PSW */
    env->psw.addr = cs->kvm_run->psw_addr;
    env->psw.mask = cs->kvm_run->psw_mask;

    /* the GPRS */
    if (can_sync_regs(cs, KVM_SYNC_GPRS)) {
        for (i = 0; i < 16; i++) {
            env->regs[i] = cs->kvm_run->s.regs.gprs[i];
        }
    } else {
        r = kvm_vcpu_ioctl(cs, KVM_GET_REGS, &regs);
        if (r < 0) {
            return r;
        }
        for (i = 0; i < 16; i++) {
            env->regs[i] = regs.gprs[i];
        }
    }

    /* The ACRS and CRS */
    if (can_sync_regs(cs, KVM_SYNC_ACRS | KVM_SYNC_CRS)) {
        for (i = 0; i < 16; i++) {
            env->aregs[i] = cs->kvm_run->s.regs.acrs[i];
            env->cregs[i] = cs->kvm_run->s.regs.crs[i];
        }
    } else {
        r = kvm_vcpu_ioctl(cs, KVM_GET_SREGS, &sregs);
        if (r < 0) {
            return r;
        }
        for (i = 0; i < 16; i++) {
            env->aregs[i] = sregs.acrs[i];
            env->cregs[i] = sregs.crs[i];
        }
    }

    /* Floating point */
    r = kvm_vcpu_ioctl(cs, KVM_GET_FPU, &fpu);
    if (r < 0) {
        return r;
    }
    for (i = 0; i < 16; i++) {
        env->fregs[i].ll = fpu.fprs[i];
    }
    env->fpc = fpu.fpc;

    /* The prefix */
    if (can_sync_regs(cs, KVM_SYNC_PREFIX)) {
        env->psa = cs->kvm_run->s.regs.prefix;
    }

    /* timers, TOD programmable register, guest breaking-event address, PP */
    if (can_sync_regs(cs, KVM_SYNC_ARCH0)) {
        env->cputm = cs->kvm_run->s.regs.cputm;
        env->ckc = cs->kvm_run->s.regs.ckc;
        env->todpr = cs->kvm_run->s.regs.todpr;
        env->gbea = cs->kvm_run->s.regs.gbea;
        env->pp = cs->kvm_run->s.regs.pp;
    } else {
        /*
         * These ONE_REGS are not protected by a capability. As they are only
         * necessary for migration we just trace a possible error, but don't
         * return with an error return code.
         */
        kvm_get_one_reg(cs, KVM_REG_S390_CPU_TIMER, &env->cputm);
        kvm_get_one_reg(cs, KVM_REG_S390_CLOCK_COMP, &env->ckc);
        kvm_get_one_reg(cs, KVM_REG_S390_TODPR, &env->todpr);
        kvm_get_one_reg(cs, KVM_REG_S390_GBEA, &env->gbea);
        kvm_get_one_reg(cs, KVM_REG_S390_PP, &env->pp);
    }

    /* pfault parameters */
    if (can_sync_regs(cs, KVM_SYNC_PFAULT)) {
        env->pfault_token = cs->kvm_run->s.regs.pft;
        env->pfault_select = cs->kvm_run->s.regs.pfs;
        env->pfault_compare = cs->kvm_run->s.regs.pfc;
    } else if (cap_async_pf) {
        r = kvm_get_one_reg(cs, KVM_REG_S390_PFTOKEN, &env->pfault_token);
        if (r < 0) {
            return r;
        }
        r = kvm_get_one_reg(cs, KVM_REG_S390_PFCOMPARE, &env->pfault_compare);
        if (r < 0) {
            return r;
        }
        r = kvm_get_one_reg(cs, KVM_REG_S390_PFSELECT, &env->pfault_select);
        if (r < 0) {
            return r;
        }
    }

    return 0;
}
488
3f9e59bb
JH
/*
 * Read the guest TOD clock (low 64 bits plus epoch-index high byte)
 * via the KVM_S390_VM_TOD device attribute group.
 * Returns 0 on success, negative errno otherwise.
 */
int kvm_s390_get_clock(uint8_t *tod_high, uint64_t *tod_low)
{
    int r;
    struct kvm_device_attr attr = {
        .group = KVM_S390_VM_TOD,
        .attr = KVM_S390_VM_TOD_LOW,
        .addr = (uint64_t)tod_low,
    };

    r = kvm_vm_ioctl(kvm_state, KVM_GET_DEVICE_ATTR, &attr);
    if (r) {
        return r;
    }

    attr.attr = KVM_S390_VM_TOD_HIGH;
    attr.addr = (uint64_t)tod_high;
    return kvm_vm_ioctl(kvm_state, KVM_GET_DEVICE_ATTR, &attr);
}
507
/*
 * Set the guest TOD clock (low word first, then the high byte).
 * Returns 0 on success, negative errno otherwise.
 */
int kvm_s390_set_clock(uint8_t *tod_high, uint64_t *tod_low)
{
    int r;

    struct kvm_device_attr attr = {
        .group = KVM_S390_VM_TOD,
        .attr = KVM_S390_VM_TOD_LOW,
        .addr = (uint64_t)tod_low,
    };

    r = kvm_vm_ioctl(kvm_state, KVM_SET_DEVICE_ATTR, &attr);
    if (r) {
        return r;
    }

    attr.attr = KVM_S390_VM_TOD_HIGH;
    attr.addr = (uint64_t)tod_high;
    return kvm_vm_ioctl(kvm_state, KVM_SET_DEVICE_ATTR, &attr);
}
527
fdec9918
CB
/*
 * Legacy layout for s390:
 * Older S390 KVM requires the topmost vma of the RAM to be
 * smaller than an system defined value, which is at least 256GB.
 * Larger systems have larger values. We put the guest between
 * the end of data segment (system break) and this value. We
 * use 32GB as a base to have enough room for the system break
 * to grow. We also have to use MAP parameters that avoid
 * read-only mapping of guest pages.
 */
static void *legacy_s390_alloc(size_t size, uint64_t *align)
{
    void *mem;

    /* NOTE(review): the 'align' out-parameter is ignored here — presumably
     * the fixed 32GB mapping already satisfies any alignment the caller
     * needs; confirm against phys_mem_set_alloc() users. */
    mem = mmap((void *) 0x800000000ULL, size,
               PROT_EXEC|PROT_READ|PROT_WRITE,
               MAP_SHARED | MAP_ANONYMOUS | MAP_FIXED, -1, 0);
    return mem == MAP_FAILED ? NULL : mem;
}
547
8e4e86af
DH
/* DIAG 501 is used for sw breakpoints */
static const uint8_t diag_501[] = {0x83, 0x24, 0x05, 0x01};

/*
 * Install a software breakpoint: save the original instruction bytes in
 * bp->saved_insn and overwrite them with the DIAG 501 pattern.
 */
int kvm_arch_insert_sw_breakpoint(CPUState *cs, struct kvm_sw_breakpoint *bp)
{

    if (cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&bp->saved_insn,
                            sizeof(diag_501), 0) ||
        cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)diag_501,
                            sizeof(diag_501), 1)) {
        return -EINVAL;
    }
    return 0;
}
562
/*
 * Remove a software breakpoint: verify the DIAG 501 pattern is still
 * present at bp->pc, then restore the saved instruction bytes.
 */
int kvm_arch_remove_sw_breakpoint(CPUState *cs, struct kvm_sw_breakpoint *bp)
{
    uint8_t t[sizeof(diag_501)];

    if (cpu_memory_rw_debug(cs, bp->pc, t, sizeof(diag_501), 0)) {
        return -EINVAL;
    } else if (memcmp(t, diag_501, sizeof(diag_501))) {
        /* something else was written there — refuse to "restore" */
        return -EINVAL;
    } else if (cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&bp->saved_insn,
                                   sizeof(diag_501), 1)) {
        return -EINVAL;
    }

    return 0;
}
578
770a6379
DH
/*
 * Find a registered hardware breakpoint/watchpoint by address and type.
 * len == -1 acts as a wildcard matching any length.
 * Returns a pointer into the hw_breakpoints array, or NULL.
 */
static struct kvm_hw_breakpoint *find_hw_breakpoint(target_ulong addr,
                                                    int len, int type)
{
    int n;

    for (n = 0; n < nb_hw_breakpoints; n++) {
        if (hw_breakpoints[n].addr == addr && hw_breakpoints[n].type == type &&
            (hw_breakpoints[n].len == len || len == -1)) {
            return &hw_breakpoints[n];
        }
    }

    return NULL;
}
593
594static int insert_hw_breakpoint(target_ulong addr, int len, int type)
595{
596 int size;
597
598 if (find_hw_breakpoint(addr, len, type)) {
599 return -EEXIST;
600 }
601
602 size = (nb_hw_breakpoints + 1) * sizeof(struct kvm_hw_breakpoint);
603
604 if (!hw_breakpoints) {
605 nb_hw_breakpoints = 0;
606 hw_breakpoints = (struct kvm_hw_breakpoint *)g_try_malloc(size);
607 } else {
608 hw_breakpoints =
609 (struct kvm_hw_breakpoint *)g_try_realloc(hw_breakpoints, size);
610 }
611
612 if (!hw_breakpoints) {
613 nb_hw_breakpoints = 0;
614 return -ENOMEM;
615 }
616
617 hw_breakpoints[nb_hw_breakpoints].addr = addr;
618 hw_breakpoints[nb_hw_breakpoints].len = len;
619 hw_breakpoints[nb_hw_breakpoints].type = type;
620
621 nb_hw_breakpoints++;
622
623 return 0;
624}
625
8c012449
DH
/*
 * GDB entry point: translate the gdbstub breakpoint type into the KVM
 * hardware type and register it. Only HW breakpoints and write
 * watchpoints are supported.
 */
int kvm_arch_insert_hw_breakpoint(target_ulong addr,
                                  target_ulong len, int type)
{
    switch (type) {
    case GDB_BREAKPOINT_HW:
        type = KVM_HW_BP;
        break;
    case GDB_WATCHPOINT_WRITE:
        if (len < 1) {
            return -EINVAL;
        }
        type = KVM_HW_WP_WRITE;
        break;
    default:
        return -ENOSYS;
    }
    return insert_hw_breakpoint(addr, len, type);
}
644
/*
 * Remove one hardware breakpoint and shrink the array, filling the gap
 * with the last element so the array stays contiguous for the kernel.
 */
int kvm_arch_remove_hw_breakpoint(target_ulong addr,
                                  target_ulong len, int type)
{
    int size;
    struct kvm_hw_breakpoint *bp = find_hw_breakpoint(addr, len, type);

    if (bp == NULL) {
        return -ENOENT;
    }

    nb_hw_breakpoints--;
    if (nb_hw_breakpoints > 0) {
        /*
         * In order to trim the array, move the last element to the position to
         * be removed - if necessary.
         */
        if (bp != &hw_breakpoints[nb_hw_breakpoints]) {
            *bp = hw_breakpoints[nb_hw_breakpoints];
        }
        size = nb_hw_breakpoints * sizeof(struct kvm_hw_breakpoint);
        hw_breakpoints =
             (struct kvm_hw_breakpoint *)g_realloc(hw_breakpoints, size);
    } else {
        g_free(hw_breakpoints);
        hw_breakpoints = NULL;
    }

    return 0;
}
674
/* Drop every registered hardware breakpoint and release the array. */
void kvm_arch_remove_all_hw_breakpoints(void)
{
    nb_hw_breakpoints = 0;
    g_free(hw_breakpoints);
    hw_breakpoints = NULL;
}
681
/*
 * Fill the guest-debug control block handed to KVM: point it at our
 * breakpoint array (after refreshing each entry's physical address) and
 * set the enable flags accordingly.
 */
void kvm_arch_update_guest_debug(CPUState *cpu, struct kvm_guest_debug *dbg)
{
    int i;

    if (nb_hw_breakpoints > 0) {
        dbg->arch.nr_hw_bp = nb_hw_breakpoints;
        dbg->arch.hw_bp = hw_breakpoints;

        for (i = 0; i < nb_hw_breakpoints; ++i) {
            /* the kernel wants guest-physical addresses */
            hw_breakpoints[i].phys_addr = s390_cpu_get_phys_addr_debug(cpu,
                                                       hw_breakpoints[i].addr);
        }
        dbg->control |= KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_HW_BP;
    } else {
        dbg->arch.nr_hw_bp = 0;
        dbg->arch.hw_bp = NULL;
    }
}
700
/* Nothing to do before entering the guest on s390x. */
void kvm_arch_pre_run(CPUState *cpu, struct kvm_run *run)
{
}
704
/* Nothing to do after returning from the guest on s390x. */
void kvm_arch_post_run(CPUState *cpu, struct kvm_run *run)
{
}
708
/* Report whether the vcpu is halted; no other async events to process. */
int kvm_arch_process_async_events(CPUState *cs)
{
    return cs->halted;
}
713
66ad0893
CH
/*
 * Convert the rich kvm_s390_irq representation into the legacy
 * kvm_s390_interrupt layout used by the old KVM_S390_INTERRUPT ioctl.
 * Returns 0 on success, -EINVAL for interrupt types the legacy
 * interface cannot express.
 */
static int s390_kvm_irq_to_interrupt(struct kvm_s390_irq *irq,
                                     struct kvm_s390_interrupt *interrupt)
{
    int r = 0;

    interrupt->type = irq->type;
    switch (irq->type) {
    case KVM_S390_INT_VIRTIO:
        interrupt->parm = irq->u.ext.ext_params;
        /* fall through */
    case KVM_S390_INT_PFAULT_INIT:
    case KVM_S390_INT_PFAULT_DONE:
        interrupt->parm64 = irq->u.ext.ext_params2;
        break;
    case KVM_S390_PROGRAM_INT:
        interrupt->parm = irq->u.pgm.code;
        break;
    case KVM_S390_SIGP_SET_PREFIX:
        interrupt->parm = irq->u.prefix.address;
        break;
    case KVM_S390_INT_SERVICE:
        interrupt->parm = irq->u.ext.ext_params;
        break;
    case KVM_S390_MCHK:
        interrupt->parm = irq->u.mchk.cr14;
        interrupt->parm64 = irq->u.mchk.mcic;
        break;
    case KVM_S390_INT_EXTERNAL_CALL:
        interrupt->parm = irq->u.extcall.code;
        break;
    case KVM_S390_INT_EMERGENCY:
        interrupt->parm = irq->u.emerg.code;
        break;
    case KVM_S390_SIGP_STOP:
    case KVM_S390_RESTART:
        break; /* These types have no parameters */
    case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
        /* pack subchannel id/nr and I/O parameters into parm/parm64 */
        interrupt->parm = irq->u.io.subchannel_id << 16;
        interrupt->parm |= irq->u.io.subchannel_nr;
        interrupt->parm64 = (uint64_t)irq->u.io.io_int_parm << 32;
        interrupt->parm64 |= irq->u.io.io_int_word;
        break;
    default:
        r = -EINVAL;
        break;
    }
    return r;
}
762
/*
 * Inject a CPU-local interrupt via the per-vcpu KVM_S390_INTERRUPT ioctl.
 * Failures are fatal: an uninjectable interrupt leaves the guest in an
 * undefined state, so QEMU exits.
 */
void kvm_s390_vcpu_interrupt(S390CPU *cpu, struct kvm_s390_irq *irq)
{
    struct kvm_s390_interrupt kvmint = {};
    CPUState *cs = CPU(cpu);
    int r;

    r = s390_kvm_irq_to_interrupt(irq, &kvmint);
    if (r < 0) {
        fprintf(stderr, "%s called with bogus interrupt\n", __func__);
        exit(1);
    }

    r = kvm_vcpu_ioctl(cs, KVM_S390_INTERRUPT, &kvmint);
    if (r < 0) {
        fprintf(stderr, "KVM failed to inject interrupt\n");
        exit(1);
    }
}
781
bbd8bb8e 782static void __kvm_s390_floating_interrupt(struct kvm_s390_irq *irq)
66ad0893
CH
783{
784 struct kvm_s390_interrupt kvmint = {};
785 int r;
786
787 r = s390_kvm_irq_to_interrupt(irq, &kvmint);
788 if (r < 0) {
789 fprintf(stderr, "%s called with bogus interrupt\n", __func__);
790 exit(1);
791 }
792
793 r = kvm_vm_ioctl(kvm_state, KVM_S390_INTERRUPT, &kvmint);
794 if (r < 0) {
795 fprintf(stderr, "KVM failed to inject interrupt\n");
796 exit(1);
797 }
798}
799
bbd8bb8e
CH
800void kvm_s390_floating_interrupt(struct kvm_s390_irq *irq)
801{
802 static bool use_flic = true;
803 int r;
804
805 if (use_flic) {
806 r = kvm_s390_inject_flic(irq);
807 if (r == -ENOSYS) {
808 use_flic = false;
809 }
810 if (!r) {
811 return;
812 }
813 }
814 __kvm_s390_floating_interrupt(irq);
815}
816
/* Inject a virtio notification as a floating VIRTIO interrupt. */
void kvm_s390_virtio_irq(int config_change, uint64_t token)
{
    struct kvm_s390_irq irq = {
        .type = KVM_S390_INT_VIRTIO,
        .u.ext.ext_params = config_change,
        .u.ext.ext_params2 = token,
    };

    kvm_s390_floating_interrupt(&irq);
}
827
/* Inject a service-signal (SCLP) floating interrupt with parameter parm. */
void kvm_s390_service_interrupt(uint32_t parm)
{
    struct kvm_s390_irq irq = {
        .type = KVM_S390_INT_SERVICE,
        .u.ext.ext_params = parm,
    };

    kvm_s390_floating_interrupt(&irq);
}
837
/* Deliver a program-check interruption with the given program code. */
static void enter_pgmcheck(S390CPU *cpu, uint16_t code)
{
    struct kvm_s390_irq irq = {
        .type = KVM_S390_PROGRAM_INT,
        .u.pgm.code = code,
    };

    kvm_s390_vcpu_interrupt(cpu, &irq);
}
847
801cdd35
TH
/*
 * Deliver an access-exception program interruption, carrying the
 * translation-exception code; the access id lives in its low two bits.
 */
void kvm_s390_access_exception(S390CPU *cpu, uint16_t code, uint64_t te_code)
{
    struct kvm_s390_irq irq = {
        .type = KVM_S390_PROGRAM_INT,
        .u.pgm.code = code,
        .u.pgm.trans_exc_code = te_code,
        .u.pgm.exc_access_id = te_code & 3,
    };

    kvm_s390_vcpu_interrupt(cpu, &irq);
}
859
/*
 * Handle an intercepted SERVICE CALL: extract the SCCB address and
 * command code from the registers named in ipbh0 and run the SCLP
 * emulation. A negative emulation result becomes a program check;
 * otherwise the result is the condition code.
 */
static int kvm_sclp_service_call(S390CPU *cpu, struct kvm_run *run,
                                 uint16_t ipbh0)
{
    CPUS390XState *env = &cpu->env;
    uint64_t sccb;
    uint32_t code;
    int r = 0;

    cpu_synchronize_state(CPU(cpu));
    sccb = env->regs[ipbh0 & 0xf];
    code = env->regs[(ipbh0 & 0xf0) >> 4];

    r = sclp_service_call(env, sccb, code);
    if (r < 0) {
        enter_pgmcheck(cpu, -r);
    } else {
        setcc(cpu, r);
    }

    return 0;
}
881
/*
 * Dispatch intercepted 0xB2xx privileged instructions (mostly channel
 * I/O instructions) to their emulation handlers.
 * Returns 0 when handled, -1 for unknown sub-opcodes (caller injects
 * an operation exception).
 */
static int handle_b2(S390CPU *cpu, struct kvm_run *run, uint8_t ipa1)
{
    CPUS390XState *env = &cpu->env;
    int rc = 0;
    uint16_t ipbh0 = (run->s390_sieic.ipb & 0xffff0000) >> 16;

    cpu_synchronize_state(CPU(cpu));

    switch (ipa1) {
    case PRIV_B2_XSCH:
        ioinst_handle_xsch(cpu, env->regs[1]);
        break;
    case PRIV_B2_CSCH:
        ioinst_handle_csch(cpu, env->regs[1]);
        break;
    case PRIV_B2_HSCH:
        ioinst_handle_hsch(cpu, env->regs[1]);
        break;
    case PRIV_B2_MSCH:
        ioinst_handle_msch(cpu, env->regs[1], run->s390_sieic.ipb);
        break;
    case PRIV_B2_SSCH:
        ioinst_handle_ssch(cpu, env->regs[1], run->s390_sieic.ipb);
        break;
    case PRIV_B2_STCRW:
        ioinst_handle_stcrw(cpu, run->s390_sieic.ipb);
        break;
    case PRIV_B2_STSCH:
        ioinst_handle_stsch(cpu, env->regs[1], run->s390_sieic.ipb);
        break;
    case PRIV_B2_TSCH:
        /* We should only get tsch via KVM_EXIT_S390_TSCH. */
        fprintf(stderr, "Spurious tsch intercept\n");
        break;
    case PRIV_B2_CHSC:
        ioinst_handle_chsc(cpu, run->s390_sieic.ipb);
        break;
    case PRIV_B2_TPI:
        /* This should have been handled by kvm already. */
        fprintf(stderr, "Spurious tpi intercept\n");
        break;
    case PRIV_B2_SCHM:
        ioinst_handle_schm(cpu, env->regs[1], env->regs[2],
                           run->s390_sieic.ipb);
        break;
    case PRIV_B2_RSCH:
        ioinst_handle_rsch(cpu, env->regs[1]);
        break;
    case PRIV_B2_RCHP:
        ioinst_handle_rchp(cpu, env->regs[1]);
        break;
    case PRIV_B2_STCPS:
        /* We do not provide this instruction, it is suppressed. */
        break;
    case PRIV_B2_SAL:
        ioinst_handle_sal(cpu, env->regs[1]);
        break;
    case PRIV_B2_SIGA:
        /* Not provided, set CC = 3 for subchannel not operational */
        setcc(cpu, 3);
        break;
    case PRIV_B2_SCLP_CALL:
        rc = kvm_sclp_service_call(cpu, run, ipbh0);
        break;
    default:
        rc = -1;
        DPRINTF("KVM: unhandled PRIV: 0xb2%x\n", ipa1);
        break;
    }

    return rc;
}
954
863f6f52
FB
/*
 * Compute the effective address of an RXY-format instruction from the
 * intercepted ipa/ipb fields: base + index + sign-extended 20-bit
 * displacement.
 */
static uint64_t get_base_disp_rxy(S390CPU *cpu, struct kvm_run *run)
{
    CPUS390XState *env = &cpu->env;
    uint32_t x2 = (run->s390_sieic.ipa & 0x000f);
    uint32_t base2 = run->s390_sieic.ipb >> 28;
    uint32_t disp2 = ((run->s390_sieic.ipb & 0x0fff0000) >> 16) +
                     ((run->s390_sieic.ipb & 0xff00) << 4);

    /* sign-extend the 20-bit displacement */
    if (disp2 & 0x80000) {
        disp2 += 0xfff00000;
    }

    return (base2 ? env->regs[base2] : 0) +
           (x2 ? env->regs[x2] : 0) + (long)(int)disp2;
}
970
/*
 * Compute the effective address of an RSY-format instruction:
 * base + sign-extended 20-bit displacement (no index register).
 */
static uint64_t get_base_disp_rsy(S390CPU *cpu, struct kvm_run *run)
{
    CPUS390XState *env = &cpu->env;
    uint32_t base2 = run->s390_sieic.ipb >> 28;
    uint32_t disp2 = ((run->s390_sieic.ipb & 0x0fff0000) >> 16) +
                     ((run->s390_sieic.ipb & 0xff00) << 4);

    /* sign-extend the 20-bit displacement */
    if (disp2 & 0x80000) {
        disp2 += 0xfff00000;
    }

    return (base2 ? env->regs[base2] : 0) + (long)(int)disp2;
}
984
/* Decode register operands for CLP and forward to the PCI emulation. */
static int kvm_clp_service_call(S390CPU *cpu, struct kvm_run *run)
{
    uint8_t r2 = (run->s390_sieic.ipb & 0x000f0000) >> 16;

    return clp_service_call(cpu, r2);
}

/* PCI load: r1 <- PCI function addressed via r2. */
static int kvm_pcilg_service_call(S390CPU *cpu, struct kvm_run *run)
{
    uint8_t r1 = (run->s390_sieic.ipb & 0x00f00000) >> 20;
    uint8_t r2 = (run->s390_sieic.ipb & 0x000f0000) >> 16;

    return pcilg_service_call(cpu, r1, r2);
}

/* PCI store: value in r1 -> PCI function addressed via r2. */
static int kvm_pcistg_service_call(S390CPU *cpu, struct kvm_run *run)
{
    uint8_t r1 = (run->s390_sieic.ipb & 0x00f00000) >> 20;
    uint8_t r2 = (run->s390_sieic.ipb & 0x000f0000) >> 16;

    return pcistg_service_call(cpu, r1, r2);
}

/* Store PCI function controls to the RXY-addressed function info block. */
static int kvm_stpcifc_service_call(S390CPU *cpu, struct kvm_run *run)
{
    uint8_t r1 = (run->s390_sieic.ipa & 0x00f0) >> 4;
    uint64_t fiba;

    cpu_synchronize_state(CPU(cpu));
    fiba = get_base_disp_rxy(cpu, run);

    return stpcifc_service_call(cpu, r1, fiba);
}

/* SIC is accepted but deliberately does nothing. */
static int kvm_sic_service_call(S390CPU *cpu, struct kvm_run *run)
{
    /* NOOP */
    return 0;
}

/* Refresh PCI translations for the range described by r1/r2. */
static int kvm_rpcit_service_call(S390CPU *cpu, struct kvm_run *run)
{
    uint8_t r1 = (run->s390_sieic.ipb & 0x00f00000) >> 20;
    uint8_t r2 = (run->s390_sieic.ipb & 0x000f0000) >> 16;

    return rpcit_service_call(cpu, r1, r2);
}

/* PCI store block: write the block at the RSY-addressed guest address. */
static int kvm_pcistb_service_call(S390CPU *cpu, struct kvm_run *run)
{
    uint8_t r1 = (run->s390_sieic.ipa & 0x00f0) >> 4;
    uint8_t r3 = run->s390_sieic.ipa & 0x000f;
    uint64_t gaddr;

    cpu_synchronize_state(CPU(cpu));
    gaddr = get_base_disp_rsy(cpu, run);

    return pcistb_service_call(cpu, r1, r3, gaddr);
}

/* Modify PCI function controls from the RXY-addressed info block. */
static int kvm_mpcifc_service_call(S390CPU *cpu, struct kvm_run *run)
{
    uint8_t r1 = (run->s390_sieic.ipa & 0x00f0) >> 4;
    uint64_t fiba;

    cpu_synchronize_state(CPU(cpu));
    fiba = get_base_disp_rxy(cpu, run);

    return mpcifc_service_call(cpu, r1, fiba);
}
1055
/*
 * Dispatch intercepted 0xB9xx privileged instructions (PCI ops and
 * EQBS). Returns 0 when handled, -1 to make the caller inject an
 * operation exception.
 */
static int handle_b9(S390CPU *cpu, struct kvm_run *run, uint8_t ipa1)
{
    int r = 0;

    switch (ipa1) {
    case PRIV_B9_CLP:
        r = kvm_clp_service_call(cpu, run);
        break;
    case PRIV_B9_PCISTG:
        r = kvm_pcistg_service_call(cpu, run);
        break;
    case PRIV_B9_PCILG:
        r = kvm_pcilg_service_call(cpu, run);
        break;
    case PRIV_B9_RPCIT:
        r = kvm_rpcit_service_call(cpu, run);
        break;
    case PRIV_B9_EQBS:
        /* just inject exception */
        r = -1;
        break;
    default:
        r = -1;
        DPRINTF("KVM: unhandled PRIV: 0xb9%x\n", ipa1);
        break;
    }

    return r;
}
1085
80765f07 1086static int handle_eb(S390CPU *cpu, struct kvm_run *run, uint8_t ipbl)
1eecf41b
FB
1087{
1088 int r = 0;
1089
80765f07 1090 switch (ipbl) {
863f6f52
FB
1091 case PRIV_EB_PCISTB:
1092 r = kvm_pcistb_service_call(cpu, run);
1093 break;
1094 case PRIV_EB_SIC:
1095 r = kvm_sic_service_call(cpu, run);
1096 break;
1eecf41b
FB
1097 case PRIV_EB_SQBS:
1098 /* just inject exception */
1099 r = -1;
1100 break;
1101 default:
1102 r = -1;
80765f07 1103 DPRINTF("KVM: unhandled PRIV: 0xeb%x\n", ipbl);
1eecf41b 1104 break;
0e60a699
AG
1105 }
1106
1107 return r;
1108}
1109
863f6f52
FB
1110static int handle_e3(S390CPU *cpu, struct kvm_run *run, uint8_t ipbl)
1111{
1112 int r = 0;
1113
1114 switch (ipbl) {
1115 case PRIV_E3_MPCIFC:
1116 r = kvm_mpcifc_service_call(cpu, run);
1117 break;
1118 case PRIV_E3_STPCIFC:
1119 r = kvm_stpcifc_service_call(cpu, run);
1120 break;
1121 default:
1122 r = -1;
1123 DPRINTF("KVM: unhandled PRIV: 0xe3%x\n", ipbl);
1124 break;
1125 }
1126
1127 return r;
1128}
1129
/* Handle the DIAG 0x500 virtio hypercall.  An -EINVAL result from the
 * hypercall is turned into a specification exception for the guest and
 * reported as handled (return 0). */
static int handle_hypercall(S390CPU *cpu, struct kvm_run *run)
{
    CPUS390XState *env = &cpu->env;
    int ret;

    cpu_synchronize_state(CPU(cpu));
    ret = s390_virtio_hypercall(env);
    if (ret == -EINVAL) {
        enter_pgmcheck(cpu, PGM_SPECIFICATION);
        return 0;
    }

    return ret;
}
1144
/* DIAG 0x308 (IPL functions): extract r1/r3 from the IPA and forward to the
 * common diag 308 handler. */
static void kvm_handle_diag_308(S390CPU *cpu, struct kvm_run *run)
{
    uint64_t r1, r3;

    cpu_synchronize_state(CPU(cpu));
    r1 = (run->s390_sieic.ipa & 0x00f0) >> 4;
    r3 = run->s390_sieic.ipa & 0x000f;
    handle_diag_308(&cpu->env, r1, r3);
}
1154
/* Check whether the current stop was caused by one of our software
 * breakpoints.  The PSW already points past the 4-byte breakpoint
 * instruction, so rewind it before reporting EXCP_DEBUG.  Returns -ENOENT
 * if no breakpoint is registered at that address. */
static int handle_sw_breakpoint(S390CPU *cpu, struct kvm_run *run)
{
    CPUS390XState *env = &cpu->env;
    unsigned long pc;

    cpu_synchronize_state(CPU(cpu));

    pc = env->psw.addr - 4;
    if (kvm_find_sw_breakpoint(CPU(cpu), pc)) {
        env->psw.addr = pc;
        return EXCP_DEBUG;
    }

    return -ENOENT;
}
1170
/* Only bits 48-63 of the base+displacement select the diagnose function. */
#define DIAG_KVM_CODE_MASK 0x000000000000ffff

/* Dispatch DIAGNOSE calls intercepted by KVM.  Unknown function codes are
 * answered with a specification exception. */
static int handle_diag(S390CPU *cpu, struct kvm_run *run, uint32_t ipb)
{
    int r = 0;
    uint16_t func_code;

    /*
     * For any diagnose call we support, bits 48-63 of the resulting
     * address specify the function code; the remainder is ignored.
     */
    func_code = decode_basedisp_rs(&cpu->env, ipb) & DIAG_KVM_CODE_MASK;
    switch (func_code) {
    case DIAG_IPL:
        kvm_handle_diag_308(cpu, run);
        break;
    case DIAG_KVM_HYPERCALL:
        r = handle_hypercall(cpu, run);
        break;
    case DIAG_KVM_BREAKPOINT:
        r = handle_sw_breakpoint(cpu, run);
        break;
    default:
        DPRINTF("KVM: unknown DIAG: 0x%x\n", func_code);
        enter_pgmcheck(cpu, PGM_SPECIFICATION);
        break;
    }

    return r;
}
1201
/* Parameter block handed to the per-order SIGP helpers via run_on_cpu();
 * cc and *status_reg carry the result back to handle_sigp(). */
typedef struct SigpInfo {
    S390CPU *cpu;          /* destination vcpu of the order */
    uint64_t param;        /* SIGP parameter register value */
    int cc;                /* resulting condition code */
    uint64_t *status_reg;  /* caller's status register (low 32 bits used) */
} SigpInfo;

/* Store a SIGP status value in the low word of the status register and set
 * condition code 1 (status stored). */
static void set_sigp_status(SigpInfo *si, uint64_t status)
{
    *si->status_reg &= 0xffffffff00000000ULL;
    *si->status_reg |= status;
    si->cc = SIGP_CC_STATUS_STORED;
}
6e6ad8db 1215
/* SIGP START: move a stopped cpu to the operating state.  A cpu that is not
 * stopped is left alone; the order is still accepted (cc 0). */
static void sigp_start(void *arg)
{
    SigpInfo *si = arg;

    if (s390_cpu_get_state(si->cpu) != CPU_STATE_STOPPED) {
        si->cc = SIGP_CC_ORDER_CODE_ACCEPTED;
        return;
    }

    s390_cpu_set_state(CPU_STATE_OPERATING, si->cpu);
    si->cc = SIGP_CC_ORDER_CODE_ACCEPTED;
}
1228
/* SIGP STOP: stop an operating cpu.  A cpu halted in disabled wait can be
 * stopped directly; otherwise a KVM stop irq is injected and the stop is
 * completed when the stop intercept is handled. */
static void sigp_stop(void *arg)
{
    SigpInfo *si = arg;
    struct kvm_s390_irq irq = {
        .type = KVM_S390_SIGP_STOP,
    };

    if (s390_cpu_get_state(si->cpu) != CPU_STATE_OPERATING) {
        si->cc = SIGP_CC_ORDER_CODE_ACCEPTED;
        return;
    }

    /* disabled wait - sleeping in user space */
    if (CPU(si->cpu)->halted) {
        s390_cpu_set_state(CPU_STATE_STOPPED, si->cpu);
    } else {
        /* execute the stop function */
        si->cpu->env.sigp_order = SIGP_STOP;
        kvm_s390_vcpu_interrupt(si->cpu, &irq);
    }
    si->cc = SIGP_CC_ORDER_CODE_ACCEPTED;
}
1251
/* Default status save area: the lowcore, starting at the fp save area. */
#define KVM_S390_STORE_STATUS_DEF_ADDR offsetof(LowCore, floating_pt_save_area)
#define SAVE_AREA_SIZE 512
/* Write the architected 512-byte status save area (fp regs, GPRs, PSW,
 * prefix, fpc, todpr, cpu timer, clock comparator, access and control
 * registers) at guest-absolute @addr.  Returns 0 or -EFAULT if the area
 * cannot be mapped contiguously. */
static int kvm_s390_store_status(S390CPU *cpu, hwaddr addr, bool store_arch)
{
    static const uint8_t ar_id = 1;
    uint64_t ckc = cpu->env.ckc >> 8;
    void *mem;
    hwaddr len = SAVE_AREA_SIZE;

    mem = cpu_physical_memory_map(addr, &len, 1);
    if (!mem) {
        return -EFAULT;
    }
    if (len != SAVE_AREA_SIZE) {
        /* partial mapping only - give it back untouched */
        cpu_physical_memory_unmap(mem, len, 1, 0);
        return -EFAULT;
    }

    if (store_arch) {
        /* NOTE(review): written at the absolute lowcore address, not relative
         * to @addr - presumably intentional (lowcore field), verify. */
        cpu_physical_memory_write(offsetof(LowCore, ar_access_id), &ar_id, 1);
    }
    /* fixed layout of the save area; offsets are architecture-defined */
    memcpy(mem, &cpu->env.fregs, 128);
    memcpy(mem + 128, &cpu->env.regs, 128);
    memcpy(mem + 256, &cpu->env.psw, 16);
    memcpy(mem + 280, &cpu->env.psa, 4);
    memcpy(mem + 284, &cpu->env.fpc, 4);
    memcpy(mem + 292, &cpu->env.todpr, 4);
    memcpy(mem + 296, &cpu->env.cputm, 8);
    memcpy(mem + 304, &ckc, 8);
    memcpy(mem + 320, &cpu->env.aregs, 64);
    memcpy(mem + 384, &cpu->env.cregs, 128);

    cpu_physical_memory_unmap(mem, len, 1, len);

    return 0;
}
1288
/* SIGP STOP AND STORE STATUS: stop the cpu and store its status at the
 * default (lowcore) address.  For an operating cpu the store is deferred
 * to the stop intercept; for a stopped cpu it happens immediately. */
static void sigp_stop_and_store_status(void *arg)
{
    SigpInfo *si = arg;
    struct kvm_s390_irq irq = {
        .type = KVM_S390_SIGP_STOP,
    };

    /* disabled wait - sleeping in user space */
    if (s390_cpu_get_state(si->cpu) == CPU_STATE_OPERATING &&
        CPU(si->cpu)->halted) {
        s390_cpu_set_state(CPU_STATE_STOPPED, si->cpu);
    }

    switch (s390_cpu_get_state(si->cpu)) {
    case CPU_STATE_OPERATING:
        si->cpu->env.sigp_order = SIGP_STOP_STORE_STATUS;
        kvm_s390_vcpu_interrupt(si->cpu, &irq);
        /* store will be performed when handling the stop intercept */
        break;
    case CPU_STATE_STOPPED:
        /* already stopped, just store the status */
        cpu_synchronize_state(CPU(si->cpu));
        kvm_s390_store_status(si->cpu, KVM_S390_STORE_STATUS_DEF_ADDR, true);
        break;
    }
    si->cc = SIGP_CC_ORDER_CODE_ACCEPTED;
}
1316
/* SIGP STORE STATUS AT ADDRESS: store the status of a stopped cpu at the
 * 512-byte-aligned address taken from the parameter.  Errors are reported
 * via the status register. */
static void sigp_store_status_at_address(void *arg)
{
    SigpInfo *si = arg;
    uint32_t address = si->param & 0x7ffffe00u;  /* 512-byte aligned, 31 bit */

    /* cpu has to be stopped */
    if (s390_cpu_get_state(si->cpu) != CPU_STATE_STOPPED) {
        set_sigp_status(si, SIGP_STAT_INCORRECT_STATE);
        return;
    }

    cpu_synchronize_state(CPU(si->cpu));

    if (kvm_s390_store_status(si->cpu, address, false)) {
        set_sigp_status(si, SIGP_STAT_INVALID_PARAMETER);
        return;
    }
    si->cc = SIGP_CC_ORDER_CODE_ACCEPTED;
}
1336
/* SIGP RESTART: deliver a restart interrupt.  For a stopped cpu the restart
 * is performed directly (it must precede other pending irqs); for an
 * operating cpu a KVM restart irq is injected. */
static void sigp_restart(void *arg)
{
    SigpInfo *si = arg;
    struct kvm_s390_irq irq = {
        .type = KVM_S390_RESTART,
    };

    switch (s390_cpu_get_state(si->cpu)) {
    case CPU_STATE_STOPPED:
        /* the restart irq has to be delivered prior to any other pending irq */
        cpu_synchronize_state(CPU(si->cpu));
        do_restart_interrupt(&si->cpu->env);
        s390_cpu_set_state(CPU_STATE_OPERATING, si->cpu);
        break;
    case CPU_STATE_OPERATING:
        kvm_s390_vcpu_interrupt(si->cpu, &irq);
        break;
    }
    si->cc = SIGP_CC_ORDER_CODE_ACCEPTED;
}
1357
/* Public entry point: synchronously restart @cpu on its own thread.
 * Always returns 0. */
int kvm_s390_cpu_restart(S390CPU *cpu)
{
    SigpInfo si = {
        .cpu = cpu,
    };

    run_on_cpu(CPU(cpu), sigp_restart, &si);
    DPRINTF("DONE: KVM cpu restart: %p\n", &cpu->env);
    return 0;
}
1368
f7d3e466 1369static void sigp_initial_cpu_reset(void *arg)
0e60a699 1370{
6eb8f212
DH
1371 SigpInfo *si = arg;
1372 CPUState *cs = CPU(si->cpu);
1373 S390CPUClass *scc = S390_CPU_GET_CLASS(si->cpu);
d5900813 1374
6eb8f212
DH
1375 cpu_synchronize_state(cs);
1376 scc->initial_cpu_reset(cs);
1377 cpu_synchronize_post_reset(cs);
1378 si->cc = SIGP_CC_ORDER_CODE_ACCEPTED;
0e60a699
AG
1379}
1380
04c2b516
TH
1381static void sigp_cpu_reset(void *arg)
1382{
6eb8f212
DH
1383 SigpInfo *si = arg;
1384 CPUState *cs = CPU(si->cpu);
1385 S390CPUClass *scc = S390_CPU_GET_CLASS(si->cpu);
04c2b516 1386
6eb8f212
DH
1387 cpu_synchronize_state(cs);
1388 scc->cpu_reset(cs);
1389 cpu_synchronize_post_reset(cs);
1390 si->cc = SIGP_CC_ORDER_CODE_ACCEPTED;
04c2b516
TH
1391}
1392
/* SIGP SET PREFIX: set the prefix (lowcore) address of a stopped cpu.  The
 * new lowcore must be addressable; errors are reported via the status
 * register. */
static void sigp_set_prefix(void *arg)
{
    SigpInfo *si = arg;
    uint32_t addr = si->param & 0x7fffe000u;  /* 8k aligned, 31 bit */

    cpu_synchronize_state(CPU(si->cpu));

    if (!address_space_access_valid(&address_space_memory, addr,
                                    sizeof(struct LowCore), false)) {
        set_sigp_status(si, SIGP_STAT_INVALID_PARAMETER);
        return;
    }

    /* cpu has to be stopped */
    if (s390_cpu_get_state(si->cpu) != CPU_STATE_STOPPED) {
        set_sigp_status(si, SIGP_STAT_INCORRECT_STATE);
        return;
    }

    si->cpu->env.psa = addr;
    cpu_synchronize_post_init(CPU(si->cpu));
    si->cc = SIGP_CC_ORDER_CODE_ACCEPTED;
}
1416
/* Dispatch a SIGP order that targets a single destination vcpu.  The order
 * is run synchronously on the destination's thread via run_on_cpu().
 * Returns the condition code; status (if any) is placed in *status_reg. */
static int handle_sigp_single_dst(S390CPU *dst_cpu, uint8_t order,
                                  uint64_t param, uint64_t *status_reg)
{
    SigpInfo si = {
        .cpu = dst_cpu,
        .param = param,
        .status_reg = status_reg,
    };

    /* cpu available? */
    if (dst_cpu == NULL) {
        return SIGP_CC_NOT_OPERATIONAL;
    }

    /* only resets can break pending orders */
    if (dst_cpu->env.sigp_order != 0 &&
        order != SIGP_CPU_RESET &&
        order != SIGP_INITIAL_CPU_RESET) {
        return SIGP_CC_BUSY;
    }

    switch (order) {
    case SIGP_START:
        run_on_cpu(CPU(dst_cpu), sigp_start, &si);
        break;
    case SIGP_STOP:
        run_on_cpu(CPU(dst_cpu), sigp_stop, &si);
        break;
    case SIGP_RESTART:
        run_on_cpu(CPU(dst_cpu), sigp_restart, &si);
        break;
    case SIGP_STOP_STORE_STATUS:
        run_on_cpu(CPU(dst_cpu), sigp_stop_and_store_status, &si);
        break;
    case SIGP_STORE_STATUS_ADDR:
        run_on_cpu(CPU(dst_cpu), sigp_store_status_at_address, &si);
        break;
    case SIGP_SET_PREFIX:
        run_on_cpu(CPU(dst_cpu), sigp_set_prefix, &si);
        break;
    case SIGP_INITIAL_CPU_RESET:
        run_on_cpu(CPU(dst_cpu), sigp_initial_cpu_reset, &si);
        break;
    case SIGP_CPU_RESET:
        run_on_cpu(CPU(dst_cpu), sigp_cpu_reset, &si);
        break;
    default:
        DPRINTF("KVM: unknown SIGP: 0x%x\n", order);
        set_sigp_status(&si, SIGP_STAT_INVALID_ORDER);
    }

    return si.cc;
}
1470
/* SIGP SET ARCHITECTURE: all cpus except the caller must be stopped and no
 * order may be pending.  ESA/S390 mode is not supported; switching to
 * z/Arch clears the pfault tokens of all cpus. */
static int sigp_set_architecture(S390CPU *cpu, uint32_t param,
                                 uint64_t *status_reg)
{
    CPUState *cur_cs;
    S390CPU *cur_cpu;

    /* due to the BQL, we are the only active cpu */
    CPU_FOREACH(cur_cs) {
        cur_cpu = S390_CPU(cur_cs);
        if (cur_cpu->env.sigp_order != 0) {
            return SIGP_CC_BUSY;
        }
        cpu_synchronize_state(cur_cs);
        /* all but the current one have to be stopped */
        if (cur_cpu != cpu &&
            s390_cpu_get_state(cur_cpu) != CPU_STATE_STOPPED) {
            *status_reg &= 0xffffffff00000000ULL;
            *status_reg |= SIGP_STAT_INCORRECT_STATE;
            return SIGP_CC_STATUS_STORED;
        }
    }

    switch (param & 0xff) {
    case SIGP_MODE_ESA_S390:
        /* not supported */
        return SIGP_CC_NOT_OPERATIONAL;
    case SIGP_MODE_Z_ARCH_TRANS_ALL_PSW:
    case SIGP_MODE_Z_ARCH_TRANS_CUR_PSW:
        CPU_FOREACH(cur_cs) {
            cur_cpu = S390_CPU(cur_cs);
            cur_cpu->env.pfault_token = -1UL;
        }
        break;
    default:
        *status_reg &= 0xffffffff00000000ULL;
        *status_reg |= SIGP_STAT_INVALID_PARAMETER;
        return SIGP_CC_STATUS_STORED;
    }

    return SIGP_CC_ORDER_CODE_ACCEPTED;
}
1512
/* The order code lives in the low byte of the second-operand address. */
#define SIGP_ORDER_MASK 0x000000ff

/* Top-level SIGNAL PROCESSOR intercept handler.  Decodes order, parameter
 * and destination cpu address, dispatches, stores the resulting condition
 * code and reports the outcome via tracing. */
static int handle_sigp(S390CPU *cpu, struct kvm_run *run, uint8_t ipa1)
{
    CPUS390XState *env = &cpu->env;
    const uint8_t r1 = ipa1 >> 4;
    const uint8_t r3 = ipa1 & 0x0f;
    int ret;
    uint8_t order;
    uint64_t *status_reg;
    uint64_t param;
    S390CPU *dst_cpu = NULL;

    cpu_synchronize_state(CPU(cpu));

    /* get order code */
    order = decode_basedisp_rs(env, run->s390_sieic.ipb) & SIGP_ORDER_MASK;
    status_reg = &env->regs[r1];
    /* parameter is in r1 when r1 is odd, otherwise in r1 + 1 */
    param = (r1 % 2) ? env->regs[r1] : env->regs[r1 + 1];

    switch (order) {
    case SIGP_SET_ARCH:
        ret = sigp_set_architecture(cpu, param, status_reg);
        break;
    default:
        /* all other sigp orders target a single vcpu */
        dst_cpu = s390_cpu_addr2state(env->regs[r3]);
        ret = handle_sigp_single_dst(dst_cpu, order, param, status_reg);
    }

    trace_kvm_sigp_finished(order, CPU(cpu)->cpu_index,
                            dst_cpu ? CPU(dst_cpu)->cpu_index : -1, ret);

    if (ret >= 0) {
        /* non-negative result is the condition code */
        setcc(cpu, ret);
        return 0;
    }

    return ret;
}
1553
/* Dispatch an instruction intercept by the high opcode byte.  Any handler
 * returning a negative value results in an operation exception (0x0001)
 * being injected into the guest. */
static int handle_instruction(S390CPU *cpu, struct kvm_run *run)
{
    unsigned int ipa0 = (run->s390_sieic.ipa & 0xff00);
    uint8_t ipa1 = run->s390_sieic.ipa & 0x00ff;
    int r = -1;

    DPRINTF("handle_instruction 0x%x 0x%x\n",
            run->s390_sieic.ipa, run->s390_sieic.ipb);
    switch (ipa0) {
    case IPA0_B2:
        r = handle_b2(cpu, run, ipa1);
        break;
    case IPA0_B9:
        r = handle_b9(cpu, run, ipa1);
        break;
    case IPA0_EB:
        r = handle_eb(cpu, run, run->s390_sieic.ipb & 0xff);
        break;
    case IPA0_E3:
        r = handle_e3(cpu, run, run->s390_sieic.ipb & 0xff);
        break;
    case IPA0_DIAG:
        r = handle_diag(cpu, run, run->s390_sieic.ipb);
        break;
    case IPA0_SIGP:
        r = handle_sigp(cpu, run, ipa1);
        break;
    }

    if (r < 0) {
        /* unknown/unsupported instruction: operation exception */
        r = 0;
        enter_pgmcheck(cpu, 0x0001);
    }

    return r;
}
1590
f7575c96 1591static bool is_special_wait_psw(CPUState *cs)
eca3ed03
CB
1592{
1593 /* signal quiesce */
f7575c96 1594 return cs->kvm_run->psw_addr == 0xfffUL;
eca3ed03
CB
1595}
1596
/* Report a guest panic to management (QMP event) and pause the VM. */
static void guest_panicked(void)
{
    qapi_event_send_guest_panicked(GUEST_PANIC_ACTION_PAUSE,
                                   &error_abort);
    vm_stop(RUN_STATE_GUEST_PANICKED);
}
1603
/* Log an intercept we cannot recover from (including the new PSW the guest
 * installed at @pswoffset in its lowcore), halt the cpu and flag a panic.
 * NOTE(review): %016lx with ldq_phys assumes long is 64 bit - fine on the
 * s390x hosts this targets, but PRIx64 would be the portable spelling. */
static void unmanageable_intercept(S390CPU *cpu, const char *str, int pswoffset)
{
    CPUState *cs = CPU(cpu);

    error_report("Unmanageable %s! CPU%i new PSW: 0x%016lx:%016lx",
                 str, cs->cpu_index, ldq_phys(cs->as, cpu->env.psa + pswoffset),
                 ldq_phys(cs->as, cpu->env.psa + pswoffset + 8));
    s390_cpu_halt(cpu);
    guest_panicked();
}
1614
/* Handle a KVM_EXIT_S390_SIEIC exit.  Returns 0 to continue running,
 * EXCP_HALTED when the vcpu stops, or exits QEMU for intercepts that should
 * have been handled in the kernel. */
static int handle_intercept(S390CPU *cpu)
{
    CPUState *cs = CPU(cpu);
    struct kvm_run *run = cs->kvm_run;
    int icpt_code = run->s390_sieic.icptcode;
    int r = 0;

    DPRINTF("intercept: 0x%x (at 0x%lx)\n", icpt_code,
            (long)cs->kvm_run->psw_addr);
    switch (icpt_code) {
    case ICPT_INSTRUCTION:
        r = handle_instruction(cpu, run);
        break;
    case ICPT_PROGRAM:
        /* guest took a program check with no handler installed */
        unmanageable_intercept(cpu, "program interrupt",
                               offsetof(LowCore, program_new_psw));
        r = EXCP_HALTED;
        break;
    case ICPT_EXT_INT:
        unmanageable_intercept(cpu, "external interrupt",
                               offsetof(LowCore, external_new_psw));
        r = EXCP_HALTED;
        break;
    case ICPT_WAITPSW:
        /* disabled wait, since enabled wait is handled in kernel */
        cpu_synchronize_state(cs);
        if (s390_cpu_halt(cpu) == 0) {
            /* last running cpu just halted */
            if (is_special_wait_psw(cs)) {
                qemu_system_shutdown_request();
            } else {
                guest_panicked();
            }
        }
        r = EXCP_HALTED;
        break;
    case ICPT_CPU_STOP:
        if (s390_cpu_set_state(CPU_STATE_STOPPED, cpu) == 0) {
            /* all cpus are stopped now - shut the machine down */
            qemu_system_shutdown_request();
        }
        if (cpu->env.sigp_order == SIGP_STOP_STORE_STATUS) {
            /* deferred status store requested by SIGP */
            kvm_s390_store_status(cpu, KVM_S390_STORE_STATUS_DEF_ADDR,
                                  true);
        }
        cpu->env.sigp_order = 0;
        r = EXCP_HALTED;
        break;
    case ICPT_SOFT_INTERCEPT:
        fprintf(stderr, "KVM unimplemented icpt SOFT\n");
        exit(1);
        break;
    case ICPT_IO:
        fprintf(stderr, "KVM unimplemented icpt IO\n");
        exit(1);
        break;
    default:
        fprintf(stderr, "Unknown intercept code: %d\n", icpt_code);
        exit(1);
        break;
    }

    return r;
}
1677
/* Handle a KVM_EXIT_S390_TSCH exit: run the TEST SUBCHANNEL instruction in
 * user space; on failure re-inject any I/O interrupt the kernel had already
 * dequeued so it is not lost. */
static int handle_tsch(S390CPU *cpu)
{
    CPUState *cs = CPU(cpu);
    struct kvm_run *run = cs->kvm_run;
    int ret;

    cpu_synchronize_state(cs);

    ret = ioinst_handle_tsch(cpu, cpu->env.regs[1], run->s390_tsch.ipb);
    if (ret < 0) {
        /*
         * Failure.
         * If an I/O interrupt had been dequeued, we have to reinject it.
         */
        if (run->s390_tsch.dequeued) {
            kvm_s390_io_interrupt(run->s390_tsch.subchannel_id,
                                  run->s390_tsch.subchannel_nr,
                                  run->s390_tsch.io_int_parm,
                                  run->s390_tsch.io_int_word);
        }
        ret = 0;
    }
    return ret;
}
1702
/* Handle a KVM_EXIT_DEBUG exit: map hardware watchpoints/breakpoints and
 * single-step events to EXCP_DEBUG for the gdbstub; unknown types yield
 * -ENOSYS. */
static int kvm_arch_handle_debug_exit(S390CPU *cpu)
{
    CPUState *cs = CPU(cpu);
    struct kvm_run *run = cs->kvm_run;

    int ret = 0;
    struct kvm_debug_exit_arch *arch_info = &run->debug.arch;

    switch (arch_info->type) {
    case KVM_HW_WP_WRITE:
        if (find_hw_breakpoint(arch_info->addr, -1, arch_info->type)) {
            /* report the hit watchpoint to the gdbstub */
            cs->watchpoint_hit = &hw_watchpoint;
            hw_watchpoint.vaddr = arch_info->addr;
            hw_watchpoint.flags = BP_MEM_WRITE;
            ret = EXCP_DEBUG;
        }
        break;
    case KVM_HW_BP:
        if (find_hw_breakpoint(arch_info->addr, -1, arch_info->type)) {
            ret = EXCP_DEBUG;
        }
        break;
    case KVM_SINGLESTEP:
        if (cs->singlestep_enabled) {
            ret = EXCP_DEBUG;
        }
        break;
    default:
        ret = -ENOSYS;
    }

    return ret;
}
1736
/* Common-code callback: dispatch KVM exit reasons for s390.  A 0 result is
 * mapped to EXCP_INTERRUPT so the main loop resumes the vcpu. */
int kvm_arch_handle_exit(CPUState *cs, struct kvm_run *run)
{
    S390CPU *cpu = S390_CPU(cs);
    int ret = 0;

    switch (run->exit_reason) {
    case KVM_EXIT_S390_SIEIC:
        ret = handle_intercept(cpu);
        break;
    case KVM_EXIT_S390_RESET:
        s390_reipl_request();
        break;
    case KVM_EXIT_S390_TSCH:
        ret = handle_tsch(cpu);
        break;
    case KVM_EXIT_DEBUG:
        ret = kvm_arch_handle_debug_exit(cpu);
        break;
    default:
        fprintf(stderr, "Unknown KVM exit: %d\n", run->exit_reason);
        break;
    }

    if (ret == 0) {
        ret = EXCP_INTERRUPT;
    }
    return ret;
}
4513d923 1765
20d695a9 1766bool kvm_arch_stop_on_emulation_error(CPUState *cpu)
4513d923
GN
1767{
1768 return true;
1769}
a1b87fe0 1770
20d695a9 1771int kvm_arch_on_sigbus_vcpu(CPUState *cpu, int code, void *addr)
a1b87fe0
JK
1772{
1773 return 1;
1774}
1775
/* Common-code callback: process-wide SIGBUS handling is not implemented on
 * s390; report "not handled" (non-zero). */
int kvm_arch_on_sigbus(int code, void *addr)
{
    return 1;
}
09b99878 1780
/* Inject a floating I/O interrupt.  Adapter interrupts (IO_INT_WORD_AI set)
 * use the adapter irq type; otherwise the irq type encodes cssid/ssid and
 * the subchannel number as expected by the KVM ABI. */
void kvm_s390_io_interrupt(uint16_t subchannel_id,
                           uint16_t subchannel_nr, uint32_t io_int_parm,
                           uint32_t io_int_word)
{
    struct kvm_s390_irq irq = {
        .u.io.subchannel_id = subchannel_id,
        .u.io.subchannel_nr = subchannel_nr,
        .u.io.io_int_parm = io_int_parm,
        .u.io.io_int_word = io_int_word,
    };

    if (io_int_word & IO_INT_WORD_AI) {
        irq.type = KVM_S390_INT_IO(1, 0, 0, 0);
    } else {
        irq.type = ((subchannel_id & 0xff00) << 24) |
            ((subchannel_id & 0x00060) << 22) | (subchannel_nr << 16);
    }
    kvm_s390_floating_interrupt(&irq);
}
1800
/* Inject a channel-report-pending machine check (fixed cr14/mcic values). */
void kvm_s390_crw_mchk(void)
{
    struct kvm_s390_irq irq = {
        .type = KVM_S390_MCHK,
        .u.mchk.cr14 = 1 << 28,
        .u.mchk.mcic = 0x00400f1d40330000ULL,
    };
    kvm_s390_floating_interrupt(&irq);
}
1810
/* Enable in-kernel channel subsystem support for @cpu; must succeed. */
void kvm_s390_enable_css_support(S390CPU *cpu)
{
    int r;

    /* Activate host kernel channel subsystem support. */
    r = kvm_vcpu_enable_cap(CPU(cpu), KVM_CAP_S390_CSS_SUPPORT, 0);
    assert(r == 0);
}
48475e14
AK
1819
/* Common-code callback: set up irq routing capabilities. */
void kvm_arch_init_irq_routing(KVMState *s)
{
    /*
     * Note that while irqchip capabilities generally imply that cpustates
     * are handled in-kernel, it is not true for s390 (yet); therefore, we
     * have to override the common code kvm_halt_in_kernel_allowed setting.
     */
    if (kvm_check_extension(s, KVM_CAP_IRQ_ROUTING)) {
        kvm_gsi_routing_allowed = true;
        kvm_halt_in_kernel_allowed = false;
    }
}
b4436a0b 1832
/* (De)assign an ioeventfd for virtio-ccw notifications: the subchannel id
 * acts as the "address" and the virtqueue index as the datamatch value.
 * Returns the ioctl result, or -ENOSYS without KVM_CAP_IOEVENTFD. */
int kvm_s390_assign_subch_ioeventfd(EventNotifier *notifier, uint32_t sch,
                                    int vq, bool assign)
{
    struct kvm_ioeventfd kick = {
        .flags = KVM_IOEVENTFD_FLAG_VIRTIO_CCW_NOTIFY |
        KVM_IOEVENTFD_FLAG_DATAMATCH,
        .fd = event_notifier_get_fd(notifier),
        .datamatch = vq,
        .addr = sch,
        .len = 8,
    };
    if (!kvm_check_extension(kvm_state, KVM_CAP_IOEVENTFD)) {
        return -ENOSYS;
    }
    if (!assign) {
        kick.flags |= KVM_IOEVENTFD_FLAG_DEASSIGN;
    }
    return kvm_vm_ioctl(kvm_state, KVM_IOEVENTFD, &kick);
}
1def6656
MR
1852
1853int kvm_s390_get_memslot_count(KVMState *s)
1854{
1855 return kvm_check_extension(s, KVM_CAP_NR_MEMSLOTS);
1856}
c9e659c9
DH
1857
/* Propagate a QEMU cpu state to the kernel via KVM_SET_MP_STATE.  A state
 * value outside the known set is a programming error and aborts.  Returns
 * the ioctl result (0 on success), or 0 if kvm is not yet initialized. */
int kvm_s390_set_cpu_state(S390CPU *cpu, uint8_t cpu_state)
{
    struct kvm_mp_state mp_state = {};
    int ret;

    /* the kvm part might not have been initialized yet */
    if (CPU(cpu)->kvm_state == NULL) {
        return 0;
    }

    switch (cpu_state) {
    case CPU_STATE_STOPPED:
        mp_state.mp_state = KVM_MP_STATE_STOPPED;
        break;
    case CPU_STATE_CHECK_STOP:
        mp_state.mp_state = KVM_MP_STATE_CHECK_STOP;
        break;
    case CPU_STATE_OPERATING:
        mp_state.mp_state = KVM_MP_STATE_OPERATING;
        break;
    case CPU_STATE_LOAD:
        mp_state.mp_state = KVM_MP_STATE_LOAD;
        break;
    default:
        error_report("Requested CPU state is not a valid S390 CPU state: %u",
                     cpu_state);
        exit(1);
    }

    ret = kvm_vcpu_ioctl(CPU(cpu), KVM_SET_MP_STATE, &mp_state);
    if (ret) {
        /* non-fatal: just trace the failure */
        trace_kvm_failed_cpu_state_set(CPU(cpu)->cpu_index, cpu_state,
                                       strerror(-ret));
    }

    return ret;
}
9e03a040
FB
1895
/* Common-code callback: translate a zPCI MSI (function id in the upper data
 * bits, vector in the lower) into an s390 adapter irq route.  Returns 0 on
 * success or -ENODEV when no device matches the function id. */
int kvm_arch_fixup_msi_route(struct kvm_irq_routing_entry *route,
                             uint64_t address, uint32_t data)
{
    S390PCIBusDevice *pbdev;
    uint32_t fid = data >> ZPCI_MSI_VEC_BITS;
    uint32_t vec = data & ZPCI_MSI_VEC_MASK;

    pbdev = s390_pci_find_dev_by_fid(fid);
    if (!pbdev) {
        DPRINTF("add_msi_route no dev\n");
        return -ENODEV;
    }

    /* remember the vector as the adapter indicator offset */
    pbdev->routes.adapter.ind_offset = vec;

    route->type = KVM_IRQ_ROUTING_S390_ADAPTER;
    route->flags = 0;
    route->u.adapter.summary_addr = pbdev->routes.adapter.summary_addr;
    route->u.adapter.ind_addr = pbdev->routes.adapter.ind_addr;
    route->u.adapter.summary_offset = pbdev->routes.adapter.summary_offset;
    route->u.adapter.ind_offset = pbdev->routes.adapter.ind_offset;
    route->u.adapter.adapter_id = pbdev->routes.adapter.adapter_id;
    return 0;
}