/*
 * QEMU S390x KVM implementation
 *
 * Copyright (c) 2009 Alexander Graf <agraf@suse.de>
 * Copyright IBM Corp. 2012
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * Contributions after 2012-10-29 are licensed under the terms of the
 * GNU GPL, version 2 or (at your option) any later version.
 *
 * You should have received a copy of the GNU (Lesser) General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include <sys/types.h>
#include <sys/ioctl.h>
#include <sys/mman.h>

#include <linux/kvm.h>
#include <asm/ptrace.h>

#include "qemu-common.h"
#include "qemu/timer.h"
#include "sysemu/sysemu.h"
#include "sysemu/kvm.h"
#include "cpu.h"
#include "sysemu/device_tree.h"
#include "qapi/qmp/qjson.h"
#include "monitor/monitor.h"
#include "exec/gdbstub.h"
#include "exec/address-spaces.h"
#include "trace.h"
#include "qapi-event.h"
#include "hw/s390x/s390-pci-inst.h"
#include "hw/s390x/s390-pci-bus.h"
#include "hw/s390x/ipl.h"
/* #define DEBUG_KVM */

#ifdef DEBUG_KVM
#define DPRINTF(fmt, ...) \
    do { fprintf(stderr, fmt, ## __VA_ARGS__); } while (0)
#else
#define DPRINTF(fmt, ...) \
    do { } while (0)
#endif
#define IPA0_DIAG                       0x8300
#define IPA0_SIGP                       0xae00
#define IPA0_B2                         0xb200
#define IPA0_B9                         0xb900
#define IPA0_EB                         0xeb00
#define IPA0_E3                         0xe300
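
/*
 * The intercepted instruction's first halfword (IPA) selects one of the
 * opcode groups above; handle_instruction() further down dispatches on it
 * to the 0xb2xx/0xb9xx/0xebxx/0xe3xx privileged-instruction handlers and
 * to the DIAGNOSE and SIGP handlers.
 */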
#define PRIV_B2_SCLP_CALL               0x20
#define PRIV_B2_CSCH                    0x30
#define PRIV_B2_HSCH                    0x31
#define PRIV_B2_MSCH                    0x32
#define PRIV_B2_SSCH                    0x33
#define PRIV_B2_STSCH                   0x34
#define PRIV_B2_TSCH                    0x35
#define PRIV_B2_TPI                     0x36
#define PRIV_B2_SAL                     0x37
#define PRIV_B2_RSCH                    0x38
#define PRIV_B2_STCRW                   0x39
#define PRIV_B2_STCPS                   0x3a
#define PRIV_B2_RCHP                    0x3b
#define PRIV_B2_SCHM                    0x3c
#define PRIV_B2_CHSC                    0x5f
#define PRIV_B2_SIGA                    0x74
#define PRIV_B2_XSCH                    0x76

#define PRIV_EB_SQBS                    0x8a
#define PRIV_EB_PCISTB                  0xd0
#define PRIV_EB_SIC                     0xd1

#define PRIV_B9_EQBS                    0x9c
#define PRIV_B9_CLP                     0xa0
#define PRIV_B9_PCISTG                  0xd0
#define PRIV_B9_PCILG                   0xd2
#define PRIV_B9_RPCIT                   0xd3

#define PRIV_E3_MPCIFC                  0xd0
#define PRIV_E3_STPCIFC                 0xd4
#define DIAG_IPL                        0x308
#define DIAG_KVM_HYPERCALL              0x500
#define DIAG_KVM_BREAKPOINT             0x501
#define ICPT_INSTRUCTION                0x04
#define ICPT_PROGRAM                    0x08
#define ICPT_EXT_INT                    0x14
#define ICPT_WAITPSW                    0x1c
#define ICPT_SOFT_INTERCEPT             0x24
#define ICPT_CPU_STOP                   0x28
#define ICPT_IO                         0x40
static CPUWatchpoint hw_watchpoint;
/*
 * We don't use a list because this structure is also used to transmit the
 * hardware breakpoints to the kernel.
 */
static struct kvm_hw_breakpoint *hw_breakpoints;
static int nb_hw_breakpoints;
const KVMCapabilityInfo kvm_arch_required_capabilities[] = {
    KVM_CAP_LAST_INFO
};
static int cap_sync_regs;
static int cap_async_pf;
static void *legacy_s390_alloc(size_t size, uint64_t *align);
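
/*
 * Collaborative memory management (CMMA) helpers: the two check functions
 * below only probe with KVM_HAS_DEVICE_ATTR whether the kernel knows the
 * respective KVM_S390_VM_MEM_CTRL attribute; the attribute itself is set
 * later with KVM_SET_DEVICE_ATTR.
 */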
static int kvm_s390_check_clear_cmma(KVMState *s)
{
    struct kvm_device_attr attr = {
        .group = KVM_S390_VM_MEM_CTRL,
        .attr = KVM_S390_VM_MEM_CLR_CMMA,
    };

    return kvm_vm_ioctl(s, KVM_HAS_DEVICE_ATTR, &attr);
}
static int kvm_s390_check_enable_cmma(KVMState *s)
{
    struct kvm_device_attr attr = {
        .group = KVM_S390_VM_MEM_CTRL,
        .attr = KVM_S390_VM_MEM_ENABLE_CMMA,
    };

    return kvm_vm_ioctl(s, KVM_HAS_DEVICE_ATTR, &attr);
}
void kvm_s390_clear_cmma_callback(void *opaque)
{
    int rc;
    KVMState *s = opaque;
    struct kvm_device_attr attr = {
        .group = KVM_S390_VM_MEM_CTRL,
        .attr = KVM_S390_VM_MEM_CLR_CMMA,
    };

    rc = kvm_vm_ioctl(s, KVM_SET_DEVICE_ATTR, &attr);
    trace_kvm_clear_cmma(rc);
}
static void kvm_s390_enable_cmma(KVMState *s)
{
    int rc;
    struct kvm_device_attr attr = {
        .group = KVM_S390_VM_MEM_CTRL,
        .attr = KVM_S390_VM_MEM_ENABLE_CMMA,
    };

    if (kvm_s390_check_enable_cmma(s) || kvm_s390_check_clear_cmma(s)) {
        return;
    }

    rc = kvm_vm_ioctl(s, KVM_SET_DEVICE_ATTR, &attr);
    if (!rc) {
        qemu_register_reset(kvm_s390_clear_cmma_callback, s);
    }
    trace_kvm_enable_cmma(rc);
}
int kvm_arch_init(KVMState *s)
{
    cap_sync_regs = kvm_check_extension(s, KVM_CAP_SYNC_REGS);
    cap_async_pf = kvm_check_extension(s, KVM_CAP_ASYNC_PF);

    if (kvm_check_extension(s, KVM_CAP_VM_ATTRIBUTES)) {
        kvm_s390_enable_cmma(s);
    }

    if (!kvm_check_extension(s, KVM_CAP_S390_GMAP)
        || !kvm_check_extension(s, KVM_CAP_S390_COW)) {
        phys_mem_set_alloc(legacy_s390_alloc);
    }

    return 0;
}
unsigned long kvm_arch_vcpu_id(CPUState *cpu)
{
    return cpu->cpu_index;
}
int kvm_arch_init_vcpu(CPUState *cs)
{
    S390CPU *cpu = S390_CPU(cs);
    kvm_s390_set_cpu_state(cpu, cpu->env.cpu_state);
    return 0;
}
void kvm_s390_reset_vcpu(S390CPU *cpu)
{
    CPUState *cs = CPU(cpu);

    /* The initial reset call is needed here to reset in-kernel
     * vcpu data that we can't access directly from QEMU
     * (i.e. with older kernels which don't support sync_regs/ONE_REG).
     * Before this ioctl cpu_synchronize_state() is called in common kvm
     * code (kvm-all). */
    if (kvm_vcpu_ioctl(cs, KVM_S390_INITIAL_RESET, NULL)) {
        error_report("Initial CPU reset failed on CPU %i", cs->cpu_index);
    }
}
static int can_sync_regs(CPUState *cs, int regs)
{
    return cap_sync_regs && (cs->kvm_run->kvm_valid_regs & regs) == regs;
}
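
/*
 * Register transfer strategy: whenever the kernel exposes a register set
 * through the shared kvm_run sync_regs area (cap_sync_regs plus the
 * matching kvm_valid_regs bit), kvm_arch_put_registers() and
 * kvm_arch_get_registers() below access that area directly and mark
 * kvm_dirty_regs, avoiding one KVM_SET_REGS/KVM_GET_REGS style ioctl
 * round trip per register class.
 */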
int kvm_arch_put_registers(CPUState *cs, int level)
{
    S390CPU *cpu = S390_CPU(cs);
    CPUS390XState *env = &cpu->env;
    struct kvm_sregs sregs;
    struct kvm_regs regs;
    struct kvm_fpu fpu = {};
    int r;
    int i;

    /* always save the PSW and the GPRS */
    cs->kvm_run->psw_addr = env->psw.addr;
    cs->kvm_run->psw_mask = env->psw.mask;

    if (can_sync_regs(cs, KVM_SYNC_GPRS)) {
        for (i = 0; i < 16; i++) {
            cs->kvm_run->s.regs.gprs[i] = env->regs[i];
            cs->kvm_run->kvm_dirty_regs |= KVM_SYNC_GPRS;
        }
    } else {
        for (i = 0; i < 16; i++) {
            regs.gprs[i] = env->regs[i];
        }
        r = kvm_vcpu_ioctl(cs, KVM_SET_REGS, &regs);
        if (r < 0) {
            return r;
        }
    }

    /* Floating point */
    for (i = 0; i < 16; i++) {
        fpu.fprs[i] = env->fregs[i].ll;
    }
    fpu.fpc = env->fpc;

    r = kvm_vcpu_ioctl(cs, KVM_SET_FPU, &fpu);
    if (r < 0) {
        return r;
    }

    /* Do we need to save more than that? */
    if (level == KVM_PUT_RUNTIME_STATE) {
        return 0;
    }

    if (can_sync_regs(cs, KVM_SYNC_ARCH0)) {
        cs->kvm_run->s.regs.cputm = env->cputm;
        cs->kvm_run->s.regs.ckc = env->ckc;
        cs->kvm_run->s.regs.todpr = env->todpr;
        cs->kvm_run->s.regs.gbea = env->gbea;
        cs->kvm_run->s.regs.pp = env->pp;
        cs->kvm_run->kvm_dirty_regs |= KVM_SYNC_ARCH0;
    } else {
        /*
         * These ONE_REGS are not protected by a capability. As they are only
         * necessary for migration we just trace a possible error, but don't
         * return with an error return code.
         */
        kvm_set_one_reg(cs, KVM_REG_S390_CPU_TIMER, &env->cputm);
        kvm_set_one_reg(cs, KVM_REG_S390_CLOCK_COMP, &env->ckc);
        kvm_set_one_reg(cs, KVM_REG_S390_TODPR, &env->todpr);
        kvm_set_one_reg(cs, KVM_REG_S390_GBEA, &env->gbea);
        kvm_set_one_reg(cs, KVM_REG_S390_PP, &env->pp);
    }

    /* pfault parameters */
    if (can_sync_regs(cs, KVM_SYNC_PFAULT)) {
        cs->kvm_run->s.regs.pft = env->pfault_token;
        cs->kvm_run->s.regs.pfs = env->pfault_select;
        cs->kvm_run->s.regs.pfc = env->pfault_compare;
        cs->kvm_run->kvm_dirty_regs |= KVM_SYNC_PFAULT;
    } else if (cap_async_pf) {
        r = kvm_set_one_reg(cs, KVM_REG_S390_PFTOKEN, &env->pfault_token);
        if (r < 0) {
            return r;
        }
        r = kvm_set_one_reg(cs, KVM_REG_S390_PFCOMPARE, &env->pfault_compare);
        if (r < 0) {
            return r;
        }
        r = kvm_set_one_reg(cs, KVM_REG_S390_PFSELECT, &env->pfault_select);
        if (r < 0) {
            return r;
        }
    }

    /* access registers and control registers */
    if (can_sync_regs(cs, KVM_SYNC_ACRS | KVM_SYNC_CRS)) {
        for (i = 0; i < 16; i++) {
            cs->kvm_run->s.regs.acrs[i] = env->aregs[i];
            cs->kvm_run->s.regs.crs[i] = env->cregs[i];
        }
        cs->kvm_run->kvm_dirty_regs |= KVM_SYNC_ACRS;
        cs->kvm_run->kvm_dirty_regs |= KVM_SYNC_CRS;
    } else {
        for (i = 0; i < 16; i++) {
            sregs.acrs[i] = env->aregs[i];
            sregs.crs[i] = env->cregs[i];
        }
        r = kvm_vcpu_ioctl(cs, KVM_SET_SREGS, &sregs);
        if (r < 0) {
            return r;
        }
    }

    /* Finally the prefix */
    if (can_sync_regs(cs, KVM_SYNC_PREFIX)) {
        cs->kvm_run->s.regs.prefix = env->psa;
        cs->kvm_run->kvm_dirty_regs |= KVM_SYNC_PREFIX;
    }
    /* prefix is only supported via sync regs */

    return 0;
}
int kvm_arch_get_registers(CPUState *cs)
{
    S390CPU *cpu = S390_CPU(cs);
    CPUS390XState *env = &cpu->env;
    struct kvm_sregs sregs;
    struct kvm_regs regs;
    struct kvm_fpu fpu;
    int i, r;

    /* get the PSW */
    env->psw.addr = cs->kvm_run->psw_addr;
    env->psw.mask = cs->kvm_run->psw_mask;

    /* the GPRS */
    if (can_sync_regs(cs, KVM_SYNC_GPRS)) {
        for (i = 0; i < 16; i++) {
            env->regs[i] = cs->kvm_run->s.regs.gprs[i];
        }
    } else {
        r = kvm_vcpu_ioctl(cs, KVM_GET_REGS, &regs);
        if (r < 0) {
            return r;
        }
        for (i = 0; i < 16; i++) {
            env->regs[i] = regs.gprs[i];
        }
    }

    /* The ACRS and CRS */
    if (can_sync_regs(cs, KVM_SYNC_ACRS | KVM_SYNC_CRS)) {
        for (i = 0; i < 16; i++) {
            env->aregs[i] = cs->kvm_run->s.regs.acrs[i];
            env->cregs[i] = cs->kvm_run->s.regs.crs[i];
        }
    } else {
        r = kvm_vcpu_ioctl(cs, KVM_GET_SREGS, &sregs);
        if (r < 0) {
            return r;
        }
        for (i = 0; i < 16; i++) {
            env->aregs[i] = sregs.acrs[i];
            env->cregs[i] = sregs.crs[i];
        }
    }

    /* Floating point */
    r = kvm_vcpu_ioctl(cs, KVM_GET_FPU, &fpu);
    if (r < 0) {
        return r;
    }
    for (i = 0; i < 16; i++) {
        env->fregs[i].ll = fpu.fprs[i];
    }
    env->fpc = fpu.fpc;

    /* The prefix */
    if (can_sync_regs(cs, KVM_SYNC_PREFIX)) {
        env->psa = cs->kvm_run->s.regs.prefix;
    }

    if (can_sync_regs(cs, KVM_SYNC_ARCH0)) {
        env->cputm = cs->kvm_run->s.regs.cputm;
        env->ckc = cs->kvm_run->s.regs.ckc;
        env->todpr = cs->kvm_run->s.regs.todpr;
        env->gbea = cs->kvm_run->s.regs.gbea;
        env->pp = cs->kvm_run->s.regs.pp;
    } else {
        /*
         * These ONE_REGS are not protected by a capability. As they are only
         * necessary for migration we just trace a possible error, but don't
         * return with an error return code.
         */
        kvm_get_one_reg(cs, KVM_REG_S390_CPU_TIMER, &env->cputm);
        kvm_get_one_reg(cs, KVM_REG_S390_CLOCK_COMP, &env->ckc);
        kvm_get_one_reg(cs, KVM_REG_S390_TODPR, &env->todpr);
        kvm_get_one_reg(cs, KVM_REG_S390_GBEA, &env->gbea);
        kvm_get_one_reg(cs, KVM_REG_S390_PP, &env->pp);
    }

    /* pfault parameters */
    if (can_sync_regs(cs, KVM_SYNC_PFAULT)) {
        env->pfault_token = cs->kvm_run->s.regs.pft;
        env->pfault_select = cs->kvm_run->s.regs.pfs;
        env->pfault_compare = cs->kvm_run->s.regs.pfc;
    } else if (cap_async_pf) {
        r = kvm_get_one_reg(cs, KVM_REG_S390_PFTOKEN, &env->pfault_token);
        if (r < 0) {
            return r;
        }
        r = kvm_get_one_reg(cs, KVM_REG_S390_PFCOMPARE, &env->pfault_compare);
        if (r < 0) {
            return r;
        }
        r = kvm_get_one_reg(cs, KVM_REG_S390_PFSELECT, &env->pfault_select);
        if (r < 0) {
            return r;
        }
    }

    return 0;
}
/*
 * Legacy layout for s390:
 * Older S390 KVM requires the topmost vma of the RAM to be
 * smaller than a system-defined value, which is at least 256GB.
 * Larger systems have larger values. We put the guest between
 * the end of data segment (system break) and this value. We
 * use 32GB as a base to have enough room for the system break
 * to grow. We also have to use MAP parameters that avoid
 * read-only mapping of guest pages.
 */
static void *legacy_s390_alloc(size_t size, uint64_t *align)
{
    void *mem;

    mem = mmap((void *) 0x800000000ULL, size,
               PROT_EXEC|PROT_READ|PROT_WRITE,
               MAP_SHARED | MAP_ANONYMOUS | MAP_FIXED, -1, 0);
    return mem == MAP_FAILED ? NULL : mem;
}
/* DIAG 501 is used for sw breakpoints */
static const uint8_t diag_501[] = {0x83, 0x24, 0x05, 0x01};

int kvm_arch_insert_sw_breakpoint(CPUState *cs, struct kvm_sw_breakpoint *bp)
{
    if (cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&bp->saved_insn,
                            sizeof(diag_501), 0) ||
        cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)diag_501,
                            sizeof(diag_501), 1)) {
        return -EINVAL;
    }
    return 0;
}
int kvm_arch_remove_sw_breakpoint(CPUState *cs, struct kvm_sw_breakpoint *bp)
{
    uint8_t t[sizeof(diag_501)];

    if (cpu_memory_rw_debug(cs, bp->pc, t, sizeof(diag_501), 0)) {
        return -EINVAL;
    } else if (memcmp(t, diag_501, sizeof(diag_501))) {
        return -EINVAL;
    } else if (cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&bp->saved_insn,
                                   sizeof(diag_501), 1)) {
        return -EINVAL;
    }
    return 0;
}
static struct kvm_hw_breakpoint *find_hw_breakpoint(target_ulong addr,
                                                    int len, int type)
{
    int n;

    for (n = 0; n < nb_hw_breakpoints; n++) {
        if (hw_breakpoints[n].addr == addr && hw_breakpoints[n].type == type &&
            (hw_breakpoints[n].len == len || len == -1)) {
            return &hw_breakpoints[n];
        }
    }

    return NULL;
}
static int insert_hw_breakpoint(target_ulong addr, int len, int type)
{
    int size;

    if (find_hw_breakpoint(addr, len, type)) {
        return -EEXIST;
    }

    size = (nb_hw_breakpoints + 1) * sizeof(struct kvm_hw_breakpoint);

    if (!hw_breakpoints) {
        nb_hw_breakpoints = 0;
        hw_breakpoints = (struct kvm_hw_breakpoint *)g_try_malloc(size);
    } else {
        hw_breakpoints =
            (struct kvm_hw_breakpoint *)g_try_realloc(hw_breakpoints, size);
    }

    if (!hw_breakpoints) {
        nb_hw_breakpoints = 0;
        return -ENOMEM;
    }

    hw_breakpoints[nb_hw_breakpoints].addr = addr;
    hw_breakpoints[nb_hw_breakpoints].len = len;
    hw_breakpoints[nb_hw_breakpoints].type = type;

    nb_hw_breakpoints++;

    return 0;
}
int kvm_arch_insert_hw_breakpoint(target_ulong addr,
                                  target_ulong len, int type)
{
    switch (type) {
    case GDB_BREAKPOINT_HW:
        type = KVM_HW_BP;
        break;
    case GDB_WATCHPOINT_WRITE:
        if (len < 1) {
            return -EINVAL;
        }
        type = KVM_HW_WP_WRITE;
        break;
    default:
        return -ENOSYS;
    }
    return insert_hw_breakpoint(addr, len, type);
}
int kvm_arch_remove_hw_breakpoint(target_ulong addr,
                                  target_ulong len, int type)
{
    int size;
    struct kvm_hw_breakpoint *bp = find_hw_breakpoint(addr, len, type);

    if (bp == NULL) {
        return -ENOENT;
    }

    nb_hw_breakpoints--;
    if (nb_hw_breakpoints > 0) {
        /*
         * In order to trim the array, move the last element to the position to
         * be removed - if necessary.
         */
        if (bp != &hw_breakpoints[nb_hw_breakpoints]) {
            *bp = hw_breakpoints[nb_hw_breakpoints];
        }
        size = nb_hw_breakpoints * sizeof(struct kvm_hw_breakpoint);
        hw_breakpoints =
            (struct kvm_hw_breakpoint *)g_realloc(hw_breakpoints, size);
    } else {
        g_free(hw_breakpoints);
        hw_breakpoints = NULL;
    }

    return 0;
}
void kvm_arch_remove_all_hw_breakpoints(void)
{
    nb_hw_breakpoints = 0;
    g_free(hw_breakpoints);
    hw_breakpoints = NULL;
}
void kvm_arch_update_guest_debug(CPUState *cpu, struct kvm_guest_debug *dbg)
{
    int i;

    if (nb_hw_breakpoints > 0) {
        dbg->arch.nr_hw_bp = nb_hw_breakpoints;
        dbg->arch.hw_bp = hw_breakpoints;

        for (i = 0; i < nb_hw_breakpoints; ++i) {
            hw_breakpoints[i].phys_addr = s390_cpu_get_phys_addr_debug(cpu,
                                                       hw_breakpoints[i].addr);
        }
        dbg->control |= KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_HW_BP;
    } else {
        dbg->arch.nr_hw_bp = 0;
        dbg->arch.hw_bp = NULL;
    }
}
void kvm_arch_pre_run(CPUState *cpu, struct kvm_run *run)
{
}

void kvm_arch_post_run(CPUState *cpu, struct kvm_run *run)
{
}

int kvm_arch_process_async_events(CPUState *cs)
{
    return cs->halted;
}
static int s390_kvm_irq_to_interrupt(struct kvm_s390_irq *irq,
                                     struct kvm_s390_interrupt *interrupt)
{
    int r = 0;

    interrupt->type = irq->type;
    switch (irq->type) {
    case KVM_S390_INT_VIRTIO:
        interrupt->parm = irq->u.ext.ext_params;
        /* fall through */
    case KVM_S390_INT_PFAULT_INIT:
    case KVM_S390_INT_PFAULT_DONE:
        interrupt->parm64 = irq->u.ext.ext_params2;
        break;
    case KVM_S390_PROGRAM_INT:
        interrupt->parm = irq->u.pgm.code;
        break;
    case KVM_S390_SIGP_SET_PREFIX:
        interrupt->parm = irq->u.prefix.address;
        break;
    case KVM_S390_INT_SERVICE:
        interrupt->parm = irq->u.ext.ext_params;
        break;
    case KVM_S390_MCHK:
        interrupt->parm = irq->u.mchk.cr14;
        interrupt->parm64 = irq->u.mchk.mcic;
        break;
    case KVM_S390_INT_EXTERNAL_CALL:
        interrupt->parm = irq->u.extcall.code;
        break;
    case KVM_S390_INT_EMERGENCY:
        interrupt->parm = irq->u.emerg.code;
        break;
    case KVM_S390_SIGP_STOP:
    case KVM_S390_RESTART:
        break; /* These types have no parameters */
    case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
        interrupt->parm = irq->u.io.subchannel_id << 16;
        interrupt->parm |= irq->u.io.subchannel_nr;
        interrupt->parm64 = (uint64_t)irq->u.io.io_int_parm << 32;
        interrupt->parm64 |= irq->u.io.io_int_word;
        break;
    default:
        r = -EINVAL;
        break;
    }
    return r;
}
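
/*
 * The conversion above feeds the legacy KVM_S390_INTERRUPT ioctl, which
 * only transports type/parm/parm64; the full kvm_s390_irq layout is only
 * understood by the flic injection path used in
 * kvm_s390_floating_interrupt() below.
 */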
void kvm_s390_vcpu_interrupt(S390CPU *cpu, struct kvm_s390_irq *irq)
{
    struct kvm_s390_interrupt kvmint = {};
    CPUState *cs = CPU(cpu);
    int r;

    r = s390_kvm_irq_to_interrupt(irq, &kvmint);
    if (r < 0) {
        fprintf(stderr, "%s called with bogus interrupt\n", __func__);
        exit(1);
    }

    r = kvm_vcpu_ioctl(cs, KVM_S390_INTERRUPT, &kvmint);
    if (r < 0) {
        fprintf(stderr, "KVM failed to inject interrupt\n");
        exit(1);
    }
}
static void __kvm_s390_floating_interrupt(struct kvm_s390_irq *irq)
{
    struct kvm_s390_interrupt kvmint = {};
    int r;

    r = s390_kvm_irq_to_interrupt(irq, &kvmint);
    if (r < 0) {
        fprintf(stderr, "%s called with bogus interrupt\n", __func__);
        exit(1);
    }

    r = kvm_vm_ioctl(kvm_state, KVM_S390_INTERRUPT, &kvmint);
    if (r < 0) {
        fprintf(stderr, "KVM failed to inject interrupt\n");
        exit(1);
    }
}
void kvm_s390_floating_interrupt(struct kvm_s390_irq *irq)
{
    static bool use_flic = true;
    int r;

    if (use_flic) {
        r = kvm_s390_inject_flic(irq);
        if (r == -ENOSYS) {
            use_flic = false;
        }
        if (!r) {
            return;
        }
    }
    __kvm_s390_floating_interrupt(irq);
}
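
/*
 * Floating interrupts are machine-wide rather than bound to a single
 * vcpu, hence the VM-scoped injection (flic or kvm_vm_ioctl) above, in
 * contrast to the vcpu-scoped kvm_s390_vcpu_interrupt().
 */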
void kvm_s390_virtio_irq(int config_change, uint64_t token)
{
    struct kvm_s390_irq irq = {
        .type = KVM_S390_INT_VIRTIO,
        .u.ext.ext_params = config_change,
        .u.ext.ext_params2 = token,
    };

    kvm_s390_floating_interrupt(&irq);
}
void kvm_s390_service_interrupt(uint32_t parm)
{
    struct kvm_s390_irq irq = {
        .type = KVM_S390_INT_SERVICE,
        .u.ext.ext_params = parm,
    };

    kvm_s390_floating_interrupt(&irq);
}
static void enter_pgmcheck(S390CPU *cpu, uint16_t code)
{
    struct kvm_s390_irq irq = {
        .type = KVM_S390_PROGRAM_INT,
        .u.pgm.code = code,
    };

    kvm_s390_vcpu_interrupt(cpu, &irq);
}
void kvm_s390_access_exception(S390CPU *cpu, uint16_t code, uint64_t te_code)
{
    struct kvm_s390_irq irq = {
        .type = KVM_S390_PROGRAM_INT,
        .u.pgm.code = code,
        .u.pgm.trans_exc_code = te_code,
        .u.pgm.exc_access_id = te_code & 3,
    };

    kvm_s390_vcpu_interrupt(cpu, &irq);
}
static int kvm_sclp_service_call(S390CPU *cpu, struct kvm_run *run,
                                 uint16_t ipbh0)
{
    CPUS390XState *env = &cpu->env;
    uint64_t sccb;
    uint32_t code;
    int r = 0;

    cpu_synchronize_state(CPU(cpu));
    sccb = env->regs[ipbh0 & 0xf];
    code = env->regs[(ipbh0 & 0xf0) >> 4];

    r = sclp_service_call(env, sccb, code);
    if (r < 0) {
        enter_pgmcheck(cpu, -r);
    } else {
        setcc(cpu, r);
    }

    return 0;
}
static int handle_b2(S390CPU *cpu, struct kvm_run *run, uint8_t ipa1)
{
    CPUS390XState *env = &cpu->env;
    int rc = 0;
    uint16_t ipbh0 = (run->s390_sieic.ipb & 0xffff0000) >> 16;

    cpu_synchronize_state(CPU(cpu));

    switch (ipa1) {
    case PRIV_B2_XSCH:
        ioinst_handle_xsch(cpu, env->regs[1]);
        break;
    case PRIV_B2_CSCH:
        ioinst_handle_csch(cpu, env->regs[1]);
        break;
    case PRIV_B2_HSCH:
        ioinst_handle_hsch(cpu, env->regs[1]);
        break;
    case PRIV_B2_MSCH:
        ioinst_handle_msch(cpu, env->regs[1], run->s390_sieic.ipb);
        break;
    case PRIV_B2_SSCH:
        ioinst_handle_ssch(cpu, env->regs[1], run->s390_sieic.ipb);
        break;
    case PRIV_B2_STCRW:
        ioinst_handle_stcrw(cpu, run->s390_sieic.ipb);
        break;
    case PRIV_B2_STSCH:
        ioinst_handle_stsch(cpu, env->regs[1], run->s390_sieic.ipb);
        break;
    case PRIV_B2_TSCH:
        /* We should only get tsch via KVM_EXIT_S390_TSCH. */
        fprintf(stderr, "Spurious tsch intercept\n");
        break;
    case PRIV_B2_CHSC:
        ioinst_handle_chsc(cpu, run->s390_sieic.ipb);
        break;
    case PRIV_B2_TPI:
        /* This should have been handled by kvm already. */
        fprintf(stderr, "Spurious tpi intercept\n");
        break;
    case PRIV_B2_SCHM:
        ioinst_handle_schm(cpu, env->regs[1], env->regs[2],
                           run->s390_sieic.ipb);
        break;
    case PRIV_B2_RSCH:
        ioinst_handle_rsch(cpu, env->regs[1]);
        break;
    case PRIV_B2_RCHP:
        ioinst_handle_rchp(cpu, env->regs[1]);
        break;
    case PRIV_B2_STCPS:
        /* We do not provide this instruction, it is suppressed. */
        break;
    case PRIV_B2_SAL:
        ioinst_handle_sal(cpu, env->regs[1]);
        break;
    case PRIV_B2_SIGA:
        /* Not provided, set CC = 3 for subchannel not operational */
        setcc(cpu, 3);
        break;
    case PRIV_B2_SCLP_CALL:
        rc = kvm_sclp_service_call(cpu, run, ipbh0);
        break;
    default:
        rc = -1;
        DPRINTF("KVM: unhandled PRIV: 0xb2%x\n", ipa1);
        break;
    }

    return rc;
}
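
/*
 * The two helpers below recover the storage-operand address of an
 * intercepted RXY/RSY-format instruction from the SIE instruction
 * parcels: the base register and the 20-bit signed displacement are
 * encoded in the ipb field, the index register (RXY only) in the low
 * nibble of ipa.
 */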
static uint64_t get_base_disp_rxy(S390CPU *cpu, struct kvm_run *run)
{
    CPUS390XState *env = &cpu->env;
    uint32_t x2 = (run->s390_sieic.ipa & 0x000f);
    uint32_t base2 = run->s390_sieic.ipb >> 28;
    uint32_t disp2 = ((run->s390_sieic.ipb & 0x0fff0000) >> 16) +
                     ((run->s390_sieic.ipb & 0xff00) << 4);

    /* sign-extend the 20-bit displacement */
    if (disp2 & 0x80000) {
        disp2 += 0xfff00000;
    }

    return (base2 ? env->regs[base2] : 0) +
           (x2 ? env->regs[x2] : 0) + (long)(int)disp2;
}
static uint64_t get_base_disp_rsy(S390CPU *cpu, struct kvm_run *run)
{
    CPUS390XState *env = &cpu->env;
    uint32_t base2 = run->s390_sieic.ipb >> 28;
    uint32_t disp2 = ((run->s390_sieic.ipb & 0x0fff0000) >> 16) +
                     ((run->s390_sieic.ipb & 0xff00) << 4);

    /* sign-extend the 20-bit displacement */
    if (disp2 & 0x80000) {
        disp2 += 0xfff00000;
    }

    return (base2 ? env->regs[base2] : 0) + (long)(int)disp2;
}
static int kvm_clp_service_call(S390CPU *cpu, struct kvm_run *run)
{
    uint8_t r2 = (run->s390_sieic.ipb & 0x000f0000) >> 16;

    return clp_service_call(cpu, r2);
}
static int kvm_pcilg_service_call(S390CPU *cpu, struct kvm_run *run)
{
    uint8_t r1 = (run->s390_sieic.ipb & 0x00f00000) >> 20;
    uint8_t r2 = (run->s390_sieic.ipb & 0x000f0000) >> 16;

    return pcilg_service_call(cpu, r1, r2);
}
static int kvm_pcistg_service_call(S390CPU *cpu, struct kvm_run *run)
{
    uint8_t r1 = (run->s390_sieic.ipb & 0x00f00000) >> 20;
    uint8_t r2 = (run->s390_sieic.ipb & 0x000f0000) >> 16;

    return pcistg_service_call(cpu, r1, r2);
}
static int kvm_stpcifc_service_call(S390CPU *cpu, struct kvm_run *run)
{
    uint8_t r1 = (run->s390_sieic.ipa & 0x00f0) >> 4;
    uint64_t fiba;

    cpu_synchronize_state(CPU(cpu));
    fiba = get_base_disp_rxy(cpu, run);

    return stpcifc_service_call(cpu, r1, fiba);
}
static int kvm_sic_service_call(S390CPU *cpu, struct kvm_run *run)
{
    /* NOOP */
    return 0;
}
static int kvm_rpcit_service_call(S390CPU *cpu, struct kvm_run *run)
{
    uint8_t r1 = (run->s390_sieic.ipb & 0x00f00000) >> 20;
    uint8_t r2 = (run->s390_sieic.ipb & 0x000f0000) >> 16;

    return rpcit_service_call(cpu, r1, r2);
}
static int kvm_pcistb_service_call(S390CPU *cpu, struct kvm_run *run)
{
    uint8_t r1 = (run->s390_sieic.ipa & 0x00f0) >> 4;
    uint8_t r3 = run->s390_sieic.ipa & 0x000f;
    uint64_t gaddr;

    cpu_synchronize_state(CPU(cpu));
    gaddr = get_base_disp_rsy(cpu, run);

    return pcistb_service_call(cpu, r1, r3, gaddr);
}
static int kvm_mpcifc_service_call(S390CPU *cpu, struct kvm_run *run)
{
    uint8_t r1 = (run->s390_sieic.ipa & 0x00f0) >> 4;
    uint64_t fiba;

    cpu_synchronize_state(CPU(cpu));
    fiba = get_base_disp_rxy(cpu, run);

    return mpcifc_service_call(cpu, r1, fiba);
}
static int handle_b9(S390CPU *cpu, struct kvm_run *run, uint8_t ipa1)
{
    int r = 0;

    switch (ipa1) {
    case PRIV_B9_CLP:
        r = kvm_clp_service_call(cpu, run);
        break;
    case PRIV_B9_PCISTG:
        r = kvm_pcistg_service_call(cpu, run);
        break;
    case PRIV_B9_PCILG:
        r = kvm_pcilg_service_call(cpu, run);
        break;
    case PRIV_B9_RPCIT:
        r = kvm_rpcit_service_call(cpu, run);
        break;
    case PRIV_B9_EQBS:
        /* just inject exception */
        r = -1;
        break;
    default:
        r = -1;
        DPRINTF("KVM: unhandled PRIV: 0xb9%x\n", ipa1);
        break;
    }

    return r;
}
static int handle_eb(S390CPU *cpu, struct kvm_run *run, uint8_t ipbl)
{
    int r = 0;

    switch (ipbl) {
    case PRIV_EB_PCISTB:
        r = kvm_pcistb_service_call(cpu, run);
        break;
    case PRIV_EB_SIC:
        r = kvm_sic_service_call(cpu, run);
        break;
    case PRIV_EB_SQBS:
        /* just inject exception */
        r = -1;
        break;
    default:
        r = -1;
        DPRINTF("KVM: unhandled PRIV: 0xeb%x\n", ipbl);
        break;
    }

    return r;
}
static int handle_e3(S390CPU *cpu, struct kvm_run *run, uint8_t ipbl)
{
    int r = 0;

    switch (ipbl) {
    case PRIV_E3_MPCIFC:
        r = kvm_mpcifc_service_call(cpu, run);
        break;
    case PRIV_E3_STPCIFC:
        r = kvm_stpcifc_service_call(cpu, run);
        break;
    default:
        r = -1;
        DPRINTF("KVM: unhandled PRIV: 0xe3%x\n", ipbl);
        break;
    }

    return r;
}
static int handle_hypercall(S390CPU *cpu, struct kvm_run *run)
{
    CPUS390XState *env = &cpu->env;
    int ret;

    cpu_synchronize_state(CPU(cpu));
    ret = s390_virtio_hypercall(env);
    if (ret == -EINVAL) {
        enter_pgmcheck(cpu, PGM_SPECIFICATION);
        ret = 0;
    }

    return ret;
}
static void kvm_handle_diag_308(S390CPU *cpu, struct kvm_run *run)
{
    uint64_t r1, r3;

    cpu_synchronize_state(CPU(cpu));
    r1 = (run->s390_sieic.ipa & 0x00f0) >> 4;
    r3 = run->s390_sieic.ipa & 0x000f;
    handle_diag_308(&cpu->env, r1, r3);
}
static int handle_sw_breakpoint(S390CPU *cpu, struct kvm_run *run)
{
    CPUS390XState *env = &cpu->env;
    unsigned long pc;

    cpu_synchronize_state(CPU(cpu));

    /* diag 501 is 4 bytes long; the PSW points past the instruction */
    pc = env->psw.addr - 4;
    if (kvm_find_sw_breakpoint(CPU(cpu), pc)) {
        env->psw.addr = pc;
        return EXCP_DEBUG;
    }

    return -ENOENT;
}
#define DIAG_KVM_CODE_MASK 0x000000000000ffff

static int handle_diag(S390CPU *cpu, struct kvm_run *run, uint32_t ipb)
{
    int r = 0;
    uint16_t func_code;

    /*
     * For any diagnose call we support, bits 48-63 of the resulting
     * address specify the function code; the remainder is ignored.
     */
    func_code = decode_basedisp_rs(&cpu->env, ipb) & DIAG_KVM_CODE_MASK;
    switch (func_code) {
    case DIAG_IPL:
        kvm_handle_diag_308(cpu, run);
        break;
    case DIAG_KVM_HYPERCALL:
        r = handle_hypercall(cpu, run);
        break;
    case DIAG_KVM_BREAKPOINT:
        r = handle_sw_breakpoint(cpu, run);
        break;
    default:
        DPRINTF("KVM: unknown DIAG: 0x%x\n", func_code);
        enter_pgmcheck(cpu, PGM_SPECIFICATION);
        break;
    }

    return r;
}
typedef struct SigpInfo {
    S390CPU *cpu;
    uint64_t param;
    int cc;
    uint64_t *status_reg;
} SigpInfo;

static void set_sigp_status(SigpInfo *si, uint64_t status)
{
    *si->status_reg &= 0xffffffff00000000ULL;
    *si->status_reg |= status;
    si->cc = SIGP_CC_STATUS_STORED;
}
static void sigp_start(void *arg)
{
    SigpInfo *si = arg;

    s390_cpu_set_state(CPU_STATE_OPERATING, si->cpu);
    si->cc = SIGP_CC_ORDER_CODE_ACCEPTED;
}
static void sigp_stop(void *arg)
{
    SigpInfo *si = arg;
    struct kvm_s390_irq irq = {
        .type = KVM_S390_SIGP_STOP,
    };

    if (s390_cpu_get_state(si->cpu) != CPU_STATE_OPERATING) {
        si->cc = SIGP_CC_ORDER_CODE_ACCEPTED;
        return;
    }

    /* disabled wait - sleeping in user space */
    if (CPU(si->cpu)->halted) {
        s390_cpu_set_state(CPU_STATE_STOPPED, si->cpu);
    } else {
        /* execute the stop function */
        si->cpu->env.sigp_order = SIGP_STOP;
        kvm_s390_vcpu_interrupt(si->cpu, &irq);
    }
    si->cc = SIGP_CC_ORDER_CODE_ACCEPTED;
}
#define KVM_S390_STORE_STATUS_DEF_ADDR offsetof(LowCore, floating_pt_save_area)
#define SAVE_AREA_SIZE 512
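/*
 * kvm_s390_store_status() below fills the 512-byte save area in the
 * architected store-status layout; the fixed offsets of the memcpy
 * calls (FPRs at 0, GPRs at 128, PSW at 256, ...) mirror that layout,
 * and the clock comparator is shifted right by 8 bits before it is
 * stored.
 */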
static int kvm_s390_store_status(S390CPU *cpu, hwaddr addr, bool store_arch)
{
    static const uint8_t ar_id = 1;
    uint64_t ckc = cpu->env.ckc >> 8;
    void *mem;
    hwaddr len = SAVE_AREA_SIZE;

    mem = cpu_physical_memory_map(addr, &len, 1);
    if (!mem) {
        return -EFAULT;
    }
    if (len != SAVE_AREA_SIZE) {
        cpu_physical_memory_unmap(mem, len, 1, 0);
        return -EFAULT;
    }

    if (store_arch) {
        cpu_physical_memory_write(offsetof(LowCore, ar_access_id), &ar_id, 1);
    }
    memcpy(mem, &cpu->env.fregs, 128);
    memcpy(mem + 128, &cpu->env.regs, 128);
    memcpy(mem + 256, &cpu->env.psw, 16);
    memcpy(mem + 280, &cpu->env.psa, 4);
    memcpy(mem + 284, &cpu->env.fpc, 4);
    memcpy(mem + 292, &cpu->env.todpr, 4);
    memcpy(mem + 296, &cpu->env.cputm, 8);
    memcpy(mem + 304, &ckc, 8);
    memcpy(mem + 320, &cpu->env.aregs, 64);
    memcpy(mem + 384, &cpu->env.cregs, 128);

    cpu_physical_memory_unmap(mem, len, 1, len);

    return 0;
}
static void sigp_stop_and_store_status(void *arg)
{
    SigpInfo *si = arg;
    struct kvm_s390_irq irq = {
        .type = KVM_S390_SIGP_STOP,
    };

    /* disabled wait - sleeping in user space */
    if (s390_cpu_get_state(si->cpu) == CPU_STATE_OPERATING &&
        CPU(si->cpu)->halted) {
        s390_cpu_set_state(CPU_STATE_STOPPED, si->cpu);
    }

    switch (s390_cpu_get_state(si->cpu)) {
    case CPU_STATE_OPERATING:
        si->cpu->env.sigp_order = SIGP_STOP_STORE_STATUS;
        kvm_s390_vcpu_interrupt(si->cpu, &irq);
        /* store will be performed when handling the stop intercept */
        break;
    case CPU_STATE_STOPPED:
        /* already stopped, just store the status */
        cpu_synchronize_state(CPU(si->cpu));
        kvm_s390_store_status(si->cpu, KVM_S390_STORE_STATUS_DEF_ADDR, true);
        break;
    }
    si->cc = SIGP_CC_ORDER_CODE_ACCEPTED;
}
static void sigp_store_status_at_address(void *arg)
{
    SigpInfo *si = arg;
    uint32_t address = si->param & 0x7ffffe00u;

    /* cpu has to be stopped */
    if (s390_cpu_get_state(si->cpu) != CPU_STATE_STOPPED) {
        set_sigp_status(si, SIGP_STAT_INCORRECT_STATE);
        return;
    }

    cpu_synchronize_state(CPU(si->cpu));

    if (kvm_s390_store_status(si->cpu, address, false)) {
        set_sigp_status(si, SIGP_STAT_INVALID_PARAMETER);
        return;
    }

    si->cc = SIGP_CC_ORDER_CODE_ACCEPTED;
}
static void sigp_restart(void *arg)
{
    SigpInfo *si = arg;
    struct kvm_s390_irq irq = {
        .type = KVM_S390_RESTART,
    };

    kvm_s390_vcpu_interrupt(si->cpu, &irq);
    s390_cpu_set_state(CPU_STATE_OPERATING, si->cpu);
    si->cc = SIGP_CC_ORDER_CODE_ACCEPTED;
}
int kvm_s390_cpu_restart(S390CPU *cpu)
{
    SigpInfo si = {
        .cpu = cpu,
    };

    run_on_cpu(CPU(cpu), sigp_restart, &si);
    DPRINTF("DONE: KVM cpu restart: %p\n", &cpu->env);
    return 0;
}
static void sigp_initial_cpu_reset(void *arg)
{
    SigpInfo *si = arg;
    CPUState *cs = CPU(si->cpu);
    S390CPUClass *scc = S390_CPU_GET_CLASS(si->cpu);

    cpu_synchronize_state(cs);
    scc->initial_cpu_reset(cs);
    cpu_synchronize_post_reset(cs);
    si->cc = SIGP_CC_ORDER_CODE_ACCEPTED;
}
static void sigp_cpu_reset(void *arg)
{
    SigpInfo *si = arg;
    CPUState *cs = CPU(si->cpu);
    S390CPUClass *scc = S390_CPU_GET_CLASS(si->cpu);

    cpu_synchronize_state(cs);
    scc->cpu_reset(cs);
    cpu_synchronize_post_reset(cs);
    si->cc = SIGP_CC_ORDER_CODE_ACCEPTED;
}
static void sigp_set_prefix(void *arg)
{
    SigpInfo *si = arg;
    uint32_t addr = si->param & 0x7fffe000u;

    cpu_synchronize_state(CPU(si->cpu));

    if (!address_space_access_valid(&address_space_memory, addr,
                                    sizeof(struct LowCore), false)) {
        set_sigp_status(si, SIGP_STAT_INVALID_PARAMETER);
        return;
    }

    /* cpu has to be stopped */
    if (s390_cpu_get_state(si->cpu) != CPU_STATE_STOPPED) {
        set_sigp_status(si, SIGP_STAT_INCORRECT_STATE);
        return;
    }

    si->cpu->env.psa = addr;
    cpu_synchronize_post_init(CPU(si->cpu));
    si->cc = SIGP_CC_ORDER_CODE_ACCEPTED;
}
static int handle_sigp_single_dst(S390CPU *dst_cpu, uint8_t order,
                                  uint64_t param, uint64_t *status_reg)
{
    SigpInfo si = {
        .cpu = dst_cpu,
        .param = param,
        .status_reg = status_reg,
    };

    /* cpu available? */
    if (dst_cpu == NULL) {
        return SIGP_CC_NOT_OPERATIONAL;
    }

    /* only resets can break pending orders */
    if (dst_cpu->env.sigp_order != 0 &&
        order != SIGP_CPU_RESET &&
        order != SIGP_INITIAL_CPU_RESET) {
        return SIGP_CC_BUSY;
    }

    switch (order) {
    case SIGP_START:
        run_on_cpu(CPU(dst_cpu), sigp_start, &si);
        break;
    case SIGP_STOP:
        run_on_cpu(CPU(dst_cpu), sigp_stop, &si);
        break;
    case SIGP_RESTART:
        run_on_cpu(CPU(dst_cpu), sigp_restart, &si);
        break;
    case SIGP_STOP_STORE_STATUS:
        run_on_cpu(CPU(dst_cpu), sigp_stop_and_store_status, &si);
        break;
    case SIGP_STORE_STATUS_ADDR:
        run_on_cpu(CPU(dst_cpu), sigp_store_status_at_address, &si);
        break;
    case SIGP_SET_PREFIX:
        run_on_cpu(CPU(dst_cpu), sigp_set_prefix, &si);
        break;
    case SIGP_INITIAL_CPU_RESET:
        run_on_cpu(CPU(dst_cpu), sigp_initial_cpu_reset, &si);
        break;
    case SIGP_CPU_RESET:
        run_on_cpu(CPU(dst_cpu), sigp_cpu_reset, &si);
        break;
    default:
        DPRINTF("KVM: unknown SIGP: 0x%x\n", order);
        set_sigp_status(&si, SIGP_STAT_INVALID_ORDER);
    }

    return si.cc;
}
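
/*
 * All of the sigp_*() helpers above are executed via run_on_cpu(), i.e.
 * on the target vcpu's own thread, so they can safely manipulate that
 * vcpu's state; the resulting condition code travels back through the
 * SigpInfo living on the caller's stack.
 */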
static int sigp_set_architecture(S390CPU *cpu, uint32_t param,
                                 uint64_t *status_reg)
{
    CPUState *cur_cs;
    S390CPU *cur_cpu;

    /* due to the BQL, we are the only active cpu */
    CPU_FOREACH(cur_cs) {
        cur_cpu = S390_CPU(cur_cs);
        if (cur_cpu->env.sigp_order != 0) {
            return SIGP_CC_BUSY;
        }
        cpu_synchronize_state(cur_cs);
        /* all but the current one have to be stopped */
        if (cur_cpu != cpu &&
            s390_cpu_get_state(cur_cpu) != CPU_STATE_STOPPED) {
            *status_reg &= 0xffffffff00000000ULL;
            *status_reg |= SIGP_STAT_INCORRECT_STATE;
            return SIGP_CC_STATUS_STORED;
        }
    }

    switch (param & 0xff) {
    case SIGP_MODE_ESA_S390:
        /* not supported */
        return SIGP_CC_NOT_OPERATIONAL;
    case SIGP_MODE_Z_ARCH_TRANS_ALL_PSW:
    case SIGP_MODE_Z_ARCH_TRANS_CUR_PSW:
        CPU_FOREACH(cur_cs) {
            cur_cpu = S390_CPU(cur_cs);
            cur_cpu->env.pfault_token = -1UL;
        }
        break;
    default:
        *status_reg &= 0xffffffff00000000ULL;
        *status_reg |= SIGP_STAT_INVALID_PARAMETER;
        return SIGP_CC_STATUS_STORED;
    }

    return SIGP_CC_ORDER_CODE_ACCEPTED;
}
#define SIGP_ORDER_MASK 0x000000ff
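
/*
 * SIGP encodes the order code in the second-operand address, uses r1 as
 * the status register and takes the parameter from r1 or r1 + 1,
 * depending on whether r1 is odd or even; handle_sigp() below extracts
 * all three before dispatching.
 */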
static int handle_sigp(S390CPU *cpu, struct kvm_run *run, uint8_t ipa1)
{
    CPUS390XState *env = &cpu->env;
    const uint8_t r1 = ipa1 >> 4;
    const uint8_t r3 = ipa1 & 0x0f;
    int ret;
    uint8_t order;
    uint64_t *status_reg;
    uint64_t param;
    S390CPU *dst_cpu = NULL;

    cpu_synchronize_state(CPU(cpu));

    /* get order code */
    order = decode_basedisp_rs(env, run->s390_sieic.ipb) & SIGP_ORDER_MASK;
    status_reg = &env->regs[r1];
    param = (r1 % 2) ? env->regs[r1] : env->regs[r1 + 1];

    switch (order) {
    case SIGP_SET_ARCH:
        ret = sigp_set_architecture(cpu, param, status_reg);
        break;
    default:
        /* all other sigp orders target a single vcpu */
        dst_cpu = s390_cpu_addr2state(env->regs[r3]);
        ret = handle_sigp_single_dst(dst_cpu, order, param, status_reg);
    }

    trace_kvm_sigp_finished(order, CPU(cpu)->cpu_index,
                            dst_cpu ? CPU(dst_cpu)->cpu_index : -1, ret);

    if (ret >= 0) {
        setcc(cpu, ret);
        return 0;
    }

    return ret;
}
static int handle_instruction(S390CPU *cpu, struct kvm_run *run)
{
    unsigned int ipa0 = (run->s390_sieic.ipa & 0xff00);
    uint8_t ipa1 = run->s390_sieic.ipa & 0x00ff;
    int r = -1;

    DPRINTF("handle_instruction 0x%x 0x%x\n",
            run->s390_sieic.ipa, run->s390_sieic.ipb);
    switch (ipa0) {
    case IPA0_B2:
        r = handle_b2(cpu, run, ipa1);
        break;
    case IPA0_B9:
        r = handle_b9(cpu, run, ipa1);
        break;
    case IPA0_EB:
        r = handle_eb(cpu, run, run->s390_sieic.ipb & 0xff);
        break;
    case IPA0_E3:
        r = handle_e3(cpu, run, run->s390_sieic.ipb & 0xff);
        break;
    case IPA0_DIAG:
        r = handle_diag(cpu, run, run->s390_sieic.ipb);
        break;
    case IPA0_SIGP:
        r = handle_sigp(cpu, run, ipa1);
        break;
    }

    if (r < 0) {
        r = 0;
        enter_pgmcheck(cpu, 0x0001);
    }

    return r;
}
static bool is_special_wait_psw(CPUState *cs)
{
    /* signal quiesce */
    return cs->kvm_run->psw_addr == 0xfffUL;
}
static void guest_panicked(void)
{
    qapi_event_send_guest_panicked(GUEST_PANIC_ACTION_PAUSE,
                                   &error_abort);
    vm_stop(RUN_STATE_GUEST_PANICKED);
}
static void unmanageable_intercept(S390CPU *cpu, const char *str, int pswoffset)
{
    CPUState *cs = CPU(cpu);

    error_report("Unmanageable %s! CPU%i new PSW: 0x%016lx:%016lx",
                 str, cs->cpu_index, ldq_phys(cs->as, cpu->env.psa + pswoffset),
                 ldq_phys(cs->as, cpu->env.psa + pswoffset + 8));
    s390_cpu_halt(cpu);
    guest_panicked();
}
*cpu
)
1516 CPUState
*cs
= CPU(cpu
);
1517 struct kvm_run
*run
= cs
->kvm_run
;
1518 int icpt_code
= run
->s390_sieic
.icptcode
;
1521 DPRINTF("intercept: 0x%x (at 0x%lx)\n", icpt_code
,
1522 (long)cs
->kvm_run
->psw_addr
);
1523 switch (icpt_code
) {
1524 case ICPT_INSTRUCTION
:
1525 r
= handle_instruction(cpu
, run
);
1528 unmanageable_intercept(cpu
, "program interrupt",
1529 offsetof(LowCore
, program_new_psw
));
1533 unmanageable_intercept(cpu
, "external interrupt",
1534 offsetof(LowCore
, external_new_psw
));
1538 /* disabled wait, since enabled wait is handled in kernel */
1539 cpu_synchronize_state(cs
);
1540 if (s390_cpu_halt(cpu
) == 0) {
1541 if (is_special_wait_psw(cs
)) {
1542 qemu_system_shutdown_request();
1550 if (s390_cpu_set_state(CPU_STATE_STOPPED
, cpu
) == 0) {
1551 qemu_system_shutdown_request();
1553 if (cpu
->env
.sigp_order
== SIGP_STOP_STORE_STATUS
) {
1554 kvm_s390_store_status(cpu
, KVM_S390_STORE_STATUS_DEF_ADDR
,
1557 cpu
->env
.sigp_order
= 0;
1560 case ICPT_SOFT_INTERCEPT
:
1561 fprintf(stderr
, "KVM unimplemented icpt SOFT\n");
1565 fprintf(stderr
, "KVM unimplemented icpt IO\n");
1569 fprintf(stderr
, "Unknown intercept code: %d\n", icpt_code
);
1577 static int handle_tsch(S390CPU
*cpu
)
1579 CPUState
*cs
= CPU(cpu
);
1580 struct kvm_run
*run
= cs
->kvm_run
;
1583 cpu_synchronize_state(cs
);
1585 ret
= ioinst_handle_tsch(cpu
, cpu
->env
.regs
[1], run
->s390_tsch
.ipb
);
1589 * If an I/O interrupt had been dequeued, we have to reinject it.
1591 if (run
->s390_tsch
.dequeued
) {
1592 kvm_s390_io_interrupt(run
->s390_tsch
.subchannel_id
,
1593 run
->s390_tsch
.subchannel_nr
,
1594 run
->s390_tsch
.io_int_parm
,
1595 run
->s390_tsch
.io_int_word
);
1602 static int kvm_arch_handle_debug_exit(S390CPU
*cpu
)
1604 CPUState
*cs
= CPU(cpu
);
1605 struct kvm_run
*run
= cs
->kvm_run
;
1608 struct kvm_debug_exit_arch
*arch_info
= &run
->debug
.arch
;
1610 switch (arch_info
->type
) {
1611 case KVM_HW_WP_WRITE
:
1612 if (find_hw_breakpoint(arch_info
->addr
, -1, arch_info
->type
)) {
1613 cs
->watchpoint_hit
= &hw_watchpoint
;
1614 hw_watchpoint
.vaddr
= arch_info
->addr
;
1615 hw_watchpoint
.flags
= BP_MEM_WRITE
;
1620 if (find_hw_breakpoint(arch_info
->addr
, -1, arch_info
->type
)) {
1624 case KVM_SINGLESTEP
:
1625 if (cs
->singlestep_enabled
) {
int kvm_arch_handle_exit(CPUState *cs, struct kvm_run *run)
{
    S390CPU *cpu = S390_CPU(cs);
    int ret = 0;

    switch (run->exit_reason) {
    case KVM_EXIT_S390_SIEIC:
        ret = handle_intercept(cpu);
        break;
    case KVM_EXIT_S390_RESET:
        s390_reipl_request();
        break;
    case KVM_EXIT_S390_TSCH:
        ret = handle_tsch(cpu);
        break;
    case KVM_EXIT_DEBUG:
        ret = kvm_arch_handle_debug_exit(cpu);
        break;
    default:
        fprintf(stderr, "Unknown KVM exit: %d\n", run->exit_reason);
        break;
    }

    if (ret == 0) {
        ret = EXCP_INTERRUPT;
    }
    return ret;
}
bool kvm_arch_stop_on_emulation_error(CPUState *cpu)
{
    return true;
}

int kvm_arch_on_sigbus_vcpu(CPUState *cpu, int code, void *addr)
{
    return 1;
}

int kvm_arch_on_sigbus(int code, void *addr)
{
    return 1;
}
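
/*
 * For I/O interrupts the irq type itself encodes the source: adapter
 * interrupts (IO_INT_WORD_AI set) use KVM_S390_INT_IO(1, 0, 0, 0), all
 * others pack the subchannel id and number into the type word as done
 * in kvm_s390_io_interrupt() below.
 */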
void kvm_s390_io_interrupt(uint16_t subchannel_id,
                           uint16_t subchannel_nr, uint32_t io_int_parm,
                           uint32_t io_int_word)
{
    struct kvm_s390_irq irq = {
        .u.io.subchannel_id = subchannel_id,
        .u.io.subchannel_nr = subchannel_nr,
        .u.io.io_int_parm = io_int_parm,
        .u.io.io_int_word = io_int_word,
    };

    if (io_int_word & IO_INT_WORD_AI) {
        irq.type = KVM_S390_INT_IO(1, 0, 0, 0);
    } else {
        irq.type = ((subchannel_id & 0xff00) << 24) |
                   ((subchannel_id & 0x00060) << 22) | (subchannel_nr << 16);
    }
    kvm_s390_floating_interrupt(&irq);
}
void kvm_s390_crw_mchk(void)
{
    struct kvm_s390_irq irq = {
        .type = KVM_S390_MCHK,
        .u.mchk.cr14 = 1 << 28,
        .u.mchk.mcic = 0x00400f1d40330000ULL,
    };
    kvm_s390_floating_interrupt(&irq);
}
void kvm_s390_enable_css_support(S390CPU *cpu)
{
    int r;

    /* Activate host kernel channel subsystem support. */
    r = kvm_vcpu_enable_cap(CPU(cpu), KVM_CAP_S390_CSS_SUPPORT, 0);
    assert(r == 0);
}
void kvm_arch_init_irq_routing(KVMState *s)
{
    /*
     * Note that while irqchip capabilities generally imply that cpustates
     * are handled in-kernel, it is not true for s390 (yet); therefore, we
     * have to override the common code kvm_halt_in_kernel_allowed setting.
     */
    if (kvm_check_extension(s, KVM_CAP_IRQ_ROUTING)) {
        kvm_gsi_routing_allowed = true;
        kvm_halt_in_kernel_allowed = false;
    }
}
int kvm_s390_assign_subch_ioeventfd(EventNotifier *notifier, uint32_t sch,
                                    int vq, bool assign)
{
    struct kvm_ioeventfd kick = {
        .flags = KVM_IOEVENTFD_FLAG_VIRTIO_CCW_NOTIFY |
        KVM_IOEVENTFD_FLAG_DATAMATCH,
        .fd = event_notifier_get_fd(notifier),
        .datamatch = vq,
        .addr = sch,
        .len = 8,
    };
    if (!kvm_check_extension(kvm_state, KVM_CAP_IOEVENTFD)) {
        return -ENOSYS;
    }
    if (!assign) {
        kick.flags |= KVM_IOEVENTFD_FLAG_DEASSIGN;
    }
    return kvm_vm_ioctl(kvm_state, KVM_IOEVENTFD, &kick);
}
int kvm_s390_get_memslot_count(KVMState *s)
{
    return kvm_check_extension(s, KVM_CAP_NR_MEMSLOTS);
}
int kvm_s390_set_cpu_state(S390CPU *cpu, uint8_t cpu_state)
{
    struct kvm_mp_state mp_state = {};
    int ret;

    /* the kvm part might not have been initialized yet */
    if (CPU(cpu)->kvm_state == NULL) {
        return 0;
    }

    switch (cpu_state) {
    case CPU_STATE_STOPPED:
        mp_state.mp_state = KVM_MP_STATE_STOPPED;
        break;
    case CPU_STATE_CHECK_STOP:
        mp_state.mp_state = KVM_MP_STATE_CHECK_STOP;
        break;
    case CPU_STATE_OPERATING:
        mp_state.mp_state = KVM_MP_STATE_OPERATING;
        break;
    case CPU_STATE_LOAD:
        mp_state.mp_state = KVM_MP_STATE_LOAD;
        break;
    default:
        error_report("Requested CPU state is not a valid S390 CPU state: %u",
                     cpu_state);
        exit(1);
    }

    ret = kvm_vcpu_ioctl(CPU(cpu), KVM_SET_MP_STATE, &mp_state);
    if (ret) {
        trace_kvm_failed_cpu_state_set(CPU(cpu)->cpu_index, cpu_state,
                                       strerror(-ret));
    }

    return ret;
}
int kvm_arch_fixup_msi_route(struct kvm_irq_routing_entry *route,
                             uint64_t address, uint32_t data)
{
    S390PCIBusDevice *pbdev;
    uint32_t fid = data >> ZPCI_MSI_VEC_BITS;
    uint32_t vec = data & ZPCI_MSI_VEC_MASK;

    pbdev = s390_pci_find_dev_by_fid(fid);
    if (!pbdev) {
        DPRINTF("add_msi_route no dev\n");
        return -ENODEV;
    }

    pbdev->routes.adapter.ind_offset = vec;

    route->type = KVM_IRQ_ROUTING_S390_ADAPTER;
    route->flags = 0;
    route->u.adapter.summary_addr = pbdev->routes.adapter.summary_addr;
    route->u.adapter.ind_addr = pbdev->routes.adapter.ind_addr;
    route->u.adapter.summary_offset = pbdev->routes.adapter.summary_offset;
    route->u.adapter.ind_offset = pbdev->routes.adapter.ind_offset;
    route->u.adapter.adapter_id = pbdev->routes.adapter.adapter_id;
    return 0;
}