/*
 * QEMU S390x KVM implementation
 *
 * Copyright (c) 2009 Alexander Graf <agraf@suse.de>
 * Copyright IBM Corp. 2012
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * Contributions after 2012-10-29 are licensed under the terms of the
 * GNU GPL, version 2 or (at your option) any later version.
 *
 * You should have received a copy of the GNU (Lesser) General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include <sys/types.h>
#include <sys/ioctl.h>
#include <sys/mman.h>

#include <linux/kvm.h>
#include <asm/ptrace.h>

#include "qemu-common.h"
#include "cpu.h"
#include "qemu/timer.h"
#include "sysemu/sysemu.h"
#include "sysemu/kvm.h"
#include "hw/hw.h"
#include "sysemu/device_tree.h"
#include "qapi/qmp/qjson.h"
#include "monitor/monitor.h"
#include "exec/gdbstub.h"
#include "exec/address-spaces.h"
#include "trace.h"
#include "qapi-event.h"
#include "hw/s390x/s390-pci-inst.h"
#include "hw/s390x/s390-pci-bus.h"
#include "hw/s390x/ipl.h"

/* #define DEBUG_KVM */

#ifdef DEBUG_KVM
#define DPRINTF(fmt, ...) \
    do { fprintf(stderr, fmt, ## __VA_ARGS__); } while (0)
#else
#define DPRINTF(fmt, ...) \
    do { } while (0)
#endif

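/*
 * Decoding constants for intercepted instructions: IPA0_* is the major opcode
 * taken from the top byte of the SIE interception parameter A, while the
 * PRIV_*, DIAG_* and ICPT_* values below select the concrete sub-operation or
 * intercept reason that the handlers later in this file dispatch on.
 */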
#define IPA0_DIAG                       0x8300
#define IPA0_SIGP                       0xae00
#define IPA0_B2                         0xb200
#define IPA0_B9                         0xb900
#define IPA0_EB                         0xeb00
#define IPA0_E3                         0xe300

#define PRIV_B2_SCLP_CALL               0x20
#define PRIV_B2_CSCH                    0x30
#define PRIV_B2_HSCH                    0x31
#define PRIV_B2_MSCH                    0x32
#define PRIV_B2_SSCH                    0x33
#define PRIV_B2_STSCH                   0x34
#define PRIV_B2_TSCH                    0x35
#define PRIV_B2_TPI                     0x36
#define PRIV_B2_SAL                     0x37
#define PRIV_B2_RSCH                    0x38
#define PRIV_B2_STCRW                   0x39
#define PRIV_B2_STCPS                   0x3a
#define PRIV_B2_RCHP                    0x3b
#define PRIV_B2_SCHM                    0x3c
#define PRIV_B2_CHSC                    0x5f
#define PRIV_B2_SIGA                    0x74
#define PRIV_B2_XSCH                    0x76

#define PRIV_EB_SQBS                    0x8a
#define PRIV_EB_PCISTB                  0xd0
#define PRIV_EB_SIC                     0xd1

#define PRIV_B9_EQBS                    0x9c
#define PRIV_B9_CLP                     0xa0
#define PRIV_B9_PCISTG                  0xd0
#define PRIV_B9_PCILG                   0xd2
#define PRIV_B9_RPCIT                   0xd3

#define PRIV_E3_MPCIFC                  0xd0
#define PRIV_E3_STPCIFC                 0xd4

#define DIAG_IPL                        0x308
#define DIAG_KVM_HYPERCALL              0x500
#define DIAG_KVM_BREAKPOINT             0x501

#define ICPT_INSTRUCTION                0x04
#define ICPT_PROGRAM                    0x08
#define ICPT_EXT_INT                    0x14
#define ICPT_WAITPSW                    0x1c
#define ICPT_SOFT_INTERCEPT             0x24
#define ICPT_CPU_STOP                   0x28
#define ICPT_IO                         0x40

static CPUWatchpoint hw_watchpoint;
/*
 * We don't use a list because this structure is also used to transmit the
 * hardware breakpoints to the kernel.
 */
static struct kvm_hw_breakpoint *hw_breakpoints;
static int nb_hw_breakpoints;

const KVMCapabilityInfo kvm_arch_required_capabilities[] = {
    KVM_CAP_LAST_INFO
};

static int cap_sync_regs;
static int cap_async_pf;

static void *legacy_s390_alloc(size_t size, uint64_t *align);

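/*
 * Memory limit handling: the three helpers below probe and program the
 * KVM_S390_VM_MEM_LIMIT_SIZE attribute of the KVM_S390_VM_MEM_CTRL group.
 * A caller could use them roughly like this (illustrative sketch only, not
 * code from this file):
 *
 *     uint64_t hw_limit;
 *     if (kvm_s390_set_mem_limit(kvm_state, ram_size, &hw_limit)) {
 *         error_report("RAM size exceeds host memory limit");
 *     }
 */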
static int kvm_s390_supports_mem_limit(KVMState *s)
{
    struct kvm_device_attr attr = {
        .group = KVM_S390_VM_MEM_CTRL,
        .attr = KVM_S390_VM_MEM_LIMIT_SIZE,
    };

    return (kvm_vm_ioctl(s, KVM_HAS_DEVICE_ATTR, &attr) == 0);
}

static int kvm_s390_query_mem_limit(KVMState *s, uint64_t *memory_limit)
{
    struct kvm_device_attr attr = {
        .group = KVM_S390_VM_MEM_CTRL,
        .attr = KVM_S390_VM_MEM_LIMIT_SIZE,
        .addr = (uint64_t) memory_limit,
    };

    return kvm_vm_ioctl(s, KVM_GET_DEVICE_ATTR, &attr);
}

int kvm_s390_set_mem_limit(KVMState *s, uint64_t new_limit, uint64_t *hw_limit)
{
    int rc;
    struct kvm_device_attr attr = {
        .group = KVM_S390_VM_MEM_CTRL,
        .attr = KVM_S390_VM_MEM_LIMIT_SIZE,
        .addr = (uint64_t) &new_limit,
    };

    if (!kvm_s390_supports_mem_limit(s)) {
        return 0;
    }

    rc = kvm_s390_query_mem_limit(s, hw_limit);
    if (rc) {
        return rc;
    } else if (*hw_limit < new_limit) {
        return -E2BIG;
    }

    return kvm_vm_ioctl(s, KVM_SET_DEVICE_ATTR, &attr);
}

static int kvm_s390_check_clear_cmma(KVMState *s)
{
    struct kvm_device_attr attr = {
        .group = KVM_S390_VM_MEM_CTRL,
        .attr = KVM_S390_VM_MEM_CLR_CMMA,
    };

    return kvm_vm_ioctl(s, KVM_HAS_DEVICE_ATTR, &attr);
}

static int kvm_s390_check_enable_cmma(KVMState *s)
{
    struct kvm_device_attr attr = {
        .group = KVM_S390_VM_MEM_CTRL,
        .attr = KVM_S390_VM_MEM_ENABLE_CMMA,
    };

    return kvm_vm_ioctl(s, KVM_HAS_DEVICE_ATTR, &attr);
}

void kvm_s390_clear_cmma_callback(void *opaque)
{
    int rc;
    KVMState *s = opaque;
    struct kvm_device_attr attr = {
        .group = KVM_S390_VM_MEM_CTRL,
        .attr = KVM_S390_VM_MEM_CLR_CMMA,
    };

    rc = kvm_vm_ioctl(s, KVM_SET_DEVICE_ATTR, &attr);
    trace_kvm_clear_cmma(rc);
}

static void kvm_s390_enable_cmma(KVMState *s)
{
    int rc;
    struct kvm_device_attr attr = {
        .group = KVM_S390_VM_MEM_CTRL,
        .attr = KVM_S390_VM_MEM_ENABLE_CMMA,
    };

    if (kvm_s390_check_enable_cmma(s) || kvm_s390_check_clear_cmma(s)) {
        return;
    }

    rc = kvm_vm_ioctl(s, KVM_SET_DEVICE_ATTR, &attr);
    if (!rc) {
        qemu_register_reset(kvm_s390_clear_cmma_callback, s);
    }
    trace_kvm_enable_cmma(rc);
}

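/*
 * kvm_arch_init() runs once per VM: it caches the sync_regs and async page
 * fault capabilities, enables CMMA when VM attributes are available, falls
 * back to the legacy allocator on hosts without GMAP/COW support, and
 * requests user space SIGP handling.
 */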
int kvm_arch_init(MachineState *ms, KVMState *s)
{
    cap_sync_regs = kvm_check_extension(s, KVM_CAP_SYNC_REGS);
    cap_async_pf = kvm_check_extension(s, KVM_CAP_ASYNC_PF);

    if (kvm_check_extension(s, KVM_CAP_VM_ATTRIBUTES)) {
        kvm_s390_enable_cmma(s);
    }

    if (!kvm_check_extension(s, KVM_CAP_S390_GMAP)
        || !kvm_check_extension(s, KVM_CAP_S390_COW)) {
        phys_mem_set_alloc(legacy_s390_alloc);
    }

    kvm_vm_enable_cap(s, KVM_CAP_S390_USER_SIGP, 0);

    return 0;
}

unsigned long kvm_arch_vcpu_id(CPUState *cpu)
{
    return cpu->cpu_index;
}

int kvm_arch_init_vcpu(CPUState *cs)
{
    S390CPU *cpu = S390_CPU(cs);
    kvm_s390_set_cpu_state(cpu, cpu->env.cpu_state);
    return 0;
}

void kvm_s390_reset_vcpu(S390CPU *cpu)
{
    CPUState *cs = CPU(cpu);

    /* The initial reset call is needed here to reset in-kernel
     * vcpu data that we can't access directly from QEMU
     * (i.e. with older kernels which don't support sync_regs/ONE_REG).
     * Before this ioctl cpu_synchronize_state() is called in common kvm
     * code (kvm-all). */
    if (kvm_vcpu_ioctl(cs, KVM_S390_INITIAL_RESET, NULL)) {
        error_report("Initial CPU reset failed on CPU %i", cs->cpu_index);
    }
}

static int can_sync_regs(CPUState *cs, int regs)
{
    return cap_sync_regs && (cs->kvm_run->kvm_valid_regs & regs) == regs;
}

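/*
 * Register transfer uses two paths: when the kernel exposes a register set
 * through the sync_regs area of struct kvm_run (see can_sync_regs()), the
 * values are copied there and flagged in kvm_dirty_regs; otherwise the
 * slower KVM_SET_REGS / KVM_GET_REGS style vcpu ioctls or ONE_REG accesses
 * are used.
 */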
int kvm_arch_put_registers(CPUState *cs, int level)
{
    S390CPU *cpu = S390_CPU(cs);
    CPUS390XState *env = &cpu->env;
    struct kvm_sregs sregs;
    struct kvm_regs regs;
    struct kvm_fpu fpu = {};
    int r;
    int i;

    /* always save the PSW  and the GPRS*/
    cs->kvm_run->psw_addr = env->psw.addr;
    cs->kvm_run->psw_mask = env->psw.mask;

    if (can_sync_regs(cs, KVM_SYNC_GPRS)) {
        for (i = 0; i < 16; i++) {
            cs->kvm_run->s.regs.gprs[i] = env->regs[i];
            cs->kvm_run->kvm_dirty_regs |= KVM_SYNC_GPRS;
        }
    } else {
        for (i = 0; i < 16; i++) {
            regs.gprs[i] = env->regs[i];
        }
        r = kvm_vcpu_ioctl(cs, KVM_SET_REGS, &regs);
        if (r < 0) {
            return r;
        }
    }

    /* Floating point */
    for (i = 0; i < 16; i++) {
        fpu.fprs[i] = env->fregs[i].ll;
    }
    fpu.fpc = env->fpc;

    r = kvm_vcpu_ioctl(cs, KVM_SET_FPU, &fpu);
    if (r < 0) {
        return r;
    }

    /* Do we need to save more than that? */
    if (level == KVM_PUT_RUNTIME_STATE) {
        return 0;
    }

    if (can_sync_regs(cs, KVM_SYNC_ARCH0)) {
        cs->kvm_run->s.regs.cputm = env->cputm;
        cs->kvm_run->s.regs.ckc = env->ckc;
        cs->kvm_run->s.regs.todpr = env->todpr;
        cs->kvm_run->s.regs.gbea = env->gbea;
        cs->kvm_run->s.regs.pp = env->pp;
        cs->kvm_run->kvm_dirty_regs |= KVM_SYNC_ARCH0;
    } else {
        /*
         * These ONE_REGS are not protected by a capability. As they are only
         * necessary for migration we just trace a possible error, but don't
         * return with an error return code.
         */
        kvm_set_one_reg(cs, KVM_REG_S390_CPU_TIMER, &env->cputm);
        kvm_set_one_reg(cs, KVM_REG_S390_CLOCK_COMP, &env->ckc);
        kvm_set_one_reg(cs, KVM_REG_S390_TODPR, &env->todpr);
        kvm_set_one_reg(cs, KVM_REG_S390_GBEA, &env->gbea);
        kvm_set_one_reg(cs, KVM_REG_S390_PP, &env->pp);
    }

    /* pfault parameters */
    if (can_sync_regs(cs, KVM_SYNC_PFAULT)) {
        cs->kvm_run->s.regs.pft = env->pfault_token;
        cs->kvm_run->s.regs.pfs = env->pfault_select;
        cs->kvm_run->s.regs.pfc = env->pfault_compare;
        cs->kvm_run->kvm_dirty_regs |= KVM_SYNC_PFAULT;
    } else if (cap_async_pf) {
        r = kvm_set_one_reg(cs, KVM_REG_S390_PFTOKEN, &env->pfault_token);
        if (r < 0) {
            return r;
        }
        r = kvm_set_one_reg(cs, KVM_REG_S390_PFCOMPARE, &env->pfault_compare);
        if (r < 0) {
            return r;
        }
        r = kvm_set_one_reg(cs, KVM_REG_S390_PFSELECT, &env->pfault_select);
        if (r < 0) {
            return r;
        }
    }

    /* access registers and control registers*/
    if (can_sync_regs(cs, KVM_SYNC_ACRS | KVM_SYNC_CRS)) {
        for (i = 0; i < 16; i++) {
            cs->kvm_run->s.regs.acrs[i] = env->aregs[i];
            cs->kvm_run->s.regs.crs[i] = env->cregs[i];
        }
        cs->kvm_run->kvm_dirty_regs |= KVM_SYNC_ACRS;
        cs->kvm_run->kvm_dirty_regs |= KVM_SYNC_CRS;
    } else {
        for (i = 0; i < 16; i++) {
            sregs.acrs[i] = env->aregs[i];
            sregs.crs[i] = env->cregs[i];
        }
        r = kvm_vcpu_ioctl(cs, KVM_SET_SREGS, &sregs);
        if (r < 0) {
            return r;
        }
    }

    /* Finally the prefix */
    if (can_sync_regs(cs, KVM_SYNC_PREFIX)) {
        cs->kvm_run->s.regs.prefix = env->psa;
        cs->kvm_run->kvm_dirty_regs |= KVM_SYNC_PREFIX;
    } else {
        /* prefix is only supported via sync regs */
    }
    return 0;
}

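/*
 * kvm_arch_get_registers() is the mirror of the function above: it pulls the
 * PSW, GPRs, access/control registers, FPRs, prefix and pfault state back
 * from the kernel, preferring the sync_regs area when the kernel provides it.
 */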
int kvm_arch_get_registers(CPUState *cs)
{
    S390CPU *cpu = S390_CPU(cs);
    CPUS390XState *env = &cpu->env;
    struct kvm_sregs sregs;
    struct kvm_regs regs;
    struct kvm_fpu fpu;
    int i, r;

    /* get the PSW */
    env->psw.addr = cs->kvm_run->psw_addr;
    env->psw.mask = cs->kvm_run->psw_mask;

    /* the GPRS */
    if (can_sync_regs(cs, KVM_SYNC_GPRS)) {
        for (i = 0; i < 16; i++) {
            env->regs[i] = cs->kvm_run->s.regs.gprs[i];
        }
    } else {
        r = kvm_vcpu_ioctl(cs, KVM_GET_REGS, &regs);
        if (r < 0) {
            return r;
        }
        for (i = 0; i < 16; i++) {
            env->regs[i] = regs.gprs[i];
        }
    }

    /* The ACRS and CRS */
    if (can_sync_regs(cs, KVM_SYNC_ACRS | KVM_SYNC_CRS)) {
        for (i = 0; i < 16; i++) {
            env->aregs[i] = cs->kvm_run->s.regs.acrs[i];
            env->cregs[i] = cs->kvm_run->s.regs.crs[i];
        }
    } else {
        r = kvm_vcpu_ioctl(cs, KVM_GET_SREGS, &sregs);
        if (r < 0) {
            return r;
        }
        for (i = 0; i < 16; i++) {
            env->aregs[i] = sregs.acrs[i];
            env->cregs[i] = sregs.crs[i];
        }
    }

    /* Floating point */
    r = kvm_vcpu_ioctl(cs, KVM_GET_FPU, &fpu);
    if (r < 0) {
        return r;
    }
    for (i = 0; i < 16; i++) {
        env->fregs[i].ll = fpu.fprs[i];
    }
    env->fpc = fpu.fpc;

    /* The prefix */
    if (can_sync_regs(cs, KVM_SYNC_PREFIX)) {
        env->psa = cs->kvm_run->s.regs.prefix;
    }

    if (can_sync_regs(cs, KVM_SYNC_ARCH0)) {
        env->cputm = cs->kvm_run->s.regs.cputm;
        env->ckc = cs->kvm_run->s.regs.ckc;
        env->todpr = cs->kvm_run->s.regs.todpr;
        env->gbea = cs->kvm_run->s.regs.gbea;
        env->pp = cs->kvm_run->s.regs.pp;
    } else {
        /*
         * These ONE_REGS are not protected by a capability. As they are only
         * necessary for migration we just trace a possible error, but don't
         * return with an error return code.
         */
        kvm_get_one_reg(cs, KVM_REG_S390_CPU_TIMER, &env->cputm);
        kvm_get_one_reg(cs, KVM_REG_S390_CLOCK_COMP, &env->ckc);
        kvm_get_one_reg(cs, KVM_REG_S390_TODPR, &env->todpr);
        kvm_get_one_reg(cs, KVM_REG_S390_GBEA, &env->gbea);
        kvm_get_one_reg(cs, KVM_REG_S390_PP, &env->pp);
    }

    /* pfault parameters */
    if (can_sync_regs(cs, KVM_SYNC_PFAULT)) {
        env->pfault_token = cs->kvm_run->s.regs.pft;
        env->pfault_select = cs->kvm_run->s.regs.pfs;
        env->pfault_compare = cs->kvm_run->s.regs.pfc;
    } else if (cap_async_pf) {
        r = kvm_get_one_reg(cs, KVM_REG_S390_PFTOKEN, &env->pfault_token);
        if (r < 0) {
            return r;
        }
        r = kvm_get_one_reg(cs, KVM_REG_S390_PFCOMPARE, &env->pfault_compare);
        if (r < 0) {
            return r;
        }
        r = kvm_get_one_reg(cs, KVM_REG_S390_PFSELECT, &env->pfault_select);
        if (r < 0) {
            return r;
        }
    }

    return 0;
}

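/*
 * The guest TOD clock is exposed through the KVM_S390_VM_TOD device attribute
 * group: the LOW attribute carries the 64 bit TOD value and the HIGH
 * attribute the upper (epoch extension) byte, which is why each helper below
 * issues two KVM device-attribute ioctls.
 */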
int kvm_s390_get_clock(uint8_t *tod_high, uint64_t *tod_low)
{
    int r;
    struct kvm_device_attr attr = {
        .group = KVM_S390_VM_TOD,
        .attr = KVM_S390_VM_TOD_LOW,
        .addr = (uint64_t)tod_low,
    };

    r = kvm_vm_ioctl(kvm_state, KVM_GET_DEVICE_ATTR, &attr);
    if (r) {
        return r;
    }

    attr.attr = KVM_S390_VM_TOD_HIGH;
    attr.addr = (uint64_t)tod_high;
    return kvm_vm_ioctl(kvm_state, KVM_GET_DEVICE_ATTR, &attr);
}

int kvm_s390_set_clock(uint8_t *tod_high, uint64_t *tod_low)
{
    int r;
    struct kvm_device_attr attr = {
        .group = KVM_S390_VM_TOD,
        .attr = KVM_S390_VM_TOD_LOW,
        .addr = (uint64_t)tod_low,
    };

    r = kvm_vm_ioctl(kvm_state, KVM_SET_DEVICE_ATTR, &attr);
    if (r) {
        return r;
    }

    attr.attr = KVM_S390_VM_TOD_HIGH;
    attr.addr = (uint64_t)tod_high;
    return kvm_vm_ioctl(kvm_state, KVM_SET_DEVICE_ATTR, &attr);
}

/*
 * Legacy layout for s390:
 * Older S390 KVM requires the topmost vma of the RAM to be
 * smaller than an system defined value, which is at least 256GB.
 * Larger systems have larger values. We put the guest between
 * the end of data segment (system break) and this value. We
 * use 32GB as a base to have enough room for the system break
 * to grow. We also have to use MAP parameters that avoid
 * read-only mapping of guest pages.
 */
static void *legacy_s390_alloc(size_t size, uint64_t *align)
{
    void *mem;

    mem = mmap((void *) 0x800000000ULL, size,
               PROT_EXEC|PROT_READ|PROT_WRITE,
               MAP_SHARED | MAP_ANONYMOUS | MAP_FIXED, -1, 0);
    return mem == MAP_FAILED ? NULL : mem;
}

/* DIAG 501 is used for sw breakpoints */
static const uint8_t diag_501[] = {0x83, 0x24, 0x05, 0x01};

int kvm_arch_insert_sw_breakpoint(CPUState *cs, struct kvm_sw_breakpoint *bp)
{
    if (cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&bp->saved_insn,
                            sizeof(diag_501), 0) ||
        cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)diag_501,
                            sizeof(diag_501), 1)) {
        return -EINVAL;
    }
    return 0;
}

int kvm_arch_remove_sw_breakpoint(CPUState *cs, struct kvm_sw_breakpoint *bp)
{
    uint8_t t[sizeof(diag_501)];

    if (cpu_memory_rw_debug(cs, bp->pc, t, sizeof(diag_501), 0)) {
        return -EINVAL;
    } else if (memcmp(t, diag_501, sizeof(diag_501))) {
        return -EINVAL;
    } else if (cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&bp->saved_insn,
                                   sizeof(diag_501), 1)) {
        return -EINVAL;
    }

    return 0;
}

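/*
 * Hardware breakpoints and watchpoints are collected in the hw_breakpoints
 * array declared at the top of this file; kvm_arch_update_guest_debug()
 * later hands that array to the kernel verbatim, which is why a plain array
 * rather than a list is used.
 */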
static struct kvm_hw_breakpoint *find_hw_breakpoint(target_ulong addr,
                                                    int len, int type)
{
    int n;

    for (n = 0; n < nb_hw_breakpoints; n++) {
        if (hw_breakpoints[n].addr == addr && hw_breakpoints[n].type == type &&
            (hw_breakpoints[n].len == len || len == -1)) {
            return &hw_breakpoints[n];
        }
    }

    return NULL;
}

static int insert_hw_breakpoint(target_ulong addr, int len, int type)
{
    int size;

    if (find_hw_breakpoint(addr, len, type)) {
        return -EEXIST;
    }

    size = (nb_hw_breakpoints + 1) * sizeof(struct kvm_hw_breakpoint);

    if (!hw_breakpoints) {
        nb_hw_breakpoints = 0;
        hw_breakpoints = (struct kvm_hw_breakpoint *)g_try_malloc(size);
    } else {
        hw_breakpoints =
            (struct kvm_hw_breakpoint *)g_try_realloc(hw_breakpoints, size);
    }

    if (!hw_breakpoints) {
        nb_hw_breakpoints = 0;
        return -ENOMEM;
    }

    hw_breakpoints[nb_hw_breakpoints].addr = addr;
    hw_breakpoints[nb_hw_breakpoints].len = len;
    hw_breakpoints[nb_hw_breakpoints].type = type;

    nb_hw_breakpoints++;

    return 0;
}

int kvm_arch_insert_hw_breakpoint(target_ulong addr,
                                  target_ulong len, int type)
{
    switch (type) {
    case GDB_BREAKPOINT_HW:
        type = KVM_HW_BP;
        break;
    case GDB_WATCHPOINT_WRITE:
        if (len < 1) {
            return -EINVAL;
        }
        type = KVM_HW_WP_WRITE;
        break;
    default:
        return -ENOSYS;
    }
    return insert_hw_breakpoint(addr, len, type);
}

int kvm_arch_remove_hw_breakpoint(target_ulong addr,
                                  target_ulong len, int type)
{
    int size;
    struct kvm_hw_breakpoint *bp = find_hw_breakpoint(addr, len, type);

    if (bp == NULL) {
        return -ENOENT;
    }

    nb_hw_breakpoints--;
    if (nb_hw_breakpoints > 0) {
        /*
         * In order to trim the array, move the last element to the position to
         * be removed - if necessary.
         */
        if (bp != &hw_breakpoints[nb_hw_breakpoints]) {
            *bp = hw_breakpoints[nb_hw_breakpoints];
        }
        size = nb_hw_breakpoints * sizeof(struct kvm_hw_breakpoint);
        hw_breakpoints =
            (struct kvm_hw_breakpoint *)g_realloc(hw_breakpoints, size);
    } else {
        g_free(hw_breakpoints);
        hw_breakpoints = NULL;
    }

    return 0;
}

void kvm_arch_remove_all_hw_breakpoints(void)
{
    nb_hw_breakpoints = 0;
    g_free(hw_breakpoints);
    hw_breakpoints = NULL;
}

void kvm_arch_update_guest_debug(CPUState *cpu, struct kvm_guest_debug *dbg)
{
    int i;

    if (nb_hw_breakpoints > 0) {
        dbg->arch.nr_hw_bp = nb_hw_breakpoints;
        dbg->arch.hw_bp = hw_breakpoints;

        for (i = 0; i < nb_hw_breakpoints; ++i) {
            hw_breakpoints[i].phys_addr = s390_cpu_get_phys_addr_debug(cpu,
                                                       hw_breakpoints[i].addr);
        }
        dbg->control |= KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_HW_BP;
    } else {
        dbg->arch.nr_hw_bp = 0;
        dbg->arch.hw_bp = NULL;
    }
}

void kvm_arch_pre_run(CPUState *cpu, struct kvm_run *run)
{
}

void kvm_arch_post_run(CPUState *cpu, struct kvm_run *run)
{
}

int kvm_arch_process_async_events(CPUState *cs)
{
    return cs->halted;
}

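/*
 * Interrupt injection: struct kvm_s390_irq is the canonical description used
 * by QEMU. For the legacy KVM_S390_INTERRUPT ioctl it has to be converted
 * into a struct kvm_s390_interrupt first, which is what the helper below
 * does; the FLIC based path further down avoids this conversion.
 */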
static int s390_kvm_irq_to_interrupt(struct kvm_s390_irq *irq,
                                     struct kvm_s390_interrupt *interrupt)
{
    int r = 0;

    interrupt->type = irq->type;
    switch (irq->type) {
    case KVM_S390_INT_VIRTIO:
        interrupt->parm = irq->u.ext.ext_params;
        /* fall through */
    case KVM_S390_INT_PFAULT_INIT:
    case KVM_S390_INT_PFAULT_DONE:
        interrupt->parm64 = irq->u.ext.ext_params2;
        break;
    case KVM_S390_PROGRAM_INT:
        interrupt->parm = irq->u.pgm.code;
        break;
    case KVM_S390_SIGP_SET_PREFIX:
        interrupt->parm = irq->u.prefix.address;
        break;
    case KVM_S390_INT_SERVICE:
        interrupt->parm = irq->u.ext.ext_params;
        break;
    case KVM_S390_MCHK:
        interrupt->parm = irq->u.mchk.cr14;
        interrupt->parm64 = irq->u.mchk.mcic;
        break;
    case KVM_S390_INT_EXTERNAL_CALL:
        interrupt->parm = irq->u.extcall.code;
        break;
    case KVM_S390_INT_EMERGENCY:
        interrupt->parm = irq->u.emerg.code;
        break;
    case KVM_S390_SIGP_STOP:
    case KVM_S390_RESTART:
        break; /* These types have no parameters */
    case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
        interrupt->parm = irq->u.io.subchannel_id << 16;
        interrupt->parm |= irq->u.io.subchannel_nr;
        interrupt->parm64 = (uint64_t)irq->u.io.io_int_parm << 32;
        interrupt->parm64 |= irq->u.io.io_int_word;
        break;
    default:
        r = -EINVAL;
        break;
    }
    return r;
}

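/*
 * kvm_s390_vcpu_interrupt() targets a single vcpu via the vcpu ioctl, while
 * the floating interrupt path below goes through the VM: it first tries the
 * in-kernel FLIC (kvm_s390_inject_flic) and only falls back to the legacy
 * KVM_S390_INTERRUPT vm ioctl when that is unavailable.
 */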
void kvm_s390_vcpu_interrupt(S390CPU *cpu, struct kvm_s390_irq *irq)
{
    struct kvm_s390_interrupt kvmint = {};
    CPUState *cs = CPU(cpu);
    int r;

    r = s390_kvm_irq_to_interrupt(irq, &kvmint);
    if (r < 0) {
        fprintf(stderr, "%s called with bogus interrupt\n", __func__);
        exit(1);
    }

    r = kvm_vcpu_ioctl(cs, KVM_S390_INTERRUPT, &kvmint);
    if (r < 0) {
        fprintf(stderr, "KVM failed to inject interrupt\n");
        exit(1);
    }
}

static void __kvm_s390_floating_interrupt(struct kvm_s390_irq *irq)
{
    struct kvm_s390_interrupt kvmint = {};
    int r;

    r = s390_kvm_irq_to_interrupt(irq, &kvmint);
    if (r < 0) {
        fprintf(stderr, "%s called with bogus interrupt\n", __func__);
        exit(1);
    }

    r = kvm_vm_ioctl(kvm_state, KVM_S390_INTERRUPT, &kvmint);
    if (r < 0) {
        fprintf(stderr, "KVM failed to inject interrupt\n");
        exit(1);
    }
}

void kvm_s390_floating_interrupt(struct kvm_s390_irq *irq)
{
    static bool use_flic = true;
    int r;

    if (use_flic) {
        r = kvm_s390_inject_flic(irq);
        if (r == -ENOSYS) {
            use_flic = false;
        }
        if (!r) {
            return;
        }
    }
    __kvm_s390_floating_interrupt(irq);
}

void kvm_s390_virtio_irq(int config_change, uint64_t token)
{
    struct kvm_s390_irq irq = {
        .type = KVM_S390_INT_VIRTIO,
        .u.ext.ext_params = config_change,
        .u.ext.ext_params2 = token,
    };

    kvm_s390_floating_interrupt(&irq);
}

void kvm_s390_service_interrupt(uint32_t parm)
{
    struct kvm_s390_irq irq = {
        .type = KVM_S390_INT_SERVICE,
        .u.ext.ext_params = parm,
    };

    kvm_s390_floating_interrupt(&irq);
}

static void enter_pgmcheck(S390CPU *cpu, uint16_t code)
{
    struct kvm_s390_irq irq = {
        .type = KVM_S390_PROGRAM_INT,
        .u.pgm.code = code,
    };

    kvm_s390_vcpu_interrupt(cpu, &irq);
}

void kvm_s390_access_exception(S390CPU *cpu, uint16_t code, uint64_t te_code)
{
    struct kvm_s390_irq irq = {
        .type = KVM_S390_PROGRAM_INT,
        .u.pgm.code = code,
        .u.pgm.trans_exc_code = te_code,
        .u.pgm.exc_access_id = te_code & 3,
    };

    kvm_s390_vcpu_interrupt(cpu, &irq);
}

static int kvm_sclp_service_call(S390CPU *cpu, struct kvm_run *run,
                                 uint16_t ipbh0)
{
    CPUS390XState *env = &cpu->env;
    uint64_t sccb;
    uint32_t code;
    int r;

    cpu_synchronize_state(CPU(cpu));
    sccb = env->regs[ipbh0 & 0xf];
    code = env->regs[(ipbh0 & 0xf0) >> 4];

    r = sclp_service_call(env, sccb, code);
    if (r < 0) {
        enter_pgmcheck(cpu, -r);
    } else {
        setcc(cpu, r);
    }

    return 0;
}

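/*
 * handle_b2/handle_b9/handle_eb/handle_e3 dispatch privileged instruction
 * intercepts by their low opcode byte: channel I/O instructions are forwarded
 * to the ioinst_* helpers, zPCI instructions to the pci service call
 * wrappers, and SERVICE CALL to the SCLP emulation above.
 */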
static int handle_b2(S390CPU *cpu, struct kvm_run *run, uint8_t ipa1)
{
    CPUS390XState *env = &cpu->env;
    int rc = 0;
    uint16_t ipbh0 = (run->s390_sieic.ipb & 0xffff0000) >> 16;

    cpu_synchronize_state(CPU(cpu));

    switch (ipa1) {
    case PRIV_B2_XSCH:
        ioinst_handle_xsch(cpu, env->regs[1]);
        break;
    case PRIV_B2_CSCH:
        ioinst_handle_csch(cpu, env->regs[1]);
        break;
    case PRIV_B2_HSCH:
        ioinst_handle_hsch(cpu, env->regs[1]);
        break;
    case PRIV_B2_MSCH:
        ioinst_handle_msch(cpu, env->regs[1], run->s390_sieic.ipb);
        break;
    case PRIV_B2_SSCH:
        ioinst_handle_ssch(cpu, env->regs[1], run->s390_sieic.ipb);
        break;
    case PRIV_B2_STCRW:
        ioinst_handle_stcrw(cpu, run->s390_sieic.ipb);
        break;
    case PRIV_B2_STSCH:
        ioinst_handle_stsch(cpu, env->regs[1], run->s390_sieic.ipb);
        break;
    case PRIV_B2_TSCH:
        /* We should only get tsch via KVM_EXIT_S390_TSCH. */
        fprintf(stderr, "Spurious tsch intercept\n");
        break;
    case PRIV_B2_CHSC:
        ioinst_handle_chsc(cpu, run->s390_sieic.ipb);
        break;
    case PRIV_B2_TPI:
        /* This should have been handled by kvm already. */
        fprintf(stderr, "Spurious tpi intercept\n");
        break;
    case PRIV_B2_SCHM:
        ioinst_handle_schm(cpu, env->regs[1], env->regs[2],
                           run->s390_sieic.ipb);
        break;
    case PRIV_B2_RSCH:
        ioinst_handle_rsch(cpu, env->regs[1]);
        break;
    case PRIV_B2_RCHP:
        ioinst_handle_rchp(cpu, env->regs[1]);
        break;
    case PRIV_B2_STCPS:
        /* We do not provide this instruction, it is suppressed. */
        break;
    case PRIV_B2_SAL:
        ioinst_handle_sal(cpu, env->regs[1]);
        break;
    case PRIV_B2_SIGA:
        /* Not provided, set CC = 3 for subchannel not operational */
        setcc(cpu, 3);
        break;
    case PRIV_B2_SCLP_CALL:
        rc = kvm_sclp_service_call(cpu, run, ipbh0);
        break;
    default:
        rc = -1;
        DPRINTF("KVM: unhandled PRIV: 0xb2%x\n", ipa1);
        break;
    }

    return rc;
}

static uint64_t get_base_disp_rxy(S390CPU *cpu, struct kvm_run *run)
{
    CPUS390XState *env = &cpu->env;
    uint32_t x2 = (run->s390_sieic.ipa & 0x000f);
    uint32_t base2 = run->s390_sieic.ipb >> 28;
    uint32_t disp2 = ((run->s390_sieic.ipb & 0x0fff0000) >> 16) +
                     ((run->s390_sieic.ipb & 0xff00) << 4);

    if (disp2 & 0x80000) {
        disp2 += 0xfff00000;
    }

    return (base2 ? env->regs[base2] : 0) +
           (x2 ? env->regs[x2] : 0) + (long)(int)disp2;
}

static uint64_t get_base_disp_rsy(S390CPU *cpu, struct kvm_run *run)
{
    CPUS390XState *env = &cpu->env;
    uint32_t base2 = run->s390_sieic.ipb >> 28;
    uint32_t disp2 = ((run->s390_sieic.ipb & 0x0fff0000) >> 16) +
                     ((run->s390_sieic.ipb & 0xff00) << 4);

    if (disp2 & 0x80000) {
        disp2 += 0xfff00000;
    }

    return (base2 ? env->regs[base2] : 0) + (long)(int)disp2;
}

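/*
 * The kvm_*_service_call() wrappers below decode the register numbers and,
 * where needed, the base/displacement address of the intercepted zPCI
 * instruction and then delegate to the s390 PCI instruction handlers
 * declared in the included hw/s390x/s390-pci-inst.h.
 */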
static int kvm_clp_service_call(S390CPU *cpu, struct kvm_run *run)
{
    uint8_t r2 = (run->s390_sieic.ipb & 0x000f0000) >> 16;

    return clp_service_call(cpu, r2);
}

static int kvm_pcilg_service_call(S390CPU *cpu, struct kvm_run *run)
{
    uint8_t r1 = (run->s390_sieic.ipb & 0x00f00000) >> 20;
    uint8_t r2 = (run->s390_sieic.ipb & 0x000f0000) >> 16;

    return pcilg_service_call(cpu, r1, r2);
}

static int kvm_pcistg_service_call(S390CPU *cpu, struct kvm_run *run)
{
    uint8_t r1 = (run->s390_sieic.ipb & 0x00f00000) >> 20;
    uint8_t r2 = (run->s390_sieic.ipb & 0x000f0000) >> 16;

    return pcistg_service_call(cpu, r1, r2);
}

static int kvm_stpcifc_service_call(S390CPU *cpu, struct kvm_run *run)
{
    uint8_t r1 = (run->s390_sieic.ipa & 0x00f0) >> 4;
    uint64_t fiba;

    cpu_synchronize_state(CPU(cpu));
    fiba = get_base_disp_rxy(cpu, run);

    return stpcifc_service_call(cpu, r1, fiba);
}

static int kvm_sic_service_call(S390CPU *cpu, struct kvm_run *run)
{
    /* NOOP */
    return 0;
}

static int kvm_rpcit_service_call(S390CPU *cpu, struct kvm_run *run)
{
    uint8_t r1 = (run->s390_sieic.ipb & 0x00f00000) >> 20;
    uint8_t r2 = (run->s390_sieic.ipb & 0x000f0000) >> 16;

    return rpcit_service_call(cpu, r1, r2);
}

static int kvm_pcistb_service_call(S390CPU *cpu, struct kvm_run *run)
{
    uint8_t r1 = (run->s390_sieic.ipa & 0x00f0) >> 4;
    uint8_t r3 = run->s390_sieic.ipa & 0x000f;
    uint64_t gaddr;

    cpu_synchronize_state(CPU(cpu));
    gaddr = get_base_disp_rsy(cpu, run);

    return pcistb_service_call(cpu, r1, r3, gaddr);
}

static int kvm_mpcifc_service_call(S390CPU *cpu, struct kvm_run *run)
{
    uint8_t r1 = (run->s390_sieic.ipa & 0x00f0) >> 4;
    uint64_t fiba;

    cpu_synchronize_state(CPU(cpu));
    fiba = get_base_disp_rxy(cpu, run);

    return mpcifc_service_call(cpu, r1, fiba);
}

static int handle_b9(S390CPU *cpu, struct kvm_run *run, uint8_t ipa1)
{
    int r = 0;

    switch (ipa1) {
    case PRIV_B9_CLP:
        r = kvm_clp_service_call(cpu, run);
        break;
    case PRIV_B9_PCISTG:
        r = kvm_pcistg_service_call(cpu, run);
        break;
    case PRIV_B9_PCILG:
        r = kvm_pcilg_service_call(cpu, run);
        break;
    case PRIV_B9_RPCIT:
        r = kvm_rpcit_service_call(cpu, run);
        break;
    case PRIV_B9_EQBS:
        /* just inject exception */
        r = -1;
        break;
    default:
        r = -1;
        DPRINTF("KVM: unhandled PRIV: 0xb9%x\n", ipa1);
        break;
    }

    return r;
}

static int handle_eb(S390CPU *cpu, struct kvm_run *run, uint8_t ipbl)
{
    int r = 0;

    switch (ipbl) {
    case PRIV_EB_PCISTB:
        r = kvm_pcistb_service_call(cpu, run);
        break;
    case PRIV_EB_SIC:
        r = kvm_sic_service_call(cpu, run);
        break;
    case PRIV_EB_SQBS:
        /* just inject exception */
        r = -1;
        break;
    default:
        r = -1;
        DPRINTF("KVM: unhandled PRIV: 0xeb%x\n", ipbl);
        break;
    }

    return r;
}

static int handle_e3(S390CPU *cpu, struct kvm_run *run, uint8_t ipbl)
{
    int r = 0;

    switch (ipbl) {
    case PRIV_E3_MPCIFC:
        r = kvm_mpcifc_service_call(cpu, run);
        break;
    case PRIV_E3_STPCIFC:
        r = kvm_stpcifc_service_call(cpu, run);
        break;
    default:
        r = -1;
        DPRINTF("KVM: unhandled PRIV: 0xe3%x\n", ipbl);
        break;
    }

    return r;
}

static int handle_hypercall(S390CPU *cpu, struct kvm_run *run)
{
    CPUS390XState *env = &cpu->env;
    int ret;

    cpu_synchronize_state(CPU(cpu));
    ret = s390_virtio_hypercall(env);
    if (ret == -EINVAL) {
        enter_pgmcheck(cpu, PGM_SPECIFICATION);
        ret = 0;
    }

    return ret;
}

static void kvm_handle_diag_308(S390CPU *cpu, struct kvm_run *run)
{
    uint64_t r1, r3;

    cpu_synchronize_state(CPU(cpu));
    r1 = (run->s390_sieic.ipa & 0x00f0) >> 4;
    r3 = run->s390_sieic.ipa & 0x000f;
    handle_diag_308(&cpu->env, r1, r3);
}

static int handle_sw_breakpoint(S390CPU *cpu, struct kvm_run *run)
{
    CPUS390XState *env = &cpu->env;
    unsigned long pc;

    cpu_synchronize_state(CPU(cpu));

    pc = env->psw.addr - 4;
    if (kvm_find_sw_breakpoint(CPU(cpu), pc)) {
        env->psw.addr = pc;
        return EXCP_DEBUG;
    }

    return -ENOENT;
}

#define DIAG_KVM_CODE_MASK 0x000000000000ffff

static int handle_diag(S390CPU *cpu, struct kvm_run *run, uint32_t ipb)
{
    int r = 0;
    uint16_t func_code;

    /*
     * For any diagnose call we support, bits 48-63 of the resulting
     * address specify the function code; the remainder is ignored.
     */
    func_code = decode_basedisp_rs(&cpu->env, ipb) & DIAG_KVM_CODE_MASK;
    switch (func_code) {
    case DIAG_IPL:
        kvm_handle_diag_308(cpu, run);
        break;
    case DIAG_KVM_HYPERCALL:
        r = handle_hypercall(cpu, run);
        break;
    case DIAG_KVM_BREAKPOINT:
        r = handle_sw_breakpoint(cpu, run);
        break;
    default:
        DPRINTF("KVM: unknown DIAG: 0x%x\n", func_code);
        enter_pgmcheck(cpu, PGM_SPECIFICATION);
        break;
    }

    return r;
}

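/*
 * SIGP handling: the issuing vcpu fills a SigpInfo structure and runs the
 * order-specific helper on the destination vcpu via run_on_cpu(); the helper
 * reports its result through si->cc and, for stored status, *si->status_reg.
 */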
typedef struct SigpInfo {
    S390CPU *cpu;
    uint64_t param;
    int cc;
    uint64_t *status_reg;
} SigpInfo;

static void set_sigp_status(SigpInfo *si, uint64_t status)
{
    *si->status_reg &= 0xffffffff00000000ULL;
    *si->status_reg |= status;
    si->cc = SIGP_CC_STATUS_STORED;
}

static void sigp_start(void *arg)
{
    SigpInfo *si = arg;

    if (s390_cpu_get_state(si->cpu) != CPU_STATE_STOPPED) {
        si->cc = SIGP_CC_ORDER_CODE_ACCEPTED;
        return;
    }

    s390_cpu_set_state(CPU_STATE_OPERATING, si->cpu);
    si->cc = SIGP_CC_ORDER_CODE_ACCEPTED;
}

static void sigp_stop(void *arg)
{
    SigpInfo *si = arg;
    struct kvm_s390_irq irq = {
        .type = KVM_S390_SIGP_STOP,
    };

    if (s390_cpu_get_state(si->cpu) != CPU_STATE_OPERATING) {
        si->cc = SIGP_CC_ORDER_CODE_ACCEPTED;
        return;
    }

    /* disabled wait - sleeping in user space */
    if (CPU(si->cpu)->halted) {
        s390_cpu_set_state(CPU_STATE_STOPPED, si->cpu);
    } else {
        /* execute the stop function */
        si->cpu->env.sigp_order = SIGP_STOP;
        kvm_s390_vcpu_interrupt(si->cpu, &irq);
    }
    si->cc = SIGP_CC_ORDER_CODE_ACCEPTED;
}

#define KVM_S390_STORE_STATUS_DEF_ADDR offsetof(LowCore, floating_pt_save_area)
#define SAVE_AREA_SIZE 512
static int kvm_s390_store_status(S390CPU *cpu, hwaddr addr, bool store_arch)
{
    static const uint8_t ar_id = 1;
    uint64_t ckc = cpu->env.ckc >> 8;
    void *mem;
    hwaddr len = SAVE_AREA_SIZE;

    mem = cpu_physical_memory_map(addr, &len, 1);
    if (!mem) {
        return -EFAULT;
    }
    if (len != SAVE_AREA_SIZE) {
        cpu_physical_memory_unmap(mem, len, 1, 0);
        return -EFAULT;
    }

    if (store_arch) {
        cpu_physical_memory_write(offsetof(LowCore, ar_access_id), &ar_id, 1);
    }
    memcpy(mem, &cpu->env.fregs, 128);
    memcpy(mem + 128, &cpu->env.regs, 128);
    memcpy(mem + 256, &cpu->env.psw, 16);
    memcpy(mem + 280, &cpu->env.psa, 4);
    memcpy(mem + 284, &cpu->env.fpc, 4);
    memcpy(mem + 292, &cpu->env.todpr, 4);
    memcpy(mem + 296, &cpu->env.cputm, 8);
    memcpy(mem + 304, &ckc, 8);
    memcpy(mem + 320, &cpu->env.aregs, 64);
    memcpy(mem + 384, &cpu->env.cregs, 128);

    cpu_physical_memory_unmap(mem, len, 1, len);

    return 0;
}

static void sigp_stop_and_store_status(void *arg)
{
    SigpInfo *si = arg;
    struct kvm_s390_irq irq = {
        .type = KVM_S390_SIGP_STOP,
    };

    /* disabled wait - sleeping in user space */
    if (s390_cpu_get_state(si->cpu) == CPU_STATE_OPERATING &&
        CPU(si->cpu)->halted) {
        s390_cpu_set_state(CPU_STATE_STOPPED, si->cpu);
    }

    switch (s390_cpu_get_state(si->cpu)) {
    case CPU_STATE_OPERATING:
        si->cpu->env.sigp_order = SIGP_STOP_STORE_STATUS;
        kvm_s390_vcpu_interrupt(si->cpu, &irq);
        /* store will be performed when handling the stop intercept */
        break;
    case CPU_STATE_STOPPED:
        /* already stopped, just store the status */
        cpu_synchronize_state(CPU(si->cpu));
        kvm_s390_store_status(si->cpu, KVM_S390_STORE_STATUS_DEF_ADDR, true);
        break;
    }
    si->cc = SIGP_CC_ORDER_CODE_ACCEPTED;
}

static void sigp_store_status_at_address(void *arg)
{
    SigpInfo *si = arg;
    uint32_t address = si->param & 0x7ffffe00u;

    /* cpu has to be stopped */
    if (s390_cpu_get_state(si->cpu) != CPU_STATE_STOPPED) {
        set_sigp_status(si, SIGP_STAT_INCORRECT_STATE);
        return;
    }

    cpu_synchronize_state(CPU(si->cpu));

    if (kvm_s390_store_status(si->cpu, address, false)) {
        set_sigp_status(si, SIGP_STAT_INVALID_PARAMETER);
        return;
    }

    si->cc = SIGP_CC_ORDER_CODE_ACCEPTED;
}

static void sigp_restart(void *arg)
{
    SigpInfo *si = arg;
    struct kvm_s390_irq irq = {
        .type = KVM_S390_RESTART,
    };

    switch (s390_cpu_get_state(si->cpu)) {
    case CPU_STATE_STOPPED:
        /* the restart irq has to be delivered prior to any other pending irq */
        cpu_synchronize_state(CPU(si->cpu));
        do_restart_interrupt(&si->cpu->env);
        s390_cpu_set_state(CPU_STATE_OPERATING, si->cpu);
        break;
    case CPU_STATE_OPERATING:
        kvm_s390_vcpu_interrupt(si->cpu, &irq);
        break;
    }
    si->cc = SIGP_CC_ORDER_CODE_ACCEPTED;
}

int kvm_s390_cpu_restart(S390CPU *cpu)
{
    SigpInfo si = {
        .cpu = cpu,
    };

    run_on_cpu(CPU(cpu), sigp_restart, &si);
    DPRINTF("DONE: KVM cpu restart: %p\n", &cpu->env);
    return 0;
}

static void sigp_initial_cpu_reset(void *arg)
{
    SigpInfo *si = arg;
    CPUState *cs = CPU(si->cpu);
    S390CPUClass *scc = S390_CPU_GET_CLASS(si->cpu);

    cpu_synchronize_state(cs);
    scc->initial_cpu_reset(cs);
    cpu_synchronize_post_reset(cs);
    si->cc = SIGP_CC_ORDER_CODE_ACCEPTED;
}

static void sigp_cpu_reset(void *arg)
{
    SigpInfo *si = arg;
    CPUState *cs = CPU(si->cpu);
    S390CPUClass *scc = S390_CPU_GET_CLASS(si->cpu);

    cpu_synchronize_state(cs);
    scc->cpu_reset(cs);
    cpu_synchronize_post_reset(cs);
    si->cc = SIGP_CC_ORDER_CODE_ACCEPTED;
}

static void sigp_set_prefix(void *arg)
{
    SigpInfo *si = arg;
    uint32_t addr = si->param & 0x7fffe000u;

    cpu_synchronize_state(CPU(si->cpu));

    if (!address_space_access_valid(&address_space_memory, addr,
                                    sizeof(struct LowCore), false)) {
        set_sigp_status(si, SIGP_STAT_INVALID_PARAMETER);
        return;
    }

    /* cpu has to be stopped */
    if (s390_cpu_get_state(si->cpu) != CPU_STATE_STOPPED) {
        set_sigp_status(si, SIGP_STAT_INCORRECT_STATE);
        return;
    }

    si->cpu->env.psa = addr;
    cpu_synchronize_post_init(CPU(si->cpu));
    si->cc = SIGP_CC_ORDER_CODE_ACCEPTED;
}

static int handle_sigp_single_dst(S390CPU *dst_cpu, uint8_t order,
                                  uint64_t param, uint64_t *status_reg)
{
    SigpInfo si = {
        .cpu = dst_cpu,
        .param = param,
        .status_reg = status_reg,
    };

    /* cpu available? */
    if (dst_cpu == NULL) {
        return SIGP_CC_NOT_OPERATIONAL;
    }

    /* only resets can break pending orders */
    if (dst_cpu->env.sigp_order != 0 &&
        order != SIGP_CPU_RESET &&
        order != SIGP_INITIAL_CPU_RESET) {
        return SIGP_CC_BUSY;
    }

    switch (order) {
    case SIGP_START:
        run_on_cpu(CPU(dst_cpu), sigp_start, &si);
        break;
    case SIGP_STOP:
        run_on_cpu(CPU(dst_cpu), sigp_stop, &si);
        break;
    case SIGP_RESTART:
        run_on_cpu(CPU(dst_cpu), sigp_restart, &si);
        break;
    case SIGP_STOP_STORE_STATUS:
        run_on_cpu(CPU(dst_cpu), sigp_stop_and_store_status, &si);
        break;
    case SIGP_STORE_STATUS_ADDR:
        run_on_cpu(CPU(dst_cpu), sigp_store_status_at_address, &si);
        break;
    case SIGP_SET_PREFIX:
        run_on_cpu(CPU(dst_cpu), sigp_set_prefix, &si);
        break;
    case SIGP_INITIAL_CPU_RESET:
        run_on_cpu(CPU(dst_cpu), sigp_initial_cpu_reset, &si);
        break;
    case SIGP_CPU_RESET:
        run_on_cpu(CPU(dst_cpu), sigp_cpu_reset, &si);
        break;
    default:
        DPRINTF("KVM: unknown SIGP: 0x%x\n", order);
        set_sigp_status(&si, SIGP_STAT_INVALID_ORDER);
        break;
    }

    return si.cc;
}

static int sigp_set_architecture(S390CPU *cpu, uint32_t param,
                                 uint64_t *status_reg)
{
    CPUState *cur_cs;
    S390CPU *cur_cpu;

    /* due to the BQL, we are the only active cpu */
    CPU_FOREACH(cur_cs) {
        cur_cpu = S390_CPU(cur_cs);
        if (cur_cpu->env.sigp_order != 0) {
            return SIGP_CC_BUSY;
        }
        cpu_synchronize_state(cur_cs);
        /* all but the current one have to be stopped */
        if (cur_cpu != cpu &&
            s390_cpu_get_state(cur_cpu) != CPU_STATE_STOPPED) {
            *status_reg &= 0xffffffff00000000ULL;
            *status_reg |= SIGP_STAT_INCORRECT_STATE;
            return SIGP_CC_STATUS_STORED;
        }
    }

    switch (param & 0xff) {
    case SIGP_MODE_ESA_S390:
        /* not supported */
        return SIGP_CC_NOT_OPERATIONAL;
    case SIGP_MODE_Z_ARCH_TRANS_ALL_PSW:
    case SIGP_MODE_Z_ARCH_TRANS_CUR_PSW:
        CPU_FOREACH(cur_cs) {
            cur_cpu = S390_CPU(cur_cs);
            cur_cpu->env.pfault_token = -1UL;
        }
        break;
    default:
        *status_reg &= 0xffffffff00000000ULL;
        *status_reg |= SIGP_STAT_INVALID_PARAMETER;
        return SIGP_CC_STATUS_STORED;
    }

    return SIGP_CC_ORDER_CODE_ACCEPTED;
}

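/*
 * handle_sigp() decodes the order code from the instruction's base/
 * displacement field, fetches the parameter from the odd register of the r1
 * pair (r1 itself when r1 is odd), then routes SET ARCHITECTURE orders to the
 * global handler above and everything else to a single destination vcpu.
 */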
#define SIGP_ORDER_MASK 0x000000ff

static int handle_sigp(S390CPU *cpu, struct kvm_run *run, uint8_t ipa1)
{
    CPUS390XState *env = &cpu->env;
    const uint8_t r1 = ipa1 >> 4;
    const uint8_t r3 = ipa1 & 0x0f;
    int ret;
    uint8_t order;
    uint64_t *status_reg;
    uint64_t param;
    S390CPU *dst_cpu = NULL;

    cpu_synchronize_state(CPU(cpu));

    /* get order code */
    order = decode_basedisp_rs(env, run->s390_sieic.ipb) & SIGP_ORDER_MASK;
    status_reg = &env->regs[r1];
    param = (r1 % 2) ? env->regs[r1] : env->regs[r1 + 1];

    switch (order) {
    case SIGP_SET_ARCH:
        ret = sigp_set_architecture(cpu, param, status_reg);
        break;
    default:
        /* all other sigp orders target a single vcpu */
        dst_cpu = s390_cpu_addr2state(env->regs[r3]);
        ret = handle_sigp_single_dst(dst_cpu, order, param, status_reg);
    }

    trace_kvm_sigp_finished(order, CPU(cpu)->cpu_index,
                            dst_cpu ? CPU(dst_cpu)->cpu_index : -1, ret);

    if (ret >= 0) {
        setcc(cpu, ret);
        return 0;
    }

    return ret;
}

static int handle_instruction(S390CPU *cpu, struct kvm_run *run)
{
    unsigned int ipa0 = (run->s390_sieic.ipa & 0xff00);
    uint8_t ipa1 = run->s390_sieic.ipa & 0x00ff;
    int r = -1;

    DPRINTF("handle_instruction 0x%x 0x%x\n",
            run->s390_sieic.ipa, run->s390_sieic.ipb);
    switch (ipa0) {
    case IPA0_B2:
        r = handle_b2(cpu, run, ipa1);
        break;
    case IPA0_B9:
        r = handle_b9(cpu, run, ipa1);
        break;
    case IPA0_EB:
        r = handle_eb(cpu, run, run->s390_sieic.ipb & 0xff);
        break;
    case IPA0_E3:
        r = handle_e3(cpu, run, run->s390_sieic.ipb & 0xff);
        break;
    case IPA0_DIAG:
        r = handle_diag(cpu, run, run->s390_sieic.ipb);
        break;
    case IPA0_SIGP:
        r = handle_sigp(cpu, run, ipa1);
        break;
    }

    if (r < 0) {
        r = 0;
        enter_pgmcheck(cpu, 0x0001);
    }

    return r;
}

*cs
)
1593 /* signal quiesce */
1594 return cs
->kvm_run
->psw_addr
== 0xfffUL
;
1597 static void guest_panicked(void)
1599 qapi_event_send_guest_panicked(GUEST_PANIC_ACTION_PAUSE
,
1601 vm_stop(RUN_STATE_GUEST_PANICKED
);
1604 static void unmanageable_intercept(S390CPU
*cpu
, const char *str
, int pswoffset
)
1606 CPUState
*cs
= CPU(cpu
);
1608 error_report("Unmanageable %s! CPU%i new PSW: 0x%016lx:%016lx",
1609 str
, cs
->cpu_index
, ldq_phys(cs
->as
, cpu
->env
.psa
+ pswoffset
),
1610 ldq_phys(cs
->as
, cpu
->env
.psa
+ pswoffset
+ 8));
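/*
 * handle_intercept() is the central SIE exit handler: instruction intercepts
 * go to handle_instruction(), unmanageable program/external interrupt
 * intercepts panic the guest, and wait-PSW/stop intercepts drive the CPU's
 * halted/stopped state machine.
 */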
static int handle_intercept(S390CPU *cpu)
{
    CPUState *cs = CPU(cpu);
    struct kvm_run *run = cs->kvm_run;
    int icpt_code = run->s390_sieic.icptcode;
    int r = 0;

    DPRINTF("intercept: 0x%x (at 0x%lx)\n", icpt_code,
            (long)cs->kvm_run->psw_addr);
    switch (icpt_code) {
    case ICPT_INSTRUCTION:
        r = handle_instruction(cpu, run);
        break;
    case ICPT_PROGRAM:
        unmanageable_intercept(cpu, "program interrupt",
                               offsetof(LowCore, program_new_psw));
        r = EXCP_HALTED;
        break;
    case ICPT_EXT_INT:
        unmanageable_intercept(cpu, "external interrupt",
                               offsetof(LowCore, external_new_psw));
        r = EXCP_HALTED;
        break;
    case ICPT_WAITPSW:
        /* disabled wait, since enabled wait is handled in kernel */
        cpu_synchronize_state(cs);
        if (s390_cpu_halt(cpu) == 0) {
            if (is_special_wait_psw(cs)) {
                qemu_system_shutdown_request();
            } else {
                guest_panicked();
            }
        }
        r = EXCP_HALTED;
        break;
    case ICPT_CPU_STOP:
        if (s390_cpu_set_state(CPU_STATE_STOPPED, cpu) == 0) {
            qemu_system_shutdown_request();
        }
        if (cpu->env.sigp_order == SIGP_STOP_STORE_STATUS) {
            kvm_s390_store_status(cpu, KVM_S390_STORE_STATUS_DEF_ADDR,
                                  true);
        }
        cpu->env.sigp_order = 0;
        r = EXCP_HALTED;
        break;
    case ICPT_SOFT_INTERCEPT:
        fprintf(stderr, "KVM unimplemented icpt SOFT\n");
        exit(1);
        break;
    case ICPT_IO:
        fprintf(stderr, "KVM unimplemented icpt IO\n");
        exit(1);
        break;
    default:
        fprintf(stderr, "Unknown intercept code: %d\n", icpt_code);
        exit(1);
        break;
    }

    return r;
}

*cpu
)
1680 CPUState
*cs
= CPU(cpu
);
1681 struct kvm_run
*run
= cs
->kvm_run
;
1684 cpu_synchronize_state(cs
);
1686 ret
= ioinst_handle_tsch(cpu
, cpu
->env
.regs
[1], run
->s390_tsch
.ipb
);
1690 * If an I/O interrupt had been dequeued, we have to reinject it.
1692 if (run
->s390_tsch
.dequeued
) {
1693 kvm_s390_io_interrupt(run
->s390_tsch
.subchannel_id
,
1694 run
->s390_tsch
.subchannel_nr
,
1695 run
->s390_tsch
.io_int_parm
,
1696 run
->s390_tsch
.io_int_word
);
1703 static int kvm_arch_handle_debug_exit(S390CPU
*cpu
)
1705 CPUState
*cs
= CPU(cpu
);
1706 struct kvm_run
*run
= cs
->kvm_run
;
1709 struct kvm_debug_exit_arch
*arch_info
= &run
->debug
.arch
;
1711 switch (arch_info
->type
) {
1712 case KVM_HW_WP_WRITE
:
1713 if (find_hw_breakpoint(arch_info
->addr
, -1, arch_info
->type
)) {
1714 cs
->watchpoint_hit
= &hw_watchpoint
;
1715 hw_watchpoint
.vaddr
= arch_info
->addr
;
1716 hw_watchpoint
.flags
= BP_MEM_WRITE
;
1721 if (find_hw_breakpoint(arch_info
->addr
, -1, arch_info
->type
)) {
1725 case KVM_SINGLESTEP
:
1726 if (cs
->singlestep_enabled
) {
int kvm_arch_handle_exit(CPUState *cs, struct kvm_run *run)
{
    S390CPU *cpu = S390_CPU(cs);
    int ret = 0;

    switch (run->exit_reason) {
    case KVM_EXIT_S390_SIEIC:
        ret = handle_intercept(cpu);
        break;
    case KVM_EXIT_S390_RESET:
        s390_reipl_request();
        break;
    case KVM_EXIT_S390_TSCH:
        ret = handle_tsch(cpu);
        break;
    case KVM_EXIT_DEBUG:
        ret = kvm_arch_handle_debug_exit(cpu);
        break;
    default:
        fprintf(stderr, "Unknown KVM exit: %d\n", run->exit_reason);
        break;
    }

    if (ret == 0) {
        ret = EXCP_INTERRUPT;
    }
    return ret;
}

*cpu
)
1771 int kvm_arch_on_sigbus_vcpu(CPUState
*cpu
, int code
, void *addr
)
1776 int kvm_arch_on_sigbus(int code
, void *addr
)
void kvm_s390_io_interrupt(uint16_t subchannel_id,
                           uint16_t subchannel_nr, uint32_t io_int_parm,
                           uint32_t io_int_word)
{
    struct kvm_s390_irq irq = {
        .u.io.subchannel_id = subchannel_id,
        .u.io.subchannel_nr = subchannel_nr,
        .u.io.io_int_parm = io_int_parm,
        .u.io.io_int_word = io_int_word,
    };

    if (io_int_word & IO_INT_WORD_AI) {
        irq.type = KVM_S390_INT_IO(1, 0, 0, 0);
    } else {
        irq.type = ((subchannel_id & 0xff00) << 24) |
                   ((subchannel_id & 0x00060) << 22) | (subchannel_nr << 16);
    }
    kvm_s390_floating_interrupt(&irq);
}

void kvm_s390_crw_mchk(void)
{
    struct kvm_s390_irq irq = {
        .type = KVM_S390_MCHK,
        .u.mchk.cr14 = 1 << 28,
        .u.mchk.mcic = 0x00400f1d40330000ULL,
    };
    kvm_s390_floating_interrupt(&irq);
}

void kvm_s390_enable_css_support(S390CPU *cpu)
{
    int r;

    /* Activate host kernel channel subsystem support. */
    r = kvm_vcpu_enable_cap(CPU(cpu), KVM_CAP_S390_CSS_SUPPORT, 0);
    assert(r == 0);
}

void kvm_arch_init_irq_routing(KVMState *s)
{
    /*
     * Note that while irqchip capabilities generally imply that cpustates
     * are handled in-kernel, it is not true for s390 (yet); therefore, we
     * have to override the common code kvm_halt_in_kernel_allowed setting.
     */
    if (kvm_check_extension(s, KVM_CAP_IRQ_ROUTING)) {
        kvm_gsi_routing_allowed = true;
        kvm_halt_in_kernel_allowed = false;
    }
}

*notifier
, uint32_t sch
,
1834 int vq
, bool assign
)
1836 struct kvm_ioeventfd kick
= {
1837 .flags
= KVM_IOEVENTFD_FLAG_VIRTIO_CCW_NOTIFY
|
1838 KVM_IOEVENTFD_FLAG_DATAMATCH
,
1839 .fd
= event_notifier_get_fd(notifier
),
1844 if (!kvm_check_extension(kvm_state
, KVM_CAP_IOEVENTFD
)) {
1848 kick
.flags
|= KVM_IOEVENTFD_FLAG_DEASSIGN
;
1850 return kvm_vm_ioctl(kvm_state
, KVM_IOEVENTFD
, &kick
);
1853 int kvm_s390_get_memslot_count(KVMState
*s
)
1855 return kvm_check_extension(s
, KVM_CAP_NR_MEMSLOTS
);
1858 int kvm_s390_set_cpu_state(S390CPU
*cpu
, uint8_t cpu_state
)
1860 struct kvm_mp_state mp_state
= {};
1863 /* the kvm part might not have been initialized yet */
1864 if (CPU(cpu
)->kvm_state
== NULL
) {
1868 switch (cpu_state
) {
1869 case CPU_STATE_STOPPED
:
1870 mp_state
.mp_state
= KVM_MP_STATE_STOPPED
;
1872 case CPU_STATE_CHECK_STOP
:
1873 mp_state
.mp_state
= KVM_MP_STATE_CHECK_STOP
;
1875 case CPU_STATE_OPERATING
:
1876 mp_state
.mp_state
= KVM_MP_STATE_OPERATING
;
1878 case CPU_STATE_LOAD
:
1879 mp_state
.mp_state
= KVM_MP_STATE_LOAD
;
1882 error_report("Requested CPU state is not a valid S390 CPU state: %u",
1887 ret
= kvm_vcpu_ioctl(CPU(cpu
), KVM_SET_MP_STATE
, &mp_state
);
1889 trace_kvm_failed_cpu_state_set(CPU(cpu
)->cpu_index
, cpu_state
,
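/*
 * For zPCI MSI routing the MSI data word encodes the function id in its upper
 * bits and the vector number in the lower ZPCI_MSI_VEC_BITS; the fixup below
 * translates that into an adapter interrupt route using the device's
 * registered summary and indicator addresses.
 */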
int kvm_arch_fixup_msi_route(struct kvm_irq_routing_entry *route,
                             uint64_t address, uint32_t data)
{
    S390PCIBusDevice *pbdev;
    uint32_t fid = data >> ZPCI_MSI_VEC_BITS;
    uint32_t vec = data & ZPCI_MSI_VEC_MASK;

    pbdev = s390_pci_find_dev_by_fid(fid);
    if (!pbdev) {
        DPRINTF("add_msi_route no dev\n");
        return -ENODEV;
    }

    pbdev->routes.adapter.ind_offset = vec;

    route->type = KVM_IRQ_ROUTING_S390_ADAPTER;
    route->flags = 0;
    route->u.adapter.summary_addr = pbdev->routes.adapter.summary_addr;
    route->u.adapter.ind_addr = pbdev->routes.adapter.ind_addr;
    route->u.adapter.summary_offset = pbdev->routes.adapter.summary_offset;
    route->u.adapter.ind_offset = pbdev->routes.adapter.ind_offset;
    route->u.adapter.adapter_id = pbdev->routes.adapter.adapter_id;
    return 0;
}