/*
 * s390x exception / interrupt helpers
 *
 * Copyright (c) 2009 Ulrich Hecht
 * Copyright (c) 2011 Alexander Graf
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
21 #include "qemu/osdep.h"
24 #include "s390x-internal.h"
25 #include "exec/helper-proto.h"
26 #include "qemu/timer.h"
27 #include "exec/exec-all.h"
28 #include "exec/cpu_ldst.h"
29 #include "hw/s390x/ioinst.h"
30 #include "exec/address-spaces.h"
31 #include "tcg_s390x.h"
32 #ifndef CONFIG_USER_ONLY
33 #include "hw/s390x/s390_flic.h"
34 #include "hw/boards.h"
37 void QEMU_NORETURN
tcg_s390_program_interrupt(CPUS390XState
*env
,
38 uint32_t code
, uintptr_t ra
)
40 CPUState
*cs
= env_cpu(env
);
42 cpu_restore_state(cs
, ra
, true);
43 qemu_log_mask(CPU_LOG_INT
, "program interrupt at %#" PRIx64
"\n",
45 trigger_pgm_exception(env
, code
);
49 void QEMU_NORETURN
tcg_s390_data_exception(CPUS390XState
*env
, uint32_t dxc
,
52 g_assert(dxc
<= 0xff);
53 #if !defined(CONFIG_USER_ONLY)
54 /* Store the DXC into the lowcore */
55 stl_phys(env_cpu(env
)->as
,
56 env
->psa
+ offsetof(LowCore
, data_exc_code
), dxc
);
59 /* Store the DXC into the FPC if AFP is enabled */
60 if (env
->cregs
[0] & CR0_AFP
) {
61 env
->fpc
= deposit32(env
->fpc
, 8, 8, dxc
);
63 tcg_s390_program_interrupt(env
, PGM_DATA
, ra
);
66 void QEMU_NORETURN
tcg_s390_vector_exception(CPUS390XState
*env
, uint32_t vxc
,
69 g_assert(vxc
<= 0xff);
70 #if !defined(CONFIG_USER_ONLY)
71 /* Always store the VXC into the lowcore, without AFP it is undefined */
72 stl_phys(env_cpu(env
)->as
,
73 env
->psa
+ offsetof(LowCore
, data_exc_code
), vxc
);
76 /* Always store the VXC into the FPC, without AFP it is undefined */
77 env
->fpc
= deposit32(env
->fpc
, 8, 8, vxc
);
78 tcg_s390_program_interrupt(env
, PGM_VECTOR_PROCESSING
, ra
);
81 void HELPER(data_exception
)(CPUS390XState
*env
, uint32_t dxc
)
83 tcg_s390_data_exception(env
, dxc
, GETPC());
87 * Unaligned accesses are only diagnosed with MO_ALIGN. At the moment,
88 * this is only for the atomic operations, for which we want to raise a
89 * specification exception.
91 static void QEMU_NORETURN
do_unaligned_access(CPUState
*cs
, uintptr_t retaddr
)
93 S390CPU
*cpu
= S390_CPU(cs
);
94 CPUS390XState
*env
= &cpu
->env
;
96 tcg_s390_program_interrupt(env
, PGM_SPECIFICATION
, retaddr
);
99 #if defined(CONFIG_USER_ONLY)
101 void s390_cpu_do_interrupt(CPUState
*cs
)
103 cs
->exception_index
= -1;
106 void s390_cpu_record_sigsegv(CPUState
*cs
, vaddr address
,
107 MMUAccessType access_type
,
108 bool maperr
, uintptr_t retaddr
)
110 S390CPU
*cpu
= S390_CPU(cs
);
112 trigger_pgm_exception(&cpu
->env
, maperr
? PGM_ADDRESSING
: PGM_PROTECTION
);
114 * On real machines this value is dropped into LowMem. Since this
115 * is userland, simply put this someplace that cpu_loop can find it.
116 * S390 only gives the page of the fault, not the exact address.
117 * C.f. the construction of TEC in mmu_translate().
119 cpu
->env
.__excp_addr
= address
& TARGET_PAGE_MASK
;
120 cpu_loop_exit_restore(cs
, retaddr
);
123 void s390_cpu_record_sigbus(CPUState
*cs
, vaddr address
,
124 MMUAccessType access_type
, uintptr_t retaddr
)
126 do_unaligned_access(cs
, retaddr
);
129 #else /* !CONFIG_USER_ONLY */
131 static inline uint64_t cpu_mmu_idx_to_asc(int mmu_idx
)
134 case MMU_PRIMARY_IDX
:
135 return PSW_ASC_PRIMARY
;
136 case MMU_SECONDARY_IDX
:
137 return PSW_ASC_SECONDARY
;
145 bool s390_cpu_tlb_fill(CPUState
*cs
, vaddr address
, int size
,
146 MMUAccessType access_type
, int mmu_idx
,
147 bool probe
, uintptr_t retaddr
)
149 S390CPU
*cpu
= S390_CPU(cs
);
150 CPUS390XState
*env
= &cpu
->env
;
151 target_ulong vaddr
, raddr
;
155 qemu_log_mask(CPU_LOG_MMU
, "%s: addr 0x%" VADDR_PRIx
" rw %d mmu_idx %d\n",
156 __func__
, address
, access_type
, mmu_idx
);
160 if (mmu_idx
< MMU_REAL_IDX
) {
161 asc
= cpu_mmu_idx_to_asc(mmu_idx
);
163 if (!(env
->psw
.mask
& PSW_MASK_64
)) {
166 excp
= mmu_translate(env
, vaddr
, access_type
, asc
, &raddr
, &prot
, &tec
);
167 } else if (mmu_idx
== MMU_REAL_IDX
) {
169 if (!(env
->psw
.mask
& PSW_MASK_64
)) {
172 excp
= mmu_translate_real(env
, vaddr
, access_type
, &raddr
, &prot
, &tec
);
174 g_assert_not_reached();
177 env
->tlb_fill_exc
= excp
;
178 env
->tlb_fill_tec
= tec
;
181 qemu_log_mask(CPU_LOG_MMU
,
182 "%s: set tlb %" PRIx64
" -> %" PRIx64
" (%x)\n",
183 __func__
, (uint64_t)vaddr
, (uint64_t)raddr
, prot
);
184 tlb_set_page(cs
, address
& TARGET_PAGE_MASK
, raddr
, prot
,
185 mmu_idx
, TARGET_PAGE_SIZE
);
192 if (excp
!= PGM_ADDRESSING
) {
193 stq_phys(env_cpu(env
)->as
,
194 env
->psa
+ offsetof(LowCore
, trans_exc_code
), tec
);
198 * For data accesses, ILEN will be filled in from the unwind info,
199 * within cpu_loop_exit_restore. For code accesses, retaddr == 0,
200 * and so unwinding will not occur. However, ILEN is also undefined
201 * for that case -- we choose to set ILEN = 2.
203 env
->int_pgm_ilen
= 2;
204 trigger_pgm_exception(env
, excp
);
205 cpu_loop_exit_restore(cs
, retaddr
);
208 static void do_program_interrupt(CPUS390XState
*env
)
212 int ilen
= env
->int_pgm_ilen
;
214 assert(ilen
== 2 || ilen
== 4 || ilen
== 6);
216 switch (env
->int_pgm_code
) {
218 if (env
->per_perc_atmid
& PER_CODE_EVENT_NULLIFICATION
) {
227 case PGM_SPECIFICATION
:
229 case PGM_FIXPT_OVERFLOW
:
230 case PGM_FIXPT_DIVIDE
:
231 case PGM_DEC_OVERFLOW
:
233 case PGM_HFP_EXP_OVERFLOW
:
234 case PGM_HFP_EXP_UNDERFLOW
:
235 case PGM_HFP_SIGNIFICANCE
:
241 case PGM_PC_TRANS_SPEC
:
244 /* advance the PSW if our exception is not nullifying */
245 env
->psw
.addr
+= ilen
;
249 qemu_log_mask(CPU_LOG_INT
,
250 "%s: code=0x%x ilen=%d psw: %" PRIx64
" %" PRIx64
"\n",
251 __func__
, env
->int_pgm_code
, ilen
, env
->psw
.mask
,
254 lowcore
= cpu_map_lowcore(env
);
256 /* Signal PER events with the exception. */
257 if (env
->per_perc_atmid
) {
258 env
->int_pgm_code
|= PGM_PER
;
259 lowcore
->per_address
= cpu_to_be64(env
->per_address
);
260 lowcore
->per_perc_atmid
= cpu_to_be16(env
->per_perc_atmid
);
261 env
->per_perc_atmid
= 0;
264 lowcore
->pgm_ilen
= cpu_to_be16(ilen
);
265 lowcore
->pgm_code
= cpu_to_be16(env
->int_pgm_code
);
266 lowcore
->program_old_psw
.mask
= cpu_to_be64(s390_cpu_get_psw_mask(env
));
267 lowcore
->program_old_psw
.addr
= cpu_to_be64(env
->psw
.addr
);
268 mask
= be64_to_cpu(lowcore
->program_new_psw
.mask
);
269 addr
= be64_to_cpu(lowcore
->program_new_psw
.addr
);
270 lowcore
->per_breaking_event_addr
= cpu_to_be64(env
->gbea
);
272 cpu_unmap_lowcore(lowcore
);
274 s390_cpu_set_psw(env
, mask
, addr
);
277 static void do_svc_interrupt(CPUS390XState
*env
)
282 lowcore
= cpu_map_lowcore(env
);
284 lowcore
->svc_code
= cpu_to_be16(env
->int_svc_code
);
285 lowcore
->svc_ilen
= cpu_to_be16(env
->int_svc_ilen
);
286 lowcore
->svc_old_psw
.mask
= cpu_to_be64(s390_cpu_get_psw_mask(env
));
287 lowcore
->svc_old_psw
.addr
= cpu_to_be64(env
->psw
.addr
+ env
->int_svc_ilen
);
288 mask
= be64_to_cpu(lowcore
->svc_new_psw
.mask
);
289 addr
= be64_to_cpu(lowcore
->svc_new_psw
.addr
);
291 cpu_unmap_lowcore(lowcore
);
293 s390_cpu_set_psw(env
, mask
, addr
);
295 /* When a PER event is pending, the PER exception has to happen
296 immediately after the SERVICE CALL one. */
297 if (env
->per_perc_atmid
) {
298 env
->int_pgm_code
= PGM_PER
;
299 env
->int_pgm_ilen
= env
->int_svc_ilen
;
300 do_program_interrupt(env
);
304 #define VIRTIO_SUBCODE_64 0x0D00
306 static void do_ext_interrupt(CPUS390XState
*env
)
308 QEMUS390FLICState
*flic
= QEMU_S390_FLIC(s390_get_flic());
309 S390CPU
*cpu
= env_archcpu(env
);
314 if (!(env
->psw
.mask
& PSW_MASK_EXT
)) {
315 cpu_abort(CPU(cpu
), "Ext int w/o ext mask\n");
318 lowcore
= cpu_map_lowcore(env
);
320 if ((env
->pending_int
& INTERRUPT_EMERGENCY_SIGNAL
) &&
321 (env
->cregs
[0] & CR0_EMERGENCY_SIGNAL_SC
)) {
322 MachineState
*ms
= MACHINE(qdev_get_machine());
323 unsigned int max_cpus
= ms
->smp
.max_cpus
;
325 lowcore
->ext_int_code
= cpu_to_be16(EXT_EMERGENCY
);
326 cpu_addr
= find_first_bit(env
->emergency_signals
, S390_MAX_CPUS
);
327 g_assert(cpu_addr
< S390_MAX_CPUS
);
328 lowcore
->cpu_addr
= cpu_to_be16(cpu_addr
);
329 clear_bit(cpu_addr
, env
->emergency_signals
);
330 if (bitmap_empty(env
->emergency_signals
, max_cpus
)) {
331 env
->pending_int
&= ~INTERRUPT_EMERGENCY_SIGNAL
;
333 } else if ((env
->pending_int
& INTERRUPT_EXTERNAL_CALL
) &&
334 (env
->cregs
[0] & CR0_EXTERNAL_CALL_SC
)) {
335 lowcore
->ext_int_code
= cpu_to_be16(EXT_EXTERNAL_CALL
);
336 lowcore
->cpu_addr
= cpu_to_be16(env
->external_call_addr
);
337 env
->pending_int
&= ~INTERRUPT_EXTERNAL_CALL
;
338 } else if ((env
->pending_int
& INTERRUPT_EXT_CLOCK_COMPARATOR
) &&
339 (env
->cregs
[0] & CR0_CKC_SC
)) {
340 lowcore
->ext_int_code
= cpu_to_be16(EXT_CLOCK_COMP
);
341 lowcore
->cpu_addr
= 0;
342 env
->pending_int
&= ~INTERRUPT_EXT_CLOCK_COMPARATOR
;
343 } else if ((env
->pending_int
& INTERRUPT_EXT_CPU_TIMER
) &&
344 (env
->cregs
[0] & CR0_CPU_TIMER_SC
)) {
345 lowcore
->ext_int_code
= cpu_to_be16(EXT_CPU_TIMER
);
346 lowcore
->cpu_addr
= 0;
347 env
->pending_int
&= ~INTERRUPT_EXT_CPU_TIMER
;
348 } else if (qemu_s390_flic_has_service(flic
) &&
349 (env
->cregs
[0] & CR0_SERVICE_SC
)) {
352 param
= qemu_s390_flic_dequeue_service(flic
);
353 lowcore
->ext_int_code
= cpu_to_be16(EXT_SERVICE
);
354 lowcore
->ext_params
= cpu_to_be32(param
);
355 lowcore
->cpu_addr
= 0;
357 g_assert_not_reached();
360 mask
= be64_to_cpu(lowcore
->external_new_psw
.mask
);
361 addr
= be64_to_cpu(lowcore
->external_new_psw
.addr
);
362 lowcore
->external_old_psw
.mask
= cpu_to_be64(s390_cpu_get_psw_mask(env
));
363 lowcore
->external_old_psw
.addr
= cpu_to_be64(env
->psw
.addr
);
365 cpu_unmap_lowcore(lowcore
);
367 s390_cpu_set_psw(env
, mask
, addr
);
370 static void do_io_interrupt(CPUS390XState
*env
)
372 QEMUS390FLICState
*flic
= QEMU_S390_FLIC(s390_get_flic());
377 g_assert(env
->psw
.mask
& PSW_MASK_IO
);
378 io
= qemu_s390_flic_dequeue_io(flic
, env
->cregs
[6]);
381 lowcore
= cpu_map_lowcore(env
);
383 lowcore
->subchannel_id
= cpu_to_be16(io
->id
);
384 lowcore
->subchannel_nr
= cpu_to_be16(io
->nr
);
385 lowcore
->io_int_parm
= cpu_to_be32(io
->parm
);
386 lowcore
->io_int_word
= cpu_to_be32(io
->word
);
387 lowcore
->io_old_psw
.mask
= cpu_to_be64(s390_cpu_get_psw_mask(env
));
388 lowcore
->io_old_psw
.addr
= cpu_to_be64(env
->psw
.addr
);
389 mask
= be64_to_cpu(lowcore
->io_new_psw
.mask
);
390 addr
= be64_to_cpu(lowcore
->io_new_psw
.addr
);
392 cpu_unmap_lowcore(lowcore
);
395 s390_cpu_set_psw(env
, mask
, addr
);
398 typedef struct MchkExtSaveArea
{
399 uint64_t vregs
[32][2]; /* 0x0000 */
400 uint8_t pad_0x0200
[0x0400 - 0x0200]; /* 0x0200 */
402 QEMU_BUILD_BUG_ON(sizeof(MchkExtSaveArea
) != 1024);
404 static int mchk_store_vregs(CPUS390XState
*env
, uint64_t mcesao
)
406 hwaddr len
= sizeof(MchkExtSaveArea
);
410 sa
= cpu_physical_memory_map(mcesao
, &len
, true);
414 if (len
!= sizeof(MchkExtSaveArea
)) {
415 cpu_physical_memory_unmap(sa
, len
, 1, 0);
419 for (i
= 0; i
< 32; i
++) {
420 sa
->vregs
[i
][0] = cpu_to_be64(env
->vregs
[i
][0]);
421 sa
->vregs
[i
][1] = cpu_to_be64(env
->vregs
[i
][1]);
424 cpu_physical_memory_unmap(sa
, len
, 1, len
);
428 static void do_mchk_interrupt(CPUS390XState
*env
)
430 QEMUS390FLICState
*flic
= QEMU_S390_FLIC(s390_get_flic());
431 uint64_t mcic
= s390_build_validity_mcic() | MCIC_SC_CP
;
432 uint64_t mask
, addr
, mcesao
= 0;
436 /* for now we only support channel report machine checks (floating) */
437 g_assert(env
->psw
.mask
& PSW_MASK_MCHECK
);
438 g_assert(env
->cregs
[14] & CR14_CHANNEL_REPORT_SC
);
440 qemu_s390_flic_dequeue_crw_mchk(flic
);
442 lowcore
= cpu_map_lowcore(env
);
444 /* extended save area */
445 if (mcic
& MCIC_VB_VR
) {
446 /* length and alignment is 1024 bytes */
447 mcesao
= be64_to_cpu(lowcore
->mcesad
) & ~0x3ffull
;
450 /* try to store vector registers */
451 if (!mcesao
|| mchk_store_vregs(env
, mcesao
)) {
455 /* we are always in z/Architecture mode */
456 lowcore
->ar_access_id
= 1;
458 for (i
= 0; i
< 16; i
++) {
459 lowcore
->floating_pt_save_area
[i
] = cpu_to_be64(*get_freg(env
, i
));
460 lowcore
->gpregs_save_area
[i
] = cpu_to_be64(env
->regs
[i
]);
461 lowcore
->access_regs_save_area
[i
] = cpu_to_be32(env
->aregs
[i
]);
462 lowcore
->cregs_save_area
[i
] = cpu_to_be64(env
->cregs
[i
]);
464 lowcore
->prefixreg_save_area
= cpu_to_be32(env
->psa
);
465 lowcore
->fpt_creg_save_area
= cpu_to_be32(env
->fpc
);
466 lowcore
->tod_progreg_save_area
= cpu_to_be32(env
->todpr
);
467 lowcore
->cpu_timer_save_area
= cpu_to_be64(env
->cputm
);
468 lowcore
->clock_comp_save_area
= cpu_to_be64(env
->ckc
>> 8);
470 lowcore
->mcic
= cpu_to_be64(mcic
);
471 lowcore
->mcck_old_psw
.mask
= cpu_to_be64(s390_cpu_get_psw_mask(env
));
472 lowcore
->mcck_old_psw
.addr
= cpu_to_be64(env
->psw
.addr
);
473 mask
= be64_to_cpu(lowcore
->mcck_new_psw
.mask
);
474 addr
= be64_to_cpu(lowcore
->mcck_new_psw
.addr
);
476 cpu_unmap_lowcore(lowcore
);
478 s390_cpu_set_psw(env
, mask
, addr
);
481 void s390_cpu_do_interrupt(CPUState
*cs
)
483 QEMUS390FLICState
*flic
= QEMU_S390_FLIC(s390_get_flic());
484 S390CPU
*cpu
= S390_CPU(cs
);
485 CPUS390XState
*env
= &cpu
->env
;
486 bool stopped
= false;
488 qemu_log_mask(CPU_LOG_INT
, "%s: %d at psw=%" PRIx64
":%" PRIx64
"\n",
489 __func__
, cs
->exception_index
, env
->psw
.mask
, env
->psw
.addr
);
492 /* handle machine checks */
493 if (cs
->exception_index
== -1 && s390_cpu_has_mcck_int(cpu
)) {
494 cs
->exception_index
= EXCP_MCHK
;
496 /* handle external interrupts */
497 if (cs
->exception_index
== -1 && s390_cpu_has_ext_int(cpu
)) {
498 cs
->exception_index
= EXCP_EXT
;
500 /* handle I/O interrupts */
501 if (cs
->exception_index
== -1 && s390_cpu_has_io_int(cpu
)) {
502 cs
->exception_index
= EXCP_IO
;
504 /* RESTART interrupt */
505 if (cs
->exception_index
== -1 && s390_cpu_has_restart_int(cpu
)) {
506 cs
->exception_index
= EXCP_RESTART
;
508 /* STOP interrupt has least priority */
509 if (cs
->exception_index
== -1 && s390_cpu_has_stop_int(cpu
)) {
510 cs
->exception_index
= EXCP_STOP
;
513 switch (cs
->exception_index
) {
515 do_program_interrupt(env
);
518 do_svc_interrupt(env
);
521 do_ext_interrupt(env
);
524 do_io_interrupt(env
);
527 do_mchk_interrupt(env
);
530 do_restart_interrupt(env
);
533 do_stop_interrupt(env
);
538 if (cs
->exception_index
!= -1 && !stopped
) {
539 /* check if there are more pending interrupts to deliver */
540 cs
->exception_index
= -1;
543 cs
->exception_index
= -1;
545 /* we might still have pending interrupts, but not deliverable */
546 if (!env
->pending_int
&& !qemu_s390_flic_has_any(flic
)) {
547 cs
->interrupt_request
&= ~CPU_INTERRUPT_HARD
;
550 /* WAIT PSW during interrupt injection or STOP interrupt */
551 if ((env
->psw
.mask
& PSW_MASK_WAIT
) || stopped
) {
552 /* don't trigger a cpu_loop_exit(), use an interrupt instead */
553 cpu_interrupt(CPU(cpu
), CPU_INTERRUPT_HALT
);
554 } else if (cs
->halted
) {
555 /* unhalt if we had a WAIT PSW somehwere in our injection chain */
556 s390_cpu_unhalt(cpu
);
560 bool s390_cpu_exec_interrupt(CPUState
*cs
, int interrupt_request
)
562 if (interrupt_request
& CPU_INTERRUPT_HARD
) {
563 S390CPU
*cpu
= S390_CPU(cs
);
564 CPUS390XState
*env
= &cpu
->env
;
567 /* Execution of the target insn is indivisible from
568 the parent EXECUTE insn. */
571 if (s390_cpu_has_int(cpu
)) {
572 s390_cpu_do_interrupt(cs
);
575 if (env
->psw
.mask
& PSW_MASK_WAIT
) {
576 /* Woken up because of a floating interrupt but it has already
577 * been delivered. Go back to sleep. */
578 cpu_interrupt(CPU(cpu
), CPU_INTERRUPT_HALT
);
584 void s390x_cpu_debug_excp_handler(CPUState
*cs
)
586 S390CPU
*cpu
= S390_CPU(cs
);
587 CPUS390XState
*env
= &cpu
->env
;
588 CPUWatchpoint
*wp_hit
= cs
->watchpoint_hit
;
590 if (wp_hit
&& wp_hit
->flags
& BP_CPU
) {
591 /* FIXME: When the storage-alteration-space control bit is set,
592 the exception should only be triggered if the memory access
593 is done using an address space with the storage-alteration-event
594 bit set. We have no way to detect that with the current
596 cs
->watchpoint_hit
= NULL
;
598 env
->per_address
= env
->psw
.addr
;
599 env
->per_perc_atmid
|= PER_CODE_EVENT_STORE
| get_per_atmid(env
);
600 /* FIXME: We currently no way to detect the address space used
601 to trigger the watchpoint. For now just consider it is the
602 current default ASC. This turn to be true except when MVCP
603 and MVCS instrutions are not used. */
604 env
->per_perc_atmid
|= env
->psw
.mask
& (PSW_MASK_ASC
) >> 46;
607 * Remove all watchpoints to re-execute the code. A PER exception
608 * will be triggered, it will call s390_cpu_set_psw which will
609 * recompute the watchpoints.
611 cpu_watchpoint_remove_all(cs
, BP_CPU
);
612 cpu_loop_exit_noexc(cs
);
616 void s390x_cpu_do_unaligned_access(CPUState
*cs
, vaddr addr
,
617 MMUAccessType access_type
,
618 int mmu_idx
, uintptr_t retaddr
)
620 do_unaligned_access(cs
, retaddr
);
623 static void QEMU_NORETURN
monitor_event(CPUS390XState
*env
,
624 uint64_t monitor_code
,
625 uint8_t monitor_class
, uintptr_t ra
)
627 /* Store the Monitor Code and the Monitor Class Number into the lowcore */
628 stq_phys(env_cpu(env
)->as
,
629 env
->psa
+ offsetof(LowCore
, monitor_code
), monitor_code
);
630 stw_phys(env_cpu(env
)->as
,
631 env
->psa
+ offsetof(LowCore
, mon_class_num
), monitor_class
);
633 tcg_s390_program_interrupt(env
, PGM_MONITOR
, ra
);
636 void HELPER(monitor_call
)(CPUS390XState
*env
, uint64_t monitor_code
,
637 uint32_t monitor_class
)
639 g_assert(monitor_class
<= 0xff);
641 if (env
->cregs
[8] & (0x8000 >> monitor_class
)) {
642 monitor_event(env
, monitor_code
, monitor_class
, GETPC());
646 #endif /* !CONFIG_USER_ONLY */