/*
 * s390x exception / interrupt helpers
 *
 *  Copyright (c) 2009 Ulrich Hecht
 *  Copyright (c) 2011 Alexander Graf
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
21 #include "qemu/osdep.h"
24 #include "exec/helper-proto.h"
25 #include "qemu/timer.h"
26 #include "exec/exec-all.h"
27 #include "exec/cpu_ldst.h"
28 #include "hw/s390x/ioinst.h"
29 #include "exec/address-spaces.h"
30 #include "tcg_s390x.h"
31 #ifndef CONFIG_USER_ONLY
32 #include "sysemu/sysemu.h"
33 #include "hw/s390x/s390_flic.h"
34 #include "hw/boards.h"
/*
 * Raise a program interrupt with interruption @code from TCG-generated
 * code and leave the CPU loop.  @ra is the host return address used by
 * cpu_restore_state() to unwind the guest state (PSW address etc.) to
 * the faulting instruction before the exception is delivered.
 * Never returns.
 */
void QEMU_NORETURN tcg_s390_program_interrupt(CPUS390XState *env,
                                              uint32_t code, uintptr_t ra)
{
    CPUState *cs = env_cpu(env);

    /* Unwind first so the logged PSW address is the faulting insn.  */
    cpu_restore_state(cs, ra, true);
    qemu_log_mask(CPU_LOG_INT, "program interrupt at %#" PRIx64 "\n",
                  env->psw.addr);
    trigger_pgm_exception(env, code, ILEN_UNWIND);
    cpu_loop_exit(cs);
}
/*
 * Raise a data exception (PGM_DATA) with data-exception code @dxc.
 * The DXC is made visible both in the lowcore and, when AFP is enabled,
 * in the FPC register, as the architecture requires.  Never returns.
 */
void QEMU_NORETURN tcg_s390_data_exception(CPUS390XState *env, uint32_t dxc,
                                           uintptr_t ra)
{
    g_assert(dxc <= 0xff);
#if !defined(CONFIG_USER_ONLY)
    /* Store the DXC into the lowcore */
    stl_phys(env_cpu(env)->as,
             env->psa + offsetof(LowCore, data_exc_code), dxc);
#endif

    /* Store the DXC into the FPC if AFP is enabled */
    if (env->cregs[0] & CR0_AFP) {
        env->fpc = deposit32(env->fpc, 8, 8, dxc);
    }
    tcg_s390_program_interrupt(env, PGM_DATA, ra);
}
/*
 * Raise a vector-processing exception (PGM_VECTOR_PROCESSING) with
 * vector-exception code @vxc.  Unlike the DXC case, the VXC is stored
 * unconditionally; without AFP its value is architecturally undefined.
 * Never returns.
 */
void QEMU_NORETURN tcg_s390_vector_exception(CPUS390XState *env, uint32_t vxc,
                                             uintptr_t ra)
{
    g_assert(vxc <= 0xff);
#if !defined(CONFIG_USER_ONLY)
    /* Always store the VXC into the lowcore, without AFP it is undefined */
    stl_phys(env_cpu(env)->as,
             env->psa + offsetof(LowCore, data_exc_code), vxc);
#endif

    /* Always store the VXC into the FPC, without AFP it is undefined */
    env->fpc = deposit32(env->fpc, 8, 8, vxc);
    tcg_s390_program_interrupt(env, PGM_VECTOR_PROCESSING, ra);
}
/* TCG helper: raise a data exception from translated code, using the
 * helper's own return address for state unwinding.  */
void HELPER(data_exception)(CPUS390XState *env, uint32_t dxc)
{
    tcg_s390_data_exception(env, dxc, GETPC());
}
#if defined(CONFIG_USER_ONLY)

/* User-only emulation: there is no interrupt to deliver, simply clear
 * the pending exception index.  */
void s390_cpu_do_interrupt(CPUState *cs)
{
    cs->exception_index = -1;
}
/*
 * User-only TLB fill: every fault is an addressing exception.  Records
 * the faulting address where cpu_loop() can find it and exits the CPU
 * loop via cpu_loop_exit_restore() (does not return).
 */
bool s390_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
                       MMUAccessType access_type, int mmu_idx,
                       bool probe, uintptr_t retaddr)
{
    S390CPU *cpu = S390_CPU(cs);

    trigger_pgm_exception(&cpu->env, PGM_ADDRESSING, ILEN_UNWIND);
    /* On real machines this value is dropped into LowMem.  Since this
       is userland, simply put this someplace that cpu_loop can find it.  */
    cpu->env.__excp_addr = address;
    cpu_loop_exit_restore(cs, retaddr);
}
#else /* !CONFIG_USER_ONLY */

/* Map a QEMU MMU index onto the PSW address-space-control value that
 * selects the corresponding translation regime.  */
static inline uint64_t cpu_mmu_idx_to_asc(int mmu_idx)
{
    switch (mmu_idx) {
    case MMU_PRIMARY_IDX:
        return PSW_ASC_PRIMARY;
    case MMU_SECONDARY_IDX:
        return PSW_ASC_SECONDARY;
    case MMU_HOME_IDX:
        return PSW_ASC_HOME;
    default:
        abort();
    }
}
/*
 * System-mode TLB fill.  Translates @address via the DAT tables (for
 * virtual MMU indexes) or via real-address translation (MMU_REAL_IDX),
 * installs the mapping into the TLB on success, and otherwise delivers
 * a program exception.  Returns true on success, false when @probe is
 * set and the translation failed; on a non-probe failure it exits the
 * CPU loop and does not return.
 */
bool s390_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
                       MMUAccessType access_type, int mmu_idx,
                       bool probe, uintptr_t retaddr)
{
    S390CPU *cpu = S390_CPU(cs);
    CPUS390XState *env = &cpu->env;
    target_ulong vaddr, raddr;
    uint64_t asc, tec;
    int prot, excp;

    qemu_log_mask(CPU_LOG_MMU, "%s: addr 0x%" VADDR_PRIx " rw %d mmu_idx %d\n",
                  __func__, address, access_type, mmu_idx);

    vaddr = address;

    if (mmu_idx < MMU_REAL_IDX) {
        asc = cpu_mmu_idx_to_asc(mmu_idx);
        /* 31-Bit mode: only the low 31 address bits participate.  */
        if (!(env->psw.mask & PSW_MASK_64)) {
            vaddr &= 0x7fffffff;
        }
        excp = mmu_translate(env, vaddr, access_type, asc, &raddr, &prot, &tec);
    } else if (mmu_idx == MMU_REAL_IDX) {
        /* 31-Bit mode */
        if (!(env->psw.mask & PSW_MASK_64)) {
            vaddr &= 0x7fffffff;
        }
        excp = mmu_translate_real(env, vaddr, access_type, &raddr, &prot, &tec);
    } else {
        g_assert_not_reached();
    }

    /* check out of RAM access */
    if (!excp &&
        !address_space_access_valid(&address_space_memory, raddr,
                                    TARGET_PAGE_SIZE, access_type,
                                    MEMTXATTRS_UNSPECIFIED)) {
        qemu_log_mask(CPU_LOG_MMU,
                      "%s: raddr %" PRIx64 " > ram_size %" PRIx64 "\n",
                      __func__, (uint64_t)raddr, (uint64_t)ram_size);
        excp = PGM_ADDRESSING;
        tec = 0; /* unused */
    }

    if (!excp) {
        qemu_log_mask(CPU_LOG_MMU,
                      "%s: set tlb %" PRIx64 " -> %" PRIx64 " (%x)\n",
                      __func__, (uint64_t)vaddr, (uint64_t)raddr, prot);
        tlb_set_page(cs, address & TARGET_PAGE_MASK, raddr, prot,
                     mmu_idx, TARGET_PAGE_SIZE);
        return true;
    }
    if (probe) {
        return false;
    }

    /* PGM_ADDRESSING carries no translation-exception code.  */
    if (excp != PGM_ADDRESSING) {
        stq_phys(env_cpu(env)->as,
                 env->psa + offsetof(LowCore, trans_exc_code), tec);
    }

    /*
     * For data accesses, ILEN will be filled in from the unwind info,
     * within cpu_loop_exit_restore.  For code accesses, retaddr == 0,
     * and so unwinding will not occur.  However, ILEN is also undefined
     * for that case -- we choose to set ILEN = 2.
     */
    trigger_pgm_exception(env, excp, 2);
    cpu_loop_exit_restore(cs, retaddr);
}
/*
 * Deliver a pending program interrupt: advance the PSW past the
 * faulting instruction for non-nullifying exceptions, save the old PSW
 * and interruption information in the lowcore, and load the
 * program-new PSW.
 */
static void do_program_interrupt(CPUS390XState *env)
{
    uint64_t mask, addr;
    LowCore *lowcore;
    int ilen = env->int_pgm_ilen;

    /* ILEN_AUTO: derive the length from the instruction opcode.  */
    if (ilen == ILEN_AUTO) {
        ilen = get_ilen(cpu_ldub_code(env, env->psw.addr));
    }
    assert(ilen == 2 || ilen == 4 || ilen == 6);

    switch (env->int_pgm_code) {
    case PGM_PER:
        if (env->per_perc_atmid & PER_CODE_EVENT_NULLIFICATION) {
            break;
        }
        /* FALL THROUGH */
    case PGM_OPERATION:
    case PGM_PRIVILEGED:
    case PGM_EXECUTE:
    case PGM_PROTECTION:
    case PGM_ADDRESSING:
    case PGM_SPECIFICATION:
    case PGM_DATA:
    case PGM_FIXPT_OVERFLOW:
    case PGM_FIXPT_DIVIDE:
    case PGM_DEC_OVERFLOW:
    case PGM_DEC_DIVIDE:
    case PGM_HFP_EXP_OVERFLOW:
    case PGM_HFP_EXP_UNDERFLOW:
    case PGM_HFP_SIGNIFICANCE:
    case PGM_HFP_DIVIDE:
    case PGM_TRANS_SPEC:
    case PGM_SPECIAL_OP:
    case PGM_OPERAND:
    case PGM_HFP_SQRT:
    case PGM_PC_TRANS_SPEC:
    case PGM_ALET_SPEC:
    case PGM_MONITOR:
        /* advance the PSW if our exception is not nullifying */
        env->psw.addr += ilen;
        break;
    }

    qemu_log_mask(CPU_LOG_INT,
                  "%s: code=0x%x ilen=%d psw: %" PRIx64 " %" PRIx64 "\n",
                  __func__, env->int_pgm_code, ilen, env->psw.mask,
                  env->psw.addr);

    lowcore = cpu_map_lowcore(env);

    /* Signal PER events with the exception.  */
    if (env->per_perc_atmid) {
        env->int_pgm_code |= PGM_PER;
        lowcore->per_address = cpu_to_be64(env->per_address);
        lowcore->per_perc_atmid = cpu_to_be16(env->per_perc_atmid);
        env->per_perc_atmid = 0;
    }

    lowcore->pgm_ilen = cpu_to_be16(ilen);
    lowcore->pgm_code = cpu_to_be16(env->int_pgm_code);
    lowcore->program_old_psw.mask = cpu_to_be64(get_psw_mask(env));
    lowcore->program_old_psw.addr = cpu_to_be64(env->psw.addr);
    mask = be64_to_cpu(lowcore->program_new_psw.mask);
    addr = be64_to_cpu(lowcore->program_new_psw.addr);
    lowcore->per_breaking_event_addr = cpu_to_be64(env->gbea);

    cpu_unmap_lowcore(lowcore);

    load_psw(env, mask, addr);
}
/*
 * Deliver a SUPERVISOR CALL interrupt: store the SVC code/ilen and old
 * PSW (pointing past the SVC instruction) into the lowcore and load the
 * svc-new PSW.  A pending PER event is delivered immediately afterwards.
 */
static void do_svc_interrupt(CPUS390XState *env)
{
    uint64_t mask, addr;
    LowCore *lowcore;

    lowcore = cpu_map_lowcore(env);

    lowcore->svc_code = cpu_to_be16(env->int_svc_code);
    lowcore->svc_ilen = cpu_to_be16(env->int_svc_ilen);
    lowcore->svc_old_psw.mask = cpu_to_be64(get_psw_mask(env));
    /* The old PSW address points past the SVC instruction.  */
    lowcore->svc_old_psw.addr = cpu_to_be64(env->psw.addr + env->int_svc_ilen);
    mask = be64_to_cpu(lowcore->svc_new_psw.mask);
    addr = be64_to_cpu(lowcore->svc_new_psw.addr);

    cpu_unmap_lowcore(lowcore);

    load_psw(env, mask, addr);

    /* When a PER event is pending, the PER exception has to happen
       immediately after the SERVICE CALL one.  */
    if (env->per_perc_atmid) {
        env->int_pgm_code = PGM_PER;
        env->int_pgm_ilen = env->int_svc_ilen;
        do_program_interrupt(env);
    }
}
#define VIRTIO_SUBCODE_64 0x0D00

/*
 * Deliver the highest-priority pending external interrupt (emergency
 * signal, external call, clock comparator, CPU timer, or service
 * signal), subject to the CR0 subclass masks, then load the
 * external-new PSW.  Must only be called with PSW_MASK_EXT set.
 */
static void do_ext_interrupt(CPUS390XState *env)
{
    QEMUS390FLICState *flic = QEMU_S390_FLIC(s390_get_flic());
    S390CPU *cpu = env_archcpu(env);
    uint64_t mask, addr;
    uint16_t cpu_addr;
    LowCore *lowcore;

    if (!(env->psw.mask & PSW_MASK_EXT)) {
        cpu_abort(CPU(cpu), "Ext int w/o ext mask\n");
    }

    lowcore = cpu_map_lowcore(env);

    if ((env->pending_int & INTERRUPT_EMERGENCY_SIGNAL) &&
        (env->cregs[0] & CR0_EMERGENCY_SIGNAL_SC)) {
        lowcore->ext_int_code = cpu_to_be16(EXT_EMERGENCY);
        /* Deliver one pending emergency signal per invocation.  */
        cpu_addr = find_first_bit(env->emergency_signals, S390_MAX_CPUS);
        g_assert(cpu_addr < S390_MAX_CPUS);
        lowcore->cpu_addr = cpu_to_be16(cpu_addr);
        clear_bit(cpu_addr, env->emergency_signals);
        /* NOTE(review): this whole section is already compiled only when
         * !CONFIG_USER_ONLY (see the #else above), so this inner guard
         * is redundant -- confirm before removing.  */
#ifndef CONFIG_USER_ONLY
        MachineState *ms = MACHINE(qdev_get_machine());
        unsigned int max_cpus = ms->smp.max_cpus;
#endif
        /* Only drop the pending bit once all signals are delivered.  */
        if (bitmap_empty(env->emergency_signals, max_cpus)) {
            env->pending_int &= ~INTERRUPT_EMERGENCY_SIGNAL;
        }
    } else if ((env->pending_int & INTERRUPT_EXTERNAL_CALL) &&
               (env->cregs[0] & CR0_EXTERNAL_CALL_SC)) {
        lowcore->ext_int_code = cpu_to_be16(EXT_EXTERNAL_CALL);
        lowcore->cpu_addr = cpu_to_be16(env->external_call_addr);
        env->pending_int &= ~INTERRUPT_EXTERNAL_CALL;
    } else if ((env->pending_int & INTERRUPT_EXT_CLOCK_COMPARATOR) &&
               (env->cregs[0] & CR0_CKC_SC)) {
        lowcore->ext_int_code = cpu_to_be16(EXT_CLOCK_COMP);
        lowcore->cpu_addr = 0;
        env->pending_int &= ~INTERRUPT_EXT_CLOCK_COMPARATOR;
    } else if ((env->pending_int & INTERRUPT_EXT_CPU_TIMER) &&
               (env->cregs[0] & CR0_CPU_TIMER_SC)) {
        lowcore->ext_int_code = cpu_to_be16(EXT_CPU_TIMER);
        lowcore->cpu_addr = 0;
        env->pending_int &= ~INTERRUPT_EXT_CPU_TIMER;
    } else if (qemu_s390_flic_has_service(flic) &&
               (env->cregs[0] & CR0_SERVICE_SC)) {
        uint32_t param;

        param = qemu_s390_flic_dequeue_service(flic);
        lowcore->ext_int_code = cpu_to_be16(EXT_SERVICE);
        lowcore->ext_params = cpu_to_be32(param);
        lowcore->cpu_addr = 0;
    } else {
        /* Caller guarantees a deliverable external interrupt exists.  */
        g_assert_not_reached();
    }

    mask = be64_to_cpu(lowcore->external_new_psw.mask);
    addr = be64_to_cpu(lowcore->external_new_psw.addr);
    lowcore->external_old_psw.mask = cpu_to_be64(get_psw_mask(env));
    lowcore->external_old_psw.addr = cpu_to_be64(env->psw.addr);

    cpu_unmap_lowcore(lowcore);

    load_psw(env, mask, addr);
}
/*
 * Deliver a pending I/O interrupt: dequeue the highest-priority
 * enabled I/O interrupt from the FLIC (filtered by CR6), store the
 * subchannel information and old PSW in the lowcore, and load the
 * io-new PSW.  Must only be called with PSW_MASK_IO set.
 */
static void do_io_interrupt(CPUS390XState *env)
{
    QEMUS390FLICState *flic = QEMU_S390_FLIC(s390_get_flic());
    uint64_t mask, addr;
    QEMUS390FlicIO *io;
    LowCore *lowcore;

    g_assert(env->psw.mask & PSW_MASK_IO);
    io = qemu_s390_flic_dequeue_io(flic, env->cregs[6]);
    g_assert(io);

    lowcore = cpu_map_lowcore(env);

    lowcore->subchannel_id = cpu_to_be16(io->id);
    lowcore->subchannel_nr = cpu_to_be16(io->nr);
    lowcore->io_int_parm = cpu_to_be32(io->parm);
    lowcore->io_int_word = cpu_to_be32(io->word);
    lowcore->io_old_psw.mask = cpu_to_be64(get_psw_mask(env));
    lowcore->io_old_psw.addr = cpu_to_be64(env->psw.addr);
    mask = be64_to_cpu(lowcore->io_new_psw.mask);
    addr = be64_to_cpu(lowcore->io_new_psw.addr);

    cpu_unmap_lowcore(lowcore);
    /* The FLIC transfers ownership of the dequeued entry to us.  */
    g_free(io);

    load_psw(env, mask, addr);
}
/* Machine-check extended save area: vector registers at offset 0,
 * padded to the architected 1 KiB size.  */
typedef struct MchkExtSaveArea {
    uint64_t    vregs[32][2];                     /* 0x0000 */
    uint8_t     pad_0x0200[0x0400 - 0x0200];      /* 0x0200 */
} MchkExtSaveArea;
QEMU_BUILD_BUG_ON(sizeof(MchkExtSaveArea) != 1024);
/*
 * Store the 32 vector registers (big-endian) into the guest's
 * machine-check extended save area at physical address @mcesao.
 * Returns 0 on success, -EFAULT if the area cannot be mapped in full.
 */
static int mchk_store_vregs(CPUS390XState *env, uint64_t mcesao)
{
    hwaddr len = sizeof(MchkExtSaveArea);
    MchkExtSaveArea *sa;
    int i;

    sa = cpu_physical_memory_map(mcesao, &len, 1);
    if (!sa) {
        return -EFAULT;
    }
    /* A partial mapping is useless: unmap without marking it dirty.  */
    if (len != sizeof(MchkExtSaveArea)) {
        cpu_physical_memory_unmap(sa, len, 1, 0);
        return -EFAULT;
    }

    for (i = 0; i < 32; i++) {
        sa->vregs[i][0] = cpu_to_be64(env->vregs[i][0]);
        sa->vregs[i][1] = cpu_to_be64(env->vregs[i][1]);
    }

    cpu_physical_memory_unmap(sa, len, 1, len);
    return 0;
}
/*
 * Deliver a (channel-report) machine-check interrupt: save the
 * architected register state and the machine-check interruption code
 * (MCIC) into the lowcore, optionally store the vector registers into
 * the extended save area, and load the mcck-new PSW.
 */
static void do_mchk_interrupt(CPUS390XState *env)
{
    QEMUS390FLICState *flic = QEMU_S390_FLIC(s390_get_flic());
    uint64_t mcic = s390_build_validity_mcic() | MCIC_SC_CP;
    uint64_t mask, addr, mcesao = 0;
    LowCore *lowcore;
    int i;

    /* for now we only support channel report machine checks (floating) */
    g_assert(env->psw.mask & PSW_MASK_MCHECK);
    g_assert(env->cregs[14] & CR14_CHANNEL_REPORT_SC);

    qemu_s390_flic_dequeue_crw_mchk(flic);

    lowcore = cpu_map_lowcore(env);

    /* extended save area */
    if (mcic & MCIC_VB_VR) {
        /* length and alignment is 1024 bytes */
        mcesao = be64_to_cpu(lowcore->mcesad) & ~0x3ffull;
    }

    /* try to store vector registers; on failure report them invalid */
    if (!mcesao || mchk_store_vregs(env, mcesao)) {
        mcic &= ~MCIC_VB_VR;
    }

    /* we are always in z/Architecture mode */
    lowcore->ar_access_id = 1;

    for (i = 0; i < 16; i++) {
        lowcore->floating_pt_save_area[i] = cpu_to_be64(*get_freg(env, i));
        lowcore->gpregs_save_area[i] = cpu_to_be64(env->regs[i]);
        lowcore->access_regs_save_area[i] = cpu_to_be32(env->aregs[i]);
        lowcore->cregs_save_area[i] = cpu_to_be64(env->cregs[i]);
    }
    lowcore->prefixreg_save_area = cpu_to_be32(env->psa);
    lowcore->fpt_creg_save_area = cpu_to_be32(env->fpc);
    lowcore->tod_progreg_save_area = cpu_to_be32(env->todpr);
    lowcore->cpu_timer_save_area = cpu_to_be64(env->cputm);
    lowcore->clock_comp_save_area = cpu_to_be64(env->ckc >> 8);

    lowcore->mcic = cpu_to_be64(mcic);
    lowcore->mcck_old_psw.mask = cpu_to_be64(get_psw_mask(env));
    lowcore->mcck_old_psw.addr = cpu_to_be64(env->psw.addr);
    mask = be64_to_cpu(lowcore->mcck_new_psw.mask);
    addr = be64_to_cpu(lowcore->mcck_new_psw.addr);

    cpu_unmap_lowcore(lowcore);

    load_psw(env, mask, addr);
}
/*
 * System-mode interrupt delivery.  Picks the highest-priority pending
 * and deliverable interrupt class (mchk > ext > I/O > restart > stop),
 * delivers it, and keeps looping while further interrupts are
 * deliverable.  Finally adjusts halt state according to the resulting
 * PSW wait bit.
 */
void s390_cpu_do_interrupt(CPUState *cs)
{
    QEMUS390FLICState *flic = QEMU_S390_FLIC(s390_get_flic());
    S390CPU *cpu = S390_CPU(cs);
    CPUS390XState *env = &cpu->env;
    bool stopped = false;

    qemu_log_mask(CPU_LOG_INT, "%s: %d at psw=%" PRIx64 ":%" PRIx64 "\n",
                  __func__, cs->exception_index, env->psw.mask, env->psw.addr);

try_deliver:
    /* handle machine checks */
    if (cs->exception_index == -1 && s390_cpu_has_mcck_int(cpu)) {
        cs->exception_index = EXCP_MCHK;
    }
    /* handle external interrupts */
    if (cs->exception_index == -1 && s390_cpu_has_ext_int(cpu)) {
        cs->exception_index = EXCP_EXT;
    }
    /* handle I/O interrupts */
    if (cs->exception_index == -1 && s390_cpu_has_io_int(cpu)) {
        cs->exception_index = EXCP_IO;
    }
    /* RESTART interrupt */
    if (cs->exception_index == -1 && s390_cpu_has_restart_int(cpu)) {
        cs->exception_index = EXCP_RESTART;
    }
    /* STOP interrupt has least priority */
    if (cs->exception_index == -1 && s390_cpu_has_stop_int(cpu)) {
        cs->exception_index = EXCP_STOP;
    }

    switch (cs->exception_index) {
    case EXCP_PGM:
        do_program_interrupt(env);
        break;
    case EXCP_SVC:
        do_svc_interrupt(env);
        break;
    case EXCP_EXT:
        do_ext_interrupt(env);
        break;
    case EXCP_IO:
        do_io_interrupt(env);
        break;
    case EXCP_MCHK:
        do_mchk_interrupt(env);
        break;
    case EXCP_RESTART:
        do_restart_interrupt(env);
        break;
    case EXCP_STOP:
        do_stop_interrupt(env);
        stopped = true;
        break;
    }

    if (cs->exception_index != -1 && !stopped) {
        /* check if there are more pending interrupts to deliver */
        cs->exception_index = -1;
        goto try_deliver;
    }
    cs->exception_index = -1;

    /* we might still have pending interrupts, but not deliverable */
    if (!env->pending_int && !qemu_s390_flic_has_any(flic)) {
        cs->interrupt_request &= ~CPU_INTERRUPT_HARD;
    }

    /* WAIT PSW during interrupt injection or STOP interrupt */
    if ((env->psw.mask & PSW_MASK_WAIT) || stopped) {
        /* don't trigger a cpu_loop_exit(), use an interrupt instead */
        cpu_interrupt(CPU(cpu), CPU_INTERRUPT_HALT);
    } else if (cs->halted) {
        /* unhalt if we had a WAIT PSW somehwere in our injection chain */
        s390_cpu_unhalt(cpu);
    }
}
/*
 * TCG hook: attempt to take a pending hard interrupt between
 * translation blocks.  Returns true when an interrupt was delivered.
 */
bool s390_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
{
    if (interrupt_request & CPU_INTERRUPT_HARD) {
        S390CPU *cpu = S390_CPU(cs);
        CPUS390XState *env = &cpu->env;

        if (env->ex_value) {
            /* Execution of the target insn is indivisible from
               the parent EXECUTE insn. */
            return false;
        }
        if (s390_cpu_has_int(cpu)) {
            s390_cpu_do_interrupt(cs);
            return true;
        }
        if (env->psw.mask & PSW_MASK_WAIT) {
            /* Woken up because of a floating interrupt but it has already
             * been delivered. Go back to sleep. */
            cpu_interrupt(CPU(cpu), CPU_INTERRUPT_HALT);
        }
    }
    return false;
}
573 void s390x_cpu_debug_excp_handler(CPUState
*cs
)
575 S390CPU
*cpu
= S390_CPU(cs
);
576 CPUS390XState
*env
= &cpu
->env
;
577 CPUWatchpoint
*wp_hit
= cs
->watchpoint_hit
;
579 if (wp_hit
&& wp_hit
->flags
& BP_CPU
) {
580 /* FIXME: When the storage-alteration-space control bit is set,
581 the exception should only be triggered if the memory access
582 is done using an address space with the storage-alteration-event
583 bit set. We have no way to detect that with the current
585 cs
->watchpoint_hit
= NULL
;
587 env
->per_address
= env
->psw
.addr
;
588 env
->per_perc_atmid
|= PER_CODE_EVENT_STORE
| get_per_atmid(env
);
589 /* FIXME: We currently no way to detect the address space used
590 to trigger the watchpoint. For now just consider it is the
591 current default ASC. This turn to be true except when MVCP
592 and MVCS instrutions are not used. */
593 env
->per_perc_atmid
|= env
->psw
.mask
& (PSW_MASK_ASC
) >> 46;
595 /* Remove all watchpoints to re-execute the code. A PER exception
596 will be triggered, it will call load_psw which will recompute
598 cpu_watchpoint_remove_all(cs
, BP_CPU
);
599 cpu_loop_exit_noexc(cs
);
/* Unaligned accesses are only diagnosed with MO_ALIGN.  At the moment,
   this is only for the atomic operations, for which we want to raise a
   specification exception.  */
void s390x_cpu_do_unaligned_access(CPUState *cs, vaddr addr,
                                   MMUAccessType access_type,
                                   int mmu_idx, uintptr_t retaddr)
{
    S390CPU *cpu = S390_CPU(cs);
    CPUS390XState *env = &cpu->env;

    /* Never returns: unwinds via @retaddr and raises PGM_SPECIFICATION.  */
    tcg_s390_program_interrupt(env, PGM_SPECIFICATION, retaddr);
}

#endif /* CONFIG_USER_ONLY */