2 * HPPA emulation cpu translation for qemu.
4 * Copyright (c) 2016 Richard Henderson <rth@twiddle.net>
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2.1 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
20 #include "qemu/osdep.h"
22 #include "disas/disas.h"
23 #include "qemu/host-utils.h"
24 #include "exec/exec-all.h"
25 #include "tcg/tcg-op.h"
26 #include "tcg/tcg-op-gvec.h"
27 #include "exec/helper-proto.h"
28 #include "exec/helper-gen.h"
29 #include "exec/translator.h"
32 #define HELPER_H "helper.h"
33 #include "exec/helper-info.c.inc"
36 /* Choose to use explicit sizes within this file. */
39 typedef struct DisasCond
{
44 typedef struct DisasContext
{
45 DisasContextBase base
;
63 #ifdef CONFIG_USER_ONLY
68 #ifdef CONFIG_USER_ONLY
69 #define UNALIGN(C) (C)->unalign
71 #define UNALIGN(C) MO_ALIGN
74 /* Note that ssm/rsm instructions number PSW_W and PSW_E differently. */
75 static int expand_sm_imm(DisasContext
*ctx
, int val
)
78 val
= (val
& ~PSW_SM_E
) | PSW_E
;
81 val
= (val
& ~PSW_SM_W
) | PSW_W
;
86 /* Inverted space register indicates 0 means sr0 not inferred from base. */
87 static int expand_sr3x(DisasContext
*ctx
, int val
)
92 /* Convert the M:A bits within a memory insn to the tri-state value
93 we use for the final M. */
94 static int ma_to_m(DisasContext
*ctx
, int val
)
96 return val
& 2 ? (val
& 1 ? -1 : 1) : 0;
99 /* Convert the sign of the displacement to a pre or post-modify. */
100 static int pos_to_m(DisasContext
*ctx
, int val
)
105 static int neg_to_m(DisasContext
*ctx
, int val
)
110 /* Used for branch targets and fp memory ops. */
111 static int expand_shl2(DisasContext
*ctx
, int val
)
116 /* Used for fp memory ops. */
117 static int expand_shl3(DisasContext
*ctx
, int val
)
122 /* Used for assemble_21. */
123 static int expand_shl11(DisasContext
*ctx
, int val
)
/* Expand the 6-bit "assemble_6" immediate field. */
static int assemble_6(DisasContext *ctx, int val)
{
    /*
     * Officially, 32 * x + 32 - y.
     * Here, x is already in bit 5, and y is [4:0].
     * Since -y = ~y + 1, in 5 bits 32 - y => y ^ 31 + 1,
     * with the overflow from bit 4 summing with x.
     */
    return (val ^ 31) + 1;
}
139 /* Translate CMPI doubleword conditions to standard. */
/* Translate CMPI doubleword conditions to standard. */
static int cmpbid_c(DisasContext *ctx, int val)
{
    /* A zero field encodes the "*<<" condition, which maps to 4. */
    return val ? val : 4; /* 0 == "*<<" */
}
146 /* Include the auto-generated decoder. */
147 #include "decode-insns.c.inc"
149 /* We are not using a goto_tb (for whatever reason), but have updated
150 the iaq (for whatever reason), so don't do it again on exit. */
151 #define DISAS_IAQ_N_UPDATED DISAS_TARGET_0
153 /* We are exiting the TB, but have neither emitted a goto_tb, nor
154 updated the iaq for the next instruction to be executed. */
155 #define DISAS_IAQ_N_STALE DISAS_TARGET_1
157 /* Similarly, but we want to return to the main loop immediately
158 to recognize unmasked interrupts. */
159 #define DISAS_IAQ_N_STALE_EXIT DISAS_TARGET_2
160 #define DISAS_EXIT DISAS_TARGET_3
162 /* global register indexes */
163 static TCGv_i64 cpu_gr
[32];
164 static TCGv_i64 cpu_sr
[4];
165 static TCGv_i64 cpu_srH
;
166 static TCGv_i64 cpu_iaoq_f
;
167 static TCGv_i64 cpu_iaoq_b
;
168 static TCGv_i64 cpu_iasq_f
;
169 static TCGv_i64 cpu_iasq_b
;
170 static TCGv_i64 cpu_sar
;
171 static TCGv_i64 cpu_psw_n
;
172 static TCGv_i64 cpu_psw_v
;
173 static TCGv_i64 cpu_psw_cb
;
174 static TCGv_i64 cpu_psw_cb_msb
;
176 void hppa_translate_init(void)
178 #define DEF_VAR(V) { &cpu_##V, #V, offsetof(CPUHPPAState, V) }
180 typedef struct { TCGv_i64
*var
; const char *name
; int ofs
; } GlobalVar
;
181 static const GlobalVar vars
[] = {
182 { &cpu_sar
, "sar", offsetof(CPUHPPAState
, cr
[CR_SAR
]) },
193 /* Use the symbolic register names that match the disassembler. */
194 static const char gr_names
[32][4] = {
195 "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
196 "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15",
197 "r16", "r17", "r18", "r19", "r20", "r21", "r22", "r23",
198 "r24", "r25", "r26", "r27", "r28", "r29", "r30", "r31"
200 /* SR[4-7] are not global registers so that we can index them. */
201 static const char sr_names
[5][4] = {
202 "sr0", "sr1", "sr2", "sr3", "srH"
208 for (i
= 1; i
< 32; i
++) {
209 cpu_gr
[i
] = tcg_global_mem_new(tcg_env
,
210 offsetof(CPUHPPAState
, gr
[i
]),
213 for (i
= 0; i
< 4; i
++) {
214 cpu_sr
[i
] = tcg_global_mem_new_i64(tcg_env
,
215 offsetof(CPUHPPAState
, sr
[i
]),
218 cpu_srH
= tcg_global_mem_new_i64(tcg_env
,
219 offsetof(CPUHPPAState
, sr
[4]),
222 for (i
= 0; i
< ARRAY_SIZE(vars
); ++i
) {
223 const GlobalVar
*v
= &vars
[i
];
224 *v
->var
= tcg_global_mem_new(tcg_env
, v
->ofs
, v
->name
);
227 cpu_iasq_f
= tcg_global_mem_new_i64(tcg_env
,
228 offsetof(CPUHPPAState
, iasq_f
),
230 cpu_iasq_b
= tcg_global_mem_new_i64(tcg_env
,
231 offsetof(CPUHPPAState
, iasq_b
),
235 static DisasCond
cond_make_f(void)
244 static DisasCond
cond_make_t(void)
247 .c
= TCG_COND_ALWAYS
,
253 static DisasCond
cond_make_n(void)
258 .a1
= tcg_constant_i64(0)
/*
 * Build a DisasCond from comparison C of A0 vs A1, taking ownership of
 * the (temporary) operands.  Trivial conditions must use cond_make_f/t.
 */
static DisasCond cond_make_tmp(TCGCond c, TCGv_i64 a0, TCGv_i64 a1)
{
    assert (c != TCG_COND_NEVER && c != TCG_COND_ALWAYS);
    return (DisasCond){ .c = c, .a0 = a0, .a1 = a1 };
}
/* As cond_make_tmp, comparing temporary A0 against zero. */
static DisasCond cond_make_0_tmp(TCGCond c, TCGv_i64 a0)
{
    return cond_make_tmp(c, a0, tcg_constant_i64(0));
}
/*
 * As cond_make_0_tmp, but copy A0 into a fresh temporary first so the
 * caller's value is not captured by the condition.
 */
static DisasCond cond_make_0(TCGCond c, TCGv_i64 a0)
{
    TCGv_i64 tmp = tcg_temp_new_i64();
    tcg_gen_mov_i64(tmp, a0);
    return cond_make_0_tmp(c, tmp);
}
/*
 * As cond_make_tmp, but copy both operands into fresh temporaries so
 * the caller's values are not captured by the condition.
 */
static DisasCond cond_make(TCGCond c, TCGv_i64 a0, TCGv_i64 a1)
{
    TCGv_i64 t0 = tcg_temp_new_i64();
    TCGv_i64 t1 = tcg_temp_new_i64();

    tcg_gen_mov_i64(t0, a0);
    tcg_gen_mov_i64(t1, a1);
    return cond_make_tmp(c, t0, t1);
}
290 static void cond_free(DisasCond
*cond
)
297 case TCG_COND_ALWAYS
:
298 cond
->c
= TCG_COND_NEVER
;
305 static TCGv_i64
load_gpr(DisasContext
*ctx
, unsigned reg
)
308 TCGv_i64 t
= tcg_temp_new_i64();
309 tcg_gen_movi_i64(t
, 0);
316 static TCGv_i64
dest_gpr(DisasContext
*ctx
, unsigned reg
)
318 if (reg
== 0 || ctx
->null_cond
.c
!= TCG_COND_NEVER
) {
319 return tcg_temp_new_i64();
325 static void save_or_nullify(DisasContext
*ctx
, TCGv_i64 dest
, TCGv_i64 t
)
327 if (ctx
->null_cond
.c
!= TCG_COND_NEVER
) {
328 tcg_gen_movcond_i64(ctx
->null_cond
.c
, dest
, ctx
->null_cond
.a0
,
329 ctx
->null_cond
.a1
, dest
, t
);
331 tcg_gen_mov_i64(dest
, t
);
335 static void save_gpr(DisasContext
*ctx
, unsigned reg
, TCGv_i64 t
)
338 save_or_nullify(ctx
, cpu_gr
[reg
], t
);
350 static TCGv_i32
load_frw_i32(unsigned rt
)
352 TCGv_i32 ret
= tcg_temp_new_i32();
353 tcg_gen_ld_i32(ret
, tcg_env
,
354 offsetof(CPUHPPAState
, fr
[rt
& 31])
355 + (rt
& 32 ? LO_OFS
: HI_OFS
));
359 static TCGv_i32
load_frw0_i32(unsigned rt
)
362 TCGv_i32 ret
= tcg_temp_new_i32();
363 tcg_gen_movi_i32(ret
, 0);
366 return load_frw_i32(rt
);
370 static TCGv_i64
load_frw0_i64(unsigned rt
)
372 TCGv_i64 ret
= tcg_temp_new_i64();
374 tcg_gen_movi_i64(ret
, 0);
376 tcg_gen_ld32u_i64(ret
, tcg_env
,
377 offsetof(CPUHPPAState
, fr
[rt
& 31])
378 + (rt
& 32 ? LO_OFS
: HI_OFS
));
383 static void save_frw_i32(unsigned rt
, TCGv_i32 val
)
385 tcg_gen_st_i32(val
, tcg_env
,
386 offsetof(CPUHPPAState
, fr
[rt
& 31])
387 + (rt
& 32 ? LO_OFS
: HI_OFS
));
393 static TCGv_i64
load_frd(unsigned rt
)
395 TCGv_i64 ret
= tcg_temp_new_i64();
396 tcg_gen_ld_i64(ret
, tcg_env
, offsetof(CPUHPPAState
, fr
[rt
]));
400 static TCGv_i64
load_frd0(unsigned rt
)
403 TCGv_i64 ret
= tcg_temp_new_i64();
404 tcg_gen_movi_i64(ret
, 0);
411 static void save_frd(unsigned rt
, TCGv_i64 val
)
413 tcg_gen_st_i64(val
, tcg_env
, offsetof(CPUHPPAState
, fr
[rt
]));
416 static void load_spr(DisasContext
*ctx
, TCGv_i64 dest
, unsigned reg
)
418 #ifdef CONFIG_USER_ONLY
419 tcg_gen_movi_i64(dest
, 0);
422 tcg_gen_mov_i64(dest
, cpu_sr
[reg
]);
423 } else if (ctx
->tb_flags
& TB_FLAG_SR_SAME
) {
424 tcg_gen_mov_i64(dest
, cpu_srH
);
426 tcg_gen_ld_i64(dest
, tcg_env
, offsetof(CPUHPPAState
, sr
[reg
]));
431 /* Skip over the implementation of an insn that has been nullified.
432 Use this when the insn is too complex for a conditional move. */
433 static void nullify_over(DisasContext
*ctx
)
435 if (ctx
->null_cond
.c
!= TCG_COND_NEVER
) {
436 /* The always condition should have been handled in the main loop. */
437 assert(ctx
->null_cond
.c
!= TCG_COND_ALWAYS
);
439 ctx
->null_lab
= gen_new_label();
441 /* If we're using PSW[N], copy it to a temp because... */
442 if (ctx
->null_cond
.a0
== cpu_psw_n
) {
443 ctx
->null_cond
.a0
= tcg_temp_new_i64();
444 tcg_gen_mov_i64(ctx
->null_cond
.a0
, cpu_psw_n
);
446 /* ... we clear it before branching over the implementation,
447 so that (1) it's clear after nullifying this insn and
448 (2) if this insn nullifies the next, PSW[N] is valid. */
449 if (ctx
->psw_n_nonzero
) {
450 ctx
->psw_n_nonzero
= false;
451 tcg_gen_movi_i64(cpu_psw_n
, 0);
454 tcg_gen_brcond_i64(ctx
->null_cond
.c
, ctx
->null_cond
.a0
,
455 ctx
->null_cond
.a1
, ctx
->null_lab
);
456 cond_free(&ctx
->null_cond
);
460 /* Save the current nullification state to PSW[N]. */
461 static void nullify_save(DisasContext
*ctx
)
463 if (ctx
->null_cond
.c
== TCG_COND_NEVER
) {
464 if (ctx
->psw_n_nonzero
) {
465 tcg_gen_movi_i64(cpu_psw_n
, 0);
469 if (ctx
->null_cond
.a0
!= cpu_psw_n
) {
470 tcg_gen_setcond_i64(ctx
->null_cond
.c
, cpu_psw_n
,
471 ctx
->null_cond
.a0
, ctx
->null_cond
.a1
);
472 ctx
->psw_n_nonzero
= true;
474 cond_free(&ctx
->null_cond
);
477 /* Set a PSW[N] to X. The intention is that this is used immediately
478 before a goto_tb/exit_tb, so that there is no fallthru path to other
479 code within the TB. Therefore we do not update psw_n_nonzero. */
480 static void nullify_set(DisasContext
*ctx
, bool x
)
482 if (ctx
->psw_n_nonzero
|| x
) {
483 tcg_gen_movi_i64(cpu_psw_n
, x
);
487 /* Mark the end of an instruction that may have been nullified.
488 This is the pair to nullify_over. Always returns true so that
489 it may be tail-called from a translate function. */
490 static bool nullify_end(DisasContext
*ctx
)
492 TCGLabel
*null_lab
= ctx
->null_lab
;
493 DisasJumpType status
= ctx
->base
.is_jmp
;
495 /* For NEXT, NORETURN, STALE, we can easily continue (or exit).
496 For UPDATED, we cannot update on the nullified path. */
497 assert(status
!= DISAS_IAQ_N_UPDATED
);
499 if (likely(null_lab
== NULL
)) {
500 /* The current insn wasn't conditional or handled the condition
501 applied to it without a branch, so the (new) setting of
502 NULL_COND can be applied directly to the next insn. */
505 ctx
->null_lab
= NULL
;
507 if (likely(ctx
->null_cond
.c
== TCG_COND_NEVER
)) {
508 /* The next instruction will be unconditional,
509 and NULL_COND already reflects that. */
510 gen_set_label(null_lab
);
512 /* The insn that we just executed is itself nullifying the next
513 instruction. Store the condition in the PSW[N] global.
514 We asserted PSW[N] = 0 in nullify_over, so that after the
515 label we have the proper value in place. */
517 gen_set_label(null_lab
);
518 ctx
->null_cond
= cond_make_n();
520 if (status
== DISAS_NORETURN
) {
521 ctx
->base
.is_jmp
= DISAS_NEXT
;
/*
 * Return the mask for the offset portion of a global virtual address:
 * 62 bits with PSW_W (wide mode) set, else 32 bits.
 */
static uint64_t gva_offset_mask(DisasContext *ctx)
{
    return (ctx->tb_flags & PSW_W
            ? MAKE_64BIT_MASK(0, 62)
            : MAKE_64BIT_MASK(0, 32));
}
533 static void copy_iaoq_entry(DisasContext
*ctx
, TCGv_i64 dest
,
534 uint64_t ival
, TCGv_i64 vval
)
536 uint64_t mask
= gva_offset_mask(ctx
);
539 tcg_gen_movi_i64(dest
, ival
& mask
);
542 tcg_debug_assert(vval
!= NULL
);
545 * We know that the IAOQ is already properly masked.
546 * This optimization is primarily for "iaoq_f = iaoq_b".
548 if (vval
== cpu_iaoq_f
|| vval
== cpu_iaoq_b
) {
549 tcg_gen_mov_i64(dest
, vval
);
551 tcg_gen_andi_i64(dest
, vval
, mask
);
/*
 * Compute a branch destination from the front instruction offset queue.
 * DISP is relative to iaoq_f + 8 (i.e. past the branch and its delay slot).
 */
static inline uint64_t iaoq_dest(DisasContext *ctx, int64_t disp)
{
    return ctx->iaoq_f + disp + 8;
}
/* Raise EXCEPTION via helper.  Caller must have updated the IAQ globals. */
static void gen_excp_1(int exception)
{
    gen_helper_excp(tcg_env, tcg_constant_i32(exception));
}
565 static void gen_excp(DisasContext
*ctx
, int exception
)
567 copy_iaoq_entry(ctx
, cpu_iaoq_f
, ctx
->iaoq_f
, cpu_iaoq_f
);
568 copy_iaoq_entry(ctx
, cpu_iaoq_b
, ctx
->iaoq_b
, cpu_iaoq_b
);
570 gen_excp_1(exception
);
571 ctx
->base
.is_jmp
= DISAS_NORETURN
;
574 static bool gen_excp_iir(DisasContext
*ctx
, int exc
)
577 tcg_gen_st_i64(tcg_constant_i64(ctx
->insn
),
578 tcg_env
, offsetof(CPUHPPAState
, cr
[CR_IIR
]));
580 return nullify_end(ctx
);
/* Raise an illegal-instruction trap, recording the insn in CR_IIR. */
static bool gen_illegal(DisasContext *ctx)
{
    return gen_excp_iir(ctx, EXCP_ILL);
}
588 #ifdef CONFIG_USER_ONLY
589 #define CHECK_MOST_PRIVILEGED(EXCP) \
590 return gen_excp_iir(ctx, EXCP)
592 #define CHECK_MOST_PRIVILEGED(EXCP) \
594 if (ctx->privilege != 0) { \
595 return gen_excp_iir(ctx, EXCP); \
/* True if a direct goto_tb to DEST is permitted for this TB. */
static bool use_goto_tb(DisasContext *ctx, uint64_t dest)
{
    return translator_use_goto_tb(&ctx->base, dest);
}
605 /* If the next insn is to be nullified, and it's on the same page,
606 and we're not attempting to set a breakpoint on it, then we can
607 totally skip the nullified insn. This avoids creating and
608 executing a TB that merely branches to the next TB. */
/* If the next insn is to be nullified, and it's on the same page,
   and we're not attempting to set a breakpoint on it, then we can
   totally skip the nullified insn.  This avoids creating and
   executing a TB that merely branches to the next TB. */
static bool use_nullify_skip(DisasContext *ctx)
{
    return (((ctx->iaoq_b ^ ctx->iaoq_f) & TARGET_PAGE_MASK) == 0
            && !cpu_breakpoint_test(ctx->cs, ctx->iaoq_b, BP_ANY));
}
615 static void gen_goto_tb(DisasContext
*ctx
, int which
,
616 uint64_t f
, uint64_t b
)
618 if (f
!= -1 && b
!= -1 && use_goto_tb(ctx
, f
)) {
619 tcg_gen_goto_tb(which
);
620 copy_iaoq_entry(ctx
, cpu_iaoq_f
, f
, NULL
);
621 copy_iaoq_entry(ctx
, cpu_iaoq_b
, b
, NULL
);
622 tcg_gen_exit_tb(ctx
->base
.tb
, which
);
624 copy_iaoq_entry(ctx
, cpu_iaoq_f
, f
, cpu_iaoq_b
);
625 copy_iaoq_entry(ctx
, cpu_iaoq_b
, b
, ctx
->iaoq_n_var
);
626 tcg_gen_lookup_and_goto_ptr();
/*
 * True when arithmetic condition C needs the signed-overflow (SV) value:
 * the "<", "<=" and "SV" conditions.
 */
static bool cond_need_sv(int c)
{
    switch (c) {
    case 2: /* < / >=  */
    case 3: /* <= / >  */
    case 6: /* SV / NSV */
        return true;
    default:
        return false;
    }
}
/*
 * True when arithmetic condition C needs the carry/borrow bit:
 * the "NUV/UV" and "ZNV/VNZ" conditions.
 */
static bool cond_need_cb(int c)
{
    switch (c) {
    case 4: /* NUV / UV  */
    case 5: /* ZNV / VNZ */
        return true;
    default:
        return false;
    }
}
640 /* Need extensions from TCGv_i32 to TCGv_i64. */
/*
 * Need extensions from TCGv_i32 to TCGv_i64: i.e. the operation is
 * 32-bit, either because this is not PA 2.0 or the D bit is clear.
 */
static bool cond_need_ext(DisasContext *ctx, bool d)
{
    return !(ctx->is_pa20 && d);
}
647 * Compute conditional for arithmetic. See Page 5-3, Table 5-1, of
648 * the Parisc 1.1 Architecture Reference Manual for details.
651 static DisasCond
do_cond(DisasContext
*ctx
, unsigned cf
, bool d
,
652 TCGv_i64 res
, TCGv_i64 cb_msb
, TCGv_i64 sv
)
658 case 0: /* Never / TR (0 / 1) */
659 cond
= cond_make_f();
661 case 1: /* = / <> (Z / !Z) */
662 if (cond_need_ext(ctx
, d
)) {
663 tmp
= tcg_temp_new_i64();
664 tcg_gen_ext32u_i64(tmp
, res
);
667 cond
= cond_make_0(TCG_COND_EQ
, res
);
669 case 2: /* < / >= (N ^ V / !(N ^ V) */
670 tmp
= tcg_temp_new_i64();
671 tcg_gen_xor_i64(tmp
, res
, sv
);
672 if (cond_need_ext(ctx
, d
)) {
673 tcg_gen_ext32s_i64(tmp
, tmp
);
675 cond
= cond_make_0_tmp(TCG_COND_LT
, tmp
);
677 case 3: /* <= / > (N ^ V) | Z / !((N ^ V) | Z) */
681 * ((res < 0) ^ (sv < 0)) | !res
682 * ((res ^ sv) < 0) | !res
683 * (~(res ^ sv) >= 0) | !res
684 * !(~(res ^ sv) >> 31) | !res
685 * !(~(res ^ sv) >> 31 & res)
687 tmp
= tcg_temp_new_i64();
688 tcg_gen_eqv_i64(tmp
, res
, sv
);
689 if (cond_need_ext(ctx
, d
)) {
690 tcg_gen_sextract_i64(tmp
, tmp
, 31, 1);
691 tcg_gen_and_i64(tmp
, tmp
, res
);
692 tcg_gen_ext32u_i64(tmp
, tmp
);
694 tcg_gen_sari_i64(tmp
, tmp
, 63);
695 tcg_gen_and_i64(tmp
, tmp
, res
);
697 cond
= cond_make_0_tmp(TCG_COND_EQ
, tmp
);
699 case 4: /* NUV / UV (!C / C) */
700 /* Only bit 0 of cb_msb is ever set. */
701 cond
= cond_make_0(TCG_COND_EQ
, cb_msb
);
703 case 5: /* ZNV / VNZ (!C | Z / C & !Z) */
704 tmp
= tcg_temp_new_i64();
705 tcg_gen_neg_i64(tmp
, cb_msb
);
706 tcg_gen_and_i64(tmp
, tmp
, res
);
707 if (cond_need_ext(ctx
, d
)) {
708 tcg_gen_ext32u_i64(tmp
, tmp
);
710 cond
= cond_make_0_tmp(TCG_COND_EQ
, tmp
);
712 case 6: /* SV / NSV (V / !V) */
713 if (cond_need_ext(ctx
, d
)) {
714 tmp
= tcg_temp_new_i64();
715 tcg_gen_ext32s_i64(tmp
, sv
);
718 cond
= cond_make_0(TCG_COND_LT
, sv
);
720 case 7: /* OD / EV */
721 tmp
= tcg_temp_new_i64();
722 tcg_gen_andi_i64(tmp
, res
, 1);
723 cond
= cond_make_0_tmp(TCG_COND_NE
, tmp
);
726 g_assert_not_reached();
729 cond
.c
= tcg_invert_cond(cond
.c
);
735 /* Similar, but for the special case of subtraction without borrow, we
736 can use the inputs directly. This can allow other computation to be
737 deleted as unused. */
739 static DisasCond
do_sub_cond(DisasContext
*ctx
, unsigned cf
, bool d
,
740 TCGv_i64 res
, TCGv_i64 in1
,
741 TCGv_i64 in2
, TCGv_i64 sv
)
759 case 4: /* << / >>= */
763 case 5: /* <<= / >> */
768 return do_cond(ctx
, cf
, d
, res
, NULL
, sv
);
772 tc
= tcg_invert_cond(tc
);
774 if (cond_need_ext(ctx
, d
)) {
775 TCGv_i64 t1
= tcg_temp_new_i64();
776 TCGv_i64 t2
= tcg_temp_new_i64();
779 tcg_gen_ext32u_i64(t1
, in1
);
780 tcg_gen_ext32u_i64(t2
, in2
);
782 tcg_gen_ext32s_i64(t1
, in1
);
783 tcg_gen_ext32s_i64(t2
, in2
);
785 return cond_make_tmp(tc
, t1
, t2
);
787 return cond_make(tc
, in1
, in2
);
791 * Similar, but for logicals, where the carry and overflow bits are not
792 * computed, and use of them is undefined.
794 * Undefined or not, hardware does not trap. It seems reasonable to
795 * assume hardware treats cases c={4,5,6} as if C=0 & V=0, since that's
796 * how cases c={2,3} are treated.
799 static DisasCond
do_log_cond(DisasContext
*ctx
, unsigned cf
, bool d
,
807 case 9: /* undef, C */
808 case 11: /* undef, C & !Z */
809 case 12: /* undef, V */
810 return cond_make_f();
813 case 8: /* undef, !C */
814 case 10: /* undef, !C | Z */
815 case 13: /* undef, !V */
816 return cond_make_t();
845 return do_cond(ctx
, cf
, d
, res
, NULL
, NULL
);
848 g_assert_not_reached();
851 if (cond_need_ext(ctx
, d
)) {
852 TCGv_i64 tmp
= tcg_temp_new_i64();
855 tcg_gen_ext32u_i64(tmp
, res
);
857 tcg_gen_ext32s_i64(tmp
, res
);
859 return cond_make_0_tmp(tc
, tmp
);
861 return cond_make_0(tc
, res
);
864 /* Similar, but for shift/extract/deposit conditions. */
866 static DisasCond
do_sed_cond(DisasContext
*ctx
, unsigned orig
, bool d
,
871 /* Convert the compressed condition codes to standard.
872 0-2 are the same as logicals (nv,<,<=), while 3 is OD.
873 4-7 are the reverse of 0-3. */
880 return do_log_cond(ctx
, c
* 2 + f
, d
, res
);
883 /* Similar, but for unit conditions. */
885 static DisasCond
do_unit_cond(unsigned cf
, bool d
, TCGv_i64 res
,
886 TCGv_i64 in1
, TCGv_i64 in2
)
889 TCGv_i64 tmp
, cb
= NULL
;
890 uint64_t d_repl
= d
? 0x0000000100000001ull
: 1;
893 /* Since we want to test lots of carry-out bits all at once, do not
894 * do our normal thing and compute carry-in of bit B+1 since that
895 * leaves us with carry bits spread across two words.
897 cb
= tcg_temp_new_i64();
898 tmp
= tcg_temp_new_i64();
899 tcg_gen_or_i64(cb
, in1
, in2
);
900 tcg_gen_and_i64(tmp
, in1
, in2
);
901 tcg_gen_andc_i64(cb
, cb
, res
);
902 tcg_gen_or_i64(cb
, cb
, tmp
);
906 case 0: /* never / TR */
907 case 1: /* undefined */
908 case 5: /* undefined */
909 cond
= cond_make_f();
912 case 2: /* SBZ / NBZ */
913 /* See hasless(v,1) from
914 * https://graphics.stanford.edu/~seander/bithacks.html#ZeroInWord
916 tmp
= tcg_temp_new_i64();
917 tcg_gen_subi_i64(tmp
, res
, d_repl
* 0x01010101u
);
918 tcg_gen_andc_i64(tmp
, tmp
, res
);
919 tcg_gen_andi_i64(tmp
, tmp
, d_repl
* 0x80808080u
);
920 cond
= cond_make_0(TCG_COND_NE
, tmp
);
923 case 3: /* SHZ / NHZ */
924 tmp
= tcg_temp_new_i64();
925 tcg_gen_subi_i64(tmp
, res
, d_repl
* 0x00010001u
);
926 tcg_gen_andc_i64(tmp
, tmp
, res
);
927 tcg_gen_andi_i64(tmp
, tmp
, d_repl
* 0x80008000u
);
928 cond
= cond_make_0(TCG_COND_NE
, tmp
);
931 case 4: /* SDC / NDC */
932 tcg_gen_andi_i64(cb
, cb
, d_repl
* 0x88888888u
);
933 cond
= cond_make_0(TCG_COND_NE
, cb
);
936 case 6: /* SBC / NBC */
937 tcg_gen_andi_i64(cb
, cb
, d_repl
* 0x80808080u
);
938 cond
= cond_make_0(TCG_COND_NE
, cb
);
941 case 7: /* SHC / NHC */
942 tcg_gen_andi_i64(cb
, cb
, d_repl
* 0x80008000u
);
943 cond
= cond_make_0(TCG_COND_NE
, cb
);
947 g_assert_not_reached();
950 cond
.c
= tcg_invert_cond(cond
.c
);
956 static TCGv_i64
get_carry(DisasContext
*ctx
, bool d
,
957 TCGv_i64 cb
, TCGv_i64 cb_msb
)
959 if (cond_need_ext(ctx
, d
)) {
960 TCGv_i64 t
= tcg_temp_new_i64();
961 tcg_gen_extract_i64(t
, cb
, 32, 1);
/* Return the PSW carry bit, widened appropriately for a D-bit=d op. */
static TCGv_i64 get_psw_carry(DisasContext *ctx, bool d)
{
    return get_carry(ctx, d, cpu_psw_cb, cpu_psw_cb_msb);
}
972 /* Compute signed overflow for addition. */
973 static TCGv_i64
do_add_sv(DisasContext
*ctx
, TCGv_i64 res
,
974 TCGv_i64 in1
, TCGv_i64 in2
)
976 TCGv_i64 sv
= tcg_temp_new_i64();
977 TCGv_i64 tmp
= tcg_temp_new_i64();
979 tcg_gen_xor_i64(sv
, res
, in1
);
980 tcg_gen_xor_i64(tmp
, in1
, in2
);
981 tcg_gen_andc_i64(sv
, sv
, tmp
);
986 /* Compute signed overflow for subtraction. */
987 static TCGv_i64
do_sub_sv(DisasContext
*ctx
, TCGv_i64 res
,
988 TCGv_i64 in1
, TCGv_i64 in2
)
990 TCGv_i64 sv
= tcg_temp_new_i64();
991 TCGv_i64 tmp
= tcg_temp_new_i64();
993 tcg_gen_xor_i64(sv
, res
, in1
);
994 tcg_gen_xor_i64(tmp
, in1
, in2
);
995 tcg_gen_and_i64(sv
, sv
, tmp
);
1000 static void do_add(DisasContext
*ctx
, unsigned rt
, TCGv_i64 in1
,
1001 TCGv_i64 in2
, unsigned shift
, bool is_l
,
1002 bool is_tsv
, bool is_tc
, bool is_c
, unsigned cf
, bool d
)
1004 TCGv_i64 dest
, cb
, cb_msb
, cb_cond
, sv
, tmp
;
1005 unsigned c
= cf
>> 1;
1008 dest
= tcg_temp_new_i64();
1014 tmp
= tcg_temp_new_i64();
1015 tcg_gen_shli_i64(tmp
, in1
, shift
);
1019 if (!is_l
|| cond_need_cb(c
)) {
1020 TCGv_i64 zero
= tcg_constant_i64(0);
1021 cb_msb
= tcg_temp_new_i64();
1022 cb
= tcg_temp_new_i64();
1024 tcg_gen_add2_i64(dest
, cb_msb
, in1
, zero
, in2
, zero
);
1026 tcg_gen_add2_i64(dest
, cb_msb
, dest
, cb_msb
,
1027 get_psw_carry(ctx
, d
), zero
);
1029 tcg_gen_xor_i64(cb
, in1
, in2
);
1030 tcg_gen_xor_i64(cb
, cb
, dest
);
1031 if (cond_need_cb(c
)) {
1032 cb_cond
= get_carry(ctx
, d
, cb
, cb_msb
);
1035 tcg_gen_add_i64(dest
, in1
, in2
);
1037 tcg_gen_add_i64(dest
, dest
, get_psw_carry(ctx
, d
));
1041 /* Compute signed overflow if required. */
1043 if (is_tsv
|| cond_need_sv(c
)) {
1044 sv
= do_add_sv(ctx
, dest
, in1
, in2
);
1046 /* ??? Need to include overflow from shift. */
1047 gen_helper_tsv(tcg_env
, sv
);
1051 /* Emit any conditional trap before any writeback. */
1052 cond
= do_cond(ctx
, cf
, d
, dest
, cb_cond
, sv
);
1054 tmp
= tcg_temp_new_i64();
1055 tcg_gen_setcond_i64(cond
.c
, tmp
, cond
.a0
, cond
.a1
);
1056 gen_helper_tcond(tcg_env
, tmp
);
1059 /* Write back the result. */
1061 save_or_nullify(ctx
, cpu_psw_cb
, cb
);
1062 save_or_nullify(ctx
, cpu_psw_cb_msb
, cb_msb
);
1064 save_gpr(ctx
, rt
, dest
);
1066 /* Install the new nullification. */
1067 cond_free(&ctx
->null_cond
);
1068 ctx
->null_cond
= cond
;
1071 static bool do_add_reg(DisasContext
*ctx
, arg_rrr_cf_d_sh
*a
,
1072 bool is_l
, bool is_tsv
, bool is_tc
, bool is_c
)
1074 TCGv_i64 tcg_r1
, tcg_r2
;
1079 tcg_r1
= load_gpr(ctx
, a
->r1
);
1080 tcg_r2
= load_gpr(ctx
, a
->r2
);
1081 do_add(ctx
, a
->t
, tcg_r1
, tcg_r2
, a
->sh
, is_l
,
1082 is_tsv
, is_tc
, is_c
, a
->cf
, a
->d
);
1083 return nullify_end(ctx
);
1086 static bool do_add_imm(DisasContext
*ctx
, arg_rri_cf
*a
,
1087 bool is_tsv
, bool is_tc
)
1089 TCGv_i64 tcg_im
, tcg_r2
;
1094 tcg_im
= tcg_constant_i64(a
->i
);
1095 tcg_r2
= load_gpr(ctx
, a
->r
);
1096 /* All ADDI conditions are 32-bit. */
1097 do_add(ctx
, a
->t
, tcg_im
, tcg_r2
, 0, 0, is_tsv
, is_tc
, 0, a
->cf
, false);
1098 return nullify_end(ctx
);
1101 static void do_sub(DisasContext
*ctx
, unsigned rt
, TCGv_i64 in1
,
1102 TCGv_i64 in2
, bool is_tsv
, bool is_b
,
1103 bool is_tc
, unsigned cf
, bool d
)
1105 TCGv_i64 dest
, sv
, cb
, cb_msb
, zero
, tmp
;
1106 unsigned c
= cf
>> 1;
1109 dest
= tcg_temp_new_i64();
1110 cb
= tcg_temp_new_i64();
1111 cb_msb
= tcg_temp_new_i64();
1113 zero
= tcg_constant_i64(0);
1115 /* DEST,C = IN1 + ~IN2 + C. */
1116 tcg_gen_not_i64(cb
, in2
);
1117 tcg_gen_add2_i64(dest
, cb_msb
, in1
, zero
, get_psw_carry(ctx
, d
), zero
);
1118 tcg_gen_add2_i64(dest
, cb_msb
, dest
, cb_msb
, cb
, zero
);
1119 tcg_gen_xor_i64(cb
, cb
, in1
);
1120 tcg_gen_xor_i64(cb
, cb
, dest
);
1123 * DEST,C = IN1 + ~IN2 + 1. We can produce the same result in fewer
1124 * operations by seeding the high word with 1 and subtracting.
1126 TCGv_i64 one
= tcg_constant_i64(1);
1127 tcg_gen_sub2_i64(dest
, cb_msb
, in1
, one
, in2
, zero
);
1128 tcg_gen_eqv_i64(cb
, in1
, in2
);
1129 tcg_gen_xor_i64(cb
, cb
, dest
);
1132 /* Compute signed overflow if required. */
1134 if (is_tsv
|| cond_need_sv(c
)) {
1135 sv
= do_sub_sv(ctx
, dest
, in1
, in2
);
1137 gen_helper_tsv(tcg_env
, sv
);
1141 /* Compute the condition. We cannot use the special case for borrow. */
1143 cond
= do_sub_cond(ctx
, cf
, d
, dest
, in1
, in2
, sv
);
1145 cond
= do_cond(ctx
, cf
, d
, dest
, get_carry(ctx
, d
, cb
, cb_msb
), sv
);
1148 /* Emit any conditional trap before any writeback. */
1150 tmp
= tcg_temp_new_i64();
1151 tcg_gen_setcond_i64(cond
.c
, tmp
, cond
.a0
, cond
.a1
);
1152 gen_helper_tcond(tcg_env
, tmp
);
1155 /* Write back the result. */
1156 save_or_nullify(ctx
, cpu_psw_cb
, cb
);
1157 save_or_nullify(ctx
, cpu_psw_cb_msb
, cb_msb
);
1158 save_gpr(ctx
, rt
, dest
);
1160 /* Install the new nullification. */
1161 cond_free(&ctx
->null_cond
);
1162 ctx
->null_cond
= cond
;
1165 static bool do_sub_reg(DisasContext
*ctx
, arg_rrr_cf_d
*a
,
1166 bool is_tsv
, bool is_b
, bool is_tc
)
1168 TCGv_i64 tcg_r1
, tcg_r2
;
1173 tcg_r1
= load_gpr(ctx
, a
->r1
);
1174 tcg_r2
= load_gpr(ctx
, a
->r2
);
1175 do_sub(ctx
, a
->t
, tcg_r1
, tcg_r2
, is_tsv
, is_b
, is_tc
, a
->cf
, a
->d
);
1176 return nullify_end(ctx
);
1179 static bool do_sub_imm(DisasContext
*ctx
, arg_rri_cf
*a
, bool is_tsv
)
1181 TCGv_i64 tcg_im
, tcg_r2
;
1186 tcg_im
= tcg_constant_i64(a
->i
);
1187 tcg_r2
= load_gpr(ctx
, a
->r
);
1188 /* All SUBI conditions are 32-bit. */
1189 do_sub(ctx
, a
->t
, tcg_im
, tcg_r2
, is_tsv
, 0, 0, a
->cf
, false);
1190 return nullify_end(ctx
);
1193 static void do_cmpclr(DisasContext
*ctx
, unsigned rt
, TCGv_i64 in1
,
1194 TCGv_i64 in2
, unsigned cf
, bool d
)
1199 dest
= tcg_temp_new_i64();
1200 tcg_gen_sub_i64(dest
, in1
, in2
);
1202 /* Compute signed overflow if required. */
1204 if (cond_need_sv(cf
>> 1)) {
1205 sv
= do_sub_sv(ctx
, dest
, in1
, in2
);
1208 /* Form the condition for the compare. */
1209 cond
= do_sub_cond(ctx
, cf
, d
, dest
, in1
, in2
, sv
);
1212 tcg_gen_movi_i64(dest
, 0);
1213 save_gpr(ctx
, rt
, dest
);
1215 /* Install the new nullification. */
1216 cond_free(&ctx
->null_cond
);
1217 ctx
->null_cond
= cond
;
1220 static void do_log(DisasContext
*ctx
, unsigned rt
, TCGv_i64 in1
,
1221 TCGv_i64 in2
, unsigned cf
, bool d
,
1222 void (*fn
)(TCGv_i64
, TCGv_i64
, TCGv_i64
))
1224 TCGv_i64 dest
= dest_gpr(ctx
, rt
);
1226 /* Perform the operation, and writeback. */
1228 save_gpr(ctx
, rt
, dest
);
1230 /* Install the new nullification. */
1231 cond_free(&ctx
->null_cond
);
1233 ctx
->null_cond
= do_log_cond(ctx
, cf
, d
, dest
);
1237 static bool do_log_reg(DisasContext
*ctx
, arg_rrr_cf_d
*a
,
1238 void (*fn
)(TCGv_i64
, TCGv_i64
, TCGv_i64
))
1240 TCGv_i64 tcg_r1
, tcg_r2
;
1245 tcg_r1
= load_gpr(ctx
, a
->r1
);
1246 tcg_r2
= load_gpr(ctx
, a
->r2
);
1247 do_log(ctx
, a
->t
, tcg_r1
, tcg_r2
, a
->cf
, a
->d
, fn
);
1248 return nullify_end(ctx
);
1251 static void do_unit(DisasContext
*ctx
, unsigned rt
, TCGv_i64 in1
,
1252 TCGv_i64 in2
, unsigned cf
, bool d
, bool is_tc
,
1253 void (*fn
)(TCGv_i64
, TCGv_i64
, TCGv_i64
))
1259 dest
= dest_gpr(ctx
, rt
);
1261 save_gpr(ctx
, rt
, dest
);
1262 cond_free(&ctx
->null_cond
);
1264 dest
= tcg_temp_new_i64();
1267 cond
= do_unit_cond(cf
, d
, dest
, in1
, in2
);
1270 TCGv_i64 tmp
= tcg_temp_new_i64();
1271 tcg_gen_setcond_i64(cond
.c
, tmp
, cond
.a0
, cond
.a1
);
1272 gen_helper_tcond(tcg_env
, tmp
);
1274 save_gpr(ctx
, rt
, dest
);
1276 cond_free(&ctx
->null_cond
);
1277 ctx
->null_cond
= cond
;
1281 #ifndef CONFIG_USER_ONLY
1282 /* The "normal" usage is SP >= 0, wherein SP == 0 selects the space
1283 from the top 2 bits of the base register. There are a few system
1284 instructions that have a 3-bit space specifier, for which SR0 is
1285 not special. To handle this, pass ~SP. */
1286 static TCGv_i64
space_select(DisasContext
*ctx
, int sp
, TCGv_i64 base
)
1296 spc
= tcg_temp_new_i64();
1297 load_spr(ctx
, spc
, sp
);
1300 if (ctx
->tb_flags
& TB_FLAG_SR_SAME
) {
1304 ptr
= tcg_temp_new_ptr();
1305 tmp
= tcg_temp_new_i64();
1306 spc
= tcg_temp_new_i64();
1308 /* Extract top 2 bits of the address, shift left 3 for uint64_t index. */
1309 tcg_gen_shri_i64(tmp
, base
, (ctx
->tb_flags
& PSW_W
? 64 : 32) - 5);
1310 tcg_gen_andi_i64(tmp
, tmp
, 030);
1311 tcg_gen_trunc_i64_ptr(ptr
, tmp
);
1313 tcg_gen_add_ptr(ptr
, ptr
, tcg_env
);
1314 tcg_gen_ld_i64(spc
, ptr
, offsetof(CPUHPPAState
, sr
[4]));
1320 static void form_gva(DisasContext
*ctx
, TCGv_i64
*pgva
, TCGv_i64
*pofs
,
1321 unsigned rb
, unsigned rx
, int scale
, int64_t disp
,
1322 unsigned sp
, int modify
, bool is_phys
)
1324 TCGv_i64 base
= load_gpr(ctx
, rb
);
1328 /* Note that RX is mutually exclusive with DISP. */
1330 ofs
= tcg_temp_new_i64();
1331 tcg_gen_shli_i64(ofs
, cpu_gr
[rx
], scale
);
1332 tcg_gen_add_i64(ofs
, ofs
, base
);
1333 } else if (disp
|| modify
) {
1334 ofs
= tcg_temp_new_i64();
1335 tcg_gen_addi_i64(ofs
, base
, disp
);
1341 *pgva
= addr
= tcg_temp_new_i64();
1342 tcg_gen_andi_i64(addr
, modify
<= 0 ? ofs
: base
, gva_offset_mask(ctx
));
1343 #ifndef CONFIG_USER_ONLY
1345 tcg_gen_or_i64(addr
, addr
, space_select(ctx
, sp
, base
));
1350 /* Emit a memory load. The modify parameter should be
1351 * < 0 for pre-modify,
1352 * > 0 for post-modify,
1353 * = 0 for no base register update.
/* NOTE(review): forms a guest virtual address with form_gva(), emits the
 * TCG memory op with the context's mmu_idx and alignment policy, and
 * writes the updated base back via save_gpr().  The source extraction has
 * dropped lines here (braces, local declarations, the `if (modify)` guard
 * around the writeback) — confirm against upstream before relying on
 * exact control flow. */
/* 32-bit guest load into a TCGv_i32 destination. */
1355 static void do_load_32(DisasContext
*ctx
, TCGv_i32 dest
, unsigned rb
,
1356 unsigned rx
, int scale
, int64_t disp
,
1357 unsigned sp
, int modify
, MemOp mop
)
1362 /* Caller uses nullify_over/nullify_end. */
1363 assert(ctx
->null_cond
.c
== TCG_COND_NEVER
);
1365 form_gva(ctx
, &addr
, &ofs
, rb
, rx
, scale
, disp
, sp
, modify
,
1366 ctx
->mmu_idx
== MMU_PHYS_IDX
);
1367 tcg_gen_qemu_ld_i32(dest
, addr
, ctx
->mmu_idx
, mop
| UNALIGN(ctx
));
/* Base-register writeback (presumably guarded by `modify` in the dropped
 * lines — TODO confirm). */
1369 save_gpr(ctx
, rb
, ofs
);
/* 64-bit guest load into a TCGv_i64 destination; mirrors do_load_32. */
1373 static void do_load_64(DisasContext
*ctx
, TCGv_i64 dest
, unsigned rb
,
1374 unsigned rx
, int scale
, int64_t disp
,
1375 unsigned sp
, int modify
, MemOp mop
)
1380 /* Caller uses nullify_over/nullify_end. */
1381 assert(ctx
->null_cond
.c
== TCG_COND_NEVER
);
1383 form_gva(ctx
, &addr
, &ofs
, rb
, rx
, scale
, disp
, sp
, modify
,
1384 ctx
->mmu_idx
== MMU_PHYS_IDX
);
1385 tcg_gen_qemu_ld_i64(dest
, addr
, ctx
->mmu_idx
, mop
| UNALIGN(ctx
));
1387 save_gpr(ctx
, rb
, ofs
);
/* 32-bit guest store from a TCGv_i32 source; same address formation and
 * writeback pattern as the loads above. */
1391 static void do_store_32(DisasContext
*ctx
, TCGv_i32 src
, unsigned rb
,
1392 unsigned rx
, int scale
, int64_t disp
,
1393 unsigned sp
, int modify
, MemOp mop
)
1398 /* Caller uses nullify_over/nullify_end. */
1399 assert(ctx
->null_cond
.c
== TCG_COND_NEVER
);
1401 form_gva(ctx
, &addr
, &ofs
, rb
, rx
, scale
, disp
, sp
, modify
,
1402 ctx
->mmu_idx
== MMU_PHYS_IDX
);
1403 tcg_gen_qemu_st_i32(src
, addr
, ctx
->mmu_idx
, mop
| UNALIGN(ctx
));
1405 save_gpr(ctx
, rb
, ofs
);
/* 64-bit guest store from a TCGv_i64 source; mirrors do_store_32. */
1409 static void do_store_64(DisasContext
*ctx
, TCGv_i64 src
, unsigned rb
,
1410 unsigned rx
, int scale
, int64_t disp
,
1411 unsigned sp
, int modify
, MemOp mop
)
1416 /* Caller uses nullify_over/nullify_end. */
1417 assert(ctx
->null_cond
.c
== TCG_COND_NEVER
);
1419 form_gva(ctx
, &addr
, &ofs
, rb
, rx
, scale
, disp
, sp
, modify
,
1420 ctx
->mmu_idx
== MMU_PHYS_IDX
);
1421 tcg_gen_qemu_st_i64(src
, addr
, ctx
->mmu_idx
, mop
| UNALIGN(ctx
));
1423 save_gpr(ctx
, rb
, ofs
);
/* Integer load into GPR rt.  Uses a fresh temp when RT may alias RB so the
 * base-register update does not clobber the loaded value.  NOTE(review):
 * the extraction dropped lines here (nullify_over, the branch selecting
 * between dest_gpr and a new temp) — confirm control flow upstream. */
1427 static bool do_load(DisasContext
*ctx
, unsigned rt
, unsigned rb
,
1428 unsigned rx
, int scale
, int64_t disp
,
1429 unsigned sp
, int modify
, MemOp mop
)
1436 /* No base register update. */
1437 dest
= dest_gpr(ctx
, rt
);
1439 /* Make sure if RT == RB, we see the result of the load. */
1440 dest
= tcg_temp_new_i64();
1442 do_load_64(ctx
, dest
, rb
, rx
, scale
, disp
, sp
, modify
, mop
);
1443 save_gpr(ctx
, rt
, dest
);
1445 return nullify_end(ctx
);
/* FP single-word load into float register rt (32-bit, MO_TEUL). */
1448 static bool do_floadw(DisasContext
*ctx
, unsigned rt
, unsigned rb
,
1449 unsigned rx
, int scale
, int64_t disp
,
1450 unsigned sp
, int modify
)
1456 tmp
= tcg_temp_new_i32();
1457 do_load_32(ctx
, tmp
, rb
, rx
, scale
, disp
, sp
, modify
, MO_TEUL
);
1458 save_frw_i32(rt
, tmp
);
/* Loading FR0 has side effects on FP status (presumably when rt == 0 —
 * TODO confirm the dropped guard). */
1461 gen_helper_loaded_fr0(tcg_env
);
1464 return nullify_end(ctx
);
/* FLDW: decode the scale (word => shift of 2) and forward to do_floadw. */
1467 static bool trans_fldw(DisasContext
*ctx
, arg_ldst
*a
)
1469 return do_floadw(ctx
, a
->t
, a
->b
, a
->x
, a
->scale
? 2 : 0,
1470 a
->disp
, a
->sp
, a
->m
);
/* FP doubleword load into float register rt (64-bit, MO_TEUQ). */
1473 static bool do_floadd(DisasContext
*ctx
, unsigned rt
, unsigned rb
,
1474 unsigned rx
, int scale
, int64_t disp
,
1475 unsigned sp
, int modify
)
1481 tmp
= tcg_temp_new_i64();
1482 do_load_64(ctx
, tmp
, rb
, rx
, scale
, disp
, sp
, modify
, MO_TEUQ
);
1486 gen_helper_loaded_fr0(tcg_env
);
1489 return nullify_end(ctx
);
/* FLDD: decode the scale (doubleword => shift of 3) and forward. */
1492 static bool trans_fldd(DisasContext
*ctx
, arg_ldst
*a
)
1494 return do_floadd(ctx
, a
->t
, a
->b
, a
->x
, a
->scale
? 3 : 0,
1495 a
->disp
, a
->sp
, a
->m
);
/* Integer store of GPR rt; no index register or scaling (rx=0, scale=0). */
1498 static bool do_store(DisasContext
*ctx
, unsigned rt
, unsigned rb
,
1499 int64_t disp
, unsigned sp
,
1500 int modify
, MemOp mop
)
1503 do_store_64(ctx
, load_gpr(ctx
, rt
), rb
, 0, 0, disp
, sp
, modify
, mop
);
1504 return nullify_end(ctx
);
/* FP single-word store from float register rt. */
1507 static bool do_fstorew(DisasContext
*ctx
, unsigned rt
, unsigned rb
,
1508 unsigned rx
, int scale
, int64_t disp
,
1509 unsigned sp
, int modify
)
1515 tmp
= load_frw_i32(rt
);
1516 do_store_32(ctx
, tmp
, rb
, rx
, scale
, disp
, sp
, modify
, MO_TEUL
);
1518 return nullify_end(ctx
);
/* FSTW: decode scale and forward to do_fstorew. */
1521 static bool trans_fstw(DisasContext
*ctx
, arg_ldst
*a
)
1523 return do_fstorew(ctx
, a
->t
, a
->b
, a
->x
, a
->scale
? 2 : 0,
1524 a
->disp
, a
->sp
, a
->m
);
/* FP doubleword store from float register rt.  NOTE(review): the line
 * loading `tmp` from the float register was dropped by the extraction. */
1527 static bool do_fstored(DisasContext
*ctx
, unsigned rt
, unsigned rb
,
1528 unsigned rx
, int scale
, int64_t disp
,
1529 unsigned sp
, int modify
)
1536 do_store_64(ctx
, tmp
, rb
, rx
, scale
, disp
, sp
, modify
, MO_TEUQ
);
1538 return nullify_end(ctx
);
/* FSTD: decode scale and forward to do_fstored. */
1541 static bool trans_fstd(DisasContext
*ctx
, arg_ldst
*a
)
1543 return do_fstored(ctx
, a
->t
, a
->b
, a
->x
, a
->scale
? 3 : 0,
1544 a
->disp
, a
->sp
, a
->m
);
/* FP-op helper naming: w = single word (i32), d = doubleword (i64);
 * e = env.  e.g. "wew" = i32 result, env, i32 operand.  Each loads the
 * operand(s) via load_frw0/load_frd0 (register 0 reads as zero), applies
 * the generated helper `func`, and stores the result back.  NOTE(review):
 * nullify_over calls and some temp declarations were dropped by the
 * extraction. */
/* Unary i32 -> i32 FP operation. */
1547 static bool do_fop_wew(DisasContext
*ctx
, unsigned rt
, unsigned ra
,
1548 void (*func
)(TCGv_i32
, TCGv_env
, TCGv_i32
))
1553 tmp
= load_frw0_i32(ra
);
1555 func(tmp
, tcg_env
, tmp
);
1557 save_frw_i32(rt
, tmp
);
1558 return nullify_end(ctx
);
/* Unary i64 -> i32 FP operation (e.g. double-to-single convert). */
1561 static bool do_fop_wed(DisasContext
*ctx
, unsigned rt
, unsigned ra
,
1562 void (*func
)(TCGv_i32
, TCGv_env
, TCGv_i64
))
1569 dst
= tcg_temp_new_i32();
1571 func(dst
, tcg_env
, src
);
1573 save_frw_i32(rt
, dst
);
1574 return nullify_end(ctx
);
/* Unary i64 -> i64 FP operation. */
1577 static bool do_fop_ded(DisasContext
*ctx
, unsigned rt
, unsigned ra
,
1578 void (*func
)(TCGv_i64
, TCGv_env
, TCGv_i64
))
1583 tmp
= load_frd0(ra
);
1585 func(tmp
, tcg_env
, tmp
);
1588 return nullify_end(ctx
);
/* Unary i32 -> i64 FP operation (e.g. single-to-double convert). */
1591 static bool do_fop_dew(DisasContext
*ctx
, unsigned rt
, unsigned ra
,
1592 void (*func
)(TCGv_i64
, TCGv_env
, TCGv_i32
))
1598 src
= load_frw0_i32(ra
);
1599 dst
= tcg_temp_new_i64();
1601 func(dst
, tcg_env
, src
);
1604 return nullify_end(ctx
);
/* Binary i32 x i32 -> i32 FP operation; result reuses operand temp `a`. */
1607 static bool do_fop_weww(DisasContext
*ctx
, unsigned rt
,
1608 unsigned ra
, unsigned rb
,
1609 void (*func
)(TCGv_i32
, TCGv_env
, TCGv_i32
, TCGv_i32
))
1614 a
= load_frw0_i32(ra
);
1615 b
= load_frw0_i32(rb
);
1617 func(a
, tcg_env
, a
, b
);
1619 save_frw_i32(rt
, a
);
1620 return nullify_end(ctx
);
/* Binary i64 x i64 -> i64 FP operation.  NOTE(review): operand-loading
 * lines were dropped by the extraction. */
1623 static bool do_fop_dedd(DisasContext
*ctx
, unsigned rt
,
1624 unsigned ra
, unsigned rb
,
1625 void (*func
)(TCGv_i64
, TCGv_env
, TCGv_i64
, TCGv_i64
))
1633 func(a
, tcg_env
, a
, b
);
1636 return nullify_end(ctx
);
1639 /* Emit an unconditional branch to a direct target, which may or may not
1640 have already had nullification handled. */
/* NOTE(review): this whole branch section depends on PA-RISC's delayed
 * branching and instruction-nullification (PSW[N]) model; the extraction
 * dropped many control-flow lines (if/else, braces), so comments below
 * describe only what the visible calls establish. */
1641 static bool do_dbranch(DisasContext
*ctx
, uint64_t dest
,
1642 unsigned link
, bool is_n
)
/* Fast path: no pending nullification condition or label. */
1644 if (ctx
->null_cond
.c
== TCG_COND_NEVER
&& ctx
->null_lab
== NULL
) {
/* Write the return link (next IAOQ) into GR[link]. */
1646 copy_iaoq_entry(ctx
, cpu_gr
[link
], ctx
->iaoq_n
, ctx
->iaoq_n_var
);
1650 ctx
->null_cond
.c
= TCG_COND_ALWAYS
;
1656 copy_iaoq_entry(ctx
, cpu_gr
[link
], ctx
->iaoq_n
, ctx
->iaoq_n_var
);
/* When the branch nullifies its delay slot and we may skip it entirely,
 * jump straight to {dest, dest+4} with PSW[N] clear. */
1659 if (is_n
&& use_nullify_skip(ctx
)) {
1660 nullify_set(ctx
, 0);
1661 gen_goto_tb(ctx
, 0, dest
, dest
+ 4);
1663 nullify_set(ctx
, is_n
);
1664 gen_goto_tb(ctx
, 0, ctx
->iaoq_b
, dest
);
1669 nullify_set(ctx
, 0);
1670 gen_goto_tb(ctx
, 1, ctx
->iaoq_b
, ctx
->iaoq_n
);
1671 ctx
->base
.is_jmp
= DISAS_NORETURN
;
1676 /* Emit a conditional branch to a direct target. If the branch itself
1677 is nullified, we should have already used nullify_over. */
1678 static bool do_cbranch(DisasContext
*ctx
, int64_t disp
, bool is_n
,
1681 uint64_t dest
= iaoq_dest(ctx
, disp
);
1682 TCGLabel
*taken
= NULL
;
1683 TCGCond c
= cond
->c
;
1686 assert(ctx
->null_cond
.c
== TCG_COND_NEVER
);
1688 /* Handle TRUE and NEVER as direct branches. */
1689 if (c
== TCG_COND_ALWAYS
) {
/* Always taken: nullify only forward branches (disp >= 0). */
1690 return do_dbranch(ctx
, dest
, 0, is_n
&& disp
>= 0);
1692 if (c
== TCG_COND_NEVER
) {
/* Never taken: fall through; nullify only backward branches. */
1693 return do_dbranch(ctx
, ctx
->iaoq_n
, 0, is_n
&& disp
< 0);
1696 taken
= gen_new_label();
1697 tcg_gen_brcond_i64(c
, cond
->a0
, cond
->a1
, taken
);
1700 /* Not taken: Condition not satisfied; nullify on backward branches. */
1701 n
= is_n
&& disp
< 0;
1702 if (n
&& use_nullify_skip(ctx
)) {
1703 nullify_set(ctx
, 0);
1704 gen_goto_tb(ctx
, 0, ctx
->iaoq_n
, ctx
->iaoq_n
+ 4);
1706 if (!n
&& ctx
->null_lab
) {
1707 gen_set_label(ctx
->null_lab
);
1708 ctx
->null_lab
= NULL
;
1710 nullify_set(ctx
, n
);
1711 if (ctx
->iaoq_n
== -1) {
1712 /* The temporary iaoq_n_var died at the branch above.
1713 Regenerate it here instead of saving it. */
1714 tcg_gen_addi_i64(ctx
->iaoq_n_var
, cpu_iaoq_b
, 4);
1716 gen_goto_tb(ctx
, 0, ctx
->iaoq_b
, ctx
->iaoq_n
);
1719 gen_set_label(taken
);
1721 /* Taken: Condition satisfied; nullify on forward branches. */
1722 n
= is_n
&& disp
>= 0;
1723 if (n
&& use_nullify_skip(ctx
)) {
1724 nullify_set(ctx
, 0);
1725 gen_goto_tb(ctx
, 1, dest
, dest
+ 4);
1727 nullify_set(ctx
, n
);
1728 gen_goto_tb(ctx
, 1, ctx
->iaoq_b
, dest
);
1731 /* Not taken: the branch itself was nullified. */
1732 if (ctx
->null_lab
) {
1733 gen_set_label(ctx
->null_lab
);
1734 ctx
->null_lab
= NULL
;
1735 ctx
->base
.is_jmp
= DISAS_IAQ_N_STALE
;
1737 ctx
->base
.is_jmp
= DISAS_NORETURN
;
1742 /* Emit an unconditional branch to an indirect target. This handles
1743 nullification of the branch itself. */
1744 static bool do_ibranch(DisasContext
*ctx
, TCGv_i64 dest
,
1745 unsigned link
, bool is_n
)
1747 TCGv_i64 a0
, a1
, next
, tmp
;
1750 assert(ctx
->null_lab
== NULL
);
/* Case 1: no pending nullification condition. */
1752 if (ctx
->null_cond
.c
== TCG_COND_NEVER
) {
1754 copy_iaoq_entry(ctx
, cpu_gr
[link
], ctx
->iaoq_n
, ctx
->iaoq_n_var
);
1756 next
= tcg_temp_new_i64();
1757 tcg_gen_mov_i64(next
, dest
);
1759 if (use_nullify_skip(ctx
)) {
/* Update both IAOQ elements directly and leave the TB. */
1760 copy_iaoq_entry(ctx
, cpu_iaoq_f
, -1, next
);
1761 tcg_gen_addi_i64(next
, next
, 4);
1762 copy_iaoq_entry(ctx
, cpu_iaoq_b
, -1, next
);
1763 nullify_set(ctx
, 0);
1764 ctx
->base
.is_jmp
= DISAS_IAQ_N_UPDATED
;
1767 ctx
->null_cond
.c
= TCG_COND_ALWAYS
;
1770 ctx
->iaoq_n_var
= next
;
1771 } else if (is_n
&& use_nullify_skip(ctx
)) {
1772 /* The (conditional) branch, B, nullifies the next insn, N,
1773 and we're allowed to skip execution N (no single-step or
1774 tracepoint in effect). Since the goto_ptr that we must use
1775 for the indirect branch consumes no special resources, we
1776 can (conditionally) skip B and continue execution. */
1777 /* The use_nullify_skip test implies we have a known control path. */
1778 tcg_debug_assert(ctx
->iaoq_b
!= -1);
1779 tcg_debug_assert(ctx
->iaoq_n
!= -1);
1781 /* We do have to handle the non-local temporary, DEST, before
1782 branching. Since IOAQ_F is not really live at this point, we
1783 can simply store DEST optimistically. Similarly with IAOQ_B. */
1784 copy_iaoq_entry(ctx
, cpu_iaoq_f
, -1, dest
);
1785 next
= tcg_temp_new_i64();
1786 tcg_gen_addi_i64(next
, dest
, 4);
1787 copy_iaoq_entry(ctx
, cpu_iaoq_b
, -1, next
);
1791 copy_iaoq_entry(ctx
, cpu_gr
[link
], ctx
->iaoq_n
, ctx
->iaoq_n_var
);
1793 tcg_gen_lookup_and_goto_ptr();
1794 return nullify_end(ctx
);
/* Case 3: fold the pending nullification condition into the branch
 * target selection with movcond instead of emitting control flow. */
1796 c
= ctx
->null_cond
.c
;
1797 a0
= ctx
->null_cond
.a0
;
1798 a1
= ctx
->null_cond
.a1
;
1800 tmp
= tcg_temp_new_i64();
1801 next
= tcg_temp_new_i64();
1803 copy_iaoq_entry(ctx
, tmp
, ctx
->iaoq_n
, ctx
->iaoq_n_var
);
1804 tcg_gen_movcond_i64(c
, next
, a0
, a1
, tmp
, dest
);
1806 ctx
->iaoq_n_var
= next
;
/* Conditionally update the link register the same way. */
1809 tcg_gen_movcond_i64(c
, cpu_gr
[link
], a0
, a1
, cpu_gr
[link
], tmp
);
1813 /* The branch nullifies the next insn, which means the state of N
1814 after the branch is the inverse of the state of N that applied
1816 tcg_gen_setcond_i64(tcg_invert_cond(c
), cpu_psw_n
, a0
, a1
);
1817 cond_free(&ctx
->null_cond
);
1818 ctx
->null_cond
= cond_make_n();
1819 ctx
->psw_n_nonzero
= true;
1821 cond_free(&ctx
->null_cond
);
/* Implement the architectural rule (pseudo-code continued from above): */
1828 * if (IAOQ_Front{30..31} < GR[b]{30..31})
1829 * IAOQ_Next{30..31} ← GR[b]{30..31};
1831 * IAOQ_Next{30..31} ← IAOQ_Front{30..31};
1832 * which keeps the privilege level from being increased.
/* Clamp the privilege bits (low 2 bits of the branch target) so an
 * indirect branch can never raise privilege above ctx->privilege. */
1834 static TCGv_i64
do_ibranch_priv(DisasContext
*ctx
, TCGv_i64 offset
)
1837 switch (ctx
->privilege
) {
1839 /* Privilege 0 is maximum and is allowed to decrease. */
1842 /* Privilege 3 is minimum and is never allowed to increase. */
1843 dest
= tcg_temp_new_i64();
1844 tcg_gen_ori_i64(dest
, offset
, 3);
/* Intermediate privilege: take max(offset priv bits, current priv)
 * via the GTU movcond below. */
1847 dest
= tcg_temp_new_i64();
1848 tcg_gen_andi_i64(dest
, offset
, -4);
1849 tcg_gen_ori_i64(dest
, dest
, ctx
->privilege
);
1850 tcg_gen_movcond_i64(TCG_COND_GTU
, dest
, dest
, offset
, dest
, offset
);
1856 #ifdef CONFIG_USER_ONLY
1857 /* On Linux, page zero is normally marked execute only + gateway.
1858 Therefore normal read or write is supposed to fail, but specific
1859 offsets have kernel code mapped to raise permissions to implement
1860 system calls. Handling this via an explicit check here, rather
1861 in than the "be disp(sr2,r0)" instruction that probably sent us
1862 here, is the easiest way to handle the branch delay slot on the
1863 aforementioned BE. */
1864 static void do_page_zero(DisasContext
*ctx
)
1868 /* If by some means we get here with PSW[N]=1, that implies that
1869 the B,GATE instruction would be skipped, and we'd fault on the
1870 next insn within the privileged page. */
1871 switch (ctx
->null_cond
.c
) {
1872 case TCG_COND_NEVER
:
1874 case TCG_COND_ALWAYS
:
1875 tcg_gen_movi_i64(cpu_psw_n
, 0);
1878 /* Since this is always the first (and only) insn within the
1879 TB, we should know the state of PSW[N] from TB->FLAGS. */
1880 g_assert_not_reached();
1883 /* Check that we didn't arrive here via some means that allowed
1884 non-sequential instruction execution. Normally the PSW[B] bit
1885 detects this by disallowing the B,GATE instruction to execute
1886 under such conditions. */
1887 if (ctx
->iaoq_b
!= ctx
->iaoq_f
+ 4) {
/* Dispatch on the Linux gateway-page entry offset. */
1891 switch (ctx
->iaoq_f
& -4) {
1892 case 0x00: /* Null pointer call */
1893 gen_excp_1(EXCP_IMP
);
1894 ctx
->base
.is_jmp
= DISAS_NORETURN
;
1897 case 0xb0: /* LWS */
1898 gen_excp_1(EXCP_SYSCALL_LWS
);
1899 ctx
->base
.is_jmp
= DISAS_NORETURN
;
1902 case 0xe0: /* SET_THREAD_POINTER */
1903 tcg_gen_st_i64(cpu_gr
[26], tcg_env
, offsetof(CPUHPPAState
, cr
[27]));
/* Return to GR[31] | 3 (lowest privilege), updating both IAOQ slots. */
1904 tmp
= tcg_temp_new_i64();
1905 tcg_gen_ori_i64(tmp
, cpu_gr
[31], 3);
1906 copy_iaoq_entry(ctx
, cpu_iaoq_f
, -1, tmp
);
1907 tcg_gen_addi_i64(tmp
, tmp
, 4);
1908 copy_iaoq_entry(ctx
, cpu_iaoq_b
, -1, tmp
);
1909 ctx
->base
.is_jmp
= DISAS_IAQ_N_UPDATED
;
1912 case 0x100: /* SYSCALL */
1913 gen_excp_1(EXCP_SYSCALL
);
1914 ctx
->base
.is_jmp
= DISAS_NORETURN
;
/* Anything else on the gateway page is an illegal instruction. */
1919 gen_excp_1(EXCP_ILL
);
1920 ctx
->base
.is_jmp
= DISAS_NORETURN
;
/* NOP: nothing to emit; just release the nullification condition. */
1926 static bool trans_nop(DisasContext
*ctx
, arg_nop
*a
)
1928 cond_free(&ctx
->null_cond
);
/* BREAK: raise the break exception, storing IIR. */
1932 static bool trans_break(DisasContext
*ctx
, arg_break
*a
)
1934 return gen_excp_iir(ctx
, EXCP_BREAK
);
/* SYNC/SYNCDMA: emit a full memory barrier. */
1937 static bool trans_sync(DisasContext
*ctx
, arg_sync
*a
)
1939 /* No point in nullifying the memory barrier. */
1940 tcg_gen_mb(TCG_BAR_SC
| TCG_MO_ALL
);
1942 cond_free(&ctx
->null_cond
);
/* MFIA: read the current instruction address (IAOQ_Front) into rt. */
1946 static bool trans_mfia(DisasContext
*ctx
, arg_mfia
*a
)
1949 TCGv_i64 tmp
= dest_gpr(ctx
, rt
);
1950 tcg_gen_movi_i64(tmp
, ctx
->iaoq_f
);
1951 save_gpr(ctx
, rt
, tmp
);
1953 cond_free(&ctx
->null_cond
);
/* MFSP: read space register rs; the space value lives in the high
 * 32 bits, hence the shift. */
1957 static bool trans_mfsp(DisasContext
*ctx
, arg_mfsp
*a
)
1960 unsigned rs
= a
->sp
;
1961 TCGv_i64 t0
= tcg_temp_new_i64();
1963 load_spr(ctx
, t0
, rs
);
1964 tcg_gen_shri_i64(t0
, t0
, 32);
1966 save_gpr(ctx
, rt
, t0
);
1968 cond_free(&ctx
->null_cond
);
/* MFCTL: read a control register.  SAR and the interval timer are
 * unprivileged; everything else requires maximum privilege. */
1972 static bool trans_mfctl(DisasContext
*ctx
, arg_mfctl
*a
)
1975 unsigned ctl
= a
->r
;
1981 /* MFSAR without ,W masks low 5 bits. */
1982 tmp
= dest_gpr(ctx
, rt
);
1983 tcg_gen_andi_i64(tmp
, cpu_sar
, 31);
1984 save_gpr(ctx
, rt
, tmp
);
1987 save_gpr(ctx
, rt
, cpu_sar
);
1989 case CR_IT
: /* Interval Timer */
1990 /* FIXME: Respect PSW_S bit. */
1992 tmp
= dest_gpr(ctx
, rt
);
/* If icount/IO is in effect, the timer read ends the TB. */
1993 if (translator_io_start(&ctx
->base
)) {
1994 gen_helper_read_interval_timer(tmp
);
1995 ctx
->base
.is_jmp
= DISAS_IAQ_N_STALE
;
1997 gen_helper_read_interval_timer(tmp
);
1999 save_gpr(ctx
, rt
, tmp
);
2000 return nullify_end(ctx
);
2005 /* All other control registers are privileged. */
2006 CHECK_MOST_PRIVILEGED(EXCP_PRIV_REG
);
/* Generic case: load cr[ctl] from the CPU state structure. */
2010 tmp
= tcg_temp_new_i64();
2011 tcg_gen_ld_i64(tmp
, tcg_env
, offsetof(CPUHPPAState
, cr
[ctl
]));
2012 save_gpr(ctx
, rt
, tmp
);
2015 cond_free(&ctx
->null_cond
);
/* MTSP: write space register rs from GPR rr (value goes in the high
 * 32 bits).  sr0-3 are presumably cached in cpu_sr[] while higher
 * spaces live only in env.sr[] — TODO confirm the dropped branch. */
2019 static bool trans_mtsp(DisasContext
*ctx
, arg_mtsp
*a
)
2022 unsigned rs
= a
->sp
;
2026 CHECK_MOST_PRIVILEGED(EXCP_PRIV_REG
);
2030 tmp
= tcg_temp_new_i64();
2031 tcg_gen_shli_i64(tmp
, load_gpr(ctx
, rr
), 32);
2034 tcg_gen_st_i64(tmp
, tcg_env
, offsetof(CPUHPPAState
, sr
[rs
]));
/* SR contents changed: the "all SRs equal" TB flag no longer holds. */
2035 ctx
->tb_flags
&= ~TB_FLAG_SR_SAME
;
2037 tcg_gen_mov_i64(cpu_sr
[rs
], tmp
);
2040 return nullify_end(ctx
);
/* MTCTL: write control register ctl.  SAR is unprivileged; timers,
 * interrupt registers and the interruption queue get dedicated
 * handling; the rest store straight into env.cr[]. */
2043 static bool trans_mtctl(DisasContext
*ctx
, arg_mtctl
*a
)
2045 unsigned ctl
= a
->t
;
2049 if (ctl
== CR_SAR
) {
2050 reg
= load_gpr(ctx
, a
->r
);
2051 tmp
= tcg_temp_new_i64();
/* SAR holds a shift amount: 6 bits on PA2.0, 5 bits on PA1.x. */
2052 tcg_gen_andi_i64(tmp
, reg
, ctx
->is_pa20
? 63 : 31);
2053 save_or_nullify(ctx
, cpu_sar
, tmp
);
2055 cond_free(&ctx
->null_cond
);
2059 /* All other control registers are privileged or read-only. */
2060 CHECK_MOST_PRIVILEGED(EXCP_PRIV_REG
);
2062 #ifndef CONFIG_USER_ONLY
2064 reg
= load_gpr(ctx
, a
->r
);
2068 gen_helper_write_interval_timer(tcg_env
, reg
);
2071 gen_helper_write_eirr(tcg_env
, reg
);
2074 gen_helper_write_eiem(tcg_env
, reg
);
/* EIEM change may unmask an interrupt: exit the TB. */
2075 ctx
->base
.is_jmp
= DISAS_IAQ_N_STALE_EXIT
;
2080 /* FIXME: Respect PSW_Q bit */
2081 /* The write advances the queue and stores to the back element. */
2082 tmp
= tcg_temp_new_i64();
2083 tcg_gen_ld_i64(tmp
, tcg_env
,
2084 offsetof(CPUHPPAState
, cr_back
[ctl
- CR_IIASQ
]));
2085 tcg_gen_st_i64(tmp
, tcg_env
, offsetof(CPUHPPAState
, cr
[ctl
]));
2086 tcg_gen_st_i64(reg
, tcg_env
,
2087 offsetof(CPUHPPAState
, cr_back
[ctl
- CR_IIASQ
]));
/* PID registers: store, then let the helper flush cached protection. */
2094 tcg_gen_st_i64(reg
, tcg_env
, offsetof(CPUHPPAState
, cr
[ctl
]));
2095 #ifndef CONFIG_USER_ONLY
2096 gen_helper_change_prot_id(tcg_env
);
2101 tcg_gen_st_i64(reg
, tcg_env
, offsetof(CPUHPPAState
, cr
[ctl
]));
2104 return nullify_end(ctx
);
/* MTSARCM: write SAR with the complement of GPR r (masked to the
 * shift-amount width). */
2108 static bool trans_mtsarcm(DisasContext
*ctx
, arg_mtsarcm
*a
)
2110 TCGv_i64 tmp
= tcg_temp_new_i64();
2112 tcg_gen_not_i64(tmp
, load_gpr(ctx
, a
->r
));
2113 tcg_gen_andi_i64(tmp
, tmp
, ctx
->is_pa20
? 63 : 31);
2114 save_or_nullify(ctx
, cpu_sar
, tmp
);
2116 cond_free(&ctx
->null_cond
);
/* LDSID: load the space identifier selected by (sp, base) into rt. */
2120 static bool trans_ldsid(DisasContext
*ctx
, arg_ldsid
*a
)
2122 TCGv_i64 dest
= dest_gpr(ctx
, a
->t
);
2124 #ifdef CONFIG_USER_ONLY
2125 /* We don't implement space registers in user mode. */
2126 tcg_gen_movi_i64(dest
, 0);
2128 tcg_gen_mov_i64(dest
, space_select(ctx
, a
->sp
, load_gpr(ctx
, a
->b
)));
2129 tcg_gen_shri_i64(dest
, dest
, 32);
2131 save_gpr(ctx
, a
->t
, dest
);
2133 cond_free(&ctx
->null_cond
);
/* RSM: reset (clear) the PSW system-mask bits in a->i; old PSW goes to
 * GPR t via the swap_system_mask helper.  Privileged; system-only. */
2137 static bool trans_rsm(DisasContext
*ctx
, arg_rsm
*a
)
2139 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR
);
2140 #ifndef CONFIG_USER_ONLY
2145 tmp
= tcg_temp_new_i64();
2146 tcg_gen_ld_i64(tmp
, tcg_env
, offsetof(CPUHPPAState
, psw
));
2147 tcg_gen_andi_i64(tmp
, tmp
, ~a
->i
);
2148 gen_helper_swap_system_mask(tmp
, tcg_env
, tmp
);
2149 save_gpr(ctx
, a
->t
, tmp
);
2151 /* Exit the TB to recognize new interrupts, e.g. PSW_M. */
2152 ctx
->base
.is_jmp
= DISAS_IAQ_N_STALE_EXIT
;
2153 return nullify_end(ctx
);
/* SSM: set the PSW system-mask bits in a->i; otherwise as RSM. */
2157 static bool trans_ssm(DisasContext
*ctx
, arg_ssm
*a
)
2159 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR
);
2160 #ifndef CONFIG_USER_ONLY
2165 tmp
= tcg_temp_new_i64();
2166 tcg_gen_ld_i64(tmp
, tcg_env
, offsetof(CPUHPPAState
, psw
));
2167 tcg_gen_ori_i64(tmp
, tmp
, a
->i
);
2168 gen_helper_swap_system_mask(tmp
, tcg_env
, tmp
);
2169 save_gpr(ctx
, a
->t
, tmp
);
2171 /* Exit the TB to recognize new interrupts, e.g. PSW_I. */
2172 ctx
->base
.is_jmp
= DISAS_IAQ_N_STALE_EXIT
;
2173 return nullify_end(ctx
);
/* MTSM: replace the PSW system mask with the value from GPR r. */
2177 static bool trans_mtsm(DisasContext
*ctx
, arg_mtsm
*a
)
2179 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR
);
2180 #ifndef CONFIG_USER_ONLY
2184 reg
= load_gpr(ctx
, a
->r
);
2185 tmp
= tcg_temp_new_i64();
2186 gen_helper_swap_system_mask(tmp
, tcg_env
, reg
);
2188 /* Exit the TB to recognize new interrupts. */
2189 ctx
->base
.is_jmp
= DISAS_IAQ_N_STALE_EXIT
;
2190 return nullify_end(ctx
);
/* RFI / RFI,R: return from interruption, optionally restoring the
 * shadow registers (rfi_r).  All state change happens in the helper. */
2194 static bool do_rfi(DisasContext
*ctx
, bool rfi_r
)
2196 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR
);
2197 #ifndef CONFIG_USER_ONLY
2201 gen_helper_rfi_r(tcg_env
);
2203 gen_helper_rfi(tcg_env
);
2205 /* Exit the TB to recognize new interrupts. */
2206 tcg_gen_exit_tb(NULL
, 0);
2207 ctx
->base
.is_jmp
= DISAS_NORETURN
;
2209 return nullify_end(ctx
);
/* Decode wrappers for the two RFI forms. */
2213 static bool trans_rfi(DisasContext
*ctx
, arg_rfi
*a
)
2215 return do_rfi(ctx
, false);
2218 static bool trans_rfi_r(DisasContext
*ctx
, arg_rfi_r
*a
)
2220 return do_rfi(ctx
, true);
/* Firmware-style HALT: stop this CPU via helper; TB ends. */
2223 static bool trans_halt(DisasContext
*ctx
, arg_halt
*a
)
2225 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR
);
2226 #ifndef CONFIG_USER_ONLY
2228 gen_helper_halt(tcg_env
);
2229 ctx
->base
.is_jmp
= DISAS_NORETURN
;
2230 return nullify_end(ctx
);
/* Firmware-style RESET: machine reset via helper; TB ends. */
2234 static bool trans_reset(DisasContext
*ctx
, arg_reset
*a
)
2236 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR
);
2237 #ifndef CONFIG_USER_ONLY
2239 gen_helper_reset(tcg_env
);
2240 ctx
->base
.is_jmp
= DISAS_NORETURN
;
2241 return nullify_end(ctx
);
/* GETSHADOWREGS: copy the shadow registers back into the GPRs. */
2245 static bool trans_getshadowregs(DisasContext
*ctx
, arg_getshadowregs
*a
)
2247 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR
);
2248 #ifndef CONFIG_USER_ONLY
2250 gen_helper_getshadowregs(tcg_env
);
2251 return nullify_end(ctx
);
/* Cache-control style insns decoded as "nop with address update": only
 * the base-register modification has an architectural effect here. */
2255 static bool trans_nop_addrx(DisasContext
*ctx
, arg_ldst
*a
)
2258 TCGv_i64 dest
= dest_gpr(ctx
, a
->b
);
2259 TCGv_i64 src1
= load_gpr(ctx
, a
->b
);
2260 TCGv_i64 src2
= load_gpr(ctx
, a
->x
);
2262 /* The only thing we need to do is the base register modification. */
2263 tcg_gen_add_i64(dest
, src1
, src2
);
2264 save_gpr(ctx
, a
->b
, dest
);
2266 cond_free(&ctx
->null_cond
);
/* PROBE / PROBEI: test read or write access to (sp, b) at a given
 * privilege level; result (0/1) written to rt.  The immediate form uses
 * a->ri directly, the register form masks GR[ri] to 2 bits. */
2270 static bool trans_probe(DisasContext
*ctx
, arg_probe
*a
)
2273 TCGv_i32 level
, want
;
2278 dest
= dest_gpr(ctx
, a
->t
);
2279 form_gva(ctx
, &addr
, &ofs
, a
->b
, 0, 0, 0, a
->sp
, 0, false);
2282 level
= tcg_constant_i32(a
->ri
);
2284 level
= tcg_temp_new_i32();
2285 tcg_gen_extrl_i64_i32(level
, load_gpr(ctx
, a
->ri
));
2286 tcg_gen_andi_i32(level
, level
, 3);
2288 want
= tcg_constant_i32(a
->write
? PAGE_WRITE
: PAGE_READ
);
2290 gen_helper_probe(dest
, tcg_env
, addr
, level
, want
);
2292 save_gpr(ctx
, a
->t
, dest
);
2293 return nullify_end(ctx
);
/* IITLBA/IITLBP/IDTLBA/IDTLBP (PA1.x): insert an address or protection
 * entry into the TLB via helper. */
2296 static bool trans_ixtlbx(DisasContext
*ctx
, arg_ixtlbx
*a
)
2301 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR
);
2302 #ifndef CONFIG_USER_ONLY
2308 form_gva(ctx
, &addr
, &ofs
, a
->b
, 0, 0, 0, a
->sp
, 0, false);
2309 reg
= load_gpr(ctx
, a
->r
);
2311 gen_helper_itlba_pa11(tcg_env
, addr
, reg
);
2313 gen_helper_itlbp_pa11(tcg_env
, addr
, reg
);
2316 /* Exit TB for TLB change if mmu is enabled. */
2317 if (ctx
->tb_flags
& PSW_C
) {
2318 ctx
->base
.is_jmp
= DISAS_IAQ_N_STALE
;
2320 return nullify_end(ctx
);
/* PITLB/PDTLB/PxTLBE: purge a TLB entry (or the whole TLB for the
 * "entry" form) via helper. */
2324 static bool trans_pxtlbx(DisasContext
*ctx
, arg_pxtlbx
*a
)
2326 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR
);
2327 #ifndef CONFIG_USER_ONLY
2333 form_gva(ctx
, &addr
, &ofs
, a
->b
, a
->x
, 0, 0, a
->sp
, a
->m
, false);
2335 save_gpr(ctx
, a
->b
, ofs
);
2338 gen_helper_ptlbe(tcg_env
);
2340 gen_helper_ptlb(tcg_env
, addr
);
2343 /* Exit TB for TLB change if mmu is enabled. */
2344 if (ctx
->tb_flags
& PSW_C
) {
2345 ctx
->base
.is_jmp
= DISAS_IAQ_N_STALE
;
2347 return nullify_end(ctx
);
2352 * Implement the pcxl and pcxl2 Fast TLB Insert instructions.
2354 * https://parisc.wiki.kernel.org/images-parisc/a/a9/Pcxl2_ers.pdf
2355 * page 13-9 (195/206)
/* The fast-insert forms take the address from the interruption
 * registers: ISR:IOR for data, IIASQ:IIAOQ for instruction inserts. */
2357 static bool trans_ixtlbxf(DisasContext
*ctx
, arg_ixtlbxf
*a
)
2362 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR
);
2363 #ifndef CONFIG_USER_ONLY
2364 TCGv_i64 addr
, atl
, stl
;
2371 * if (not (pcxl or pcxl2))
2372 * return gen_illegal(ctx);
2375 atl
= tcg_temp_new_i64();
2376 stl
= tcg_temp_new_i64();
2377 addr
= tcg_temp_new_i64();
2379 tcg_gen_ld32u_i64(stl
, tcg_env
,
2380 a
->data
? offsetof(CPUHPPAState
, cr
[CR_ISR
])
2381 : offsetof(CPUHPPAState
, cr
[CR_IIASQ
]))
;
2382 tcg_gen_ld32u_i64(atl
, tcg_env
,
2383 a
->data
? offsetof(CPUHPPAState
, cr
[CR_IOR
])
2384 : offsetof(CPUHPPAState
, cr
[CR_IIAOQ
]));
/* Combine space (high 32) and offset (low 32) into one address. */
2385 tcg_gen_shli_i64(stl
, stl
, 32);
2386 tcg_gen_or_i64(addr
, atl
, stl
);
2388 reg
= load_gpr(ctx
, a
->r
);
2390 gen_helper_itlba_pa11(tcg_env
, addr
, reg
);
2392 gen_helper_itlbp_pa11(tcg_env
, addr
, reg
);
2395 /* Exit TB for TLB change if mmu is enabled. */
2396 if (ctx
->tb_flags
& PSW_C
) {
2397 ctx
->base
.is_jmp
= DISAS_IAQ_N_STALE
;
2399 return nullify_end(ctx
);
/* PA2.0 combined TLB insert (IDTLBT/IITLBT); two source registers. */
2403 static bool trans_ixtlbt(DisasContext
*ctx
, arg_ixtlbt
*a
)
2405 if (!ctx
->is_pa20
) {
2408 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR
);
2409 #ifndef CONFIG_USER_ONLY
2412 TCGv_i64 src1
= load_gpr(ctx
, a
->r1
);
2413 TCGv_i64 src2
= load_gpr(ctx
, a
->r2
);
2416 gen_helper_idtlbt_pa20(tcg_env
, src1
, src2
);
2418 gen_helper_iitlbt_pa20(tcg_env
, src1
, src2
);
2421 /* Exit TB for TLB change if mmu is enabled. */
2422 if (ctx
->tb_flags
& PSW_C
) {
2423 ctx
->base
.is_jmp
= DISAS_IAQ_N_STALE
;
2425 return nullify_end(ctx
);
/* LPA: translate a virtual address to physical via helper; the result
 * overrides any base-register modification (see comment below). */
2429 static bool trans_lpa(DisasContext
*ctx
, arg_ldst
*a
)
2431 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR
);
2432 #ifndef CONFIG_USER_ONLY
2434 TCGv_i64 ofs
, paddr
;
2438 form_gva(ctx
, &vaddr
, &ofs
, a
->b
, a
->x
, 0, 0, a
->sp
, a
->m
, false);
2440 paddr
= tcg_temp_new_i64();
2441 gen_helper_lpa(paddr
, tcg_env
, vaddr
);
2443 /* Note that physical address result overrides base modification. */
2445 save_gpr(ctx
, a
->b
, ofs
);
2447 save_gpr(ctx
, a
->t
, paddr
);
2449 return nullify_end(ctx
);
/* LCI: load coherence index — implementation-defined; always 0 here. */
2453 static bool trans_lci(DisasContext
*ctx
, arg_lci
*a
)
2455 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR
);
2457 /* The Coherence Index is an implementation-defined function of the
2458 physical address. Two addresses with the same CI have a coherent
2459 view of the cache. Our implementation is to return 0 for all,
2460 since the entire address space is coherent. */
2461 save_gpr(ctx
, a
->t
, tcg_constant_i64(0));
2463 cond_free(&ctx
->null_cond
);
/* ADD variants: flags to do_add_reg select (in order, per the call
 * sites) logical/l, trap-on-overflow/tsv, and carry-in/c behavior —
 * exact parameter meaning defined at do_add_reg (outside this chunk). */
2467 static bool trans_add(DisasContext
*ctx
, arg_rrr_cf_d_sh
*a
)
2469 return do_add_reg(ctx
, a
, false, false, false, false);
2472 static bool trans_add_l(DisasContext
*ctx
, arg_rrr_cf_d_sh
*a
)
2474 return do_add_reg(ctx
, a
, true, false, false, false);
2477 static bool trans_add_tsv(DisasContext
*ctx
, arg_rrr_cf_d_sh
*a
)
2479 return do_add_reg(ctx
, a
, false, true, false, false);
2482 static bool trans_add_c(DisasContext
*ctx
, arg_rrr_cf_d_sh
*a
)
2484 return do_add_reg(ctx
, a
, false, false, false, true);
2487 static bool trans_add_c_tsv(DisasContext
*ctx
, arg_rrr_cf_d_sh
*a
)
2489 return do_add_reg(ctx
, a
, false, true, false, true);
/* SUB variants: flags select trap-on-overflow/tsv, borrow/b, and
 * trap-on-condition/tc behavior — defined at do_sub_reg. */
2492 static bool trans_sub(DisasContext
*ctx
, arg_rrr_cf_d
*a
)
2494 return do_sub_reg(ctx
, a
, false, false, false);
2497 static bool trans_sub_tsv(DisasContext
*ctx
, arg_rrr_cf_d
*a
)
2499 return do_sub_reg(ctx
, a
, true, false, false);
2502 static bool trans_sub_tc(DisasContext
*ctx
, arg_rrr_cf_d
*a
)
2504 return do_sub_reg(ctx
, a
, false, false, true);
2507 static bool trans_sub_tsv_tc(DisasContext
*ctx
, arg_rrr_cf_d
*a
)
2509 return do_sub_reg(ctx
, a
, true, false, true);
2512 static bool trans_sub_b(DisasContext
*ctx
, arg_rrr_cf_d
*a
)
2514 return do_sub_reg(ctx
, a
, false, true, false);
2517 static bool trans_sub_b_tsv(DisasContext
*ctx
, arg_rrr_cf_d
*a
)
2519 return do_sub_reg(ctx
, a
, true, true, false);
/* Logical register ops share do_log_reg with the TCG op as callback. */
2522 static bool trans_andcm(DisasContext
*ctx
, arg_rrr_cf_d
*a
)
2524 return do_log_reg(ctx
, a
, tcg_gen_andc_i64
);
2527 static bool trans_and(DisasContext
*ctx
, arg_rrr_cf_d
*a
)
2529 return do_log_reg(ctx
, a
, tcg_gen_and_i64
);
/* OR gets special handling: rt==0 is NOP, r2==0 is COPY (r1==0 gives a
 * clear-to-zero), and in system mode certain self-OR forms are QEMU
 * idle/pause extensions. */
2532 static bool trans_or(DisasContext
*ctx
, arg_rrr_cf_d
*a
)
2535 unsigned r2
= a
->r2
;
2536 unsigned r1
= a
->r1
;
2539 if (rt
== 0) { /* NOP */
2540 cond_free(&ctx
->null_cond
);
2543 if (r2
== 0) { /* COPY */
2545 TCGv_i64 dest
= dest_gpr(ctx
, rt
);
2546 tcg_gen_movi_i64(dest
, 0);
2547 save_gpr(ctx
, rt
, dest
);
2549 save_gpr(ctx
, rt
, cpu_gr
[r1
]);
2551 cond_free(&ctx
->null_cond
);
2554 #ifndef CONFIG_USER_ONLY
2555 /* These are QEMU extensions and are nops in the real architecture:
2557 * or %r10,%r10,%r10 -- idle loop; wait for interrupt
2558 * or %r31,%r31,%r31 -- death loop; offline cpu
2559 * currently implemented as idle.
2561 if ((rt
== 10 || rt
== 31) && r1
== rt
&& r2
== rt
) { /* PAUSE */
2562 /* No need to check for supervisor, as userland can only pause
2563 until the next timer interrupt. */
2566 /* Advance the instruction queue. */
2567 copy_iaoq_entry(ctx
, cpu_iaoq_f
, ctx
->iaoq_b
, cpu_iaoq_b
);
2568 copy_iaoq_entry(ctx
, cpu_iaoq_b
, ctx
->iaoq_n
, ctx
->iaoq_n_var
);
2569 nullify_set(ctx
, 0);
2571 /* Tell the qemu main loop to halt until this cpu has work. */
2572 tcg_gen_st_i32(tcg_constant_i32(1), tcg_env
,
2573 offsetof(CPUState
, halted
) - offsetof(HPPACPU
, env
));
2574 gen_excp_1(EXCP_HALTED
);
2575 ctx
->base
.is_jmp
= DISAS_NORETURN
;
2577 return nullify_end(ctx
);
/* General case: plain OR through the shared logical-op helper. */
2581 return do_log_reg(ctx
, a
, tcg_gen_or_i64
);
2584 static bool trans_xor(DisasContext
*ctx
, arg_rrr_cf_d
*a
)
2586 return do_log_reg(ctx
, a
, tcg_gen_xor_i64
);
/* CMPCLR: compare r1 with r2, clear rt, and set the nullification
 * condition from cf/d; the real work is in do_cmpclr. */
2589 static bool trans_cmpclr(DisasContext
*ctx
, arg_rrr_cf_d
*a
)
2591 TCGv_i64 tcg_r1
, tcg_r2
;
2596 tcg_r1
= load_gpr(ctx
, a
->r1
);
2597 tcg_r2
= load_gpr(ctx
, a
->r2
);
2598 do_cmpclr(ctx
, a
->t
, tcg_r1
, tcg_r2
, a
->cf
, a
->d
);
2599 return nullify_end(ctx
);
/* UXOR: unit XOR — per-unit condition evaluation via do_unit with XOR
 * as the operation and no trap-on-condition. */
2602 static bool trans_uxor(DisasContext
*ctx
, arg_rrr_cf_d
*a
)
2604 TCGv_i64 tcg_r1
, tcg_r2
;
2609 tcg_r1
= load_gpr(ctx
, a
->r1
);
2610 tcg_r2
= load_gpr(ctx
, a
->r2
);
2611 do_unit(ctx
, a
->t
, tcg_r1
, tcg_r2
, a
->cf
, a
->d
, false, tcg_gen_xor_i64
);
2612 return nullify_end(ctx
);
/* UADDCM / UADDCM,TC: unit add complement — r1 + ~r2 through do_unit;
 * is_tc selects the trap-on-condition variant. */
2615 static bool do_uaddcm(DisasContext
*ctx
, arg_rrr_cf_d
*a
, bool is_tc
)
2617 TCGv_i64 tcg_r1
, tcg_r2
, tmp
;
2622 tcg_r1
= load_gpr(ctx
, a
->r1
);
2623 tcg_r2
= load_gpr(ctx
, a
->r2
);
2624 tmp
= tcg_temp_new_i64();
2625 tcg_gen_not_i64(tmp
, tcg_r2
);
2626 do_unit(ctx
, a
->t
, tcg_r1
, tmp
, a
->cf
, a
->d
, is_tc
, tcg_gen_add_i64
);
2627 return nullify_end(ctx
);
2630 static bool trans_uaddcm(DisasContext
*ctx
, arg_rrr_cf_d
*a
)
2632 return do_uaddcm(ctx
, a
, false);
2635 static bool trans_uaddcm_tc(DisasContext
*ctx
, arg_rrr_cf_d
*a
)
2637 return do_uaddcm(ctx
, a
, true);
/* DCOR / IDCOR: decimal correct.  Builds a mask of 6s from the per-BCD-
 * digit carry bits (PSW[CB]) and adds or subtracts it via do_unit;
 * is_i selects the "intermediate" (add) form. */
2640 static bool do_dcor(DisasContext
*ctx
, arg_rr_cf_d
*a
, bool is_i
)
2646 tmp
= tcg_temp_new_i64();
2647 tcg_gen_shri_i64(tmp
, cpu_psw_cb
, 3);
2649 tcg_gen_not_i64(tmp
, tmp
);
/* Keep one bit per 4-bit digit, then scale to 6 per digit. */
2651 tcg_gen_andi_i64(tmp
, tmp
, (uint64_t)0x1111111111111111ull
);
2652 tcg_gen_muli_i64(tmp
, tmp
, 6);
2653 do_unit(ctx
, a
->t
, load_gpr(ctx
, a
->r
), tmp
, a
->cf
, a
->d
, false,
2654 is_i
? tcg_gen_add_i64
: tcg_gen_sub_i64
);
2655 return nullify_end(ctx
);
2658 static bool trans_dcor(DisasContext
*ctx
, arg_rr_cf_d
*a
)
2660 return do_dcor(ctx
, a
, false);
2663 static bool trans_dcor_i(DisasContext
*ctx
, arg_rr_cf_d
*a
)
2665 return do_dcor(ctx
, a
, true);
/* DS: divide step.  One step of the PA-RISC non-restoring division
 * primitive: shifts r1 left by one with carry-in, then conditionally
 * adds or subtracts r2 based on PSW[V], updating PSW[CB] and PSW[V]
 * for the next step. */
2668 static bool trans_ds(DisasContext
*ctx
, arg_rrr_cf
*a
)
2670 TCGv_i64 dest
, add1
, add2
, addc
, zero
, in1
, in2
;
2675 in1
= load_gpr(ctx
, a
->r1
);
2676 in2
= load_gpr(ctx
, a
->r2
);
2678 add1
= tcg_temp_new_i64();
2679 add2
= tcg_temp_new_i64();
2680 addc
= tcg_temp_new_i64();
2681 dest
= tcg_temp_new_i64();
2682 zero
= tcg_constant_i64(0);
2684 /* Form R1 << 1 | PSW[CB]{8}. */
2685 tcg_gen_add_i64(add1
, in1
, in1
);
2686 tcg_gen_add_i64(add1
, add1
, get_psw_carry(ctx
, false));
2689 * Add or subtract R2, depending on PSW[V]. Proper computation of
2690 * carry requires that we subtract via + ~R2 + 1, as described in
2691 * the manual. By extracting and masking V, we can produce the
2692 * proper inputs to the addition without movcond.
2694 tcg_gen_sextract_i64(addc
, cpu_psw_v
, 31, 1);
2695 tcg_gen_xor_i64(add2
, in2
, addc
);
2696 tcg_gen_andi_i64(addc
, addc
, 1);
/* Two-stage add so cpu_psw_cb_msb captures the carry out. */
2698 tcg_gen_add2_i64(dest
, cpu_psw_cb_msb
, add1
, zero
, add2
, zero
);
2699 tcg_gen_add2_i64(dest
, cpu_psw_cb_msb
, dest
, cpu_psw_cb_msb
, addc
, zero
);
2701 /* Write back the result register. */
2702 save_gpr(ctx
, a
->t
, dest
);
2704 /* Write back PSW[CB]. */
2705 tcg_gen_xor_i64(cpu_psw_cb
, add1
, add2
);
2706 tcg_gen_xor_i64(cpu_psw_cb
, cpu_psw_cb
, dest
);
2708 /* Write back PSW[V] for the division step. */
2709 cout
= get_psw_carry(ctx
, false);
2710 tcg_gen_neg_i64(cpu_psw_v
, cout
);
2711 tcg_gen_xor_i64(cpu_psw_v
, cpu_psw_v
, in2
);
2713 /* Install the new nullification. */
2716 if (cond_need_sv(a
->cf
>> 1)) {
2717 /* ??? The lshift is supposed to contribute to overflow. */
2718 sv
= do_add_sv(ctx
, dest
, add1
, add2
);
2720 ctx
->null_cond
= do_cond(ctx
, a
->cf
, false, dest
, cout
, sv
);
2723 return nullify_end(ctx
);
/* Immediate add/sub variants; the two bool flags to do_add_imm select
 * trap-on-overflow/tsv and trap-on-condition/tc (per the call sites);
 * exact semantics defined at do_add_imm/do_sub_imm outside this chunk. */
2726 static bool trans_addi(DisasContext
*ctx
, arg_rri_cf
*a
)
2728 return do_add_imm(ctx
, a
, false, false);
2731 static bool trans_addi_tsv(DisasContext
*ctx
, arg_rri_cf
*a
)
2733 return do_add_imm(ctx
, a
, true, false);
2736 static bool trans_addi_tc(DisasContext
*ctx
, arg_rri_cf
*a
)
2738 return do_add_imm(ctx
, a
, false, true);
2741 static bool trans_addi_tc_tsv(DisasContext
*ctx
, arg_rri_cf
*a
)
2743 return do_add_imm(ctx
, a
, true, true);
2746 static bool trans_subi(DisasContext
*ctx
, arg_rri_cf
*a
)
2748 return do_sub_imm(ctx
, a
, false);
2751 static bool trans_subi_tsv(DisasContext
*ctx
, arg_rri_cf
*a
)
2753 return do_sub_imm(ctx
, a
, true);
/* CMPICLR: compare immediate with GR[r], clear rt, set nullification. */
2756 static bool trans_cmpiclr(DisasContext
*ctx
, arg_rri_cf_d
*a
)
2758 TCGv_i64 tcg_im
, tcg_r2
;
2764 tcg_im
= tcg_constant_i64(a
->i
);
2765 tcg_r2
= load_gpr(ctx
, a
->r
);
2766 do_cmpclr(ctx
, a
->t
, tcg_im
, tcg_r2
, a
->cf
, a
->d
);
2768 return nullify_end(ctx
);
/* PA2.0 halfword-SIMD (multimedia) helpers.  All reject execution when
 * not PA2.0 (is_pa20 check) and apply a generated op `fn` to two GPR
 * operands, storing the result in rt.  NOTE(review): the extraction
 * dropped the `fn(dest, r1, r2)` call line and the `return false`/brace
 * lines in the first two helpers — confirm upstream. */
/* Three-register form: fn(dest, r1, r2). */
2771 static bool do_multimedia(DisasContext
*ctx
, arg_rrr
*a
,
2772 void (*fn
)(TCGv_i64
, TCGv_i64
, TCGv_i64
))
2774 TCGv_i64 r1
, r2
, dest
;
2776 if (!ctx
->is_pa20
) {
2782 r1
= load_gpr(ctx
, a
->r1
);
2783 r2
= load_gpr(ctx
, a
->r2
);
2784 dest
= dest_gpr(ctx
, a
->t
);
2787 save_gpr(ctx
, a
->t
, dest
);
2789 return nullify_end(ctx
);
/* Register-immediate shift form: fn(dest, r, imm). */
2792 static bool do_multimedia_sh(DisasContext
*ctx
, arg_rri
*a
,
2793 void (*fn
)(TCGv_i64
, TCGv_i64
, int64_t))
2797 if (!ctx
->is_pa20
) {
2803 r
= load_gpr(ctx
, a
->r
);
2804 dest
= dest_gpr(ctx
, a
->t
);
2807 save_gpr(ctx
, a
->t
, dest
);
2809 return nullify_end(ctx
);
/* Shift-and-add form: fn(dest, r1, r2, sh) with sh as an i32 constant. */
2812 static bool do_multimedia_shadd(DisasContext
*ctx
, arg_rrr_sh
*a
,
2813 void (*fn
)(TCGv_i64
, TCGv_i64
,
2814 TCGv_i64
, TCGv_i32
))
2816 TCGv_i64 r1
, r2
, dest
;
2818 if (!ctx
->is_pa20
) {
2824 r1
= load_gpr(ctx
, a
->r1
);
2825 r2
= load_gpr(ctx
, a
->r2
);
2826 dest
= dest_gpr(ctx
, a
->t
);
2828 fn(dest
, r1
, r2
, tcg_constant_i32(a
->sh
));
2829 save_gpr(ctx
, a
->t
, dest
);
2831 return nullify_end(ctx
);
2834 static bool trans_hadd(DisasContext
*ctx
, arg_rrr
*a
)
2836 return do_multimedia(ctx
, a
, tcg_gen_vec_add16_i64
);
2839 static bool trans_hadd_ss(DisasContext
*ctx
, arg_rrr
*a
)
2841 return do_multimedia(ctx
, a
, gen_helper_hadd_ss
);
2844 static bool trans_hadd_us(DisasContext
*ctx
, arg_rrr
*a
)
2846 return do_multimedia(ctx
, a
, gen_helper_hadd_us
);
2849 static bool trans_havg(DisasContext
*ctx
, arg_rrr
*a
)
2851 return do_multimedia(ctx
, a
, gen_helper_havg
);
2854 static bool trans_hshl(DisasContext
*ctx
, arg_rri
*a
)
2856 return do_multimedia_sh(ctx
, a
, tcg_gen_vec_shl16i_i64
);
2859 static bool trans_hshr_s(DisasContext
*ctx
, arg_rri
*a
)
2861 return do_multimedia_sh(ctx
, a
, tcg_gen_vec_sar16i_i64
);
2864 static bool trans_hshr_u(DisasContext
*ctx
, arg_rri
*a
)
2866 return do_multimedia_sh(ctx
, a
, tcg_gen_vec_shr16i_i64
);
2869 static bool trans_hshladd(DisasContext
*ctx
, arg_rrr_sh
*a
)
2871 return do_multimedia_shadd(ctx
, a
, gen_helper_hshladd
);
2874 static bool trans_hshradd(DisasContext
*ctx
, arg_rrr_sh
*a
)
2876 return do_multimedia_shadd(ctx
, a
, gen_helper_hshradd
);
2879 static bool trans_hsub(DisasContext
*ctx
, arg_rrr
*a
)
2881 return do_multimedia(ctx
, a
, tcg_gen_vec_sub16_i64
);
2884 static bool trans_hsub_ss(DisasContext
*ctx
, arg_rrr
*a
)
2886 return do_multimedia(ctx
, a
, gen_helper_hsub_ss
);
2889 static bool trans_hsub_us(DisasContext
*ctx
, arg_rrr
*a
)
2891 return do_multimedia(ctx
, a
, gen_helper_hsub_us
);
2894 static bool trans_ld(DisasContext
*ctx
, arg_ldst
*a
)
2896 if (!ctx
->is_pa20
&& a
->size
> MO_32
) {
2897 return gen_illegal(ctx
);
2899 return do_load(ctx
, a
->t
, a
->b
, a
->x
, a
->scale
? a
->size
: 0,
2900 a
->disp
, a
->sp
, a
->m
, a
->size
| MO_TE
);
2903 static bool trans_st(DisasContext
*ctx
, arg_ldst
*a
)
2905 assert(a
->x
== 0 && a
->scale
== 0);
2906 if (!ctx
->is_pa20
&& a
->size
> MO_32
) {
2907 return gen_illegal(ctx
);
2909 return do_store(ctx
, a
->t
, a
->b
, a
->disp
, a
->sp
, a
->m
, a
->size
| MO_TE
);
2912 static bool trans_ldc(DisasContext
*ctx
, arg_ldst
*a
)
2914 MemOp mop
= MO_TE
| MO_ALIGN
| a
->size
;
2915 TCGv_i64 zero
, dest
, ofs
;
2918 if (!ctx
->is_pa20
&& a
->size
> MO_32
) {
2919 return gen_illegal(ctx
);
2925 /* Base register modification. Make sure if RT == RB,
2926 we see the result of the load. */
2927 dest
= tcg_temp_new_i64();
2929 dest
= dest_gpr(ctx
, a
->t
);
2932 form_gva(ctx
, &addr
, &ofs
, a
->b
, a
->x
, a
->scale
? a
->size
: 0,
2933 a
->disp
, a
->sp
, a
->m
, ctx
->mmu_idx
== MMU_PHYS_IDX
);
2936 * For hppa1.1, LDCW is undefined unless aligned mod 16.
2937 * However actual hardware succeeds with aligned mod 4.
2938 * Detect this case and log a GUEST_ERROR.
2940 * TODO: HPPA64 relaxes the over-alignment requirement
2941 * with the ,co completer.
2943 gen_helper_ldc_check(addr
);
2945 zero
= tcg_constant_i64(0);
2946 tcg_gen_atomic_xchg_i64(dest
, addr
, zero
, ctx
->mmu_idx
, mop
);
2949 save_gpr(ctx
, a
->b
, ofs
);
2951 save_gpr(ctx
, a
->t
, dest
);
2953 return nullify_end(ctx
);
2956 static bool trans_stby(DisasContext
*ctx
, arg_stby
*a
)
2963 form_gva(ctx
, &addr
, &ofs
, a
->b
, 0, 0, a
->disp
, a
->sp
, a
->m
,
2964 ctx
->mmu_idx
== MMU_PHYS_IDX
);
2965 val
= load_gpr(ctx
, a
->r
);
2967 if (tb_cflags(ctx
->base
.tb
) & CF_PARALLEL
) {
2968 gen_helper_stby_e_parallel(tcg_env
, addr
, val
);
2970 gen_helper_stby_e(tcg_env
, addr
, val
);
2973 if (tb_cflags(ctx
->base
.tb
) & CF_PARALLEL
) {
2974 gen_helper_stby_b_parallel(tcg_env
, addr
, val
);
2976 gen_helper_stby_b(tcg_env
, addr
, val
);
2980 tcg_gen_andi_i64(ofs
, ofs
, ~3);
2981 save_gpr(ctx
, a
->b
, ofs
);
2984 return nullify_end(ctx
);
2987 static bool trans_stdby(DisasContext
*ctx
, arg_stby
*a
)
2992 if (!ctx
->is_pa20
) {
2997 form_gva(ctx
, &addr
, &ofs
, a
->b
, 0, 0, a
->disp
, a
->sp
, a
->m
,
2998 ctx
->mmu_idx
== MMU_PHYS_IDX
);
2999 val
= load_gpr(ctx
, a
->r
);
3001 if (tb_cflags(ctx
->base
.tb
) & CF_PARALLEL
) {
3002 gen_helper_stdby_e_parallel(tcg_env
, addr
, val
);
3004 gen_helper_stdby_e(tcg_env
, addr
, val
);
3007 if (tb_cflags(ctx
->base
.tb
) & CF_PARALLEL
) {
3008 gen_helper_stdby_b_parallel(tcg_env
, addr
, val
);
3010 gen_helper_stdby_b(tcg_env
, addr
, val
);
3014 tcg_gen_andi_i64(ofs
, ofs
, ~7);
3015 save_gpr(ctx
, a
->b
, ofs
);
3018 return nullify_end(ctx
);
3021 static bool trans_lda(DisasContext
*ctx
, arg_ldst
*a
)
3023 int hold_mmu_idx
= ctx
->mmu_idx
;
3025 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR
);
3026 ctx
->mmu_idx
= MMU_PHYS_IDX
;
3028 ctx
->mmu_idx
= hold_mmu_idx
;
3032 static bool trans_sta(DisasContext
*ctx
, arg_ldst
*a
)
3034 int hold_mmu_idx
= ctx
->mmu_idx
;
3036 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR
);
3037 ctx
->mmu_idx
= MMU_PHYS_IDX
;
3039 ctx
->mmu_idx
= hold_mmu_idx
;
3043 static bool trans_ldil(DisasContext
*ctx
, arg_ldil
*a
)
3045 TCGv_i64 tcg_rt
= dest_gpr(ctx
, a
->t
);
3047 tcg_gen_movi_i64(tcg_rt
, a
->i
);
3048 save_gpr(ctx
, a
->t
, tcg_rt
);
3049 cond_free(&ctx
->null_cond
);
3053 static bool trans_addil(DisasContext
*ctx
, arg_addil
*a
)
3055 TCGv_i64 tcg_rt
= load_gpr(ctx
, a
->r
);
3056 TCGv_i64 tcg_r1
= dest_gpr(ctx
, 1);
3058 tcg_gen_addi_i64(tcg_r1
, tcg_rt
, a
->i
);
3059 save_gpr(ctx
, 1, tcg_r1
);
3060 cond_free(&ctx
->null_cond
);
3064 static bool trans_ldo(DisasContext
*ctx
, arg_ldo
*a
)
3066 TCGv_i64 tcg_rt
= dest_gpr(ctx
, a
->t
);
3068 /* Special case rb == 0, for the LDI pseudo-op.
3069 The COPY pseudo-op is handled for free within tcg_gen_addi_i64. */
3071 tcg_gen_movi_i64(tcg_rt
, a
->i
);
3073 tcg_gen_addi_i64(tcg_rt
, cpu_gr
[a
->b
], a
->i
);
3075 save_gpr(ctx
, a
->t
, tcg_rt
);
3076 cond_free(&ctx
->null_cond
);
3080 static bool do_cmpb(DisasContext
*ctx
, unsigned r
, TCGv_i64 in1
,
3081 unsigned c
, unsigned f
, bool d
, unsigned n
, int disp
)
3083 TCGv_i64 dest
, in2
, sv
;
3086 in2
= load_gpr(ctx
, r
);
3087 dest
= tcg_temp_new_i64();
3089 tcg_gen_sub_i64(dest
, in1
, in2
);
3092 if (cond_need_sv(c
)) {
3093 sv
= do_sub_sv(ctx
, dest
, in1
, in2
);
3096 cond
= do_sub_cond(ctx
, c
* 2 + f
, d
, dest
, in1
, in2
, sv
);
3097 return do_cbranch(ctx
, disp
, n
, &cond
);
3100 static bool trans_cmpb(DisasContext
*ctx
, arg_cmpb
*a
)
3102 if (!ctx
->is_pa20
&& a
->d
) {
3106 return do_cmpb(ctx
, a
->r2
, load_gpr(ctx
, a
->r1
),
3107 a
->c
, a
->f
, a
->d
, a
->n
, a
->disp
);
3110 static bool trans_cmpbi(DisasContext
*ctx
, arg_cmpbi
*a
)
3112 if (!ctx
->is_pa20
&& a
->d
) {
3116 return do_cmpb(ctx
, a
->r
, tcg_constant_i64(a
->i
),
3117 a
->c
, a
->f
, a
->d
, a
->n
, a
->disp
);
3120 static bool do_addb(DisasContext
*ctx
, unsigned r
, TCGv_i64 in1
,
3121 unsigned c
, unsigned f
, unsigned n
, int disp
)
3123 TCGv_i64 dest
, in2
, sv
, cb_cond
;
3128 * For hppa64, the ADDB conditions change with PSW.W,
3129 * dropping ZNV, SV, OD in favor of double-word EQ, LT, LE.
3131 if (ctx
->tb_flags
& PSW_W
) {
3138 in2
= load_gpr(ctx
, r
);
3139 dest
= tcg_temp_new_i64();
3143 if (cond_need_cb(c
)) {
3144 TCGv_i64 cb
= tcg_temp_new_i64();
3145 TCGv_i64 cb_msb
= tcg_temp_new_i64();
3147 tcg_gen_movi_i64(cb_msb
, 0);
3148 tcg_gen_add2_i64(dest
, cb_msb
, in1
, cb_msb
, in2
, cb_msb
);
3149 tcg_gen_xor_i64(cb
, in1
, in2
);
3150 tcg_gen_xor_i64(cb
, cb
, dest
);
3151 cb_cond
= get_carry(ctx
, d
, cb
, cb_msb
);
3153 tcg_gen_add_i64(dest
, in1
, in2
);
3155 if (cond_need_sv(c
)) {
3156 sv
= do_add_sv(ctx
, dest
, in1
, in2
);
3159 cond
= do_cond(ctx
, c
* 2 + f
, d
, dest
, cb_cond
, sv
);
3160 save_gpr(ctx
, r
, dest
);
3161 return do_cbranch(ctx
, disp
, n
, &cond
);
3164 static bool trans_addb(DisasContext
*ctx
, arg_addb
*a
)
3167 return do_addb(ctx
, a
->r2
, load_gpr(ctx
, a
->r1
), a
->c
, a
->f
, a
->n
, a
->disp
);
3170 static bool trans_addbi(DisasContext
*ctx
, arg_addbi
*a
)
3173 return do_addb(ctx
, a
->r
, tcg_constant_i64(a
->i
), a
->c
, a
->f
, a
->n
, a
->disp
);
3176 static bool trans_bb_sar(DisasContext
*ctx
, arg_bb_sar
*a
)
3178 TCGv_i64 tmp
, tcg_r
;
3183 tmp
= tcg_temp_new_i64();
3184 tcg_r
= load_gpr(ctx
, a
->r
);
3185 if (cond_need_ext(ctx
, a
->d
)) {
3186 /* Force shift into [32,63] */
3187 tcg_gen_ori_i64(tmp
, cpu_sar
, 32);
3188 tcg_gen_shl_i64(tmp
, tcg_r
, tmp
);
3190 tcg_gen_shl_i64(tmp
, tcg_r
, cpu_sar
);
3193 cond
= cond_make_0_tmp(a
->c
? TCG_COND_GE
: TCG_COND_LT
, tmp
);
3194 return do_cbranch(ctx
, a
->disp
, a
->n
, &cond
);
3197 static bool trans_bb_imm(DisasContext
*ctx
, arg_bb_imm
*a
)
3199 TCGv_i64 tmp
, tcg_r
;
3205 tmp
= tcg_temp_new_i64();
3206 tcg_r
= load_gpr(ctx
, a
->r
);
3207 p
= a
->p
| (cond_need_ext(ctx
, a
->d
) ? 32 : 0);
3208 tcg_gen_shli_i64(tmp
, tcg_r
, p
);
3210 cond
= cond_make_0(a
->c
? TCG_COND_GE
: TCG_COND_LT
, tmp
);
3211 return do_cbranch(ctx
, a
->disp
, a
->n
, &cond
);
3214 static bool trans_movb(DisasContext
*ctx
, arg_movb
*a
)
3221 dest
= dest_gpr(ctx
, a
->r2
);
3223 tcg_gen_movi_i64(dest
, 0);
3225 tcg_gen_mov_i64(dest
, cpu_gr
[a
->r1
]);
3228 /* All MOVB conditions are 32-bit. */
3229 cond
= do_sed_cond(ctx
, a
->c
, false, dest
);
3230 return do_cbranch(ctx
, a
->disp
, a
->n
, &cond
);
3233 static bool trans_movbi(DisasContext
*ctx
, arg_movbi
*a
)
3240 dest
= dest_gpr(ctx
, a
->r
);
3241 tcg_gen_movi_i64(dest
, a
->i
);
3243 /* All MOVBI conditions are 32-bit. */
3244 cond
= do_sed_cond(ctx
, a
->c
, false, dest
);
3245 return do_cbranch(ctx
, a
->disp
, a
->n
, &cond
);
3248 static bool trans_shrp_sar(DisasContext
*ctx
, arg_shrp_sar
*a
)
3250 TCGv_i64 dest
, src2
;
3252 if (!ctx
->is_pa20
&& a
->d
) {
3259 dest
= dest_gpr(ctx
, a
->t
);
3260 src2
= load_gpr(ctx
, a
->r2
);
3263 tcg_gen_shr_i64(dest
, src2
, cpu_sar
);
3265 TCGv_i64 tmp
= tcg_temp_new_i64();
3267 tcg_gen_ext32u_i64(dest
, src2
);
3268 tcg_gen_andi_i64(tmp
, cpu_sar
, 31);
3269 tcg_gen_shr_i64(dest
, dest
, tmp
);
3271 } else if (a
->r1
== a
->r2
) {
3273 tcg_gen_rotr_i64(dest
, src2
, cpu_sar
);
3275 TCGv_i32 t32
= tcg_temp_new_i32();
3276 TCGv_i32 s32
= tcg_temp_new_i32();
3278 tcg_gen_extrl_i64_i32(t32
, src2
);
3279 tcg_gen_extrl_i64_i32(s32
, cpu_sar
);
3280 tcg_gen_andi_i32(s32
, s32
, 31);
3281 tcg_gen_rotr_i32(t32
, t32
, s32
);
3282 tcg_gen_extu_i32_i64(dest
, t32
);
3285 TCGv_i64 src1
= load_gpr(ctx
, a
->r1
);
3288 TCGv_i64 t
= tcg_temp_new_i64();
3289 TCGv_i64 n
= tcg_temp_new_i64();
3291 tcg_gen_xori_i64(n
, cpu_sar
, 63);
3292 tcg_gen_shl_i64(t
, src2
, n
);
3293 tcg_gen_shli_i64(t
, t
, 1);
3294 tcg_gen_shr_i64(dest
, src1
, cpu_sar
);
3295 tcg_gen_or_i64(dest
, dest
, t
);
3297 TCGv_i64 t
= tcg_temp_new_i64();
3298 TCGv_i64 s
= tcg_temp_new_i64();
3300 tcg_gen_concat32_i64(t
, src2
, src1
);
3301 tcg_gen_andi_i64(s
, cpu_sar
, 31);
3302 tcg_gen_shr_i64(dest
, t
, s
);
3305 save_gpr(ctx
, a
->t
, dest
);
3307 /* Install the new nullification. */
3308 cond_free(&ctx
->null_cond
);
3310 ctx
->null_cond
= do_sed_cond(ctx
, a
->c
, false, dest
);
3312 return nullify_end(ctx
);
3315 static bool trans_shrp_imm(DisasContext
*ctx
, arg_shrp_imm
*a
)
3320 if (!ctx
->is_pa20
&& a
->d
) {
3327 width
= a
->d
? 64 : 32;
3328 sa
= width
- 1 - a
->cpos
;
3330 dest
= dest_gpr(ctx
, a
->t
);
3331 t2
= load_gpr(ctx
, a
->r2
);
3333 tcg_gen_extract_i64(dest
, t2
, sa
, width
- sa
);
3334 } else if (width
== TARGET_LONG_BITS
) {
3335 tcg_gen_extract2_i64(dest
, t2
, cpu_gr
[a
->r1
], sa
);
3338 if (a
->r1
== a
->r2
) {
3339 TCGv_i32 t32
= tcg_temp_new_i32();
3340 tcg_gen_extrl_i64_i32(t32
, t2
);
3341 tcg_gen_rotri_i32(t32
, t32
, sa
);
3342 tcg_gen_extu_i32_i64(dest
, t32
);
3344 tcg_gen_concat32_i64(dest
, t2
, cpu_gr
[a
->r1
]);
3345 tcg_gen_extract_i64(dest
, dest
, sa
, 32);
3348 save_gpr(ctx
, a
->t
, dest
);
3350 /* Install the new nullification. */
3351 cond_free(&ctx
->null_cond
);
3353 ctx
->null_cond
= do_sed_cond(ctx
, a
->c
, false, dest
);
3355 return nullify_end(ctx
);
3358 static bool trans_extr_sar(DisasContext
*ctx
, arg_extr_sar
*a
)
3360 unsigned widthm1
= a
->d
? 63 : 31;
3361 TCGv_i64 dest
, src
, tmp
;
3363 if (!ctx
->is_pa20
&& a
->d
) {
3370 dest
= dest_gpr(ctx
, a
->t
);
3371 src
= load_gpr(ctx
, a
->r
);
3372 tmp
= tcg_temp_new_i64();
3374 /* Recall that SAR is using big-endian bit numbering. */
3375 tcg_gen_andi_i64(tmp
, cpu_sar
, widthm1
);
3376 tcg_gen_xori_i64(tmp
, tmp
, widthm1
);
3380 tcg_gen_ext32s_i64(dest
, src
);
3383 tcg_gen_sar_i64(dest
, src
, tmp
);
3384 tcg_gen_sextract_i64(dest
, dest
, 0, a
->len
);
3387 tcg_gen_ext32u_i64(dest
, src
);
3390 tcg_gen_shr_i64(dest
, src
, tmp
);
3391 tcg_gen_extract_i64(dest
, dest
, 0, a
->len
);
3393 save_gpr(ctx
, a
->t
, dest
);
3395 /* Install the new nullification. */
3396 cond_free(&ctx
->null_cond
);
3398 ctx
->null_cond
= do_sed_cond(ctx
, a
->c
, a
->d
, dest
);
3400 return nullify_end(ctx
);
3403 static bool trans_extr_imm(DisasContext
*ctx
, arg_extr_imm
*a
)
3405 unsigned len
, cpos
, width
;
3408 if (!ctx
->is_pa20
&& a
->d
) {
3416 width
= a
->d
? 64 : 32;
3417 cpos
= width
- 1 - a
->pos
;
3418 if (cpos
+ len
> width
) {
3422 dest
= dest_gpr(ctx
, a
->t
);
3423 src
= load_gpr(ctx
, a
->r
);
3425 tcg_gen_sextract_i64(dest
, src
, cpos
, len
);
3427 tcg_gen_extract_i64(dest
, src
, cpos
, len
);
3429 save_gpr(ctx
, a
->t
, dest
);
3431 /* Install the new nullification. */
3432 cond_free(&ctx
->null_cond
);
3434 ctx
->null_cond
= do_sed_cond(ctx
, a
->c
, a
->d
, dest
);
3436 return nullify_end(ctx
);
3439 static bool trans_depi_imm(DisasContext
*ctx
, arg_depi_imm
*a
)
3441 unsigned len
, width
;
3442 uint64_t mask0
, mask1
;
3445 if (!ctx
->is_pa20
&& a
->d
) {
3453 width
= a
->d
? 64 : 32;
3454 if (a
->cpos
+ len
> width
) {
3455 len
= width
- a
->cpos
;
3458 dest
= dest_gpr(ctx
, a
->t
);
3459 mask0
= deposit64(0, a
->cpos
, len
, a
->i
);
3460 mask1
= deposit64(-1, a
->cpos
, len
, a
->i
);
3463 TCGv_i64 src
= load_gpr(ctx
, a
->t
);
3464 tcg_gen_andi_i64(dest
, src
, mask1
);
3465 tcg_gen_ori_i64(dest
, dest
, mask0
);
3467 tcg_gen_movi_i64(dest
, mask0
);
3469 save_gpr(ctx
, a
->t
, dest
);
3471 /* Install the new nullification. */
3472 cond_free(&ctx
->null_cond
);
3474 ctx
->null_cond
= do_sed_cond(ctx
, a
->c
, a
->d
, dest
);
3476 return nullify_end(ctx
);
3479 static bool trans_dep_imm(DisasContext
*ctx
, arg_dep_imm
*a
)
3481 unsigned rs
= a
->nz
? a
->t
: 0;
3482 unsigned len
, width
;
3485 if (!ctx
->is_pa20
&& a
->d
) {
3493 width
= a
->d
? 64 : 32;
3494 if (a
->cpos
+ len
> width
) {
3495 len
= width
- a
->cpos
;
3498 dest
= dest_gpr(ctx
, a
->t
);
3499 val
= load_gpr(ctx
, a
->r
);
3501 tcg_gen_deposit_z_i64(dest
, val
, a
->cpos
, len
);
3503 tcg_gen_deposit_i64(dest
, cpu_gr
[rs
], val
, a
->cpos
, len
);
3505 save_gpr(ctx
, a
->t
, dest
);
3507 /* Install the new nullification. */
3508 cond_free(&ctx
->null_cond
);
3510 ctx
->null_cond
= do_sed_cond(ctx
, a
->c
, a
->d
, dest
);
3512 return nullify_end(ctx
);
3515 static bool do_dep_sar(DisasContext
*ctx
, unsigned rt
, unsigned c
,
3516 bool d
, bool nz
, unsigned len
, TCGv_i64 val
)
3518 unsigned rs
= nz
? rt
: 0;
3519 unsigned widthm1
= d
? 63 : 31;
3520 TCGv_i64 mask
, tmp
, shift
, dest
;
3521 uint64_t msb
= 1ULL << (len
- 1);
3523 dest
= dest_gpr(ctx
, rt
);
3524 shift
= tcg_temp_new_i64();
3525 tmp
= tcg_temp_new_i64();
3527 /* Convert big-endian bit numbering in SAR to left-shift. */
3528 tcg_gen_andi_i64(shift
, cpu_sar
, widthm1
);
3529 tcg_gen_xori_i64(shift
, shift
, widthm1
);
3531 mask
= tcg_temp_new_i64();
3532 tcg_gen_movi_i64(mask
, msb
+ (msb
- 1));
3533 tcg_gen_and_i64(tmp
, val
, mask
);
3535 tcg_gen_shl_i64(mask
, mask
, shift
);
3536 tcg_gen_shl_i64(tmp
, tmp
, shift
);
3537 tcg_gen_andc_i64(dest
, cpu_gr
[rs
], mask
);
3538 tcg_gen_or_i64(dest
, dest
, tmp
);
3540 tcg_gen_shl_i64(dest
, tmp
, shift
);
3542 save_gpr(ctx
, rt
, dest
);
3544 /* Install the new nullification. */
3545 cond_free(&ctx
->null_cond
);
3547 ctx
->null_cond
= do_sed_cond(ctx
, c
, d
, dest
);
3549 return nullify_end(ctx
);
3552 static bool trans_dep_sar(DisasContext
*ctx
, arg_dep_sar
*a
)
3554 if (!ctx
->is_pa20
&& a
->d
) {
3560 return do_dep_sar(ctx
, a
->t
, a
->c
, a
->d
, a
->nz
, a
->len
,
3561 load_gpr(ctx
, a
->r
));
3564 static bool trans_depi_sar(DisasContext
*ctx
, arg_depi_sar
*a
)
3566 if (!ctx
->is_pa20
&& a
->d
) {
3572 return do_dep_sar(ctx
, a
->t
, a
->c
, a
->d
, a
->nz
, a
->len
,
3573 tcg_constant_i64(a
->i
));
3576 static bool trans_be(DisasContext
*ctx
, arg_be
*a
)
3580 #ifdef CONFIG_USER_ONLY
3581 /* ??? It seems like there should be a good way of using
3582 "be disp(sr2, r0)", the canonical gateway entry mechanism
3583 to our advantage. But that appears to be inconvenient to
3584 manage along side branch delay slots. Therefore we handle
3585 entry into the gateway page via absolute address. */
3586 /* Since we don't implement spaces, just branch. Do notice the special
3587 case of "be disp(*,r0)" using a direct branch to disp, so that we can
3588 goto_tb to the TB containing the syscall. */
3590 return do_dbranch(ctx
, a
->disp
, a
->l
, a
->n
);
3596 tmp
= tcg_temp_new_i64();
3597 tcg_gen_addi_i64(tmp
, load_gpr(ctx
, a
->b
), a
->disp
);
3598 tmp
= do_ibranch_priv(ctx
, tmp
);
3600 #ifdef CONFIG_USER_ONLY
3601 return do_ibranch(ctx
, tmp
, a
->l
, a
->n
);
3603 TCGv_i64 new_spc
= tcg_temp_new_i64();
3605 load_spr(ctx
, new_spc
, a
->sp
);
3607 copy_iaoq_entry(ctx
, cpu_gr
[31], ctx
->iaoq_n
, ctx
->iaoq_n_var
);
3608 tcg_gen_mov_i64(cpu_sr
[0], cpu_iasq_f
);
3610 if (a
->n
&& use_nullify_skip(ctx
)) {
3611 copy_iaoq_entry(ctx
, cpu_iaoq_f
, -1, tmp
);
3612 tcg_gen_addi_i64(tmp
, tmp
, 4);
3613 copy_iaoq_entry(ctx
, cpu_iaoq_b
, -1, tmp
);
3614 tcg_gen_mov_i64(cpu_iasq_f
, new_spc
);
3615 tcg_gen_mov_i64(cpu_iasq_b
, cpu_iasq_f
);
3617 copy_iaoq_entry(ctx
, cpu_iaoq_f
, ctx
->iaoq_b
, cpu_iaoq_b
);
3618 if (ctx
->iaoq_b
== -1) {
3619 tcg_gen_mov_i64(cpu_iasq_f
, cpu_iasq_b
);
3621 copy_iaoq_entry(ctx
, cpu_iaoq_b
, -1, tmp
);
3622 tcg_gen_mov_i64(cpu_iasq_b
, new_spc
);
3623 nullify_set(ctx
, a
->n
);
3625 tcg_gen_lookup_and_goto_ptr();
3626 ctx
->base
.is_jmp
= DISAS_NORETURN
;
3627 return nullify_end(ctx
);
3631 static bool trans_bl(DisasContext
*ctx
, arg_bl
*a
)
3633 return do_dbranch(ctx
, iaoq_dest(ctx
, a
->disp
), a
->l
, a
->n
);
3636 static bool trans_b_gate(DisasContext
*ctx
, arg_b_gate
*a
)
3638 uint64_t dest
= iaoq_dest(ctx
, a
->disp
);
3642 /* Make sure the caller hasn't done something weird with the queue.
3643 * ??? This is not quite the same as the PSW[B] bit, which would be
3644 * expensive to track. Real hardware will trap for
3646 * b gateway+4 (in delay slot of first branch)
3647 * However, checking for a non-sequential instruction queue *will*
3648 * diagnose the security hole
3651 * in which instructions at evil would run with increased privs.
3653 if (ctx
->iaoq_b
== -1 || ctx
->iaoq_b
!= ctx
->iaoq_f
+ 4) {
3654 return gen_illegal(ctx
);
3657 #ifndef CONFIG_USER_ONLY
3658 if (ctx
->tb_flags
& PSW_C
) {
3659 CPUHPPAState
*env
= cpu_env(ctx
->cs
);
3660 int type
= hppa_artype_for_page(env
, ctx
->base
.pc_next
);
3661 /* If we could not find a TLB entry, then we need to generate an
3662 ITLB miss exception so the kernel will provide it.
3663 The resulting TLB fill operation will invalidate this TB and
3664 we will re-translate, at which point we *will* be able to find
3665 the TLB entry and determine if this is in fact a gateway page. */
3667 gen_excp(ctx
, EXCP_ITLB_MISS
);
3670 /* No change for non-gateway pages or for priv decrease. */
3671 if (type
>= 4 && type
- 4 < ctx
->privilege
) {
3672 dest
= deposit32(dest
, 0, 2, type
- 4);
3675 dest
&= -4; /* priv = 0 */
3680 TCGv_i64 tmp
= dest_gpr(ctx
, a
->l
);
3681 if (ctx
->privilege
< 3) {
3682 tcg_gen_andi_i64(tmp
, tmp
, -4);
3684 tcg_gen_ori_i64(tmp
, tmp
, ctx
->privilege
);
3685 save_gpr(ctx
, a
->l
, tmp
);
3688 return do_dbranch(ctx
, dest
, 0, a
->n
);
3691 static bool trans_blr(DisasContext
*ctx
, arg_blr
*a
)
3694 TCGv_i64 tmp
= tcg_temp_new_i64();
3695 tcg_gen_shli_i64(tmp
, load_gpr(ctx
, a
->x
), 3);
3696 tcg_gen_addi_i64(tmp
, tmp
, ctx
->iaoq_f
+ 8);
3697 /* The computation here never changes privilege level. */
3698 return do_ibranch(ctx
, tmp
, a
->l
, a
->n
);
3700 /* BLR R0,RX is a good way to load PC+8 into RX. */
3701 return do_dbranch(ctx
, ctx
->iaoq_f
+ 8, a
->l
, a
->n
);
3705 static bool trans_bv(DisasContext
*ctx
, arg_bv
*a
)
3710 dest
= load_gpr(ctx
, a
->b
);
3712 dest
= tcg_temp_new_i64();
3713 tcg_gen_shli_i64(dest
, load_gpr(ctx
, a
->x
), 3);
3714 tcg_gen_add_i64(dest
, dest
, load_gpr(ctx
, a
->b
));
3716 dest
= do_ibranch_priv(ctx
, dest
);
3717 return do_ibranch(ctx
, dest
, 0, a
->n
);
3720 static bool trans_bve(DisasContext
*ctx
, arg_bve
*a
)
3724 #ifdef CONFIG_USER_ONLY
3725 dest
= do_ibranch_priv(ctx
, load_gpr(ctx
, a
->b
));
3726 return do_ibranch(ctx
, dest
, a
->l
, a
->n
);
3729 dest
= do_ibranch_priv(ctx
, load_gpr(ctx
, a
->b
));
3731 copy_iaoq_entry(ctx
, cpu_iaoq_f
, ctx
->iaoq_b
, cpu_iaoq_b
);
3732 if (ctx
->iaoq_b
== -1) {
3733 tcg_gen_mov_i64(cpu_iasq_f
, cpu_iasq_b
);
3735 copy_iaoq_entry(ctx
, cpu_iaoq_b
, -1, dest
);
3736 tcg_gen_mov_i64(cpu_iasq_b
, space_select(ctx
, 0, dest
));
3738 copy_iaoq_entry(ctx
, cpu_gr
[a
->l
], ctx
->iaoq_n
, ctx
->iaoq_n_var
);
3740 nullify_set(ctx
, a
->n
);
3741 tcg_gen_lookup_and_goto_ptr();
3742 ctx
->base
.is_jmp
= DISAS_NORETURN
;
3743 return nullify_end(ctx
);
3747 static bool trans_nopbts(DisasContext
*ctx
, arg_nopbts
*a
)
3749 /* All branch target stack instructions implement as nop. */
3750 return ctx
->is_pa20
;
3757 static void gen_fcpy_f(TCGv_i32 dst
, TCGv_env unused
, TCGv_i32 src
)
3759 tcg_gen_mov_i32(dst
, src
);
3762 static bool trans_fid_f(DisasContext
*ctx
, arg_fid_f
*a
)
3767 ret
= 0x13080000000000ULL
; /* PA8700 (PCX-W2) */
3769 ret
= 0x0f080000000000ULL
; /* PA7300LC (PCX-L2) */
3773 save_frd(0, tcg_constant_i64(ret
));
3774 return nullify_end(ctx
);
3777 static bool trans_fcpy_f(DisasContext
*ctx
, arg_fclass01
*a
)
3779 return do_fop_wew(ctx
, a
->t
, a
->r
, gen_fcpy_f
);
3782 static void gen_fcpy_d(TCGv_i64 dst
, TCGv_env unused
, TCGv_i64 src
)
3784 tcg_gen_mov_i64(dst
, src
);
3787 static bool trans_fcpy_d(DisasContext
*ctx
, arg_fclass01
*a
)
3789 return do_fop_ded(ctx
, a
->t
, a
->r
, gen_fcpy_d
);
3792 static void gen_fabs_f(TCGv_i32 dst
, TCGv_env unused
, TCGv_i32 src
)
3794 tcg_gen_andi_i32(dst
, src
, INT32_MAX
);
3797 static bool trans_fabs_f(DisasContext
*ctx
, arg_fclass01
*a
)
3799 return do_fop_wew(ctx
, a
->t
, a
->r
, gen_fabs_f
);
3802 static void gen_fabs_d(TCGv_i64 dst
, TCGv_env unused
, TCGv_i64 src
)
3804 tcg_gen_andi_i64(dst
, src
, INT64_MAX
);
3807 static bool trans_fabs_d(DisasContext
*ctx
, arg_fclass01
*a
)
3809 return do_fop_ded(ctx
, a
->t
, a
->r
, gen_fabs_d
);
3812 static bool trans_fsqrt_f(DisasContext
*ctx
, arg_fclass01
*a
)
3814 return do_fop_wew(ctx
, a
->t
, a
->r
, gen_helper_fsqrt_s
);
3817 static bool trans_fsqrt_d(DisasContext
*ctx
, arg_fclass01
*a
)
3819 return do_fop_ded(ctx
, a
->t
, a
->r
, gen_helper_fsqrt_d
);
3822 static bool trans_frnd_f(DisasContext
*ctx
, arg_fclass01
*a
)
3824 return do_fop_wew(ctx
, a
->t
, a
->r
, gen_helper_frnd_s
);
3827 static bool trans_frnd_d(DisasContext
*ctx
, arg_fclass01
*a
)
3829 return do_fop_ded(ctx
, a
->t
, a
->r
, gen_helper_frnd_d
);
3832 static void gen_fneg_f(TCGv_i32 dst
, TCGv_env unused
, TCGv_i32 src
)
3834 tcg_gen_xori_i32(dst
, src
, INT32_MIN
);
3837 static bool trans_fneg_f(DisasContext
*ctx
, arg_fclass01
*a
)
3839 return do_fop_wew(ctx
, a
->t
, a
->r
, gen_fneg_f
);
3842 static void gen_fneg_d(TCGv_i64 dst
, TCGv_env unused
, TCGv_i64 src
)
3844 tcg_gen_xori_i64(dst
, src
, INT64_MIN
);
3847 static bool trans_fneg_d(DisasContext
*ctx
, arg_fclass01
*a
)
3849 return do_fop_ded(ctx
, a
->t
, a
->r
, gen_fneg_d
);
3852 static void gen_fnegabs_f(TCGv_i32 dst
, TCGv_env unused
, TCGv_i32 src
)
3854 tcg_gen_ori_i32(dst
, src
, INT32_MIN
);
3857 static bool trans_fnegabs_f(DisasContext
*ctx
, arg_fclass01
*a
)
3859 return do_fop_wew(ctx
, a
->t
, a
->r
, gen_fnegabs_f
);
3862 static void gen_fnegabs_d(TCGv_i64 dst
, TCGv_env unused
, TCGv_i64 src
)
3864 tcg_gen_ori_i64(dst
, src
, INT64_MIN
);
3867 static bool trans_fnegabs_d(DisasContext
*ctx
, arg_fclass01
*a
)
3869 return do_fop_ded(ctx
, a
->t
, a
->r
, gen_fnegabs_d
);
3876 static bool trans_fcnv_d_f(DisasContext
*ctx
, arg_fclass01
*a
)
3878 return do_fop_wed(ctx
, a
->t
, a
->r
, gen_helper_fcnv_d_s
);
3881 static bool trans_fcnv_f_d(DisasContext
*ctx
, arg_fclass01
*a
)
3883 return do_fop_dew(ctx
, a
->t
, a
->r
, gen_helper_fcnv_s_d
);
3886 static bool trans_fcnv_w_f(DisasContext
*ctx
, arg_fclass01
*a
)
3888 return do_fop_wew(ctx
, a
->t
, a
->r
, gen_helper_fcnv_w_s
);
3891 static bool trans_fcnv_q_f(DisasContext
*ctx
, arg_fclass01
*a
)
3893 return do_fop_wed(ctx
, a
->t
, a
->r
, gen_helper_fcnv_dw_s
);
3896 static bool trans_fcnv_w_d(DisasContext
*ctx
, arg_fclass01
*a
)
3898 return do_fop_dew(ctx
, a
->t
, a
->r
, gen_helper_fcnv_w_d
);
3901 static bool trans_fcnv_q_d(DisasContext
*ctx
, arg_fclass01
*a
)
3903 return do_fop_ded(ctx
, a
->t
, a
->r
, gen_helper_fcnv_dw_d
);
3906 static bool trans_fcnv_f_w(DisasContext
*ctx
, arg_fclass01
*a
)
3908 return do_fop_wew(ctx
, a
->t
, a
->r
, gen_helper_fcnv_s_w
);
3911 static bool trans_fcnv_d_w(DisasContext
*ctx
, arg_fclass01
*a
)
3913 return do_fop_wed(ctx
, a
->t
, a
->r
, gen_helper_fcnv_d_w
);
3916 static bool trans_fcnv_f_q(DisasContext
*ctx
, arg_fclass01
*a
)
3918 return do_fop_dew(ctx
, a
->t
, a
->r
, gen_helper_fcnv_s_dw
);
3921 static bool trans_fcnv_d_q(DisasContext
*ctx
, arg_fclass01
*a
)
3923 return do_fop_ded(ctx
, a
->t
, a
->r
, gen_helper_fcnv_d_dw
);
3926 static bool trans_fcnv_t_f_w(DisasContext
*ctx
, arg_fclass01
*a
)
3928 return do_fop_wew(ctx
, a
->t
, a
->r
, gen_helper_fcnv_t_s_w
);
3931 static bool trans_fcnv_t_d_w(DisasContext
*ctx
, arg_fclass01
*a
)
3933 return do_fop_wed(ctx
, a
->t
, a
->r
, gen_helper_fcnv_t_d_w
);
3936 static bool trans_fcnv_t_f_q(DisasContext
*ctx
, arg_fclass01
*a
)
3938 return do_fop_dew(ctx
, a
->t
, a
->r
, gen_helper_fcnv_t_s_dw
);
3941 static bool trans_fcnv_t_d_q(DisasContext
*ctx
, arg_fclass01
*a
)
3943 return do_fop_ded(ctx
, a
->t
, a
->r
, gen_helper_fcnv_t_d_dw
);
3946 static bool trans_fcnv_uw_f(DisasContext
*ctx
, arg_fclass01
*a
)
3948 return do_fop_wew(ctx
, a
->t
, a
->r
, gen_helper_fcnv_uw_s
);
3951 static bool trans_fcnv_uq_f(DisasContext
*ctx
, arg_fclass01
*a
)
3953 return do_fop_wed(ctx
, a
->t
, a
->r
, gen_helper_fcnv_udw_s
);
3956 static bool trans_fcnv_uw_d(DisasContext
*ctx
, arg_fclass01
*a
)
3958 return do_fop_dew(ctx
, a
->t
, a
->r
, gen_helper_fcnv_uw_d
);
3961 static bool trans_fcnv_uq_d(DisasContext
*ctx
, arg_fclass01
*a
)
3963 return do_fop_ded(ctx
, a
->t
, a
->r
, gen_helper_fcnv_udw_d
);
3966 static bool trans_fcnv_f_uw(DisasContext
*ctx
, arg_fclass01
*a
)
3968 return do_fop_wew(ctx
, a
->t
, a
->r
, gen_helper_fcnv_s_uw
);
3971 static bool trans_fcnv_d_uw(DisasContext
*ctx
, arg_fclass01
*a
)
3973 return do_fop_wed(ctx
, a
->t
, a
->r
, gen_helper_fcnv_d_uw
);
3976 static bool trans_fcnv_f_uq(DisasContext
*ctx
, arg_fclass01
*a
)
3978 return do_fop_dew(ctx
, a
->t
, a
->r
, gen_helper_fcnv_s_udw
);
3981 static bool trans_fcnv_d_uq(DisasContext
*ctx
, arg_fclass01
*a
)
3983 return do_fop_ded(ctx
, a
->t
, a
->r
, gen_helper_fcnv_d_udw
);
3986 static bool trans_fcnv_t_f_uw(DisasContext
*ctx
, arg_fclass01
*a
)
3988 return do_fop_wew(ctx
, a
->t
, a
->r
, gen_helper_fcnv_t_s_uw
);
3991 static bool trans_fcnv_t_d_uw(DisasContext
*ctx
, arg_fclass01
*a
)
3993 return do_fop_wed(ctx
, a
->t
, a
->r
, gen_helper_fcnv_t_d_uw
);
3996 static bool trans_fcnv_t_f_uq(DisasContext
*ctx
, arg_fclass01
*a
)
3998 return do_fop_dew(ctx
, a
->t
, a
->r
, gen_helper_fcnv_t_s_udw
);
4001 static bool trans_fcnv_t_d_uq(DisasContext
*ctx
, arg_fclass01
*a
)
4003 return do_fop_ded(ctx
, a
->t
, a
->r
, gen_helper_fcnv_t_d_udw
);
4010 static bool trans_fcmp_f(DisasContext
*ctx
, arg_fclass2
*a
)
4012 TCGv_i32 ta
, tb
, tc
, ty
;
4016 ta
= load_frw0_i32(a
->r1
);
4017 tb
= load_frw0_i32(a
->r2
);
4018 ty
= tcg_constant_i32(a
->y
);
4019 tc
= tcg_constant_i32(a
->c
);
4021 gen_helper_fcmp_s(tcg_env
, ta
, tb
, ty
, tc
);
4023 return nullify_end(ctx
);
4026 static bool trans_fcmp_d(DisasContext
*ctx
, arg_fclass2
*a
)
4033 ta
= load_frd0(a
->r1
);
4034 tb
= load_frd0(a
->r2
);
4035 ty
= tcg_constant_i32(a
->y
);
4036 tc
= tcg_constant_i32(a
->c
);
4038 gen_helper_fcmp_d(tcg_env
, ta
, tb
, ty
, tc
);
4040 return nullify_end(ctx
);
4043 static bool trans_ftest(DisasContext
*ctx
, arg_ftest
*a
)
4049 t
= tcg_temp_new_i64();
4050 tcg_gen_ld32u_i64(t
, tcg_env
, offsetof(CPUHPPAState
, fr0_shadow
));
4057 case 0: /* simple */
4058 tcg_gen_andi_i64(t
, t
, 0x4000000);
4059 ctx
->null_cond
= cond_make_0(TCG_COND_NE
, t
);
4087 TCGv_i64 c
= tcg_constant_i64(mask
);
4088 tcg_gen_or_i64(t
, t
, c
);
4089 ctx
->null_cond
= cond_make(TCG_COND_EQ
, t
, c
);
4091 tcg_gen_andi_i64(t
, t
, mask
);
4092 ctx
->null_cond
= cond_make_0(TCG_COND_EQ
, t
);
4095 unsigned cbit
= (a
->y
^ 1) - 1;
4097 tcg_gen_extract_i64(t
, t
, 21 - cbit
, 1);
4098 ctx
->null_cond
= cond_make_0(TCG_COND_NE
, t
);
4102 return nullify_end(ctx
);
4109 static bool trans_fadd_f(DisasContext
*ctx
, arg_fclass3
*a
)
4111 return do_fop_weww(ctx
, a
->t
, a
->r1
, a
->r2
, gen_helper_fadd_s
);
4114 static bool trans_fadd_d(DisasContext
*ctx
, arg_fclass3
*a
)
4116 return do_fop_dedd(ctx
, a
->t
, a
->r1
, a
->r2
, gen_helper_fadd_d
);
4119 static bool trans_fsub_f(DisasContext
*ctx
, arg_fclass3
*a
)
4121 return do_fop_weww(ctx
, a
->t
, a
->r1
, a
->r2
, gen_helper_fsub_s
);
4124 static bool trans_fsub_d(DisasContext
*ctx
, arg_fclass3
*a
)
4126 return do_fop_dedd(ctx
, a
->t
, a
->r1
, a
->r2
, gen_helper_fsub_d
);
4129 static bool trans_fmpy_f(DisasContext
*ctx
, arg_fclass3
*a
)
4131 return do_fop_weww(ctx
, a
->t
, a
->r1
, a
->r2
, gen_helper_fmpy_s
);
4134 static bool trans_fmpy_d(DisasContext
*ctx
, arg_fclass3
*a
)
4136 return do_fop_dedd(ctx
, a
->t
, a
->r1
, a
->r2
, gen_helper_fmpy_d
);
4139 static bool trans_fdiv_f(DisasContext
*ctx
, arg_fclass3
*a
)
4141 return do_fop_weww(ctx
, a
->t
, a
->r1
, a
->r2
, gen_helper_fdiv_s
);
4144 static bool trans_fdiv_d(DisasContext
*ctx
, arg_fclass3
*a
)
4146 return do_fop_dedd(ctx
, a
->t
, a
->r1
, a
->r2
, gen_helper_fdiv_d
);
4149 static bool trans_xmpyu(DisasContext
*ctx
, arg_xmpyu
*a
)
4155 x
= load_frw0_i64(a
->r1
);
4156 y
= load_frw0_i64(a
->r2
);
4157 tcg_gen_mul_i64(x
, x
, y
);
4160 return nullify_end(ctx
);
4163 /* Convert the fmpyadd single-precision register encodings to standard. */
/* Convert the fmpyadd single-precision register encodings to standard.
   Bit 4 of the 5-bit field selects the upper half of the 16..47 range;
   the low 4 bits select the register within that half. */
static inline int fmpyadd_s_reg(unsigned r)
{
    return ((r & 16) << 1) + 16 + (r & 15);
}
4169 static bool do_fmpyadd_s(DisasContext
*ctx
, arg_mpyadd
*a
, bool is_sub
)
4171 int tm
= fmpyadd_s_reg(a
->tm
);
4172 int ra
= fmpyadd_s_reg(a
->ra
);
4173 int ta
= fmpyadd_s_reg(a
->ta
);
4174 int rm2
= fmpyadd_s_reg(a
->rm2
);
4175 int rm1
= fmpyadd_s_reg(a
->rm1
);
4179 do_fop_weww(ctx
, tm
, rm1
, rm2
, gen_helper_fmpy_s
);
4180 do_fop_weww(ctx
, ta
, ta
, ra
,
4181 is_sub
? gen_helper_fsub_s
: gen_helper_fadd_s
);
4183 return nullify_end(ctx
);
4186 static bool trans_fmpyadd_f(DisasContext
*ctx
, arg_mpyadd
*a
)
4188 return do_fmpyadd_s(ctx
, a
, false);
4191 static bool trans_fmpysub_f(DisasContext
*ctx
, arg_mpyadd
*a
)
4193 return do_fmpyadd_s(ctx
, a
, true);
4196 static bool do_fmpyadd_d(DisasContext
*ctx
, arg_mpyadd
*a
, bool is_sub
)
4200 do_fop_dedd(ctx
, a
->tm
, a
->rm1
, a
->rm2
, gen_helper_fmpy_d
);
4201 do_fop_dedd(ctx
, a
->ta
, a
->ta
, a
->ra
,
4202 is_sub
? gen_helper_fsub_d
: gen_helper_fadd_d
);
4204 return nullify_end(ctx
);
4207 static bool trans_fmpyadd_d(DisasContext
*ctx
, arg_mpyadd
*a
)
4209 return do_fmpyadd_d(ctx
, a
, false);
4212 static bool trans_fmpysub_d(DisasContext
*ctx
, arg_mpyadd
*a
)
4214 return do_fmpyadd_d(ctx
, a
, true);
4217 static bool trans_fmpyfadd_f(DisasContext
*ctx
, arg_fmpyfadd_f
*a
)
4222 x
= load_frw0_i32(a
->rm1
);
4223 y
= load_frw0_i32(a
->rm2
);
4224 z
= load_frw0_i32(a
->ra3
);
4227 gen_helper_fmpynfadd_s(x
, tcg_env
, x
, y
, z
);
4229 gen_helper_fmpyfadd_s(x
, tcg_env
, x
, y
, z
);
4232 save_frw_i32(a
->t
, x
);
4233 return nullify_end(ctx
);
4236 static bool trans_fmpyfadd_d(DisasContext
*ctx
, arg_fmpyfadd_d
*a
)
4241 x
= load_frd0(a
->rm1
);
4242 y
= load_frd0(a
->rm2
);
4243 z
= load_frd0(a
->ra3
);
4246 gen_helper_fmpynfadd_d(x
, tcg_env
, x
, y
, z
);
4248 gen_helper_fmpyfadd_d(x
, tcg_env
, x
, y
, z
);
4252 return nullify_end(ctx
);
4255 static bool trans_diag(DisasContext
*ctx
, arg_diag
*a
)
4257 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR
);
4258 #ifndef CONFIG_USER_ONLY
4259 if (a
->i
== 0x100) {
4260 /* emulate PDC BTLB, called by SeaBIOS-hppa */
4262 gen_helper_diag_btlb(tcg_env
);
4263 return nullify_end(ctx
);
4266 qemu_log_mask(LOG_UNIMP
, "DIAG opcode 0x%04x ignored\n", a
->i
);
/*
 * TranslatorOps.init_disas_context hook: seed the DisasContext from the
 * TB about to be translated (flags, privilege, MMU index, the IAOQ
 * front/back values, and the per-page insn bound).
 * NOTE(review): extraction dropped several lines here (the opening
 * brace, the declaration of "bound", the #else/#endif markers of the
 * CONFIG_USER_ONLY split, the system-mode MMU-index fallback arm of the
 * ternary, the iaoq_n reset, and the closing brace) -- restore from
 * upstream before compiling.
 */
static void hppa_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
    DisasContext *ctx = container_of(dcbase, DisasContext, base);

    /* Cache the TB flags and CPU generation for the whole translation. */
    ctx->tb_flags = ctx->base.tb->flags;
    ctx->is_pa20 = hppa_is_pa20(cpu_env(cs));

#ifdef CONFIG_USER_ONLY
    /* User emulation: fixed user privilege and MMU index; the IAOQ
       values carry the privilege level in their low bits. */
    ctx->privilege = MMU_IDX_TO_PRIV(MMU_USER_IDX);
    ctx->mmu_idx = MMU_USER_IDX;
    ctx->iaoq_f = ctx->base.pc_first | ctx->privilege;
    ctx->iaoq_b = ctx->base.tb->cs_base | ctx->privilege;
    /* TB_FLAG_UNALIGN selects whether loads/stores trap on misalignment. */
    ctx->unalign = (ctx->tb_flags & TB_FLAG_UNALIGN ? MO_UNALN : MO_ALIGN);
    /* System emulation (original #else arm; marker lost): privilege and
       MMU index are recovered from the TB flags. */
    ctx->privilege = (ctx->tb_flags >> TB_FLAG_PRIV_SHIFT) & 3;
    ctx->mmu_idx = (ctx->tb_flags & PSW_D
                    ? PRIV_P_TO_MMU_IDX(ctx->privilege, ctx->tb_flags & PSW_P)

    /* Recover the IAOQ values from the GVA + PRIV. */
    uint64_t cs_base = ctx->base.tb->cs_base;
    uint64_t iasq_f = cs_base & ~0xffffffffull;
    int32_t diff = cs_base;

    ctx->iaoq_f = (ctx->base.pc_first & ~iasq_f) + ctx->privilege;
    /* diff == 0 means the back of the queue is not known statically. */
    ctx->iaoq_b = (diff ? ctx->iaoq_f + diff : -1);

    ctx->iaoq_n_var = NULL;

    /* Bound the number of instructions by those left on the page. */
    bound = -(ctx->base.pc_first | TARGET_PAGE_MASK) / 4;
    ctx->base.max_insns = MIN(ctx->base.max_insns, bound);
4307 static void hppa_tr_tb_start(DisasContextBase
*dcbase
, CPUState
*cs
)
4309 DisasContext
*ctx
= container_of(dcbase
, DisasContext
, base
);
4311 /* Seed the nullification status from PSW[N], as saved in TB->FLAGS. */
4312 ctx
->null_cond
= cond_make_f();
4313 ctx
->psw_n_nonzero
= false;
4314 if (ctx
->tb_flags
& PSW_N
) {
4315 ctx
->null_cond
.c
= TCG_COND_ALWAYS
;
4316 ctx
->psw_n_nonzero
= true;
4318 ctx
->null_lab
= NULL
;
4321 static void hppa_tr_insn_start(DisasContextBase
*dcbase
, CPUState
*cs
)
4323 DisasContext
*ctx
= container_of(dcbase
, DisasContext
, base
);
4325 tcg_gen_insn_start(ctx
->iaoq_f
, ctx
->iaoq_b
);
/*
 * TranslatorOps.translate_insn hook: fetch, decode and emit TCG for one
 * guest insn, then advance the two-entry instruction-address queue
 * (IAOQ front/back) and decide how the TB continues.
 * NOTE(review): extraction dropped many lines here (opening braces, the
 * "DisasJumpType ret" declaration, the page-zero handler call in the
 * user-only path, else branches, the ret = DISAS_NEXT / illegal-insn
 * paths, the "switch (ret)" header, break statements, the #endif, the
 * DISAS_EXIT selection arm of the ternary, and the default label) --
 * restore from upstream before compiling.
 */
static void hppa_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs)
    DisasContext *ctx = container_of(dcbase, DisasContext, base);
    CPUHPPAState *env = cpu_env(cs);

    /* Execute one insn. */
#ifdef CONFIG_USER_ONLY
    if (ctx->base.pc_next < TARGET_PAGE_SIZE) {
        /* Page zero holds the syscall gateways; handled specially,
           not through the decoder. */
        ret = ctx->base.is_jmp;
        assert(ret != DISAS_NEXT);

    /* Always fetch the insn, even if nullified, so that we check
       the page permissions for execute. */
    uint32_t insn = translator_ldl(env, &ctx->base, ctx->base.pc_next);

    /* Set up the IA queue for the next insn.
       This will be overwritten by a branch. */
    if (ctx->iaoq_b == -1) {
        /* Back of queue unknown at translate time: compute the next
           entry dynamically as cpu_iaoq_b + 4. */
        ctx->iaoq_n_var = tcg_temp_new_i64();
        tcg_gen_addi_i64(ctx->iaoq_n_var, cpu_iaoq_b, 4);
        ctx->iaoq_n = ctx->iaoq_b + 4;
        ctx->iaoq_n_var = NULL;

    /* If this insn is statically nullified, consume the nullification
       now instead of decoding it. */
    if (unlikely(ctx->null_cond.c == TCG_COND_ALWAYS)) {
        ctx->null_cond.c = TCG_COND_NEVER;
    if (!decode(ctx, insn)) {
        ret = ctx->base.is_jmp;
        assert(ctx->null_lab == NULL);

    /* Advance the insn queue. Note that this check also detects
       a priority change within the instruction queue. */
    if (ret == DISAS_NEXT && ctx->iaoq_b != ctx->iaoq_f + 4) {
        if (ctx->iaoq_b != -1 && ctx->iaoq_n != -1
            && use_goto_tb(ctx, ctx->iaoq_b)
            && (ctx->null_cond.c == TCG_COND_NEVER
                || ctx->null_cond.c == TCG_COND_ALWAYS)) {
            /* Both queue targets known and nullification constant:
               chain directly into the next TB. */
            nullify_set(ctx, ctx->null_cond.c == TCG_COND_ALWAYS);
            gen_goto_tb(ctx, 0, ctx->iaoq_b, ctx->iaoq_n);
            ctx->base.is_jmp = ret = DISAS_NORETURN;
            ctx->base.is_jmp = ret = DISAS_IAQ_N_STALE;
    ctx->iaoq_f = ctx->iaoq_b;
    ctx->iaoq_b = ctx->iaoq_n;
    ctx->base.pc_next += 4;

    case DISAS_NORETURN:
    case DISAS_IAQ_N_UPDATED:

    case DISAS_IAQ_N_STALE:
    case DISAS_IAQ_N_STALE_EXIT:
        /* Front of queue only known dynamically: shuffle the hardware
           queue registers forward and materialize the new back entry. */
        if (ctx->iaoq_f == -1) {
            copy_iaoq_entry(ctx, cpu_iaoq_f, -1, cpu_iaoq_b);
            copy_iaoq_entry(ctx, cpu_iaoq_b, ctx->iaoq_n, ctx->iaoq_n_var);
#ifndef CONFIG_USER_ONLY
            /* Keep the space-register queue in step with the offsets. */
            tcg_gen_mov_i64(cpu_iasq_f, cpu_iasq_b);
            ctx->base.is_jmp = (ret == DISAS_IAQ_N_STALE_EXIT
                                : DISAS_IAQ_N_UPDATED);
        } else if (ctx->iaoq_b == -1) {
            copy_iaoq_entry(ctx, cpu_iaoq_b, -1, ctx->iaoq_n_var);
        g_assert_not_reached();
/*
 * TranslatorOps.tb_stop hook: flush the translation-time IAOQ values
 * back to the cpu_iaoq_{f,b} globals and emit the TB epilogue.
 * NOTE(review): extraction dropped the opening brace, the
 * "switch (is_jmp)" header, break statements, fallthrough markers, a
 * nullify_save() call and the DISAS_EXIT case label -- restore from
 * upstream before compiling.
 */
static void hppa_tr_tb_stop(DisasContextBase *dcbase, CPUState *cs)
    DisasContext *ctx = container_of(dcbase, DisasContext, base);
    DisasJumpType is_jmp = ctx->base.is_jmp;

    case DISAS_NORETURN:
        /* TB already ended with an explicit exit: nothing to do. */

    case DISAS_TOO_MANY:
    case DISAS_IAQ_N_STALE:
    case DISAS_IAQ_N_STALE_EXIT:
        /* Queue values are compile-time constants: store them back
           to the CPU state before leaving the TB. */
        copy_iaoq_entry(ctx, cpu_iaoq_f, ctx->iaoq_f, cpu_iaoq_f);
        copy_iaoq_entry(ctx, cpu_iaoq_b, ctx->iaoq_b, cpu_iaoq_b);

    case DISAS_IAQ_N_UPDATED:
        /* Queue registers already up to date: chain through the TB
           lookup helper unless a full exit to the main loop is needed. */
        if (is_jmp != DISAS_IAQ_N_STALE_EXIT) {
            tcg_gen_lookup_and_goto_ptr();

        tcg_gen_exit_tb(NULL, 0);

        g_assert_not_reached();
/*
 * TranslatorOps.disas_log hook: print the guest code for -d in_asm.
 * NOTE(review): extraction dropped the user-only "switch (pc)" dispatch
 * over the page-zero gateway addresses (its header, case labels,
 * returns and the closing #endif) -- the four fprintf calls below were
 * originally one case each; restore from upstream before compiling.
 */
static void hppa_tr_disas_log(const DisasContextBase *dcbase,
                              CPUState *cs, FILE *logfile)
    target_ulong pc = dcbase->pc_first;

#ifdef CONFIG_USER_ONLY
    /* The page-zero gateway addresses contain no real code; print a
       symbolic description instead of disassembling. */
    fprintf(logfile, "IN:\n0x00000000: (null)\n");
    fprintf(logfile, "IN:\n0x000000b0: light-weight-syscall\n");
    fprintf(logfile, "IN:\n0x000000e0: set-thread-pointer-syscall\n");
    fprintf(logfile, "IN:\n0x00000100: syscall\n");

    /* Normal case: symbolic header plus a real disassembly of the TB. */
    fprintf(logfile, "IN: %s\n", lookup_symbol(pc));
    target_disas(logfile, cs, pc, dcbase->tb->size);
4472 static const TranslatorOps hppa_tr_ops
= {
4473 .init_disas_context
= hppa_tr_init_disas_context
,
4474 .tb_start
= hppa_tr_tb_start
,
4475 .insn_start
= hppa_tr_insn_start
,
4476 .translate_insn
= hppa_tr_translate_insn
,
4477 .tb_stop
= hppa_tr_tb_stop
,
4478 .disas_log
= hppa_tr_disas_log
,
4481 void gen_intermediate_code(CPUState
*cs
, TranslationBlock
*tb
, int *max_insns
,
4482 target_ulong pc
, void *host_pc
)
4485 translator_loop(cs
, tb
, max_insns
, pc
, host_pc
, &hppa_tr_ops
, &ctx
.base
);