2 * HPPA emulation cpu translation for qemu.
4 * Copyright (c) 2016 Richard Henderson <rth@twiddle.net>
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2.1 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
20 #include "qemu/osdep.h"
22 #include "disas/disas.h"
23 #include "qemu/host-utils.h"
24 #include "exec/exec-all.h"
25 #include "tcg/tcg-op.h"
26 #include "tcg/tcg-op-gvec.h"
27 #include "exec/helper-proto.h"
28 #include "exec/helper-gen.h"
29 #include "exec/translator.h"
32 #define HELPER_H "helper.h"
33 #include "exec/helper-info.c.inc"
36 /* Choose to use explicit sizes within this file. */
39 typedef struct DisasCond
{
44 typedef struct DisasContext
{
45 DisasContextBase base
;
66 #ifdef CONFIG_USER_ONLY
71 #ifdef CONFIG_USER_ONLY
72 #define UNALIGN(C) (C)->unalign
73 #define MMU_DISABLED(C) false
75 #define UNALIGN(C) MO_ALIGN
76 #define MMU_DISABLED(C) MMU_IDX_MMU_DISABLED((C)->mmu_idx)
79 /* Note that ssm/rsm instructions number PSW_W and PSW_E differently. */
80 static int expand_sm_imm(DisasContext
*ctx
, int val
)
82 /* Keep unimplemented bits disabled -- see cpu_hppa_put_psw. */
87 val
&= ~(PSW_SM_W
| PSW_SM_E
| PSW_G
);
89 val
&= ~(PSW_SM_W
| PSW_SM_E
| PSW_O
);
94 /* Inverted space register indicates 0 means sr0 not inferred from base. */
95 static int expand_sr3x(DisasContext
*ctx
, int val
)
100 /* Convert the M:A bits within a memory insn to the tri-state value
101 we use for the final M. */
102 static int ma_to_m(DisasContext
*ctx
, int val
)
104 return val
& 2 ? (val
& 1 ? -1 : 1) : 0;
107 /* Convert the sign of the displacement to a pre or post-modify. */
108 static int pos_to_m(DisasContext
*ctx
, int val
)
113 static int neg_to_m(DisasContext
*ctx
, int val
)
118 /* Used for branch targets and fp memory ops. */
119 static int expand_shl2(DisasContext
*ctx
, int val
)
124 /* Used for fp memory ops. */
125 static int expand_shl3(DisasContext
*ctx
, int val
)
130 /* Used for assemble_21. */
131 static int expand_shl11(DisasContext
*ctx
, int val
)
136 static int assemble_6(DisasContext
*ctx
, int val
)
139 * Officially, 32 * x + 32 - y.
140 * Here, x is already in bit 5, and y is [4:0].
141 * Since -y = ~y + 1, in 5 bits 32 - y => y ^ 31 + 1,
142 * with the overflow from bit 4 summing with x.
144 return (val
^ 31) + 1;
147 /* Translate CMPI doubleword conditions to standard. */
148 static int cmpbid_c(DisasContext
*ctx
, int val
)
150 return val
? val
: 4; /* 0 == "*<<" */
154 /* Include the auto-generated decoder. */
155 #include "decode-insns.c.inc"
157 /* We are not using a goto_tb (for whatever reason), but have updated
158 the iaq (for whatever reason), so don't do it again on exit. */
159 #define DISAS_IAQ_N_UPDATED DISAS_TARGET_0
161 /* We are exiting the TB, but have neither emitted a goto_tb, nor
162 updated the iaq for the next instruction to be executed. */
163 #define DISAS_IAQ_N_STALE DISAS_TARGET_1
165 /* Similarly, but we want to return to the main loop immediately
166 to recognize unmasked interrupts. */
167 #define DISAS_IAQ_N_STALE_EXIT DISAS_TARGET_2
168 #define DISAS_EXIT DISAS_TARGET_3
170 /* global register indexes */
171 static TCGv_i64 cpu_gr
[32];
172 static TCGv_i64 cpu_sr
[4];
173 static TCGv_i64 cpu_srH
;
174 static TCGv_i64 cpu_iaoq_f
;
175 static TCGv_i64 cpu_iaoq_b
;
176 static TCGv_i64 cpu_iasq_f
;
177 static TCGv_i64 cpu_iasq_b
;
178 static TCGv_i64 cpu_sar
;
179 static TCGv_i64 cpu_psw_n
;
180 static TCGv_i64 cpu_psw_v
;
181 static TCGv_i64 cpu_psw_cb
;
182 static TCGv_i64 cpu_psw_cb_msb
;
184 void hppa_translate_init(void)
186 #define DEF_VAR(V) { &cpu_##V, #V, offsetof(CPUHPPAState, V) }
188 typedef struct { TCGv_i64
*var
; const char *name
; int ofs
; } GlobalVar
;
189 static const GlobalVar vars
[] = {
190 { &cpu_sar
, "sar", offsetof(CPUHPPAState
, cr
[CR_SAR
]) },
201 /* Use the symbolic register names that match the disassembler. */
202 static const char gr_names
[32][4] = {
203 "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
204 "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15",
205 "r16", "r17", "r18", "r19", "r20", "r21", "r22", "r23",
206 "r24", "r25", "r26", "r27", "r28", "r29", "r30", "r31"
208 /* SR[4-7] are not global registers so that we can index them. */
209 static const char sr_names
[5][4] = {
210 "sr0", "sr1", "sr2", "sr3", "srH"
216 for (i
= 1; i
< 32; i
++) {
217 cpu_gr
[i
] = tcg_global_mem_new(tcg_env
,
218 offsetof(CPUHPPAState
, gr
[i
]),
221 for (i
= 0; i
< 4; i
++) {
222 cpu_sr
[i
] = tcg_global_mem_new_i64(tcg_env
,
223 offsetof(CPUHPPAState
, sr
[i
]),
226 cpu_srH
= tcg_global_mem_new_i64(tcg_env
,
227 offsetof(CPUHPPAState
, sr
[4]),
230 for (i
= 0; i
< ARRAY_SIZE(vars
); ++i
) {
231 const GlobalVar
*v
= &vars
[i
];
232 *v
->var
= tcg_global_mem_new(tcg_env
, v
->ofs
, v
->name
);
235 cpu_iasq_f
= tcg_global_mem_new_i64(tcg_env
,
236 offsetof(CPUHPPAState
, iasq_f
),
238 cpu_iasq_b
= tcg_global_mem_new_i64(tcg_env
,
239 offsetof(CPUHPPAState
, iasq_b
),
243 static void set_insn_breg(DisasContext
*ctx
, int breg
)
245 assert(ctx
->insn_start
!= NULL
);
246 tcg_set_insn_start_param(ctx
->insn_start
, 2, breg
);
247 ctx
->insn_start
= NULL
;
250 static DisasCond
cond_make_f(void)
259 static DisasCond
cond_make_t(void)
262 .c
= TCG_COND_ALWAYS
,
268 static DisasCond
cond_make_n(void)
273 .a1
= tcg_constant_i64(0)
277 static DisasCond
cond_make_tmp(TCGCond c
, TCGv_i64 a0
, TCGv_i64 a1
)
279 assert (c
!= TCG_COND_NEVER
&& c
!= TCG_COND_ALWAYS
);
280 return (DisasCond
){ .c
= c
, .a0
= a0
, .a1
= a1
};
283 static DisasCond
cond_make_0_tmp(TCGCond c
, TCGv_i64 a0
)
285 return cond_make_tmp(c
, a0
, tcg_constant_i64(0));
288 static DisasCond
cond_make_0(TCGCond c
, TCGv_i64 a0
)
290 TCGv_i64 tmp
= tcg_temp_new_i64();
291 tcg_gen_mov_i64(tmp
, a0
);
292 return cond_make_0_tmp(c
, tmp
);
295 static DisasCond
cond_make(TCGCond c
, TCGv_i64 a0
, TCGv_i64 a1
)
297 TCGv_i64 t0
= tcg_temp_new_i64();
298 TCGv_i64 t1
= tcg_temp_new_i64();
300 tcg_gen_mov_i64(t0
, a0
);
301 tcg_gen_mov_i64(t1
, a1
);
302 return cond_make_tmp(c
, t0
, t1
);
305 static void cond_free(DisasCond
*cond
)
312 case TCG_COND_ALWAYS
:
313 cond
->c
= TCG_COND_NEVER
;
320 static TCGv_i64
load_gpr(DisasContext
*ctx
, unsigned reg
)
329 static TCGv_i64
dest_gpr(DisasContext
*ctx
, unsigned reg
)
331 if (reg
== 0 || ctx
->null_cond
.c
!= TCG_COND_NEVER
) {
332 return tcg_temp_new_i64();
338 static void save_or_nullify(DisasContext
*ctx
, TCGv_i64 dest
, TCGv_i64 t
)
340 if (ctx
->null_cond
.c
!= TCG_COND_NEVER
) {
341 tcg_gen_movcond_i64(ctx
->null_cond
.c
, dest
, ctx
->null_cond
.a0
,
342 ctx
->null_cond
.a1
, dest
, t
);
344 tcg_gen_mov_i64(dest
, t
);
348 static void save_gpr(DisasContext
*ctx
, unsigned reg
, TCGv_i64 t
)
351 save_or_nullify(ctx
, cpu_gr
[reg
], t
);
363 static TCGv_i32
load_frw_i32(unsigned rt
)
365 TCGv_i32 ret
= tcg_temp_new_i32();
366 tcg_gen_ld_i32(ret
, tcg_env
,
367 offsetof(CPUHPPAState
, fr
[rt
& 31])
368 + (rt
& 32 ? LO_OFS
: HI_OFS
));
372 static TCGv_i32
load_frw0_i32(unsigned rt
)
375 TCGv_i32 ret
= tcg_temp_new_i32();
376 tcg_gen_movi_i32(ret
, 0);
379 return load_frw_i32(rt
);
383 static TCGv_i64
load_frw0_i64(unsigned rt
)
385 TCGv_i64 ret
= tcg_temp_new_i64();
387 tcg_gen_movi_i64(ret
, 0);
389 tcg_gen_ld32u_i64(ret
, tcg_env
,
390 offsetof(CPUHPPAState
, fr
[rt
& 31])
391 + (rt
& 32 ? LO_OFS
: HI_OFS
));
396 static void save_frw_i32(unsigned rt
, TCGv_i32 val
)
398 tcg_gen_st_i32(val
, tcg_env
,
399 offsetof(CPUHPPAState
, fr
[rt
& 31])
400 + (rt
& 32 ? LO_OFS
: HI_OFS
));
406 static TCGv_i64
load_frd(unsigned rt
)
408 TCGv_i64 ret
= tcg_temp_new_i64();
409 tcg_gen_ld_i64(ret
, tcg_env
, offsetof(CPUHPPAState
, fr
[rt
]));
413 static TCGv_i64
load_frd0(unsigned rt
)
416 TCGv_i64 ret
= tcg_temp_new_i64();
417 tcg_gen_movi_i64(ret
, 0);
424 static void save_frd(unsigned rt
, TCGv_i64 val
)
426 tcg_gen_st_i64(val
, tcg_env
, offsetof(CPUHPPAState
, fr
[rt
]));
429 static void load_spr(DisasContext
*ctx
, TCGv_i64 dest
, unsigned reg
)
431 #ifdef CONFIG_USER_ONLY
432 tcg_gen_movi_i64(dest
, 0);
435 tcg_gen_mov_i64(dest
, cpu_sr
[reg
]);
436 } else if (ctx
->tb_flags
& TB_FLAG_SR_SAME
) {
437 tcg_gen_mov_i64(dest
, cpu_srH
);
439 tcg_gen_ld_i64(dest
, tcg_env
, offsetof(CPUHPPAState
, sr
[reg
]));
444 /* Skip over the implementation of an insn that has been nullified.
445 Use this when the insn is too complex for a conditional move. */
446 static void nullify_over(DisasContext
*ctx
)
448 if (ctx
->null_cond
.c
!= TCG_COND_NEVER
) {
449 /* The always condition should have been handled in the main loop. */
450 assert(ctx
->null_cond
.c
!= TCG_COND_ALWAYS
);
452 ctx
->null_lab
= gen_new_label();
454 /* If we're using PSW[N], copy it to a temp because... */
455 if (ctx
->null_cond
.a0
== cpu_psw_n
) {
456 ctx
->null_cond
.a0
= tcg_temp_new_i64();
457 tcg_gen_mov_i64(ctx
->null_cond
.a0
, cpu_psw_n
);
459 /* ... we clear it before branching over the implementation,
460 so that (1) it's clear after nullifying this insn and
461 (2) if this insn nullifies the next, PSW[N] is valid. */
462 if (ctx
->psw_n_nonzero
) {
463 ctx
->psw_n_nonzero
= false;
464 tcg_gen_movi_i64(cpu_psw_n
, 0);
467 tcg_gen_brcond_i64(ctx
->null_cond
.c
, ctx
->null_cond
.a0
,
468 ctx
->null_cond
.a1
, ctx
->null_lab
);
469 cond_free(&ctx
->null_cond
);
473 /* Save the current nullification state to PSW[N]. */
474 static void nullify_save(DisasContext
*ctx
)
476 if (ctx
->null_cond
.c
== TCG_COND_NEVER
) {
477 if (ctx
->psw_n_nonzero
) {
478 tcg_gen_movi_i64(cpu_psw_n
, 0);
482 if (ctx
->null_cond
.a0
!= cpu_psw_n
) {
483 tcg_gen_setcond_i64(ctx
->null_cond
.c
, cpu_psw_n
,
484 ctx
->null_cond
.a0
, ctx
->null_cond
.a1
);
485 ctx
->psw_n_nonzero
= true;
487 cond_free(&ctx
->null_cond
);
490 /* Set a PSW[N] to X. The intention is that this is used immediately
491 before a goto_tb/exit_tb, so that there is no fallthru path to other
492 code within the TB. Therefore we do not update psw_n_nonzero. */
493 static void nullify_set(DisasContext
*ctx
, bool x
)
495 if (ctx
->psw_n_nonzero
|| x
) {
496 tcg_gen_movi_i64(cpu_psw_n
, x
);
500 /* Mark the end of an instruction that may have been nullified.
501 This is the pair to nullify_over. Always returns true so that
502 it may be tail-called from a translate function. */
503 static bool nullify_end(DisasContext
*ctx
)
505 TCGLabel
*null_lab
= ctx
->null_lab
;
506 DisasJumpType status
= ctx
->base
.is_jmp
;
508 /* For NEXT, NORETURN, STALE, we can easily continue (or exit).
509 For UPDATED, we cannot update on the nullified path. */
510 assert(status
!= DISAS_IAQ_N_UPDATED
);
512 if (likely(null_lab
== NULL
)) {
513 /* The current insn wasn't conditional or handled the condition
514 applied to it without a branch, so the (new) setting of
515 NULL_COND can be applied directly to the next insn. */
518 ctx
->null_lab
= NULL
;
520 if (likely(ctx
->null_cond
.c
== TCG_COND_NEVER
)) {
521 /* The next instruction will be unconditional,
522 and NULL_COND already reflects that. */
523 gen_set_label(null_lab
);
525 /* The insn that we just executed is itself nullifying the next
526 instruction. Store the condition in the PSW[N] global.
527 We asserted PSW[N] = 0 in nullify_over, so that after the
528 label we have the proper value in place. */
530 gen_set_label(null_lab
);
531 ctx
->null_cond
= cond_make_n();
533 if (status
== DISAS_NORETURN
) {
534 ctx
->base
.is_jmp
= DISAS_NEXT
;
539 static uint64_t gva_offset_mask(DisasContext
*ctx
)
541 return (ctx
->tb_flags
& PSW_W
542 ? MAKE_64BIT_MASK(0, 62)
543 : MAKE_64BIT_MASK(0, 32));
546 static void copy_iaoq_entry(DisasContext
*ctx
, TCGv_i64 dest
,
547 uint64_t ival
, TCGv_i64 vval
)
549 uint64_t mask
= gva_offset_mask(ctx
);
552 tcg_gen_movi_i64(dest
, ival
& mask
);
555 tcg_debug_assert(vval
!= NULL
);
558 * We know that the IAOQ is already properly masked.
559 * This optimization is primarily for "iaoq_f = iaoq_b".
561 if (vval
== cpu_iaoq_f
|| vval
== cpu_iaoq_b
) {
562 tcg_gen_mov_i64(dest
, vval
);
564 tcg_gen_andi_i64(dest
, vval
, mask
);
568 static inline uint64_t iaoq_dest(DisasContext
*ctx
, int64_t disp
)
570 return ctx
->iaoq_f
+ disp
+ 8;
573 static void gen_excp_1(int exception
)
575 gen_helper_excp(tcg_env
, tcg_constant_i32(exception
));
578 static void gen_excp(DisasContext
*ctx
, int exception
)
580 copy_iaoq_entry(ctx
, cpu_iaoq_f
, ctx
->iaoq_f
, cpu_iaoq_f
);
581 copy_iaoq_entry(ctx
, cpu_iaoq_b
, ctx
->iaoq_b
, cpu_iaoq_b
);
583 gen_excp_1(exception
);
584 ctx
->base
.is_jmp
= DISAS_NORETURN
;
587 static bool gen_excp_iir(DisasContext
*ctx
, int exc
)
590 tcg_gen_st_i64(tcg_constant_i64(ctx
->insn
),
591 tcg_env
, offsetof(CPUHPPAState
, cr
[CR_IIR
]));
593 return nullify_end(ctx
);
596 static bool gen_illegal(DisasContext
*ctx
)
598 return gen_excp_iir(ctx
, EXCP_ILL
);
601 #ifdef CONFIG_USER_ONLY
602 #define CHECK_MOST_PRIVILEGED(EXCP) \
603 return gen_excp_iir(ctx, EXCP)
605 #define CHECK_MOST_PRIVILEGED(EXCP) \
607 if (ctx->privilege != 0) { \
608 return gen_excp_iir(ctx, EXCP); \
613 static bool use_goto_tb(DisasContext
*ctx
, uint64_t dest
)
615 return translator_use_goto_tb(&ctx
->base
, dest
);
618 /* If the next insn is to be nullified, and it's on the same page,
619 and we're not attempting to set a breakpoint on it, then we can
620 totally skip the nullified insn. This avoids creating and
621 executing a TB that merely branches to the next TB. */
622 static bool use_nullify_skip(DisasContext
*ctx
)
624 return (((ctx
->iaoq_b
^ ctx
->iaoq_f
) & TARGET_PAGE_MASK
) == 0
625 && !cpu_breakpoint_test(ctx
->cs
, ctx
->iaoq_b
, BP_ANY
));
628 static void gen_goto_tb(DisasContext
*ctx
, int which
,
629 uint64_t f
, uint64_t b
)
631 if (f
!= -1 && b
!= -1 && use_goto_tb(ctx
, f
)) {
632 tcg_gen_goto_tb(which
);
633 copy_iaoq_entry(ctx
, cpu_iaoq_f
, f
, NULL
);
634 copy_iaoq_entry(ctx
, cpu_iaoq_b
, b
, NULL
);
635 tcg_gen_exit_tb(ctx
->base
.tb
, which
);
637 copy_iaoq_entry(ctx
, cpu_iaoq_f
, f
, cpu_iaoq_b
);
638 copy_iaoq_entry(ctx
, cpu_iaoq_b
, b
, ctx
->iaoq_n_var
);
639 tcg_gen_lookup_and_goto_ptr();
/* Conditions 2 (<), 3 (<=) and 6 (SV) examine signed overflow. */
static bool cond_need_sv(int c)
{
    switch (c) {
    case 2:
    case 3:
    case 6:
        return true;
    default:
        return false;
    }
}
/* Conditions 4 (NUV) and 5 (ZNV) examine the carry bit. */
static bool cond_need_cb(int c)
{
    switch (c) {
    case 4:
    case 5:
        return true;
    default:
        return false;
    }
}
653 /* Need extensions from TCGv_i32 to TCGv_i64. */
654 static bool cond_need_ext(DisasContext
*ctx
, bool d
)
656 return !(ctx
->is_pa20
&& d
);
660 * Compute conditional for arithmetic. See Page 5-3, Table 5-1, of
661 * the Parisc 1.1 Architecture Reference Manual for details.
664 static DisasCond
do_cond(DisasContext
*ctx
, unsigned cf
, bool d
,
665 TCGv_i64 res
, TCGv_i64 cb_msb
, TCGv_i64 sv
)
671 case 0: /* Never / TR (0 / 1) */
672 cond
= cond_make_f();
674 case 1: /* = / <> (Z / !Z) */
675 if (cond_need_ext(ctx
, d
)) {
676 tmp
= tcg_temp_new_i64();
677 tcg_gen_ext32u_i64(tmp
, res
);
680 cond
= cond_make_0(TCG_COND_EQ
, res
);
682 case 2: /* < / >= (N ^ V / !(N ^ V) */
683 tmp
= tcg_temp_new_i64();
684 tcg_gen_xor_i64(tmp
, res
, sv
);
685 if (cond_need_ext(ctx
, d
)) {
686 tcg_gen_ext32s_i64(tmp
, tmp
);
688 cond
= cond_make_0_tmp(TCG_COND_LT
, tmp
);
690 case 3: /* <= / > (N ^ V) | Z / !((N ^ V) | Z) */
694 * ((res < 0) ^ (sv < 0)) | !res
695 * ((res ^ sv) < 0) | !res
696 * (~(res ^ sv) >= 0) | !res
697 * !(~(res ^ sv) >> 31) | !res
698 * !(~(res ^ sv) >> 31 & res)
700 tmp
= tcg_temp_new_i64();
701 tcg_gen_eqv_i64(tmp
, res
, sv
);
702 if (cond_need_ext(ctx
, d
)) {
703 tcg_gen_sextract_i64(tmp
, tmp
, 31, 1);
704 tcg_gen_and_i64(tmp
, tmp
, res
);
705 tcg_gen_ext32u_i64(tmp
, tmp
);
707 tcg_gen_sari_i64(tmp
, tmp
, 63);
708 tcg_gen_and_i64(tmp
, tmp
, res
);
710 cond
= cond_make_0_tmp(TCG_COND_EQ
, tmp
);
712 case 4: /* NUV / UV (!C / C) */
713 /* Only bit 0 of cb_msb is ever set. */
714 cond
= cond_make_0(TCG_COND_EQ
, cb_msb
);
716 case 5: /* ZNV / VNZ (!C | Z / C & !Z) */
717 tmp
= tcg_temp_new_i64();
718 tcg_gen_neg_i64(tmp
, cb_msb
);
719 tcg_gen_and_i64(tmp
, tmp
, res
);
720 if (cond_need_ext(ctx
, d
)) {
721 tcg_gen_ext32u_i64(tmp
, tmp
);
723 cond
= cond_make_0_tmp(TCG_COND_EQ
, tmp
);
725 case 6: /* SV / NSV (V / !V) */
726 if (cond_need_ext(ctx
, d
)) {
727 tmp
= tcg_temp_new_i64();
728 tcg_gen_ext32s_i64(tmp
, sv
);
731 cond
= cond_make_0(TCG_COND_LT
, sv
);
733 case 7: /* OD / EV */
734 tmp
= tcg_temp_new_i64();
735 tcg_gen_andi_i64(tmp
, res
, 1);
736 cond
= cond_make_0_tmp(TCG_COND_NE
, tmp
);
739 g_assert_not_reached();
742 cond
.c
= tcg_invert_cond(cond
.c
);
748 /* Similar, but for the special case of subtraction without borrow, we
749 can use the inputs directly. This can allow other computation to be
750 deleted as unused. */
752 static DisasCond
do_sub_cond(DisasContext
*ctx
, unsigned cf
, bool d
,
753 TCGv_i64 res
, TCGv_i64 in1
,
754 TCGv_i64 in2
, TCGv_i64 sv
)
772 case 4: /* << / >>= */
776 case 5: /* <<= / >> */
781 return do_cond(ctx
, cf
, d
, res
, NULL
, sv
);
785 tc
= tcg_invert_cond(tc
);
787 if (cond_need_ext(ctx
, d
)) {
788 TCGv_i64 t1
= tcg_temp_new_i64();
789 TCGv_i64 t2
= tcg_temp_new_i64();
792 tcg_gen_ext32u_i64(t1
, in1
);
793 tcg_gen_ext32u_i64(t2
, in2
);
795 tcg_gen_ext32s_i64(t1
, in1
);
796 tcg_gen_ext32s_i64(t2
, in2
);
798 return cond_make_tmp(tc
, t1
, t2
);
800 return cond_make(tc
, in1
, in2
);
804 * Similar, but for logicals, where the carry and overflow bits are not
805 * computed, and use of them is undefined.
807 * Undefined or not, hardware does not trap. It seems reasonable to
808 * assume hardware treats cases c={4,5,6} as if C=0 & V=0, since that's
809 * how cases c={2,3} are treated.
812 static DisasCond
do_log_cond(DisasContext
*ctx
, unsigned cf
, bool d
,
820 case 9: /* undef, C */
821 case 11: /* undef, C & !Z */
822 case 12: /* undef, V */
823 return cond_make_f();
826 case 8: /* undef, !C */
827 case 10: /* undef, !C | Z */
828 case 13: /* undef, !V */
829 return cond_make_t();
858 return do_cond(ctx
, cf
, d
, res
, NULL
, NULL
);
861 g_assert_not_reached();
864 if (cond_need_ext(ctx
, d
)) {
865 TCGv_i64 tmp
= tcg_temp_new_i64();
868 tcg_gen_ext32u_i64(tmp
, res
);
870 tcg_gen_ext32s_i64(tmp
, res
);
872 return cond_make_0_tmp(tc
, tmp
);
874 return cond_make_0(tc
, res
);
877 /* Similar, but for shift/extract/deposit conditions. */
879 static DisasCond
do_sed_cond(DisasContext
*ctx
, unsigned orig
, bool d
,
884 /* Convert the compressed condition codes to standard.
885 0-2 are the same as logicals (nv,<,<=), while 3 is OD.
886 4-7 are the reverse of 0-3. */
893 return do_log_cond(ctx
, c
* 2 + f
, d
, res
);
896 /* Similar, but for unit conditions. */
898 static DisasCond
do_unit_cond(unsigned cf
, bool d
, TCGv_i64 res
,
899 TCGv_i64 in1
, TCGv_i64 in2
)
902 TCGv_i64 tmp
, cb
= NULL
;
903 uint64_t d_repl
= d
? 0x0000000100000001ull
: 1;
906 /* Since we want to test lots of carry-out bits all at once, do not
907 * do our normal thing and compute carry-in of bit B+1 since that
908 * leaves us with carry bits spread across two words.
910 cb
= tcg_temp_new_i64();
911 tmp
= tcg_temp_new_i64();
912 tcg_gen_or_i64(cb
, in1
, in2
);
913 tcg_gen_and_i64(tmp
, in1
, in2
);
914 tcg_gen_andc_i64(cb
, cb
, res
);
915 tcg_gen_or_i64(cb
, cb
, tmp
);
919 case 0: /* never / TR */
920 case 1: /* undefined */
921 case 5: /* undefined */
922 cond
= cond_make_f();
925 case 2: /* SBZ / NBZ */
926 /* See hasless(v,1) from
927 * https://graphics.stanford.edu/~seander/bithacks.html#ZeroInWord
929 tmp
= tcg_temp_new_i64();
930 tcg_gen_subi_i64(tmp
, res
, d_repl
* 0x01010101u
);
931 tcg_gen_andc_i64(tmp
, tmp
, res
);
932 tcg_gen_andi_i64(tmp
, tmp
, d_repl
* 0x80808080u
);
933 cond
= cond_make_0(TCG_COND_NE
, tmp
);
936 case 3: /* SHZ / NHZ */
937 tmp
= tcg_temp_new_i64();
938 tcg_gen_subi_i64(tmp
, res
, d_repl
* 0x00010001u
);
939 tcg_gen_andc_i64(tmp
, tmp
, res
);
940 tcg_gen_andi_i64(tmp
, tmp
, d_repl
* 0x80008000u
);
941 cond
= cond_make_0(TCG_COND_NE
, tmp
);
944 case 4: /* SDC / NDC */
945 tcg_gen_andi_i64(cb
, cb
, d_repl
* 0x88888888u
);
946 cond
= cond_make_0(TCG_COND_NE
, cb
);
949 case 6: /* SBC / NBC */
950 tcg_gen_andi_i64(cb
, cb
, d_repl
* 0x80808080u
);
951 cond
= cond_make_0(TCG_COND_NE
, cb
);
954 case 7: /* SHC / NHC */
955 tcg_gen_andi_i64(cb
, cb
, d_repl
* 0x80008000u
);
956 cond
= cond_make_0(TCG_COND_NE
, cb
);
960 g_assert_not_reached();
963 cond
.c
= tcg_invert_cond(cond
.c
);
969 static TCGv_i64
get_carry(DisasContext
*ctx
, bool d
,
970 TCGv_i64 cb
, TCGv_i64 cb_msb
)
972 if (cond_need_ext(ctx
, d
)) {
973 TCGv_i64 t
= tcg_temp_new_i64();
974 tcg_gen_extract_i64(t
, cb
, 32, 1);
980 static TCGv_i64
get_psw_carry(DisasContext
*ctx
, bool d
)
982 return get_carry(ctx
, d
, cpu_psw_cb
, cpu_psw_cb_msb
);
985 /* Compute signed overflow for addition. */
986 static TCGv_i64
do_add_sv(DisasContext
*ctx
, TCGv_i64 res
,
987 TCGv_i64 in1
, TCGv_i64 in2
)
989 TCGv_i64 sv
= tcg_temp_new_i64();
990 TCGv_i64 tmp
= tcg_temp_new_i64();
992 tcg_gen_xor_i64(sv
, res
, in1
);
993 tcg_gen_xor_i64(tmp
, in1
, in2
);
994 tcg_gen_andc_i64(sv
, sv
, tmp
);
999 /* Compute signed overflow for subtraction. */
1000 static TCGv_i64
do_sub_sv(DisasContext
*ctx
, TCGv_i64 res
,
1001 TCGv_i64 in1
, TCGv_i64 in2
)
1003 TCGv_i64 sv
= tcg_temp_new_i64();
1004 TCGv_i64 tmp
= tcg_temp_new_i64();
1006 tcg_gen_xor_i64(sv
, res
, in1
);
1007 tcg_gen_xor_i64(tmp
, in1
, in2
);
1008 tcg_gen_and_i64(sv
, sv
, tmp
);
1013 static void do_add(DisasContext
*ctx
, unsigned rt
, TCGv_i64 in1
,
1014 TCGv_i64 in2
, unsigned shift
, bool is_l
,
1015 bool is_tsv
, bool is_tc
, bool is_c
, unsigned cf
, bool d
)
1017 TCGv_i64 dest
, cb
, cb_msb
, cb_cond
, sv
, tmp
;
1018 unsigned c
= cf
>> 1;
1021 dest
= tcg_temp_new_i64();
1027 tmp
= tcg_temp_new_i64();
1028 tcg_gen_shli_i64(tmp
, in1
, shift
);
1032 if (!is_l
|| cond_need_cb(c
)) {
1033 cb_msb
= tcg_temp_new_i64();
1034 cb
= tcg_temp_new_i64();
1036 tcg_gen_add2_i64(dest
, cb_msb
, in1
, ctx
->zero
, in2
, ctx
->zero
);
1038 tcg_gen_add2_i64(dest
, cb_msb
, dest
, cb_msb
,
1039 get_psw_carry(ctx
, d
), ctx
->zero
);
1041 tcg_gen_xor_i64(cb
, in1
, in2
);
1042 tcg_gen_xor_i64(cb
, cb
, dest
);
1043 if (cond_need_cb(c
)) {
1044 cb_cond
= get_carry(ctx
, d
, cb
, cb_msb
);
1047 tcg_gen_add_i64(dest
, in1
, in2
);
1049 tcg_gen_add_i64(dest
, dest
, get_psw_carry(ctx
, d
));
1053 /* Compute signed overflow if required. */
1055 if (is_tsv
|| cond_need_sv(c
)) {
1056 sv
= do_add_sv(ctx
, dest
, in1
, in2
);
1058 /* ??? Need to include overflow from shift. */
1059 gen_helper_tsv(tcg_env
, sv
);
1063 /* Emit any conditional trap before any writeback. */
1064 cond
= do_cond(ctx
, cf
, d
, dest
, cb_cond
, sv
);
1066 tmp
= tcg_temp_new_i64();
1067 tcg_gen_setcond_i64(cond
.c
, tmp
, cond
.a0
, cond
.a1
);
1068 gen_helper_tcond(tcg_env
, tmp
);
1071 /* Write back the result. */
1073 save_or_nullify(ctx
, cpu_psw_cb
, cb
);
1074 save_or_nullify(ctx
, cpu_psw_cb_msb
, cb_msb
);
1076 save_gpr(ctx
, rt
, dest
);
1078 /* Install the new nullification. */
1079 cond_free(&ctx
->null_cond
);
1080 ctx
->null_cond
= cond
;
1083 static bool do_add_reg(DisasContext
*ctx
, arg_rrr_cf_d_sh
*a
,
1084 bool is_l
, bool is_tsv
, bool is_tc
, bool is_c
)
1086 TCGv_i64 tcg_r1
, tcg_r2
;
1091 tcg_r1
= load_gpr(ctx
, a
->r1
);
1092 tcg_r2
= load_gpr(ctx
, a
->r2
);
1093 do_add(ctx
, a
->t
, tcg_r1
, tcg_r2
, a
->sh
, is_l
,
1094 is_tsv
, is_tc
, is_c
, a
->cf
, a
->d
);
1095 return nullify_end(ctx
);
1098 static bool do_add_imm(DisasContext
*ctx
, arg_rri_cf
*a
,
1099 bool is_tsv
, bool is_tc
)
1101 TCGv_i64 tcg_im
, tcg_r2
;
1106 tcg_im
= tcg_constant_i64(a
->i
);
1107 tcg_r2
= load_gpr(ctx
, a
->r
);
1108 /* All ADDI conditions are 32-bit. */
1109 do_add(ctx
, a
->t
, tcg_im
, tcg_r2
, 0, 0, is_tsv
, is_tc
, 0, a
->cf
, false);
1110 return nullify_end(ctx
);
1113 static void do_sub(DisasContext
*ctx
, unsigned rt
, TCGv_i64 in1
,
1114 TCGv_i64 in2
, bool is_tsv
, bool is_b
,
1115 bool is_tc
, unsigned cf
, bool d
)
1117 TCGv_i64 dest
, sv
, cb
, cb_msb
, tmp
;
1118 unsigned c
= cf
>> 1;
1121 dest
= tcg_temp_new_i64();
1122 cb
= tcg_temp_new_i64();
1123 cb_msb
= tcg_temp_new_i64();
1126 /* DEST,C = IN1 + ~IN2 + C. */
1127 tcg_gen_not_i64(cb
, in2
);
1128 tcg_gen_add2_i64(dest
, cb_msb
, in1
, ctx
->zero
,
1129 get_psw_carry(ctx
, d
), ctx
->zero
);
1130 tcg_gen_add2_i64(dest
, cb_msb
, dest
, cb_msb
, cb
, ctx
->zero
);
1131 tcg_gen_xor_i64(cb
, cb
, in1
);
1132 tcg_gen_xor_i64(cb
, cb
, dest
);
1135 * DEST,C = IN1 + ~IN2 + 1. We can produce the same result in fewer
1136 * operations by seeding the high word with 1 and subtracting.
1138 TCGv_i64 one
= tcg_constant_i64(1);
1139 tcg_gen_sub2_i64(dest
, cb_msb
, in1
, one
, in2
, ctx
->zero
);
1140 tcg_gen_eqv_i64(cb
, in1
, in2
);
1141 tcg_gen_xor_i64(cb
, cb
, dest
);
1144 /* Compute signed overflow if required. */
1146 if (is_tsv
|| cond_need_sv(c
)) {
1147 sv
= do_sub_sv(ctx
, dest
, in1
, in2
);
1149 gen_helper_tsv(tcg_env
, sv
);
1153 /* Compute the condition. We cannot use the special case for borrow. */
1155 cond
= do_sub_cond(ctx
, cf
, d
, dest
, in1
, in2
, sv
);
1157 cond
= do_cond(ctx
, cf
, d
, dest
, get_carry(ctx
, d
, cb
, cb_msb
), sv
);
1160 /* Emit any conditional trap before any writeback. */
1162 tmp
= tcg_temp_new_i64();
1163 tcg_gen_setcond_i64(cond
.c
, tmp
, cond
.a0
, cond
.a1
);
1164 gen_helper_tcond(tcg_env
, tmp
);
1167 /* Write back the result. */
1168 save_or_nullify(ctx
, cpu_psw_cb
, cb
);
1169 save_or_nullify(ctx
, cpu_psw_cb_msb
, cb_msb
);
1170 save_gpr(ctx
, rt
, dest
);
1172 /* Install the new nullification. */
1173 cond_free(&ctx
->null_cond
);
1174 ctx
->null_cond
= cond
;
1177 static bool do_sub_reg(DisasContext
*ctx
, arg_rrr_cf_d
*a
,
1178 bool is_tsv
, bool is_b
, bool is_tc
)
1180 TCGv_i64 tcg_r1
, tcg_r2
;
1185 tcg_r1
= load_gpr(ctx
, a
->r1
);
1186 tcg_r2
= load_gpr(ctx
, a
->r2
);
1187 do_sub(ctx
, a
->t
, tcg_r1
, tcg_r2
, is_tsv
, is_b
, is_tc
, a
->cf
, a
->d
);
1188 return nullify_end(ctx
);
1191 static bool do_sub_imm(DisasContext
*ctx
, arg_rri_cf
*a
, bool is_tsv
)
1193 TCGv_i64 tcg_im
, tcg_r2
;
1198 tcg_im
= tcg_constant_i64(a
->i
);
1199 tcg_r2
= load_gpr(ctx
, a
->r
);
1200 /* All SUBI conditions are 32-bit. */
1201 do_sub(ctx
, a
->t
, tcg_im
, tcg_r2
, is_tsv
, 0, 0, a
->cf
, false);
1202 return nullify_end(ctx
);
1205 static void do_cmpclr(DisasContext
*ctx
, unsigned rt
, TCGv_i64 in1
,
1206 TCGv_i64 in2
, unsigned cf
, bool d
)
1211 dest
= tcg_temp_new_i64();
1212 tcg_gen_sub_i64(dest
, in1
, in2
);
1214 /* Compute signed overflow if required. */
1216 if (cond_need_sv(cf
>> 1)) {
1217 sv
= do_sub_sv(ctx
, dest
, in1
, in2
);
1220 /* Form the condition for the compare. */
1221 cond
= do_sub_cond(ctx
, cf
, d
, dest
, in1
, in2
, sv
);
1224 tcg_gen_movi_i64(dest
, 0);
1225 save_gpr(ctx
, rt
, dest
);
1227 /* Install the new nullification. */
1228 cond_free(&ctx
->null_cond
);
1229 ctx
->null_cond
= cond
;
1232 static void do_log(DisasContext
*ctx
, unsigned rt
, TCGv_i64 in1
,
1233 TCGv_i64 in2
, unsigned cf
, bool d
,
1234 void (*fn
)(TCGv_i64
, TCGv_i64
, TCGv_i64
))
1236 TCGv_i64 dest
= dest_gpr(ctx
, rt
);
1238 /* Perform the operation, and writeback. */
1240 save_gpr(ctx
, rt
, dest
);
1242 /* Install the new nullification. */
1243 cond_free(&ctx
->null_cond
);
1245 ctx
->null_cond
= do_log_cond(ctx
, cf
, d
, dest
);
1249 static bool do_log_reg(DisasContext
*ctx
, arg_rrr_cf_d
*a
,
1250 void (*fn
)(TCGv_i64
, TCGv_i64
, TCGv_i64
))
1252 TCGv_i64 tcg_r1
, tcg_r2
;
1257 tcg_r1
= load_gpr(ctx
, a
->r1
);
1258 tcg_r2
= load_gpr(ctx
, a
->r2
);
1259 do_log(ctx
, a
->t
, tcg_r1
, tcg_r2
, a
->cf
, a
->d
, fn
);
1260 return nullify_end(ctx
);
1263 static void do_unit(DisasContext
*ctx
, unsigned rt
, TCGv_i64 in1
,
1264 TCGv_i64 in2
, unsigned cf
, bool d
, bool is_tc
,
1265 void (*fn
)(TCGv_i64
, TCGv_i64
, TCGv_i64
))
1271 dest
= dest_gpr(ctx
, rt
);
1273 save_gpr(ctx
, rt
, dest
);
1274 cond_free(&ctx
->null_cond
);
1276 dest
= tcg_temp_new_i64();
1279 cond
= do_unit_cond(cf
, d
, dest
, in1
, in2
);
1282 TCGv_i64 tmp
= tcg_temp_new_i64();
1283 tcg_gen_setcond_i64(cond
.c
, tmp
, cond
.a0
, cond
.a1
);
1284 gen_helper_tcond(tcg_env
, tmp
);
1286 save_gpr(ctx
, rt
, dest
);
1288 cond_free(&ctx
->null_cond
);
1289 ctx
->null_cond
= cond
;
1293 #ifndef CONFIG_USER_ONLY
1294 /* The "normal" usage is SP >= 0, wherein SP == 0 selects the space
1295 from the top 2 bits of the base register. There are a few system
1296 instructions that have a 3-bit space specifier, for which SR0 is
1297 not special. To handle this, pass ~SP. */
1298 static TCGv_i64
space_select(DisasContext
*ctx
, int sp
, TCGv_i64 base
)
1308 spc
= tcg_temp_new_i64();
1309 load_spr(ctx
, spc
, sp
);
1312 if (ctx
->tb_flags
& TB_FLAG_SR_SAME
) {
1316 ptr
= tcg_temp_new_ptr();
1317 tmp
= tcg_temp_new_i64();
1318 spc
= tcg_temp_new_i64();
1320 /* Extract top 2 bits of the address, shift left 3 for uint64_t index. */
1321 tcg_gen_shri_i64(tmp
, base
, (ctx
->tb_flags
& PSW_W
? 64 : 32) - 5);
1322 tcg_gen_andi_i64(tmp
, tmp
, 030);
1323 tcg_gen_trunc_i64_ptr(ptr
, tmp
);
1325 tcg_gen_add_ptr(ptr
, ptr
, tcg_env
);
1326 tcg_gen_ld_i64(spc
, ptr
, offsetof(CPUHPPAState
, sr
[4]));
1332 static void form_gva(DisasContext
*ctx
, TCGv_i64
*pgva
, TCGv_i64
*pofs
,
1333 unsigned rb
, unsigned rx
, int scale
, int64_t disp
,
1334 unsigned sp
, int modify
, bool is_phys
)
1336 TCGv_i64 base
= load_gpr(ctx
, rb
);
1340 set_insn_breg(ctx
, rb
);
1342 /* Note that RX is mutually exclusive with DISP. */
1344 ofs
= tcg_temp_new_i64();
1345 tcg_gen_shli_i64(ofs
, cpu_gr
[rx
], scale
);
1346 tcg_gen_add_i64(ofs
, ofs
, base
);
1347 } else if (disp
|| modify
) {
1348 ofs
= tcg_temp_new_i64();
1349 tcg_gen_addi_i64(ofs
, base
, disp
);
1355 *pgva
= addr
= tcg_temp_new_i64();
1356 tcg_gen_andi_i64(addr
, modify
<= 0 ? ofs
: base
, gva_offset_mask(ctx
));
1357 #ifndef CONFIG_USER_ONLY
1359 tcg_gen_or_i64(addr
, addr
, space_select(ctx
, sp
, base
));
1364 /* Emit a memory load. The modify parameter should be
1365 * < 0 for pre-modify,
1366 * > 0 for post-modify,
1367 * = 0 for no base register update.
1369 static void do_load_32(DisasContext
*ctx
, TCGv_i32 dest
, unsigned rb
,
1370 unsigned rx
, int scale
, int64_t disp
,
1371 unsigned sp
, int modify
, MemOp mop
)
1376 /* Caller uses nullify_over/nullify_end. */
1377 assert(ctx
->null_cond
.c
== TCG_COND_NEVER
);
1379 form_gva(ctx
, &addr
, &ofs
, rb
, rx
, scale
, disp
, sp
, modify
,
1381 tcg_gen_qemu_ld_i32(dest
, addr
, ctx
->mmu_idx
, mop
| UNALIGN(ctx
));
1383 save_gpr(ctx
, rb
, ofs
);
1387 static void do_load_64(DisasContext
*ctx
, TCGv_i64 dest
, unsigned rb
,
1388 unsigned rx
, int scale
, int64_t disp
,
1389 unsigned sp
, int modify
, MemOp mop
)
1394 /* Caller uses nullify_over/nullify_end. */
1395 assert(ctx
->null_cond
.c
== TCG_COND_NEVER
);
1397 form_gva(ctx
, &addr
, &ofs
, rb
, rx
, scale
, disp
, sp
, modify
,
1399 tcg_gen_qemu_ld_i64(dest
, addr
, ctx
->mmu_idx
, mop
| UNALIGN(ctx
));
1401 save_gpr(ctx
, rb
, ofs
);
1405 static void do_store_32(DisasContext
*ctx
, TCGv_i32 src
, unsigned rb
,
1406 unsigned rx
, int scale
, int64_t disp
,
1407 unsigned sp
, int modify
, MemOp mop
)
1412 /* Caller uses nullify_over/nullify_end. */
1413 assert(ctx
->null_cond
.c
== TCG_COND_NEVER
);
1415 form_gva(ctx
, &addr
, &ofs
, rb
, rx
, scale
, disp
, sp
, modify
,
1417 tcg_gen_qemu_st_i32(src
, addr
, ctx
->mmu_idx
, mop
| UNALIGN(ctx
));
1419 save_gpr(ctx
, rb
, ofs
);
1423 static void do_store_64(DisasContext
*ctx
, TCGv_i64 src
, unsigned rb
,
1424 unsigned rx
, int scale
, int64_t disp
,
1425 unsigned sp
, int modify
, MemOp mop
)
1430 /* Caller uses nullify_over/nullify_end. */
1431 assert(ctx
->null_cond
.c
== TCG_COND_NEVER
);
1433 form_gva(ctx
, &addr
, &ofs
, rb
, rx
, scale
, disp
, sp
, modify
,
1435 tcg_gen_qemu_st_i64(src
, addr
, ctx
->mmu_idx
, mop
| UNALIGN(ctx
));
1437 save_gpr(ctx
, rb
, ofs
);
1441 static bool do_load(DisasContext
*ctx
, unsigned rt
, unsigned rb
,
1442 unsigned rx
, int scale
, int64_t disp
,
1443 unsigned sp
, int modify
, MemOp mop
)
1450 /* No base register update. */
1451 dest
= dest_gpr(ctx
, rt
);
1453 /* Make sure if RT == RB, we see the result of the load. */
1454 dest
= tcg_temp_new_i64();
1456 do_load_64(ctx
, dest
, rb
, rx
, scale
, disp
, sp
, modify
, mop
);
1457 save_gpr(ctx
, rt
, dest
);
1459 return nullify_end(ctx
);
1462 static bool do_floadw(DisasContext
*ctx
, unsigned rt
, unsigned rb
,
1463 unsigned rx
, int scale
, int64_t disp
,
1464 unsigned sp
, int modify
)
1470 tmp
= tcg_temp_new_i32();
1471 do_load_32(ctx
, tmp
, rb
, rx
, scale
, disp
, sp
, modify
, MO_TEUL
);
1472 save_frw_i32(rt
, tmp
);
1475 gen_helper_loaded_fr0(tcg_env
);
1478 return nullify_end(ctx
);
1481 static bool trans_fldw(DisasContext
*ctx
, arg_ldst
*a
)
1483 return do_floadw(ctx
, a
->t
, a
->b
, a
->x
, a
->scale
? 2 : 0,
1484 a
->disp
, a
->sp
, a
->m
);
1487 static bool do_floadd(DisasContext
*ctx
, unsigned rt
, unsigned rb
,
1488 unsigned rx
, int scale
, int64_t disp
,
1489 unsigned sp
, int modify
)
1495 tmp
= tcg_temp_new_i64();
1496 do_load_64(ctx
, tmp
, rb
, rx
, scale
, disp
, sp
, modify
, MO_TEUQ
);
1500 gen_helper_loaded_fr0(tcg_env
);
1503 return nullify_end(ctx
);
1506 static bool trans_fldd(DisasContext
*ctx
, arg_ldst
*a
)
1508 return do_floadd(ctx
, a
->t
, a
->b
, a
->x
, a
->scale
? 3 : 0,
1509 a
->disp
, a
->sp
, a
->m
);
1512 static bool do_store(DisasContext
*ctx
, unsigned rt
, unsigned rb
,
1513 int64_t disp
, unsigned sp
,
1514 int modify
, MemOp mop
)
1517 do_store_64(ctx
, load_gpr(ctx
, rt
), rb
, 0, 0, disp
, sp
, modify
, mop
);
1518 return nullify_end(ctx
);
1521 static bool do_fstorew(DisasContext
*ctx
, unsigned rt
, unsigned rb
,
1522 unsigned rx
, int scale
, int64_t disp
,
1523 unsigned sp
, int modify
)
1529 tmp
= load_frw_i32(rt
);
1530 do_store_32(ctx
, tmp
, rb
, rx
, scale
, disp
, sp
, modify
, MO_TEUL
);
1532 return nullify_end(ctx
);
1535 static bool trans_fstw(DisasContext
*ctx
, arg_ldst
*a
)
1537 return do_fstorew(ctx
, a
->t
, a
->b
, a
->x
, a
->scale
? 2 : 0,
1538 a
->disp
, a
->sp
, a
->m
);
1541 static bool do_fstored(DisasContext
*ctx
, unsigned rt
, unsigned rb
,
1542 unsigned rx
, int scale
, int64_t disp
,
1543 unsigned sp
, int modify
)
1550 do_store_64(ctx
, tmp
, rb
, rx
, scale
, disp
, sp
, modify
, MO_TEUQ
);
1552 return nullify_end(ctx
);
1555 static bool trans_fstd(DisasContext
*ctx
, arg_ldst
*a
)
1557 return do_fstored(ctx
, a
->t
, a
->b
, a
->x
, a
->scale
? 3 : 0,
1558 a
->disp
, a
->sp
, a
->m
);
1561 static bool do_fop_wew(DisasContext
*ctx
, unsigned rt
, unsigned ra
,
1562 void (*func
)(TCGv_i32
, TCGv_env
, TCGv_i32
))
1567 tmp
= load_frw0_i32(ra
);
1569 func(tmp
, tcg_env
, tmp
);
1571 save_frw_i32(rt
, tmp
);
1572 return nullify_end(ctx
);
1575 static bool do_fop_wed(DisasContext
*ctx
, unsigned rt
, unsigned ra
,
1576 void (*func
)(TCGv_i32
, TCGv_env
, TCGv_i64
))
1583 dst
= tcg_temp_new_i32();
1585 func(dst
, tcg_env
, src
);
1587 save_frw_i32(rt
, dst
);
1588 return nullify_end(ctx
);
1591 static bool do_fop_ded(DisasContext
*ctx
, unsigned rt
, unsigned ra
,
1592 void (*func
)(TCGv_i64
, TCGv_env
, TCGv_i64
))
1597 tmp
= load_frd0(ra
);
1599 func(tmp
, tcg_env
, tmp
);
1602 return nullify_end(ctx
);
1605 static bool do_fop_dew(DisasContext
*ctx
, unsigned rt
, unsigned ra
,
1606 void (*func
)(TCGv_i64
, TCGv_env
, TCGv_i32
))
1612 src
= load_frw0_i32(ra
);
1613 dst
= tcg_temp_new_i64();
1615 func(dst
, tcg_env
, src
);
1618 return nullify_end(ctx
);
1621 static bool do_fop_weww(DisasContext
*ctx
, unsigned rt
,
1622 unsigned ra
, unsigned rb
,
1623 void (*func
)(TCGv_i32
, TCGv_env
, TCGv_i32
, TCGv_i32
))
1628 a
= load_frw0_i32(ra
);
1629 b
= load_frw0_i32(rb
);
1631 func(a
, tcg_env
, a
, b
);
1633 save_frw_i32(rt
, a
);
1634 return nullify_end(ctx
);
1637 static bool do_fop_dedd(DisasContext
*ctx
, unsigned rt
,
1638 unsigned ra
, unsigned rb
,
1639 void (*func
)(TCGv_i64
, TCGv_env
, TCGv_i64
, TCGv_i64
))
1647 func(a
, tcg_env
, a
, b
);
1650 return nullify_end(ctx
);
1653 /* Emit an unconditional branch to a direct target, which may or may not
1654 have already had nullification handled. */
1655 static bool do_dbranch(DisasContext
*ctx
, uint64_t dest
,
1656 unsigned link
, bool is_n
)
1658 if (ctx
->null_cond
.c
== TCG_COND_NEVER
&& ctx
->null_lab
== NULL
) {
1660 copy_iaoq_entry(ctx
, cpu_gr
[link
], ctx
->iaoq_n
, ctx
->iaoq_n_var
);
1664 ctx
->null_cond
.c
= TCG_COND_ALWAYS
;
1670 copy_iaoq_entry(ctx
, cpu_gr
[link
], ctx
->iaoq_n
, ctx
->iaoq_n_var
);
1673 if (is_n
&& use_nullify_skip(ctx
)) {
1674 nullify_set(ctx
, 0);
1675 gen_goto_tb(ctx
, 0, dest
, dest
+ 4);
1677 nullify_set(ctx
, is_n
);
1678 gen_goto_tb(ctx
, 0, ctx
->iaoq_b
, dest
);
1683 nullify_set(ctx
, 0);
1684 gen_goto_tb(ctx
, 1, ctx
->iaoq_b
, ctx
->iaoq_n
);
1685 ctx
->base
.is_jmp
= DISAS_NORETURN
;
1690 /* Emit a conditional branch to a direct target. If the branch itself
1691 is nullified, we should have already used nullify_over. */
1692 static bool do_cbranch(DisasContext
*ctx
, int64_t disp
, bool is_n
,
1695 uint64_t dest
= iaoq_dest(ctx
, disp
);
1696 TCGLabel
*taken
= NULL
;
1697 TCGCond c
= cond
->c
;
1700 assert(ctx
->null_cond
.c
== TCG_COND_NEVER
);
1702 /* Handle TRUE and NEVER as direct branches. */
1703 if (c
== TCG_COND_ALWAYS
) {
1704 return do_dbranch(ctx
, dest
, 0, is_n
&& disp
>= 0);
1706 if (c
== TCG_COND_NEVER
) {
1707 return do_dbranch(ctx
, ctx
->iaoq_n
, 0, is_n
&& disp
< 0);
1710 taken
= gen_new_label();
1711 tcg_gen_brcond_i64(c
, cond
->a0
, cond
->a1
, taken
);
1714 /* Not taken: Condition not satisfied; nullify on backward branches. */
1715 n
= is_n
&& disp
< 0;
1716 if (n
&& use_nullify_skip(ctx
)) {
1717 nullify_set(ctx
, 0);
1718 gen_goto_tb(ctx
, 0, ctx
->iaoq_n
, ctx
->iaoq_n
+ 4);
1720 if (!n
&& ctx
->null_lab
) {
1721 gen_set_label(ctx
->null_lab
);
1722 ctx
->null_lab
= NULL
;
1724 nullify_set(ctx
, n
);
1725 if (ctx
->iaoq_n
== -1) {
1726 /* The temporary iaoq_n_var died at the branch above.
1727 Regenerate it here instead of saving it. */
1728 tcg_gen_addi_i64(ctx
->iaoq_n_var
, cpu_iaoq_b
, 4);
1730 gen_goto_tb(ctx
, 0, ctx
->iaoq_b
, ctx
->iaoq_n
);
1733 gen_set_label(taken
);
1735 /* Taken: Condition satisfied; nullify on forward branches. */
1736 n
= is_n
&& disp
>= 0;
1737 if (n
&& use_nullify_skip(ctx
)) {
1738 nullify_set(ctx
, 0);
1739 gen_goto_tb(ctx
, 1, dest
, dest
+ 4);
1741 nullify_set(ctx
, n
);
1742 gen_goto_tb(ctx
, 1, ctx
->iaoq_b
, dest
);
1745 /* Not taken: the branch itself was nullified. */
1746 if (ctx
->null_lab
) {
1747 gen_set_label(ctx
->null_lab
);
1748 ctx
->null_lab
= NULL
;
1749 ctx
->base
.is_jmp
= DISAS_IAQ_N_STALE
;
1751 ctx
->base
.is_jmp
= DISAS_NORETURN
;
1756 /* Emit an unconditional branch to an indirect target. This handles
1757 nullification of the branch itself. */
1758 static bool do_ibranch(DisasContext
*ctx
, TCGv_i64 dest
,
1759 unsigned link
, bool is_n
)
1761 TCGv_i64 a0
, a1
, next
, tmp
;
1764 assert(ctx
->null_lab
== NULL
);
1766 if (ctx
->null_cond
.c
== TCG_COND_NEVER
) {
1768 copy_iaoq_entry(ctx
, cpu_gr
[link
], ctx
->iaoq_n
, ctx
->iaoq_n_var
);
1770 next
= tcg_temp_new_i64();
1771 tcg_gen_mov_i64(next
, dest
);
1773 if (use_nullify_skip(ctx
)) {
1774 copy_iaoq_entry(ctx
, cpu_iaoq_f
, -1, next
);
1775 tcg_gen_addi_i64(next
, next
, 4);
1776 copy_iaoq_entry(ctx
, cpu_iaoq_b
, -1, next
);
1777 nullify_set(ctx
, 0);
1778 ctx
->base
.is_jmp
= DISAS_IAQ_N_UPDATED
;
1781 ctx
->null_cond
.c
= TCG_COND_ALWAYS
;
1784 ctx
->iaoq_n_var
= next
;
1785 } else if (is_n
&& use_nullify_skip(ctx
)) {
1786 /* The (conditional) branch, B, nullifies the next insn, N,
1787 and we're allowed to skip execution N (no single-step or
1788 tracepoint in effect). Since the goto_ptr that we must use
1789 for the indirect branch consumes no special resources, we
1790 can (conditionally) skip B and continue execution. */
1791 /* The use_nullify_skip test implies we have a known control path. */
1792 tcg_debug_assert(ctx
->iaoq_b
!= -1);
1793 tcg_debug_assert(ctx
->iaoq_n
!= -1);
1795 /* We do have to handle the non-local temporary, DEST, before
1796 branching. Since IOAQ_F is not really live at this point, we
1797 can simply store DEST optimistically. Similarly with IAOQ_B. */
1798 copy_iaoq_entry(ctx
, cpu_iaoq_f
, -1, dest
);
1799 next
= tcg_temp_new_i64();
1800 tcg_gen_addi_i64(next
, dest
, 4);
1801 copy_iaoq_entry(ctx
, cpu_iaoq_b
, -1, next
);
1805 copy_iaoq_entry(ctx
, cpu_gr
[link
], ctx
->iaoq_n
, ctx
->iaoq_n_var
);
1807 tcg_gen_lookup_and_goto_ptr();
1808 return nullify_end(ctx
);
1810 c
= ctx
->null_cond
.c
;
1811 a0
= ctx
->null_cond
.a0
;
1812 a1
= ctx
->null_cond
.a1
;
1814 tmp
= tcg_temp_new_i64();
1815 next
= tcg_temp_new_i64();
1817 copy_iaoq_entry(ctx
, tmp
, ctx
->iaoq_n
, ctx
->iaoq_n_var
);
1818 tcg_gen_movcond_i64(c
, next
, a0
, a1
, tmp
, dest
);
1820 ctx
->iaoq_n_var
= next
;
1823 tcg_gen_movcond_i64(c
, cpu_gr
[link
], a0
, a1
, cpu_gr
[link
], tmp
);
1827 /* The branch nullifies the next insn, which means the state of N
1828 after the branch is the inverse of the state of N that applied
1830 tcg_gen_setcond_i64(tcg_invert_cond(c
), cpu_psw_n
, a0
, a1
);
1831 cond_free(&ctx
->null_cond
);
1832 ctx
->null_cond
= cond_make_n();
1833 ctx
->psw_n_nonzero
= true;
1835 cond_free(&ctx
->null_cond
);
1842 * if (IAOQ_Front{30..31} < GR[b]{30..31})
1843 * IAOQ_Next{30..31} ← GR[b]{30..31};
1845 * IAOQ_Next{30..31} ← IAOQ_Front{30..31};
1846 * which keeps the privilege level from being increased.
1848 static TCGv_i64
do_ibranch_priv(DisasContext
*ctx
, TCGv_i64 offset
)
1851 switch (ctx
->privilege
) {
1853 /* Privilege 0 is maximum and is allowed to decrease. */
1856 /* Privilege 3 is minimum and is never allowed to increase. */
1857 dest
= tcg_temp_new_i64();
1858 tcg_gen_ori_i64(dest
, offset
, 3);
1861 dest
= tcg_temp_new_i64();
1862 tcg_gen_andi_i64(dest
, offset
, -4);
1863 tcg_gen_ori_i64(dest
, dest
, ctx
->privilege
);
1864 tcg_gen_movcond_i64(TCG_COND_GTU
, dest
, dest
, offset
, dest
, offset
);
#ifdef CONFIG_USER_ONLY
/* On Linux, page zero is normally marked execute only + gateway.
   Therefore normal read or write is supposed to fail, but specific
   offsets have kernel code mapped to raise permissions to implement
   system calls.  Handling this via an explicit check here, rather
   in than the "be disp(sr2,r0)" instruction that probably sent us
   here, is the easiest way to handle the branch delay slot on the
   aforementioned BE.  */
static void do_page_zero(DisasContext *ctx)
{
    TCGv_i64 tmp;

    /* If by some means we get here with PSW[N]=1, that implies that
       the B,GATE instruction would be skipped, and we'd fault on the
       next insn within the privileged page.  */
    switch (ctx->null_cond.c) {
    case TCG_COND_NEVER:
        break;
    case TCG_COND_ALWAYS:
        tcg_gen_movi_i64(cpu_psw_n, 0);
        goto do_sigill;
    default:
        /* Since this is always the first (and only) insn within the
           TB, we should know the state of PSW[N] from TB->FLAGS.  */
        g_assert_not_reached();
    }

    /* Check that we didn't arrive here via some means that allowed
       non-sequential instruction execution.  Normally the PSW[B] bit
       detects this by disallowing the B,GATE instruction to execute
       under such conditions.  */
    if (ctx->iaoq_b != ctx->iaoq_f + 4) {
        goto do_sigill;
    }

    switch (ctx->iaoq_f & -4) {
    case 0x00: /* Null pointer call */
        gen_excp_1(EXCP_IMP);
        ctx->base.is_jmp = DISAS_NORETURN;
        break;

    case 0xb0: /* LWS */
        gen_excp_1(EXCP_SYSCALL_LWS);
        ctx->base.is_jmp = DISAS_NORETURN;
        break;

    case 0xe0: /* SET_THREAD_POINTER */
        tcg_gen_st_i64(cpu_gr[26], tcg_env, offsetof(CPUHPPAState, cr[27]));
        tmp = tcg_temp_new_i64();
        tcg_gen_ori_i64(tmp, cpu_gr[31], 3);
        copy_iaoq_entry(ctx, cpu_iaoq_f, -1, tmp);
        tcg_gen_addi_i64(tmp, tmp, 4);
        copy_iaoq_entry(ctx, cpu_iaoq_b, -1, tmp);
        ctx->base.is_jmp = DISAS_IAQ_N_UPDATED;
        break;

    case 0x100: /* SYSCALL */
        gen_excp_1(EXCP_SYSCALL);
        ctx->base.is_jmp = DISAS_NORETURN;
        break;

    default:
    do_sigill:
        gen_excp_1(EXCP_ILL);
        ctx->base.is_jmp = DISAS_NORETURN;
        break;
    }
}
#endif
1940 static bool trans_nop(DisasContext
*ctx
, arg_nop
*a
)
1942 cond_free(&ctx
->null_cond
);
1946 static bool trans_break(DisasContext
*ctx
, arg_break
*a
)
1948 return gen_excp_iir(ctx
, EXCP_BREAK
);
1951 static bool trans_sync(DisasContext
*ctx
, arg_sync
*a
)
1953 /* No point in nullifying the memory barrier. */
1954 tcg_gen_mb(TCG_BAR_SC
| TCG_MO_ALL
);
1956 cond_free(&ctx
->null_cond
);
1960 static bool trans_mfia(DisasContext
*ctx
, arg_mfia
*a
)
1963 TCGv_i64 tmp
= dest_gpr(ctx
, rt
);
1964 tcg_gen_movi_i64(tmp
, ctx
->iaoq_f
);
1965 save_gpr(ctx
, rt
, tmp
);
1967 cond_free(&ctx
->null_cond
);
1971 static bool trans_mfsp(DisasContext
*ctx
, arg_mfsp
*a
)
1974 unsigned rs
= a
->sp
;
1975 TCGv_i64 t0
= tcg_temp_new_i64();
1977 load_spr(ctx
, t0
, rs
);
1978 tcg_gen_shri_i64(t0
, t0
, 32);
1980 save_gpr(ctx
, rt
, t0
);
1982 cond_free(&ctx
->null_cond
);
1986 static bool trans_mfctl(DisasContext
*ctx
, arg_mfctl
*a
)
1989 unsigned ctl
= a
->r
;
1995 /* MFSAR without ,W masks low 5 bits. */
1996 tmp
= dest_gpr(ctx
, rt
);
1997 tcg_gen_andi_i64(tmp
, cpu_sar
, 31);
1998 save_gpr(ctx
, rt
, tmp
);
2001 save_gpr(ctx
, rt
, cpu_sar
);
2003 case CR_IT
: /* Interval Timer */
2004 /* FIXME: Respect PSW_S bit. */
2006 tmp
= dest_gpr(ctx
, rt
);
2007 if (translator_io_start(&ctx
->base
)) {
2008 gen_helper_read_interval_timer(tmp
);
2009 ctx
->base
.is_jmp
= DISAS_IAQ_N_STALE
;
2011 gen_helper_read_interval_timer(tmp
);
2013 save_gpr(ctx
, rt
, tmp
);
2014 return nullify_end(ctx
);
2019 /* All other control registers are privileged. */
2020 CHECK_MOST_PRIVILEGED(EXCP_PRIV_REG
);
2024 tmp
= tcg_temp_new_i64();
2025 tcg_gen_ld_i64(tmp
, tcg_env
, offsetof(CPUHPPAState
, cr
[ctl
]));
2026 save_gpr(ctx
, rt
, tmp
);
2029 cond_free(&ctx
->null_cond
);
2033 static bool trans_mtsp(DisasContext
*ctx
, arg_mtsp
*a
)
2036 unsigned rs
= a
->sp
;
2040 CHECK_MOST_PRIVILEGED(EXCP_PRIV_REG
);
2044 tmp
= tcg_temp_new_i64();
2045 tcg_gen_shli_i64(tmp
, load_gpr(ctx
, rr
), 32);
2048 tcg_gen_st_i64(tmp
, tcg_env
, offsetof(CPUHPPAState
, sr
[rs
]));
2049 ctx
->tb_flags
&= ~TB_FLAG_SR_SAME
;
2051 tcg_gen_mov_i64(cpu_sr
[rs
], tmp
);
2054 return nullify_end(ctx
);
2057 static bool trans_mtctl(DisasContext
*ctx
, arg_mtctl
*a
)
2059 unsigned ctl
= a
->t
;
2063 if (ctl
== CR_SAR
) {
2064 reg
= load_gpr(ctx
, a
->r
);
2065 tmp
= tcg_temp_new_i64();
2066 tcg_gen_andi_i64(tmp
, reg
, ctx
->is_pa20
? 63 : 31);
2067 save_or_nullify(ctx
, cpu_sar
, tmp
);
2069 cond_free(&ctx
->null_cond
);
2073 /* All other control registers are privileged or read-only. */
2074 CHECK_MOST_PRIVILEGED(EXCP_PRIV_REG
);
2076 #ifndef CONFIG_USER_ONLY
2080 reg
= load_gpr(ctx
, a
->r
);
2082 reg
= tcg_temp_new_i64();
2083 tcg_gen_ext32u_i64(reg
, load_gpr(ctx
, a
->r
));
2088 gen_helper_write_interval_timer(tcg_env
, reg
);
2091 gen_helper_write_eirr(tcg_env
, reg
);
2094 gen_helper_write_eiem(tcg_env
, reg
);
2095 ctx
->base
.is_jmp
= DISAS_IAQ_N_STALE_EXIT
;
2100 /* FIXME: Respect PSW_Q bit */
2101 /* The write advances the queue and stores to the back element. */
2102 tmp
= tcg_temp_new_i64();
2103 tcg_gen_ld_i64(tmp
, tcg_env
,
2104 offsetof(CPUHPPAState
, cr_back
[ctl
- CR_IIASQ
]));
2105 tcg_gen_st_i64(tmp
, tcg_env
, offsetof(CPUHPPAState
, cr
[ctl
]));
2106 tcg_gen_st_i64(reg
, tcg_env
,
2107 offsetof(CPUHPPAState
, cr_back
[ctl
- CR_IIASQ
]));
2114 tcg_gen_st_i64(reg
, tcg_env
, offsetof(CPUHPPAState
, cr
[ctl
]));
2115 #ifndef CONFIG_USER_ONLY
2116 gen_helper_change_prot_id(tcg_env
);
2121 tcg_gen_st_i64(reg
, tcg_env
, offsetof(CPUHPPAState
, cr
[ctl
]));
2124 return nullify_end(ctx
);
2128 static bool trans_mtsarcm(DisasContext
*ctx
, arg_mtsarcm
*a
)
2130 TCGv_i64 tmp
= tcg_temp_new_i64();
2132 tcg_gen_not_i64(tmp
, load_gpr(ctx
, a
->r
));
2133 tcg_gen_andi_i64(tmp
, tmp
, ctx
->is_pa20
? 63 : 31);
2134 save_or_nullify(ctx
, cpu_sar
, tmp
);
2136 cond_free(&ctx
->null_cond
);
2140 static bool trans_ldsid(DisasContext
*ctx
, arg_ldsid
*a
)
2142 TCGv_i64 dest
= dest_gpr(ctx
, a
->t
);
2144 #ifdef CONFIG_USER_ONLY
2145 /* We don't implement space registers in user mode. */
2146 tcg_gen_movi_i64(dest
, 0);
2148 tcg_gen_mov_i64(dest
, space_select(ctx
, a
->sp
, load_gpr(ctx
, a
->b
)));
2149 tcg_gen_shri_i64(dest
, dest
, 32);
2151 save_gpr(ctx
, a
->t
, dest
);
2153 cond_free(&ctx
->null_cond
);
2157 static bool trans_rsm(DisasContext
*ctx
, arg_rsm
*a
)
2159 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR
);
2160 #ifndef CONFIG_USER_ONLY
2165 tmp
= tcg_temp_new_i64();
2166 tcg_gen_ld_i64(tmp
, tcg_env
, offsetof(CPUHPPAState
, psw
));
2167 tcg_gen_andi_i64(tmp
, tmp
, ~a
->i
);
2168 gen_helper_swap_system_mask(tmp
, tcg_env
, tmp
);
2169 save_gpr(ctx
, a
->t
, tmp
);
2171 /* Exit the TB to recognize new interrupts, e.g. PSW_M. */
2172 ctx
->base
.is_jmp
= DISAS_IAQ_N_STALE_EXIT
;
2173 return nullify_end(ctx
);
2177 static bool trans_ssm(DisasContext
*ctx
, arg_ssm
*a
)
2179 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR
);
2180 #ifndef CONFIG_USER_ONLY
2185 tmp
= tcg_temp_new_i64();
2186 tcg_gen_ld_i64(tmp
, tcg_env
, offsetof(CPUHPPAState
, psw
));
2187 tcg_gen_ori_i64(tmp
, tmp
, a
->i
);
2188 gen_helper_swap_system_mask(tmp
, tcg_env
, tmp
);
2189 save_gpr(ctx
, a
->t
, tmp
);
2191 /* Exit the TB to recognize new interrupts, e.g. PSW_I. */
2192 ctx
->base
.is_jmp
= DISAS_IAQ_N_STALE_EXIT
;
2193 return nullify_end(ctx
);
2197 static bool trans_mtsm(DisasContext
*ctx
, arg_mtsm
*a
)
2199 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR
);
2200 #ifndef CONFIG_USER_ONLY
2204 reg
= load_gpr(ctx
, a
->r
);
2205 tmp
= tcg_temp_new_i64();
2206 gen_helper_swap_system_mask(tmp
, tcg_env
, reg
);
2208 /* Exit the TB to recognize new interrupts. */
2209 ctx
->base
.is_jmp
= DISAS_IAQ_N_STALE_EXIT
;
2210 return nullify_end(ctx
);
2214 static bool do_rfi(DisasContext
*ctx
, bool rfi_r
)
2216 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR
);
2217 #ifndef CONFIG_USER_ONLY
2221 gen_helper_rfi_r(tcg_env
);
2223 gen_helper_rfi(tcg_env
);
2225 /* Exit the TB to recognize new interrupts. */
2226 tcg_gen_exit_tb(NULL
, 0);
2227 ctx
->base
.is_jmp
= DISAS_NORETURN
;
2229 return nullify_end(ctx
);
2233 static bool trans_rfi(DisasContext
*ctx
, arg_rfi
*a
)
2235 return do_rfi(ctx
, false);
2238 static bool trans_rfi_r(DisasContext
*ctx
, arg_rfi_r
*a
)
2240 return do_rfi(ctx
, true);
2243 static bool trans_halt(DisasContext
*ctx
, arg_halt
*a
)
2245 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR
);
2246 #ifndef CONFIG_USER_ONLY
2248 gen_helper_halt(tcg_env
);
2249 ctx
->base
.is_jmp
= DISAS_NORETURN
;
2250 return nullify_end(ctx
);
2254 static bool trans_reset(DisasContext
*ctx
, arg_reset
*a
)
2256 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR
);
2257 #ifndef CONFIG_USER_ONLY
2259 gen_helper_reset(tcg_env
);
2260 ctx
->base
.is_jmp
= DISAS_NORETURN
;
2261 return nullify_end(ctx
);
2265 static bool trans_getshadowregs(DisasContext
*ctx
, arg_getshadowregs
*a
)
2267 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR
);
2268 #ifndef CONFIG_USER_ONLY
2270 gen_helper_getshadowregs(tcg_env
);
2271 return nullify_end(ctx
);
2275 static bool trans_nop_addrx(DisasContext
*ctx
, arg_ldst
*a
)
2278 TCGv_i64 dest
= dest_gpr(ctx
, a
->b
);
2279 TCGv_i64 src1
= load_gpr(ctx
, a
->b
);
2280 TCGv_i64 src2
= load_gpr(ctx
, a
->x
);
2282 /* The only thing we need to do is the base register modification. */
2283 tcg_gen_add_i64(dest
, src1
, src2
);
2284 save_gpr(ctx
, a
->b
, dest
);
2286 cond_free(&ctx
->null_cond
);
2290 static bool trans_probe(DisasContext
*ctx
, arg_probe
*a
)
2293 TCGv_i32 level
, want
;
2298 dest
= dest_gpr(ctx
, a
->t
);
2299 form_gva(ctx
, &addr
, &ofs
, a
->b
, 0, 0, 0, a
->sp
, 0, false);
2302 level
= tcg_constant_i32(a
->ri
& 3);
2304 level
= tcg_temp_new_i32();
2305 tcg_gen_extrl_i64_i32(level
, load_gpr(ctx
, a
->ri
));
2306 tcg_gen_andi_i32(level
, level
, 3);
2308 want
= tcg_constant_i32(a
->write
? PAGE_WRITE
: PAGE_READ
);
2310 gen_helper_probe(dest
, tcg_env
, addr
, level
, want
);
2312 save_gpr(ctx
, a
->t
, dest
);
2313 return nullify_end(ctx
);
2316 static bool trans_ixtlbx(DisasContext
*ctx
, arg_ixtlbx
*a
)
2321 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR
);
2322 #ifndef CONFIG_USER_ONLY
2328 form_gva(ctx
, &addr
, &ofs
, a
->b
, 0, 0, 0, a
->sp
, 0, false);
2329 reg
= load_gpr(ctx
, a
->r
);
2331 gen_helper_itlba_pa11(tcg_env
, addr
, reg
);
2333 gen_helper_itlbp_pa11(tcg_env
, addr
, reg
);
2336 /* Exit TB for TLB change if mmu is enabled. */
2337 if (ctx
->tb_flags
& PSW_C
) {
2338 ctx
->base
.is_jmp
= DISAS_IAQ_N_STALE
;
2340 return nullify_end(ctx
);
2344 static bool do_pxtlb(DisasContext
*ctx
, arg_ldst
*a
, bool local
)
2346 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR
);
2347 #ifndef CONFIG_USER_ONLY
2353 form_gva(ctx
, &addr
, &ofs
, a
->b
, a
->x
, 0, 0, a
->sp
, a
->m
, false);
2356 * Page align now, rather than later, so that we can add in the
2357 * page_size field from pa2.0 from the low 4 bits of GR[b].
2359 tcg_gen_andi_i64(addr
, addr
, TARGET_PAGE_MASK
);
2361 tcg_gen_deposit_i64(addr
, addr
, load_gpr(ctx
, a
->b
), 0, 4);
2365 gen_helper_ptlb_l(tcg_env
, addr
);
2367 gen_helper_ptlb(tcg_env
, addr
);
2371 save_gpr(ctx
, a
->b
, ofs
);
2374 /* Exit TB for TLB change if mmu is enabled. */
2375 if (ctx
->tb_flags
& PSW_C
) {
2376 ctx
->base
.is_jmp
= DISAS_IAQ_N_STALE
;
2378 return nullify_end(ctx
);
2382 static bool trans_pxtlb(DisasContext
*ctx
, arg_ldst
*a
)
2384 return do_pxtlb(ctx
, a
, false);
2387 static bool trans_pxtlb_l(DisasContext
*ctx
, arg_ldst
*a
)
2389 return ctx
->is_pa20
&& do_pxtlb(ctx
, a
, true);
2392 static bool trans_pxtlbe(DisasContext
*ctx
, arg_ldst
*a
)
2394 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR
);
2395 #ifndef CONFIG_USER_ONLY
2398 trans_nop_addrx(ctx
, a
);
2399 gen_helper_ptlbe(tcg_env
);
2401 /* Exit TB for TLB change if mmu is enabled. */
2402 if (ctx
->tb_flags
& PSW_C
) {
2403 ctx
->base
.is_jmp
= DISAS_IAQ_N_STALE
;
2405 return nullify_end(ctx
);
2410 * Implement the pcxl and pcxl2 Fast TLB Insert instructions.
2412 * https://parisc.wiki.kernel.org/images-parisc/a/a9/Pcxl2_ers.pdf
2413 * page 13-9 (195/206)
2415 static bool trans_ixtlbxf(DisasContext
*ctx
, arg_ixtlbxf
*a
)
2420 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR
);
2421 #ifndef CONFIG_USER_ONLY
2422 TCGv_i64 addr
, atl
, stl
;
2429 * if (not (pcxl or pcxl2))
2430 * return gen_illegal(ctx);
2433 atl
= tcg_temp_new_i64();
2434 stl
= tcg_temp_new_i64();
2435 addr
= tcg_temp_new_i64();
2437 tcg_gen_ld32u_i64(stl
, tcg_env
,
2438 a
->data
? offsetof(CPUHPPAState
, cr
[CR_ISR
])
2439 : offsetof(CPUHPPAState
, cr
[CR_IIASQ
]));
2440 tcg_gen_ld32u_i64(atl
, tcg_env
,
2441 a
->data
? offsetof(CPUHPPAState
, cr
[CR_IOR
])
2442 : offsetof(CPUHPPAState
, cr
[CR_IIAOQ
]));
2443 tcg_gen_shli_i64(stl
, stl
, 32);
2444 tcg_gen_or_i64(addr
, atl
, stl
);
2446 reg
= load_gpr(ctx
, a
->r
);
2448 gen_helper_itlba_pa11(tcg_env
, addr
, reg
);
2450 gen_helper_itlbp_pa11(tcg_env
, addr
, reg
);
2453 /* Exit TB for TLB change if mmu is enabled. */
2454 if (ctx
->tb_flags
& PSW_C
) {
2455 ctx
->base
.is_jmp
= DISAS_IAQ_N_STALE
;
2457 return nullify_end(ctx
);
2461 static bool trans_ixtlbt(DisasContext
*ctx
, arg_ixtlbt
*a
)
2463 if (!ctx
->is_pa20
) {
2466 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR
);
2467 #ifndef CONFIG_USER_ONLY
2470 TCGv_i64 src1
= load_gpr(ctx
, a
->r1
);
2471 TCGv_i64 src2
= load_gpr(ctx
, a
->r2
);
2474 gen_helper_idtlbt_pa20(tcg_env
, src1
, src2
);
2476 gen_helper_iitlbt_pa20(tcg_env
, src1
, src2
);
2479 /* Exit TB for TLB change if mmu is enabled. */
2480 if (ctx
->tb_flags
& PSW_C
) {
2481 ctx
->base
.is_jmp
= DISAS_IAQ_N_STALE
;
2483 return nullify_end(ctx
);
2487 static bool trans_lpa(DisasContext
*ctx
, arg_ldst
*a
)
2489 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR
);
2490 #ifndef CONFIG_USER_ONLY
2492 TCGv_i64 ofs
, paddr
;
2496 form_gva(ctx
, &vaddr
, &ofs
, a
->b
, a
->x
, 0, 0, a
->sp
, a
->m
, false);
2498 paddr
= tcg_temp_new_i64();
2499 gen_helper_lpa(paddr
, tcg_env
, vaddr
);
2501 /* Note that physical address result overrides base modification. */
2503 save_gpr(ctx
, a
->b
, ofs
);
2505 save_gpr(ctx
, a
->t
, paddr
);
2507 return nullify_end(ctx
);
2511 static bool trans_lci(DisasContext
*ctx
, arg_lci
*a
)
2513 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR
);
2515 /* The Coherence Index is an implementation-defined function of the
2516 physical address. Two addresses with the same CI have a coherent
2517 view of the cache. Our implementation is to return 0 for all,
2518 since the entire address space is coherent. */
2519 save_gpr(ctx
, a
->t
, ctx
->zero
);
2521 cond_free(&ctx
->null_cond
);
2525 static bool trans_add(DisasContext
*ctx
, arg_rrr_cf_d_sh
*a
)
2527 return do_add_reg(ctx
, a
, false, false, false, false);
2530 static bool trans_add_l(DisasContext
*ctx
, arg_rrr_cf_d_sh
*a
)
2532 return do_add_reg(ctx
, a
, true, false, false, false);
2535 static bool trans_add_tsv(DisasContext
*ctx
, arg_rrr_cf_d_sh
*a
)
2537 return do_add_reg(ctx
, a
, false, true, false, false);
2540 static bool trans_add_c(DisasContext
*ctx
, arg_rrr_cf_d_sh
*a
)
2542 return do_add_reg(ctx
, a
, false, false, false, true);
2545 static bool trans_add_c_tsv(DisasContext
*ctx
, arg_rrr_cf_d_sh
*a
)
2547 return do_add_reg(ctx
, a
, false, true, false, true);
2550 static bool trans_sub(DisasContext
*ctx
, arg_rrr_cf_d
*a
)
2552 return do_sub_reg(ctx
, a
, false, false, false);
2555 static bool trans_sub_tsv(DisasContext
*ctx
, arg_rrr_cf_d
*a
)
2557 return do_sub_reg(ctx
, a
, true, false, false);
2560 static bool trans_sub_tc(DisasContext
*ctx
, arg_rrr_cf_d
*a
)
2562 return do_sub_reg(ctx
, a
, false, false, true);
2565 static bool trans_sub_tsv_tc(DisasContext
*ctx
, arg_rrr_cf_d
*a
)
2567 return do_sub_reg(ctx
, a
, true, false, true);
2570 static bool trans_sub_b(DisasContext
*ctx
, arg_rrr_cf_d
*a
)
2572 return do_sub_reg(ctx
, a
, false, true, false);
2575 static bool trans_sub_b_tsv(DisasContext
*ctx
, arg_rrr_cf_d
*a
)
2577 return do_sub_reg(ctx
, a
, true, true, false);
2580 static bool trans_andcm(DisasContext
*ctx
, arg_rrr_cf_d
*a
)
2582 return do_log_reg(ctx
, a
, tcg_gen_andc_i64
);
2585 static bool trans_and(DisasContext
*ctx
, arg_rrr_cf_d
*a
)
2587 return do_log_reg(ctx
, a
, tcg_gen_and_i64
);
2590 static bool trans_or(DisasContext
*ctx
, arg_rrr_cf_d
*a
)
2593 unsigned r2
= a
->r2
;
2594 unsigned r1
= a
->r1
;
2597 if (rt
== 0) { /* NOP */
2598 cond_free(&ctx
->null_cond
);
2601 if (r2
== 0) { /* COPY */
2603 TCGv_i64 dest
= dest_gpr(ctx
, rt
);
2604 tcg_gen_movi_i64(dest
, 0);
2605 save_gpr(ctx
, rt
, dest
);
2607 save_gpr(ctx
, rt
, cpu_gr
[r1
]);
2609 cond_free(&ctx
->null_cond
);
2612 #ifndef CONFIG_USER_ONLY
2613 /* These are QEMU extensions and are nops in the real architecture:
2615 * or %r10,%r10,%r10 -- idle loop; wait for interrupt
2616 * or %r31,%r31,%r31 -- death loop; offline cpu
2617 * currently implemented as idle.
2619 if ((rt
== 10 || rt
== 31) && r1
== rt
&& r2
== rt
) { /* PAUSE */
2620 /* No need to check for supervisor, as userland can only pause
2621 until the next timer interrupt. */
2624 /* Advance the instruction queue. */
2625 copy_iaoq_entry(ctx
, cpu_iaoq_f
, ctx
->iaoq_b
, cpu_iaoq_b
);
2626 copy_iaoq_entry(ctx
, cpu_iaoq_b
, ctx
->iaoq_n
, ctx
->iaoq_n_var
);
2627 nullify_set(ctx
, 0);
2629 /* Tell the qemu main loop to halt until this cpu has work. */
2630 tcg_gen_st_i32(tcg_constant_i32(1), tcg_env
,
2631 offsetof(CPUState
, halted
) - offsetof(HPPACPU
, env
));
2632 gen_excp_1(EXCP_HALTED
);
2633 ctx
->base
.is_jmp
= DISAS_NORETURN
;
2635 return nullify_end(ctx
);
2639 return do_log_reg(ctx
, a
, tcg_gen_or_i64
);
2642 static bool trans_xor(DisasContext
*ctx
, arg_rrr_cf_d
*a
)
2644 return do_log_reg(ctx
, a
, tcg_gen_xor_i64
);
2647 static bool trans_cmpclr(DisasContext
*ctx
, arg_rrr_cf_d
*a
)
2649 TCGv_i64 tcg_r1
, tcg_r2
;
2654 tcg_r1
= load_gpr(ctx
, a
->r1
);
2655 tcg_r2
= load_gpr(ctx
, a
->r2
);
2656 do_cmpclr(ctx
, a
->t
, tcg_r1
, tcg_r2
, a
->cf
, a
->d
);
2657 return nullify_end(ctx
);
2660 static bool trans_uxor(DisasContext
*ctx
, arg_rrr_cf_d
*a
)
2662 TCGv_i64 tcg_r1
, tcg_r2
;
2667 tcg_r1
= load_gpr(ctx
, a
->r1
);
2668 tcg_r2
= load_gpr(ctx
, a
->r2
);
2669 do_unit(ctx
, a
->t
, tcg_r1
, tcg_r2
, a
->cf
, a
->d
, false, tcg_gen_xor_i64
);
2670 return nullify_end(ctx
);
2673 static bool do_uaddcm(DisasContext
*ctx
, arg_rrr_cf_d
*a
, bool is_tc
)
2675 TCGv_i64 tcg_r1
, tcg_r2
, tmp
;
2680 tcg_r1
= load_gpr(ctx
, a
->r1
);
2681 tcg_r2
= load_gpr(ctx
, a
->r2
);
2682 tmp
= tcg_temp_new_i64();
2683 tcg_gen_not_i64(tmp
, tcg_r2
);
2684 do_unit(ctx
, a
->t
, tcg_r1
, tmp
, a
->cf
, a
->d
, is_tc
, tcg_gen_add_i64
);
2685 return nullify_end(ctx
);
2688 static bool trans_uaddcm(DisasContext
*ctx
, arg_rrr_cf_d
*a
)
2690 return do_uaddcm(ctx
, a
, false);
2693 static bool trans_uaddcm_tc(DisasContext
*ctx
, arg_rrr_cf_d
*a
)
2695 return do_uaddcm(ctx
, a
, true);
2698 static bool do_dcor(DisasContext
*ctx
, arg_rr_cf_d
*a
, bool is_i
)
2704 tmp
= tcg_temp_new_i64();
2705 tcg_gen_shri_i64(tmp
, cpu_psw_cb
, 3);
2707 tcg_gen_not_i64(tmp
, tmp
);
2709 tcg_gen_andi_i64(tmp
, tmp
, (uint64_t)0x1111111111111111ull
);
2710 tcg_gen_muli_i64(tmp
, tmp
, 6);
2711 do_unit(ctx
, a
->t
, load_gpr(ctx
, a
->r
), tmp
, a
->cf
, a
->d
, false,
2712 is_i
? tcg_gen_add_i64
: tcg_gen_sub_i64
);
2713 return nullify_end(ctx
);
2716 static bool trans_dcor(DisasContext
*ctx
, arg_rr_cf_d
*a
)
2718 return do_dcor(ctx
, a
, false);
2721 static bool trans_dcor_i(DisasContext
*ctx
, arg_rr_cf_d
*a
)
2723 return do_dcor(ctx
, a
, true);
2726 static bool trans_ds(DisasContext
*ctx
, arg_rrr_cf
*a
)
2728 TCGv_i64 dest
, add1
, add2
, addc
, in1
, in2
;
2733 in1
= load_gpr(ctx
, a
->r1
);
2734 in2
= load_gpr(ctx
, a
->r2
);
2736 add1
= tcg_temp_new_i64();
2737 add2
= tcg_temp_new_i64();
2738 addc
= tcg_temp_new_i64();
2739 dest
= tcg_temp_new_i64();
2741 /* Form R1 << 1 | PSW[CB]{8}. */
2742 tcg_gen_add_i64(add1
, in1
, in1
);
2743 tcg_gen_add_i64(add1
, add1
, get_psw_carry(ctx
, false));
2746 * Add or subtract R2, depending on PSW[V]. Proper computation of
2747 * carry requires that we subtract via + ~R2 + 1, as described in
2748 * the manual. By extracting and masking V, we can produce the
2749 * proper inputs to the addition without movcond.
2751 tcg_gen_sextract_i64(addc
, cpu_psw_v
, 31, 1);
2752 tcg_gen_xor_i64(add2
, in2
, addc
);
2753 tcg_gen_andi_i64(addc
, addc
, 1);
2755 tcg_gen_add2_i64(dest
, cpu_psw_cb_msb
, add1
, ctx
->zero
, add2
, ctx
->zero
);
2756 tcg_gen_add2_i64(dest
, cpu_psw_cb_msb
, dest
, cpu_psw_cb_msb
,
2759 /* Write back the result register. */
2760 save_gpr(ctx
, a
->t
, dest
);
2762 /* Write back PSW[CB]. */
2763 tcg_gen_xor_i64(cpu_psw_cb
, add1
, add2
);
2764 tcg_gen_xor_i64(cpu_psw_cb
, cpu_psw_cb
, dest
);
2766 /* Write back PSW[V] for the division step. */
2767 cout
= get_psw_carry(ctx
, false);
2768 tcg_gen_neg_i64(cpu_psw_v
, cout
);
2769 tcg_gen_xor_i64(cpu_psw_v
, cpu_psw_v
, in2
);
2771 /* Install the new nullification. */
2774 if (cond_need_sv(a
->cf
>> 1)) {
2775 /* ??? The lshift is supposed to contribute to overflow. */
2776 sv
= do_add_sv(ctx
, dest
, add1
, add2
);
2778 ctx
->null_cond
= do_cond(ctx
, a
->cf
, false, dest
, cout
, sv
);
2781 return nullify_end(ctx
);
/*
 * ADDI / SUBI immediate forms.  The boolean arguments select the
 * trap-on-overflow (tsv) and trap-on-carry (tc) completers.
 */
static bool trans_addi(DisasContext *ctx, arg_rri_cf *a)
{
    return do_add_imm(ctx, a, false, false);
}

static bool trans_addi_tsv(DisasContext *ctx, arg_rri_cf *a)
{
    return do_add_imm(ctx, a, true, false);
}

static bool trans_addi_tc(DisasContext *ctx, arg_rri_cf *a)
{
    return do_add_imm(ctx, a, false, true);
}

static bool trans_addi_tc_tsv(DisasContext *ctx, arg_rri_cf *a)
{
    return do_add_imm(ctx, a, true, true);
}

static bool trans_subi(DisasContext *ctx, arg_rri_cf *a)
{
    return do_sub_imm(ctx, a, false);
}

static bool trans_subi_tsv(DisasContext *ctx, arg_rri_cf *a)
{
    return do_sub_imm(ctx, a, true);
}

/* CMPICLR: compare immediate and clear target register. */
static bool trans_cmpiclr(DisasContext *ctx, arg_rri_cf_d *a)
{
    TCGv_i64 tcg_im, tcg_r2;

    /* The doubleword (d) completer is pa2.0 only. */
    if (!ctx->is_pa20 && a->d) {
        return false;
    }

    nullify_over(ctx);

    tcg_im = tcg_constant_i64(a->i);
    tcg_r2 = load_gpr(ctx, a->r);
    do_cmpclr(ctx, a->t, tcg_im, tcg_r2, a->cf, a->d);

    return nullify_end(ctx);
}
/*
 * Common translation for the pa2.0 halfword multimedia (MAX-2)
 * three-register operations: rt = fn(r1, r2).
 */
static bool do_multimedia(DisasContext *ctx, arg_rrr *a,
                          void (*fn)(TCGv_i64, TCGv_i64, TCGv_i64))
{
    TCGv_i64 r1, r2, dest;

    /* Multimedia instructions are pa2.0 only. */
    if (!ctx->is_pa20) {
        return false;
    }

    nullify_over(ctx);

    r1 = load_gpr(ctx, a->r1);
    r2 = load_gpr(ctx, a->r2);
    dest = dest_gpr(ctx, a->t);

    fn(dest, r1, r2);
    save_gpr(ctx, a->t, dest);

    return nullify_end(ctx);
}

/*
 * Common translation for the multimedia shift-by-immediate
 * operations: rt = fn(r, sa).
 */
static bool do_multimedia_sh(DisasContext *ctx, arg_rri *a,
                             void (*fn)(TCGv_i64, TCGv_i64, int64_t))
{
    TCGv_i64 r, dest;

    if (!ctx->is_pa20) {
        return false;
    }

    nullify_over(ctx);

    r = load_gpr(ctx, a->r);
    dest = dest_gpr(ctx, a->t);

    fn(dest, r, a->i);
    save_gpr(ctx, a->t, dest);

    return nullify_end(ctx);
}

/*
 * Common translation for the multimedia shift-and-add
 * operations: rt = fn(r1, r2, sh).
 */
static bool do_multimedia_shadd(DisasContext *ctx, arg_rrr_sh *a,
                                void (*fn)(TCGv_i64, TCGv_i64,
                                           TCGv_i64, TCGv_i32))
{
    TCGv_i64 r1, r2, dest;

    if (!ctx->is_pa20) {
        return false;
    }

    nullify_over(ctx);

    r1 = load_gpr(ctx, a->r1);
    r2 = load_gpr(ctx, a->r2);
    dest = dest_gpr(ctx, a->t);

    fn(dest, r1, r2, tcg_constant_i32(a->sh));
    save_gpr(ctx, a->t, dest);

    return nullify_end(ctx);
}
/* HADD: parallel add of four halfwords; plain, signed-sat, unsigned-sat. */
static bool trans_hadd(DisasContext *ctx, arg_rrr *a)
{
    return do_multimedia(ctx, a, tcg_gen_vec_add16_i64);
}

static bool trans_hadd_ss(DisasContext *ctx, arg_rrr *a)
{
    return do_multimedia(ctx, a, gen_helper_hadd_ss);
}

static bool trans_hadd_us(DisasContext *ctx, arg_rrr *a)
{
    return do_multimedia(ctx, a, gen_helper_hadd_us);
}

/* HAVG: parallel halfword average. */
static bool trans_havg(DisasContext *ctx, arg_rrr *a)
{
    return do_multimedia(ctx, a, gen_helper_havg);
}

/* HSHL / HSHR: parallel halfword shifts by immediate. */
static bool trans_hshl(DisasContext *ctx, arg_rri *a)
{
    return do_multimedia_sh(ctx, a, tcg_gen_vec_shl16i_i64);
}

static bool trans_hshr_s(DisasContext *ctx, arg_rri *a)
{
    return do_multimedia_sh(ctx, a, tcg_gen_vec_sar16i_i64);
}

static bool trans_hshr_u(DisasContext *ctx, arg_rri *a)
{
    return do_multimedia_sh(ctx, a, tcg_gen_vec_shr16i_i64);
}

/* HSHLADD / HSHRADD: parallel halfword shift-and-add. */
static bool trans_hshladd(DisasContext *ctx, arg_rrr_sh *a)
{
    return do_multimedia_shadd(ctx, a, gen_helper_hshladd);
}

static bool trans_hshradd(DisasContext *ctx, arg_rrr_sh *a)
{
    return do_multimedia_shadd(ctx, a, gen_helper_hshradd);
}

/* HSUB: parallel subtract of four halfwords; plain, signed/unsigned sat. */
static bool trans_hsub(DisasContext *ctx, arg_rrr *a)
{
    return do_multimedia(ctx, a, tcg_gen_vec_sub16_i64);
}

static bool trans_hsub_ss(DisasContext *ctx, arg_rrr *a)
{
    return do_multimedia(ctx, a, gen_helper_hsub_ss);
}

static bool trans_hsub_us(DisasContext *ctx, arg_rrr *a)
{
    return do_multimedia(ctx, a, gen_helper_hsub_us);
}
2952 static void gen_mixh_l(TCGv_i64 dst
, TCGv_i64 r1
, TCGv_i64 r2
)
2954 uint64_t mask
= 0xffff0000ffff0000ull
;
2955 TCGv_i64 tmp
= tcg_temp_new_i64();
2957 tcg_gen_andi_i64(tmp
, r2
, mask
);
2958 tcg_gen_andi_i64(dst
, r1
, mask
);
2959 tcg_gen_shri_i64(tmp
, tmp
, 16);
2960 tcg_gen_or_i64(dst
, dst
, tmp
);
/* MIXH,L instruction. */
static bool trans_mixh_l(DisasContext *ctx, arg_rrr *a)
{
    return do_multimedia(ctx, a, gen_mixh_l);
}
2968 static void gen_mixh_r(TCGv_i64 dst
, TCGv_i64 r1
, TCGv_i64 r2
)
2970 uint64_t mask
= 0x0000ffff0000ffffull
;
2971 TCGv_i64 tmp
= tcg_temp_new_i64();
2973 tcg_gen_andi_i64(tmp
, r1
, mask
);
2974 tcg_gen_andi_i64(dst
, r2
, mask
);
2975 tcg_gen_shli_i64(tmp
, tmp
, 16);
2976 tcg_gen_or_i64(dst
, dst
, tmp
);
/* MIXH,R instruction. */
static bool trans_mixh_r(DisasContext *ctx, arg_rrr *a)
{
    return do_multimedia(ctx, a, gen_mixh_r);
}
/* MIXW,L: dst = { r1[63:32], r2[63:32] } — high words of each source. */
static void gen_mixw_l(TCGv_i64 dst, TCGv_i64 r1, TCGv_i64 r2)
{
    TCGv_i64 tmp = tcg_temp_new_i64();

    tcg_gen_shri_i64(tmp, r2, 32);
    tcg_gen_deposit_i64(dst, r1, tmp, 0, 32);
}

static bool trans_mixw_l(DisasContext *ctx, arg_rrr *a)
{
    return do_multimedia(ctx, a, gen_mixw_l);
}
/* MIXW,R: dst = { r1[31:0], r2[31:0] } — low words of each source. */
static void gen_mixw_r(TCGv_i64 dst, TCGv_i64 r1, TCGv_i64 r2)
{
    tcg_gen_deposit_i64(dst, r2, r1, 32, 32);
}

static bool trans_mixw_r(DisasContext *ctx, arg_rrr *a)
{
    return do_multimedia(ctx, a, gen_mixw_r);
}
/*
 * PERMH: permute the four halfwords of R1 into RT, selected by the
 * immediate fields c0..c3.  SAR-style big-endian numbering: field 0
 * is the most significant halfword, hence the (3 - cN) conversion.
 */
static bool trans_permh(DisasContext *ctx, arg_permh *a)
{
    TCGv_i64 r, t0, t1, t2, t3;

    /* pa2.0 only. */
    if (!ctx->is_pa20) {
        return false;
    }

    nullify_over(ctx);

    r = load_gpr(ctx, a->r1);
    t0 = tcg_temp_new_i64();
    t1 = tcg_temp_new_i64();
    t2 = tcg_temp_new_i64();
    t3 = tcg_temp_new_i64();

    tcg_gen_extract_i64(t0, r, (3 - a->c0) * 16, 16);
    tcg_gen_extract_i64(t1, r, (3 - a->c1) * 16, 16);
    tcg_gen_extract_i64(t2, r, (3 - a->c2) * 16, 16);
    tcg_gen_extract_i64(t3, r, (3 - a->c3) * 16, 16);

    /* Pairwise merge: t0 = t1:t0, t2 = t3:t2, then t0 = t2:t0. */
    tcg_gen_deposit_i64(t0, t1, t0, 16, 48);
    tcg_gen_deposit_i64(t2, t3, t2, 16, 48);
    tcg_gen_deposit_i64(t0, t2, t0, 32, 32);

    save_gpr(ctx, a->t, t0);
    return nullify_end(ctx);
}
/* LDB/LDH/LDW/LDD: indexed and displacement loads. */
static bool trans_ld(DisasContext *ctx, arg_ldst *a)
{
    if (ctx->is_pa20) {
        /*
         * With pa20, LDB, LDH, LDW, LDD to %g0 are prefetches.
         * Any base modification still occurs.
         */
        if (a->t == 0) {
            return trans_nop_addrx(ctx, a);
        }
    } else if (a->size > MO_32) {
        /* 64-bit loads are pa2.0 only. */
        return gen_illegal(ctx);
    }
    return do_load(ctx, a->t, a->b, a->x, a->scale ? a->size : 0,
                   a->disp, a->sp, a->m, a->size | MO_TE);
}

/* STB/STH/STW/STD: displacement stores (no index or scale forms). */
static bool trans_st(DisasContext *ctx, arg_ldst *a)
{
    assert(a->x == 0 && a->scale == 0);
    if (!ctx->is_pa20 && a->size > MO_32) {
        return gen_illegal(ctx);
    }
    return do_store(ctx, a->t, a->b, a->disp, a->sp, a->m, a->size | MO_TE);
}
/*
 * LDCW/LDCD: load-and-clear, the architecture's atomic primitive.
 * Implemented as an atomic exchange with zero.
 */
static bool trans_ldc(DisasContext *ctx, arg_ldst *a)
{
    MemOp mop = MO_TE | MO_ALIGN | a->size;
    TCGv_i64 dest, ofs;
    TCGv_i64 addr;

    if (!ctx->is_pa20 && a->size > MO_32) {
        return gen_illegal(ctx);
    }

    nullify_over(ctx);

    if (a->m) {
        /* Base register modification.  Make sure if RT == RB,
           we see the result of the load. */
        dest = tcg_temp_new_i64();
    } else {
        dest = dest_gpr(ctx, a->t);
    }

    form_gva(ctx, &addr, &ofs, a->b, a->x, a->scale ? a->size : 0,
             a->disp, a->sp, a->m, MMU_DISABLED(ctx));

    /*
     * For hppa1.1, LDCW is undefined unless aligned mod 16.
     * However actual hardware succeeds with aligned mod 4.
     * Detect this case and log a GUEST_ERROR.
     *
     * TODO: HPPA64 relaxes the over-alignment requirement
     * with the ,co completer.
     */
    gen_helper_ldc_check(addr);

    tcg_gen_atomic_xchg_i64(dest, addr, ctx->zero, ctx->mmu_idx, mop);

    if (a->m) {
        save_gpr(ctx, a->b, ofs);
    }
    save_gpr(ctx, a->t, dest);

    return nullify_end(ctx);
}
/*
 * STBY: store bytes, used for unaligned block moves.  The ,e (end)
 * and ,b (begin) forms store the trailing/leading bytes of a word;
 * helpers handle the byte selection.  Parallel variants are needed
 * when another cpu could observe the partial store.
 */
static bool trans_stby(DisasContext *ctx, arg_stby *a)
{
    TCGv_i64 ofs, val;
    TCGv_i64 addr;

    nullify_over(ctx);

    form_gva(ctx, &addr, &ofs, a->b, 0, 0, a->disp, a->sp, a->m,
             MMU_DISABLED(ctx));
    val = load_gpr(ctx, a->r);
    if (a->a) {
        if (tb_cflags(ctx->base.tb) & CF_PARALLEL) {
            gen_helper_stby_e_parallel(tcg_env, addr, val);
        } else {
            gen_helper_stby_e(tcg_env, addr, val);
        }
    } else {
        if (tb_cflags(ctx->base.tb) & CF_PARALLEL) {
            gen_helper_stby_b_parallel(tcg_env, addr, val);
        } else {
            gen_helper_stby_b(tcg_env, addr, val);
        }
    }
    if (a->m) {
        /* Base modification rounds down to the word boundary. */
        tcg_gen_andi_i64(ofs, ofs, ~3);
        save_gpr(ctx, a->b, ofs);
    }

    return nullify_end(ctx);
}

/* STDBY: doubleword variant of STBY; pa2.0 only. */
static bool trans_stdby(DisasContext *ctx, arg_stby *a)
{
    TCGv_i64 ofs, val;
    TCGv_i64 addr;

    if (!ctx->is_pa20) {
        return false;
    }
    nullify_over(ctx);

    form_gva(ctx, &addr, &ofs, a->b, 0, 0, a->disp, a->sp, a->m,
             MMU_DISABLED(ctx));
    val = load_gpr(ctx, a->r);
    if (a->a) {
        if (tb_cflags(ctx->base.tb) & CF_PARALLEL) {
            gen_helper_stdby_e_parallel(tcg_env, addr, val);
        } else {
            gen_helper_stdby_e(tcg_env, addr, val);
        }
    } else {
        if (tb_cflags(ctx->base.tb) & CF_PARALLEL) {
            gen_helper_stdby_b_parallel(tcg_env, addr, val);
        } else {
            gen_helper_stdby_b(tcg_env, addr, val);
        }
    }
    if (a->m) {
        /* Base modification rounds down to the doubleword boundary. */
        tcg_gen_andi_i64(ofs, ofs, ~7);
        save_gpr(ctx, a->b, ofs);
    }

    return nullify_end(ctx);
}
/*
 * LDWA/LDDA: load absolute — privileged access bypassing translation.
 * Temporarily swap in the absolute-address MMU index around the
 * normal load translator.
 */
static bool trans_lda(DisasContext *ctx, arg_ldst *a)
{
    int hold_mmu_idx = ctx->mmu_idx;

    CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
    ctx->mmu_idx = ctx->tb_flags & PSW_W ? MMU_ABS_W_IDX : MMU_ABS_IDX;
    trans_ld(ctx, a);
    ctx->mmu_idx = hold_mmu_idx;
    return true;
}

/* STWA/STDA: store absolute; same MMU-index swap as trans_lda. */
static bool trans_sta(DisasContext *ctx, arg_ldst *a)
{
    int hold_mmu_idx = ctx->mmu_idx;

    CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
    ctx->mmu_idx = ctx->tb_flags & PSW_W ? MMU_ABS_W_IDX : MMU_ABS_IDX;
    trans_st(ctx, a);
    ctx->mmu_idx = hold_mmu_idx;
    return true;
}
/* LDIL: load immediate left — set the high bits of RT. */
static bool trans_ldil(DisasContext *ctx, arg_ldil *a)
{
    TCGv_i64 tcg_rt = dest_gpr(ctx, a->t);

    tcg_gen_movi_i64(tcg_rt, a->i);
    save_gpr(ctx, a->t, tcg_rt);
    cond_free(&ctx->null_cond);
    return true;
}

/* ADDIL: add immediate left; result always lands in %r1. */
static bool trans_addil(DisasContext *ctx, arg_addil *a)
{
    TCGv_i64 tcg_rt = load_gpr(ctx, a->r);
    TCGv_i64 tcg_r1 = dest_gpr(ctx, 1);

    tcg_gen_addi_i64(tcg_r1, tcg_rt, a->i);
    save_gpr(ctx, 1, tcg_r1);
    cond_free(&ctx->null_cond);
    return true;
}

/* LDO: load offset — rt = rb + disp, no memory access. */
static bool trans_ldo(DisasContext *ctx, arg_ldo *a)
{
    TCGv_i64 tcg_rt = dest_gpr(ctx, a->t);

    /* Special case rb == 0, for the LDI pseudo-op.
       The COPY pseudo-op is handled for free within tcg_gen_addi_i64. */
    if (a->b == 0) {
        tcg_gen_movi_i64(tcg_rt, a->i);
    } else {
        tcg_gen_addi_i64(tcg_rt, cpu_gr[a->b], a->i);
    }
    save_gpr(ctx, a->t, tcg_rt);
    cond_free(&ctx->null_cond);
    return true;
}
/*
 * Common translation for COMPB/COMPIB: compare in1 against GR[r]
 * (via subtraction) and branch on the chosen condition.
 */
static bool do_cmpb(DisasContext *ctx, unsigned r, TCGv_i64 in1,
                    unsigned c, unsigned f, bool d, unsigned n, int disp)
{
    TCGv_i64 dest, in2, sv;
    DisasCond cond;

    in2 = load_gpr(ctx, r);
    dest = tcg_temp_new_i64();

    tcg_gen_sub_i64(dest, in1, in2);

    sv = NULL;
    if (cond_need_sv(c)) {
        sv = do_sub_sv(ctx, dest, in1, in2);
    }

    cond = do_sub_cond(ctx, c * 2 + f, d, dest, in1, in2, sv);
    return do_cbranch(ctx, disp, n, &cond);
}

/* CMPB: compare registers and branch. */
static bool trans_cmpb(DisasContext *ctx, arg_cmpb *a)
{
    /* The doubleword (d) completer is pa2.0 only. */
    if (!ctx->is_pa20 && a->d) {
        return false;
    }
    nullify_over(ctx);
    return do_cmpb(ctx, a->r2, load_gpr(ctx, a->r1),
                   a->c, a->f, a->d, a->n, a->disp);
}

/* CMPIB: compare immediate and branch. */
static bool trans_cmpbi(DisasContext *ctx, arg_cmpbi *a)
{
    if (!ctx->is_pa20 && a->d) {
        return false;
    }
    nullify_over(ctx);
    return do_cmpb(ctx, a->r, tcg_constant_i64(a->i),
                   a->c, a->f, a->d, a->n, a->disp);
}
/*
 * Common translation for ADDB/ADDIB: GR[r] += in1, write the sum
 * back, and branch on the chosen condition.
 */
static bool do_addb(DisasContext *ctx, unsigned r, TCGv_i64 in1,
                    unsigned c, unsigned f, unsigned n, int disp)
{
    TCGv_i64 dest, in2, sv, cb_cond;
    DisasCond cond;
    bool d = false;

    /*
     * For hppa64, the ADDB conditions change with PSW.W,
     * dropping ZNV, SV, OD in favor of double-word EQ, LT, LE.
     */
    if (ctx->tb_flags & PSW_W) {
        d = c >= 5;
        if (d) {
            c &= 3;
        }
    }

    in2 = load_gpr(ctx, r);
    dest = tcg_temp_new_i64();
    sv = NULL;
    cb_cond = NULL;

    if (cond_need_cb(c)) {
        /* Compute the carry-out explicitly via the xor trick. */
        TCGv_i64 cb = tcg_temp_new_i64();
        TCGv_i64 cb_msb = tcg_temp_new_i64();

        tcg_gen_movi_i64(cb_msb, 0);
        tcg_gen_add2_i64(dest, cb_msb, in1, cb_msb, in2, cb_msb);
        tcg_gen_xor_i64(cb, in1, in2);
        tcg_gen_xor_i64(cb, cb, dest);
        cb_cond = get_carry(ctx, d, cb, cb_msb);
    } else {
        tcg_gen_add_i64(dest, in1, in2);
    }
    if (cond_need_sv(c)) {
        sv = do_add_sv(ctx, dest, in1, in2);
    }

    cond = do_cond(ctx, c * 2 + f, d, dest, cb_cond, sv);
    save_gpr(ctx, r, dest);
    return do_cbranch(ctx, disp, n, &cond);
}

/* ADDB: add registers and branch. */
static bool trans_addb(DisasContext *ctx, arg_addb *a)
{
    nullify_over(ctx);
    return do_addb(ctx, a->r2, load_gpr(ctx, a->r1), a->c, a->f, a->n, a->disp);
}

/* ADDIB: add immediate and branch. */
static bool trans_addbi(DisasContext *ctx, arg_addbi *a)
{
    nullify_over(ctx);
    return do_addb(ctx, a->r, tcg_constant_i64(a->i), a->c, a->f, a->n, a->disp);
}
/*
 * BB: branch on bit, bit number taken from SAR.  Shift the selected
 * bit into the sign position and test the sign.
 */
static bool trans_bb_sar(DisasContext *ctx, arg_bb_sar *a)
{
    TCGv_i64 tmp, tcg_r;
    DisasCond cond;

    nullify_over(ctx);

    tmp = tcg_temp_new_i64();
    tcg_r = load_gpr(ctx, a->r);
    if (cond_need_ext(ctx, a->d)) {
        /* Force shift into [32,63] */
        tcg_gen_ori_i64(tmp, cpu_sar, 32);
        tcg_gen_shl_i64(tmp, tcg_r, tmp);
    } else {
        tcg_gen_shl_i64(tmp, tcg_r, cpu_sar);
    }

    cond = cond_make_0_tmp(a->c ? TCG_COND_GE : TCG_COND_LT, tmp);
    return do_cbranch(ctx, a->disp, a->n, &cond);
}

/* BB: branch on bit, bit number given as an immediate. */
static bool trans_bb_imm(DisasContext *ctx, arg_bb_imm *a)
{
    TCGv_i64 tmp, tcg_r;
    DisasCond cond;
    int p;

    nullify_over(ctx);

    tmp = tcg_temp_new_i64();
    tcg_r = load_gpr(ctx, a->r);
    /* Shift bit p into the sign; for 32-bit conditions bias by 32. */
    p = a->p | (cond_need_ext(ctx, a->d) ? 32 : 0);
    tcg_gen_shli_i64(tmp, tcg_r, p);

    cond = cond_make_0(a->c ? TCG_COND_GE : TCG_COND_LT, tmp);
    return do_cbranch(ctx, a->disp, a->n, &cond);
}
/* MOVB: copy r1 to r2 and branch on the (32-bit) condition of the value. */
static bool trans_movb(DisasContext *ctx, arg_movb *a)
{
    TCGv_i64 dest;
    DisasCond cond;

    nullify_over(ctx);

    dest = dest_gpr(ctx, a->r2);
    if (a->r1 == 0) {
        tcg_gen_movi_i64(dest, 0);
    } else {
        tcg_gen_mov_i64(dest, cpu_gr[a->r1]);
    }

    /* All MOVB conditions are 32-bit. */
    cond = do_sed_cond(ctx, a->c, false, dest);
    return do_cbranch(ctx, a->disp, a->n, &cond);
}

/* MOVIB: load immediate into r and branch on its condition. */
static bool trans_movbi(DisasContext *ctx, arg_movbi *a)
{
    TCGv_i64 dest;
    DisasCond cond;

    nullify_over(ctx);

    dest = dest_gpr(ctx, a->r);
    tcg_gen_movi_i64(dest, a->i);

    /* All MOVBI conditions are 32-bit. */
    cond = do_sed_cond(ctx, a->c, false, dest);
    return do_cbranch(ctx, a->disp, a->n, &cond);
}
/*
 * SHRPW/SHRPD with shift amount in SAR: shift the double-width value
 * r1:r2 right by SAR and write the low word/doubleword to rt.
 * The r1 == 0 and r1 == r2 cases degenerate to plain shift and
 * rotate respectively.
 */
static bool trans_shrp_sar(DisasContext *ctx, arg_shrp_sar *a)
{
    TCGv_i64 dest, src2;

    if (!ctx->is_pa20 && a->d) {
        return false;
    }
    if (a->c) {
        nullify_over(ctx);
    }

    dest = dest_gpr(ctx, a->t);
    src2 = load_gpr(ctx, a->r2);
    if (a->r1 == 0) {
        /* High half is zero: a plain logical right shift. */
        if (a->d) {
            tcg_gen_shr_i64(dest, src2, cpu_sar);
        } else {
            TCGv_i64 tmp = tcg_temp_new_i64();

            tcg_gen_ext32u_i64(dest, src2);
            tcg_gen_andi_i64(tmp, cpu_sar, 31);
            tcg_gen_shr_i64(dest, dest, tmp);
        }
    } else if (a->r1 == a->r2) {
        /* Both halves identical: a rotate. */
        if (a->d) {
            tcg_gen_rotr_i64(dest, src2, cpu_sar);
        } else {
            TCGv_i32 t32 = tcg_temp_new_i32();
            TCGv_i32 s32 = tcg_temp_new_i32();

            tcg_gen_extrl_i64_i32(t32, src2);
            tcg_gen_extrl_i64_i32(s32, cpu_sar);
            tcg_gen_andi_i32(s32, s32, 31);
            tcg_gen_rotr_i32(t32, t32, s32);
            tcg_gen_extu_i32_i64(dest, t32);
        }
    } else {
        TCGv_i64 src1 = load_gpr(ctx, a->r1);

        if (a->d) {
            /* Funnel shift: (src1 << (64 - sar)) | (src2 >> sar),
               done as two shifts to avoid an undefined shift by 64. */
            TCGv_i64 t = tcg_temp_new_i64();
            TCGv_i64 n = tcg_temp_new_i64();

            tcg_gen_xori_i64(n, cpu_sar, 63);
            tcg_gen_shl_i64(t, src1, n);
            tcg_gen_shli_i64(t, t, 1);
            tcg_gen_shr_i64(dest, src2, cpu_sar);
            tcg_gen_or_i64(dest, dest, t);
        } else {
            /* Concatenate the 32-bit halves and shift the 64-bit whole. */
            TCGv_i64 t = tcg_temp_new_i64();
            TCGv_i64 s = tcg_temp_new_i64();

            tcg_gen_concat32_i64(t, src2, src1);
            tcg_gen_andi_i64(s, cpu_sar, 31);
            tcg_gen_shr_i64(dest, t, s);
        }
    }
    save_gpr(ctx, a->t, dest);

    /* Install the new nullification. */
    cond_free(&ctx->null_cond);
    if (a->c) {
        ctx->null_cond = do_sed_cond(ctx, a->c, false, dest);
    }
    return nullify_end(ctx);
}
/*
 * SHRPW/SHRPD with an immediate shift amount.  The amount is encoded
 * as a bit position (cpos) in big-endian numbering, converted here
 * to a little-endian shift count sa.
 */
static bool trans_shrp_imm(DisasContext *ctx, arg_shrp_imm *a)
{
    unsigned width, sa;
    TCGv_i64 dest, t2;

    if (!ctx->is_pa20 && a->d) {
        return false;
    }
    if (a->c) {
        nullify_over(ctx);
    }

    width = a->d ? 64 : 32;
    sa = width - 1 - a->cpos;

    dest = dest_gpr(ctx, a->t);
    t2 = load_gpr(ctx, a->r2);
    if (a->r1 == 0) {
        /* High half zero: plain extract. */
        tcg_gen_extract_i64(dest, t2, sa, width - sa);
    } else if (width == TARGET_LONG_BITS) {
        /* Full-width funnel shift maps directly onto extract2. */
        tcg_gen_extract2_i64(dest, t2, cpu_gr[a->r1], sa);
    } else {
        assert(!a->d);
        if (a->r1 == a->r2) {
            /* Same register on both sides: a 32-bit rotate. */
            TCGv_i32 t32 = tcg_temp_new_i32();
            tcg_gen_extrl_i64_i32(t32, t2);
            tcg_gen_rotri_i32(t32, t32, sa);
            tcg_gen_extu_i32_i64(dest, t32);
        } else {
            tcg_gen_concat32_i64(dest, t2, cpu_gr[a->r1]);
            tcg_gen_extract_i64(dest, dest, sa, 32);
        }
    }
    save_gpr(ctx, a->t, dest);

    /* Install the new nullification. */
    cond_free(&ctx->null_cond);
    if (a->c) {
        ctx->null_cond = do_sed_cond(ctx, a->c, false, dest);
    }
    return nullify_end(ctx);
}
/*
 * EXTRW/EXTRD with the bit position taken from SAR: extract a len-bit
 * field, signed or unsigned.
 */
static bool trans_extr_sar(DisasContext *ctx, arg_extr_sar *a)
{
    unsigned widthm1 = a->d ? 63 : 31;
    TCGv_i64 dest, src, tmp;

    if (!ctx->is_pa20 && a->d) {
        return false;
    }
    if (a->c) {
        nullify_over(ctx);
    }

    dest = dest_gpr(ctx, a->t);
    src = load_gpr(ctx, a->r);
    tmp = tcg_temp_new_i64();

    /* Recall that SAR is using big-endian bit numbering. */
    tcg_gen_andi_i64(tmp, cpu_sar, widthm1);
    tcg_gen_xori_i64(tmp, tmp, widthm1);

    if (a->se) {
        /* Signed extract: shift the field to bit 0 then sign-extend. */
        if (!a->d) {
            tcg_gen_ext32s_i64(dest, src);
            src = dest;
        }
        tcg_gen_sar_i64(dest, src, tmp);
        tcg_gen_sextract_i64(dest, dest, 0, a->len);
    } else {
        /* Unsigned extract. */
        if (!a->d) {
            tcg_gen_ext32u_i64(dest, src);
            src = dest;
        }
        tcg_gen_shr_i64(dest, src, tmp);
        tcg_gen_extract_i64(dest, dest, 0, a->len);
    }
    save_gpr(ctx, a->t, dest);

    /* Install the new nullification. */
    cond_free(&ctx->null_cond);
    if (a->c) {
        ctx->null_cond = do_sed_cond(ctx, a->c, a->d, dest);
    }
    return nullify_end(ctx);
}

/* EXTRW/EXTRD with an immediate bit position. */
static bool trans_extr_imm(DisasContext *ctx, arg_extr_imm *a)
{
    unsigned len, cpos, width;
    TCGv_i64 dest, src;

    if (!ctx->is_pa20 && a->d) {
        return false;
    }
    if (a->c) {
        nullify_over(ctx);
    }

    len = a->len;
    width = a->d ? 64 : 32;
    /* Convert the big-endian position to a little-endian bit offset,
       clamping the field to the register width. */
    cpos = width - 1 - a->pos;
    if (cpos + len > width) {
        len = width - cpos;
    }

    dest = dest_gpr(ctx, a->t);
    src = load_gpr(ctx, a->r);
    if (a->se) {
        tcg_gen_sextract_i64(dest, src, cpos, len);
    } else {
        tcg_gen_extract_i64(dest, src, cpos, len);
    }
    save_gpr(ctx, a->t, dest);

    /* Install the new nullification. */
    cond_free(&ctx->null_cond);
    if (a->c) {
        ctx->null_cond = do_sed_cond(ctx, a->c, a->d, dest);
    }
    return nullify_end(ctx);
}
/*
 * DEPWI/DEPDI: deposit a 5-bit signed immediate into a field of RT.
 * Both the field contents and the preserved bits are computable at
 * translate time, so the deposit folds to and/or masks.
 */
static bool trans_depi_imm(DisasContext *ctx, arg_depi_imm *a)
{
    unsigned len, width;
    uint64_t mask0, mask1;
    TCGv_i64 dest;

    if (!ctx->is_pa20 && a->d) {
        return false;
    }
    if (a->c) {
        nullify_over(ctx);
    }

    len = a->len;
    width = a->d ? 64 : 32;
    if (a->cpos + len > width) {
        len = width - a->cpos;
    }

    dest = dest_gpr(ctx, a->t);
    mask0 = deposit64(0, a->cpos, len, a->i);   /* field bits to set */
    mask1 = deposit64(-1, a->cpos, len, a->i);  /* bits to preserve + field */

    if (a->nz) {
        /* Non-zero form: merge into the existing value of RT. */
        TCGv_i64 src = load_gpr(ctx, a->t);
        tcg_gen_andi_i64(dest, src, mask1);
        tcg_gen_ori_i64(dest, dest, mask0);
    } else {
        /* Zero form: everything outside the field is cleared. */
        tcg_gen_movi_i64(dest, mask0);
    }
    save_gpr(ctx, a->t, dest);

    /* Install the new nullification. */
    cond_free(&ctx->null_cond);
    if (a->c) {
        ctx->null_cond = do_sed_cond(ctx, a->c, a->d, dest);
    }
    return nullify_end(ctx);
}

/* DEPW/DEPD: deposit a field of GR[r] into RT at a fixed position. */
static bool trans_dep_imm(DisasContext *ctx, arg_dep_imm *a)
{
    unsigned rs = a->nz ? a->t : 0;
    unsigned len, width;
    TCGv_i64 dest, val;

    if (!ctx->is_pa20 && a->d) {
        return false;
    }
    if (a->c) {
        nullify_over(ctx);
    }

    len = a->len;
    width = a->d ? 64 : 32;
    if (a->cpos + len > width) {
        len = width - a->cpos;
    }

    dest = dest_gpr(ctx, a->t);
    val = load_gpr(ctx, a->r);
    if (rs == 0) {
        tcg_gen_deposit_z_i64(dest, val, a->cpos, len);
    } else {
        tcg_gen_deposit_i64(dest, cpu_gr[rs], val, a->cpos, len);
    }
    save_gpr(ctx, a->t, dest);

    /* Install the new nullification. */
    cond_free(&ctx->null_cond);
    if (a->c) {
        ctx->null_cond = do_sed_cond(ctx, a->c, a->d, dest);
    }
    return nullify_end(ctx);
}
/*
 * Common translation for DEPW/DEPD with the deposit position taken
 * from SAR: build a mask for the len-bit field, shift field and mask
 * into position, and merge into GR[rs] (or zero for the ,z form).
 */
static bool do_dep_sar(DisasContext *ctx, unsigned rt, unsigned c,
                       bool d, bool nz, unsigned len, TCGv_i64 val)
{
    unsigned rs = nz ? rt : 0;
    unsigned widthm1 = d ? 63 : 31;
    TCGv_i64 mask, tmp, shift, dest;
    uint64_t msb = 1ULL << (len - 1);

    dest = dest_gpr(ctx, rt);
    shift = tcg_temp_new_i64();
    tmp = tcg_temp_new_i64();

    /* Convert big-endian bit numbering in SAR to left-shift. */
    tcg_gen_andi_i64(shift, cpu_sar, widthm1);
    tcg_gen_xori_i64(shift, shift, widthm1);

    mask = tcg_temp_new_i64();
    tcg_gen_movi_i64(mask, msb + (msb - 1));    /* len low bits set */
    tcg_gen_and_i64(tmp, val, mask);
    if (rs) {
        /* Merge the shifted field into the preserved bits of GR[rs]. */
        tcg_gen_shl_i64(mask, mask, shift);
        tcg_gen_shl_i64(tmp, tmp, shift);
        tcg_gen_andc_i64(dest, cpu_gr[rs], mask);
        tcg_gen_or_i64(dest, dest, tmp);
    } else {
        /* Zero form: only the shifted field survives. */
        tcg_gen_shl_i64(dest, tmp, shift);
    }
    save_gpr(ctx, rt, dest);

    /* Install the new nullification. */
    cond_free(&ctx->null_cond);
    if (c) {
        ctx->null_cond = do_sed_cond(ctx, c, d, dest);
    }
    return nullify_end(ctx);
}

/* DEPW/DEPD, position in SAR, source in a register. */
static bool trans_dep_sar(DisasContext *ctx, arg_dep_sar *a)
{
    if (!ctx->is_pa20 && a->d) {
        return false;
    }
    if (a->c) {
        nullify_over(ctx);
    }
    return do_dep_sar(ctx, a->t, a->c, a->d, a->nz, a->len,
                      load_gpr(ctx, a->r));
}

/* DEPWI/DEPDI, position in SAR, source an immediate. */
static bool trans_depi_sar(DisasContext *ctx, arg_depi_sar *a)
{
    if (!ctx->is_pa20 && a->d) {
        return false;
    }
    if (a->c) {
        nullify_over(ctx);
    }
    return do_dep_sar(ctx, a->t, a->c, a->d, a->nz, a->len,
                      tcg_constant_i64(a->i));
}
/*
 * BE/BLE: branch external — inter-space branch to sp|(rb + disp).
 * For user-only, spaces are not implemented, so this reduces to an
 * indirect branch; for system emulation both halves of the space
 * and offset queues must be updated.
 */
static bool trans_be(DisasContext *ctx, arg_be *a)
{
    TCGv_i64 tmp;

#ifdef CONFIG_USER_ONLY
    /* ??? It seems like there should be a good way of using
       "be disp(sr2, r0)", the canonical gateway entry mechanism
       to our advantage.  But that appears to be inconvenient to
       manage along side branch delay slots.  Therefore we handle
       entry into the gateway page via absolute address.  */
    /* Since we don't implement spaces, just branch.  Do notice the special
       case of "be disp(*,r0)" using a direct branch to disp, so that we can
       goto_tb to the TB containing the syscall.  */
    if (a->b == 0) {
        return do_dbranch(ctx, a->disp, a->l, a->n);
    }
#else
    nullify_over(ctx);
#endif

    tmp = tcg_temp_new_i64();
    tcg_gen_addi_i64(tmp, load_gpr(ctx, a->b), a->disp);
    tmp = do_ibranch_priv(ctx, tmp);

#ifdef CONFIG_USER_ONLY
    return do_ibranch(ctx, tmp, a->l, a->n);
#else
    TCGv_i64 new_spc = tcg_temp_new_i64();

    load_spr(ctx, new_spc, a->sp);
    if (a->l) {
        /* BLE: the return link goes in %r31 and SR0 gets the old space. */
        copy_iaoq_entry(ctx, cpu_gr[31], ctx->iaoq_n, ctx->iaoq_n_var);
        tcg_gen_mov_i64(cpu_sr[0], cpu_iasq_f);
    }
    if (a->n && use_nullify_skip(ctx)) {
        /* Delay slot is nullified: jump straight past it. */
        copy_iaoq_entry(ctx, cpu_iaoq_f, -1, tmp);
        tcg_gen_addi_i64(tmp, tmp, 4);
        copy_iaoq_entry(ctx, cpu_iaoq_b, -1, tmp);
        tcg_gen_mov_i64(cpu_iasq_f, new_spc);
        tcg_gen_mov_i64(cpu_iasq_b, cpu_iasq_f);
    } else {
        copy_iaoq_entry(ctx, cpu_iaoq_f, ctx->iaoq_b, cpu_iaoq_b);
        if (ctx->iaoq_b == -1) {
            tcg_gen_mov_i64(cpu_iasq_f, cpu_iasq_b);
        }
        copy_iaoq_entry(ctx, cpu_iaoq_b, -1, tmp);
        tcg_gen_mov_i64(cpu_iasq_b, new_spc);
        nullify_set(ctx, a->n);
    }
    tcg_gen_lookup_and_goto_ptr();
    ctx->base.is_jmp = DISAS_NORETURN;
    return nullify_end(ctx);
#endif
}
/* B,L: pc-relative branch and link. */
static bool trans_bl(DisasContext *ctx, arg_bl *a)
{
    return do_dbranch(ctx, iaoq_dest(ctx, a->disp), a->l, a->n);
}

/*
 * B,GATE: branch through a gateway page, possibly raising privilege
 * according to the page's access rights.
 */
static bool trans_b_gate(DisasContext *ctx, arg_b_gate *a)
{
    uint64_t dest = iaoq_dest(ctx, a->disp);

    nullify_over(ctx);

    /* Make sure the caller hasn't done something weird with the queue.
     * ??? This is not quite the same as the PSW[B] bit, which would be
     * expensive to track.  Real hardware will trap for
     *    b  gateway
     *    b  gateway+4  (in delay slot of first branch)
     * However, checking for a non-sequential instruction queue *will*
     * diagnose the security hole
     *    b  gateway
     *    b  evil
     * in which instructions at evil would run with increased privs.
     */
    if (ctx->iaoq_b == -1 || ctx->iaoq_b != ctx->iaoq_f + 4) {
        return gen_illegal(ctx);
    }

#ifndef CONFIG_USER_ONLY
    if (ctx->tb_flags & PSW_C) {
        CPUHPPAState *env = cpu_env(ctx->cs);
        int type = hppa_artype_for_page(env, ctx->base.pc_next);
        /* If we could not find a TLB entry, then we need to generate an
           ITLB miss exception so the kernel will provide it.
           The resulting TLB fill operation will invalidate this TB and
           we will re-translate, at which point we *will* be able to find
           the TLB entry and determine if this is in fact a gateway page. */
        if (type < 0) {
            gen_excp(ctx, EXCP_ITLB_MISS);
            return true;
        }
        /* No change for non-gateway pages or for priv decrease. */
        if (type >= 4 && type - 4 < ctx->privilege) {
            dest = deposit32(dest, 0, 2, type - 4);
        }
    } else {
        dest &= -4;  /* priv = 0 */
    }
#endif

    if (a->l) {
        /* Record the return address, tagged with the current privilege. */
        TCGv_i64 tmp = dest_gpr(ctx, a->l);
        if (ctx->privilege < 3) {
            tcg_gen_andi_i64(tmp, tmp, -4);
        }
        tcg_gen_ori_i64(tmp, tmp, ctx->privilege);
        save_gpr(ctx, a->l, tmp);
    }

    return do_dbranch(ctx, dest, 0, a->n);
}
/* BLR: branch and link register — target = PC + 8 + (GR[x] << 3). */
static bool trans_blr(DisasContext *ctx, arg_blr *a)
{
    if (a->x) {
        TCGv_i64 tmp = tcg_temp_new_i64();
        tcg_gen_shli_i64(tmp, load_gpr(ctx, a->x), 3);
        tcg_gen_addi_i64(tmp, tmp, ctx->iaoq_f + 8);
        /* The computation here never changes privilege level. */
        return do_ibranch(ctx, tmp, a->l, a->n);
    } else {
        /* BLR R0,RX is a good way to load PC+8 into RX. */
        return do_dbranch(ctx, ctx->iaoq_f + 8, a->l, a->n);
    }
}

/* BV: branch vectored — target = GR[b] + (GR[x] << 3). */
static bool trans_bv(DisasContext *ctx, arg_bv *a)
{
    TCGv_i64 dest;

    if (a->x == 0) {
        dest = load_gpr(ctx, a->b);
    } else {
        dest = tcg_temp_new_i64();
        tcg_gen_shli_i64(dest, load_gpr(ctx, a->x), 3);
        tcg_gen_add_i64(dest, dest, load_gpr(ctx, a->b));
    }
    dest = do_ibranch_priv(ctx, dest);
    return do_ibranch(ctx, dest, 0, a->n);
}
/*
 * BVE: branch vectored external.  The target space is derived from
 * the branch address itself (space_select), so both queues change.
 */
static bool trans_bve(DisasContext *ctx, arg_bve *a)
{
    TCGv_i64 dest;

#ifdef CONFIG_USER_ONLY
    dest = do_ibranch_priv(ctx, load_gpr(ctx, a->b));
    return do_ibranch(ctx, dest, a->l, a->n);
#else
    nullify_over(ctx);
    dest = do_ibranch_priv(ctx, load_gpr(ctx, a->b));

    copy_iaoq_entry(ctx, cpu_iaoq_f, ctx->iaoq_b, cpu_iaoq_b);
    if (ctx->iaoq_b == -1) {
        tcg_gen_mov_i64(cpu_iasq_f, cpu_iasq_b);
    }
    copy_iaoq_entry(ctx, cpu_iaoq_b, -1, dest);
    tcg_gen_mov_i64(cpu_iasq_b, space_select(ctx, 0, dest));
    if (a->l) {
        copy_iaoq_entry(ctx, cpu_gr[a->l], ctx->iaoq_n, ctx->iaoq_n_var);
    }
    nullify_set(ctx, a->n);
    tcg_gen_lookup_and_goto_ptr();
    ctx->base.is_jmp = DISAS_NORETURN;
    return nullify_end(ctx);
#endif
}

static bool trans_nopbts(DisasContext *ctx, arg_nopbts *a)
{
    /* All branch target stack instructions implement as nop. */
    return ctx->is_pa20;
}
/* Single-precision FCPY expansion: a plain register move. */
static void gen_fcpy_f(TCGv_i32 dst, TCGv_env unused, TCGv_i32 src)
{
    tcg_gen_mov_i32(dst, src);
}

/* FID: floating-point identify — store the CPU model id into fr0. */
static bool trans_fid_f(DisasContext *ctx, arg_fid_f *a)
{
    uint64_t ret;

    if (ctx->is_pa20) {
        ret = 0x13080000000000ULL; /* PA8700 (PCX-W2) */
    } else {
        ret = 0x0f080000000000ULL; /* PA7300LC (PCX-L2) */
    }

    nullify_over(ctx);
    save_frd(0, tcg_constant_i64(ret));
    return nullify_end(ctx);
}
/*
 * Unary floating-point class-0/1 operations.  FCPY/FABS/FNEG/FNEGABS
 * are pure bit operations on the IEEE sign bit and never raise
 * exceptions; FSQRT/FRND go through helpers for full IEEE semantics.
 */
static bool trans_fcpy_f(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_wew(ctx, a->t, a->r, gen_fcpy_f);
}

static void gen_fcpy_d(TCGv_i64 dst, TCGv_env unused, TCGv_i64 src)
{
    tcg_gen_mov_i64(dst, src);
}

static bool trans_fcpy_d(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_ded(ctx, a->t, a->r, gen_fcpy_d);
}

/* FABS: clear the sign bit. */
static void gen_fabs_f(TCGv_i32 dst, TCGv_env unused, TCGv_i32 src)
{
    tcg_gen_andi_i32(dst, src, INT32_MAX);
}

static bool trans_fabs_f(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_wew(ctx, a->t, a->r, gen_fabs_f);
}

static void gen_fabs_d(TCGv_i64 dst, TCGv_env unused, TCGv_i64 src)
{
    tcg_gen_andi_i64(dst, src, INT64_MAX);
}

static bool trans_fabs_d(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_ded(ctx, a->t, a->r, gen_fabs_d);
}

static bool trans_fsqrt_f(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_wew(ctx, a->t, a->r, gen_helper_fsqrt_s);
}

static bool trans_fsqrt_d(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_ded(ctx, a->t, a->r, gen_helper_fsqrt_d);
}

static bool trans_frnd_f(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_wew(ctx, a->t, a->r, gen_helper_frnd_s);
}

static bool trans_frnd_d(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_ded(ctx, a->t, a->r, gen_helper_frnd_d);
}

/* FNEG: flip the sign bit. */
static void gen_fneg_f(TCGv_i32 dst, TCGv_env unused, TCGv_i32 src)
{
    tcg_gen_xori_i32(dst, src, INT32_MIN);
}

static bool trans_fneg_f(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_wew(ctx, a->t, a->r, gen_fneg_f);
}

static void gen_fneg_d(TCGv_i64 dst, TCGv_env unused, TCGv_i64 src)
{
    tcg_gen_xori_i64(dst, src, INT64_MIN);
}

static bool trans_fneg_d(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_ded(ctx, a->t, a->r, gen_fneg_d);
}

/* FNEGABS: force the sign bit on. */
static void gen_fnegabs_f(TCGv_i32 dst, TCGv_env unused, TCGv_i32 src)
{
    tcg_gen_ori_i32(dst, src, INT32_MIN);
}

static bool trans_fnegabs_f(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_wew(ctx, a->t, a->r, gen_fnegabs_f);
}

static void gen_fnegabs_d(TCGv_i64 dst, TCGv_env unused, TCGv_i64 src)
{
    tcg_gen_ori_i64(dst, src, INT64_MIN);
}

static bool trans_fnegabs_d(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_ded(ctx, a->t, a->r, gen_fnegabs_d);
}
/*
 * FCNV: floating-point conversions.  Naming: f/d = single/double
 * float, w/q = word/doubleword integer, u prefix = unsigned,
 * t prefix = convert with truncation (round toward zero).
 * The do_fop_{wew,wed,dew,ded} wrappers select 32/64-bit source and
 * destination register accessors.
 */
static bool trans_fcnv_d_f(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_d_s);
}

static bool trans_fcnv_f_d(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_s_d);
}

static bool trans_fcnv_w_f(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_wew(ctx, a->t, a->r, gen_helper_fcnv_w_s);
}

static bool trans_fcnv_q_f(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_dw_s);
}

static bool trans_fcnv_w_d(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_w_d);
}

static bool trans_fcnv_q_d(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_ded(ctx, a->t, a->r, gen_helper_fcnv_dw_d);
}

/* Float to signed integer, round per FPSR. */
static bool trans_fcnv_f_w(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_wew(ctx, a->t, a->r, gen_helper_fcnv_s_w);
}

static bool trans_fcnv_d_w(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_d_w);
}

static bool trans_fcnv_f_q(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_s_dw);
}

static bool trans_fcnv_d_q(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_ded(ctx, a->t, a->r, gen_helper_fcnv_d_dw);
}

/* Float to signed integer with truncation. */
static bool trans_fcnv_t_f_w(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_wew(ctx, a->t, a->r, gen_helper_fcnv_t_s_w);
}

static bool trans_fcnv_t_d_w(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_t_d_w);
}

static bool trans_fcnv_t_f_q(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_t_s_dw);
}

static bool trans_fcnv_t_d_q(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_ded(ctx, a->t, a->r, gen_helper_fcnv_t_d_dw);
}

/* Unsigned integer to float. */
static bool trans_fcnv_uw_f(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_wew(ctx, a->t, a->r, gen_helper_fcnv_uw_s);
}

static bool trans_fcnv_uq_f(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_udw_s);
}

static bool trans_fcnv_uw_d(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_uw_d);
}

static bool trans_fcnv_uq_d(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_ded(ctx, a->t, a->r, gen_helper_fcnv_udw_d);
}

/* Float to unsigned integer, round per FPSR. */
static bool trans_fcnv_f_uw(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_wew(ctx, a->t, a->r, gen_helper_fcnv_s_uw);
}

static bool trans_fcnv_d_uw(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_d_uw);
}

static bool trans_fcnv_f_uq(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_s_udw);
}

static bool trans_fcnv_d_uq(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_ded(ctx, a->t, a->r, gen_helper_fcnv_d_udw);
}

/* Float to unsigned integer with truncation. */
static bool trans_fcnv_t_f_uw(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_wew(ctx, a->t, a->r, gen_helper_fcnv_t_s_uw);
}

static bool trans_fcnv_t_d_uw(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_t_d_uw);
}

static bool trans_fcnv_t_f_uq(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_t_s_udw);
}

static bool trans_fcnv_t_d_uq(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_ded(ctx, a->t, a->r, gen_helper_fcnv_t_d_udw);
}
/*
 * FCMP (single precision): compare FR[r1] with FR[r2] under condition 'c',
 * queue slot 'y'.  The helper records the result in env FP status state
 * (it takes tcg_env and returns nothing) -- presumably the FPSR C/CQ bits;
 * NOTE(review): extraction dropped the nullify_over line; restored here.
 */
static bool trans_fcmp_f(DisasContext *ctx, arg_fclass2 *a)
{
    TCGv_i32 ta, tb, tc, ty;

    nullify_over(ctx);

    ta = load_frw0_i32(a->r1);
    tb = load_frw0_i32(a->r2);
    ty = tcg_constant_i32(a->y);
    tc = tcg_constant_i32(a->c);

    gen_helper_fcmp_s(tcg_env, ta, tb, ty, tc);

    return nullify_end(ctx);
}
/*
 * FCMP (double precision): same contract as trans_fcmp_f but with 64-bit
 * operands loaded via load_frd0.
 * NOTE(review): extraction dropped the local declarations and nullify_over;
 * restored here.
 */
static bool trans_fcmp_d(DisasContext *ctx, arg_fclass2 *a)
{
    TCGv_i64 ta, tb;
    TCGv_i32 ty, tc;

    nullify_over(ctx);

    ta = load_frd0(a->r1);
    tb = load_frd0(a->r2);
    ty = tcg_constant_i32(a->y);
    tc = tcg_constant_i32(a->c);

    gen_helper_fcmp_d(tcg_env, ta, tb, ty, tc);

    return nullify_end(ctx);
}
4192 static bool trans_ftest(DisasContext
*ctx
, arg_ftest
*a
)
4198 t
= tcg_temp_new_i64();
4199 tcg_gen_ld32u_i64(t
, tcg_env
, offsetof(CPUHPPAState
, fr0_shadow
));
4206 case 0: /* simple */
4207 tcg_gen_andi_i64(t
, t
, 0x4000000);
4208 ctx
->null_cond
= cond_make_0(TCG_COND_NE
, t
);
4236 TCGv_i64 c
= tcg_constant_i64(mask
);
4237 tcg_gen_or_i64(t
, t
, c
);
4238 ctx
->null_cond
= cond_make(TCG_COND_EQ
, t
, c
);
4240 tcg_gen_andi_i64(t
, t
, mask
);
4241 ctx
->null_cond
= cond_make_0(TCG_COND_EQ
, t
);
4244 unsigned cbit
= (a
->y
^ 1) - 1;
4246 tcg_gen_extract_i64(t
, t
, 21 - cbit
, 1);
4247 ctx
->null_cond
= cond_make_0(TCG_COND_NE
, t
);
4251 return nullify_end(ctx
);
/* FADD, single precision: FR[t] = FR[r1] + FR[r2]. */
static bool trans_fadd_f(DisasContext *ctx, arg_fclass3 *a)
{
    return do_fop_weww(ctx, a->t, a->r1, a->r2, gen_helper_fadd_s);
}
/* FADD, double precision: FR[t] = FR[r1] + FR[r2]. */
static bool trans_fadd_d(DisasContext *ctx, arg_fclass3 *a)
{
    return do_fop_dedd(ctx, a->t, a->r1, a->r2, gen_helper_fadd_d);
}
/* FSUB, single precision: FR[t] = FR[r1] - FR[r2]. */
static bool trans_fsub_f(DisasContext *ctx, arg_fclass3 *a)
{
    return do_fop_weww(ctx, a->t, a->r1, a->r2, gen_helper_fsub_s);
}
/* FSUB, double precision: FR[t] = FR[r1] - FR[r2]. */
static bool trans_fsub_d(DisasContext *ctx, arg_fclass3 *a)
{
    return do_fop_dedd(ctx, a->t, a->r1, a->r2, gen_helper_fsub_d);
}
/* FMPY, single precision: FR[t] = FR[r1] * FR[r2]. */
static bool trans_fmpy_f(DisasContext *ctx, arg_fclass3 *a)
{
    return do_fop_weww(ctx, a->t, a->r1, a->r2, gen_helper_fmpy_s);
}
/* FMPY, double precision: FR[t] = FR[r1] * FR[r2]. */
static bool trans_fmpy_d(DisasContext *ctx, arg_fclass3 *a)
{
    return do_fop_dedd(ctx, a->t, a->r1, a->r2, gen_helper_fmpy_d);
}
/* FDIV, single precision: FR[t] = FR[r1] / FR[r2]. */
static bool trans_fdiv_f(DisasContext *ctx, arg_fclass3 *a)
{
    return do_fop_weww(ctx, a->t, a->r1, a->r2, gen_helper_fdiv_s);
}
/* FDIV, double precision: FR[t] = FR[r1] / FR[r2]. */
static bool trans_fdiv_d(DisasContext *ctx, arg_fclass3 *a)
{
    return do_fop_dedd(ctx, a->t, a->r1, a->r2, gen_helper_fdiv_d);
}
/*
 * XMPYU: fixed-point multiply of two 32-bit FP-register values, producing
 * a 64-bit product in the double-word destination register.
 * NOTE(review): extraction dropped the declarations, nullify_over, and the
 * save_frd store; restored here.
 */
static bool trans_xmpyu(DisasContext *ctx, arg_xmpyu *a)
{
    TCGv_i64 x, y;

    nullify_over(ctx);

    x = load_frw0_i64(a->r1);
    y = load_frw0_i64(a->r2);
    tcg_gen_mul_i64(x, x, y);
    save_frd(a->t, x);

    return nullify_end(ctx);
}
/*
 * Convert the fmpyadd single-precision register encodings to standard.
 * Bit 4 of the field selects the upper half of the register file; the
 * low four bits index within it, so 5-bit encodings map into [16, 63].
 */
static inline int fmpyadd_s_reg(unsigned r)
{
    unsigned upper_bank = r & 16;   /* bank-select bit */
    unsigned index = r & 15;        /* register index within the bank */

    return 16 + upper_bank * 2 + index;
}
/*
 * FMPYADD/FMPYSUB, single precision: a paired multiply plus add/sub,
 * with all five register fields remapped via fmpyadd_s_reg.
 * NOTE(review): extraction dropped the nullify_over line; restored here.
 */
static bool do_fmpyadd_s(DisasContext *ctx, arg_mpyadd *a, bool is_sub)
{
    int tm = fmpyadd_s_reg(a->tm);
    int ra = fmpyadd_s_reg(a->ra);
    int ta = fmpyadd_s_reg(a->ta);
    int rm2 = fmpyadd_s_reg(a->rm2);
    int rm1 = fmpyadd_s_reg(a->rm1);

    nullify_over(ctx);

    /* The multiply and the add/sub are two independent FP operations. */
    do_fop_weww(ctx, tm, rm1, rm2, gen_helper_fmpy_s);
    do_fop_weww(ctx, ta, ta, ra,
                is_sub ? gen_helper_fsub_s : gen_helper_fadd_s);

    return nullify_end(ctx);
}
/* FMPYADD, single precision. */
static bool trans_fmpyadd_f(DisasContext *ctx, arg_mpyadd *a)
{
    return do_fmpyadd_s(ctx, a, false);
}
/* FMPYSUB, single precision. */
static bool trans_fmpysub_f(DisasContext *ctx, arg_mpyadd *a)
{
    return do_fmpyadd_s(ctx, a, true);
}
/*
 * FMPYADD/FMPYSUB, double precision: register fields are used directly
 * (no single-precision remapping).
 * NOTE(review): extraction dropped the nullify_over line; restored here.
 */
static bool do_fmpyadd_d(DisasContext *ctx, arg_mpyadd *a, bool is_sub)
{
    nullify_over(ctx);

    do_fop_dedd(ctx, a->tm, a->rm1, a->rm2, gen_helper_fmpy_d);
    do_fop_dedd(ctx, a->ta, a->ta, a->ra,
                is_sub ? gen_helper_fsub_d : gen_helper_fadd_d);

    return nullify_end(ctx);
}
/* FMPYADD, double precision. */
static bool trans_fmpyadd_d(DisasContext *ctx, arg_mpyadd *a)
{
    return do_fmpyadd_d(ctx, a, false);
}
/* FMPYSUB, double precision. */
static bool trans_fmpysub_d(DisasContext *ctx, arg_mpyadd *a)
{
    return do_fmpyadd_d(ctx, a, true);
}
/*
 * FMPYFADD/FMPYNFADD, single precision: fused multiply-add
 * FR[t] = +/-(FR[rm1] * FR[rm2]) + FR[ra3].
 * NOTE(review): extraction dropped the declarations, nullify_over and the
 * if/else scaffolding selecting the negated form; restored here.
 */
static bool trans_fmpyfadd_f(DisasContext *ctx, arg_fmpyfadd_f *a)
{
    TCGv_i32 x, y, z;

    nullify_over(ctx);
    x = load_frw0_i32(a->rm1);
    y = load_frw0_i32(a->rm2);
    z = load_frw0_i32(a->ra3);

    if (a->neg) {
        gen_helper_fmpynfadd_s(x, tcg_env, x, y, z);
    } else {
        gen_helper_fmpyfadd_s(x, tcg_env, x, y, z);
    }

    save_frw_i32(a->t, x);
    return nullify_end(ctx);
}
/*
 * FMPYFADD/FMPYNFADD, double precision: fused multiply-add
 * FR[t] = +/-(FR[rm1] * FR[rm2]) + FR[ra3].
 * NOTE(review): extraction dropped the declarations, nullify_over, the
 * if/else scaffolding and the save_frd store; restored here.
 */
static bool trans_fmpyfadd_d(DisasContext *ctx, arg_fmpyfadd_d *a)
{
    TCGv_i64 x, y, z;

    nullify_over(ctx);
    x = load_frd0(a->rm1);
    y = load_frd0(a->rm2);
    z = load_frd0(a->ra3);

    if (a->neg) {
        gen_helper_fmpynfadd_d(x, tcg_env, x, y, z);
    } else {
        gen_helper_fmpyfadd_d(x, tcg_env, x, y, z);
    }

    save_frd(a->t, x);
    return nullify_end(ctx);
}
/*
 * DIAG: implementation-defined diagnostic opcode; privileged.
 * Only 0x100 (SeaBIOS-hppa's PDC BTLB call) is emulated; all other
 * values are logged as unimplemented and ignored.
 * NOTE(review): extraction dropped nullify_over, #endif and the final
 * return; restored here.
 */
static bool trans_diag(DisasContext *ctx, arg_diag *a)
{
    CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
#ifndef CONFIG_USER_ONLY
    if (a->i == 0x100) {
        /* emulate PDC BTLB, called by SeaBIOS-hppa */
        nullify_over(ctx);
        gen_helper_diag_btlb(tcg_env);
        return nullify_end(ctx);
    }
#endif
    qemu_log_mask(LOG_UNIMP, "DIAG opcode 0x%04x ignored\n", a->i);
    return true;
}
/*
 * Per-TB translator setup: recover flags, privilege, MMU index, and the
 * front/back instruction-address queue (IAOQ) values from tb->flags,
 * pc_first and cs_base, then bound max_insns to the current page.
 * NOTE(review): extraction dropped the #else/#endif scaffolding and a few
 * declaration lines; restored here -- verify against upstream.
 */
static void hppa_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *ctx = container_of(dcbase, DisasContext, base);
    int bound;

    ctx->tb_flags = ctx->base.tb->flags;
    ctx->is_pa20 = hppa_is_pa20(cpu_env(cs));

#ifdef CONFIG_USER_ONLY
    ctx->privilege = MMU_IDX_TO_PRIV(MMU_USER_IDX);
    ctx->mmu_idx = MMU_USER_IDX;
    /* User-only: IAOQ values carry the privilege in the low bits. */
    ctx->iaoq_f = ctx->base.pc_first | ctx->privilege;
    ctx->iaoq_b = ctx->base.tb->cs_base | ctx->privilege;
    ctx->unalign = (ctx->tb_flags & TB_FLAG_UNALIGN ? MO_UNALN : MO_ALIGN);
#else
    ctx->privilege = (ctx->tb_flags >> TB_FLAG_PRIV_SHIFT) & 3;
    ctx->mmu_idx = (ctx->tb_flags & PSW_D
                    ? PRIV_P_TO_MMU_IDX(ctx->privilege, ctx->tb_flags & PSW_P)
                    : ctx->tb_flags & PSW_W ? MMU_ABS_W_IDX : MMU_ABS_IDX);

    /* Recover the IAOQ values from the GVA + PRIV. */
    uint64_t cs_base = ctx->base.tb->cs_base;
    uint64_t iasq_f = cs_base & ~0xffffffffull;
    int32_t diff = cs_base;

    ctx->iaoq_f = (ctx->base.pc_first & ~iasq_f) + ctx->privilege;
    /* diff == 0 means the back of the queue is unknown (crossed spaces). */
    ctx->iaoq_b = (diff ? ctx->iaoq_f + diff : -1);
#endif
    ctx->iaoq_n = -1;
    ctx->iaoq_n_var = NULL;

    ctx->zero = tcg_constant_i64(0);

    /* Bound the number of instructions by those left on the page. */
    bound = -(ctx->base.pc_first | TARGET_PAGE_MASK) / 4;
    ctx->base.max_insns = MIN(ctx->base.max_insns, bound);
}
/* Per-TB start hook: initialize the nullification state for this block. */
static void hppa_tr_tb_start(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *ctx = container_of(dcbase, DisasContext, base);

    /* Seed the nullification status from PSW[N], as saved in TB->FLAGS. */
    ctx->null_cond = cond_make_f();
    ctx->psw_n_nonzero = false;
    if (ctx->tb_flags & PSW_N) {
        /* First insn of the TB is statically nullified. */
        ctx->null_cond.c = TCG_COND_ALWAYS;
        ctx->psw_n_nonzero = true;
    }
    ctx->null_lab = NULL;
}
/*
 * Per-insn start hook: record both IAOQ values in the insn-start marker
 * (used for exception unwinding), and remember the op for later patching.
 */
static void hppa_tr_insn_start(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *ctx = container_of(dcbase, DisasContext, base);

    tcg_gen_insn_start(ctx->iaoq_f, ctx->iaoq_b, 0);
    ctx->insn_start = tcg_last_op();
}
4480 static void hppa_tr_translate_insn(DisasContextBase
*dcbase
, CPUState
*cs
)
4482 DisasContext
*ctx
= container_of(dcbase
, DisasContext
, base
);
4483 CPUHPPAState
*env
= cpu_env(cs
);
4486 /* Execute one insn. */
4487 #ifdef CONFIG_USER_ONLY
4488 if (ctx
->base
.pc_next
< TARGET_PAGE_SIZE
) {
4490 ret
= ctx
->base
.is_jmp
;
4491 assert(ret
!= DISAS_NEXT
);
4495 /* Always fetch the insn, even if nullified, so that we check
4496 the page permissions for execute. */
4497 uint32_t insn
= translator_ldl(env
, &ctx
->base
, ctx
->base
.pc_next
);
4499 /* Set up the IA queue for the next insn.
4500 This will be overwritten by a branch. */
4501 if (ctx
->iaoq_b
== -1) {
4503 ctx
->iaoq_n_var
= tcg_temp_new_i64();
4504 tcg_gen_addi_i64(ctx
->iaoq_n_var
, cpu_iaoq_b
, 4);
4506 ctx
->iaoq_n
= ctx
->iaoq_b
+ 4;
4507 ctx
->iaoq_n_var
= NULL
;
4510 if (unlikely(ctx
->null_cond
.c
== TCG_COND_ALWAYS
)) {
4511 ctx
->null_cond
.c
= TCG_COND_NEVER
;
4515 if (!decode(ctx
, insn
)) {
4518 ret
= ctx
->base
.is_jmp
;
4519 assert(ctx
->null_lab
== NULL
);
4523 /* Advance the insn queue. Note that this check also detects
4524 a priority change within the instruction queue. */
4525 if (ret
== DISAS_NEXT
&& ctx
->iaoq_b
!= ctx
->iaoq_f
+ 4) {
4526 if (ctx
->iaoq_b
!= -1 && ctx
->iaoq_n
!= -1
4527 && use_goto_tb(ctx
, ctx
->iaoq_b
)
4528 && (ctx
->null_cond
.c
== TCG_COND_NEVER
4529 || ctx
->null_cond
.c
== TCG_COND_ALWAYS
)) {
4530 nullify_set(ctx
, ctx
->null_cond
.c
== TCG_COND_ALWAYS
);
4531 gen_goto_tb(ctx
, 0, ctx
->iaoq_b
, ctx
->iaoq_n
);
4532 ctx
->base
.is_jmp
= ret
= DISAS_NORETURN
;
4534 ctx
->base
.is_jmp
= ret
= DISAS_IAQ_N_STALE
;
4537 ctx
->iaoq_f
= ctx
->iaoq_b
;
4538 ctx
->iaoq_b
= ctx
->iaoq_n
;
4539 ctx
->base
.pc_next
+= 4;
4542 case DISAS_NORETURN
:
4543 case DISAS_IAQ_N_UPDATED
:
4547 case DISAS_IAQ_N_STALE
:
4548 case DISAS_IAQ_N_STALE_EXIT
:
4549 if (ctx
->iaoq_f
== -1) {
4550 copy_iaoq_entry(ctx
, cpu_iaoq_f
, -1, cpu_iaoq_b
);
4551 copy_iaoq_entry(ctx
, cpu_iaoq_b
, ctx
->iaoq_n
, ctx
->iaoq_n_var
);
4552 #ifndef CONFIG_USER_ONLY
4553 tcg_gen_mov_i64(cpu_iasq_f
, cpu_iasq_b
);
4556 ctx
->base
.is_jmp
= (ret
== DISAS_IAQ_N_STALE_EXIT
4558 : DISAS_IAQ_N_UPDATED
);
4559 } else if (ctx
->iaoq_b
== -1) {
4560 copy_iaoq_entry(ctx
, cpu_iaoq_b
, -1, ctx
->iaoq_n_var
);
4565 g_assert_not_reached();
/*
 * Per-TB stop hook: flush any stale IAOQ state back to the CPU registers
 * and emit the block exit appropriate to the final disas status.
 * NOTE(review): extraction dropped the switch scaffolding, break statements
 * and the nullify_save/DISAS_EXIT lines; restored here -- verify against
 * upstream before relying on the exact fallthrough structure.
 */
static void hppa_tr_tb_stop(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *ctx = container_of(dcbase, DisasContext, base);
    DisasJumpType is_jmp = ctx->base.is_jmp;

    switch (is_jmp) {
    case DISAS_NORETURN:
        break;
    case DISAS_TOO_MANY:
    case DISAS_IAQ_N_STALE:
    case DISAS_IAQ_N_STALE_EXIT:
        copy_iaoq_entry(ctx, cpu_iaoq_f, ctx->iaoq_f, cpu_iaoq_f);
        copy_iaoq_entry(ctx, cpu_iaoq_b, ctx->iaoq_b, cpu_iaoq_b);
        nullify_save(ctx);
        /* FALLTHRU */
    case DISAS_IAQ_N_UPDATED:
        if (is_jmp != DISAS_IAQ_N_STALE_EXIT) {
            tcg_gen_lookup_and_goto_ptr();
            break;
        }
        /* FALLTHRU */
    case DISAS_EXIT:
        tcg_gen_exit_tb(NULL, 0);
        break;
    default:
        g_assert_not_reached();
    }
}
/*
 * Disassembly log hook.  For user-only emulation the low "page zero"
 * entry points are synthetic gateways with no real code, so print a
 * symbolic description instead of disassembling.
 * NOTE(review): extraction dropped the switch/case/return scaffolding;
 * case values restored from the addresses in the format strings.
 */
static void hppa_tr_disas_log(const DisasContextBase *dcbase,
                              CPUState *cs, FILE *logfile)
{
    target_ulong pc = dcbase->pc_first;

#ifdef CONFIG_USER_ONLY
    switch (pc) {
    case 0x00:
        fprintf(logfile, "IN:\n0x00000000: (null)\n");
        return;
    case 0xb0:
        fprintf(logfile, "IN:\n0x000000b0: light-weight-syscall\n");
        return;
    case 0xe0:
        fprintf(logfile, "IN:\n0x000000e0: set-thread-pointer-syscall\n");
        return;
    case 0x100:
        fprintf(logfile, "IN:\n0x00000100: syscall\n");
        return;
    }
#endif

    fprintf(logfile, "IN: %s\n", lookup_symbol(pc));
    target_disas(logfile, cs, pc, dcbase->tb->size);
}
/* Hook table handed to the generic translator loop. */
static const TranslatorOps hppa_tr_ops = {
    .init_disas_context = hppa_tr_init_disas_context,
    .tb_start           = hppa_tr_tb_start,
    .insn_start         = hppa_tr_insn_start,
    .translate_insn     = hppa_tr_translate_insn,
    .tb_stop            = hppa_tr_tb_stop,
    .disas_log          = hppa_tr_disas_log,
};
/*
 * Entry point from the common TCG code: translate one TB by running the
 * generic translator loop with the HPPA hook table.  The DisasContext
 * lives on the stack; only its embedded base is passed to the loop.
 */
void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns,
                           vaddr pc, void *host_pc)
{
    DisasContext ctx;

    translator_loop(cs, tb, max_insns, pc, host_pc, &hppa_tr_ops, &ctx.base);
}