2 * HPPA emulation cpu translation for qemu.
4 * Copyright (c) 2016 Richard Henderson <rth@twiddle.net>
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
20 #include "qemu/osdep.h"
22 #include "disas/disas.h"
23 #include "qemu/host-utils.h"
24 #include "exec/exec-all.h"
26 #include "exec/cpu_ldst.h"
28 #include "exec/helper-proto.h"
29 #include "exec/helper-gen.h"
31 #include "trace-tcg.h"
34 typedef struct DisasCond
{
41 typedef struct DisasContext
{
42 struct TranslationBlock
*tb
;
56 bool singlestep_enabled
;
60 /* Return values from translate_one, indicating the state of the TB.
61 Note that zero indicates that we are not exiting the TB. */
66 /* We have emitted one or more goto_tb. No fixup required. */
69 /* We are not using a goto_tb (for whatever reason), but have updated
70 the iaq (for whatever reason), so don't do it again on exit. */
73 /* We are exiting the TB, but have neither emitted a goto_tb, nor
74 updated the iaq for the next instruction to be executed. */
77 /* We are ending the TB with a noreturn function call, e.g. longjmp.
78 No following code will be executed. */
82 typedef struct DisasInsn
{
84 ExitStatus (*trans
)(DisasContext
*ctx
, uint32_t insn
,
85 const struct DisasInsn
*f
);
87 void (*f_ttt
)(TCGv
, TCGv
, TCGv
);
91 /* global register indexes */
92 static TCGv_env cpu_env
;
93 static TCGv cpu_gr
[32];
94 static TCGv cpu_iaoq_f
;
95 static TCGv cpu_iaoq_b
;
97 static TCGv cpu_psw_n
;
98 static TCGv cpu_psw_v
;
99 static TCGv cpu_psw_cb
;
100 static TCGv cpu_psw_cb_msb
;
101 static TCGv cpu_cr26
;
102 static TCGv cpu_cr27
;
104 #include "exec/gen-icount.h"
106 void hppa_translate_init(void)
108 #define DEF_VAR(V) { &cpu_##V, #V, offsetof(CPUHPPAState, V) }
110 typedef struct { TCGv
*var
; const char *name
; int ofs
; } GlobalVar
;
111 static const GlobalVar vars
[] = {
125 /* Use the symbolic register names that match the disassembler. */
126 static const char gr_names
[32][4] = {
127 "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
128 "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15",
129 "r16", "r17", "r18", "r19", "r20", "r21", "r22", "r23",
130 "r24", "r25", "r26", "r27", "r28", "r29", "r30", "r31"
133 static bool done_init
= 0;
141 cpu_env
= tcg_global_reg_new_ptr(TCG_AREG0
, "env");
142 tcg_ctx
.tcg_env
= cpu_env
;
144 TCGV_UNUSED(cpu_gr
[0]);
145 for (i
= 1; i
< 32; i
++) {
146 cpu_gr
[i
] = tcg_global_mem_new(cpu_env
,
147 offsetof(CPUHPPAState
, gr
[i
]),
151 for (i
= 0; i
< ARRAY_SIZE(vars
); ++i
) {
152 const GlobalVar
*v
= &vars
[i
];
153 *v
->var
= tcg_global_mem_new(cpu_env
, v
->ofs
, v
->name
);
157 static DisasCond
cond_make_f(void)
159 DisasCond r
= { .c
= TCG_COND_NEVER
};
165 static DisasCond
cond_make_n(void)
167 DisasCond r
= { .c
= TCG_COND_NE
, .a0_is_n
= true, .a1_is_0
= true };
173 static DisasCond
cond_make_0(TCGCond c
, TCGv a0
)
175 DisasCond r
= { .c
= c
, .a1_is_0
= true };
177 assert (c
!= TCG_COND_NEVER
&& c
!= TCG_COND_ALWAYS
);
178 r
.a0
= tcg_temp_new();
179 tcg_gen_mov_tl(r
.a0
, a0
);
185 static DisasCond
cond_make(TCGCond c
, TCGv a0
, TCGv a1
)
187 DisasCond r
= { .c
= c
};
189 assert (c
!= TCG_COND_NEVER
&& c
!= TCG_COND_ALWAYS
);
190 r
.a0
= tcg_temp_new();
191 tcg_gen_mov_tl(r
.a0
, a0
);
192 r
.a1
= tcg_temp_new();
193 tcg_gen_mov_tl(r
.a1
, a1
);
198 static void cond_prep(DisasCond
*cond
)
201 cond
->a1_is_0
= false;
202 cond
->a1
= tcg_const_tl(0);
206 static void cond_free(DisasCond
*cond
)
210 if (!cond
->a0_is_n
) {
211 tcg_temp_free(cond
->a0
);
213 if (!cond
->a1_is_0
) {
214 tcg_temp_free(cond
->a1
);
216 cond
->a0_is_n
= false;
217 cond
->a1_is_0
= false;
218 TCGV_UNUSED(cond
->a0
);
219 TCGV_UNUSED(cond
->a1
);
221 case TCG_COND_ALWAYS
:
222 cond
->c
= TCG_COND_NEVER
;
229 static TCGv
get_temp(DisasContext
*ctx
)
231 unsigned i
= ctx
->ntemps
++;
232 g_assert(i
< ARRAY_SIZE(ctx
->temps
));
233 return ctx
->temps
[i
] = tcg_temp_new();
236 static TCGv
load_const(DisasContext
*ctx
, target_long v
)
238 TCGv t
= get_temp(ctx
);
239 tcg_gen_movi_tl(t
, v
);
243 static TCGv
load_gpr(DisasContext
*ctx
, unsigned reg
)
246 TCGv t
= get_temp(ctx
);
247 tcg_gen_movi_tl(t
, 0);
254 static TCGv
dest_gpr(DisasContext
*ctx
, unsigned reg
)
256 if (reg
== 0 || ctx
->null_cond
.c
!= TCG_COND_NEVER
) {
257 return get_temp(ctx
);
263 static void save_or_nullify(DisasContext
*ctx
, TCGv dest
, TCGv t
)
265 if (ctx
->null_cond
.c
!= TCG_COND_NEVER
) {
266 cond_prep(&ctx
->null_cond
);
267 tcg_gen_movcond_tl(ctx
->null_cond
.c
, dest
, ctx
->null_cond
.a0
,
268 ctx
->null_cond
.a1
, dest
, t
);
270 tcg_gen_mov_tl(dest
, t
);
274 static void save_gpr(DisasContext
*ctx
, unsigned reg
, TCGv t
)
277 save_or_nullify(ctx
, cpu_gr
[reg
], t
);
281 #ifdef HOST_WORDS_BIGENDIAN
289 static TCGv_i32
load_frw_i32(unsigned rt
)
291 TCGv_i32 ret
= tcg_temp_new_i32();
292 tcg_gen_ld_i32(ret
, cpu_env
,
293 offsetof(CPUHPPAState
, fr
[rt
& 31])
294 + (rt
& 32 ? LO_OFS
: HI_OFS
));
298 static void save_frw_i32(unsigned rt
, TCGv_i32 val
)
300 tcg_gen_st_i32(val
, cpu_env
,
301 offsetof(CPUHPPAState
, fr
[rt
& 31])
302 + (rt
& 32 ? LO_OFS
: HI_OFS
));
308 static TCGv_i64
load_frd(unsigned rt
)
310 TCGv_i64 ret
= tcg_temp_new_i64();
311 tcg_gen_ld_i64(ret
, cpu_env
, offsetof(CPUHPPAState
, fr
[rt
]));
315 static void save_frd(unsigned rt
, TCGv_i64 val
)
317 tcg_gen_st_i64(val
, cpu_env
, offsetof(CPUHPPAState
, fr
[rt
]));
320 /* Skip over the implementation of an insn that has been nullified.
321 Use this when the insn is too complex for a conditional move. */
322 static void nullify_over(DisasContext
*ctx
)
324 if (ctx
->null_cond
.c
!= TCG_COND_NEVER
) {
325 /* The always condition should have been handled in the main loop. */
326 assert(ctx
->null_cond
.c
!= TCG_COND_ALWAYS
);
328 ctx
->null_lab
= gen_new_label();
329 cond_prep(&ctx
->null_cond
);
331 /* If we're using PSW[N], copy it to a temp because... */
332 if (ctx
->null_cond
.a0_is_n
) {
333 ctx
->null_cond
.a0_is_n
= false;
334 ctx
->null_cond
.a0
= tcg_temp_new();
335 tcg_gen_mov_tl(ctx
->null_cond
.a0
, cpu_psw_n
);
337 /* ... we clear it before branching over the implementation,
338 so that (1) it's clear after nullifying this insn and
339 (2) if this insn nullifies the next, PSW[N] is valid. */
340 if (ctx
->psw_n_nonzero
) {
341 ctx
->psw_n_nonzero
= false;
342 tcg_gen_movi_tl(cpu_psw_n
, 0);
345 tcg_gen_brcond_tl(ctx
->null_cond
.c
, ctx
->null_cond
.a0
,
346 ctx
->null_cond
.a1
, ctx
->null_lab
);
347 cond_free(&ctx
->null_cond
);
351 /* Save the current nullification state to PSW[N]. */
352 static void nullify_save(DisasContext
*ctx
)
354 if (ctx
->null_cond
.c
== TCG_COND_NEVER
) {
355 if (ctx
->psw_n_nonzero
) {
356 tcg_gen_movi_tl(cpu_psw_n
, 0);
360 if (!ctx
->null_cond
.a0_is_n
) {
361 cond_prep(&ctx
->null_cond
);
362 tcg_gen_setcond_tl(ctx
->null_cond
.c
, cpu_psw_n
,
363 ctx
->null_cond
.a0
, ctx
->null_cond
.a1
);
364 ctx
->psw_n_nonzero
= true;
366 cond_free(&ctx
->null_cond
);
369 /* Set a PSW[N] to X. The intention is that this is used immediately
370 before a goto_tb/exit_tb, so that there is no fallthru path to other
371 code within the TB. Therefore we do not update psw_n_nonzero. */
372 static void nullify_set(DisasContext
*ctx
, bool x
)
374 if (ctx
->psw_n_nonzero
|| x
) {
375 tcg_gen_movi_tl(cpu_psw_n
, x
);
379 /* Mark the end of an instruction that may have been nullified.
380 This is the pair to nullify_over. */
381 static ExitStatus
nullify_end(DisasContext
*ctx
, ExitStatus status
)
383 TCGLabel
*null_lab
= ctx
->null_lab
;
385 if (likely(null_lab
== NULL
)) {
386 /* The current insn wasn't conditional or handled the condition
387 applied to it without a branch, so the (new) setting of
388 NULL_COND can be applied directly to the next insn. */
391 ctx
->null_lab
= NULL
;
393 if (likely(ctx
->null_cond
.c
== TCG_COND_NEVER
)) {
394 /* The next instruction will be unconditional,
395 and NULL_COND already reflects that. */
396 gen_set_label(null_lab
);
398 /* The insn that we just executed is itself nullifying the next
399 instruction. Store the condition in the PSW[N] global.
400 We asserted PSW[N] = 0 in nullify_over, so that after the
401 label we have the proper value in place. */
403 gen_set_label(null_lab
);
404 ctx
->null_cond
= cond_make_n();
407 assert(status
!= EXIT_GOTO_TB
&& status
!= EXIT_IAQ_N_UPDATED
);
408 if (status
== EXIT_NORETURN
) {
414 static void copy_iaoq_entry(TCGv dest
, target_ulong ival
, TCGv vval
)
416 if (unlikely(ival
== -1)) {
417 tcg_gen_mov_tl(dest
, vval
);
419 tcg_gen_movi_tl(dest
, ival
);
423 static inline target_ulong
iaoq_dest(DisasContext
*ctx
, target_long disp
)
425 return ctx
->iaoq_f
+ disp
+ 8;
428 static void gen_excp_1(int exception
)
430 TCGv_i32 t
= tcg_const_i32(exception
);
431 gen_helper_excp(cpu_env
, t
);
432 tcg_temp_free_i32(t
);
435 static ExitStatus
gen_excp(DisasContext
*ctx
, int exception
)
437 copy_iaoq_entry(cpu_iaoq_f
, ctx
->iaoq_f
, cpu_iaoq_f
);
438 copy_iaoq_entry(cpu_iaoq_b
, ctx
->iaoq_b
, cpu_iaoq_b
);
440 gen_excp_1(exception
);
441 return EXIT_NORETURN
;
444 static ExitStatus
gen_illegal(DisasContext
*ctx
)
447 return nullify_end(ctx
, gen_excp(ctx
, EXCP_SIGILL
));
450 static bool use_goto_tb(DisasContext
*ctx
, target_ulong dest
)
452 /* Suppress goto_tb in the case of single-steping and IO. */
453 if ((ctx
->tb
->cflags
& CF_LAST_IO
) || ctx
->singlestep_enabled
) {
459 /* If the next insn is to be nullified, and it's on the same page,
460 and we're not attempting to set a breakpoint on it, then we can
461 totally skip the nullified insn. This avoids creating and
462 executing a TB that merely branches to the next TB. */
463 static bool use_nullify_skip(DisasContext
*ctx
)
465 return (((ctx
->iaoq_b
^ ctx
->iaoq_f
) & TARGET_PAGE_MASK
) == 0
466 && !cpu_breakpoint_test(ctx
->cs
, ctx
->iaoq_b
, BP_ANY
));
469 static void gen_goto_tb(DisasContext
*ctx
, int which
,
470 target_ulong f
, target_ulong b
)
472 if (f
!= -1 && b
!= -1 && use_goto_tb(ctx
, f
)) {
473 tcg_gen_goto_tb(which
);
474 tcg_gen_movi_tl(cpu_iaoq_f
, f
);
475 tcg_gen_movi_tl(cpu_iaoq_b
, b
);
476 tcg_gen_exit_tb((uintptr_t)ctx
->tb
+ which
);
478 copy_iaoq_entry(cpu_iaoq_f
, f
, cpu_iaoq_b
);
479 copy_iaoq_entry(cpu_iaoq_b
, b
, ctx
->iaoq_n_var
);
480 if (ctx
->singlestep_enabled
) {
481 gen_excp_1(EXCP_DEBUG
);
488 /* PA has a habit of taking the LSB of a field and using that as the sign,
489 with the rest of the field becoming the least significant bits. */
490 static target_long
low_sextract(uint32_t val
, int pos
, int len
)
492 target_ulong x
= -(target_ulong
)extract32(val
, pos
, 1);
493 x
= (x
<< (len
- 1)) | extract32(val
, pos
+ 1, len
- 1);
497 static target_long
assemble_12(uint32_t insn
)
499 target_ulong x
= -(target_ulong
)(insn
& 1);
500 x
= (x
<< 1) | extract32(insn
, 2, 1);
501 x
= (x
<< 10) | extract32(insn
, 3, 10);
505 static target_long
assemble_16(uint32_t insn
)
507 /* Take the name from PA2.0, which produces a 16-bit number
508 only with wide mode; otherwise a 14-bit number. Since we don't
509 implement wide mode, this is always the 14-bit number. */
510 return low_sextract(insn
, 0, 14);
513 static target_long
assemble_16a(uint32_t insn
)
515 /* Take the name from PA2.0, which produces a 14-bit shifted number
516 only with wide mode; otherwise a 12-bit shifted number. Since we
517 don't implement wide mode, this is always the 12-bit number. */
518 target_ulong x
= -(target_ulong
)(insn
& 1);
519 x
= (x
<< 11) | extract32(insn
, 2, 11);
523 static target_long
assemble_17(uint32_t insn
)
525 target_ulong x
= -(target_ulong
)(insn
& 1);
526 x
= (x
<< 5) | extract32(insn
, 16, 5);
527 x
= (x
<< 1) | extract32(insn
, 2, 1);
528 x
= (x
<< 10) | extract32(insn
, 3, 10);
532 static target_long
assemble_21(uint32_t insn
)
534 target_ulong x
= -(target_ulong
)(insn
& 1);
535 x
= (x
<< 11) | extract32(insn
, 1, 11);
536 x
= (x
<< 2) | extract32(insn
, 14, 2);
537 x
= (x
<< 5) | extract32(insn
, 16, 5);
538 x
= (x
<< 2) | extract32(insn
, 12, 2);
542 static target_long
assemble_22(uint32_t insn
)
544 target_ulong x
= -(target_ulong
)(insn
& 1);
545 x
= (x
<< 10) | extract32(insn
, 16, 10);
546 x
= (x
<< 1) | extract32(insn
, 2, 1);
547 x
= (x
<< 10) | extract32(insn
, 3, 10);
551 /* The parisc documentation describes only the general interpretation of
552 the conditions, without describing their exact implementation. The
553 interpretations do not stand up well when considering ADD,C and SUB,B.
554 However, considering the Addition, Subtraction and Logical conditions
555 as a whole it would appear that these relations are similar to what
556 a traditional NZCV set of flags would produce. */
558 static DisasCond
do_cond(unsigned cf
, TCGv res
, TCGv cb_msb
, TCGv sv
)
564 case 0: /* Never / TR */
565 cond
= cond_make_f();
567 case 1: /* = / <> (Z / !Z) */
568 cond
= cond_make_0(TCG_COND_EQ
, res
);
570 case 2: /* < / >= (N / !N) */
571 cond
= cond_make_0(TCG_COND_LT
, res
);
573 case 3: /* <= / > (N | Z / !N & !Z) */
574 cond
= cond_make_0(TCG_COND_LE
, res
);
576 case 4: /* NUV / UV (!C / C) */
577 cond
= cond_make_0(TCG_COND_EQ
, cb_msb
);
579 case 5: /* ZNV / VNZ (!C | Z / C & !Z) */
580 tmp
= tcg_temp_new();
581 tcg_gen_neg_tl(tmp
, cb_msb
);
582 tcg_gen_and_tl(tmp
, tmp
, res
);
583 cond
= cond_make_0(TCG_COND_EQ
, tmp
);
586 case 6: /* SV / NSV (V / !V) */
587 cond
= cond_make_0(TCG_COND_LT
, sv
);
589 case 7: /* OD / EV */
590 tmp
= tcg_temp_new();
591 tcg_gen_andi_tl(tmp
, res
, 1);
592 cond
= cond_make_0(TCG_COND_NE
, tmp
);
596 g_assert_not_reached();
599 cond
.c
= tcg_invert_cond(cond
.c
);
605 /* Similar, but for the special case of subtraction without borrow, we
606 can use the inputs directly. This can allow other computation to be
607 deleted as unused. */
609 static DisasCond
do_sub_cond(unsigned cf
, TCGv res
, TCGv in1
, TCGv in2
, TCGv sv
)
615 cond
= cond_make(TCG_COND_EQ
, in1
, in2
);
618 cond
= cond_make(TCG_COND_LT
, in1
, in2
);
621 cond
= cond_make(TCG_COND_LE
, in1
, in2
);
623 case 4: /* << / >>= */
624 cond
= cond_make(TCG_COND_LTU
, in1
, in2
);
626 case 5: /* <<= / >> */
627 cond
= cond_make(TCG_COND_LEU
, in1
, in2
);
630 return do_cond(cf
, res
, sv
, sv
);
633 cond
.c
= tcg_invert_cond(cond
.c
);
639 /* Similar, but for logicals, where the carry and overflow bits are not
640 computed, and use of them is undefined. */
642 static DisasCond
do_log_cond(unsigned cf
, TCGv res
)
645 case 4: case 5: case 6:
649 return do_cond(cf
, res
, res
, res
);
652 /* Similar, but for shift/extract/deposit conditions. */
654 static DisasCond
do_sed_cond(unsigned orig
, TCGv res
)
658 /* Convert the compressed condition codes to standard.
659 0-2 are the same as logicals (nv,<,<=), while 3 is OD.
660 4-7 are the reverse of 0-3. */
667 return do_log_cond(c
* 2 + f
, res
);
670 /* Similar, but for unit conditions. */
672 static DisasCond
do_unit_cond(unsigned cf
, TCGv res
, TCGv in1
, TCGv in2
)
679 /* Since we want to test lots of carry-out bits all at once, do not
680 * do our normal thing and compute carry-in of bit B+1 since that
681 * leaves us with carry bits spread across two words.
684 tmp
= tcg_temp_new();
685 tcg_gen_or_tl(cb
, in1
, in2
);
686 tcg_gen_and_tl(tmp
, in1
, in2
);
687 tcg_gen_andc_tl(cb
, cb
, res
);
688 tcg_gen_or_tl(cb
, cb
, tmp
);
693 case 0: /* never / TR */
694 case 1: /* undefined */
695 case 5: /* undefined */
696 cond
= cond_make_f();
699 case 2: /* SBZ / NBZ */
700 /* See hasless(v,1) from
701 * https://graphics.stanford.edu/~seander/bithacks.html#ZeroInWord
703 tmp
= tcg_temp_new();
704 tcg_gen_subi_tl(tmp
, res
, 0x01010101u
);
705 tcg_gen_andc_tl(tmp
, tmp
, res
);
706 tcg_gen_andi_tl(tmp
, tmp
, 0x80808080u
);
707 cond
= cond_make_0(TCG_COND_NE
, tmp
);
711 case 3: /* SHZ / NHZ */
712 tmp
= tcg_temp_new();
713 tcg_gen_subi_tl(tmp
, res
, 0x00010001u
);
714 tcg_gen_andc_tl(tmp
, tmp
, res
);
715 tcg_gen_andi_tl(tmp
, tmp
, 0x80008000u
);
716 cond
= cond_make_0(TCG_COND_NE
, tmp
);
720 case 4: /* SDC / NDC */
721 tcg_gen_andi_tl(cb
, cb
, 0x88888888u
);
722 cond
= cond_make_0(TCG_COND_NE
, cb
);
725 case 6: /* SBC / NBC */
726 tcg_gen_andi_tl(cb
, cb
, 0x80808080u
);
727 cond
= cond_make_0(TCG_COND_NE
, cb
);
730 case 7: /* SHC / NHC */
731 tcg_gen_andi_tl(cb
, cb
, 0x80008000u
);
732 cond
= cond_make_0(TCG_COND_NE
, cb
);
736 g_assert_not_reached();
742 cond
.c
= tcg_invert_cond(cond
.c
);
748 /* Compute signed overflow for addition. */
749 static TCGv
do_add_sv(DisasContext
*ctx
, TCGv res
, TCGv in1
, TCGv in2
)
751 TCGv sv
= get_temp(ctx
);
752 TCGv tmp
= tcg_temp_new();
754 tcg_gen_xor_tl(sv
, res
, in1
);
755 tcg_gen_xor_tl(tmp
, in1
, in2
);
756 tcg_gen_andc_tl(sv
, sv
, tmp
);
762 /* Compute signed overflow for subtraction. */
763 static TCGv
do_sub_sv(DisasContext
*ctx
, TCGv res
, TCGv in1
, TCGv in2
)
765 TCGv sv
= get_temp(ctx
);
766 TCGv tmp
= tcg_temp_new();
768 tcg_gen_xor_tl(sv
, res
, in1
);
769 tcg_gen_xor_tl(tmp
, in1
, in2
);
770 tcg_gen_and_tl(sv
, sv
, tmp
);
776 static ExitStatus
do_add(DisasContext
*ctx
, unsigned rt
, TCGv in1
, TCGv in2
,
777 unsigned shift
, bool is_l
, bool is_tsv
, bool is_tc
,
778 bool is_c
, unsigned cf
)
780 TCGv dest
, cb
, cb_msb
, sv
, tmp
;
781 unsigned c
= cf
>> 1;
784 dest
= tcg_temp_new();
790 tcg_gen_shli_tl(tmp
, in1
, shift
);
794 if (!is_l
|| c
== 4 || c
== 5) {
795 TCGv zero
= tcg_const_tl(0);
796 cb_msb
= get_temp(ctx
);
797 tcg_gen_add2_tl(dest
, cb_msb
, in1
, zero
, in2
, zero
);
799 tcg_gen_add2_tl(dest
, cb_msb
, dest
, cb_msb
, cpu_psw_cb_msb
, zero
);
804 tcg_gen_xor_tl(cb
, in1
, in2
);
805 tcg_gen_xor_tl(cb
, cb
, dest
);
808 tcg_gen_add_tl(dest
, in1
, in2
);
810 tcg_gen_add_tl(dest
, dest
, cpu_psw_cb_msb
);
814 /* Compute signed overflow if required. */
816 if (is_tsv
|| c
== 6) {
817 sv
= do_add_sv(ctx
, dest
, in1
, in2
);
819 /* ??? Need to include overflow from shift. */
820 gen_helper_tsv(cpu_env
, sv
);
824 /* Emit any conditional trap before any writeback. */
825 cond
= do_cond(cf
, dest
, cb_msb
, sv
);
828 tmp
= tcg_temp_new();
829 tcg_gen_setcond_tl(cond
.c
, tmp
, cond
.a0
, cond
.a1
);
830 gen_helper_tcond(cpu_env
, tmp
);
834 /* Write back the result. */
836 save_or_nullify(ctx
, cpu_psw_cb
, cb
);
837 save_or_nullify(ctx
, cpu_psw_cb_msb
, cb_msb
);
839 save_gpr(ctx
, rt
, dest
);
842 /* Install the new nullification. */
843 cond_free(&ctx
->null_cond
);
844 ctx
->null_cond
= cond
;
848 static ExitStatus
do_sub(DisasContext
*ctx
, unsigned rt
, TCGv in1
, TCGv in2
,
849 bool is_tsv
, bool is_b
, bool is_tc
, unsigned cf
)
851 TCGv dest
, sv
, cb
, cb_msb
, zero
, tmp
;
852 unsigned c
= cf
>> 1;
855 dest
= tcg_temp_new();
857 cb_msb
= tcg_temp_new();
859 zero
= tcg_const_tl(0);
861 /* DEST,C = IN1 + ~IN2 + C. */
862 tcg_gen_not_tl(cb
, in2
);
863 tcg_gen_add2_tl(dest
, cb_msb
, in1
, zero
, cpu_psw_cb_msb
, zero
);
864 tcg_gen_add2_tl(dest
, cb_msb
, dest
, cb_msb
, cb
, zero
);
865 tcg_gen_xor_tl(cb
, cb
, in1
);
866 tcg_gen_xor_tl(cb
, cb
, dest
);
868 /* DEST,C = IN1 + ~IN2 + 1. We can produce the same result in fewer
869 operations by seeding the high word with 1 and subtracting. */
870 tcg_gen_movi_tl(cb_msb
, 1);
871 tcg_gen_sub2_tl(dest
, cb_msb
, in1
, cb_msb
, in2
, zero
);
872 tcg_gen_eqv_tl(cb
, in1
, in2
);
873 tcg_gen_xor_tl(cb
, cb
, dest
);
877 /* Compute signed overflow if required. */
879 if (is_tsv
|| c
== 6) {
880 sv
= do_sub_sv(ctx
, dest
, in1
, in2
);
882 gen_helper_tsv(cpu_env
, sv
);
886 /* Compute the condition. We cannot use the special case for borrow. */
888 cond
= do_sub_cond(cf
, dest
, in1
, in2
, sv
);
890 cond
= do_cond(cf
, dest
, cb_msb
, sv
);
893 /* Emit any conditional trap before any writeback. */
896 tmp
= tcg_temp_new();
897 tcg_gen_setcond_tl(cond
.c
, tmp
, cond
.a0
, cond
.a1
);
898 gen_helper_tcond(cpu_env
, tmp
);
902 /* Write back the result. */
903 save_or_nullify(ctx
, cpu_psw_cb
, cb
);
904 save_or_nullify(ctx
, cpu_psw_cb_msb
, cb_msb
);
905 save_gpr(ctx
, rt
, dest
);
908 /* Install the new nullification. */
909 cond_free(&ctx
->null_cond
);
910 ctx
->null_cond
= cond
;
914 static ExitStatus
do_cmpclr(DisasContext
*ctx
, unsigned rt
, TCGv in1
,
915 TCGv in2
, unsigned cf
)
920 dest
= tcg_temp_new();
921 tcg_gen_sub_tl(dest
, in1
, in2
);
923 /* Compute signed overflow if required. */
925 if ((cf
>> 1) == 6) {
926 sv
= do_sub_sv(ctx
, dest
, in1
, in2
);
929 /* Form the condition for the compare. */
930 cond
= do_sub_cond(cf
, dest
, in1
, in2
, sv
);
933 tcg_gen_movi_tl(dest
, 0);
934 save_gpr(ctx
, rt
, dest
);
937 /* Install the new nullification. */
938 cond_free(&ctx
->null_cond
);
939 ctx
->null_cond
= cond
;
943 static ExitStatus
do_log(DisasContext
*ctx
, unsigned rt
, TCGv in1
, TCGv in2
,
944 unsigned cf
, void (*fn
)(TCGv
, TCGv
, TCGv
))
946 TCGv dest
= dest_gpr(ctx
, rt
);
948 /* Perform the operation, and writeback. */
950 save_gpr(ctx
, rt
, dest
);
952 /* Install the new nullification. */
953 cond_free(&ctx
->null_cond
);
955 ctx
->null_cond
= do_log_cond(cf
, dest
);
960 static ExitStatus
do_unit(DisasContext
*ctx
, unsigned rt
, TCGv in1
,
961 TCGv in2
, unsigned cf
, bool is_tc
,
962 void (*fn
)(TCGv
, TCGv
, TCGv
))
968 dest
= dest_gpr(ctx
, rt
);
970 save_gpr(ctx
, rt
, dest
);
971 cond_free(&ctx
->null_cond
);
973 dest
= tcg_temp_new();
976 cond
= do_unit_cond(cf
, dest
, in1
, in2
);
979 TCGv tmp
= tcg_temp_new();
981 tcg_gen_setcond_tl(cond
.c
, tmp
, cond
.a0
, cond
.a1
);
982 gen_helper_tcond(cpu_env
, tmp
);
985 save_gpr(ctx
, rt
, dest
);
987 cond_free(&ctx
->null_cond
);
988 ctx
->null_cond
= cond
;
993 /* Emit a memory load. The modify parameter should be
994 * < 0 for pre-modify,
995 * > 0 for post-modify,
996 * = 0 for no base register update.
998 static void do_load_32(DisasContext
*ctx
, TCGv_i32 dest
, unsigned rb
,
999 unsigned rx
, int scale
, target_long disp
,
1000 int modify
, TCGMemOp mop
)
1004 /* Caller uses nullify_over/nullify_end. */
1005 assert(ctx
->null_cond
.c
== TCG_COND_NEVER
);
1007 addr
= tcg_temp_new();
1008 base
= load_gpr(ctx
, rb
);
1010 /* Note that RX is mutually exclusive with DISP. */
1012 tcg_gen_shli_tl(addr
, cpu_gr
[rx
], scale
);
1013 tcg_gen_add_tl(addr
, addr
, base
);
1015 tcg_gen_addi_tl(addr
, base
, disp
);
1019 tcg_gen_qemu_ld_i32(dest
, addr
, MMU_USER_IDX
, mop
);
1021 tcg_gen_qemu_ld_i32(dest
, (modify
< 0 ? addr
: base
),
1023 save_gpr(ctx
, rb
, addr
);
1025 tcg_temp_free(addr
);
1028 static void do_load_64(DisasContext
*ctx
, TCGv_i64 dest
, unsigned rb
,
1029 unsigned rx
, int scale
, target_long disp
,
1030 int modify
, TCGMemOp mop
)
1034 /* Caller uses nullify_over/nullify_end. */
1035 assert(ctx
->null_cond
.c
== TCG_COND_NEVER
);
1037 addr
= tcg_temp_new();
1038 base
= load_gpr(ctx
, rb
);
1040 /* Note that RX is mutually exclusive with DISP. */
1042 tcg_gen_shli_tl(addr
, cpu_gr
[rx
], scale
);
1043 tcg_gen_add_tl(addr
, addr
, base
);
1045 tcg_gen_addi_tl(addr
, base
, disp
);
1049 tcg_gen_qemu_ld_i64(dest
, addr
, MMU_USER_IDX
, mop
);
1051 tcg_gen_qemu_ld_i64(dest
, (modify
< 0 ? addr
: base
),
1053 save_gpr(ctx
, rb
, addr
);
1055 tcg_temp_free(addr
);
1058 static void do_store_32(DisasContext
*ctx
, TCGv_i32 src
, unsigned rb
,
1059 unsigned rx
, int scale
, target_long disp
,
1060 int modify
, TCGMemOp mop
)
1064 /* Caller uses nullify_over/nullify_end. */
1065 assert(ctx
->null_cond
.c
== TCG_COND_NEVER
);
1067 addr
= tcg_temp_new();
1068 base
= load_gpr(ctx
, rb
);
1070 /* Note that RX is mutually exclusive with DISP. */
1072 tcg_gen_shli_tl(addr
, cpu_gr
[rx
], scale
);
1073 tcg_gen_add_tl(addr
, addr
, base
);
1075 tcg_gen_addi_tl(addr
, base
, disp
);
1078 tcg_gen_qemu_st_i32(src
, (modify
<= 0 ? addr
: base
), MMU_USER_IDX
, mop
);
1081 save_gpr(ctx
, rb
, addr
);
1083 tcg_temp_free(addr
);
1086 static void do_store_64(DisasContext
*ctx
, TCGv_i64 src
, unsigned rb
,
1087 unsigned rx
, int scale
, target_long disp
,
1088 int modify
, TCGMemOp mop
)
1092 /* Caller uses nullify_over/nullify_end. */
1093 assert(ctx
->null_cond
.c
== TCG_COND_NEVER
);
1095 addr
= tcg_temp_new();
1096 base
= load_gpr(ctx
, rb
);
1098 /* Note that RX is mutually exclusive with DISP. */
1100 tcg_gen_shli_tl(addr
, cpu_gr
[rx
], scale
);
1101 tcg_gen_add_tl(addr
, addr
, base
);
1103 tcg_gen_addi_tl(addr
, base
, disp
);
1106 tcg_gen_qemu_st_i64(src
, (modify
<= 0 ? addr
: base
), MMU_USER_IDX
, mop
);
1109 save_gpr(ctx
, rb
, addr
);
1111 tcg_temp_free(addr
);
1114 #if TARGET_LONG_BITS == 64
1115 #define do_load_tl do_load_64
1116 #define do_store_tl do_store_64
1118 #define do_load_tl do_load_32
1119 #define do_store_tl do_store_32
1122 static ExitStatus
do_load(DisasContext
*ctx
, unsigned rt
, unsigned rb
,
1123 unsigned rx
, int scale
, target_long disp
,
1124 int modify
, TCGMemOp mop
)
1131 /* No base register update. */
1132 dest
= dest_gpr(ctx
, rt
);
1134 /* Make sure if RT == RB, we see the result of the load. */
1135 dest
= get_temp(ctx
);
1137 do_load_tl(ctx
, dest
, rb
, rx
, scale
, disp
, modify
, mop
);
1138 save_gpr(ctx
, rt
, dest
);
1140 return nullify_end(ctx
, NO_EXIT
);
1143 static ExitStatus
do_floadw(DisasContext
*ctx
, unsigned rt
, unsigned rb
,
1144 unsigned rx
, int scale
, target_long disp
,
1151 tmp
= tcg_temp_new_i32();
1152 do_load_32(ctx
, tmp
, rb
, rx
, scale
, disp
, modify
, MO_TEUL
);
1153 save_frw_i32(rt
, tmp
);
1154 tcg_temp_free_i32(tmp
);
1157 gen_helper_loaded_fr0(cpu_env
);
1160 return nullify_end(ctx
, NO_EXIT
);
1163 static ExitStatus
do_floadd(DisasContext
*ctx
, unsigned rt
, unsigned rb
,
1164 unsigned rx
, int scale
, target_long disp
,
1171 tmp
= tcg_temp_new_i64();
1172 do_load_64(ctx
, tmp
, rb
, rx
, scale
, disp
, modify
, MO_TEQ
);
1174 tcg_temp_free_i64(tmp
);
1177 gen_helper_loaded_fr0(cpu_env
);
1180 return nullify_end(ctx
, NO_EXIT
);
1183 static ExitStatus
do_store(DisasContext
*ctx
, unsigned rt
, unsigned rb
,
1184 target_long disp
, int modify
, TCGMemOp mop
)
1187 do_store_tl(ctx
, load_gpr(ctx
, rt
), rb
, 0, 0, disp
, modify
, mop
);
1188 return nullify_end(ctx
, NO_EXIT
);
1191 static ExitStatus
do_fstorew(DisasContext
*ctx
, unsigned rt
, unsigned rb
,
1192 unsigned rx
, int scale
, target_long disp
,
1199 tmp
= load_frw_i32(rt
);
1200 do_store_32(ctx
, tmp
, rb
, rx
, scale
, disp
, modify
, MO_TEUL
);
1201 tcg_temp_free_i32(tmp
);
1203 return nullify_end(ctx
, NO_EXIT
);
1206 static ExitStatus
do_fstored(DisasContext
*ctx
, unsigned rt
, unsigned rb
,
1207 unsigned rx
, int scale
, target_long disp
,
1215 do_store_64(ctx
, tmp
, rb
, rx
, scale
, disp
, modify
, MO_TEQ
);
1216 tcg_temp_free_i64(tmp
);
1218 return nullify_end(ctx
, NO_EXIT
);
1221 /* Emit an unconditional branch to a direct target, which may or may not
1222 have already had nullification handled. */
1223 static ExitStatus
do_dbranch(DisasContext
*ctx
, target_ulong dest
,
1224 unsigned link
, bool is_n
)
1226 if (ctx
->null_cond
.c
== TCG_COND_NEVER
&& ctx
->null_lab
== NULL
) {
1228 copy_iaoq_entry(cpu_gr
[link
], ctx
->iaoq_n
, ctx
->iaoq_n_var
);
1232 ctx
->null_cond
.c
= TCG_COND_ALWAYS
;
1239 copy_iaoq_entry(cpu_gr
[link
], ctx
->iaoq_n
, ctx
->iaoq_n_var
);
1242 if (is_n
&& use_nullify_skip(ctx
)) {
1243 nullify_set(ctx
, 0);
1244 gen_goto_tb(ctx
, 0, dest
, dest
+ 4);
1246 nullify_set(ctx
, is_n
);
1247 gen_goto_tb(ctx
, 0, ctx
->iaoq_b
, dest
);
1250 nullify_end(ctx
, NO_EXIT
);
1252 nullify_set(ctx
, 0);
1253 gen_goto_tb(ctx
, 1, ctx
->iaoq_b
, ctx
->iaoq_n
);
1254 return EXIT_GOTO_TB
;
1258 /* Emit a conditional branch to a direct target. If the branch itself
1259 is nullified, we should have already used nullify_over. */
1260 static ExitStatus
do_cbranch(DisasContext
*ctx
, target_long disp
, bool is_n
,
1263 target_ulong dest
= iaoq_dest(ctx
, disp
);
1264 TCGLabel
*taken
= NULL
;
1265 TCGCond c
= cond
->c
;
1269 assert(ctx
->null_cond
.c
== TCG_COND_NEVER
);
1271 /* Handle TRUE and NEVER as direct branches. */
1272 if (c
== TCG_COND_ALWAYS
) {
1273 return do_dbranch(ctx
, dest
, 0, is_n
&& disp
>= 0);
1275 if (c
== TCG_COND_NEVER
) {
1276 return do_dbranch(ctx
, ctx
->iaoq_n
, 0, is_n
&& disp
< 0);
1279 taken
= gen_new_label();
1281 tcg_gen_brcond_tl(c
, cond
->a0
, cond
->a1
, taken
);
1284 /* Not taken: Condition not satisfied; nullify on backward branches. */
1285 n
= is_n
&& disp
< 0;
1286 if (n
&& use_nullify_skip(ctx
)) {
1287 nullify_set(ctx
, 0);
1288 gen_goto_tb(ctx
, which
++, ctx
->iaoq_n
, ctx
->iaoq_n
+ 4);
1290 if (!n
&& ctx
->null_lab
) {
1291 gen_set_label(ctx
->null_lab
);
1292 ctx
->null_lab
= NULL
;
1294 nullify_set(ctx
, n
);
1295 gen_goto_tb(ctx
, which
++, ctx
->iaoq_b
, ctx
->iaoq_n
);
1298 gen_set_label(taken
);
1300 /* Taken: Condition satisfied; nullify on forward branches. */
1301 n
= is_n
&& disp
>= 0;
1302 if (n
&& use_nullify_skip(ctx
)) {
1303 nullify_set(ctx
, 0);
1304 gen_goto_tb(ctx
, which
++, dest
, dest
+ 4);
1306 nullify_set(ctx
, n
);
1307 gen_goto_tb(ctx
, which
++, ctx
->iaoq_b
, dest
);
1310 /* Not taken: the branch itself was nullified. */
1311 if (ctx
->null_lab
) {
1312 gen_set_label(ctx
->null_lab
);
1313 ctx
->null_lab
= NULL
;
1315 nullify_set(ctx
, 0);
1316 gen_goto_tb(ctx
, which
, ctx
->iaoq_b
, ctx
->iaoq_n
);
1317 return EXIT_GOTO_TB
;
1319 return EXIT_IAQ_N_STALE
;
1322 return EXIT_GOTO_TB
;
1326 /* Emit an unconditional branch to an indirect target. This handles
1327 nullification of the branch itself. */
1328 static ExitStatus
do_ibranch(DisasContext
*ctx
, TCGv dest
,
1329 unsigned link
, bool is_n
)
1331 TCGv a0
, a1
, next
, tmp
;
1334 assert(ctx
->null_lab
== NULL
);
1336 if (ctx
->null_cond
.c
== TCG_COND_NEVER
) {
1338 copy_iaoq_entry(cpu_gr
[link
], ctx
->iaoq_n
, ctx
->iaoq_n_var
);
1340 next
= get_temp(ctx
);
1341 tcg_gen_mov_tl(next
, dest
);
1343 ctx
->iaoq_n_var
= next
;
1345 ctx
->null_cond
.c
= TCG_COND_ALWAYS
;
1347 } else if (is_n
&& use_nullify_skip(ctx
)) {
1348 /* The (conditional) branch, B, nullifies the next insn, N,
1349 and we're allowed to skip execution N (no single-step or
1350 tracepoint in effect). Since the exit_tb that we must use
1351 for the indirect branch consumes no special resources, we
1352 can (conditionally) skip B and continue execution. */
1353 /* The use_nullify_skip test implies we have a known control path. */
1354 tcg_debug_assert(ctx
->iaoq_b
!= -1);
1355 tcg_debug_assert(ctx
->iaoq_n
!= -1);
1357 /* We do have to handle the non-local temporary, DEST, before
1358 branching. Since IOAQ_F is not really live at this point, we
1359 can simply store DEST optimistically. Similarly with IAOQ_B. */
1360 tcg_gen_mov_tl(cpu_iaoq_f
, dest
);
1361 tcg_gen_addi_tl(cpu_iaoq_b
, dest
, 4);
1365 tcg_gen_movi_tl(cpu_gr
[link
], ctx
->iaoq_n
);
1368 return nullify_end(ctx
, NO_EXIT
);
1370 cond_prep(&ctx
->null_cond
);
1371 c
= ctx
->null_cond
.c
;
1372 a0
= ctx
->null_cond
.a0
;
1373 a1
= ctx
->null_cond
.a1
;
1375 tmp
= tcg_temp_new();
1376 next
= get_temp(ctx
);
1378 copy_iaoq_entry(tmp
, ctx
->iaoq_n
, ctx
->iaoq_n_var
);
1379 tcg_gen_movcond_tl(c
, next
, a0
, a1
, tmp
, dest
);
1381 ctx
->iaoq_n_var
= next
;
1384 tcg_gen_movcond_tl(c
, cpu_gr
[link
], a0
, a1
, cpu_gr
[link
], tmp
);
1388 /* The branch nullifies the next insn, which means the state of N
1389 after the branch is the inverse of the state of N that applied
1391 tcg_gen_setcond_tl(tcg_invert_cond(c
), cpu_psw_n
, a0
, a1
);
1392 cond_free(&ctx
->null_cond
);
1393 ctx
->null_cond
= cond_make_n();
1394 ctx
->psw_n_nonzero
= true;
1396 cond_free(&ctx
->null_cond
);
/* On Linux, page zero is normally marked execute only + gateway.
   Therefore normal read or write is supposed to fail, but specific
   offsets have kernel code mapped to raise permissions to implement
   system calls.  Handling this via an explicit check here, rather
   than in the "be disp(sr2,r0)" instruction that probably sent us
   here, is the easiest way to handle the branch delay slot on the
   aforementioned BE.  */
1410 static ExitStatus
do_page_zero(DisasContext
*ctx
)
1412 /* If by some means we get here with PSW[N]=1, that implies that
1413 the B,GATE instruction would be skipped, and we'd fault on the
1414 next insn within the privilaged page. */
1415 switch (ctx
->null_cond
.c
) {
1416 case TCG_COND_NEVER
:
1418 case TCG_COND_ALWAYS
:
1419 tcg_gen_movi_tl(cpu_psw_n
, 0);
1422 /* Since this is always the first (and only) insn within the
1423 TB, we should know the state of PSW[N] from TB->FLAGS. */
1424 g_assert_not_reached();
1427 /* Check that we didn't arrive here via some means that allowed
1428 non-sequential instruction execution. Normally the PSW[B] bit
1429 detects this by disallowing the B,GATE instruction to execute
1430 under such conditions. */
1431 if (ctx
->iaoq_b
!= ctx
->iaoq_f
+ 4) {
1435 switch (ctx
->iaoq_f
) {
1436 case 0x00: /* Null pointer call */
1437 gen_excp_1(EXCP_SIGSEGV
);
1438 return EXIT_NORETURN
;
1440 case 0xb0: /* LWS */
1441 gen_excp_1(EXCP_SYSCALL_LWS
);
1442 return EXIT_NORETURN
;
1444 case 0xe0: /* SET_THREAD_POINTER */
1445 tcg_gen_mov_tl(cpu_cr27
, cpu_gr
[26]);
1446 tcg_gen_mov_tl(cpu_iaoq_f
, cpu_gr
[31]);
1447 tcg_gen_addi_tl(cpu_iaoq_b
, cpu_iaoq_f
, 4);
1448 return EXIT_IAQ_N_UPDATED
;
1450 case 0x100: /* SYSCALL */
1451 gen_excp_1(EXCP_SYSCALL
);
1452 return EXIT_NORETURN
;
1456 gen_excp_1(EXCP_SIGILL
);
1457 return EXIT_NORETURN
;
1461 static ExitStatus
trans_nop(DisasContext
*ctx
, uint32_t insn
,
1462 const DisasInsn
*di
)
1464 cond_free(&ctx
->null_cond
);
1468 static ExitStatus
trans_add(DisasContext
*ctx
, uint32_t insn
,
1469 const DisasInsn
*di
)
1471 unsigned r2
= extract32(insn
, 21, 5);
1472 unsigned r1
= extract32(insn
, 16, 5);
1473 unsigned cf
= extract32(insn
, 12, 4);
1474 unsigned ext
= extract32(insn
, 8, 4);
1475 unsigned shift
= extract32(insn
, 6, 2);
1476 unsigned rt
= extract32(insn
, 0, 5);
1477 TCGv tcg_r1
, tcg_r2
;
1481 bool is_tsv
= false;
1485 case 0x6: /* ADD, SHLADD */
1487 case 0xa: /* ADD,L, SHLADD,L */
1490 case 0xe: /* ADD,TSV, SHLADD,TSV (1) */
1493 case 0x7: /* ADD,C */
1496 case 0xf: /* ADD,C,TSV */
1497 is_c
= is_tsv
= true;
1500 return gen_illegal(ctx
);
1506 tcg_r1
= load_gpr(ctx
, r1
);
1507 tcg_r2
= load_gpr(ctx
, r2
);
1508 ret
= do_add(ctx
, rt
, tcg_r1
, tcg_r2
, shift
, is_l
, is_tsv
, is_tc
, is_c
, cf
);
1509 return nullify_end(ctx
, ret
);
1512 static ExitStatus
trans_sub(DisasContext
*ctx
, uint32_t insn
,
1513 const DisasInsn
*di
)
1515 unsigned r2
= extract32(insn
, 21, 5);
1516 unsigned r1
= extract32(insn
, 16, 5);
1517 unsigned cf
= extract32(insn
, 12, 4);
1518 unsigned ext
= extract32(insn
, 6, 6);
1519 unsigned rt
= extract32(insn
, 0, 5);
1520 TCGv tcg_r1
, tcg_r2
;
1523 bool is_tsv
= false;
1527 case 0x10: /* SUB */
1529 case 0x30: /* SUB,TSV */
1532 case 0x14: /* SUB,B */
1535 case 0x34: /* SUB,B,TSV */
1536 is_b
= is_tsv
= true;
1538 case 0x13: /* SUB,TC */
1541 case 0x33: /* SUB,TSV,TC */
1542 is_tc
= is_tsv
= true;
1545 return gen_illegal(ctx
);
1551 tcg_r1
= load_gpr(ctx
, r1
);
1552 tcg_r2
= load_gpr(ctx
, r2
);
1553 ret
= do_sub(ctx
, rt
, tcg_r1
, tcg_r2
, is_tsv
, is_b
, is_tc
, cf
);
1554 return nullify_end(ctx
, ret
);
1557 static ExitStatus
trans_log(DisasContext
*ctx
, uint32_t insn
,
1558 const DisasInsn
*di
)
1560 unsigned r2
= extract32(insn
, 21, 5);
1561 unsigned r1
= extract32(insn
, 16, 5);
1562 unsigned cf
= extract32(insn
, 12, 4);
1563 unsigned rt
= extract32(insn
, 0, 5);
1564 TCGv tcg_r1
, tcg_r2
;
1570 tcg_r1
= load_gpr(ctx
, r1
);
1571 tcg_r2
= load_gpr(ctx
, r2
);
1572 ret
= do_log(ctx
, rt
, tcg_r1
, tcg_r2
, cf
, di
->f_ttt
);
1573 return nullify_end(ctx
, ret
);
1576 /* OR r,0,t -> COPY (according to gas) */
1577 static ExitStatus
trans_copy(DisasContext
*ctx
, uint32_t insn
,
1578 const DisasInsn
*di
)
1580 unsigned r1
= extract32(insn
, 16, 5);
1581 unsigned rt
= extract32(insn
, 0, 5);
1584 TCGv dest
= dest_gpr(ctx
, rt
);
1585 tcg_gen_movi_tl(dest
, 0);
1586 save_gpr(ctx
, rt
, dest
);
1588 save_gpr(ctx
, rt
, cpu_gr
[r1
]);
1590 cond_free(&ctx
->null_cond
);
1594 static ExitStatus
trans_cmpclr(DisasContext
*ctx
, uint32_t insn
,
1595 const DisasInsn
*di
)
1597 unsigned r2
= extract32(insn
, 21, 5);
1598 unsigned r1
= extract32(insn
, 16, 5);
1599 unsigned cf
= extract32(insn
, 12, 4);
1600 unsigned rt
= extract32(insn
, 0, 5);
1601 TCGv tcg_r1
, tcg_r2
;
1607 tcg_r1
= load_gpr(ctx
, r1
);
1608 tcg_r2
= load_gpr(ctx
, r2
);
1609 ret
= do_cmpclr(ctx
, rt
, tcg_r1
, tcg_r2
, cf
);
1610 return nullify_end(ctx
, ret
);
1613 static ExitStatus
trans_uxor(DisasContext
*ctx
, uint32_t insn
,
1614 const DisasInsn
*di
)
1616 unsigned r2
= extract32(insn
, 21, 5);
1617 unsigned r1
= extract32(insn
, 16, 5);
1618 unsigned cf
= extract32(insn
, 12, 4);
1619 unsigned rt
= extract32(insn
, 0, 5);
1620 TCGv tcg_r1
, tcg_r2
;
1626 tcg_r1
= load_gpr(ctx
, r1
);
1627 tcg_r2
= load_gpr(ctx
, r2
);
1628 ret
= do_unit(ctx
, rt
, tcg_r1
, tcg_r2
, cf
, false, tcg_gen_xor_tl
);
1629 return nullify_end(ctx
, ret
);
1632 static ExitStatus
trans_uaddcm(DisasContext
*ctx
, uint32_t insn
,
1633 const DisasInsn
*di
)
1635 unsigned r2
= extract32(insn
, 21, 5);
1636 unsigned r1
= extract32(insn
, 16, 5);
1637 unsigned cf
= extract32(insn
, 12, 4);
1638 unsigned is_tc
= extract32(insn
, 6, 1);
1639 unsigned rt
= extract32(insn
, 0, 5);
1640 TCGv tcg_r1
, tcg_r2
, tmp
;
1646 tcg_r1
= load_gpr(ctx
, r1
);
1647 tcg_r2
= load_gpr(ctx
, r2
);
1648 tmp
= get_temp(ctx
);
1649 tcg_gen_not_tl(tmp
, tcg_r2
);
1650 ret
= do_unit(ctx
, rt
, tcg_r1
, tmp
, cf
, is_tc
, tcg_gen_add_tl
);
1651 return nullify_end(ctx
, ret
);
1654 static ExitStatus
trans_dcor(DisasContext
*ctx
, uint32_t insn
,
1655 const DisasInsn
*di
)
1657 unsigned r2
= extract32(insn
, 21, 5);
1658 unsigned cf
= extract32(insn
, 12, 4);
1659 unsigned is_i
= extract32(insn
, 6, 1);
1660 unsigned rt
= extract32(insn
, 0, 5);
1666 tmp
= get_temp(ctx
);
1667 tcg_gen_shri_tl(tmp
, cpu_psw_cb
, 3);
1669 tcg_gen_not_tl(tmp
, tmp
);
1671 tcg_gen_andi_tl(tmp
, tmp
, 0x11111111);
1672 tcg_gen_muli_tl(tmp
, tmp
, 6);
1673 ret
= do_unit(ctx
, rt
, tmp
, load_gpr(ctx
, r2
), cf
, false,
1674 is_i
? tcg_gen_add_tl
: tcg_gen_sub_tl
);
1676 return nullify_end(ctx
, ret
);
1679 static ExitStatus
trans_ds(DisasContext
*ctx
, uint32_t insn
,
1680 const DisasInsn
*di
)
1682 unsigned r2
= extract32(insn
, 21, 5);
1683 unsigned r1
= extract32(insn
, 16, 5);
1684 unsigned cf
= extract32(insn
, 12, 4);
1685 unsigned rt
= extract32(insn
, 0, 5);
1686 TCGv dest
, add1
, add2
, addc
, zero
, in1
, in2
;
1690 in1
= load_gpr(ctx
, r1
);
1691 in2
= load_gpr(ctx
, r2
);
1693 add1
= tcg_temp_new();
1694 add2
= tcg_temp_new();
1695 addc
= tcg_temp_new();
1696 dest
= tcg_temp_new();
1697 zero
= tcg_const_tl(0);
1699 /* Form R1 << 1 | PSW[CB]{8}. */
1700 tcg_gen_add_tl(add1
, in1
, in1
);
1701 tcg_gen_add_tl(add1
, add1
, cpu_psw_cb_msb
);
1703 /* Add or subtract R2, depending on PSW[V]. Proper computation of
1704 carry{8} requires that we subtract via + ~R2 + 1, as described in
1705 the manual. By extracting and masking V, we can produce the
1706 proper inputs to the addition without movcond. */
1707 tcg_gen_sari_tl(addc
, cpu_psw_v
, TARGET_LONG_BITS
- 1);
1708 tcg_gen_xor_tl(add2
, in2
, addc
);
1709 tcg_gen_andi_tl(addc
, addc
, 1);
1710 /* ??? This is only correct for 32-bit. */
1711 tcg_gen_add2_i32(dest
, cpu_psw_cb_msb
, add1
, zero
, add2
, zero
);
1712 tcg_gen_add2_i32(dest
, cpu_psw_cb_msb
, dest
, cpu_psw_cb_msb
, addc
, zero
);
1714 tcg_temp_free(addc
);
1715 tcg_temp_free(zero
);
1717 /* Write back the result register. */
1718 save_gpr(ctx
, rt
, dest
);
1720 /* Write back PSW[CB]. */
1721 tcg_gen_xor_tl(cpu_psw_cb
, add1
, add2
);
1722 tcg_gen_xor_tl(cpu_psw_cb
, cpu_psw_cb
, dest
);
1724 /* Write back PSW[V] for the division step. */
1725 tcg_gen_neg_tl(cpu_psw_v
, cpu_psw_cb_msb
);
1726 tcg_gen_xor_tl(cpu_psw_v
, cpu_psw_v
, in2
);
1728 /* Install the new nullification. */
1733 /* ??? The lshift is supposed to contribute to overflow. */
1734 sv
= do_add_sv(ctx
, dest
, add1
, add2
);
1736 ctx
->null_cond
= do_cond(cf
, dest
, cpu_psw_cb_msb
, sv
);
1739 tcg_temp_free(add1
);
1740 tcg_temp_free(add2
);
1741 tcg_temp_free(dest
);
1743 return nullify_end(ctx
, NO_EXIT
);
1746 static const DisasInsn table_arith_log
[] = {
1747 { 0x08000240u
, 0xfc00ffffu
, trans_nop
}, /* or x,y,0 */
1748 { 0x08000240u
, 0xffe0ffe0u
, trans_copy
}, /* or x,0,t */
1749 { 0x08000000u
, 0xfc000fe0u
, trans_log
, .f_ttt
= tcg_gen_andc_tl
},
1750 { 0x08000200u
, 0xfc000fe0u
, trans_log
, .f_ttt
= tcg_gen_and_tl
},
1751 { 0x08000240u
, 0xfc000fe0u
, trans_log
, .f_ttt
= tcg_gen_or_tl
},
1752 { 0x08000280u
, 0xfc000fe0u
, trans_log
, .f_ttt
= tcg_gen_xor_tl
},
1753 { 0x08000880u
, 0xfc000fe0u
, trans_cmpclr
},
1754 { 0x08000380u
, 0xfc000fe0u
, trans_uxor
},
1755 { 0x08000980u
, 0xfc000fa0u
, trans_uaddcm
},
1756 { 0x08000b80u
, 0xfc1f0fa0u
, trans_dcor
},
1757 { 0x08000440u
, 0xfc000fe0u
, trans_ds
},
1758 { 0x08000700u
, 0xfc0007e0u
, trans_add
}, /* add */
1759 { 0x08000400u
, 0xfc0006e0u
, trans_sub
}, /* sub; sub,b; sub,tsv */
1760 { 0x080004c0u
, 0xfc0007e0u
, trans_sub
}, /* sub,tc; sub,tsv,tc */
1761 { 0x08000200u
, 0xfc000320u
, trans_add
}, /* shladd */
1764 static ExitStatus
trans_addi(DisasContext
*ctx
, uint32_t insn
)
1766 target_long im
= low_sextract(insn
, 0, 11);
1767 unsigned e1
= extract32(insn
, 11, 1);
1768 unsigned cf
= extract32(insn
, 12, 4);
1769 unsigned rt
= extract32(insn
, 16, 5);
1770 unsigned r2
= extract32(insn
, 21, 5);
1771 unsigned o1
= extract32(insn
, 26, 1);
1772 TCGv tcg_im
, tcg_r2
;
1779 tcg_im
= load_const(ctx
, im
);
1780 tcg_r2
= load_gpr(ctx
, r2
);
1781 ret
= do_add(ctx
, rt
, tcg_im
, tcg_r2
, 0, false, e1
, !o1
, false, cf
);
1783 return nullify_end(ctx
, ret
);
1786 static ExitStatus
trans_subi(DisasContext
*ctx
, uint32_t insn
)
1788 target_long im
= low_sextract(insn
, 0, 11);
1789 unsigned e1
= extract32(insn
, 11, 1);
1790 unsigned cf
= extract32(insn
, 12, 4);
1791 unsigned rt
= extract32(insn
, 16, 5);
1792 unsigned r2
= extract32(insn
, 21, 5);
1793 TCGv tcg_im
, tcg_r2
;
1800 tcg_im
= load_const(ctx
, im
);
1801 tcg_r2
= load_gpr(ctx
, r2
);
1802 ret
= do_sub(ctx
, rt
, tcg_im
, tcg_r2
, e1
, false, false, cf
);
1804 return nullify_end(ctx
, ret
);
1807 static ExitStatus
trans_cmpiclr(DisasContext
*ctx
, uint32_t insn
)
1809 target_long im
= low_sextract(insn
, 0, 11);
1810 unsigned cf
= extract32(insn
, 12, 4);
1811 unsigned rt
= extract32(insn
, 16, 5);
1812 unsigned r2
= extract32(insn
, 21, 5);
1813 TCGv tcg_im
, tcg_r2
;
1820 tcg_im
= load_const(ctx
, im
);
1821 tcg_r2
= load_gpr(ctx
, r2
);
1822 ret
= do_cmpclr(ctx
, rt
, tcg_im
, tcg_r2
, cf
);
1824 return nullify_end(ctx
, ret
);
1827 static ExitStatus
trans_ld_idx_i(DisasContext
*ctx
, uint32_t insn
,
1828 const DisasInsn
*di
)
1830 unsigned rt
= extract32(insn
, 0, 5);
1831 unsigned m
= extract32(insn
, 5, 1);
1832 unsigned sz
= extract32(insn
, 6, 2);
1833 unsigned a
= extract32(insn
, 13, 1);
1834 int disp
= low_sextract(insn
, 16, 5);
1835 unsigned rb
= extract32(insn
, 21, 5);
1836 int modify
= (m
? (a
? -1 : 1) : 0);
1837 TCGMemOp mop
= MO_TE
| sz
;
1839 return do_load(ctx
, rt
, rb
, 0, 0, disp
, modify
, mop
);
1842 static ExitStatus
trans_ld_idx_x(DisasContext
*ctx
, uint32_t insn
,
1843 const DisasInsn
*di
)
1845 unsigned rt
= extract32(insn
, 0, 5);
1846 unsigned m
= extract32(insn
, 5, 1);
1847 unsigned sz
= extract32(insn
, 6, 2);
1848 unsigned u
= extract32(insn
, 13, 1);
1849 unsigned rx
= extract32(insn
, 16, 5);
1850 unsigned rb
= extract32(insn
, 21, 5);
1851 TCGMemOp mop
= MO_TE
| sz
;
1853 return do_load(ctx
, rt
, rb
, rx
, u
? sz
: 0, 0, m
, mop
);
1856 static ExitStatus
trans_st_idx_i(DisasContext
*ctx
, uint32_t insn
,
1857 const DisasInsn
*di
)
1859 int disp
= low_sextract(insn
, 0, 5);
1860 unsigned m
= extract32(insn
, 5, 1);
1861 unsigned sz
= extract32(insn
, 6, 2);
1862 unsigned a
= extract32(insn
, 13, 1);
1863 unsigned rr
= extract32(insn
, 16, 5);
1864 unsigned rb
= extract32(insn
, 21, 5);
1865 int modify
= (m
? (a
? -1 : 1) : 0);
1866 TCGMemOp mop
= MO_TE
| sz
;
1868 return do_store(ctx
, rr
, rb
, disp
, modify
, mop
);
1871 static ExitStatus
trans_ldcw(DisasContext
*ctx
, uint32_t insn
,
1872 const DisasInsn
*di
)
1874 unsigned rt
= extract32(insn
, 0, 5);
1875 unsigned m
= extract32(insn
, 5, 1);
1876 unsigned i
= extract32(insn
, 12, 1);
1877 unsigned au
= extract32(insn
, 13, 1);
1878 unsigned rx
= extract32(insn
, 16, 5);
1879 unsigned rb
= extract32(insn
, 21, 5);
1880 TCGMemOp mop
= MO_TEUL
| MO_ALIGN_16
;
1881 TCGv zero
, addr
, base
, dest
;
1882 int modify
, disp
= 0, scale
= 0;
1886 /* ??? Share more code with do_load and do_load_{32,64}. */
1889 modify
= (m
? (au
? -1 : 1) : 0);
1890 disp
= low_sextract(rx
, 0, 5);
1895 scale
= mop
& MO_SIZE
;
1899 /* Base register modification. Make sure if RT == RB, we see
1900 the result of the load. */
1901 dest
= get_temp(ctx
);
1903 dest
= dest_gpr(ctx
, rt
);
1906 addr
= tcg_temp_new();
1907 base
= load_gpr(ctx
, rb
);
1909 tcg_gen_shli_tl(addr
, cpu_gr
[rx
], scale
);
1910 tcg_gen_add_tl(addr
, addr
, base
);
1912 tcg_gen_addi_tl(addr
, base
, disp
);
1915 zero
= tcg_const_tl(0);
1916 tcg_gen_atomic_xchg_tl(dest
, (modify
<= 0 ? addr
: base
),
1917 zero
, MMU_USER_IDX
, mop
);
1919 save_gpr(ctx
, rb
, addr
);
1921 save_gpr(ctx
, rt
, dest
);
1923 return nullify_end(ctx
, NO_EXIT
);
1926 static ExitStatus
trans_stby(DisasContext
*ctx
, uint32_t insn
,
1927 const DisasInsn
*di
)
1929 target_long disp
= low_sextract(insn
, 0, 5);
1930 unsigned m
= extract32(insn
, 5, 1);
1931 unsigned a
= extract32(insn
, 13, 1);
1932 unsigned rt
= extract32(insn
, 16, 5);
1933 unsigned rb
= extract32(insn
, 21, 5);
1938 addr
= tcg_temp_new();
1939 if (m
|| disp
== 0) {
1940 tcg_gen_mov_tl(addr
, load_gpr(ctx
, rb
));
1942 tcg_gen_addi_tl(addr
, load_gpr(ctx
, rb
), disp
);
1944 val
= load_gpr(ctx
, rt
);
1947 gen_helper_stby_e(cpu_env
, addr
, val
);
1949 gen_helper_stby_b(cpu_env
, addr
, val
);
1953 tcg_gen_addi_tl(addr
, addr
, disp
);
1954 tcg_gen_andi_tl(addr
, addr
, ~3);
1955 save_gpr(ctx
, rb
, addr
);
1957 tcg_temp_free(addr
);
1959 return nullify_end(ctx
, NO_EXIT
);
1962 static const DisasInsn table_index_mem
[] = {
1963 { 0x0c001000u
, 0xfc001300, trans_ld_idx_i
}, /* LD[BHWD], im */
1964 { 0x0c000000u
, 0xfc001300, trans_ld_idx_x
}, /* LD[BHWD], rx */
1965 { 0x0c001200u
, 0xfc001300, trans_st_idx_i
}, /* ST[BHWD] */
1966 { 0x0c0001c0u
, 0xfc0003c0, trans_ldcw
},
1967 { 0x0c001300u
, 0xfc0013c0, trans_stby
},
1970 static ExitStatus
trans_ldil(DisasContext
*ctx
, uint32_t insn
)
1972 unsigned rt
= extract32(insn
, 21, 5);
1973 target_long i
= assemble_21(insn
);
1974 TCGv tcg_rt
= dest_gpr(ctx
, rt
);
1976 tcg_gen_movi_tl(tcg_rt
, i
);
1977 save_gpr(ctx
, rt
, tcg_rt
);
1978 cond_free(&ctx
->null_cond
);
1983 static ExitStatus
trans_addil(DisasContext
*ctx
, uint32_t insn
)
1985 unsigned rt
= extract32(insn
, 21, 5);
1986 target_long i
= assemble_21(insn
);
1987 TCGv tcg_rt
= load_gpr(ctx
, rt
);
1988 TCGv tcg_r1
= dest_gpr(ctx
, 1);
1990 tcg_gen_addi_tl(tcg_r1
, tcg_rt
, i
);
1991 save_gpr(ctx
, 1, tcg_r1
);
1992 cond_free(&ctx
->null_cond
);
1997 static ExitStatus
trans_ldo(DisasContext
*ctx
, uint32_t insn
)
1999 unsigned rb
= extract32(insn
, 21, 5);
2000 unsigned rt
= extract32(insn
, 16, 5);
2001 target_long i
= assemble_16(insn
);
2002 TCGv tcg_rt
= dest_gpr(ctx
, rt
);
2004 /* Special case rb == 0, for the LDI pseudo-op.
2005 The COPY pseudo-op is handled for free within tcg_gen_addi_tl. */
2007 tcg_gen_movi_tl(tcg_rt
, i
);
2009 tcg_gen_addi_tl(tcg_rt
, cpu_gr
[rb
], i
);
2011 save_gpr(ctx
, rt
, tcg_rt
);
2012 cond_free(&ctx
->null_cond
);
2017 static ExitStatus
trans_load(DisasContext
*ctx
, uint32_t insn
,
2018 bool is_mod
, TCGMemOp mop
)
2020 unsigned rb
= extract32(insn
, 21, 5);
2021 unsigned rt
= extract32(insn
, 16, 5);
2022 target_long i
= assemble_16(insn
);
2024 return do_load(ctx
, rt
, rb
, 0, 0, i
, is_mod
? (i
< 0 ? -1 : 1) : 0, mop
);
2027 static ExitStatus
trans_load_w(DisasContext
*ctx
, uint32_t insn
)
2029 unsigned rb
= extract32(insn
, 21, 5);
2030 unsigned rt
= extract32(insn
, 16, 5);
2031 target_long i
= assemble_16a(insn
);
2032 unsigned ext2
= extract32(insn
, 1, 2);
2037 /* FLDW without modification. */
2038 return do_floadw(ctx
, ext2
* 32 + rt
, rb
, 0, 0, i
, 0);
2040 /* LDW with modification. Note that the sign of I selects
2041 post-dec vs pre-inc. */
2042 return do_load(ctx
, rt
, rb
, 0, 0, i
, (i
< 0 ? 1 : -1), MO_TEUL
);
2044 return gen_illegal(ctx
);
2048 static ExitStatus
trans_fload_mod(DisasContext
*ctx
, uint32_t insn
)
2050 target_long i
= assemble_16a(insn
);
2051 unsigned t1
= extract32(insn
, 1, 1);
2052 unsigned a
= extract32(insn
, 2, 1);
2053 unsigned t0
= extract32(insn
, 16, 5);
2054 unsigned rb
= extract32(insn
, 21, 5);
2056 /* FLDW with modification. */
2057 return do_floadw(ctx
, t1
* 32 + t0
, rb
, 0, 0, i
, (a
? -1 : 1));
2060 static ExitStatus
trans_store(DisasContext
*ctx
, uint32_t insn
,
2061 bool is_mod
, TCGMemOp mop
)
2063 unsigned rb
= extract32(insn
, 21, 5);
2064 unsigned rt
= extract32(insn
, 16, 5);
2065 target_long i
= assemble_16(insn
);
2067 return do_store(ctx
, rt
, rb
, i
, is_mod
? (i
< 0 ? -1 : 1) : 0, mop
);
2070 static ExitStatus
trans_store_w(DisasContext
*ctx
, uint32_t insn
)
2072 unsigned rb
= extract32(insn
, 21, 5);
2073 unsigned rt
= extract32(insn
, 16, 5);
2074 target_long i
= assemble_16a(insn
);
2075 unsigned ext2
= extract32(insn
, 1, 2);
2080 /* FSTW without modification. */
2081 return do_fstorew(ctx
, ext2
* 32 + rt
, rb
, 0, 0, i
, 0);
2083 /* LDW with modification. */
2084 return do_store(ctx
, rt
, rb
, i
, (i
< 0 ? 1 : -1), MO_TEUL
);
2086 return gen_illegal(ctx
);
2090 static ExitStatus
trans_fstore_mod(DisasContext
*ctx
, uint32_t insn
)
2092 target_long i
= assemble_16a(insn
);
2093 unsigned t1
= extract32(insn
, 1, 1);
2094 unsigned a
= extract32(insn
, 2, 1);
2095 unsigned t0
= extract32(insn
, 16, 5);
2096 unsigned rb
= extract32(insn
, 21, 5);
2098 /* FSTW with modification. */
2099 return do_fstorew(ctx
, t1
* 32 + t0
, rb
, 0, 0, i
, (a
? -1 : 1));
2102 static ExitStatus
trans_copr_w(DisasContext
*ctx
, uint32_t insn
)
2104 unsigned t0
= extract32(insn
, 0, 5);
2105 unsigned m
= extract32(insn
, 5, 1);
2106 unsigned t1
= extract32(insn
, 6, 1);
2107 unsigned ext3
= extract32(insn
, 7, 3);
2108 /* unsigned cc = extract32(insn, 10, 2); */
2109 unsigned i
= extract32(insn
, 12, 1);
2110 unsigned ua
= extract32(insn
, 13, 1);
2111 unsigned rx
= extract32(insn
, 16, 5);
2112 unsigned rb
= extract32(insn
, 21, 5);
2113 unsigned rt
= t1
* 32 + t0
;
2114 int modify
= (m
? (ua
? -1 : 1) : 0);
2118 scale
= (ua
? 2 : 0);
2122 disp
= low_sextract(rx
, 0, 5);
2125 modify
= (m
? (ua
? -1 : 1) : 0);
2130 return do_floadw(ctx
, rt
, rb
, rx
, scale
, disp
, modify
);
2132 return do_fstorew(ctx
, rt
, rb
, rx
, scale
, disp
, modify
);
2134 return gen_illegal(ctx
);
2137 static ExitStatus
trans_copr_dw(DisasContext
*ctx
, uint32_t insn
)
2139 unsigned rt
= extract32(insn
, 0, 5);
2140 unsigned m
= extract32(insn
, 5, 1);
2141 unsigned ext4
= extract32(insn
, 6, 4);
2142 /* unsigned cc = extract32(insn, 10, 2); */
2143 unsigned i
= extract32(insn
, 12, 1);
2144 unsigned ua
= extract32(insn
, 13, 1);
2145 unsigned rx
= extract32(insn
, 16, 5);
2146 unsigned rb
= extract32(insn
, 21, 5);
2147 int modify
= (m
? (ua
? -1 : 1) : 0);
2151 scale
= (ua
? 3 : 0);
2155 disp
= low_sextract(rx
, 0, 5);
2158 modify
= (m
? (ua
? -1 : 1) : 0);
2163 return do_floadd(ctx
, rt
, rb
, rx
, scale
, disp
, modify
);
2165 return do_fstored(ctx
, rt
, rb
, rx
, scale
, disp
, modify
);
2167 return gen_illegal(ctx
);
2171 static ExitStatus
trans_cmpb(DisasContext
*ctx
, uint32_t insn
,
2172 bool is_true
, bool is_imm
, bool is_dw
)
2174 target_long disp
= assemble_12(insn
) * 4;
2175 unsigned n
= extract32(insn
, 1, 1);
2176 unsigned c
= extract32(insn
, 13, 3);
2177 unsigned r
= extract32(insn
, 21, 5);
2178 unsigned cf
= c
* 2 + !is_true
;
2179 TCGv dest
, in1
, in2
, sv
;
2185 in1
= load_const(ctx
, low_sextract(insn
, 16, 5));
2187 in1
= load_gpr(ctx
, extract32(insn
, 16, 5));
2189 in2
= load_gpr(ctx
, r
);
2190 dest
= get_temp(ctx
);
2192 tcg_gen_sub_tl(dest
, in1
, in2
);
2196 sv
= do_sub_sv(ctx
, dest
, in1
, in2
);
2199 cond
= do_sub_cond(cf
, dest
, in1
, in2
, sv
);
2200 return do_cbranch(ctx
, disp
, n
, &cond
);
2203 static ExitStatus
trans_addb(DisasContext
*ctx
, uint32_t insn
,
2204 bool is_true
, bool is_imm
)
2206 target_long disp
= assemble_12(insn
) * 4;
2207 unsigned n
= extract32(insn
, 1, 1);
2208 unsigned c
= extract32(insn
, 13, 3);
2209 unsigned r
= extract32(insn
, 21, 5);
2210 unsigned cf
= c
* 2 + !is_true
;
2211 TCGv dest
, in1
, in2
, sv
, cb_msb
;
2217 in1
= load_const(ctx
, low_sextract(insn
, 16, 5));
2219 in1
= load_gpr(ctx
, extract32(insn
, 16, 5));
2221 in2
= load_gpr(ctx
, r
);
2222 dest
= dest_gpr(ctx
, r
);
2224 TCGV_UNUSED(cb_msb
);
2228 tcg_gen_add_tl(dest
, in1
, in2
);
2231 cb_msb
= get_temp(ctx
);
2232 tcg_gen_movi_tl(cb_msb
, 0);
2233 tcg_gen_add2_tl(dest
, cb_msb
, in1
, cb_msb
, in2
, cb_msb
);
2236 tcg_gen_add_tl(dest
, in1
, in2
);
2237 sv
= do_add_sv(ctx
, dest
, in1
, in2
);
2241 cond
= do_cond(cf
, dest
, cb_msb
, sv
);
2242 return do_cbranch(ctx
, disp
, n
, &cond
);
2245 static ExitStatus
trans_bb(DisasContext
*ctx
, uint32_t insn
)
2247 target_long disp
= assemble_12(insn
) * 4;
2248 unsigned n
= extract32(insn
, 1, 1);
2249 unsigned c
= extract32(insn
, 15, 1);
2250 unsigned r
= extract32(insn
, 16, 5);
2251 unsigned p
= extract32(insn
, 21, 5);
2252 unsigned i
= extract32(insn
, 26, 1);
2258 tmp
= tcg_temp_new();
2259 tcg_r
= load_gpr(ctx
, r
);
2261 tcg_gen_shli_tl(tmp
, tcg_r
, p
);
2263 tcg_gen_shl_tl(tmp
, tcg_r
, cpu_sar
);
2266 cond
= cond_make_0(c
? TCG_COND_GE
: TCG_COND_LT
, tmp
);
2268 return do_cbranch(ctx
, disp
, n
, &cond
);
2271 static ExitStatus
trans_movb(DisasContext
*ctx
, uint32_t insn
, bool is_imm
)
2273 target_long disp
= assemble_12(insn
) * 4;
2274 unsigned n
= extract32(insn
, 1, 1);
2275 unsigned c
= extract32(insn
, 13, 3);
2276 unsigned t
= extract32(insn
, 16, 5);
2277 unsigned r
= extract32(insn
, 21, 5);
2283 dest
= dest_gpr(ctx
, r
);
2285 tcg_gen_movi_tl(dest
, low_sextract(t
, 0, 5));
2286 } else if (t
== 0) {
2287 tcg_gen_movi_tl(dest
, 0);
2289 tcg_gen_mov_tl(dest
, cpu_gr
[t
]);
2292 cond
= do_sed_cond(c
, dest
);
2293 return do_cbranch(ctx
, disp
, n
, &cond
);
2296 static ExitStatus
trans_shrpw_sar(DisasContext
*ctx
, uint32_t insn
,
2297 const DisasInsn
*di
)
2299 unsigned rt
= extract32(insn
, 0, 5);
2300 unsigned c
= extract32(insn
, 13, 3);
2301 unsigned r1
= extract32(insn
, 16, 5);
2302 unsigned r2
= extract32(insn
, 21, 5);
2309 dest
= dest_gpr(ctx
, rt
);
2311 tcg_gen_ext32u_tl(dest
, load_gpr(ctx
, r2
));
2312 tcg_gen_shr_tl(dest
, dest
, cpu_sar
);
2313 } else if (r1
== r2
) {
2314 TCGv_i32 t32
= tcg_temp_new_i32();
2315 tcg_gen_trunc_tl_i32(t32
, load_gpr(ctx
, r2
));
2316 tcg_gen_rotr_i32(t32
, t32
, cpu_sar
);
2317 tcg_gen_extu_i32_tl(dest
, t32
);
2318 tcg_temp_free_i32(t32
);
2320 TCGv_i64 t
= tcg_temp_new_i64();
2321 TCGv_i64 s
= tcg_temp_new_i64();
2323 tcg_gen_concat_tl_i64(t
, load_gpr(ctx
, r2
), load_gpr(ctx
, r1
));
2324 tcg_gen_extu_tl_i64(s
, cpu_sar
);
2325 tcg_gen_shr_i64(t
, t
, s
);
2326 tcg_gen_trunc_i64_tl(dest
, t
);
2328 tcg_temp_free_i64(t
);
2329 tcg_temp_free_i64(s
);
2331 save_gpr(ctx
, rt
, dest
);
2333 /* Install the new nullification. */
2334 cond_free(&ctx
->null_cond
);
2336 ctx
->null_cond
= do_sed_cond(c
, dest
);
2338 return nullify_end(ctx
, NO_EXIT
);
2341 static ExitStatus
trans_shrpw_imm(DisasContext
*ctx
, uint32_t insn
,
2342 const DisasInsn
*di
)
2344 unsigned rt
= extract32(insn
, 0, 5);
2345 unsigned cpos
= extract32(insn
, 5, 5);
2346 unsigned c
= extract32(insn
, 13, 3);
2347 unsigned r1
= extract32(insn
, 16, 5);
2348 unsigned r2
= extract32(insn
, 21, 5);
2349 unsigned sa
= 31 - cpos
;
2356 dest
= dest_gpr(ctx
, rt
);
2357 t2
= load_gpr(ctx
, r2
);
2359 TCGv_i32 t32
= tcg_temp_new_i32();
2360 tcg_gen_trunc_tl_i32(t32
, t2
);
2361 tcg_gen_rotri_i32(t32
, t32
, sa
);
2362 tcg_gen_extu_i32_tl(dest
, t32
);
2363 tcg_temp_free_i32(t32
);
2364 } else if (r1
== 0) {
2365 tcg_gen_extract_tl(dest
, t2
, sa
, 32 - sa
);
2367 TCGv t0
= tcg_temp_new();
2368 tcg_gen_extract_tl(t0
, t2
, sa
, 32 - sa
);
2369 tcg_gen_deposit_tl(dest
, t0
, cpu_gr
[r1
], 32 - sa
, sa
);
2372 save_gpr(ctx
, rt
, dest
);
2374 /* Install the new nullification. */
2375 cond_free(&ctx
->null_cond
);
2377 ctx
->null_cond
= do_sed_cond(c
, dest
);
2379 return nullify_end(ctx
, NO_EXIT
);
2382 static ExitStatus
trans_extrw_sar(DisasContext
*ctx
, uint32_t insn
,
2383 const DisasInsn
*di
)
2385 unsigned clen
= extract32(insn
, 0, 5);
2386 unsigned is_se
= extract32(insn
, 10, 1);
2387 unsigned c
= extract32(insn
, 13, 3);
2388 unsigned rt
= extract32(insn
, 16, 5);
2389 unsigned rr
= extract32(insn
, 21, 5);
2390 unsigned len
= 32 - clen
;
2391 TCGv dest
, src
, tmp
;
2397 dest
= dest_gpr(ctx
, rt
);
2398 src
= load_gpr(ctx
, rr
);
2399 tmp
= tcg_temp_new();
2401 /* Recall that SAR is using big-endian bit numbering. */
2402 tcg_gen_xori_tl(tmp
, cpu_sar
, TARGET_LONG_BITS
- 1);
2404 tcg_gen_sar_tl(dest
, src
, tmp
);
2405 tcg_gen_sextract_tl(dest
, dest
, 0, len
);
2407 tcg_gen_shr_tl(dest
, src
, tmp
);
2408 tcg_gen_extract_tl(dest
, dest
, 0, len
);
2411 save_gpr(ctx
, rt
, dest
);
2413 /* Install the new nullification. */
2414 cond_free(&ctx
->null_cond
);
2416 ctx
->null_cond
= do_sed_cond(c
, dest
);
2418 return nullify_end(ctx
, NO_EXIT
);
2421 static ExitStatus
trans_extrw_imm(DisasContext
*ctx
, uint32_t insn
,
2422 const DisasInsn
*di
)
2424 unsigned clen
= extract32(insn
, 0, 5);
2425 unsigned pos
= extract32(insn
, 5, 5);
2426 unsigned is_se
= extract32(insn
, 10, 1);
2427 unsigned c
= extract32(insn
, 13, 3);
2428 unsigned rt
= extract32(insn
, 16, 5);
2429 unsigned rr
= extract32(insn
, 21, 5);
2430 unsigned len
= 32 - clen
;
2431 unsigned cpos
= 31 - pos
;
2438 dest
= dest_gpr(ctx
, rt
);
2439 src
= load_gpr(ctx
, rr
);
2441 tcg_gen_sextract_tl(dest
, src
, cpos
, len
);
2443 tcg_gen_extract_tl(dest
, src
, cpos
, len
);
2445 save_gpr(ctx
, rt
, dest
);
2447 /* Install the new nullification. */
2448 cond_free(&ctx
->null_cond
);
2450 ctx
->null_cond
= do_sed_cond(c
, dest
);
2452 return nullify_end(ctx
, NO_EXIT
);
2455 static const DisasInsn table_sh_ex
[] = {
2456 { 0xd0000000u
, 0xfc001fe0u
, trans_shrpw_sar
},
2457 { 0xd0000800u
, 0xfc001c00u
, trans_shrpw_imm
},
2458 { 0xd0001000u
, 0xfc001be0u
, trans_extrw_sar
},
2459 { 0xd0001800u
, 0xfc001800u
, trans_extrw_imm
},
2462 static ExitStatus
trans_depw_imm_c(DisasContext
*ctx
, uint32_t insn
,
2463 const DisasInsn
*di
)
2465 unsigned clen
= extract32(insn
, 0, 5);
2466 unsigned cpos
= extract32(insn
, 5, 5);
2467 unsigned nz
= extract32(insn
, 10, 1);
2468 unsigned c
= extract32(insn
, 13, 3);
2469 target_long val
= low_sextract(insn
, 16, 5);
2470 unsigned rt
= extract32(insn
, 21, 5);
2471 unsigned len
= 32 - clen
;
2472 target_long mask0
, mask1
;
2478 if (cpos
+ len
> 32) {
2482 dest
= dest_gpr(ctx
, rt
);
2483 mask0
= deposit64(0, cpos
, len
, val
);
2484 mask1
= deposit64(-1, cpos
, len
, val
);
2487 TCGv src
= load_gpr(ctx
, rt
);
2489 tcg_gen_andi_tl(dest
, src
, mask1
);
2492 tcg_gen_ori_tl(dest
, src
, mask0
);
2494 tcg_gen_movi_tl(dest
, mask0
);
2496 save_gpr(ctx
, rt
, dest
);
2498 /* Install the new nullification. */
2499 cond_free(&ctx
->null_cond
);
2501 ctx
->null_cond
= do_sed_cond(c
, dest
);
2503 return nullify_end(ctx
, NO_EXIT
);
2506 static ExitStatus
trans_depw_imm(DisasContext
*ctx
, uint32_t insn
,
2507 const DisasInsn
*di
)
2509 unsigned clen
= extract32(insn
, 0, 5);
2510 unsigned cpos
= extract32(insn
, 5, 5);
2511 unsigned nz
= extract32(insn
, 10, 1);
2512 unsigned c
= extract32(insn
, 13, 3);
2513 unsigned rr
= extract32(insn
, 16, 5);
2514 unsigned rt
= extract32(insn
, 21, 5);
2515 unsigned rs
= nz
? rt
: 0;
2516 unsigned len
= 32 - clen
;
2522 if (cpos
+ len
> 32) {
2526 dest
= dest_gpr(ctx
, rt
);
2527 val
= load_gpr(ctx
, rr
);
2529 tcg_gen_deposit_z_tl(dest
, val
, cpos
, len
);
2531 tcg_gen_deposit_tl(dest
, cpu_gr
[rs
], val
, cpos
, len
);
2533 save_gpr(ctx
, rt
, dest
);
2535 /* Install the new nullification. */
2536 cond_free(&ctx
->null_cond
);
2538 ctx
->null_cond
= do_sed_cond(c
, dest
);
2540 return nullify_end(ctx
, NO_EXIT
);
2543 static ExitStatus
trans_depw_sar(DisasContext
*ctx
, uint32_t insn
,
2544 const DisasInsn
*di
)
2546 unsigned clen
= extract32(insn
, 0, 5);
2547 unsigned nz
= extract32(insn
, 10, 1);
2548 unsigned i
= extract32(insn
, 12, 1);
2549 unsigned c
= extract32(insn
, 13, 3);
2550 unsigned rt
= extract32(insn
, 21, 5);
2551 unsigned rs
= nz
? rt
: 0;
2552 unsigned len
= 32 - clen
;
2553 TCGv val
, mask
, tmp
, shift
, dest
;
2554 unsigned msb
= 1U << (len
- 1);
2561 val
= load_const(ctx
, low_sextract(insn
, 16, 5));
2563 val
= load_gpr(ctx
, extract32(insn
, 16, 5));
2565 dest
= dest_gpr(ctx
, rt
);
2566 shift
= tcg_temp_new();
2567 tmp
= tcg_temp_new();
2569 /* Convert big-endian bit numbering in SAR to left-shift. */
2570 tcg_gen_xori_tl(shift
, cpu_sar
, TARGET_LONG_BITS
- 1);
2572 mask
= tcg_const_tl(msb
+ (msb
- 1));
2573 tcg_gen_and_tl(tmp
, val
, mask
);
2575 tcg_gen_shl_tl(mask
, mask
, shift
);
2576 tcg_gen_shl_tl(tmp
, tmp
, shift
);
2577 tcg_gen_andc_tl(dest
, cpu_gr
[rs
], mask
);
2578 tcg_gen_or_tl(dest
, dest
, tmp
);
2580 tcg_gen_shl_tl(dest
, tmp
, shift
);
2582 tcg_temp_free(shift
);
2583 tcg_temp_free(mask
);
2585 save_gpr(ctx
, rt
, dest
);
2587 /* Install the new nullification. */
2588 cond_free(&ctx
->null_cond
);
2590 ctx
->null_cond
= do_sed_cond(c
, dest
);
2592 return nullify_end(ctx
, NO_EXIT
);
2595 static const DisasInsn table_depw
[] = {
2596 { 0xd4000000u
, 0xfc000be0u
, trans_depw_sar
},
2597 { 0xd4000800u
, 0xfc001800u
, trans_depw_imm
},
2598 { 0xd4001800u
, 0xfc001800u
, trans_depw_imm_c
},
2601 static ExitStatus
trans_be(DisasContext
*ctx
, uint32_t insn
, bool is_l
)
2603 unsigned n
= extract32(insn
, 1, 1);
2604 unsigned b
= extract32(insn
, 21, 5);
2605 target_long disp
= assemble_17(insn
);
2607 /* unsigned s = low_uextract(insn, 13, 3); */
2608 /* ??? It seems like there should be a good way of using
2609 "be disp(sr2, r0)", the canonical gateway entry mechanism
2610 to our advantage. But that appears to be inconvenient to
2611 manage along side branch delay slots. Therefore we handle
2612 entry into the gateway page via absolute address. */
2614 /* Since we don't implement spaces, just branch. Do notice the special
2615 case of "be disp(*,r0)" using a direct branch to disp, so that we can
2616 goto_tb to the TB containing the syscall. */
2618 return do_dbranch(ctx
, disp
, is_l
? 31 : 0, n
);
2620 TCGv tmp
= get_temp(ctx
);
2621 tcg_gen_addi_tl(tmp
, load_gpr(ctx
, b
), disp
);
2622 return do_ibranch(ctx
, tmp
, is_l
? 31 : 0, n
);
2626 static ExitStatus
trans_bl(DisasContext
*ctx
, uint32_t insn
,
2627 const DisasInsn
*di
)
2629 unsigned n
= extract32(insn
, 1, 1);
2630 unsigned link
= extract32(insn
, 21, 5);
2631 target_long disp
= assemble_17(insn
);
2633 return do_dbranch(ctx
, iaoq_dest(ctx
, disp
), link
, n
);
2636 static ExitStatus
trans_bl_long(DisasContext
*ctx
, uint32_t insn
,
2637 const DisasInsn
*di
)
2639 unsigned n
= extract32(insn
, 1, 1);
2640 target_long disp
= assemble_22(insn
);
2642 return do_dbranch(ctx
, iaoq_dest(ctx
, disp
), 2, n
);
/* BLR: branch-and-link-register.  The target is computed from the current
   insn address: iaoq_f + 8 + (gr[rx] << 3); the return address is linked
   into the register encoded at bits [25:21].  */
static ExitStatus trans_blr(DisasContext *ctx, uint32_t insn,
                            const DisasInsn *di)
{
    unsigned n = extract32(insn, 1, 1);     /* presumably the nullify bit -- passed to do_ibranch */
    unsigned rx = extract32(insn, 16, 5);   /* index register */
    unsigned link = extract32(insn, 21, 5); /* link register */
    TCGv tmp = get_temp(ctx);

    /* tmp = gr[rx] * 8 + (current insn address + 8).  */
    tcg_gen_shli_tl(tmp, load_gpr(ctx, rx), 3);
    tcg_gen_addi_tl(tmp, tmp, ctx->iaoq_f + 8);
    return do_ibranch(ctx, tmp, link, n);
}
/* BV: branch vectored.  Indirect branch to gr[rb] + (gr[rx] << 3),
   with no link (link argument 0 below).  */
static ExitStatus trans_bv(DisasContext *ctx, uint32_t insn,
                           const DisasInsn *di)
{
    unsigned n = extract32(insn, 1, 1);     /* presumably the nullify bit */
    unsigned rx = extract32(insn, 16, 5);   /* index register */
    unsigned rb = extract32(insn, 21, 5);   /* base register */
    TCGv dest;

    if (rx == 0) {
        /* No index: branch directly to the base register value.  */
        dest = load_gpr(ctx, rb);
    } else {
        dest = get_temp(ctx);
        tcg_gen_shli_tl(dest, load_gpr(ctx, rx), 3);
        tcg_gen_add_tl(dest, dest, load_gpr(ctx, rb));
    }
    return do_ibranch(ctx, dest, 0, n);
}
2676 static ExitStatus
trans_bve(DisasContext
*ctx
, uint32_t insn
,
2677 const DisasInsn
*di
)
2679 unsigned n
= extract32(insn
, 1, 1);
2680 unsigned rb
= extract32(insn
, 21, 5);
2681 unsigned link
= extract32(insn
, 13, 1) ? 2 : 0;
2683 return do_ibranch(ctx
, load_gpr(ctx
, rb
), link
, n
);
/* Dispatch table for the branch (major opcode 0x3A) group.
   Entries are { fixed-bits, mask, handler }; matched in order by
   translate_table_int.  */
static const DisasInsn table_branch[] = {
    { 0xe8000000u, 0xfc006000u, trans_bl }, /* B,L and B,L,PUSH */
    { 0xe800a000u, 0xfc00e000u, trans_bl_long },
    { 0xe8004000u, 0xfc00fffdu, trans_blr },
    { 0xe800c000u, 0xfc00fffdu, trans_bv },
    { 0xe800d000u, 0xfc00dffcu, trans_bve },
};
2694 static ExitStatus
translate_table_int(DisasContext
*ctx
, uint32_t insn
,
2695 const DisasInsn table
[], size_t n
)
2698 for (i
= 0; i
< n
; ++i
) {
2699 if ((insn
& table
[i
].mask
) == table
[i
].insn
) {
2700 return table
[i
].trans(ctx
, insn
, &table
[i
]);
2703 return gen_illegal(ctx
);
/* Convenience wrapper: dispatch INSN against a statically-sized table,
   computing the entry count at compile time.  */
#define translate_table(ctx, insn, table) \
    translate_table_int(ctx, insn, table, ARRAY_SIZE(table))
/* Top-level decode: dispatch one instruction on its 6-bit major opcode
   (bits [31:26]).  Returns the resulting TB exit status.
   NOTE(review): the extraction dropped the switch/case skeleton; the case
   values below are reconstructed -- verify against the original file.  */
static ExitStatus translate_one(DisasContext *ctx, uint32_t insn)
{
    uint32_t opc = extract32(insn, 26, 6);

    switch (opc) {
    case 0x02:
        return translate_table(ctx, insn, table_arith_log);
    case 0x03:
        return translate_table(ctx, insn, table_index_mem);
    case 0x08:
        return trans_ldil(ctx, insn);
    case 0x09:
        return trans_copr_w(ctx, insn);
    case 0x0A:
        return trans_addil(ctx, insn);
    case 0x0B:
        return trans_copr_dw(ctx, insn);
    case 0x0D:
        return trans_ldo(ctx, insn);

    /* Loads and stores, by size and modify form.  */
    case 0x10:
        return trans_load(ctx, insn, false, MO_UB);
    case 0x11:
        return trans_load(ctx, insn, false, MO_TEUW);
    case 0x12:
        return trans_load(ctx, insn, false, MO_TEUL);
    case 0x13:
        return trans_load(ctx, insn, true, MO_TEUL);
    case 0x16:
        return trans_fload_mod(ctx, insn);
    case 0x17:
        return trans_load_w(ctx, insn);
    case 0x18:
        return trans_store(ctx, insn, false, MO_UB);
    case 0x19:
        return trans_store(ctx, insn, false, MO_TEUW);
    case 0x1A:
        return trans_store(ctx, insn, false, MO_TEUL);
    case 0x1B:
        return trans_store(ctx, insn, true, MO_TEUL);
    case 0x1E:
        return trans_fstore_mod(ctx, insn);
    case 0x1F:
        return trans_store_w(ctx, insn);

    /* Compare/add-and-branch and immediate arithmetic.  */
    case 0x20:
        return trans_cmpb(ctx, insn, true, false, false);
    case 0x21:
        return trans_cmpb(ctx, insn, true, true, false);
    case 0x22:
        return trans_cmpb(ctx, insn, false, false, false);
    case 0x23:
        return trans_cmpb(ctx, insn, false, true, false);
    case 0x24:
        return trans_cmpiclr(ctx, insn);
    case 0x25:
        return trans_subi(ctx, insn);
    case 0x27:
        return trans_cmpb(ctx, insn, true, false, true);
    case 0x28:
        return trans_addb(ctx, insn, true, false);
    case 0x29:
        return trans_addb(ctx, insn, true, true);
    case 0x2A:
        return trans_addb(ctx, insn, false, false);
    case 0x2B:
        return trans_addb(ctx, insn, false, true);
    case 0x2C:
    case 0x2D:
        return trans_addi(ctx, insn);
    case 0x2F:
        return trans_cmpb(ctx, insn, false, false, true);

    /* Bit-test, move-and-branch, shift/extract, deposit, branches.  */
    case 0x30:
    case 0x31:
        return trans_bb(ctx, insn);
    case 0x32:
        return trans_movb(ctx, insn, false);
    case 0x33:
        return trans_movb(ctx, insn, true);
    case 0x34:
        return translate_table(ctx, insn, table_sh_ex);
    case 0x35:
        return translate_table(ctx, insn, table_depw);
    case 0x38:
        return trans_be(ctx, insn, false);
    case 0x39:
        return trans_be(ctx, insn, true);
    case 0x3A:
        return translate_table(ctx, insn, table_branch);

    case 0x04: /* spopn */
    case 0x05: /* diag */
    case 0x0F: /* product specific */
        break;

    case 0x07: /* unassigned */
    case 0x15: /* unassigned */
    case 0x1D: /* unassigned */
    case 0x37: /* unassigned */
    case 0x3F: /* unassigned */
        break;

    default:
        break;
    }
    /* Anything not handled above raises the illegal instruction trap.  */
    return gen_illegal(ctx);
}
/* Translate one TB's worth of guest instructions into TCG ops.
   Drives translate_one in a loop, managing the instruction-address queue
   (iaoq_f/iaoq_b), nullification state, temp lifetimes, and the final
   TB-exit sequence.
   NOTE(review): the extraction dropped many structural lines (loop and
   switch skeletons); they are reconstructed here -- verify against the
   original file.  */
void gen_intermediate_code(CPUHPPAState *env, struct TranslationBlock *tb)
{
    HPPACPU *cpu = hppa_env_get_cpu(env);
    CPUState *cs = CPU(cpu);
    DisasContext ctx;
    ExitStatus ret;
    int num_insns, max_insns, i;

    ctx.tb = tb;
    ctx.iaoq_f = tb->pc;
    ctx.iaoq_b = tb->cs_base;
    ctx.singlestep_enabled = cs->singlestep_enabled;

    ctx.ntemps = 0;
    for (i = 0; i < ARRAY_SIZE(ctx.temps); ++i) {
        TCGV_UNUSED(ctx.temps[i]);
    }

    /* Compute the maximum number of insns to execute, as bounded by
       (1) icount, (2) single-stepping, (3) branch delay slots, or
       (4) the number of insns remaining on the current page.  */
    max_insns = tb->cflags & CF_COUNT_MASK;
    if (max_insns == 0) {
        max_insns = CF_COUNT_MASK;
    }
    if (ctx.singlestep_enabled || singlestep) {
        max_insns = 1;
    } else if (max_insns > TCG_MAX_INSNS) {
        max_insns = TCG_MAX_INSNS;
    }

    num_insns = 0;
    gen_tb_start(tb);

    /* Seed the nullification status from PSW[N], as shown in TB->FLAGS.  */
    ctx.null_cond = cond_make_f();
    ctx.psw_n_nonzero = false;
    if (tb->flags & 1) {
        ctx.null_cond.c = TCG_COND_ALWAYS;
        ctx.psw_n_nonzero = true;
    }
    ctx.null_lab = NULL;

    do {
        tcg_gen_insn_start(ctx.iaoq_f, ctx.iaoq_b);
        num_insns++;

        if (unlikely(cpu_breakpoint_test(cs, ctx.iaoq_f, BP_ANY))) {
            ret = gen_excp(&ctx, EXCP_DEBUG);
            break;
        }
        if (num_insns == max_insns && (tb->cflags & CF_LAST_IO)) {
            gen_io_start();
        }

        if (ctx.iaoq_f < TARGET_PAGE_SIZE) {
            ret = do_page_zero(&ctx);
            assert(ret != NO_EXIT);
        } else {
            /* Always fetch the insn, even if nullified, so that we check
               the page permissions for execute.  */
            uint32_t insn = cpu_ldl_code(env, ctx.iaoq_f);

            /* Set up the IA queue for the next insn.
               This will be overwritten by a branch.  */
            if (ctx.iaoq_b == -1) {
                ctx.iaoq_n = -1;
                ctx.iaoq_n_var = get_temp(&ctx);
                tcg_gen_addi_tl(ctx.iaoq_n_var, cpu_iaoq_b, 4);
            } else {
                ctx.iaoq_n = ctx.iaoq_b + 4;
                TCGV_UNUSED(ctx.iaoq_n_var);
            }

            if (unlikely(ctx.null_cond.c == TCG_COND_ALWAYS)) {
                /* Insn is nullified: skip it, clearing the condition.  */
                ctx.null_cond.c = TCG_COND_NEVER;
                ret = NO_EXIT;
            } else {
                ret = translate_one(&ctx, insn);
                assert(ctx.null_lab == NULL);
            }
        }

        /* Free any temporaries allocated during the insn.  */
        for (i = 0; i < ctx.ntemps; ++i) {
            tcg_temp_free(ctx.temps[i]);
            TCGV_UNUSED(ctx.temps[i]);
        }
        ctx.ntemps = 0;

        /* If we see non-linear instructions, exhaust instruction count,
           or run out of buffer space, stop generation.  */
        /* ??? The non-linear instruction restriction is purely due to
           the debugging dump.  Otherwise we *could* follow unconditional
           branches within the same page.  */
        if (ret == NO_EXIT
            && (ctx.iaoq_b != ctx.iaoq_f + 4
                || num_insns >= max_insns
                || tcg_op_buf_full())) {
            if (ctx.null_cond.c == TCG_COND_NEVER
                || ctx.null_cond.c == TCG_COND_ALWAYS) {
                nullify_set(&ctx, ctx.null_cond.c == TCG_COND_ALWAYS);
                gen_goto_tb(&ctx, 0, ctx.iaoq_b, ctx.iaoq_n);
                ret = EXIT_GOTO_TB;
            } else {
                ret = EXIT_IAQ_N_STALE;
            }
        }

        /* Advance the instruction-address queue.  */
        ctx.iaoq_f = ctx.iaoq_b;
        ctx.iaoq_b = ctx.iaoq_n;
        if (ret == EXIT_NORETURN
            || ret == EXIT_GOTO_TB
            || ret == EXIT_IAQ_N_UPDATED) {
            break;
        }
        if (ctx.iaoq_f == -1) {
            tcg_gen_mov_tl(cpu_iaoq_f, cpu_iaoq_b);
            copy_iaoq_entry(cpu_iaoq_b, ctx.iaoq_n, ctx.iaoq_n_var);
            nullify_save(&ctx);
            ret = EXIT_IAQ_N_UPDATED;
            break;
        }
        if (ctx.iaoq_b == -1) {
            tcg_gen_mov_tl(cpu_iaoq_b, ctx.iaoq_n_var);
        }
    } while (ret == NO_EXIT);

    if (tb->cflags & CF_LAST_IO) {
        gen_io_end();
    }

    /* Emit the TB-exit sequence appropriate to how translation ended.  */
    switch (ret) {
    case EXIT_GOTO_TB:
    case EXIT_NORETURN:
        break;
    case EXIT_IAQ_N_STALE:
        copy_iaoq_entry(cpu_iaoq_f, ctx.iaoq_f, cpu_iaoq_f);
        copy_iaoq_entry(cpu_iaoq_b, ctx.iaoq_b, cpu_iaoq_b);
        nullify_save(&ctx);
        /* FALLTHRU */
    case EXIT_IAQ_N_UPDATED:
        if (ctx.singlestep_enabled) {
            gen_excp_1(EXCP_DEBUG);
        } else {
            tcg_gen_exit_tb(0);
        }
        break;
    default:
        abort();
    }

    gen_tb_end(tb, num_insns);

    tb->size = num_insns * 4;
    tb->icount = num_insns;

#ifdef DEBUG_DISAS
    if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)
        && qemu_log_in_addr_range(tb->pc)) {
        qemu_log_lock();
        /* Page-zero entry points have no real code to disassemble.  */
        switch (tb->pc) {
        case 0x00:
            qemu_log("IN:\n0x00000000: (null)\n\n");
            break;
        case 0xb0:
            qemu_log("IN:\n0x000000b0: light-weight-syscall\n\n");
            break;
        case 0xe0:
            qemu_log("IN:\n0x000000e0: set-thread-pointer-syscall\n\n");
            break;
        case 0x100:
            qemu_log("IN:\n0x00000100: syscall\n\n");
            break;
        default:
            qemu_log("IN: %s\n", lookup_symbol(tb->pc));
            log_target_disas(cs, tb->pc, tb->size, 1);
            qemu_log("\n");
            break;
        }
        qemu_log_unlock();
    }
#endif
}
3001 void restore_state_to_opc(CPUHPPAState
*env
, TranslationBlock
*tb
,
3004 env
->iaoq_f
= data
[0];
3005 if (data
[1] != -1) {
3006 env
->iaoq_b
= data
[1];
3008 /* Since we were executing the instruction at IAOQ_F, and took some
3009 sort of action that provoked the cpu_restore_state, we can infer
3010 that the instruction was not nullified. */