/*
 * HPPA emulation cpu translation for qemu.
 *
 * Copyright (c) 2016 Richard Henderson <rth@twiddle.net>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "cpu.h"
#include "disas/disas.h"
#include "qemu/host-utils.h"
#include "exec/exec-all.h"
#include "tcg/tcg-op.h"
#include "exec/cpu_ldst.h"
#include "exec/helper-proto.h"
#include "exec/helper-gen.h"
#include "exec/translator.h"
/* Since we have a distinction between register size and address size,
   we need to redefine all of these.  */

#undef tcg_temp_new
#undef tcg_global_mem_new
#undef tcg_temp_free

#if TARGET_LONG_BITS == 64
#define TCGv_tl TCGv_i64
#define tcg_temp_new_tl tcg_temp_new_i64
#define tcg_temp_free_tl tcg_temp_free_i64
#if TARGET_REGISTER_BITS == 64
#define tcg_gen_extu_reg_tl tcg_gen_mov_i64
#else
#define tcg_gen_extu_reg_tl tcg_gen_extu_i32_i64
#endif
#else
#define TCGv_tl TCGv_i32
#define tcg_temp_new_tl tcg_temp_new_i32
#define tcg_temp_free_tl tcg_temp_free_i32
#define tcg_gen_extu_reg_tl tcg_gen_mov_i32
#endif
#if TARGET_REGISTER_BITS == 64
#define TCGv_reg TCGv_i64
#define tcg_temp_new tcg_temp_new_i64
#define tcg_global_mem_new tcg_global_mem_new_i64
#define tcg_temp_free tcg_temp_free_i64

#define tcg_gen_movi_reg tcg_gen_movi_i64
#define tcg_gen_mov_reg tcg_gen_mov_i64
#define tcg_gen_ld8u_reg tcg_gen_ld8u_i64
#define tcg_gen_ld8s_reg tcg_gen_ld8s_i64
#define tcg_gen_ld16u_reg tcg_gen_ld16u_i64
#define tcg_gen_ld16s_reg tcg_gen_ld16s_i64
#define tcg_gen_ld32u_reg tcg_gen_ld32u_i64
#define tcg_gen_ld32s_reg tcg_gen_ld32s_i64
#define tcg_gen_ld_reg tcg_gen_ld_i64
#define tcg_gen_st8_reg tcg_gen_st8_i64
#define tcg_gen_st16_reg tcg_gen_st16_i64
#define tcg_gen_st32_reg tcg_gen_st32_i64
#define tcg_gen_st_reg tcg_gen_st_i64
#define tcg_gen_add_reg tcg_gen_add_i64
#define tcg_gen_addi_reg tcg_gen_addi_i64
#define tcg_gen_sub_reg tcg_gen_sub_i64
#define tcg_gen_neg_reg tcg_gen_neg_i64
#define tcg_gen_subfi_reg tcg_gen_subfi_i64
#define tcg_gen_subi_reg tcg_gen_subi_i64
#define tcg_gen_and_reg tcg_gen_and_i64
#define tcg_gen_andi_reg tcg_gen_andi_i64
#define tcg_gen_or_reg tcg_gen_or_i64
#define tcg_gen_ori_reg tcg_gen_ori_i64
#define tcg_gen_xor_reg tcg_gen_xor_i64
#define tcg_gen_xori_reg tcg_gen_xori_i64
#define tcg_gen_not_reg tcg_gen_not_i64
#define tcg_gen_shl_reg tcg_gen_shl_i64
#define tcg_gen_shli_reg tcg_gen_shli_i64
#define tcg_gen_shr_reg tcg_gen_shr_i64
#define tcg_gen_shri_reg tcg_gen_shri_i64
#define tcg_gen_sar_reg tcg_gen_sar_i64
#define tcg_gen_sari_reg tcg_gen_sari_i64
#define tcg_gen_brcond_reg tcg_gen_brcond_i64
#define tcg_gen_brcondi_reg tcg_gen_brcondi_i64
#define tcg_gen_setcond_reg tcg_gen_setcond_i64
#define tcg_gen_setcondi_reg tcg_gen_setcondi_i64
#define tcg_gen_mul_reg tcg_gen_mul_i64
#define tcg_gen_muli_reg tcg_gen_muli_i64
#define tcg_gen_div_reg tcg_gen_div_i64
#define tcg_gen_rem_reg tcg_gen_rem_i64
#define tcg_gen_divu_reg tcg_gen_divu_i64
#define tcg_gen_remu_reg tcg_gen_remu_i64
#define tcg_gen_discard_reg tcg_gen_discard_i64
#define tcg_gen_trunc_reg_i32 tcg_gen_extrl_i64_i32
#define tcg_gen_trunc_i64_reg tcg_gen_mov_i64
#define tcg_gen_extu_i32_reg tcg_gen_extu_i32_i64
#define tcg_gen_ext_i32_reg tcg_gen_ext_i32_i64
#define tcg_gen_extu_reg_i64 tcg_gen_mov_i64
#define tcg_gen_ext_reg_i64 tcg_gen_mov_i64
#define tcg_gen_ext8u_reg tcg_gen_ext8u_i64
#define tcg_gen_ext8s_reg tcg_gen_ext8s_i64
#define tcg_gen_ext16u_reg tcg_gen_ext16u_i64
#define tcg_gen_ext16s_reg tcg_gen_ext16s_i64
#define tcg_gen_ext32u_reg tcg_gen_ext32u_i64
#define tcg_gen_ext32s_reg tcg_gen_ext32s_i64
#define tcg_gen_bswap16_reg tcg_gen_bswap16_i64
#define tcg_gen_bswap32_reg tcg_gen_bswap32_i64
#define tcg_gen_bswap64_reg tcg_gen_bswap64_i64
#define tcg_gen_concat_reg_i64 tcg_gen_concat32_i64
#define tcg_gen_andc_reg tcg_gen_andc_i64
#define tcg_gen_eqv_reg tcg_gen_eqv_i64
#define tcg_gen_nand_reg tcg_gen_nand_i64
#define tcg_gen_nor_reg tcg_gen_nor_i64
#define tcg_gen_orc_reg tcg_gen_orc_i64
#define tcg_gen_clz_reg tcg_gen_clz_i64
#define tcg_gen_ctz_reg tcg_gen_ctz_i64
#define tcg_gen_clzi_reg tcg_gen_clzi_i64
#define tcg_gen_ctzi_reg tcg_gen_ctzi_i64
#define tcg_gen_clrsb_reg tcg_gen_clrsb_i64
#define tcg_gen_ctpop_reg tcg_gen_ctpop_i64
#define tcg_gen_rotl_reg tcg_gen_rotl_i64
#define tcg_gen_rotli_reg tcg_gen_rotli_i64
#define tcg_gen_rotr_reg tcg_gen_rotr_i64
#define tcg_gen_rotri_reg tcg_gen_rotri_i64
#define tcg_gen_deposit_reg tcg_gen_deposit_i64
#define tcg_gen_deposit_z_reg tcg_gen_deposit_z_i64
#define tcg_gen_extract_reg tcg_gen_extract_i64
#define tcg_gen_sextract_reg tcg_gen_sextract_i64
#define tcg_gen_extract2_reg tcg_gen_extract2_i64
#define tcg_const_reg tcg_const_i64
#define tcg_const_local_reg tcg_const_local_i64
#define tcg_constant_reg tcg_constant_i64
#define tcg_gen_movcond_reg tcg_gen_movcond_i64
#define tcg_gen_add2_reg tcg_gen_add2_i64
#define tcg_gen_sub2_reg tcg_gen_sub2_i64
#define tcg_gen_qemu_ld_reg tcg_gen_qemu_ld_i64
#define tcg_gen_qemu_st_reg tcg_gen_qemu_st_i64
#define tcg_gen_atomic_xchg_reg tcg_gen_atomic_xchg_i64
#define tcg_gen_trunc_reg_ptr tcg_gen_trunc_i64_ptr
#else
#define TCGv_reg TCGv_i32
#define tcg_temp_new tcg_temp_new_i32
#define tcg_global_mem_new tcg_global_mem_new_i32
#define tcg_temp_free tcg_temp_free_i32

#define tcg_gen_movi_reg tcg_gen_movi_i32
#define tcg_gen_mov_reg tcg_gen_mov_i32
#define tcg_gen_ld8u_reg tcg_gen_ld8u_i32
#define tcg_gen_ld8s_reg tcg_gen_ld8s_i32
#define tcg_gen_ld16u_reg tcg_gen_ld16u_i32
#define tcg_gen_ld16s_reg tcg_gen_ld16s_i32
#define tcg_gen_ld32u_reg tcg_gen_ld_i32
#define tcg_gen_ld32s_reg tcg_gen_ld_i32
#define tcg_gen_ld_reg tcg_gen_ld_i32
#define tcg_gen_st8_reg tcg_gen_st8_i32
#define tcg_gen_st16_reg tcg_gen_st16_i32
#define tcg_gen_st32_reg tcg_gen_st32_i32
#define tcg_gen_st_reg tcg_gen_st_i32
#define tcg_gen_add_reg tcg_gen_add_i32
#define tcg_gen_addi_reg tcg_gen_addi_i32
#define tcg_gen_sub_reg tcg_gen_sub_i32
#define tcg_gen_neg_reg tcg_gen_neg_i32
#define tcg_gen_subfi_reg tcg_gen_subfi_i32
#define tcg_gen_subi_reg tcg_gen_subi_i32
#define tcg_gen_and_reg tcg_gen_and_i32
#define tcg_gen_andi_reg tcg_gen_andi_i32
#define tcg_gen_or_reg tcg_gen_or_i32
#define tcg_gen_ori_reg tcg_gen_ori_i32
#define tcg_gen_xor_reg tcg_gen_xor_i32
#define tcg_gen_xori_reg tcg_gen_xori_i32
#define tcg_gen_not_reg tcg_gen_not_i32
#define tcg_gen_shl_reg tcg_gen_shl_i32
#define tcg_gen_shli_reg tcg_gen_shli_i32
#define tcg_gen_shr_reg tcg_gen_shr_i32
#define tcg_gen_shri_reg tcg_gen_shri_i32
#define tcg_gen_sar_reg tcg_gen_sar_i32
#define tcg_gen_sari_reg tcg_gen_sari_i32
#define tcg_gen_brcond_reg tcg_gen_brcond_i32
#define tcg_gen_brcondi_reg tcg_gen_brcondi_i32
#define tcg_gen_setcond_reg tcg_gen_setcond_i32
#define tcg_gen_setcondi_reg tcg_gen_setcondi_i32
#define tcg_gen_mul_reg tcg_gen_mul_i32
#define tcg_gen_muli_reg tcg_gen_muli_i32
#define tcg_gen_div_reg tcg_gen_div_i32
#define tcg_gen_rem_reg tcg_gen_rem_i32
#define tcg_gen_divu_reg tcg_gen_divu_i32
#define tcg_gen_remu_reg tcg_gen_remu_i32
#define tcg_gen_discard_reg tcg_gen_discard_i32
#define tcg_gen_trunc_reg_i32 tcg_gen_mov_i32
#define tcg_gen_trunc_i64_reg tcg_gen_extrl_i64_i32
#define tcg_gen_extu_i32_reg tcg_gen_mov_i32
#define tcg_gen_ext_i32_reg tcg_gen_mov_i32
#define tcg_gen_extu_reg_i64 tcg_gen_extu_i32_i64
#define tcg_gen_ext_reg_i64 tcg_gen_ext_i32_i64
#define tcg_gen_ext8u_reg tcg_gen_ext8u_i32
#define tcg_gen_ext8s_reg tcg_gen_ext8s_i32
#define tcg_gen_ext16u_reg tcg_gen_ext16u_i32
#define tcg_gen_ext16s_reg tcg_gen_ext16s_i32
#define tcg_gen_ext32u_reg tcg_gen_mov_i32
#define tcg_gen_ext32s_reg tcg_gen_mov_i32
#define tcg_gen_bswap16_reg tcg_gen_bswap16_i32
#define tcg_gen_bswap32_reg tcg_gen_bswap32_i32
#define tcg_gen_concat_reg_i64 tcg_gen_concat_i32_i64
#define tcg_gen_andc_reg tcg_gen_andc_i32
#define tcg_gen_eqv_reg tcg_gen_eqv_i32
#define tcg_gen_nand_reg tcg_gen_nand_i32
#define tcg_gen_nor_reg tcg_gen_nor_i32
#define tcg_gen_orc_reg tcg_gen_orc_i32
#define tcg_gen_clz_reg tcg_gen_clz_i32
#define tcg_gen_ctz_reg tcg_gen_ctz_i32
#define tcg_gen_clzi_reg tcg_gen_clzi_i32
#define tcg_gen_ctzi_reg tcg_gen_ctzi_i32
#define tcg_gen_clrsb_reg tcg_gen_clrsb_i32
#define tcg_gen_ctpop_reg tcg_gen_ctpop_i32
#define tcg_gen_rotl_reg tcg_gen_rotl_i32
#define tcg_gen_rotli_reg tcg_gen_rotli_i32
#define tcg_gen_rotr_reg tcg_gen_rotr_i32
#define tcg_gen_rotri_reg tcg_gen_rotri_i32
#define tcg_gen_deposit_reg tcg_gen_deposit_i32
#define tcg_gen_deposit_z_reg tcg_gen_deposit_z_i32
#define tcg_gen_extract_reg tcg_gen_extract_i32
#define tcg_gen_sextract_reg tcg_gen_sextract_i32
#define tcg_gen_extract2_reg tcg_gen_extract2_i32
#define tcg_const_reg tcg_const_i32
#define tcg_const_local_reg tcg_const_local_i32
#define tcg_constant_reg tcg_constant_i32
#define tcg_gen_movcond_reg tcg_gen_movcond_i32
#define tcg_gen_add2_reg tcg_gen_add2_i32
#define tcg_gen_sub2_reg tcg_gen_sub2_i32
#define tcg_gen_qemu_ld_reg tcg_gen_qemu_ld_i32
#define tcg_gen_qemu_st_reg tcg_gen_qemu_st_i32
#define tcg_gen_atomic_xchg_reg tcg_gen_atomic_xchg_i32
#define tcg_gen_trunc_reg_ptr tcg_gen_ext_i32_ptr
#endif /* TARGET_REGISTER_BITS */
typedef struct DisasCond {
    TCGCond c;
    TCGv_reg a0, a1;
} DisasCond;

typedef struct DisasContext {
    DisasContextBase base;
    CPUState *cs;

    target_ureg iaoq_f;
    target_ureg iaoq_b;
    target_ureg iaoq_n;
    TCGv_reg iaoq_n_var;

    int ntempr, ntempl;
    TCGv_reg tempr[8];
    TCGv_tl templ[4];

    DisasCond null_cond;
    TCGLabel *null_lab;

    uint32_t insn;
    uint32_t tb_flags;
    int mmu_idx;
    int privilege;
    bool psw_n_nonzero;

#ifdef CONFIG_USER_ONLY
    MemOp unalign;
#endif
} DisasContext;

#ifdef CONFIG_USER_ONLY
#define UNALIGN(C)  (C)->unalign
#else
#define UNALIGN(C)  MO_ALIGN
#endif
/* Note that ssm/rsm instructions number PSW_W and PSW_E differently.  */
static int expand_sm_imm(DisasContext *ctx, int val)
{
    if (val & PSW_SM_E) {
        val = (val & ~PSW_SM_E) | PSW_E;
    }
    if (val & PSW_SM_W) {
        val = (val & ~PSW_SM_W) | PSW_W;
    }
    return val;
}
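/*
 * Example: an ssm/rsm immediate that has the SM-numbered E bit set comes
 * out of the decoder with PSW_SM_E set; the code above clears that bit and
 * sets PSW_E, the position the rest of the PSW handling expects.  The other
 * system-mask bits already match their PSW positions.
 */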
/* Inverted space register indicates 0 means sr0 not inferred from base.  */
static int expand_sr3x(DisasContext *ctx, int val)
{
    return ~val;
}

/* Convert the M:A bits within a memory insn to the tri-state value
   we use for the final M.  */
static int ma_to_m(DisasContext *ctx, int val)
{
    return val & 2 ? (val & 1 ? -1 : 1) : 0;
}
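/*
 * The resulting tri-state value: 0 = no base register update, 1 = post-modify
 * (A clear), -1 = pre-modify (A set).  This is the "modify" argument consumed
 * by form_gva() and the do_load/do_store helpers below.
 */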
/* Convert the sign of the displacement to a pre or post-modify.  */
static int pos_to_m(DisasContext *ctx, int val)
{
    return val ? 1 : -1;
}

static int neg_to_m(DisasContext *ctx, int val)
{
    return val ? -1 : 1;
}
/* Used for branch targets and fp memory ops.  */
static int expand_shl2(DisasContext *ctx, int val)
{
    return val << 2;
}

/* Used for fp memory ops.  */
static int expand_shl3(DisasContext *ctx, int val)
{
    return val << 3;
}

/* Used for assemble_21.  */
static int expand_shl11(DisasContext *ctx, int val)
{
    return val << 11;
}
/* Include the auto-generated decoder.  */
#include "decode-insns.c.inc"

/* We are not using a goto_tb (for whatever reason), but have updated
   the iaq (for whatever reason), so don't do it again on exit.  */
#define DISAS_IAQ_N_UPDATED  DISAS_TARGET_0

/* We are exiting the TB, but have neither emitted a goto_tb, nor
   updated the iaq for the next instruction to be executed.  */
#define DISAS_IAQ_N_STALE    DISAS_TARGET_1

/* Similarly, but we want to return to the main loop immediately
   to recognize unmasked interrupts.  */
#define DISAS_IAQ_N_STALE_EXIT  DISAS_TARGET_2
#define DISAS_EXIT              DISAS_TARGET_3
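/*
 * These codes are returned through DisasContextBase::is_jmp; the translation
 * loop uses them to decide how to finish the TB, e.g. whether the instruction
 * address queue still needs to be written back before exiting.
 */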
/* global register indexes */
static TCGv_reg cpu_gr[32];
static TCGv_i64 cpu_sr[4];
static TCGv_i64 cpu_srH;
static TCGv_reg cpu_iaoq_f;
static TCGv_reg cpu_iaoq_b;
static TCGv_i64 cpu_iasq_f;
static TCGv_i64 cpu_iasq_b;
static TCGv_reg cpu_sar;
static TCGv_reg cpu_psw_n;
static TCGv_reg cpu_psw_v;
static TCGv_reg cpu_psw_cb;
static TCGv_reg cpu_psw_cb_msb;

#include "exec/gen-icount.h"
void hppa_translate_init(void)
{
#define DEF_VAR(V)  { &cpu_##V, #V, offsetof(CPUHPPAState, V) }

    typedef struct { TCGv_reg *var; const char *name; int ofs; } GlobalVar;
    static const GlobalVar vars[] = {
        { &cpu_sar, "sar", offsetof(CPUHPPAState, cr[CR_SAR]) },
        DEF_VAR(psw_n),
        DEF_VAR(psw_v),
        DEF_VAR(psw_cb),
        DEF_VAR(psw_cb_msb),
        DEF_VAR(iaoq_f),
        DEF_VAR(iaoq_b),
    };

#undef DEF_VAR

    /* Use the symbolic register names that match the disassembler.  */
    static const char gr_names[32][4] = {
        "r0",  "r1",  "r2",  "r3",  "r4",  "r5",  "r6",  "r7",
        "r8",  "r9",  "r10", "r11", "r12", "r13", "r14", "r15",
        "r16", "r17", "r18", "r19", "r20", "r21", "r22", "r23",
        "r24", "r25", "r26", "r27", "r28", "r29", "r30", "r31"
    };
    /* SR[4-7] are not global registers so that we can index them.  */
    static const char sr_names[5][4] = {
        "sr0", "sr1", "sr2", "sr3", "srH"
    };

    int i;

    for (i = 1; i < 32; i++) {
        cpu_gr[i] = tcg_global_mem_new(cpu_env,
                                       offsetof(CPUHPPAState, gr[i]),
                                       gr_names[i]);
    }
    for (i = 0; i < 4; i++) {
        cpu_sr[i] = tcg_global_mem_new_i64(cpu_env,
                                           offsetof(CPUHPPAState, sr[i]),
                                           sr_names[i]);
    }
    cpu_srH = tcg_global_mem_new_i64(cpu_env,
                                     offsetof(CPUHPPAState, sr[4]),
                                     sr_names[4]);

    for (i = 0; i < ARRAY_SIZE(vars); ++i) {
        const GlobalVar *v = &vars[i];
        *v->var = tcg_global_mem_new(cpu_env, v->ofs, v->name);
    }

    cpu_iasq_f = tcg_global_mem_new_i64(cpu_env,
                                        offsetof(CPUHPPAState, iasq_f),
                                        "iasq_f");
    cpu_iasq_b = tcg_global_mem_new_i64(cpu_env,
                                        offsetof(CPUHPPAState, iasq_b),
                                        "iasq_b");
}
static DisasCond cond_make_f(void)
{
    return (DisasCond){
        .c = TCG_COND_NEVER,
        .a0 = NULL,
        .a1 = NULL,
    };
}

static DisasCond cond_make_t(void)
{
    return (DisasCond){
        .c = TCG_COND_ALWAYS,
        .a0 = NULL,
        .a1 = NULL,
    };
}

static DisasCond cond_make_n(void)
{
    return (DisasCond){
        .c = TCG_COND_NE,
        .a0 = cpu_psw_n,
        .a1 = tcg_constant_reg(0)
    };
}

static DisasCond cond_make_0_tmp(TCGCond c, TCGv_reg a0)
{
    assert (c != TCG_COND_NEVER && c != TCG_COND_ALWAYS);
    return (DisasCond){
        .c = c, .a0 = a0, .a1 = tcg_constant_reg(0)
    };
}

static DisasCond cond_make_0(TCGCond c, TCGv_reg a0)
{
    TCGv_reg tmp = tcg_temp_new();
    tcg_gen_mov_reg(tmp, a0);
    return cond_make_0_tmp(c, tmp);
}

static DisasCond cond_make(TCGCond c, TCGv_reg a0, TCGv_reg a1)
{
    DisasCond r = { .c = c };

    assert (c != TCG_COND_NEVER && c != TCG_COND_ALWAYS);
    r.a0 = tcg_temp_new();
    tcg_gen_mov_reg(r.a0, a0);
    r.a1 = tcg_temp_new();
    tcg_gen_mov_reg(r.a1, a1);
    return r;
}
static void cond_free(DisasCond *cond)
{
    switch (cond->c) {
    default:
        if (cond->a0 != cpu_psw_n) {
            tcg_temp_free(cond->a0);
        }
        tcg_temp_free(cond->a1);
        cond->a0 = NULL;
        cond->a1 = NULL;
        /* fallthru */
    case TCG_COND_ALWAYS:
        cond->c = TCG_COND_NEVER;
        break;
    case TCG_COND_NEVER:
        break;
    }
}
static TCGv_reg get_temp(DisasContext *ctx)
{
    unsigned i = ctx->ntempr++;
    g_assert(i < ARRAY_SIZE(ctx->tempr));
    return ctx->tempr[i] = tcg_temp_new();
}

#ifndef CONFIG_USER_ONLY
static TCGv_tl get_temp_tl(DisasContext *ctx)
{
    unsigned i = ctx->ntempl++;
    g_assert(i < ARRAY_SIZE(ctx->templ));
    return ctx->templ[i] = tcg_temp_new_tl();
}
#endif

static TCGv_reg load_const(DisasContext *ctx, target_sreg v)
{
    TCGv_reg t = get_temp(ctx);
    tcg_gen_movi_reg(t, v);
    return t;
}

static TCGv_reg load_gpr(DisasContext *ctx, unsigned reg)
{
    if (reg == 0) {
        TCGv_reg t = get_temp(ctx);
        tcg_gen_movi_reg(t, 0);
        return t;
    } else {
        return cpu_gr[reg];
    }
}

static TCGv_reg dest_gpr(DisasContext *ctx, unsigned reg)
{
    if (reg == 0 || ctx->null_cond.c != TCG_COND_NEVER) {
        return get_temp(ctx);
    } else {
        return cpu_gr[reg];
    }
}
static void save_or_nullify(DisasContext *ctx, TCGv_reg dest, TCGv_reg t)
{
    if (ctx->null_cond.c != TCG_COND_NEVER) {
        tcg_gen_movcond_reg(ctx->null_cond.c, dest, ctx->null_cond.a0,
                            ctx->null_cond.a1, dest, t);
    } else {
        tcg_gen_mov_reg(dest, t);
    }
}

static void save_gpr(DisasContext *ctx, unsigned reg, TCGv_reg t)
{
    if (reg != 0) {
        save_or_nullify(ctx, cpu_gr[reg], t);
    }
}
static TCGv_i32 load_frw_i32(unsigned rt)
{
    TCGv_i32 ret = tcg_temp_new_i32();
    tcg_gen_ld_i32(ret, cpu_env,
                   offsetof(CPUHPPAState, fr[rt & 31])
                   + (rt & 32 ? LO_OFS : HI_OFS));
    return ret;
}

static TCGv_i32 load_frw0_i32(unsigned rt)
{
    if (rt == 0) {
        return tcg_const_i32(0);
    }
    return load_frw_i32(rt);
}

static TCGv_i64 load_frw0_i64(unsigned rt)
{
    if (rt == 0) {
        return tcg_const_i64(0);
    } else {
        TCGv_i64 ret = tcg_temp_new_i64();
        tcg_gen_ld32u_i64(ret, cpu_env,
                          offsetof(CPUHPPAState, fr[rt & 31])
                          + (rt & 32 ? LO_OFS : HI_OFS));
        return ret;
    }
}

static void save_frw_i32(unsigned rt, TCGv_i32 val)
{
    tcg_gen_st_i32(val, cpu_env,
                   offsetof(CPUHPPAState, fr[rt & 31])
                   + (rt & 32 ? LO_OFS : HI_OFS));
}

static TCGv_i64 load_frd(unsigned rt)
{
    TCGv_i64 ret = tcg_temp_new_i64();
    tcg_gen_ld_i64(ret, cpu_env, offsetof(CPUHPPAState, fr[rt]));
    return ret;
}

static TCGv_i64 load_frd0(unsigned rt)
{
    if (rt == 0) {
        return tcg_const_i64(0);
    }
    return load_frd(rt);
}

static void save_frd(unsigned rt, TCGv_i64 val)
{
    tcg_gen_st_i64(val, cpu_env, offsetof(CPUHPPAState, fr[rt]));
}
static void load_spr(DisasContext *ctx, TCGv_i64 dest, unsigned reg)
{
#ifdef CONFIG_USER_ONLY
    tcg_gen_movi_i64(dest, 0);
#else
    if (reg < 4) {
        tcg_gen_mov_i64(dest, cpu_sr[reg]);
    } else if (ctx->tb_flags & TB_FLAG_SR_SAME) {
        tcg_gen_mov_i64(dest, cpu_srH);
    } else {
        tcg_gen_ld_i64(dest, cpu_env, offsetof(CPUHPPAState, sr[reg]));
    }
#endif
}
/* Skip over the implementation of an insn that has been nullified.
   Use this when the insn is too complex for a conditional move.  */
static void nullify_over(DisasContext *ctx)
{
    if (ctx->null_cond.c != TCG_COND_NEVER) {
        /* The always condition should have been handled in the main loop. */
        assert(ctx->null_cond.c != TCG_COND_ALWAYS);

        ctx->null_lab = gen_new_label();

        /* If we're using PSW[N], copy it to a temp because... */
        if (ctx->null_cond.a0 == cpu_psw_n) {
            ctx->null_cond.a0 = tcg_temp_new();
            tcg_gen_mov_reg(ctx->null_cond.a0, cpu_psw_n);
        }
        /* ... we clear it before branching over the implementation,
           so that (1) it's clear after nullifying this insn and
           (2) if this insn nullifies the next, PSW[N] is valid.  */
        if (ctx->psw_n_nonzero) {
            ctx->psw_n_nonzero = false;
            tcg_gen_movi_reg(cpu_psw_n, 0);
        }

        tcg_gen_brcond_reg(ctx->null_cond.c, ctx->null_cond.a0,
                           ctx->null_cond.a1, ctx->null_lab);
        cond_free(&ctx->null_cond);
    }
}

/* Save the current nullification state to PSW[N].  */
static void nullify_save(DisasContext *ctx)
{
    if (ctx->null_cond.c == TCG_COND_NEVER) {
        if (ctx->psw_n_nonzero) {
            tcg_gen_movi_reg(cpu_psw_n, 0);
        }
        return;
    }
    if (ctx->null_cond.a0 != cpu_psw_n) {
        tcg_gen_setcond_reg(ctx->null_cond.c, cpu_psw_n,
                            ctx->null_cond.a0, ctx->null_cond.a1);
        ctx->psw_n_nonzero = true;
    }
    cond_free(&ctx->null_cond);
}

/* Set a PSW[N] to X.  The intention is that this is used immediately
   before a goto_tb/exit_tb, so that there is no fallthru path to other
   code within the TB.  Therefore we do not update psw_n_nonzero.  */
static void nullify_set(DisasContext *ctx, bool x)
{
    if (ctx->psw_n_nonzero || x) {
        tcg_gen_movi_reg(cpu_psw_n, x);
    }
}

/* Mark the end of an instruction that may have been nullified.
   This is the pair to nullify_over.  Always returns true so that
   it may be tail-called from a translate function.  */
static bool nullify_end(DisasContext *ctx)
{
    TCGLabel *null_lab = ctx->null_lab;
    DisasJumpType status = ctx->base.is_jmp;

    /* For NEXT, NORETURN, STALE, we can easily continue (or exit).
       For UPDATED, we cannot update on the nullified path.  */
    assert(status != DISAS_IAQ_N_UPDATED);

    if (likely(null_lab == NULL)) {
        /* The current insn wasn't conditional or handled the condition
           applied to it without a branch, so the (new) setting of
           NULL_COND can be applied directly to the next insn.  */
        return true;
    }
    ctx->null_lab = NULL;

    if (likely(ctx->null_cond.c == TCG_COND_NEVER)) {
        /* The next instruction will be unconditional,
           and NULL_COND already reflects that.  */
        gen_set_label(null_lab);
    } else {
        /* The insn that we just executed is itself nullifying the next
           instruction.  Store the condition in the PSW[N] global.
           We asserted PSW[N] = 0 in nullify_over, so that after the
           label we have the proper value in place.  */
        nullify_save(ctx);
        gen_set_label(null_lab);
        ctx->null_cond = cond_make_n();
    }
    if (status == DISAS_NORETURN) {
        ctx->base.is_jmp = DISAS_NEXT;
    }
    return true;
}
static void copy_iaoq_entry(TCGv_reg dest, target_ureg ival, TCGv_reg vval)
{
    if (unlikely(ival == -1)) {
        tcg_gen_mov_reg(dest, vval);
    } else {
        tcg_gen_movi_reg(dest, ival);
    }
}

static inline target_ureg iaoq_dest(DisasContext *ctx, target_sreg disp)
{
    return ctx->iaoq_f + disp + 8;
}
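/* PA-RISC branch displacements are relative to the instruction following
   the branch's delay slot, which is why the target is iaoq_f + disp + 8.  */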
static void gen_excp_1(int exception)
{
    gen_helper_excp(cpu_env, tcg_constant_i32(exception));
}

static void gen_excp(DisasContext *ctx, int exception)
{
    copy_iaoq_entry(cpu_iaoq_f, ctx->iaoq_f, cpu_iaoq_f);
    copy_iaoq_entry(cpu_iaoq_b, ctx->iaoq_b, cpu_iaoq_b);
    nullify_save(ctx);
    gen_excp_1(exception);
    ctx->base.is_jmp = DISAS_NORETURN;
}

static bool gen_excp_iir(DisasContext *ctx, int exc)
{
    nullify_over(ctx);
    tcg_gen_st_reg(tcg_constant_reg(ctx->insn),
                   cpu_env, offsetof(CPUHPPAState, cr[CR_IIR]));
    gen_excp(ctx, exc);
    return nullify_end(ctx);
}

static bool gen_illegal(DisasContext *ctx)
{
    return gen_excp_iir(ctx, EXCP_ILL);
}

#ifdef CONFIG_USER_ONLY
#define CHECK_MOST_PRIVILEGED(EXCP) \
    return gen_excp_iir(ctx, EXCP)
#else
#define CHECK_MOST_PRIVILEGED(EXCP) \
    do {                                     \
        if (ctx->privilege != 0) {           \
            return gen_excp_iir(ctx, EXCP);  \
        }                                    \
    } while (0)
#endif

static bool use_goto_tb(DisasContext *ctx, target_ureg dest)
{
    return translator_use_goto_tb(&ctx->base, dest);
}
/* If the next insn is to be nullified, and it's on the same page,
   and we're not attempting to set a breakpoint on it, then we can
   totally skip the nullified insn.  This avoids creating and
   executing a TB that merely branches to the next TB.  */
static bool use_nullify_skip(DisasContext *ctx)
{
    return (((ctx->iaoq_b ^ ctx->iaoq_f) & TARGET_PAGE_MASK) == 0
            && !cpu_breakpoint_test(ctx->cs, ctx->iaoq_b, BP_ANY));
}
static void gen_goto_tb(DisasContext *ctx, int which,
                        target_ureg f, target_ureg b)
{
    if (f != -1 && b != -1 && use_goto_tb(ctx, f)) {
        tcg_gen_goto_tb(which);
        tcg_gen_movi_reg(cpu_iaoq_f, f);
        tcg_gen_movi_reg(cpu_iaoq_b, b);
        tcg_gen_exit_tb(ctx->base.tb, which);
    } else {
        copy_iaoq_entry(cpu_iaoq_f, f, cpu_iaoq_b);
        copy_iaoq_entry(cpu_iaoq_b, b, ctx->iaoq_n_var);
        tcg_gen_lookup_and_goto_ptr();
    }
}

static bool cond_need_sv(int c)
{
    return c == 2 || c == 3 || c == 6;
}

static bool cond_need_cb(int c)
{
    return c == 4 || c == 5;
}
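/*
 * Of the condition-field values used below, c = 2, 3, 6 ("<", "<=", "SV")
 * are the ones that examine the signed-overflow value, while c = 4, 5
 * ("NUV", "ZNV") examine the carry out; e.g. do_cond() tests cb_msb
 * directly for c == 4.
 */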
/*
 * Compute conditional for arithmetic.  See Page 5-3, Table 5-1, of
 * the Parisc 1.1 Architecture Reference Manual for details.
 */
static DisasCond do_cond(unsigned cf, TCGv_reg res,
                         TCGv_reg cb_msb, TCGv_reg sv)
{
    DisasCond cond;
    TCGv_reg tmp;

    switch (cf >> 1) {
    case 0: /* Never / TR    (0 / 1) */
        cond = cond_make_f();
        break;
    case 1: /* = / <>        (Z / !Z) */
        cond = cond_make_0(TCG_COND_EQ, res);
        break;
    case 2: /* < / >=        (N ^ V / !(N ^ V) */
        tmp = tcg_temp_new();
        tcg_gen_xor_reg(tmp, res, sv);
        cond = cond_make_0_tmp(TCG_COND_LT, tmp);
        break;
    case 3: /* <= / >        (N ^ V) | Z / !((N ^ V) | Z) */
        /*
         * Simplify:
         *   (N ^ V) | Z
         *   ((res < 0) ^ (sv < 0)) | !res
         *   ((res ^ sv) < 0) | !res
         *   (~(res ^ sv) >= 0) | !res
         *   !(~(res ^ sv) >> 31) | !res
         *   !(~(res ^ sv) >> 31 & res)
         */
        tmp = tcg_temp_new();
        tcg_gen_eqv_reg(tmp, res, sv);
        tcg_gen_sari_reg(tmp, tmp, TARGET_REGISTER_BITS - 1);
        tcg_gen_and_reg(tmp, tmp, res);
        cond = cond_make_0_tmp(TCG_COND_EQ, tmp);
        break;
    case 4: /* NUV / UV      (!C / C) */
        cond = cond_make_0(TCG_COND_EQ, cb_msb);
        break;
    case 5: /* ZNV / VNZ     (!C | Z / C & !Z) */
        tmp = tcg_temp_new();
        tcg_gen_neg_reg(tmp, cb_msb);
        tcg_gen_and_reg(tmp, tmp, res);
        cond = cond_make_0_tmp(TCG_COND_EQ, tmp);
        break;
    case 6: /* SV / NSV      (V / !V) */
        cond = cond_make_0(TCG_COND_LT, sv);
        break;
    case 7: /* OD / EV */
        tmp = tcg_temp_new();
        tcg_gen_andi_reg(tmp, res, 1);
        cond = cond_make_0_tmp(TCG_COND_NE, tmp);
        break;
    default:
        g_assert_not_reached();
    }
    if (cf & 1) {
        cond.c = tcg_invert_cond(cond.c);
    }

    return cond;
}
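/*
 * The 4-bit condition field cf packs the test selector into bits 3..1 and a
 * negation flag into bit 0, so e.g. cf = 2 selects "=" (TCG_COND_EQ on the
 * result) while cf = 3 is the negated form "<>".
 */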
/* Similar, but for the special case of subtraction without borrow, we
   can use the inputs directly.  This can allow other computation to be
   deleted as unused.  */
static DisasCond do_sub_cond(unsigned cf, TCGv_reg res,
                             TCGv_reg in1, TCGv_reg in2, TCGv_reg sv)
{
    DisasCond cond;

    switch (cf >> 1) {
    case 1: /* = / <> */
        cond = cond_make(TCG_COND_EQ, in1, in2);
        break;
    case 2: /* < / >= */
        cond = cond_make(TCG_COND_LT, in1, in2);
        break;
    case 3: /* <= / > */
        cond = cond_make(TCG_COND_LE, in1, in2);
        break;
    case 4: /* << / >>= */
        cond = cond_make(TCG_COND_LTU, in1, in2);
        break;
    case 5: /* <<= / >> */
        cond = cond_make(TCG_COND_LEU, in1, in2);
        break;
    default:
        return do_cond(cf, res, NULL, sv);
    }
    if (cf & 1) {
        cond.c = tcg_invert_cond(cond.c);
    }

    return cond;
}

/*
 * Similar, but for logicals, where the carry and overflow bits are not
 * computed, and use of them is undefined.
 *
 * Undefined or not, hardware does not trap.  It seems reasonable to
 * assume hardware treats cases c={4,5,6} as if C=0 & V=0, since that's
 * how cases c={2,3} are treated.
 */
static DisasCond do_log_cond(unsigned cf, TCGv_reg res)
{
    switch (cf) {
    case 0:  /* never */
    case 9:  /* undef, C */
    case 11: /* undef, C & !Z */
    case 12: /* undef, V */
        return cond_make_f();

    case 1:  /* true */
    case 8:  /* undef, !C */
    case 10: /* undef, !C | Z */
    case 13: /* undef, !V */
        return cond_make_t();

    case 2:  /* == */
        return cond_make_0(TCG_COND_EQ, res);
    case 3:  /* <> */
        return cond_make_0(TCG_COND_NE, res);
    case 4:  /* < */
        return cond_make_0(TCG_COND_LT, res);
    case 5:  /* >= */
        return cond_make_0(TCG_COND_GE, res);
    case 6:  /* <= */
        return cond_make_0(TCG_COND_LE, res);
    case 7:  /* > */
        return cond_make_0(TCG_COND_GT, res);

    case 14: /* OD */
    case 15: /* EV */
        return do_cond(cf, res, NULL, NULL);

    default:
        g_assert_not_reached();
    }
}

/* Similar, but for shift/extract/deposit conditions.  */
static DisasCond do_sed_cond(unsigned orig, TCGv_reg res)
{
    unsigned c, f;

    /* Convert the compressed condition codes to standard.
       0-2 are the same as logicals (nv,<,<=), while 3 is OD.
       4-7 are the reverse of 0-3.  */
    c = orig & 3;
    if (c == 3) {
        c = 7;
    }
    f = (orig & 4) / 4;

    return do_log_cond(c * 2 + f, res);
}
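/*
 * Worked example of the remapping above: orig = 7 gives c = 3, which is
 * promoted to 7 (OD), and f = 1, so do_log_cond() is called with 15, the
 * EV condition; that is, condition 7 is indeed the reverse of condition 3.
 */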
/* Similar, but for unit conditions.  */
static DisasCond do_unit_cond(unsigned cf, TCGv_reg res,
                              TCGv_reg in1, TCGv_reg in2)
{
    DisasCond cond;
    TCGv_reg tmp, cb = NULL;

    if (cf & 8) {
        /* Since we want to test lots of carry-out bits all at once, do not
         * do our normal thing and compute carry-in of bit B+1 since that
         * leaves us with carry bits spread across two words.
         */
        cb = tcg_temp_new();
        tmp = tcg_temp_new();
        tcg_gen_or_reg(cb, in1, in2);
        tcg_gen_and_reg(tmp, in1, in2);
        tcg_gen_andc_reg(cb, cb, res);
        tcg_gen_or_reg(cb, cb, tmp);
        tcg_temp_free(tmp);
    }

    switch (cf >> 1) {
    case 0: /* never / TR */
    case 1: /* undefined */
    case 5: /* undefined */
        cond = cond_make_f();
        break;

    case 2: /* SBZ / NBZ */
        /* See hasless(v,1) from
         * https://graphics.stanford.edu/~seander/bithacks.html#ZeroInWord
         */
        tmp = tcg_temp_new();
        tcg_gen_subi_reg(tmp, res, 0x01010101u);
        tcg_gen_andc_reg(tmp, tmp, res);
        tcg_gen_andi_reg(tmp, tmp, 0x80808080u);
        cond = cond_make_0(TCG_COND_NE, tmp);
        tcg_temp_free(tmp);
        break;

    case 3: /* SHZ / NHZ */
        tmp = tcg_temp_new();
        tcg_gen_subi_reg(tmp, res, 0x00010001u);
        tcg_gen_andc_reg(tmp, tmp, res);
        tcg_gen_andi_reg(tmp, tmp, 0x80008000u);
        cond = cond_make_0(TCG_COND_NE, tmp);
        tcg_temp_free(tmp);
        break;

    case 4: /* SDC / NDC */
        tcg_gen_andi_reg(cb, cb, 0x88888888u);
        cond = cond_make_0(TCG_COND_NE, cb);
        break;

    case 6: /* SBC / NBC */
        tcg_gen_andi_reg(cb, cb, 0x80808080u);
        cond = cond_make_0(TCG_COND_NE, cb);
        break;

    case 7: /* SHC / NHC */
        tcg_gen_andi_reg(cb, cb, 0x80008000u);
        cond = cond_make_0(TCG_COND_NE, cb);
        break;

    default:
        g_assert_not_reached();
    }
    if (cf & 8) {
        tcg_temp_free(cb);
    }
    if (cf & 1) {
        cond.c = tcg_invert_cond(cond.c);
    }

    return cond;
}
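/*
 * The subtract-and-mask sequences above are the classic "hasless" trick:
 * for SBZ, (res - 0x01010101) & ~res & 0x80808080 is non-zero exactly when
 * some byte of res is zero; the 0x00010001/0x80008000 pair does the same
 * test per halfword.
 */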
/* Compute signed overflow for addition.  */
static TCGv_reg do_add_sv(DisasContext *ctx, TCGv_reg res,
                          TCGv_reg in1, TCGv_reg in2)
{
    TCGv_reg sv = get_temp(ctx);
    TCGv_reg tmp = tcg_temp_new();

    tcg_gen_xor_reg(sv, res, in1);
    tcg_gen_xor_reg(tmp, in1, in2);
    tcg_gen_andc_reg(sv, sv, tmp);
    tcg_temp_free(tmp);

    return sv;
}
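/*
 * That is, signed overflow on addition occurs when the inputs have the same
 * sign and the result's sign differs, so sv = (res ^ in1) & ~(in1 ^ in2)
 * has its sign bit set exactly in the overflow case.
 */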
/* Compute signed overflow for subtraction.  */
static TCGv_reg do_sub_sv(DisasContext *ctx, TCGv_reg res,
                          TCGv_reg in1, TCGv_reg in2)
{
    TCGv_reg sv = get_temp(ctx);
    TCGv_reg tmp = tcg_temp_new();

    tcg_gen_xor_reg(sv, res, in1);
    tcg_gen_xor_reg(tmp, in1, in2);
    tcg_gen_and_reg(sv, sv, tmp);
    tcg_temp_free(tmp);

    return sv;
}
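/*
 * For subtraction the roles flip: overflow requires the operands to have
 * differing signs, hence sv = (res ^ in1) & (in1 ^ in2) here.
 */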
static void do_add(DisasContext *ctx, unsigned rt, TCGv_reg in1,
                   TCGv_reg in2, unsigned shift, bool is_l,
                   bool is_tsv, bool is_tc, bool is_c, unsigned cf)
{
    TCGv_reg dest, cb, cb_msb, sv, tmp;
    unsigned c = cf >> 1;
    DisasCond cond;

    dest = tcg_temp_new();
    cb = NULL;
    cb_msb = NULL;

    if (shift) {
        tmp = get_temp(ctx);
        tcg_gen_shli_reg(tmp, in1, shift);
        in1 = tmp;
    }

    if (!is_l || cond_need_cb(c)) {
        TCGv_reg zero = tcg_constant_reg(0);
        cb_msb = get_temp(ctx);
        tcg_gen_add2_reg(dest, cb_msb, in1, zero, in2, zero);
        if (is_c) {
            tcg_gen_add2_reg(dest, cb_msb, dest, cb_msb, cpu_psw_cb_msb, zero);
        }
        if (!is_l) {
            cb = get_temp(ctx);
            tcg_gen_xor_reg(cb, in1, in2);
            tcg_gen_xor_reg(cb, cb, dest);
        }
    } else {
        tcg_gen_add_reg(dest, in1, in2);
        if (is_c) {
            tcg_gen_add_reg(dest, dest, cpu_psw_cb_msb);
        }
    }

    /* Compute signed overflow if required.  */
    sv = NULL;
    if (is_tsv || cond_need_sv(c)) {
        sv = do_add_sv(ctx, dest, in1, in2);
        if (is_tsv) {
            /* ??? Need to include overflow from shift.  */
            gen_helper_tsv(cpu_env, sv);
        }
    }

    /* Emit any conditional trap before any writeback.  */
    cond = do_cond(cf, dest, cb_msb, sv);
    if (is_tc) {
        tmp = tcg_temp_new();
        tcg_gen_setcond_reg(cond.c, tmp, cond.a0, cond.a1);
        gen_helper_tcond(cpu_env, tmp);
        tcg_temp_free(tmp);
    }

    /* Write back the result.  */
    if (!is_l) {
        save_or_nullify(ctx, cpu_psw_cb, cb);
        save_or_nullify(ctx, cpu_psw_cb_msb, cb_msb);
    }
    save_gpr(ctx, rt, dest);
    tcg_temp_free(dest);

    /* Install the new nullification.  */
    cond_free(&ctx->null_cond);
    ctx->null_cond = cond;
}
static bool do_add_reg(DisasContext *ctx, arg_rrr_cf_sh *a,
                       bool is_l, bool is_tsv, bool is_tc, bool is_c)
{
    TCGv_reg tcg_r1, tcg_r2;

    if (a->cf) {
        nullify_over(ctx);
    }
    tcg_r1 = load_gpr(ctx, a->r1);
    tcg_r2 = load_gpr(ctx, a->r2);
    do_add(ctx, a->t, tcg_r1, tcg_r2, a->sh, is_l, is_tsv, is_tc, is_c, a->cf);
    return nullify_end(ctx);
}

static bool do_add_imm(DisasContext *ctx, arg_rri_cf *a,
                       bool is_tsv, bool is_tc)
{
    TCGv_reg tcg_im, tcg_r2;

    if (a->cf) {
        nullify_over(ctx);
    }
    tcg_im = load_const(ctx, a->i);
    tcg_r2 = load_gpr(ctx, a->r);
    do_add(ctx, a->t, tcg_im, tcg_r2, 0, 0, is_tsv, is_tc, 0, a->cf);
    return nullify_end(ctx);
}
static void do_sub(DisasContext *ctx, unsigned rt, TCGv_reg in1,
                   TCGv_reg in2, bool is_tsv, bool is_b,
                   bool is_tc, unsigned cf)
{
    TCGv_reg dest, sv, cb, cb_msb, zero, tmp;
    unsigned c = cf >> 1;
    DisasCond cond;

    dest = tcg_temp_new();
    cb = tcg_temp_new();
    cb_msb = tcg_temp_new();

    zero = tcg_constant_reg(0);
    if (is_b) {
        /* DEST,C = IN1 + ~IN2 + C.  */
        tcg_gen_not_reg(cb, in2);
        tcg_gen_add2_reg(dest, cb_msb, in1, zero, cpu_psw_cb_msb, zero);
        tcg_gen_add2_reg(dest, cb_msb, dest, cb_msb, cb, zero);
        tcg_gen_xor_reg(cb, cb, in1);
        tcg_gen_xor_reg(cb, cb, dest);
    } else {
        /* DEST,C = IN1 + ~IN2 + 1.  We can produce the same result in fewer
           operations by seeding the high word with 1 and subtracting.  */
        tcg_gen_movi_reg(cb_msb, 1);
        tcg_gen_sub2_reg(dest, cb_msb, in1, cb_msb, in2, zero);
        tcg_gen_eqv_reg(cb, in1, in2);
        tcg_gen_xor_reg(cb, cb, dest);
    }

    /* Compute signed overflow if required.  */
    sv = NULL;
    if (is_tsv || cond_need_sv(c)) {
        sv = do_sub_sv(ctx, dest, in1, in2);
        if (is_tsv) {
            gen_helper_tsv(cpu_env, sv);
        }
    }

    /* Compute the condition.  We cannot use the special case for borrow.  */
    if (!is_b) {
        cond = do_sub_cond(cf, dest, in1, in2, sv);
    } else {
        cond = do_cond(cf, dest, cb_msb, sv);
    }

    /* Emit any conditional trap before any writeback.  */
    if (is_tc) {
        tmp = tcg_temp_new();
        tcg_gen_setcond_reg(cond.c, tmp, cond.a0, cond.a1);
        gen_helper_tcond(cpu_env, tmp);
        tcg_temp_free(tmp);
    }

    /* Write back the result.  */
    save_or_nullify(ctx, cpu_psw_cb, cb);
    save_or_nullify(ctx, cpu_psw_cb_msb, cb_msb);
    save_gpr(ctx, rt, dest);
    tcg_temp_free(dest);
    tcg_temp_free(cb_msb);

    /* Install the new nullification.  */
    cond_free(&ctx->null_cond);
    ctx->null_cond = cond;
}
static bool do_sub_reg(DisasContext *ctx, arg_rrr_cf *a,
                       bool is_tsv, bool is_b, bool is_tc)
{
    TCGv_reg tcg_r1, tcg_r2;

    if (a->cf) {
        nullify_over(ctx);
    }
    tcg_r1 = load_gpr(ctx, a->r1);
    tcg_r2 = load_gpr(ctx, a->r2);
    do_sub(ctx, a->t, tcg_r1, tcg_r2, is_tsv, is_b, is_tc, a->cf);
    return nullify_end(ctx);
}

static bool do_sub_imm(DisasContext *ctx, arg_rri_cf *a, bool is_tsv)
{
    TCGv_reg tcg_im, tcg_r2;

    if (a->cf) {
        nullify_over(ctx);
    }
    tcg_im = load_const(ctx, a->i);
    tcg_r2 = load_gpr(ctx, a->r);
    do_sub(ctx, a->t, tcg_im, tcg_r2, is_tsv, 0, 0, a->cf);
    return nullify_end(ctx);
}

static void do_cmpclr(DisasContext *ctx, unsigned rt, TCGv_reg in1,
                      TCGv_reg in2, unsigned cf)
{
    TCGv_reg dest, sv;
    DisasCond cond;

    dest = tcg_temp_new();
    tcg_gen_sub_reg(dest, in1, in2);

    /* Compute signed overflow if required.  */
    sv = NULL;
    if (cond_need_sv(cf >> 1)) {
        sv = do_sub_sv(ctx, dest, in1, in2);
    }

    /* Form the condition for the compare.  */
    cond = do_sub_cond(cf, dest, in1, in2, sv);

    /* Clear.  */
    tcg_gen_movi_reg(dest, 0);
    save_gpr(ctx, rt, dest);
    tcg_temp_free(dest);

    /* Install the new nullification.  */
    cond_free(&ctx->null_cond);
    ctx->null_cond = cond;
}
static void do_log(DisasContext *ctx, unsigned rt, TCGv_reg in1,
                   TCGv_reg in2, unsigned cf,
                   void (*fn)(TCGv_reg, TCGv_reg, TCGv_reg))
{
    TCGv_reg dest = dest_gpr(ctx, rt);

    /* Perform the operation, and writeback.  */
    fn(dest, in1, in2);
    save_gpr(ctx, rt, dest);

    /* Install the new nullification.  */
    cond_free(&ctx->null_cond);
    if (cf) {
        ctx->null_cond = do_log_cond(cf, dest);
    }
}

static bool do_log_reg(DisasContext *ctx, arg_rrr_cf *a,
                       void (*fn)(TCGv_reg, TCGv_reg, TCGv_reg))
{
    TCGv_reg tcg_r1, tcg_r2;

    if (a->cf) {
        nullify_over(ctx);
    }
    tcg_r1 = load_gpr(ctx, a->r1);
    tcg_r2 = load_gpr(ctx, a->r2);
    do_log(ctx, a->t, tcg_r1, tcg_r2, a->cf, fn);
    return nullify_end(ctx);
}

static void do_unit(DisasContext *ctx, unsigned rt, TCGv_reg in1,
                    TCGv_reg in2, unsigned cf, bool is_tc,
                    void (*fn)(TCGv_reg, TCGv_reg, TCGv_reg))
{
    TCGv_reg dest;
    DisasCond cond;

    if (cf == 0) {
        dest = dest_gpr(ctx, rt);
        fn(dest, in1, in2);
        save_gpr(ctx, rt, dest);
        cond_free(&ctx->null_cond);
    } else {
        dest = tcg_temp_new();
        fn(dest, in1, in2);

        cond = do_unit_cond(cf, dest, in1, in2);

        if (is_tc) {
            TCGv_reg tmp = tcg_temp_new();
            tcg_gen_setcond_reg(cond.c, tmp, cond.a0, cond.a1);
            gen_helper_tcond(cpu_env, tmp);
            tcg_temp_free(tmp);
        }
        save_gpr(ctx, rt, dest);

        cond_free(&ctx->null_cond);
        ctx->null_cond = cond;
    }
}
#ifndef CONFIG_USER_ONLY
/* The "normal" usage is SP >= 0, wherein SP == 0 selects the space
   from the top 2 bits of the base register.  There are a few system
   instructions that have a 3-bit space specifier, for which SR0 is
   not special.  To handle this, pass ~SP.  */
static TCGv_i64 space_select(DisasContext *ctx, int sp, TCGv_reg base)
{
    TCGv_ptr ptr;
    TCGv_reg tmp;
    TCGv_i64 spc;

    if (sp != 0) {
        spc = get_temp_tl(ctx);
        load_spr(ctx, spc, sp);
        return spc;
    }
    if (ctx->tb_flags & TB_FLAG_SR_SAME) {
        return cpu_srH;
    }

    ptr = tcg_temp_new_ptr();
    tmp = tcg_temp_new();
    spc = get_temp_tl(ctx);

    tcg_gen_shri_reg(tmp, base, TARGET_REGISTER_BITS - 5);
    tcg_gen_andi_reg(tmp, tmp, 030);
    tcg_gen_trunc_reg_ptr(ptr, tmp);
    tcg_temp_free(tmp);

    tcg_gen_add_ptr(ptr, ptr, cpu_env);
    tcg_gen_ld_i64(spc, ptr, offsetof(CPUHPPAState, sr[4]));
    tcg_temp_free_ptr(ptr);

    return spc;
}
static void form_gva(DisasContext *ctx, TCGv_tl *pgva, TCGv_reg *pofs,
                     unsigned rb, unsigned rx, int scale, target_sreg disp,
                     unsigned sp, int modify, bool is_phys)
{
    TCGv_reg base = load_gpr(ctx, rb);
    TCGv_reg ofs;

    /* Note that RX is mutually exclusive with DISP.  */
    if (rx) {
        ofs = get_temp(ctx);
        tcg_gen_shli_reg(ofs, cpu_gr[rx], scale);
        tcg_gen_add_reg(ofs, ofs, base);
    } else if (disp || modify) {
        ofs = get_temp(ctx);
        tcg_gen_addi_reg(ofs, base, disp);
    } else {
        ofs = base;
    }

    *pofs = ofs;
#ifdef CONFIG_USER_ONLY
    *pgva = (modify <= 0 ? ofs : base);
#else
    TCGv_tl addr = get_temp_tl(ctx);
    tcg_gen_extu_reg_tl(addr, modify <= 0 ? ofs : base);
    if (ctx->tb_flags & PSW_W) {
        tcg_gen_andi_tl(addr, addr, 0x3fffffffffffffffull);
    }
    if (!is_phys) {
        tcg_gen_or_tl(addr, addr, space_select(ctx, sp, base));
    }
    *pgva = addr;
#endif
}
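/*
 * Example of the composition above: for a form like "ldw 4(sr3,r5)" the
 * offset is r5 + 4; on system emulation the global virtual address is that
 * offset (masked to 62 bits when PSW[W] is set) OR'd with the space value
 * selected from sr3.
 */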
/* Emit a memory load.  The modify parameter should be
 * < 0 for pre-modify,
 * > 0 for post-modify,
 * = 0 for no base register update.
 */
static void do_load_32(DisasContext *ctx, TCGv_i32 dest, unsigned rb,
                       unsigned rx, int scale, target_sreg disp,
                       unsigned sp, int modify, MemOp mop)
{
    TCGv_reg ofs;
    TCGv_tl addr;

    /* Caller uses nullify_over/nullify_end.  */
    assert(ctx->null_cond.c == TCG_COND_NEVER);

    form_gva(ctx, &addr, &ofs, rb, rx, scale, disp, sp, modify,
             ctx->mmu_idx == MMU_PHYS_IDX);
    tcg_gen_qemu_ld_i32(dest, addr, ctx->mmu_idx, mop | UNALIGN(ctx));
    if (modify) {
        save_gpr(ctx, rb, ofs);
    }
}

static void do_load_64(DisasContext *ctx, TCGv_i64 dest, unsigned rb,
                       unsigned rx, int scale, target_sreg disp,
                       unsigned sp, int modify, MemOp mop)
{
    TCGv_reg ofs;
    TCGv_tl addr;

    /* Caller uses nullify_over/nullify_end.  */
    assert(ctx->null_cond.c == TCG_COND_NEVER);

    form_gva(ctx, &addr, &ofs, rb, rx, scale, disp, sp, modify,
             ctx->mmu_idx == MMU_PHYS_IDX);
    tcg_gen_qemu_ld_i64(dest, addr, ctx->mmu_idx, mop | UNALIGN(ctx));
    if (modify) {
        save_gpr(ctx, rb, ofs);
    }
}

static void do_store_32(DisasContext *ctx, TCGv_i32 src, unsigned rb,
                        unsigned rx, int scale, target_sreg disp,
                        unsigned sp, int modify, MemOp mop)
{
    TCGv_reg ofs;
    TCGv_tl addr;

    /* Caller uses nullify_over/nullify_end.  */
    assert(ctx->null_cond.c == TCG_COND_NEVER);

    form_gva(ctx, &addr, &ofs, rb, rx, scale, disp, sp, modify,
             ctx->mmu_idx == MMU_PHYS_IDX);
    tcg_gen_qemu_st_i32(src, addr, ctx->mmu_idx, mop | UNALIGN(ctx));
    if (modify) {
        save_gpr(ctx, rb, ofs);
    }
}

static void do_store_64(DisasContext *ctx, TCGv_i64 src, unsigned rb,
                        unsigned rx, int scale, target_sreg disp,
                        unsigned sp, int modify, MemOp mop)
{
    TCGv_reg ofs;
    TCGv_tl addr;

    /* Caller uses nullify_over/nullify_end.  */
    assert(ctx->null_cond.c == TCG_COND_NEVER);

    form_gva(ctx, &addr, &ofs, rb, rx, scale, disp, sp, modify,
             ctx->mmu_idx == MMU_PHYS_IDX);
    tcg_gen_qemu_st_i64(src, addr, ctx->mmu_idx, mop | UNALIGN(ctx));
    if (modify) {
        save_gpr(ctx, rb, ofs);
    }
}

#if TARGET_REGISTER_BITS == 64
#define do_load_reg   do_load_64
#define do_store_reg  do_store_64
#else
#define do_load_reg   do_load_32
#define do_store_reg  do_store_32
#endif
static bool do_load(DisasContext *ctx, unsigned rt, unsigned rb,
                    unsigned rx, int scale, target_sreg disp,
                    unsigned sp, int modify, MemOp mop)
{
    TCGv_reg dest;

    nullify_over(ctx);

    if (modify == 0) {
        /* No base register update.  */
        dest = dest_gpr(ctx, rt);
    } else {
        /* Make sure if RT == RB, we see the result of the load.  */
        dest = get_temp(ctx);
    }
    do_load_reg(ctx, dest, rb, rx, scale, disp, sp, modify, mop);
    save_gpr(ctx, rt, dest);

    return nullify_end(ctx);
}

static bool do_floadw(DisasContext *ctx, unsigned rt, unsigned rb,
                      unsigned rx, int scale, target_sreg disp,
                      unsigned sp, int modify)
{
    TCGv_i32 tmp;

    nullify_over(ctx);

    tmp = tcg_temp_new_i32();
    do_load_32(ctx, tmp, rb, rx, scale, disp, sp, modify, MO_TEUL);
    save_frw_i32(rt, tmp);
    tcg_temp_free_i32(tmp);

    if (rt == 0) {
        gen_helper_loaded_fr0(cpu_env);
    }

    return nullify_end(ctx);
}

static bool trans_fldw(DisasContext *ctx, arg_ldst *a)
{
    return do_floadw(ctx, a->t, a->b, a->x, a->scale ? 2 : 0,
                     a->disp, a->sp, a->m);
}

static bool do_floadd(DisasContext *ctx, unsigned rt, unsigned rb,
                      unsigned rx, int scale, target_sreg disp,
                      unsigned sp, int modify)
{
    TCGv_i64 tmp;

    nullify_over(ctx);

    tmp = tcg_temp_new_i64();
    do_load_64(ctx, tmp, rb, rx, scale, disp, sp, modify, MO_TEUQ);
    save_frd(rt, tmp);
    tcg_temp_free_i64(tmp);

    if (rt == 0) {
        gen_helper_loaded_fr0(cpu_env);
    }

    return nullify_end(ctx);
}

static bool trans_fldd(DisasContext *ctx, arg_ldst *a)
{
    return do_floadd(ctx, a->t, a->b, a->x, a->scale ? 3 : 0,
                     a->disp, a->sp, a->m);
}
static bool do_store(DisasContext *ctx, unsigned rt, unsigned rb,
                     target_sreg disp, unsigned sp,
                     int modify, MemOp mop)
{
    nullify_over(ctx);
    do_store_reg(ctx, load_gpr(ctx, rt), rb, 0, 0, disp, sp, modify, mop);
    return nullify_end(ctx);
}

static bool do_fstorew(DisasContext *ctx, unsigned rt, unsigned rb,
                       unsigned rx, int scale, target_sreg disp,
                       unsigned sp, int modify)
{
    TCGv_i32 tmp;

    nullify_over(ctx);

    tmp = load_frw_i32(rt);
    do_store_32(ctx, tmp, rb, rx, scale, disp, sp, modify, MO_TEUL);
    tcg_temp_free_i32(tmp);

    return nullify_end(ctx);
}

static bool trans_fstw(DisasContext *ctx, arg_ldst *a)
{
    return do_fstorew(ctx, a->t, a->b, a->x, a->scale ? 2 : 0,
                      a->disp, a->sp, a->m);
}

static bool do_fstored(DisasContext *ctx, unsigned rt, unsigned rb,
                       unsigned rx, int scale, target_sreg disp,
                       unsigned sp, int modify)
{
    TCGv_i64 tmp;

    nullify_over(ctx);

    tmp = load_frd(rt);
    do_store_64(ctx, tmp, rb, rx, scale, disp, sp, modify, MO_TEUQ);
    tcg_temp_free_i64(tmp);

    return nullify_end(ctx);
}

static bool trans_fstd(DisasContext *ctx, arg_ldst *a)
{
    return do_fstored(ctx, a->t, a->b, a->x, a->scale ? 3 : 0,
                      a->disp, a->sp, a->m);
}
static bool do_fop_wew(DisasContext *ctx, unsigned rt, unsigned ra,
                       void (*func)(TCGv_i32, TCGv_env, TCGv_i32))
{
    TCGv_i32 tmp;

    nullify_over(ctx);
    tmp = load_frw0_i32(ra);

    func(tmp, cpu_env, tmp);

    save_frw_i32(rt, tmp);
    tcg_temp_free_i32(tmp);
    return nullify_end(ctx);
}

static bool do_fop_wed(DisasContext *ctx, unsigned rt, unsigned ra,
                       void (*func)(TCGv_i32, TCGv_env, TCGv_i64))
{
    TCGv_i32 dst;
    TCGv_i64 src;

    nullify_over(ctx);
    src = load_frd0(ra);
    dst = tcg_temp_new_i32();

    func(dst, cpu_env, src);

    tcg_temp_free_i64(src);
    save_frw_i32(rt, dst);
    tcg_temp_free_i32(dst);
    return nullify_end(ctx);
}

static bool do_fop_ded(DisasContext *ctx, unsigned rt, unsigned ra,
                       void (*func)(TCGv_i64, TCGv_env, TCGv_i64))
{
    TCGv_i64 tmp;

    nullify_over(ctx);
    tmp = load_frd0(ra);

    func(tmp, cpu_env, tmp);

    save_frd(rt, tmp);
    tcg_temp_free_i64(tmp);
    return nullify_end(ctx);
}

static bool do_fop_dew(DisasContext *ctx, unsigned rt, unsigned ra,
                       void (*func)(TCGv_i64, TCGv_env, TCGv_i32))
{
    TCGv_i32 src;
    TCGv_i64 dst;

    nullify_over(ctx);
    src = load_frw0_i32(ra);
    dst = tcg_temp_new_i64();

    func(dst, cpu_env, src);

    tcg_temp_free_i32(src);
    save_frd(rt, dst);
    tcg_temp_free_i64(dst);
    return nullify_end(ctx);
}

static bool do_fop_weww(DisasContext *ctx, unsigned rt,
                        unsigned ra, unsigned rb,
                        void (*func)(TCGv_i32, TCGv_env, TCGv_i32, TCGv_i32))
{
    TCGv_i32 a, b;

    nullify_over(ctx);
    a = load_frw0_i32(ra);
    b = load_frw0_i32(rb);

    func(a, cpu_env, a, b);

    tcg_temp_free_i32(b);
    save_frw_i32(rt, a);
    tcg_temp_free_i32(a);
    return nullify_end(ctx);
}

static bool do_fop_dedd(DisasContext *ctx, unsigned rt,
                        unsigned ra, unsigned rb,
                        void (*func)(TCGv_i64, TCGv_env, TCGv_i64, TCGv_i64))
{
    TCGv_i64 a, b;

    nullify_over(ctx);
    a = load_frd0(ra);
    b = load_frd0(rb);

    func(a, cpu_env, a, b);

    tcg_temp_free_i64(b);
    save_frd(rt, a);
    tcg_temp_free_i64(a);
    return nullify_end(ctx);
}
/* Emit an unconditional branch to a direct target, which may or may not
   have already had nullification handled.  */
static bool do_dbranch(DisasContext *ctx, target_ureg dest,
                       unsigned link, bool is_n)
{
    if (ctx->null_cond.c == TCG_COND_NEVER && ctx->null_lab == NULL) {
        if (link != 0) {
            copy_iaoq_entry(cpu_gr[link], ctx->iaoq_n, ctx->iaoq_n_var);
        }
        ctx->iaoq_n = dest;
        if (is_n) {
            ctx->null_cond.c = TCG_COND_ALWAYS;
        }
    } else {
        nullify_over(ctx);

        if (link != 0) {
            copy_iaoq_entry(cpu_gr[link], ctx->iaoq_n, ctx->iaoq_n_var);
        }

        if (is_n && use_nullify_skip(ctx)) {
            nullify_set(ctx, 0);
            gen_goto_tb(ctx, 0, dest, dest + 4);
        } else {
            nullify_set(ctx, is_n);
            gen_goto_tb(ctx, 0, ctx->iaoq_b, dest);
        }

        nullify_end(ctx);

        nullify_set(ctx, 0);
        gen_goto_tb(ctx, 1, ctx->iaoq_b, ctx->iaoq_n);
        ctx->base.is_jmp = DISAS_NORETURN;
    }
    return true;
}
/* Emit a conditional branch to a direct target.  If the branch itself
   is nullified, we should have already used nullify_over.  */
static bool do_cbranch(DisasContext *ctx, target_sreg disp, bool is_n,
                       DisasCond *cond)
{
    target_ureg dest = iaoq_dest(ctx, disp);
    TCGLabel *taken = NULL;
    TCGCond c = cond->c;
    bool n;

    assert(ctx->null_cond.c == TCG_COND_NEVER);

    /* Handle TRUE and NEVER as direct branches.  */
    if (c == TCG_COND_ALWAYS) {
        return do_dbranch(ctx, dest, 0, is_n && disp >= 0);
    }
    if (c == TCG_COND_NEVER) {
        return do_dbranch(ctx, ctx->iaoq_n, 0, is_n && disp < 0);
    }

    taken = gen_new_label();
    tcg_gen_brcond_reg(c, cond->a0, cond->a1, taken);
    cond_free(cond);

    /* Not taken: Condition not satisfied; nullify on backward branches. */
    n = is_n && disp < 0;
    if (n && use_nullify_skip(ctx)) {
        nullify_set(ctx, 0);
        gen_goto_tb(ctx, 0, ctx->iaoq_n, ctx->iaoq_n + 4);
    } else {
        if (!n && ctx->null_lab) {
            gen_set_label(ctx->null_lab);
            ctx->null_lab = NULL;
        }
        nullify_set(ctx, n);
        if (ctx->iaoq_n == -1) {
            /* The temporary iaoq_n_var died at the branch above.
               Regenerate it here instead of saving it.  */
            tcg_gen_addi_reg(ctx->iaoq_n_var, cpu_iaoq_b, 4);
        }
        gen_goto_tb(ctx, 0, ctx->iaoq_b, ctx->iaoq_n);
    }

    gen_set_label(taken);

    /* Taken: Condition satisfied; nullify on forward branches.  */
    n = is_n && disp >= 0;
    if (n && use_nullify_skip(ctx)) {
        nullify_set(ctx, 0);
        gen_goto_tb(ctx, 1, dest, dest + 4);
    } else {
        nullify_set(ctx, n);
        gen_goto_tb(ctx, 1, ctx->iaoq_b, dest);
    }

    /* Not taken: the branch itself was nullified.  */
    if (ctx->null_lab) {
        gen_set_label(ctx->null_lab);
        ctx->null_lab = NULL;
        ctx->base.is_jmp = DISAS_IAQ_N_STALE;
    } else {
        ctx->base.is_jmp = DISAS_NORETURN;
    }
    return true;
}
/* Emit an unconditional branch to an indirect target.  This handles
   nullification of the branch itself.  */
static bool do_ibranch(DisasContext *ctx, TCGv_reg dest,
                       unsigned link, bool is_n)
{
    TCGv_reg a0, a1, next, tmp;
    TCGCond c;

    assert(ctx->null_lab == NULL);

    if (ctx->null_cond.c == TCG_COND_NEVER) {
        if (link != 0) {
            copy_iaoq_entry(cpu_gr[link], ctx->iaoq_n, ctx->iaoq_n_var);
        }
        next = get_temp(ctx);
        tcg_gen_mov_reg(next, dest);
        if (is_n) {
            if (use_nullify_skip(ctx)) {
                tcg_gen_mov_reg(cpu_iaoq_f, next);
                tcg_gen_addi_reg(cpu_iaoq_b, next, 4);
                nullify_set(ctx, 0);
                ctx->base.is_jmp = DISAS_IAQ_N_UPDATED;
                return true;
            }
            ctx->null_cond.c = TCG_COND_ALWAYS;
        }
        ctx->iaoq_n = -1;
        ctx->iaoq_n_var = next;
    } else if (is_n && use_nullify_skip(ctx)) {
        /* The (conditional) branch, B, nullifies the next insn, N,
           and we're allowed to skip execution N (no single-step or
           tracepoint in effect).  Since the goto_ptr that we must use
           for the indirect branch consumes no special resources, we
           can (conditionally) skip B and continue execution.  */
        /* The use_nullify_skip test implies we have a known control path. */
        tcg_debug_assert(ctx->iaoq_b != -1);
        tcg_debug_assert(ctx->iaoq_n != -1);

        /* We do have to handle the non-local temporary, DEST, before
           branching.  Since IOAQ_F is not really live at this point, we
           can simply store DEST optimistically.  Similarly with IAOQ_B.  */
        tcg_gen_mov_reg(cpu_iaoq_f, dest);
        tcg_gen_addi_reg(cpu_iaoq_b, dest, 4);

        nullify_over(ctx);
        if (link != 0) {
            tcg_gen_movi_reg(cpu_gr[link], ctx->iaoq_n);
        }
        tcg_gen_lookup_and_goto_ptr();
        return nullify_end(ctx);
    } else {
        c = ctx->null_cond.c;
        a0 = ctx->null_cond.a0;
        a1 = ctx->null_cond.a1;

        tmp = tcg_temp_new();
        next = get_temp(ctx);

        copy_iaoq_entry(tmp, ctx->iaoq_n, ctx->iaoq_n_var);
        tcg_gen_movcond_reg(c, next, a0, a1, tmp, dest);
        ctx->iaoq_n = -1;
        ctx->iaoq_n_var = next;

        if (link != 0) {
            tcg_gen_movcond_reg(c, cpu_gr[link], a0, a1, cpu_gr[link], tmp);
        }

        if (is_n) {
            /* The branch nullifies the next insn, which means the state of N
               after the branch is the inverse of the state of N that applied
               to the branch.  */
            tcg_gen_setcond_reg(tcg_invert_cond(c), cpu_psw_n, a0, a1);
            cond_free(&ctx->null_cond);
            ctx->null_cond = cond_make_n();
            ctx->psw_n_nonzero = true;
        } else {
            cond_free(&ctx->null_cond);
        }
    }
    return true;
}
/*
 * Implement
 *    if (IAOQ_Front{30..31} < GR[b]{30..31})
 *         IAOQ_Next{30..31} ← GR[b]{30..31};
 *    else
 *         IAOQ_Next{30..31} ← IAOQ_Front{30..31};
 * which keeps the privilege level from being increased.
 */
static TCGv_reg do_ibranch_priv(DisasContext *ctx, TCGv_reg offset)
{
    TCGv_reg dest;

    switch (ctx->privilege) {
    case 0:
        /* Privilege 0 is maximum and is allowed to decrease.  */
        return offset;
    case 3:
        /* Privilege 3 is minimum and is never allowed to increase.  */
        dest = get_temp(ctx);
        tcg_gen_ori_reg(dest, offset, 3);
        break;
    default:
        dest = get_temp(ctx);
        tcg_gen_andi_reg(dest, offset, -4);
        tcg_gen_ori_reg(dest, dest, ctx->privilege);
        tcg_gen_movcond_reg(TCG_COND_GTU, dest, dest, offset, dest, offset);
        break;
    }
    return dest;
}
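/*
 * Example of the clamp above: at privilege level 2, a branch target whose
 * privilege bits are 0 first becomes (offset & -4) | 2, and the GTU movcond
 * then keeps whichever value is numerically larger, so the target's
 * privilege field can never name a more-privileged level than the current
 * one.
 */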
#ifdef CONFIG_USER_ONLY
/* On Linux, page zero is normally marked execute only + gateway.
   Therefore normal read or write is supposed to fail, but specific
   offsets have kernel code mapped to raise permissions to implement
   system calls.  Handling this via an explicit check here, rather
   in than the "be disp(sr2,r0)" instruction that probably sent us
   here, is the easiest way to handle the branch delay slot on the
   aforementioned BE.  */
static void do_page_zero(DisasContext *ctx)
{
    /* If by some means we get here with PSW[N]=1, that implies that
       the B,GATE instruction would be skipped, and we'd fault on the
       next insn within the privileged page.  */
    switch (ctx->null_cond.c) {
    case TCG_COND_NEVER:
        break;
    case TCG_COND_ALWAYS:
        tcg_gen_movi_reg(cpu_psw_n, 0);
        goto do_sigill;
    default:
        /* Since this is always the first (and only) insn within the
           TB, we should know the state of PSW[N] from TB->FLAGS.  */
        g_assert_not_reached();
    }

    /* Check that we didn't arrive here via some means that allowed
       non-sequential instruction execution.  Normally the PSW[B] bit
       detects this by disallowing the B,GATE instruction to execute
       under such conditions.  */
    if (ctx->iaoq_b != ctx->iaoq_f + 4) {
        goto do_sigill;
    }

    switch (ctx->iaoq_f & -4) {
    case 0x00: /* Null pointer call */
        gen_excp_1(EXCP_IMP);
        ctx->base.is_jmp = DISAS_NORETURN;
        break;

    case 0xb0: /* LWS */
        gen_excp_1(EXCP_SYSCALL_LWS);
        ctx->base.is_jmp = DISAS_NORETURN;
        break;

    case 0xe0: /* SET_THREAD_POINTER */
        tcg_gen_st_reg(cpu_gr[26], cpu_env, offsetof(CPUHPPAState, cr[27]));
        tcg_gen_ori_reg(cpu_iaoq_f, cpu_gr[31], 3);
        tcg_gen_addi_reg(cpu_iaoq_b, cpu_iaoq_f, 4);
        ctx->base.is_jmp = DISAS_IAQ_N_UPDATED;
        break;

    case 0x100: /* SYSCALL */
        gen_excp_1(EXCP_SYSCALL);
        ctx->base.is_jmp = DISAS_NORETURN;
        break;

    default:
    do_sigill:
        gen_excp_1(EXCP_ILL);
        ctx->base.is_jmp = DISAS_NORETURN;
        break;
    }
}
#endif
static bool trans_nop(DisasContext *ctx, arg_nop *a)
{
    cond_free(&ctx->null_cond);
    return true;
}

static bool trans_break(DisasContext *ctx, arg_break *a)
{
    return gen_excp_iir(ctx, EXCP_BREAK);
}

static bool trans_sync(DisasContext *ctx, arg_sync *a)
{
    /* No point in nullifying the memory barrier.  */
    tcg_gen_mb(TCG_BAR_SC | TCG_MO_ALL);

    cond_free(&ctx->null_cond);
    return true;
}

static bool trans_mfia(DisasContext *ctx, arg_mfia *a)
{
    unsigned rt = a->t;
    TCGv_reg tmp = dest_gpr(ctx, rt);
    tcg_gen_movi_reg(tmp, ctx->iaoq_f);
    save_gpr(ctx, rt, tmp);

    cond_free(&ctx->null_cond);
    return true;
}

static bool trans_mfsp(DisasContext *ctx, arg_mfsp *a)
{
    unsigned rt = a->t;
    unsigned rs = a->sp;
    TCGv_i64 t0 = tcg_temp_new_i64();
    TCGv_reg t1 = tcg_temp_new();

    load_spr(ctx, t0, rs);
    tcg_gen_shri_i64(t0, t0, 32);
    tcg_gen_trunc_i64_reg(t1, t0);

    save_gpr(ctx, rt, t1);
    tcg_temp_free_i64(t0);

    cond_free(&ctx->null_cond);
    return true;
}
static bool trans_mfctl(DisasContext *ctx, arg_mfctl *a)
{
    unsigned rt = a->t;
    unsigned ctl = a->r;
    TCGv_reg tmp;

    switch (ctl) {
    case CR_SAR:
#ifdef TARGET_HPPA64
        if (a->e == 0) {
            /* MFSAR without ,W masks low 5 bits.  */
            tmp = dest_gpr(ctx, rt);
            tcg_gen_andi_reg(tmp, cpu_sar, 31);
            save_gpr(ctx, rt, tmp);
            goto done;
        }
#endif
        save_gpr(ctx, rt, cpu_sar);
        goto done;
    case CR_IT: /* Interval Timer */
        /* FIXME: Respect PSW_S bit.  */
        nullify_over(ctx);
        tmp = dest_gpr(ctx, rt);
        if (tb_cflags(ctx->base.tb) & CF_USE_ICOUNT) {
            gen_io_start();
            gen_helper_read_interval_timer(tmp);
            ctx->base.is_jmp = DISAS_IAQ_N_STALE;
        } else {
            gen_helper_read_interval_timer(tmp);
        }
        save_gpr(ctx, rt, tmp);
        return nullify_end(ctx);
    case 26:
    case 27:
        break;
    default:
        /* All other control registers are privileged.  */
        CHECK_MOST_PRIVILEGED(EXCP_PRIV_REG);
        break;
    }

    tmp = get_temp(ctx);
    tcg_gen_ld_reg(tmp, cpu_env, offsetof(CPUHPPAState, cr[ctl]));
    save_gpr(ctx, rt, tmp);

 done:
    cond_free(&ctx->null_cond);
    return true;
}
static bool trans_mtsp(DisasContext *ctx, arg_mtsp *a)
{
    unsigned rr = a->r;
    unsigned rs = a->sp;
    TCGv_i64 t64;

    if (rs >= 5) {
        CHECK_MOST_PRIVILEGED(EXCP_PRIV_REG);
    }
    nullify_over(ctx);

    t64 = tcg_temp_new_i64();
    tcg_gen_extu_reg_i64(t64, load_gpr(ctx, rr));
    tcg_gen_shli_i64(t64, t64, 32);

    if (rs >= 4) {
        tcg_gen_st_i64(t64, cpu_env, offsetof(CPUHPPAState, sr[rs]));
        ctx->tb_flags &= ~TB_FLAG_SR_SAME;
    } else {
        tcg_gen_mov_i64(cpu_sr[rs], t64);
    }
    tcg_temp_free_i64(t64);

    return nullify_end(ctx);
}

static bool trans_mtctl(DisasContext *ctx, arg_mtctl *a)
{
    unsigned ctl = a->t;
    TCGv_reg reg;
    TCGv_reg tmp;

    if (ctl == CR_SAR) {
        reg = load_gpr(ctx, a->r);
        tmp = tcg_temp_new();
        tcg_gen_andi_reg(tmp, reg, TARGET_REGISTER_BITS - 1);
        save_or_nullify(ctx, cpu_sar, tmp);
        tcg_temp_free(tmp);

        cond_free(&ctx->null_cond);
        return true;
    }

    /* All other control registers are privileged or read-only.  */
    CHECK_MOST_PRIVILEGED(EXCP_PRIV_REG);

#ifndef CONFIG_USER_ONLY
    nullify_over(ctx);
    reg = load_gpr(ctx, a->r);

    switch (ctl) {
    case CR_IT:
        gen_helper_write_interval_timer(cpu_env, reg);
        break;
    case CR_EIRR:
        gen_helper_write_eirr(cpu_env, reg);
        break;
    case CR_EIEM:
        gen_helper_write_eiem(cpu_env, reg);
        ctx->base.is_jmp = DISAS_IAQ_N_STALE_EXIT;
        break;

    case CR_IIASQ:
    case CR_IIAOQ:
        /* FIXME: Respect PSW_Q bit */
        /* The write advances the queue and stores to the back element.  */
        tmp = get_temp(ctx);
        tcg_gen_ld_reg(tmp, cpu_env,
                       offsetof(CPUHPPAState, cr_back[ctl - CR_IIASQ]));
        tcg_gen_st_reg(tmp, cpu_env, offsetof(CPUHPPAState, cr[ctl]));
        tcg_gen_st_reg(reg, cpu_env,
                       offsetof(CPUHPPAState, cr_back[ctl - CR_IIASQ]));
        break;

    case CR_PID1:
    case CR_PID2:
    case CR_PID3:
    case CR_PID4:
        tcg_gen_st_reg(reg, cpu_env, offsetof(CPUHPPAState, cr[ctl]));
#ifndef CONFIG_USER_ONLY
        gen_helper_change_prot_id(cpu_env);
#endif
        break;

    default:
        tcg_gen_st_reg(reg, cpu_env, offsetof(CPUHPPAState, cr[ctl]));
        break;
    }
    return nullify_end(ctx);
#endif
}
static bool trans_mtsarcm(DisasContext *ctx, arg_mtsarcm *a)
{
    TCGv_reg tmp = tcg_temp_new();

    tcg_gen_not_reg(tmp, load_gpr(ctx, a->r));
    tcg_gen_andi_reg(tmp, tmp, TARGET_REGISTER_BITS - 1);
    save_or_nullify(ctx, cpu_sar, tmp);

    cond_free(&ctx->null_cond);
    return true;
}

static bool trans_ldsid(DisasContext *ctx, arg_ldsid *a)
{
    TCGv_reg dest = dest_gpr(ctx, a->t);

#ifdef CONFIG_USER_ONLY
    /* We don't implement space registers in user mode. */
    tcg_gen_movi_reg(dest, 0);
#else
    TCGv_i64 t0 = tcg_temp_new_i64();

    tcg_gen_mov_i64(t0, space_select(ctx, a->sp, load_gpr(ctx, a->b)));
    tcg_gen_shri_i64(t0, t0, 32);
    tcg_gen_trunc_i64_reg(dest, t0);

    tcg_temp_free_i64(t0);
#endif
    save_gpr(ctx, a->t, dest);

    cond_free(&ctx->null_cond);
    return true;
}
static bool trans_rsm(DisasContext *ctx, arg_rsm *a)
{
    CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
#ifndef CONFIG_USER_ONLY
    TCGv_reg tmp;

    nullify_over(ctx);

    tmp = get_temp(ctx);
    tcg_gen_ld_reg(tmp, cpu_env, offsetof(CPUHPPAState, psw));
    tcg_gen_andi_reg(tmp, tmp, ~a->i);
    gen_helper_swap_system_mask(tmp, cpu_env, tmp);
    save_gpr(ctx, a->t, tmp);

    /* Exit the TB to recognize new interrupts, e.g. PSW_M. */
    ctx->base.is_jmp = DISAS_IAQ_N_STALE_EXIT;
    return nullify_end(ctx);
#endif
}

static bool trans_ssm(DisasContext *ctx, arg_ssm *a)
{
    CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
#ifndef CONFIG_USER_ONLY
    TCGv_reg tmp;

    nullify_over(ctx);

    tmp = get_temp(ctx);
    tcg_gen_ld_reg(tmp, cpu_env, offsetof(CPUHPPAState, psw));
    tcg_gen_ori_reg(tmp, tmp, a->i);
    gen_helper_swap_system_mask(tmp, cpu_env, tmp);
    save_gpr(ctx, a->t, tmp);

    /* Exit the TB to recognize new interrupts, e.g. PSW_I. */
    ctx->base.is_jmp = DISAS_IAQ_N_STALE_EXIT;
    return nullify_end(ctx);
#endif
}
static bool trans_mtsm(DisasContext *ctx, arg_mtsm *a)
{
    CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
#ifndef CONFIG_USER_ONLY
    TCGv_reg tmp, reg;
    nullify_over(ctx);

    reg = load_gpr(ctx, a->r);
    tmp = get_temp(ctx);
    gen_helper_swap_system_mask(tmp, cpu_env, reg);

    /* Exit the TB to recognize new interrupts. */
    ctx->base.is_jmp = DISAS_IAQ_N_STALE_EXIT;
    return nullify_end(ctx);
#endif
}
static bool do_rfi(DisasContext *ctx, bool rfi_r)
{
    CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
#ifndef CONFIG_USER_ONLY
    nullify_over(ctx);

    if (rfi_r) {
        gen_helper_rfi_r(cpu_env);
    } else {
        gen_helper_rfi(cpu_env);
    }
    /* Exit the TB to recognize new interrupts. */
    tcg_gen_exit_tb(NULL, 0);
    ctx->base.is_jmp = DISAS_NORETURN;

    return nullify_end(ctx);
#endif
}

static bool trans_rfi(DisasContext *ctx, arg_rfi *a)
{
    return do_rfi(ctx, false);
}

static bool trans_rfi_r(DisasContext *ctx, arg_rfi_r *a)
{
    return do_rfi(ctx, true);
}
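
/*
 * RFI rewrites the whole IAOQ/IASQ queue and the PSW inside the helper, so
 * the translator cannot chain to a following block: tcg_gen_exit_tb(NULL, 0)
 * forces a return to the main loop, where interrupts unmasked by the
 * restored PSW are also recognized.
 */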
static bool trans_halt(DisasContext *ctx, arg_halt *a)
{
    CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
#ifndef CONFIG_USER_ONLY
    nullify_over(ctx);
    gen_helper_halt(cpu_env);
    ctx->base.is_jmp = DISAS_NORETURN;
    return nullify_end(ctx);
#endif
}

static bool trans_reset(DisasContext *ctx, arg_reset *a)
{
    CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
#ifndef CONFIG_USER_ONLY
    nullify_over(ctx);
    gen_helper_reset(cpu_env);
    ctx->base.is_jmp = DISAS_NORETURN;
    return nullify_end(ctx);
#endif
}

static bool trans_getshadowregs(DisasContext *ctx, arg_getshadowregs *a)
{
    CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
#ifndef CONFIG_USER_ONLY
    nullify_over(ctx);
    gen_helper_getshadowregs(cpu_env);
    return nullify_end(ctx);
#endif
}
static bool trans_nop_addrx(DisasContext *ctx, arg_ldst *a)
{
    if (a->m) {
        TCGv_reg dest = dest_gpr(ctx, a->b);
        TCGv_reg src1 = load_gpr(ctx, a->b);
        TCGv_reg src2 = load_gpr(ctx, a->x);

        /* The only thing we need to do is the base register modification. */
        tcg_gen_add_reg(dest, src1, src2);
        save_gpr(ctx, a->b, dest);
    }
    cond_free(&ctx->null_cond);
    return true;
}
static bool trans_probe(DisasContext *ctx, arg_probe *a)
{
    TCGv_reg dest, ofs;
    TCGv_i32 level, want;
    TCGv_tl addr;

    nullify_over(ctx);

    dest = dest_gpr(ctx, a->t);
    form_gva(ctx, &addr, &ofs, a->b, 0, 0, 0, a->sp, 0, false);

    if (a->imm) {
        level = tcg_constant_i32(a->ri);
    } else {
        level = tcg_temp_new_i32();
        tcg_gen_trunc_reg_i32(level, load_gpr(ctx, a->ri));
        tcg_gen_andi_i32(level, level, 3);
    }
    want = tcg_constant_i32(a->write ? PAGE_WRITE : PAGE_READ);

    gen_helper_probe(dest, cpu_env, addr, level, want);

    tcg_temp_free_i32(level);

    save_gpr(ctx, a->t, dest);
    return nullify_end(ctx);
}
2446 static bool trans_ixtlbx(DisasContext
*ctx
, arg_ixtlbx
*a
)
2448 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR
);
2449 #ifndef CONFIG_USER_ONLY
2455 form_gva(ctx
, &addr
, &ofs
, a
->b
, 0, 0, 0, a
->sp
, 0, false);
2456 reg
= load_gpr(ctx
, a
->r
);
2458 gen_helper_itlba(cpu_env
, addr
, reg
);
2460 gen_helper_itlbp(cpu_env
, addr
, reg
);
2463 /* Exit TB for TLB change if mmu is enabled. */
2464 if (ctx
->tb_flags
& PSW_C
) {
2465 ctx
->base
.is_jmp
= DISAS_IAQ_N_STALE
;
2467 return nullify_end(ctx
);
2471 static bool trans_pxtlbx(DisasContext
*ctx
, arg_pxtlbx
*a
)
2473 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR
);
2474 #ifndef CONFIG_USER_ONLY
2480 form_gva(ctx
, &addr
, &ofs
, a
->b
, a
->x
, 0, 0, a
->sp
, a
->m
, false);
2482 save_gpr(ctx
, a
->b
, ofs
);
2485 gen_helper_ptlbe(cpu_env
);
2487 gen_helper_ptlb(cpu_env
, addr
);
2490 /* Exit TB for TLB change if mmu is enabled. */
2491 if (ctx
->tb_flags
& PSW_C
) {
2492 ctx
->base
.is_jmp
= DISAS_IAQ_N_STALE
;
2494 return nullify_end(ctx
);
2499 * Implement the pcxl and pcxl2 Fast TLB Insert instructions.
2501 * https://parisc.wiki.kernel.org/images-parisc/a/a9/Pcxl2_ers.pdf
2502 * page 13-9 (195/206)
2504 static bool trans_ixtlbxf(DisasContext
*ctx
, arg_ixtlbxf
*a
)
2506 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR
);
2507 #ifndef CONFIG_USER_ONLY
2508 TCGv_tl addr
, atl
, stl
;
2515 * if (not (pcxl or pcxl2))
2516 * return gen_illegal(ctx);
2518 * Note for future: these are 32-bit systems; no hppa64.
2521 atl
= tcg_temp_new_tl();
2522 stl
= tcg_temp_new_tl();
2523 addr
= tcg_temp_new_tl();
2525 tcg_gen_ld32u_i64(stl
, cpu_env
,
2526 a
->data
? offsetof(CPUHPPAState
, cr
[CR_ISR
])
2527 : offsetof(CPUHPPAState
, cr
[CR_IIASQ
]));
2528 tcg_gen_ld32u_i64(atl
, cpu_env
,
2529 a
->data
? offsetof(CPUHPPAState
, cr
[CR_IOR
])
2530 : offsetof(CPUHPPAState
, cr
[CR_IIAOQ
]));
2531 tcg_gen_shli_i64(stl
, stl
, 32);
2532 tcg_gen_or_tl(addr
, atl
, stl
);
2533 tcg_temp_free_tl(atl
);
2534 tcg_temp_free_tl(stl
);
2536 reg
= load_gpr(ctx
, a
->r
);
2538 gen_helper_itlba(cpu_env
, addr
, reg
);
2540 gen_helper_itlbp(cpu_env
, addr
, reg
);
2542 tcg_temp_free_tl(addr
);
2544 /* Exit TB for TLB change if mmu is enabled. */
2545 if (ctx
->tb_flags
& PSW_C
) {
2546 ctx
->base
.is_jmp
= DISAS_IAQ_N_STALE
;
2548 return nullify_end(ctx
);
2552 static bool trans_lpa(DisasContext
*ctx
, arg_ldst
*a
)
2554 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR
);
2555 #ifndef CONFIG_USER_ONLY
2557 TCGv_reg ofs
, paddr
;
2561 form_gva(ctx
, &vaddr
, &ofs
, a
->b
, a
->x
, 0, 0, a
->sp
, a
->m
, false);
2563 paddr
= tcg_temp_new();
2564 gen_helper_lpa(paddr
, cpu_env
, vaddr
);
2566 /* Note that physical address result overrides base modification. */
2568 save_gpr(ctx
, a
->b
, ofs
);
2570 save_gpr(ctx
, a
->t
, paddr
);
2571 tcg_temp_free(paddr
);
2573 return nullify_end(ctx
);
2577 static bool trans_lci(DisasContext
*ctx
, arg_lci
*a
)
2579 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR
);
2581 /* The Coherence Index is an implementation-defined function of the
2582 physical address. Two addresses with the same CI have a coherent
2583 view of the cache. Our implementation is to return 0 for all,
2584 since the entire address space is coherent. */
2585 save_gpr(ctx
, a
->t
, tcg_constant_reg(0));
2587 cond_free(&ctx
->null_cond
);
2591 static bool trans_add(DisasContext
*ctx
, arg_rrr_cf_sh
*a
)
2593 return do_add_reg(ctx
, a
, false, false, false, false);
2596 static bool trans_add_l(DisasContext
*ctx
, arg_rrr_cf_sh
*a
)
2598 return do_add_reg(ctx
, a
, true, false, false, false);
2601 static bool trans_add_tsv(DisasContext
*ctx
, arg_rrr_cf_sh
*a
)
2603 return do_add_reg(ctx
, a
, false, true, false, false);
2606 static bool trans_add_c(DisasContext
*ctx
, arg_rrr_cf_sh
*a
)
2608 return do_add_reg(ctx
, a
, false, false, false, true);
2611 static bool trans_add_c_tsv(DisasContext
*ctx
, arg_rrr_cf_sh
*a
)
2613 return do_add_reg(ctx
, a
, false, true, false, true);
2616 static bool trans_sub(DisasContext
*ctx
, arg_rrr_cf
*a
)
2618 return do_sub_reg(ctx
, a
, false, false, false);
2621 static bool trans_sub_tsv(DisasContext
*ctx
, arg_rrr_cf
*a
)
2623 return do_sub_reg(ctx
, a
, true, false, false);
2626 static bool trans_sub_tc(DisasContext
*ctx
, arg_rrr_cf
*a
)
2628 return do_sub_reg(ctx
, a
, false, false, true);
2631 static bool trans_sub_tsv_tc(DisasContext
*ctx
, arg_rrr_cf
*a
)
2633 return do_sub_reg(ctx
, a
, true, false, true);
2636 static bool trans_sub_b(DisasContext
*ctx
, arg_rrr_cf
*a
)
2638 return do_sub_reg(ctx
, a
, false, true, false);
2641 static bool trans_sub_b_tsv(DisasContext
*ctx
, arg_rrr_cf
*a
)
2643 return do_sub_reg(ctx
, a
, true, true, false);
2646 static bool trans_andcm(DisasContext
*ctx
, arg_rrr_cf
*a
)
2648 return do_log_reg(ctx
, a
, tcg_gen_andc_reg
);
2651 static bool trans_and(DisasContext
*ctx
, arg_rrr_cf
*a
)
2653 return do_log_reg(ctx
, a
, tcg_gen_and_reg
);
static bool trans_or(DisasContext *ctx, arg_rrr_cf *a)
{
    unsigned r2 = a->r2;
    unsigned r1 = a->r1;
    unsigned rt = a->t;

    if (rt == 0) { /* NOP */
        cond_free(&ctx->null_cond);
        return true;
    }
    if (r2 == 0) { /* COPY */
        if (r1 == 0) {
            TCGv_reg dest = dest_gpr(ctx, rt);
            tcg_gen_movi_reg(dest, 0);
            save_gpr(ctx, rt, dest);
        } else {
            save_gpr(ctx, rt, cpu_gr[r1]);
        }
        cond_free(&ctx->null_cond);
        return true;
    }
#ifndef CONFIG_USER_ONLY
    /* These are QEMU extensions and are nops in the real architecture:
     *
     * or %r10,%r10,%r10 -- idle loop; wait for interrupt
     * or %r31,%r31,%r31 -- death loop; offline cpu
     *                      currently implemented as idle.
     */
    if ((rt == 10 || rt == 31) && r1 == rt && r2 == rt) { /* PAUSE */
        /* No need to check for supervisor, as userland can only pause
           until the next timer interrupt. */
        nullify_over(ctx);

        /* Advance the instruction queue. */
        copy_iaoq_entry(cpu_iaoq_f, ctx->iaoq_b, cpu_iaoq_b);
        copy_iaoq_entry(cpu_iaoq_b, ctx->iaoq_n, ctx->iaoq_n_var);
        nullify_set(ctx, 0);

        /* Tell the qemu main loop to halt until this cpu has work. */
        tcg_gen_st_i32(tcg_constant_i32(1), cpu_env,
                       offsetof(CPUState, halted) - offsetof(HPPACPU, env));
        gen_excp_1(EXCP_HALTED);
        ctx->base.is_jmp = DISAS_NORETURN;

        return nullify_end(ctx);
    }
#endif
    return do_log_reg(ctx, a, tcg_gen_or_reg);
}
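
/*
 * For reference, a guest idle loop using the PAUSE extension above would
 * look roughly like:
 *
 *     idle:   or    %r10,%r10,%r10     ; sleep until the next interrupt
 *             b,n   idle
 *
 * On real hardware the OR is an ordinary nop, so the same code still runs.
 */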
2708 static bool trans_xor(DisasContext
*ctx
, arg_rrr_cf
*a
)
2710 return do_log_reg(ctx
, a
, tcg_gen_xor_reg
);
2713 static bool trans_cmpclr(DisasContext
*ctx
, arg_rrr_cf
*a
)
2715 TCGv_reg tcg_r1
, tcg_r2
;
2720 tcg_r1
= load_gpr(ctx
, a
->r1
);
2721 tcg_r2
= load_gpr(ctx
, a
->r2
);
2722 do_cmpclr(ctx
, a
->t
, tcg_r1
, tcg_r2
, a
->cf
);
2723 return nullify_end(ctx
);
2726 static bool trans_uxor(DisasContext
*ctx
, arg_rrr_cf
*a
)
2728 TCGv_reg tcg_r1
, tcg_r2
;
2733 tcg_r1
= load_gpr(ctx
, a
->r1
);
2734 tcg_r2
= load_gpr(ctx
, a
->r2
);
2735 do_unit(ctx
, a
->t
, tcg_r1
, tcg_r2
, a
->cf
, false, tcg_gen_xor_reg
);
2736 return nullify_end(ctx
);
2739 static bool do_uaddcm(DisasContext
*ctx
, arg_rrr_cf
*a
, bool is_tc
)
2741 TCGv_reg tcg_r1
, tcg_r2
, tmp
;
2746 tcg_r1
= load_gpr(ctx
, a
->r1
);
2747 tcg_r2
= load_gpr(ctx
, a
->r2
);
2748 tmp
= get_temp(ctx
);
2749 tcg_gen_not_reg(tmp
, tcg_r2
);
2750 do_unit(ctx
, a
->t
, tcg_r1
, tmp
, a
->cf
, is_tc
, tcg_gen_add_reg
);
2751 return nullify_end(ctx
);
2754 static bool trans_uaddcm(DisasContext
*ctx
, arg_rrr_cf
*a
)
2756 return do_uaddcm(ctx
, a
, false);
2759 static bool trans_uaddcm_tc(DisasContext
*ctx
, arg_rrr_cf
*a
)
2761 return do_uaddcm(ctx
, a
, true);
2764 static bool do_dcor(DisasContext
*ctx
, arg_rr_cf
*a
, bool is_i
)
2770 tmp
= get_temp(ctx
);
2771 tcg_gen_shri_reg(tmp
, cpu_psw_cb
, 3);
2773 tcg_gen_not_reg(tmp
, tmp
);
2775 tcg_gen_andi_reg(tmp
, tmp
, 0x11111111);
2776 tcg_gen_muli_reg(tmp
, tmp
, 6);
2777 do_unit(ctx
, a
->t
, load_gpr(ctx
, a
->r
), tmp
, a
->cf
, false,
2778 is_i
? tcg_gen_add_reg
: tcg_gen_sub_reg
);
2779 return nullify_end(ctx
);
2782 static bool trans_dcor(DisasContext
*ctx
, arg_rr_cf
*a
)
2784 return do_dcor(ctx
, a
, false);
2787 static bool trans_dcor_i(DisasContext
*ctx
, arg_rr_cf
*a
)
2789 return do_dcor(ctx
, a
, true);
static bool trans_ds(DisasContext *ctx, arg_rrr_cf *a)
{
    TCGv_reg dest, add1, add2, addc, zero, in1, in2;

    nullify_over(ctx);

    in1 = load_gpr(ctx, a->r1);
    in2 = load_gpr(ctx, a->r2);

    add1 = tcg_temp_new();
    add2 = tcg_temp_new();
    addc = tcg_temp_new();
    dest = tcg_temp_new();
    zero = tcg_constant_reg(0);

    /* Form R1 << 1 | PSW[CB]{8}. */
    tcg_gen_add_reg(add1, in1, in1);
    tcg_gen_add_reg(add1, add1, cpu_psw_cb_msb);

    /* Add or subtract R2, depending on PSW[V].  Proper computation of
       carry{8} requires that we subtract via + ~R2 + 1, as described in
       the manual.  By extracting and masking V, we can produce the
       proper inputs to the addition without movcond. */
    tcg_gen_sari_reg(addc, cpu_psw_v, TARGET_REGISTER_BITS - 1);
    tcg_gen_xor_reg(add2, in2, addc);
    tcg_gen_andi_reg(addc, addc, 1);
    /* ??? This is only correct for 32-bit. */
    tcg_gen_add2_i32(dest, cpu_psw_cb_msb, add1, zero, add2, zero);
    tcg_gen_add2_i32(dest, cpu_psw_cb_msb, dest, cpu_psw_cb_msb, addc, zero);

    tcg_temp_free(addc);

    /* Write back the result register. */
    save_gpr(ctx, a->t, dest);

    /* Write back PSW[CB]. */
    tcg_gen_xor_reg(cpu_psw_cb, add1, add2);
    tcg_gen_xor_reg(cpu_psw_cb, cpu_psw_cb, dest);

    /* Write back PSW[V] for the division step. */
    tcg_gen_neg_reg(cpu_psw_v, cpu_psw_cb_msb);
    tcg_gen_xor_reg(cpu_psw_v, cpu_psw_v, in2);

    /* Install the new nullification. */
    if (a->cf) {
        TCGv_reg sv = NULL;
        if (cond_need_sv(a->cf >> 1)) {
            /* ??? The lshift is supposed to contribute to overflow. */
            sv = do_add_sv(ctx, dest, add1, add2);
        }
        ctx->null_cond = do_cond(a->cf, dest, cpu_psw_cb_msb, sv);
    }

    tcg_temp_free(add1);
    tcg_temp_free(add2);
    tcg_temp_free(dest);

    return nullify_end(ctx);
}
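
/*
 * In effect, one DS step computes
 *     t    = (r1 << 1) | PSW[CB]{8};
 *     dest = (sign bit of cpu_psw_v set) ? t + ~r2 + 1 : t + r2;
 * and the PSW[CB]/PSW[V] values written back above steer the next step
 * between add and subtract; division sequences chain such steps, one
 * quotient bit per step.
 */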
2852 static bool trans_addi(DisasContext
*ctx
, arg_rri_cf
*a
)
2854 return do_add_imm(ctx
, a
, false, false);
2857 static bool trans_addi_tsv(DisasContext
*ctx
, arg_rri_cf
*a
)
2859 return do_add_imm(ctx
, a
, true, false);
2862 static bool trans_addi_tc(DisasContext
*ctx
, arg_rri_cf
*a
)
2864 return do_add_imm(ctx
, a
, false, true);
2867 static bool trans_addi_tc_tsv(DisasContext
*ctx
, arg_rri_cf
*a
)
2869 return do_add_imm(ctx
, a
, true, true);
2872 static bool trans_subi(DisasContext
*ctx
, arg_rri_cf
*a
)
2874 return do_sub_imm(ctx
, a
, false);
2877 static bool trans_subi_tsv(DisasContext
*ctx
, arg_rri_cf
*a
)
2879 return do_sub_imm(ctx
, a
, true);
2882 static bool trans_cmpiclr(DisasContext
*ctx
, arg_rri_cf
*a
)
2884 TCGv_reg tcg_im
, tcg_r2
;
2890 tcg_im
= load_const(ctx
, a
->i
);
2891 tcg_r2
= load_gpr(ctx
, a
->r
);
2892 do_cmpclr(ctx
, a
->t
, tcg_im
, tcg_r2
, a
->cf
);
2894 return nullify_end(ctx
);
2897 static bool trans_ld(DisasContext
*ctx
, arg_ldst
*a
)
2899 if (unlikely(TARGET_REGISTER_BITS
== 32 && a
->size
> MO_32
)) {
2900 return gen_illegal(ctx
);
2902 return do_load(ctx
, a
->t
, a
->b
, a
->x
, a
->scale
? a
->size
: 0,
2903 a
->disp
, a
->sp
, a
->m
, a
->size
| MO_TE
);
2907 static bool trans_st(DisasContext
*ctx
, arg_ldst
*a
)
2909 assert(a
->x
== 0 && a
->scale
== 0);
2910 if (unlikely(TARGET_REGISTER_BITS
== 32 && a
->size
> MO_32
)) {
2911 return gen_illegal(ctx
);
2913 return do_store(ctx
, a
->t
, a
->b
, a
->disp
, a
->sp
, a
->m
, a
->size
| MO_TE
);
2917 static bool trans_ldc(DisasContext
*ctx
, arg_ldst
*a
)
2919 MemOp mop
= MO_TE
| MO_ALIGN
| a
->size
;
2920 TCGv_reg zero
, dest
, ofs
;
2926 /* Base register modification. Make sure if RT == RB,
2927 we see the result of the load. */
2928 dest
= get_temp(ctx
);
2930 dest
= dest_gpr(ctx
, a
->t
);
2933 form_gva(ctx
, &addr
, &ofs
, a
->b
, a
->x
, a
->scale
? a
->size
: 0,
2934 a
->disp
, a
->sp
, a
->m
, ctx
->mmu_idx
== MMU_PHYS_IDX
);
2937 * For hppa1.1, LDCW is undefined unless aligned mod 16.
2938 * However actual hardware succeeds with aligned mod 4.
2939 * Detect this case and log a GUEST_ERROR.
2941 * TODO: HPPA64 relaxes the over-alignment requirement
2942 * with the ,co completer.
2944 gen_helper_ldc_check(addr
);
2946 zero
= tcg_constant_reg(0);
2947 tcg_gen_atomic_xchg_reg(dest
, addr
, zero
, ctx
->mmu_idx
, mop
);
2950 save_gpr(ctx
, a
->b
, ofs
);
2952 save_gpr(ctx
, a
->t
, dest
);
2954 return nullify_end(ctx
);
2957 static bool trans_stby(DisasContext
*ctx
, arg_stby
*a
)
2964 form_gva(ctx
, &addr
, &ofs
, a
->b
, 0, 0, a
->disp
, a
->sp
, a
->m
,
2965 ctx
->mmu_idx
== MMU_PHYS_IDX
);
2966 val
= load_gpr(ctx
, a
->r
);
2968 if (tb_cflags(ctx
->base
.tb
) & CF_PARALLEL
) {
2969 gen_helper_stby_e_parallel(cpu_env
, addr
, val
);
2971 gen_helper_stby_e(cpu_env
, addr
, val
);
2974 if (tb_cflags(ctx
->base
.tb
) & CF_PARALLEL
) {
2975 gen_helper_stby_b_parallel(cpu_env
, addr
, val
);
2977 gen_helper_stby_b(cpu_env
, addr
, val
);
2981 tcg_gen_andi_reg(ofs
, ofs
, ~3);
2982 save_gpr(ctx
, a
->b
, ofs
);
2985 return nullify_end(ctx
);
2988 static bool trans_lda(DisasContext
*ctx
, arg_ldst
*a
)
2990 int hold_mmu_idx
= ctx
->mmu_idx
;
2992 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR
);
2993 ctx
->mmu_idx
= MMU_PHYS_IDX
;
2995 ctx
->mmu_idx
= hold_mmu_idx
;
2999 static bool trans_sta(DisasContext
*ctx
, arg_ldst
*a
)
3001 int hold_mmu_idx
= ctx
->mmu_idx
;
3003 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR
);
3004 ctx
->mmu_idx
= MMU_PHYS_IDX
;
3006 ctx
->mmu_idx
= hold_mmu_idx
;
3010 static bool trans_ldil(DisasContext
*ctx
, arg_ldil
*a
)
3012 TCGv_reg tcg_rt
= dest_gpr(ctx
, a
->t
);
3014 tcg_gen_movi_reg(tcg_rt
, a
->i
);
3015 save_gpr(ctx
, a
->t
, tcg_rt
);
3016 cond_free(&ctx
->null_cond
);
3020 static bool trans_addil(DisasContext
*ctx
, arg_addil
*a
)
3022 TCGv_reg tcg_rt
= load_gpr(ctx
, a
->r
);
3023 TCGv_reg tcg_r1
= dest_gpr(ctx
, 1);
3025 tcg_gen_addi_reg(tcg_r1
, tcg_rt
, a
->i
);
3026 save_gpr(ctx
, 1, tcg_r1
);
3027 cond_free(&ctx
->null_cond
);
3031 static bool trans_ldo(DisasContext
*ctx
, arg_ldo
*a
)
3033 TCGv_reg tcg_rt
= dest_gpr(ctx
, a
->t
);
3035 /* Special case rb == 0, for the LDI pseudo-op.
3036 The COPY pseudo-op is handled for free within tcg_gen_addi_tl. */
3038 tcg_gen_movi_reg(tcg_rt
, a
->i
);
3040 tcg_gen_addi_reg(tcg_rt
, cpu_gr
[a
->b
], a
->i
);
3042 save_gpr(ctx
, a
->t
, tcg_rt
);
3043 cond_free(&ctx
->null_cond
);
3047 static bool do_cmpb(DisasContext
*ctx
, unsigned r
, TCGv_reg in1
,
3048 unsigned c
, unsigned f
, unsigned n
, int disp
)
3050 TCGv_reg dest
, in2
, sv
;
3053 in2
= load_gpr(ctx
, r
);
3054 dest
= get_temp(ctx
);
3056 tcg_gen_sub_reg(dest
, in1
, in2
);
3059 if (cond_need_sv(c
)) {
3060 sv
= do_sub_sv(ctx
, dest
, in1
, in2
);
3063 cond
= do_sub_cond(c
* 2 + f
, dest
, in1
, in2
, sv
);
3064 return do_cbranch(ctx
, disp
, n
, &cond
);
3067 static bool trans_cmpb(DisasContext
*ctx
, arg_cmpb
*a
)
3070 return do_cmpb(ctx
, a
->r2
, load_gpr(ctx
, a
->r1
), a
->c
, a
->f
, a
->n
, a
->disp
);
3073 static bool trans_cmpbi(DisasContext
*ctx
, arg_cmpbi
*a
)
3076 return do_cmpb(ctx
, a
->r
, load_const(ctx
, a
->i
), a
->c
, a
->f
, a
->n
, a
->disp
);
3079 static bool do_addb(DisasContext
*ctx
, unsigned r
, TCGv_reg in1
,
3080 unsigned c
, unsigned f
, unsigned n
, int disp
)
3082 TCGv_reg dest
, in2
, sv
, cb_msb
;
3085 in2
= load_gpr(ctx
, r
);
3086 dest
= tcg_temp_new();
3090 if (cond_need_cb(c
)) {
3091 cb_msb
= get_temp(ctx
);
3092 tcg_gen_movi_reg(cb_msb
, 0);
3093 tcg_gen_add2_reg(dest
, cb_msb
, in1
, cb_msb
, in2
, cb_msb
);
3095 tcg_gen_add_reg(dest
, in1
, in2
);
3097 if (cond_need_sv(c
)) {
3098 sv
= do_add_sv(ctx
, dest
, in1
, in2
);
3101 cond
= do_cond(c
* 2 + f
, dest
, cb_msb
, sv
);
3102 save_gpr(ctx
, r
, dest
);
3103 tcg_temp_free(dest
);
3104 return do_cbranch(ctx
, disp
, n
, &cond
);
3107 static bool trans_addb(DisasContext
*ctx
, arg_addb
*a
)
3110 return do_addb(ctx
, a
->r2
, load_gpr(ctx
, a
->r1
), a
->c
, a
->f
, a
->n
, a
->disp
);
3113 static bool trans_addbi(DisasContext
*ctx
, arg_addbi
*a
)
3116 return do_addb(ctx
, a
->r
, load_const(ctx
, a
->i
), a
->c
, a
->f
, a
->n
, a
->disp
);
3119 static bool trans_bb_sar(DisasContext
*ctx
, arg_bb_sar
*a
)
3121 TCGv_reg tmp
, tcg_r
;
3126 tmp
= tcg_temp_new();
3127 tcg_r
= load_gpr(ctx
, a
->r
);
3128 tcg_gen_shl_reg(tmp
, tcg_r
, cpu_sar
);
3130 cond
= cond_make_0(a
->c
? TCG_COND_GE
: TCG_COND_LT
, tmp
);
3132 return do_cbranch(ctx
, a
->disp
, a
->n
, &cond
);
3135 static bool trans_bb_imm(DisasContext
*ctx
, arg_bb_imm
*a
)
3137 TCGv_reg tmp
, tcg_r
;
3142 tmp
= tcg_temp_new();
3143 tcg_r
= load_gpr(ctx
, a
->r
);
3144 tcg_gen_shli_reg(tmp
, tcg_r
, a
->p
);
3146 cond
= cond_make_0(a
->c
? TCG_COND_GE
: TCG_COND_LT
, tmp
);
3148 return do_cbranch(ctx
, a
->disp
, a
->n
, &cond
);
3151 static bool trans_movb(DisasContext
*ctx
, arg_movb
*a
)
3158 dest
= dest_gpr(ctx
, a
->r2
);
3160 tcg_gen_movi_reg(dest
, 0);
3162 tcg_gen_mov_reg(dest
, cpu_gr
[a
->r1
]);
3165 cond
= do_sed_cond(a
->c
, dest
);
3166 return do_cbranch(ctx
, a
->disp
, a
->n
, &cond
);
3169 static bool trans_movbi(DisasContext
*ctx
, arg_movbi
*a
)
3176 dest
= dest_gpr(ctx
, a
->r
);
3177 tcg_gen_movi_reg(dest
, a
->i
);
3179 cond
= do_sed_cond(a
->c
, dest
);
3180 return do_cbranch(ctx
, a
->disp
, a
->n
, &cond
);
3183 static bool trans_shrpw_sar(DisasContext
*ctx
, arg_shrpw_sar
*a
)
3191 dest
= dest_gpr(ctx
, a
->t
);
3193 tcg_gen_ext32u_reg(dest
, load_gpr(ctx
, a
->r2
));
3194 tcg_gen_shr_reg(dest
, dest
, cpu_sar
);
3195 } else if (a
->r1
== a
->r2
) {
3196 TCGv_i32 t32
= tcg_temp_new_i32();
3197 tcg_gen_trunc_reg_i32(t32
, load_gpr(ctx
, a
->r2
));
3198 tcg_gen_rotr_i32(t32
, t32
, cpu_sar
);
3199 tcg_gen_extu_i32_reg(dest
, t32
);
3200 tcg_temp_free_i32(t32
);
3202 TCGv_i64 t
= tcg_temp_new_i64();
3203 TCGv_i64 s
= tcg_temp_new_i64();
3205 tcg_gen_concat_reg_i64(t
, load_gpr(ctx
, a
->r2
), load_gpr(ctx
, a
->r1
));
3206 tcg_gen_extu_reg_i64(s
, cpu_sar
);
3207 tcg_gen_shr_i64(t
, t
, s
);
3208 tcg_gen_trunc_i64_reg(dest
, t
);
3210 tcg_temp_free_i64(t
);
3211 tcg_temp_free_i64(s
);
3213 save_gpr(ctx
, a
->t
, dest
);
3215 /* Install the new nullification. */
3216 cond_free(&ctx
->null_cond
);
3218 ctx
->null_cond
= do_sed_cond(a
->c
, dest
);
3220 return nullify_end(ctx
);
3223 static bool trans_shrpw_imm(DisasContext
*ctx
, arg_shrpw_imm
*a
)
3225 unsigned sa
= 31 - a
->cpos
;
3232 dest
= dest_gpr(ctx
, a
->t
);
3233 t2
= load_gpr(ctx
, a
->r2
);
3235 tcg_gen_extract_reg(dest
, t2
, sa
, 32 - sa
);
3236 } else if (TARGET_REGISTER_BITS
== 32) {
3237 tcg_gen_extract2_reg(dest
, t2
, cpu_gr
[a
->r1
], sa
);
3238 } else if (a
->r1
== a
->r2
) {
3239 TCGv_i32 t32
= tcg_temp_new_i32();
3240 tcg_gen_trunc_reg_i32(t32
, t2
);
3241 tcg_gen_rotri_i32(t32
, t32
, sa
);
3242 tcg_gen_extu_i32_reg(dest
, t32
);
3243 tcg_temp_free_i32(t32
);
3245 TCGv_i64 t64
= tcg_temp_new_i64();
3246 tcg_gen_concat_reg_i64(t64
, t2
, cpu_gr
[a
->r1
]);
3247 tcg_gen_shri_i64(t64
, t64
, sa
);
3248 tcg_gen_trunc_i64_reg(dest
, t64
);
3249 tcg_temp_free_i64(t64
);
3251 save_gpr(ctx
, a
->t
, dest
);
3253 /* Install the new nullification. */
3254 cond_free(&ctx
->null_cond
);
3256 ctx
->null_cond
= do_sed_cond(a
->c
, dest
);
3258 return nullify_end(ctx
);
3261 static bool trans_extrw_sar(DisasContext
*ctx
, arg_extrw_sar
*a
)
3263 unsigned len
= 32 - a
->clen
;
3264 TCGv_reg dest
, src
, tmp
;
3270 dest
= dest_gpr(ctx
, a
->t
);
3271 src
= load_gpr(ctx
, a
->r
);
3272 tmp
= tcg_temp_new();
3274 /* Recall that SAR is using big-endian bit numbering. */
3275 tcg_gen_xori_reg(tmp
, cpu_sar
, TARGET_REGISTER_BITS
- 1);
3277 tcg_gen_sar_reg(dest
, src
, tmp
);
3278 tcg_gen_sextract_reg(dest
, dest
, 0, len
);
3280 tcg_gen_shr_reg(dest
, src
, tmp
);
3281 tcg_gen_extract_reg(dest
, dest
, 0, len
);
3284 save_gpr(ctx
, a
->t
, dest
);
3286 /* Install the new nullification. */
3287 cond_free(&ctx
->null_cond
);
3289 ctx
->null_cond
= do_sed_cond(a
->c
, dest
);
3291 return nullify_end(ctx
);
3294 static bool trans_extrw_imm(DisasContext
*ctx
, arg_extrw_imm
*a
)
3296 unsigned len
= 32 - a
->clen
;
3297 unsigned cpos
= 31 - a
->pos
;
3304 dest
= dest_gpr(ctx
, a
->t
);
3305 src
= load_gpr(ctx
, a
->r
);
3307 tcg_gen_sextract_reg(dest
, src
, cpos
, len
);
3309 tcg_gen_extract_reg(dest
, src
, cpos
, len
);
3311 save_gpr(ctx
, a
->t
, dest
);
3313 /* Install the new nullification. */
3314 cond_free(&ctx
->null_cond
);
3316 ctx
->null_cond
= do_sed_cond(a
->c
, dest
);
3318 return nullify_end(ctx
);
3321 static bool trans_depwi_imm(DisasContext
*ctx
, arg_depwi_imm
*a
)
3323 unsigned len
= 32 - a
->clen
;
3324 target_sreg mask0
, mask1
;
3330 if (a
->cpos
+ len
> 32) {
3334 dest
= dest_gpr(ctx
, a
->t
);
3335 mask0
= deposit64(0, a
->cpos
, len
, a
->i
);
3336 mask1
= deposit64(-1, a
->cpos
, len
, a
->i
);
3339 TCGv_reg src
= load_gpr(ctx
, a
->t
);
3341 tcg_gen_andi_reg(dest
, src
, mask1
);
3344 tcg_gen_ori_reg(dest
, src
, mask0
);
3346 tcg_gen_movi_reg(dest
, mask0
);
3348 save_gpr(ctx
, a
->t
, dest
);
3350 /* Install the new nullification. */
3351 cond_free(&ctx
->null_cond
);
3353 ctx
->null_cond
= do_sed_cond(a
->c
, dest
);
3355 return nullify_end(ctx
);
3358 static bool trans_depw_imm(DisasContext
*ctx
, arg_depw_imm
*a
)
3360 unsigned rs
= a
->nz
? a
->t
: 0;
3361 unsigned len
= 32 - a
->clen
;
3367 if (a
->cpos
+ len
> 32) {
3371 dest
= dest_gpr(ctx
, a
->t
);
3372 val
= load_gpr(ctx
, a
->r
);
3374 tcg_gen_deposit_z_reg(dest
, val
, a
->cpos
, len
);
3376 tcg_gen_deposit_reg(dest
, cpu_gr
[rs
], val
, a
->cpos
, len
);
3378 save_gpr(ctx
, a
->t
, dest
);
3380 /* Install the new nullification. */
3381 cond_free(&ctx
->null_cond
);
3383 ctx
->null_cond
= do_sed_cond(a
->c
, dest
);
3385 return nullify_end(ctx
);
static bool do_depw_sar(DisasContext *ctx, unsigned rt, unsigned c,
                        unsigned nz, unsigned clen, TCGv_reg val)
{
    unsigned rs = nz ? rt : 0;
    unsigned len = 32 - clen;
    TCGv_reg mask, tmp, shift, dest;
    unsigned msb = 1U << (len - 1);

    dest = dest_gpr(ctx, rt);
    shift = tcg_temp_new();
    tmp = tcg_temp_new();

    /* Convert big-endian bit numbering in SAR to left-shift. */
    tcg_gen_xori_reg(shift, cpu_sar, TARGET_REGISTER_BITS - 1);

    mask = tcg_const_reg(msb + (msb - 1));
    tcg_gen_and_reg(tmp, val, mask);
    if (rs) {
        tcg_gen_shl_reg(mask, mask, shift);
        tcg_gen_shl_reg(tmp, tmp, shift);
        tcg_gen_andc_reg(dest, cpu_gr[rs], mask);
        tcg_gen_or_reg(dest, dest, tmp);
    } else {
        tcg_gen_shl_reg(dest, tmp, shift);
    }
    tcg_temp_free(shift);
    tcg_temp_free(mask);
    tcg_temp_free(tmp);
    save_gpr(ctx, rt, dest);

    /* Install the new nullification. */
    cond_free(&ctx->null_cond);
    if (c) {
        ctx->null_cond = do_sed_cond(c, dest);
    }
    return nullify_end(ctx);
}
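
/*
 * Worked example for the variable deposit above, on a 32-bit target:
 * with clen = 24 (len = 8) and SAR = 20, shift = 31 ^ 20 = 11, so the low
 * 8 bits of val land in bits 18..11 of the result; the XOR with 31 is what
 * turns SAR's MSB-first bit numbering into a left-shift count.
 */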
3426 static bool trans_depw_sar(DisasContext
*ctx
, arg_depw_sar
*a
)
3431 return do_depw_sar(ctx
, a
->t
, a
->c
, a
->nz
, a
->clen
, load_gpr(ctx
, a
->r
));
3434 static bool trans_depwi_sar(DisasContext
*ctx
, arg_depwi_sar
*a
)
3439 return do_depw_sar(ctx
, a
->t
, a
->c
, a
->nz
, a
->clen
, load_const(ctx
, a
->i
));
3442 static bool trans_be(DisasContext
*ctx
, arg_be
*a
)
3446 #ifdef CONFIG_USER_ONLY
3447 /* ??? It seems like there should be a good way of using
3448 "be disp(sr2, r0)", the canonical gateway entry mechanism
3449 to our advantage. But that appears to be inconvenient to
3450 manage along side branch delay slots. Therefore we handle
3451 entry into the gateway page via absolute address. */
3452 /* Since we don't implement spaces, just branch. Do notice the special
3453 case of "be disp(*,r0)" using a direct branch to disp, so that we can
3454 goto_tb to the TB containing the syscall. */
3456 return do_dbranch(ctx
, a
->disp
, a
->l
, a
->n
);
3462 tmp
= get_temp(ctx
);
3463 tcg_gen_addi_reg(tmp
, load_gpr(ctx
, a
->b
), a
->disp
);
3464 tmp
= do_ibranch_priv(ctx
, tmp
);
3466 #ifdef CONFIG_USER_ONLY
3467 return do_ibranch(ctx
, tmp
, a
->l
, a
->n
);
3469 TCGv_i64 new_spc
= tcg_temp_new_i64();
3471 load_spr(ctx
, new_spc
, a
->sp
);
3473 copy_iaoq_entry(cpu_gr
[31], ctx
->iaoq_n
, ctx
->iaoq_n_var
);
3474 tcg_gen_mov_i64(cpu_sr
[0], cpu_iasq_f
);
3476 if (a
->n
&& use_nullify_skip(ctx
)) {
3477 tcg_gen_mov_reg(cpu_iaoq_f
, tmp
);
3478 tcg_gen_addi_reg(cpu_iaoq_b
, cpu_iaoq_f
, 4);
3479 tcg_gen_mov_i64(cpu_iasq_f
, new_spc
);
3480 tcg_gen_mov_i64(cpu_iasq_b
, cpu_iasq_f
);
3482 copy_iaoq_entry(cpu_iaoq_f
, ctx
->iaoq_b
, cpu_iaoq_b
);
3483 if (ctx
->iaoq_b
== -1) {
3484 tcg_gen_mov_i64(cpu_iasq_f
, cpu_iasq_b
);
3486 tcg_gen_mov_reg(cpu_iaoq_b
, tmp
);
3487 tcg_gen_mov_i64(cpu_iasq_b
, new_spc
);
3488 nullify_set(ctx
, a
->n
);
3490 tcg_temp_free_i64(new_spc
);
3491 tcg_gen_lookup_and_goto_ptr();
3492 ctx
->base
.is_jmp
= DISAS_NORETURN
;
3493 return nullify_end(ctx
);
3497 static bool trans_bl(DisasContext
*ctx
, arg_bl
*a
)
3499 return do_dbranch(ctx
, iaoq_dest(ctx
, a
->disp
), a
->l
, a
->n
);
3502 static bool trans_b_gate(DisasContext
*ctx
, arg_b_gate
*a
)
3504 target_ureg dest
= iaoq_dest(ctx
, a
->disp
);
3508 /* Make sure the caller hasn't done something weird with the queue.
3509 * ??? This is not quite the same as the PSW[B] bit, which would be
3510 * expensive to track. Real hardware will trap for
3512 * b gateway+4 (in delay slot of first branch)
3513 * However, checking for a non-sequential instruction queue *will*
3514 * diagnose the security hole
3517 * in which instructions at evil would run with increased privs.
3519 if (ctx
->iaoq_b
== -1 || ctx
->iaoq_b
!= ctx
->iaoq_f
+ 4) {
3520 return gen_illegal(ctx
);
3523 #ifndef CONFIG_USER_ONLY
3524 if (ctx
->tb_flags
& PSW_C
) {
3525 CPUHPPAState
*env
= ctx
->cs
->env_ptr
;
3526 int type
= hppa_artype_for_page(env
, ctx
->base
.pc_next
);
3527 /* If we could not find a TLB entry, then we need to generate an
3528 ITLB miss exception so the kernel will provide it.
3529 The resulting TLB fill operation will invalidate this TB and
3530 we will re-translate, at which point we *will* be able to find
3531 the TLB entry and determine if this is in fact a gateway page. */
3533 gen_excp(ctx
, EXCP_ITLB_MISS
);
3536 /* No change for non-gateway pages or for priv decrease. */
3537 if (type
>= 4 && type
- 4 < ctx
->privilege
) {
3538 dest
= deposit32(dest
, 0, 2, type
- 4);
3541 dest
&= -4; /* priv = 0 */
3546 TCGv_reg tmp
= dest_gpr(ctx
, a
->l
);
3547 if (ctx
->privilege
< 3) {
3548 tcg_gen_andi_reg(tmp
, tmp
, -4);
3550 tcg_gen_ori_reg(tmp
, tmp
, ctx
->privilege
);
3551 save_gpr(ctx
, a
->l
, tmp
);
3554 return do_dbranch(ctx
, dest
, 0, a
->n
);
3557 static bool trans_blr(DisasContext
*ctx
, arg_blr
*a
)
3560 TCGv_reg tmp
= get_temp(ctx
);
3561 tcg_gen_shli_reg(tmp
, load_gpr(ctx
, a
->x
), 3);
3562 tcg_gen_addi_reg(tmp
, tmp
, ctx
->iaoq_f
+ 8);
3563 /* The computation here never changes privilege level. */
3564 return do_ibranch(ctx
, tmp
, a
->l
, a
->n
);
3566 /* BLR R0,RX is a good way to load PC+8 into RX. */
3567 return do_dbranch(ctx
, ctx
->iaoq_f
+ 8, a
->l
, a
->n
);
3571 static bool trans_bv(DisasContext
*ctx
, arg_bv
*a
)
3576 dest
= load_gpr(ctx
, a
->b
);
3578 dest
= get_temp(ctx
);
3579 tcg_gen_shli_reg(dest
, load_gpr(ctx
, a
->x
), 3);
3580 tcg_gen_add_reg(dest
, dest
, load_gpr(ctx
, a
->b
));
3582 dest
= do_ibranch_priv(ctx
, dest
);
3583 return do_ibranch(ctx
, dest
, 0, a
->n
);
3586 static bool trans_bve(DisasContext
*ctx
, arg_bve
*a
)
3590 #ifdef CONFIG_USER_ONLY
3591 dest
= do_ibranch_priv(ctx
, load_gpr(ctx
, a
->b
));
3592 return do_ibranch(ctx
, dest
, a
->l
, a
->n
);
3595 dest
= do_ibranch_priv(ctx
, load_gpr(ctx
, a
->b
));
3597 copy_iaoq_entry(cpu_iaoq_f
, ctx
->iaoq_b
, cpu_iaoq_b
);
3598 if (ctx
->iaoq_b
== -1) {
3599 tcg_gen_mov_i64(cpu_iasq_f
, cpu_iasq_b
);
3601 copy_iaoq_entry(cpu_iaoq_b
, -1, dest
);
3602 tcg_gen_mov_i64(cpu_iasq_b
, space_select(ctx
, 0, dest
));
3604 copy_iaoq_entry(cpu_gr
[a
->l
], ctx
->iaoq_n
, ctx
->iaoq_n_var
);
3606 nullify_set(ctx
, a
->n
);
3607 tcg_gen_lookup_and_goto_ptr();
3608 ctx
->base
.is_jmp
= DISAS_NORETURN
;
3609 return nullify_end(ctx
);
3617 static void gen_fcpy_f(TCGv_i32 dst
, TCGv_env unused
, TCGv_i32 src
)
3619 tcg_gen_mov_i32(dst
, src
);
3622 static bool trans_fid_f(DisasContext
*ctx
, arg_fid_f
*a
)
3625 #if TARGET_REGISTER_BITS == 64
3626 save_frd(0, tcg_const_i64(0x13080000000000ULL
)); /* PA8700 (PCX-W2) */
3628 save_frd(0, tcg_const_i64(0x0f080000000000ULL
)); /* PA7300LC (PCX-L2) */
3630 return nullify_end(ctx
);
3633 static bool trans_fcpy_f(DisasContext
*ctx
, arg_fclass01
*a
)
3635 return do_fop_wew(ctx
, a
->t
, a
->r
, gen_fcpy_f
);
3638 static void gen_fcpy_d(TCGv_i64 dst
, TCGv_env unused
, TCGv_i64 src
)
3640 tcg_gen_mov_i64(dst
, src
);
3643 static bool trans_fcpy_d(DisasContext
*ctx
, arg_fclass01
*a
)
3645 return do_fop_ded(ctx
, a
->t
, a
->r
, gen_fcpy_d
);
3648 static void gen_fabs_f(TCGv_i32 dst
, TCGv_env unused
, TCGv_i32 src
)
3650 tcg_gen_andi_i32(dst
, src
, INT32_MAX
);
3653 static bool trans_fabs_f(DisasContext
*ctx
, arg_fclass01
*a
)
3655 return do_fop_wew(ctx
, a
->t
, a
->r
, gen_fabs_f
);
3658 static void gen_fabs_d(TCGv_i64 dst
, TCGv_env unused
, TCGv_i64 src
)
3660 tcg_gen_andi_i64(dst
, src
, INT64_MAX
);
3663 static bool trans_fabs_d(DisasContext
*ctx
, arg_fclass01
*a
)
3665 return do_fop_ded(ctx
, a
->t
, a
->r
, gen_fabs_d
);
3668 static bool trans_fsqrt_f(DisasContext
*ctx
, arg_fclass01
*a
)
3670 return do_fop_wew(ctx
, a
->t
, a
->r
, gen_helper_fsqrt_s
);
3673 static bool trans_fsqrt_d(DisasContext
*ctx
, arg_fclass01
*a
)
3675 return do_fop_ded(ctx
, a
->t
, a
->r
, gen_helper_fsqrt_d
);
3678 static bool trans_frnd_f(DisasContext
*ctx
, arg_fclass01
*a
)
3680 return do_fop_wew(ctx
, a
->t
, a
->r
, gen_helper_frnd_s
);
3683 static bool trans_frnd_d(DisasContext
*ctx
, arg_fclass01
*a
)
3685 return do_fop_ded(ctx
, a
->t
, a
->r
, gen_helper_frnd_d
);
3688 static void gen_fneg_f(TCGv_i32 dst
, TCGv_env unused
, TCGv_i32 src
)
3690 tcg_gen_xori_i32(dst
, src
, INT32_MIN
);
3693 static bool trans_fneg_f(DisasContext
*ctx
, arg_fclass01
*a
)
3695 return do_fop_wew(ctx
, a
->t
, a
->r
, gen_fneg_f
);
3698 static void gen_fneg_d(TCGv_i64 dst
, TCGv_env unused
, TCGv_i64 src
)
3700 tcg_gen_xori_i64(dst
, src
, INT64_MIN
);
3703 static bool trans_fneg_d(DisasContext
*ctx
, arg_fclass01
*a
)
3705 return do_fop_ded(ctx
, a
->t
, a
->r
, gen_fneg_d
);
3708 static void gen_fnegabs_f(TCGv_i32 dst
, TCGv_env unused
, TCGv_i32 src
)
3710 tcg_gen_ori_i32(dst
, src
, INT32_MIN
);
3713 static bool trans_fnegabs_f(DisasContext
*ctx
, arg_fclass01
*a
)
3715 return do_fop_wew(ctx
, a
->t
, a
->r
, gen_fnegabs_f
);
3718 static void gen_fnegabs_d(TCGv_i64 dst
, TCGv_env unused
, TCGv_i64 src
)
3720 tcg_gen_ori_i64(dst
, src
, INT64_MIN
);
3723 static bool trans_fnegabs_d(DisasContext
*ctx
, arg_fclass01
*a
)
3725 return do_fop_ded(ctx
, a
->t
, a
->r
, gen_fnegabs_d
);
3732 static bool trans_fcnv_d_f(DisasContext
*ctx
, arg_fclass01
*a
)
3734 return do_fop_wed(ctx
, a
->t
, a
->r
, gen_helper_fcnv_d_s
);
3737 static bool trans_fcnv_f_d(DisasContext
*ctx
, arg_fclass01
*a
)
3739 return do_fop_dew(ctx
, a
->t
, a
->r
, gen_helper_fcnv_s_d
);
3742 static bool trans_fcnv_w_f(DisasContext
*ctx
, arg_fclass01
*a
)
3744 return do_fop_wew(ctx
, a
->t
, a
->r
, gen_helper_fcnv_w_s
);
3747 static bool trans_fcnv_q_f(DisasContext
*ctx
, arg_fclass01
*a
)
3749 return do_fop_wed(ctx
, a
->t
, a
->r
, gen_helper_fcnv_dw_s
);
3752 static bool trans_fcnv_w_d(DisasContext
*ctx
, arg_fclass01
*a
)
3754 return do_fop_dew(ctx
, a
->t
, a
->r
, gen_helper_fcnv_w_d
);
3757 static bool trans_fcnv_q_d(DisasContext
*ctx
, arg_fclass01
*a
)
3759 return do_fop_ded(ctx
, a
->t
, a
->r
, gen_helper_fcnv_dw_d
);
3762 static bool trans_fcnv_f_w(DisasContext
*ctx
, arg_fclass01
*a
)
3764 return do_fop_wew(ctx
, a
->t
, a
->r
, gen_helper_fcnv_s_w
);
3767 static bool trans_fcnv_d_w(DisasContext
*ctx
, arg_fclass01
*a
)
3769 return do_fop_wed(ctx
, a
->t
, a
->r
, gen_helper_fcnv_d_w
);
3772 static bool trans_fcnv_f_q(DisasContext
*ctx
, arg_fclass01
*a
)
3774 return do_fop_dew(ctx
, a
->t
, a
->r
, gen_helper_fcnv_s_dw
);
3777 static bool trans_fcnv_d_q(DisasContext
*ctx
, arg_fclass01
*a
)
3779 return do_fop_ded(ctx
, a
->t
, a
->r
, gen_helper_fcnv_d_dw
);
3782 static bool trans_fcnv_t_f_w(DisasContext
*ctx
, arg_fclass01
*a
)
3784 return do_fop_wew(ctx
, a
->t
, a
->r
, gen_helper_fcnv_t_s_w
);
3787 static bool trans_fcnv_t_d_w(DisasContext
*ctx
, arg_fclass01
*a
)
3789 return do_fop_wed(ctx
, a
->t
, a
->r
, gen_helper_fcnv_t_d_w
);
3792 static bool trans_fcnv_t_f_q(DisasContext
*ctx
, arg_fclass01
*a
)
3794 return do_fop_dew(ctx
, a
->t
, a
->r
, gen_helper_fcnv_t_s_dw
);
3797 static bool trans_fcnv_t_d_q(DisasContext
*ctx
, arg_fclass01
*a
)
3799 return do_fop_ded(ctx
, a
->t
, a
->r
, gen_helper_fcnv_t_d_dw
);
3802 static bool trans_fcnv_uw_f(DisasContext
*ctx
, arg_fclass01
*a
)
3804 return do_fop_wew(ctx
, a
->t
, a
->r
, gen_helper_fcnv_uw_s
);
3807 static bool trans_fcnv_uq_f(DisasContext
*ctx
, arg_fclass01
*a
)
3809 return do_fop_wed(ctx
, a
->t
, a
->r
, gen_helper_fcnv_udw_s
);
3812 static bool trans_fcnv_uw_d(DisasContext
*ctx
, arg_fclass01
*a
)
3814 return do_fop_dew(ctx
, a
->t
, a
->r
, gen_helper_fcnv_uw_d
);
3817 static bool trans_fcnv_uq_d(DisasContext
*ctx
, arg_fclass01
*a
)
3819 return do_fop_ded(ctx
, a
->t
, a
->r
, gen_helper_fcnv_udw_d
);
3822 static bool trans_fcnv_f_uw(DisasContext
*ctx
, arg_fclass01
*a
)
3824 return do_fop_wew(ctx
, a
->t
, a
->r
, gen_helper_fcnv_s_uw
);
3827 static bool trans_fcnv_d_uw(DisasContext
*ctx
, arg_fclass01
*a
)
3829 return do_fop_wed(ctx
, a
->t
, a
->r
, gen_helper_fcnv_d_uw
);
3832 static bool trans_fcnv_f_uq(DisasContext
*ctx
, arg_fclass01
*a
)
3834 return do_fop_dew(ctx
, a
->t
, a
->r
, gen_helper_fcnv_s_udw
);
3837 static bool trans_fcnv_d_uq(DisasContext
*ctx
, arg_fclass01
*a
)
3839 return do_fop_ded(ctx
, a
->t
, a
->r
, gen_helper_fcnv_d_udw
);
3842 static bool trans_fcnv_t_f_uw(DisasContext
*ctx
, arg_fclass01
*a
)
3844 return do_fop_wew(ctx
, a
->t
, a
->r
, gen_helper_fcnv_t_s_uw
);
3847 static bool trans_fcnv_t_d_uw(DisasContext
*ctx
, arg_fclass01
*a
)
3849 return do_fop_wed(ctx
, a
->t
, a
->r
, gen_helper_fcnv_t_d_uw
);
3852 static bool trans_fcnv_t_f_uq(DisasContext
*ctx
, arg_fclass01
*a
)
3854 return do_fop_dew(ctx
, a
->t
, a
->r
, gen_helper_fcnv_t_s_udw
);
3857 static bool trans_fcnv_t_d_uq(DisasContext
*ctx
, arg_fclass01
*a
)
3859 return do_fop_ded(ctx
, a
->t
, a
->r
, gen_helper_fcnv_t_d_udw
);
3866 static bool trans_fcmp_f(DisasContext
*ctx
, arg_fclass2
*a
)
3868 TCGv_i32 ta
, tb
, tc
, ty
;
3872 ta
= load_frw0_i32(a
->r1
);
3873 tb
= load_frw0_i32(a
->r2
);
3874 ty
= tcg_constant_i32(a
->y
);
3875 tc
= tcg_constant_i32(a
->c
);
3877 gen_helper_fcmp_s(cpu_env
, ta
, tb
, ty
, tc
);
3879 tcg_temp_free_i32(ta
);
3880 tcg_temp_free_i32(tb
);
3882 return nullify_end(ctx
);
3885 static bool trans_fcmp_d(DisasContext
*ctx
, arg_fclass2
*a
)
3892 ta
= load_frd0(a
->r1
);
3893 tb
= load_frd0(a
->r2
);
3894 ty
= tcg_constant_i32(a
->y
);
3895 tc
= tcg_constant_i32(a
->c
);
3897 gen_helper_fcmp_d(cpu_env
, ta
, tb
, ty
, tc
);
3899 tcg_temp_free_i64(ta
);
3900 tcg_temp_free_i64(tb
);
3902 return nullify_end(ctx
);
3905 static bool trans_ftest(DisasContext
*ctx
, arg_ftest
*a
)
3912 tcg_gen_ld32u_reg(t
, cpu_env
, offsetof(CPUHPPAState
, fr0_shadow
));
3919 case 0: /* simple */
3920 tcg_gen_andi_reg(t
, t
, 0x4000000);
3921 ctx
->null_cond
= cond_make_0(TCG_COND_NE
, t
);
3949 TCGv_reg c
= load_const(ctx
, mask
);
3950 tcg_gen_or_reg(t
, t
, c
);
3951 ctx
->null_cond
= cond_make(TCG_COND_EQ
, t
, c
);
3953 tcg_gen_andi_reg(t
, t
, mask
);
3954 ctx
->null_cond
= cond_make_0(TCG_COND_EQ
, t
);
3957 unsigned cbit
= (a
->y
^ 1) - 1;
3959 tcg_gen_extract_reg(t
, t
, 21 - cbit
, 1);
3960 ctx
->null_cond
= cond_make_0(TCG_COND_NE
, t
);
3965 return nullify_end(ctx
);
3972 static bool trans_fadd_f(DisasContext
*ctx
, arg_fclass3
*a
)
3974 return do_fop_weww(ctx
, a
->t
, a
->r1
, a
->r2
, gen_helper_fadd_s
);
3977 static bool trans_fadd_d(DisasContext
*ctx
, arg_fclass3
*a
)
3979 return do_fop_dedd(ctx
, a
->t
, a
->r1
, a
->r2
, gen_helper_fadd_d
);
3982 static bool trans_fsub_f(DisasContext
*ctx
, arg_fclass3
*a
)
3984 return do_fop_weww(ctx
, a
->t
, a
->r1
, a
->r2
, gen_helper_fsub_s
);
3987 static bool trans_fsub_d(DisasContext
*ctx
, arg_fclass3
*a
)
3989 return do_fop_dedd(ctx
, a
->t
, a
->r1
, a
->r2
, gen_helper_fsub_d
);
3992 static bool trans_fmpy_f(DisasContext
*ctx
, arg_fclass3
*a
)
3994 return do_fop_weww(ctx
, a
->t
, a
->r1
, a
->r2
, gen_helper_fmpy_s
);
3997 static bool trans_fmpy_d(DisasContext
*ctx
, arg_fclass3
*a
)
3999 return do_fop_dedd(ctx
, a
->t
, a
->r1
, a
->r2
, gen_helper_fmpy_d
);
4002 static bool trans_fdiv_f(DisasContext
*ctx
, arg_fclass3
*a
)
4004 return do_fop_weww(ctx
, a
->t
, a
->r1
, a
->r2
, gen_helper_fdiv_s
);
4007 static bool trans_fdiv_d(DisasContext
*ctx
, arg_fclass3
*a
)
4009 return do_fop_dedd(ctx
, a
->t
, a
->r1
, a
->r2
, gen_helper_fdiv_d
);
4012 static bool trans_xmpyu(DisasContext
*ctx
, arg_xmpyu
*a
)
4018 x
= load_frw0_i64(a
->r1
);
4019 y
= load_frw0_i64(a
->r2
);
4020 tcg_gen_mul_i64(x
, x
, y
);
4022 tcg_temp_free_i64(x
);
4023 tcg_temp_free_i64(y
);
4025 return nullify_end(ctx
);
4028 /* Convert the fmpyadd single-precision register encodings to standard. */
4029 static inline int fmpyadd_s_reg(unsigned r
)
4031 return (r
& 16) * 2 + 16 + (r
& 15);
4034 static bool do_fmpyadd_s(DisasContext
*ctx
, arg_mpyadd
*a
, bool is_sub
)
4036 int tm
= fmpyadd_s_reg(a
->tm
);
4037 int ra
= fmpyadd_s_reg(a
->ra
);
4038 int ta
= fmpyadd_s_reg(a
->ta
);
4039 int rm2
= fmpyadd_s_reg(a
->rm2
);
4040 int rm1
= fmpyadd_s_reg(a
->rm1
);
4044 do_fop_weww(ctx
, tm
, rm1
, rm2
, gen_helper_fmpy_s
);
4045 do_fop_weww(ctx
, ta
, ta
, ra
,
4046 is_sub
? gen_helper_fsub_s
: gen_helper_fadd_s
);
4048 return nullify_end(ctx
);
4051 static bool trans_fmpyadd_f(DisasContext
*ctx
, arg_mpyadd
*a
)
4053 return do_fmpyadd_s(ctx
, a
, false);
4056 static bool trans_fmpysub_f(DisasContext
*ctx
, arg_mpyadd
*a
)
4058 return do_fmpyadd_s(ctx
, a
, true);
4061 static bool do_fmpyadd_d(DisasContext
*ctx
, arg_mpyadd
*a
, bool is_sub
)
4065 do_fop_dedd(ctx
, a
->tm
, a
->rm1
, a
->rm2
, gen_helper_fmpy_d
);
4066 do_fop_dedd(ctx
, a
->ta
, a
->ta
, a
->ra
,
4067 is_sub
? gen_helper_fsub_d
: gen_helper_fadd_d
);
4069 return nullify_end(ctx
);
4072 static bool trans_fmpyadd_d(DisasContext
*ctx
, arg_mpyadd
*a
)
4074 return do_fmpyadd_d(ctx
, a
, false);
4077 static bool trans_fmpysub_d(DisasContext
*ctx
, arg_mpyadd
*a
)
4079 return do_fmpyadd_d(ctx
, a
, true);
4082 static bool trans_fmpyfadd_f(DisasContext
*ctx
, arg_fmpyfadd_f
*a
)
4087 x
= load_frw0_i32(a
->rm1
);
4088 y
= load_frw0_i32(a
->rm2
);
4089 z
= load_frw0_i32(a
->ra3
);
4092 gen_helper_fmpynfadd_s(x
, cpu_env
, x
, y
, z
);
4094 gen_helper_fmpyfadd_s(x
, cpu_env
, x
, y
, z
);
4097 tcg_temp_free_i32(y
);
4098 tcg_temp_free_i32(z
);
4099 save_frw_i32(a
->t
, x
);
4100 tcg_temp_free_i32(x
);
4101 return nullify_end(ctx
);
4104 static bool trans_fmpyfadd_d(DisasContext
*ctx
, arg_fmpyfadd_d
*a
)
4109 x
= load_frd0(a
->rm1
);
4110 y
= load_frd0(a
->rm2
);
4111 z
= load_frd0(a
->ra3
);
4114 gen_helper_fmpynfadd_d(x
, cpu_env
, x
, y
, z
);
4116 gen_helper_fmpyfadd_d(x
, cpu_env
, x
, y
, z
);
4119 tcg_temp_free_i64(y
);
4120 tcg_temp_free_i64(z
);
4122 tcg_temp_free_i64(x
);
4123 return nullify_end(ctx
);
4126 static bool trans_diag(DisasContext
*ctx
, arg_diag
*a
)
4128 qemu_log_mask(LOG_UNIMP
, "DIAG opcode ignored\n");
4129 cond_free(&ctx
->null_cond
);
4133 static void hppa_tr_init_disas_context(DisasContextBase
*dcbase
, CPUState
*cs
)
4135 DisasContext
*ctx
= container_of(dcbase
, DisasContext
, base
);
4139 ctx
->tb_flags
= ctx
->base
.tb
->flags
;
4141 #ifdef CONFIG_USER_ONLY
4142 ctx
->privilege
= MMU_USER_IDX
;
4143 ctx
->mmu_idx
= MMU_USER_IDX
;
4144 ctx
->iaoq_f
= ctx
->base
.pc_first
| MMU_USER_IDX
;
4145 ctx
->iaoq_b
= ctx
->base
.tb
->cs_base
| MMU_USER_IDX
;
4146 ctx
->unalign
= (ctx
->tb_flags
& TB_FLAG_UNALIGN
? MO_UNALN
: MO_ALIGN
);
4148 ctx
->privilege
= (ctx
->tb_flags
>> TB_FLAG_PRIV_SHIFT
) & 3;
4149 ctx
->mmu_idx
= (ctx
->tb_flags
& PSW_D
? ctx
->privilege
: MMU_PHYS_IDX
);
4151 /* Recover the IAOQ values from the GVA + PRIV. */
4152 uint64_t cs_base
= ctx
->base
.tb
->cs_base
;
4153 uint64_t iasq_f
= cs_base
& ~0xffffffffull
;
4154 int32_t diff
= cs_base
;
4156 ctx
->iaoq_f
= (ctx
->base
.pc_first
& ~iasq_f
) + ctx
->privilege
;
4157 ctx
->iaoq_b
= (diff
? ctx
->iaoq_f
+ diff
: -1);
4160 ctx
->iaoq_n_var
= NULL
;
4162 /* Bound the number of instructions by those left on the page. */
4163 bound
= -(ctx
->base
.pc_first
| TARGET_PAGE_MASK
) / 4;
4164 ctx
->base
.max_insns
= MIN(ctx
->base
.max_insns
, bound
);
4168 memset(ctx
->tempr
, 0, sizeof(ctx
->tempr
));
4169 memset(ctx
->templ
, 0, sizeof(ctx
->templ
));
4172 static void hppa_tr_tb_start(DisasContextBase
*dcbase
, CPUState
*cs
)
4174 DisasContext
*ctx
= container_of(dcbase
, DisasContext
, base
);
4176 /* Seed the nullification status from PSW[N], as saved in TB->FLAGS. */
4177 ctx
->null_cond
= cond_make_f();
4178 ctx
->psw_n_nonzero
= false;
4179 if (ctx
->tb_flags
& PSW_N
) {
4180 ctx
->null_cond
.c
= TCG_COND_ALWAYS
;
4181 ctx
->psw_n_nonzero
= true;
4183 ctx
->null_lab
= NULL
;
4186 static void hppa_tr_insn_start(DisasContextBase
*dcbase
, CPUState
*cs
)
4188 DisasContext
*ctx
= container_of(dcbase
, DisasContext
, base
);
4190 tcg_gen_insn_start(ctx
->iaoq_f
, ctx
->iaoq_b
);
4193 static void hppa_tr_translate_insn(DisasContextBase
*dcbase
, CPUState
*cs
)
4195 DisasContext
*ctx
= container_of(dcbase
, DisasContext
, base
);
4196 CPUHPPAState
*env
= cs
->env_ptr
;
4200 /* Execute one insn. */
4201 #ifdef CONFIG_USER_ONLY
4202 if (ctx
->base
.pc_next
< TARGET_PAGE_SIZE
) {
4204 ret
= ctx
->base
.is_jmp
;
4205 assert(ret
!= DISAS_NEXT
);
4209 /* Always fetch the insn, even if nullified, so that we check
4210 the page permissions for execute. */
4211 uint32_t insn
= translator_ldl(env
, &ctx
->base
, ctx
->base
.pc_next
);
4213 /* Set up the IA queue for the next insn.
4214 This will be overwritten by a branch. */
4215 if (ctx
->iaoq_b
== -1) {
4217 ctx
->iaoq_n_var
= get_temp(ctx
);
4218 tcg_gen_addi_reg(ctx
->iaoq_n_var
, cpu_iaoq_b
, 4);
4220 ctx
->iaoq_n
= ctx
->iaoq_b
+ 4;
4221 ctx
->iaoq_n_var
= NULL
;
4224 if (unlikely(ctx
->null_cond
.c
== TCG_COND_ALWAYS
)) {
4225 ctx
->null_cond
.c
= TCG_COND_NEVER
;
4229 if (!decode(ctx
, insn
)) {
4232 ret
= ctx
->base
.is_jmp
;
4233 assert(ctx
->null_lab
== NULL
);
4237 /* Free any temporaries allocated. */
4238 for (i
= 0, n
= ctx
->ntempr
; i
< n
; ++i
) {
4239 tcg_temp_free(ctx
->tempr
[i
]);
4240 ctx
->tempr
[i
] = NULL
;
4242 for (i
= 0, n
= ctx
->ntempl
; i
< n
; ++i
) {
4243 tcg_temp_free_tl(ctx
->templ
[i
]);
4244 ctx
->templ
[i
] = NULL
;
4249 /* Advance the insn queue. Note that this check also detects
4250 a priority change within the instruction queue. */
4251 if (ret
== DISAS_NEXT
&& ctx
->iaoq_b
!= ctx
->iaoq_f
+ 4) {
4252 if (ctx
->iaoq_b
!= -1 && ctx
->iaoq_n
!= -1
4253 && use_goto_tb(ctx
, ctx
->iaoq_b
)
4254 && (ctx
->null_cond
.c
== TCG_COND_NEVER
4255 || ctx
->null_cond
.c
== TCG_COND_ALWAYS
)) {
4256 nullify_set(ctx
, ctx
->null_cond
.c
== TCG_COND_ALWAYS
);
4257 gen_goto_tb(ctx
, 0, ctx
->iaoq_b
, ctx
->iaoq_n
);
4258 ctx
->base
.is_jmp
= ret
= DISAS_NORETURN
;
4260 ctx
->base
.is_jmp
= ret
= DISAS_IAQ_N_STALE
;
4263 ctx
->iaoq_f
= ctx
->iaoq_b
;
4264 ctx
->iaoq_b
= ctx
->iaoq_n
;
4265 ctx
->base
.pc_next
+= 4;
4268 case DISAS_NORETURN
:
4269 case DISAS_IAQ_N_UPDATED
:
4273 case DISAS_IAQ_N_STALE
:
4274 case DISAS_IAQ_N_STALE_EXIT
:
4275 if (ctx
->iaoq_f
== -1) {
4276 tcg_gen_mov_reg(cpu_iaoq_f
, cpu_iaoq_b
);
4277 copy_iaoq_entry(cpu_iaoq_b
, ctx
->iaoq_n
, ctx
->iaoq_n_var
);
4278 #ifndef CONFIG_USER_ONLY
4279 tcg_gen_mov_i64(cpu_iasq_f
, cpu_iasq_b
);
4282 ctx
->base
.is_jmp
= (ret
== DISAS_IAQ_N_STALE_EXIT
4284 : DISAS_IAQ_N_UPDATED
);
4285 } else if (ctx
->iaoq_b
== -1) {
4286 tcg_gen_mov_reg(cpu_iaoq_b
, ctx
->iaoq_n_var
);
4291 g_assert_not_reached();
4295 static void hppa_tr_tb_stop(DisasContextBase
*dcbase
, CPUState
*cs
)
4297 DisasContext
*ctx
= container_of(dcbase
, DisasContext
, base
);
4298 DisasJumpType is_jmp
= ctx
->base
.is_jmp
;
4301 case DISAS_NORETURN
:
4303 case DISAS_TOO_MANY
:
4304 case DISAS_IAQ_N_STALE
:
4305 case DISAS_IAQ_N_STALE_EXIT
:
4306 copy_iaoq_entry(cpu_iaoq_f
, ctx
->iaoq_f
, cpu_iaoq_f
);
4307 copy_iaoq_entry(cpu_iaoq_b
, ctx
->iaoq_b
, cpu_iaoq_b
);
4310 case DISAS_IAQ_N_UPDATED
:
4311 if (is_jmp
!= DISAS_IAQ_N_STALE_EXIT
) {
4312 tcg_gen_lookup_and_goto_ptr();
4317 tcg_gen_exit_tb(NULL
, 0);
4320 g_assert_not_reached();
4324 static void hppa_tr_disas_log(const DisasContextBase
*dcbase
,
4325 CPUState
*cs
, FILE *logfile
)
4327 target_ulong pc
= dcbase
->pc_first
;
4329 #ifdef CONFIG_USER_ONLY
4332 fprintf(logfile
, "IN:\n0x00000000: (null)\n");
4335 fprintf(logfile
, "IN:\n0x000000b0: light-weight-syscall\n");
4338 fprintf(logfile
, "IN:\n0x000000e0: set-thread-pointer-syscall\n");
4341 fprintf(logfile
, "IN:\n0x00000100: syscall\n");
4346 fprintf(logfile
, "IN: %s\n", lookup_symbol(pc
));
4347 target_disas(logfile
, cs
, pc
, dcbase
->tb
->size
);
4350 static const TranslatorOps hppa_tr_ops
= {
4351 .init_disas_context
= hppa_tr_init_disas_context
,
4352 .tb_start
= hppa_tr_tb_start
,
4353 .insn_start
= hppa_tr_insn_start
,
4354 .translate_insn
= hppa_tr_translate_insn
,
4355 .tb_stop
= hppa_tr_tb_stop
,
4356 .disas_log
= hppa_tr_disas_log
,
4359 void gen_intermediate_code(CPUState
*cs
, TranslationBlock
*tb
, int *max_insns
,
4360 target_ulong pc
, void *host_pc
)
4363 translator_loop(cs
, tb
, max_insns
, pc
, host_pc
, &hppa_tr_ops
, &ctx
.base
);