2 * HPPA emulation cpu translation for qemu.
4 * Copyright (c) 2016 Richard Henderson <rth@twiddle.net>
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2.1 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
20 #include "qemu/osdep.h"
22 #include "disas/disas.h"
23 #include "qemu/host-utils.h"
24 #include "exec/exec-all.h"
25 #include "tcg/tcg-op.h"
26 #include "exec/cpu_ldst.h"
27 #include "exec/helper-proto.h"
28 #include "exec/helper-gen.h"
29 #include "exec/translator.h"
32 #define HELPER_H "helper.h"
33 #include "exec/helper-info.c.inc"
37 /* Since we have a distinction between register size and address size,
38 we need to redefine all of these. */
42 #undef tcg_global_mem_new
44 #if TARGET_LONG_BITS == 64
45 #define TCGv_tl TCGv_i64
46 #define tcg_temp_new_tl tcg_temp_new_i64
47 #if TARGET_REGISTER_BITS == 64
48 #define tcg_gen_extu_reg_tl tcg_gen_mov_i64
50 #define tcg_gen_extu_reg_tl tcg_gen_extu_i32_i64
53 #define TCGv_tl TCGv_i32
54 #define tcg_temp_new_tl tcg_temp_new_i32
55 #define tcg_gen_extu_reg_tl tcg_gen_mov_i32
58 #if TARGET_REGISTER_BITS == 64
59 #define TCGv_reg TCGv_i64
61 #define tcg_temp_new tcg_temp_new_i64
62 #define tcg_global_mem_new tcg_global_mem_new_i64
64 #define tcg_gen_movi_reg tcg_gen_movi_i64
65 #define tcg_gen_mov_reg tcg_gen_mov_i64
66 #define tcg_gen_ld8u_reg tcg_gen_ld8u_i64
67 #define tcg_gen_ld8s_reg tcg_gen_ld8s_i64
68 #define tcg_gen_ld16u_reg tcg_gen_ld16u_i64
69 #define tcg_gen_ld16s_reg tcg_gen_ld16s_i64
70 #define tcg_gen_ld32u_reg tcg_gen_ld32u_i64
71 #define tcg_gen_ld32s_reg tcg_gen_ld32s_i64
72 #define tcg_gen_ld_reg tcg_gen_ld_i64
73 #define tcg_gen_st8_reg tcg_gen_st8_i64
74 #define tcg_gen_st16_reg tcg_gen_st16_i64
75 #define tcg_gen_st32_reg tcg_gen_st32_i64
76 #define tcg_gen_st_reg tcg_gen_st_i64
77 #define tcg_gen_add_reg tcg_gen_add_i64
78 #define tcg_gen_addi_reg tcg_gen_addi_i64
79 #define tcg_gen_sub_reg tcg_gen_sub_i64
80 #define tcg_gen_neg_reg tcg_gen_neg_i64
81 #define tcg_gen_subfi_reg tcg_gen_subfi_i64
82 #define tcg_gen_subi_reg tcg_gen_subi_i64
83 #define tcg_gen_and_reg tcg_gen_and_i64
84 #define tcg_gen_andi_reg tcg_gen_andi_i64
85 #define tcg_gen_or_reg tcg_gen_or_i64
86 #define tcg_gen_ori_reg tcg_gen_ori_i64
87 #define tcg_gen_xor_reg tcg_gen_xor_i64
88 #define tcg_gen_xori_reg tcg_gen_xori_i64
89 #define tcg_gen_not_reg tcg_gen_not_i64
90 #define tcg_gen_shl_reg tcg_gen_shl_i64
91 #define tcg_gen_shli_reg tcg_gen_shli_i64
92 #define tcg_gen_shr_reg tcg_gen_shr_i64
93 #define tcg_gen_shri_reg tcg_gen_shri_i64
94 #define tcg_gen_sar_reg tcg_gen_sar_i64
95 #define tcg_gen_sari_reg tcg_gen_sari_i64
96 #define tcg_gen_brcond_reg tcg_gen_brcond_i64
97 #define tcg_gen_brcondi_reg tcg_gen_brcondi_i64
98 #define tcg_gen_setcond_reg tcg_gen_setcond_i64
99 #define tcg_gen_setcondi_reg tcg_gen_setcondi_i64
100 #define tcg_gen_mul_reg tcg_gen_mul_i64
101 #define tcg_gen_muli_reg tcg_gen_muli_i64
102 #define tcg_gen_div_reg tcg_gen_div_i64
103 #define tcg_gen_rem_reg tcg_gen_rem_i64
104 #define tcg_gen_divu_reg tcg_gen_divu_i64
105 #define tcg_gen_remu_reg tcg_gen_remu_i64
106 #define tcg_gen_discard_reg tcg_gen_discard_i64
107 #define tcg_gen_trunc_reg_i32 tcg_gen_extrl_i64_i32
108 #define tcg_gen_trunc_i64_reg tcg_gen_mov_i64
109 #define tcg_gen_extu_i32_reg tcg_gen_extu_i32_i64
110 #define tcg_gen_ext_i32_reg tcg_gen_ext_i32_i64
111 #define tcg_gen_extu_reg_i64 tcg_gen_mov_i64
112 #define tcg_gen_ext_reg_i64 tcg_gen_mov_i64
113 #define tcg_gen_ext8u_reg tcg_gen_ext8u_i64
114 #define tcg_gen_ext8s_reg tcg_gen_ext8s_i64
115 #define tcg_gen_ext16u_reg tcg_gen_ext16u_i64
116 #define tcg_gen_ext16s_reg tcg_gen_ext16s_i64
117 #define tcg_gen_ext32u_reg tcg_gen_ext32u_i64
118 #define tcg_gen_ext32s_reg tcg_gen_ext32s_i64
119 #define tcg_gen_bswap16_reg tcg_gen_bswap16_i64
120 #define tcg_gen_bswap32_reg tcg_gen_bswap32_i64
121 #define tcg_gen_bswap64_reg tcg_gen_bswap64_i64
122 #define tcg_gen_concat_reg_i64 tcg_gen_concat32_i64
123 #define tcg_gen_andc_reg tcg_gen_andc_i64
124 #define tcg_gen_eqv_reg tcg_gen_eqv_i64
125 #define tcg_gen_nand_reg tcg_gen_nand_i64
126 #define tcg_gen_nor_reg tcg_gen_nor_i64
127 #define tcg_gen_orc_reg tcg_gen_orc_i64
128 #define tcg_gen_clz_reg tcg_gen_clz_i64
129 #define tcg_gen_ctz_reg tcg_gen_ctz_i64
130 #define tcg_gen_clzi_reg tcg_gen_clzi_i64
131 #define tcg_gen_ctzi_reg tcg_gen_ctzi_i64
132 #define tcg_gen_clrsb_reg tcg_gen_clrsb_i64
133 #define tcg_gen_ctpop_reg tcg_gen_ctpop_i64
134 #define tcg_gen_rotl_reg tcg_gen_rotl_i64
135 #define tcg_gen_rotli_reg tcg_gen_rotli_i64
136 #define tcg_gen_rotr_reg tcg_gen_rotr_i64
137 #define tcg_gen_rotri_reg tcg_gen_rotri_i64
138 #define tcg_gen_deposit_reg tcg_gen_deposit_i64
139 #define tcg_gen_deposit_z_reg tcg_gen_deposit_z_i64
140 #define tcg_gen_extract_reg tcg_gen_extract_i64
141 #define tcg_gen_sextract_reg tcg_gen_sextract_i64
142 #define tcg_gen_extract2_reg tcg_gen_extract2_i64
143 #define tcg_constant_reg tcg_constant_i64
144 #define tcg_gen_movcond_reg tcg_gen_movcond_i64
145 #define tcg_gen_add2_reg tcg_gen_add2_i64
146 #define tcg_gen_sub2_reg tcg_gen_sub2_i64
147 #define tcg_gen_qemu_ld_reg tcg_gen_qemu_ld_i64
148 #define tcg_gen_qemu_st_reg tcg_gen_qemu_st_i64
149 #define tcg_gen_atomic_xchg_reg tcg_gen_atomic_xchg_i64
150 #define tcg_gen_trunc_reg_ptr tcg_gen_trunc_i64_ptr
152 #define TCGv_reg TCGv_i32
153 #define tcg_temp_new tcg_temp_new_i32
154 #define tcg_global_mem_new tcg_global_mem_new_i32
156 #define tcg_gen_movi_reg tcg_gen_movi_i32
157 #define tcg_gen_mov_reg tcg_gen_mov_i32
158 #define tcg_gen_ld8u_reg tcg_gen_ld8u_i32
159 #define tcg_gen_ld8s_reg tcg_gen_ld8s_i32
160 #define tcg_gen_ld16u_reg tcg_gen_ld16u_i32
161 #define tcg_gen_ld16s_reg tcg_gen_ld16s_i32
162 #define tcg_gen_ld32u_reg tcg_gen_ld_i32
163 #define tcg_gen_ld32s_reg tcg_gen_ld_i32
164 #define tcg_gen_ld_reg tcg_gen_ld_i32
165 #define tcg_gen_st8_reg tcg_gen_st8_i32
166 #define tcg_gen_st16_reg tcg_gen_st16_i32
167 #define tcg_gen_st32_reg tcg_gen_st32_i32
168 #define tcg_gen_st_reg tcg_gen_st_i32
169 #define tcg_gen_add_reg tcg_gen_add_i32
170 #define tcg_gen_addi_reg tcg_gen_addi_i32
171 #define tcg_gen_sub_reg tcg_gen_sub_i32
172 #define tcg_gen_neg_reg tcg_gen_neg_i32
173 #define tcg_gen_subfi_reg tcg_gen_subfi_i32
174 #define tcg_gen_subi_reg tcg_gen_subi_i32
175 #define tcg_gen_and_reg tcg_gen_and_i32
176 #define tcg_gen_andi_reg tcg_gen_andi_i32
177 #define tcg_gen_or_reg tcg_gen_or_i32
178 #define tcg_gen_ori_reg tcg_gen_ori_i32
179 #define tcg_gen_xor_reg tcg_gen_xor_i32
180 #define tcg_gen_xori_reg tcg_gen_xori_i32
181 #define tcg_gen_not_reg tcg_gen_not_i32
182 #define tcg_gen_shl_reg tcg_gen_shl_i32
183 #define tcg_gen_shli_reg tcg_gen_shli_i32
184 #define tcg_gen_shr_reg tcg_gen_shr_i32
185 #define tcg_gen_shri_reg tcg_gen_shri_i32
186 #define tcg_gen_sar_reg tcg_gen_sar_i32
187 #define tcg_gen_sari_reg tcg_gen_sari_i32
188 #define tcg_gen_brcond_reg tcg_gen_brcond_i32
189 #define tcg_gen_brcondi_reg tcg_gen_brcondi_i32
190 #define tcg_gen_setcond_reg tcg_gen_setcond_i32
191 #define tcg_gen_setcondi_reg tcg_gen_setcondi_i32
192 #define tcg_gen_mul_reg tcg_gen_mul_i32
193 #define tcg_gen_muli_reg tcg_gen_muli_i32
194 #define tcg_gen_div_reg tcg_gen_div_i32
195 #define tcg_gen_rem_reg tcg_gen_rem_i32
196 #define tcg_gen_divu_reg tcg_gen_divu_i32
197 #define tcg_gen_remu_reg tcg_gen_remu_i32
198 #define tcg_gen_discard_reg tcg_gen_discard_i32
199 #define tcg_gen_trunc_reg_i32 tcg_gen_mov_i32
200 #define tcg_gen_trunc_i64_reg tcg_gen_extrl_i64_i32
201 #define tcg_gen_extu_i32_reg tcg_gen_mov_i32
202 #define tcg_gen_ext_i32_reg tcg_gen_mov_i32
203 #define tcg_gen_extu_reg_i64 tcg_gen_extu_i32_i64
204 #define tcg_gen_ext_reg_i64 tcg_gen_ext_i32_i64
205 #define tcg_gen_ext8u_reg tcg_gen_ext8u_i32
206 #define tcg_gen_ext8s_reg tcg_gen_ext8s_i32
207 #define tcg_gen_ext16u_reg tcg_gen_ext16u_i32
208 #define tcg_gen_ext16s_reg tcg_gen_ext16s_i32
209 #define tcg_gen_ext32u_reg tcg_gen_mov_i32
210 #define tcg_gen_ext32s_reg tcg_gen_mov_i32
211 #define tcg_gen_bswap16_reg tcg_gen_bswap16_i32
212 #define tcg_gen_bswap32_reg tcg_gen_bswap32_i32
213 #define tcg_gen_concat_reg_i64 tcg_gen_concat_i32_i64
214 #define tcg_gen_andc_reg tcg_gen_andc_i32
215 #define tcg_gen_eqv_reg tcg_gen_eqv_i32
216 #define tcg_gen_nand_reg tcg_gen_nand_i32
217 #define tcg_gen_nor_reg tcg_gen_nor_i32
218 #define tcg_gen_orc_reg tcg_gen_orc_i32
219 #define tcg_gen_clz_reg tcg_gen_clz_i32
220 #define tcg_gen_ctz_reg tcg_gen_ctz_i32
221 #define tcg_gen_clzi_reg tcg_gen_clzi_i32
222 #define tcg_gen_ctzi_reg tcg_gen_ctzi_i32
223 #define tcg_gen_clrsb_reg tcg_gen_clrsb_i32
224 #define tcg_gen_ctpop_reg tcg_gen_ctpop_i32
225 #define tcg_gen_rotl_reg tcg_gen_rotl_i32
226 #define tcg_gen_rotli_reg tcg_gen_rotli_i32
227 #define tcg_gen_rotr_reg tcg_gen_rotr_i32
228 #define tcg_gen_rotri_reg tcg_gen_rotri_i32
229 #define tcg_gen_deposit_reg tcg_gen_deposit_i32
230 #define tcg_gen_deposit_z_reg tcg_gen_deposit_z_i32
231 #define tcg_gen_extract_reg tcg_gen_extract_i32
232 #define tcg_gen_sextract_reg tcg_gen_sextract_i32
233 #define tcg_gen_extract2_reg tcg_gen_extract2_i32
234 #define tcg_constant_reg tcg_constant_i32
235 #define tcg_gen_movcond_reg tcg_gen_movcond_i32
236 #define tcg_gen_add2_reg tcg_gen_add2_i32
237 #define tcg_gen_sub2_reg tcg_gen_sub2_i32
238 #define tcg_gen_qemu_ld_reg tcg_gen_qemu_ld_i32
239 #define tcg_gen_qemu_st_reg tcg_gen_qemu_st_i32
240 #define tcg_gen_atomic_xchg_reg tcg_gen_atomic_xchg_i32
241 #define tcg_gen_trunc_reg_ptr tcg_gen_ext_i32_ptr
242 #endif /* TARGET_REGISTER_BITS */
244 typedef struct DisasCond
{
249 typedef struct DisasContext
{
250 DisasContextBase base
;
271 #ifdef CONFIG_USER_ONLY
276 #ifdef CONFIG_USER_ONLY
277 #define UNALIGN(C) (C)->unalign
279 #define UNALIGN(C) MO_ALIGN
282 /* Note that ssm/rsm instructions number PSW_W and PSW_E differently. */
283 static int expand_sm_imm(DisasContext
*ctx
, int val
)
285 if (val
& PSW_SM_E
) {
286 val
= (val
& ~PSW_SM_E
) | PSW_E
;
288 if (val
& PSW_SM_W
) {
289 val
= (val
& ~PSW_SM_W
) | PSW_W
;
294 /* Inverted space register indicates 0 means sr0 not inferred from base. */
295 static int expand_sr3x(DisasContext
*ctx
, int val
)
300 /* Convert the M:A bits within a memory insn to the tri-state value
301 we use for the final M. */
302 static int ma_to_m(DisasContext
*ctx
, int val
)
304 return val
& 2 ? (val
& 1 ? -1 : 1) : 0;
307 /* Convert the sign of the displacement to a pre or post-modify. */
308 static int pos_to_m(DisasContext
*ctx
, int val
)
313 static int neg_to_m(DisasContext
*ctx
, int val
)
318 /* Used for branch targets and fp memory ops. */
319 static int expand_shl2(DisasContext
*ctx
, int val
)
324 /* Used for fp memory ops. */
325 static int expand_shl3(DisasContext
*ctx
, int val
)
330 /* Used for assemble_21. */
331 static int expand_shl11(DisasContext
*ctx
, int val
)
337 /* Include the auto-generated decoder. */
338 #include "decode-insns.c.inc"
340 /* We are not using a goto_tb (for whatever reason), but have updated
341 the iaq (for whatever reason), so don't do it again on exit. */
342 #define DISAS_IAQ_N_UPDATED DISAS_TARGET_0
344 /* We are exiting the TB, but have neither emitted a goto_tb, nor
345 updated the iaq for the next instruction to be executed. */
346 #define DISAS_IAQ_N_STALE DISAS_TARGET_1
348 /* Similarly, but we want to return to the main loop immediately
349 to recognize unmasked interrupts. */
350 #define DISAS_IAQ_N_STALE_EXIT DISAS_TARGET_2
351 #define DISAS_EXIT DISAS_TARGET_3
353 /* global register indexes */
354 static TCGv_reg cpu_gr
[32];
355 static TCGv_i64 cpu_sr
[4];
356 static TCGv_i64 cpu_srH
;
357 static TCGv_reg cpu_iaoq_f
;
358 static TCGv_reg cpu_iaoq_b
;
359 static TCGv_i64 cpu_iasq_f
;
360 static TCGv_i64 cpu_iasq_b
;
361 static TCGv_reg cpu_sar
;
362 static TCGv_reg cpu_psw_n
;
363 static TCGv_reg cpu_psw_v
;
364 static TCGv_reg cpu_psw_cb
;
365 static TCGv_reg cpu_psw_cb_msb
;
367 void hppa_translate_init(void)
369 #define DEF_VAR(V) { &cpu_##V, #V, offsetof(CPUHPPAState, V) }
371 typedef struct { TCGv_reg
*var
; const char *name
; int ofs
; } GlobalVar
;
372 static const GlobalVar vars
[] = {
373 { &cpu_sar
, "sar", offsetof(CPUHPPAState
, cr
[CR_SAR
]) },
384 /* Use the symbolic register names that match the disassembler. */
385 static const char gr_names
[32][4] = {
386 "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
387 "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15",
388 "r16", "r17", "r18", "r19", "r20", "r21", "r22", "r23",
389 "r24", "r25", "r26", "r27", "r28", "r29", "r30", "r31"
391 /* SR[4-7] are not global registers so that we can index them. */
392 static const char sr_names
[5][4] = {
393 "sr0", "sr1", "sr2", "sr3", "srH"
399 for (i
= 1; i
< 32; i
++) {
400 cpu_gr
[i
] = tcg_global_mem_new(cpu_env
,
401 offsetof(CPUHPPAState
, gr
[i
]),
404 for (i
= 0; i
< 4; i
++) {
405 cpu_sr
[i
] = tcg_global_mem_new_i64(cpu_env
,
406 offsetof(CPUHPPAState
, sr
[i
]),
409 cpu_srH
= tcg_global_mem_new_i64(cpu_env
,
410 offsetof(CPUHPPAState
, sr
[4]),
413 for (i
= 0; i
< ARRAY_SIZE(vars
); ++i
) {
414 const GlobalVar
*v
= &vars
[i
];
415 *v
->var
= tcg_global_mem_new(cpu_env
, v
->ofs
, v
->name
);
418 cpu_iasq_f
= tcg_global_mem_new_i64(cpu_env
,
419 offsetof(CPUHPPAState
, iasq_f
),
421 cpu_iasq_b
= tcg_global_mem_new_i64(cpu_env
,
422 offsetof(CPUHPPAState
, iasq_b
),
426 static DisasCond
cond_make_f(void)
435 static DisasCond
cond_make_t(void)
438 .c
= TCG_COND_ALWAYS
,
444 static DisasCond
cond_make_n(void)
449 .a1
= tcg_constant_reg(0)
453 static DisasCond
cond_make_0_tmp(TCGCond c
, TCGv_reg a0
)
455 assert (c
!= TCG_COND_NEVER
&& c
!= TCG_COND_ALWAYS
);
457 .c
= c
, .a0
= a0
, .a1
= tcg_constant_reg(0)
461 static DisasCond
cond_make_0(TCGCond c
, TCGv_reg a0
)
463 TCGv_reg tmp
= tcg_temp_new();
464 tcg_gen_mov_reg(tmp
, a0
);
465 return cond_make_0_tmp(c
, tmp
);
468 static DisasCond
cond_make(TCGCond c
, TCGv_reg a0
, TCGv_reg a1
)
470 DisasCond r
= { .c
= c
};
472 assert (c
!= TCG_COND_NEVER
&& c
!= TCG_COND_ALWAYS
);
473 r
.a0
= tcg_temp_new();
474 tcg_gen_mov_reg(r
.a0
, a0
);
475 r
.a1
= tcg_temp_new();
476 tcg_gen_mov_reg(r
.a1
, a1
);
481 static void cond_free(DisasCond
*cond
)
488 case TCG_COND_ALWAYS
:
489 cond
->c
= TCG_COND_NEVER
;
496 static TCGv_reg
get_temp(DisasContext
*ctx
)
498 unsigned i
= ctx
->ntempr
++;
499 g_assert(i
< ARRAY_SIZE(ctx
->tempr
));
500 return ctx
->tempr
[i
] = tcg_temp_new();
503 #ifndef CONFIG_USER_ONLY
504 static TCGv_tl
get_temp_tl(DisasContext
*ctx
)
506 unsigned i
= ctx
->ntempl
++;
507 g_assert(i
< ARRAY_SIZE(ctx
->templ
));
508 return ctx
->templ
[i
] = tcg_temp_new_tl();
512 static TCGv_reg
load_const(DisasContext
*ctx
, target_sreg v
)
514 TCGv_reg t
= get_temp(ctx
);
515 tcg_gen_movi_reg(t
, v
);
519 static TCGv_reg
load_gpr(DisasContext
*ctx
, unsigned reg
)
522 TCGv_reg t
= get_temp(ctx
);
523 tcg_gen_movi_reg(t
, 0);
530 static TCGv_reg
dest_gpr(DisasContext
*ctx
, unsigned reg
)
532 if (reg
== 0 || ctx
->null_cond
.c
!= TCG_COND_NEVER
) {
533 return get_temp(ctx
);
539 static void save_or_nullify(DisasContext
*ctx
, TCGv_reg dest
, TCGv_reg t
)
541 if (ctx
->null_cond
.c
!= TCG_COND_NEVER
) {
542 tcg_gen_movcond_reg(ctx
->null_cond
.c
, dest
, ctx
->null_cond
.a0
,
543 ctx
->null_cond
.a1
, dest
, t
);
545 tcg_gen_mov_reg(dest
, t
);
549 static void save_gpr(DisasContext
*ctx
, unsigned reg
, TCGv_reg t
)
552 save_or_nullify(ctx
, cpu_gr
[reg
], t
);
564 static TCGv_i32
load_frw_i32(unsigned rt
)
566 TCGv_i32 ret
= tcg_temp_new_i32();
567 tcg_gen_ld_i32(ret
, cpu_env
,
568 offsetof(CPUHPPAState
, fr
[rt
& 31])
569 + (rt
& 32 ? LO_OFS
: HI_OFS
));
573 static TCGv_i32
load_frw0_i32(unsigned rt
)
576 TCGv_i32 ret
= tcg_temp_new_i32();
577 tcg_gen_movi_i32(ret
, 0);
580 return load_frw_i32(rt
);
584 static TCGv_i64
load_frw0_i64(unsigned rt
)
586 TCGv_i64 ret
= tcg_temp_new_i64();
588 tcg_gen_movi_i64(ret
, 0);
590 tcg_gen_ld32u_i64(ret
, cpu_env
,
591 offsetof(CPUHPPAState
, fr
[rt
& 31])
592 + (rt
& 32 ? LO_OFS
: HI_OFS
));
597 static void save_frw_i32(unsigned rt
, TCGv_i32 val
)
599 tcg_gen_st_i32(val
, cpu_env
,
600 offsetof(CPUHPPAState
, fr
[rt
& 31])
601 + (rt
& 32 ? LO_OFS
: HI_OFS
));
607 static TCGv_i64
load_frd(unsigned rt
)
609 TCGv_i64 ret
= tcg_temp_new_i64();
610 tcg_gen_ld_i64(ret
, cpu_env
, offsetof(CPUHPPAState
, fr
[rt
]));
614 static TCGv_i64
load_frd0(unsigned rt
)
617 TCGv_i64 ret
= tcg_temp_new_i64();
618 tcg_gen_movi_i64(ret
, 0);
625 static void save_frd(unsigned rt
, TCGv_i64 val
)
627 tcg_gen_st_i64(val
, cpu_env
, offsetof(CPUHPPAState
, fr
[rt
]));
630 static void load_spr(DisasContext
*ctx
, TCGv_i64 dest
, unsigned reg
)
632 #ifdef CONFIG_USER_ONLY
633 tcg_gen_movi_i64(dest
, 0);
636 tcg_gen_mov_i64(dest
, cpu_sr
[reg
]);
637 } else if (ctx
->tb_flags
& TB_FLAG_SR_SAME
) {
638 tcg_gen_mov_i64(dest
, cpu_srH
);
640 tcg_gen_ld_i64(dest
, cpu_env
, offsetof(CPUHPPAState
, sr
[reg
]));
645 /* Skip over the implementation of an insn that has been nullified.
646 Use this when the insn is too complex for a conditional move. */
647 static void nullify_over(DisasContext
*ctx
)
649 if (ctx
->null_cond
.c
!= TCG_COND_NEVER
) {
650 /* The always condition should have been handled in the main loop. */
651 assert(ctx
->null_cond
.c
!= TCG_COND_ALWAYS
);
653 ctx
->null_lab
= gen_new_label();
655 /* If we're using PSW[N], copy it to a temp because... */
656 if (ctx
->null_cond
.a0
== cpu_psw_n
) {
657 ctx
->null_cond
.a0
= tcg_temp_new();
658 tcg_gen_mov_reg(ctx
->null_cond
.a0
, cpu_psw_n
);
660 /* ... we clear it before branching over the implementation,
661 so that (1) it's clear after nullifying this insn and
662 (2) if this insn nullifies the next, PSW[N] is valid. */
663 if (ctx
->psw_n_nonzero
) {
664 ctx
->psw_n_nonzero
= false;
665 tcg_gen_movi_reg(cpu_psw_n
, 0);
668 tcg_gen_brcond_reg(ctx
->null_cond
.c
, ctx
->null_cond
.a0
,
669 ctx
->null_cond
.a1
, ctx
->null_lab
);
670 cond_free(&ctx
->null_cond
);
674 /* Save the current nullification state to PSW[N]. */
675 static void nullify_save(DisasContext
*ctx
)
677 if (ctx
->null_cond
.c
== TCG_COND_NEVER
) {
678 if (ctx
->psw_n_nonzero
) {
679 tcg_gen_movi_reg(cpu_psw_n
, 0);
683 if (ctx
->null_cond
.a0
!= cpu_psw_n
) {
684 tcg_gen_setcond_reg(ctx
->null_cond
.c
, cpu_psw_n
,
685 ctx
->null_cond
.a0
, ctx
->null_cond
.a1
);
686 ctx
->psw_n_nonzero
= true;
688 cond_free(&ctx
->null_cond
);
691 /* Set a PSW[N] to X. The intention is that this is used immediately
692 before a goto_tb/exit_tb, so that there is no fallthru path to other
693 code within the TB. Therefore we do not update psw_n_nonzero. */
694 static void nullify_set(DisasContext
*ctx
, bool x
)
696 if (ctx
->psw_n_nonzero
|| x
) {
697 tcg_gen_movi_reg(cpu_psw_n
, x
);
701 /* Mark the end of an instruction that may have been nullified.
702 This is the pair to nullify_over. Always returns true so that
703 it may be tail-called from a translate function. */
704 static bool nullify_end(DisasContext
*ctx
)
706 TCGLabel
*null_lab
= ctx
->null_lab
;
707 DisasJumpType status
= ctx
->base
.is_jmp
;
709 /* For NEXT, NORETURN, STALE, we can easily continue (or exit).
710 For UPDATED, we cannot update on the nullified path. */
711 assert(status
!= DISAS_IAQ_N_UPDATED
);
713 if (likely(null_lab
== NULL
)) {
714 /* The current insn wasn't conditional or handled the condition
715 applied to it without a branch, so the (new) setting of
716 NULL_COND can be applied directly to the next insn. */
719 ctx
->null_lab
= NULL
;
721 if (likely(ctx
->null_cond
.c
== TCG_COND_NEVER
)) {
722 /* The next instruction will be unconditional,
723 and NULL_COND already reflects that. */
724 gen_set_label(null_lab
);
726 /* The insn that we just executed is itself nullifying the next
727 instruction. Store the condition in the PSW[N] global.
728 We asserted PSW[N] = 0 in nullify_over, so that after the
729 label we have the proper value in place. */
731 gen_set_label(null_lab
);
732 ctx
->null_cond
= cond_make_n();
734 if (status
== DISAS_NORETURN
) {
735 ctx
->base
.is_jmp
= DISAS_NEXT
;
740 static void copy_iaoq_entry(TCGv_reg dest
, target_ureg ival
, TCGv_reg vval
)
742 if (unlikely(ival
== -1)) {
743 tcg_gen_mov_reg(dest
, vval
);
745 tcg_gen_movi_reg(dest
, ival
);
749 static inline target_ureg
iaoq_dest(DisasContext
*ctx
, target_sreg disp
)
751 return ctx
->iaoq_f
+ disp
+ 8;
754 static void gen_excp_1(int exception
)
756 gen_helper_excp(cpu_env
, tcg_constant_i32(exception
));
759 static void gen_excp(DisasContext
*ctx
, int exception
)
761 copy_iaoq_entry(cpu_iaoq_f
, ctx
->iaoq_f
, cpu_iaoq_f
);
762 copy_iaoq_entry(cpu_iaoq_b
, ctx
->iaoq_b
, cpu_iaoq_b
);
764 gen_excp_1(exception
);
765 ctx
->base
.is_jmp
= DISAS_NORETURN
;
768 static bool gen_excp_iir(DisasContext
*ctx
, int exc
)
771 tcg_gen_st_reg(tcg_constant_reg(ctx
->insn
),
772 cpu_env
, offsetof(CPUHPPAState
, cr
[CR_IIR
]));
774 return nullify_end(ctx
);
777 static bool gen_illegal(DisasContext
*ctx
)
779 return gen_excp_iir(ctx
, EXCP_ILL
);
782 #ifdef CONFIG_USER_ONLY
783 #define CHECK_MOST_PRIVILEGED(EXCP) \
784 return gen_excp_iir(ctx, EXCP)
786 #define CHECK_MOST_PRIVILEGED(EXCP) \
788 if (ctx->privilege != 0) { \
789 return gen_excp_iir(ctx, EXCP); \
794 static bool use_goto_tb(DisasContext
*ctx
, target_ureg dest
)
796 return translator_use_goto_tb(&ctx
->base
, dest
);
799 /* If the next insn is to be nullified, and it's on the same page,
800 and we're not attempting to set a breakpoint on it, then we can
801 totally skip the nullified insn. This avoids creating and
802 executing a TB that merely branches to the next TB. */
803 static bool use_nullify_skip(DisasContext
*ctx
)
805 return (((ctx
->iaoq_b
^ ctx
->iaoq_f
) & TARGET_PAGE_MASK
) == 0
806 && !cpu_breakpoint_test(ctx
->cs
, ctx
->iaoq_b
, BP_ANY
));
809 static void gen_goto_tb(DisasContext
*ctx
, int which
,
810 target_ureg f
, target_ureg b
)
812 if (f
!= -1 && b
!= -1 && use_goto_tb(ctx
, f
)) {
813 tcg_gen_goto_tb(which
);
814 tcg_gen_movi_reg(cpu_iaoq_f
, f
);
815 tcg_gen_movi_reg(cpu_iaoq_b
, b
);
816 tcg_gen_exit_tb(ctx
->base
.tb
, which
);
818 copy_iaoq_entry(cpu_iaoq_f
, f
, cpu_iaoq_b
);
819 copy_iaoq_entry(cpu_iaoq_b
, b
, ctx
->iaoq_n_var
);
820 tcg_gen_lookup_and_goto_ptr();
/* True for the arithmetic condition codes that require the signed
   overflow (SV) value: <, <=, and SV itself. */
static bool cond_need_sv(int c)
{
    switch (c) {
    case 2:
    case 3:
    case 6:
        return true;
    default:
        return false;
    }
}
/* True for the arithmetic condition codes that require the carry/borrow
   msb value. */
static bool cond_need_cb(int c)
{
    switch (c) {
    case 4:
    case 5:
        return true;
    default:
        return false;
    }
}
835 * Compute conditional for arithmetic. See Page 5-3, Table 5-1, of
836 * the Parisc 1.1 Architecture Reference Manual for details.
839 static DisasCond
do_cond(unsigned cf
, TCGv_reg res
,
840 TCGv_reg cb_msb
, TCGv_reg sv
)
846 case 0: /* Never / TR (0 / 1) */
847 cond
= cond_make_f();
849 case 1: /* = / <> (Z / !Z) */
850 cond
= cond_make_0(TCG_COND_EQ
, res
);
852 case 2: /* < / >= (N ^ V / !(N ^ V) */
853 tmp
= tcg_temp_new();
854 tcg_gen_xor_reg(tmp
, res
, sv
);
855 cond
= cond_make_0_tmp(TCG_COND_LT
, tmp
);
857 case 3: /* <= / > (N ^ V) | Z / !((N ^ V) | Z) */
861 * ((res < 0) ^ (sv < 0)) | !res
862 * ((res ^ sv) < 0) | !res
863 * (~(res ^ sv) >= 0) | !res
864 * !(~(res ^ sv) >> 31) | !res
865 * !(~(res ^ sv) >> 31 & res)
867 tmp
= tcg_temp_new();
868 tcg_gen_eqv_reg(tmp
, res
, sv
);
869 tcg_gen_sari_reg(tmp
, tmp
, TARGET_REGISTER_BITS
- 1);
870 tcg_gen_and_reg(tmp
, tmp
, res
);
871 cond
= cond_make_0_tmp(TCG_COND_EQ
, tmp
);
873 case 4: /* NUV / UV (!C / C) */
874 cond
= cond_make_0(TCG_COND_EQ
, cb_msb
);
876 case 5: /* ZNV / VNZ (!C | Z / C & !Z) */
877 tmp
= tcg_temp_new();
878 tcg_gen_neg_reg(tmp
, cb_msb
);
879 tcg_gen_and_reg(tmp
, tmp
, res
);
880 cond
= cond_make_0_tmp(TCG_COND_EQ
, tmp
);
882 case 6: /* SV / NSV (V / !V) */
883 cond
= cond_make_0(TCG_COND_LT
, sv
);
885 case 7: /* OD / EV */
886 tmp
= tcg_temp_new();
887 tcg_gen_andi_reg(tmp
, res
, 1);
888 cond
= cond_make_0_tmp(TCG_COND_NE
, tmp
);
891 g_assert_not_reached();
894 cond
.c
= tcg_invert_cond(cond
.c
);
900 /* Similar, but for the special case of subtraction without borrow, we
901 can use the inputs directly. This can allow other computation to be
902 deleted as unused. */
904 static DisasCond
do_sub_cond(unsigned cf
, TCGv_reg res
,
905 TCGv_reg in1
, TCGv_reg in2
, TCGv_reg sv
)
911 cond
= cond_make(TCG_COND_EQ
, in1
, in2
);
914 cond
= cond_make(TCG_COND_LT
, in1
, in2
);
917 cond
= cond_make(TCG_COND_LE
, in1
, in2
);
919 case 4: /* << / >>= */
920 cond
= cond_make(TCG_COND_LTU
, in1
, in2
);
922 case 5: /* <<= / >> */
923 cond
= cond_make(TCG_COND_LEU
, in1
, in2
);
926 return do_cond(cf
, res
, NULL
, sv
);
929 cond
.c
= tcg_invert_cond(cond
.c
);
936 * Similar, but for logicals, where the carry and overflow bits are not
937 * computed, and use of them is undefined.
939 * Undefined or not, hardware does not trap. It seems reasonable to
940 * assume hardware treats cases c={4,5,6} as if C=0 & V=0, since that's
941 * how cases c={2,3} are treated.
944 static DisasCond
do_log_cond(unsigned cf
, TCGv_reg res
)
948 case 9: /* undef, C */
949 case 11: /* undef, C & !Z */
950 case 12: /* undef, V */
951 return cond_make_f();
954 case 8: /* undef, !C */
955 case 10: /* undef, !C | Z */
956 case 13: /* undef, !V */
957 return cond_make_t();
960 return cond_make_0(TCG_COND_EQ
, res
);
962 return cond_make_0(TCG_COND_NE
, res
);
964 return cond_make_0(TCG_COND_LT
, res
);
966 return cond_make_0(TCG_COND_GE
, res
);
968 return cond_make_0(TCG_COND_LE
, res
);
970 return cond_make_0(TCG_COND_GT
, res
);
974 return do_cond(cf
, res
, NULL
, NULL
);
977 g_assert_not_reached();
981 /* Similar, but for shift/extract/deposit conditions. */
983 static DisasCond
do_sed_cond(unsigned orig
, TCGv_reg res
)
987 /* Convert the compressed condition codes to standard.
988 0-2 are the same as logicals (nv,<,<=), while 3 is OD.
989 4-7 are the reverse of 0-3. */
996 return do_log_cond(c
* 2 + f
, res
);
999 /* Similar, but for unit conditions. */
1001 static DisasCond
do_unit_cond(unsigned cf
, TCGv_reg res
,
1002 TCGv_reg in1
, TCGv_reg in2
)
1005 TCGv_reg tmp
, cb
= NULL
;
1008 /* Since we want to test lots of carry-out bits all at once, do not
1009 * do our normal thing and compute carry-in of bit B+1 since that
1010 * leaves us with carry bits spread across two words.
1012 cb
= tcg_temp_new();
1013 tmp
= tcg_temp_new();
1014 tcg_gen_or_reg(cb
, in1
, in2
);
1015 tcg_gen_and_reg(tmp
, in1
, in2
);
1016 tcg_gen_andc_reg(cb
, cb
, res
);
1017 tcg_gen_or_reg(cb
, cb
, tmp
);
1021 case 0: /* never / TR */
1022 case 1: /* undefined */
1023 case 5: /* undefined */
1024 cond
= cond_make_f();
1027 case 2: /* SBZ / NBZ */
1028 /* See hasless(v,1) from
1029 * https://graphics.stanford.edu/~seander/bithacks.html#ZeroInWord
1031 tmp
= tcg_temp_new();
1032 tcg_gen_subi_reg(tmp
, res
, 0x01010101u
);
1033 tcg_gen_andc_reg(tmp
, tmp
, res
);
1034 tcg_gen_andi_reg(tmp
, tmp
, 0x80808080u
);
1035 cond
= cond_make_0(TCG_COND_NE
, tmp
);
1038 case 3: /* SHZ / NHZ */
1039 tmp
= tcg_temp_new();
1040 tcg_gen_subi_reg(tmp
, res
, 0x00010001u
);
1041 tcg_gen_andc_reg(tmp
, tmp
, res
);
1042 tcg_gen_andi_reg(tmp
, tmp
, 0x80008000u
);
1043 cond
= cond_make_0(TCG_COND_NE
, tmp
);
1046 case 4: /* SDC / NDC */
1047 tcg_gen_andi_reg(cb
, cb
, 0x88888888u
);
1048 cond
= cond_make_0(TCG_COND_NE
, cb
);
1051 case 6: /* SBC / NBC */
1052 tcg_gen_andi_reg(cb
, cb
, 0x80808080u
);
1053 cond
= cond_make_0(TCG_COND_NE
, cb
);
1056 case 7: /* SHC / NHC */
1057 tcg_gen_andi_reg(cb
, cb
, 0x80008000u
);
1058 cond
= cond_make_0(TCG_COND_NE
, cb
);
1062 g_assert_not_reached();
1065 cond
.c
= tcg_invert_cond(cond
.c
);
1071 /* Compute signed overflow for addition. */
1072 static TCGv_reg
do_add_sv(DisasContext
*ctx
, TCGv_reg res
,
1073 TCGv_reg in1
, TCGv_reg in2
)
1075 TCGv_reg sv
= get_temp(ctx
);
1076 TCGv_reg tmp
= tcg_temp_new();
1078 tcg_gen_xor_reg(sv
, res
, in1
);
1079 tcg_gen_xor_reg(tmp
, in1
, in2
);
1080 tcg_gen_andc_reg(sv
, sv
, tmp
);
1085 /* Compute signed overflow for subtraction. */
1086 static TCGv_reg
do_sub_sv(DisasContext
*ctx
, TCGv_reg res
,
1087 TCGv_reg in1
, TCGv_reg in2
)
1089 TCGv_reg sv
= get_temp(ctx
);
1090 TCGv_reg tmp
= tcg_temp_new();
1092 tcg_gen_xor_reg(sv
, res
, in1
);
1093 tcg_gen_xor_reg(tmp
, in1
, in2
);
1094 tcg_gen_and_reg(sv
, sv
, tmp
);
1099 static void do_add(DisasContext
*ctx
, unsigned rt
, TCGv_reg in1
,
1100 TCGv_reg in2
, unsigned shift
, bool is_l
,
1101 bool is_tsv
, bool is_tc
, bool is_c
, unsigned cf
)
1103 TCGv_reg dest
, cb
, cb_msb
, sv
, tmp
;
1104 unsigned c
= cf
>> 1;
1107 dest
= tcg_temp_new();
1112 tmp
= get_temp(ctx
);
1113 tcg_gen_shli_reg(tmp
, in1
, shift
);
1117 if (!is_l
|| cond_need_cb(c
)) {
1118 TCGv_reg zero
= tcg_constant_reg(0);
1119 cb_msb
= get_temp(ctx
);
1120 tcg_gen_add2_reg(dest
, cb_msb
, in1
, zero
, in2
, zero
);
1122 tcg_gen_add2_reg(dest
, cb_msb
, dest
, cb_msb
, cpu_psw_cb_msb
, zero
);
1126 tcg_gen_xor_reg(cb
, in1
, in2
);
1127 tcg_gen_xor_reg(cb
, cb
, dest
);
1130 tcg_gen_add_reg(dest
, in1
, in2
);
1132 tcg_gen_add_reg(dest
, dest
, cpu_psw_cb_msb
);
1136 /* Compute signed overflow if required. */
1138 if (is_tsv
|| cond_need_sv(c
)) {
1139 sv
= do_add_sv(ctx
, dest
, in1
, in2
);
1141 /* ??? Need to include overflow from shift. */
1142 gen_helper_tsv(cpu_env
, sv
);
1146 /* Emit any conditional trap before any writeback. */
1147 cond
= do_cond(cf
, dest
, cb_msb
, sv
);
1149 tmp
= tcg_temp_new();
1150 tcg_gen_setcond_reg(cond
.c
, tmp
, cond
.a0
, cond
.a1
);
1151 gen_helper_tcond(cpu_env
, tmp
);
1154 /* Write back the result. */
1156 save_or_nullify(ctx
, cpu_psw_cb
, cb
);
1157 save_or_nullify(ctx
, cpu_psw_cb_msb
, cb_msb
);
1159 save_gpr(ctx
, rt
, dest
);
1161 /* Install the new nullification. */
1162 cond_free(&ctx
->null_cond
);
1163 ctx
->null_cond
= cond
;
1166 static bool do_add_reg(DisasContext
*ctx
, arg_rrr_cf_sh
*a
,
1167 bool is_l
, bool is_tsv
, bool is_tc
, bool is_c
)
1169 TCGv_reg tcg_r1
, tcg_r2
;
1174 tcg_r1
= load_gpr(ctx
, a
->r1
);
1175 tcg_r2
= load_gpr(ctx
, a
->r2
);
1176 do_add(ctx
, a
->t
, tcg_r1
, tcg_r2
, a
->sh
, is_l
, is_tsv
, is_tc
, is_c
, a
->cf
);
1177 return nullify_end(ctx
);
1180 static bool do_add_imm(DisasContext
*ctx
, arg_rri_cf
*a
,
1181 bool is_tsv
, bool is_tc
)
1183 TCGv_reg tcg_im
, tcg_r2
;
1188 tcg_im
= load_const(ctx
, a
->i
);
1189 tcg_r2
= load_gpr(ctx
, a
->r
);
1190 do_add(ctx
, a
->t
, tcg_im
, tcg_r2
, 0, 0, is_tsv
, is_tc
, 0, a
->cf
);
1191 return nullify_end(ctx
);
1194 static void do_sub(DisasContext
*ctx
, unsigned rt
, TCGv_reg in1
,
1195 TCGv_reg in2
, bool is_tsv
, bool is_b
,
1196 bool is_tc
, unsigned cf
)
1198 TCGv_reg dest
, sv
, cb
, cb_msb
, zero
, tmp
;
1199 unsigned c
= cf
>> 1;
1202 dest
= tcg_temp_new();
1203 cb
= tcg_temp_new();
1204 cb_msb
= tcg_temp_new();
1206 zero
= tcg_constant_reg(0);
1208 /* DEST,C = IN1 + ~IN2 + C. */
1209 tcg_gen_not_reg(cb
, in2
);
1210 tcg_gen_add2_reg(dest
, cb_msb
, in1
, zero
, cpu_psw_cb_msb
, zero
);
1211 tcg_gen_add2_reg(dest
, cb_msb
, dest
, cb_msb
, cb
, zero
);
1212 tcg_gen_xor_reg(cb
, cb
, in1
);
1213 tcg_gen_xor_reg(cb
, cb
, dest
);
1215 /* DEST,C = IN1 + ~IN2 + 1. We can produce the same result in fewer
1216 operations by seeding the high word with 1 and subtracting. */
1217 tcg_gen_movi_reg(cb_msb
, 1);
1218 tcg_gen_sub2_reg(dest
, cb_msb
, in1
, cb_msb
, in2
, zero
);
1219 tcg_gen_eqv_reg(cb
, in1
, in2
);
1220 tcg_gen_xor_reg(cb
, cb
, dest
);
1223 /* Compute signed overflow if required. */
1225 if (is_tsv
|| cond_need_sv(c
)) {
1226 sv
= do_sub_sv(ctx
, dest
, in1
, in2
);
1228 gen_helper_tsv(cpu_env
, sv
);
1232 /* Compute the condition. We cannot use the special case for borrow. */
1234 cond
= do_sub_cond(cf
, dest
, in1
, in2
, sv
);
1236 cond
= do_cond(cf
, dest
, cb_msb
, sv
);
1239 /* Emit any conditional trap before any writeback. */
1241 tmp
= tcg_temp_new();
1242 tcg_gen_setcond_reg(cond
.c
, tmp
, cond
.a0
, cond
.a1
);
1243 gen_helper_tcond(cpu_env
, tmp
);
1246 /* Write back the result. */
1247 save_or_nullify(ctx
, cpu_psw_cb
, cb
);
1248 save_or_nullify(ctx
, cpu_psw_cb_msb
, cb_msb
);
1249 save_gpr(ctx
, rt
, dest
);
1251 /* Install the new nullification. */
1252 cond_free(&ctx
->null_cond
);
1253 ctx
->null_cond
= cond
;
1256 static bool do_sub_reg(DisasContext
*ctx
, arg_rrr_cf
*a
,
1257 bool is_tsv
, bool is_b
, bool is_tc
)
1259 TCGv_reg tcg_r1
, tcg_r2
;
1264 tcg_r1
= load_gpr(ctx
, a
->r1
);
1265 tcg_r2
= load_gpr(ctx
, a
->r2
);
1266 do_sub(ctx
, a
->t
, tcg_r1
, tcg_r2
, is_tsv
, is_b
, is_tc
, a
->cf
);
1267 return nullify_end(ctx
);
1270 static bool do_sub_imm(DisasContext
*ctx
, arg_rri_cf
*a
, bool is_tsv
)
1272 TCGv_reg tcg_im
, tcg_r2
;
1277 tcg_im
= load_const(ctx
, a
->i
);
1278 tcg_r2
= load_gpr(ctx
, a
->r
);
1279 do_sub(ctx
, a
->t
, tcg_im
, tcg_r2
, is_tsv
, 0, 0, a
->cf
);
1280 return nullify_end(ctx
);
1283 static void do_cmpclr(DisasContext
*ctx
, unsigned rt
, TCGv_reg in1
,
1284 TCGv_reg in2
, unsigned cf
)
1289 dest
= tcg_temp_new();
1290 tcg_gen_sub_reg(dest
, in1
, in2
);
1292 /* Compute signed overflow if required. */
1294 if (cond_need_sv(cf
>> 1)) {
1295 sv
= do_sub_sv(ctx
, dest
, in1
, in2
);
1298 /* Form the condition for the compare. */
1299 cond
= do_sub_cond(cf
, dest
, in1
, in2
, sv
);
1302 tcg_gen_movi_reg(dest
, 0);
1303 save_gpr(ctx
, rt
, dest
);
1305 /* Install the new nullification. */
1306 cond_free(&ctx
->null_cond
);
1307 ctx
->null_cond
= cond
;
1310 static void do_log(DisasContext
*ctx
, unsigned rt
, TCGv_reg in1
,
1311 TCGv_reg in2
, unsigned cf
,
1312 void (*fn
)(TCGv_reg
, TCGv_reg
, TCGv_reg
))
1314 TCGv_reg dest
= dest_gpr(ctx
, rt
);
1316 /* Perform the operation, and writeback. */
1318 save_gpr(ctx
, rt
, dest
);
1320 /* Install the new nullification. */
1321 cond_free(&ctx
->null_cond
);
1323 ctx
->null_cond
= do_log_cond(cf
, dest
);
1327 static bool do_log_reg(DisasContext
*ctx
, arg_rrr_cf
*a
,
1328 void (*fn
)(TCGv_reg
, TCGv_reg
, TCGv_reg
))
1330 TCGv_reg tcg_r1
, tcg_r2
;
1335 tcg_r1
= load_gpr(ctx
, a
->r1
);
1336 tcg_r2
= load_gpr(ctx
, a
->r2
);
1337 do_log(ctx
, a
->t
, tcg_r1
, tcg_r2
, a
->cf
, fn
);
1338 return nullify_end(ctx
);
1341 static void do_unit(DisasContext
*ctx
, unsigned rt
, TCGv_reg in1
,
1342 TCGv_reg in2
, unsigned cf
, bool is_tc
,
1343 void (*fn
)(TCGv_reg
, TCGv_reg
, TCGv_reg
))
1349 dest
= dest_gpr(ctx
, rt
);
1351 save_gpr(ctx
, rt
, dest
);
1352 cond_free(&ctx
->null_cond
);
1354 dest
= tcg_temp_new();
1357 cond
= do_unit_cond(cf
, dest
, in1
, in2
);
1360 TCGv_reg tmp
= tcg_temp_new();
1361 tcg_gen_setcond_reg(cond
.c
, tmp
, cond
.a0
, cond
.a1
);
1362 gen_helper_tcond(cpu_env
, tmp
);
1364 save_gpr(ctx
, rt
, dest
);
1366 cond_free(&ctx
->null_cond
);
1367 ctx
->null_cond
= cond
;
1371 #ifndef CONFIG_USER_ONLY
1372 /* The "normal" usage is SP >= 0, wherein SP == 0 selects the space
1373 from the top 2 bits of the base register. There are a few system
1374 instructions that have a 3-bit space specifier, for which SR0 is
1375 not special. To handle this, pass ~SP. */
1376 static TCGv_i64
space_select(DisasContext
*ctx
, int sp
, TCGv_reg base
)
1386 spc
= get_temp_tl(ctx
);
1387 load_spr(ctx
, spc
, sp
);
1390 if (ctx
->tb_flags
& TB_FLAG_SR_SAME
) {
1394 ptr
= tcg_temp_new_ptr();
1395 tmp
= tcg_temp_new();
1396 spc
= get_temp_tl(ctx
);
1398 tcg_gen_shri_reg(tmp
, base
, TARGET_REGISTER_BITS
- 5);
1399 tcg_gen_andi_reg(tmp
, tmp
, 030);
1400 tcg_gen_trunc_reg_ptr(ptr
, tmp
);
1402 tcg_gen_add_ptr(ptr
, ptr
, cpu_env
);
1403 tcg_gen_ld_i64(spc
, ptr
, offsetof(CPUHPPAState
, sr
[4]));
1409 static void form_gva(DisasContext
*ctx
, TCGv_tl
*pgva
, TCGv_reg
*pofs
,
1410 unsigned rb
, unsigned rx
, int scale
, target_sreg disp
,
1411 unsigned sp
, int modify
, bool is_phys
)
1413 TCGv_reg base
= load_gpr(ctx
, rb
);
1416 /* Note that RX is mutually exclusive with DISP. */
1418 ofs
= get_temp(ctx
);
1419 tcg_gen_shli_reg(ofs
, cpu_gr
[rx
], scale
);
1420 tcg_gen_add_reg(ofs
, ofs
, base
);
1421 } else if (disp
|| modify
) {
1422 ofs
= get_temp(ctx
);
1423 tcg_gen_addi_reg(ofs
, base
, disp
);
1429 #ifdef CONFIG_USER_ONLY
1430 *pgva
= (modify
<= 0 ? ofs
: base
);
1432 TCGv_tl addr
= get_temp_tl(ctx
);
1433 tcg_gen_extu_reg_tl(addr
, modify
<= 0 ? ofs
: base
);
1434 if (ctx
->tb_flags
& PSW_W
) {
1435 tcg_gen_andi_tl(addr
, addr
, 0x3fffffffffffffffull
);
1438 tcg_gen_or_tl(addr
, addr
, space_select(ctx
, sp
, base
));
1444 /* Emit a memory load. The modify parameter should be
1445 * < 0 for pre-modify,
1446 * > 0 for post-modify,
1447 * = 0 for no base register update.
1449 static void do_load_32(DisasContext
*ctx
, TCGv_i32 dest
, unsigned rb
,
1450 unsigned rx
, int scale
, target_sreg disp
,
1451 unsigned sp
, int modify
, MemOp mop
)
1456 /* Caller uses nullify_over/nullify_end. */
1457 assert(ctx
->null_cond
.c
== TCG_COND_NEVER
);
1459 form_gva(ctx
, &addr
, &ofs
, rb
, rx
, scale
, disp
, sp
, modify
,
1460 ctx
->mmu_idx
== MMU_PHYS_IDX
);
1461 tcg_gen_qemu_ld_reg(dest
, addr
, ctx
->mmu_idx
, mop
| UNALIGN(ctx
));
1463 save_gpr(ctx
, rb
, ofs
);
1467 static void do_load_64(DisasContext
*ctx
, TCGv_i64 dest
, unsigned rb
,
1468 unsigned rx
, int scale
, target_sreg disp
,
1469 unsigned sp
, int modify
, MemOp mop
)
1474 /* Caller uses nullify_over/nullify_end. */
1475 assert(ctx
->null_cond
.c
== TCG_COND_NEVER
);
1477 form_gva(ctx
, &addr
, &ofs
, rb
, rx
, scale
, disp
, sp
, modify
,
1478 ctx
->mmu_idx
== MMU_PHYS_IDX
);
1479 tcg_gen_qemu_ld_i64(dest
, addr
, ctx
->mmu_idx
, mop
| UNALIGN(ctx
));
1481 save_gpr(ctx
, rb
, ofs
);
1485 static void do_store_32(DisasContext
*ctx
, TCGv_i32 src
, unsigned rb
,
1486 unsigned rx
, int scale
, target_sreg disp
,
1487 unsigned sp
, int modify
, MemOp mop
)
1492 /* Caller uses nullify_over/nullify_end. */
1493 assert(ctx
->null_cond
.c
== TCG_COND_NEVER
);
1495 form_gva(ctx
, &addr
, &ofs
, rb
, rx
, scale
, disp
, sp
, modify
,
1496 ctx
->mmu_idx
== MMU_PHYS_IDX
);
1497 tcg_gen_qemu_st_i32(src
, addr
, ctx
->mmu_idx
, mop
| UNALIGN(ctx
));
1499 save_gpr(ctx
, rb
, ofs
);
1503 static void do_store_64(DisasContext
*ctx
, TCGv_i64 src
, unsigned rb
,
1504 unsigned rx
, int scale
, target_sreg disp
,
1505 unsigned sp
, int modify
, MemOp mop
)
1510 /* Caller uses nullify_over/nullify_end. */
1511 assert(ctx
->null_cond
.c
== TCG_COND_NEVER
);
1513 form_gva(ctx
, &addr
, &ofs
, rb
, rx
, scale
, disp
, sp
, modify
,
1514 ctx
->mmu_idx
== MMU_PHYS_IDX
);
1515 tcg_gen_qemu_st_i64(src
, addr
, ctx
->mmu_idx
, mop
| UNALIGN(ctx
));
1517 save_gpr(ctx
, rb
, ofs
);
1521 #if TARGET_REGISTER_BITS == 64
1522 #define do_load_reg do_load_64
1523 #define do_store_reg do_store_64
1525 #define do_load_reg do_load_32
1526 #define do_store_reg do_store_32
1529 static bool do_load(DisasContext
*ctx
, unsigned rt
, unsigned rb
,
1530 unsigned rx
, int scale
, target_sreg disp
,
1531 unsigned sp
, int modify
, MemOp mop
)
1538 /* No base register update. */
1539 dest
= dest_gpr(ctx
, rt
);
1541 /* Make sure if RT == RB, we see the result of the load. */
1542 dest
= get_temp(ctx
);
1544 do_load_reg(ctx
, dest
, rb
, rx
, scale
, disp
, sp
, modify
, mop
);
1545 save_gpr(ctx
, rt
, dest
);
1547 return nullify_end(ctx
);
1550 static bool do_floadw(DisasContext
*ctx
, unsigned rt
, unsigned rb
,
1551 unsigned rx
, int scale
, target_sreg disp
,
1552 unsigned sp
, int modify
)
1558 tmp
= tcg_temp_new_i32();
1559 do_load_32(ctx
, tmp
, rb
, rx
, scale
, disp
, sp
, modify
, MO_TEUL
);
1560 save_frw_i32(rt
, tmp
);
1563 gen_helper_loaded_fr0(cpu_env
);
1566 return nullify_end(ctx
);
1569 static bool trans_fldw(DisasContext
*ctx
, arg_ldst
*a
)
1571 return do_floadw(ctx
, a
->t
, a
->b
, a
->x
, a
->scale
? 2 : 0,
1572 a
->disp
, a
->sp
, a
->m
);
1575 static bool do_floadd(DisasContext
*ctx
, unsigned rt
, unsigned rb
,
1576 unsigned rx
, int scale
, target_sreg disp
,
1577 unsigned sp
, int modify
)
1583 tmp
= tcg_temp_new_i64();
1584 do_load_64(ctx
, tmp
, rb
, rx
, scale
, disp
, sp
, modify
, MO_TEUQ
);
1588 gen_helper_loaded_fr0(cpu_env
);
1591 return nullify_end(ctx
);
1594 static bool trans_fldd(DisasContext
*ctx
, arg_ldst
*a
)
1596 return do_floadd(ctx
, a
->t
, a
->b
, a
->x
, a
->scale
? 3 : 0,
1597 a
->disp
, a
->sp
, a
->m
);
1600 static bool do_store(DisasContext
*ctx
, unsigned rt
, unsigned rb
,
1601 target_sreg disp
, unsigned sp
,
1602 int modify
, MemOp mop
)
1605 do_store_reg(ctx
, load_gpr(ctx
, rt
), rb
, 0, 0, disp
, sp
, modify
, mop
);
1606 return nullify_end(ctx
);
1609 static bool do_fstorew(DisasContext
*ctx
, unsigned rt
, unsigned rb
,
1610 unsigned rx
, int scale
, target_sreg disp
,
1611 unsigned sp
, int modify
)
1617 tmp
= load_frw_i32(rt
);
1618 do_store_32(ctx
, tmp
, rb
, rx
, scale
, disp
, sp
, modify
, MO_TEUL
);
1620 return nullify_end(ctx
);
1623 static bool trans_fstw(DisasContext
*ctx
, arg_ldst
*a
)
1625 return do_fstorew(ctx
, a
->t
, a
->b
, a
->x
, a
->scale
? 2 : 0,
1626 a
->disp
, a
->sp
, a
->m
);
1629 static bool do_fstored(DisasContext
*ctx
, unsigned rt
, unsigned rb
,
1630 unsigned rx
, int scale
, target_sreg disp
,
1631 unsigned sp
, int modify
)
1638 do_store_64(ctx
, tmp
, rb
, rx
, scale
, disp
, sp
, modify
, MO_TEUQ
);
1640 return nullify_end(ctx
);
1643 static bool trans_fstd(DisasContext
*ctx
, arg_ldst
*a
)
1645 return do_fstored(ctx
, a
->t
, a
->b
, a
->x
, a
->scale
? 3 : 0,
1646 a
->disp
, a
->sp
, a
->m
);
1649 static bool do_fop_wew(DisasContext
*ctx
, unsigned rt
, unsigned ra
,
1650 void (*func
)(TCGv_i32
, TCGv_env
, TCGv_i32
))
1655 tmp
= load_frw0_i32(ra
);
1657 func(tmp
, cpu_env
, tmp
);
1659 save_frw_i32(rt
, tmp
);
1660 return nullify_end(ctx
);
1663 static bool do_fop_wed(DisasContext
*ctx
, unsigned rt
, unsigned ra
,
1664 void (*func
)(TCGv_i32
, TCGv_env
, TCGv_i64
))
1671 dst
= tcg_temp_new_i32();
1673 func(dst
, cpu_env
, src
);
1675 save_frw_i32(rt
, dst
);
1676 return nullify_end(ctx
);
1679 static bool do_fop_ded(DisasContext
*ctx
, unsigned rt
, unsigned ra
,
1680 void (*func
)(TCGv_i64
, TCGv_env
, TCGv_i64
))
1685 tmp
= load_frd0(ra
);
1687 func(tmp
, cpu_env
, tmp
);
1690 return nullify_end(ctx
);
1693 static bool do_fop_dew(DisasContext
*ctx
, unsigned rt
, unsigned ra
,
1694 void (*func
)(TCGv_i64
, TCGv_env
, TCGv_i32
))
1700 src
= load_frw0_i32(ra
);
1701 dst
= tcg_temp_new_i64();
1703 func(dst
, cpu_env
, src
);
1706 return nullify_end(ctx
);
1709 static bool do_fop_weww(DisasContext
*ctx
, unsigned rt
,
1710 unsigned ra
, unsigned rb
,
1711 void (*func
)(TCGv_i32
, TCGv_env
, TCGv_i32
, TCGv_i32
))
1716 a
= load_frw0_i32(ra
);
1717 b
= load_frw0_i32(rb
);
1719 func(a
, cpu_env
, a
, b
);
1721 save_frw_i32(rt
, a
);
1722 return nullify_end(ctx
);
1725 static bool do_fop_dedd(DisasContext
*ctx
, unsigned rt
,
1726 unsigned ra
, unsigned rb
,
1727 void (*func
)(TCGv_i64
, TCGv_env
, TCGv_i64
, TCGv_i64
))
1735 func(a
, cpu_env
, a
, b
);
1738 return nullify_end(ctx
);
1741 /* Emit an unconditional branch to a direct target, which may or may not
1742 have already had nullification handled. */
1743 static bool do_dbranch(DisasContext
*ctx
, target_ureg dest
,
1744 unsigned link
, bool is_n
)
1746 if (ctx
->null_cond
.c
== TCG_COND_NEVER
&& ctx
->null_lab
== NULL
) {
1748 copy_iaoq_entry(cpu_gr
[link
], ctx
->iaoq_n
, ctx
->iaoq_n_var
);
1752 ctx
->null_cond
.c
= TCG_COND_ALWAYS
;
1758 copy_iaoq_entry(cpu_gr
[link
], ctx
->iaoq_n
, ctx
->iaoq_n_var
);
1761 if (is_n
&& use_nullify_skip(ctx
)) {
1762 nullify_set(ctx
, 0);
1763 gen_goto_tb(ctx
, 0, dest
, dest
+ 4);
1765 nullify_set(ctx
, is_n
);
1766 gen_goto_tb(ctx
, 0, ctx
->iaoq_b
, dest
);
1771 nullify_set(ctx
, 0);
1772 gen_goto_tb(ctx
, 1, ctx
->iaoq_b
, ctx
->iaoq_n
);
1773 ctx
->base
.is_jmp
= DISAS_NORETURN
;
1778 /* Emit a conditional branch to a direct target. If the branch itself
1779 is nullified, we should have already used nullify_over. */
1780 static bool do_cbranch(DisasContext
*ctx
, target_sreg disp
, bool is_n
,
1783 target_ureg dest
= iaoq_dest(ctx
, disp
);
1784 TCGLabel
*taken
= NULL
;
1785 TCGCond c
= cond
->c
;
1788 assert(ctx
->null_cond
.c
== TCG_COND_NEVER
);
1790 /* Handle TRUE and NEVER as direct branches. */
1791 if (c
== TCG_COND_ALWAYS
) {
1792 return do_dbranch(ctx
, dest
, 0, is_n
&& disp
>= 0);
1794 if (c
== TCG_COND_NEVER
) {
1795 return do_dbranch(ctx
, ctx
->iaoq_n
, 0, is_n
&& disp
< 0);
1798 taken
= gen_new_label();
1799 tcg_gen_brcond_reg(c
, cond
->a0
, cond
->a1
, taken
);
1802 /* Not taken: Condition not satisfied; nullify on backward branches. */
1803 n
= is_n
&& disp
< 0;
1804 if (n
&& use_nullify_skip(ctx
)) {
1805 nullify_set(ctx
, 0);
1806 gen_goto_tb(ctx
, 0, ctx
->iaoq_n
, ctx
->iaoq_n
+ 4);
1808 if (!n
&& ctx
->null_lab
) {
1809 gen_set_label(ctx
->null_lab
);
1810 ctx
->null_lab
= NULL
;
1812 nullify_set(ctx
, n
);
1813 if (ctx
->iaoq_n
== -1) {
1814 /* The temporary iaoq_n_var died at the branch above.
1815 Regenerate it here instead of saving it. */
1816 tcg_gen_addi_reg(ctx
->iaoq_n_var
, cpu_iaoq_b
, 4);
1818 gen_goto_tb(ctx
, 0, ctx
->iaoq_b
, ctx
->iaoq_n
);
1821 gen_set_label(taken
);
1823 /* Taken: Condition satisfied; nullify on forward branches. */
1824 n
= is_n
&& disp
>= 0;
1825 if (n
&& use_nullify_skip(ctx
)) {
1826 nullify_set(ctx
, 0);
1827 gen_goto_tb(ctx
, 1, dest
, dest
+ 4);
1829 nullify_set(ctx
, n
);
1830 gen_goto_tb(ctx
, 1, ctx
->iaoq_b
, dest
);
1833 /* Not taken: the branch itself was nullified. */
1834 if (ctx
->null_lab
) {
1835 gen_set_label(ctx
->null_lab
);
1836 ctx
->null_lab
= NULL
;
1837 ctx
->base
.is_jmp
= DISAS_IAQ_N_STALE
;
1839 ctx
->base
.is_jmp
= DISAS_NORETURN
;
1844 /* Emit an unconditional branch to an indirect target. This handles
1845 nullification of the branch itself. */
1846 static bool do_ibranch(DisasContext
*ctx
, TCGv_reg dest
,
1847 unsigned link
, bool is_n
)
1849 TCGv_reg a0
, a1
, next
, tmp
;
1852 assert(ctx
->null_lab
== NULL
);
1854 if (ctx
->null_cond
.c
== TCG_COND_NEVER
) {
1856 copy_iaoq_entry(cpu_gr
[link
], ctx
->iaoq_n
, ctx
->iaoq_n_var
);
1858 next
= get_temp(ctx
);
1859 tcg_gen_mov_reg(next
, dest
);
1861 if (use_nullify_skip(ctx
)) {
1862 tcg_gen_mov_reg(cpu_iaoq_f
, next
);
1863 tcg_gen_addi_reg(cpu_iaoq_b
, next
, 4);
1864 nullify_set(ctx
, 0);
1865 ctx
->base
.is_jmp
= DISAS_IAQ_N_UPDATED
;
1868 ctx
->null_cond
.c
= TCG_COND_ALWAYS
;
1871 ctx
->iaoq_n_var
= next
;
1872 } else if (is_n
&& use_nullify_skip(ctx
)) {
1873 /* The (conditional) branch, B, nullifies the next insn, N,
1874 and we're allowed to skip execution N (no single-step or
1875 tracepoint in effect). Since the goto_ptr that we must use
1876 for the indirect branch consumes no special resources, we
1877 can (conditionally) skip B and continue execution. */
1878 /* The use_nullify_skip test implies we have a known control path. */
1879 tcg_debug_assert(ctx
->iaoq_b
!= -1);
1880 tcg_debug_assert(ctx
->iaoq_n
!= -1);
1882 /* We do have to handle the non-local temporary, DEST, before
1883 branching. Since IOAQ_F is not really live at this point, we
1884 can simply store DEST optimistically. Similarly with IAOQ_B. */
1885 tcg_gen_mov_reg(cpu_iaoq_f
, dest
);
1886 tcg_gen_addi_reg(cpu_iaoq_b
, dest
, 4);
1890 tcg_gen_movi_reg(cpu_gr
[link
], ctx
->iaoq_n
);
1892 tcg_gen_lookup_and_goto_ptr();
1893 return nullify_end(ctx
);
1895 c
= ctx
->null_cond
.c
;
1896 a0
= ctx
->null_cond
.a0
;
1897 a1
= ctx
->null_cond
.a1
;
1899 tmp
= tcg_temp_new();
1900 next
= get_temp(ctx
);
1902 copy_iaoq_entry(tmp
, ctx
->iaoq_n
, ctx
->iaoq_n_var
);
1903 tcg_gen_movcond_reg(c
, next
, a0
, a1
, tmp
, dest
);
1905 ctx
->iaoq_n_var
= next
;
1908 tcg_gen_movcond_reg(c
, cpu_gr
[link
], a0
, a1
, cpu_gr
[link
], tmp
);
1912 /* The branch nullifies the next insn, which means the state of N
1913 after the branch is the inverse of the state of N that applied
1915 tcg_gen_setcond_reg(tcg_invert_cond(c
), cpu_psw_n
, a0
, a1
);
1916 cond_free(&ctx
->null_cond
);
1917 ctx
->null_cond
= cond_make_n();
1918 ctx
->psw_n_nonzero
= true;
1920 cond_free(&ctx
->null_cond
);
1927 * if (IAOQ_Front{30..31} < GR[b]{30..31})
1928 * IAOQ_Next{30..31} ← GR[b]{30..31};
1930 * IAOQ_Next{30..31} ← IAOQ_Front{30..31};
1931 * which keeps the privilege level from being increased.
1933 static TCGv_reg
do_ibranch_priv(DisasContext
*ctx
, TCGv_reg offset
)
1936 switch (ctx
->privilege
) {
1938 /* Privilege 0 is maximum and is allowed to decrease. */
1941 /* Privilege 3 is minimum and is never allowed to increase. */
1942 dest
= get_temp(ctx
);
1943 tcg_gen_ori_reg(dest
, offset
, 3);
1946 dest
= get_temp(ctx
);
1947 tcg_gen_andi_reg(dest
, offset
, -4);
1948 tcg_gen_ori_reg(dest
, dest
, ctx
->privilege
);
1949 tcg_gen_movcond_reg(TCG_COND_GTU
, dest
, dest
, offset
, dest
, offset
);
1955 #ifdef CONFIG_USER_ONLY
1956 /* On Linux, page zero is normally marked execute only + gateway.
1957 Therefore normal read or write is supposed to fail, but specific
1958 offsets have kernel code mapped to raise permissions to implement
1959 system calls. Handling this via an explicit check here, rather
1960 in than the "be disp(sr2,r0)" instruction that probably sent us
1961 here, is the easiest way to handle the branch delay slot on the
1962 aforementioned BE. */
1963 static void do_page_zero(DisasContext
*ctx
)
1965 /* If by some means we get here with PSW[N]=1, that implies that
1966 the B,GATE instruction would be skipped, and we'd fault on the
1967 next insn within the privileged page. */
1968 switch (ctx
->null_cond
.c
) {
1969 case TCG_COND_NEVER
:
1971 case TCG_COND_ALWAYS
:
1972 tcg_gen_movi_reg(cpu_psw_n
, 0);
1975 /* Since this is always the first (and only) insn within the
1976 TB, we should know the state of PSW[N] from TB->FLAGS. */
1977 g_assert_not_reached();
1980 /* Check that we didn't arrive here via some means that allowed
1981 non-sequential instruction execution. Normally the PSW[B] bit
1982 detects this by disallowing the B,GATE instruction to execute
1983 under such conditions. */
1984 if (ctx
->iaoq_b
!= ctx
->iaoq_f
+ 4) {
1988 switch (ctx
->iaoq_f
& -4) {
1989 case 0x00: /* Null pointer call */
1990 gen_excp_1(EXCP_IMP
);
1991 ctx
->base
.is_jmp
= DISAS_NORETURN
;
1994 case 0xb0: /* LWS */
1995 gen_excp_1(EXCP_SYSCALL_LWS
);
1996 ctx
->base
.is_jmp
= DISAS_NORETURN
;
1999 case 0xe0: /* SET_THREAD_POINTER */
2000 tcg_gen_st_reg(cpu_gr
[26], cpu_env
, offsetof(CPUHPPAState
, cr
[27]));
2001 tcg_gen_ori_reg(cpu_iaoq_f
, cpu_gr
[31], 3);
2002 tcg_gen_addi_reg(cpu_iaoq_b
, cpu_iaoq_f
, 4);
2003 ctx
->base
.is_jmp
= DISAS_IAQ_N_UPDATED
;
2006 case 0x100: /* SYSCALL */
2007 gen_excp_1(EXCP_SYSCALL
);
2008 ctx
->base
.is_jmp
= DISAS_NORETURN
;
2013 gen_excp_1(EXCP_ILL
);
2014 ctx
->base
.is_jmp
= DISAS_NORETURN
;
2020 static bool trans_nop(DisasContext
*ctx
, arg_nop
*a
)
2022 cond_free(&ctx
->null_cond
);
2026 static bool trans_break(DisasContext
*ctx
, arg_break
*a
)
2028 return gen_excp_iir(ctx
, EXCP_BREAK
);
2031 static bool trans_sync(DisasContext
*ctx
, arg_sync
*a
)
2033 /* No point in nullifying the memory barrier. */
2034 tcg_gen_mb(TCG_BAR_SC
| TCG_MO_ALL
);
2036 cond_free(&ctx
->null_cond
);
2040 static bool trans_mfia(DisasContext
*ctx
, arg_mfia
*a
)
2043 TCGv_reg tmp
= dest_gpr(ctx
, rt
);
2044 tcg_gen_movi_reg(tmp
, ctx
->iaoq_f
);
2045 save_gpr(ctx
, rt
, tmp
);
2047 cond_free(&ctx
->null_cond
);
2051 static bool trans_mfsp(DisasContext
*ctx
, arg_mfsp
*a
)
2054 unsigned rs
= a
->sp
;
2055 TCGv_i64 t0
= tcg_temp_new_i64();
2056 TCGv_reg t1
= tcg_temp_new();
2058 load_spr(ctx
, t0
, rs
);
2059 tcg_gen_shri_i64(t0
, t0
, 32);
2060 tcg_gen_trunc_i64_reg(t1
, t0
);
2062 save_gpr(ctx
, rt
, t1
);
2064 cond_free(&ctx
->null_cond
);
2068 static bool trans_mfctl(DisasContext
*ctx
, arg_mfctl
*a
)
2071 unsigned ctl
= a
->r
;
2076 #ifdef TARGET_HPPA64
2078 /* MFSAR without ,W masks low 5 bits. */
2079 tmp
= dest_gpr(ctx
, rt
);
2080 tcg_gen_andi_reg(tmp
, cpu_sar
, 31);
2081 save_gpr(ctx
, rt
, tmp
);
2085 save_gpr(ctx
, rt
, cpu_sar
);
2087 case CR_IT
: /* Interval Timer */
2088 /* FIXME: Respect PSW_S bit. */
2090 tmp
= dest_gpr(ctx
, rt
);
2091 if (translator_io_start(&ctx
->base
)) {
2092 gen_helper_read_interval_timer(tmp
);
2093 ctx
->base
.is_jmp
= DISAS_IAQ_N_STALE
;
2095 gen_helper_read_interval_timer(tmp
);
2097 save_gpr(ctx
, rt
, tmp
);
2098 return nullify_end(ctx
);
2103 /* All other control registers are privileged. */
2104 CHECK_MOST_PRIVILEGED(EXCP_PRIV_REG
);
2108 tmp
= get_temp(ctx
);
2109 tcg_gen_ld_reg(tmp
, cpu_env
, offsetof(CPUHPPAState
, cr
[ctl
]));
2110 save_gpr(ctx
, rt
, tmp
);
2113 cond_free(&ctx
->null_cond
);
2117 static bool trans_mtsp(DisasContext
*ctx
, arg_mtsp
*a
)
2120 unsigned rs
= a
->sp
;
2124 CHECK_MOST_PRIVILEGED(EXCP_PRIV_REG
);
2128 t64
= tcg_temp_new_i64();
2129 tcg_gen_extu_reg_i64(t64
, load_gpr(ctx
, rr
));
2130 tcg_gen_shli_i64(t64
, t64
, 32);
2133 tcg_gen_st_i64(t64
, cpu_env
, offsetof(CPUHPPAState
, sr
[rs
]));
2134 ctx
->tb_flags
&= ~TB_FLAG_SR_SAME
;
2136 tcg_gen_mov_i64(cpu_sr
[rs
], t64
);
2139 return nullify_end(ctx
);
2142 static bool trans_mtctl(DisasContext
*ctx
, arg_mtctl
*a
)
2144 unsigned ctl
= a
->t
;
2148 if (ctl
== CR_SAR
) {
2149 reg
= load_gpr(ctx
, a
->r
);
2150 tmp
= tcg_temp_new();
2151 tcg_gen_andi_reg(tmp
, reg
, TARGET_REGISTER_BITS
- 1);
2152 save_or_nullify(ctx
, cpu_sar
, tmp
);
2154 cond_free(&ctx
->null_cond
);
2158 /* All other control registers are privileged or read-only. */
2159 CHECK_MOST_PRIVILEGED(EXCP_PRIV_REG
);
2161 #ifndef CONFIG_USER_ONLY
2163 reg
= load_gpr(ctx
, a
->r
);
2167 gen_helper_write_interval_timer(cpu_env
, reg
);
2170 gen_helper_write_eirr(cpu_env
, reg
);
2173 gen_helper_write_eiem(cpu_env
, reg
);
2174 ctx
->base
.is_jmp
= DISAS_IAQ_N_STALE_EXIT
;
2179 /* FIXME: Respect PSW_Q bit */
2180 /* The write advances the queue and stores to the back element. */
2181 tmp
= get_temp(ctx
);
2182 tcg_gen_ld_reg(tmp
, cpu_env
,
2183 offsetof(CPUHPPAState
, cr_back
[ctl
- CR_IIASQ
]));
2184 tcg_gen_st_reg(tmp
, cpu_env
, offsetof(CPUHPPAState
, cr
[ctl
]));
2185 tcg_gen_st_reg(reg
, cpu_env
,
2186 offsetof(CPUHPPAState
, cr_back
[ctl
- CR_IIASQ
]));
2193 tcg_gen_st_reg(reg
, cpu_env
, offsetof(CPUHPPAState
, cr
[ctl
]));
2194 #ifndef CONFIG_USER_ONLY
2195 gen_helper_change_prot_id(cpu_env
);
2200 tcg_gen_st_reg(reg
, cpu_env
, offsetof(CPUHPPAState
, cr
[ctl
]));
2203 return nullify_end(ctx
);
2207 static bool trans_mtsarcm(DisasContext
*ctx
, arg_mtsarcm
*a
)
2209 TCGv_reg tmp
= tcg_temp_new();
2211 tcg_gen_not_reg(tmp
, load_gpr(ctx
, a
->r
));
2212 tcg_gen_andi_reg(tmp
, tmp
, TARGET_REGISTER_BITS
- 1);
2213 save_or_nullify(ctx
, cpu_sar
, tmp
);
2215 cond_free(&ctx
->null_cond
);
2219 static bool trans_ldsid(DisasContext
*ctx
, arg_ldsid
*a
)
2221 TCGv_reg dest
= dest_gpr(ctx
, a
->t
);
2223 #ifdef CONFIG_USER_ONLY
2224 /* We don't implement space registers in user mode. */
2225 tcg_gen_movi_reg(dest
, 0);
2227 TCGv_i64 t0
= tcg_temp_new_i64();
2229 tcg_gen_mov_i64(t0
, space_select(ctx
, a
->sp
, load_gpr(ctx
, a
->b
)));
2230 tcg_gen_shri_i64(t0
, t0
, 32);
2231 tcg_gen_trunc_i64_reg(dest
, t0
);
2233 save_gpr(ctx
, a
->t
, dest
);
2235 cond_free(&ctx
->null_cond
);
2239 static bool trans_rsm(DisasContext
*ctx
, arg_rsm
*a
)
2241 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR
);
2242 #ifndef CONFIG_USER_ONLY
2247 tmp
= get_temp(ctx
);
2248 tcg_gen_ld_reg(tmp
, cpu_env
, offsetof(CPUHPPAState
, psw
));
2249 tcg_gen_andi_reg(tmp
, tmp
, ~a
->i
);
2250 gen_helper_swap_system_mask(tmp
, cpu_env
, tmp
);
2251 save_gpr(ctx
, a
->t
, tmp
);
2253 /* Exit the TB to recognize new interrupts, e.g. PSW_M. */
2254 ctx
->base
.is_jmp
= DISAS_IAQ_N_STALE_EXIT
;
2255 return nullify_end(ctx
);
2259 static bool trans_ssm(DisasContext
*ctx
, arg_ssm
*a
)
2261 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR
);
2262 #ifndef CONFIG_USER_ONLY
2267 tmp
= get_temp(ctx
);
2268 tcg_gen_ld_reg(tmp
, cpu_env
, offsetof(CPUHPPAState
, psw
));
2269 tcg_gen_ori_reg(tmp
, tmp
, a
->i
);
2270 gen_helper_swap_system_mask(tmp
, cpu_env
, tmp
);
2271 save_gpr(ctx
, a
->t
, tmp
);
2273 /* Exit the TB to recognize new interrupts, e.g. PSW_I. */
2274 ctx
->base
.is_jmp
= DISAS_IAQ_N_STALE_EXIT
;
2275 return nullify_end(ctx
);
2279 static bool trans_mtsm(DisasContext
*ctx
, arg_mtsm
*a
)
2281 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR
);
2282 #ifndef CONFIG_USER_ONLY
2286 reg
= load_gpr(ctx
, a
->r
);
2287 tmp
= get_temp(ctx
);
2288 gen_helper_swap_system_mask(tmp
, cpu_env
, reg
);
2290 /* Exit the TB to recognize new interrupts. */
2291 ctx
->base
.is_jmp
= DISAS_IAQ_N_STALE_EXIT
;
2292 return nullify_end(ctx
);
2296 static bool do_rfi(DisasContext
*ctx
, bool rfi_r
)
2298 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR
);
2299 #ifndef CONFIG_USER_ONLY
2303 gen_helper_rfi_r(cpu_env
);
2305 gen_helper_rfi(cpu_env
);
2307 /* Exit the TB to recognize new interrupts. */
2308 tcg_gen_exit_tb(NULL
, 0);
2309 ctx
->base
.is_jmp
= DISAS_NORETURN
;
2311 return nullify_end(ctx
);
2315 static bool trans_rfi(DisasContext
*ctx
, arg_rfi
*a
)
2317 return do_rfi(ctx
, false);
2320 static bool trans_rfi_r(DisasContext
*ctx
, arg_rfi_r
*a
)
2322 return do_rfi(ctx
, true);
2325 static bool trans_halt(DisasContext
*ctx
, arg_halt
*a
)
2327 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR
);
2328 #ifndef CONFIG_USER_ONLY
2330 gen_helper_halt(cpu_env
);
2331 ctx
->base
.is_jmp
= DISAS_NORETURN
;
2332 return nullify_end(ctx
);
2336 static bool trans_reset(DisasContext
*ctx
, arg_reset
*a
)
2338 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR
);
2339 #ifndef CONFIG_USER_ONLY
2341 gen_helper_reset(cpu_env
);
2342 ctx
->base
.is_jmp
= DISAS_NORETURN
;
2343 return nullify_end(ctx
);
2347 static bool trans_getshadowregs(DisasContext
*ctx
, arg_getshadowregs
*a
)
2349 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR
);
2350 #ifndef CONFIG_USER_ONLY
2352 gen_helper_getshadowregs(cpu_env
);
2353 return nullify_end(ctx
);
2357 static bool trans_nop_addrx(DisasContext
*ctx
, arg_ldst
*a
)
2360 TCGv_reg dest
= dest_gpr(ctx
, a
->b
);
2361 TCGv_reg src1
= load_gpr(ctx
, a
->b
);
2362 TCGv_reg src2
= load_gpr(ctx
, a
->x
);
2364 /* The only thing we need to do is the base register modification. */
2365 tcg_gen_add_reg(dest
, src1
, src2
);
2366 save_gpr(ctx
, a
->b
, dest
);
2368 cond_free(&ctx
->null_cond
);
2372 static bool trans_probe(DisasContext
*ctx
, arg_probe
*a
)
2375 TCGv_i32 level
, want
;
2380 dest
= dest_gpr(ctx
, a
->t
);
2381 form_gva(ctx
, &addr
, &ofs
, a
->b
, 0, 0, 0, a
->sp
, 0, false);
2384 level
= tcg_constant_i32(a
->ri
);
2386 level
= tcg_temp_new_i32();
2387 tcg_gen_trunc_reg_i32(level
, load_gpr(ctx
, a
->ri
));
2388 tcg_gen_andi_i32(level
, level
, 3);
2390 want
= tcg_constant_i32(a
->write
? PAGE_WRITE
: PAGE_READ
);
2392 gen_helper_probe(dest
, cpu_env
, addr
, level
, want
);
2394 save_gpr(ctx
, a
->t
, dest
);
2395 return nullify_end(ctx
);
2398 static bool trans_ixtlbx(DisasContext
*ctx
, arg_ixtlbx
*a
)
2400 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR
);
2401 #ifndef CONFIG_USER_ONLY
2407 form_gva(ctx
, &addr
, &ofs
, a
->b
, 0, 0, 0, a
->sp
, 0, false);
2408 reg
= load_gpr(ctx
, a
->r
);
2410 gen_helper_itlba(cpu_env
, addr
, reg
);
2412 gen_helper_itlbp(cpu_env
, addr
, reg
);
2415 /* Exit TB for TLB change if mmu is enabled. */
2416 if (ctx
->tb_flags
& PSW_C
) {
2417 ctx
->base
.is_jmp
= DISAS_IAQ_N_STALE
;
2419 return nullify_end(ctx
);
2423 static bool trans_pxtlbx(DisasContext
*ctx
, arg_pxtlbx
*a
)
2425 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR
);
2426 #ifndef CONFIG_USER_ONLY
2432 form_gva(ctx
, &addr
, &ofs
, a
->b
, a
->x
, 0, 0, a
->sp
, a
->m
, false);
2434 save_gpr(ctx
, a
->b
, ofs
);
2437 gen_helper_ptlbe(cpu_env
);
2439 gen_helper_ptlb(cpu_env
, addr
);
2442 /* Exit TB for TLB change if mmu is enabled. */
2443 if (ctx
->tb_flags
& PSW_C
) {
2444 ctx
->base
.is_jmp
= DISAS_IAQ_N_STALE
;
2446 return nullify_end(ctx
);
2451 * Implement the pcxl and pcxl2 Fast TLB Insert instructions.
2453 * https://parisc.wiki.kernel.org/images-parisc/a/a9/Pcxl2_ers.pdf
2454 * page 13-9 (195/206)
2456 static bool trans_ixtlbxf(DisasContext
*ctx
, arg_ixtlbxf
*a
)
2458 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR
);
2459 #ifndef CONFIG_USER_ONLY
2460 TCGv_tl addr
, atl
, stl
;
2467 * if (not (pcxl or pcxl2))
2468 * return gen_illegal(ctx);
2470 * Note for future: these are 32-bit systems; no hppa64.
2473 atl
= tcg_temp_new_tl();
2474 stl
= tcg_temp_new_tl();
2475 addr
= tcg_temp_new_tl();
2477 tcg_gen_ld32u_i64(stl
, cpu_env
,
2478 a
->data
? offsetof(CPUHPPAState
, cr
[CR_ISR
])
2479 : offsetof(CPUHPPAState
, cr
[CR_IIASQ
]));
2480 tcg_gen_ld32u_i64(atl
, cpu_env
,
2481 a
->data
? offsetof(CPUHPPAState
, cr
[CR_IOR
])
2482 : offsetof(CPUHPPAState
, cr
[CR_IIAOQ
]));
2483 tcg_gen_shli_i64(stl
, stl
, 32);
2484 tcg_gen_or_tl(addr
, atl
, stl
);
2486 reg
= load_gpr(ctx
, a
->r
);
2488 gen_helper_itlba(cpu_env
, addr
, reg
);
2490 gen_helper_itlbp(cpu_env
, addr
, reg
);
2493 /* Exit TB for TLB change if mmu is enabled. */
2494 if (ctx
->tb_flags
& PSW_C
) {
2495 ctx
->base
.is_jmp
= DISAS_IAQ_N_STALE
;
2497 return nullify_end(ctx
);
2501 static bool trans_lpa(DisasContext
*ctx
, arg_ldst
*a
)
2503 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR
);
2504 #ifndef CONFIG_USER_ONLY
2506 TCGv_reg ofs
, paddr
;
2510 form_gva(ctx
, &vaddr
, &ofs
, a
->b
, a
->x
, 0, 0, a
->sp
, a
->m
, false);
2512 paddr
= tcg_temp_new();
2513 gen_helper_lpa(paddr
, cpu_env
, vaddr
);
2515 /* Note that physical address result overrides base modification. */
2517 save_gpr(ctx
, a
->b
, ofs
);
2519 save_gpr(ctx
, a
->t
, paddr
);
2521 return nullify_end(ctx
);
2525 static bool trans_lci(DisasContext
*ctx
, arg_lci
*a
)
2527 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR
);
2529 /* The Coherence Index is an implementation-defined function of the
2530 physical address. Two addresses with the same CI have a coherent
2531 view of the cache. Our implementation is to return 0 for all,
2532 since the entire address space is coherent. */
2533 save_gpr(ctx
, a
->t
, tcg_constant_reg(0));
2535 cond_free(&ctx
->null_cond
);
2539 static bool trans_add(DisasContext
*ctx
, arg_rrr_cf_sh
*a
)
2541 return do_add_reg(ctx
, a
, false, false, false, false);
2544 static bool trans_add_l(DisasContext
*ctx
, arg_rrr_cf_sh
*a
)
2546 return do_add_reg(ctx
, a
, true, false, false, false);
2549 static bool trans_add_tsv(DisasContext
*ctx
, arg_rrr_cf_sh
*a
)
2551 return do_add_reg(ctx
, a
, false, true, false, false);
2554 static bool trans_add_c(DisasContext
*ctx
, arg_rrr_cf_sh
*a
)
2556 return do_add_reg(ctx
, a
, false, false, false, true);
2559 static bool trans_add_c_tsv(DisasContext
*ctx
, arg_rrr_cf_sh
*a
)
2561 return do_add_reg(ctx
, a
, false, true, false, true);
2564 static bool trans_sub(DisasContext
*ctx
, arg_rrr_cf
*a
)
2566 return do_sub_reg(ctx
, a
, false, false, false);
2569 static bool trans_sub_tsv(DisasContext
*ctx
, arg_rrr_cf
*a
)
2571 return do_sub_reg(ctx
, a
, true, false, false);
2574 static bool trans_sub_tc(DisasContext
*ctx
, arg_rrr_cf
*a
)
2576 return do_sub_reg(ctx
, a
, false, false, true);
2579 static bool trans_sub_tsv_tc(DisasContext
*ctx
, arg_rrr_cf
*a
)
2581 return do_sub_reg(ctx
, a
, true, false, true);
2584 static bool trans_sub_b(DisasContext
*ctx
, arg_rrr_cf
*a
)
2586 return do_sub_reg(ctx
, a
, false, true, false);
2589 static bool trans_sub_b_tsv(DisasContext
*ctx
, arg_rrr_cf
*a
)
2591 return do_sub_reg(ctx
, a
, true, true, false);
/* ANDCM: bitwise and-complement, via the generic logical-op path. */
static bool trans_andcm(DisasContext *ctx, arg_rrr_cf *a)
{
    return do_log_reg(ctx, a, tcg_gen_andc_reg);
}

/* AND: bitwise and. */
static bool trans_and(DisasContext *ctx, arg_rrr_cf *a)
{
    return do_log_reg(ctx, a, tcg_gen_and_reg);
}
/*
 * OR: bitwise or, but also the encoding of several pseudo-ops:
 * rt == 0 is NOP, r2 == 0 is COPY, and (in system mode) the QEMU
 * idle/offline extensions below.
 */
static bool trans_or(DisasContext *ctx, arg_rrr_cf *a)
{
    unsigned r2 = a->r2;
    unsigned r1 = a->r1;
    unsigned rt = a->t;

    if (rt == 0) {              /* NOP */
        cond_free(&ctx->null_cond);
        return true;
    }
    if (r2 == 0) {              /* COPY */
        if (r1 == 0) {
            TCGv_reg dest = dest_gpr(ctx, rt);
            tcg_gen_movi_reg(dest, 0);
            save_gpr(ctx, rt, dest);
        } else {
            save_gpr(ctx, rt, cpu_gr[r1]);
        }
        cond_free(&ctx->null_cond);
        return true;
    }
#ifndef CONFIG_USER_ONLY
    /* These are QEMU extensions and are nops in the real architecture:
     *
     * or %r10,%r10,%r10 -- idle loop; wait for interrupt
     * or %r31,%r31,%r31 -- death loop; offline cpu
     *                      currently implemented as idle.
     */
    if ((rt == 10 || rt == 31) && r1 == rt && r2 == rt) { /* PAUSE */
        /* No need to check for supervisor, as userland can only pause
           until the next timer interrupt.  */
        nullify_over(ctx);

        /* Advance the instruction queue.  */
        copy_iaoq_entry(cpu_iaoq_f, ctx->iaoq_b, cpu_iaoq_b);
        copy_iaoq_entry(cpu_iaoq_b, ctx->iaoq_n, ctx->iaoq_n_var);
        nullify_set(ctx, 0);

        /* Tell the qemu main loop to halt until this cpu has work.  */
        tcg_gen_st_i32(tcg_constant_i32(1), cpu_env,
                       offsetof(CPUState, halted) - offsetof(HPPACPU, env));
        gen_excp_1(EXCP_HALTED);
        ctx->base.is_jmp = DISAS_NORETURN;

        return nullify_end(ctx);
    }
#endif
    return do_log_reg(ctx, a, tcg_gen_or_reg);
}
/* XOR: bitwise exclusive-or, via the generic logical-op path. */
static bool trans_xor(DisasContext *ctx, arg_rrr_cf *a)
{
    return do_log_reg(ctx, a, tcg_gen_xor_reg);
}
/* COMCLR: compare r1 with r2, clear target, nullify on condition cf. */
static bool trans_cmpclr(DisasContext *ctx, arg_rrr_cf *a)
{
    TCGv_reg tcg_r1, tcg_r2;

    if (a->cf) {
        nullify_over(ctx);
    }
    tcg_r1 = load_gpr(ctx, a->r1);
    tcg_r2 = load_gpr(ctx, a->r2);
    do_cmpclr(ctx, a->t, tcg_r1, tcg_r2, a->cf);
    return nullify_end(ctx);
}

/* UXOR: unit xor — per-unit comparison via do_unit with xor. */
static bool trans_uxor(DisasContext *ctx, arg_rrr_cf *a)
{
    TCGv_reg tcg_r1, tcg_r2;

    if (a->cf) {
        nullify_over(ctx);
    }
    tcg_r1 = load_gpr(ctx, a->r1);
    tcg_r2 = load_gpr(ctx, a->r2);
    do_unit(ctx, a->t, tcg_r1, tcg_r2, a->cf, false, tcg_gen_xor_reg);
    return nullify_end(ctx);
}
/*
 * UADDCM[,TC]: unit add complement — r1 + ~r2, through the unit-condition
 * machinery.  is_tc selects the trap-on-condition form.
 */
static bool do_uaddcm(DisasContext *ctx, arg_rrr_cf *a, bool is_tc)
{
    TCGv_reg tcg_r1, tcg_r2, tmp;

    if (a->cf) {
        nullify_over(ctx);
    }
    tcg_r1 = load_gpr(ctx, a->r1);
    tcg_r2 = load_gpr(ctx, a->r2);
    tmp = get_temp(ctx);
    tcg_gen_not_reg(tmp, tcg_r2);
    do_unit(ctx, a->t, tcg_r1, tmp, a->cf, is_tc, tcg_gen_add_reg);
    return nullify_end(ctx);
}

static bool trans_uaddcm(DisasContext *ctx, arg_rrr_cf *a)
{
    return do_uaddcm(ctx, a, false);
}

static bool trans_uaddcm_tc(DisasContext *ctx, arg_rrr_cf *a)
{
    return do_uaddcm(ctx, a, true);
}
/*
 * DCOR / IDCOR: decimal correction.  Builds a per-BCD-digit correction
 * value (6 per digit) from the saved carry bits in PSW[CB] and applies
 * it via do_unit.  is_i selects the "intermediate" (add) form.
 * NOTE(review): the 0x11111111 digit mask covers 8 BCD digits, i.e.
 * 32-bit words only — matches the trans_ds "only correct for 32-bit"
 * caveat elsewhere in this file.
 */
static bool do_dcor(DisasContext *ctx, arg_rr_cf *a, bool is_i)
{
    TCGv_reg tmp;

    nullify_over(ctx);

    tmp = get_temp(ctx);
    /* Move the per-nibble carry bits down to bit 0 of each digit. */
    tcg_gen_shri_reg(tmp, cpu_psw_cb, 3);
    if (!is_i) {
        tcg_gen_not_reg(tmp, tmp);
    }
    tcg_gen_andi_reg(tmp, tmp, 0x11111111);
    tcg_gen_muli_reg(tmp, tmp, 6);
    do_unit(ctx, a->t, load_gpr(ctx, a->r), tmp, a->cf, false,
            is_i ? tcg_gen_add_reg : tcg_gen_sub_reg);
    return nullify_end(ctx);
}

static bool trans_dcor(DisasContext *ctx, arg_rr_cf *a)
{
    return do_dcor(ctx, a, false);
}

static bool trans_dcor_i(DisasContext *ctx, arg_rr_cf *a)
{
    return do_dcor(ctx, a, true);
}
/*
 * DS: divide step.  One step of the non-restoring division primitive:
 * shifts (R1,CB) left one bit, then adds or subtracts R2 according to
 * PSW[V], updating PSW[CB] and PSW[V] for the next step.
 */
static bool trans_ds(DisasContext *ctx, arg_rrr_cf *a)
{
    TCGv_reg dest, add1, add2, addc, zero, in1, in2;

    nullify_over(ctx);

    in1 = load_gpr(ctx, a->r1);
    in2 = load_gpr(ctx, a->r2);

    add1 = tcg_temp_new();
    add2 = tcg_temp_new();
    addc = tcg_temp_new();
    dest = tcg_temp_new();
    zero = tcg_constant_reg(0);

    /* Form R1 << 1 | PSW[CB]{8}.  */
    tcg_gen_add_reg(add1, in1, in1);
    tcg_gen_add_reg(add1, add1, cpu_psw_cb_msb);

    /* Add or subtract R2, depending on PSW[V].  Proper computation of
       carry{8} requires that we subtract via + ~R2 + 1, as described in
       the manual.  By extracting and masking V, we can produce the
       proper inputs to the addition without movcond.  */
    tcg_gen_sari_reg(addc, cpu_psw_v, TARGET_REGISTER_BITS - 1);
    tcg_gen_xor_reg(add2, in2, addc);
    tcg_gen_andi_reg(addc, addc, 1);
    /* ??? This is only correct for 32-bit.  */
    tcg_gen_add2_i32(dest, cpu_psw_cb_msb, add1, zero, add2, zero);
    tcg_gen_add2_i32(dest, cpu_psw_cb_msb, dest, cpu_psw_cb_msb,
                     addc, zero);

    /* Write back the result register.  */
    save_gpr(ctx, a->t, dest);

    /* Write back PSW[CB].  */
    tcg_gen_xor_reg(cpu_psw_cb, add1, add2);
    tcg_gen_xor_reg(cpu_psw_cb, cpu_psw_cb, dest);

    /* Write back PSW[V] for the division step.  */
    tcg_gen_neg_reg(cpu_psw_v, cpu_psw_cb_msb);
    tcg_gen_xor_reg(cpu_psw_v, cpu_psw_v, in2);

    /* Install the new nullification.  */
    if (a->cf) {
        TCGv_reg sv = NULL;
        if (cond_need_sv(a->cf >> 1)) {
            /* ??? The lshift is supposed to contribute to overflow.  */
            sv = do_add_sv(ctx, dest, add1, add2);
        }
        ctx->null_cond = do_cond(a->cf, dest, cpu_psw_cb_msb, sv);
    }

    return nullify_end(ctx);
}
/*
 * ADDI / SUBI immediate variants: dispatch to do_add_imm(ctx, a,
 * is_tsv, is_tc) and do_sub_imm(ctx, a, is_tsv).
 */
static bool trans_addi(DisasContext *ctx, arg_rri_cf *a)
{
    return do_add_imm(ctx, a, false, false);
}

/* ADDI,TSV: trap on signed overflow. */
static bool trans_addi_tsv(DisasContext *ctx, arg_rri_cf *a)
{
    return do_add_imm(ctx, a, true, false);
}

/* ADDI,TC: trap on condition. */
static bool trans_addi_tc(DisasContext *ctx, arg_rri_cf *a)
{
    return do_add_imm(ctx, a, false, true);
}

/* ADDI,TC,TSV: trap on condition or signed overflow. */
static bool trans_addi_tc_tsv(DisasContext *ctx, arg_rri_cf *a)
{
    return do_add_imm(ctx, a, true, true);
}

static bool trans_subi(DisasContext *ctx, arg_rri_cf *a)
{
    return do_sub_imm(ctx, a, false);
}

/* SUBI,TSV: trap on signed overflow. */
static bool trans_subi_tsv(DisasContext *ctx, arg_rri_cf *a)
{
    return do_sub_imm(ctx, a, true);
}
/* COMICLR: compare immediate with r, clear target, nullify on cf. */
static bool trans_cmpiclr(DisasContext *ctx, arg_rri_cf *a)
{
    TCGv_reg tcg_im, tcg_r2;

    if (a->cf) {
        nullify_over(ctx);
    }
    tcg_im = load_const(ctx, a->i);
    tcg_r2 = load_gpr(ctx, a->r);
    do_cmpclr(ctx, a->t, tcg_im, tcg_r2, a->cf);

    return nullify_end(ctx);
}
/*
 * LD*: general load.  64-bit accesses are illegal on a 32-bit register
 * machine; otherwise forward to do_load with optional index scaling and
 * target-endian memop.
 */
static bool trans_ld(DisasContext *ctx, arg_ldst *a)
{
    if (unlikely(TARGET_REGISTER_BITS == 32 && a->size > MO_32)) {
        return gen_illegal(ctx);
    } else {
        return do_load(ctx, a->t, a->b, a->x, a->scale ? a->size : 0,
                       a->disp, a->sp, a->m, a->size | MO_TE);
    }
}

/*
 * ST*: general store.  Stores never have an index register or scaling
 * in this encoding, hence the assert.
 */
static bool trans_st(DisasContext *ctx, arg_ldst *a)
{
    assert(a->x == 0 && a->scale == 0);
    if (unlikely(TARGET_REGISTER_BITS == 32 && a->size > MO_32)) {
        return gen_illegal(ctx);
    } else {
        return do_store(ctx, a->t, a->b, a->disp, a->sp, a->m,
                        a->size | MO_TE);
    }
}
/*
 * LDCW/LDCD: load-and-clear (the PA-RISC atomic primitive), implemented
 * as an aligned atomic exchange with zero.
 */
static bool trans_ldc(DisasContext *ctx, arg_ldst *a)
{
    MemOp mop = MO_TE | MO_ALIGN | a->size;
    TCGv_reg zero, dest, ofs;
    TCGv_tl addr;

    nullify_over(ctx);

    if (a->m) {
        /* Base register modification.  Make sure if RT == RB,
           we see the result of the load.  */
        dest = get_temp(ctx);
    } else {
        dest = dest_gpr(ctx, a->t);
    }

    form_gva(ctx, &addr, &ofs, a->b, a->x, a->scale ? a->size : 0,
             a->disp, a->sp, a->m, ctx->mmu_idx == MMU_PHYS_IDX);

    /*
     * For hppa1.1, LDCW is undefined unless aligned mod 16.
     * However actual hardware succeeds with aligned mod 4.
     * Detect this case and log a GUEST_ERROR.
     *
     * TODO: HPPA64 relaxes the over-alignment requirement
     * with the ,co completer.
     */
    gen_helper_ldc_check(addr);

    zero = tcg_constant_reg(0);
    tcg_gen_atomic_xchg_reg(dest, addr, zero, ctx->mmu_idx, mop);

    if (a->m) {
        save_gpr(ctx, a->b, ofs);
    }
    save_gpr(ctx, a->t, dest);

    return nullify_end(ctx);
}
/*
 * STBY: store bytes — partial-word store used for unaligned copies.
 * The "begin"/"end" halves and the parallel (CF_PARALLEL) variants are
 * implemented in helpers; base modification rounds the offset down to a
 * word boundary.
 */
static bool trans_stby(DisasContext *ctx, arg_stby *a)
{
    TCGv_reg ofs, val;
    TCGv_tl addr;

    nullify_over(ctx);

    form_gva(ctx, &addr, &ofs, a->b, 0, 0, a->disp, a->sp, a->m,
             ctx->mmu_idx == MMU_PHYS_IDX);
    val = load_gpr(ctx, a->r);
    if (a->e) {
        if (tb_cflags(ctx->base.tb) & CF_PARALLEL) {
            gen_helper_stby_e_parallel(cpu_env, addr, val);
        } else {
            gen_helper_stby_e(cpu_env, addr, val);
        }
    } else {
        if (tb_cflags(ctx->base.tb) & CF_PARALLEL) {
            gen_helper_stby_b_parallel(cpu_env, addr, val);
        } else {
            gen_helper_stby_b(cpu_env, addr, val);
        }
    }
    if (a->m) {
        tcg_gen_andi_reg(ofs, ofs, ~3);
        save_gpr(ctx, a->b, ofs);
    }

    return nullify_end(ctx);
}
/*
 * LDWA/LDDA: load absolute — privileged; performs the load with the
 * MMU temporarily switched to physical addressing.
 */
static bool trans_lda(DisasContext *ctx, arg_ldst *a)
{
    int hold_mmu_idx = ctx->mmu_idx;

    CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
    ctx->mmu_idx = MMU_PHYS_IDX;
    trans_ld(ctx, a);
    ctx->mmu_idx = hold_mmu_idx;
    return true;
}

/* STWA/STDA: store absolute — privileged physical-address store. */
static bool trans_sta(DisasContext *ctx, arg_ldst *a)
{
    int hold_mmu_idx = ctx->mmu_idx;

    CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
    ctx->mmu_idx = MMU_PHYS_IDX;
    trans_st(ctx, a);
    ctx->mmu_idx = hold_mmu_idx;
    return true;
}
/* LDIL: load the (left/high) immediate into rt. */
static bool trans_ldil(DisasContext *ctx, arg_ldil *a)
{
    TCGv_reg tcg_rt = dest_gpr(ctx, a->t);

    tcg_gen_movi_reg(tcg_rt, a->i);
    save_gpr(ctx, a->t, tcg_rt);
    cond_free(&ctx->null_cond);
    return true;
}

/* ADDIL: add the (left/high) immediate to r, result always in %r1. */
static bool trans_addil(DisasContext *ctx, arg_addil *a)
{
    TCGv_reg tcg_rt = load_gpr(ctx, a->r);
    TCGv_reg tcg_r1 = dest_gpr(ctx, 1);

    tcg_gen_addi_reg(tcg_r1, tcg_rt, a->i);
    save_gpr(ctx, 1, tcg_r1);
    cond_free(&ctx->null_cond);
    return true;
}

/* LDO: load offset (address arithmetic, no memory access). */
static bool trans_ldo(DisasContext *ctx, arg_ldo *a)
{
    TCGv_reg tcg_rt = dest_gpr(ctx, a->t);

    /* Special case rb == 0, for the LDI pseudo-op.
       The COPY pseudo-op is handled for free within tcg_gen_addi_tl.  */
    if (a->b == 0) {
        tcg_gen_movi_reg(tcg_rt, a->i);
    } else {
        tcg_gen_addi_reg(tcg_rt, cpu_gr[a->b], a->i);
    }
    save_gpr(ctx, a->t, tcg_rt);
    cond_free(&ctx->null_cond);
    return true;
}
/*
 * Common code for COMB/COMIB: compare in1 with gpr[r] by subtraction,
 * then conditionally branch on (c, f) with optional nullification n.
 */
static bool do_cmpb(DisasContext *ctx, unsigned r, TCGv_reg in1,
                    unsigned c, unsigned f, unsigned n, int disp)
{
    TCGv_reg dest, in2, sv;
    DisasCond cond;

    in2 = load_gpr(ctx, r);
    dest = get_temp(ctx);

    tcg_gen_sub_reg(dest, in1, in2);

    sv = NULL;
    if (cond_need_sv(c)) {
        sv = do_sub_sv(ctx, dest, in1, in2);
    }

    cond = do_sub_cond(c * 2 + f, dest, in1, in2, sv);
    return do_cbranch(ctx, disp, n, &cond);
}

/* COMB: compare register and branch. */
static bool trans_cmpb(DisasContext *ctx, arg_cmpb *a)
{
    nullify_over(ctx);
    return do_cmpb(ctx, a->r2, load_gpr(ctx, a->r1),
                   a->c, a->f, a->n, a->disp);
}

/* COMIB: compare immediate and branch. */
static bool trans_cmpbi(DisasContext *ctx, arg_cmpbi *a)
{
    nullify_over(ctx);
    return do_cmpb(ctx, a->r, load_const(ctx, a->i),
                   a->c, a->f, a->n, a->disp);
}
/*
 * Common code for ADDB/ADDIB: add in1 to gpr[r], write the sum back to
 * gpr[r], and conditionally branch on (c, f).  The carry and signed
 * overflow are only materialized when the condition requires them.
 */
static bool do_addb(DisasContext *ctx, unsigned r, TCGv_reg in1,
                    unsigned c, unsigned f, unsigned n, int disp)
{
    TCGv_reg dest, in2, sv, cb_msb;
    DisasCond cond;

    in2 = load_gpr(ctx, r);
    dest = tcg_temp_new();
    sv = NULL;
    cb_msb = NULL;

    if (cond_need_cb(c)) {
        cb_msb = get_temp(ctx);
        tcg_gen_movi_reg(cb_msb, 0);
        tcg_gen_add2_reg(dest, cb_msb, in1, cb_msb, in2, cb_msb);
    } else {
        tcg_gen_add_reg(dest, in1, in2);
    }
    if (cond_need_sv(c)) {
        sv = do_add_sv(ctx, dest, in1, in2);
    }

    cond = do_cond(c * 2 + f, dest, cb_msb, sv);
    save_gpr(ctx, r, dest);
    return do_cbranch(ctx, disp, n, &cond);
}

/* ADDB: add register and branch. */
static bool trans_addb(DisasContext *ctx, arg_addb *a)
{
    nullify_over(ctx);
    return do_addb(ctx, a->r2, load_gpr(ctx, a->r1),
                   a->c, a->f, a->n, a->disp);
}

/* ADDIB: add immediate and branch. */
static bool trans_addbi(DisasContext *ctx, arg_addbi *a)
{
    nullify_over(ctx);
    return do_addb(ctx, a->r, load_const(ctx, a->i),
                   a->c, a->f, a->n, a->disp);
}
/*
 * BB,SAR: branch on the bit of r selected by SAR.  Shifting the bit
 * into the sign position turns the bit test into a sign test.
 */
static bool trans_bb_sar(DisasContext *ctx, arg_bb_sar *a)
{
    TCGv_reg tmp, tcg_r;
    DisasCond cond;

    nullify_over(ctx);

    tmp = tcg_temp_new();
    tcg_r = load_gpr(ctx, a->r);
    tcg_gen_shl_reg(tmp, tcg_r, cpu_sar);

    cond = cond_make_0(a->c ? TCG_COND_GE : TCG_COND_LT, tmp);
    return do_cbranch(ctx, a->disp, a->n, &cond);
}

/* BB: branch on the fixed bit position p of r, same sign-test trick. */
static bool trans_bb_imm(DisasContext *ctx, arg_bb_imm *a)
{
    TCGv_reg tmp, tcg_r;
    DisasCond cond;

    nullify_over(ctx);

    tmp = tcg_temp_new();
    tcg_r = load_gpr(ctx, a->r);
    tcg_gen_shli_reg(tmp, tcg_r, a->p);

    cond = cond_make_0(a->c ? TCG_COND_GE : TCG_COND_LT, tmp);
    return do_cbranch(ctx, a->disp, a->n, &cond);
}
/* MOVB: copy r1 to r2 and branch on the shift/extract/deposit condition. */
static bool trans_movb(DisasContext *ctx, arg_movb *a)
{
    TCGv_reg dest;
    DisasCond cond;

    nullify_over(ctx);

    dest = dest_gpr(ctx, a->r2);
    if (a->r1 == 0) {
        tcg_gen_movi_reg(dest, 0);
    } else {
        tcg_gen_mov_reg(dest, cpu_gr[a->r1]);
    }

    cond = do_sed_cond(a->c, dest);
    return do_cbranch(ctx, a->disp, a->n, &cond);
}

/* MOVIB: load immediate into r and branch on the SED condition. */
static bool trans_movbi(DisasContext *ctx, arg_movbi *a)
{
    TCGv_reg dest;
    DisasCond cond;

    nullify_over(ctx);

    dest = dest_gpr(ctx, a->r);
    tcg_gen_movi_reg(dest, a->i);

    cond = do_sed_cond(a->c, dest);
    return do_cbranch(ctx, a->disp, a->n, &cond);
}
/*
 * SHRPW (variable): shift the 64-bit pair r1:r2 right by SAR, keeping
 * the low 32 bits.  Special cases: r1 == 0 is a plain logical shift of
 * r2; r1 == r2 is a 32-bit rotate.
 */
static bool trans_shrpw_sar(DisasContext *ctx, arg_shrpw_sar *a)
{
    TCGv_reg dest;

    if (a->c) {
        nullify_over(ctx);
    }

    dest = dest_gpr(ctx, a->t);
    if (a->r1 == 0) {
        tcg_gen_ext32u_reg(dest, load_gpr(ctx, a->r2));
        tcg_gen_shr_reg(dest, dest, cpu_sar);
    } else if (a->r1 == a->r2) {
        TCGv_i32 t32 = tcg_temp_new_i32();
        tcg_gen_trunc_reg_i32(t32, load_gpr(ctx, a->r2));
        tcg_gen_rotr_i32(t32, t32, cpu_sar);
        tcg_gen_extu_i32_reg(dest, t32);
    } else {
        TCGv_i64 t = tcg_temp_new_i64();
        TCGv_i64 s = tcg_temp_new_i64();

        tcg_gen_concat_reg_i64(t, load_gpr(ctx, a->r2),
                               load_gpr(ctx, a->r1));
        tcg_gen_extu_reg_i64(s, cpu_sar);
        tcg_gen_shr_i64(t, t, s);
        tcg_gen_trunc_i64_reg(dest, t);
    }
    save_gpr(ctx, a->t, dest);

    /* Install the new nullification.  */
    cond_free(&ctx->null_cond);
    if (a->c) {
        ctx->null_cond = do_sed_cond(a->c, dest);
    }
    return nullify_end(ctx);
}
/*
 * SHRPW (fixed): shift the pair r1:r2 right by the constant 31 - cpos.
 * Uses extract/extract2 where possible, rotate for r1 == r2, and a full
 * 64-bit shift otherwise.
 */
static bool trans_shrpw_imm(DisasContext *ctx, arg_shrpw_imm *a)
{
    unsigned sa = 31 - a->cpos;
    TCGv_reg dest, t2;

    if (a->c) {
        nullify_over(ctx);
    }

    dest = dest_gpr(ctx, a->t);
    t2 = load_gpr(ctx, a->r2);
    if (a->r1 == 0) {
        tcg_gen_extract_reg(dest, t2, sa, 32 - sa);
    } else if (TARGET_REGISTER_BITS == 32) {
        tcg_gen_extract2_reg(dest, t2, cpu_gr[a->r1], sa);
    } else if (a->r1 == a->r2) {
        TCGv_i32 t32 = tcg_temp_new_i32();
        tcg_gen_trunc_reg_i32(t32, t2);
        tcg_gen_rotri_i32(t32, t32, sa);
        tcg_gen_extu_i32_reg(dest, t32);
    } else {
        TCGv_i64 t64 = tcg_temp_new_i64();
        tcg_gen_concat_reg_i64(t64, t2, cpu_gr[a->r1]);
        tcg_gen_shri_i64(t64, t64, sa);
        tcg_gen_trunc_i64_reg(dest, t64);
    }
    save_gpr(ctx, a->t, dest);

    /* Install the new nullification.  */
    cond_free(&ctx->null_cond);
    if (a->c) {
        ctx->null_cond = do_sed_cond(a->c, dest);
    }
    return nullify_end(ctx);
}
/*
 * EXTRW (variable): extract a len-bit field from r at the bit position
 * named by SAR, sign- or zero-extending per a->se.
 */
static bool trans_extrw_sar(DisasContext *ctx, arg_extrw_sar *a)
{
    unsigned len = 32 - a->clen;
    TCGv_reg dest, src, tmp;

    if (a->c) {
        nullify_over(ctx);
    }

    dest = dest_gpr(ctx, a->t);
    src = load_gpr(ctx, a->r);
    tmp = tcg_temp_new();

    /* Recall that SAR is using big-endian bit numbering.  */
    tcg_gen_xori_reg(tmp, cpu_sar, TARGET_REGISTER_BITS - 1);
    if (a->se) {
        tcg_gen_sar_reg(dest, src, tmp);
        tcg_gen_sextract_reg(dest, dest, 0, len);
    } else {
        tcg_gen_shr_reg(dest, src, tmp);
        tcg_gen_extract_reg(dest, dest, 0, len);
    }
    save_gpr(ctx, a->t, dest);

    /* Install the new nullification.  */
    cond_free(&ctx->null_cond);
    if (a->c) {
        ctx->null_cond = do_sed_cond(a->c, dest);
    }
    return nullify_end(ctx);
}
/*
 * EXTRW (fixed): extract a len-bit field from r at bit position
 * 31 - pos, sign- or zero-extending per a->se.
 */
static bool trans_extrw_imm(DisasContext *ctx, arg_extrw_imm *a)
{
    unsigned len = 32 - a->clen;
    unsigned cpos = 31 - a->pos;
    TCGv_reg dest, src;

    if (a->c) {
        nullify_over(ctx);
    }

    dest = dest_gpr(ctx, a->t);
    src = load_gpr(ctx, a->r);
    if (a->se) {
        tcg_gen_sextract_reg(dest, src, cpos, len);
    } else {
        tcg_gen_extract_reg(dest, src, cpos, len);
    }
    save_gpr(ctx, a->t, dest);

    /* Install the new nullification.  */
    cond_free(&ctx->null_cond);
    if (a->c) {
        ctx->null_cond = do_sed_cond(a->c, dest);
    }
    return nullify_end(ctx);
}
/*
 * DEPWI (fixed): deposit the 5-bit immediate into field [cpos, len) of
 * rt.  The deposit is folded at translate time into an AND/OR mask pair
 * (mask1 clears bits that must be 0, mask0 sets bits that must be 1).
 */
static bool trans_depwi_imm(DisasContext *ctx, arg_depwi_imm *a)
{
    unsigned len = 32 - a->clen;
    target_sreg mask0, mask1;
    TCGv_reg dest;

    if (a->c) {
        nullify_over(ctx);
    }
    /* Clamp a field that would run past bit 31. */
    if (a->cpos + len > 32) {
        len = 32 - a->cpos;
    }

    dest = dest_gpr(ctx, a->t);
    mask0 = deposit64(0, a->cpos, len, a->i);
    mask1 = deposit64(-1, a->cpos, len, a->i);

    if (a->nz) {
        TCGv_reg src = load_gpr(ctx, a->t);
        if (mask1 != -1) {
            tcg_gen_andi_reg(dest, src, mask1);
            src = dest;
        }
        if (mask0 != 0) {
            tcg_gen_ori_reg(dest, src, mask0);
        }
    } else {
        tcg_gen_movi_reg(dest, mask0);
    }
    save_gpr(ctx, a->t, dest);

    /* Install the new nullification.  */
    cond_free(&ctx->null_cond);
    if (a->c) {
        ctx->null_cond = do_sed_cond(a->c, dest);
    }
    return nullify_end(ctx);
}
/*
 * DEPW (fixed): deposit register r into field [cpos, len) of rt.
 * With the nz completer the old rt bits outside the field are kept;
 * otherwise they are zeroed (deposit_z).
 */
static bool trans_depw_imm(DisasContext *ctx, arg_depw_imm *a)
{
    unsigned rs = a->nz ? a->t : 0;
    unsigned len = 32 - a->clen;
    TCGv_reg dest, val;

    if (a->c) {
        nullify_over(ctx);
    }
    /* Clamp a field that would run past bit 31. */
    if (a->cpos + len > 32) {
        len = 32 - a->cpos;
    }

    dest = dest_gpr(ctx, a->t);
    val = load_gpr(ctx, a->r);
    if (rs == 0) {
        tcg_gen_deposit_z_reg(dest, val, a->cpos, len);
    } else {
        tcg_gen_deposit_reg(dest, cpu_gr[rs], val, a->cpos, len);
    }
    save_gpr(ctx, a->t, dest);

    /* Install the new nullification.  */
    cond_free(&ctx->null_cond);
    if (a->c) {
        ctx->null_cond = do_sed_cond(a->c, dest);
    }
    return nullify_end(ctx);
}
/*
 * Common code for DEPW/DEPWI with a variable (SAR) position: build a
 * mask of the len low bits of val, shift mask and value into place,
 * and merge into gpr[rs] (or deposit into zero when nz is clear).
 */
static bool do_depw_sar(DisasContext *ctx, unsigned rt, unsigned c,
                        unsigned nz, unsigned clen, TCGv_reg val)
{
    unsigned rs = nz ? rt : 0;
    unsigned len = 32 - clen;
    TCGv_reg mask, tmp, shift, dest;
    unsigned msb = 1U << (len - 1);

    dest = dest_gpr(ctx, rt);
    shift = tcg_temp_new();
    tmp = tcg_temp_new();

    /* Convert big-endian bit numbering in SAR to left-shift.  */
    tcg_gen_xori_reg(shift, cpu_sar, TARGET_REGISTER_BITS - 1);

    mask = tcg_temp_new();
    tcg_gen_movi_reg(mask, msb + (msb - 1));
    tcg_gen_and_reg(tmp, val, mask);
    if (rs) {
        tcg_gen_shl_reg(mask, mask, shift);
        tcg_gen_shl_reg(tmp, tmp, shift);
        tcg_gen_andc_reg(dest, cpu_gr[rs], mask);
        tcg_gen_or_reg(dest, dest, tmp);
    } else {
        tcg_gen_shl_reg(dest, tmp, shift);
    }
    save_gpr(ctx, rt, dest);

    /* Install the new nullification.  */
    cond_free(&ctx->null_cond);
    if (c) {
        ctx->null_cond = do_sed_cond(c, dest);
    }
    return nullify_end(ctx);
}

/* DEPW,SAR with a register source. */
static bool trans_depw_sar(DisasContext *ctx, arg_depw_sar *a)
{
    if (a->c) {
        nullify_over(ctx);
    }
    return do_depw_sar(ctx, a->t, a->c, a->nz, a->clen,
                       load_gpr(ctx, a->r));
}

/* DEPWI,SAR with an immediate source. */
static bool trans_depwi_sar(DisasContext *ctx, arg_depwi_sar *a)
{
    if (a->c) {
        nullify_over(ctx);
    }
    return do_depw_sar(ctx, a->t, a->c, a->nz, a->clen,
                       load_const(ctx, a->i));
}
/*
 * BE/BLE: branch external — interspace branch to disp(sp, b).  In user
 * mode spaces are not implemented, so it degenerates to an indirect
 * branch; in system mode the space registers and the full front/back
 * instruction queue are updated.
 */
static bool trans_be(DisasContext *ctx, arg_be *a)
{
    TCGv_reg tmp;

#ifdef CONFIG_USER_ONLY
    /* ??? It seems like there should be a good way of using
       "be disp(sr2, r0)", the canonical gateway entry mechanism
       to our advantage.  But that appears to be inconvenient to
       manage along side branch delay slots.  Therefore we handle
       entry into the gateway page via absolute address.  */
    /* Since we don't implement spaces, just branch.  Do notice the special
       case of "be disp(*,r0)" using a direct branch to disp, so that we can
       goto_tb to the TB containing the syscall.  */
    if (a->b == 0) {
        return do_dbranch(ctx, a->disp, a->l, a->n);
    }
#else
    nullify_over(ctx);
#endif

    tmp = get_temp(ctx);
    tcg_gen_addi_reg(tmp, load_gpr(ctx, a->b), a->disp);
    tmp = do_ibranch_priv(ctx, tmp);

#ifdef CONFIG_USER_ONLY
    return do_ibranch(ctx, tmp, a->l, a->n);
#else
    TCGv_i64 new_spc = tcg_temp_new_i64();

    load_spr(ctx, new_spc, a->sp);
    if (a->l) {
        copy_iaoq_entry(cpu_gr[31], ctx->iaoq_n, ctx->iaoq_n_var);
        tcg_gen_mov_i64(cpu_sr[0], cpu_iasq_f);
    }
    if (a->n && use_nullify_skip(ctx)) {
        tcg_gen_mov_reg(cpu_iaoq_f, tmp);
        tcg_gen_addi_reg(cpu_iaoq_b, cpu_iaoq_f, 4);
        tcg_gen_mov_i64(cpu_iasq_f, new_spc);
        tcg_gen_mov_i64(cpu_iasq_b, cpu_iasq_f);
    } else {
        copy_iaoq_entry(cpu_iaoq_f, ctx->iaoq_b, cpu_iaoq_b);
        if (ctx->iaoq_b == -1) {
            tcg_gen_mov_i64(cpu_iasq_f, cpu_iasq_b);
        }
        tcg_gen_mov_reg(cpu_iaoq_b, tmp);
        tcg_gen_mov_i64(cpu_iasq_b, new_spc);
        nullify_set(ctx, a->n);
    }
    tcg_gen_lookup_and_goto_ptr();
    ctx->base.is_jmp = DISAS_NORETURN;
    return nullify_end(ctx);
#endif
}
/* B,L: pc-relative branch with optional link. */
static bool trans_bl(DisasContext *ctx, arg_bl *a)
{
    return do_dbranch(ctx, iaoq_dest(ctx, a->disp), a->l, a->n);
}
/*
 * B,GATE: gateway branch — the controlled privilege-promotion entry.
 * The new privilege is taken from the gateway page's access-rights
 * type; the old privilege is recorded in the low bits of the link.
 */
static bool trans_b_gate(DisasContext *ctx, arg_b_gate *a)
{
    target_ureg dest = iaoq_dest(ctx, a->disp);

    nullify_over(ctx);

    /* Make sure the caller hasn't done something weird with the queue.
     * ??? This is not quite the same as the PSW[B] bit, which would be
     * expensive to track.  Real hardware will trap for
     *    b  gateway
     *    b  gateway+4  (in delay slot of first branch)
     * However, checking for a non-sequential instruction queue *will*
     * diagnose the security hole
     *    b  gateway
     *    b  evil
     * in which instructions at evil would run with increased privs.
     */
    if (ctx->iaoq_b == -1 || ctx->iaoq_b != ctx->iaoq_f + 4) {
        return gen_illegal(ctx);
    }

#ifndef CONFIG_USER_ONLY
    if (ctx->tb_flags & PSW_C) {
        CPUHPPAState *env = ctx->cs->env_ptr;
        int type = hppa_artype_for_page(env, ctx->base.pc_next);
        /* If we could not find a TLB entry, then we need to generate an
           ITLB miss exception so the kernel will provide it.
           The resulting TLB fill operation will invalidate this TB and
           we will re-translate, at which point we *will* be able to find
           the TLB entry and determine if this is in fact a gateway page.  */
        if (type < 0) {
            gen_excp(ctx, EXCP_ITLB_MISS);
            return true;
        }
        /* No change for non-gateway pages or for priv decrease.  */
        if (type >= 4 && type - 4 < ctx->privilege) {
            dest = deposit32(dest, 0, 2, type - 4);
        }
    } else {
        dest &= -4;  /* priv = 0 */
    }
#endif

    if (a->l) {
        /* Record the current privilege level in the low bits of the
           link value, masking first when we are not already at priv 3. */
        TCGv_reg tmp = dest_gpr(ctx, a->l);
        if (ctx->privilege < 3) {
            tcg_gen_andi_reg(tmp, tmp, -4);
        }
        tcg_gen_ori_reg(tmp, tmp, ctx->privilege);
        save_gpr(ctx, a->l, tmp);
    }

    return do_dbranch(ctx, dest, 0, a->n);
}
/*
 * BLR: branch and link register — branch to iaoq_f + 8 + (x << 3)
 * with optional link.  x == 0 degenerates to "load PC+8".
 */
static bool trans_blr(DisasContext *ctx, arg_blr *a)
{
    if (a->x) {
        TCGv_reg tmp = get_temp(ctx);
        tcg_gen_shli_reg(tmp, load_gpr(ctx, a->x), 3);
        tcg_gen_addi_reg(tmp, tmp, ctx->iaoq_f + 8);
        /* The computation here never changes privilege level.  */
        return do_ibranch(ctx, tmp, a->l, a->n);
    } else {
        /* BLR R0,RX is a good way to load PC+8 into RX.  */
        return do_dbranch(ctx, ctx->iaoq_f + 8, a->l, a->n);
    }
}

/*
 * BV: branch vectored — indirect branch to b + (x << 3), subject to
 * the privilege check in do_ibranch_priv.
 */
static bool trans_bv(DisasContext *ctx, arg_bv *a)
{
    TCGv_reg dest;

    if (a->x == 0) {
        dest = load_gpr(ctx, a->b);
    } else {
        dest = get_temp(ctx);
        tcg_gen_shli_reg(dest, load_gpr(ctx, a->x), 3);
        tcg_gen_add_reg(dest, dest, load_gpr(ctx, a->b));
    }
    dest = do_ibranch_priv(ctx, dest);
    return do_ibranch(ctx, dest, 0, a->n);
}
/*
 * BVE: branch vectored external — indirect interspace branch.  In user
 * mode it is an ordinary indirect branch; in system mode the space for
 * the branch target is derived from the offset via space_select.
 */
static bool trans_bve(DisasContext *ctx, arg_bve *a)
{
    TCGv_reg dest;

#ifdef CONFIG_USER_ONLY
    dest = do_ibranch_priv(ctx, load_gpr(ctx, a->b));
    return do_ibranch(ctx, dest, a->l, a->n);
#else
    nullify_over(ctx);
    dest = do_ibranch_priv(ctx, load_gpr(ctx, a->b));

    copy_iaoq_entry(cpu_iaoq_f, ctx->iaoq_b, cpu_iaoq_b);
    if (ctx->iaoq_b == -1) {
        tcg_gen_mov_i64(cpu_iasq_f, cpu_iasq_b);
    }
    copy_iaoq_entry(cpu_iaoq_b, -1, dest);
    tcg_gen_mov_i64(cpu_iasq_b, space_select(ctx, 0, dest));
    if (a->l) {
        copy_iaoq_entry(cpu_gr[a->l], ctx->iaoq_n, ctx->iaoq_n_var);
    }
    nullify_set(ctx, a->n);
    tcg_gen_lookup_and_goto_ptr();
    ctx->base.is_jmp = DISAS_NORETURN;
    return nullify_end(ctx);
#endif
}
/*
 * Floating point class 0/1 operations.  The gen_* helpers below have
 * the (dst, env, src) signature expected by do_fop_wew/do_fop_ded even
 * when env is unused; the trans_* wrappers merely select the helper.
 */

/* FCPY single: plain register copy. */
static void gen_fcpy_f(TCGv_i32 dst, TCGv_env unused, TCGv_i32 src)
{
    tcg_gen_mov_i32(dst, src);
}

/* FID: floating-point identify — deposit a CPU model id into fr0. */
static bool trans_fid_f(DisasContext *ctx, arg_fid_f *a)
{
    uint64_t ret;

    if (TARGET_REGISTER_BITS == 64) {
        ret = 0x13080000000000ULL; /* PA8700 (PCX-W2) */
    } else {
        ret = 0x0f080000000000ULL; /* PA7300LC (PCX-L2) */
    }

    nullify_over(ctx);
    save_frd(0, tcg_constant_i64(ret));
    return nullify_end(ctx);
}

static bool trans_fcpy_f(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_wew(ctx, a->t, a->r, gen_fcpy_f);
}

/* FCPY double. */
static void gen_fcpy_d(TCGv_i64 dst, TCGv_env unused, TCGv_i64 src)
{
    tcg_gen_mov_i64(dst, src);
}

static bool trans_fcpy_d(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_ded(ctx, a->t, a->r, gen_fcpy_d);
}

/* FABS single: clear the sign bit. */
static void gen_fabs_f(TCGv_i32 dst, TCGv_env unused, TCGv_i32 src)
{
    tcg_gen_andi_i32(dst, src, INT32_MAX);
}

static bool trans_fabs_f(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_wew(ctx, a->t, a->r, gen_fabs_f);
}

/* FABS double. */
static void gen_fabs_d(TCGv_i64 dst, TCGv_env unused, TCGv_i64 src)
{
    tcg_gen_andi_i64(dst, src, INT64_MAX);
}

static bool trans_fabs_d(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_ded(ctx, a->t, a->r, gen_fabs_d);
}

/* FSQRT: square root, in helpers (raises FP exceptions). */
static bool trans_fsqrt_f(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_wew(ctx, a->t, a->r, gen_helper_fsqrt_s);
}

static bool trans_fsqrt_d(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_ded(ctx, a->t, a->r, gen_helper_fsqrt_d);
}

/* FRND: round to integral value. */
static bool trans_frnd_f(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_wew(ctx, a->t, a->r, gen_helper_frnd_s);
}

static bool trans_frnd_d(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_ded(ctx, a->t, a->r, gen_helper_frnd_d);
}

/* FNEG single: flip the sign bit. */
static void gen_fneg_f(TCGv_i32 dst, TCGv_env unused, TCGv_i32 src)
{
    tcg_gen_xori_i32(dst, src, INT32_MIN);
}

static bool trans_fneg_f(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_wew(ctx, a->t, a->r, gen_fneg_f);
}

/* FNEG double. */
static void gen_fneg_d(TCGv_i64 dst, TCGv_env unused, TCGv_i64 src)
{
    tcg_gen_xori_i64(dst, src, INT64_MIN);
}

static bool trans_fneg_d(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_ded(ctx, a->t, a->r, gen_fneg_d);
}

/* FNEGABS single: force the sign bit on. */
static void gen_fnegabs_f(TCGv_i32 dst, TCGv_env unused, TCGv_i32 src)
{
    tcg_gen_ori_i32(dst, src, INT32_MIN);
}

static bool trans_fnegabs_f(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_wew(ctx, a->t, a->r, gen_fnegabs_f);
}

/* FNEGABS double. */
static void gen_fnegabs_d(TCGv_i64 dst, TCGv_env unused, TCGv_i64 src)
{
    tcg_gen_ori_i64(dst, src, INT64_MIN);
}

static bool trans_fnegabs_d(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_ded(ctx, a->t, a->r, gen_fnegabs_d);
}
/*
 * FCNV: floating-point conversions.  Naming: f/d are single/double
 * float, w/q (dw) are 32/64-bit signed ints, uw/uq unsigned; the _t_
 * forms truncate toward zero instead of using the rounding mode.
 * do_fop_{wew,ded,wed,dew} select the operand widths.
 */
static bool trans_fcnv_d_f(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_d_s);
}

static bool trans_fcnv_f_d(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_s_d);
}

static bool trans_fcnv_w_f(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_wew(ctx, a->t, a->r, gen_helper_fcnv_w_s);
}

static bool trans_fcnv_q_f(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_dw_s);
}

static bool trans_fcnv_w_d(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_w_d);
}

static bool trans_fcnv_q_d(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_ded(ctx, a->t, a->r, gen_helper_fcnv_dw_d);
}

static bool trans_fcnv_f_w(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_wew(ctx, a->t, a->r, gen_helper_fcnv_s_w);
}

static bool trans_fcnv_d_w(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_d_w);
}

static bool trans_fcnv_f_q(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_s_dw);
}

static bool trans_fcnv_d_q(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_ded(ctx, a->t, a->r, gen_helper_fcnv_d_dw);
}

static bool trans_fcnv_t_f_w(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_wew(ctx, a->t, a->r, gen_helper_fcnv_t_s_w);
}

static bool trans_fcnv_t_d_w(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_t_d_w);
}

static bool trans_fcnv_t_f_q(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_t_s_dw);
}

static bool trans_fcnv_t_d_q(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_ded(ctx, a->t, a->r, gen_helper_fcnv_t_d_dw);
}

static bool trans_fcnv_uw_f(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_wew(ctx, a->t, a->r, gen_helper_fcnv_uw_s);
}

static bool trans_fcnv_uq_f(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_udw_s);
}

static bool trans_fcnv_uw_d(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_uw_d);
}

static bool trans_fcnv_uq_d(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_ded(ctx, a->t, a->r, gen_helper_fcnv_udw_d);
}

static bool trans_fcnv_f_uw(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_wew(ctx, a->t, a->r, gen_helper_fcnv_s_uw);
}

static bool trans_fcnv_d_uw(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_d_uw);
}

static bool trans_fcnv_f_uq(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_s_udw);
}

static bool trans_fcnv_d_uq(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_ded(ctx, a->t, a->r, gen_helper_fcnv_d_udw);
}

static bool trans_fcnv_t_f_uw(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_wew(ctx, a->t, a->r, gen_helper_fcnv_t_s_uw);
}

static bool trans_fcnv_t_d_uw(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_t_d_uw);
}

static bool trans_fcnv_t_f_uq(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_t_s_udw);
}

static bool trans_fcnv_t_d_uq(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_ded(ctx, a->t, a->r, gen_helper_fcnv_t_d_udw);
}
/*
 * FCMP single: compare r1 with r2 under condition c, recording the
 * result in status-flag queue slot y (all done in the helper).
 */
static bool trans_fcmp_f(DisasContext *ctx, arg_fclass2 *a)
{
    TCGv_i32 ta, tb, tc, ty;

    nullify_over(ctx);

    ta = load_frw0_i32(a->r1);
    tb = load_frw0_i32(a->r2);
    ty = tcg_constant_i32(a->y);
    tc = tcg_constant_i32(a->c);

    gen_helper_fcmp_s(cpu_env, ta, tb, ty, tc);

    return nullify_end(ctx);
}

/* FCMP double. */
static bool trans_fcmp_d(DisasContext *ctx, arg_fclass2 *a)
{
    TCGv_i64 ta, tb;
    TCGv_i32 tc, ty;

    nullify_over(ctx);

    ta = load_frd0(a->r1);
    tb = load_frd0(a->r2);
    ty = tcg_constant_i32(a->y);
    tc = tcg_constant_i32(a->c);

    gen_helper_fcmp_d(cpu_env, ta, tb, ty, tc);

    return nullify_end(ctx);
}
/*
 * FTEST: test the FP condition bits held in the fr0 shadow and install
 * the result as the nullification condition.  y selects either the
 * queued-condition masks (y == 1) or a single condition bit.
 * NOTE(review): the middle of this function (the mask table for the
 * acc/rej completers) was elided in the extraction this file came
 * from — the switch below is reconstructed and should be verified
 * against upstream before relying on the exact mask constants.
 */
static bool trans_ftest(DisasContext *ctx, arg_ftest *a)
{
    TCGv_reg t;

    nullify_over(ctx);

    t = get_temp(ctx);
    tcg_gen_ld32u_reg(t, cpu_env, offsetof(CPUHPPAState, fr0_shadow));

    if (a->y == 1) {
        int mask;
        bool inv = false;

        switch (a->c) {
        case 0: /* simple */
            tcg_gen_andi_reg(t, t, 0x4000000);
            ctx->null_cond = cond_make_0(TCG_COND_NE, t);
            goto done;
        case 2: /* rej */
            inv = true;
            /* fallthru */
        case 1: /* acc */
            mask = 0x43ff800;
            break;
        case 6: /* rej8 */
            inv = true;
            /* fallthru */
        case 5: /* acc8 */
            mask = 0x43f8000;
            break;
        case 9: /* acc6 */
            mask = 0x43e0000;
            break;
        case 13: /* acc4 */
            mask = 0x4380000;
            break;
        case 17: /* acc2 */
            mask = 0x4200000;
            break;
        default:
            gen_illegal(ctx);
            return true;
        }
        if (inv) {
            TCGv_reg c = load_const(ctx, mask);
            tcg_gen_or_reg(t, t, c);
            ctx->null_cond = cond_make(TCG_COND_EQ, t, c);
        } else {
            tcg_gen_andi_reg(t, t, mask);
            ctx->null_cond = cond_make_0(TCG_COND_EQ, t);
        }
    } else {
        unsigned cbit = (a->y ^ 1) - 1;

        tcg_gen_extract_reg(t, t, 21 - cbit, 1);
        ctx->null_cond = cond_make_0(TCG_COND_NE, t);
    }

 done:
    return nullify_end(ctx);
}
/*
 * FP class 3 (three-operand) arithmetic: each wrapper selects the
 * single/double helper via do_fop_weww / do_fop_dedd.
 */
static bool trans_fadd_f(DisasContext *ctx, arg_fclass3 *a)
{
    return do_fop_weww(ctx, a->t, a->r1, a->r2, gen_helper_fadd_s);
}

static bool trans_fadd_d(DisasContext *ctx, arg_fclass3 *a)
{
    return do_fop_dedd(ctx, a->t, a->r1, a->r2, gen_helper_fadd_d);
}

static bool trans_fsub_f(DisasContext *ctx, arg_fclass3 *a)
{
    return do_fop_weww(ctx, a->t, a->r1, a->r2, gen_helper_fsub_s);
}

static bool trans_fsub_d(DisasContext *ctx, arg_fclass3 *a)
{
    return do_fop_dedd(ctx, a->t, a->r1, a->r2, gen_helper_fsub_d);
}

static bool trans_fmpy_f(DisasContext *ctx, arg_fclass3 *a)
{
    return do_fop_weww(ctx, a->t, a->r1, a->r2, gen_helper_fmpy_s);
}

static bool trans_fmpy_d(DisasContext *ctx, arg_fclass3 *a)
{
    return do_fop_dedd(ctx, a->t, a->r1, a->r2, gen_helper_fmpy_d);
}

static bool trans_fdiv_f(DisasContext *ctx, arg_fclass3 *a)
{
    return do_fop_weww(ctx, a->t, a->r1, a->r2, gen_helper_fdiv_s);
}

static bool trans_fdiv_d(DisasContext *ctx, arg_fclass3 *a)
{
    return do_fop_dedd(ctx, a->t, a->r1, a->r2, gen_helper_fdiv_d);
}
/*
 * XMPYU: 32x32 -> 64 unsigned multiply in the FP register file.
 * NOTE(review): the store of the product was elided in the extraction;
 * save_frd(a->t, x) restored here per the surrounding pattern — verify
 * against upstream.
 */
static bool trans_xmpyu(DisasContext *ctx, arg_xmpyu *a)
{
    TCGv_i64 x, y;

    nullify_over(ctx);

    x = load_frw0_i64(a->r1);
    y = load_frw0_i64(a->r2);
    tcg_gen_mul_i64(x, x, y);
    save_frd(a->t, x);

    return nullify_end(ctx);
}
/*
 * Convert the fmpyadd single-precision register encodings to standard.
 * The low nibble is the register number within a bank; bit 4 selects
 * the upper bank, which sits 32 registers above the base of 16.
 */
static inline int fmpyadd_s_reg(unsigned r)
{
    unsigned bank_offset = (r & 16) << 1;   /* 0 or 32 */
    unsigned reg_in_bank = r & 15;
    return 16 + bank_offset + reg_in_bank;
}
/*
 * FMPYADD/FMPYSUB, single precision: two independent FP operations in
 * one instruction — a multiply (tm = rm1 * rm2) and an add or subtract
 * (ta = ta +/- ra).  All five register fields use the compressed
 * single-precision encoding and must be remapped first.
 */
static bool do_fmpyadd_s(DisasContext *ctx, arg_mpyadd *a, bool is_sub)
{
    int tm = fmpyadd_s_reg(a->tm);
    int ra = fmpyadd_s_reg(a->ra);
    int ta = fmpyadd_s_reg(a->ta);
    int rm2 = fmpyadd_s_reg(a->rm2);
    int rm1 = fmpyadd_s_reg(a->rm1);

    nullify_over(ctx);

    do_fop_weww(ctx, tm, rm1, rm2, gen_helper_fmpy_s);
    do_fop_weww(ctx, ta, ta, ra,
                is_sub ? gen_helper_fsub_s : gen_helper_fadd_s);

    return nullify_end(ctx);
}
/* FMPYADD, single precision: multiply plus add. */
static bool trans_fmpyadd_f(DisasContext *ctx, arg_mpyadd *a)
{
    return do_fmpyadd_s(ctx, a, false);
}
/* FMPYSUB, single precision: multiply plus subtract. */
static bool trans_fmpysub_f(DisasContext *ctx, arg_mpyadd *a)
{
    return do_fmpyadd_s(ctx, a, true);
}
/*
 * FMPYADD/FMPYSUB, double precision: as do_fmpyadd_s, but the register
 * fields are already standard double-register numbers, so no remapping
 * is needed.
 */
static bool do_fmpyadd_d(DisasContext *ctx, arg_mpyadd *a, bool is_sub)
{
    nullify_over(ctx);

    do_fop_dedd(ctx, a->tm, a->rm1, a->rm2, gen_helper_fmpy_d);
    do_fop_dedd(ctx, a->ta, a->ta, a->ra,
                is_sub ? gen_helper_fsub_d : gen_helper_fadd_d);

    return nullify_end(ctx);
}
/* FMPYADD, double precision: multiply plus add. */
static bool trans_fmpyadd_d(DisasContext *ctx, arg_mpyadd *a)
{
    return do_fmpyadd_d(ctx, a, false);
}
/* FMPYSUB, double precision: multiply plus subtract. */
static bool trans_fmpysub_d(DisasContext *ctx, arg_mpyadd *a)
{
    return do_fmpyadd_d(ctx, a, true);
}
/*
 * FMPYFADD/FMPYNFADD, single precision: fused multiply-add,
 * fpr[t] = rm1 * rm2 + ra3, or its negated variant when the insn's
 * negate bit is set (the gen_helper_fmpynfadd_s path).
 * NOTE(review): the selector field is reconstructed as a->neg here —
 * confirm against the arg_fmpyfadd_f decode definition.
 */
static bool trans_fmpyfadd_f(DisasContext *ctx, arg_fmpyfadd_f *a)
{
    TCGv_i32 x, y, z;

    nullify_over(ctx);
    x = load_frw0_i32(a->rm1);
    y = load_frw0_i32(a->rm2);
    z = load_frw0_i32(a->ra3);

    if (a->neg) {
        gen_helper_fmpynfadd_s(x, cpu_env, x, y, z);
    } else {
        gen_helper_fmpyfadd_s(x, cpu_env, x, y, z);
    }

    save_frw_i32(a->t, x);
    return nullify_end(ctx);
}
/*
 * FMPYFADD/FMPYNFADD, double precision: fused multiply-add,
 * fpr[t] = rm1 * rm2 + ra3, or the negated variant
 * (gen_helper_fmpynfadd_d) when the insn's negate bit is set.
 * NOTE(review): the selector field is reconstructed as a->neg here —
 * confirm against the arg_fmpyfadd_d decode definition.
 */
static bool trans_fmpyfadd_d(DisasContext *ctx, arg_fmpyfadd_d *a)
{
    TCGv_i64 x, y, z;

    nullify_over(ctx);
    x = load_frd0(a->rm1);
    y = load_frd0(a->rm2);
    z = load_frd0(a->ra3);

    if (a->neg) {
        gen_helper_fmpynfadd_d(x, cpu_env, x, y, z);
    } else {
        gen_helper_fmpyfadd_d(x, cpu_env, x, y, z);
    }

    save_frd(a->t, x);
    return nullify_end(ctx);
}
/*
 * DIAG: implementation-specific diagnose operation.  Not implemented;
 * log once under -d unimp and treat as a successful no-op so guest
 * firmware that probes it keeps running.
 */
static bool trans_diag(DisasContext *ctx, arg_diag *a)
{
    qemu_log_mask(LOG_UNIMP, "DIAG opcode ignored\n");
    cond_free(&ctx->null_cond);
    return true;
}
/*
 * Translator hook: initialize the per-TB DisasContext from the
 * TranslationBlock's flags and start address.  Recovers privilege
 * level, MMU index, and the two IAOQ (instruction address offset
 * queue) values, then bounds max_insns so translation never crosses
 * a page.
 */
static void hppa_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *ctx = container_of(dcbase, DisasContext, base);
    int bound;

    ctx->cs = cs;
    ctx->tb_flags = ctx->base.tb->flags;

#ifdef CONFIG_USER_ONLY
    /* User mode: fixed privilege/MMU index; the privilege bits ride in
       the low bits of the IAOQ values. */
    ctx->privilege = MMU_IDX_TO_PRIV(MMU_USER_IDX);
    ctx->mmu_idx = MMU_USER_IDX;
    ctx->iaoq_f = ctx->base.pc_first | ctx->privilege;
    ctx->iaoq_b = ctx->base.tb->cs_base | ctx->privilege;
    ctx->unalign = (ctx->tb_flags & TB_FLAG_UNALIGN ? MO_UNALN : MO_ALIGN);
#else
    /* System mode: privilege comes from the TB flags; physical
       addressing is used when PSW[D] (data translation) is clear. */
    ctx->privilege = (ctx->tb_flags >> TB_FLAG_PRIV_SHIFT) & 3;
    ctx->mmu_idx = (ctx->tb_flags & PSW_D ?
                    PRIV_TO_MMU_IDX(ctx->privilege) : MMU_PHYS_IDX);

    /* Recover the IAOQ values from the GVA + PRIV.  cs_base packs the
       front space in the high half and the signed back-queue offset in
       the low 32 bits; diff == 0 means the back queue is unknown (-1). */
    uint64_t cs_base = ctx->base.tb->cs_base;
    uint64_t iasq_f = cs_base & ~0xffffffffull;
    int32_t diff = cs_base;

    ctx->iaoq_f = (ctx->base.pc_first & ~iasq_f) + ctx->privilege;
    ctx->iaoq_b = (diff ? ctx->iaoq_f + diff : -1);
#endif
    ctx->iaoq_n = -1;
    ctx->iaoq_n_var = NULL;

    /* Bound the number of instructions by those left on the page.  */
    bound = -(ctx->base.pc_first | TARGET_PAGE_MASK) / 4;
    ctx->base.max_insns = MIN(ctx->base.max_insns, bound);

    /* Reset the per-insn temporary tracking arrays. */
    ctx->ntempr = 0;
    ctx->ntempl = 0;
    memset(ctx->tempr, 0, sizeof(ctx->tempr));
    memset(ctx->templ, 0, sizeof(ctx->templ));
}
/*
 * Translator hook: per-TB start.  Seed the nullification state (PA-RISC
 * PSW[N] nullifies the next insn) from the TB flags.
 */
static void hppa_tr_tb_start(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *ctx = container_of(dcbase, DisasContext, base);

    /* Seed the nullification status from PSW[N], as saved in TB->FLAGS.  */
    ctx->null_cond = cond_make_f();
    ctx->psw_n_nonzero = false;
    if (ctx->tb_flags & PSW_N) {
        /* The first insn of this TB is statically known to be nullified. */
        ctx->null_cond.c = TCG_COND_ALWAYS;
        ctx->psw_n_nonzero = true;
    }
    ctx->null_lab = NULL;
}
/*
 * Translator hook: record both IAOQ values for this insn so the
 * restore-state machinery can rebuild the front/back queue on a fault.
 */
static void hppa_tr_insn_start(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *ctx = container_of(dcbase, DisasContext, base);

    tcg_gen_insn_start(ctx->iaoq_f, ctx->iaoq_b);
}
/*
 * Translator hook: translate a single instruction.
 *
 * PA-RISC maintains a two-entry instruction address queue (IAOQ front
 * and back) to implement delayed branches.  iaoq_b/iaoq_n == -1 means
 * the address is only known at run time (held in cpu_iaoq_b /
 * iaoq_n_var).  After decoding, the queue is advanced and, when the
 * back of the queue no longer follows the front sequentially, the TB
 * is ended or chained as appropriate.
 */
static void hppa_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *ctx = container_of(dcbase, DisasContext, base);
    CPUHPPAState *env = cs->env_ptr;
    DisasJumpType ret;
    int i, n;

    /* Execute one insn.  */
#ifdef CONFIG_USER_ONLY
    if (ctx->base.pc_next < TARGET_PAGE_SIZE) {
        /* Page zero holds the emulated gateway/syscall entry points. */
        do_page_zero(ctx);
        ret = ctx->base.is_jmp;
        assert(ret != DISAS_NEXT);
    } else
#endif
    {
        /* Always fetch the insn, even if nullified, so that we check
           the page permissions for execute.  */
        uint32_t insn = translator_ldl(env, &ctx->base, ctx->base.pc_next);

        /* Set up the IA queue for the next insn.
           This will be overwritten by a branch.  */
        if (ctx->iaoq_b == -1) {
            /* Back of queue is dynamic: compute next = cpu_iaoq_b + 4. */
            ctx->iaoq_n = -1;
            ctx->iaoq_n_var = get_temp(ctx);
            tcg_gen_addi_reg(ctx->iaoq_n_var, cpu_iaoq_b, 4);
        } else {
            ctx->iaoq_n = ctx->iaoq_b + 4;
            ctx->iaoq_n_var = NULL;
        }

        if (unlikely(ctx->null_cond.c == TCG_COND_ALWAYS)) {
            /* This insn is statically nullified: skip decode entirely
               and clear the nullification for the following insn. */
            ctx->null_cond.c = TCG_COND_NEVER;
            ret = DISAS_NEXT;
        } else {
            ctx->insn = insn;
            if (!decode(ctx, insn)) {
                gen_illegal(ctx);
            }
            ret = ctx->base.is_jmp;
            /* Any nullification branch must have been resolved. */
            assert(ctx->null_lab == NULL);
        }
    }

    /* Forget any temporaries allocated.  */
    for (i = 0, n = ctx->ntempr; i < n; ++i) {
        ctx->tempr[i] = NULL;
    }
    ctx->ntempr = 0;
    for (i = 0, n = ctx->ntempl; i < n; ++i) {
        ctx->templ[i] = NULL;
    }
    ctx->ntempl = 0;

    /* Advance the insn queue.  Note that this check also detects
       a priority change within the instruction queue.  */
    if (ret == DISAS_NEXT && ctx->iaoq_b != ctx->iaoq_f + 4) {
        if (ctx->iaoq_b != -1 && ctx->iaoq_n != -1
            && use_goto_tb(ctx, ctx->iaoq_b)
            && (ctx->null_cond.c == TCG_COND_NEVER
                || ctx->null_cond.c == TCG_COND_ALWAYS)) {
            /* Both queue entries static and nullification known:
               chain directly to the target TB. */
            nullify_set(ctx, ctx->null_cond.c == TCG_COND_ALWAYS);
            gen_goto_tb(ctx, 0, ctx->iaoq_b, ctx->iaoq_n);
            ctx->base.is_jmp = ret = DISAS_NORETURN;
        } else {
            ctx->base.is_jmp = ret = DISAS_IAQ_N_STALE;
        }
    }
    ctx->iaoq_f = ctx->iaoq_b;
    ctx->iaoq_b = ctx->iaoq_n;
    ctx->base.pc_next += 4;

    switch (ret) {
    case DISAS_NORETURN:
    case DISAS_IAQ_N_UPDATED:
        break;

    case DISAS_NEXT:
    case DISAS_IAQ_N_STALE:
    case DISAS_IAQ_N_STALE_EXIT:
        if (ctx->iaoq_f == -1) {
            /* Front of queue went dynamic: flush both queue entries to
               the CPU state and end the TB. */
            tcg_gen_mov_reg(cpu_iaoq_f, cpu_iaoq_b);
            copy_iaoq_entry(cpu_iaoq_b, ctx->iaoq_n, ctx->iaoq_n_var);
#ifndef CONFIG_USER_ONLY
            tcg_gen_mov_i64(cpu_iasq_f, cpu_iasq_b);
#endif
            nullify_save(ctx);
            ctx->base.is_jmp = (ret == DISAS_IAQ_N_STALE_EXIT
                                ? DISAS_EXIT
                                : DISAS_IAQ_N_UPDATED);
        } else if (ctx->iaoq_b == -1) {
            /* Only the back of the queue is dynamic. */
            tcg_gen_mov_reg(cpu_iaoq_b, ctx->iaoq_n_var);
        }
        break;

    default:
        g_assert_not_reached();
    }
}
/*
 * Translator hook: per-TB end.  Flush the IAOQ state back to the CPU
 * where still stale, then either look up the next TB or exit to the
 * main loop.  The switch relies on deliberate fallthrough.
 */
static void hppa_tr_tb_stop(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *ctx = container_of(dcbase, DisasContext, base);
    DisasJumpType is_jmp = ctx->base.is_jmp;

    switch (is_jmp) {
    case DISAS_NORETURN:
        break;
    case DISAS_TOO_MANY:
    case DISAS_IAQ_N_STALE:
    case DISAS_IAQ_N_STALE_EXIT:
        /* CPU state is stale: write back both queue entries and PSW[N]. */
        copy_iaoq_entry(cpu_iaoq_f, ctx->iaoq_f, cpu_iaoq_f);
        copy_iaoq_entry(cpu_iaoq_b, ctx->iaoq_b, cpu_iaoq_b);
        nullify_save(ctx);
        /* FALLTHRU */
    case DISAS_IAQ_N_UPDATED:
        if (is_jmp != DISAS_IAQ_N_STALE_EXIT) {
            tcg_gen_lookup_and_goto_ptr();
            break;
        }
        /* FALLTHRU */
    case DISAS_EXIT:
        tcg_gen_exit_tb(NULL, 0);
        break;
    default:
        g_assert_not_reached();
    }
}
/*
 * Translator hook: disassembly logging for -d in_asm.  In user mode
 * the emulated page-zero entry points have no real guest code, so
 * print a symbolic name instead of disassembling.
 */
static void hppa_tr_disas_log(const DisasContextBase *dcbase,
                              CPUState *cs, FILE *logfile)
{
    target_ulong pc = dcbase->pc_first;

#ifdef CONFIG_USER_ONLY
    switch (pc) {
    case 0x00:
        fprintf(logfile, "IN:\n0x00000000: (null)\n");
        return;
    case 0xb0:
        fprintf(logfile, "IN:\n0x000000b0: light-weight-syscall\n");
        return;
    case 0xe0:
        fprintf(logfile, "IN:\n0x000000e0: set-thread-pointer-syscall\n");
        return;
    case 0x100:
        fprintf(logfile, "IN:\n0x00000100: syscall\n");
        return;
    }
#endif

    fprintf(logfile, "IN: %s\n", lookup_symbol(pc));
    target_disas(logfile, cs, pc, dcbase->tb->size);
}
/* Callback table handed to the generic translator_loop(). */
static const TranslatorOps hppa_tr_ops = {
    .init_disas_context = hppa_tr_init_disas_context,
    .tb_start           = hppa_tr_tb_start,
    .insn_start         = hppa_tr_insn_start,
    .translate_insn     = hppa_tr_translate_insn,
    .tb_stop            = hppa_tr_tb_stop,
    .disas_log          = hppa_tr_disas_log,
};
4276 void gen_intermediate_code(CPUState
*cs
, TranslationBlock
*tb
, int *max_insns
,
4277 target_ulong pc
, void *host_pc
)
4280 translator_loop(cs
, tb
, max_insns
, pc
, host_pc
, &hppa_tr_ops
, &ctx
.base
);