/*
 * HPPA emulation cpu translation for qemu.
 *
 * Copyright (c) 2016 Richard Henderson <rth@twiddle.net>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "cpu.h"
#include "disas/disas.h"
#include "qemu/host-utils.h"
#include "exec/exec-all.h"
#include "tcg/tcg-op.h"
#include "exec/helper-proto.h"
#include "exec/helper-gen.h"
#include "exec/translator.h"
#include "exec/log.h"

#define HELPER_H "helper.h"
#include "exec/helper-info.c.inc"
#undef HELPER_H

/* Since we have a distinction between register size and address size,
   we need to redefine all of these.  */

#undef TCGv
#undef tcg_temp_new
#undef tcg_global_mem_new

#if TARGET_LONG_BITS == 64
#define TCGv_tl TCGv_i64
#define tcg_temp_new_tl tcg_temp_new_i64
#if TARGET_REGISTER_BITS == 64
#define tcg_gen_extu_reg_tl tcg_gen_mov_i64
#else
#define tcg_gen_extu_reg_tl tcg_gen_extu_i32_i64
#endif
#else
#define TCGv_tl TCGv_i32
#define tcg_temp_new_tl tcg_temp_new_i32
#define tcg_gen_extu_reg_tl tcg_gen_mov_i32
#endif

#if TARGET_REGISTER_BITS == 64
#define TCGv_reg TCGv_i64

#define tcg_temp_new tcg_temp_new_i64
#define tcg_global_mem_new tcg_global_mem_new_i64

#define tcg_gen_movi_reg tcg_gen_movi_i64
#define tcg_gen_mov_reg tcg_gen_mov_i64
#define tcg_gen_ld8u_reg tcg_gen_ld8u_i64
#define tcg_gen_ld8s_reg tcg_gen_ld8s_i64
#define tcg_gen_ld16u_reg tcg_gen_ld16u_i64
#define tcg_gen_ld16s_reg tcg_gen_ld16s_i64
#define tcg_gen_ld32u_reg tcg_gen_ld32u_i64
#define tcg_gen_ld32s_reg tcg_gen_ld32s_i64
#define tcg_gen_ld_reg tcg_gen_ld_i64
#define tcg_gen_st8_reg tcg_gen_st8_i64
#define tcg_gen_st16_reg tcg_gen_st16_i64
#define tcg_gen_st32_reg tcg_gen_st32_i64
#define tcg_gen_st_reg tcg_gen_st_i64
#define tcg_gen_add_reg tcg_gen_add_i64
#define tcg_gen_addi_reg tcg_gen_addi_i64
#define tcg_gen_sub_reg tcg_gen_sub_i64
#define tcg_gen_neg_reg tcg_gen_neg_i64
#define tcg_gen_subfi_reg tcg_gen_subfi_i64
#define tcg_gen_subi_reg tcg_gen_subi_i64
#define tcg_gen_and_reg tcg_gen_and_i64
#define tcg_gen_andi_reg tcg_gen_andi_i64
#define tcg_gen_or_reg tcg_gen_or_i64
#define tcg_gen_ori_reg tcg_gen_ori_i64
#define tcg_gen_xor_reg tcg_gen_xor_i64
#define tcg_gen_xori_reg tcg_gen_xori_i64
#define tcg_gen_not_reg tcg_gen_not_i64
#define tcg_gen_shl_reg tcg_gen_shl_i64
#define tcg_gen_shli_reg tcg_gen_shli_i64
#define tcg_gen_shr_reg tcg_gen_shr_i64
#define tcg_gen_shri_reg tcg_gen_shri_i64
#define tcg_gen_sar_reg tcg_gen_sar_i64
#define tcg_gen_sari_reg tcg_gen_sari_i64
#define tcg_gen_brcond_reg tcg_gen_brcond_i64
#define tcg_gen_brcondi_reg tcg_gen_brcondi_i64
#define tcg_gen_setcond_reg tcg_gen_setcond_i64
#define tcg_gen_setcondi_reg tcg_gen_setcondi_i64
#define tcg_gen_mul_reg tcg_gen_mul_i64
#define tcg_gen_muli_reg tcg_gen_muli_i64
#define tcg_gen_div_reg tcg_gen_div_i64
#define tcg_gen_rem_reg tcg_gen_rem_i64
#define tcg_gen_divu_reg tcg_gen_divu_i64
#define tcg_gen_remu_reg tcg_gen_remu_i64
#define tcg_gen_discard_reg tcg_gen_discard_i64
#define tcg_gen_trunc_reg_i32 tcg_gen_extrl_i64_i32
#define tcg_gen_trunc_i64_reg tcg_gen_mov_i64
#define tcg_gen_extu_i32_reg tcg_gen_extu_i32_i64
#define tcg_gen_ext_i32_reg tcg_gen_ext_i32_i64
#define tcg_gen_extu_reg_i64 tcg_gen_mov_i64
#define tcg_gen_ext_reg_i64 tcg_gen_mov_i64
#define tcg_gen_ext8u_reg tcg_gen_ext8u_i64
#define tcg_gen_ext8s_reg tcg_gen_ext8s_i64
#define tcg_gen_ext16u_reg tcg_gen_ext16u_i64
#define tcg_gen_ext16s_reg tcg_gen_ext16s_i64
#define tcg_gen_ext32u_reg tcg_gen_ext32u_i64
#define tcg_gen_ext32s_reg tcg_gen_ext32s_i64
#define tcg_gen_bswap16_reg tcg_gen_bswap16_i64
#define tcg_gen_bswap32_reg tcg_gen_bswap32_i64
#define tcg_gen_bswap64_reg tcg_gen_bswap64_i64
#define tcg_gen_concat_reg_i64 tcg_gen_concat32_i64
#define tcg_gen_andc_reg tcg_gen_andc_i64
#define tcg_gen_eqv_reg tcg_gen_eqv_i64
#define tcg_gen_nand_reg tcg_gen_nand_i64
#define tcg_gen_nor_reg tcg_gen_nor_i64
#define tcg_gen_orc_reg tcg_gen_orc_i64
#define tcg_gen_clz_reg tcg_gen_clz_i64
#define tcg_gen_ctz_reg tcg_gen_ctz_i64
#define tcg_gen_clzi_reg tcg_gen_clzi_i64
#define tcg_gen_ctzi_reg tcg_gen_ctzi_i64
#define tcg_gen_clrsb_reg tcg_gen_clrsb_i64
#define tcg_gen_ctpop_reg tcg_gen_ctpop_i64
#define tcg_gen_rotl_reg tcg_gen_rotl_i64
#define tcg_gen_rotli_reg tcg_gen_rotli_i64
#define tcg_gen_rotr_reg tcg_gen_rotr_i64
#define tcg_gen_rotri_reg tcg_gen_rotri_i64
#define tcg_gen_deposit_reg tcg_gen_deposit_i64
#define tcg_gen_deposit_z_reg tcg_gen_deposit_z_i64
#define tcg_gen_extract_reg tcg_gen_extract_i64
#define tcg_gen_sextract_reg tcg_gen_sextract_i64
#define tcg_gen_extract2_reg tcg_gen_extract2_i64
#define tcg_constant_reg tcg_constant_i64
#define tcg_gen_movcond_reg tcg_gen_movcond_i64
#define tcg_gen_add2_reg tcg_gen_add2_i64
#define tcg_gen_sub2_reg tcg_gen_sub2_i64
#define tcg_gen_qemu_ld_reg tcg_gen_qemu_ld_i64
#define tcg_gen_qemu_st_reg tcg_gen_qemu_st_i64
#define tcg_gen_atomic_xchg_reg tcg_gen_atomic_xchg_i64
#define tcg_gen_trunc_reg_ptr tcg_gen_trunc_i64_ptr
#else
#define TCGv_reg TCGv_i32
#define tcg_temp_new tcg_temp_new_i32
#define tcg_global_mem_new tcg_global_mem_new_i32

#define tcg_gen_movi_reg tcg_gen_movi_i32
#define tcg_gen_mov_reg tcg_gen_mov_i32
#define tcg_gen_ld8u_reg tcg_gen_ld8u_i32
#define tcg_gen_ld8s_reg tcg_gen_ld8s_i32
#define tcg_gen_ld16u_reg tcg_gen_ld16u_i32
#define tcg_gen_ld16s_reg tcg_gen_ld16s_i32
#define tcg_gen_ld32u_reg tcg_gen_ld_i32
#define tcg_gen_ld32s_reg tcg_gen_ld_i32
#define tcg_gen_ld_reg tcg_gen_ld_i32
#define tcg_gen_st8_reg tcg_gen_st8_i32
#define tcg_gen_st16_reg tcg_gen_st16_i32
#define tcg_gen_st32_reg tcg_gen_st32_i32
#define tcg_gen_st_reg tcg_gen_st_i32
#define tcg_gen_add_reg tcg_gen_add_i32
#define tcg_gen_addi_reg tcg_gen_addi_i32
#define tcg_gen_sub_reg tcg_gen_sub_i32
#define tcg_gen_neg_reg tcg_gen_neg_i32
#define tcg_gen_subfi_reg tcg_gen_subfi_i32
#define tcg_gen_subi_reg tcg_gen_subi_i32
#define tcg_gen_and_reg tcg_gen_and_i32
#define tcg_gen_andi_reg tcg_gen_andi_i32
#define tcg_gen_or_reg tcg_gen_or_i32
#define tcg_gen_ori_reg tcg_gen_ori_i32
#define tcg_gen_xor_reg tcg_gen_xor_i32
#define tcg_gen_xori_reg tcg_gen_xori_i32
#define tcg_gen_not_reg tcg_gen_not_i32
#define tcg_gen_shl_reg tcg_gen_shl_i32
#define tcg_gen_shli_reg tcg_gen_shli_i32
#define tcg_gen_shr_reg tcg_gen_shr_i32
#define tcg_gen_shri_reg tcg_gen_shri_i32
#define tcg_gen_sar_reg tcg_gen_sar_i32
#define tcg_gen_sari_reg tcg_gen_sari_i32
#define tcg_gen_brcond_reg tcg_gen_brcond_i32
#define tcg_gen_brcondi_reg tcg_gen_brcondi_i32
#define tcg_gen_setcond_reg tcg_gen_setcond_i32
#define tcg_gen_setcondi_reg tcg_gen_setcondi_i32
#define tcg_gen_mul_reg tcg_gen_mul_i32
#define tcg_gen_muli_reg tcg_gen_muli_i32
#define tcg_gen_div_reg tcg_gen_div_i32
#define tcg_gen_rem_reg tcg_gen_rem_i32
#define tcg_gen_divu_reg tcg_gen_divu_i32
#define tcg_gen_remu_reg tcg_gen_remu_i32
#define tcg_gen_discard_reg tcg_gen_discard_i32
#define tcg_gen_trunc_reg_i32 tcg_gen_mov_i32
#define tcg_gen_trunc_i64_reg tcg_gen_extrl_i64_i32
#define tcg_gen_extu_i32_reg tcg_gen_mov_i32
#define tcg_gen_ext_i32_reg tcg_gen_mov_i32
#define tcg_gen_extu_reg_i64 tcg_gen_extu_i32_i64
#define tcg_gen_ext_reg_i64 tcg_gen_ext_i32_i64
#define tcg_gen_ext8u_reg tcg_gen_ext8u_i32
#define tcg_gen_ext8s_reg tcg_gen_ext8s_i32
#define tcg_gen_ext16u_reg tcg_gen_ext16u_i32
#define tcg_gen_ext16s_reg tcg_gen_ext16s_i32
#define tcg_gen_ext32u_reg tcg_gen_mov_i32
#define tcg_gen_ext32s_reg tcg_gen_mov_i32
#define tcg_gen_bswap16_reg tcg_gen_bswap16_i32
#define tcg_gen_bswap32_reg tcg_gen_bswap32_i32
#define tcg_gen_concat_reg_i64 tcg_gen_concat_i32_i64
#define tcg_gen_andc_reg tcg_gen_andc_i32
#define tcg_gen_eqv_reg tcg_gen_eqv_i32
#define tcg_gen_nand_reg tcg_gen_nand_i32
#define tcg_gen_nor_reg tcg_gen_nor_i32
#define tcg_gen_orc_reg tcg_gen_orc_i32
#define tcg_gen_clz_reg tcg_gen_clz_i32
#define tcg_gen_ctz_reg tcg_gen_ctz_i32
#define tcg_gen_clzi_reg tcg_gen_clzi_i32
#define tcg_gen_ctzi_reg tcg_gen_ctzi_i32
#define tcg_gen_clrsb_reg tcg_gen_clrsb_i32
#define tcg_gen_ctpop_reg tcg_gen_ctpop_i32
#define tcg_gen_rotl_reg tcg_gen_rotl_i32
#define tcg_gen_rotli_reg tcg_gen_rotli_i32
#define tcg_gen_rotr_reg tcg_gen_rotr_i32
#define tcg_gen_rotri_reg tcg_gen_rotri_i32
#define tcg_gen_deposit_reg tcg_gen_deposit_i32
#define tcg_gen_deposit_z_reg tcg_gen_deposit_z_i32
#define tcg_gen_extract_reg tcg_gen_extract_i32
#define tcg_gen_sextract_reg tcg_gen_sextract_i32
#define tcg_gen_extract2_reg tcg_gen_extract2_i32
#define tcg_constant_reg tcg_constant_i32
#define tcg_gen_movcond_reg tcg_gen_movcond_i32
#define tcg_gen_add2_reg tcg_gen_add2_i32
#define tcg_gen_sub2_reg tcg_gen_sub2_i32
#define tcg_gen_qemu_ld_reg tcg_gen_qemu_ld_i32
#define tcg_gen_qemu_st_reg tcg_gen_qemu_st_i32
#define tcg_gen_atomic_xchg_reg tcg_gen_atomic_xchg_i32
#define tcg_gen_trunc_reg_ptr tcg_gen_ext_i32_ptr
#endif /* TARGET_REGISTER_BITS */

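/*
 * A DisasCond describes the run-time comparison "a0 <c> a1"; the static
 * values TCG_COND_NEVER and TCG_COND_ALWAYS stand for conditions whose
 * outcome is already known at translation time.
 */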
typedef struct DisasCond {
    TCGCond c;
    TCGv_reg a0, a1;
} DisasCond;

typedef struct DisasContext {
    DisasContextBase base;
    CPUState *cs;

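    /*
     * The PA-RISC instruction-address queues: iaoq_f/iaoq_b are the
     * offsets of the current (front) and next (back) instruction, and
     * iaoq_n/iaoq_n_var describe the instruction after that, either as
     * a constant or, when only known at run time, as a TCG value
     * (a constant of -1 means "use the variable").
     */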
    target_ureg iaoq_f;
    target_ureg iaoq_b;
    target_ureg iaoq_n;
    TCGv_reg iaoq_n_var;

    DisasCond null_cond;
    TCGLabel *null_lab;

    uint32_t insn;
    uint32_t tb_flags;
    int mmu_idx;
    int privilege;
    bool psw_n_nonzero;
    bool is_pa20;

#ifdef CONFIG_USER_ONLY
    MemOp unalign;
#endif
} DisasContext;

#ifdef CONFIG_USER_ONLY
#define UNALIGN(C) (C)->unalign
#else
#define UNALIGN(C) MO_ALIGN
#endif

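/*
 * The expand_* and *_to_m helpers below are referenced from the decodetree
 * description via !function field transforms and are applied by the
 * generated decoder (decode-insns.c.inc) while extracting instruction
 * operands.
 */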
/* Note that ssm/rsm instructions number PSW_W and PSW_E differently. */
static int expand_sm_imm(DisasContext *ctx, int val)
{
    if (val & PSW_SM_E) {
        val = (val & ~PSW_SM_E) | PSW_E;
    }
    if (val & PSW_SM_W) {
        val = (val & ~PSW_SM_W) | PSW_W;
    }
    return val;
}

/* Inverted space register indicates 0 means sr0 not inferred from base. */
static int expand_sr3x(DisasContext *ctx, int val)
{
    return ~val;
}

/* Convert the M:A bits within a memory insn to the tri-state value
   we use for the final M.  */
static int ma_to_m(DisasContext *ctx, int val)
{
    return val & 2 ? (val & 1 ? -1 : 1) : 0;
}

/* Convert the sign of the displacement to a pre or post-modify. */
static int pos_to_m(DisasContext *ctx, int val)
{
    return val ? 1 : -1;
}

static int neg_to_m(DisasContext *ctx, int val)
{
    return val ? -1 : 1;
}

/* Used for branch targets and fp memory ops. */
static int expand_shl2(DisasContext *ctx, int val)
{
    return val << 2;
}

/* Used for fp memory ops. */
static int expand_shl3(DisasContext *ctx, int val)
{
    return val << 3;
}

/* Used for assemble_21. */
static int expand_shl11(DisasContext *ctx, int val)
{
    return val << 11;
}


/* Include the auto-generated decoder. */
#include "decode-insns.c.inc"

/* We are not using a goto_tb (for whatever reason), but have updated
   the iaq (for whatever reason), so don't do it again on exit.  */
#define DISAS_IAQ_N_UPDATED DISAS_TARGET_0

/* We are exiting the TB, but have neither emitted a goto_tb, nor
   updated the iaq for the next instruction to be executed.  */
#define DISAS_IAQ_N_STALE DISAS_TARGET_1

/* Similarly, but we want to return to the main loop immediately
   to recognize unmasked interrupts.  */
#define DISAS_IAQ_N_STALE_EXIT DISAS_TARGET_2
#define DISAS_EXIT DISAS_TARGET_3

/* global register indexes */
static TCGv_reg cpu_gr[32];
static TCGv_i64 cpu_sr[4];
static TCGv_i64 cpu_srH;
static TCGv_reg cpu_iaoq_f;
static TCGv_reg cpu_iaoq_b;
static TCGv_i64 cpu_iasq_f;
static TCGv_i64 cpu_iasq_b;
static TCGv_reg cpu_sar;
static TCGv_reg cpu_psw_n;
static TCGv_reg cpu_psw_v;
static TCGv_reg cpu_psw_cb;
static TCGv_reg cpu_psw_cb_msb;

void hppa_translate_init(void)
{
#define DEF_VAR(V) { &cpu_##V, #V, offsetof(CPUHPPAState, V) }

    typedef struct { TCGv_reg *var; const char *name; int ofs; } GlobalVar;
    static const GlobalVar vars[] = {
        { &cpu_sar, "sar", offsetof(CPUHPPAState, cr[CR_SAR]) },
        DEF_VAR(psw_n),
        DEF_VAR(psw_v),
        DEF_VAR(psw_cb),
        DEF_VAR(psw_cb_msb),
        DEF_VAR(iaoq_f),
        DEF_VAR(iaoq_b),
    };

#undef DEF_VAR

    /* Use the symbolic register names that match the disassembler. */
    static const char gr_names[32][4] = {
        "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
        "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15",
        "r16", "r17", "r18", "r19", "r20", "r21", "r22", "r23",
        "r24", "r25", "r26", "r27", "r28", "r29", "r30", "r31"
    };
    /* SR[4-7] are not global registers so that we can index them. */
    static const char sr_names[5][4] = {
        "sr0", "sr1", "sr2", "sr3", "srH"
    };

    int i;

    cpu_gr[0] = NULL;
    for (i = 1; i < 32; i++) {
        cpu_gr[i] = tcg_global_mem_new(tcg_env,
                                       offsetof(CPUHPPAState, gr[i]),
                                       gr_names[i]);
    }
    for (i = 0; i < 4; i++) {
        cpu_sr[i] = tcg_global_mem_new_i64(tcg_env,
                                           offsetof(CPUHPPAState, sr[i]),
                                           sr_names[i]);
    }
    cpu_srH = tcg_global_mem_new_i64(tcg_env,
                                     offsetof(CPUHPPAState, sr[4]),
                                     sr_names[4]);

    for (i = 0; i < ARRAY_SIZE(vars); ++i) {
        const GlobalVar *v = &vars[i];
        *v->var = tcg_global_mem_new(tcg_env, v->ofs, v->name);
    }

    cpu_iasq_f = tcg_global_mem_new_i64(tcg_env,
                                        offsetof(CPUHPPAState, iasq_f),
                                        "iasq_f");
    cpu_iasq_b = tcg_global_mem_new_i64(tcg_env,
                                        offsetof(CPUHPPAState, iasq_b),
                                        "iasq_b");
}

static DisasCond cond_make_f(void)
{
    return (DisasCond){
        .c = TCG_COND_NEVER,
        .a0 = NULL,
        .a1 = NULL,
    };
}

static DisasCond cond_make_t(void)
{
    return (DisasCond){
        .c = TCG_COND_ALWAYS,
        .a0 = NULL,
        .a1 = NULL,
    };
}

static DisasCond cond_make_n(void)
{
    return (DisasCond){
        .c = TCG_COND_NE,
        .a0 = cpu_psw_n,
        .a1 = tcg_constant_reg(0)
    };
}

static DisasCond cond_make_0_tmp(TCGCond c, TCGv_reg a0)
{
    assert (c != TCG_COND_NEVER && c != TCG_COND_ALWAYS);
    return (DisasCond){
        .c = c, .a0 = a0, .a1 = tcg_constant_reg(0)
    };
}

static DisasCond cond_make_0(TCGCond c, TCGv_reg a0)
{
    TCGv_reg tmp = tcg_temp_new();
    tcg_gen_mov_reg(tmp, a0);
    return cond_make_0_tmp(c, tmp);
}

static DisasCond cond_make(TCGCond c, TCGv_reg a0, TCGv_reg a1)
{
    DisasCond r = { .c = c };

    assert (c != TCG_COND_NEVER && c != TCG_COND_ALWAYS);
    r.a0 = tcg_temp_new();
    tcg_gen_mov_reg(r.a0, a0);
    r.a1 = tcg_temp_new();
    tcg_gen_mov_reg(r.a1, a1);

    return r;
}

static void cond_free(DisasCond *cond)
{
    switch (cond->c) {
    default:
        cond->a0 = NULL;
        cond->a1 = NULL;
        /* fallthru */
    case TCG_COND_ALWAYS:
        cond->c = TCG_COND_NEVER;
        break;
    case TCG_COND_NEVER:
        break;
    }
}

static TCGv_reg load_gpr(DisasContext *ctx, unsigned reg)
{
    if (reg == 0) {
        TCGv_reg t = tcg_temp_new();
        tcg_gen_movi_reg(t, 0);
        return t;
    } else {
        return cpu_gr[reg];
    }
}

static TCGv_reg dest_gpr(DisasContext *ctx, unsigned reg)
{
    if (reg == 0 || ctx->null_cond.c != TCG_COND_NEVER) {
        return tcg_temp_new();
    } else {
        return cpu_gr[reg];
    }
}

static void save_or_nullify(DisasContext *ctx, TCGv_reg dest, TCGv_reg t)
{
    if (ctx->null_cond.c != TCG_COND_NEVER) {
        tcg_gen_movcond_reg(ctx->null_cond.c, dest, ctx->null_cond.a0,
                            ctx->null_cond.a1, dest, t);
    } else {
        tcg_gen_mov_reg(dest, t);
    }
}

static void save_gpr(DisasContext *ctx, unsigned reg, TCGv_reg t)
{
    if (reg != 0) {
        save_or_nullify(ctx, cpu_gr[reg], t);
    }
}

#if HOST_BIG_ENDIAN
# define HI_OFS 0
# define LO_OFS 4
#else
# define HI_OFS 4
# define LO_OFS 0
#endif

static TCGv_i32 load_frw_i32(unsigned rt)
{
    TCGv_i32 ret = tcg_temp_new_i32();
    tcg_gen_ld_i32(ret, tcg_env,
                   offsetof(CPUHPPAState, fr[rt & 31])
                   + (rt & 32 ? LO_OFS : HI_OFS));
    return ret;
}

static TCGv_i32 load_frw0_i32(unsigned rt)
{
    if (rt == 0) {
        TCGv_i32 ret = tcg_temp_new_i32();
        tcg_gen_movi_i32(ret, 0);
        return ret;
    } else {
        return load_frw_i32(rt);
    }
}

static TCGv_i64 load_frw0_i64(unsigned rt)
{
    TCGv_i64 ret = tcg_temp_new_i64();
    if (rt == 0) {
        tcg_gen_movi_i64(ret, 0);
    } else {
        tcg_gen_ld32u_i64(ret, tcg_env,
                          offsetof(CPUHPPAState, fr[rt & 31])
                          + (rt & 32 ? LO_OFS : HI_OFS));
    }
    return ret;
}

static void save_frw_i32(unsigned rt, TCGv_i32 val)
{
    tcg_gen_st_i32(val, tcg_env,
                   offsetof(CPUHPPAState, fr[rt & 31])
                   + (rt & 32 ? LO_OFS : HI_OFS));
}

#undef HI_OFS
#undef LO_OFS

static TCGv_i64 load_frd(unsigned rt)
{
    TCGv_i64 ret = tcg_temp_new_i64();
    tcg_gen_ld_i64(ret, tcg_env, offsetof(CPUHPPAState, fr[rt]));
    return ret;
}

static TCGv_i64 load_frd0(unsigned rt)
{
    if (rt == 0) {
        TCGv_i64 ret = tcg_temp_new_i64();
        tcg_gen_movi_i64(ret, 0);
        return ret;
    } else {
        return load_frd(rt);
    }
}

static void save_frd(unsigned rt, TCGv_i64 val)
{
    tcg_gen_st_i64(val, tcg_env, offsetof(CPUHPPAState, fr[rt]));
}

static void load_spr(DisasContext *ctx, TCGv_i64 dest, unsigned reg)
{
#ifdef CONFIG_USER_ONLY
    tcg_gen_movi_i64(dest, 0);
#else
    if (reg < 4) {
        tcg_gen_mov_i64(dest, cpu_sr[reg]);
    } else if (ctx->tb_flags & TB_FLAG_SR_SAME) {
        tcg_gen_mov_i64(dest, cpu_srH);
    } else {
        tcg_gen_ld_i64(dest, tcg_env, offsetof(CPUHPPAState, sr[reg]));
    }
#endif
}

/* Skip over the implementation of an insn that has been nullified.
   Use this when the insn is too complex for a conditional move.  */
static void nullify_over(DisasContext *ctx)
{
    if (ctx->null_cond.c != TCG_COND_NEVER) {
        /* The always condition should have been handled in the main loop. */
        assert(ctx->null_cond.c != TCG_COND_ALWAYS);

        ctx->null_lab = gen_new_label();

        /* If we're using PSW[N], copy it to a temp because... */
        if (ctx->null_cond.a0 == cpu_psw_n) {
            ctx->null_cond.a0 = tcg_temp_new();
            tcg_gen_mov_reg(ctx->null_cond.a0, cpu_psw_n);
        }
        /* ... we clear it before branching over the implementation,
           so that (1) it's clear after nullifying this insn and
           (2) if this insn nullifies the next, PSW[N] is valid.  */
        if (ctx->psw_n_nonzero) {
            ctx->psw_n_nonzero = false;
            tcg_gen_movi_reg(cpu_psw_n, 0);
        }

        tcg_gen_brcond_reg(ctx->null_cond.c, ctx->null_cond.a0,
                           ctx->null_cond.a1, ctx->null_lab);
        cond_free(&ctx->null_cond);
    }
}

/* Save the current nullification state to PSW[N]. */
static void nullify_save(DisasContext *ctx)
{
    if (ctx->null_cond.c == TCG_COND_NEVER) {
        if (ctx->psw_n_nonzero) {
            tcg_gen_movi_reg(cpu_psw_n, 0);
        }
        return;
    }
    if (ctx->null_cond.a0 != cpu_psw_n) {
        tcg_gen_setcond_reg(ctx->null_cond.c, cpu_psw_n,
                            ctx->null_cond.a0, ctx->null_cond.a1);
        ctx->psw_n_nonzero = true;
    }
    cond_free(&ctx->null_cond);
}

/* Set a PSW[N] to X.  The intention is that this is used immediately
   before a goto_tb/exit_tb, so that there is no fallthru path to other
   code within the TB.  Therefore we do not update psw_n_nonzero.  */
static void nullify_set(DisasContext *ctx, bool x)
{
    if (ctx->psw_n_nonzero || x) {
        tcg_gen_movi_reg(cpu_psw_n, x);
    }
}

/* Mark the end of an instruction that may have been nullified.
   This is the pair to nullify_over.  Always returns true so that
   it may be tail-called from a translate function.  */
static bool nullify_end(DisasContext *ctx)
{
    TCGLabel *null_lab = ctx->null_lab;
    DisasJumpType status = ctx->base.is_jmp;

    /* For NEXT, NORETURN, STALE, we can easily continue (or exit).
       For UPDATED, we cannot update on the nullified path.  */
    assert(status != DISAS_IAQ_N_UPDATED);

    if (likely(null_lab == NULL)) {
        /* The current insn wasn't conditional or handled the condition
           applied to it without a branch, so the (new) setting of
           NULL_COND can be applied directly to the next insn.  */
        return true;
    }
    ctx->null_lab = NULL;

    if (likely(ctx->null_cond.c == TCG_COND_NEVER)) {
        /* The next instruction will be unconditional,
           and NULL_COND already reflects that.  */
        gen_set_label(null_lab);
    } else {
        /* The insn that we just executed is itself nullifying the next
           instruction.  Store the condition in the PSW[N] global.
           We asserted PSW[N] = 0 in nullify_over, so that after the
           label we have the proper value in place.  */
        nullify_save(ctx);
        gen_set_label(null_lab);
        ctx->null_cond = cond_make_n();
    }
    if (status == DISAS_NORETURN) {
        ctx->base.is_jmp = DISAS_NEXT;
    }
    return true;
}

static target_ureg gva_offset_mask(DisasContext *ctx)
{
    return (ctx->tb_flags & PSW_W
            ? MAKE_64BIT_MASK(0, 62)
            : MAKE_64BIT_MASK(0, 32));
}

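/* Write one instruction-address queue element: use the constant ival when
   it is known at translation time, otherwise (ival == -1) the TCG value
   vval. */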
static void copy_iaoq_entry(DisasContext *ctx, TCGv_reg dest,
                            target_ureg ival, TCGv_reg vval)
{
    if (unlikely(ival == -1)) {
        tcg_gen_mov_reg(dest, vval);
    } else {
        tcg_gen_movi_reg(dest, ival);
    }
}

static inline target_ureg iaoq_dest(DisasContext *ctx, target_sreg disp)
{
    return ctx->iaoq_f + disp + 8;
}

static void gen_excp_1(int exception)
{
    gen_helper_excp(tcg_env, tcg_constant_i32(exception));
}

static void gen_excp(DisasContext *ctx, int exception)
{
    copy_iaoq_entry(ctx, cpu_iaoq_f, ctx->iaoq_f, cpu_iaoq_f);
    copy_iaoq_entry(ctx, cpu_iaoq_b, ctx->iaoq_b, cpu_iaoq_b);
    nullify_save(ctx);
    gen_excp_1(exception);
    ctx->base.is_jmp = DISAS_NORETURN;
}

static bool gen_excp_iir(DisasContext *ctx, int exc)
{
    nullify_over(ctx);
    tcg_gen_st_reg(tcg_constant_reg(ctx->insn),
                   tcg_env, offsetof(CPUHPPAState, cr[CR_IIR]));
    gen_excp(ctx, exc);
    return nullify_end(ctx);
}

static bool gen_illegal(DisasContext *ctx)
{
    return gen_excp_iir(ctx, EXCP_ILL);
}

#ifdef CONFIG_USER_ONLY
#define CHECK_MOST_PRIVILEGED(EXCP) \
    return gen_excp_iir(ctx, EXCP)
#else
#define CHECK_MOST_PRIVILEGED(EXCP) \
    do {                                     \
        if (ctx->privilege != 0) {           \
            return gen_excp_iir(ctx, EXCP);  \
        }                                    \
    } while (0)
#endif

static bool use_goto_tb(DisasContext *ctx, target_ureg dest)
{
    return translator_use_goto_tb(&ctx->base, dest);
}

/* If the next insn is to be nullified, and it's on the same page,
   and we're not attempting to set a breakpoint on it, then we can
   totally skip the nullified insn.  This avoids creating and
   executing a TB that merely branches to the next TB.  */
static bool use_nullify_skip(DisasContext *ctx)
{
    return (((ctx->iaoq_b ^ ctx->iaoq_f) & TARGET_PAGE_MASK) == 0
            && !cpu_breakpoint_test(ctx->cs, ctx->iaoq_b, BP_ANY));
}

static void gen_goto_tb(DisasContext *ctx, int which,
                        target_ureg f, target_ureg b)
{
    if (f != -1 && b != -1 && use_goto_tb(ctx, f)) {
        tcg_gen_goto_tb(which);
        tcg_gen_movi_reg(cpu_iaoq_f, f);
        tcg_gen_movi_reg(cpu_iaoq_b, b);
        tcg_gen_exit_tb(ctx->base.tb, which);
    } else {
        copy_iaoq_entry(ctx, cpu_iaoq_f, f, cpu_iaoq_b);
        copy_iaoq_entry(ctx, cpu_iaoq_b, b, ctx->iaoq_n_var);
        tcg_gen_lookup_and_goto_ptr();
    }
}

static bool cond_need_sv(int c)
{
    return c == 2 || c == 3 || c == 6;
}

static bool cond_need_cb(int c)
{
    return c == 4 || c == 5;
}

/* Need extensions from TCGv_i32 to TCGv_reg. */
static bool cond_need_ext(DisasContext *ctx, bool d)
{
    return TARGET_REGISTER_BITS == 64 && !d;
}

/*
 * Compute conditional for arithmetic.  See Page 5-3, Table 5-1, of
 * the Parisc 1.1 Architecture Reference Manual for details.
 */
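/*
 * Note that cf here is the architectural condition field c shifted left by
 * one, with the low bit holding the f (negate) modifier: e.g. cf == 2 ("=")
 * tests result == 0, while cf == 3 ("<>") tests result != 0.
 */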

static DisasCond do_cond(unsigned cf, TCGv_reg res,
                         TCGv_reg cb_msb, TCGv_reg sv)
{
    DisasCond cond;
    TCGv_reg tmp;

    switch (cf >> 1) {
    case 0: /* Never / TR (0 / 1) */
        cond = cond_make_f();
        break;
    case 1: /* = / <> (Z / !Z) */
        cond = cond_make_0(TCG_COND_EQ, res);
        break;
    case 2: /* < / >= (N ^ V / !(N ^ V) */
        tmp = tcg_temp_new();
        tcg_gen_xor_reg(tmp, res, sv);
        cond = cond_make_0_tmp(TCG_COND_LT, tmp);
        break;
    case 3: /* <= / > (N ^ V) | Z / !((N ^ V) | Z) */
        /*
         * Simplify:
         *   (N ^ V) | Z
         *   ((res < 0) ^ (sv < 0)) | !res
         *   ((res ^ sv) < 0) | !res
         *   (~(res ^ sv) >= 0) | !res
         *   !(~(res ^ sv) >> 31) | !res
         *   !(~(res ^ sv) >> 31 & res)
         */
        tmp = tcg_temp_new();
        tcg_gen_eqv_reg(tmp, res, sv);
        tcg_gen_sari_reg(tmp, tmp, TARGET_REGISTER_BITS - 1);
        tcg_gen_and_reg(tmp, tmp, res);
        cond = cond_make_0_tmp(TCG_COND_EQ, tmp);
        break;
    case 4: /* NUV / UV (!C / C) */
        cond = cond_make_0(TCG_COND_EQ, cb_msb);
        break;
    case 5: /* ZNV / VNZ (!C | Z / C & !Z) */
        tmp = tcg_temp_new();
        tcg_gen_neg_reg(tmp, cb_msb);
        tcg_gen_and_reg(tmp, tmp, res);
        cond = cond_make_0_tmp(TCG_COND_EQ, tmp);
        break;
    case 6: /* SV / NSV (V / !V) */
        cond = cond_make_0(TCG_COND_LT, sv);
        break;
    case 7: /* OD / EV */
        tmp = tcg_temp_new();
        tcg_gen_andi_reg(tmp, res, 1);
        cond = cond_make_0_tmp(TCG_COND_NE, tmp);
        break;
    default:
        g_assert_not_reached();
    }
    if (cf & 1) {
        cond.c = tcg_invert_cond(cond.c);
    }

    return cond;
}

/* Similar, but for the special case of subtraction without borrow, we
   can use the inputs directly.  This can allow other computation to be
   deleted as unused.  */

static DisasCond do_sub_cond(unsigned cf, TCGv_reg res,
                             TCGv_reg in1, TCGv_reg in2, TCGv_reg sv)
{
    DisasCond cond;

    switch (cf >> 1) {
    case 1: /* = / <> */
        cond = cond_make(TCG_COND_EQ, in1, in2);
        break;
    case 2: /* < / >= */
        cond = cond_make(TCG_COND_LT, in1, in2);
        break;
    case 3: /* <= / > */
        cond = cond_make(TCG_COND_LE, in1, in2);
        break;
    case 4: /* << / >>= */
        cond = cond_make(TCG_COND_LTU, in1, in2);
        break;
    case 5: /* <<= / >> */
        cond = cond_make(TCG_COND_LEU, in1, in2);
        break;
    default:
        return do_cond(cf, res, NULL, sv);
    }
    if (cf & 1) {
        cond.c = tcg_invert_cond(cond.c);
    }

    return cond;
}

/*
 * Similar, but for logicals, where the carry and overflow bits are not
 * computed, and use of them is undefined.
 *
 * Undefined or not, hardware does not trap.  It seems reasonable to
 * assume hardware treats cases c={4,5,6} as if C=0 & V=0, since that's
 * how cases c={2,3} are treated.
 */

static DisasCond do_log_cond(unsigned cf, TCGv_reg res)
{
    switch (cf) {
    case 0:  /* never */
    case 9:  /* undef, C */
    case 11: /* undef, C & !Z */
    case 12: /* undef, V */
        return cond_make_f();

    case 1:  /* true */
    case 8:  /* undef, !C */
    case 10: /* undef, !C | Z */
    case 13: /* undef, !V */
        return cond_make_t();

    case 2:  /* == */
        return cond_make_0(TCG_COND_EQ, res);
    case 3:  /* <> */
        return cond_make_0(TCG_COND_NE, res);
    case 4:  /* < */
        return cond_make_0(TCG_COND_LT, res);
    case 5:  /* >= */
        return cond_make_0(TCG_COND_GE, res);
    case 6:  /* <= */
        return cond_make_0(TCG_COND_LE, res);
    case 7:  /* > */
        return cond_make_0(TCG_COND_GT, res);

    case 14: /* OD */
    case 15: /* EV */
        return do_cond(cf, res, NULL, NULL);

    default:
        g_assert_not_reached();
    }
}

/* Similar, but for shift/extract/deposit conditions. */

static DisasCond do_sed_cond(unsigned orig, TCGv_reg res)
{
    unsigned c, f;

    /* Convert the compressed condition codes to standard.
       0-2 are the same as logicals (nv,<,<=), while 3 is OD.
       4-7 are the reverse of 0-3.  */
    c = orig & 3;
    if (c == 3) {
        c = 7;
    }
    f = (orig & 4) / 4;

    return do_log_cond(c * 2 + f, res);
}

/* Similar, but for unit conditions. */

static DisasCond do_unit_cond(unsigned cf, TCGv_reg res,
                              TCGv_reg in1, TCGv_reg in2)
{
    DisasCond cond;
    TCGv_reg tmp, cb = NULL;

    if (cf & 8) {
        /* Since we want to test lots of carry-out bits all at once, do not
         * do our normal thing and compute carry-in of bit B+1 since that
         * leaves us with carry bits spread across two words.
         */
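        /*
         * This computes the per-bit carry out directly:
         * (in1 & in2) | ((in1 | in2) & ~res), i.e. both inputs set, or at
         * least one input set while the corresponding result bit is clear.
         */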
        cb = tcg_temp_new();
        tmp = tcg_temp_new();
        tcg_gen_or_reg(cb, in1, in2);
        tcg_gen_and_reg(tmp, in1, in2);
        tcg_gen_andc_reg(cb, cb, res);
        tcg_gen_or_reg(cb, cb, tmp);
    }

    switch (cf >> 1) {
    case 0: /* never / TR */
    case 1: /* undefined */
    case 5: /* undefined */
        cond = cond_make_f();
        break;

    case 2: /* SBZ / NBZ */
        /* See hasless(v,1) from
         * https://graphics.stanford.edu/~seander/bithacks.html#ZeroInWord
         */
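        /*
         * (res - 0x01010101) & ~res & 0x80808080 is nonzero iff some byte
         * of res is zero: only a zero byte lets the borrow reach bit 7
         * without that bit already being set in res.
         */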
        tmp = tcg_temp_new();
        tcg_gen_subi_reg(tmp, res, 0x01010101u);
        tcg_gen_andc_reg(tmp, tmp, res);
        tcg_gen_andi_reg(tmp, tmp, 0x80808080u);
        cond = cond_make_0(TCG_COND_NE, tmp);
        break;

    case 3: /* SHZ / NHZ */
        tmp = tcg_temp_new();
        tcg_gen_subi_reg(tmp, res, 0x00010001u);
        tcg_gen_andc_reg(tmp, tmp, res);
        tcg_gen_andi_reg(tmp, tmp, 0x80008000u);
        cond = cond_make_0(TCG_COND_NE, tmp);
        break;

    case 4: /* SDC / NDC */
        tcg_gen_andi_reg(cb, cb, 0x88888888u);
        cond = cond_make_0(TCG_COND_NE, cb);
        break;

    case 6: /* SBC / NBC */
        tcg_gen_andi_reg(cb, cb, 0x80808080u);
        cond = cond_make_0(TCG_COND_NE, cb);
        break;

    case 7: /* SHC / NHC */
        tcg_gen_andi_reg(cb, cb, 0x80008000u);
        cond = cond_make_0(TCG_COND_NE, cb);
        break;

    default:
        g_assert_not_reached();
    }
    if (cf & 1) {
        cond.c = tcg_invert_cond(cond.c);
    }

    return cond;
}

static TCGv_reg get_carry(DisasContext *ctx, bool d,
                          TCGv_reg cb, TCGv_reg cb_msb)
{
    if (cond_need_ext(ctx, d)) {
        TCGv_reg t = tcg_temp_new();
        tcg_gen_extract_reg(t, cb, 32, 1);
        return t;
    }
    return cb_msb;
}

static TCGv_reg get_psw_carry(DisasContext *ctx, bool d)
{
    return get_carry(ctx, d, cpu_psw_cb, cpu_psw_cb_msb);
}

/* Compute signed overflow for addition. */
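/* Signed overflow occurs iff the operands have the same sign and the
   result's sign differs: the sign bit of (res ^ in1) & ~(in1 ^ in2).  */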
static TCGv_reg do_add_sv(DisasContext *ctx, TCGv_reg res,
                          TCGv_reg in1, TCGv_reg in2)
{
    TCGv_reg sv = tcg_temp_new();
    TCGv_reg tmp = tcg_temp_new();

    tcg_gen_xor_reg(sv, res, in1);
    tcg_gen_xor_reg(tmp, in1, in2);
    tcg_gen_andc_reg(sv, sv, tmp);

    return sv;
}

/* Compute signed overflow for subtraction. */
static TCGv_reg do_sub_sv(DisasContext *ctx, TCGv_reg res,
                          TCGv_reg in1, TCGv_reg in2)
{
    TCGv_reg sv = tcg_temp_new();
    TCGv_reg tmp = tcg_temp_new();

    tcg_gen_xor_reg(sv, res, in1);
    tcg_gen_xor_reg(tmp, in1, in2);
    tcg_gen_and_reg(sv, sv, tmp);

    return sv;
}

static void do_add(DisasContext *ctx, unsigned rt, TCGv_reg in1,
                   TCGv_reg in2, unsigned shift, bool is_l,
                   bool is_tsv, bool is_tc, bool is_c, unsigned cf)
{
    TCGv_reg dest, cb, cb_msb, cb_cond, sv, tmp;
    unsigned c = cf >> 1;
    DisasCond cond;
    bool d = false;

    dest = tcg_temp_new();
    cb = NULL;
    cb_msb = NULL;
    cb_cond = NULL;

    if (shift) {
        tmp = tcg_temp_new();
        tcg_gen_shli_reg(tmp, in1, shift);
        in1 = tmp;
    }

    if (!is_l || cond_need_cb(c)) {
        TCGv_reg zero = tcg_constant_reg(0);
        cb_msb = tcg_temp_new();
        cb = tcg_temp_new();

        tcg_gen_add2_reg(dest, cb_msb, in1, zero, in2, zero);
        if (is_c) {
            tcg_gen_add2_reg(dest, cb_msb, dest, cb_msb,
                             get_psw_carry(ctx, d), zero);
        }
        tcg_gen_xor_reg(cb, in1, in2);
        tcg_gen_xor_reg(cb, cb, dest);
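        /* in1 ^ in2 ^ dest recovers the carry into each bit position,
           which is what is written back as the PSW carry/borrow state. */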
        if (cond_need_cb(c)) {
            cb_cond = get_carry(ctx, d, cb, cb_msb);
        }
    } else {
        tcg_gen_add_reg(dest, in1, in2);
        if (is_c) {
            tcg_gen_add_reg(dest, dest, get_psw_carry(ctx, d));
        }
    }

    /* Compute signed overflow if required. */
    sv = NULL;
    if (is_tsv || cond_need_sv(c)) {
        sv = do_add_sv(ctx, dest, in1, in2);
        if (is_tsv) {
            /* ??? Need to include overflow from shift. */
            gen_helper_tsv(tcg_env, sv);
        }
    }

    /* Emit any conditional trap before any writeback. */
    cond = do_cond(cf, dest, cb_cond, sv);
    if (is_tc) {
        tmp = tcg_temp_new();
        tcg_gen_setcond_reg(cond.c, tmp, cond.a0, cond.a1);
        gen_helper_tcond(tcg_env, tmp);
    }

    /* Write back the result. */
    if (!is_l) {
        save_or_nullify(ctx, cpu_psw_cb, cb);
        save_or_nullify(ctx, cpu_psw_cb_msb, cb_msb);
    }
    save_gpr(ctx, rt, dest);

    /* Install the new nullification. */
    cond_free(&ctx->null_cond);
    ctx->null_cond = cond;
}

static bool do_add_reg(DisasContext *ctx, arg_rrr_cf_sh *a,
                       bool is_l, bool is_tsv, bool is_tc, bool is_c)
{
    TCGv_reg tcg_r1, tcg_r2;

    if (a->cf) {
        nullify_over(ctx);
    }
    tcg_r1 = load_gpr(ctx, a->r1);
    tcg_r2 = load_gpr(ctx, a->r2);
    do_add(ctx, a->t, tcg_r1, tcg_r2, a->sh, is_l, is_tsv, is_tc, is_c, a->cf);
    return nullify_end(ctx);
}

static bool do_add_imm(DisasContext *ctx, arg_rri_cf *a,
                       bool is_tsv, bool is_tc)
{
    TCGv_reg tcg_im, tcg_r2;

    if (a->cf) {
        nullify_over(ctx);
    }
    tcg_im = tcg_constant_reg(a->i);
    tcg_r2 = load_gpr(ctx, a->r);
    do_add(ctx, a->t, tcg_im, tcg_r2, 0, 0, is_tsv, is_tc, 0, a->cf);
    return nullify_end(ctx);
}

static void do_sub(DisasContext *ctx, unsigned rt, TCGv_reg in1,
                   TCGv_reg in2, bool is_tsv, bool is_b,
                   bool is_tc, unsigned cf)
{
    TCGv_reg dest, sv, cb, cb_msb, zero, tmp;
    unsigned c = cf >> 1;
    DisasCond cond;
    bool d = false;

    dest = tcg_temp_new();
    cb = tcg_temp_new();
    cb_msb = tcg_temp_new();

    zero = tcg_constant_reg(0);
    if (is_b) {
        /* DEST,C = IN1 + ~IN2 + C. */
        tcg_gen_not_reg(cb, in2);
        tcg_gen_add2_reg(dest, cb_msb, in1, zero, get_psw_carry(ctx, d), zero);
        tcg_gen_add2_reg(dest, cb_msb, dest, cb_msb, cb, zero);
        tcg_gen_xor_reg(cb, cb, in1);
        tcg_gen_xor_reg(cb, cb, dest);
    } else {
        /*
         * DEST,C = IN1 + ~IN2 + 1.  We can produce the same result in fewer
         * operations by seeding the high word with 1 and subtracting.
         */
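        /*
         * Two's complement: IN1 - IN2 == IN1 + ~IN2 + 1, and the carry out
         * of the most significant bit is the complement of the borrow
         * (e.g. 5 - 3 produces a carry, 3 - 5 does not).
         */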
        TCGv_reg one = tcg_constant_reg(1);
        tcg_gen_sub2_reg(dest, cb_msb, in1, one, in2, zero);
        tcg_gen_eqv_reg(cb, in1, in2);
        tcg_gen_xor_reg(cb, cb, dest);
    }

    /* Compute signed overflow if required. */
    sv = NULL;
    if (is_tsv || cond_need_sv(c)) {
        sv = do_sub_sv(ctx, dest, in1, in2);
        if (is_tsv) {
            gen_helper_tsv(tcg_env, sv);
        }
    }

    /* Compute the condition.  We cannot use the special case for borrow. */
    if (!is_b) {
        cond = do_sub_cond(cf, dest, in1, in2, sv);
    } else {
        cond = do_cond(cf, dest, get_carry(ctx, d, cb, cb_msb), sv);
    }

    /* Emit any conditional trap before any writeback. */
    if (is_tc) {
        tmp = tcg_temp_new();
        tcg_gen_setcond_reg(cond.c, tmp, cond.a0, cond.a1);
        gen_helper_tcond(tcg_env, tmp);
    }

    /* Write back the result. */
    save_or_nullify(ctx, cpu_psw_cb, cb);
    save_or_nullify(ctx, cpu_psw_cb_msb, cb_msb);
    save_gpr(ctx, rt, dest);

    /* Install the new nullification. */
    cond_free(&ctx->null_cond);
    ctx->null_cond = cond;
}

static bool do_sub_reg(DisasContext *ctx, arg_rrr_cf *a,
                       bool is_tsv, bool is_b, bool is_tc)
{
    TCGv_reg tcg_r1, tcg_r2;

    if (a->cf) {
        nullify_over(ctx);
    }
    tcg_r1 = load_gpr(ctx, a->r1);
    tcg_r2 = load_gpr(ctx, a->r2);
    do_sub(ctx, a->t, tcg_r1, tcg_r2, is_tsv, is_b, is_tc, a->cf);
    return nullify_end(ctx);
}

static bool do_sub_imm(DisasContext *ctx, arg_rri_cf *a, bool is_tsv)
{
    TCGv_reg tcg_im, tcg_r2;

    if (a->cf) {
        nullify_over(ctx);
    }
    tcg_im = tcg_constant_reg(a->i);
    tcg_r2 = load_gpr(ctx, a->r);
    do_sub(ctx, a->t, tcg_im, tcg_r2, is_tsv, 0, 0, a->cf);
    return nullify_end(ctx);
}

static void do_cmpclr(DisasContext *ctx, unsigned rt, TCGv_reg in1,
                      TCGv_reg in2, unsigned cf)
{
    TCGv_reg dest, sv;
    DisasCond cond;

    dest = tcg_temp_new();
    tcg_gen_sub_reg(dest, in1, in2);

    /* Compute signed overflow if required. */
    sv = NULL;
    if (cond_need_sv(cf >> 1)) {
        sv = do_sub_sv(ctx, dest, in1, in2);
    }

    /* Form the condition for the compare. */
    cond = do_sub_cond(cf, dest, in1, in2, sv);

    /* Clear. */
    tcg_gen_movi_reg(dest, 0);
    save_gpr(ctx, rt, dest);

    /* Install the new nullification. */
    cond_free(&ctx->null_cond);
    ctx->null_cond = cond;
}

static void do_log(DisasContext *ctx, unsigned rt, TCGv_reg in1,
                   TCGv_reg in2, unsigned cf,
                   void (*fn)(TCGv_reg, TCGv_reg, TCGv_reg))
{
    TCGv_reg dest = dest_gpr(ctx, rt);

    /* Perform the operation, and writeback. */
    fn(dest, in1, in2);
    save_gpr(ctx, rt, dest);

    /* Install the new nullification. */
    cond_free(&ctx->null_cond);
    if (cf) {
        ctx->null_cond = do_log_cond(cf, dest);
    }
}

static bool do_log_reg(DisasContext *ctx, arg_rrr_cf *a,
                       void (*fn)(TCGv_reg, TCGv_reg, TCGv_reg))
{
    TCGv_reg tcg_r1, tcg_r2;

    if (a->cf) {
        nullify_over(ctx);
    }
    tcg_r1 = load_gpr(ctx, a->r1);
    tcg_r2 = load_gpr(ctx, a->r2);
    do_log(ctx, a->t, tcg_r1, tcg_r2, a->cf, fn);
    return nullify_end(ctx);
}

static void do_unit(DisasContext *ctx, unsigned rt, TCGv_reg in1,
                    TCGv_reg in2, unsigned cf, bool is_tc,
                    void (*fn)(TCGv_reg, TCGv_reg, TCGv_reg))
{
    TCGv_reg dest;
    DisasCond cond;

    if (cf == 0) {
        dest = dest_gpr(ctx, rt);
        fn(dest, in1, in2);
        save_gpr(ctx, rt, dest);
        cond_free(&ctx->null_cond);
    } else {
        dest = tcg_temp_new();
        fn(dest, in1, in2);

        cond = do_unit_cond(cf, dest, in1, in2);

        if (is_tc) {
            TCGv_reg tmp = tcg_temp_new();
            tcg_gen_setcond_reg(cond.c, tmp, cond.a0, cond.a1);
            gen_helper_tcond(tcg_env, tmp);
        }
        save_gpr(ctx, rt, dest);

        cond_free(&ctx->null_cond);
        ctx->null_cond = cond;
    }
}

#ifndef CONFIG_USER_ONLY
/* The "normal" usage is SP >= 0, wherein SP == 0 selects the space
   from the top 2 bits of the base register.  There are a few system
   instructions that have a 3-bit space specifier, for which SR0 is
   not special.  To handle this, pass ~SP.  */
static TCGv_i64 space_select(DisasContext *ctx, int sp, TCGv_reg base)
{
    TCGv_ptr ptr;
    TCGv_reg tmp;
    TCGv_i64 spc;

    if (sp != 0) {
        if (sp < 0) {
            sp = ~sp;
        }
        spc = tcg_temp_new_tl();
        load_spr(ctx, spc, sp);
        return spc;
    }
    if (ctx->tb_flags & TB_FLAG_SR_SAME) {
        return cpu_srH;
    }

    ptr = tcg_temp_new_ptr();
    tmp = tcg_temp_new();
    spc = tcg_temp_new_tl();

    /* Extract top 2 bits of the address, shift left 3 for uint64_t index. */
    tcg_gen_shri_reg(tmp, base, (ctx->tb_flags & PSW_W ? 64 : 32) - 5);
    tcg_gen_andi_reg(tmp, tmp, 030);
    tcg_gen_trunc_reg_ptr(ptr, tmp);

    tcg_gen_add_ptr(ptr, ptr, tcg_env);
    tcg_gen_ld_i64(spc, ptr, offsetof(CPUHPPAState, sr[4]));

    return spc;
}
#endif

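/*
 * Form a global virtual address: compute the offset from the base register
 * (plus scaled index or displacement), mask it to 32 or 62 bits according
 * to PSW_W, and, outside of user-only mode, merge in the space selected by
 * space_select.  The unmasked offset is also returned for base-register
 * modification.
 */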
static void form_gva(DisasContext *ctx, TCGv_tl *pgva, TCGv_reg *pofs,
                     unsigned rb, unsigned rx, int scale, target_sreg disp,
                     unsigned sp, int modify, bool is_phys)
{
    TCGv_reg base = load_gpr(ctx, rb);
    TCGv_reg ofs;
    TCGv_tl addr;

    /* Note that RX is mutually exclusive with DISP. */
    if (rx) {
        ofs = tcg_temp_new();
        tcg_gen_shli_reg(ofs, cpu_gr[rx], scale);
        tcg_gen_add_reg(ofs, ofs, base);
    } else if (disp || modify) {
        ofs = tcg_temp_new();
        tcg_gen_addi_reg(ofs, base, disp);
    } else {
        ofs = base;
    }

    *pofs = ofs;
    *pgva = addr = tcg_temp_new_tl();
    tcg_gen_extu_reg_tl(addr, modify <= 0 ? ofs : base);
    tcg_gen_andi_tl(addr, addr, gva_offset_mask(ctx));
#ifndef CONFIG_USER_ONLY
    if (!is_phys) {
        tcg_gen_or_tl(addr, addr, space_select(ctx, sp, base));
    }
#endif
}

/* Emit a memory load.  The modify parameter should be
 * < 0 for pre-modify,
 * > 0 for post-modify,
 * = 0 for no base register update.
 */
static void do_load_32(DisasContext *ctx, TCGv_i32 dest, unsigned rb,
                       unsigned rx, int scale, target_sreg disp,
                       unsigned sp, int modify, MemOp mop)
{
    TCGv_reg ofs;
    TCGv_tl addr;

    /* Caller uses nullify_over/nullify_end. */
    assert(ctx->null_cond.c == TCG_COND_NEVER);

    form_gva(ctx, &addr, &ofs, rb, rx, scale, disp, sp, modify,
             ctx->mmu_idx == MMU_PHYS_IDX);
    tcg_gen_qemu_ld_i32(dest, addr, ctx->mmu_idx, mop | UNALIGN(ctx));
    if (modify) {
        save_gpr(ctx, rb, ofs);
    }
}

static void do_load_64(DisasContext *ctx, TCGv_i64 dest, unsigned rb,
                       unsigned rx, int scale, target_sreg disp,
                       unsigned sp, int modify, MemOp mop)
{
    TCGv_reg ofs;
    TCGv_tl addr;

    /* Caller uses nullify_over/nullify_end. */
    assert(ctx->null_cond.c == TCG_COND_NEVER);

    form_gva(ctx, &addr, &ofs, rb, rx, scale, disp, sp, modify,
             ctx->mmu_idx == MMU_PHYS_IDX);
    tcg_gen_qemu_ld_i64(dest, addr, ctx->mmu_idx, mop | UNALIGN(ctx));
    if (modify) {
        save_gpr(ctx, rb, ofs);
    }
}

static void do_store_32(DisasContext *ctx, TCGv_i32 src, unsigned rb,
                        unsigned rx, int scale, target_sreg disp,
                        unsigned sp, int modify, MemOp mop)
{
    TCGv_reg ofs;
    TCGv_tl addr;

    /* Caller uses nullify_over/nullify_end. */
    assert(ctx->null_cond.c == TCG_COND_NEVER);

    form_gva(ctx, &addr, &ofs, rb, rx, scale, disp, sp, modify,
             ctx->mmu_idx == MMU_PHYS_IDX);
    tcg_gen_qemu_st_i32(src, addr, ctx->mmu_idx, mop | UNALIGN(ctx));
    if (modify) {
        save_gpr(ctx, rb, ofs);
    }
}

static void do_store_64(DisasContext *ctx, TCGv_i64 src, unsigned rb,
                        unsigned rx, int scale, target_sreg disp,
                        unsigned sp, int modify, MemOp mop)
{
    TCGv_reg ofs;
    TCGv_tl addr;

    /* Caller uses nullify_over/nullify_end. */
    assert(ctx->null_cond.c == TCG_COND_NEVER);

    form_gva(ctx, &addr, &ofs, rb, rx, scale, disp, sp, modify,
             ctx->mmu_idx == MMU_PHYS_IDX);
    tcg_gen_qemu_st_i64(src, addr, ctx->mmu_idx, mop | UNALIGN(ctx));
    if (modify) {
        save_gpr(ctx, rb, ofs);
    }
}

#if TARGET_REGISTER_BITS == 64
#define do_load_reg do_load_64
#define do_store_reg do_store_64
#else
#define do_load_reg do_load_32
#define do_store_reg do_store_32
#endif

static bool do_load(DisasContext *ctx, unsigned rt, unsigned rb,
                    unsigned rx, int scale, target_sreg disp,
                    unsigned sp, int modify, MemOp mop)
{
    TCGv_reg dest;

    nullify_over(ctx);

    if (modify == 0) {
        /* No base register update. */
        dest = dest_gpr(ctx, rt);
    } else {
        /* Make sure if RT == RB, we see the result of the load. */
        dest = tcg_temp_new();
    }
    do_load_reg(ctx, dest, rb, rx, scale, disp, sp, modify, mop);
    save_gpr(ctx, rt, dest);

    return nullify_end(ctx);
}

static bool do_floadw(DisasContext *ctx, unsigned rt, unsigned rb,
                      unsigned rx, int scale, target_sreg disp,
                      unsigned sp, int modify)
{
    TCGv_i32 tmp;

    nullify_over(ctx);

    tmp = tcg_temp_new_i32();
    do_load_32(ctx, tmp, rb, rx, scale, disp, sp, modify, MO_TEUL);
    save_frw_i32(rt, tmp);

    if (rt == 0) {
        gen_helper_loaded_fr0(tcg_env);
    }

    return nullify_end(ctx);
}

static bool trans_fldw(DisasContext *ctx, arg_ldst *a)
{
    return do_floadw(ctx, a->t, a->b, a->x, a->scale ? 2 : 0,
                     a->disp, a->sp, a->m);
}

static bool do_floadd(DisasContext *ctx, unsigned rt, unsigned rb,
                      unsigned rx, int scale, target_sreg disp,
                      unsigned sp, int modify)
{
    TCGv_i64 tmp;

    nullify_over(ctx);

    tmp = tcg_temp_new_i64();
    do_load_64(ctx, tmp, rb, rx, scale, disp, sp, modify, MO_TEUQ);
    save_frd(rt, tmp);

    if (rt == 0) {
        gen_helper_loaded_fr0(tcg_env);
    }

    return nullify_end(ctx);
}

static bool trans_fldd(DisasContext *ctx, arg_ldst *a)
{
    return do_floadd(ctx, a->t, a->b, a->x, a->scale ? 3 : 0,
                     a->disp, a->sp, a->m);
}

static bool do_store(DisasContext *ctx, unsigned rt, unsigned rb,
                     target_sreg disp, unsigned sp,
                     int modify, MemOp mop)
{
    nullify_over(ctx);
    do_store_reg(ctx, load_gpr(ctx, rt), rb, 0, 0, disp, sp, modify, mop);
    return nullify_end(ctx);
}

static bool do_fstorew(DisasContext *ctx, unsigned rt, unsigned rb,
                       unsigned rx, int scale, target_sreg disp,
                       unsigned sp, int modify)
{
    TCGv_i32 tmp;

    nullify_over(ctx);

    tmp = load_frw_i32(rt);
    do_store_32(ctx, tmp, rb, rx, scale, disp, sp, modify, MO_TEUL);

    return nullify_end(ctx);
}

static bool trans_fstw(DisasContext *ctx, arg_ldst *a)
{
    return do_fstorew(ctx, a->t, a->b, a->x, a->scale ? 2 : 0,
                      a->disp, a->sp, a->m);
}

static bool do_fstored(DisasContext *ctx, unsigned rt, unsigned rb,
                       unsigned rx, int scale, target_sreg disp,
                       unsigned sp, int modify)
{
    TCGv_i64 tmp;

    nullify_over(ctx);

    tmp = load_frd(rt);
    do_store_64(ctx, tmp, rb, rx, scale, disp, sp, modify, MO_TEUQ);

    return nullify_end(ctx);
}

static bool trans_fstd(DisasContext *ctx, arg_ldst *a)
{
    return do_fstored(ctx, a->t, a->b, a->x, a->scale ? 3 : 0,
                      a->disp, a->sp, a->m);
}

static bool do_fop_wew(DisasContext *ctx, unsigned rt, unsigned ra,
                       void (*func)(TCGv_i32, TCGv_env, TCGv_i32))
{
    TCGv_i32 tmp;

    nullify_over(ctx);
    tmp = load_frw0_i32(ra);

    func(tmp, tcg_env, tmp);

    save_frw_i32(rt, tmp);
    return nullify_end(ctx);
}

static bool do_fop_wed(DisasContext *ctx, unsigned rt, unsigned ra,
                       void (*func)(TCGv_i32, TCGv_env, TCGv_i64))
{
    TCGv_i32 dst;
    TCGv_i64 src;

    nullify_over(ctx);
    src = load_frd(ra);
    dst = tcg_temp_new_i32();

    func(dst, tcg_env, src);

    save_frw_i32(rt, dst);
    return nullify_end(ctx);
}

static bool do_fop_ded(DisasContext *ctx, unsigned rt, unsigned ra,
                       void (*func)(TCGv_i64, TCGv_env, TCGv_i64))
{
    TCGv_i64 tmp;

    nullify_over(ctx);
    tmp = load_frd0(ra);

    func(tmp, tcg_env, tmp);

    save_frd(rt, tmp);
    return nullify_end(ctx);
}

static bool do_fop_dew(DisasContext *ctx, unsigned rt, unsigned ra,
                       void (*func)(TCGv_i64, TCGv_env, TCGv_i32))
{
    TCGv_i32 src;
    TCGv_i64 dst;

    nullify_over(ctx);
    src = load_frw0_i32(ra);
    dst = tcg_temp_new_i64();

    func(dst, tcg_env, src);

    save_frd(rt, dst);
    return nullify_end(ctx);
}

static bool do_fop_weww(DisasContext *ctx, unsigned rt,
                        unsigned ra, unsigned rb,
                        void (*func)(TCGv_i32, TCGv_env, TCGv_i32, TCGv_i32))
{
    TCGv_i32 a, b;

    nullify_over(ctx);
    a = load_frw0_i32(ra);
    b = load_frw0_i32(rb);

    func(a, tcg_env, a, b);

    save_frw_i32(rt, a);
    return nullify_end(ctx);
}

static bool do_fop_dedd(DisasContext *ctx, unsigned rt,
                        unsigned ra, unsigned rb,
                        void (*func)(TCGv_i64, TCGv_env, TCGv_i64, TCGv_i64))
{
    TCGv_i64 a, b;

    nullify_over(ctx);
    a = load_frd0(ra);
    b = load_frd0(rb);

    func(a, tcg_env, a, b);

    save_frd(rt, a);
    return nullify_end(ctx);
}

/* Emit an unconditional branch to a direct target, which may or may not
   have already had nullification handled.  */
static bool do_dbranch(DisasContext *ctx, target_ureg dest,
                       unsigned link, bool is_n)
{
    if (ctx->null_cond.c == TCG_COND_NEVER && ctx->null_lab == NULL) {
        if (link != 0) {
            copy_iaoq_entry(ctx, cpu_gr[link], ctx->iaoq_n, ctx->iaoq_n_var);
        }
        ctx->iaoq_n = dest;
        if (is_n) {
            ctx->null_cond.c = TCG_COND_ALWAYS;
        }
    } else {
        nullify_over(ctx);

        if (link != 0) {
            copy_iaoq_entry(ctx, cpu_gr[link], ctx->iaoq_n, ctx->iaoq_n_var);
        }

        if (is_n && use_nullify_skip(ctx)) {
            nullify_set(ctx, 0);
            gen_goto_tb(ctx, 0, dest, dest + 4);
        } else {
            nullify_set(ctx, is_n);
            gen_goto_tb(ctx, 0, ctx->iaoq_b, dest);
        }

        nullify_end(ctx);

        nullify_set(ctx, 0);
        gen_goto_tb(ctx, 1, ctx->iaoq_b, ctx->iaoq_n);
        ctx->base.is_jmp = DISAS_NORETURN;
    }
    return true;
}

/* Emit a conditional branch to a direct target.  If the branch itself
   is nullified, we should have already used nullify_over.  */
static bool do_cbranch(DisasContext *ctx, target_sreg disp, bool is_n,
                       DisasCond *cond)
{
    target_ureg dest = iaoq_dest(ctx, disp);
    TCGLabel *taken = NULL;
    TCGCond c = cond->c;
    bool n;

    assert(ctx->null_cond.c == TCG_COND_NEVER);

    /* Handle TRUE and NEVER as direct branches.  */
    if (c == TCG_COND_ALWAYS) {
        return do_dbranch(ctx, dest, 0, is_n && disp >= 0);
    }
    if (c == TCG_COND_NEVER) {
        return do_dbranch(ctx, ctx->iaoq_n, 0, is_n && disp < 0);
    }

    taken = gen_new_label();
    tcg_gen_brcond_reg(c, cond->a0, cond->a1, taken);
    cond_free(cond);

    /* Not taken: Condition not satisfied; nullify on backward branches. */
    n = is_n && disp < 0;
    if (n && use_nullify_skip(ctx)) {
        nullify_set(ctx, 0);
        gen_goto_tb(ctx, 0, ctx->iaoq_n, ctx->iaoq_n + 4);
    } else {
        if (!n && ctx->null_lab) {
            gen_set_label(ctx->null_lab);
            ctx->null_lab = NULL;
        }
        nullify_set(ctx, n);
        if (ctx->iaoq_n == -1) {
            /* The temporary iaoq_n_var died at the branch above.
               Regenerate it here instead of saving it.  */
            tcg_gen_addi_reg(ctx->iaoq_n_var, cpu_iaoq_b, 4);
        }
        gen_goto_tb(ctx, 0, ctx->iaoq_b, ctx->iaoq_n);
    }

    gen_set_label(taken);

    /* Taken: Condition satisfied; nullify on forward branches.  */
    n = is_n && disp >= 0;
    if (n && use_nullify_skip(ctx)) {
        nullify_set(ctx, 0);
        gen_goto_tb(ctx, 1, dest, dest + 4);
    } else {
        nullify_set(ctx, n);
        gen_goto_tb(ctx, 1, ctx->iaoq_b, dest);
    }

    /* Not taken: the branch itself was nullified.  */
    if (ctx->null_lab) {
        gen_set_label(ctx->null_lab);
        ctx->null_lab = NULL;
        ctx->base.is_jmp = DISAS_IAQ_N_STALE;
    } else {
        ctx->base.is_jmp = DISAS_NORETURN;
    }
    return true;
}

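/*
 * Reading of the above (summary added for orientation, not part of the
 * original commentary): a conditional branch produces up to three exits
 * from the TB -- the not-taken path (goto_tb 0 to iaoq_n), the taken path
 * (goto_tb 1 to dest), and the null_lab path taken when the branch insn
 * itself had been nullified, which ends the TB with a stale instruction
 * address queue so the generic exit code refreshes IAOQ before leaving.
 */
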
/* Emit an unconditional branch to an indirect target.  This handles
   nullification of the branch itself.  */
static bool do_ibranch(DisasContext *ctx, TCGv_reg dest,
                       unsigned link, bool is_n)
{
    TCGv_reg a0, a1, next, tmp;
    TCGCond c;

    assert(ctx->null_lab == NULL);

    if (ctx->null_cond.c == TCG_COND_NEVER) {
        if (link != 0) {
            copy_iaoq_entry(ctx, cpu_gr[link], ctx->iaoq_n, ctx->iaoq_n_var);
        }
        next = tcg_temp_new();
        tcg_gen_mov_reg(next, dest);
        if (is_n) {
            if (use_nullify_skip(ctx)) {
                tcg_gen_mov_reg(cpu_iaoq_f, next);
                tcg_gen_addi_reg(cpu_iaoq_b, next, 4);
                nullify_set(ctx, 0);
                ctx->base.is_jmp = DISAS_IAQ_N_UPDATED;
                return true;
            }
            ctx->null_cond.c = TCG_COND_ALWAYS;
        }
        ctx->iaoq_n = -1;
        ctx->iaoq_n_var = next;
    } else if (is_n && use_nullify_skip(ctx)) {
        /* The (conditional) branch, B, nullifies the next insn, N,
           and we're allowed to skip execution of N (no single-step or
           tracepoint in effect).  Since the goto_ptr that we must use
           for the indirect branch consumes no special resources, we
           can (conditionally) skip B and continue execution.  */
        /* The use_nullify_skip test implies we have a known control path. */
        tcg_debug_assert(ctx->iaoq_b != -1);
        tcg_debug_assert(ctx->iaoq_n != -1);

        /* We do have to handle the non-local temporary, DEST, before
           branching.  Since IAOQ_F is not really live at this point, we
           can simply store DEST optimistically.  Similarly with IAOQ_B.  */
        tcg_gen_mov_reg(cpu_iaoq_f, dest);
        tcg_gen_addi_reg(cpu_iaoq_b, dest, 4);

        nullify_over(ctx);
        if (link != 0) {
            tcg_gen_movi_reg(cpu_gr[link], ctx->iaoq_n);
        }
        tcg_gen_lookup_and_goto_ptr();
        return nullify_end(ctx);
    } else {
        c = ctx->null_cond.c;
        a0 = ctx->null_cond.a0;
        a1 = ctx->null_cond.a1;

        tmp = tcg_temp_new();
        next = tcg_temp_new();

        copy_iaoq_entry(ctx, tmp, ctx->iaoq_n, ctx->iaoq_n_var);
        tcg_gen_movcond_reg(c, next, a0, a1, tmp, dest);
        ctx->iaoq_n = -1;
        ctx->iaoq_n_var = next;

        if (link != 0) {
            tcg_gen_movcond_reg(c, cpu_gr[link], a0, a1, cpu_gr[link], tmp);
        }

        if (is_n) {
            /* The branch nullifies the next insn, which means the state of N
               after the branch is the inverse of the state of N that applied
               to the branch.  */
            tcg_gen_setcond_reg(tcg_invert_cond(c), cpu_psw_n, a0, a1);
            cond_free(&ctx->null_cond);
            ctx->null_cond = cond_make_n();
            ctx->psw_n_nonzero = true;
        } else {
            cond_free(&ctx->null_cond);
        }
    }
    return true;
}

/* Implement
 *    if (IAOQ_Front{30..31} < GR[b]{30..31})
 *          IAOQ_Next{30..31} ← GR[b]{30..31};
 *    else
 *          IAOQ_Next{30..31} ← IAOQ_Front{30..31};
 * which keeps the privilege level from being increased.
 */
static TCGv_reg do_ibranch_priv(DisasContext *ctx, TCGv_reg offset)
{
    TCGv_reg dest;
    switch (ctx->privilege) {
    case 0:
        /* Privilege 0 is maximum and is allowed to decrease.  */
        return offset;
    case 3:
        /* Privilege 3 is minimum and is never allowed to increase.  */
        dest = tcg_temp_new();
        tcg_gen_ori_reg(dest, offset, 3);
        break;
    default:
        dest = tcg_temp_new();
        tcg_gen_andi_reg(dest, offset, -4);
        tcg_gen_ori_reg(dest, dest, ctx->privilege);
        tcg_gen_movcond_reg(TCG_COND_GTU, dest, dest, offset, dest, offset);
        break;
    }
    return dest;
}

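/*
 * Worked example for the default case (derived by hand from the ops
 * generated above, not quoted from the architecture manual): at privilege
 * level 1, a branch target of 0x1000 (PL bits 0) yields dest = 0x1001,
 * which is unsigned-greater than the original and so replaces it, clamping
 * the new PL to 1; a target of 0x1003 yields the same dest = 0x1001, which
 * is not greater, so the original value (PL 3) is kept unchanged.
 */
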
#ifdef CONFIG_USER_ONLY
/* On Linux, page zero is normally marked execute only + gateway.
   Therefore normal read or write is supposed to fail, but specific
   offsets have kernel code mapped to raise permissions to implement
   system calls.  Handling this via an explicit check here, rather
   than in the "be disp(sr2,r0)" instruction that probably sent us
   here, is the easiest way to handle the branch delay slot on the
   aforementioned BE.  */
static void do_page_zero(DisasContext *ctx)
{
    /* If by some means we get here with PSW[N]=1, that implies that
       the B,GATE instruction would be skipped, and we'd fault on the
       next insn within the privileged page.  */
    switch (ctx->null_cond.c) {
    case TCG_COND_NEVER:
        break;
    case TCG_COND_ALWAYS:
        tcg_gen_movi_reg(cpu_psw_n, 0);
        goto do_sigill;
    default:
        /* Since this is always the first (and only) insn within the
           TB, we should know the state of PSW[N] from TB->FLAGS.  */
        g_assert_not_reached();
    }

    /* Check that we didn't arrive here via some means that allowed
       non-sequential instruction execution.  Normally the PSW[B] bit
       detects this by disallowing the B,GATE instruction to execute
       under such conditions.  */
    if (ctx->iaoq_b != ctx->iaoq_f + 4) {
        goto do_sigill;
    }

    switch (ctx->iaoq_f & -4) {
    case 0x00: /* Null pointer call */
        gen_excp_1(EXCP_IMP);
        ctx->base.is_jmp = DISAS_NORETURN;
        break;

    case 0xb0: /* LWS */
        gen_excp_1(EXCP_SYSCALL_LWS);
        ctx->base.is_jmp = DISAS_NORETURN;
        break;

    case 0xe0: /* SET_THREAD_POINTER */
        tcg_gen_st_reg(cpu_gr[26], tcg_env, offsetof(CPUHPPAState, cr[27]));
        tcg_gen_ori_reg(cpu_iaoq_f, cpu_gr[31], 3);
        tcg_gen_addi_reg(cpu_iaoq_b, cpu_iaoq_f, 4);
        ctx->base.is_jmp = DISAS_IAQ_N_UPDATED;
        break;

    case 0x100: /* SYSCALL */
        gen_excp_1(EXCP_SYSCALL);
        ctx->base.is_jmp = DISAS_NORETURN;
        break;

    default:
    do_sigill:
        gen_excp_1(EXCP_ILL);
        ctx->base.is_jmp = DISAS_NORETURN;
        break;
    }
}
#endif

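/*
 * Background (general PA-RISC Linux knowledge, not stated in this file):
 * userspace reaches the offsets above through the gateway page, e.g.
 * "ble 0x100(%sr2, %r0)" for an ordinary syscall and "ble 0xb0(%sr2, %r0)"
 * for the light-weight syscall (atomic helper) path, so faking just these
 * entry points is enough to support a glibc-based userland.
 */
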
deee69a1 2028static bool trans_nop(DisasContext *ctx, arg_nop *a)
b2167459
RH
2029{
2030 cond_free(&ctx->null_cond);
31234768 2031 return true;
b2167459
RH
2032}
2033
40f9f908 2034static bool trans_break(DisasContext *ctx, arg_break *a)
98a9cb79 2035{
31234768 2036 return gen_excp_iir(ctx, EXCP_BREAK);
98a9cb79
RH
2037}
2038
e36f27ef 2039static bool trans_sync(DisasContext *ctx, arg_sync *a)
98a9cb79
RH
2040{
2041 /* No point in nullifying the memory barrier. */
2042 tcg_gen_mb(TCG_BAR_SC | TCG_MO_ALL);
2043
2044 cond_free(&ctx->null_cond);
31234768 2045 return true;
98a9cb79
RH
2046}
2047
c603e14a 2048static bool trans_mfia(DisasContext *ctx, arg_mfia *a)
98a9cb79 2049{
c603e14a 2050 unsigned rt = a->t;
eaa3783b
RH
2051 TCGv_reg tmp = dest_gpr(ctx, rt);
2052 tcg_gen_movi_reg(tmp, ctx->iaoq_f);
98a9cb79
RH
2053 save_gpr(ctx, rt, tmp);
2054
2055 cond_free(&ctx->null_cond);
31234768 2056 return true;
98a9cb79
RH
2057}
2058
c603e14a 2059static bool trans_mfsp(DisasContext *ctx, arg_mfsp *a)
98a9cb79 2060{
c603e14a
RH
2061 unsigned rt = a->t;
2062 unsigned rs = a->sp;
33423472
RH
2063 TCGv_i64 t0 = tcg_temp_new_i64();
2064 TCGv_reg t1 = tcg_temp_new();
98a9cb79 2065
33423472
RH
2066 load_spr(ctx, t0, rs);
2067 tcg_gen_shri_i64(t0, t0, 32);
2068 tcg_gen_trunc_i64_reg(t1, t0);
2069
2070 save_gpr(ctx, rt, t1);
98a9cb79
RH
2071
2072 cond_free(&ctx->null_cond);
31234768 2073 return true;
98a9cb79
RH
2074}
2075
c603e14a 2076static bool trans_mfctl(DisasContext *ctx, arg_mfctl *a)
98a9cb79 2077{
c603e14a
RH
2078 unsigned rt = a->t;
2079 unsigned ctl = a->r;
eaa3783b 2080 TCGv_reg tmp;
98a9cb79
RH
2081
2082 switch (ctl) {
35136a77 2083 case CR_SAR:
98a9cb79 2084#ifdef TARGET_HPPA64
c603e14a 2085 if (a->e == 0) {
98a9cb79
RH
2086 /* MFSAR without ,W masks low 5 bits. */
2087 tmp = dest_gpr(ctx, rt);
eaa3783b 2088 tcg_gen_andi_reg(tmp, cpu_sar, 31);
98a9cb79 2089 save_gpr(ctx, rt, tmp);
35136a77 2090 goto done;
98a9cb79
RH
2091 }
2092#endif
2093 save_gpr(ctx, rt, cpu_sar);
35136a77
RH
2094 goto done;
2095 case CR_IT: /* Interval Timer */
2096 /* FIXME: Respect PSW_S bit. */
2097 nullify_over(ctx);
98a9cb79 2098 tmp = dest_gpr(ctx, rt);
dfd1b812 2099 if (translator_io_start(&ctx->base)) {
49c29d6c 2100 gen_helper_read_interval_timer(tmp);
31234768 2101 ctx->base.is_jmp = DISAS_IAQ_N_STALE;
49c29d6c
RH
2102 } else {
2103 gen_helper_read_interval_timer(tmp);
49c29d6c 2104 }
98a9cb79 2105 save_gpr(ctx, rt, tmp);
31234768 2106 return nullify_end(ctx);
98a9cb79 2107 case 26:
98a9cb79 2108 case 27:
98a9cb79
RH
2109 break;
2110 default:
2111 /* All other control registers are privileged. */
35136a77
RH
2112 CHECK_MOST_PRIVILEGED(EXCP_PRIV_REG);
2113 break;
98a9cb79
RH
2114 }
2115
e12c6309 2116 tmp = tcg_temp_new();
ad75a51e 2117 tcg_gen_ld_reg(tmp, tcg_env, offsetof(CPUHPPAState, cr[ctl]));
35136a77
RH
2118 save_gpr(ctx, rt, tmp);
2119
2120 done:
98a9cb79 2121 cond_free(&ctx->null_cond);
31234768 2122 return true;
98a9cb79
RH
2123}
2124
c603e14a 2125static bool trans_mtsp(DisasContext *ctx, arg_mtsp *a)
33423472 2126{
c603e14a
RH
2127 unsigned rr = a->r;
2128 unsigned rs = a->sp;
33423472
RH
2129 TCGv_i64 t64;
2130
2131 if (rs >= 5) {
2132 CHECK_MOST_PRIVILEGED(EXCP_PRIV_REG);
2133 }
2134 nullify_over(ctx);
2135
2136 t64 = tcg_temp_new_i64();
2137 tcg_gen_extu_reg_i64(t64, load_gpr(ctx, rr));
2138 tcg_gen_shli_i64(t64, t64, 32);
2139
2140 if (rs >= 4) {
ad75a51e 2141 tcg_gen_st_i64(t64, tcg_env, offsetof(CPUHPPAState, sr[rs]));
494737b7 2142 ctx->tb_flags &= ~TB_FLAG_SR_SAME;
33423472
RH
2143 } else {
2144 tcg_gen_mov_i64(cpu_sr[rs], t64);
2145 }
33423472 2146
31234768 2147 return nullify_end(ctx);
33423472
RH
2148}
2149
c603e14a 2150static bool trans_mtctl(DisasContext *ctx, arg_mtctl *a)
98a9cb79 2151{
c603e14a 2152 unsigned ctl = a->t;
4845f015 2153 TCGv_reg reg;
eaa3783b 2154 TCGv_reg tmp;
98a9cb79 2155
35136a77 2156 if (ctl == CR_SAR) {
4845f015 2157 reg = load_gpr(ctx, a->r);
98a9cb79 2158 tmp = tcg_temp_new();
35136a77 2159 tcg_gen_andi_reg(tmp, reg, TARGET_REGISTER_BITS - 1);
98a9cb79 2160 save_or_nullify(ctx, cpu_sar, tmp);
35136a77
RH
2161
2162 cond_free(&ctx->null_cond);
31234768 2163 return true;
98a9cb79
RH
2164 }
2165
35136a77
RH
2166 /* All other control registers are privileged or read-only. */
2167 CHECK_MOST_PRIVILEGED(EXCP_PRIV_REG);
2168
c603e14a 2169#ifndef CONFIG_USER_ONLY
35136a77 2170 nullify_over(ctx);
4845f015
SS
2171 reg = load_gpr(ctx, a->r);
2172
35136a77
RH
2173 switch (ctl) {
2174 case CR_IT:
ad75a51e 2175 gen_helper_write_interval_timer(tcg_env, reg);
35136a77 2176 break;
4f5f2548 2177 case CR_EIRR:
ad75a51e 2178 gen_helper_write_eirr(tcg_env, reg);
4f5f2548
RH
2179 break;
2180 case CR_EIEM:
ad75a51e 2181 gen_helper_write_eiem(tcg_env, reg);
31234768 2182 ctx->base.is_jmp = DISAS_IAQ_N_STALE_EXIT;
4f5f2548
RH
2183 break;
2184
35136a77
RH
2185 case CR_IIASQ:
2186 case CR_IIAOQ:
2187 /* FIXME: Respect PSW_Q bit */
2188 /* The write advances the queue and stores to the back element. */
e12c6309 2189 tmp = tcg_temp_new();
ad75a51e 2190 tcg_gen_ld_reg(tmp, tcg_env,
35136a77 2191 offsetof(CPUHPPAState, cr_back[ctl - CR_IIASQ]));
ad75a51e
RH
2192 tcg_gen_st_reg(tmp, tcg_env, offsetof(CPUHPPAState, cr[ctl]));
2193 tcg_gen_st_reg(reg, tcg_env,
35136a77
RH
2194 offsetof(CPUHPPAState, cr_back[ctl - CR_IIASQ]));
2195 break;
2196
d5de20bd
SS
2197 case CR_PID1:
2198 case CR_PID2:
2199 case CR_PID3:
2200 case CR_PID4:
ad75a51e 2201 tcg_gen_st_reg(reg, tcg_env, offsetof(CPUHPPAState, cr[ctl]));
d5de20bd 2202#ifndef CONFIG_USER_ONLY
ad75a51e 2203 gen_helper_change_prot_id(tcg_env);
d5de20bd
SS
2204#endif
2205 break;
2206
35136a77 2207 default:
ad75a51e 2208 tcg_gen_st_reg(reg, tcg_env, offsetof(CPUHPPAState, cr[ctl]));
35136a77
RH
2209 break;
2210 }
31234768 2211 return nullify_end(ctx);
4f5f2548 2212#endif
98a9cb79
RH
2213}
2214
c603e14a 2215static bool trans_mtsarcm(DisasContext *ctx, arg_mtsarcm *a)
98a9cb79 2216{
eaa3783b 2217 TCGv_reg tmp = tcg_temp_new();
98a9cb79 2218
c603e14a 2219 tcg_gen_not_reg(tmp, load_gpr(ctx, a->r));
eaa3783b 2220 tcg_gen_andi_reg(tmp, tmp, TARGET_REGISTER_BITS - 1);
98a9cb79 2221 save_or_nullify(ctx, cpu_sar, tmp);
98a9cb79
RH
2222
2223 cond_free(&ctx->null_cond);
31234768 2224 return true;
98a9cb79
RH
2225}
2226
e36f27ef 2227static bool trans_ldsid(DisasContext *ctx, arg_ldsid *a)
98a9cb79 2228{
e36f27ef 2229 TCGv_reg dest = dest_gpr(ctx, a->t);
98a9cb79 2230
2330504c
HD
2231#ifdef CONFIG_USER_ONLY
2232 /* We don't implement space registers in user mode. */
eaa3783b 2233 tcg_gen_movi_reg(dest, 0);
2330504c 2234#else
2330504c
HD
2235 TCGv_i64 t0 = tcg_temp_new_i64();
2236
e36f27ef 2237 tcg_gen_mov_i64(t0, space_select(ctx, a->sp, load_gpr(ctx, a->b)));
2330504c
HD
2238 tcg_gen_shri_i64(t0, t0, 32);
2239 tcg_gen_trunc_i64_reg(dest, t0);
2330504c 2240#endif
e36f27ef 2241 save_gpr(ctx, a->t, dest);
98a9cb79
RH
2242
2243 cond_free(&ctx->null_cond);
31234768 2244 return true;
98a9cb79
RH
2245}
2246
e36f27ef 2247static bool trans_rsm(DisasContext *ctx, arg_rsm *a)
e1b5a5ed 2248{
e36f27ef
RH
2249 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2250#ifndef CONFIG_USER_ONLY
e1b5a5ed
RH
2251 TCGv_reg tmp;
2252
e1b5a5ed
RH
2253 nullify_over(ctx);
2254
e12c6309 2255 tmp = tcg_temp_new();
ad75a51e 2256 tcg_gen_ld_reg(tmp, tcg_env, offsetof(CPUHPPAState, psw));
e36f27ef 2257 tcg_gen_andi_reg(tmp, tmp, ~a->i);
ad75a51e 2258 gen_helper_swap_system_mask(tmp, tcg_env, tmp);
e36f27ef 2259 save_gpr(ctx, a->t, tmp);
e1b5a5ed
RH
2260
2261 /* Exit the TB to recognize new interrupts, e.g. PSW_M. */
31234768
RH
2262 ctx->base.is_jmp = DISAS_IAQ_N_STALE_EXIT;
2263 return nullify_end(ctx);
e36f27ef 2264#endif
e1b5a5ed
RH
2265}
2266
e36f27ef 2267static bool trans_ssm(DisasContext *ctx, arg_ssm *a)
e1b5a5ed 2268{
e36f27ef
RH
2269 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2270#ifndef CONFIG_USER_ONLY
e1b5a5ed
RH
2271 TCGv_reg tmp;
2272
e1b5a5ed
RH
2273 nullify_over(ctx);
2274
e12c6309 2275 tmp = tcg_temp_new();
ad75a51e 2276 tcg_gen_ld_reg(tmp, tcg_env, offsetof(CPUHPPAState, psw));
e36f27ef 2277 tcg_gen_ori_reg(tmp, tmp, a->i);
ad75a51e 2278 gen_helper_swap_system_mask(tmp, tcg_env, tmp);
e36f27ef 2279 save_gpr(ctx, a->t, tmp);
e1b5a5ed
RH
2280
2281 /* Exit the TB to recognize new interrupts, e.g. PSW_I. */
31234768
RH
2282 ctx->base.is_jmp = DISAS_IAQ_N_STALE_EXIT;
2283 return nullify_end(ctx);
e36f27ef 2284#endif
e1b5a5ed
RH
2285}
2286
c603e14a 2287static bool trans_mtsm(DisasContext *ctx, arg_mtsm *a)
e1b5a5ed 2288{
e1b5a5ed 2289 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
c603e14a
RH
2290#ifndef CONFIG_USER_ONLY
2291 TCGv_reg tmp, reg;
e1b5a5ed
RH
2292 nullify_over(ctx);
2293
c603e14a 2294 reg = load_gpr(ctx, a->r);
e12c6309 2295 tmp = tcg_temp_new();
ad75a51e 2296 gen_helper_swap_system_mask(tmp, tcg_env, reg);
e1b5a5ed
RH
2297
2298 /* Exit the TB to recognize new interrupts. */
31234768
RH
2299 ctx->base.is_jmp = DISAS_IAQ_N_STALE_EXIT;
2300 return nullify_end(ctx);
c603e14a 2301#endif
e1b5a5ed 2302}
f49b3537 2303
e36f27ef 2304static bool do_rfi(DisasContext *ctx, bool rfi_r)
f49b3537 2305{
f49b3537 2306 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
e36f27ef 2307#ifndef CONFIG_USER_ONLY
f49b3537
RH
2308 nullify_over(ctx);
2309
e36f27ef 2310 if (rfi_r) {
ad75a51e 2311 gen_helper_rfi_r(tcg_env);
f49b3537 2312 } else {
ad75a51e 2313 gen_helper_rfi(tcg_env);
f49b3537 2314 }
31234768 2315 /* Exit the TB to recognize new interrupts. */
8532a14e 2316 tcg_gen_exit_tb(NULL, 0);
31234768 2317 ctx->base.is_jmp = DISAS_NORETURN;
f49b3537 2318
31234768 2319 return nullify_end(ctx);
e36f27ef
RH
2320#endif
2321}
2322
2323static bool trans_rfi(DisasContext *ctx, arg_rfi *a)
2324{
2325 return do_rfi(ctx, false);
2326}
2327
2328static bool trans_rfi_r(DisasContext *ctx, arg_rfi_r *a)
2329{
2330 return do_rfi(ctx, true);
f49b3537 2331}
6210db05 2332
96927adb
RH
2333static bool trans_halt(DisasContext *ctx, arg_halt *a)
2334{
2335 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
e36f27ef 2336#ifndef CONFIG_USER_ONLY
96927adb 2337 nullify_over(ctx);
ad75a51e 2338 gen_helper_halt(tcg_env);
96927adb
RH
2339 ctx->base.is_jmp = DISAS_NORETURN;
2340 return nullify_end(ctx);
2341#endif
2342}
2343
2344static bool trans_reset(DisasContext *ctx, arg_reset *a)
6210db05
HD
2345{
2346 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
96927adb 2347#ifndef CONFIG_USER_ONLY
6210db05 2348 nullify_over(ctx);
ad75a51e 2349 gen_helper_reset(tcg_env);
31234768
RH
2350 ctx->base.is_jmp = DISAS_NORETURN;
2351 return nullify_end(ctx);
96927adb 2352#endif
6210db05 2353}
e1b5a5ed 2354
4a4554c6
HD
2355static bool trans_getshadowregs(DisasContext *ctx, arg_getshadowregs *a)
2356{
2357 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2358#ifndef CONFIG_USER_ONLY
2359 nullify_over(ctx);
ad75a51e 2360 gen_helper_getshadowregs(tcg_env);
4a4554c6
HD
2361 return nullify_end(ctx);
2362#endif
2363}
2364
deee69a1 2365static bool trans_nop_addrx(DisasContext *ctx, arg_ldst *a)
98a9cb79 2366{
deee69a1
RH
2367 if (a->m) {
2368 TCGv_reg dest = dest_gpr(ctx, a->b);
2369 TCGv_reg src1 = load_gpr(ctx, a->b);
2370 TCGv_reg src2 = load_gpr(ctx, a->x);
98a9cb79 2371
deee69a1
RH
2372 /* The only thing we need to do is the base register modification. */
2373 tcg_gen_add_reg(dest, src1, src2);
2374 save_gpr(ctx, a->b, dest);
2375 }
98a9cb79 2376 cond_free(&ctx->null_cond);
31234768 2377 return true;
98a9cb79
RH
2378}
2379
deee69a1 2380static bool trans_probe(DisasContext *ctx, arg_probe *a)
98a9cb79 2381{
86f8d05f 2382 TCGv_reg dest, ofs;
eed14219 2383 TCGv_i32 level, want;
86f8d05f 2384 TCGv_tl addr;
98a9cb79
RH
2385
2386 nullify_over(ctx);
2387
deee69a1
RH
2388 dest = dest_gpr(ctx, a->t);
2389 form_gva(ctx, &addr, &ofs, a->b, 0, 0, 0, a->sp, 0, false);
eed14219 2390
deee69a1 2391 if (a->imm) {
29dd6f64 2392 level = tcg_constant_i32(a->ri);
98a9cb79 2393 } else {
eed14219 2394 level = tcg_temp_new_i32();
deee69a1 2395 tcg_gen_trunc_reg_i32(level, load_gpr(ctx, a->ri));
eed14219 2396 tcg_gen_andi_i32(level, level, 3);
98a9cb79 2397 }
29dd6f64 2398 want = tcg_constant_i32(a->write ? PAGE_WRITE : PAGE_READ);
eed14219 2399
ad75a51e 2400 gen_helper_probe(dest, tcg_env, addr, level, want);
eed14219 2401
deee69a1 2402 save_gpr(ctx, a->t, dest);
31234768 2403 return nullify_end(ctx);
98a9cb79
RH
2404}
2405
deee69a1 2406static bool trans_ixtlbx(DisasContext *ctx, arg_ixtlbx *a)
8d6ae7fb 2407{
deee69a1
RH
2408 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2409#ifndef CONFIG_USER_ONLY
8d6ae7fb
RH
2410 TCGv_tl addr;
2411 TCGv_reg ofs, reg;
2412
8d6ae7fb
RH
2413 nullify_over(ctx);
2414
deee69a1
RH
2415 form_gva(ctx, &addr, &ofs, a->b, 0, 0, 0, a->sp, 0, false);
2416 reg = load_gpr(ctx, a->r);
2417 if (a->addr) {
ad75a51e 2418 gen_helper_itlba(tcg_env, addr, reg);
8d6ae7fb 2419 } else {
ad75a51e 2420 gen_helper_itlbp(tcg_env, addr, reg);
8d6ae7fb
RH
2421 }
2422
32dc7569
SS
2423 /* Exit TB for TLB change if mmu is enabled. */
2424 if (ctx->tb_flags & PSW_C) {
31234768
RH
2425 ctx->base.is_jmp = DISAS_IAQ_N_STALE;
2426 }
2427 return nullify_end(ctx);
deee69a1 2428#endif
8d6ae7fb 2429}
63300a00 2430
deee69a1 2431static bool trans_pxtlbx(DisasContext *ctx, arg_pxtlbx *a)
63300a00 2432{
deee69a1
RH
2433 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2434#ifndef CONFIG_USER_ONLY
63300a00
RH
2435 TCGv_tl addr;
2436 TCGv_reg ofs;
2437
63300a00
RH
2438 nullify_over(ctx);
2439
deee69a1
RH
2440 form_gva(ctx, &addr, &ofs, a->b, a->x, 0, 0, a->sp, a->m, false);
2441 if (a->m) {
2442 save_gpr(ctx, a->b, ofs);
63300a00 2443 }
deee69a1 2444 if (a->local) {
ad75a51e 2445 gen_helper_ptlbe(tcg_env);
63300a00 2446 } else {
ad75a51e 2447 gen_helper_ptlb(tcg_env, addr);
63300a00
RH
2448 }
2449
2450 /* Exit TB for TLB change if mmu is enabled. */
6797c315
NH
2451 if (ctx->tb_flags & PSW_C) {
2452 ctx->base.is_jmp = DISAS_IAQ_N_STALE;
2453 }
2454 return nullify_end(ctx);
2455#endif
2456}
2457
2458/*
2459 * Implement the pcxl and pcxl2 Fast TLB Insert instructions.
2460 * See
2461 * https://parisc.wiki.kernel.org/images-parisc/a/a9/Pcxl2_ers.pdf
2462 * page 13-9 (195/206)
2463 */
2464static bool trans_ixtlbxf(DisasContext *ctx, arg_ixtlbxf *a)
2465{
2466 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2467#ifndef CONFIG_USER_ONLY
2468 TCGv_tl addr, atl, stl;
2469 TCGv_reg reg;
2470
2471 nullify_over(ctx);
2472
2473 /*
2474 * FIXME:
2475 * if (not (pcxl or pcxl2))
2476 * return gen_illegal(ctx);
2477 *
2478 * Note for future: these are 32-bit systems; no hppa64.
2479 */
2480
2481 atl = tcg_temp_new_tl();
2482 stl = tcg_temp_new_tl();
2483 addr = tcg_temp_new_tl();
2484
ad75a51e 2485 tcg_gen_ld32u_i64(stl, tcg_env,
6797c315
NH
2486 a->data ? offsetof(CPUHPPAState, cr[CR_ISR])
2487 : offsetof(CPUHPPAState, cr[CR_IIASQ]));
ad75a51e 2488 tcg_gen_ld32u_i64(atl, tcg_env,
6797c315
NH
2489 a->data ? offsetof(CPUHPPAState, cr[CR_IOR])
2490 : offsetof(CPUHPPAState, cr[CR_IIAOQ]));
2491 tcg_gen_shli_i64(stl, stl, 32);
2492 tcg_gen_or_tl(addr, atl, stl);
6797c315
NH
2493
2494 reg = load_gpr(ctx, a->r);
2495 if (a->addr) {
ad75a51e 2496 gen_helper_itlba(tcg_env, addr, reg);
6797c315 2497 } else {
ad75a51e 2498 gen_helper_itlbp(tcg_env, addr, reg);
6797c315 2499 }
6797c315
NH
2500
2501 /* Exit TB for TLB change if mmu is enabled. */
32dc7569 2502 if (ctx->tb_flags & PSW_C) {
31234768
RH
2503 ctx->base.is_jmp = DISAS_IAQ_N_STALE;
2504 }
2505 return nullify_end(ctx);
deee69a1 2506#endif
63300a00 2507}
2dfcca9f 2508
deee69a1 2509static bool trans_lpa(DisasContext *ctx, arg_ldst *a)
2dfcca9f 2510{
deee69a1
RH
2511 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2512#ifndef CONFIG_USER_ONLY
2dfcca9f
RH
2513 TCGv_tl vaddr;
2514 TCGv_reg ofs, paddr;
2515
2dfcca9f
RH
2516 nullify_over(ctx);
2517
deee69a1 2518 form_gva(ctx, &vaddr, &ofs, a->b, a->x, 0, 0, a->sp, a->m, false);
2dfcca9f
RH
2519
2520 paddr = tcg_temp_new();
ad75a51e 2521 gen_helper_lpa(paddr, tcg_env, vaddr);
2dfcca9f
RH
2522
2523 /* Note that physical address result overrides base modification. */
deee69a1
RH
2524 if (a->m) {
2525 save_gpr(ctx, a->b, ofs);
2dfcca9f 2526 }
deee69a1 2527 save_gpr(ctx, a->t, paddr);
2dfcca9f 2528
31234768 2529 return nullify_end(ctx);
deee69a1 2530#endif
2dfcca9f 2531}
43a97b81 2532
deee69a1 2533static bool trans_lci(DisasContext *ctx, arg_lci *a)
43a97b81 2534{
43a97b81
RH
2535 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2536
2537 /* The Coherence Index is an implementation-defined function of the
2538 physical address. Two addresses with the same CI have a coherent
2539 view of the cache. Our implementation is to return 0 for all,
2540 since the entire address space is coherent. */
29dd6f64 2541 save_gpr(ctx, a->t, tcg_constant_reg(0));
43a97b81 2542
31234768
RH
2543 cond_free(&ctx->null_cond);
2544 return true;
43a97b81 2545}
98a9cb79 2546
0c982a28 2547static bool trans_add(DisasContext *ctx, arg_rrr_cf_sh *a)
b2167459 2548{
0c982a28
RH
2549 return do_add_reg(ctx, a, false, false, false, false);
2550}
b2167459 2551
0c982a28
RH
2552static bool trans_add_l(DisasContext *ctx, arg_rrr_cf_sh *a)
2553{
2554 return do_add_reg(ctx, a, true, false, false, false);
2555}
b2167459 2556
0c982a28
RH
2557static bool trans_add_tsv(DisasContext *ctx, arg_rrr_cf_sh *a)
2558{
2559 return do_add_reg(ctx, a, false, true, false, false);
b2167459
RH
2560}
2561
0c982a28 2562static bool trans_add_c(DisasContext *ctx, arg_rrr_cf_sh *a)
b2167459 2563{
0c982a28
RH
2564 return do_add_reg(ctx, a, false, false, false, true);
2565}
b2167459 2566
0c982a28
RH
2567static bool trans_add_c_tsv(DisasContext *ctx, arg_rrr_cf_sh *a)
2568{
2569 return do_add_reg(ctx, a, false, true, false, true);
2570}
b2167459 2571
0c982a28
RH
2572static bool trans_sub(DisasContext *ctx, arg_rrr_cf *a)
2573{
2574 return do_sub_reg(ctx, a, false, false, false);
b2167459
RH
2575}
2576
0c982a28 2577static bool trans_sub_tsv(DisasContext *ctx, arg_rrr_cf *a)
b2167459 2578{
0c982a28
RH
2579 return do_sub_reg(ctx, a, true, false, false);
2580}
b2167459 2581
0c982a28
RH
2582static bool trans_sub_tc(DisasContext *ctx, arg_rrr_cf *a)
2583{
2584 return do_sub_reg(ctx, a, false, false, true);
b2167459
RH
2585}
2586
0c982a28 2587static bool trans_sub_tsv_tc(DisasContext *ctx, arg_rrr_cf *a)
b2167459 2588{
0c982a28
RH
2589 return do_sub_reg(ctx, a, true, false, true);
2590}
2591
2592static bool trans_sub_b(DisasContext *ctx, arg_rrr_cf *a)
2593{
2594 return do_sub_reg(ctx, a, false, true, false);
2595}
2596
2597static bool trans_sub_b_tsv(DisasContext *ctx, arg_rrr_cf *a)
2598{
2599 return do_sub_reg(ctx, a, true, true, false);
2600}
2601
2602static bool trans_andcm(DisasContext *ctx, arg_rrr_cf *a)
2603{
2604 return do_log_reg(ctx, a, tcg_gen_andc_reg);
2605}
2606
2607static bool trans_and(DisasContext *ctx, arg_rrr_cf *a)
2608{
2609 return do_log_reg(ctx, a, tcg_gen_and_reg);
2610}
2611
static bool trans_or(DisasContext *ctx, arg_rrr_cf *a)
{
    if (a->cf == 0) {
        unsigned r2 = a->r2;
        unsigned r1 = a->r1;
        unsigned rt = a->t;

        if (rt == 0) { /* NOP */
            cond_free(&ctx->null_cond);
            return true;
        }
        if (r2 == 0) { /* COPY */
            if (r1 == 0) {
                TCGv_reg dest = dest_gpr(ctx, rt);
                tcg_gen_movi_reg(dest, 0);
                save_gpr(ctx, rt, dest);
            } else {
                save_gpr(ctx, rt, cpu_gr[r1]);
            }
            cond_free(&ctx->null_cond);
            return true;
        }
#ifndef CONFIG_USER_ONLY
        /* These are QEMU extensions and are nops in the real architecture:
         *
         * or %r10,%r10,%r10 -- idle loop; wait for interrupt
         * or %r31,%r31,%r31 -- death loop; offline cpu
         *                      currently implemented as idle.
         */
        if ((rt == 10 || rt == 31) && r1 == rt && r2 == rt) { /* PAUSE */
            /* No need to check for supervisor, as userland can only pause
               until the next timer interrupt.  */
            nullify_over(ctx);

            /* Advance the instruction queue.  */
            copy_iaoq_entry(ctx, cpu_iaoq_f, ctx->iaoq_b, cpu_iaoq_b);
            copy_iaoq_entry(ctx, cpu_iaoq_b, ctx->iaoq_n, ctx->iaoq_n_var);
            nullify_set(ctx, 0);

            /* Tell the qemu main loop to halt until this cpu has work.  */
            tcg_gen_st_i32(tcg_constant_i32(1), tcg_env,
                           offsetof(CPUState, halted) - offsetof(HPPACPU, env));
            gen_excp_1(EXCP_HALTED);
            ctx->base.is_jmp = DISAS_NORETURN;

            return nullify_end(ctx);
        }
#endif
    }
    return do_log_reg(ctx, a, tcg_gen_or_reg);
}

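/*
 * Guest-side note (hedged; from general recollection rather than anything
 * in this file): the parisc Linux kernel only emits the "or %r10,%r10,%r10"
 * idle hint when it detects it is running under QEMU, so real hardware
 * never depends on the PAUSE behaviour implemented above.
 */
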
0c982a28
RH
2664static bool trans_xor(DisasContext *ctx, arg_rrr_cf *a)
2665{
2666 return do_log_reg(ctx, a, tcg_gen_xor_reg);
b2167459
RH
2667}
2668
0c982a28 2669static bool trans_cmpclr(DisasContext *ctx, arg_rrr_cf *a)
b2167459 2670{
eaa3783b 2671 TCGv_reg tcg_r1, tcg_r2;
b2167459 2672
0c982a28 2673 if (a->cf) {
b2167459
RH
2674 nullify_over(ctx);
2675 }
0c982a28
RH
2676 tcg_r1 = load_gpr(ctx, a->r1);
2677 tcg_r2 = load_gpr(ctx, a->r2);
2678 do_cmpclr(ctx, a->t, tcg_r1, tcg_r2, a->cf);
31234768 2679 return nullify_end(ctx);
b2167459
RH
2680}
2681
0c982a28 2682static bool trans_uxor(DisasContext *ctx, arg_rrr_cf *a)
b2167459 2683{
eaa3783b 2684 TCGv_reg tcg_r1, tcg_r2;
b2167459 2685
0c982a28 2686 if (a->cf) {
b2167459
RH
2687 nullify_over(ctx);
2688 }
0c982a28
RH
2689 tcg_r1 = load_gpr(ctx, a->r1);
2690 tcg_r2 = load_gpr(ctx, a->r2);
2691 do_unit(ctx, a->t, tcg_r1, tcg_r2, a->cf, false, tcg_gen_xor_reg);
31234768 2692 return nullify_end(ctx);
b2167459
RH
2693}
2694
0c982a28 2695static bool do_uaddcm(DisasContext *ctx, arg_rrr_cf *a, bool is_tc)
b2167459 2696{
eaa3783b 2697 TCGv_reg tcg_r1, tcg_r2, tmp;
b2167459 2698
0c982a28 2699 if (a->cf) {
b2167459
RH
2700 nullify_over(ctx);
2701 }
0c982a28
RH
2702 tcg_r1 = load_gpr(ctx, a->r1);
2703 tcg_r2 = load_gpr(ctx, a->r2);
e12c6309 2704 tmp = tcg_temp_new();
eaa3783b 2705 tcg_gen_not_reg(tmp, tcg_r2);
0c982a28 2706 do_unit(ctx, a->t, tcg_r1, tmp, a->cf, is_tc, tcg_gen_add_reg);
31234768 2707 return nullify_end(ctx);
b2167459
RH
2708}
2709
0c982a28
RH
2710static bool trans_uaddcm(DisasContext *ctx, arg_rrr_cf *a)
2711{
2712 return do_uaddcm(ctx, a, false);
2713}
2714
2715static bool trans_uaddcm_tc(DisasContext *ctx, arg_rrr_cf *a)
2716{
2717 return do_uaddcm(ctx, a, true);
2718}
2719
2720static bool do_dcor(DisasContext *ctx, arg_rr_cf *a, bool is_i)
b2167459 2721{
eaa3783b 2722 TCGv_reg tmp;
b2167459
RH
2723
2724 nullify_over(ctx);
2725
e12c6309 2726 tmp = tcg_temp_new();
eaa3783b 2727 tcg_gen_shri_reg(tmp, cpu_psw_cb, 3);
b2167459 2728 if (!is_i) {
eaa3783b 2729 tcg_gen_not_reg(tmp, tmp);
b2167459 2730 }
eaa3783b
RH
2731 tcg_gen_andi_reg(tmp, tmp, 0x11111111);
2732 tcg_gen_muli_reg(tmp, tmp, 6);
60e29463 2733 do_unit(ctx, a->t, load_gpr(ctx, a->r), tmp, a->cf, false,
31234768 2734 is_i ? tcg_gen_add_reg : tcg_gen_sub_reg);
31234768 2735 return nullify_end(ctx);
b2167459
RH
2736}
2737
0c982a28
RH
2738static bool trans_dcor(DisasContext *ctx, arg_rr_cf *a)
2739{
2740 return do_dcor(ctx, a, false);
2741}
2742
2743static bool trans_dcor_i(DisasContext *ctx, arg_rr_cf *a)
2744{
2745 return do_dcor(ctx, a, true);
2746}
2747
static bool trans_ds(DisasContext *ctx, arg_rrr_cf *a)
{
    TCGv_reg dest, add1, add2, addc, zero, in1, in2;
    TCGv_reg cout;

    nullify_over(ctx);

    in1 = load_gpr(ctx, a->r1);
    in2 = load_gpr(ctx, a->r2);

    add1 = tcg_temp_new();
    add2 = tcg_temp_new();
    addc = tcg_temp_new();
    dest = tcg_temp_new();
    zero = tcg_constant_reg(0);

    /* Form R1 << 1 | PSW[CB]{8}.  */
    tcg_gen_add_reg(add1, in1, in1);
    tcg_gen_add_reg(add1, add1, get_psw_carry(ctx, false));

    /*
     * Add or subtract R2, depending on PSW[V].  Proper computation of
     * carry requires that we subtract via + ~R2 + 1, as described in
     * the manual.  By extracting and masking V, we can produce the
     * proper inputs to the addition without movcond.
     */
    tcg_gen_sextract_reg(addc, cpu_psw_v, 31, 1);
    tcg_gen_xor_reg(add2, in2, addc);
    tcg_gen_andi_reg(addc, addc, 1);

    tcg_gen_add2_reg(dest, cpu_psw_cb_msb, add1, zero, add2, zero);
    tcg_gen_add2_reg(dest, cpu_psw_cb_msb, dest, cpu_psw_cb_msb, addc, zero);

    /* Write back the result register.  */
    save_gpr(ctx, a->t, dest);

    /* Write back PSW[CB].  */
    tcg_gen_xor_reg(cpu_psw_cb, add1, add2);
    tcg_gen_xor_reg(cpu_psw_cb, cpu_psw_cb, dest);

    /* Write back PSW[V] for the division step.  */
    cout = get_psw_carry(ctx, false);
    tcg_gen_neg_reg(cpu_psw_v, cout);
    tcg_gen_xor_reg(cpu_psw_v, cpu_psw_v, in2);

    /* Install the new nullification.  */
    if (a->cf) {
        TCGv_reg sv = NULL;
        if (cond_need_sv(a->cf >> 1)) {
            /* ??? The lshift is supposed to contribute to overflow.  */
            sv = do_add_sv(ctx, dest, add1, add2);
        }
        ctx->null_cond = do_cond(a->cf, dest, cout, sv);
    }

    return nullify_end(ctx);
}

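/*
 * Context for DS, added as an aside (general PA-RISC background rather
 * than something stated above): the architecture has no divide
 * instruction, so division is done in millicode that sets up PSW[V] with
 * an initial add/subtract and then executes one DS per quotient bit,
 * which is why the carry and overflow bookkeeping above must follow the
 * manual so precisely.
 */
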
0588e061 2806static bool trans_addi(DisasContext *ctx, arg_rri_cf *a)
b2167459 2807{
0588e061
RH
2808 return do_add_imm(ctx, a, false, false);
2809}
b2167459 2810
0588e061
RH
2811static bool trans_addi_tsv(DisasContext *ctx, arg_rri_cf *a)
2812{
2813 return do_add_imm(ctx, a, true, false);
b2167459
RH
2814}
2815
0588e061 2816static bool trans_addi_tc(DisasContext *ctx, arg_rri_cf *a)
b2167459 2817{
0588e061
RH
2818 return do_add_imm(ctx, a, false, true);
2819}
b2167459 2820
0588e061
RH
2821static bool trans_addi_tc_tsv(DisasContext *ctx, arg_rri_cf *a)
2822{
2823 return do_add_imm(ctx, a, true, true);
2824}
b2167459 2825
0588e061
RH
2826static bool trans_subi(DisasContext *ctx, arg_rri_cf *a)
2827{
2828 return do_sub_imm(ctx, a, false);
2829}
b2167459 2830
0588e061
RH
2831static bool trans_subi_tsv(DisasContext *ctx, arg_rri_cf *a)
2832{
2833 return do_sub_imm(ctx, a, true);
b2167459
RH
2834}
2835
0588e061 2836static bool trans_cmpiclr(DisasContext *ctx, arg_rri_cf *a)
b2167459 2837{
eaa3783b 2838 TCGv_reg tcg_im, tcg_r2;
b2167459 2839
0588e061 2840 if (a->cf) {
b2167459
RH
2841 nullify_over(ctx);
2842 }
2843
d4e58033 2844 tcg_im = tcg_constant_reg(a->i);
0588e061
RH
2845 tcg_r2 = load_gpr(ctx, a->r);
2846 do_cmpclr(ctx, a->t, tcg_im, tcg_r2, a->cf);
b2167459 2847
31234768 2848 return nullify_end(ctx);
b2167459
RH
2849}
2850
1cd012a5 2851static bool trans_ld(DisasContext *ctx, arg_ldst *a)
96d6407f 2852{
0786a3b6
HD
2853 if (unlikely(TARGET_REGISTER_BITS == 32 && a->size > MO_32)) {
2854 return gen_illegal(ctx);
2855 } else {
2856 return do_load(ctx, a->t, a->b, a->x, a->scale ? a->size : 0,
1cd012a5 2857 a->disp, a->sp, a->m, a->size | MO_TE);
0786a3b6 2858 }
96d6407f
RH
2859}
2860
1cd012a5 2861static bool trans_st(DisasContext *ctx, arg_ldst *a)
96d6407f 2862{
1cd012a5 2863 assert(a->x == 0 && a->scale == 0);
0786a3b6
HD
2864 if (unlikely(TARGET_REGISTER_BITS == 32 && a->size > MO_32)) {
2865 return gen_illegal(ctx);
2866 } else {
2867 return do_store(ctx, a->t, a->b, a->disp, a->sp, a->m, a->size | MO_TE);
2868 }
96d6407f
RH
2869}
2870
static bool trans_ldc(DisasContext *ctx, arg_ldst *a)
{
    MemOp mop = MO_TE | MO_ALIGN | a->size;
    TCGv_reg zero, dest, ofs;
    TCGv_tl addr;

    nullify_over(ctx);

    if (a->m) {
        /* Base register modification.  Make sure if RT == RB,
           we see the result of the load.  */
        dest = tcg_temp_new();
    } else {
        dest = dest_gpr(ctx, a->t);
    }

    form_gva(ctx, &addr, &ofs, a->b, a->x, a->scale ? a->size : 0,
             a->disp, a->sp, a->m, ctx->mmu_idx == MMU_PHYS_IDX);

    /*
     * For hppa1.1, LDCW is undefined unless aligned mod 16.
     * However actual hardware succeeds with aligned mod 4.
     * Detect this case and log a GUEST_ERROR.
     *
     * TODO: HPPA64 relaxes the over-alignment requirement
     * with the ,co completer.
     */
    gen_helper_ldc_check(addr);

    zero = tcg_constant_reg(0);
    tcg_gen_atomic_xchg_reg(dest, addr, zero, ctx->mmu_idx, mop);

    if (a->m) {
        save_gpr(ctx, a->b, ofs);
    }
    save_gpr(ctx, a->t, dest);

    return nullify_end(ctx);
}

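/*
 * Background note (general PA-RISC knowledge, not stated in this file):
 * LDCW is the architecture's basic atomic primitive -- it returns the old
 * memory word and leaves zero behind, so a nonzero result means the lock
 * was taken successfully.  Modeling it as an atomic exchange with zero,
 * as above, captures exactly that behaviour.
 */
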
1cd012a5 2911static bool trans_stby(DisasContext *ctx, arg_stby *a)
96d6407f 2912{
86f8d05f
RH
2913 TCGv_reg ofs, val;
2914 TCGv_tl addr;
96d6407f
RH
2915
2916 nullify_over(ctx);
2917
1cd012a5 2918 form_gva(ctx, &addr, &ofs, a->b, 0, 0, a->disp, a->sp, a->m,
86f8d05f 2919 ctx->mmu_idx == MMU_PHYS_IDX);
1cd012a5
RH
2920 val = load_gpr(ctx, a->r);
2921 if (a->a) {
f9f46db4 2922 if (tb_cflags(ctx->base.tb) & CF_PARALLEL) {
ad75a51e 2923 gen_helper_stby_e_parallel(tcg_env, addr, val);
f9f46db4 2924 } else {
ad75a51e 2925 gen_helper_stby_e(tcg_env, addr, val);
f9f46db4 2926 }
96d6407f 2927 } else {
f9f46db4 2928 if (tb_cflags(ctx->base.tb) & CF_PARALLEL) {
ad75a51e 2929 gen_helper_stby_b_parallel(tcg_env, addr, val);
f9f46db4 2930 } else {
ad75a51e 2931 gen_helper_stby_b(tcg_env, addr, val);
f9f46db4 2932 }
96d6407f 2933 }
1cd012a5 2934 if (a->m) {
86f8d05f 2935 tcg_gen_andi_reg(ofs, ofs, ~3);
1cd012a5 2936 save_gpr(ctx, a->b, ofs);
96d6407f 2937 }
96d6407f 2938
31234768 2939 return nullify_end(ctx);
96d6407f
RH
2940}
2941
1cd012a5 2942static bool trans_lda(DisasContext *ctx, arg_ldst *a)
d0a851cc
RH
2943{
2944 int hold_mmu_idx = ctx->mmu_idx;
d0a851cc
RH
2945
2946 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
d0a851cc 2947 ctx->mmu_idx = MMU_PHYS_IDX;
1cd012a5 2948 trans_ld(ctx, a);
d0a851cc 2949 ctx->mmu_idx = hold_mmu_idx;
31234768 2950 return true;
d0a851cc
RH
2951}
2952
1cd012a5 2953static bool trans_sta(DisasContext *ctx, arg_ldst *a)
d0a851cc
RH
2954{
2955 int hold_mmu_idx = ctx->mmu_idx;
d0a851cc
RH
2956
2957 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
d0a851cc 2958 ctx->mmu_idx = MMU_PHYS_IDX;
1cd012a5 2959 trans_st(ctx, a);
d0a851cc 2960 ctx->mmu_idx = hold_mmu_idx;
31234768 2961 return true;
d0a851cc 2962}
95412a61 2963
0588e061 2964static bool trans_ldil(DisasContext *ctx, arg_ldil *a)
b2167459 2965{
0588e061 2966 TCGv_reg tcg_rt = dest_gpr(ctx, a->t);
b2167459 2967
0588e061
RH
2968 tcg_gen_movi_reg(tcg_rt, a->i);
2969 save_gpr(ctx, a->t, tcg_rt);
b2167459 2970 cond_free(&ctx->null_cond);
31234768 2971 return true;
b2167459
RH
2972}
2973
0588e061 2974static bool trans_addil(DisasContext *ctx, arg_addil *a)
b2167459 2975{
0588e061 2976 TCGv_reg tcg_rt = load_gpr(ctx, a->r);
eaa3783b 2977 TCGv_reg tcg_r1 = dest_gpr(ctx, 1);
b2167459 2978
0588e061 2979 tcg_gen_addi_reg(tcg_r1, tcg_rt, a->i);
b2167459
RH
2980 save_gpr(ctx, 1, tcg_r1);
2981 cond_free(&ctx->null_cond);
31234768 2982 return true;
b2167459
RH
2983}
2984
0588e061 2985static bool trans_ldo(DisasContext *ctx, arg_ldo *a)
b2167459 2986{
0588e061 2987 TCGv_reg tcg_rt = dest_gpr(ctx, a->t);
b2167459
RH
2988
2989 /* Special case rb == 0, for the LDI pseudo-op.
2990 The COPY pseudo-op is handled for free within tcg_gen_addi_tl. */
0588e061
RH
2991 if (a->b == 0) {
2992 tcg_gen_movi_reg(tcg_rt, a->i);
b2167459 2993 } else {
0588e061 2994 tcg_gen_addi_reg(tcg_rt, cpu_gr[a->b], a->i);
b2167459 2995 }
0588e061 2996 save_gpr(ctx, a->t, tcg_rt);
b2167459 2997 cond_free(&ctx->null_cond);
31234768 2998 return true;
b2167459
RH
2999}
3000
01afb7be
RH
3001static bool do_cmpb(DisasContext *ctx, unsigned r, TCGv_reg in1,
3002 unsigned c, unsigned f, unsigned n, int disp)
98cd9ca7 3003{
01afb7be 3004 TCGv_reg dest, in2, sv;
98cd9ca7
RH
3005 DisasCond cond;
3006
98cd9ca7 3007 in2 = load_gpr(ctx, r);
e12c6309 3008 dest = tcg_temp_new();
98cd9ca7 3009
eaa3783b 3010 tcg_gen_sub_reg(dest, in1, in2);
98cd9ca7 3011
f764718d 3012 sv = NULL;
b47a4a02 3013 if (cond_need_sv(c)) {
98cd9ca7
RH
3014 sv = do_sub_sv(ctx, dest, in1, in2);
3015 }
3016
01afb7be
RH
3017 cond = do_sub_cond(c * 2 + f, dest, in1, in2, sv);
3018 return do_cbranch(ctx, disp, n, &cond);
98cd9ca7
RH
3019}
3020
01afb7be 3021static bool trans_cmpb(DisasContext *ctx, arg_cmpb *a)
98cd9ca7 3022{
01afb7be
RH
3023 nullify_over(ctx);
3024 return do_cmpb(ctx, a->r2, load_gpr(ctx, a->r1), a->c, a->f, a->n, a->disp);
3025}
98cd9ca7 3026
01afb7be
RH
3027static bool trans_cmpbi(DisasContext *ctx, arg_cmpbi *a)
3028{
98cd9ca7 3029 nullify_over(ctx);
d4e58033 3030 return do_cmpb(ctx, a->r, tcg_constant_reg(a->i), a->c, a->f, a->n, a->disp);
01afb7be
RH
3031}
3032
3033static bool do_addb(DisasContext *ctx, unsigned r, TCGv_reg in1,
3034 unsigned c, unsigned f, unsigned n, int disp)
3035{
bdcccc17 3036 TCGv_reg dest, in2, sv, cb_cond;
01afb7be 3037 DisasCond cond;
bdcccc17 3038 bool d = false;
98cd9ca7 3039
98cd9ca7 3040 in2 = load_gpr(ctx, r);
43675d20 3041 dest = tcg_temp_new();
f764718d 3042 sv = NULL;
bdcccc17 3043 cb_cond = NULL;
98cd9ca7 3044
b47a4a02 3045 if (cond_need_cb(c)) {
bdcccc17
RH
3046 TCGv_reg cb = tcg_temp_new();
3047 TCGv_reg cb_msb = tcg_temp_new();
3048
eaa3783b
RH
3049 tcg_gen_movi_reg(cb_msb, 0);
3050 tcg_gen_add2_reg(dest, cb_msb, in1, cb_msb, in2, cb_msb);
bdcccc17
RH
3051 tcg_gen_xor_reg(cb, in1, in2);
3052 tcg_gen_xor_reg(cb, cb, dest);
3053 cb_cond = get_carry(ctx, d, cb, cb_msb);
b47a4a02 3054 } else {
eaa3783b 3055 tcg_gen_add_reg(dest, in1, in2);
b47a4a02
SS
3056 }
3057 if (cond_need_sv(c)) {
98cd9ca7 3058 sv = do_add_sv(ctx, dest, in1, in2);
98cd9ca7
RH
3059 }
3060
bdcccc17 3061 cond = do_cond(c * 2 + f, dest, cb_cond, sv);
43675d20 3062 save_gpr(ctx, r, dest);
01afb7be 3063 return do_cbranch(ctx, disp, n, &cond);
98cd9ca7
RH
3064}
3065
01afb7be
RH
3066static bool trans_addb(DisasContext *ctx, arg_addb *a)
3067{
3068 nullify_over(ctx);
3069 return do_addb(ctx, a->r2, load_gpr(ctx, a->r1), a->c, a->f, a->n, a->disp);
3070}
3071
3072static bool trans_addbi(DisasContext *ctx, arg_addbi *a)
3073{
3074 nullify_over(ctx);
d4e58033 3075 return do_addb(ctx, a->r, tcg_constant_reg(a->i), a->c, a->f, a->n, a->disp);
01afb7be
RH
3076}
3077
3078static bool trans_bb_sar(DisasContext *ctx, arg_bb_sar *a)
98cd9ca7 3079{
eaa3783b 3080 TCGv_reg tmp, tcg_r;
98cd9ca7 3081 DisasCond cond;
1e9ab9fb 3082 bool d = false;
98cd9ca7
RH
3083
3084 nullify_over(ctx);
3085
3086 tmp = tcg_temp_new();
01afb7be 3087 tcg_r = load_gpr(ctx, a->r);
1e9ab9fb
RH
3088 if (cond_need_ext(ctx, d)) {
3089 /* Force shift into [32,63] */
3090 tcg_gen_ori_reg(tmp, cpu_sar, 32);
3091 tcg_gen_shl_reg(tmp, tcg_r, tmp);
3092 } else {
3093 tcg_gen_shl_reg(tmp, tcg_r, cpu_sar);
3094 }
98cd9ca7 3095
1e9ab9fb 3096 cond = cond_make_0_tmp(a->c ? TCG_COND_GE : TCG_COND_LT, tmp);
01afb7be 3097 return do_cbranch(ctx, a->disp, a->n, &cond);
98cd9ca7
RH
3098}
3099
01afb7be
RH
3100static bool trans_bb_imm(DisasContext *ctx, arg_bb_imm *a)
3101{
3102 TCGv_reg tmp, tcg_r;
3103 DisasCond cond;
1e9ab9fb
RH
3104 bool d = false;
3105 int p;
01afb7be
RH
3106
3107 nullify_over(ctx);
3108
3109 tmp = tcg_temp_new();
3110 tcg_r = load_gpr(ctx, a->r);
1e9ab9fb
RH
3111 p = a->p | (cond_need_ext(ctx, d) ? 32 : 0);
3112 tcg_gen_shli_reg(tmp, tcg_r, p);
01afb7be
RH
3113
3114 cond = cond_make_0(a->c ? TCG_COND_GE : TCG_COND_LT, tmp);
01afb7be
RH
3115 return do_cbranch(ctx, a->disp, a->n, &cond);
3116}
3117
3118static bool trans_movb(DisasContext *ctx, arg_movb *a)
98cd9ca7 3119{
eaa3783b 3120 TCGv_reg dest;
98cd9ca7
RH
3121 DisasCond cond;
3122
3123 nullify_over(ctx);
3124
01afb7be
RH
3125 dest = dest_gpr(ctx, a->r2);
3126 if (a->r1 == 0) {
eaa3783b 3127 tcg_gen_movi_reg(dest, 0);
98cd9ca7 3128 } else {
01afb7be 3129 tcg_gen_mov_reg(dest, cpu_gr[a->r1]);
98cd9ca7
RH
3130 }
3131
01afb7be
RH
3132 cond = do_sed_cond(a->c, dest);
3133 return do_cbranch(ctx, a->disp, a->n, &cond);
3134}
3135
3136static bool trans_movbi(DisasContext *ctx, arg_movbi *a)
3137{
3138 TCGv_reg dest;
3139 DisasCond cond;
3140
3141 nullify_over(ctx);
3142
3143 dest = dest_gpr(ctx, a->r);
3144 tcg_gen_movi_reg(dest, a->i);
3145
3146 cond = do_sed_cond(a->c, dest);
3147 return do_cbranch(ctx, a->disp, a->n, &cond);
98cd9ca7
RH
3148}
3149
30878590 3150static bool trans_shrpw_sar(DisasContext *ctx, arg_shrpw_sar *a)
0b1347d2 3151{
eaa3783b 3152 TCGv_reg dest;
0b1347d2 3153
30878590 3154 if (a->c) {
0b1347d2
RH
3155 nullify_over(ctx);
3156 }
3157
30878590
RH
3158 dest = dest_gpr(ctx, a->t);
3159 if (a->r1 == 0) {
3160 tcg_gen_ext32u_reg(dest, load_gpr(ctx, a->r2));
eaa3783b 3161 tcg_gen_shr_reg(dest, dest, cpu_sar);
30878590 3162 } else if (a->r1 == a->r2) {
0b1347d2 3163 TCGv_i32 t32 = tcg_temp_new_i32();
e1d635e8
RH
3164 TCGv_i32 s32 = tcg_temp_new_i32();
3165
30878590 3166 tcg_gen_trunc_reg_i32(t32, load_gpr(ctx, a->r2));
e1d635e8
RH
3167 tcg_gen_trunc_reg_i32(s32, cpu_sar);
3168 tcg_gen_rotr_i32(t32, t32, s32);
eaa3783b 3169 tcg_gen_extu_i32_reg(dest, t32);
0b1347d2
RH
3170 } else {
3171 TCGv_i64 t = tcg_temp_new_i64();
3172 TCGv_i64 s = tcg_temp_new_i64();
3173
30878590 3174 tcg_gen_concat_reg_i64(t, load_gpr(ctx, a->r2), load_gpr(ctx, a->r1));
eaa3783b 3175 tcg_gen_extu_reg_i64(s, cpu_sar);
0b1347d2 3176 tcg_gen_shr_i64(t, t, s);
eaa3783b 3177 tcg_gen_trunc_i64_reg(dest, t);
0b1347d2 3178 }
30878590 3179 save_gpr(ctx, a->t, dest);
0b1347d2
RH
3180
3181 /* Install the new nullification. */
3182 cond_free(&ctx->null_cond);
30878590
RH
3183 if (a->c) {
3184 ctx->null_cond = do_sed_cond(a->c, dest);
0b1347d2 3185 }
31234768 3186 return nullify_end(ctx);
0b1347d2
RH
3187}
3188
30878590 3189static bool trans_shrpw_imm(DisasContext *ctx, arg_shrpw_imm *a)
0b1347d2 3190{
30878590 3191 unsigned sa = 31 - a->cpos;
eaa3783b 3192 TCGv_reg dest, t2;
0b1347d2 3193
30878590 3194 if (a->c) {
0b1347d2
RH
3195 nullify_over(ctx);
3196 }
3197
30878590
RH
3198 dest = dest_gpr(ctx, a->t);
3199 t2 = load_gpr(ctx, a->r2);
05bfd4db
RH
3200 if (a->r1 == 0) {
3201 tcg_gen_extract_reg(dest, t2, sa, 32 - sa);
3202 } else if (TARGET_REGISTER_BITS == 32) {
3203 tcg_gen_extract2_reg(dest, t2, cpu_gr[a->r1], sa);
3204 } else if (a->r1 == a->r2) {
0b1347d2 3205 TCGv_i32 t32 = tcg_temp_new_i32();
eaa3783b 3206 tcg_gen_trunc_reg_i32(t32, t2);
0b1347d2 3207 tcg_gen_rotri_i32(t32, t32, sa);
eaa3783b 3208 tcg_gen_extu_i32_reg(dest, t32);
0b1347d2 3209 } else {
05bfd4db
RH
3210 TCGv_i64 t64 = tcg_temp_new_i64();
3211 tcg_gen_concat_reg_i64(t64, t2, cpu_gr[a->r1]);
3212 tcg_gen_shri_i64(t64, t64, sa);
3213 tcg_gen_trunc_i64_reg(dest, t64);
0b1347d2 3214 }
30878590 3215 save_gpr(ctx, a->t, dest);
0b1347d2
RH
3216
3217 /* Install the new nullification. */
3218 cond_free(&ctx->null_cond);
30878590
RH
3219 if (a->c) {
3220 ctx->null_cond = do_sed_cond(a->c, dest);
0b1347d2 3221 }
31234768 3222 return nullify_end(ctx);
0b1347d2
RH
3223}
3224
30878590 3225static bool trans_extrw_sar(DisasContext *ctx, arg_extrw_sar *a)
0b1347d2 3226{
30878590 3227 unsigned len = 32 - a->clen;
eaa3783b 3228 TCGv_reg dest, src, tmp;
0b1347d2 3229
30878590 3230 if (a->c) {
0b1347d2
RH
3231 nullify_over(ctx);
3232 }
3233
30878590
RH
3234 dest = dest_gpr(ctx, a->t);
3235 src = load_gpr(ctx, a->r);
0b1347d2
RH
3236 tmp = tcg_temp_new();
3237
3238 /* Recall that SAR is using big-endian bit numbering. */
d781cb77
RH
3239 tcg_gen_andi_reg(tmp, cpu_sar, 31);
3240 tcg_gen_xori_reg(tmp, tmp, 31);
3241
30878590 3242 if (a->se) {
eaa3783b
RH
3243 tcg_gen_sar_reg(dest, src, tmp);
3244 tcg_gen_sextract_reg(dest, dest, 0, len);
0b1347d2 3245 } else {
eaa3783b
RH
3246 tcg_gen_shr_reg(dest, src, tmp);
3247 tcg_gen_extract_reg(dest, dest, 0, len);
0b1347d2 3248 }
30878590 3249 save_gpr(ctx, a->t, dest);
0b1347d2
RH
3250
3251 /* Install the new nullification. */
3252 cond_free(&ctx->null_cond);
30878590
RH
3253 if (a->c) {
3254 ctx->null_cond = do_sed_cond(a->c, dest);
0b1347d2 3255 }
31234768 3256 return nullify_end(ctx);
0b1347d2
RH
3257}
3258
30878590 3259static bool trans_extrw_imm(DisasContext *ctx, arg_extrw_imm *a)
0b1347d2 3260{
30878590
RH
3261 unsigned len = 32 - a->clen;
3262 unsigned cpos = 31 - a->pos;
eaa3783b 3263 TCGv_reg dest, src;
0b1347d2 3264
30878590 3265 if (a->c) {
0b1347d2
RH
3266 nullify_over(ctx);
3267 }
3268
30878590
RH
3269 dest = dest_gpr(ctx, a->t);
3270 src = load_gpr(ctx, a->r);
3271 if (a->se) {
eaa3783b 3272 tcg_gen_sextract_reg(dest, src, cpos, len);
0b1347d2 3273 } else {
eaa3783b 3274 tcg_gen_extract_reg(dest, src, cpos, len);
0b1347d2 3275 }
30878590 3276 save_gpr(ctx, a->t, dest);
0b1347d2
RH
3277
3278 /* Install the new nullification. */
3279 cond_free(&ctx->null_cond);
30878590
RH
3280 if (a->c) {
3281 ctx->null_cond = do_sed_cond(a->c, dest);
0b1347d2 3282 }
31234768 3283 return nullify_end(ctx);
0b1347d2
RH
3284}
3285
30878590 3286static bool trans_depwi_imm(DisasContext *ctx, arg_depwi_imm *a)
0b1347d2 3287{
30878590 3288 unsigned len = 32 - a->clen;
eaa3783b
RH
3289 target_sreg mask0, mask1;
3290 TCGv_reg dest;
0b1347d2 3291
30878590 3292 if (a->c) {
0b1347d2
RH
3293 nullify_over(ctx);
3294 }
30878590
RH
3295 if (a->cpos + len > 32) {
3296 len = 32 - a->cpos;
0b1347d2
RH
3297 }
3298
30878590
RH
3299 dest = dest_gpr(ctx, a->t);
3300 mask0 = deposit64(0, a->cpos, len, a->i);
3301 mask1 = deposit64(-1, a->cpos, len, a->i);
0b1347d2 3302
30878590
RH
3303 if (a->nz) {
3304 TCGv_reg src = load_gpr(ctx, a->t);
0b1347d2 3305 if (mask1 != -1) {
eaa3783b 3306 tcg_gen_andi_reg(dest, src, mask1);
0b1347d2
RH
3307 src = dest;
3308 }
eaa3783b 3309 tcg_gen_ori_reg(dest, src, mask0);
0b1347d2 3310 } else {
eaa3783b 3311 tcg_gen_movi_reg(dest, mask0);
0b1347d2 3312 }
30878590 3313 save_gpr(ctx, a->t, dest);
0b1347d2
RH
3314
3315 /* Install the new nullification. */
3316 cond_free(&ctx->null_cond);
30878590
RH
3317 if (a->c) {
3318 ctx->null_cond = do_sed_cond(a->c, dest);
0b1347d2 3319 }
31234768 3320 return nullify_end(ctx);
0b1347d2
RH
3321}
3322
30878590 3323static bool trans_depw_imm(DisasContext *ctx, arg_depw_imm *a)
0b1347d2 3324{
30878590
RH
3325 unsigned rs = a->nz ? a->t : 0;
3326 unsigned len = 32 - a->clen;
eaa3783b 3327 TCGv_reg dest, val;
0b1347d2 3328
30878590 3329 if (a->c) {
0b1347d2
RH
3330 nullify_over(ctx);
3331 }
30878590
RH
3332 if (a->cpos + len > 32) {
3333 len = 32 - a->cpos;
0b1347d2
RH
3334 }
3335
30878590
RH
3336 dest = dest_gpr(ctx, a->t);
3337 val = load_gpr(ctx, a->r);
0b1347d2 3338 if (rs == 0) {
30878590 3339 tcg_gen_deposit_z_reg(dest, val, a->cpos, len);
0b1347d2 3340 } else {
30878590 3341 tcg_gen_deposit_reg(dest, cpu_gr[rs], val, a->cpos, len);
0b1347d2 3342 }
30878590 3343 save_gpr(ctx, a->t, dest);
0b1347d2
RH
3344
3345 /* Install the new nullification. */
3346 cond_free(&ctx->null_cond);
30878590
RH
3347 if (a->c) {
3348 ctx->null_cond = do_sed_cond(a->c, dest);
0b1347d2 3349 }
31234768 3350 return nullify_end(ctx);
0b1347d2
RH
3351}
3352
30878590
RH
3353static bool do_depw_sar(DisasContext *ctx, unsigned rt, unsigned c,
3354 unsigned nz, unsigned clen, TCGv_reg val)
0b1347d2 3355{
0b1347d2
RH
3356 unsigned rs = nz ? rt : 0;
3357 unsigned len = 32 - clen;
30878590 3358 TCGv_reg mask, tmp, shift, dest;
0b1347d2
RH
3359 unsigned msb = 1U << (len - 1);
3360
0b1347d2
RH
3361 dest = dest_gpr(ctx, rt);
3362 shift = tcg_temp_new();
3363 tmp = tcg_temp_new();
3364
3365 /* Convert big-endian bit numbering in SAR to left-shift. */
d781cb77
RH
3366 tcg_gen_andi_reg(shift, cpu_sar, 31);
3367 tcg_gen_xori_reg(shift, shift, 31);
0b1347d2 3368
0992a930
RH
3369 mask = tcg_temp_new();
3370 tcg_gen_movi_reg(mask, msb + (msb - 1));
eaa3783b 3371 tcg_gen_and_reg(tmp, val, mask);
0b1347d2 3372 if (rs) {
eaa3783b
RH
3373 tcg_gen_shl_reg(mask, mask, shift);
3374 tcg_gen_shl_reg(tmp, tmp, shift);
3375 tcg_gen_andc_reg(dest, cpu_gr[rs], mask);
3376 tcg_gen_or_reg(dest, dest, tmp);
0b1347d2 3377 } else {
eaa3783b 3378 tcg_gen_shl_reg(dest, tmp, shift);
0b1347d2 3379 }
0b1347d2
RH
3380 save_gpr(ctx, rt, dest);
3381
3382 /* Install the new nullification. */
3383 cond_free(&ctx->null_cond);
3384 if (c) {
3385 ctx->null_cond = do_sed_cond(c, dest);
3386 }
31234768 3387 return nullify_end(ctx);
0b1347d2
RH
3388}
3389
30878590
RH
3390static bool trans_depw_sar(DisasContext *ctx, arg_depw_sar *a)
3391{
a6deecce
SS
3392 if (a->c) {
3393 nullify_over(ctx);
3394 }
30878590
RH
3395 return do_depw_sar(ctx, a->t, a->c, a->nz, a->clen, load_gpr(ctx, a->r));
3396}
3397
3398static bool trans_depwi_sar(DisasContext *ctx, arg_depwi_sar *a)
3399{
a6deecce
SS
3400 if (a->c) {
3401 nullify_over(ctx);
3402 }
d4e58033 3403 return do_depw_sar(ctx, a->t, a->c, a->nz, a->clen, tcg_constant_reg(a->i));
30878590 3404}
0b1347d2 3405
8340f534 3406static bool trans_be(DisasContext *ctx, arg_be *a)
98cd9ca7 3407{
660eefe1 3408 TCGv_reg tmp;
98cd9ca7 3409
c301f34e 3410#ifdef CONFIG_USER_ONLY
98cd9ca7
RH
3411 /* ??? It seems like there should be a good way of using
3412 "be disp(sr2, r0)", the canonical gateway entry mechanism
3413 to our advantage. But that appears to be inconvenient to
3414 manage along side branch delay slots. Therefore we handle
3415 entry into the gateway page via absolute address. */
98cd9ca7
RH
3416 /* Since we don't implement spaces, just branch. Do notice the special
3417 case of "be disp(*,r0)" using a direct branch to disp, so that we can
3418 goto_tb to the TB containing the syscall. */
8340f534
RH
3419 if (a->b == 0) {
3420 return do_dbranch(ctx, a->disp, a->l, a->n);
98cd9ca7 3421 }
c301f34e 3422#else
c301f34e 3423 nullify_over(ctx);
660eefe1
RH
3424#endif
3425
e12c6309 3426 tmp = tcg_temp_new();
8340f534 3427 tcg_gen_addi_reg(tmp, load_gpr(ctx, a->b), a->disp);
660eefe1 3428 tmp = do_ibranch_priv(ctx, tmp);
c301f34e
RH
3429
3430#ifdef CONFIG_USER_ONLY
8340f534 3431 return do_ibranch(ctx, tmp, a->l, a->n);
c301f34e
RH
3432#else
3433 TCGv_i64 new_spc = tcg_temp_new_i64();
3434
8340f534
RH
3435 load_spr(ctx, new_spc, a->sp);
3436 if (a->l) {
741322f4 3437 copy_iaoq_entry(ctx, cpu_gr[31], ctx->iaoq_n, ctx->iaoq_n_var);
c301f34e
RH
3438 tcg_gen_mov_i64(cpu_sr[0], cpu_iasq_f);
3439 }
8340f534 3440 if (a->n && use_nullify_skip(ctx)) {
c301f34e
RH
3441 tcg_gen_mov_reg(cpu_iaoq_f, tmp);
3442 tcg_gen_addi_reg(cpu_iaoq_b, cpu_iaoq_f, 4);
3443 tcg_gen_mov_i64(cpu_iasq_f, new_spc);
3444 tcg_gen_mov_i64(cpu_iasq_b, cpu_iasq_f);
3445 } else {
741322f4 3446 copy_iaoq_entry(ctx, cpu_iaoq_f, ctx->iaoq_b, cpu_iaoq_b);
c301f34e
RH
3447 if (ctx->iaoq_b == -1) {
3448 tcg_gen_mov_i64(cpu_iasq_f, cpu_iasq_b);
3449 }
3450 tcg_gen_mov_reg(cpu_iaoq_b, tmp);
3451 tcg_gen_mov_i64(cpu_iasq_b, new_spc);
8340f534 3452 nullify_set(ctx, a->n);
c301f34e 3453 }
c301f34e 3454 tcg_gen_lookup_and_goto_ptr();
31234768
RH
3455 ctx->base.is_jmp = DISAS_NORETURN;
3456 return nullify_end(ctx);
c301f34e 3457#endif
98cd9ca7
RH
3458}
3459
8340f534 3460static bool trans_bl(DisasContext *ctx, arg_bl *a)
98cd9ca7 3461{
8340f534 3462 return do_dbranch(ctx, iaoq_dest(ctx, a->disp), a->l, a->n);
98cd9ca7
RH
3463}
3464
8340f534 3465static bool trans_b_gate(DisasContext *ctx, arg_b_gate *a)
43e05652 3466{
8340f534 3467 target_ureg dest = iaoq_dest(ctx, a->disp);
43e05652 3468
6e5f5300
SS
3469 nullify_over(ctx);
3470
43e05652
RH
3471 /* Make sure the caller hasn't done something weird with the queue.
3472 * ??? This is not quite the same as the PSW[B] bit, which would be
3473 * expensive to track. Real hardware will trap for
3474 * b gateway
3475 * b gateway+4 (in delay slot of first branch)
3476 * However, checking for a non-sequential instruction queue *will*
3477 * diagnose the security hole
3478 * b gateway
3479 * b evil
3480 * in which instructions at evil would run with increased privs.
3481 */
3482 if (ctx->iaoq_b == -1 || ctx->iaoq_b != ctx->iaoq_f + 4) {
3483 return gen_illegal(ctx);
3484 }
3485
3486#ifndef CONFIG_USER_ONLY
3487 if (ctx->tb_flags & PSW_C) {
b77af26e 3488 CPUHPPAState *env = cpu_env(ctx->cs);
43e05652
RH
3489 int type = hppa_artype_for_page(env, ctx->base.pc_next);
3490 /* If we could not find a TLB entry, then we need to generate an
3491 ITLB miss exception so the kernel will provide it.
3492 The resulting TLB fill operation will invalidate this TB and
3493 we will re-translate, at which point we *will* be able to find
3494 the TLB entry and determine if this is in fact a gateway page. */
3495 if (type < 0) {
31234768
RH
3496 gen_excp(ctx, EXCP_ITLB_MISS);
3497 return true;
43e05652
RH
3498 }
3499 /* No change for non-gateway pages or for priv decrease. */
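        /* Per the check below, access-rights types 4..7 denote gateway
           pages promoting to privilege levels 0..3; the new level is
           deposited into the two low bits of the branch offset, which is
           where the IAOQ carries the privilege level. */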
3500 if (type >= 4 && type - 4 < ctx->privilege) {
3501 dest = deposit32(dest, 0, 2, type - 4);
3502 }
3503 } else {
3504 dest &= -4; /* priv = 0 */
3505 }
3506#endif
3507
6e5f5300
SS
3508 if (a->l) {
3509 TCGv_reg tmp = dest_gpr(ctx, a->l);
3510 if (ctx->privilege < 3) {
3511 tcg_gen_andi_reg(tmp, tmp, -4);
3512 }
3513 tcg_gen_ori_reg(tmp, tmp, ctx->privilege);
3514 save_gpr(ctx, a->l, tmp);
3515 }
3516
3517 return do_dbranch(ctx, dest, 0, a->n);
43e05652
RH
3518}
3519
8340f534 3520static bool trans_blr(DisasContext *ctx, arg_blr *a)
98cd9ca7 3521{
b35aec85 3522 if (a->x) {
e12c6309 3523 TCGv_reg tmp = tcg_temp_new();
b35aec85
RH
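        /* The branch target computed here is IAOQ_Front + 8 + GR[x] * 8. */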
3524 tcg_gen_shli_reg(tmp, load_gpr(ctx, a->x), 3);
3525 tcg_gen_addi_reg(tmp, tmp, ctx->iaoq_f + 8);
3526 /* The computation here never changes privilege level. */
3527 return do_ibranch(ctx, tmp, a->l, a->n);
3528 } else {
3529 /* BLR R0,RX is a good way to load PC+8 into RX. */
3530 return do_dbranch(ctx, ctx->iaoq_f + 8, a->l, a->n);
3531 }
98cd9ca7
RH
3532}
3533
8340f534 3534static bool trans_bv(DisasContext *ctx, arg_bv *a)
98cd9ca7 3535{
eaa3783b 3536 TCGv_reg dest;
98cd9ca7 3537
8340f534
RH
3538 if (a->x == 0) {
3539 dest = load_gpr(ctx, a->b);
98cd9ca7 3540 } else {
e12c6309 3541 dest = tcg_temp_new();
8340f534
RH
3542 tcg_gen_shli_reg(dest, load_gpr(ctx, a->x), 3);
3543 tcg_gen_add_reg(dest, dest, load_gpr(ctx, a->b));
98cd9ca7 3544 }
660eefe1 3545 dest = do_ibranch_priv(ctx, dest);
8340f534 3546 return do_ibranch(ctx, dest, 0, a->n);
98cd9ca7
RH
3547}
3548
8340f534 3549static bool trans_bve(DisasContext *ctx, arg_bve *a)
98cd9ca7 3550{
660eefe1 3551 TCGv_reg dest;
98cd9ca7 3552
c301f34e 3553#ifdef CONFIG_USER_ONLY
8340f534
RH
3554 dest = do_ibranch_priv(ctx, load_gpr(ctx, a->b));
3555 return do_ibranch(ctx, dest, a->l, a->n);
c301f34e
RH
3556#else
3557 nullify_over(ctx);
8340f534 3558 dest = do_ibranch_priv(ctx, load_gpr(ctx, a->b));
c301f34e 3559
741322f4 3560 copy_iaoq_entry(ctx, cpu_iaoq_f, ctx->iaoq_b, cpu_iaoq_b);
c301f34e
RH
3561 if (ctx->iaoq_b == -1) {
3562 tcg_gen_mov_i64(cpu_iasq_f, cpu_iasq_b);
3563 }
741322f4 3564 copy_iaoq_entry(ctx, cpu_iaoq_b, -1, dest);
c301f34e 3565 tcg_gen_mov_i64(cpu_iasq_b, space_select(ctx, 0, dest));
8340f534 3566 if (a->l) {
741322f4 3567 copy_iaoq_entry(ctx, cpu_gr[a->l], ctx->iaoq_n, ctx->iaoq_n_var);
c301f34e 3568 }
8340f534 3569 nullify_set(ctx, a->n);
c301f34e 3570 tcg_gen_lookup_and_goto_ptr();
31234768
RH
3571 ctx->base.is_jmp = DISAS_NORETURN;
3572 return nullify_end(ctx);
c301f34e 3573#endif
98cd9ca7
RH
3574}
3575
1ca74648
RH
3576/*
3577 * Float class 0
3578 */
ebe9383c 3579
1ca74648 3580static void gen_fcpy_f(TCGv_i32 dst, TCGv_env unused, TCGv_i32 src)
ebe9383c 3581{
1ca74648 3582 tcg_gen_mov_i32(dst, src);
ebe9383c
RH
3583}
3584
59f8c04b
HD
3585static bool trans_fid_f(DisasContext *ctx, arg_fid_f *a)
3586{
a300dad3
RH
3587 uint64_t ret;
3588
3589 if (TARGET_REGISTER_BITS == 64) {
3590 ret = 0x13080000000000ULL; /* PA8700 (PCX-W2) */
3591 } else {
3592 ret = 0x0f080000000000ULL; /* PA7300LC (PCX-L2) */
3593 }
3594
59f8c04b 3595 nullify_over(ctx);
a300dad3 3596 save_frd(0, tcg_constant_i64(ret));
59f8c04b
HD
3597 return nullify_end(ctx);
3598}
3599
1ca74648 3600static bool trans_fcpy_f(DisasContext *ctx, arg_fclass01 *a)
ebe9383c 3601{
1ca74648 3602 return do_fop_wew(ctx, a->t, a->r, gen_fcpy_f);
ebe9383c
RH
3603}
3604
1ca74648 3605static void gen_fcpy_d(TCGv_i64 dst, TCGv_env unused, TCGv_i64 src)
ebe9383c 3606{
1ca74648 3607 tcg_gen_mov_i64(dst, src);
ebe9383c
RH
3608}
3609
1ca74648 3610static bool trans_fcpy_d(DisasContext *ctx, arg_fclass01 *a)
ebe9383c 3611{
1ca74648 3612 return do_fop_ded(ctx, a->t, a->r, gen_fcpy_d);
ebe9383c
RH
3613}
3614
1ca74648 3615static void gen_fabs_f(TCGv_i32 dst, TCGv_env unused, TCGv_i32 src)
ebe9383c 3616{
1ca74648 3617 tcg_gen_andi_i32(dst, src, INT32_MAX);
ebe9383c
RH
3618}
3619
1ca74648 3620static bool trans_fabs_f(DisasContext *ctx, arg_fclass01 *a)
ebe9383c 3621{
1ca74648 3622 return do_fop_wew(ctx, a->t, a->r, gen_fabs_f);
ebe9383c
RH
3623}
3624
1ca74648 3625static void gen_fabs_d(TCGv_i64 dst, TCGv_env unused, TCGv_i64 src)
ebe9383c 3626{
1ca74648 3627 tcg_gen_andi_i64(dst, src, INT64_MAX);
ebe9383c
RH
3628}
3629
1ca74648 3630static bool trans_fabs_d(DisasContext *ctx, arg_fclass01 *a)
ebe9383c 3631{
1ca74648 3632 return do_fop_ded(ctx, a->t, a->r, gen_fabs_d);
ebe9383c
RH
3633}
3634
1ca74648 3635static bool trans_fsqrt_f(DisasContext *ctx, arg_fclass01 *a)
ebe9383c 3636{
1ca74648 3637 return do_fop_wew(ctx, a->t, a->r, gen_helper_fsqrt_s);
ebe9383c
RH
3638}
3639
1ca74648 3640static bool trans_fsqrt_d(DisasContext *ctx, arg_fclass01 *a)
ebe9383c 3641{
1ca74648 3642 return do_fop_ded(ctx, a->t, a->r, gen_helper_fsqrt_d);
ebe9383c
RH
3643}
3644
1ca74648 3645static bool trans_frnd_f(DisasContext *ctx, arg_fclass01 *a)
ebe9383c 3646{
1ca74648 3647 return do_fop_wew(ctx, a->t, a->r, gen_helper_frnd_s);
ebe9383c
RH
3648}
3649
1ca74648 3650static bool trans_frnd_d(DisasContext *ctx, arg_fclass01 *a)
ebe9383c 3651{
1ca74648 3652 return do_fop_ded(ctx, a->t, a->r, gen_helper_frnd_d);
ebe9383c
RH
3653}
3654
1ca74648 3655static void gen_fneg_f(TCGv_i32 dst, TCGv_env unused, TCGv_i32 src)
ebe9383c 3656{
1ca74648 3657 tcg_gen_xori_i32(dst, src, INT32_MIN);
ebe9383c
RH
3658}
3659
1ca74648 3660static bool trans_fneg_f(DisasContext *ctx, arg_fclass01 *a)
ebe9383c 3661{
1ca74648 3662 return do_fop_wew(ctx, a->t, a->r, gen_fneg_f);
ebe9383c
RH
3663}
3664
3665static void gen_fneg_d(TCGv_i64 dst, TCGv_env unused, TCGv_i64 src)
3666{
3667 tcg_gen_xori_i64(dst, src, INT64_MIN);
3668}
3669
1ca74648
RH
3670static bool trans_fneg_d(DisasContext *ctx, arg_fclass01 *a)
3671{
3672 return do_fop_ded(ctx, a->t, a->r, gen_fneg_d);
3673}
3674
3675static void gen_fnegabs_f(TCGv_i32 dst, TCGv_env unused, TCGv_i32 src)
ebe9383c
RH
3676{
3677 tcg_gen_ori_i32(dst, src, INT32_MIN);
3678}
3679
1ca74648
RH
3680static bool trans_fnegabs_f(DisasContext *ctx, arg_fclass01 *a)
3681{
3682 return do_fop_wew(ctx, a->t, a->r, gen_fnegabs_f);
3683}
3684
ebe9383c
RH
3685static void gen_fnegabs_d(TCGv_i64 dst, TCGv_env unused, TCGv_i64 src)
3686{
3687 tcg_gen_ori_i64(dst, src, INT64_MIN);
3688}
3689
1ca74648
RH
3690static bool trans_fnegabs_d(DisasContext *ctx, arg_fclass01 *a)
3691{
3692 return do_fop_ded(ctx, a->t, a->r, gen_fnegabs_d);
3693}
3694
3695/*
3696 * Float class 1
3697 */
3698
3699static bool trans_fcnv_d_f(DisasContext *ctx, arg_fclass01 *a)
3700{
3701 return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_d_s);
3702}
3703
3704static bool trans_fcnv_f_d(DisasContext *ctx, arg_fclass01 *a)
3705{
3706 return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_s_d);
3707}
3708
3709static bool trans_fcnv_w_f(DisasContext *ctx, arg_fclass01 *a)
3710{
3711 return do_fop_wew(ctx, a->t, a->r, gen_helper_fcnv_w_s);
3712}
3713
3714static bool trans_fcnv_q_f(DisasContext *ctx, arg_fclass01 *a)
3715{
3716 return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_dw_s);
3717}
3718
3719static bool trans_fcnv_w_d(DisasContext *ctx, arg_fclass01 *a)
3720{
3721 return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_w_d);
3722}
3723
3724static bool trans_fcnv_q_d(DisasContext *ctx, arg_fclass01 *a)
3725{
3726 return do_fop_ded(ctx, a->t, a->r, gen_helper_fcnv_dw_d);
3727}
3728
3729static bool trans_fcnv_f_w(DisasContext *ctx, arg_fclass01 *a)
3730{
3731 return do_fop_wew(ctx, a->t, a->r, gen_helper_fcnv_s_w);
3732}
3733
3734static bool trans_fcnv_d_w(DisasContext *ctx, arg_fclass01 *a)
3735{
3736 return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_d_w);
3737}
3738
3739static bool trans_fcnv_f_q(DisasContext *ctx, arg_fclass01 *a)
3740{
3741 return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_s_dw);
3742}
3743
3744static bool trans_fcnv_d_q(DisasContext *ctx, arg_fclass01 *a)
3745{
3746 return do_fop_ded(ctx, a->t, a->r, gen_helper_fcnv_d_dw);
3747}
3748
3749static bool trans_fcnv_t_f_w(DisasContext *ctx, arg_fclass01 *a)
3750{
3751 return do_fop_wew(ctx, a->t, a->r, gen_helper_fcnv_t_s_w);
3752}
3753
3754static bool trans_fcnv_t_d_w(DisasContext *ctx, arg_fclass01 *a)
3755{
3756 return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_t_d_w);
3757}
3758
3759static bool trans_fcnv_t_f_q(DisasContext *ctx, arg_fclass01 *a)
3760{
3761 return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_t_s_dw);
3762}
3763
3764static bool trans_fcnv_t_d_q(DisasContext *ctx, arg_fclass01 *a)
3765{
3766 return do_fop_ded(ctx, a->t, a->r, gen_helper_fcnv_t_d_dw);
3767}
3768
3769static bool trans_fcnv_uw_f(DisasContext *ctx, arg_fclass01 *a)
3770{
3771 return do_fop_wew(ctx, a->t, a->r, gen_helper_fcnv_uw_s);
3772}
3773
3774static bool trans_fcnv_uq_f(DisasContext *ctx, arg_fclass01 *a)
3775{
3776 return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_udw_s);
3777}
3778
3779static bool trans_fcnv_uw_d(DisasContext *ctx, arg_fclass01 *a)
3780{
3781 return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_uw_d);
3782}
3783
3784static bool trans_fcnv_uq_d(DisasContext *ctx, arg_fclass01 *a)
3785{
3786 return do_fop_ded(ctx, a->t, a->r, gen_helper_fcnv_udw_d);
3787}
3788
3789static bool trans_fcnv_f_uw(DisasContext *ctx, arg_fclass01 *a)
3790{
3791 return do_fop_wew(ctx, a->t, a->r, gen_helper_fcnv_s_uw);
3792}
3793
3794static bool trans_fcnv_d_uw(DisasContext *ctx, arg_fclass01 *a)
3795{
3796 return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_d_uw);
3797}
3798
3799static bool trans_fcnv_f_uq(DisasContext *ctx, arg_fclass01 *a)
3800{
3801 return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_s_udw);
3802}
3803
3804static bool trans_fcnv_d_uq(DisasContext *ctx, arg_fclass01 *a)
3805{
3806 return do_fop_ded(ctx, a->t, a->r, gen_helper_fcnv_d_udw);
3807}
3808
3809static bool trans_fcnv_t_f_uw(DisasContext *ctx, arg_fclass01 *a)
3810{
3811 return do_fop_wew(ctx, a->t, a->r, gen_helper_fcnv_t_s_uw);
3812}
3813
3814static bool trans_fcnv_t_d_uw(DisasContext *ctx, arg_fclass01 *a)
3815{
3816 return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_t_d_uw);
3817}
3818
3819static bool trans_fcnv_t_f_uq(DisasContext *ctx, arg_fclass01 *a)
3820{
3821 return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_t_s_udw);
3822}
3823
3824static bool trans_fcnv_t_d_uq(DisasContext *ctx, arg_fclass01 *a)
3825{
3826 return do_fop_ded(ctx, a->t, a->r, gen_helper_fcnv_t_d_udw);
3827}
3828
3829/*
3830 * Float class 2
3831 */
3832
3833static bool trans_fcmp_f(DisasContext *ctx, arg_fclass2 *a)
ebe9383c
RH
3834{
3835 TCGv_i32 ta, tb, tc, ty;
3836
3837 nullify_over(ctx);
3838
1ca74648
RH
3839 ta = load_frw0_i32(a->r1);
3840 tb = load_frw0_i32(a->r2);
29dd6f64
RH
3841 ty = tcg_constant_i32(a->y);
3842 tc = tcg_constant_i32(a->c);
ebe9383c 3843
ad75a51e 3844 gen_helper_fcmp_s(tcg_env, ta, tb, ty, tc);
ebe9383c 3845
1ca74648 3846 return nullify_end(ctx);
ebe9383c
RH
3847}
3848
1ca74648 3849static bool trans_fcmp_d(DisasContext *ctx, arg_fclass2 *a)
ebe9383c 3850{
ebe9383c
RH
3851 TCGv_i64 ta, tb;
3852 TCGv_i32 tc, ty;
3853
3854 nullify_over(ctx);
3855
1ca74648
RH
3856 ta = load_frd0(a->r1);
3857 tb = load_frd0(a->r2);
29dd6f64
RH
3858 ty = tcg_constant_i32(a->y);
3859 tc = tcg_constant_i32(a->c);
ebe9383c 3860
ad75a51e 3861 gen_helper_fcmp_d(tcg_env, ta, tb, ty, tc);
ebe9383c 3862
31234768 3863 return nullify_end(ctx);
ebe9383c
RH
3864}
3865
1ca74648 3866static bool trans_ftest(DisasContext *ctx, arg_ftest *a)
ebe9383c 3867{
eaa3783b 3868 TCGv_reg t;
ebe9383c
RH
3869
3870 nullify_over(ctx);
3871
e12c6309 3872 t = tcg_temp_new();
ad75a51e 3873 tcg_gen_ld32u_reg(t, tcg_env, offsetof(CPUHPPAState, fr0_shadow));
ebe9383c 3874
1ca74648
RH
3875 if (a->y == 1) {
3876 int mask;
3877 bool inv = false;
3878
3879 switch (a->c) {
3880 case 0: /* simple */
3881 tcg_gen_andi_reg(t, t, 0x4000000);
3882 ctx->null_cond = cond_make_0(TCG_COND_NE, t);
3883 goto done;
3884 case 2: /* rej */
3885 inv = true;
3886 /* fallthru */
3887 case 1: /* acc */
3888 mask = 0x43ff800;
3889 break;
3890 case 6: /* rej8 */
3891 inv = true;
3892 /* fallthru */
3893 case 5: /* acc8 */
3894 mask = 0x43f8000;
3895 break;
3896 case 9: /* acc6 */
3897 mask = 0x43e0000;
3898 break;
3899 case 13: /* acc4 */
3900 mask = 0x4380000;
3901 break;
3902 case 17: /* acc2 */
3903 mask = 0x4200000;
3904 break;
3905 default:
3906 gen_illegal(ctx);
3907 return true;
3908 }
3909 if (inv) {
d4e58033 3910 TCGv_reg c = tcg_constant_reg(mask);
1ca74648
RH
3911 tcg_gen_or_reg(t, t, c);
3912 ctx->null_cond = cond_make(TCG_COND_EQ, t, c);
3913 } else {
3914 tcg_gen_andi_reg(t, t, mask);
3915 ctx->null_cond = cond_make_0(TCG_COND_EQ, t);
3916 }
3917 } else {
3918 unsigned cbit = (a->y ^ 1) - 1;
3919
3920 tcg_gen_extract_reg(t, t, 21 - cbit, 1);
3921 ctx->null_cond = cond_make_0(TCG_COND_NE, t);
1ca74648
RH
3922 }
3923
3924 done:
31234768 3925 return nullify_end(ctx);
ebe9383c
RH
3926}
3927
1ca74648
RH
3928/*
 3929 * Float class 3
3930 */
3931
3932static bool trans_fadd_f(DisasContext *ctx, arg_fclass3 *a)
ebe9383c 3933{
1ca74648
RH
3934 return do_fop_weww(ctx, a->t, a->r1, a->r2, gen_helper_fadd_s);
3935}
ebe9383c 3936
1ca74648
RH
3937static bool trans_fadd_d(DisasContext *ctx, arg_fclass3 *a)
3938{
3939 return do_fop_dedd(ctx, a->t, a->r1, a->r2, gen_helper_fadd_d);
3940}
ebe9383c 3941
1ca74648
RH
3942static bool trans_fsub_f(DisasContext *ctx, arg_fclass3 *a)
3943{
3944 return do_fop_weww(ctx, a->t, a->r1, a->r2, gen_helper_fsub_s);
3945}
ebe9383c 3946
1ca74648
RH
3947static bool trans_fsub_d(DisasContext *ctx, arg_fclass3 *a)
3948{
3949 return do_fop_dedd(ctx, a->t, a->r1, a->r2, gen_helper_fsub_d);
ebe9383c
RH
3950}
3951
1ca74648 3952static bool trans_fmpy_f(DisasContext *ctx, arg_fclass3 *a)
ebe9383c 3953{
1ca74648
RH
3954 return do_fop_weww(ctx, a->t, a->r1, a->r2, gen_helper_fmpy_s);
3955}
3956
3957static bool trans_fmpy_d(DisasContext *ctx, arg_fclass3 *a)
3958{
3959 return do_fop_dedd(ctx, a->t, a->r1, a->r2, gen_helper_fmpy_d);
3960}
3961
3962static bool trans_fdiv_f(DisasContext *ctx, arg_fclass3 *a)
3963{
3964 return do_fop_weww(ctx, a->t, a->r1, a->r2, gen_helper_fdiv_s);
3965}
3966
3967static bool trans_fdiv_d(DisasContext *ctx, arg_fclass3 *a)
3968{
3969 return do_fop_dedd(ctx, a->t, a->r1, a->r2, gen_helper_fdiv_d);
3970}
3971
3972static bool trans_xmpyu(DisasContext *ctx, arg_xmpyu *a)
3973{
3974 TCGv_i64 x, y;
ebe9383c
RH
3975
3976 nullify_over(ctx);
3977
1ca74648
RH
3978 x = load_frw0_i64(a->r1);
3979 y = load_frw0_i64(a->r2);
3980 tcg_gen_mul_i64(x, x, y);
3981 save_frd(a->t, x);
ebe9383c 3982
31234768 3983 return nullify_end(ctx);
ebe9383c
RH
3984}
3985
ebe9383c
RH
3986/* Convert the fmpyadd single-precision register encodings to standard. */
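/* The 5-bit encoding maps values 0..15 to indices 16..31 (fr16..fr31) and
   values 16..31 to indices 48..63, i.e. the right-word halves of
   fr16..fr31. */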
3987static inline int fmpyadd_s_reg(unsigned r)
3988{
3989 return (r & 16) * 2 + 16 + (r & 15);
3990}
3991
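/* As the two do_fop_weww calls below show, FMPYADD/FMPYSUB issue two
   independent operations: tm = rm1 * rm2 and ta = ta +/- ra. */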
b1e2af57 3992static bool do_fmpyadd_s(DisasContext *ctx, arg_mpyadd *a, bool is_sub)
ebe9383c 3993{
b1e2af57
RH
3994 int tm = fmpyadd_s_reg(a->tm);
3995 int ra = fmpyadd_s_reg(a->ra);
3996 int ta = fmpyadd_s_reg(a->ta);
3997 int rm2 = fmpyadd_s_reg(a->rm2);
3998 int rm1 = fmpyadd_s_reg(a->rm1);
ebe9383c
RH
3999
4000 nullify_over(ctx);
4001
b1e2af57
RH
4002 do_fop_weww(ctx, tm, rm1, rm2, gen_helper_fmpy_s);
4003 do_fop_weww(ctx, ta, ta, ra,
4004 is_sub ? gen_helper_fsub_s : gen_helper_fadd_s);
ebe9383c 4005
31234768 4006 return nullify_end(ctx);
ebe9383c
RH
4007}
4008
b1e2af57
RH
4009static bool trans_fmpyadd_f(DisasContext *ctx, arg_mpyadd *a)
4010{
4011 return do_fmpyadd_s(ctx, a, false);
4012}
4013
4014static bool trans_fmpysub_f(DisasContext *ctx, arg_mpyadd *a)
4015{
4016 return do_fmpyadd_s(ctx, a, true);
4017}
4018
4019static bool do_fmpyadd_d(DisasContext *ctx, arg_mpyadd *a, bool is_sub)
4020{
4021 nullify_over(ctx);
4022
4023 do_fop_dedd(ctx, a->tm, a->rm1, a->rm2, gen_helper_fmpy_d);
4024 do_fop_dedd(ctx, a->ta, a->ta, a->ra,
4025 is_sub ? gen_helper_fsub_d : gen_helper_fadd_d);
4026
4027 return nullify_end(ctx);
4028}
4029
4030static bool trans_fmpyadd_d(DisasContext *ctx, arg_mpyadd *a)
4031{
4032 return do_fmpyadd_d(ctx, a, false);
4033}
4034
4035static bool trans_fmpysub_d(DisasContext *ctx, arg_mpyadd *a)
4036{
4037 return do_fmpyadd_d(ctx, a, true);
4038}
4039
c3bad4f8 4040static bool trans_fmpyfadd_f(DisasContext *ctx, arg_fmpyfadd_f *a)
ebe9383c 4041{
c3bad4f8 4042 TCGv_i32 x, y, z;
ebe9383c
RH
4043
4044 nullify_over(ctx);
c3bad4f8
RH
4045 x = load_frw0_i32(a->rm1);
4046 y = load_frw0_i32(a->rm2);
4047 z = load_frw0_i32(a->ra3);
ebe9383c 4048
c3bad4f8 4049 if (a->neg) {
ad75a51e 4050 gen_helper_fmpynfadd_s(x, tcg_env, x, y, z);
ebe9383c 4051 } else {
ad75a51e 4052 gen_helper_fmpyfadd_s(x, tcg_env, x, y, z);
ebe9383c
RH
4053 }
4054
c3bad4f8 4055 save_frw_i32(a->t, x);
31234768 4056 return nullify_end(ctx);
ebe9383c
RH
4057}
4058
c3bad4f8 4059static bool trans_fmpyfadd_d(DisasContext *ctx, arg_fmpyfadd_d *a)
ebe9383c 4060{
c3bad4f8 4061 TCGv_i64 x, y, z;
ebe9383c
RH
4062
4063 nullify_over(ctx);
c3bad4f8
RH
4064 x = load_frd0(a->rm1);
4065 y = load_frd0(a->rm2);
4066 z = load_frd0(a->ra3);
ebe9383c 4067
c3bad4f8 4068 if (a->neg) {
ad75a51e 4069 gen_helper_fmpynfadd_d(x, tcg_env, x, y, z);
ebe9383c 4070 } else {
ad75a51e 4071 gen_helper_fmpyfadd_d(x, tcg_env, x, y, z);
ebe9383c
RH
4072 }
4073
c3bad4f8 4074 save_frd(a->t, x);
31234768 4075 return nullify_end(ctx);
ebe9383c
RH
4076}
4077
15da177b
SS
4078static bool trans_diag(DisasContext *ctx, arg_diag *a)
4079{
cf6b28d4
HD
4080 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
4081#ifndef CONFIG_USER_ONLY
4082 if (a->i == 0x100) {
4083 /* emulate PDC BTLB, called by SeaBIOS-hppa */
ad75a51e
RH
4084 nullify_over(ctx);
4085 gen_helper_diag_btlb(tcg_env);
4086 return nullify_end(ctx);
cf6b28d4 4087 }
ad75a51e
RH
4088#endif
4089 qemu_log_mask(LOG_UNIMP, "DIAG opcode 0x%04x ignored\n", a->i);
4090 return true;
15da177b
SS
4091}
4092
b542683d 4093static void hppa_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
61766fe9 4094{
51b061fb 4095 DisasContext *ctx = container_of(dcbase, DisasContext, base);
f764718d 4096 int bound;
61766fe9 4097
51b061fb 4098 ctx->cs = cs;
494737b7 4099 ctx->tb_flags = ctx->base.tb->flags;
bd6243a3 4100 ctx->is_pa20 = hppa_is_pa20(cpu_env(cs));
3d68ee7b
RH
4101
4102#ifdef CONFIG_USER_ONLY
c01e5dfb 4103 ctx->privilege = MMU_IDX_TO_PRIV(MMU_USER_IDX);
3d68ee7b 4104 ctx->mmu_idx = MMU_USER_IDX;
c01e5dfb
HD
4105 ctx->iaoq_f = ctx->base.pc_first | ctx->privilege;
4106 ctx->iaoq_b = ctx->base.tb->cs_base | ctx->privilege;
217d1a5e 4107 ctx->unalign = (ctx->tb_flags & TB_FLAG_UNALIGN ? MO_UNALN : MO_ALIGN);
3d68ee7b 4108#else
494737b7 4109 ctx->privilege = (ctx->tb_flags >> TB_FLAG_PRIV_SHIFT) & 3;
bb67ec32
RH
4110 ctx->mmu_idx = (ctx->tb_flags & PSW_D
4111 ? PRIV_P_TO_MMU_IDX(ctx->privilege, ctx->tb_flags & PSW_P)
4112 : MMU_PHYS_IDX);
3d68ee7b 4113
c301f34e
RH
4114 /* Recover the IAOQ values from the GVA + PRIV. */
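    /* cs_base packs IASQ_Front in its high 32 bits and the signed
       IAOQ_Back - IAOQ_Front offset in its low 32 bits; a zero offset
       means the back address is not known statically (-1 below). */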
4115 uint64_t cs_base = ctx->base.tb->cs_base;
4116 uint64_t iasq_f = cs_base & ~0xffffffffull;
4117 int32_t diff = cs_base;
4118
4119 ctx->iaoq_f = (ctx->base.pc_first & ~iasq_f) + ctx->privilege;
4120 ctx->iaoq_b = (diff ? ctx->iaoq_f + diff : -1);
4121#endif
51b061fb 4122 ctx->iaoq_n = -1;
f764718d 4123 ctx->iaoq_n_var = NULL;
61766fe9 4124
3d68ee7b
RH
4125 /* Bound the number of instructions by those left on the page. */
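    /* For example, with 4 KiB pages a pc_first located 8 bytes before the
       end of its page yields bound == 2. */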
4126 bound = -(ctx->base.pc_first | TARGET_PAGE_MASK) / 4;
b542683d 4127 ctx->base.max_insns = MIN(ctx->base.max_insns, bound);
51b061fb 4128}
61766fe9 4129
51b061fb
RH
4130static void hppa_tr_tb_start(DisasContextBase *dcbase, CPUState *cs)
4131{
4132 DisasContext *ctx = container_of(dcbase, DisasContext, base);
61766fe9 4133
3d68ee7b 4134 /* Seed the nullification status from PSW[N], as saved in TB->FLAGS. */
51b061fb
RH
4135 ctx->null_cond = cond_make_f();
4136 ctx->psw_n_nonzero = false;
494737b7 4137 if (ctx->tb_flags & PSW_N) {
51b061fb
RH
4138 ctx->null_cond.c = TCG_COND_ALWAYS;
4139 ctx->psw_n_nonzero = true;
129e9cc3 4140 }
51b061fb
RH
4141 ctx->null_lab = NULL;
4142}
129e9cc3 4143
51b061fb
RH
4144static void hppa_tr_insn_start(DisasContextBase *dcbase, CPUState *cs)
4145{
4146 DisasContext *ctx = container_of(dcbase, DisasContext, base);
61766fe9 4147
51b061fb
RH
4148 tcg_gen_insn_start(ctx->iaoq_f, ctx->iaoq_b);
4149}
4150
51b061fb
RH
4151static void hppa_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs)
4152{
4153 DisasContext *ctx = container_of(dcbase, DisasContext, base);
b77af26e 4154 CPUHPPAState *env = cpu_env(cs);
51b061fb 4155 DisasJumpType ret;
51b061fb
RH
4156
4157 /* Execute one insn. */
ba1d0b44 4158#ifdef CONFIG_USER_ONLY
c301f34e 4159 if (ctx->base.pc_next < TARGET_PAGE_SIZE) {
31234768
RH
4160 do_page_zero(ctx);
4161 ret = ctx->base.is_jmp;
51b061fb 4162 assert(ret != DISAS_NEXT);
ba1d0b44
RH
4163 } else
4164#endif
4165 {
51b061fb
RH
4166 /* Always fetch the insn, even if nullified, so that we check
4167 the page permissions for execute. */
4e116893 4168 uint32_t insn = translator_ldl(env, &ctx->base, ctx->base.pc_next);
51b061fb
RH
4169
4170 /* Set up the IA queue for the next insn.
4171 This will be overwritten by a branch. */
4172 if (ctx->iaoq_b == -1) {
4173 ctx->iaoq_n = -1;
e12c6309 4174 ctx->iaoq_n_var = tcg_temp_new();
eaa3783b 4175 tcg_gen_addi_reg(ctx->iaoq_n_var, cpu_iaoq_b, 4);
7ad439df 4176 } else {
51b061fb 4177 ctx->iaoq_n = ctx->iaoq_b + 4;
f764718d 4178 ctx->iaoq_n_var = NULL;
61766fe9
RH
4179 }
4180
51b061fb
RH
4181 if (unlikely(ctx->null_cond.c == TCG_COND_ALWAYS)) {
4182 ctx->null_cond.c = TCG_COND_NEVER;
4183 ret = DISAS_NEXT;
4184 } else {
1a19da0d 4185 ctx->insn = insn;
31274b46
RH
4186 if (!decode(ctx, insn)) {
4187 gen_illegal(ctx);
4188 }
31234768 4189 ret = ctx->base.is_jmp;
51b061fb 4190 assert(ctx->null_lab == NULL);
61766fe9 4191 }
51b061fb 4192 }
61766fe9 4193
3d68ee7b
RH
4194 /* Advance the insn queue. Note that this check also detects
 4195 a privilege change within the instruction queue. */
51b061fb 4196 if (ret == DISAS_NEXT && ctx->iaoq_b != ctx->iaoq_f + 4) {
c301f34e
RH
4197 if (ctx->iaoq_b != -1 && ctx->iaoq_n != -1
4198 && use_goto_tb(ctx, ctx->iaoq_b)
4199 && (ctx->null_cond.c == TCG_COND_NEVER
4200 || ctx->null_cond.c == TCG_COND_ALWAYS)) {
51b061fb
RH
4201 nullify_set(ctx, ctx->null_cond.c == TCG_COND_ALWAYS);
4202 gen_goto_tb(ctx, 0, ctx->iaoq_b, ctx->iaoq_n);
31234768 4203 ctx->base.is_jmp = ret = DISAS_NORETURN;
51b061fb 4204 } else {
31234768 4205 ctx->base.is_jmp = ret = DISAS_IAQ_N_STALE;
c301f34e 4206 }
61766fe9 4207 }
51b061fb
RH
4208 ctx->iaoq_f = ctx->iaoq_b;
4209 ctx->iaoq_b = ctx->iaoq_n;
c301f34e 4210 ctx->base.pc_next += 4;
51b061fb 4211
c5d0aec2
RH
4212 switch (ret) {
4213 case DISAS_NORETURN:
4214 case DISAS_IAQ_N_UPDATED:
4215 break;
4216
4217 case DISAS_NEXT:
4218 case DISAS_IAQ_N_STALE:
4219 case DISAS_IAQ_N_STALE_EXIT:
4220 if (ctx->iaoq_f == -1) {
4221 tcg_gen_mov_reg(cpu_iaoq_f, cpu_iaoq_b);
741322f4 4222 copy_iaoq_entry(ctx, cpu_iaoq_b, ctx->iaoq_n, ctx->iaoq_n_var);
c301f34e 4223#ifndef CONFIG_USER_ONLY
c5d0aec2 4224 tcg_gen_mov_i64(cpu_iasq_f, cpu_iasq_b);
c301f34e 4225#endif
c5d0aec2
RH
4226 nullify_save(ctx);
4227 ctx->base.is_jmp = (ret == DISAS_IAQ_N_STALE_EXIT
4228 ? DISAS_EXIT
4229 : DISAS_IAQ_N_UPDATED);
4230 } else if (ctx->iaoq_b == -1) {
4231 tcg_gen_mov_reg(cpu_iaoq_b, ctx->iaoq_n_var);
4232 }
4233 break;
4234
4235 default:
4236 g_assert_not_reached();
51b061fb
RH
4237 }
4238}
4239
4240static void hppa_tr_tb_stop(DisasContextBase *dcbase, CPUState *cs)
4241{
4242 DisasContext *ctx = container_of(dcbase, DisasContext, base);
e1b5a5ed 4243 DisasJumpType is_jmp = ctx->base.is_jmp;
61766fe9 4244
e1b5a5ed 4245 switch (is_jmp) {
869051ea 4246 case DISAS_NORETURN:
61766fe9 4247 break;
51b061fb 4248 case DISAS_TOO_MANY:
869051ea 4249 case DISAS_IAQ_N_STALE:
e1b5a5ed 4250 case DISAS_IAQ_N_STALE_EXIT:
741322f4
RH
4251 copy_iaoq_entry(ctx, cpu_iaoq_f, ctx->iaoq_f, cpu_iaoq_f);
4252 copy_iaoq_entry(ctx, cpu_iaoq_b, ctx->iaoq_b, cpu_iaoq_b);
51b061fb 4253 nullify_save(ctx);
61766fe9 4254 /* FALLTHRU */
869051ea 4255 case DISAS_IAQ_N_UPDATED:
8532a14e 4256 if (is_jmp != DISAS_IAQ_N_STALE_EXIT) {
7f11636d 4257 tcg_gen_lookup_and_goto_ptr();
8532a14e 4258 break;
61766fe9 4259 }
c5d0aec2
RH
4260 /* FALLTHRU */
4261 case DISAS_EXIT:
4262 tcg_gen_exit_tb(NULL, 0);
61766fe9
RH
4263 break;
4264 default:
51b061fb 4265 g_assert_not_reached();
61766fe9 4266 }
51b061fb 4267}
61766fe9 4268
8eb806a7
RH
4269static void hppa_tr_disas_log(const DisasContextBase *dcbase,
4270 CPUState *cs, FILE *logfile)
51b061fb 4271{
c301f34e 4272 target_ulong pc = dcbase->pc_first;
61766fe9 4273
ba1d0b44
RH
4274#ifdef CONFIG_USER_ONLY
4275 switch (pc) {
51b061fb 4276 case 0x00:
8eb806a7 4277 fprintf(logfile, "IN:\n0x00000000: (null)\n");
ba1d0b44 4278 return;
51b061fb 4279 case 0xb0:
8eb806a7 4280 fprintf(logfile, "IN:\n0x000000b0: light-weight-syscall\n");
ba1d0b44 4281 return;
51b061fb 4282 case 0xe0:
8eb806a7 4283 fprintf(logfile, "IN:\n0x000000e0: set-thread-pointer-syscall\n");
ba1d0b44 4284 return;
51b061fb 4285 case 0x100:
8eb806a7 4286 fprintf(logfile, "IN:\n0x00000100: syscall\n");
ba1d0b44 4287 return;
61766fe9 4288 }
ba1d0b44
RH
4289#endif
4290
8eb806a7
RH
4291 fprintf(logfile, "IN: %s\n", lookup_symbol(pc));
4292 target_disas(logfile, cs, pc, dcbase->tb->size);
51b061fb
RH
4293}
4294
4295static const TranslatorOps hppa_tr_ops = {
4296 .init_disas_context = hppa_tr_init_disas_context,
4297 .tb_start = hppa_tr_tb_start,
4298 .insn_start = hppa_tr_insn_start,
51b061fb
RH
4299 .translate_insn = hppa_tr_translate_insn,
4300 .tb_stop = hppa_tr_tb_stop,
4301 .disas_log = hppa_tr_disas_log,
4302};
4303
597f9b2d 4304void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns,
306c8721 4305 target_ulong pc, void *host_pc)
51b061fb
RH
4306{
4307 DisasContext ctx;
306c8721 4308 translator_loop(cs, tb, max_insns, pc, host_pc, &hppa_tr_ops, &ctx.base);
61766fe9 4309}