/*
 * HPPA emulation cpu translation for qemu.
 *
 * Copyright (c) 2016 Richard Henderson <rth@twiddle.net>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "cpu.h"
#include "disas/disas.h"
#include "qemu/host-utils.h"
#include "exec/exec-all.h"
#include "tcg/tcg-op.h"
#include "exec/helper-proto.h"
#include "exec/helper-gen.h"
#include "exec/translator.h"
#include "exec/log.h"

#define HELPER_H "helper.h"
#include "exec/helper-info.c.inc"
#undef HELPER_H

/* Since we have a distinction between register size and address size,
   we need to redefine all of these. */

#undef TCGv
#undef tcg_temp_new
#undef tcg_global_mem_new

#if TARGET_LONG_BITS == 64
#define TCGv_tl TCGv_i64
#define tcg_temp_new_tl tcg_temp_new_i64
#if TARGET_REGISTER_BITS == 64
#define tcg_gen_extu_reg_tl tcg_gen_mov_i64
#else
#define tcg_gen_extu_reg_tl tcg_gen_extu_i32_i64
#endif
#else
#define TCGv_tl TCGv_i32
#define tcg_temp_new_tl tcg_temp_new_i32
#define tcg_gen_extu_reg_tl tcg_gen_mov_i32
#endif

#if TARGET_REGISTER_BITS == 64
#define TCGv_reg TCGv_i64

#define tcg_temp_new tcg_temp_new_i64
#define tcg_global_mem_new tcg_global_mem_new_i64

#define tcg_gen_movi_reg tcg_gen_movi_i64
#define tcg_gen_mov_reg tcg_gen_mov_i64
#define tcg_gen_ld8u_reg tcg_gen_ld8u_i64
#define tcg_gen_ld8s_reg tcg_gen_ld8s_i64
#define tcg_gen_ld16u_reg tcg_gen_ld16u_i64
#define tcg_gen_ld16s_reg tcg_gen_ld16s_i64
#define tcg_gen_ld32u_reg tcg_gen_ld32u_i64
#define tcg_gen_ld32s_reg tcg_gen_ld32s_i64
#define tcg_gen_ld_reg tcg_gen_ld_i64
#define tcg_gen_st8_reg tcg_gen_st8_i64
#define tcg_gen_st16_reg tcg_gen_st16_i64
#define tcg_gen_st32_reg tcg_gen_st32_i64
#define tcg_gen_st_reg tcg_gen_st_i64
#define tcg_gen_add_reg tcg_gen_add_i64
#define tcg_gen_addi_reg tcg_gen_addi_i64
#define tcg_gen_sub_reg tcg_gen_sub_i64
#define tcg_gen_neg_reg tcg_gen_neg_i64
#define tcg_gen_subfi_reg tcg_gen_subfi_i64
#define tcg_gen_subi_reg tcg_gen_subi_i64
#define tcg_gen_and_reg tcg_gen_and_i64
#define tcg_gen_andi_reg tcg_gen_andi_i64
#define tcg_gen_or_reg tcg_gen_or_i64
#define tcg_gen_ori_reg tcg_gen_ori_i64
#define tcg_gen_xor_reg tcg_gen_xor_i64
#define tcg_gen_xori_reg tcg_gen_xori_i64
#define tcg_gen_not_reg tcg_gen_not_i64
#define tcg_gen_shl_reg tcg_gen_shl_i64
#define tcg_gen_shli_reg tcg_gen_shli_i64
#define tcg_gen_shr_reg tcg_gen_shr_i64
#define tcg_gen_shri_reg tcg_gen_shri_i64
#define tcg_gen_sar_reg tcg_gen_sar_i64
#define tcg_gen_sari_reg tcg_gen_sari_i64
#define tcg_gen_brcond_reg tcg_gen_brcond_i64
#define tcg_gen_brcondi_reg tcg_gen_brcondi_i64
#define tcg_gen_setcond_reg tcg_gen_setcond_i64
#define tcg_gen_setcondi_reg tcg_gen_setcondi_i64
#define tcg_gen_mul_reg tcg_gen_mul_i64
#define tcg_gen_muli_reg tcg_gen_muli_i64
#define tcg_gen_div_reg tcg_gen_div_i64
#define tcg_gen_rem_reg tcg_gen_rem_i64
#define tcg_gen_divu_reg tcg_gen_divu_i64
#define tcg_gen_remu_reg tcg_gen_remu_i64
#define tcg_gen_discard_reg tcg_gen_discard_i64
#define tcg_gen_trunc_reg_i32 tcg_gen_extrl_i64_i32
#define tcg_gen_trunc_i64_reg tcg_gen_mov_i64
#define tcg_gen_extu_i32_reg tcg_gen_extu_i32_i64
#define tcg_gen_ext_i32_reg tcg_gen_ext_i32_i64
#define tcg_gen_extu_reg_i64 tcg_gen_mov_i64
#define tcg_gen_ext_reg_i64 tcg_gen_mov_i64
#define tcg_gen_ext8u_reg tcg_gen_ext8u_i64
#define tcg_gen_ext8s_reg tcg_gen_ext8s_i64
#define tcg_gen_ext16u_reg tcg_gen_ext16u_i64
#define tcg_gen_ext16s_reg tcg_gen_ext16s_i64
#define tcg_gen_ext32u_reg tcg_gen_ext32u_i64
#define tcg_gen_ext32s_reg tcg_gen_ext32s_i64
#define tcg_gen_bswap16_reg tcg_gen_bswap16_i64
#define tcg_gen_bswap32_reg tcg_gen_bswap32_i64
#define tcg_gen_bswap64_reg tcg_gen_bswap64_i64
#define tcg_gen_concat_reg_i64 tcg_gen_concat32_i64
#define tcg_gen_andc_reg tcg_gen_andc_i64
#define tcg_gen_eqv_reg tcg_gen_eqv_i64
#define tcg_gen_nand_reg tcg_gen_nand_i64
#define tcg_gen_nor_reg tcg_gen_nor_i64
#define tcg_gen_orc_reg tcg_gen_orc_i64
#define tcg_gen_clz_reg tcg_gen_clz_i64
#define tcg_gen_ctz_reg tcg_gen_ctz_i64
#define tcg_gen_clzi_reg tcg_gen_clzi_i64
#define tcg_gen_ctzi_reg tcg_gen_ctzi_i64
#define tcg_gen_clrsb_reg tcg_gen_clrsb_i64
#define tcg_gen_ctpop_reg tcg_gen_ctpop_i64
#define tcg_gen_rotl_reg tcg_gen_rotl_i64
#define tcg_gen_rotli_reg tcg_gen_rotli_i64
#define tcg_gen_rotr_reg tcg_gen_rotr_i64
#define tcg_gen_rotri_reg tcg_gen_rotri_i64
#define tcg_gen_deposit_reg tcg_gen_deposit_i64
#define tcg_gen_deposit_z_reg tcg_gen_deposit_z_i64
#define tcg_gen_extract_reg tcg_gen_extract_i64
#define tcg_gen_sextract_reg tcg_gen_sextract_i64
#define tcg_gen_extract2_reg tcg_gen_extract2_i64
#define tcg_constant_reg tcg_constant_i64
#define tcg_gen_movcond_reg tcg_gen_movcond_i64
#define tcg_gen_add2_reg tcg_gen_add2_i64
#define tcg_gen_sub2_reg tcg_gen_sub2_i64
#define tcg_gen_qemu_ld_reg tcg_gen_qemu_ld_i64
#define tcg_gen_qemu_st_reg tcg_gen_qemu_st_i64
#define tcg_gen_atomic_xchg_reg tcg_gen_atomic_xchg_i64
#define tcg_gen_trunc_reg_ptr tcg_gen_trunc_i64_ptr
#else
#define TCGv_reg TCGv_i32
#define tcg_temp_new tcg_temp_new_i32
#define tcg_global_mem_new tcg_global_mem_new_i32

#define tcg_gen_movi_reg tcg_gen_movi_i32
#define tcg_gen_mov_reg tcg_gen_mov_i32
#define tcg_gen_ld8u_reg tcg_gen_ld8u_i32
#define tcg_gen_ld8s_reg tcg_gen_ld8s_i32
#define tcg_gen_ld16u_reg tcg_gen_ld16u_i32
#define tcg_gen_ld16s_reg tcg_gen_ld16s_i32
#define tcg_gen_ld32u_reg tcg_gen_ld_i32
#define tcg_gen_ld32s_reg tcg_gen_ld_i32
#define tcg_gen_ld_reg tcg_gen_ld_i32
#define tcg_gen_st8_reg tcg_gen_st8_i32
#define tcg_gen_st16_reg tcg_gen_st16_i32
#define tcg_gen_st32_reg tcg_gen_st32_i32
#define tcg_gen_st_reg tcg_gen_st_i32
#define tcg_gen_add_reg tcg_gen_add_i32
#define tcg_gen_addi_reg tcg_gen_addi_i32
#define tcg_gen_sub_reg tcg_gen_sub_i32
#define tcg_gen_neg_reg tcg_gen_neg_i32
#define tcg_gen_subfi_reg tcg_gen_subfi_i32
#define tcg_gen_subi_reg tcg_gen_subi_i32
#define tcg_gen_and_reg tcg_gen_and_i32
#define tcg_gen_andi_reg tcg_gen_andi_i32
#define tcg_gen_or_reg tcg_gen_or_i32
#define tcg_gen_ori_reg tcg_gen_ori_i32
#define tcg_gen_xor_reg tcg_gen_xor_i32
#define tcg_gen_xori_reg tcg_gen_xori_i32
#define tcg_gen_not_reg tcg_gen_not_i32
#define tcg_gen_shl_reg tcg_gen_shl_i32
#define tcg_gen_shli_reg tcg_gen_shli_i32
#define tcg_gen_shr_reg tcg_gen_shr_i32
#define tcg_gen_shri_reg tcg_gen_shri_i32
#define tcg_gen_sar_reg tcg_gen_sar_i32
#define tcg_gen_sari_reg tcg_gen_sari_i32
#define tcg_gen_brcond_reg tcg_gen_brcond_i32
#define tcg_gen_brcondi_reg tcg_gen_brcondi_i32
#define tcg_gen_setcond_reg tcg_gen_setcond_i32
#define tcg_gen_setcondi_reg tcg_gen_setcondi_i32
#define tcg_gen_mul_reg tcg_gen_mul_i32
#define tcg_gen_muli_reg tcg_gen_muli_i32
#define tcg_gen_div_reg tcg_gen_div_i32
#define tcg_gen_rem_reg tcg_gen_rem_i32
#define tcg_gen_divu_reg tcg_gen_divu_i32
#define tcg_gen_remu_reg tcg_gen_remu_i32
#define tcg_gen_discard_reg tcg_gen_discard_i32
#define tcg_gen_trunc_reg_i32 tcg_gen_mov_i32
#define tcg_gen_trunc_i64_reg tcg_gen_extrl_i64_i32
#define tcg_gen_extu_i32_reg tcg_gen_mov_i32
#define tcg_gen_ext_i32_reg tcg_gen_mov_i32
#define tcg_gen_extu_reg_i64 tcg_gen_extu_i32_i64
#define tcg_gen_ext_reg_i64 tcg_gen_ext_i32_i64
#define tcg_gen_ext8u_reg tcg_gen_ext8u_i32
#define tcg_gen_ext8s_reg tcg_gen_ext8s_i32
#define tcg_gen_ext16u_reg tcg_gen_ext16u_i32
#define tcg_gen_ext16s_reg tcg_gen_ext16s_i32
#define tcg_gen_ext32u_reg tcg_gen_mov_i32
#define tcg_gen_ext32s_reg tcg_gen_mov_i32
#define tcg_gen_bswap16_reg tcg_gen_bswap16_i32
#define tcg_gen_bswap32_reg tcg_gen_bswap32_i32
#define tcg_gen_concat_reg_i64 tcg_gen_concat_i32_i64
#define tcg_gen_andc_reg tcg_gen_andc_i32
#define tcg_gen_eqv_reg tcg_gen_eqv_i32
#define tcg_gen_nand_reg tcg_gen_nand_i32
#define tcg_gen_nor_reg tcg_gen_nor_i32
#define tcg_gen_orc_reg tcg_gen_orc_i32
#define tcg_gen_clz_reg tcg_gen_clz_i32
#define tcg_gen_ctz_reg tcg_gen_ctz_i32
#define tcg_gen_clzi_reg tcg_gen_clzi_i32
#define tcg_gen_ctzi_reg tcg_gen_ctzi_i32
#define tcg_gen_clrsb_reg tcg_gen_clrsb_i32
#define tcg_gen_ctpop_reg tcg_gen_ctpop_i32
#define tcg_gen_rotl_reg tcg_gen_rotl_i32
#define tcg_gen_rotli_reg tcg_gen_rotli_i32
#define tcg_gen_rotr_reg tcg_gen_rotr_i32
#define tcg_gen_rotri_reg tcg_gen_rotri_i32
#define tcg_gen_deposit_reg tcg_gen_deposit_i32
#define tcg_gen_deposit_z_reg tcg_gen_deposit_z_i32
#define tcg_gen_extract_reg tcg_gen_extract_i32
#define tcg_gen_sextract_reg tcg_gen_sextract_i32
#define tcg_gen_extract2_reg tcg_gen_extract2_i32
#define tcg_constant_reg tcg_constant_i32
#define tcg_gen_movcond_reg tcg_gen_movcond_i32
#define tcg_gen_add2_reg tcg_gen_add2_i32
#define tcg_gen_sub2_reg tcg_gen_sub2_i32
#define tcg_gen_qemu_ld_reg tcg_gen_qemu_ld_i32
#define tcg_gen_qemu_st_reg tcg_gen_qemu_st_i32
#define tcg_gen_atomic_xchg_reg tcg_gen_atomic_xchg_i32
#define tcg_gen_trunc_reg_ptr tcg_gen_ext_i32_ptr
#endif /* TARGET_REGISTER_BITS */

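/*
 * Illustrative note (not part of the original source): with these aliases
 * a 32-bit target build expands, say,
 *     tcg_gen_add_reg(dest, in1, in2);
 * to tcg_gen_add_i32(), while a 64-bit build expands it to
 * tcg_gen_add_i64(), so the translator below can be written once against
 * the "_reg" names regardless of TARGET_REGISTER_BITS.
 */
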
typedef struct DisasCond {
    TCGCond c;
    TCGv_reg a0, a1;
} DisasCond;

typedef struct DisasContext {
    DisasContextBase base;
    CPUState *cs;

    target_ureg iaoq_f;
    target_ureg iaoq_b;
    target_ureg iaoq_n;
    TCGv_reg iaoq_n_var;

    DisasCond null_cond;
    TCGLabel *null_lab;

    uint32_t insn;
    uint32_t tb_flags;
    int mmu_idx;
    int privilege;
    bool psw_n_nonzero;
    bool is_pa20;

#ifdef CONFIG_USER_ONLY
    MemOp unalign;
#endif
} DisasContext;

#ifdef CONFIG_USER_ONLY
#define UNALIGN(C) (C)->unalign
#else
#define UNALIGN(C) MO_ALIGN
#endif

/* Note that ssm/rsm instructions number PSW_W and PSW_E differently. */
static int expand_sm_imm(DisasContext *ctx, int val)
{
    if (val & PSW_SM_E) {
        val = (val & ~PSW_SM_E) | PSW_E;
    }
    if (val & PSW_SM_W) {
        val = (val & ~PSW_SM_W) | PSW_W;
    }
    return val;
}

/* Inverted space register indicates 0 means sr0 not inferred from base. */
static int expand_sr3x(DisasContext *ctx, int val)
{
    return ~val;
}

/* Convert the M:A bits within a memory insn to the tri-state value
   we use for the final M. */
static int ma_to_m(DisasContext *ctx, int val)
{
    return val & 2 ? (val & 1 ? -1 : 1) : 0;
}

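/*
 * Illustrative mapping (not in the original source), assuming val packs
 * the fields as (m << 1) | a: m=0 yields 0 (no base update); m=1,a=0
 * yields +1 (post-modify); m=1,a=1 yields -1 (pre-modify), matching the
 * expression above.
 */
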
/* Convert the sign of the displacement to a pre or post-modify. */
static int pos_to_m(DisasContext *ctx, int val)
{
    return val ? 1 : -1;
}

static int neg_to_m(DisasContext *ctx, int val)
{
    return val ? -1 : 1;
}

/* Used for branch targets and fp memory ops. */
static int expand_shl2(DisasContext *ctx, int val)
{
    return val << 2;
}

/* Used for fp memory ops. */
static int expand_shl3(DisasContext *ctx, int val)
{
    return val << 3;
}

/* Used for assemble_21. */
static int expand_shl11(DisasContext *ctx, int val)
{
    return val << 11;
}


/* Include the auto-generated decoder. */
#include "decode-insns.c.inc"

/* We are not using a goto_tb (for whatever reason), but have updated
   the iaq (for whatever reason), so don't do it again on exit. */
#define DISAS_IAQ_N_UPDATED DISAS_TARGET_0

/* We are exiting the TB, but have neither emitted a goto_tb, nor
   updated the iaq for the next instruction to be executed. */
#define DISAS_IAQ_N_STALE DISAS_TARGET_1

/* Similarly, but we want to return to the main loop immediately
   to recognize unmasked interrupts. */
#define DISAS_IAQ_N_STALE_EXIT DISAS_TARGET_2
#define DISAS_EXIT DISAS_TARGET_3

/* global register indexes */
static TCGv_reg cpu_gr[32];
static TCGv_i64 cpu_sr[4];
static TCGv_i64 cpu_srH;
static TCGv_reg cpu_iaoq_f;
static TCGv_reg cpu_iaoq_b;
static TCGv_i64 cpu_iasq_f;
static TCGv_i64 cpu_iasq_b;
static TCGv_reg cpu_sar;
static TCGv_reg cpu_psw_n;
static TCGv_reg cpu_psw_v;
static TCGv_reg cpu_psw_cb;
static TCGv_reg cpu_psw_cb_msb;

void hppa_translate_init(void)
{
#define DEF_VAR(V)  { &cpu_##V, #V, offsetof(CPUHPPAState, V) }

    typedef struct { TCGv_reg *var; const char *name; int ofs; } GlobalVar;
    static const GlobalVar vars[] = {
        { &cpu_sar, "sar", offsetof(CPUHPPAState, cr[CR_SAR]) },
        DEF_VAR(psw_n),
        DEF_VAR(psw_v),
        DEF_VAR(psw_cb),
        DEF_VAR(psw_cb_msb),
        DEF_VAR(iaoq_f),
        DEF_VAR(iaoq_b),
    };

#undef DEF_VAR

    /* Use the symbolic register names that match the disassembler. */
    static const char gr_names[32][4] = {
        "r0",  "r1",  "r2",  "r3",  "r4",  "r5",  "r6",  "r7",
        "r8",  "r9",  "r10", "r11", "r12", "r13", "r14", "r15",
        "r16", "r17", "r18", "r19", "r20", "r21", "r22", "r23",
        "r24", "r25", "r26", "r27", "r28", "r29", "r30", "r31"
    };
    /* SR[4-7] are not global registers so that we can index them. */
    static const char sr_names[5][4] = {
        "sr0", "sr1", "sr2", "sr3", "srH"
    };

    int i;

    cpu_gr[0] = NULL;
    for (i = 1; i < 32; i++) {
        cpu_gr[i] = tcg_global_mem_new(tcg_env,
                                       offsetof(CPUHPPAState, gr[i]),
                                       gr_names[i]);
    }
    for (i = 0; i < 4; i++) {
        cpu_sr[i] = tcg_global_mem_new_i64(tcg_env,
                                           offsetof(CPUHPPAState, sr[i]),
                                           sr_names[i]);
    }
    cpu_srH = tcg_global_mem_new_i64(tcg_env,
                                     offsetof(CPUHPPAState, sr[4]),
                                     sr_names[4]);

    for (i = 0; i < ARRAY_SIZE(vars); ++i) {
        const GlobalVar *v = &vars[i];
        *v->var = tcg_global_mem_new(tcg_env, v->ofs, v->name);
    }

    cpu_iasq_f = tcg_global_mem_new_i64(tcg_env,
                                        offsetof(CPUHPPAState, iasq_f),
                                        "iasq_f");
    cpu_iasq_b = tcg_global_mem_new_i64(tcg_env,
                                        offsetof(CPUHPPAState, iasq_b),
                                        "iasq_b");
}

static DisasCond cond_make_f(void)
{
    return (DisasCond){
        .c = TCG_COND_NEVER,
        .a0 = NULL,
        .a1 = NULL,
    };
}

static DisasCond cond_make_t(void)
{
    return (DisasCond){
        .c = TCG_COND_ALWAYS,
        .a0 = NULL,
        .a1 = NULL,
    };
}

static DisasCond cond_make_n(void)
{
    return (DisasCond){
        .c = TCG_COND_NE,
        .a0 = cpu_psw_n,
        .a1 = tcg_constant_reg(0)
    };
}

static DisasCond cond_make_0_tmp(TCGCond c, TCGv_reg a0)
{
    assert (c != TCG_COND_NEVER && c != TCG_COND_ALWAYS);
    return (DisasCond){
        .c = c, .a0 = a0, .a1 = tcg_constant_reg(0)
    };
}

static DisasCond cond_make_0(TCGCond c, TCGv_reg a0)
{
    TCGv_reg tmp = tcg_temp_new();
    tcg_gen_mov_reg(tmp, a0);
    return cond_make_0_tmp(c, tmp);
}

static DisasCond cond_make(TCGCond c, TCGv_reg a0, TCGv_reg a1)
{
    DisasCond r = { .c = c };

    assert (c != TCG_COND_NEVER && c != TCG_COND_ALWAYS);
    r.a0 = tcg_temp_new();
    tcg_gen_mov_reg(r.a0, a0);
    r.a1 = tcg_temp_new();
    tcg_gen_mov_reg(r.a1, a1);

    return r;
}

static void cond_free(DisasCond *cond)
{
    switch (cond->c) {
    default:
        cond->a0 = NULL;
        cond->a1 = NULL;
        /* fallthru */
    case TCG_COND_ALWAYS:
        cond->c = TCG_COND_NEVER;
        break;
    case TCG_COND_NEVER:
        break;
    }
}

static TCGv_reg load_gpr(DisasContext *ctx, unsigned reg)
{
    if (reg == 0) {
        TCGv_reg t = tcg_temp_new();
        tcg_gen_movi_reg(t, 0);
        return t;
    } else {
        return cpu_gr[reg];
    }
}

static TCGv_reg dest_gpr(DisasContext *ctx, unsigned reg)
{
    if (reg == 0 || ctx->null_cond.c != TCG_COND_NEVER) {
        return tcg_temp_new();
    } else {
        return cpu_gr[reg];
    }
}

static void save_or_nullify(DisasContext *ctx, TCGv_reg dest, TCGv_reg t)
{
    if (ctx->null_cond.c != TCG_COND_NEVER) {
        tcg_gen_movcond_reg(ctx->null_cond.c, dest, ctx->null_cond.a0,
                            ctx->null_cond.a1, dest, t);
    } else {
        tcg_gen_mov_reg(dest, t);
    }
}

static void save_gpr(DisasContext *ctx, unsigned reg, TCGv_reg t)
{
    if (reg != 0) {
        save_or_nullify(ctx, cpu_gr[reg], t);
    }
}

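/*
 * A brief note, not from the original source: gr0 always reads as zero and
 * is never written, which is why load_gpr materialises a zero temporary and
 * dest_gpr hands back a scratch temporary both for r0 and whenever the
 * current insn may still be nullified; save_gpr then commits the value
 * through save_or_nullify, which keys a movcond on null_cond.
 */
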
#if HOST_BIG_ENDIAN
# define HI_OFS  0
# define LO_OFS  4
#else
# define HI_OFS  4
# define LO_OFS  0
#endif

static TCGv_i32 load_frw_i32(unsigned rt)
{
    TCGv_i32 ret = tcg_temp_new_i32();
    tcg_gen_ld_i32(ret, tcg_env,
                   offsetof(CPUHPPAState, fr[rt & 31])
                   + (rt & 32 ? LO_OFS : HI_OFS));
    return ret;
}

static TCGv_i32 load_frw0_i32(unsigned rt)
{
    if (rt == 0) {
        TCGv_i32 ret = tcg_temp_new_i32();
        tcg_gen_movi_i32(ret, 0);
        return ret;
    } else {
        return load_frw_i32(rt);
    }
}

static TCGv_i64 load_frw0_i64(unsigned rt)
{
    TCGv_i64 ret = tcg_temp_new_i64();
    if (rt == 0) {
        tcg_gen_movi_i64(ret, 0);
    } else {
        tcg_gen_ld32u_i64(ret, tcg_env,
                          offsetof(CPUHPPAState, fr[rt & 31])
                          + (rt & 32 ? LO_OFS : HI_OFS));
    }
    return ret;
}

static void save_frw_i32(unsigned rt, TCGv_i32 val)
{
    tcg_gen_st_i32(val, tcg_env,
                   offsetof(CPUHPPAState, fr[rt & 31])
                   + (rt & 32 ? LO_OFS : HI_OFS));
}

#undef HI_OFS
#undef LO_OFS

static TCGv_i64 load_frd(unsigned rt)
{
    TCGv_i64 ret = tcg_temp_new_i64();
    tcg_gen_ld_i64(ret, tcg_env, offsetof(CPUHPPAState, fr[rt]));
    return ret;
}

static TCGv_i64 load_frd0(unsigned rt)
{
    if (rt == 0) {
        TCGv_i64 ret = tcg_temp_new_i64();
        tcg_gen_movi_i64(ret, 0);
        return ret;
    } else {
        return load_frd(rt);
    }
}

static void save_frd(unsigned rt, TCGv_i64 val)
{
    tcg_gen_st_i64(val, tcg_env, offsetof(CPUHPPAState, fr[rt]));
}

static void load_spr(DisasContext *ctx, TCGv_i64 dest, unsigned reg)
{
#ifdef CONFIG_USER_ONLY
    tcg_gen_movi_i64(dest, 0);
#else
    if (reg < 4) {
        tcg_gen_mov_i64(dest, cpu_sr[reg]);
    } else if (ctx->tb_flags & TB_FLAG_SR_SAME) {
        tcg_gen_mov_i64(dest, cpu_srH);
    } else {
        tcg_gen_ld_i64(dest, tcg_env, offsetof(CPUHPPAState, sr[reg]));
    }
#endif
}

/* Skip over the implementation of an insn that has been nullified.
   Use this when the insn is too complex for a conditional move. */
static void nullify_over(DisasContext *ctx)
{
    if (ctx->null_cond.c != TCG_COND_NEVER) {
        /* The always condition should have been handled in the main loop. */
        assert(ctx->null_cond.c != TCG_COND_ALWAYS);

        ctx->null_lab = gen_new_label();

        /* If we're using PSW[N], copy it to a temp because... */
        if (ctx->null_cond.a0 == cpu_psw_n) {
            ctx->null_cond.a0 = tcg_temp_new();
            tcg_gen_mov_reg(ctx->null_cond.a0, cpu_psw_n);
        }
        /* ... we clear it before branching over the implementation,
           so that (1) it's clear after nullifying this insn and
           (2) if this insn nullifies the next, PSW[N] is valid. */
        if (ctx->psw_n_nonzero) {
            ctx->psw_n_nonzero = false;
            tcg_gen_movi_reg(cpu_psw_n, 0);
        }

        tcg_gen_brcond_reg(ctx->null_cond.c, ctx->null_cond.a0,
                           ctx->null_cond.a1, ctx->null_lab);
        cond_free(&ctx->null_cond);
    }
}

/* Save the current nullification state to PSW[N]. */
static void nullify_save(DisasContext *ctx)
{
    if (ctx->null_cond.c == TCG_COND_NEVER) {
        if (ctx->psw_n_nonzero) {
            tcg_gen_movi_reg(cpu_psw_n, 0);
        }
        return;
    }
    if (ctx->null_cond.a0 != cpu_psw_n) {
        tcg_gen_setcond_reg(ctx->null_cond.c, cpu_psw_n,
                            ctx->null_cond.a0, ctx->null_cond.a1);
        ctx->psw_n_nonzero = true;
    }
    cond_free(&ctx->null_cond);
}

/* Set a PSW[N] to X.  The intention is that this is used immediately
   before a goto_tb/exit_tb, so that there is no fallthru path to other
   code within the TB.  Therefore we do not update psw_n_nonzero. */
static void nullify_set(DisasContext *ctx, bool x)
{
    if (ctx->psw_n_nonzero || x) {
        tcg_gen_movi_reg(cpu_psw_n, x);
    }
}

/* Mark the end of an instruction that may have been nullified.
   This is the pair to nullify_over.  Always returns true so that
   it may be tail-called from a translate function. */
static bool nullify_end(DisasContext *ctx)
{
    TCGLabel *null_lab = ctx->null_lab;
    DisasJumpType status = ctx->base.is_jmp;

    /* For NEXT, NORETURN, STALE, we can easily continue (or exit).
       For UPDATED, we cannot update on the nullified path. */
    assert(status != DISAS_IAQ_N_UPDATED);

    if (likely(null_lab == NULL)) {
        /* The current insn wasn't conditional or handled the condition
           applied to it without a branch, so the (new) setting of
           NULL_COND can be applied directly to the next insn. */
        return true;
    }
    ctx->null_lab = NULL;

    if (likely(ctx->null_cond.c == TCG_COND_NEVER)) {
        /* The next instruction will be unconditional,
           and NULL_COND already reflects that. */
        gen_set_label(null_lab);
    } else {
        /* The insn that we just executed is itself nullifying the next
           instruction.  Store the condition in the PSW[N] global.
           We asserted PSW[N] = 0 in nullify_over, so that after the
           label we have the proper value in place. */
        nullify_save(ctx);
        gen_set_label(null_lab);
        ctx->null_cond = cond_make_n();
    }
    if (status == DISAS_NORETURN) {
        ctx->base.is_jmp = DISAS_NEXT;
    }
    return true;
}

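/*
 * Sketch of the usual caller pattern, for illustration only (the helper
 * names trans_foo/arg_foo are placeholders; the concrete helpers below,
 * e.g. do_add_reg, follow this shape):
 *
 *     static bool trans_foo(DisasContext *ctx, arg_foo *a)
 *     {
 *         nullify_over(ctx);           // skip the body if this insn is nullified
 *         ... emit the operation ...
 *         return nullify_end(ctx);     // close the skip label, always true
 *     }
 */
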
static target_ureg gva_offset_mask(DisasContext *ctx)
{
    return (ctx->tb_flags & PSW_W
            ? MAKE_64BIT_MASK(0, 62)
            : MAKE_64BIT_MASK(0, 32));
}

static void copy_iaoq_entry(DisasContext *ctx, TCGv_reg dest,
                            target_ureg ival, TCGv_reg vval)
{
    target_ureg mask = gva_offset_mask(ctx);

    if (ival != -1) {
        tcg_gen_movi_reg(dest, ival & mask);
        return;
    }
    tcg_debug_assert(vval != NULL);

    /*
     * We know that the IAOQ is already properly masked.
     * This optimization is primarily for "iaoq_f = iaoq_b".
     */
    if (vval == cpu_iaoq_f || vval == cpu_iaoq_b) {
        tcg_gen_mov_reg(dest, vval);
    } else {
        tcg_gen_andi_reg(dest, vval, mask);
    }
}

static inline target_ureg iaoq_dest(DisasContext *ctx, target_sreg disp)
{
    return ctx->iaoq_f + disp + 8;
}

static void gen_excp_1(int exception)
{
    gen_helper_excp(tcg_env, tcg_constant_i32(exception));
}

static void gen_excp(DisasContext *ctx, int exception)
{
    copy_iaoq_entry(ctx, cpu_iaoq_f, ctx->iaoq_f, cpu_iaoq_f);
    copy_iaoq_entry(ctx, cpu_iaoq_b, ctx->iaoq_b, cpu_iaoq_b);
    nullify_save(ctx);
    gen_excp_1(exception);
    ctx->base.is_jmp = DISAS_NORETURN;
}

static bool gen_excp_iir(DisasContext *ctx, int exc)
{
    nullify_over(ctx);
    tcg_gen_st_reg(tcg_constant_reg(ctx->insn),
                   tcg_env, offsetof(CPUHPPAState, cr[CR_IIR]));
    gen_excp(ctx, exc);
    return nullify_end(ctx);
}

static bool gen_illegal(DisasContext *ctx)
{
    return gen_excp_iir(ctx, EXCP_ILL);
}

#ifdef CONFIG_USER_ONLY
#define CHECK_MOST_PRIVILEGED(EXCP) \
    return gen_excp_iir(ctx, EXCP)
#else
#define CHECK_MOST_PRIVILEGED(EXCP) \
    do {                                     \
        if (ctx->privilege != 0) {           \
            return gen_excp_iir(ctx, EXCP);  \
        }                                    \
    } while (0)
#endif

static bool use_goto_tb(DisasContext *ctx, target_ureg dest)
{
    return translator_use_goto_tb(&ctx->base, dest);
}

/* If the next insn is to be nullified, and it's on the same page,
   and we're not attempting to set a breakpoint on it, then we can
   totally skip the nullified insn.  This avoids creating and
   executing a TB that merely branches to the next TB. */
static bool use_nullify_skip(DisasContext *ctx)
{
    return (((ctx->iaoq_b ^ ctx->iaoq_f) & TARGET_PAGE_MASK) == 0
            && !cpu_breakpoint_test(ctx->cs, ctx->iaoq_b, BP_ANY));
}

static void gen_goto_tb(DisasContext *ctx, int which,
                        target_ureg f, target_ureg b)
{
    if (f != -1 && b != -1 && use_goto_tb(ctx, f)) {
        tcg_gen_goto_tb(which);
        copy_iaoq_entry(ctx, cpu_iaoq_f, f, NULL);
        copy_iaoq_entry(ctx, cpu_iaoq_b, b, NULL);
        tcg_gen_exit_tb(ctx->base.tb, which);
    } else {
        copy_iaoq_entry(ctx, cpu_iaoq_f, f, cpu_iaoq_b);
        copy_iaoq_entry(ctx, cpu_iaoq_b, b, ctx->iaoq_n_var);
        tcg_gen_lookup_and_goto_ptr();
    }
}

static bool cond_need_sv(int c)
{
    return c == 2 || c == 3 || c == 6;
}

static bool cond_need_cb(int c)
{
    return c == 4 || c == 5;
}

/* Need extensions from TCGv_i32 to TCGv_reg. */
static bool cond_need_ext(DisasContext *ctx, bool d)
{
    return TARGET_REGISTER_BITS == 64 && !d;
}

/*
 * Compute conditional for arithmetic.  See Page 5-3, Table 5-1, of
 * the Parisc 1.1 Architecture Reference Manual for details.
 */

RH
838static DisasCond do_cond(unsigned cf, TCGv_reg res,
839 TCGv_reg cb_msb, TCGv_reg sv)
b2167459
RH
840{
841 DisasCond cond;
eaa3783b 842 TCGv_reg tmp;
b2167459
RH
843
844 switch (cf >> 1) {
b47a4a02 845 case 0: /* Never / TR (0 / 1) */
b2167459
RH
846 cond = cond_make_f();
847 break;
848 case 1: /* = / <> (Z / !Z) */
849 cond = cond_make_0(TCG_COND_EQ, res);
850 break;
b47a4a02
SS
851 case 2: /* < / >= (N ^ V / !(N ^ V) */
852 tmp = tcg_temp_new();
853 tcg_gen_xor_reg(tmp, res, sv);
854 cond = cond_make_0_tmp(TCG_COND_LT, tmp);
b2167459 855 break;
b47a4a02
SS
856 case 3: /* <= / > (N ^ V) | Z / !((N ^ V) | Z) */
857 /*
858 * Simplify:
859 * (N ^ V) | Z
860 * ((res < 0) ^ (sv < 0)) | !res
861 * ((res ^ sv) < 0) | !res
862 * (~(res ^ sv) >= 0) | !res
863 * !(~(res ^ sv) >> 31) | !res
864 * !(~(res ^ sv) >> 31 & res)
865 */
866 tmp = tcg_temp_new();
867 tcg_gen_eqv_reg(tmp, res, sv);
868 tcg_gen_sari_reg(tmp, tmp, TARGET_REGISTER_BITS - 1);
869 tcg_gen_and_reg(tmp, tmp, res);
870 cond = cond_make_0_tmp(TCG_COND_EQ, tmp);
b2167459
RH
871 break;
872 case 4: /* NUV / UV (!C / C) */
873 cond = cond_make_0(TCG_COND_EQ, cb_msb);
874 break;
875 case 5: /* ZNV / VNZ (!C | Z / C & !Z) */
876 tmp = tcg_temp_new();
eaa3783b
RH
877 tcg_gen_neg_reg(tmp, cb_msb);
878 tcg_gen_and_reg(tmp, tmp, res);
b47a4a02 879 cond = cond_make_0_tmp(TCG_COND_EQ, tmp);
b2167459
RH
880 break;
881 case 6: /* SV / NSV (V / !V) */
882 cond = cond_make_0(TCG_COND_LT, sv);
883 break;
884 case 7: /* OD / EV */
885 tmp = tcg_temp_new();
eaa3783b 886 tcg_gen_andi_reg(tmp, res, 1);
b47a4a02 887 cond = cond_make_0_tmp(TCG_COND_NE, tmp);
b2167459
RH
888 break;
889 default:
890 g_assert_not_reached();
891 }
892 if (cf & 1) {
893 cond.c = tcg_invert_cond(cond.c);
894 }
895
896 return cond;
897}
898
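/*
 * For illustration: cf = 4 (c = 2, f = 0) selects the signed "<" test on
 * res ^ sv above, while cf = 5 sets the low bit, so tcg_invert_cond turns
 * the same computation into ">=".
 */
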
/* Similar, but for the special case of subtraction without borrow, we
   can use the inputs directly.  This can allow other computation to be
   deleted as unused. */

static DisasCond do_sub_cond(unsigned cf, TCGv_reg res,
                             TCGv_reg in1, TCGv_reg in2, TCGv_reg sv)
{
    DisasCond cond;

    switch (cf >> 1) {
    case 1: /* = / <> */
        cond = cond_make(TCG_COND_EQ, in1, in2);
        break;
    case 2: /* < / >= */
        cond = cond_make(TCG_COND_LT, in1, in2);
        break;
    case 3: /* <= / > */
        cond = cond_make(TCG_COND_LE, in1, in2);
        break;
    case 4: /* << / >>= */
        cond = cond_make(TCG_COND_LTU, in1, in2);
        break;
    case 5: /* <<= / >> */
        cond = cond_make(TCG_COND_LEU, in1, in2);
        break;
    default:
        return do_cond(cf, res, NULL, sv);
    }
    if (cf & 1) {
        cond.c = tcg_invert_cond(cond.c);
    }

    return cond;
}

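/*
 * The shortcut above relies on res being exactly in1 - in2 with no
 * borrow-in, so e.g. "res == 0" is equivalent to "in1 == in2" and the
 * unsigned "<<" test reduces to in1 < in2; comparing the inputs lets TCG
 * discard the subtraction itself when only the condition is consumed.
 */
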
/*
 * Similar, but for logicals, where the carry and overflow bits are not
 * computed, and use of them is undefined.
 *
 * Undefined or not, hardware does not trap.  It seems reasonable to
 * assume hardware treats cases c={4,5,6} as if C=0 & V=0, since that's
 * how cases c={2,3} are treated.
 */

static DisasCond do_log_cond(unsigned cf, TCGv_reg res)
{
    switch (cf) {
    case 0:  /* never */
    case 9:  /* undef, C */
    case 11: /* undef, C & !Z */
    case 12: /* undef, V */
        return cond_make_f();

    case 1:  /* true */
    case 8:  /* undef, !C */
    case 10: /* undef, !C | Z */
    case 13: /* undef, !V */
        return cond_make_t();

    case 2:  /* == */
        return cond_make_0(TCG_COND_EQ, res);
    case 3:  /* <> */
        return cond_make_0(TCG_COND_NE, res);
    case 4:  /* < */
        return cond_make_0(TCG_COND_LT, res);
    case 5:  /* >= */
        return cond_make_0(TCG_COND_GE, res);
    case 6:  /* <= */
        return cond_make_0(TCG_COND_LE, res);
    case 7:  /* > */
        return cond_make_0(TCG_COND_GT, res);

    case 14: /* OD */
    case 15: /* EV */
        return do_cond(cf, res, NULL, NULL);

    default:
        g_assert_not_reached();
    }
}

/* Similar, but for shift/extract/deposit conditions. */

static DisasCond do_sed_cond(unsigned orig, TCGv_reg res)
{
    unsigned c, f;

    /* Convert the compressed condition codes to standard.
       0-2 are the same as logicals (nv,<,<=), while 3 is OD.
       4-7 are the reverse of 0-3. */
    c = orig & 3;
    if (c == 3) {
        c = 7;
    }
    f = (orig & 4) / 4;

    return do_log_cond(c * 2 + f, res);
}

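/*
 * For example: orig = 3 maps to c = 7, f = 0, i.e. do_log_cond(14), the OD
 * (odd) test; orig = 7 gives c = 7, f = 1, i.e. do_log_cond(15), EV.
 */
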
/* Similar, but for unit conditions. */

static DisasCond do_unit_cond(unsigned cf, TCGv_reg res,
                              TCGv_reg in1, TCGv_reg in2)
{
    DisasCond cond;
    TCGv_reg tmp, cb = NULL;

    if (cf & 8) {
        /* Since we want to test lots of carry-out bits all at once, do not
         * do our normal thing and compute carry-in of bit B+1 since that
         * leaves us with carry bits spread across two words.
         */
        cb = tcg_temp_new();
        tmp = tcg_temp_new();
        tcg_gen_or_reg(cb, in1, in2);
        tcg_gen_and_reg(tmp, in1, in2);
        tcg_gen_andc_reg(cb, cb, res);
        tcg_gen_or_reg(cb, cb, tmp);
    }

    switch (cf >> 1) {
    case 0: /* never / TR */
    case 1: /* undefined */
    case 5: /* undefined */
        cond = cond_make_f();
        break;

    case 2: /* SBZ / NBZ */
        /* See hasless(v,1) from
         * https://graphics.stanford.edu/~seander/bithacks.html#ZeroInWord
         */
        tmp = tcg_temp_new();
        tcg_gen_subi_reg(tmp, res, 0x01010101u);
        tcg_gen_andc_reg(tmp, tmp, res);
        tcg_gen_andi_reg(tmp, tmp, 0x80808080u);
        cond = cond_make_0(TCG_COND_NE, tmp);
        break;

    case 3: /* SHZ / NHZ */
        tmp = tcg_temp_new();
        tcg_gen_subi_reg(tmp, res, 0x00010001u);
        tcg_gen_andc_reg(tmp, tmp, res);
        tcg_gen_andi_reg(tmp, tmp, 0x80008000u);
        cond = cond_make_0(TCG_COND_NE, tmp);
        break;

    case 4: /* SDC / NDC */
        tcg_gen_andi_reg(cb, cb, 0x88888888u);
        cond = cond_make_0(TCG_COND_NE, cb);
        break;

    case 6: /* SBC / NBC */
        tcg_gen_andi_reg(cb, cb, 0x80808080u);
        cond = cond_make_0(TCG_COND_NE, cb);
        break;

    case 7: /* SHC / NHC */
        tcg_gen_andi_reg(cb, cb, 0x80008000u);
        cond = cond_make_0(TCG_COND_NE, cb);
        break;

    default:
        g_assert_not_reached();
    }
    if (cf & 1) {
        cond.c = tcg_invert_cond(cond.c);
    }

    return cond;
}

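/*
 * Worked example for the SBZ case, assuming a 32-bit res of 0x12003456:
 * res - 0x01010101 = 0x10ff3355; & ~res = 0x00ff0301; & 0x80808080 =
 * 0x00800000, which is non-zero, so the "some byte zero" condition holds.
 */
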
static TCGv_reg get_carry(DisasContext *ctx, bool d,
                          TCGv_reg cb, TCGv_reg cb_msb)
{
    if (cond_need_ext(ctx, d)) {
        TCGv_reg t = tcg_temp_new();
        tcg_gen_extract_reg(t, cb, 32, 1);
        return t;
    }
    return cb_msb;
}

static TCGv_reg get_psw_carry(DisasContext *ctx, bool d)
{
    return get_carry(ctx, d, cpu_psw_cb, cpu_psw_cb_msb);
}

/* Compute signed overflow for addition. */
static TCGv_reg do_add_sv(DisasContext *ctx, TCGv_reg res,
                          TCGv_reg in1, TCGv_reg in2)
{
    TCGv_reg sv = tcg_temp_new();
    TCGv_reg tmp = tcg_temp_new();

    tcg_gen_xor_reg(sv, res, in1);
    tcg_gen_xor_reg(tmp, in1, in2);
    tcg_gen_andc_reg(sv, sv, tmp);

    return sv;
}

/* Compute signed overflow for subtraction. */
static TCGv_reg do_sub_sv(DisasContext *ctx, TCGv_reg res,
                          TCGv_reg in1, TCGv_reg in2)
{
    TCGv_reg sv = tcg_temp_new();
    TCGv_reg tmp = tcg_temp_new();

    tcg_gen_xor_reg(sv, res, in1);
    tcg_gen_xor_reg(tmp, in1, in2);
    tcg_gen_and_reg(sv, sv, tmp);

    return sv;
}

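/*
 * Illustrative note (32-bit example, not in the original source): in both
 * helpers the overflow indication lives in the sign bit of sv.  Addition
 * overflows when the operands share a sign and the result differs, e.g.
 * 0x7fffffff + 1: (res ^ in1) has the sign bit set while (in1 ^ in2) does
 * not, so sv < 0.
 */
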
static void do_add(DisasContext *ctx, unsigned rt, TCGv_reg in1,
                   TCGv_reg in2, unsigned shift, bool is_l,
                   bool is_tsv, bool is_tc, bool is_c, unsigned cf)
{
    TCGv_reg dest, cb, cb_msb, cb_cond, sv, tmp;
    unsigned c = cf >> 1;
    DisasCond cond;
    bool d = false;

    dest = tcg_temp_new();
    cb = NULL;
    cb_msb = NULL;
    cb_cond = NULL;

    if (shift) {
        tmp = tcg_temp_new();
        tcg_gen_shli_reg(tmp, in1, shift);
        in1 = tmp;
    }

    if (!is_l || cond_need_cb(c)) {
        TCGv_reg zero = tcg_constant_reg(0);
        cb_msb = tcg_temp_new();
        cb = tcg_temp_new();

        tcg_gen_add2_reg(dest, cb_msb, in1, zero, in2, zero);
        if (is_c) {
            tcg_gen_add2_reg(dest, cb_msb, dest, cb_msb,
                             get_psw_carry(ctx, d), zero);
        }
        tcg_gen_xor_reg(cb, in1, in2);
        tcg_gen_xor_reg(cb, cb, dest);
        if (cond_need_cb(c)) {
            cb_cond = get_carry(ctx, d, cb, cb_msb);
        }
    } else {
        tcg_gen_add_reg(dest, in1, in2);
        if (is_c) {
            tcg_gen_add_reg(dest, dest, get_psw_carry(ctx, d));
        }
    }

    /* Compute signed overflow if required. */
    sv = NULL;
    if (is_tsv || cond_need_sv(c)) {
        sv = do_add_sv(ctx, dest, in1, in2);
        if (is_tsv) {
            /* ??? Need to include overflow from shift. */
            gen_helper_tsv(tcg_env, sv);
        }
    }

    /* Emit any conditional trap before any writeback. */
    cond = do_cond(cf, dest, cb_cond, sv);
    if (is_tc) {
        tmp = tcg_temp_new();
        tcg_gen_setcond_reg(cond.c, tmp, cond.a0, cond.a1);
        gen_helper_tcond(tcg_env, tmp);
    }

    /* Write back the result. */
    if (!is_l) {
        save_or_nullify(ctx, cpu_psw_cb, cb);
        save_or_nullify(ctx, cpu_psw_cb_msb, cb_msb);
    }
    save_gpr(ctx, rt, dest);

    /* Install the new nullification. */
    cond_free(&ctx->null_cond);
    ctx->null_cond = cond;
}

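/*
 * Note derived from the code above, for illustration: with
 * dest = in1 + in2, the per-bit carry-ins satisfy cb = in1 ^ in2 ^ dest,
 * while cb_msb holds the carry out of the word from add2; together they
 * feed the PSW carry/borrow state and the carry-based conditions.
 */
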
static bool do_add_reg(DisasContext *ctx, arg_rrr_cf_sh *a,
                       bool is_l, bool is_tsv, bool is_tc, bool is_c)
{
    TCGv_reg tcg_r1, tcg_r2;

    if (a->cf) {
        nullify_over(ctx);
    }
    tcg_r1 = load_gpr(ctx, a->r1);
    tcg_r2 = load_gpr(ctx, a->r2);
    do_add(ctx, a->t, tcg_r1, tcg_r2, a->sh, is_l, is_tsv, is_tc, is_c, a->cf);
    return nullify_end(ctx);
}

static bool do_add_imm(DisasContext *ctx, arg_rri_cf *a,
                       bool is_tsv, bool is_tc)
{
    TCGv_reg tcg_im, tcg_r2;

    if (a->cf) {
        nullify_over(ctx);
    }
    tcg_im = tcg_constant_reg(a->i);
    tcg_r2 = load_gpr(ctx, a->r);
    do_add(ctx, a->t, tcg_im, tcg_r2, 0, 0, is_tsv, is_tc, 0, a->cf);
    return nullify_end(ctx);
}

static void do_sub(DisasContext *ctx, unsigned rt, TCGv_reg in1,
                   TCGv_reg in2, bool is_tsv, bool is_b,
                   bool is_tc, unsigned cf)
{
    TCGv_reg dest, sv, cb, cb_msb, zero, tmp;
    unsigned c = cf >> 1;
    DisasCond cond;
    bool d = false;

    dest = tcg_temp_new();
    cb = tcg_temp_new();
    cb_msb = tcg_temp_new();

    zero = tcg_constant_reg(0);
    if (is_b) {
        /* DEST,C = IN1 + ~IN2 + C. */
        tcg_gen_not_reg(cb, in2);
        tcg_gen_add2_reg(dest, cb_msb, in1, zero, get_psw_carry(ctx, d), zero);
        tcg_gen_add2_reg(dest, cb_msb, dest, cb_msb, cb, zero);
        tcg_gen_xor_reg(cb, cb, in1);
        tcg_gen_xor_reg(cb, cb, dest);
    } else {
        /*
         * DEST,C = IN1 + ~IN2 + 1.  We can produce the same result in fewer
         * operations by seeding the high word with 1 and subtracting.
         */
        TCGv_reg one = tcg_constant_reg(1);
        tcg_gen_sub2_reg(dest, cb_msb, in1, one, in2, zero);
        tcg_gen_eqv_reg(cb, in1, in2);
        tcg_gen_xor_reg(cb, cb, dest);
    }

    /* Compute signed overflow if required. */
    sv = NULL;
    if (is_tsv || cond_need_sv(c)) {
        sv = do_sub_sv(ctx, dest, in1, in2);
        if (is_tsv) {
            gen_helper_tsv(tcg_env, sv);
        }
    }

    /* Compute the condition.  We cannot use the special case for borrow. */
    if (!is_b) {
        cond = do_sub_cond(cf, dest, in1, in2, sv);
    } else {
        cond = do_cond(cf, dest, get_carry(ctx, d, cb, cb_msb), sv);
    }

    /* Emit any conditional trap before any writeback. */
    if (is_tc) {
        tmp = tcg_temp_new();
        tcg_gen_setcond_reg(cond.c, tmp, cond.a0, cond.a1);
        gen_helper_tcond(tcg_env, tmp);
    }

    /* Write back the result. */
    save_or_nullify(ctx, cpu_psw_cb, cb);
    save_or_nullify(ctx, cpu_psw_cb_msb, cb_msb);
    save_gpr(ctx, rt, dest);

    /* Install the new nullification. */
    cond_free(&ctx->null_cond);
    ctx->null_cond = cond;
}

static bool do_sub_reg(DisasContext *ctx, arg_rrr_cf *a,
                       bool is_tsv, bool is_b, bool is_tc)
{
    TCGv_reg tcg_r1, tcg_r2;

    if (a->cf) {
        nullify_over(ctx);
    }
    tcg_r1 = load_gpr(ctx, a->r1);
    tcg_r2 = load_gpr(ctx, a->r2);
    do_sub(ctx, a->t, tcg_r1, tcg_r2, is_tsv, is_b, is_tc, a->cf);
    return nullify_end(ctx);
}

static bool do_sub_imm(DisasContext *ctx, arg_rri_cf *a, bool is_tsv)
{
    TCGv_reg tcg_im, tcg_r2;

    if (a->cf) {
        nullify_over(ctx);
    }
    tcg_im = tcg_constant_reg(a->i);
    tcg_r2 = load_gpr(ctx, a->r);
    do_sub(ctx, a->t, tcg_im, tcg_r2, is_tsv, 0, 0, a->cf);
    return nullify_end(ctx);
}

static void do_cmpclr(DisasContext *ctx, unsigned rt, TCGv_reg in1,
                      TCGv_reg in2, unsigned cf)
{
    TCGv_reg dest, sv;
    DisasCond cond;

    dest = tcg_temp_new();
    tcg_gen_sub_reg(dest, in1, in2);

    /* Compute signed overflow if required. */
    sv = NULL;
    if (cond_need_sv(cf >> 1)) {
        sv = do_sub_sv(ctx, dest, in1, in2);
    }

    /* Form the condition for the compare. */
    cond = do_sub_cond(cf, dest, in1, in2, sv);

    /* Clear. */
    tcg_gen_movi_reg(dest, 0);
    save_gpr(ctx, rt, dest);

    /* Install the new nullification. */
    cond_free(&ctx->null_cond);
    ctx->null_cond = cond;
}

static void do_log(DisasContext *ctx, unsigned rt, TCGv_reg in1,
                   TCGv_reg in2, unsigned cf,
                   void (*fn)(TCGv_reg, TCGv_reg, TCGv_reg))
{
    TCGv_reg dest = dest_gpr(ctx, rt);

    /* Perform the operation, and writeback. */
    fn(dest, in1, in2);
    save_gpr(ctx, rt, dest);

    /* Install the new nullification. */
    cond_free(&ctx->null_cond);
    if (cf) {
        ctx->null_cond = do_log_cond(cf, dest);
    }
}

static bool do_log_reg(DisasContext *ctx, arg_rrr_cf *a,
                       void (*fn)(TCGv_reg, TCGv_reg, TCGv_reg))
{
    TCGv_reg tcg_r1, tcg_r2;

    if (a->cf) {
        nullify_over(ctx);
    }
    tcg_r1 = load_gpr(ctx, a->r1);
    tcg_r2 = load_gpr(ctx, a->r2);
    do_log(ctx, a->t, tcg_r1, tcg_r2, a->cf, fn);
    return nullify_end(ctx);
}

static void do_unit(DisasContext *ctx, unsigned rt, TCGv_reg in1,
                    TCGv_reg in2, unsigned cf, bool is_tc,
                    void (*fn)(TCGv_reg, TCGv_reg, TCGv_reg))
{
    TCGv_reg dest;
    DisasCond cond;

    if (cf == 0) {
        dest = dest_gpr(ctx, rt);
        fn(dest, in1, in2);
        save_gpr(ctx, rt, dest);
        cond_free(&ctx->null_cond);
    } else {
        dest = tcg_temp_new();
        fn(dest, in1, in2);

        cond = do_unit_cond(cf, dest, in1, in2);

        if (is_tc) {
            TCGv_reg tmp = tcg_temp_new();
            tcg_gen_setcond_reg(cond.c, tmp, cond.a0, cond.a1);
            gen_helper_tcond(tcg_env, tmp);
        }
        save_gpr(ctx, rt, dest);

        cond_free(&ctx->null_cond);
        ctx->null_cond = cond;
    }
}

#ifndef CONFIG_USER_ONLY
/* The "normal" usage is SP >= 0, wherein SP == 0 selects the space
   from the top 2 bits of the base register.  There are a few system
   instructions that have a 3-bit space specifier, for which SR0 is
   not special.  To handle this, pass ~SP. */
static TCGv_i64 space_select(DisasContext *ctx, int sp, TCGv_reg base)
{
    TCGv_ptr ptr;
    TCGv_reg tmp;
    TCGv_i64 spc;

    if (sp != 0) {
        if (sp < 0) {
            sp = ~sp;
        }
        spc = tcg_temp_new_tl();
        load_spr(ctx, spc, sp);
        return spc;
    }
    if (ctx->tb_flags & TB_FLAG_SR_SAME) {
        return cpu_srH;
    }

    ptr = tcg_temp_new_ptr();
    tmp = tcg_temp_new();
    spc = tcg_temp_new_tl();

    /* Extract top 2 bits of the address, shift left 3 for uint64_t index. */
    tcg_gen_shri_reg(tmp, base, (ctx->tb_flags & PSW_W ? 64 : 32) - 5);
    tcg_gen_andi_reg(tmp, tmp, 030);
    tcg_gen_trunc_reg_ptr(ptr, tmp);

    tcg_gen_add_ptr(ptr, ptr, tcg_env);
    tcg_gen_ld_i64(spc, ptr, offsetof(CPUHPPAState, sr[4]));

    return spc;
}
#endif

static void form_gva(DisasContext *ctx, TCGv_tl *pgva, TCGv_reg *pofs,
                     unsigned rb, unsigned rx, int scale, target_sreg disp,
                     unsigned sp, int modify, bool is_phys)
{
    TCGv_reg base = load_gpr(ctx, rb);
    TCGv_reg ofs;
    TCGv_tl addr;

    /* Note that RX is mutually exclusive with DISP. */
    if (rx) {
        ofs = tcg_temp_new();
        tcg_gen_shli_reg(ofs, cpu_gr[rx], scale);
        tcg_gen_add_reg(ofs, ofs, base);
    } else if (disp || modify) {
        ofs = tcg_temp_new();
        tcg_gen_addi_reg(ofs, base, disp);
    } else {
        ofs = base;
    }

    *pofs = ofs;
    *pgva = addr = tcg_temp_new_tl();
    tcg_gen_extu_reg_tl(addr, modify <= 0 ? ofs : base);
    tcg_gen_andi_tl(addr, addr, gva_offset_mask(ctx));
#ifndef CONFIG_USER_ONLY
    if (!is_phys) {
        tcg_gen_or_tl(addr, addr, space_select(ctx, sp, base));
    }
#endif
}

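/*
 * Illustration of the address formation above (not from the original
 * source): for an indexed access, ofs = (gr[rx] << scale) + gr[rb];
 * otherwise ofs = gr[rb] + disp.  The returned gva is built from ofs for
 * pre-modify or no update (modify <= 0) but from the unmodified base for
 * post-modify, then masked by gva_offset_mask() and, outside user mode,
 * combined with the selected space register.
 */
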
/* Emit a memory load.  The modify parameter should be
 * < 0 for pre-modify,
 * > 0 for post-modify,
 * = 0 for no base register update.
 */
static void do_load_32(DisasContext *ctx, TCGv_i32 dest, unsigned rb,
                       unsigned rx, int scale, target_sreg disp,
                       unsigned sp, int modify, MemOp mop)
{
    TCGv_reg ofs;
    TCGv_tl addr;

    /* Caller uses nullify_over/nullify_end. */
    assert(ctx->null_cond.c == TCG_COND_NEVER);

    form_gva(ctx, &addr, &ofs, rb, rx, scale, disp, sp, modify,
             ctx->mmu_idx == MMU_PHYS_IDX);
    tcg_gen_qemu_ld_i32(dest, addr, ctx->mmu_idx, mop | UNALIGN(ctx));
    if (modify) {
        save_gpr(ctx, rb, ofs);
    }
}

static void do_load_64(DisasContext *ctx, TCGv_i64 dest, unsigned rb,
                       unsigned rx, int scale, target_sreg disp,
                       unsigned sp, int modify, MemOp mop)
{
    TCGv_reg ofs;
    TCGv_tl addr;

    /* Caller uses nullify_over/nullify_end. */
    assert(ctx->null_cond.c == TCG_COND_NEVER);

    form_gva(ctx, &addr, &ofs, rb, rx, scale, disp, sp, modify,
             ctx->mmu_idx == MMU_PHYS_IDX);
    tcg_gen_qemu_ld_i64(dest, addr, ctx->mmu_idx, mop | UNALIGN(ctx));
    if (modify) {
        save_gpr(ctx, rb, ofs);
    }
}

static void do_store_32(DisasContext *ctx, TCGv_i32 src, unsigned rb,
                        unsigned rx, int scale, target_sreg disp,
                        unsigned sp, int modify, MemOp mop)
{
    TCGv_reg ofs;
    TCGv_tl addr;

    /* Caller uses nullify_over/nullify_end. */
    assert(ctx->null_cond.c == TCG_COND_NEVER);

    form_gva(ctx, &addr, &ofs, rb, rx, scale, disp, sp, modify,
             ctx->mmu_idx == MMU_PHYS_IDX);
    tcg_gen_qemu_st_i32(src, addr, ctx->mmu_idx, mop | UNALIGN(ctx));
    if (modify) {
        save_gpr(ctx, rb, ofs);
    }
}

static void do_store_64(DisasContext *ctx, TCGv_i64 src, unsigned rb,
                        unsigned rx, int scale, target_sreg disp,
                        unsigned sp, int modify, MemOp mop)
{
    TCGv_reg ofs;
    TCGv_tl addr;

    /* Caller uses nullify_over/nullify_end. */
    assert(ctx->null_cond.c == TCG_COND_NEVER);

    form_gva(ctx, &addr, &ofs, rb, rx, scale, disp, sp, modify,
             ctx->mmu_idx == MMU_PHYS_IDX);
    tcg_gen_qemu_st_i64(src, addr, ctx->mmu_idx, mop | UNALIGN(ctx));
    if (modify) {
        save_gpr(ctx, rb, ofs);
    }
}

#if TARGET_REGISTER_BITS == 64
#define do_load_reg   do_load_64
#define do_store_reg  do_store_64
#else
#define do_load_reg   do_load_32
#define do_store_reg  do_store_32
#endif

static bool do_load(DisasContext *ctx, unsigned rt, unsigned rb,
                    unsigned rx, int scale, target_sreg disp,
                    unsigned sp, int modify, MemOp mop)
{
    TCGv_reg dest;

    nullify_over(ctx);

    if (modify == 0) {
        /* No base register update. */
        dest = dest_gpr(ctx, rt);
    } else {
        /* Make sure if RT == RB, we see the result of the load. */
        dest = tcg_temp_new();
    }
    do_load_reg(ctx, dest, rb, rx, scale, disp, sp, modify, mop);
    save_gpr(ctx, rt, dest);

    return nullify_end(ctx);
}

static bool do_floadw(DisasContext *ctx, unsigned rt, unsigned rb,
                      unsigned rx, int scale, target_sreg disp,
                      unsigned sp, int modify)
{
    TCGv_i32 tmp;

    nullify_over(ctx);

    tmp = tcg_temp_new_i32();
    do_load_32(ctx, tmp, rb, rx, scale, disp, sp, modify, MO_TEUL);
    save_frw_i32(rt, tmp);

    if (rt == 0) {
        gen_helper_loaded_fr0(tcg_env);
    }

    return nullify_end(ctx);
}

static bool trans_fldw(DisasContext *ctx, arg_ldst *a)
{
    return do_floadw(ctx, a->t, a->b, a->x, a->scale ? 2 : 0,
                     a->disp, a->sp, a->m);
}

static bool do_floadd(DisasContext *ctx, unsigned rt, unsigned rb,
                      unsigned rx, int scale, target_sreg disp,
                      unsigned sp, int modify)
{
    TCGv_i64 tmp;

    nullify_over(ctx);

    tmp = tcg_temp_new_i64();
    do_load_64(ctx, tmp, rb, rx, scale, disp, sp, modify, MO_TEUQ);
    save_frd(rt, tmp);

    if (rt == 0) {
        gen_helper_loaded_fr0(tcg_env);
    }

    return nullify_end(ctx);
}

static bool trans_fldd(DisasContext *ctx, arg_ldst *a)
{
    return do_floadd(ctx, a->t, a->b, a->x, a->scale ? 3 : 0,
                     a->disp, a->sp, a->m);
}

static bool do_store(DisasContext *ctx, unsigned rt, unsigned rb,
                     target_sreg disp, unsigned sp,
                     int modify, MemOp mop)
{
    nullify_over(ctx);
    do_store_reg(ctx, load_gpr(ctx, rt), rb, 0, 0, disp, sp, modify, mop);
    return nullify_end(ctx);
}

static bool do_fstorew(DisasContext *ctx, unsigned rt, unsigned rb,
                       unsigned rx, int scale, target_sreg disp,
                       unsigned sp, int modify)
{
    TCGv_i32 tmp;

    nullify_over(ctx);

    tmp = load_frw_i32(rt);
    do_store_32(ctx, tmp, rb, rx, scale, disp, sp, modify, MO_TEUL);

    return nullify_end(ctx);
}

static bool trans_fstw(DisasContext *ctx, arg_ldst *a)
{
    return do_fstorew(ctx, a->t, a->b, a->x, a->scale ? 2 : 0,
                      a->disp, a->sp, a->m);
}

static bool do_fstored(DisasContext *ctx, unsigned rt, unsigned rb,
                       unsigned rx, int scale, target_sreg disp,
                       unsigned sp, int modify)
{
    TCGv_i64 tmp;

    nullify_over(ctx);

    tmp = load_frd(rt);
    do_store_64(ctx, tmp, rb, rx, scale, disp, sp, modify, MO_TEUQ);

    return nullify_end(ctx);
}

static bool trans_fstd(DisasContext *ctx, arg_ldst *a)
{
    return do_fstored(ctx, a->t, a->b, a->x, a->scale ? 3 : 0,
                      a->disp, a->sp, a->m);
}

1ca74648 1669static bool do_fop_wew(DisasContext *ctx, unsigned rt, unsigned ra,
31234768 1670 void (*func)(TCGv_i32, TCGv_env, TCGv_i32))
ebe9383c
RH
1671{
1672 TCGv_i32 tmp;
1673
1674 nullify_over(ctx);
1675 tmp = load_frw0_i32(ra);
1676
ad75a51e 1677 func(tmp, tcg_env, tmp);
ebe9383c
RH
1678
1679 save_frw_i32(rt, tmp);
1ca74648 1680 return nullify_end(ctx);
ebe9383c
RH
1681}
1682
1ca74648 1683static bool do_fop_wed(DisasContext *ctx, unsigned rt, unsigned ra,
31234768 1684 void (*func)(TCGv_i32, TCGv_env, TCGv_i64))
ebe9383c
RH
1685{
1686 TCGv_i32 dst;
1687 TCGv_i64 src;
1688
1689 nullify_over(ctx);
1690 src = load_frd(ra);
1691 dst = tcg_temp_new_i32();
1692
ad75a51e 1693 func(dst, tcg_env, src);
ebe9383c 1694
ebe9383c 1695 save_frw_i32(rt, dst);
1ca74648 1696 return nullify_end(ctx);
ebe9383c
RH
1697}
1698
1ca74648 1699static bool do_fop_ded(DisasContext *ctx, unsigned rt, unsigned ra,
31234768 1700 void (*func)(TCGv_i64, TCGv_env, TCGv_i64))
ebe9383c
RH
1701{
1702 TCGv_i64 tmp;
1703
1704 nullify_over(ctx);
1705 tmp = load_frd0(ra);
1706
ad75a51e 1707 func(tmp, tcg_env, tmp);
ebe9383c
RH
1708
1709 save_frd(rt, tmp);
1ca74648 1710 return nullify_end(ctx);
ebe9383c
RH
1711}
1712
1ca74648 1713static bool do_fop_dew(DisasContext *ctx, unsigned rt, unsigned ra,
31234768 1714 void (*func)(TCGv_i64, TCGv_env, TCGv_i32))
ebe9383c
RH
1715{
1716 TCGv_i32 src;
1717 TCGv_i64 dst;
1718
1719 nullify_over(ctx);
1720 src = load_frw0_i32(ra);
1721 dst = tcg_temp_new_i64();
1722
ad75a51e 1723 func(dst, tcg_env, src);
ebe9383c 1724
ebe9383c 1725 save_frd(rt, dst);
1ca74648 1726 return nullify_end(ctx);
ebe9383c
RH
1727}
1728
1ca74648 1729static bool do_fop_weww(DisasContext *ctx, unsigned rt,
31234768
RH
1730 unsigned ra, unsigned rb,
1731 void (*func)(TCGv_i32, TCGv_env, TCGv_i32, TCGv_i32))
ebe9383c
RH
1732{
1733 TCGv_i32 a, b;
1734
1735 nullify_over(ctx);
1736 a = load_frw0_i32(ra);
1737 b = load_frw0_i32(rb);
1738
ad75a51e 1739 func(a, tcg_env, a, b);
ebe9383c 1740
ebe9383c 1741 save_frw_i32(rt, a);
1ca74648 1742 return nullify_end(ctx);
ebe9383c
RH
1743}
1744
1ca74648 1745static bool do_fop_dedd(DisasContext *ctx, unsigned rt,
31234768
RH
1746 unsigned ra, unsigned rb,
1747 void (*func)(TCGv_i64, TCGv_env, TCGv_i64, TCGv_i64))
ebe9383c
RH
1748{
1749 TCGv_i64 a, b;
1750
1751 nullify_over(ctx);
1752 a = load_frd0(ra);
1753 b = load_frd0(rb);
1754
ad75a51e 1755 func(a, tcg_env, a, b);
ebe9383c 1756
ebe9383c 1757 save_frd(rt, a);
1ca74648 1758 return nullify_end(ctx);
ebe9383c
RH
1759}
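/*
 * Naming convention for the do_fop_* wrappers above: the letters after
 * "fop_" mirror the helper signature, destination first, 'e' for the
 * env argument, then the source(s) -- 'w' is a single-word (32-bit)
 * float value, 'd' a double-word (64-bit) one.  So do_fop_wed converts
 * a double source into a word destination, and do_fop_dedd combines
 * two double sources into a double result.  Each wrapper nullifies
 * around the operation, loads the sources, calls the helper with
 * tcg_env, and stores the result back.
 */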
1760
98cd9ca7
RH
1761/* Emit an unconditional branch to a direct target, which may or may not
1762 have already had nullification handled. */
01afb7be 1763static bool do_dbranch(DisasContext *ctx, target_ureg dest,
31234768 1764 unsigned link, bool is_n)
98cd9ca7
RH
1765{
1766 if (ctx->null_cond.c == TCG_COND_NEVER && ctx->null_lab == NULL) {
1767 if (link != 0) {
741322f4 1768 copy_iaoq_entry(ctx, cpu_gr[link], ctx->iaoq_n, ctx->iaoq_n_var);
98cd9ca7
RH
1769 }
1770 ctx->iaoq_n = dest;
1771 if (is_n) {
1772 ctx->null_cond.c = TCG_COND_ALWAYS;
1773 }
98cd9ca7
RH
1774 } else {
1775 nullify_over(ctx);
1776
1777 if (link != 0) {
741322f4 1778 copy_iaoq_entry(ctx, cpu_gr[link], ctx->iaoq_n, ctx->iaoq_n_var);
98cd9ca7
RH
1779 }
1780
1781 if (is_n && use_nullify_skip(ctx)) {
1782 nullify_set(ctx, 0);
1783 gen_goto_tb(ctx, 0, dest, dest + 4);
1784 } else {
1785 nullify_set(ctx, is_n);
1786 gen_goto_tb(ctx, 0, ctx->iaoq_b, dest);
1787 }
1788
31234768 1789 nullify_end(ctx);
98cd9ca7
RH
1790
1791 nullify_set(ctx, 0);
1792 gen_goto_tb(ctx, 1, ctx->iaoq_b, ctx->iaoq_n);
31234768 1793 ctx->base.is_jmp = DISAS_NORETURN;
98cd9ca7 1794 }
01afb7be 1795 return true;
98cd9ca7
RH
1796}
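/*
 * Two cases above: when the branch instruction is not itself subject
 * to a pending nullification (null_cond is NEVER and no null_lab is
 * outstanding), the branch folds into the normal advance of the
 * instruction queue -- only iaoq_n changes, and is_n merely marks the
 * delay slot as nullified.  Otherwise the TB ends here: goto_tb slot 0
 * covers the branch being executed (optionally using use_nullify_skip
 * to jump past the nullified delay slot), and slot 1 covers the
 * fall-through taken when the branch itself was nullified.
 */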
1797
1798/* Emit a conditional branch to a direct target. If the branch itself
1799 is nullified, we should have already used nullify_over. */
01afb7be 1800static bool do_cbranch(DisasContext *ctx, target_sreg disp, bool is_n,
31234768 1801 DisasCond *cond)
98cd9ca7 1802{
eaa3783b 1803 target_ureg dest = iaoq_dest(ctx, disp);
98cd9ca7
RH
1804 TCGLabel *taken = NULL;
1805 TCGCond c = cond->c;
98cd9ca7
RH
1806 bool n;
1807
1808 assert(ctx->null_cond.c == TCG_COND_NEVER);
1809
1810 /* Handle TRUE and NEVER as direct branches. */
1811 if (c == TCG_COND_ALWAYS) {
01afb7be 1812 return do_dbranch(ctx, dest, 0, is_n && disp >= 0);
98cd9ca7
RH
1813 }
1814 if (c == TCG_COND_NEVER) {
01afb7be 1815 return do_dbranch(ctx, ctx->iaoq_n, 0, is_n && disp < 0);
98cd9ca7
RH
1816 }
1817
1818 taken = gen_new_label();
eaa3783b 1819 tcg_gen_brcond_reg(c, cond->a0, cond->a1, taken);
98cd9ca7
RH
1820 cond_free(cond);
1821
1822 /* Not taken: Condition not satisfied; nullify on backward branches. */
1823 n = is_n && disp < 0;
1824 if (n && use_nullify_skip(ctx)) {
1825 nullify_set(ctx, 0);
a881c8e7 1826 gen_goto_tb(ctx, 0, ctx->iaoq_n, ctx->iaoq_n + 4);
98cd9ca7
RH
1827 } else {
1828 if (!n && ctx->null_lab) {
1829 gen_set_label(ctx->null_lab);
1830 ctx->null_lab = NULL;
1831 }
1832 nullify_set(ctx, n);
c301f34e
RH
1833 if (ctx->iaoq_n == -1) {
1834 /* The temporary iaoq_n_var died at the branch above.
1835 Regenerate it here instead of saving it. */
1836 tcg_gen_addi_reg(ctx->iaoq_n_var, cpu_iaoq_b, 4);
1837 }
a881c8e7 1838 gen_goto_tb(ctx, 0, ctx->iaoq_b, ctx->iaoq_n);
98cd9ca7
RH
1839 }
1840
1841 gen_set_label(taken);
1842
1843 /* Taken: Condition satisfied; nullify on forward branches. */
1844 n = is_n && disp >= 0;
1845 if (n && use_nullify_skip(ctx)) {
1846 nullify_set(ctx, 0);
a881c8e7 1847 gen_goto_tb(ctx, 1, dest, dest + 4);
98cd9ca7
RH
1848 } else {
1849 nullify_set(ctx, n);
a881c8e7 1850 gen_goto_tb(ctx, 1, ctx->iaoq_b, dest);
98cd9ca7
RH
1851 }
1852
1853 /* Not taken: the branch itself was nullified. */
1854 if (ctx->null_lab) {
1855 gen_set_label(ctx->null_lab);
1856 ctx->null_lab = NULL;
31234768 1857 ctx->base.is_jmp = DISAS_IAQ_N_STALE;
98cd9ca7 1858 } else {
31234768 1859 ctx->base.is_jmp = DISAS_NORETURN;
98cd9ca7 1860 }
01afb7be 1861 return true;
98cd9ca7
RH
1862}
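/*
 * The two "nullify on ..." legs above implement the PA-RISC rule that
 * a conditional branch with the ,n completer nullifies the following
 * instruction only in its statically unlikely direction: taken forward
 * branches and not-taken backward branches.  For a typical loop-closing
 * branch (disp < 0) this means the delay slot executes on every taken
 * iteration and is skipped only when the loop finally falls through.
 */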
1863
1864/* Emit an unconditional branch to an indirect target. This handles
1865 nullification of the branch itself. */
01afb7be 1866static bool do_ibranch(DisasContext *ctx, TCGv_reg dest,
31234768 1867 unsigned link, bool is_n)
98cd9ca7 1868{
eaa3783b 1869 TCGv_reg a0, a1, next, tmp;
98cd9ca7
RH
1870 TCGCond c;
1871
1872 assert(ctx->null_lab == NULL);
1873
1874 if (ctx->null_cond.c == TCG_COND_NEVER) {
1875 if (link != 0) {
741322f4 1876 copy_iaoq_entry(ctx, cpu_gr[link], ctx->iaoq_n, ctx->iaoq_n_var);
98cd9ca7 1877 }
e12c6309 1878 next = tcg_temp_new();
eaa3783b 1879 tcg_gen_mov_reg(next, dest);
98cd9ca7 1880 if (is_n) {
c301f34e 1881 if (use_nullify_skip(ctx)) {
a0180973
RH
1882 copy_iaoq_entry(ctx, cpu_iaoq_f, -1, next);
1883 tcg_gen_addi_reg(next, next, 4);
1884 copy_iaoq_entry(ctx, cpu_iaoq_b, -1, next);
c301f34e 1885 nullify_set(ctx, 0);
31234768 1886 ctx->base.is_jmp = DISAS_IAQ_N_UPDATED;
01afb7be 1887 return true;
c301f34e 1888 }
98cd9ca7
RH
1889 ctx->null_cond.c = TCG_COND_ALWAYS;
1890 }
c301f34e
RH
1891 ctx->iaoq_n = -1;
1892 ctx->iaoq_n_var = next;
98cd9ca7
RH
1893 } else if (is_n && use_nullify_skip(ctx)) {
1894 /* The (conditional) branch, B, nullifies the next insn, N,
1895 and we're allowed to skip execution of N (no single-step or
4137cb83 1896 tracepoint in effect). Since the goto_ptr that we must use
98cd9ca7
RH
1897 for the indirect branch consumes no special resources, we
1898 can (conditionally) skip B and continue execution. */
1899 /* The use_nullify_skip test implies we have a known control path. */
1900 tcg_debug_assert(ctx->iaoq_b != -1);
1901 tcg_debug_assert(ctx->iaoq_n != -1);
1902
1903 /* We do have to handle the non-local temporary, DEST, before
1904 branching. Since IAOQ_F is not really live at this point, we
1905 can simply store DEST optimistically. Similarly with IAOQ_B. */
a0180973
RH
1906 copy_iaoq_entry(ctx, cpu_iaoq_f, -1, dest);
1907 next = tcg_temp_new();
1908 tcg_gen_addi_reg(next, dest, 4);
1909 copy_iaoq_entry(ctx, cpu_iaoq_b, -1, next);
98cd9ca7
RH
1910
1911 nullify_over(ctx);
1912 if (link != 0) {
9a91dd84 1913 copy_iaoq_entry(ctx, cpu_gr[link], ctx->iaoq_n, ctx->iaoq_n_var);
98cd9ca7 1914 }
7f11636d 1915 tcg_gen_lookup_and_goto_ptr();
01afb7be 1916 return nullify_end(ctx);
98cd9ca7 1917 } else {
98cd9ca7
RH
1918 c = ctx->null_cond.c;
1919 a0 = ctx->null_cond.a0;
1920 a1 = ctx->null_cond.a1;
1921
1922 tmp = tcg_temp_new();
e12c6309 1923 next = tcg_temp_new();
98cd9ca7 1924
741322f4 1925 copy_iaoq_entry(ctx, tmp, ctx->iaoq_n, ctx->iaoq_n_var);
eaa3783b 1926 tcg_gen_movcond_reg(c, next, a0, a1, tmp, dest);
98cd9ca7
RH
1927 ctx->iaoq_n = -1;
1928 ctx->iaoq_n_var = next;
1929
1930 if (link != 0) {
eaa3783b 1931 tcg_gen_movcond_reg(c, cpu_gr[link], a0, a1, cpu_gr[link], tmp);
98cd9ca7
RH
1932 }
1933
1934 if (is_n) {
1935 /* The branch nullifies the next insn, which means the state of N
1936 after the branch is the inverse of the state of N that applied
1937 to the branch. */
eaa3783b 1938 tcg_gen_setcond_reg(tcg_invert_cond(c), cpu_psw_n, a0, a1);
98cd9ca7
RH
1939 cond_free(&ctx->null_cond);
1940 ctx->null_cond = cond_make_n();
1941 ctx->psw_n_nonzero = true;
1942 } else {
1943 cond_free(&ctx->null_cond);
1944 }
1945 }
01afb7be 1946 return true;
98cd9ca7
RH
1947}
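/*
 * In the conditional case above, null_cond describes when this branch
 * is itself nullified.  The movcond therefore selects the sequential
 * successor (tmp, a copy of iaoq_n) when the branch is nullified and
 * DEST when it executes, and the link register is likewise updated
 * only on the executing path.  With ,n the new PSW[N] is the inverse
 * of that condition: if the branch executed, the next insn is
 * nullified; if the branch was itself nullified, it is not.
 */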
1948
660eefe1
RH
1949/* Implement
1950 * if (IAOQ_Front{30..31} < GR[b]{30..31})
1951 * IAOQ_Next{30..31} ← GR[b]{30..31};
1952 * else
1953 * IAOQ_Next{30..31} ← IAOQ_Front{30..31};
1954 * which keeps the privilege level from being increased.
1955 */
1956static TCGv_reg do_ibranch_priv(DisasContext *ctx, TCGv_reg offset)
1957{
660eefe1
RH
1958 TCGv_reg dest;
1959 switch (ctx->privilege) {
1960 case 0:
1961 /* Privilege 0 is maximum and is allowed to decrease. */
1962 return offset;
1963 case 3:
993119fe 1964 /* Privilege 3 is minimum and is never allowed to increase. */
e12c6309 1965 dest = tcg_temp_new();
660eefe1
RH
1966 tcg_gen_ori_reg(dest, offset, 3);
1967 break;
1968 default:
e12c6309 1969 dest = tcg_temp_new();
660eefe1
RH
1970 tcg_gen_andi_reg(dest, offset, -4);
1971 tcg_gen_ori_reg(dest, dest, ctx->privilege);
1972 tcg_gen_movcond_reg(TCG_COND_GTU, dest, dest, offset, dest, offset);
660eefe1
RH
1973 break;
1974 }
1975 return dest;
660eefe1
RH
1976}
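/*
 * Worked example of the clamp above: at privilege level 2, a branch
 * target whose low two bits are 0 (a request for privilege 0) yields
 * dest = (offset & -4) | 2, which compares greater-unsigned than the
 * original offset, so the clamped value wins and the privilege level
 * stays at 2.  A target with low bits 3 yields dest < offset, so the
 * original offset wins and the privilege level drops to 3, which is
 * always permitted.
 */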
1977
ba1d0b44 1978#ifdef CONFIG_USER_ONLY
7ad439df
RH
1979/* On Linux, page zero is normally marked execute only + gateway.
1980 Therefore normal read or write is supposed to fail, but specific
1981 offsets have kernel code mapped to raise permissions to implement
1982 system calls. Handling this via an explicit check here, rather
1983 than in the "be disp(sr2,r0)" instruction that probably sent us
1984 here, is the easiest way to handle the branch delay slot on the
1985 aforementioned BE. */
31234768 1986static void do_page_zero(DisasContext *ctx)
7ad439df 1987{
a0180973
RH
1988 TCGv_reg tmp;
1989
7ad439df
RH
1990 /* If by some means we get here with PSW[N]=1, that implies that
1991 the B,GATE instruction would be skipped, and we'd fault on the
8b81968c 1992 next insn within the privileged page. */
7ad439df
RH
1993 switch (ctx->null_cond.c) {
1994 case TCG_COND_NEVER:
1995 break;
1996 case TCG_COND_ALWAYS:
eaa3783b 1997 tcg_gen_movi_reg(cpu_psw_n, 0);
7ad439df
RH
1998 goto do_sigill;
1999 default:
2000 /* Since this is always the first (and only) insn within the
2001 TB, we should know the state of PSW[N] from TB->FLAGS. */
2002 g_assert_not_reached();
2003 }
2004
2005 /* Check that we didn't arrive here via some means that allowed
2006 non-sequential instruction execution. Normally the PSW[B] bit
2007 detects this by disallowing the B,GATE instruction to execute
2008 under such conditions. */
2009 if (ctx->iaoq_b != ctx->iaoq_f + 4) {
2010 goto do_sigill;
2011 }
2012
ebd0e151 2013 switch (ctx->iaoq_f & -4) {
7ad439df 2014 case 0x00: /* Null pointer call */
2986721d 2015 gen_excp_1(EXCP_IMP);
31234768
RH
2016 ctx->base.is_jmp = DISAS_NORETURN;
2017 break;
7ad439df
RH
2018
2019 case 0xb0: /* LWS */
2020 gen_excp_1(EXCP_SYSCALL_LWS);
31234768
RH
2021 ctx->base.is_jmp = DISAS_NORETURN;
2022 break;
7ad439df
RH
2023
2024 case 0xe0: /* SET_THREAD_POINTER */
ad75a51e 2025 tcg_gen_st_reg(cpu_gr[26], tcg_env, offsetof(CPUHPPAState, cr[27]));
a0180973
RH
2026 tmp = tcg_temp_new();
2027 tcg_gen_ori_reg(tmp, cpu_gr[31], 3);
2028 copy_iaoq_entry(ctx, cpu_iaoq_f, -1, tmp);
2029 tcg_gen_addi_reg(tmp, tmp, 4);
2030 copy_iaoq_entry(ctx, cpu_iaoq_b, -1, tmp);
31234768
RH
2031 ctx->base.is_jmp = DISAS_IAQ_N_UPDATED;
2032 break;
7ad439df
RH
2033
2034 case 0x100: /* SYSCALL */
2035 gen_excp_1(EXCP_SYSCALL);
31234768
RH
2036 ctx->base.is_jmp = DISAS_NORETURN;
2037 break;
7ad439df
RH
2038
2039 default:
2040 do_sigill:
2986721d 2041 gen_excp_1(EXCP_ILL);
31234768
RH
2042 ctx->base.is_jmp = DISAS_NORETURN;
2043 break;
7ad439df
RH
2044 }
2045}
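/*
 * Rough sketch of the guest code this models (Linux parisc ABI; the
 * exact sequence is the guest's business, not something enforced
 * here): a system call branches to gateway-page offset 0x100 with the
 * syscall number loaded in the delay slot, e.g.
 *     ble  0x100(%sr2, %r0)
 *     ldi  __NR_write, %r20
 * while offset 0xb0 provides the light-weight syscalls and 0xe0 the
 * thread-pointer store into cr27 handled in the cases above.
 */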
ba1d0b44 2046#endif
7ad439df 2047
deee69a1 2048static bool trans_nop(DisasContext *ctx, arg_nop *a)
b2167459
RH
2049{
2050 cond_free(&ctx->null_cond);
31234768 2051 return true;
b2167459
RH
2052}
2053
40f9f908 2054static bool trans_break(DisasContext *ctx, arg_break *a)
98a9cb79 2055{
31234768 2056 return gen_excp_iir(ctx, EXCP_BREAK);
98a9cb79
RH
2057}
2058
e36f27ef 2059static bool trans_sync(DisasContext *ctx, arg_sync *a)
98a9cb79
RH
2060{
2061 /* No point in nullifying the memory barrier. */
2062 tcg_gen_mb(TCG_BAR_SC | TCG_MO_ALL);
2063
2064 cond_free(&ctx->null_cond);
31234768 2065 return true;
98a9cb79
RH
2066}
2067
c603e14a 2068static bool trans_mfia(DisasContext *ctx, arg_mfia *a)
98a9cb79 2069{
c603e14a 2070 unsigned rt = a->t;
eaa3783b
RH
2071 TCGv_reg tmp = dest_gpr(ctx, rt);
2072 tcg_gen_movi_reg(tmp, ctx->iaoq_f);
98a9cb79
RH
2073 save_gpr(ctx, rt, tmp);
2074
2075 cond_free(&ctx->null_cond);
31234768 2076 return true;
98a9cb79
RH
2077}
2078
c603e14a 2079static bool trans_mfsp(DisasContext *ctx, arg_mfsp *a)
98a9cb79 2080{
c603e14a
RH
2081 unsigned rt = a->t;
2082 unsigned rs = a->sp;
33423472
RH
2083 TCGv_i64 t0 = tcg_temp_new_i64();
2084 TCGv_reg t1 = tcg_temp_new();
98a9cb79 2085
33423472
RH
2086 load_spr(ctx, t0, rs);
2087 tcg_gen_shri_i64(t0, t0, 32);
2088 tcg_gen_trunc_i64_reg(t1, t0);
2089
2090 save_gpr(ctx, rt, t1);
98a9cb79
RH
2091
2092 cond_free(&ctx->null_cond);
31234768 2093 return true;
98a9cb79
RH
2094}
2095
c603e14a 2096static bool trans_mfctl(DisasContext *ctx, arg_mfctl *a)
98a9cb79 2097{
c603e14a
RH
2098 unsigned rt = a->t;
2099 unsigned ctl = a->r;
eaa3783b 2100 TCGv_reg tmp;
98a9cb79
RH
2101
2102 switch (ctl) {
35136a77 2103 case CR_SAR:
98a9cb79 2104#ifdef TARGET_HPPA64
c603e14a 2105 if (a->e == 0) {
98a9cb79
RH
2106 /* MFSAR without ,W masks low 5 bits. */
2107 tmp = dest_gpr(ctx, rt);
eaa3783b 2108 tcg_gen_andi_reg(tmp, cpu_sar, 31);
98a9cb79 2109 save_gpr(ctx, rt, tmp);
35136a77 2110 goto done;
98a9cb79
RH
2111 }
2112#endif
2113 save_gpr(ctx, rt, cpu_sar);
35136a77
RH
2114 goto done;
2115 case CR_IT: /* Interval Timer */
2116 /* FIXME: Respect PSW_S bit. */
2117 nullify_over(ctx);
98a9cb79 2118 tmp = dest_gpr(ctx, rt);
dfd1b812 2119 if (translator_io_start(&ctx->base)) {
49c29d6c 2120 gen_helper_read_interval_timer(tmp);
31234768 2121 ctx->base.is_jmp = DISAS_IAQ_N_STALE;
49c29d6c
RH
2122 } else {
2123 gen_helper_read_interval_timer(tmp);
49c29d6c 2124 }
98a9cb79 2125 save_gpr(ctx, rt, tmp);
31234768 2126 return nullify_end(ctx);
98a9cb79 2127 case 26:
98a9cb79 2128 case 27:
98a9cb79
RH
2129 break;
2130 default:
2131 /* All other control registers are privileged. */
35136a77
RH
2132 CHECK_MOST_PRIVILEGED(EXCP_PRIV_REG);
2133 break;
98a9cb79
RH
2134 }
2135
e12c6309 2136 tmp = tcg_temp_new();
ad75a51e 2137 tcg_gen_ld_reg(tmp, tcg_env, offsetof(CPUHPPAState, cr[ctl]));
35136a77
RH
2138 save_gpr(ctx, rt, tmp);
2139
2140 done:
98a9cb79 2141 cond_free(&ctx->null_cond);
31234768 2142 return true;
98a9cb79
RH
2143}
2144
c603e14a 2145static bool trans_mtsp(DisasContext *ctx, arg_mtsp *a)
33423472 2146{
c603e14a
RH
2147 unsigned rr = a->r;
2148 unsigned rs = a->sp;
33423472
RH
2149 TCGv_i64 t64;
2150
2151 if (rs >= 5) {
2152 CHECK_MOST_PRIVILEGED(EXCP_PRIV_REG);
2153 }
2154 nullify_over(ctx);
2155
2156 t64 = tcg_temp_new_i64();
2157 tcg_gen_extu_reg_i64(t64, load_gpr(ctx, rr));
2158 tcg_gen_shli_i64(t64, t64, 32);
2159
2160 if (rs >= 4) {
ad75a51e 2161 tcg_gen_st_i64(t64, tcg_env, offsetof(CPUHPPAState, sr[rs]));
494737b7 2162 ctx->tb_flags &= ~TB_FLAG_SR_SAME;
33423472
RH
2163 } else {
2164 tcg_gen_mov_i64(cpu_sr[rs], t64);
2165 }
33423472 2166
31234768 2167 return nullify_end(ctx);
33423472
RH
2168}
2169
c603e14a 2170static bool trans_mtctl(DisasContext *ctx, arg_mtctl *a)
98a9cb79 2171{
c603e14a 2172 unsigned ctl = a->t;
4845f015 2173 TCGv_reg reg;
eaa3783b 2174 TCGv_reg tmp;
98a9cb79 2175
35136a77 2176 if (ctl == CR_SAR) {
4845f015 2177 reg = load_gpr(ctx, a->r);
98a9cb79 2178 tmp = tcg_temp_new();
f3618f59 2179 tcg_gen_andi_reg(tmp, reg, ctx->is_pa20 ? 63 : 31);
98a9cb79 2180 save_or_nullify(ctx, cpu_sar, tmp);
35136a77
RH
2181
2182 cond_free(&ctx->null_cond);
31234768 2183 return true;
98a9cb79
RH
2184 }
2185
35136a77
RH
2186 /* All other control registers are privileged or read-only. */
2187 CHECK_MOST_PRIVILEGED(EXCP_PRIV_REG);
2188
c603e14a 2189#ifndef CONFIG_USER_ONLY
35136a77 2190 nullify_over(ctx);
4845f015
SS
2191 reg = load_gpr(ctx, a->r);
2192
35136a77
RH
2193 switch (ctl) {
2194 case CR_IT:
ad75a51e 2195 gen_helper_write_interval_timer(tcg_env, reg);
35136a77 2196 break;
4f5f2548 2197 case CR_EIRR:
ad75a51e 2198 gen_helper_write_eirr(tcg_env, reg);
4f5f2548
RH
2199 break;
2200 case CR_EIEM:
ad75a51e 2201 gen_helper_write_eiem(tcg_env, reg);
31234768 2202 ctx->base.is_jmp = DISAS_IAQ_N_STALE_EXIT;
4f5f2548
RH
2203 break;
2204
35136a77
RH
2205 case CR_IIASQ:
2206 case CR_IIAOQ:
2207 /* FIXME: Respect PSW_Q bit */
2208 /* The write advances the queue and stores to the back element. */
e12c6309 2209 tmp = tcg_temp_new();
ad75a51e 2210 tcg_gen_ld_reg(tmp, tcg_env,
35136a77 2211 offsetof(CPUHPPAState, cr_back[ctl - CR_IIASQ]));
ad75a51e
RH
2212 tcg_gen_st_reg(tmp, tcg_env, offsetof(CPUHPPAState, cr[ctl]));
2213 tcg_gen_st_reg(reg, tcg_env,
35136a77
RH
2214 offsetof(CPUHPPAState, cr_back[ctl - CR_IIASQ]));
2215 break;
2216
d5de20bd
SS
2217 case CR_PID1:
2218 case CR_PID2:
2219 case CR_PID3:
2220 case CR_PID4:
ad75a51e 2221 tcg_gen_st_reg(reg, tcg_env, offsetof(CPUHPPAState, cr[ctl]));
d5de20bd 2222#ifndef CONFIG_USER_ONLY
ad75a51e 2223 gen_helper_change_prot_id(tcg_env);
d5de20bd
SS
2224#endif
2225 break;
2226
35136a77 2227 default:
ad75a51e 2228 tcg_gen_st_reg(reg, tcg_env, offsetof(CPUHPPAState, cr[ctl]));
35136a77
RH
2229 break;
2230 }
31234768 2231 return nullify_end(ctx);
4f5f2548 2232#endif
98a9cb79
RH
2233}
2234
c603e14a 2235static bool trans_mtsarcm(DisasContext *ctx, arg_mtsarcm *a)
98a9cb79 2236{
eaa3783b 2237 TCGv_reg tmp = tcg_temp_new();
98a9cb79 2238
c603e14a 2239 tcg_gen_not_reg(tmp, load_gpr(ctx, a->r));
f3618f59 2240 tcg_gen_andi_reg(tmp, tmp, ctx->is_pa20 ? 63 : 31);
98a9cb79 2241 save_or_nullify(ctx, cpu_sar, tmp);
98a9cb79
RH
2242
2243 cond_free(&ctx->null_cond);
31234768 2244 return true;
98a9cb79
RH
2245}
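/*
 * The "is_pa20 ? 63 : 31" masks above reflect the architected width of
 * the shift amount register: %sar holds 5 bits on 32-bit (PA1.x) CPUs
 * and 6 bits on 64-bit (PA2.0) CPUs, so MTCTL to %sar and MTSARCM both
 * discard the excess bits rather than storing them.
 */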
2246
e36f27ef 2247static bool trans_ldsid(DisasContext *ctx, arg_ldsid *a)
98a9cb79 2248{
e36f27ef 2249 TCGv_reg dest = dest_gpr(ctx, a->t);
98a9cb79 2250
2330504c
HD
2251#ifdef CONFIG_USER_ONLY
2252 /* We don't implement space registers in user mode. */
eaa3783b 2253 tcg_gen_movi_reg(dest, 0);
2330504c 2254#else
2330504c
HD
2255 TCGv_i64 t0 = tcg_temp_new_i64();
2256
e36f27ef 2257 tcg_gen_mov_i64(t0, space_select(ctx, a->sp, load_gpr(ctx, a->b)));
2330504c
HD
2258 tcg_gen_shri_i64(t0, t0, 32);
2259 tcg_gen_trunc_i64_reg(dest, t0);
2330504c 2260#endif
e36f27ef 2261 save_gpr(ctx, a->t, dest);
98a9cb79
RH
2262
2263 cond_free(&ctx->null_cond);
31234768 2264 return true;
98a9cb79
RH
2265}
2266
e36f27ef 2267static bool trans_rsm(DisasContext *ctx, arg_rsm *a)
e1b5a5ed 2268{
e36f27ef
RH
2269 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2270#ifndef CONFIG_USER_ONLY
e1b5a5ed
RH
2271 TCGv_reg tmp;
2272
e1b5a5ed
RH
2273 nullify_over(ctx);
2274
e12c6309 2275 tmp = tcg_temp_new();
ad75a51e 2276 tcg_gen_ld_reg(tmp, tcg_env, offsetof(CPUHPPAState, psw));
e36f27ef 2277 tcg_gen_andi_reg(tmp, tmp, ~a->i);
ad75a51e 2278 gen_helper_swap_system_mask(tmp, tcg_env, tmp);
e36f27ef 2279 save_gpr(ctx, a->t, tmp);
e1b5a5ed
RH
2280
2281 /* Exit the TB to recognize new interrupts, e.g. PSW_M. */
31234768
RH
2282 ctx->base.is_jmp = DISAS_IAQ_N_STALE_EXIT;
2283 return nullify_end(ctx);
e36f27ef 2284#endif
e1b5a5ed
RH
2285}
2286
e36f27ef 2287static bool trans_ssm(DisasContext *ctx, arg_ssm *a)
e1b5a5ed 2288{
e36f27ef
RH
2289 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2290#ifndef CONFIG_USER_ONLY
e1b5a5ed
RH
2291 TCGv_reg tmp;
2292
e1b5a5ed
RH
2293 nullify_over(ctx);
2294
e12c6309 2295 tmp = tcg_temp_new();
ad75a51e 2296 tcg_gen_ld_reg(tmp, tcg_env, offsetof(CPUHPPAState, psw));
e36f27ef 2297 tcg_gen_ori_reg(tmp, tmp, a->i);
ad75a51e 2298 gen_helper_swap_system_mask(tmp, tcg_env, tmp);
e36f27ef 2299 save_gpr(ctx, a->t, tmp);
e1b5a5ed
RH
2300
2301 /* Exit the TB to recognize new interrupts, e.g. PSW_I. */
31234768
RH
2302 ctx->base.is_jmp = DISAS_IAQ_N_STALE_EXIT;
2303 return nullify_end(ctx);
e36f27ef 2304#endif
e1b5a5ed
RH
2305}
2306
c603e14a 2307static bool trans_mtsm(DisasContext *ctx, arg_mtsm *a)
e1b5a5ed 2308{
e1b5a5ed 2309 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
c603e14a
RH
2310#ifndef CONFIG_USER_ONLY
2311 TCGv_reg tmp, reg;
e1b5a5ed
RH
2312 nullify_over(ctx);
2313
c603e14a 2314 reg = load_gpr(ctx, a->r);
e12c6309 2315 tmp = tcg_temp_new();
ad75a51e 2316 gen_helper_swap_system_mask(tmp, tcg_env, reg);
e1b5a5ed
RH
2317
2318 /* Exit the TB to recognize new interrupts. */
31234768
RH
2319 ctx->base.is_jmp = DISAS_IAQ_N_STALE_EXIT;
2320 return nullify_end(ctx);
c603e14a 2321#endif
e1b5a5ed 2322}
f49b3537 2323
e36f27ef 2324static bool do_rfi(DisasContext *ctx, bool rfi_r)
f49b3537 2325{
f49b3537 2326 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
e36f27ef 2327#ifndef CONFIG_USER_ONLY
f49b3537
RH
2328 nullify_over(ctx);
2329
e36f27ef 2330 if (rfi_r) {
ad75a51e 2331 gen_helper_rfi_r(tcg_env);
f49b3537 2332 } else {
ad75a51e 2333 gen_helper_rfi(tcg_env);
f49b3537 2334 }
31234768 2335 /* Exit the TB to recognize new interrupts. */
8532a14e 2336 tcg_gen_exit_tb(NULL, 0);
31234768 2337 ctx->base.is_jmp = DISAS_NORETURN;
f49b3537 2338
31234768 2339 return nullify_end(ctx);
e36f27ef
RH
2340#endif
2341}
2342
2343static bool trans_rfi(DisasContext *ctx, arg_rfi *a)
2344{
2345 return do_rfi(ctx, false);
2346}
2347
2348static bool trans_rfi_r(DisasContext *ctx, arg_rfi_r *a)
2349{
2350 return do_rfi(ctx, true);
f49b3537 2351}
6210db05 2352
96927adb
RH
2353static bool trans_halt(DisasContext *ctx, arg_halt *a)
2354{
2355 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
e36f27ef 2356#ifndef CONFIG_USER_ONLY
96927adb 2357 nullify_over(ctx);
ad75a51e 2358 gen_helper_halt(tcg_env);
96927adb
RH
2359 ctx->base.is_jmp = DISAS_NORETURN;
2360 return nullify_end(ctx);
2361#endif
2362}
2363
2364static bool trans_reset(DisasContext *ctx, arg_reset *a)
6210db05
HD
2365{
2366 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
96927adb 2367#ifndef CONFIG_USER_ONLY
6210db05 2368 nullify_over(ctx);
ad75a51e 2369 gen_helper_reset(tcg_env);
31234768
RH
2370 ctx->base.is_jmp = DISAS_NORETURN;
2371 return nullify_end(ctx);
96927adb 2372#endif
6210db05 2373}
e1b5a5ed 2374
4a4554c6
HD
2375static bool trans_getshadowregs(DisasContext *ctx, arg_getshadowregs *a)
2376{
2377 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2378#ifndef CONFIG_USER_ONLY
2379 nullify_over(ctx);
ad75a51e 2380 gen_helper_getshadowregs(tcg_env);
4a4554c6
HD
2381 return nullify_end(ctx);
2382#endif
2383}
2384
deee69a1 2385static bool trans_nop_addrx(DisasContext *ctx, arg_ldst *a)
98a9cb79 2386{
deee69a1
RH
2387 if (a->m) {
2388 TCGv_reg dest = dest_gpr(ctx, a->b);
2389 TCGv_reg src1 = load_gpr(ctx, a->b);
2390 TCGv_reg src2 = load_gpr(ctx, a->x);
98a9cb79 2391
deee69a1
RH
2392 /* The only thing we need to do is the base register modification. */
2393 tcg_gen_add_reg(dest, src1, src2);
2394 save_gpr(ctx, a->b, dest);
2395 }
98a9cb79 2396 cond_free(&ctx->null_cond);
31234768 2397 return true;
98a9cb79
RH
2398}
2399
deee69a1 2400static bool trans_probe(DisasContext *ctx, arg_probe *a)
98a9cb79 2401{
86f8d05f 2402 TCGv_reg dest, ofs;
eed14219 2403 TCGv_i32 level, want;
86f8d05f 2404 TCGv_tl addr;
98a9cb79
RH
2405
2406 nullify_over(ctx);
2407
deee69a1
RH
2408 dest = dest_gpr(ctx, a->t);
2409 form_gva(ctx, &addr, &ofs, a->b, 0, 0, 0, a->sp, 0, false);
eed14219 2410
deee69a1 2411 if (a->imm) {
29dd6f64 2412 level = tcg_constant_i32(a->ri);
98a9cb79 2413 } else {
eed14219 2414 level = tcg_temp_new_i32();
deee69a1 2415 tcg_gen_trunc_reg_i32(level, load_gpr(ctx, a->ri));
eed14219 2416 tcg_gen_andi_i32(level, level, 3);
98a9cb79 2417 }
29dd6f64 2418 want = tcg_constant_i32(a->write ? PAGE_WRITE : PAGE_READ);
eed14219 2419
ad75a51e 2420 gen_helper_probe(dest, tcg_env, addr, level, want);
eed14219 2421
deee69a1 2422 save_gpr(ctx, a->t, dest);
31234768 2423 return nullify_end(ctx);
98a9cb79
RH
2424}
2425
deee69a1 2426static bool trans_ixtlbx(DisasContext *ctx, arg_ixtlbx *a)
8d6ae7fb 2427{
deee69a1
RH
2428 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2429#ifndef CONFIG_USER_ONLY
8d6ae7fb
RH
2430 TCGv_tl addr;
2431 TCGv_reg ofs, reg;
2432
8d6ae7fb
RH
2433 nullify_over(ctx);
2434
deee69a1
RH
2435 form_gva(ctx, &addr, &ofs, a->b, 0, 0, 0, a->sp, 0, false);
2436 reg = load_gpr(ctx, a->r);
2437 if (a->addr) {
ad75a51e 2438 gen_helper_itlba(tcg_env, addr, reg);
8d6ae7fb 2439 } else {
ad75a51e 2440 gen_helper_itlbp(tcg_env, addr, reg);
8d6ae7fb
RH
2441 }
2442
32dc7569
SS
2443 /* Exit TB for TLB change if mmu is enabled. */
2444 if (ctx->tb_flags & PSW_C) {
31234768
RH
2445 ctx->base.is_jmp = DISAS_IAQ_N_STALE;
2446 }
2447 return nullify_end(ctx);
deee69a1 2448#endif
8d6ae7fb 2449}
63300a00 2450
deee69a1 2451static bool trans_pxtlbx(DisasContext *ctx, arg_pxtlbx *a)
63300a00 2452{
deee69a1
RH
2453 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2454#ifndef CONFIG_USER_ONLY
63300a00
RH
2455 TCGv_tl addr;
2456 TCGv_reg ofs;
2457
63300a00
RH
2458 nullify_over(ctx);
2459
deee69a1
RH
2460 form_gva(ctx, &addr, &ofs, a->b, a->x, 0, 0, a->sp, a->m, false);
2461 if (a->m) {
2462 save_gpr(ctx, a->b, ofs);
63300a00 2463 }
deee69a1 2464 if (a->local) {
ad75a51e 2465 gen_helper_ptlbe(tcg_env);
63300a00 2466 } else {
ad75a51e 2467 gen_helper_ptlb(tcg_env, addr);
63300a00
RH
2468 }
2469
2470 /* Exit TB for TLB change if mmu is enabled. */
6797c315
NH
2471 if (ctx->tb_flags & PSW_C) {
2472 ctx->base.is_jmp = DISAS_IAQ_N_STALE;
2473 }
2474 return nullify_end(ctx);
2475#endif
2476}
2477
2478/*
2479 * Implement the pcxl and pcxl2 Fast TLB Insert instructions.
2480 * See
2481 * https://parisc.wiki.kernel.org/images-parisc/a/a9/Pcxl2_ers.pdf
2482 * page 13-9 (195/206)
2483 */
2484static bool trans_ixtlbxf(DisasContext *ctx, arg_ixtlbxf *a)
2485{
2486 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2487#ifndef CONFIG_USER_ONLY
2488 TCGv_tl addr, atl, stl;
2489 TCGv_reg reg;
2490
2491 nullify_over(ctx);
2492
2493 /*
2494 * FIXME:
2495 * if (not (pcxl or pcxl2))
2496 * return gen_illegal(ctx);
2497 *
2498 * Note for future: these are 32-bit systems; no hppa64.
2499 */
2500
2501 atl = tcg_temp_new_tl();
2502 stl = tcg_temp_new_tl();
2503 addr = tcg_temp_new_tl();
2504
ad75a51e 2505 tcg_gen_ld32u_i64(stl, tcg_env,
6797c315
NH
2506 a->data ? offsetof(CPUHPPAState, cr[CR_ISR])
2507 : offsetof(CPUHPPAState, cr[CR_IIASQ]));
ad75a51e 2508 tcg_gen_ld32u_i64(atl, tcg_env,
6797c315
NH
2509 a->data ? offsetof(CPUHPPAState, cr[CR_IOR])
2510 : offsetof(CPUHPPAState, cr[CR_IIAOQ]));
2511 tcg_gen_shli_i64(stl, stl, 32);
2512 tcg_gen_or_tl(addr, atl, stl);
6797c315
NH
2513
2514 reg = load_gpr(ctx, a->r);
2515 if (a->addr) {
ad75a51e 2516 gen_helper_itlba(tcg_env, addr, reg);
6797c315 2517 } else {
ad75a51e 2518 gen_helper_itlbp(tcg_env, addr, reg);
6797c315 2519 }
6797c315
NH
2520
2521 /* Exit TB for TLB change if mmu is enabled. */
32dc7569 2522 if (ctx->tb_flags & PSW_C) {
31234768
RH
2523 ctx->base.is_jmp = DISAS_IAQ_N_STALE;
2524 }
2525 return nullify_end(ctx);
deee69a1 2526#endif
63300a00 2527}
2dfcca9f 2528
deee69a1 2529static bool trans_lpa(DisasContext *ctx, arg_ldst *a)
2dfcca9f 2530{
deee69a1
RH
2531 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2532#ifndef CONFIG_USER_ONLY
2dfcca9f
RH
2533 TCGv_tl vaddr;
2534 TCGv_reg ofs, paddr;
2535
2dfcca9f
RH
2536 nullify_over(ctx);
2537
deee69a1 2538 form_gva(ctx, &vaddr, &ofs, a->b, a->x, 0, 0, a->sp, a->m, false);
2dfcca9f
RH
2539
2540 paddr = tcg_temp_new();
ad75a51e 2541 gen_helper_lpa(paddr, tcg_env, vaddr);
2dfcca9f
RH
2542
2543 /* Note that physical address result overrides base modification. */
deee69a1
RH
2544 if (a->m) {
2545 save_gpr(ctx, a->b, ofs);
2dfcca9f 2546 }
deee69a1 2547 save_gpr(ctx, a->t, paddr);
2dfcca9f 2548
31234768 2549 return nullify_end(ctx);
deee69a1 2550#endif
2dfcca9f 2551}
43a97b81 2552
deee69a1 2553static bool trans_lci(DisasContext *ctx, arg_lci *a)
43a97b81 2554{
43a97b81
RH
2555 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2556
2557 /* The Coherence Index is an implementation-defined function of the
2558 physical address. Two addresses with the same CI have a coherent
2559 view of the cache. Our implementation is to return 0 for all,
2560 since the entire address space is coherent. */
29dd6f64 2561 save_gpr(ctx, a->t, tcg_constant_reg(0));
43a97b81 2562
31234768
RH
2563 cond_free(&ctx->null_cond);
2564 return true;
43a97b81 2565}
98a9cb79 2566
0c982a28 2567static bool trans_add(DisasContext *ctx, arg_rrr_cf_sh *a)
b2167459 2568{
0c982a28
RH
2569 return do_add_reg(ctx, a, false, false, false, false);
2570}
b2167459 2571
0c982a28
RH
2572static bool trans_add_l(DisasContext *ctx, arg_rrr_cf_sh *a)
2573{
2574 return do_add_reg(ctx, a, true, false, false, false);
2575}
b2167459 2576
0c982a28
RH
2577static bool trans_add_tsv(DisasContext *ctx, arg_rrr_cf_sh *a)
2578{
2579 return do_add_reg(ctx, a, false, true, false, false);
b2167459
RH
2580}
2581
0c982a28 2582static bool trans_add_c(DisasContext *ctx, arg_rrr_cf_sh *a)
b2167459 2583{
0c982a28
RH
2584 return do_add_reg(ctx, a, false, false, false, true);
2585}
b2167459 2586
0c982a28
RH
2587static bool trans_add_c_tsv(DisasContext *ctx, arg_rrr_cf_sh *a)
2588{
2589 return do_add_reg(ctx, a, false, true, false, true);
2590}
b2167459 2591
0c982a28
RH
2592static bool trans_sub(DisasContext *ctx, arg_rrr_cf *a)
2593{
2594 return do_sub_reg(ctx, a, false, false, false);
b2167459
RH
2595}
2596
0c982a28 2597static bool trans_sub_tsv(DisasContext *ctx, arg_rrr_cf *a)
b2167459 2598{
0c982a28
RH
2599 return do_sub_reg(ctx, a, true, false, false);
2600}
b2167459 2601
0c982a28
RH
2602static bool trans_sub_tc(DisasContext *ctx, arg_rrr_cf *a)
2603{
2604 return do_sub_reg(ctx, a, false, false, true);
b2167459
RH
2605}
2606
0c982a28 2607static bool trans_sub_tsv_tc(DisasContext *ctx, arg_rrr_cf *a)
b2167459 2608{
0c982a28
RH
2609 return do_sub_reg(ctx, a, true, false, true);
2610}
2611
2612static bool trans_sub_b(DisasContext *ctx, arg_rrr_cf *a)
2613{
2614 return do_sub_reg(ctx, a, false, true, false);
2615}
2616
2617static bool trans_sub_b_tsv(DisasContext *ctx, arg_rrr_cf *a)
2618{
2619 return do_sub_reg(ctx, a, true, true, false);
2620}
2621
2622static bool trans_andcm(DisasContext *ctx, arg_rrr_cf *a)
2623{
2624 return do_log_reg(ctx, a, tcg_gen_andc_reg);
2625}
2626
2627static bool trans_and(DisasContext *ctx, arg_rrr_cf *a)
2628{
2629 return do_log_reg(ctx, a, tcg_gen_and_reg);
2630}
2631
2632static bool trans_or(DisasContext *ctx, arg_rrr_cf *a)
2633{
2634 if (a->cf == 0) {
2635 unsigned r2 = a->r2;
2636 unsigned r1 = a->r1;
2637 unsigned rt = a->t;
b2167459 2638
7aee8189
RH
2639 if (rt == 0) { /* NOP */
2640 cond_free(&ctx->null_cond);
2641 return true;
2642 }
2643 if (r2 == 0) { /* COPY */
2644 if (r1 == 0) {
2645 TCGv_reg dest = dest_gpr(ctx, rt);
2646 tcg_gen_movi_reg(dest, 0);
2647 save_gpr(ctx, rt, dest);
2648 } else {
2649 save_gpr(ctx, rt, cpu_gr[r1]);
2650 }
2651 cond_free(&ctx->null_cond);
2652 return true;
2653 }
2654#ifndef CONFIG_USER_ONLY
2655 /* These are QEMU extensions and are nops in the real architecture:
2656 *
2657 * or %r10,%r10,%r10 -- idle loop; wait for interrupt
2658 * or %r31,%r31,%r31 -- death loop; offline cpu
2659 * currently implemented as idle.
2660 */
2661 if ((rt == 10 || rt == 31) && r1 == rt && r2 == rt) { /* PAUSE */
7aee8189
RH
2662 /* No need to check for supervisor, as userland can only pause
2663 until the next timer interrupt. */
2664 nullify_over(ctx);
2665
2666 /* Advance the instruction queue. */
741322f4
RH
2667 copy_iaoq_entry(ctx, cpu_iaoq_f, ctx->iaoq_b, cpu_iaoq_b);
2668 copy_iaoq_entry(ctx, cpu_iaoq_b, ctx->iaoq_n, ctx->iaoq_n_var);
7aee8189
RH
2669 nullify_set(ctx, 0);
2670
2671 /* Tell the qemu main loop to halt until this cpu has work. */
ad75a51e 2672 tcg_gen_st_i32(tcg_constant_i32(1), tcg_env,
29dd6f64 2673 offsetof(CPUState, halted) - offsetof(HPPACPU, env));
7aee8189
RH
2674 gen_excp_1(EXCP_HALTED);
2675 ctx->base.is_jmp = DISAS_NORETURN;
2676
2677 return nullify_end(ctx);
2678 }
2679#endif
b2167459 2680 }
0c982a28
RH
2681 return do_log_reg(ctx, a, tcg_gen_or_reg);
2682}
7aee8189 2683
0c982a28
RH
2684static bool trans_xor(DisasContext *ctx, arg_rrr_cf *a)
2685{
2686 return do_log_reg(ctx, a, tcg_gen_xor_reg);
b2167459
RH
2687}
2688
0c982a28 2689static bool trans_cmpclr(DisasContext *ctx, arg_rrr_cf *a)
b2167459 2690{
eaa3783b 2691 TCGv_reg tcg_r1, tcg_r2;
b2167459 2692
0c982a28 2693 if (a->cf) {
b2167459
RH
2694 nullify_over(ctx);
2695 }
0c982a28
RH
2696 tcg_r1 = load_gpr(ctx, a->r1);
2697 tcg_r2 = load_gpr(ctx, a->r2);
2698 do_cmpclr(ctx, a->t, tcg_r1, tcg_r2, a->cf);
31234768 2699 return nullify_end(ctx);
b2167459
RH
2700}
2701
0c982a28 2702static bool trans_uxor(DisasContext *ctx, arg_rrr_cf *a)
b2167459 2703{
eaa3783b 2704 TCGv_reg tcg_r1, tcg_r2;
b2167459 2705
0c982a28 2706 if (a->cf) {
b2167459
RH
2707 nullify_over(ctx);
2708 }
0c982a28
RH
2709 tcg_r1 = load_gpr(ctx, a->r1);
2710 tcg_r2 = load_gpr(ctx, a->r2);
2711 do_unit(ctx, a->t, tcg_r1, tcg_r2, a->cf, false, tcg_gen_xor_reg);
31234768 2712 return nullify_end(ctx);
b2167459
RH
2713}
2714
0c982a28 2715static bool do_uaddcm(DisasContext *ctx, arg_rrr_cf *a, bool is_tc)
b2167459 2716{
eaa3783b 2717 TCGv_reg tcg_r1, tcg_r2, tmp;
b2167459 2718
0c982a28 2719 if (a->cf) {
b2167459
RH
2720 nullify_over(ctx);
2721 }
0c982a28
RH
2722 tcg_r1 = load_gpr(ctx, a->r1);
2723 tcg_r2 = load_gpr(ctx, a->r2);
e12c6309 2724 tmp = tcg_temp_new();
eaa3783b 2725 tcg_gen_not_reg(tmp, tcg_r2);
0c982a28 2726 do_unit(ctx, a->t, tcg_r1, tmp, a->cf, is_tc, tcg_gen_add_reg);
31234768 2727 return nullify_end(ctx);
b2167459
RH
2728}
2729
0c982a28
RH
2730static bool trans_uaddcm(DisasContext *ctx, arg_rrr_cf *a)
2731{
2732 return do_uaddcm(ctx, a, false);
2733}
2734
2735static bool trans_uaddcm_tc(DisasContext *ctx, arg_rrr_cf *a)
2736{
2737 return do_uaddcm(ctx, a, true);
2738}
2739
2740static bool do_dcor(DisasContext *ctx, arg_rr_cf *a, bool is_i)
b2167459 2741{
eaa3783b 2742 TCGv_reg tmp;
b2167459
RH
2743
2744 nullify_over(ctx);
2745
e12c6309 2746 tmp = tcg_temp_new();
eaa3783b 2747 tcg_gen_shri_reg(tmp, cpu_psw_cb, 3);
b2167459 2748 if (!is_i) {
eaa3783b 2749 tcg_gen_not_reg(tmp, tmp);
b2167459 2750 }
eaa3783b
RH
2751 tcg_gen_andi_reg(tmp, tmp, 0x11111111);
2752 tcg_gen_muli_reg(tmp, tmp, 6);
60e29463 2753 do_unit(ctx, a->t, load_gpr(ctx, a->r), tmp, a->cf, false,
31234768 2754 is_i ? tcg_gen_add_reg : tcg_gen_sub_reg);
31234768 2755 return nullify_end(ctx);
b2167459
RH
2756}
2757
0c982a28
RH
2758static bool trans_dcor(DisasContext *ctx, arg_rr_cf *a)
2759{
2760 return do_dcor(ctx, a, false);
2761}
2762
2763static bool trans_dcor_i(DisasContext *ctx, arg_rr_cf *a)
2764{
2765 return do_dcor(ctx, a, true);
2766}
2767
2768static bool trans_ds(DisasContext *ctx, arg_rrr_cf *a)
b2167459 2769{
eaa3783b 2770 TCGv_reg dest, add1, add2, addc, zero, in1, in2;
72ca8753 2771 TCGv_reg cout;
b2167459
RH
2772
2773 nullify_over(ctx);
2774
0c982a28
RH
2775 in1 = load_gpr(ctx, a->r1);
2776 in2 = load_gpr(ctx, a->r2);
b2167459
RH
2777
2778 add1 = tcg_temp_new();
2779 add2 = tcg_temp_new();
2780 addc = tcg_temp_new();
2781 dest = tcg_temp_new();
29dd6f64 2782 zero = tcg_constant_reg(0);
b2167459
RH
2783
2784 /* Form R1 << 1 | PSW[CB]{8}. */
eaa3783b 2785 tcg_gen_add_reg(add1, in1, in1);
72ca8753 2786 tcg_gen_add_reg(add1, add1, get_psw_carry(ctx, false));
b2167459 2787
72ca8753
RH
2788 /*
2789 * Add or subtract R2, depending on PSW[V]. Proper computation of
2790 * carry requires that we subtract via + ~R2 + 1, as described in
2791 * the manual. By extracting and masking V, we can produce the
2792 * proper inputs to the addition without movcond.
2793 */
2794 tcg_gen_sextract_reg(addc, cpu_psw_v, 31, 1);
eaa3783b
RH
2795 tcg_gen_xor_reg(add2, in2, addc);
2796 tcg_gen_andi_reg(addc, addc, 1);
72ca8753
RH
2797
2798 tcg_gen_add2_reg(dest, cpu_psw_cb_msb, add1, zero, add2, zero);
2799 tcg_gen_add2_reg(dest, cpu_psw_cb_msb, dest, cpu_psw_cb_msb, addc, zero);
b2167459 2800
b2167459 2801 /* Write back the result register. */
0c982a28 2802 save_gpr(ctx, a->t, dest);
b2167459
RH
2803
2804 /* Write back PSW[CB]. */
eaa3783b
RH
2805 tcg_gen_xor_reg(cpu_psw_cb, add1, add2);
2806 tcg_gen_xor_reg(cpu_psw_cb, cpu_psw_cb, dest);
b2167459
RH
2807
2808 /* Write back PSW[V] for the division step. */
72ca8753
RH
2809 cout = get_psw_carry(ctx, false);
2810 tcg_gen_neg_reg(cpu_psw_v, cout);
eaa3783b 2811 tcg_gen_xor_reg(cpu_psw_v, cpu_psw_v, in2);
b2167459
RH
2812
2813 /* Install the new nullification. */
0c982a28 2814 if (a->cf) {
eaa3783b 2815 TCGv_reg sv = NULL;
b47a4a02 2816 if (cond_need_sv(a->cf >> 1)) {
b2167459
RH
2817 /* ??? The lshift is supposed to contribute to overflow. */
2818 sv = do_add_sv(ctx, dest, add1, add2);
2819 }
72ca8753 2820 ctx->null_cond = do_cond(a->cf, dest, cout, sv);
b2167459
RH
2821 }
2822
31234768 2823 return nullify_end(ctx);
b2167459
RH
2824}
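/*
 * DS performs one step of the non-restoring division sequence: the
 * partial remainder is shifted left one bit with PSW[CB]{8} shifted
 * in, then R2 is either added or subtracted according to the sign bit
 * of PSW[V].  Subtraction is implemented above as + ~R2 + 1 (the xor
 * with the sign-extended V plus the masked carry-in), so the carry
 * out is produced exactly as for an addition.  The new PSW[V]
 * (carry-out combined with the sign of R2) then selects add versus
 * subtract for the following step.
 */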
2825
0588e061 2826static bool trans_addi(DisasContext *ctx, arg_rri_cf *a)
b2167459 2827{
0588e061
RH
2828 return do_add_imm(ctx, a, false, false);
2829}
b2167459 2830
0588e061
RH
2831static bool trans_addi_tsv(DisasContext *ctx, arg_rri_cf *a)
2832{
2833 return do_add_imm(ctx, a, true, false);
b2167459
RH
2834}
2835
0588e061 2836static bool trans_addi_tc(DisasContext *ctx, arg_rri_cf *a)
b2167459 2837{
0588e061
RH
2838 return do_add_imm(ctx, a, false, true);
2839}
b2167459 2840
0588e061
RH
2841static bool trans_addi_tc_tsv(DisasContext *ctx, arg_rri_cf *a)
2842{
2843 return do_add_imm(ctx, a, true, true);
2844}
b2167459 2845
0588e061
RH
2846static bool trans_subi(DisasContext *ctx, arg_rri_cf *a)
2847{
2848 return do_sub_imm(ctx, a, false);
2849}
b2167459 2850
0588e061
RH
2851static bool trans_subi_tsv(DisasContext *ctx, arg_rri_cf *a)
2852{
2853 return do_sub_imm(ctx, a, true);
b2167459
RH
2854}
2855
0588e061 2856static bool trans_cmpiclr(DisasContext *ctx, arg_rri_cf *a)
b2167459 2857{
eaa3783b 2858 TCGv_reg tcg_im, tcg_r2;
b2167459 2859
0588e061 2860 if (a->cf) {
b2167459
RH
2861 nullify_over(ctx);
2862 }
2863
d4e58033 2864 tcg_im = tcg_constant_reg(a->i);
0588e061
RH
2865 tcg_r2 = load_gpr(ctx, a->r);
2866 do_cmpclr(ctx, a->t, tcg_im, tcg_r2, a->cf);
b2167459 2867
31234768 2868 return nullify_end(ctx);
b2167459
RH
2869}
2870
1cd012a5 2871static bool trans_ld(DisasContext *ctx, arg_ldst *a)
96d6407f 2872{
0786a3b6
HD
2873 if (unlikely(TARGET_REGISTER_BITS == 32 && a->size > MO_32)) {
2874 return gen_illegal(ctx);
2875 } else {
2876 return do_load(ctx, a->t, a->b, a->x, a->scale ? a->size : 0,
1cd012a5 2877 a->disp, a->sp, a->m, a->size | MO_TE);
0786a3b6 2878 }
96d6407f
RH
2879}
2880
1cd012a5 2881static bool trans_st(DisasContext *ctx, arg_ldst *a)
96d6407f 2882{
1cd012a5 2883 assert(a->x == 0 && a->scale == 0);
0786a3b6
HD
2884 if (unlikely(TARGET_REGISTER_BITS == 32 && a->size > MO_32)) {
2885 return gen_illegal(ctx);
2886 } else {
2887 return do_store(ctx, a->t, a->b, a->disp, a->sp, a->m, a->size | MO_TE);
2888 }
96d6407f
RH
2889}
2890
1cd012a5 2891static bool trans_ldc(DisasContext *ctx, arg_ldst *a)
96d6407f 2892{
b1af755c 2893 MemOp mop = MO_TE | MO_ALIGN | a->size;
86f8d05f
RH
2894 TCGv_reg zero, dest, ofs;
2895 TCGv_tl addr;
96d6407f
RH
2896
2897 nullify_over(ctx);
2898
1cd012a5 2899 if (a->m) {
86f8d05f
RH
2900 /* Base register modification. Make sure if RT == RB,
2901 we see the result of the load. */
e12c6309 2902 dest = tcg_temp_new();
96d6407f 2903 } else {
1cd012a5 2904 dest = dest_gpr(ctx, a->t);
96d6407f
RH
2905 }
2906
1cd012a5
RH
2907 form_gva(ctx, &addr, &ofs, a->b, a->x, a->scale ? a->size : 0,
2908 a->disp, a->sp, a->m, ctx->mmu_idx == MMU_PHYS_IDX);
b1af755c
RH
2909
2910 /*
2911 * For hppa1.1, LDCW is undefined unless aligned mod 16.
2912 * However actual hardware succeeds with aligned mod 4.
2913 * Detect this case and log a GUEST_ERROR.
2914 *
2915 * TODO: HPPA64 relaxes the over-alignment requirement
2916 * with the ,co completer.
2917 */
2918 gen_helper_ldc_check(addr);
2919
29dd6f64 2920 zero = tcg_constant_reg(0);
86f8d05f 2921 tcg_gen_atomic_xchg_reg(dest, addr, zero, ctx->mmu_idx, mop);
b1af755c 2922
1cd012a5
RH
2923 if (a->m) {
2924 save_gpr(ctx, a->b, ofs);
96d6407f 2925 }
1cd012a5 2926 save_gpr(ctx, a->t, dest);
96d6407f 2927
31234768 2928 return nullify_end(ctx);
96d6407f
RH
2929}
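/*
 * LDCW (load and clear word) is the PA-RISC semaphore primitive: it
 * must return the old memory word and zero it in one indivisible
 * step.  Modelling it as an atomic exchange with a constant zero, as
 * above, gives that behaviour in both the parallel and the serialized
 * translation modes without an explicit CF_PARALLEL check here.
 */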
2930
1cd012a5 2931static bool trans_stby(DisasContext *ctx, arg_stby *a)
96d6407f 2932{
86f8d05f
RH
2933 TCGv_reg ofs, val;
2934 TCGv_tl addr;
96d6407f
RH
2935
2936 nullify_over(ctx);
2937
1cd012a5 2938 form_gva(ctx, &addr, &ofs, a->b, 0, 0, a->disp, a->sp, a->m,
86f8d05f 2939 ctx->mmu_idx == MMU_PHYS_IDX);
1cd012a5
RH
2940 val = load_gpr(ctx, a->r);
2941 if (a->a) {
f9f46db4 2942 if (tb_cflags(ctx->base.tb) & CF_PARALLEL) {
ad75a51e 2943 gen_helper_stby_e_parallel(tcg_env, addr, val);
f9f46db4 2944 } else {
ad75a51e 2945 gen_helper_stby_e(tcg_env, addr, val);
f9f46db4 2946 }
96d6407f 2947 } else {
f9f46db4 2948 if (tb_cflags(ctx->base.tb) & CF_PARALLEL) {
ad75a51e 2949 gen_helper_stby_b_parallel(tcg_env, addr, val);
f9f46db4 2950 } else {
ad75a51e 2951 gen_helper_stby_b(tcg_env, addr, val);
f9f46db4 2952 }
96d6407f 2953 }
1cd012a5 2954 if (a->m) {
86f8d05f 2955 tcg_gen_andi_reg(ofs, ofs, ~3);
1cd012a5 2956 save_gpr(ctx, a->b, ofs);
96d6407f 2957 }
96d6407f 2958
31234768 2959 return nullify_end(ctx);
96d6407f
RH
2960}
2961
1cd012a5 2962static bool trans_lda(DisasContext *ctx, arg_ldst *a)
d0a851cc
RH
2963{
2964 int hold_mmu_idx = ctx->mmu_idx;
d0a851cc
RH
2965
2966 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
d0a851cc 2967 ctx->mmu_idx = MMU_PHYS_IDX;
1cd012a5 2968 trans_ld(ctx, a);
d0a851cc 2969 ctx->mmu_idx = hold_mmu_idx;
31234768 2970 return true;
d0a851cc
RH
2971}
2972
1cd012a5 2973static bool trans_sta(DisasContext *ctx, arg_ldst *a)
d0a851cc
RH
2974{
2975 int hold_mmu_idx = ctx->mmu_idx;
d0a851cc
RH
2976
2977 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
d0a851cc 2978 ctx->mmu_idx = MMU_PHYS_IDX;
1cd012a5 2979 trans_st(ctx, a);
d0a851cc 2980 ctx->mmu_idx = hold_mmu_idx;
31234768 2981 return true;
d0a851cc 2982}
95412a61 2983
0588e061 2984static bool trans_ldil(DisasContext *ctx, arg_ldil *a)
b2167459 2985{
0588e061 2986 TCGv_reg tcg_rt = dest_gpr(ctx, a->t);
b2167459 2987
0588e061
RH
2988 tcg_gen_movi_reg(tcg_rt, a->i);
2989 save_gpr(ctx, a->t, tcg_rt);
b2167459 2990 cond_free(&ctx->null_cond);
31234768 2991 return true;
b2167459
RH
2992}
2993
0588e061 2994static bool trans_addil(DisasContext *ctx, arg_addil *a)
b2167459 2995{
0588e061 2996 TCGv_reg tcg_rt = load_gpr(ctx, a->r);
eaa3783b 2997 TCGv_reg tcg_r1 = dest_gpr(ctx, 1);
b2167459 2998
0588e061 2999 tcg_gen_addi_reg(tcg_r1, tcg_rt, a->i);
b2167459
RH
3000 save_gpr(ctx, 1, tcg_r1);
3001 cond_free(&ctx->null_cond);
31234768 3002 return true;
b2167459
RH
3003}
3004
0588e061 3005static bool trans_ldo(DisasContext *ctx, arg_ldo *a)
b2167459 3006{
0588e061 3007 TCGv_reg tcg_rt = dest_gpr(ctx, a->t);
b2167459
RH
3008
3009 /* Special case rb == 0, for the LDI pseudo-op.
3010 The COPY pseudo-op is handled for free within tcg_gen_addi_tl. */
0588e061
RH
3011 if (a->b == 0) {
3012 tcg_gen_movi_reg(tcg_rt, a->i);
b2167459 3013 } else {
0588e061 3014 tcg_gen_addi_reg(tcg_rt, cpu_gr[a->b], a->i);
b2167459 3015 }
0588e061 3016 save_gpr(ctx, a->t, tcg_rt);
b2167459 3017 cond_free(&ctx->null_cond);
31234768 3018 return true;
b2167459
RH
3019}
3020
01afb7be
RH
3021static bool do_cmpb(DisasContext *ctx, unsigned r, TCGv_reg in1,
3022 unsigned c, unsigned f, unsigned n, int disp)
98cd9ca7 3023{
01afb7be 3024 TCGv_reg dest, in2, sv;
98cd9ca7
RH
3025 DisasCond cond;
3026
98cd9ca7 3027 in2 = load_gpr(ctx, r);
e12c6309 3028 dest = tcg_temp_new();
98cd9ca7 3029
eaa3783b 3030 tcg_gen_sub_reg(dest, in1, in2);
98cd9ca7 3031
f764718d 3032 sv = NULL;
b47a4a02 3033 if (cond_need_sv(c)) {
98cd9ca7
RH
3034 sv = do_sub_sv(ctx, dest, in1, in2);
3035 }
3036
01afb7be
RH
3037 cond = do_sub_cond(c * 2 + f, dest, in1, in2, sv);
3038 return do_cbranch(ctx, disp, n, &cond);
98cd9ca7
RH
3039}
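/*
 * The compare-and-branch conditions are passed around as c * 2 + f:
 * three bits of condition plus the f bit that negates it, matching
 * the layout expected by do_cond/do_sub_cond so that the negated
 * forms need no separate handling here.
 */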
3040
01afb7be 3041static bool trans_cmpb(DisasContext *ctx, arg_cmpb *a)
98cd9ca7 3042{
01afb7be
RH
3043 nullify_over(ctx);
3044 return do_cmpb(ctx, a->r2, load_gpr(ctx, a->r1), a->c, a->f, a->n, a->disp);
3045}
98cd9ca7 3046
01afb7be
RH
3047static bool trans_cmpbi(DisasContext *ctx, arg_cmpbi *a)
3048{
98cd9ca7 3049 nullify_over(ctx);
d4e58033 3050 return do_cmpb(ctx, a->r, tcg_constant_reg(a->i), a->c, a->f, a->n, a->disp);
01afb7be
RH
3051}
3052
3053static bool do_addb(DisasContext *ctx, unsigned r, TCGv_reg in1,
3054 unsigned c, unsigned f, unsigned n, int disp)
3055{
bdcccc17 3056 TCGv_reg dest, in2, sv, cb_cond;
01afb7be 3057 DisasCond cond;
bdcccc17 3058 bool d = false;
98cd9ca7 3059
98cd9ca7 3060 in2 = load_gpr(ctx, r);
43675d20 3061 dest = tcg_temp_new();
f764718d 3062 sv = NULL;
bdcccc17 3063 cb_cond = NULL;
98cd9ca7 3064
b47a4a02 3065 if (cond_need_cb(c)) {
bdcccc17
RH
3066 TCGv_reg cb = tcg_temp_new();
3067 TCGv_reg cb_msb = tcg_temp_new();
3068
eaa3783b
RH
3069 tcg_gen_movi_reg(cb_msb, 0);
3070 tcg_gen_add2_reg(dest, cb_msb, in1, cb_msb, in2, cb_msb);
bdcccc17
RH
3071 tcg_gen_xor_reg(cb, in1, in2);
3072 tcg_gen_xor_reg(cb, cb, dest);
3073 cb_cond = get_carry(ctx, d, cb, cb_msb);
b47a4a02 3074 } else {
eaa3783b 3075 tcg_gen_add_reg(dest, in1, in2);
b47a4a02
SS
3076 }
3077 if (cond_need_sv(c)) {
98cd9ca7 3078 sv = do_add_sv(ctx, dest, in1, in2);
98cd9ca7
RH
3079 }
3080
bdcccc17 3081 cond = do_cond(c * 2 + f, dest, cb_cond, sv);
43675d20 3082 save_gpr(ctx, r, dest);
01afb7be 3083 return do_cbranch(ctx, disp, n, &cond);
98cd9ca7
RH
3084}
3085
01afb7be
RH
3086static bool trans_addb(DisasContext *ctx, arg_addb *a)
3087{
3088 nullify_over(ctx);
3089 return do_addb(ctx, a->r2, load_gpr(ctx, a->r1), a->c, a->f, a->n, a->disp);
3090}
3091
3092static bool trans_addbi(DisasContext *ctx, arg_addbi *a)
3093{
3094 nullify_over(ctx);
d4e58033 3095 return do_addb(ctx, a->r, tcg_constant_reg(a->i), a->c, a->f, a->n, a->disp);
01afb7be
RH
3096}
3097
3098static bool trans_bb_sar(DisasContext *ctx, arg_bb_sar *a)
98cd9ca7 3099{
eaa3783b 3100 TCGv_reg tmp, tcg_r;
98cd9ca7 3101 DisasCond cond;
1e9ab9fb 3102 bool d = false;
98cd9ca7
RH
3103
3104 nullify_over(ctx);
3105
3106 tmp = tcg_temp_new();
01afb7be 3107 tcg_r = load_gpr(ctx, a->r);
1e9ab9fb
RH
3108 if (cond_need_ext(ctx, d)) {
3109 /* Force shift into [32,63] */
3110 tcg_gen_ori_reg(tmp, cpu_sar, 32);
3111 tcg_gen_shl_reg(tmp, tcg_r, tmp);
3112 } else {
3113 tcg_gen_shl_reg(tmp, tcg_r, cpu_sar);
3114 }
98cd9ca7 3115
1e9ab9fb 3116 cond = cond_make_0_tmp(a->c ? TCG_COND_GE : TCG_COND_LT, tmp);
01afb7be 3117 return do_cbranch(ctx, a->disp, a->n, &cond);
98cd9ca7
RH
3118}
3119
01afb7be
RH
3120static bool trans_bb_imm(DisasContext *ctx, arg_bb_imm *a)
3121{
3122 TCGv_reg tmp, tcg_r;
3123 DisasCond cond;
1e9ab9fb
RH
3124 bool d = false;
3125 int p;
01afb7be
RH
3126
3127 nullify_over(ctx);
3128
3129 tmp = tcg_temp_new();
3130 tcg_r = load_gpr(ctx, a->r);
1e9ab9fb
RH
3131 p = a->p | (cond_need_ext(ctx, d) ? 32 : 0);
3132 tcg_gen_shli_reg(tmp, tcg_r, p);
01afb7be
RH
3133
3134 cond = cond_make_0(a->c ? TCG_COND_GE : TCG_COND_LT, tmp);
01afb7be
RH
3135 return do_cbranch(ctx, a->disp, a->n, &cond);
3136}
3137
3138static bool trans_movb(DisasContext *ctx, arg_movb *a)
98cd9ca7 3139{
eaa3783b 3140 TCGv_reg dest;
98cd9ca7
RH
3141 DisasCond cond;
3142
3143 nullify_over(ctx);
3144
01afb7be
RH
3145 dest = dest_gpr(ctx, a->r2);
3146 if (a->r1 == 0) {
eaa3783b 3147 tcg_gen_movi_reg(dest, 0);
98cd9ca7 3148 } else {
01afb7be 3149 tcg_gen_mov_reg(dest, cpu_gr[a->r1]);
98cd9ca7
RH
3150 }
3151
01afb7be
RH
3152 cond = do_sed_cond(a->c, dest);
3153 return do_cbranch(ctx, a->disp, a->n, &cond);
3154}
3155
3156static bool trans_movbi(DisasContext *ctx, arg_movbi *a)
3157{
3158 TCGv_reg dest;
3159 DisasCond cond;
3160
3161 nullify_over(ctx);
3162
3163 dest = dest_gpr(ctx, a->r);
3164 tcg_gen_movi_reg(dest, a->i);
3165
3166 cond = do_sed_cond(a->c, dest);
3167 return do_cbranch(ctx, a->disp, a->n, &cond);
98cd9ca7
RH
3168}
3169
30878590 3170static bool trans_shrpw_sar(DisasContext *ctx, arg_shrpw_sar *a)
0b1347d2 3171{
eaa3783b 3172 TCGv_reg dest;
0b1347d2 3173
30878590 3174 if (a->c) {
0b1347d2
RH
3175 nullify_over(ctx);
3176 }
3177
30878590
RH
3178 dest = dest_gpr(ctx, a->t);
3179 if (a->r1 == 0) {
3180 tcg_gen_ext32u_reg(dest, load_gpr(ctx, a->r2));
eaa3783b 3181 tcg_gen_shr_reg(dest, dest, cpu_sar);
30878590 3182 } else if (a->r1 == a->r2) {
0b1347d2 3183 TCGv_i32 t32 = tcg_temp_new_i32();
e1d635e8
RH
3184 TCGv_i32 s32 = tcg_temp_new_i32();
3185
30878590 3186 tcg_gen_trunc_reg_i32(t32, load_gpr(ctx, a->r2));
e1d635e8
RH
3187 tcg_gen_trunc_reg_i32(s32, cpu_sar);
3188 tcg_gen_rotr_i32(t32, t32, s32);
eaa3783b 3189 tcg_gen_extu_i32_reg(dest, t32);
0b1347d2
RH
3190 } else {
3191 TCGv_i64 t = tcg_temp_new_i64();
3192 TCGv_i64 s = tcg_temp_new_i64();
3193
30878590 3194 tcg_gen_concat_reg_i64(t, load_gpr(ctx, a->r2), load_gpr(ctx, a->r1));
eaa3783b 3195 tcg_gen_extu_reg_i64(s, cpu_sar);
0b1347d2 3196 tcg_gen_shr_i64(t, t, s);
eaa3783b 3197 tcg_gen_trunc_i64_reg(dest, t);
0b1347d2 3198 }
30878590 3199 save_gpr(ctx, a->t, dest);
0b1347d2
RH
3200
3201 /* Install the new nullification. */
3202 cond_free(&ctx->null_cond);
30878590
RH
3203 if (a->c) {
3204 ctx->null_cond = do_sed_cond(a->c, dest);
0b1347d2 3205 }
31234768 3206 return nullify_end(ctx);
0b1347d2
RH
3207}
3208
30878590 3209static bool trans_shrpw_imm(DisasContext *ctx, arg_shrpw_imm *a)
0b1347d2 3210{
30878590 3211 unsigned sa = 31 - a->cpos;
eaa3783b 3212 TCGv_reg dest, t2;
0b1347d2 3213
30878590 3214 if (a->c) {
0b1347d2
RH
3215 nullify_over(ctx);
3216 }
3217
30878590
RH
3218 dest = dest_gpr(ctx, a->t);
3219 t2 = load_gpr(ctx, a->r2);
05bfd4db
RH
3220 if (a->r1 == 0) {
3221 tcg_gen_extract_reg(dest, t2, sa, 32 - sa);
3222 } else if (TARGET_REGISTER_BITS == 32) {
3223 tcg_gen_extract2_reg(dest, t2, cpu_gr[a->r1], sa);
3224 } else if (a->r1 == a->r2) {
0b1347d2 3225 TCGv_i32 t32 = tcg_temp_new_i32();
eaa3783b 3226 tcg_gen_trunc_reg_i32(t32, t2);
0b1347d2 3227 tcg_gen_rotri_i32(t32, t32, sa);
eaa3783b 3228 tcg_gen_extu_i32_reg(dest, t32);
0b1347d2 3229 } else {
05bfd4db
RH
3230 TCGv_i64 t64 = tcg_temp_new_i64();
3231 tcg_gen_concat_reg_i64(t64, t2, cpu_gr[a->r1]);
3232 tcg_gen_shri_i64(t64, t64, sa);
3233 tcg_gen_trunc_i64_reg(dest, t64);
0b1347d2 3234 }
30878590 3235 save_gpr(ctx, a->t, dest);
0b1347d2
RH
3236
3237 /* Install the new nullification. */
3238 cond_free(&ctx->null_cond);
30878590
RH
3239 if (a->c) {
3240 ctx->null_cond = do_sed_cond(a->c, dest);
0b1347d2 3241 }
31234768 3242 return nullify_end(ctx);
0b1347d2
RH
3243}
3244
30878590 3245static bool trans_extrw_sar(DisasContext *ctx, arg_extrw_sar *a)
0b1347d2 3246{
30878590 3247 unsigned len = 32 - a->clen;
eaa3783b 3248 TCGv_reg dest, src, tmp;
0b1347d2 3249
30878590 3250 if (a->c) {
0b1347d2
RH
3251 nullify_over(ctx);
3252 }
3253
30878590
RH
3254 dest = dest_gpr(ctx, a->t);
3255 src = load_gpr(ctx, a->r);
0b1347d2
RH
3256 tmp = tcg_temp_new();
3257
3258 /* Recall that SAR is using big-endian bit numbering. */
d781cb77
RH
3259 tcg_gen_andi_reg(tmp, cpu_sar, 31);
3260 tcg_gen_xori_reg(tmp, tmp, 31);
3261
30878590 3262 if (a->se) {
eaa3783b
RH
3263 tcg_gen_sar_reg(dest, src, tmp);
3264 tcg_gen_sextract_reg(dest, dest, 0, len);
0b1347d2 3265 } else {
eaa3783b
RH
3266 tcg_gen_shr_reg(dest, src, tmp);
3267 tcg_gen_extract_reg(dest, dest, 0, len);
0b1347d2 3268 }
30878590 3269 save_gpr(ctx, a->t, dest);
0b1347d2
RH
3270
3271 /* Install the new nullification. */
3272 cond_free(&ctx->null_cond);
30878590
RH
3273 if (a->c) {
3274 ctx->null_cond = do_sed_cond(a->c, dest);
0b1347d2 3275 }
31234768 3276 return nullify_end(ctx);
0b1347d2
RH
3277}
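/*
 * SAR positions are big-endian bit numbers (bit 0 is the most
 * significant), so the andi/xori pair above converts a SAR value p
 * into the little-endian shift count 31 - p without a subtraction:
 * e.g. SAR = 31 names the least significant bit and produces a shift
 * count of 0.
 */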
3278
30878590 3279static bool trans_extrw_imm(DisasContext *ctx, arg_extrw_imm *a)
0b1347d2 3280{
30878590
RH
3281 unsigned len = 32 - a->clen;
3282 unsigned cpos = 31 - a->pos;
eaa3783b 3283 TCGv_reg dest, src;
0b1347d2 3284
30878590 3285 if (a->c) {
0b1347d2
RH
3286 nullify_over(ctx);
3287 }
3288
30878590
RH
3289 dest = dest_gpr(ctx, a->t);
3290 src = load_gpr(ctx, a->r);
3291 if (a->se) {
eaa3783b 3292 tcg_gen_sextract_reg(dest, src, cpos, len);
0b1347d2 3293 } else {
eaa3783b 3294 tcg_gen_extract_reg(dest, src, cpos, len);
0b1347d2 3295 }
30878590 3296 save_gpr(ctx, a->t, dest);
0b1347d2
RH
3297
3298 /* Install the new nullification. */
3299 cond_free(&ctx->null_cond);
30878590
RH
3300 if (a->c) {
3301 ctx->null_cond = do_sed_cond(a->c, dest);
0b1347d2 3302 }
31234768 3303 return nullify_end(ctx);
0b1347d2
RH
3304}
3305
30878590 3306static bool trans_depwi_imm(DisasContext *ctx, arg_depwi_imm *a)
0b1347d2 3307{
30878590 3308 unsigned len = 32 - a->clen;
eaa3783b
RH
3309 target_sreg mask0, mask1;
3310 TCGv_reg dest;
0b1347d2 3311
30878590 3312 if (a->c) {
0b1347d2
RH
3313 nullify_over(ctx);
3314 }
30878590
RH
3315 if (a->cpos + len > 32) {
3316 len = 32 - a->cpos;
0b1347d2
RH
3317 }
3318
30878590
RH
3319 dest = dest_gpr(ctx, a->t);
3320 mask0 = deposit64(0, a->cpos, len, a->i);
3321 mask1 = deposit64(-1, a->cpos, len, a->i);
0b1347d2 3322
30878590
RH
3323 if (a->nz) {
3324 TCGv_reg src = load_gpr(ctx, a->t);
0b1347d2 3325 if (mask1 != -1) {
eaa3783b 3326 tcg_gen_andi_reg(dest, src, mask1);
0b1347d2
RH
3327 src = dest;
3328 }
eaa3783b 3329 tcg_gen_ori_reg(dest, src, mask0);
0b1347d2 3330 } else {
eaa3783b 3331 tcg_gen_movi_reg(dest, mask0);
0b1347d2 3332 }
30878590 3333 save_gpr(ctx, a->t, dest);
0b1347d2
RH
3334
3335 /* Install the new nullification. */
3336 cond_free(&ctx->null_cond);
30878590
RH
3337 if (a->c) {
3338 ctx->null_cond = do_sed_cond(a->c, dest);
0b1347d2 3339 }
31234768 3340 return nullify_end(ctx);
0b1347d2
RH
3341}
3342
30878590 3343static bool trans_depw_imm(DisasContext *ctx, arg_depw_imm *a)
0b1347d2 3344{
30878590
RH
3345 unsigned rs = a->nz ? a->t : 0;
3346 unsigned len = 32 - a->clen;
eaa3783b 3347 TCGv_reg dest, val;
0b1347d2 3348
30878590 3349 if (a->c) {
0b1347d2
RH
3350 nullify_over(ctx);
3351 }
30878590
RH
3352 if (a->cpos + len > 32) {
3353 len = 32 - a->cpos;
0b1347d2
RH
3354 }
3355
30878590
RH
3356 dest = dest_gpr(ctx, a->t);
3357 val = load_gpr(ctx, a->r);
0b1347d2 3358 if (rs == 0) {
30878590 3359 tcg_gen_deposit_z_reg(dest, val, a->cpos, len);
0b1347d2 3360 } else {
30878590 3361 tcg_gen_deposit_reg(dest, cpu_gr[rs], val, a->cpos, len);
0b1347d2 3362 }
30878590 3363 save_gpr(ctx, a->t, dest);
0b1347d2
RH
3364
3365 /* Install the new nullification. */
3366 cond_free(&ctx->null_cond);
30878590
RH
3367 if (a->c) {
3368 ctx->null_cond = do_sed_cond(a->c, dest);
0b1347d2 3369 }
31234768 3370 return nullify_end(ctx);
0b1347d2
RH
3371}
3372
30878590
RH
3373static bool do_depw_sar(DisasContext *ctx, unsigned rt, unsigned c,
3374 unsigned nz, unsigned clen, TCGv_reg val)
0b1347d2 3375{
0b1347d2
RH
3376 unsigned rs = nz ? rt : 0;
3377 unsigned len = 32 - clen;
30878590 3378 TCGv_reg mask, tmp, shift, dest;
0b1347d2
RH
3379 unsigned msb = 1U << (len - 1);
3380
0b1347d2
RH
3381 dest = dest_gpr(ctx, rt);
3382 shift = tcg_temp_new();
3383 tmp = tcg_temp_new();
3384
3385 /* Convert big-endian bit numbering in SAR to left-shift. */
d781cb77
RH
3386 tcg_gen_andi_reg(shift, cpu_sar, 31);
3387 tcg_gen_xori_reg(shift, shift, 31);
0b1347d2 3388
0992a930
RH
3389 mask = tcg_temp_new();
3390 tcg_gen_movi_reg(mask, msb + (msb - 1));
eaa3783b 3391 tcg_gen_and_reg(tmp, val, mask);
0b1347d2 3392 if (rs) {
eaa3783b
RH
3393 tcg_gen_shl_reg(mask, mask, shift);
3394 tcg_gen_shl_reg(tmp, tmp, shift);
3395 tcg_gen_andc_reg(dest, cpu_gr[rs], mask);
3396 tcg_gen_or_reg(dest, dest, tmp);
0b1347d2 3397 } else {
eaa3783b 3398 tcg_gen_shl_reg(dest, tmp, shift);
0b1347d2 3399 }
0b1347d2
RH
3400 save_gpr(ctx, rt, dest);
3401
3402 /* Install the new nullification. */
3403 cond_free(&ctx->null_cond);
3404 if (c) {
3405 ctx->null_cond = do_sed_cond(c, dest);
3406 }
31234768 3407 return nullify_end(ctx);
0b1347d2
RH
3408}
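/*
 * The variable deposit above uses the same big-endian-to-shift
 * conversion as the variable extract: the field mask (msb + (msb - 1),
 * i.e. len consecutive one bits) and the masked value are shifted to
 * the target position, and for the non-zeroing forms the old bits
 * under the mask are cleared with andc before being or-ed in.
 */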
3409
30878590
RH
3410static bool trans_depw_sar(DisasContext *ctx, arg_depw_sar *a)
3411{
a6deecce
SS
3412 if (a->c) {
3413 nullify_over(ctx);
3414 }
30878590
RH
3415 return do_depw_sar(ctx, a->t, a->c, a->nz, a->clen, load_gpr(ctx, a->r));
3416}
3417
3418static bool trans_depwi_sar(DisasContext *ctx, arg_depwi_sar *a)
3419{
a6deecce
SS
3420 if (a->c) {
3421 nullify_over(ctx);
3422 }
d4e58033 3423 return do_depw_sar(ctx, a->t, a->c, a->nz, a->clen, tcg_constant_reg(a->i));
30878590 3424}
0b1347d2 3425
8340f534 3426static bool trans_be(DisasContext *ctx, arg_be *a)
98cd9ca7 3427{
660eefe1 3428 TCGv_reg tmp;
98cd9ca7 3429
c301f34e 3430#ifdef CONFIG_USER_ONLY
98cd9ca7
RH
3431 /* ??? It seems like there should be a good way of using
3432 "be disp(sr2, r0)", the canonical gateway entry mechanism
3433 to our advantage. But that appears to be inconvenient to
3434 manage alongside branch delay slots. Therefore we handle
3435 entry into the gateway page via absolute address. */
98cd9ca7
RH
3436 /* Since we don't implement spaces, just branch. Do notice the special
3437 case of "be disp(*,r0)" using a direct branch to disp, so that we can
3438 goto_tb to the TB containing the syscall. */
8340f534
RH
3439 if (a->b == 0) {
3440 return do_dbranch(ctx, a->disp, a->l, a->n);
98cd9ca7 3441 }
c301f34e 3442#else
c301f34e 3443 nullify_over(ctx);
660eefe1
RH
3444#endif
3445
e12c6309 3446 tmp = tcg_temp_new();
8340f534 3447 tcg_gen_addi_reg(tmp, load_gpr(ctx, a->b), a->disp);
660eefe1 3448 tmp = do_ibranch_priv(ctx, tmp);
c301f34e
RH
3449
3450#ifdef CONFIG_USER_ONLY
8340f534 3451 return do_ibranch(ctx, tmp, a->l, a->n);
c301f34e
RH
3452#else
3453 TCGv_i64 new_spc = tcg_temp_new_i64();
3454
8340f534
RH
3455 load_spr(ctx, new_spc, a->sp);
3456 if (a->l) {
741322f4 3457 copy_iaoq_entry(ctx, cpu_gr[31], ctx->iaoq_n, ctx->iaoq_n_var);
c301f34e
RH
3458 tcg_gen_mov_i64(cpu_sr[0], cpu_iasq_f);
3459 }
8340f534 3460 if (a->n && use_nullify_skip(ctx)) {
a0180973
RH
3461 copy_iaoq_entry(ctx, cpu_iaoq_f, -1, tmp);
3462 tcg_gen_addi_reg(tmp, tmp, 4);
3463 copy_iaoq_entry(ctx, cpu_iaoq_b, -1, tmp);
c301f34e
RH
3464 tcg_gen_mov_i64(cpu_iasq_f, new_spc);
3465 tcg_gen_mov_i64(cpu_iasq_b, cpu_iasq_f);
3466 } else {
741322f4 3467 copy_iaoq_entry(ctx, cpu_iaoq_f, ctx->iaoq_b, cpu_iaoq_b);
c301f34e
RH
3468 if (ctx->iaoq_b == -1) {
3469 tcg_gen_mov_i64(cpu_iasq_f, cpu_iasq_b);
3470 }
a0180973 3471 copy_iaoq_entry(ctx, cpu_iaoq_b, -1, tmp);
c301f34e 3472 tcg_gen_mov_i64(cpu_iasq_b, new_spc);
8340f534 3473 nullify_set(ctx, a->n);
c301f34e 3474 }
c301f34e 3475 tcg_gen_lookup_and_goto_ptr();
31234768
RH
3476 ctx->base.is_jmp = DISAS_NORETURN;
3477 return nullify_end(ctx);
c301f34e 3478#endif
98cd9ca7
RH
3479}
3480
8340f534 3481static bool trans_bl(DisasContext *ctx, arg_bl *a)
98cd9ca7 3482{
8340f534 3483 return do_dbranch(ctx, iaoq_dest(ctx, a->disp), a->l, a->n);
98cd9ca7
RH
3484}
3485
8340f534 3486static bool trans_b_gate(DisasContext *ctx, arg_b_gate *a)
43e05652 3487{
8340f534 3488 target_ureg dest = iaoq_dest(ctx, a->disp);
43e05652 3489
6e5f5300
SS
3490 nullify_over(ctx);
3491
43e05652
RH
3492 /* Make sure the caller hasn't done something weird with the queue.
3493 * ??? This is not quite the same as the PSW[B] bit, which would be
3494 * expensive to track. Real hardware will trap for
3495 * b gateway
3496 * b gateway+4 (in delay slot of first branch)
3497 * However, checking for a non-sequential instruction queue *will*
3498 * diagnose the security hole
3499 * b gateway
3500 * b evil
3501 * in which instructions at evil would run with increased privs.
3502 */
3503 if (ctx->iaoq_b == -1 || ctx->iaoq_b != ctx->iaoq_f + 4) {
3504 return gen_illegal(ctx);
3505 }
3506
3507#ifndef CONFIG_USER_ONLY
3508 if (ctx->tb_flags & PSW_C) {
b77af26e 3509 CPUHPPAState *env = cpu_env(ctx->cs);
43e05652
RH
3510 int type = hppa_artype_for_page(env, ctx->base.pc_next);
3511 /* If we could not find a TLB entry, then we need to generate an
3512 ITLB miss exception so the kernel will provide it.
3513 The resulting TLB fill operation will invalidate this TB and
3514 we will re-translate, at which point we *will* be able to find
3515 the TLB entry and determine if this is in fact a gateway page. */
3516 if (type < 0) {
31234768
RH
3517 gen_excp(ctx, EXCP_ITLB_MISS);
3518 return true;
43e05652
RH
3519 }
3520 /* No change for non-gateway pages or for priv decrease. */
3521 if (type >= 4 && type - 4 < ctx->privilege) {
3522 dest = deposit32(dest, 0, 2, type - 4);
3523 }
3524 } else {
3525 dest &= -4; /* priv = 0 */
3526 }
3527#endif
3528
6e5f5300
SS
3529 if (a->l) {
3530 TCGv_reg tmp = dest_gpr(ctx, a->l);
3531 if (ctx->privilege < 3) {
3532 tcg_gen_andi_reg(tmp, tmp, -4);
3533 }
3534 tcg_gen_ori_reg(tmp, tmp, ctx->privilege);
3535 save_gpr(ctx, a->l, tmp);
3536 }
3537
3538 return do_dbranch(ctx, dest, 0, a->n);
43e05652
RH
3539}
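/*
 * Editor's note: a hedged sketch (hypothetical helper, not part of
 * translate.c) of the privilege promotion done above for gateway pages.
 * On PA-RISC the low two bits of a branch target carry the privilege level
 * (0 is most privileged); a gateway page of access-rights type 4..7 can
 * only raise privilege, never lower it, which is what the deposit32() call
 * above expresses.
 */
#include <stdint.h>

static uint32_t gate_promote(uint32_t dest, int page_type, int cur_priv)
{
    if (page_type >= 4 && page_type - 4 < cur_priv) {
        dest = (dest & ~3u) | (uint32_t)(page_type - 4);  /* deposit32(dest, 0, 2, type - 4) */
    }
    return dest;
}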
3540
8340f534 3541static bool trans_blr(DisasContext *ctx, arg_blr *a)
98cd9ca7 3542{
b35aec85 3543 if (a->x) {
e12c6309 3544 TCGv_reg tmp = tcg_temp_new();
b35aec85
RH
3545 tcg_gen_shli_reg(tmp, load_gpr(ctx, a->x), 3);
3546 tcg_gen_addi_reg(tmp, tmp, ctx->iaoq_f + 8);
3547 /* The computation here never changes privilege level. */
3548 return do_ibranch(ctx, tmp, a->l, a->n);
3549 } else {
3550 /* BLR R0,RX is a good way to load PC+8 into RX. */
3551 return do_dbranch(ctx, ctx->iaoq_f + 8, a->l, a->n);
3552 }
98cd9ca7
RH
3553}
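/*
 * Editor's note: in the a->x != 0 case above the branch target works out to
 *   target = IAOQ_Front + 8 + (GR[x] << 3)
 * i.e. GR[x] indexes a table of two-instruction (8-byte) entries starting
 * just past the delay slot, and BLR never changes the privilege level.
 */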
3554
8340f534 3555static bool trans_bv(DisasContext *ctx, arg_bv *a)
98cd9ca7 3556{
eaa3783b 3557 TCGv_reg dest;
98cd9ca7 3558
8340f534
RH
3559 if (a->x == 0) {
3560 dest = load_gpr(ctx, a->b);
98cd9ca7 3561 } else {
e12c6309 3562 dest = tcg_temp_new();
8340f534
RH
3563 tcg_gen_shli_reg(dest, load_gpr(ctx, a->x), 3);
3564 tcg_gen_add_reg(dest, dest, load_gpr(ctx, a->b));
98cd9ca7 3565 }
660eefe1 3566 dest = do_ibranch_priv(ctx, dest);
8340f534 3567 return do_ibranch(ctx, dest, 0, a->n);
98cd9ca7
RH
3568}
3569
8340f534 3570static bool trans_bve(DisasContext *ctx, arg_bve *a)
98cd9ca7 3571{
660eefe1 3572 TCGv_reg dest;
98cd9ca7 3573
c301f34e 3574#ifdef CONFIG_USER_ONLY
8340f534
RH
3575 dest = do_ibranch_priv(ctx, load_gpr(ctx, a->b));
3576 return do_ibranch(ctx, dest, a->l, a->n);
c301f34e
RH
3577#else
3578 nullify_over(ctx);
8340f534 3579 dest = do_ibranch_priv(ctx, load_gpr(ctx, a->b));
c301f34e 3580
741322f4 3581 copy_iaoq_entry(ctx, cpu_iaoq_f, ctx->iaoq_b, cpu_iaoq_b);
c301f34e
RH
3582 if (ctx->iaoq_b == -1) {
3583 tcg_gen_mov_i64(cpu_iasq_f, cpu_iasq_b);
3584 }
741322f4 3585 copy_iaoq_entry(ctx, cpu_iaoq_b, -1, dest);
c301f34e 3586 tcg_gen_mov_i64(cpu_iasq_b, space_select(ctx, 0, dest));
8340f534 3587 if (a->l) {
741322f4 3588 copy_iaoq_entry(ctx, cpu_gr[a->l], ctx->iaoq_n, ctx->iaoq_n_var);
c301f34e 3589 }
8340f534 3590 nullify_set(ctx, a->n);
c301f34e 3591 tcg_gen_lookup_and_goto_ptr();
31234768
RH
3592 ctx->base.is_jmp = DISAS_NORETURN;
3593 return nullify_end(ctx);
c301f34e 3594#endif
98cd9ca7
RH
3595}
3596
1ca74648
RH
3597/*
3598 * Float class 0
3599 */
ebe9383c 3600
1ca74648 3601static void gen_fcpy_f(TCGv_i32 dst, TCGv_env unused, TCGv_i32 src)
ebe9383c 3602{
1ca74648 3603 tcg_gen_mov_i32(dst, src);
ebe9383c
RH
3604}
3605
59f8c04b
HD
3606static bool trans_fid_f(DisasContext *ctx, arg_fid_f *a)
3607{
a300dad3
RH
3608 uint64_t ret;
3609
3610 if (TARGET_REGISTER_BITS == 64) {
3611 ret = 0x13080000000000ULL; /* PA8700 (PCX-W2) */
3612 } else {
3613 ret = 0x0f080000000000ULL; /* PA7300LC (PCX-L2) */
3614 }
3615
59f8c04b 3616 nullify_over(ctx);
a300dad3 3617 save_frd(0, tcg_constant_i64(ret));
59f8c04b
HD
3618 return nullify_end(ctx);
3619}
3620
1ca74648 3621static bool trans_fcpy_f(DisasContext *ctx, arg_fclass01 *a)
ebe9383c 3622{
1ca74648 3623 return do_fop_wew(ctx, a->t, a->r, gen_fcpy_f);
ebe9383c
RH
3624}
3625
1ca74648 3626static void gen_fcpy_d(TCGv_i64 dst, TCGv_env unused, TCGv_i64 src)
ebe9383c 3627{
1ca74648 3628 tcg_gen_mov_i64(dst, src);
ebe9383c
RH
3629}
3630
1ca74648 3631static bool trans_fcpy_d(DisasContext *ctx, arg_fclass01 *a)
ebe9383c 3632{
1ca74648 3633 return do_fop_ded(ctx, a->t, a->r, gen_fcpy_d);
ebe9383c
RH
3634}
3635
1ca74648 3636static void gen_fabs_f(TCGv_i32 dst, TCGv_env unused, TCGv_i32 src)
ebe9383c 3637{
1ca74648 3638 tcg_gen_andi_i32(dst, src, INT32_MAX);
ebe9383c
RH
3639}
3640
1ca74648 3641static bool trans_fabs_f(DisasContext *ctx, arg_fclass01 *a)
ebe9383c 3642{
1ca74648 3643 return do_fop_wew(ctx, a->t, a->r, gen_fabs_f);
ebe9383c
RH
3644}
3645
1ca74648 3646static void gen_fabs_d(TCGv_i64 dst, TCGv_env unused, TCGv_i64 src)
ebe9383c 3647{
1ca74648 3648 tcg_gen_andi_i64(dst, src, INT64_MAX);
ebe9383c
RH
3649}
3650
1ca74648 3651static bool trans_fabs_d(DisasContext *ctx, arg_fclass01 *a)
ebe9383c 3652{
1ca74648 3653 return do_fop_ded(ctx, a->t, a->r, gen_fabs_d);
ebe9383c
RH
3654}
3655
1ca74648 3656static bool trans_fsqrt_f(DisasContext *ctx, arg_fclass01 *a)
ebe9383c 3657{
1ca74648 3658 return do_fop_wew(ctx, a->t, a->r, gen_helper_fsqrt_s);
ebe9383c
RH
3659}
3660
1ca74648 3661static bool trans_fsqrt_d(DisasContext *ctx, arg_fclass01 *a)
ebe9383c 3662{
1ca74648 3663 return do_fop_ded(ctx, a->t, a->r, gen_helper_fsqrt_d);
ebe9383c
RH
3664}
3665
1ca74648 3666static bool trans_frnd_f(DisasContext *ctx, arg_fclass01 *a)
ebe9383c 3667{
1ca74648 3668 return do_fop_wew(ctx, a->t, a->r, gen_helper_frnd_s);
ebe9383c
RH
3669}
3670
1ca74648 3671static bool trans_frnd_d(DisasContext *ctx, arg_fclass01 *a)
ebe9383c 3672{
1ca74648 3673 return do_fop_ded(ctx, a->t, a->r, gen_helper_frnd_d);
ebe9383c
RH
3674}
3675
1ca74648 3676static void gen_fneg_f(TCGv_i32 dst, TCGv_env unused, TCGv_i32 src)
ebe9383c 3677{
1ca74648 3678 tcg_gen_xori_i32(dst, src, INT32_MIN);
ebe9383c
RH
3679}
3680
1ca74648 3681static bool trans_fneg_f(DisasContext *ctx, arg_fclass01 *a)
ebe9383c 3682{
1ca74648 3683 return do_fop_wew(ctx, a->t, a->r, gen_fneg_f);
ebe9383c
RH
3684}
3685
3686static void gen_fneg_d(TCGv_i64 dst, TCGv_env unused, TCGv_i64 src)
3687{
3688 tcg_gen_xori_i64(dst, src, INT64_MIN);
3689}
3690
1ca74648
RH
3691static bool trans_fneg_d(DisasContext *ctx, arg_fclass01 *a)
3692{
3693 return do_fop_ded(ctx, a->t, a->r, gen_fneg_d);
3694}
3695
3696static void gen_fnegabs_f(TCGv_i32 dst, TCGv_env unused, TCGv_i32 src)
ebe9383c
RH
3697{
3698 tcg_gen_ori_i32(dst, src, INT32_MIN);
3699}
3700
1ca74648
RH
3701static bool trans_fnegabs_f(DisasContext *ctx, arg_fclass01 *a)
3702{
3703 return do_fop_wew(ctx, a->t, a->r, gen_fnegabs_f);
3704}
3705
ebe9383c
RH
3706static void gen_fnegabs_d(TCGv_i64 dst, TCGv_env unused, TCGv_i64 src)
3707{
3708 tcg_gen_ori_i64(dst, src, INT64_MIN);
3709}
3710
1ca74648
RH
3711static bool trans_fnegabs_d(DisasContext *ctx, arg_fclass01 *a)
3712{
3713 return do_fop_ded(ctx, a->t, a->r, gen_fnegabs_d);
3714}
3715
3716/*
3717 * Float class 1
3718 */
3719
3720static bool trans_fcnv_d_f(DisasContext *ctx, arg_fclass01 *a)
3721{
3722 return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_d_s);
3723}
3724
3725static bool trans_fcnv_f_d(DisasContext *ctx, arg_fclass01 *a)
3726{
3727 return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_s_d);
3728}
3729
3730static bool trans_fcnv_w_f(DisasContext *ctx, arg_fclass01 *a)
3731{
3732 return do_fop_wew(ctx, a->t, a->r, gen_helper_fcnv_w_s);
3733}
3734
3735static bool trans_fcnv_q_f(DisasContext *ctx, arg_fclass01 *a)
3736{
3737 return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_dw_s);
3738}
3739
3740static bool trans_fcnv_w_d(DisasContext *ctx, arg_fclass01 *a)
3741{
3742 return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_w_d);
3743}
3744
3745static bool trans_fcnv_q_d(DisasContext *ctx, arg_fclass01 *a)
3746{
3747 return do_fop_ded(ctx, a->t, a->r, gen_helper_fcnv_dw_d);
3748}
3749
3750static bool trans_fcnv_f_w(DisasContext *ctx, arg_fclass01 *a)
3751{
3752 return do_fop_wew(ctx, a->t, a->r, gen_helper_fcnv_s_w);
3753}
3754
3755static bool trans_fcnv_d_w(DisasContext *ctx, arg_fclass01 *a)
3756{
3757 return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_d_w);
3758}
3759
3760static bool trans_fcnv_f_q(DisasContext *ctx, arg_fclass01 *a)
3761{
3762 return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_s_dw);
3763}
3764
3765static bool trans_fcnv_d_q(DisasContext *ctx, arg_fclass01 *a)
3766{
3767 return do_fop_ded(ctx, a->t, a->r, gen_helper_fcnv_d_dw);
3768}
3769
3770static bool trans_fcnv_t_f_w(DisasContext *ctx, arg_fclass01 *a)
3771{
3772 return do_fop_wew(ctx, a->t, a->r, gen_helper_fcnv_t_s_w);
3773}
3774
3775static bool trans_fcnv_t_d_w(DisasContext *ctx, arg_fclass01 *a)
3776{
3777 return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_t_d_w);
3778}
3779
3780static bool trans_fcnv_t_f_q(DisasContext *ctx, arg_fclass01 *a)
3781{
3782 return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_t_s_dw);
3783}
3784
3785static bool trans_fcnv_t_d_q(DisasContext *ctx, arg_fclass01 *a)
3786{
3787 return do_fop_ded(ctx, a->t, a->r, gen_helper_fcnv_t_d_dw);
3788}
3789
3790static bool trans_fcnv_uw_f(DisasContext *ctx, arg_fclass01 *a)
3791{
3792 return do_fop_wew(ctx, a->t, a->r, gen_helper_fcnv_uw_s);
3793}
3794
3795static bool trans_fcnv_uq_f(DisasContext *ctx, arg_fclass01 *a)
3796{
3797 return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_udw_s);
3798}
3799
3800static bool trans_fcnv_uw_d(DisasContext *ctx, arg_fclass01 *a)
3801{
3802 return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_uw_d);
3803}
3804
3805static bool trans_fcnv_uq_d(DisasContext *ctx, arg_fclass01 *a)
3806{
3807 return do_fop_ded(ctx, a->t, a->r, gen_helper_fcnv_udw_d);
3808}
3809
3810static bool trans_fcnv_f_uw(DisasContext *ctx, arg_fclass01 *a)
3811{
3812 return do_fop_wew(ctx, a->t, a->r, gen_helper_fcnv_s_uw);
3813}
3814
3815static bool trans_fcnv_d_uw(DisasContext *ctx, arg_fclass01 *a)
3816{
3817 return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_d_uw);
3818}
3819
3820static bool trans_fcnv_f_uq(DisasContext *ctx, arg_fclass01 *a)
3821{
3822 return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_s_udw);
3823}
3824
3825static bool trans_fcnv_d_uq(DisasContext *ctx, arg_fclass01 *a)
3826{
3827 return do_fop_ded(ctx, a->t, a->r, gen_helper_fcnv_d_udw);
3828}
3829
3830static bool trans_fcnv_t_f_uw(DisasContext *ctx, arg_fclass01 *a)
3831{
3832 return do_fop_wew(ctx, a->t, a->r, gen_helper_fcnv_t_s_uw);
3833}
3834
3835static bool trans_fcnv_t_d_uw(DisasContext *ctx, arg_fclass01 *a)
3836{
3837 return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_t_d_uw);
3838}
3839
3840static bool trans_fcnv_t_f_uq(DisasContext *ctx, arg_fclass01 *a)
3841{
3842 return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_t_s_udw);
3843}
3844
3845static bool trans_fcnv_t_d_uq(DisasContext *ctx, arg_fclass01 *a)
3846{
3847 return do_fop_ded(ctx, a->t, a->r, gen_helper_fcnv_t_d_udw);
3848}
3849
3850/*
3851 * Float class 2
3852 */
3853
3854static bool trans_fcmp_f(DisasContext *ctx, arg_fclass2 *a)
ebe9383c
RH
3855{
3856 TCGv_i32 ta, tb, tc, ty;
3857
3858 nullify_over(ctx);
3859
1ca74648
RH
3860 ta = load_frw0_i32(a->r1);
3861 tb = load_frw0_i32(a->r2);
29dd6f64
RH
3862 ty = tcg_constant_i32(a->y);
3863 tc = tcg_constant_i32(a->c);
ebe9383c 3864
ad75a51e 3865 gen_helper_fcmp_s(tcg_env, ta, tb, ty, tc);
ebe9383c 3866
1ca74648 3867 return nullify_end(ctx);
ebe9383c
RH
3868}
3869
1ca74648 3870static bool trans_fcmp_d(DisasContext *ctx, arg_fclass2 *a)
ebe9383c 3871{
ebe9383c
RH
3872 TCGv_i64 ta, tb;
3873 TCGv_i32 tc, ty;
3874
3875 nullify_over(ctx);
3876
1ca74648
RH
3877 ta = load_frd0(a->r1);
3878 tb = load_frd0(a->r2);
29dd6f64
RH
3879 ty = tcg_constant_i32(a->y);
3880 tc = tcg_constant_i32(a->c);
ebe9383c 3881
ad75a51e 3882 gen_helper_fcmp_d(tcg_env, ta, tb, ty, tc);
ebe9383c 3883
31234768 3884 return nullify_end(ctx);
ebe9383c
RH
3885}
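/*
 * Editor's note: the fcmp helpers above only record the comparison result in
 * the FP status (the fr0_shadow field read by trans_ftest below); it is the
 * later FTEST that turns the recorded result into a nullification condition.
 */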
3886
1ca74648 3887static bool trans_ftest(DisasContext *ctx, arg_ftest *a)
ebe9383c 3888{
eaa3783b 3889 TCGv_reg t;
ebe9383c
RH
3890
3891 nullify_over(ctx);
3892
e12c6309 3893 t = tcg_temp_new();
ad75a51e 3894 tcg_gen_ld32u_reg(t, tcg_env, offsetof(CPUHPPAState, fr0_shadow));
ebe9383c 3895
1ca74648
RH
3896 if (a->y == 1) {
3897 int mask;
3898 bool inv = false;
3899
3900 switch (a->c) {
3901 case 0: /* simple */
3902 tcg_gen_andi_reg(t, t, 0x4000000);
3903 ctx->null_cond = cond_make_0(TCG_COND_NE, t);
3904 goto done;
3905 case 2: /* rej */
3906 inv = true;
3907 /* fallthru */
3908 case 1: /* acc */
3909 mask = 0x43ff800;
3910 break;
3911 case 6: /* rej8 */
3912 inv = true;
3913 /* fallthru */
3914 case 5: /* acc8 */
3915 mask = 0x43f8000;
3916 break;
3917 case 9: /* acc6 */
3918 mask = 0x43e0000;
3919 break;
3920 case 13: /* acc4 */
3921 mask = 0x4380000;
3922 break;
3923 case 17: /* acc2 */
3924 mask = 0x4200000;
3925 break;
3926 default:
3927 gen_illegal(ctx);
3928 return true;
3929 }
3930 if (inv) {
d4e58033 3931 TCGv_reg c = tcg_constant_reg(mask);
1ca74648
RH
3932 tcg_gen_or_reg(t, t, c);
3933 ctx->null_cond = cond_make(TCG_COND_EQ, t, c);
3934 } else {
3935 tcg_gen_andi_reg(t, t, mask);
3936 ctx->null_cond = cond_make_0(TCG_COND_EQ, t);
3937 }
3938 } else {
3939 unsigned cbit = (a->y ^ 1) - 1;
3940
3941 tcg_gen_extract_reg(t, t, 21 - cbit, 1);
3942 ctx->null_cond = cond_make_0(TCG_COND_NE, t);
1ca74648
RH
3943 }
3944
3945 done:
31234768 3946 return nullify_end(ctx);
ebe9383c
RH
3947}
3948
1ca74648
RH
3949/*
3950 * Float class 3
3951 */
3952
3953static bool trans_fadd_f(DisasContext *ctx, arg_fclass3 *a)
ebe9383c 3954{
1ca74648
RH
3955 return do_fop_weww(ctx, a->t, a->r1, a->r2, gen_helper_fadd_s);
3956}
ebe9383c 3957
1ca74648
RH
3958static bool trans_fadd_d(DisasContext *ctx, arg_fclass3 *a)
3959{
3960 return do_fop_dedd(ctx, a->t, a->r1, a->r2, gen_helper_fadd_d);
3961}
ebe9383c 3962
1ca74648
RH
3963static bool trans_fsub_f(DisasContext *ctx, arg_fclass3 *a)
3964{
3965 return do_fop_weww(ctx, a->t, a->r1, a->r2, gen_helper_fsub_s);
3966}
ebe9383c 3967
1ca74648
RH
3968static bool trans_fsub_d(DisasContext *ctx, arg_fclass3 *a)
3969{
3970 return do_fop_dedd(ctx, a->t, a->r1, a->r2, gen_helper_fsub_d);
ebe9383c
RH
3971}
3972
1ca74648 3973static bool trans_fmpy_f(DisasContext *ctx, arg_fclass3 *a)
ebe9383c 3974{
1ca74648
RH
3975 return do_fop_weww(ctx, a->t, a->r1, a->r2, gen_helper_fmpy_s);
3976}
3977
3978static bool trans_fmpy_d(DisasContext *ctx, arg_fclass3 *a)
3979{
3980 return do_fop_dedd(ctx, a->t, a->r1, a->r2, gen_helper_fmpy_d);
3981}
3982
3983static bool trans_fdiv_f(DisasContext *ctx, arg_fclass3 *a)
3984{
3985 return do_fop_weww(ctx, a->t, a->r1, a->r2, gen_helper_fdiv_s);
3986}
3987
3988static bool trans_fdiv_d(DisasContext *ctx, arg_fclass3 *a)
3989{
3990 return do_fop_dedd(ctx, a->t, a->r1, a->r2, gen_helper_fdiv_d);
3991}
3992
3993static bool trans_xmpyu(DisasContext *ctx, arg_xmpyu *a)
3994{
3995 TCGv_i64 x, y;
ebe9383c
RH
3996
3997 nullify_over(ctx);
3998
1ca74648
RH
3999 x = load_frw0_i64(a->r1);
4000 y = load_frw0_i64(a->r2);
4001 tcg_gen_mul_i64(x, x, y);
4002 save_frd(a->t, x);
ebe9383c 4003
31234768 4004 return nullify_end(ctx);
ebe9383c
RH
4005}
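/*
 * Editor's note: xmpyu above forms the full 64-bit product of two 32-bit
 * FP-register words (load_frw0_i64() presumably zero-extends the word, so
 * the multiply is unsigned) and writes it to the double-word destination.
 */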
4006
ebe9383c
RH
4007/* Convert the fmpyadd single-precision register encodings to standard. */
4008static inline int fmpyadd_s_reg(unsigned r)
4009{
4010 return (r & 16) * 2 + 16 + (r & 15);
4011}
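/*
 * Editor's note (worked examples of the mapping above):
 *   r = 0  -> 16,   r = 15 -> 31,
 *   r = 16 -> 48,   r = 31 -> 63,
 * so the 5-bit fmpyadd encodings appear to cover the single-precision
 * halves of fr16..fr31 in the flat 0..63 numbering used by
 * load_frw0_i32() and save_frw_i32().
 */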
4012
b1e2af57 4013static bool do_fmpyadd_s(DisasContext *ctx, arg_mpyadd *a, bool is_sub)
ebe9383c 4014{
b1e2af57
RH
4015 int tm = fmpyadd_s_reg(a->tm);
4016 int ra = fmpyadd_s_reg(a->ra);
4017 int ta = fmpyadd_s_reg(a->ta);
4018 int rm2 = fmpyadd_s_reg(a->rm2);
4019 int rm1 = fmpyadd_s_reg(a->rm1);
ebe9383c
RH
4020
4021 nullify_over(ctx);
4022
b1e2af57
RH
4023 do_fop_weww(ctx, tm, rm1, rm2, gen_helper_fmpy_s);
4024 do_fop_weww(ctx, ta, ta, ra,
4025 is_sub ? gen_helper_fsub_s : gen_helper_fadd_s);
ebe9383c 4026
31234768 4027 return nullify_end(ctx);
ebe9383c
RH
4028}
4029
b1e2af57
RH
4030static bool trans_fmpyadd_f(DisasContext *ctx, arg_mpyadd *a)
4031{
4032 return do_fmpyadd_s(ctx, a, false);
4033}
4034
4035static bool trans_fmpysub_f(DisasContext *ctx, arg_mpyadd *a)
4036{
4037 return do_fmpyadd_s(ctx, a, true);
4038}
4039
4040static bool do_fmpyadd_d(DisasContext *ctx, arg_mpyadd *a, bool is_sub)
4041{
4042 nullify_over(ctx);
4043
4044 do_fop_dedd(ctx, a->tm, a->rm1, a->rm2, gen_helper_fmpy_d);
4045 do_fop_dedd(ctx, a->ta, a->ta, a->ra,
4046 is_sub ? gen_helper_fsub_d : gen_helper_fadd_d);
4047
4048 return nullify_end(ctx);
4049}
4050
4051static bool trans_fmpyadd_d(DisasContext *ctx, arg_mpyadd *a)
4052{
4053 return do_fmpyadd_d(ctx, a, false);
4054}
4055
4056static bool trans_fmpysub_d(DisasContext *ctx, arg_mpyadd *a)
4057{
4058 return do_fmpyadd_d(ctx, a, true);
4059}
4060
c3bad4f8 4061static bool trans_fmpyfadd_f(DisasContext *ctx, arg_fmpyfadd_f *a)
ebe9383c 4062{
c3bad4f8 4063 TCGv_i32 x, y, z;
ebe9383c
RH
4064
4065 nullify_over(ctx);
c3bad4f8
RH
4066 x = load_frw0_i32(a->rm1);
4067 y = load_frw0_i32(a->rm2);
4068 z = load_frw0_i32(a->ra3);
ebe9383c 4069
c3bad4f8 4070 if (a->neg) {
ad75a51e 4071 gen_helper_fmpynfadd_s(x, tcg_env, x, y, z);
ebe9383c 4072 } else {
ad75a51e 4073 gen_helper_fmpyfadd_s(x, tcg_env, x, y, z);
ebe9383c
RH
4074 }
4075
c3bad4f8 4076 save_frw_i32(a->t, x);
31234768 4077 return nullify_end(ctx);
ebe9383c
RH
4078}
4079
c3bad4f8 4080static bool trans_fmpyfadd_d(DisasContext *ctx, arg_fmpyfadd_d *a)
ebe9383c 4081{
c3bad4f8 4082 TCGv_i64 x, y, z;
ebe9383c
RH
4083
4084 nullify_over(ctx);
c3bad4f8
RH
4085 x = load_frd0(a->rm1);
4086 y = load_frd0(a->rm2);
4087 z = load_frd0(a->ra3);
ebe9383c 4088
c3bad4f8 4089 if (a->neg) {
ad75a51e 4090 gen_helper_fmpynfadd_d(x, tcg_env, x, y, z);
ebe9383c 4091 } else {
ad75a51e 4092 gen_helper_fmpyfadd_d(x, tcg_env, x, y, z);
ebe9383c
RH
4093 }
4094
c3bad4f8 4095 save_frd(a->t, x);
31234768 4096 return nullify_end(ctx);
ebe9383c
RH
4097}
4098
15da177b
SS
4099static bool trans_diag(DisasContext *ctx, arg_diag *a)
4100{
cf6b28d4
HD
4101 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
4102#ifndef CONFIG_USER_ONLY
4103 if (a->i == 0x100) {
4104 /* emulate PDC BTLB, called by SeaBIOS-hppa */
ad75a51e
RH
4105 nullify_over(ctx);
4106 gen_helper_diag_btlb(tcg_env);
4107 return nullify_end(ctx);
cf6b28d4 4108 }
ad75a51e
RH
4109#endif
4110 qemu_log_mask(LOG_UNIMP, "DIAG opcode 0x%04x ignored\n", a->i);
4111 return true;
15da177b
SS
4112}
4113
b542683d 4114static void hppa_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
61766fe9 4115{
51b061fb 4116 DisasContext *ctx = container_of(dcbase, DisasContext, base);
f764718d 4117 int bound;
61766fe9 4118
51b061fb 4119 ctx->cs = cs;
494737b7 4120 ctx->tb_flags = ctx->base.tb->flags;
bd6243a3 4121 ctx->is_pa20 = hppa_is_pa20(cpu_env(cs));
3d68ee7b
RH
4122
4123#ifdef CONFIG_USER_ONLY
c01e5dfb 4124 ctx->privilege = MMU_IDX_TO_PRIV(MMU_USER_IDX);
3d68ee7b 4125 ctx->mmu_idx = MMU_USER_IDX;
c01e5dfb
HD
4126 ctx->iaoq_f = ctx->base.pc_first | ctx->privilege;
4127 ctx->iaoq_b = ctx->base.tb->cs_base | ctx->privilege;
217d1a5e 4128 ctx->unalign = (ctx->tb_flags & TB_FLAG_UNALIGN ? MO_UNALN : MO_ALIGN);
3d68ee7b 4129#else
494737b7 4130 ctx->privilege = (ctx->tb_flags >> TB_FLAG_PRIV_SHIFT) & 3;
bb67ec32
RH
4131 ctx->mmu_idx = (ctx->tb_flags & PSW_D
4132 ? PRIV_P_TO_MMU_IDX(ctx->privilege, ctx->tb_flags & PSW_P)
4133 : MMU_PHYS_IDX);
3d68ee7b 4134
c301f34e
RH
4135 /* Recover the IAOQ values from the GVA + PRIV. */
4136 uint64_t cs_base = ctx->base.tb->cs_base;
4137 uint64_t iasq_f = cs_base & ~0xffffffffull;
4138 int32_t diff = cs_base;
4139
4140 ctx->iaoq_f = (ctx->base.pc_first & ~iasq_f) + ctx->privilege;
4141 ctx->iaoq_b = (diff ? ctx->iaoq_f + diff : -1);
4142#endif
51b061fb 4143 ctx->iaoq_n = -1;
f764718d 4144 ctx->iaoq_n_var = NULL;
61766fe9 4145
3d68ee7b
RH
4146 /* Bound the number of instructions by those left on the page. */
4147 bound = -(ctx->base.pc_first | TARGET_PAGE_MASK) / 4;
b542683d 4148 ctx->base.max_insns = MIN(ctx->base.max_insns, bound);
51b061fb 4149}
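/*
 * Editor's note (worked example, assuming 4 KiB target pages): the bound
 * computed above is the number of instruction words left on the page.
 * With pc_first == 0x10000ff8, (pc_first | TARGET_PAGE_MASK) is -8 when
 * viewed as a signed value, so bound == 8 / 4 == 2 and at most two insns
 * are translated into this TB.
 */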
61766fe9 4150
51b061fb
RH
4151static void hppa_tr_tb_start(DisasContextBase *dcbase, CPUState *cs)
4152{
4153 DisasContext *ctx = container_of(dcbase, DisasContext, base);
61766fe9 4154
3d68ee7b 4155 /* Seed the nullification status from PSW[N], as saved in TB->FLAGS. */
51b061fb
RH
4156 ctx->null_cond = cond_make_f();
4157 ctx->psw_n_nonzero = false;
494737b7 4158 if (ctx->tb_flags & PSW_N) {
51b061fb
RH
4159 ctx->null_cond.c = TCG_COND_ALWAYS;
4160 ctx->psw_n_nonzero = true;
129e9cc3 4161 }
51b061fb
RH
4162 ctx->null_lab = NULL;
4163}
129e9cc3 4164
51b061fb
RH
4165static void hppa_tr_insn_start(DisasContextBase *dcbase, CPUState *cs)
4166{
4167 DisasContext *ctx = container_of(dcbase, DisasContext, base);
61766fe9 4168
51b061fb
RH
4169 tcg_gen_insn_start(ctx->iaoq_f, ctx->iaoq_b);
4170}
4171
51b061fb
RH
4172static void hppa_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs)
4173{
4174 DisasContext *ctx = container_of(dcbase, DisasContext, base);
b77af26e 4175 CPUHPPAState *env = cpu_env(cs);
51b061fb 4176 DisasJumpType ret;
51b061fb
RH
4177
4178 /* Execute one insn. */
ba1d0b44 4179#ifdef CONFIG_USER_ONLY
c301f34e 4180 if (ctx->base.pc_next < TARGET_PAGE_SIZE) {
31234768
RH
4181 do_page_zero(ctx);
4182 ret = ctx->base.is_jmp;
51b061fb 4183 assert(ret != DISAS_NEXT);
ba1d0b44
RH
4184 } else
4185#endif
4186 {
51b061fb
RH
4187 /* Always fetch the insn, even if nullified, so that we check
4188 the page permissions for execute. */
4e116893 4189 uint32_t insn = translator_ldl(env, &ctx->base, ctx->base.pc_next);
51b061fb
RH
4190
4191 /* Set up the IA queue for the next insn.
4192 This will be overwritten by a branch. */
4193 if (ctx->iaoq_b == -1) {
4194 ctx->iaoq_n = -1;
e12c6309 4195 ctx->iaoq_n_var = tcg_temp_new();
eaa3783b 4196 tcg_gen_addi_reg(ctx->iaoq_n_var, cpu_iaoq_b, 4);
7ad439df 4197 } else {
51b061fb 4198 ctx->iaoq_n = ctx->iaoq_b + 4;
f764718d 4199 ctx->iaoq_n_var = NULL;
61766fe9
RH
4200 }
4201
51b061fb
RH
4202 if (unlikely(ctx->null_cond.c == TCG_COND_ALWAYS)) {
4203 ctx->null_cond.c = TCG_COND_NEVER;
4204 ret = DISAS_NEXT;
4205 } else {
1a19da0d 4206 ctx->insn = insn;
31274b46
RH
4207 if (!decode(ctx, insn)) {
4208 gen_illegal(ctx);
4209 }
31234768 4210 ret = ctx->base.is_jmp;
51b061fb 4211 assert(ctx->null_lab == NULL);
61766fe9 4212 }
51b061fb 4213 }
61766fe9 4214
3d68ee7b
RH
4215 /* Advance the insn queue. Note that this check also detects
4216 a privilege change within the instruction queue. */
51b061fb 4217 if (ret == DISAS_NEXT && ctx->iaoq_b != ctx->iaoq_f + 4) {
c301f34e
RH
4218 if (ctx->iaoq_b != -1 && ctx->iaoq_n != -1
4219 && use_goto_tb(ctx, ctx->iaoq_b)
4220 && (ctx->null_cond.c == TCG_COND_NEVER
4221 || ctx->null_cond.c == TCG_COND_ALWAYS)) {
51b061fb
RH
4222 nullify_set(ctx, ctx->null_cond.c == TCG_COND_ALWAYS);
4223 gen_goto_tb(ctx, 0, ctx->iaoq_b, ctx->iaoq_n);
31234768 4224 ctx->base.is_jmp = ret = DISAS_NORETURN;
51b061fb 4225 } else {
31234768 4226 ctx->base.is_jmp = ret = DISAS_IAQ_N_STALE;
c301f34e 4227 }
61766fe9 4228 }
51b061fb
RH
4229 ctx->iaoq_f = ctx->iaoq_b;
4230 ctx->iaoq_b = ctx->iaoq_n;
c301f34e 4231 ctx->base.pc_next += 4;
51b061fb 4232
c5d0aec2
RH
4233 switch (ret) {
4234 case DISAS_NORETURN:
4235 case DISAS_IAQ_N_UPDATED:
4236 break;
4237
4238 case DISAS_NEXT:
4239 case DISAS_IAQ_N_STALE:
4240 case DISAS_IAQ_N_STALE_EXIT:
4241 if (ctx->iaoq_f == -1) {
a0180973 4242 copy_iaoq_entry(ctx, cpu_iaoq_f, -1, cpu_iaoq_b);
741322f4 4243 copy_iaoq_entry(ctx, cpu_iaoq_b, ctx->iaoq_n, ctx->iaoq_n_var);
c301f34e 4244#ifndef CONFIG_USER_ONLY
c5d0aec2 4245 tcg_gen_mov_i64(cpu_iasq_f, cpu_iasq_b);
c301f34e 4246#endif
c5d0aec2
RH
4247 nullify_save(ctx);
4248 ctx->base.is_jmp = (ret == DISAS_IAQ_N_STALE_EXIT
4249 ? DISAS_EXIT
4250 : DISAS_IAQ_N_UPDATED);
4251 } else if (ctx->iaoq_b == -1) {
a0180973 4252 copy_iaoq_entry(ctx, cpu_iaoq_b, -1, ctx->iaoq_n_var);
c5d0aec2
RH
4253 }
4254 break;
4255
4256 default:
4257 g_assert_not_reached();
51b061fb
RH
4258 }
4259}
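/*
 * Editor's note: a minimal sketch (hypothetical types, not part of
 * translate.c) of the instruction-address-queue slide modelled by the
 * iaoq_f / iaoq_b / iaoq_n updates at the end of the function above:
 * the front element retires and the back element moves forward each insn.
 */
#include <stdint.h>

struct ia_queue { uint64_t front, back; };

static void ia_queue_advance(struct ia_queue *q, uint64_t next)
{
    q->front = q->back;   /* ctx->iaoq_f = ctx->iaoq_b */
    q->back  = next;      /* ctx->iaoq_b = ctx->iaoq_n */
}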
4260
4261static void hppa_tr_tb_stop(DisasContextBase *dcbase, CPUState *cs)
4262{
4263 DisasContext *ctx = container_of(dcbase, DisasContext, base);
e1b5a5ed 4264 DisasJumpType is_jmp = ctx->base.is_jmp;
61766fe9 4265
e1b5a5ed 4266 switch (is_jmp) {
869051ea 4267 case DISAS_NORETURN:
61766fe9 4268 break;
51b061fb 4269 case DISAS_TOO_MANY:
869051ea 4270 case DISAS_IAQ_N_STALE:
e1b5a5ed 4271 case DISAS_IAQ_N_STALE_EXIT:
741322f4
RH
4272 copy_iaoq_entry(ctx, cpu_iaoq_f, ctx->iaoq_f, cpu_iaoq_f);
4273 copy_iaoq_entry(ctx, cpu_iaoq_b, ctx->iaoq_b, cpu_iaoq_b);
51b061fb 4274 nullify_save(ctx);
61766fe9 4275 /* FALLTHRU */
869051ea 4276 case DISAS_IAQ_N_UPDATED:
8532a14e 4277 if (is_jmp != DISAS_IAQ_N_STALE_EXIT) {
7f11636d 4278 tcg_gen_lookup_and_goto_ptr();
8532a14e 4279 break;
61766fe9 4280 }
c5d0aec2
RH
4281 /* FALLTHRU */
4282 case DISAS_EXIT:
4283 tcg_gen_exit_tb(NULL, 0);
61766fe9
RH
4284 break;
4285 default:
51b061fb 4286 g_assert_not_reached();
61766fe9 4287 }
51b061fb 4288}
61766fe9 4289
8eb806a7
RH
4290static void hppa_tr_disas_log(const DisasContextBase *dcbase,
4291 CPUState *cs, FILE *logfile)
51b061fb 4292{
c301f34e 4293 target_ulong pc = dcbase->pc_first;
61766fe9 4294
ba1d0b44
RH
4295#ifdef CONFIG_USER_ONLY
4296 switch (pc) {
51b061fb 4297 case 0x00:
8eb806a7 4298 fprintf(logfile, "IN:\n0x00000000: (null)\n");
ba1d0b44 4299 return;
51b061fb 4300 case 0xb0:
8eb806a7 4301 fprintf(logfile, "IN:\n0x000000b0: light-weight-syscall\n");
ba1d0b44 4302 return;
51b061fb 4303 case 0xe0:
8eb806a7 4304 fprintf(logfile, "IN:\n0x000000e0: set-thread-pointer-syscall\n");
ba1d0b44 4305 return;
51b061fb 4306 case 0x100:
8eb806a7 4307 fprintf(logfile, "IN:\n0x00000100: syscall\n");
ba1d0b44 4308 return;
61766fe9 4309 }
ba1d0b44
RH
4310#endif
4311
8eb806a7
RH
4312 fprintf(logfile, "IN: %s\n", lookup_symbol(pc));
4313 target_disas(logfile, cs, pc, dcbase->tb->size);
51b061fb
RH
4314}
4315
4316static const TranslatorOps hppa_tr_ops = {
4317 .init_disas_context = hppa_tr_init_disas_context,
4318 .tb_start = hppa_tr_tb_start,
4319 .insn_start = hppa_tr_insn_start,
51b061fb
RH
4320 .translate_insn = hppa_tr_translate_insn,
4321 .tb_stop = hppa_tr_tb_stop,
4322 .disas_log = hppa_tr_disas_log,
4323};
4324
597f9b2d 4325void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns,
306c8721 4326 target_ulong pc, void *host_pc)
51b061fb
RH
4327{
4328 DisasContext ctx;
306c8721 4329 translator_loop(cs, tb, max_insns, pc, host_pc, &hppa_tr_ops, &ctx.base);
61766fe9 4330}