]> git.proxmox.com Git - mirror_qemu.git/blame - target/hppa/translate.c
target/hppa: Fix bb_sar for hppa64
[mirror_qemu.git] / target / hppa / translate.c
CommitLineData
61766fe9
RH
1/*
2 * HPPA emulation cpu translation for qemu.
3 *
4 * Copyright (c) 2016 Richard Henderson <rth@twiddle.net>
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
d6ea4236 9 * version 2.1 of the License, or (at your option) any later version.
61766fe9
RH
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
18 */
19
20#include "qemu/osdep.h"
21#include "cpu.h"
22#include "disas/disas.h"
23#include "qemu/host-utils.h"
24#include "exec/exec-all.h"
dcb32f1d 25#include "tcg/tcg-op.h"
61766fe9
RH
26#include "exec/helper-proto.h"
27#include "exec/helper-gen.h"
869051ea 28#include "exec/translator.h"
61766fe9
RH
29#include "exec/log.h"
30
d53106c9
RH
31#define HELPER_H "helper.h"
32#include "exec/helper-info.c.inc"
33#undef HELPER_H
34
35
eaa3783b
RH
36/* Since we have a distinction between register size and address size,
37 we need to redefine all of these. */
38
39#undef TCGv
40#undef tcg_temp_new
eaa3783b 41#undef tcg_global_mem_new
eaa3783b
RH
42
43#if TARGET_LONG_BITS == 64
44#define TCGv_tl TCGv_i64
45#define tcg_temp_new_tl tcg_temp_new_i64
eaa3783b
RH
46#if TARGET_REGISTER_BITS == 64
47#define tcg_gen_extu_reg_tl tcg_gen_mov_i64
48#else
49#define tcg_gen_extu_reg_tl tcg_gen_extu_i32_i64
50#endif
51#else
52#define TCGv_tl TCGv_i32
53#define tcg_temp_new_tl tcg_temp_new_i32
eaa3783b
RH
54#define tcg_gen_extu_reg_tl tcg_gen_mov_i32
55#endif
56
57#if TARGET_REGISTER_BITS == 64
58#define TCGv_reg TCGv_i64
59
60#define tcg_temp_new tcg_temp_new_i64
eaa3783b 61#define tcg_global_mem_new tcg_global_mem_new_i64
eaa3783b
RH
62
63#define tcg_gen_movi_reg tcg_gen_movi_i64
64#define tcg_gen_mov_reg tcg_gen_mov_i64
65#define tcg_gen_ld8u_reg tcg_gen_ld8u_i64
66#define tcg_gen_ld8s_reg tcg_gen_ld8s_i64
67#define tcg_gen_ld16u_reg tcg_gen_ld16u_i64
68#define tcg_gen_ld16s_reg tcg_gen_ld16s_i64
69#define tcg_gen_ld32u_reg tcg_gen_ld32u_i64
70#define tcg_gen_ld32s_reg tcg_gen_ld32s_i64
71#define tcg_gen_ld_reg tcg_gen_ld_i64
72#define tcg_gen_st8_reg tcg_gen_st8_i64
73#define tcg_gen_st16_reg tcg_gen_st16_i64
74#define tcg_gen_st32_reg tcg_gen_st32_i64
75#define tcg_gen_st_reg tcg_gen_st_i64
76#define tcg_gen_add_reg tcg_gen_add_i64
77#define tcg_gen_addi_reg tcg_gen_addi_i64
78#define tcg_gen_sub_reg tcg_gen_sub_i64
79#define tcg_gen_neg_reg tcg_gen_neg_i64
80#define tcg_gen_subfi_reg tcg_gen_subfi_i64
81#define tcg_gen_subi_reg tcg_gen_subi_i64
82#define tcg_gen_and_reg tcg_gen_and_i64
83#define tcg_gen_andi_reg tcg_gen_andi_i64
84#define tcg_gen_or_reg tcg_gen_or_i64
85#define tcg_gen_ori_reg tcg_gen_ori_i64
86#define tcg_gen_xor_reg tcg_gen_xor_i64
87#define tcg_gen_xori_reg tcg_gen_xori_i64
88#define tcg_gen_not_reg tcg_gen_not_i64
89#define tcg_gen_shl_reg tcg_gen_shl_i64
90#define tcg_gen_shli_reg tcg_gen_shli_i64
91#define tcg_gen_shr_reg tcg_gen_shr_i64
92#define tcg_gen_shri_reg tcg_gen_shri_i64
93#define tcg_gen_sar_reg tcg_gen_sar_i64
94#define tcg_gen_sari_reg tcg_gen_sari_i64
95#define tcg_gen_brcond_reg tcg_gen_brcond_i64
96#define tcg_gen_brcondi_reg tcg_gen_brcondi_i64
97#define tcg_gen_setcond_reg tcg_gen_setcond_i64
98#define tcg_gen_setcondi_reg tcg_gen_setcondi_i64
99#define tcg_gen_mul_reg tcg_gen_mul_i64
100#define tcg_gen_muli_reg tcg_gen_muli_i64
101#define tcg_gen_div_reg tcg_gen_div_i64
102#define tcg_gen_rem_reg tcg_gen_rem_i64
103#define tcg_gen_divu_reg tcg_gen_divu_i64
104#define tcg_gen_remu_reg tcg_gen_remu_i64
105#define tcg_gen_discard_reg tcg_gen_discard_i64
106#define tcg_gen_trunc_reg_i32 tcg_gen_extrl_i64_i32
107#define tcg_gen_trunc_i64_reg tcg_gen_mov_i64
108#define tcg_gen_extu_i32_reg tcg_gen_extu_i32_i64
109#define tcg_gen_ext_i32_reg tcg_gen_ext_i32_i64
110#define tcg_gen_extu_reg_i64 tcg_gen_mov_i64
111#define tcg_gen_ext_reg_i64 tcg_gen_mov_i64
112#define tcg_gen_ext8u_reg tcg_gen_ext8u_i64
113#define tcg_gen_ext8s_reg tcg_gen_ext8s_i64
114#define tcg_gen_ext16u_reg tcg_gen_ext16u_i64
115#define tcg_gen_ext16s_reg tcg_gen_ext16s_i64
116#define tcg_gen_ext32u_reg tcg_gen_ext32u_i64
117#define tcg_gen_ext32s_reg tcg_gen_ext32s_i64
118#define tcg_gen_bswap16_reg tcg_gen_bswap16_i64
119#define tcg_gen_bswap32_reg tcg_gen_bswap32_i64
120#define tcg_gen_bswap64_reg tcg_gen_bswap64_i64
121#define tcg_gen_concat_reg_i64 tcg_gen_concat32_i64
122#define tcg_gen_andc_reg tcg_gen_andc_i64
123#define tcg_gen_eqv_reg tcg_gen_eqv_i64
124#define tcg_gen_nand_reg tcg_gen_nand_i64
125#define tcg_gen_nor_reg tcg_gen_nor_i64
126#define tcg_gen_orc_reg tcg_gen_orc_i64
127#define tcg_gen_clz_reg tcg_gen_clz_i64
128#define tcg_gen_ctz_reg tcg_gen_ctz_i64
129#define tcg_gen_clzi_reg tcg_gen_clzi_i64
130#define tcg_gen_ctzi_reg tcg_gen_ctzi_i64
131#define tcg_gen_clrsb_reg tcg_gen_clrsb_i64
132#define tcg_gen_ctpop_reg tcg_gen_ctpop_i64
133#define tcg_gen_rotl_reg tcg_gen_rotl_i64
134#define tcg_gen_rotli_reg tcg_gen_rotli_i64
135#define tcg_gen_rotr_reg tcg_gen_rotr_i64
136#define tcg_gen_rotri_reg tcg_gen_rotri_i64
137#define tcg_gen_deposit_reg tcg_gen_deposit_i64
138#define tcg_gen_deposit_z_reg tcg_gen_deposit_z_i64
139#define tcg_gen_extract_reg tcg_gen_extract_i64
140#define tcg_gen_sextract_reg tcg_gen_sextract_i64
05bfd4db 141#define tcg_gen_extract2_reg tcg_gen_extract2_i64
29dd6f64 142#define tcg_constant_reg tcg_constant_i64
eaa3783b
RH
143#define tcg_gen_movcond_reg tcg_gen_movcond_i64
144#define tcg_gen_add2_reg tcg_gen_add2_i64
145#define tcg_gen_sub2_reg tcg_gen_sub2_i64
146#define tcg_gen_qemu_ld_reg tcg_gen_qemu_ld_i64
147#define tcg_gen_qemu_st_reg tcg_gen_qemu_st_i64
148#define tcg_gen_atomic_xchg_reg tcg_gen_atomic_xchg_i64
5bfa8034 149#define tcg_gen_trunc_reg_ptr tcg_gen_trunc_i64_ptr
eaa3783b
RH
150#else
151#define TCGv_reg TCGv_i32
152#define tcg_temp_new tcg_temp_new_i32
eaa3783b 153#define tcg_global_mem_new tcg_global_mem_new_i32
eaa3783b
RH
154
155#define tcg_gen_movi_reg tcg_gen_movi_i32
156#define tcg_gen_mov_reg tcg_gen_mov_i32
157#define tcg_gen_ld8u_reg tcg_gen_ld8u_i32
158#define tcg_gen_ld8s_reg tcg_gen_ld8s_i32
159#define tcg_gen_ld16u_reg tcg_gen_ld16u_i32
160#define tcg_gen_ld16s_reg tcg_gen_ld16s_i32
161#define tcg_gen_ld32u_reg tcg_gen_ld_i32
162#define tcg_gen_ld32s_reg tcg_gen_ld_i32
163#define tcg_gen_ld_reg tcg_gen_ld_i32
164#define tcg_gen_st8_reg tcg_gen_st8_i32
165#define tcg_gen_st16_reg tcg_gen_st16_i32
166#define tcg_gen_st32_reg tcg_gen_st32_i32
167#define tcg_gen_st_reg tcg_gen_st_i32
168#define tcg_gen_add_reg tcg_gen_add_i32
169#define tcg_gen_addi_reg tcg_gen_addi_i32
170#define tcg_gen_sub_reg tcg_gen_sub_i32
171#define tcg_gen_neg_reg tcg_gen_neg_i32
172#define tcg_gen_subfi_reg tcg_gen_subfi_i32
173#define tcg_gen_subi_reg tcg_gen_subi_i32
174#define tcg_gen_and_reg tcg_gen_and_i32
175#define tcg_gen_andi_reg tcg_gen_andi_i32
176#define tcg_gen_or_reg tcg_gen_or_i32
177#define tcg_gen_ori_reg tcg_gen_ori_i32
178#define tcg_gen_xor_reg tcg_gen_xor_i32
179#define tcg_gen_xori_reg tcg_gen_xori_i32
180#define tcg_gen_not_reg tcg_gen_not_i32
181#define tcg_gen_shl_reg tcg_gen_shl_i32
182#define tcg_gen_shli_reg tcg_gen_shli_i32
183#define tcg_gen_shr_reg tcg_gen_shr_i32
184#define tcg_gen_shri_reg tcg_gen_shri_i32
185#define tcg_gen_sar_reg tcg_gen_sar_i32
186#define tcg_gen_sari_reg tcg_gen_sari_i32
187#define tcg_gen_brcond_reg tcg_gen_brcond_i32
188#define tcg_gen_brcondi_reg tcg_gen_brcondi_i32
189#define tcg_gen_setcond_reg tcg_gen_setcond_i32
190#define tcg_gen_setcondi_reg tcg_gen_setcondi_i32
191#define tcg_gen_mul_reg tcg_gen_mul_i32
192#define tcg_gen_muli_reg tcg_gen_muli_i32
193#define tcg_gen_div_reg tcg_gen_div_i32
194#define tcg_gen_rem_reg tcg_gen_rem_i32
195#define tcg_gen_divu_reg tcg_gen_divu_i32
196#define tcg_gen_remu_reg tcg_gen_remu_i32
197#define tcg_gen_discard_reg tcg_gen_discard_i32
198#define tcg_gen_trunc_reg_i32 tcg_gen_mov_i32
199#define tcg_gen_trunc_i64_reg tcg_gen_extrl_i64_i32
200#define tcg_gen_extu_i32_reg tcg_gen_mov_i32
201#define tcg_gen_ext_i32_reg tcg_gen_mov_i32
202#define tcg_gen_extu_reg_i64 tcg_gen_extu_i32_i64
203#define tcg_gen_ext_reg_i64 tcg_gen_ext_i32_i64
204#define tcg_gen_ext8u_reg tcg_gen_ext8u_i32
205#define tcg_gen_ext8s_reg tcg_gen_ext8s_i32
206#define tcg_gen_ext16u_reg tcg_gen_ext16u_i32
207#define tcg_gen_ext16s_reg tcg_gen_ext16s_i32
208#define tcg_gen_ext32u_reg tcg_gen_mov_i32
209#define tcg_gen_ext32s_reg tcg_gen_mov_i32
210#define tcg_gen_bswap16_reg tcg_gen_bswap16_i32
211#define tcg_gen_bswap32_reg tcg_gen_bswap32_i32
212#define tcg_gen_concat_reg_i64 tcg_gen_concat_i32_i64
213#define tcg_gen_andc_reg tcg_gen_andc_i32
214#define tcg_gen_eqv_reg tcg_gen_eqv_i32
215#define tcg_gen_nand_reg tcg_gen_nand_i32
216#define tcg_gen_nor_reg tcg_gen_nor_i32
217#define tcg_gen_orc_reg tcg_gen_orc_i32
218#define tcg_gen_clz_reg tcg_gen_clz_i32
219#define tcg_gen_ctz_reg tcg_gen_ctz_i32
220#define tcg_gen_clzi_reg tcg_gen_clzi_i32
221#define tcg_gen_ctzi_reg tcg_gen_ctzi_i32
222#define tcg_gen_clrsb_reg tcg_gen_clrsb_i32
223#define tcg_gen_ctpop_reg tcg_gen_ctpop_i32
224#define tcg_gen_rotl_reg tcg_gen_rotl_i32
225#define tcg_gen_rotli_reg tcg_gen_rotli_i32
226#define tcg_gen_rotr_reg tcg_gen_rotr_i32
227#define tcg_gen_rotri_reg tcg_gen_rotri_i32
228#define tcg_gen_deposit_reg tcg_gen_deposit_i32
229#define tcg_gen_deposit_z_reg tcg_gen_deposit_z_i32
230#define tcg_gen_extract_reg tcg_gen_extract_i32
231#define tcg_gen_sextract_reg tcg_gen_sextract_i32
05bfd4db 232#define tcg_gen_extract2_reg tcg_gen_extract2_i32
29dd6f64 233#define tcg_constant_reg tcg_constant_i32
eaa3783b
RH
234#define tcg_gen_movcond_reg tcg_gen_movcond_i32
235#define tcg_gen_add2_reg tcg_gen_add2_i32
236#define tcg_gen_sub2_reg tcg_gen_sub2_i32
237#define tcg_gen_qemu_ld_reg tcg_gen_qemu_ld_i32
238#define tcg_gen_qemu_st_reg tcg_gen_qemu_st_i32
239#define tcg_gen_atomic_xchg_reg tcg_gen_atomic_xchg_i32
5bfa8034 240#define tcg_gen_trunc_reg_ptr tcg_gen_ext_i32_ptr
eaa3783b
RH
241#endif /* TARGET_REGISTER_BITS */
242
61766fe9
RH
243typedef struct DisasCond {
244 TCGCond c;
eaa3783b 245 TCGv_reg a0, a1;
61766fe9
RH
246} DisasCond;
247
248typedef struct DisasContext {
d01a3625 249 DisasContextBase base;
61766fe9
RH
250 CPUState *cs;
251
eaa3783b
RH
252 target_ureg iaoq_f;
253 target_ureg iaoq_b;
254 target_ureg iaoq_n;
255 TCGv_reg iaoq_n_var;
61766fe9 256
61766fe9
RH
257 DisasCond null_cond;
258 TCGLabel *null_lab;
259
1a19da0d 260 uint32_t insn;
494737b7 261 uint32_t tb_flags;
3d68ee7b
RH
262 int mmu_idx;
263 int privilege;
61766fe9 264 bool psw_n_nonzero;
217d1a5e
RH
265
266#ifdef CONFIG_USER_ONLY
267 MemOp unalign;
268#endif
61766fe9
RH
269} DisasContext;
270
217d1a5e
RH
271#ifdef CONFIG_USER_ONLY
272#define UNALIGN(C) (C)->unalign
273#else
2d4afb03 274#define UNALIGN(C) MO_ALIGN
217d1a5e
RH
275#endif
276
e36f27ef 277/* Note that ssm/rsm instructions number PSW_W and PSW_E differently. */
451e4ffd 278static int expand_sm_imm(DisasContext *ctx, int val)
e36f27ef
RH
279{
280 if (val & PSW_SM_E) {
281 val = (val & ~PSW_SM_E) | PSW_E;
282 }
283 if (val & PSW_SM_W) {
284 val = (val & ~PSW_SM_W) | PSW_W;
285 }
286 return val;
287}
288
deee69a1 289/* Inverted space register indicates 0 means sr0 not inferred from base. */
451e4ffd 290static int expand_sr3x(DisasContext *ctx, int val)
deee69a1
RH
291{
292 return ~val;
293}
294
1cd012a5
RH
295/* Convert the M:A bits within a memory insn to the tri-state value
296 we use for the final M. */
451e4ffd 297static int ma_to_m(DisasContext *ctx, int val)
1cd012a5
RH
298{
299 return val & 2 ? (val & 1 ? -1 : 1) : 0;
300}
301
740038d7 302/* Convert the sign of the displacement to a pre or post-modify. */
451e4ffd 303static int pos_to_m(DisasContext *ctx, int val)
740038d7
RH
304{
305 return val ? 1 : -1;
306}
307
451e4ffd 308static int neg_to_m(DisasContext *ctx, int val)
740038d7
RH
309{
310 return val ? -1 : 1;
311}
312
313/* Used for branch targets and fp memory ops. */
451e4ffd 314static int expand_shl2(DisasContext *ctx, int val)
01afb7be
RH
315{
316 return val << 2;
317}
318
740038d7 319/* Used for fp memory ops. */
451e4ffd 320static int expand_shl3(DisasContext *ctx, int val)
740038d7
RH
321{
322 return val << 3;
323}
324
0588e061 325/* Used for assemble_21. */
451e4ffd 326static int expand_shl11(DisasContext *ctx, int val)
0588e061
RH
327{
328 return val << 11;
329}
330
01afb7be 331
40f9f908 332/* Include the auto-generated decoder. */
abff1abf 333#include "decode-insns.c.inc"
40f9f908 334
869051ea
RH
335/* We are not using a goto_tb (for whatever reason), but have updated
336 the iaq (for whatever reason), so don't do it again on exit. */
337#define DISAS_IAQ_N_UPDATED DISAS_TARGET_0
61766fe9 338
869051ea
RH
339/* We are exiting the TB, but have neither emitted a goto_tb, nor
340 updated the iaq for the next instruction to be executed. */
341#define DISAS_IAQ_N_STALE DISAS_TARGET_1
61766fe9 342
e1b5a5ed
RH
343/* Similarly, but we want to return to the main loop immediately
344 to recognize unmasked interrupts. */
345#define DISAS_IAQ_N_STALE_EXIT DISAS_TARGET_2
c5d0aec2 346#define DISAS_EXIT DISAS_TARGET_3
e1b5a5ed 347
61766fe9 348/* global register indexes */
eaa3783b 349static TCGv_reg cpu_gr[32];
33423472 350static TCGv_i64 cpu_sr[4];
494737b7 351static TCGv_i64 cpu_srH;
eaa3783b
RH
352static TCGv_reg cpu_iaoq_f;
353static TCGv_reg cpu_iaoq_b;
c301f34e
RH
354static TCGv_i64 cpu_iasq_f;
355static TCGv_i64 cpu_iasq_b;
eaa3783b
RH
356static TCGv_reg cpu_sar;
357static TCGv_reg cpu_psw_n;
358static TCGv_reg cpu_psw_v;
359static TCGv_reg cpu_psw_cb;
360static TCGv_reg cpu_psw_cb_msb;
61766fe9 361
61766fe9
RH
362void hppa_translate_init(void)
363{
364#define DEF_VAR(V) { &cpu_##V, #V, offsetof(CPUHPPAState, V) }
365
eaa3783b 366 typedef struct { TCGv_reg *var; const char *name; int ofs; } GlobalVar;
61766fe9 367 static const GlobalVar vars[] = {
35136a77 368 { &cpu_sar, "sar", offsetof(CPUHPPAState, cr[CR_SAR]) },
61766fe9
RH
369 DEF_VAR(psw_n),
370 DEF_VAR(psw_v),
371 DEF_VAR(psw_cb),
372 DEF_VAR(psw_cb_msb),
373 DEF_VAR(iaoq_f),
374 DEF_VAR(iaoq_b),
375 };
376
377#undef DEF_VAR
378
379 /* Use the symbolic register names that match the disassembler. */
380 static const char gr_names[32][4] = {
381 "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
382 "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15",
383 "r16", "r17", "r18", "r19", "r20", "r21", "r22", "r23",
384 "r24", "r25", "r26", "r27", "r28", "r29", "r30", "r31"
385 };
33423472 386 /* SR[4-7] are not global registers so that we can index them. */
494737b7
RH
387 static const char sr_names[5][4] = {
388 "sr0", "sr1", "sr2", "sr3", "srH"
33423472 389 };
61766fe9 390
61766fe9
RH
391 int i;
392
f764718d 393 cpu_gr[0] = NULL;
61766fe9 394 for (i = 1; i < 32; i++) {
ad75a51e 395 cpu_gr[i] = tcg_global_mem_new(tcg_env,
61766fe9
RH
396 offsetof(CPUHPPAState, gr[i]),
397 gr_names[i]);
398 }
33423472 399 for (i = 0; i < 4; i++) {
ad75a51e 400 cpu_sr[i] = tcg_global_mem_new_i64(tcg_env,
33423472
RH
401 offsetof(CPUHPPAState, sr[i]),
402 sr_names[i]);
403 }
ad75a51e 404 cpu_srH = tcg_global_mem_new_i64(tcg_env,
494737b7
RH
405 offsetof(CPUHPPAState, sr[4]),
406 sr_names[4]);
61766fe9
RH
407
408 for (i = 0; i < ARRAY_SIZE(vars); ++i) {
409 const GlobalVar *v = &vars[i];
ad75a51e 410 *v->var = tcg_global_mem_new(tcg_env, v->ofs, v->name);
61766fe9 411 }
c301f34e 412
ad75a51e 413 cpu_iasq_f = tcg_global_mem_new_i64(tcg_env,
c301f34e
RH
414 offsetof(CPUHPPAState, iasq_f),
415 "iasq_f");
ad75a51e 416 cpu_iasq_b = tcg_global_mem_new_i64(tcg_env,
c301f34e
RH
417 offsetof(CPUHPPAState, iasq_b),
418 "iasq_b");
61766fe9
RH
419}
420
129e9cc3
RH
421static DisasCond cond_make_f(void)
422{
f764718d
RH
423 return (DisasCond){
424 .c = TCG_COND_NEVER,
425 .a0 = NULL,
426 .a1 = NULL,
427 };
129e9cc3
RH
428}
429
df0232fe
RH
430static DisasCond cond_make_t(void)
431{
432 return (DisasCond){
433 .c = TCG_COND_ALWAYS,
434 .a0 = NULL,
435 .a1 = NULL,
436 };
437}
438
129e9cc3
RH
439static DisasCond cond_make_n(void)
440{
f764718d
RH
441 return (DisasCond){
442 .c = TCG_COND_NE,
443 .a0 = cpu_psw_n,
6e94937a 444 .a1 = tcg_constant_reg(0)
f764718d 445 };
129e9cc3
RH
446}
447
b47a4a02 448static DisasCond cond_make_0_tmp(TCGCond c, TCGv_reg a0)
129e9cc3 449{
129e9cc3 450 assert (c != TCG_COND_NEVER && c != TCG_COND_ALWAYS);
b47a4a02 451 return (DisasCond){
6e94937a 452 .c = c, .a0 = a0, .a1 = tcg_constant_reg(0)
b47a4a02
SS
453 };
454}
129e9cc3 455
b47a4a02
SS
456static DisasCond cond_make_0(TCGCond c, TCGv_reg a0)
457{
458 TCGv_reg tmp = tcg_temp_new();
459 tcg_gen_mov_reg(tmp, a0);
460 return cond_make_0_tmp(c, tmp);
129e9cc3
RH
461}
462
eaa3783b 463static DisasCond cond_make(TCGCond c, TCGv_reg a0, TCGv_reg a1)
129e9cc3
RH
464{
465 DisasCond r = { .c = c };
466
467 assert (c != TCG_COND_NEVER && c != TCG_COND_ALWAYS);
468 r.a0 = tcg_temp_new();
eaa3783b 469 tcg_gen_mov_reg(r.a0, a0);
129e9cc3 470 r.a1 = tcg_temp_new();
eaa3783b 471 tcg_gen_mov_reg(r.a1, a1);
129e9cc3
RH
472
473 return r;
474}
475
129e9cc3
RH
476static void cond_free(DisasCond *cond)
477{
478 switch (cond->c) {
479 default:
f764718d
RH
480 cond->a0 = NULL;
481 cond->a1 = NULL;
129e9cc3
RH
482 /* fallthru */
483 case TCG_COND_ALWAYS:
484 cond->c = TCG_COND_NEVER;
485 break;
486 case TCG_COND_NEVER:
487 break;
488 }
489}
490
eaa3783b 491static TCGv_reg load_gpr(DisasContext *ctx, unsigned reg)
61766fe9
RH
492{
493 if (reg == 0) {
e12c6309 494 TCGv_reg t = tcg_temp_new();
eaa3783b 495 tcg_gen_movi_reg(t, 0);
61766fe9
RH
496 return t;
497 } else {
498 return cpu_gr[reg];
499 }
500}
501
eaa3783b 502static TCGv_reg dest_gpr(DisasContext *ctx, unsigned reg)
61766fe9 503{
129e9cc3 504 if (reg == 0 || ctx->null_cond.c != TCG_COND_NEVER) {
e12c6309 505 return tcg_temp_new();
61766fe9
RH
506 } else {
507 return cpu_gr[reg];
508 }
509}
510
eaa3783b 511static void save_or_nullify(DisasContext *ctx, TCGv_reg dest, TCGv_reg t)
129e9cc3
RH
512{
513 if (ctx->null_cond.c != TCG_COND_NEVER) {
eaa3783b 514 tcg_gen_movcond_reg(ctx->null_cond.c, dest, ctx->null_cond.a0,
6e94937a 515 ctx->null_cond.a1, dest, t);
129e9cc3 516 } else {
eaa3783b 517 tcg_gen_mov_reg(dest, t);
129e9cc3
RH
518 }
519}
520
eaa3783b 521static void save_gpr(DisasContext *ctx, unsigned reg, TCGv_reg t)
129e9cc3
RH
522{
523 if (reg != 0) {
524 save_or_nullify(ctx, cpu_gr[reg], t);
525 }
526}
527
e03b5686 528#if HOST_BIG_ENDIAN
96d6407f
RH
529# define HI_OFS 0
530# define LO_OFS 4
531#else
532# define HI_OFS 4
533# define LO_OFS 0
534#endif
535
536static TCGv_i32 load_frw_i32(unsigned rt)
537{
538 TCGv_i32 ret = tcg_temp_new_i32();
ad75a51e 539 tcg_gen_ld_i32(ret, tcg_env,
96d6407f
RH
540 offsetof(CPUHPPAState, fr[rt & 31])
541 + (rt & 32 ? LO_OFS : HI_OFS));
542 return ret;
543}
544
ebe9383c
RH
545static TCGv_i32 load_frw0_i32(unsigned rt)
546{
547 if (rt == 0) {
0992a930
RH
548 TCGv_i32 ret = tcg_temp_new_i32();
549 tcg_gen_movi_i32(ret, 0);
550 return ret;
ebe9383c
RH
551 } else {
552 return load_frw_i32(rt);
553 }
554}
555
556static TCGv_i64 load_frw0_i64(unsigned rt)
557{
0992a930 558 TCGv_i64 ret = tcg_temp_new_i64();
ebe9383c 559 if (rt == 0) {
0992a930 560 tcg_gen_movi_i64(ret, 0);
ebe9383c 561 } else {
ad75a51e 562 tcg_gen_ld32u_i64(ret, tcg_env,
ebe9383c
RH
563 offsetof(CPUHPPAState, fr[rt & 31])
564 + (rt & 32 ? LO_OFS : HI_OFS));
ebe9383c 565 }
0992a930 566 return ret;
ebe9383c
RH
567}
568
96d6407f
RH
569static void save_frw_i32(unsigned rt, TCGv_i32 val)
570{
ad75a51e 571 tcg_gen_st_i32(val, tcg_env,
96d6407f
RH
572 offsetof(CPUHPPAState, fr[rt & 31])
573 + (rt & 32 ? LO_OFS : HI_OFS));
574}
575
576#undef HI_OFS
577#undef LO_OFS
578
579static TCGv_i64 load_frd(unsigned rt)
580{
581 TCGv_i64 ret = tcg_temp_new_i64();
ad75a51e 582 tcg_gen_ld_i64(ret, tcg_env, offsetof(CPUHPPAState, fr[rt]));
96d6407f
RH
583 return ret;
584}
585
ebe9383c
RH
586static TCGv_i64 load_frd0(unsigned rt)
587{
588 if (rt == 0) {
0992a930
RH
589 TCGv_i64 ret = tcg_temp_new_i64();
590 tcg_gen_movi_i64(ret, 0);
591 return ret;
ebe9383c
RH
592 } else {
593 return load_frd(rt);
594 }
595}
596
96d6407f
RH
597static void save_frd(unsigned rt, TCGv_i64 val)
598{
ad75a51e 599 tcg_gen_st_i64(val, tcg_env, offsetof(CPUHPPAState, fr[rt]));
96d6407f
RH
600}
601
33423472
RH
602static void load_spr(DisasContext *ctx, TCGv_i64 dest, unsigned reg)
603{
604#ifdef CONFIG_USER_ONLY
605 tcg_gen_movi_i64(dest, 0);
606#else
607 if (reg < 4) {
608 tcg_gen_mov_i64(dest, cpu_sr[reg]);
494737b7
RH
609 } else if (ctx->tb_flags & TB_FLAG_SR_SAME) {
610 tcg_gen_mov_i64(dest, cpu_srH);
33423472 611 } else {
ad75a51e 612 tcg_gen_ld_i64(dest, tcg_env, offsetof(CPUHPPAState, sr[reg]));
33423472
RH
613 }
614#endif
615}
616
129e9cc3
RH
617/* Skip over the implementation of an insn that has been nullified.
618 Use this when the insn is too complex for a conditional move. */
619static void nullify_over(DisasContext *ctx)
620{
621 if (ctx->null_cond.c != TCG_COND_NEVER) {
622 /* The always condition should have been handled in the main loop. */
623 assert(ctx->null_cond.c != TCG_COND_ALWAYS);
624
625 ctx->null_lab = gen_new_label();
129e9cc3
RH
626
627 /* If we're using PSW[N], copy it to a temp because... */
6e94937a 628 if (ctx->null_cond.a0 == cpu_psw_n) {
129e9cc3 629 ctx->null_cond.a0 = tcg_temp_new();
eaa3783b 630 tcg_gen_mov_reg(ctx->null_cond.a0, cpu_psw_n);
129e9cc3
RH
631 }
632 /* ... we clear it before branching over the implementation,
633 so that (1) it's clear after nullifying this insn and
634 (2) if this insn nullifies the next, PSW[N] is valid. */
635 if (ctx->psw_n_nonzero) {
636 ctx->psw_n_nonzero = false;
eaa3783b 637 tcg_gen_movi_reg(cpu_psw_n, 0);
129e9cc3
RH
638 }
639
eaa3783b 640 tcg_gen_brcond_reg(ctx->null_cond.c, ctx->null_cond.a0,
6e94937a 641 ctx->null_cond.a1, ctx->null_lab);
129e9cc3
RH
642 cond_free(&ctx->null_cond);
643 }
644}
645
646/* Save the current nullification state to PSW[N]. */
647static void nullify_save(DisasContext *ctx)
648{
649 if (ctx->null_cond.c == TCG_COND_NEVER) {
650 if (ctx->psw_n_nonzero) {
eaa3783b 651 tcg_gen_movi_reg(cpu_psw_n, 0);
129e9cc3
RH
652 }
653 return;
654 }
6e94937a 655 if (ctx->null_cond.a0 != cpu_psw_n) {
eaa3783b 656 tcg_gen_setcond_reg(ctx->null_cond.c, cpu_psw_n,
6e94937a 657 ctx->null_cond.a0, ctx->null_cond.a1);
129e9cc3
RH
658 ctx->psw_n_nonzero = true;
659 }
660 cond_free(&ctx->null_cond);
661}
662
663/* Set a PSW[N] to X. The intention is that this is used immediately
664 before a goto_tb/exit_tb, so that there is no fallthru path to other
665 code within the TB. Therefore we do not update psw_n_nonzero. */
666static void nullify_set(DisasContext *ctx, bool x)
667{
668 if (ctx->psw_n_nonzero || x) {
eaa3783b 669 tcg_gen_movi_reg(cpu_psw_n, x);
129e9cc3
RH
670 }
671}
672
673/* Mark the end of an instruction that may have been nullified.
40f9f908
RH
674 This is the pair to nullify_over. Always returns true so that
675 it may be tail-called from a translate function. */
31234768 676static bool nullify_end(DisasContext *ctx)
129e9cc3
RH
677{
678 TCGLabel *null_lab = ctx->null_lab;
31234768 679 DisasJumpType status = ctx->base.is_jmp;
129e9cc3 680
f49b3537
RH
681 /* For NEXT, NORETURN, STALE, we can easily continue (or exit).
682 For UPDATED, we cannot update on the nullified path. */
683 assert(status != DISAS_IAQ_N_UPDATED);
684
129e9cc3
RH
685 if (likely(null_lab == NULL)) {
686 /* The current insn wasn't conditional or handled the condition
687 applied to it without a branch, so the (new) setting of
688 NULL_COND can be applied directly to the next insn. */
31234768 689 return true;
129e9cc3
RH
690 }
691 ctx->null_lab = NULL;
692
693 if (likely(ctx->null_cond.c == TCG_COND_NEVER)) {
694 /* The next instruction will be unconditional,
695 and NULL_COND already reflects that. */
696 gen_set_label(null_lab);
697 } else {
698 /* The insn that we just executed is itself nullifying the next
699 instruction. Store the condition in the PSW[N] global.
700 We asserted PSW[N] = 0 in nullify_over, so that after the
701 label we have the proper value in place. */
702 nullify_save(ctx);
703 gen_set_label(null_lab);
704 ctx->null_cond = cond_make_n();
705 }
869051ea 706 if (status == DISAS_NORETURN) {
31234768 707 ctx->base.is_jmp = DISAS_NEXT;
129e9cc3 708 }
31234768 709 return true;
129e9cc3
RH
710}
711
eaa3783b 712static void copy_iaoq_entry(TCGv_reg dest, target_ureg ival, TCGv_reg vval)
61766fe9
RH
713{
714 if (unlikely(ival == -1)) {
eaa3783b 715 tcg_gen_mov_reg(dest, vval);
61766fe9 716 } else {
eaa3783b 717 tcg_gen_movi_reg(dest, ival);
61766fe9
RH
718 }
719}
720
eaa3783b 721static inline target_ureg iaoq_dest(DisasContext *ctx, target_sreg disp)
61766fe9
RH
722{
723 return ctx->iaoq_f + disp + 8;
724}
725
726static void gen_excp_1(int exception)
727{
ad75a51e 728 gen_helper_excp(tcg_env, tcg_constant_i32(exception));
61766fe9
RH
729}
730
31234768 731static void gen_excp(DisasContext *ctx, int exception)
61766fe9
RH
732{
733 copy_iaoq_entry(cpu_iaoq_f, ctx->iaoq_f, cpu_iaoq_f);
734 copy_iaoq_entry(cpu_iaoq_b, ctx->iaoq_b, cpu_iaoq_b);
129e9cc3 735 nullify_save(ctx);
61766fe9 736 gen_excp_1(exception);
31234768 737 ctx->base.is_jmp = DISAS_NORETURN;
61766fe9
RH
738}
739
31234768 740static bool gen_excp_iir(DisasContext *ctx, int exc)
1a19da0d 741{
31234768 742 nullify_over(ctx);
29dd6f64 743 tcg_gen_st_reg(tcg_constant_reg(ctx->insn),
ad75a51e 744 tcg_env, offsetof(CPUHPPAState, cr[CR_IIR]));
31234768
RH
745 gen_excp(ctx, exc);
746 return nullify_end(ctx);
1a19da0d
RH
747}
748
31234768 749static bool gen_illegal(DisasContext *ctx)
61766fe9 750{
31234768 751 return gen_excp_iir(ctx, EXCP_ILL);
61766fe9
RH
752}
753
40f9f908
RH
754#ifdef CONFIG_USER_ONLY
755#define CHECK_MOST_PRIVILEGED(EXCP) \
756 return gen_excp_iir(ctx, EXCP)
757#else
758#define CHECK_MOST_PRIVILEGED(EXCP) \
31234768
RH
759 do { \
760 if (ctx->privilege != 0) { \
761 return gen_excp_iir(ctx, EXCP); \
762 } \
e1b5a5ed 763 } while (0)
40f9f908 764#endif
e1b5a5ed 765
eaa3783b 766static bool use_goto_tb(DisasContext *ctx, target_ureg dest)
61766fe9 767{
57f91498 768 return translator_use_goto_tb(&ctx->base, dest);
61766fe9
RH
769}
770
129e9cc3
RH
771/* If the next insn is to be nullified, and it's on the same page,
772 and we're not attempting to set a breakpoint on it, then we can
773 totally skip the nullified insn. This avoids creating and
774 executing a TB that merely branches to the next TB. */
775static bool use_nullify_skip(DisasContext *ctx)
776{
777 return (((ctx->iaoq_b ^ ctx->iaoq_f) & TARGET_PAGE_MASK) == 0
778 && !cpu_breakpoint_test(ctx->cs, ctx->iaoq_b, BP_ANY));
779}
780
61766fe9 781static void gen_goto_tb(DisasContext *ctx, int which,
eaa3783b 782 target_ureg f, target_ureg b)
61766fe9
RH
783{
784 if (f != -1 && b != -1 && use_goto_tb(ctx, f)) {
785 tcg_gen_goto_tb(which);
eaa3783b
RH
786 tcg_gen_movi_reg(cpu_iaoq_f, f);
787 tcg_gen_movi_reg(cpu_iaoq_b, b);
07ea28b4 788 tcg_gen_exit_tb(ctx->base.tb, which);
61766fe9
RH
789 } else {
790 copy_iaoq_entry(cpu_iaoq_f, f, cpu_iaoq_b);
791 copy_iaoq_entry(cpu_iaoq_b, b, ctx->iaoq_n_var);
8532a14e 792 tcg_gen_lookup_and_goto_ptr();
61766fe9
RH
793 }
794}
795
b47a4a02
SS
796static bool cond_need_sv(int c)
797{
798 return c == 2 || c == 3 || c == 6;
799}
800
801static bool cond_need_cb(int c)
802{
803 return c == 4 || c == 5;
804}
805
72ca8753
RH
806/* Need extensions from TCGv_i32 to TCGv_reg. */
807static bool cond_need_ext(DisasContext *ctx, bool d)
808{
809 return TARGET_REGISTER_BITS == 64 && !d;
810}
811
b47a4a02
SS
812/*
813 * Compute conditional for arithmetic. See Page 5-3, Table 5-1, of
814 * the Parisc 1.1 Architecture Reference Manual for details.
815 */
b2167459 816
eaa3783b
RH
817static DisasCond do_cond(unsigned cf, TCGv_reg res,
818 TCGv_reg cb_msb, TCGv_reg sv)
b2167459
RH
819{
820 DisasCond cond;
eaa3783b 821 TCGv_reg tmp;
b2167459
RH
822
823 switch (cf >> 1) {
b47a4a02 824 case 0: /* Never / TR (0 / 1) */
b2167459
RH
825 cond = cond_make_f();
826 break;
827 case 1: /* = / <> (Z / !Z) */
828 cond = cond_make_0(TCG_COND_EQ, res);
829 break;
b47a4a02
SS
830 case 2: /* < / >= (N ^ V / !(N ^ V) */
831 tmp = tcg_temp_new();
832 tcg_gen_xor_reg(tmp, res, sv);
833 cond = cond_make_0_tmp(TCG_COND_LT, tmp);
b2167459 834 break;
b47a4a02
SS
835 case 3: /* <= / > (N ^ V) | Z / !((N ^ V) | Z) */
836 /*
837 * Simplify:
838 * (N ^ V) | Z
839 * ((res < 0) ^ (sv < 0)) | !res
840 * ((res ^ sv) < 0) | !res
841 * (~(res ^ sv) >= 0) | !res
842 * !(~(res ^ sv) >> 31) | !res
843 * !(~(res ^ sv) >> 31 & res)
844 */
845 tmp = tcg_temp_new();
846 tcg_gen_eqv_reg(tmp, res, sv);
847 tcg_gen_sari_reg(tmp, tmp, TARGET_REGISTER_BITS - 1);
848 tcg_gen_and_reg(tmp, tmp, res);
849 cond = cond_make_0_tmp(TCG_COND_EQ, tmp);
b2167459
RH
850 break;
851 case 4: /* NUV / UV (!C / C) */
852 cond = cond_make_0(TCG_COND_EQ, cb_msb);
853 break;
854 case 5: /* ZNV / VNZ (!C | Z / C & !Z) */
855 tmp = tcg_temp_new();
eaa3783b
RH
856 tcg_gen_neg_reg(tmp, cb_msb);
857 tcg_gen_and_reg(tmp, tmp, res);
b47a4a02 858 cond = cond_make_0_tmp(TCG_COND_EQ, tmp);
b2167459
RH
859 break;
860 case 6: /* SV / NSV (V / !V) */
861 cond = cond_make_0(TCG_COND_LT, sv);
862 break;
863 case 7: /* OD / EV */
864 tmp = tcg_temp_new();
eaa3783b 865 tcg_gen_andi_reg(tmp, res, 1);
b47a4a02 866 cond = cond_make_0_tmp(TCG_COND_NE, tmp);
b2167459
RH
867 break;
868 default:
869 g_assert_not_reached();
870 }
871 if (cf & 1) {
872 cond.c = tcg_invert_cond(cond.c);
873 }
874
875 return cond;
876}
877
878/* Similar, but for the special case of subtraction without borrow, we
879 can use the inputs directly. This can allow other computation to be
880 deleted as unused. */
881
eaa3783b
RH
882static DisasCond do_sub_cond(unsigned cf, TCGv_reg res,
883 TCGv_reg in1, TCGv_reg in2, TCGv_reg sv)
b2167459
RH
884{
885 DisasCond cond;
886
887 switch (cf >> 1) {
888 case 1: /* = / <> */
889 cond = cond_make(TCG_COND_EQ, in1, in2);
890 break;
891 case 2: /* < / >= */
892 cond = cond_make(TCG_COND_LT, in1, in2);
893 break;
894 case 3: /* <= / > */
895 cond = cond_make(TCG_COND_LE, in1, in2);
896 break;
897 case 4: /* << / >>= */
898 cond = cond_make(TCG_COND_LTU, in1, in2);
899 break;
900 case 5: /* <<= / >> */
901 cond = cond_make(TCG_COND_LEU, in1, in2);
902 break;
903 default:
b47a4a02 904 return do_cond(cf, res, NULL, sv);
b2167459
RH
905 }
906 if (cf & 1) {
907 cond.c = tcg_invert_cond(cond.c);
908 }
909
910 return cond;
911}
912
df0232fe
RH
913/*
914 * Similar, but for logicals, where the carry and overflow bits are not
915 * computed, and use of them is undefined.
916 *
917 * Undefined or not, hardware does not trap. It seems reasonable to
918 * assume hardware treats cases c={4,5,6} as if C=0 & V=0, since that's
919 * how cases c={2,3} are treated.
920 */
b2167459 921
eaa3783b 922static DisasCond do_log_cond(unsigned cf, TCGv_reg res)
b2167459 923{
df0232fe
RH
924 switch (cf) {
925 case 0: /* never */
926 case 9: /* undef, C */
927 case 11: /* undef, C & !Z */
928 case 12: /* undef, V */
929 return cond_make_f();
930
931 case 1: /* true */
932 case 8: /* undef, !C */
933 case 10: /* undef, !C | Z */
934 case 13: /* undef, !V */
935 return cond_make_t();
936
937 case 2: /* == */
938 return cond_make_0(TCG_COND_EQ, res);
939 case 3: /* <> */
940 return cond_make_0(TCG_COND_NE, res);
941 case 4: /* < */
942 return cond_make_0(TCG_COND_LT, res);
943 case 5: /* >= */
944 return cond_make_0(TCG_COND_GE, res);
945 case 6: /* <= */
946 return cond_make_0(TCG_COND_LE, res);
947 case 7: /* > */
948 return cond_make_0(TCG_COND_GT, res);
949
950 case 14: /* OD */
951 case 15: /* EV */
952 return do_cond(cf, res, NULL, NULL);
953
954 default:
955 g_assert_not_reached();
b2167459 956 }
b2167459
RH
957}
958
98cd9ca7
RH
959/* Similar, but for shift/extract/deposit conditions. */
960
eaa3783b 961static DisasCond do_sed_cond(unsigned orig, TCGv_reg res)
98cd9ca7
RH
962{
963 unsigned c, f;
964
965 /* Convert the compressed condition codes to standard.
966 0-2 are the same as logicals (nv,<,<=), while 3 is OD.
967 4-7 are the reverse of 0-3. */
968 c = orig & 3;
969 if (c == 3) {
970 c = 7;
971 }
972 f = (orig & 4) / 4;
973
974 return do_log_cond(c * 2 + f, res);
975}
976
b2167459
RH
977/* Similar, but for unit conditions. */
978
eaa3783b
RH
979static DisasCond do_unit_cond(unsigned cf, TCGv_reg res,
980 TCGv_reg in1, TCGv_reg in2)
b2167459
RH
981{
982 DisasCond cond;
eaa3783b 983 TCGv_reg tmp, cb = NULL;
b2167459 984
b2167459
RH
985 if (cf & 8) {
986 /* Since we want to test lots of carry-out bits all at once, do not
987 * do our normal thing and compute carry-in of bit B+1 since that
988 * leaves us with carry bits spread across two words.
989 */
990 cb = tcg_temp_new();
991 tmp = tcg_temp_new();
eaa3783b
RH
992 tcg_gen_or_reg(cb, in1, in2);
993 tcg_gen_and_reg(tmp, in1, in2);
994 tcg_gen_andc_reg(cb, cb, res);
995 tcg_gen_or_reg(cb, cb, tmp);
b2167459
RH
996 }
997
998 switch (cf >> 1) {
999 case 0: /* never / TR */
1000 case 1: /* undefined */
1001 case 5: /* undefined */
1002 cond = cond_make_f();
1003 break;
1004
1005 case 2: /* SBZ / NBZ */
1006 /* See hasless(v,1) from
1007 * https://graphics.stanford.edu/~seander/bithacks.html#ZeroInWord
1008 */
1009 tmp = tcg_temp_new();
eaa3783b
RH
1010 tcg_gen_subi_reg(tmp, res, 0x01010101u);
1011 tcg_gen_andc_reg(tmp, tmp, res);
1012 tcg_gen_andi_reg(tmp, tmp, 0x80808080u);
b2167459 1013 cond = cond_make_0(TCG_COND_NE, tmp);
b2167459
RH
1014 break;
1015
1016 case 3: /* SHZ / NHZ */
1017 tmp = tcg_temp_new();
eaa3783b
RH
1018 tcg_gen_subi_reg(tmp, res, 0x00010001u);
1019 tcg_gen_andc_reg(tmp, tmp, res);
1020 tcg_gen_andi_reg(tmp, tmp, 0x80008000u);
b2167459 1021 cond = cond_make_0(TCG_COND_NE, tmp);
b2167459
RH
1022 break;
1023
1024 case 4: /* SDC / NDC */
eaa3783b 1025 tcg_gen_andi_reg(cb, cb, 0x88888888u);
b2167459
RH
1026 cond = cond_make_0(TCG_COND_NE, cb);
1027 break;
1028
1029 case 6: /* SBC / NBC */
eaa3783b 1030 tcg_gen_andi_reg(cb, cb, 0x80808080u);
b2167459
RH
1031 cond = cond_make_0(TCG_COND_NE, cb);
1032 break;
1033
1034 case 7: /* SHC / NHC */
eaa3783b 1035 tcg_gen_andi_reg(cb, cb, 0x80008000u);
b2167459
RH
1036 cond = cond_make_0(TCG_COND_NE, cb);
1037 break;
1038
1039 default:
1040 g_assert_not_reached();
1041 }
b2167459
RH
1042 if (cf & 1) {
1043 cond.c = tcg_invert_cond(cond.c);
1044 }
1045
1046 return cond;
1047}
1048
72ca8753
RH
1049static TCGv_reg get_carry(DisasContext *ctx, bool d,
1050 TCGv_reg cb, TCGv_reg cb_msb)
1051{
1052 if (cond_need_ext(ctx, d)) {
1053 TCGv_reg t = tcg_temp_new();
1054 tcg_gen_extract_reg(t, cb, 32, 1);
1055 return t;
1056 }
1057 return cb_msb;
1058}
1059
1060static TCGv_reg get_psw_carry(DisasContext *ctx, bool d)
1061{
1062 return get_carry(ctx, d, cpu_psw_cb, cpu_psw_cb_msb);
1063}
1064
b2167459 1065/* Compute signed overflow for addition. */
eaa3783b
RH
1066static TCGv_reg do_add_sv(DisasContext *ctx, TCGv_reg res,
1067 TCGv_reg in1, TCGv_reg in2)
b2167459 1068{
e12c6309 1069 TCGv_reg sv = tcg_temp_new();
eaa3783b 1070 TCGv_reg tmp = tcg_temp_new();
b2167459 1071
eaa3783b
RH
1072 tcg_gen_xor_reg(sv, res, in1);
1073 tcg_gen_xor_reg(tmp, in1, in2);
1074 tcg_gen_andc_reg(sv, sv, tmp);
b2167459
RH
1075
1076 return sv;
1077}
1078
1079/* Compute signed overflow for subtraction. */
eaa3783b
RH
1080static TCGv_reg do_sub_sv(DisasContext *ctx, TCGv_reg res,
1081 TCGv_reg in1, TCGv_reg in2)
b2167459 1082{
e12c6309 1083 TCGv_reg sv = tcg_temp_new();
eaa3783b 1084 TCGv_reg tmp = tcg_temp_new();
b2167459 1085
eaa3783b
RH
1086 tcg_gen_xor_reg(sv, res, in1);
1087 tcg_gen_xor_reg(tmp, in1, in2);
1088 tcg_gen_and_reg(sv, sv, tmp);
b2167459
RH
1089
1090 return sv;
1091}
1092
31234768
RH
1093static void do_add(DisasContext *ctx, unsigned rt, TCGv_reg in1,
1094 TCGv_reg in2, unsigned shift, bool is_l,
1095 bool is_tsv, bool is_tc, bool is_c, unsigned cf)
b2167459 1096{
bdcccc17 1097 TCGv_reg dest, cb, cb_msb, cb_cond, sv, tmp;
b2167459
RH
1098 unsigned c = cf >> 1;
1099 DisasCond cond;
bdcccc17 1100 bool d = false;
b2167459
RH
1101
1102 dest = tcg_temp_new();
f764718d
RH
1103 cb = NULL;
1104 cb_msb = NULL;
bdcccc17 1105 cb_cond = NULL;
b2167459
RH
1106
1107 if (shift) {
e12c6309 1108 tmp = tcg_temp_new();
eaa3783b 1109 tcg_gen_shli_reg(tmp, in1, shift);
b2167459
RH
1110 in1 = tmp;
1111 }
1112
b47a4a02 1113 if (!is_l || cond_need_cb(c)) {
29dd6f64 1114 TCGv_reg zero = tcg_constant_reg(0);
e12c6309 1115 cb_msb = tcg_temp_new();
bdcccc17
RH
1116 cb = tcg_temp_new();
1117
eaa3783b 1118 tcg_gen_add2_reg(dest, cb_msb, in1, zero, in2, zero);
b2167459 1119 if (is_c) {
bdcccc17
RH
1120 tcg_gen_add2_reg(dest, cb_msb, dest, cb_msb,
1121 get_psw_carry(ctx, d), zero);
b2167459 1122 }
bdcccc17
RH
1123 tcg_gen_xor_reg(cb, in1, in2);
1124 tcg_gen_xor_reg(cb, cb, dest);
1125 if (cond_need_cb(c)) {
1126 cb_cond = get_carry(ctx, d, cb, cb_msb);
b2167459
RH
1127 }
1128 } else {
eaa3783b 1129 tcg_gen_add_reg(dest, in1, in2);
b2167459 1130 if (is_c) {
bdcccc17 1131 tcg_gen_add_reg(dest, dest, get_psw_carry(ctx, d));
b2167459
RH
1132 }
1133 }
1134
1135 /* Compute signed overflow if required. */
f764718d 1136 sv = NULL;
b47a4a02 1137 if (is_tsv || cond_need_sv(c)) {
b2167459
RH
1138 sv = do_add_sv(ctx, dest, in1, in2);
1139 if (is_tsv) {
1140 /* ??? Need to include overflow from shift. */
ad75a51e 1141 gen_helper_tsv(tcg_env, sv);
b2167459
RH
1142 }
1143 }
1144
1145 /* Emit any conditional trap before any writeback. */
bdcccc17 1146 cond = do_cond(cf, dest, cb_cond, sv);
b2167459 1147 if (is_tc) {
b2167459 1148 tmp = tcg_temp_new();
eaa3783b 1149 tcg_gen_setcond_reg(cond.c, tmp, cond.a0, cond.a1);
ad75a51e 1150 gen_helper_tcond(tcg_env, tmp);
b2167459
RH
1151 }
1152
1153 /* Write back the result. */
1154 if (!is_l) {
1155 save_or_nullify(ctx, cpu_psw_cb, cb);
1156 save_or_nullify(ctx, cpu_psw_cb_msb, cb_msb);
1157 }
1158 save_gpr(ctx, rt, dest);
b2167459
RH
1159
1160 /* Install the new nullification. */
1161 cond_free(&ctx->null_cond);
1162 ctx->null_cond = cond;
b2167459
RH
1163}
1164
0c982a28
RH
1165static bool do_add_reg(DisasContext *ctx, arg_rrr_cf_sh *a,
1166 bool is_l, bool is_tsv, bool is_tc, bool is_c)
1167{
1168 TCGv_reg tcg_r1, tcg_r2;
1169
1170 if (a->cf) {
1171 nullify_over(ctx);
1172 }
1173 tcg_r1 = load_gpr(ctx, a->r1);
1174 tcg_r2 = load_gpr(ctx, a->r2);
1175 do_add(ctx, a->t, tcg_r1, tcg_r2, a->sh, is_l, is_tsv, is_tc, is_c, a->cf);
1176 return nullify_end(ctx);
1177}
1178
0588e061
RH
1179static bool do_add_imm(DisasContext *ctx, arg_rri_cf *a,
1180 bool is_tsv, bool is_tc)
1181{
1182 TCGv_reg tcg_im, tcg_r2;
1183
1184 if (a->cf) {
1185 nullify_over(ctx);
1186 }
d4e58033 1187 tcg_im = tcg_constant_reg(a->i);
0588e061
RH
1188 tcg_r2 = load_gpr(ctx, a->r);
1189 do_add(ctx, a->t, tcg_im, tcg_r2, 0, 0, is_tsv, is_tc, 0, a->cf);
1190 return nullify_end(ctx);
1191}
1192
31234768
RH
1193static void do_sub(DisasContext *ctx, unsigned rt, TCGv_reg in1,
1194 TCGv_reg in2, bool is_tsv, bool is_b,
1195 bool is_tc, unsigned cf)
b2167459 1196{
eaa3783b 1197 TCGv_reg dest, sv, cb, cb_msb, zero, tmp;
b2167459
RH
1198 unsigned c = cf >> 1;
1199 DisasCond cond;
bdcccc17 1200 bool d = false;
b2167459
RH
1201
1202 dest = tcg_temp_new();
1203 cb = tcg_temp_new();
1204 cb_msb = tcg_temp_new();
1205
29dd6f64 1206 zero = tcg_constant_reg(0);
b2167459
RH
1207 if (is_b) {
1208 /* DEST,C = IN1 + ~IN2 + C. */
eaa3783b 1209 tcg_gen_not_reg(cb, in2);
bdcccc17 1210 tcg_gen_add2_reg(dest, cb_msb, in1, zero, get_psw_carry(ctx, d), zero);
eaa3783b
RH
1211 tcg_gen_add2_reg(dest, cb_msb, dest, cb_msb, cb, zero);
1212 tcg_gen_xor_reg(cb, cb, in1);
1213 tcg_gen_xor_reg(cb, cb, dest);
b2167459 1214 } else {
bdcccc17
RH
1215 /*
1216 * DEST,C = IN1 + ~IN2 + 1. We can produce the same result in fewer
1217 * operations by seeding the high word with 1 and subtracting.
1218 */
1219 TCGv_reg one = tcg_constant_reg(1);
1220 tcg_gen_sub2_reg(dest, cb_msb, in1, one, in2, zero);
eaa3783b
RH
1221 tcg_gen_eqv_reg(cb, in1, in2);
1222 tcg_gen_xor_reg(cb, cb, dest);
b2167459 1223 }
b2167459
RH
1224
1225 /* Compute signed overflow if required. */
f764718d 1226 sv = NULL;
b47a4a02 1227 if (is_tsv || cond_need_sv(c)) {
b2167459
RH
1228 sv = do_sub_sv(ctx, dest, in1, in2);
1229 if (is_tsv) {
ad75a51e 1230 gen_helper_tsv(tcg_env, sv);
b2167459
RH
1231 }
1232 }
1233
1234 /* Compute the condition. We cannot use the special case for borrow. */
1235 if (!is_b) {
1236 cond = do_sub_cond(cf, dest, in1, in2, sv);
1237 } else {
bdcccc17 1238 cond = do_cond(cf, dest, get_carry(ctx, d, cb, cb_msb), sv);
b2167459
RH
1239 }
1240
1241 /* Emit any conditional trap before any writeback. */
1242 if (is_tc) {
b2167459 1243 tmp = tcg_temp_new();
eaa3783b 1244 tcg_gen_setcond_reg(cond.c, tmp, cond.a0, cond.a1);
ad75a51e 1245 gen_helper_tcond(tcg_env, tmp);
b2167459
RH
1246 }
1247
1248 /* Write back the result. */
1249 save_or_nullify(ctx, cpu_psw_cb, cb);
1250 save_or_nullify(ctx, cpu_psw_cb_msb, cb_msb);
1251 save_gpr(ctx, rt, dest);
b2167459
RH
1252
1253 /* Install the new nullification. */
1254 cond_free(&ctx->null_cond);
1255 ctx->null_cond = cond;
b2167459
RH
1256}
1257
0c982a28
RH
1258static bool do_sub_reg(DisasContext *ctx, arg_rrr_cf *a,
1259 bool is_tsv, bool is_b, bool is_tc)
1260{
1261 TCGv_reg tcg_r1, tcg_r2;
1262
1263 if (a->cf) {
1264 nullify_over(ctx);
1265 }
1266 tcg_r1 = load_gpr(ctx, a->r1);
1267 tcg_r2 = load_gpr(ctx, a->r2);
1268 do_sub(ctx, a->t, tcg_r1, tcg_r2, is_tsv, is_b, is_tc, a->cf);
1269 return nullify_end(ctx);
1270}
1271
0588e061
RH
1272static bool do_sub_imm(DisasContext *ctx, arg_rri_cf *a, bool is_tsv)
1273{
1274 TCGv_reg tcg_im, tcg_r2;
1275
1276 if (a->cf) {
1277 nullify_over(ctx);
1278 }
d4e58033 1279 tcg_im = tcg_constant_reg(a->i);
0588e061
RH
1280 tcg_r2 = load_gpr(ctx, a->r);
1281 do_sub(ctx, a->t, tcg_im, tcg_r2, is_tsv, 0, 0, a->cf);
1282 return nullify_end(ctx);
1283}
1284
31234768
RH
1285static void do_cmpclr(DisasContext *ctx, unsigned rt, TCGv_reg in1,
1286 TCGv_reg in2, unsigned cf)
b2167459 1287{
eaa3783b 1288 TCGv_reg dest, sv;
b2167459
RH
1289 DisasCond cond;
1290
1291 dest = tcg_temp_new();
eaa3783b 1292 tcg_gen_sub_reg(dest, in1, in2);
b2167459
RH
1293
1294 /* Compute signed overflow if required. */
f764718d 1295 sv = NULL;
b47a4a02 1296 if (cond_need_sv(cf >> 1)) {
b2167459
RH
1297 sv = do_sub_sv(ctx, dest, in1, in2);
1298 }
1299
1300 /* Form the condition for the compare. */
1301 cond = do_sub_cond(cf, dest, in1, in2, sv);
1302
1303 /* Clear. */
eaa3783b 1304 tcg_gen_movi_reg(dest, 0);
b2167459 1305 save_gpr(ctx, rt, dest);
b2167459
RH
1306
1307 /* Install the new nullification. */
1308 cond_free(&ctx->null_cond);
1309 ctx->null_cond = cond;
b2167459
RH
1310}
1311
31234768
RH
1312static void do_log(DisasContext *ctx, unsigned rt, TCGv_reg in1,
1313 TCGv_reg in2, unsigned cf,
1314 void (*fn)(TCGv_reg, TCGv_reg, TCGv_reg))
b2167459 1315{
eaa3783b 1316 TCGv_reg dest = dest_gpr(ctx, rt);
b2167459
RH
1317
1318 /* Perform the operation, and writeback. */
1319 fn(dest, in1, in2);
1320 save_gpr(ctx, rt, dest);
1321
1322 /* Install the new nullification. */
1323 cond_free(&ctx->null_cond);
1324 if (cf) {
1325 ctx->null_cond = do_log_cond(cf, dest);
1326 }
b2167459
RH
1327}
1328
0c982a28
RH
1329static bool do_log_reg(DisasContext *ctx, arg_rrr_cf *a,
1330 void (*fn)(TCGv_reg, TCGv_reg, TCGv_reg))
1331{
1332 TCGv_reg tcg_r1, tcg_r2;
1333
1334 if (a->cf) {
1335 nullify_over(ctx);
1336 }
1337 tcg_r1 = load_gpr(ctx, a->r1);
1338 tcg_r2 = load_gpr(ctx, a->r2);
1339 do_log(ctx, a->t, tcg_r1, tcg_r2, a->cf, fn);
1340 return nullify_end(ctx);
1341}
1342
31234768
RH
1343static void do_unit(DisasContext *ctx, unsigned rt, TCGv_reg in1,
1344 TCGv_reg in2, unsigned cf, bool is_tc,
1345 void (*fn)(TCGv_reg, TCGv_reg, TCGv_reg))
b2167459 1346{
eaa3783b 1347 TCGv_reg dest;
b2167459
RH
1348 DisasCond cond;
1349
1350 if (cf == 0) {
1351 dest = dest_gpr(ctx, rt);
1352 fn(dest, in1, in2);
1353 save_gpr(ctx, rt, dest);
1354 cond_free(&ctx->null_cond);
1355 } else {
1356 dest = tcg_temp_new();
1357 fn(dest, in1, in2);
1358
1359 cond = do_unit_cond(cf, dest, in1, in2);
1360
1361 if (is_tc) {
eaa3783b 1362 TCGv_reg tmp = tcg_temp_new();
eaa3783b 1363 tcg_gen_setcond_reg(cond.c, tmp, cond.a0, cond.a1);
ad75a51e 1364 gen_helper_tcond(tcg_env, tmp);
b2167459
RH
1365 }
1366 save_gpr(ctx, rt, dest);
1367
1368 cond_free(&ctx->null_cond);
1369 ctx->null_cond = cond;
1370 }
b2167459
RH
1371}
1372
86f8d05f 1373#ifndef CONFIG_USER_ONLY
8d6ae7fb
RH
1374/* The "normal" usage is SP >= 0, wherein SP == 0 selects the space
1375 from the top 2 bits of the base register. There are a few system
1376 instructions that have a 3-bit space specifier, for which SR0 is
1377 not special. To handle this, pass ~SP. */
86f8d05f
RH
1378static TCGv_i64 space_select(DisasContext *ctx, int sp, TCGv_reg base)
1379{
1380 TCGv_ptr ptr;
1381 TCGv_reg tmp;
1382 TCGv_i64 spc;
1383
1384 if (sp != 0) {
8d6ae7fb
RH
1385 if (sp < 0) {
1386 sp = ~sp;
1387 }
a6779861 1388 spc = tcg_temp_new_tl();
8d6ae7fb
RH
1389 load_spr(ctx, spc, sp);
1390 return spc;
86f8d05f 1391 }
494737b7
RH
1392 if (ctx->tb_flags & TB_FLAG_SR_SAME) {
1393 return cpu_srH;
1394 }
86f8d05f
RH
1395
1396 ptr = tcg_temp_new_ptr();
1397 tmp = tcg_temp_new();
a6779861 1398 spc = tcg_temp_new_tl();
86f8d05f
RH
1399
1400 tcg_gen_shri_reg(tmp, base, TARGET_REGISTER_BITS - 5);
1401 tcg_gen_andi_reg(tmp, tmp, 030);
1402 tcg_gen_trunc_reg_ptr(ptr, tmp);
86f8d05f 1403
ad75a51e 1404 tcg_gen_add_ptr(ptr, ptr, tcg_env);
86f8d05f 1405 tcg_gen_ld_i64(spc, ptr, offsetof(CPUHPPAState, sr[4]));
86f8d05f
RH
1406
1407 return spc;
1408}
1409#endif
1410
1411static void form_gva(DisasContext *ctx, TCGv_tl *pgva, TCGv_reg *pofs,
1412 unsigned rb, unsigned rx, int scale, target_sreg disp,
1413 unsigned sp, int modify, bool is_phys)
1414{
1415 TCGv_reg base = load_gpr(ctx, rb);
1416 TCGv_reg ofs;
1417
1418 /* Note that RX is mutually exclusive with DISP. */
1419 if (rx) {
e12c6309 1420 ofs = tcg_temp_new();
86f8d05f
RH
1421 tcg_gen_shli_reg(ofs, cpu_gr[rx], scale);
1422 tcg_gen_add_reg(ofs, ofs, base);
1423 } else if (disp || modify) {
e12c6309 1424 ofs = tcg_temp_new();
86f8d05f
RH
1425 tcg_gen_addi_reg(ofs, base, disp);
1426 } else {
1427 ofs = base;
1428 }
1429
1430 *pofs = ofs;
1431#ifdef CONFIG_USER_ONLY
1432 *pgva = (modify <= 0 ? ofs : base);
1433#else
a6779861 1434 TCGv_tl addr = tcg_temp_new_tl();
86f8d05f 1435 tcg_gen_extu_reg_tl(addr, modify <= 0 ? ofs : base);
494737b7 1436 if (ctx->tb_flags & PSW_W) {
86f8d05f
RH
1437 tcg_gen_andi_tl(addr, addr, 0x3fffffffffffffffull);
1438 }
1439 if (!is_phys) {
1440 tcg_gen_or_tl(addr, addr, space_select(ctx, sp, base));
1441 }
1442 *pgva = addr;
1443#endif
1444}
1445
96d6407f
RH
1446/* Emit a memory load. The modify parameter should be
1447 * < 0 for pre-modify,
1448 * > 0 for post-modify,
1449 * = 0 for no base register update.
1450 */
1451static void do_load_32(DisasContext *ctx, TCGv_i32 dest, unsigned rb,
eaa3783b 1452 unsigned rx, int scale, target_sreg disp,
14776ab5 1453 unsigned sp, int modify, MemOp mop)
96d6407f 1454{
86f8d05f
RH
1455 TCGv_reg ofs;
1456 TCGv_tl addr;
96d6407f
RH
1457
1458 /* Caller uses nullify_over/nullify_end. */
1459 assert(ctx->null_cond.c == TCG_COND_NEVER);
1460
86f8d05f
RH
1461 form_gva(ctx, &addr, &ofs, rb, rx, scale, disp, sp, modify,
1462 ctx->mmu_idx == MMU_PHYS_IDX);
c1f55d97 1463 tcg_gen_qemu_ld_i32(dest, addr, ctx->mmu_idx, mop | UNALIGN(ctx));
86f8d05f
RH
1464 if (modify) {
1465 save_gpr(ctx, rb, ofs);
96d6407f 1466 }
96d6407f
RH
1467}
1468
1469static void do_load_64(DisasContext *ctx, TCGv_i64 dest, unsigned rb,
eaa3783b 1470 unsigned rx, int scale, target_sreg disp,
14776ab5 1471 unsigned sp, int modify, MemOp mop)
96d6407f 1472{
86f8d05f
RH
1473 TCGv_reg ofs;
1474 TCGv_tl addr;
96d6407f
RH
1475
1476 /* Caller uses nullify_over/nullify_end. */
1477 assert(ctx->null_cond.c == TCG_COND_NEVER);
1478
86f8d05f
RH
1479 form_gva(ctx, &addr, &ofs, rb, rx, scale, disp, sp, modify,
1480 ctx->mmu_idx == MMU_PHYS_IDX);
217d1a5e 1481 tcg_gen_qemu_ld_i64(dest, addr, ctx->mmu_idx, mop | UNALIGN(ctx));
86f8d05f
RH
1482 if (modify) {
1483 save_gpr(ctx, rb, ofs);
96d6407f 1484 }
96d6407f
RH
1485}
1486
1487static void do_store_32(DisasContext *ctx, TCGv_i32 src, unsigned rb,
eaa3783b 1488 unsigned rx, int scale, target_sreg disp,
14776ab5 1489 unsigned sp, int modify, MemOp mop)
96d6407f 1490{
86f8d05f
RH
1491 TCGv_reg ofs;
1492 TCGv_tl addr;
96d6407f
RH
1493
1494 /* Caller uses nullify_over/nullify_end. */
1495 assert(ctx->null_cond.c == TCG_COND_NEVER);
1496
86f8d05f
RH
1497 form_gva(ctx, &addr, &ofs, rb, rx, scale, disp, sp, modify,
1498 ctx->mmu_idx == MMU_PHYS_IDX);
217d1a5e 1499 tcg_gen_qemu_st_i32(src, addr, ctx->mmu_idx, mop | UNALIGN(ctx));
86f8d05f
RH
1500 if (modify) {
1501 save_gpr(ctx, rb, ofs);
96d6407f 1502 }
96d6407f
RH
1503}
1504
1505static void do_store_64(DisasContext *ctx, TCGv_i64 src, unsigned rb,
eaa3783b 1506 unsigned rx, int scale, target_sreg disp,
14776ab5 1507 unsigned sp, int modify, MemOp mop)
96d6407f 1508{
86f8d05f
RH
1509 TCGv_reg ofs;
1510 TCGv_tl addr;
96d6407f
RH
1511
1512 /* Caller uses nullify_over/nullify_end. */
1513 assert(ctx->null_cond.c == TCG_COND_NEVER);
1514
86f8d05f
RH
1515 form_gva(ctx, &addr, &ofs, rb, rx, scale, disp, sp, modify,
1516 ctx->mmu_idx == MMU_PHYS_IDX);
217d1a5e 1517 tcg_gen_qemu_st_i64(src, addr, ctx->mmu_idx, mop | UNALIGN(ctx));
86f8d05f
RH
1518 if (modify) {
1519 save_gpr(ctx, rb, ofs);
96d6407f 1520 }
96d6407f
RH
1521}
1522
eaa3783b
RH
1523#if TARGET_REGISTER_BITS == 64
1524#define do_load_reg do_load_64
1525#define do_store_reg do_store_64
96d6407f 1526#else
eaa3783b
RH
1527#define do_load_reg do_load_32
1528#define do_store_reg do_store_32
96d6407f
RH
1529#endif
1530
1cd012a5 1531static bool do_load(DisasContext *ctx, unsigned rt, unsigned rb,
31234768 1532 unsigned rx, int scale, target_sreg disp,
14776ab5 1533 unsigned sp, int modify, MemOp mop)
96d6407f 1534{
eaa3783b 1535 TCGv_reg dest;
96d6407f
RH
1536
1537 nullify_over(ctx);
1538
1539 if (modify == 0) {
1540 /* No base register update. */
1541 dest = dest_gpr(ctx, rt);
1542 } else {
1543 /* Make sure if RT == RB, we see the result of the load. */
e12c6309 1544 dest = tcg_temp_new();
96d6407f 1545 }
86f8d05f 1546 do_load_reg(ctx, dest, rb, rx, scale, disp, sp, modify, mop);
96d6407f
RH
1547 save_gpr(ctx, rt, dest);
1548
1cd012a5 1549 return nullify_end(ctx);
96d6407f
RH
1550}
1551
740038d7 1552static bool do_floadw(DisasContext *ctx, unsigned rt, unsigned rb,
31234768
RH
1553 unsigned rx, int scale, target_sreg disp,
1554 unsigned sp, int modify)
96d6407f
RH
1555{
1556 TCGv_i32 tmp;
1557
1558 nullify_over(ctx);
1559
1560 tmp = tcg_temp_new_i32();
86f8d05f 1561 do_load_32(ctx, tmp, rb, rx, scale, disp, sp, modify, MO_TEUL);
96d6407f 1562 save_frw_i32(rt, tmp);
96d6407f
RH
1563
1564 if (rt == 0) {
ad75a51e 1565 gen_helper_loaded_fr0(tcg_env);
96d6407f
RH
1566 }
1567
740038d7
RH
1568 return nullify_end(ctx);
1569}
1570
1571static bool trans_fldw(DisasContext *ctx, arg_ldst *a)
1572{
1573 return do_floadw(ctx, a->t, a->b, a->x, a->scale ? 2 : 0,
1574 a->disp, a->sp, a->m);
96d6407f
RH
1575}
1576
740038d7 1577static bool do_floadd(DisasContext *ctx, unsigned rt, unsigned rb,
31234768
RH
1578 unsigned rx, int scale, target_sreg disp,
1579 unsigned sp, int modify)
96d6407f
RH
1580{
1581 TCGv_i64 tmp;
1582
1583 nullify_over(ctx);
1584
1585 tmp = tcg_temp_new_i64();
fc313c64 1586 do_load_64(ctx, tmp, rb, rx, scale, disp, sp, modify, MO_TEUQ);
96d6407f 1587 save_frd(rt, tmp);
96d6407f
RH
1588
1589 if (rt == 0) {
ad75a51e 1590 gen_helper_loaded_fr0(tcg_env);
96d6407f
RH
1591 }
1592
740038d7
RH
1593 return nullify_end(ctx);
1594}
1595
1596static bool trans_fldd(DisasContext *ctx, arg_ldst *a)
1597{
1598 return do_floadd(ctx, a->t, a->b, a->x, a->scale ? 3 : 0,
1599 a->disp, a->sp, a->m);
96d6407f
RH
1600}
1601
1cd012a5 1602static bool do_store(DisasContext *ctx, unsigned rt, unsigned rb,
31234768 1603 target_sreg disp, unsigned sp,
14776ab5 1604 int modify, MemOp mop)
96d6407f
RH
1605{
1606 nullify_over(ctx);
86f8d05f 1607 do_store_reg(ctx, load_gpr(ctx, rt), rb, 0, 0, disp, sp, modify, mop);
1cd012a5 1608 return nullify_end(ctx);
96d6407f
RH
1609}
1610
740038d7 1611static bool do_fstorew(DisasContext *ctx, unsigned rt, unsigned rb,
31234768
RH
1612 unsigned rx, int scale, target_sreg disp,
1613 unsigned sp, int modify)
96d6407f
RH
1614{
1615 TCGv_i32 tmp;
1616
1617 nullify_over(ctx);
1618
1619 tmp = load_frw_i32(rt);
86f8d05f 1620 do_store_32(ctx, tmp, rb, rx, scale, disp, sp, modify, MO_TEUL);
96d6407f 1621
740038d7
RH
1622 return nullify_end(ctx);
1623}
1624
1625static bool trans_fstw(DisasContext *ctx, arg_ldst *a)
1626{
1627 return do_fstorew(ctx, a->t, a->b, a->x, a->scale ? 2 : 0,
1628 a->disp, a->sp, a->m);
96d6407f
RH
1629}
1630
740038d7 1631static bool do_fstored(DisasContext *ctx, unsigned rt, unsigned rb,
31234768
RH
1632 unsigned rx, int scale, target_sreg disp,
1633 unsigned sp, int modify)
96d6407f
RH
1634{
1635 TCGv_i64 tmp;
1636
1637 nullify_over(ctx);
1638
1639 tmp = load_frd(rt);
fc313c64 1640 do_store_64(ctx, tmp, rb, rx, scale, disp, sp, modify, MO_TEUQ);
96d6407f 1641
740038d7
RH
1642 return nullify_end(ctx);
1643}
1644
1645static bool trans_fstd(DisasContext *ctx, arg_ldst *a)
1646{
1647 return do_fstored(ctx, a->t, a->b, a->x, a->scale ? 3 : 0,
1648 a->disp, a->sp, a->m);
96d6407f
RH
1649}
1650
1ca74648 1651static bool do_fop_wew(DisasContext *ctx, unsigned rt, unsigned ra,
31234768 1652 void (*func)(TCGv_i32, TCGv_env, TCGv_i32))
ebe9383c
RH
1653{
1654 TCGv_i32 tmp;
1655
1656 nullify_over(ctx);
1657 tmp = load_frw0_i32(ra);
1658
ad75a51e 1659 func(tmp, tcg_env, tmp);
ebe9383c
RH
1660
1661 save_frw_i32(rt, tmp);
1ca74648 1662 return nullify_end(ctx);
ebe9383c
RH
1663}
1664
1ca74648 1665static bool do_fop_wed(DisasContext *ctx, unsigned rt, unsigned ra,
31234768 1666 void (*func)(TCGv_i32, TCGv_env, TCGv_i64))
ebe9383c
RH
1667{
1668 TCGv_i32 dst;
1669 TCGv_i64 src;
1670
1671 nullify_over(ctx);
1672 src = load_frd(ra);
1673 dst = tcg_temp_new_i32();
1674
ad75a51e 1675 func(dst, tcg_env, src);
ebe9383c 1676
ebe9383c 1677 save_frw_i32(rt, dst);
1ca74648 1678 return nullify_end(ctx);
ebe9383c
RH
1679}
1680
1ca74648 1681static bool do_fop_ded(DisasContext *ctx, unsigned rt, unsigned ra,
31234768 1682 void (*func)(TCGv_i64, TCGv_env, TCGv_i64))
ebe9383c
RH
1683{
1684 TCGv_i64 tmp;
1685
1686 nullify_over(ctx);
1687 tmp = load_frd0(ra);
1688
ad75a51e 1689 func(tmp, tcg_env, tmp);
ebe9383c
RH
1690
1691 save_frd(rt, tmp);
1ca74648 1692 return nullify_end(ctx);
ebe9383c
RH
1693}
1694
1ca74648 1695static bool do_fop_dew(DisasContext *ctx, unsigned rt, unsigned ra,
31234768 1696 void (*func)(TCGv_i64, TCGv_env, TCGv_i32))
ebe9383c
RH
1697{
1698 TCGv_i32 src;
1699 TCGv_i64 dst;
1700
1701 nullify_over(ctx);
1702 src = load_frw0_i32(ra);
1703 dst = tcg_temp_new_i64();
1704
ad75a51e 1705 func(dst, tcg_env, src);
ebe9383c 1706
ebe9383c 1707 save_frd(rt, dst);
1ca74648 1708 return nullify_end(ctx);
ebe9383c
RH
1709}
1710
1ca74648 1711static bool do_fop_weww(DisasContext *ctx, unsigned rt,
31234768
RH
1712 unsigned ra, unsigned rb,
1713 void (*func)(TCGv_i32, TCGv_env, TCGv_i32, TCGv_i32))
ebe9383c
RH
1714{
1715 TCGv_i32 a, b;
1716
1717 nullify_over(ctx);
1718 a = load_frw0_i32(ra);
1719 b = load_frw0_i32(rb);
1720
ad75a51e 1721 func(a, tcg_env, a, b);
ebe9383c 1722
ebe9383c 1723 save_frw_i32(rt, a);
1ca74648 1724 return nullify_end(ctx);
ebe9383c
RH
1725}
1726
1ca74648 1727static bool do_fop_dedd(DisasContext *ctx, unsigned rt,
31234768
RH
1728 unsigned ra, unsigned rb,
1729 void (*func)(TCGv_i64, TCGv_env, TCGv_i64, TCGv_i64))
ebe9383c
RH
1730{
1731 TCGv_i64 a, b;
1732
1733 nullify_over(ctx);
1734 a = load_frd0(ra);
1735 b = load_frd0(rb);
1736
ad75a51e 1737 func(a, tcg_env, a, b);
ebe9383c 1738
ebe9383c 1739 save_frd(rt, a);
1ca74648 1740 return nullify_end(ctx);
ebe9383c
RH
1741}
1742
98cd9ca7
RH
1743/* Emit an unconditional branch to a direct target, which may or may not
1744 have already had nullification handled. */
01afb7be 1745static bool do_dbranch(DisasContext *ctx, target_ureg dest,
31234768 1746 unsigned link, bool is_n)
98cd9ca7
RH
1747{
1748 if (ctx->null_cond.c == TCG_COND_NEVER && ctx->null_lab == NULL) {
1749 if (link != 0) {
1750 copy_iaoq_entry(cpu_gr[link], ctx->iaoq_n, ctx->iaoq_n_var);
1751 }
1752 ctx->iaoq_n = dest;
1753 if (is_n) {
1754 ctx->null_cond.c = TCG_COND_ALWAYS;
1755 }
98cd9ca7
RH
1756 } else {
1757 nullify_over(ctx);
1758
1759 if (link != 0) {
1760 copy_iaoq_entry(cpu_gr[link], ctx->iaoq_n, ctx->iaoq_n_var);
1761 }
1762
1763 if (is_n && use_nullify_skip(ctx)) {
1764 nullify_set(ctx, 0);
1765 gen_goto_tb(ctx, 0, dest, dest + 4);
1766 } else {
1767 nullify_set(ctx, is_n);
1768 gen_goto_tb(ctx, 0, ctx->iaoq_b, dest);
1769 }
1770
31234768 1771 nullify_end(ctx);
98cd9ca7
RH
1772
1773 nullify_set(ctx, 0);
1774 gen_goto_tb(ctx, 1, ctx->iaoq_b, ctx->iaoq_n);
31234768 1775 ctx->base.is_jmp = DISAS_NORETURN;
98cd9ca7 1776 }
01afb7be 1777 return true;
98cd9ca7
RH
1778}
1779
1780/* Emit a conditional branch to a direct target. If the branch itself
1781 is nullified, we should have already used nullify_over. */
01afb7be 1782static bool do_cbranch(DisasContext *ctx, target_sreg disp, bool is_n,
31234768 1783 DisasCond *cond)
98cd9ca7 1784{
eaa3783b 1785 target_ureg dest = iaoq_dest(ctx, disp);
98cd9ca7
RH
1786 TCGLabel *taken = NULL;
1787 TCGCond c = cond->c;
98cd9ca7
RH
1788 bool n;
1789
1790 assert(ctx->null_cond.c == TCG_COND_NEVER);
1791
1792 /* Handle TRUE and NEVER as direct branches. */
1793 if (c == TCG_COND_ALWAYS) {
01afb7be 1794 return do_dbranch(ctx, dest, 0, is_n && disp >= 0);
98cd9ca7
RH
1795 }
1796 if (c == TCG_COND_NEVER) {
01afb7be 1797 return do_dbranch(ctx, ctx->iaoq_n, 0, is_n && disp < 0);
98cd9ca7
RH
1798 }
1799
1800 taken = gen_new_label();
eaa3783b 1801 tcg_gen_brcond_reg(c, cond->a0, cond->a1, taken);
98cd9ca7
RH
1802 cond_free(cond);
1803
1804 /* Not taken: Condition not satisfied; nullify on backward branches. */
1805 n = is_n && disp < 0;
1806 if (n && use_nullify_skip(ctx)) {
1807 nullify_set(ctx, 0);
a881c8e7 1808 gen_goto_tb(ctx, 0, ctx->iaoq_n, ctx->iaoq_n + 4);
98cd9ca7
RH
1809 } else {
1810 if (!n && ctx->null_lab) {
1811 gen_set_label(ctx->null_lab);
1812 ctx->null_lab = NULL;
1813 }
1814 nullify_set(ctx, n);
c301f34e
RH
1815 if (ctx->iaoq_n == -1) {
1816 /* The temporary iaoq_n_var died at the branch above.
1817 Regenerate it here instead of saving it. */
1818 tcg_gen_addi_reg(ctx->iaoq_n_var, cpu_iaoq_b, 4);
1819 }
a881c8e7 1820 gen_goto_tb(ctx, 0, ctx->iaoq_b, ctx->iaoq_n);
98cd9ca7
RH
1821 }
1822
1823 gen_set_label(taken);
1824
1825 /* Taken: Condition satisfied; nullify on forward branches. */
1826 n = is_n && disp >= 0;
1827 if (n && use_nullify_skip(ctx)) {
1828 nullify_set(ctx, 0);
a881c8e7 1829 gen_goto_tb(ctx, 1, dest, dest + 4);
98cd9ca7
RH
1830 } else {
1831 nullify_set(ctx, n);
a881c8e7 1832 gen_goto_tb(ctx, 1, ctx->iaoq_b, dest);
98cd9ca7
RH
1833 }
1834
1835 /* Not taken: the branch itself was nullified. */
1836 if (ctx->null_lab) {
1837 gen_set_label(ctx->null_lab);
1838 ctx->null_lab = NULL;
31234768 1839 ctx->base.is_jmp = DISAS_IAQ_N_STALE;
98cd9ca7 1840 } else {
31234768 1841 ctx->base.is_jmp = DISAS_NORETURN;
98cd9ca7 1842 }
01afb7be 1843 return true;
98cd9ca7
RH
1844}
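/*
 * A reminder of the architectural rule implemented above: with the ,n
 * completer, a conditional branch nullifies its delay-slot instruction
 * when a forward branch is taken or a backward branch is not taken.
 * Roughly (illustrative guest code, register choices arbitrary):
 *
 *     loop:  ...
 *            addib,<>,n  -1,%r3,loop   ; backward: delay slot runs while looping
 *            ldw         0(%r4),%r5    ; nullified only when the loop falls through
 */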
1845
1846/* Emit an unconditional branch to an indirect target. This handles
1847 nullification of the branch itself. */
01afb7be 1848static bool do_ibranch(DisasContext *ctx, TCGv_reg dest,
31234768 1849 unsigned link, bool is_n)
98cd9ca7 1850{
eaa3783b 1851 TCGv_reg a0, a1, next, tmp;
98cd9ca7
RH
1852 TCGCond c;
1853
1854 assert(ctx->null_lab == NULL);
1855
1856 if (ctx->null_cond.c == TCG_COND_NEVER) {
1857 if (link != 0) {
1858 copy_iaoq_entry(cpu_gr[link], ctx->iaoq_n, ctx->iaoq_n_var);
1859 }
e12c6309 1860 next = tcg_temp_new();
eaa3783b 1861 tcg_gen_mov_reg(next, dest);
98cd9ca7 1862 if (is_n) {
c301f34e
RH
1863 if (use_nullify_skip(ctx)) {
1864 tcg_gen_mov_reg(cpu_iaoq_f, next);
1865 tcg_gen_addi_reg(cpu_iaoq_b, next, 4);
1866 nullify_set(ctx, 0);
31234768 1867 ctx->base.is_jmp = DISAS_IAQ_N_UPDATED;
01afb7be 1868 return true;
c301f34e 1869 }
98cd9ca7
RH
1870 ctx->null_cond.c = TCG_COND_ALWAYS;
1871 }
c301f34e
RH
1872 ctx->iaoq_n = -1;
1873 ctx->iaoq_n_var = next;
98cd9ca7
RH
1874 } else if (is_n && use_nullify_skip(ctx)) {
1875 /* The (conditional) branch, B, nullifies the next insn, N,
1876 and we're allowed to skip execution of N (no single-step or
4137cb83 1877 tracepoint in effect). Since the goto_ptr that we must use
98cd9ca7
RH
1878 for the indirect branch consumes no special resources, we
1879 can (conditionally) skip B and continue execution. */
1880 /* The use_nullify_skip test implies we have a known control path. */
1881 tcg_debug_assert(ctx->iaoq_b != -1);
1882 tcg_debug_assert(ctx->iaoq_n != -1);
1883
1884 /* We do have to handle the non-local temporary, DEST, before
1885 branching. Since IAOQ_F is not really live at this point, we
1886 can simply store DEST optimistically. Similarly with IAOQ_B. */
eaa3783b
RH
1887 tcg_gen_mov_reg(cpu_iaoq_f, dest);
1888 tcg_gen_addi_reg(cpu_iaoq_b, dest, 4);
98cd9ca7
RH
1889
1890 nullify_over(ctx);
1891 if (link != 0) {
eaa3783b 1892 tcg_gen_movi_reg(cpu_gr[link], ctx->iaoq_n);
98cd9ca7 1893 }
7f11636d 1894 tcg_gen_lookup_and_goto_ptr();
01afb7be 1895 return nullify_end(ctx);
98cd9ca7 1896 } else {
98cd9ca7
RH
1897 c = ctx->null_cond.c;
1898 a0 = ctx->null_cond.a0;
1899 a1 = ctx->null_cond.a1;
1900
1901 tmp = tcg_temp_new();
e12c6309 1902 next = tcg_temp_new();
98cd9ca7
RH
1903
1904 copy_iaoq_entry(tmp, ctx->iaoq_n, ctx->iaoq_n_var);
eaa3783b 1905 tcg_gen_movcond_reg(c, next, a0, a1, tmp, dest);
98cd9ca7
RH
1906 ctx->iaoq_n = -1;
1907 ctx->iaoq_n_var = next;
1908
1909 if (link != 0) {
eaa3783b 1910 tcg_gen_movcond_reg(c, cpu_gr[link], a0, a1, cpu_gr[link], tmp);
98cd9ca7
RH
1911 }
1912
1913 if (is_n) {
1914 /* The branch nullifies the next insn, which means the state of N
1915 after the branch is the inverse of the state of N that applied
1916 to the branch. */
eaa3783b 1917 tcg_gen_setcond_reg(tcg_invert_cond(c), cpu_psw_n, a0, a1);
98cd9ca7
RH
1918 cond_free(&ctx->null_cond);
1919 ctx->null_cond = cond_make_n();
1920 ctx->psw_n_nonzero = true;
1921 } else {
1922 cond_free(&ctx->null_cond);
1923 }
1924 }
01afb7be 1925 return true;
98cd9ca7
RH
1926}
1927
660eefe1
RH
1928/* Implement
1929 * if (IAOQ_Front{30..31} < GR[b]{30..31})
1930 * IAOQ_Next{30..31} ← GR[b]{30..31};
1931 * else
1932 * IAOQ_Next{30..31} ← IAOQ_Front{30..31};
1933 * which keeps the privilege level from being increased.
1934 */
1935static TCGv_reg do_ibranch_priv(DisasContext *ctx, TCGv_reg offset)
1936{
660eefe1
RH
1937 TCGv_reg dest;
1938 switch (ctx->privilege) {
1939 case 0:
1940 /* Privilege 0 is maximum and is allowed to decrease. */
1941 return offset;
1942 case 3:
993119fe 1943 /* Privilege 3 is minimum and is never allowed to increase. */
e12c6309 1944 dest = tcg_temp_new();
660eefe1
RH
1945 tcg_gen_ori_reg(dest, offset, 3);
1946 break;
1947 default:
e12c6309 1948 dest = tcg_temp_new();
660eefe1
RH
1949 tcg_gen_andi_reg(dest, offset, -4);
1950 tcg_gen_ori_reg(dest, dest, ctx->privilege);
1951 tcg_gen_movcond_reg(TCG_COND_GTU, dest, dest, offset, dest, offset);
660eefe1
RH
1952 break;
1953 }
1954 return dest;
660eefe1
RH
1955}
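/*
 * Worked example of the clamp above: at privilege 2, a target address
 * with low bits 01 (requesting privilege 1) yields dest = (offset & -4) | 2;
 * since ...10 >u ...01 the movcond keeps dest and execution stays at
 * privilege 2.  A target with low bits 11 compares higher than dest, so
 * the original offset is kept and the privilege drops to 3 -- the only
 * direction of change ever permitted here.
 */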
1956
ba1d0b44 1957#ifdef CONFIG_USER_ONLY
7ad439df
RH
1958/* On Linux, page zero is normally marked execute only + gateway.
1959 Therefore normal read or write is supposed to fail, but specific
1960 offsets have kernel code mapped to raise permissions to implement
1961 system calls. Handling this via an explicit check here, rather
1962 than in the "be disp(sr2,r0)" instruction that probably sent us
1963 here, is the easiest way to handle the branch delay slot on the
1964 aforementioned BE. */
31234768 1965static void do_page_zero(DisasContext *ctx)
7ad439df
RH
1966{
1967 /* If by some means we get here with PSW[N]=1, that implies that
1968 the B,GATE instruction would be skipped, and we'd fault on the
8b81968c 1969 next insn within the privileged page. */
7ad439df
RH
1970 switch (ctx->null_cond.c) {
1971 case TCG_COND_NEVER:
1972 break;
1973 case TCG_COND_ALWAYS:
eaa3783b 1974 tcg_gen_movi_reg(cpu_psw_n, 0);
7ad439df
RH
1975 goto do_sigill;
1976 default:
1977 /* Since this is always the first (and only) insn within the
1978 TB, we should know the state of PSW[N] from TB->FLAGS. */
1979 g_assert_not_reached();
1980 }
1981
1982 /* Check that we didn't arrive here via some means that allowed
1983 non-sequential instruction execution. Normally the PSW[B] bit
1984 detects this by disallowing the B,GATE instruction to execute
1985 under such conditions. */
1986 if (ctx->iaoq_b != ctx->iaoq_f + 4) {
1987 goto do_sigill;
1988 }
1989
ebd0e151 1990 switch (ctx->iaoq_f & -4) {
7ad439df 1991 case 0x00: /* Null pointer call */
2986721d 1992 gen_excp_1(EXCP_IMP);
31234768
RH
1993 ctx->base.is_jmp = DISAS_NORETURN;
1994 break;
7ad439df
RH
1995
1996 case 0xb0: /* LWS */
1997 gen_excp_1(EXCP_SYSCALL_LWS);
31234768
RH
1998 ctx->base.is_jmp = DISAS_NORETURN;
1999 break;
7ad439df
RH
2000
2001 case 0xe0: /* SET_THREAD_POINTER */
ad75a51e 2002 tcg_gen_st_reg(cpu_gr[26], tcg_env, offsetof(CPUHPPAState, cr[27]));
ebd0e151 2003 tcg_gen_ori_reg(cpu_iaoq_f, cpu_gr[31], 3);
eaa3783b 2004 tcg_gen_addi_reg(cpu_iaoq_b, cpu_iaoq_f, 4);
31234768
RH
2005 ctx->base.is_jmp = DISAS_IAQ_N_UPDATED;
2006 break;
7ad439df
RH
2007
2008 case 0x100: /* SYSCALL */
2009 gen_excp_1(EXCP_SYSCALL);
31234768
RH
2010 ctx->base.is_jmp = DISAS_NORETURN;
2011 break;
7ad439df
RH
2012
2013 default:
2014 do_sigill:
2986721d 2015 gen_excp_1(EXCP_ILL);
31234768
RH
2016 ctx->base.is_jmp = DISAS_NORETURN;
2017 break;
7ad439df
RH
2018 }
2019}
ba1d0b44 2020#endif
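/*
 * For reference, the user-space side of the gateway handled above is,
 * per the Linux/parisc ABI, roughly (illustrative, not taken from this
 * file):
 *
 *     ble   0x100(%sr2, %r0)    ; branch external into page zero (SYSCALL)
 *     ldi   __NR_write, %r20    ; delay slot: syscall number in %r20
 *
 * with offset 0xb0 used for the light-weight syscalls (LWS) and 0xe0 for
 * SET_THREAD_POINTER, matching the cases in do_page_zero().
 */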
7ad439df 2021
deee69a1 2022static bool trans_nop(DisasContext *ctx, arg_nop *a)
b2167459
RH
2023{
2024 cond_free(&ctx->null_cond);
31234768 2025 return true;
b2167459
RH
2026}
2027
40f9f908 2028static bool trans_break(DisasContext *ctx, arg_break *a)
98a9cb79 2029{
31234768 2030 return gen_excp_iir(ctx, EXCP_BREAK);
98a9cb79
RH
2031}
2032
e36f27ef 2033static bool trans_sync(DisasContext *ctx, arg_sync *a)
98a9cb79
RH
2034{
2035 /* No point in nullifying the memory barrier. */
2036 tcg_gen_mb(TCG_BAR_SC | TCG_MO_ALL);
2037
2038 cond_free(&ctx->null_cond);
31234768 2039 return true;
98a9cb79
RH
2040}
2041
c603e14a 2042static bool trans_mfia(DisasContext *ctx, arg_mfia *a)
98a9cb79 2043{
c603e14a 2044 unsigned rt = a->t;
eaa3783b
RH
2045 TCGv_reg tmp = dest_gpr(ctx, rt);
2046 tcg_gen_movi_reg(tmp, ctx->iaoq_f);
98a9cb79
RH
2047 save_gpr(ctx, rt, tmp);
2048
2049 cond_free(&ctx->null_cond);
31234768 2050 return true;
98a9cb79
RH
2051}
2052
c603e14a 2053static bool trans_mfsp(DisasContext *ctx, arg_mfsp *a)
98a9cb79 2054{
c603e14a
RH
2055 unsigned rt = a->t;
2056 unsigned rs = a->sp;
33423472
RH
2057 TCGv_i64 t0 = tcg_temp_new_i64();
2058 TCGv_reg t1 = tcg_temp_new();
98a9cb79 2059
33423472
RH
2060 load_spr(ctx, t0, rs);
2061 tcg_gen_shri_i64(t0, t0, 32);
2062 tcg_gen_trunc_i64_reg(t1, t0);
2063
2064 save_gpr(ctx, rt, t1);
98a9cb79
RH
2065
2066 cond_free(&ctx->null_cond);
31234768 2067 return true;
98a9cb79
RH
2068}
2069
c603e14a 2070static bool trans_mfctl(DisasContext *ctx, arg_mfctl *a)
98a9cb79 2071{
c603e14a
RH
2072 unsigned rt = a->t;
2073 unsigned ctl = a->r;
eaa3783b 2074 TCGv_reg tmp;
98a9cb79
RH
2075
2076 switch (ctl) {
35136a77 2077 case CR_SAR:
98a9cb79 2078#ifdef TARGET_HPPA64
c603e14a 2079 if (a->e == 0) {
98a9cb79
RH
2080 /* MFSAR without ,W masks low 5 bits. */
2081 tmp = dest_gpr(ctx, rt);
eaa3783b 2082 tcg_gen_andi_reg(tmp, cpu_sar, 31);
98a9cb79 2083 save_gpr(ctx, rt, tmp);
35136a77 2084 goto done;
98a9cb79
RH
2085 }
2086#endif
2087 save_gpr(ctx, rt, cpu_sar);
35136a77
RH
2088 goto done;
2089 case CR_IT: /* Interval Timer */
2090 /* FIXME: Respect PSW_S bit. */
2091 nullify_over(ctx);
98a9cb79 2092 tmp = dest_gpr(ctx, rt);
dfd1b812 2093 if (translator_io_start(&ctx->base)) {
49c29d6c 2094 gen_helper_read_interval_timer(tmp);
31234768 2095 ctx->base.is_jmp = DISAS_IAQ_N_STALE;
49c29d6c
RH
2096 } else {
2097 gen_helper_read_interval_timer(tmp);
49c29d6c 2098 }
98a9cb79 2099 save_gpr(ctx, rt, tmp);
31234768 2100 return nullify_end(ctx);
98a9cb79 2101 case 26:
98a9cb79 2102 case 27:
98a9cb79
RH
2103 break;
2104 default:
2105 /* All other control registers are privileged. */
35136a77
RH
2106 CHECK_MOST_PRIVILEGED(EXCP_PRIV_REG);
2107 break;
98a9cb79
RH
2108 }
2109
e12c6309 2110 tmp = tcg_temp_new();
ad75a51e 2111 tcg_gen_ld_reg(tmp, tcg_env, offsetof(CPUHPPAState, cr[ctl]));
35136a77
RH
2112 save_gpr(ctx, rt, tmp);
2113
2114 done:
98a9cb79 2115 cond_free(&ctx->null_cond);
31234768 2116 return true;
98a9cb79
RH
2117}
2118
c603e14a 2119static bool trans_mtsp(DisasContext *ctx, arg_mtsp *a)
33423472 2120{
c603e14a
RH
2121 unsigned rr = a->r;
2122 unsigned rs = a->sp;
33423472
RH
2123 TCGv_i64 t64;
2124
2125 if (rs >= 5) {
2126 CHECK_MOST_PRIVILEGED(EXCP_PRIV_REG);
2127 }
2128 nullify_over(ctx);
2129
2130 t64 = tcg_temp_new_i64();
2131 tcg_gen_extu_reg_i64(t64, load_gpr(ctx, rr));
2132 tcg_gen_shli_i64(t64, t64, 32);
2133
2134 if (rs >= 4) {
ad75a51e 2135 tcg_gen_st_i64(t64, tcg_env, offsetof(CPUHPPAState, sr[rs]));
494737b7 2136 ctx->tb_flags &= ~TB_FLAG_SR_SAME;
33423472
RH
2137 } else {
2138 tcg_gen_mov_i64(cpu_sr[rs], t64);
2139 }
33423472 2140
31234768 2141 return nullify_end(ctx);
33423472
RH
2142}
2143
c603e14a 2144static bool trans_mtctl(DisasContext *ctx, arg_mtctl *a)
98a9cb79 2145{
c603e14a 2146 unsigned ctl = a->t;
4845f015 2147 TCGv_reg reg;
eaa3783b 2148 TCGv_reg tmp;
98a9cb79 2149
35136a77 2150 if (ctl == CR_SAR) {
4845f015 2151 reg = load_gpr(ctx, a->r);
98a9cb79 2152 tmp = tcg_temp_new();
35136a77 2153 tcg_gen_andi_reg(tmp, reg, TARGET_REGISTER_BITS - 1);
98a9cb79 2154 save_or_nullify(ctx, cpu_sar, tmp);
35136a77
RH
2155
2156 cond_free(&ctx->null_cond);
31234768 2157 return true;
98a9cb79
RH
2158 }
2159
35136a77
RH
2160 /* All other control registers are privileged or read-only. */
2161 CHECK_MOST_PRIVILEGED(EXCP_PRIV_REG);
2162
c603e14a 2163#ifndef CONFIG_USER_ONLY
35136a77 2164 nullify_over(ctx);
4845f015
SS
2165 reg = load_gpr(ctx, a->r);
2166
35136a77
RH
2167 switch (ctl) {
2168 case CR_IT:
ad75a51e 2169 gen_helper_write_interval_timer(tcg_env, reg);
35136a77 2170 break;
4f5f2548 2171 case CR_EIRR:
ad75a51e 2172 gen_helper_write_eirr(tcg_env, reg);
4f5f2548
RH
2173 break;
2174 case CR_EIEM:
ad75a51e 2175 gen_helper_write_eiem(tcg_env, reg);
31234768 2176 ctx->base.is_jmp = DISAS_IAQ_N_STALE_EXIT;
4f5f2548
RH
2177 break;
2178
35136a77
RH
2179 case CR_IIASQ:
2180 case CR_IIAOQ:
2181 /* FIXME: Respect PSW_Q bit */
2182 /* The write advances the queue and stores to the back element. */
e12c6309 2183 tmp = tcg_temp_new();
ad75a51e 2184 tcg_gen_ld_reg(tmp, tcg_env,
35136a77 2185 offsetof(CPUHPPAState, cr_back[ctl - CR_IIASQ]));
ad75a51e
RH
2186 tcg_gen_st_reg(tmp, tcg_env, offsetof(CPUHPPAState, cr[ctl]));
2187 tcg_gen_st_reg(reg, tcg_env,
35136a77
RH
2188 offsetof(CPUHPPAState, cr_back[ctl - CR_IIASQ]));
2189 break;
2190
d5de20bd
SS
2191 case CR_PID1:
2192 case CR_PID2:
2193 case CR_PID3:
2194 case CR_PID4:
ad75a51e 2195 tcg_gen_st_reg(reg, tcg_env, offsetof(CPUHPPAState, cr[ctl]));
d5de20bd 2196#ifndef CONFIG_USER_ONLY
ad75a51e 2197 gen_helper_change_prot_id(tcg_env);
d5de20bd
SS
2198#endif
2199 break;
2200
35136a77 2201 default:
ad75a51e 2202 tcg_gen_st_reg(reg, tcg_env, offsetof(CPUHPPAState, cr[ctl]));
35136a77
RH
2203 break;
2204 }
31234768 2205 return nullify_end(ctx);
4f5f2548 2206#endif
98a9cb79
RH
2207}
2208
c603e14a 2209static bool trans_mtsarcm(DisasContext *ctx, arg_mtsarcm *a)
98a9cb79 2210{
eaa3783b 2211 TCGv_reg tmp = tcg_temp_new();
98a9cb79 2212
c603e14a 2213 tcg_gen_not_reg(tmp, load_gpr(ctx, a->r));
eaa3783b 2214 tcg_gen_andi_reg(tmp, tmp, TARGET_REGISTER_BITS - 1);
98a9cb79 2215 save_or_nullify(ctx, cpu_sar, tmp);
98a9cb79
RH
2216
2217 cond_free(&ctx->null_cond);
31234768 2218 return true;
98a9cb79
RH
2219}
2220
e36f27ef 2221static bool trans_ldsid(DisasContext *ctx, arg_ldsid *a)
98a9cb79 2222{
e36f27ef 2223 TCGv_reg dest = dest_gpr(ctx, a->t);
98a9cb79 2224
2330504c
HD
2225#ifdef CONFIG_USER_ONLY
2226 /* We don't implement space registers in user mode. */
eaa3783b 2227 tcg_gen_movi_reg(dest, 0);
2330504c 2228#else
2330504c
HD
2229 TCGv_i64 t0 = tcg_temp_new_i64();
2230
e36f27ef 2231 tcg_gen_mov_i64(t0, space_select(ctx, a->sp, load_gpr(ctx, a->b)));
2330504c
HD
2232 tcg_gen_shri_i64(t0, t0, 32);
2233 tcg_gen_trunc_i64_reg(dest, t0);
2330504c 2234#endif
e36f27ef 2235 save_gpr(ctx, a->t, dest);
98a9cb79
RH
2236
2237 cond_free(&ctx->null_cond);
31234768 2238 return true;
98a9cb79
RH
2239}
2240
e36f27ef 2241static bool trans_rsm(DisasContext *ctx, arg_rsm *a)
e1b5a5ed 2242{
e36f27ef
RH
2243 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2244#ifndef CONFIG_USER_ONLY
e1b5a5ed
RH
2245 TCGv_reg tmp;
2246
e1b5a5ed
RH
2247 nullify_over(ctx);
2248
e12c6309 2249 tmp = tcg_temp_new();
ad75a51e 2250 tcg_gen_ld_reg(tmp, tcg_env, offsetof(CPUHPPAState, psw));
e36f27ef 2251 tcg_gen_andi_reg(tmp, tmp, ~a->i);
ad75a51e 2252 gen_helper_swap_system_mask(tmp, tcg_env, tmp);
e36f27ef 2253 save_gpr(ctx, a->t, tmp);
e1b5a5ed
RH
2254
2255 /* Exit the TB to recognize new interrupts, e.g. PSW_M. */
31234768
RH
2256 ctx->base.is_jmp = DISAS_IAQ_N_STALE_EXIT;
2257 return nullify_end(ctx);
e36f27ef 2258#endif
e1b5a5ed
RH
2259}
2260
e36f27ef 2261static bool trans_ssm(DisasContext *ctx, arg_ssm *a)
e1b5a5ed 2262{
e36f27ef
RH
2263 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2264#ifndef CONFIG_USER_ONLY
e1b5a5ed
RH
2265 TCGv_reg tmp;
2266
e1b5a5ed
RH
2267 nullify_over(ctx);
2268
e12c6309 2269 tmp = tcg_temp_new();
ad75a51e 2270 tcg_gen_ld_reg(tmp, tcg_env, offsetof(CPUHPPAState, psw));
e36f27ef 2271 tcg_gen_ori_reg(tmp, tmp, a->i);
ad75a51e 2272 gen_helper_swap_system_mask(tmp, tcg_env, tmp);
e36f27ef 2273 save_gpr(ctx, a->t, tmp);
e1b5a5ed
RH
2274
2275 /* Exit the TB to recognize new interrupts, e.g. PSW_I. */
31234768
RH
2276 ctx->base.is_jmp = DISAS_IAQ_N_STALE_EXIT;
2277 return nullify_end(ctx);
e36f27ef 2278#endif
e1b5a5ed
RH
2279}
2280
c603e14a 2281static bool trans_mtsm(DisasContext *ctx, arg_mtsm *a)
e1b5a5ed 2282{
e1b5a5ed 2283 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
c603e14a
RH
2284#ifndef CONFIG_USER_ONLY
2285 TCGv_reg tmp, reg;
e1b5a5ed
RH
2286 nullify_over(ctx);
2287
c603e14a 2288 reg = load_gpr(ctx, a->r);
e12c6309 2289 tmp = tcg_temp_new();
ad75a51e 2290 gen_helper_swap_system_mask(tmp, tcg_env, reg);
e1b5a5ed
RH
2291
2292 /* Exit the TB to recognize new interrupts. */
31234768
RH
2293 ctx->base.is_jmp = DISAS_IAQ_N_STALE_EXIT;
2294 return nullify_end(ctx);
c603e14a 2295#endif
e1b5a5ed 2296}
f49b3537 2297
e36f27ef 2298static bool do_rfi(DisasContext *ctx, bool rfi_r)
f49b3537 2299{
f49b3537 2300 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
e36f27ef 2301#ifndef CONFIG_USER_ONLY
f49b3537
RH
2302 nullify_over(ctx);
2303
e36f27ef 2304 if (rfi_r) {
ad75a51e 2305 gen_helper_rfi_r(tcg_env);
f49b3537 2306 } else {
ad75a51e 2307 gen_helper_rfi(tcg_env);
f49b3537 2308 }
31234768 2309 /* Exit the TB to recognize new interrupts. */
8532a14e 2310 tcg_gen_exit_tb(NULL, 0);
31234768 2311 ctx->base.is_jmp = DISAS_NORETURN;
f49b3537 2312
31234768 2313 return nullify_end(ctx);
e36f27ef
RH
2314#endif
2315}
2316
2317static bool trans_rfi(DisasContext *ctx, arg_rfi *a)
2318{
2319 return do_rfi(ctx, false);
2320}
2321
2322static bool trans_rfi_r(DisasContext *ctx, arg_rfi_r *a)
2323{
2324 return do_rfi(ctx, true);
f49b3537 2325}
6210db05 2326
96927adb
RH
2327static bool trans_halt(DisasContext *ctx, arg_halt *a)
2328{
2329 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
e36f27ef 2330#ifndef CONFIG_USER_ONLY
96927adb 2331 nullify_over(ctx);
ad75a51e 2332 gen_helper_halt(tcg_env);
96927adb
RH
2333 ctx->base.is_jmp = DISAS_NORETURN;
2334 return nullify_end(ctx);
2335#endif
2336}
2337
2338static bool trans_reset(DisasContext *ctx, arg_reset *a)
6210db05
HD
2339{
2340 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
96927adb 2341#ifndef CONFIG_USER_ONLY
6210db05 2342 nullify_over(ctx);
ad75a51e 2343 gen_helper_reset(tcg_env);
31234768
RH
2344 ctx->base.is_jmp = DISAS_NORETURN;
2345 return nullify_end(ctx);
96927adb 2346#endif
6210db05 2347}
e1b5a5ed 2348
4a4554c6
HD
2349static bool trans_getshadowregs(DisasContext *ctx, arg_getshadowregs *a)
2350{
2351 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2352#ifndef CONFIG_USER_ONLY
2353 nullify_over(ctx);
ad75a51e 2354 gen_helper_getshadowregs(tcg_env);
4a4554c6
HD
2355 return nullify_end(ctx);
2356#endif
2357}
2358
deee69a1 2359static bool trans_nop_addrx(DisasContext *ctx, arg_ldst *a)
98a9cb79 2360{
deee69a1
RH
2361 if (a->m) {
2362 TCGv_reg dest = dest_gpr(ctx, a->b);
2363 TCGv_reg src1 = load_gpr(ctx, a->b);
2364 TCGv_reg src2 = load_gpr(ctx, a->x);
98a9cb79 2365
deee69a1
RH
2366 /* The only thing we need to do is the base register modification. */
2367 tcg_gen_add_reg(dest, src1, src2);
2368 save_gpr(ctx, a->b, dest);
2369 }
98a9cb79 2370 cond_free(&ctx->null_cond);
31234768 2371 return true;
98a9cb79
RH
2372}
2373
deee69a1 2374static bool trans_probe(DisasContext *ctx, arg_probe *a)
98a9cb79 2375{
86f8d05f 2376 TCGv_reg dest, ofs;
eed14219 2377 TCGv_i32 level, want;
86f8d05f 2378 TCGv_tl addr;
98a9cb79
RH
2379
2380 nullify_over(ctx);
2381
deee69a1
RH
2382 dest = dest_gpr(ctx, a->t);
2383 form_gva(ctx, &addr, &ofs, a->b, 0, 0, 0, a->sp, 0, false);
eed14219 2384
deee69a1 2385 if (a->imm) {
29dd6f64 2386 level = tcg_constant_i32(a->ri);
98a9cb79 2387 } else {
eed14219 2388 level = tcg_temp_new_i32();
deee69a1 2389 tcg_gen_trunc_reg_i32(level, load_gpr(ctx, a->ri));
eed14219 2390 tcg_gen_andi_i32(level, level, 3);
98a9cb79 2391 }
29dd6f64 2392 want = tcg_constant_i32(a->write ? PAGE_WRITE : PAGE_READ);
eed14219 2393
ad75a51e 2394 gen_helper_probe(dest, tcg_env, addr, level, want);
eed14219 2395
deee69a1 2396 save_gpr(ctx, a->t, dest);
31234768 2397 return nullify_end(ctx);
98a9cb79
RH
2398}
2399
deee69a1 2400static bool trans_ixtlbx(DisasContext *ctx, arg_ixtlbx *a)
8d6ae7fb 2401{
deee69a1
RH
2402 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2403#ifndef CONFIG_USER_ONLY
8d6ae7fb
RH
2404 TCGv_tl addr;
2405 TCGv_reg ofs, reg;
2406
8d6ae7fb
RH
2407 nullify_over(ctx);
2408
deee69a1
RH
2409 form_gva(ctx, &addr, &ofs, a->b, 0, 0, 0, a->sp, 0, false);
2410 reg = load_gpr(ctx, a->r);
2411 if (a->addr) {
ad75a51e 2412 gen_helper_itlba(tcg_env, addr, reg);
8d6ae7fb 2413 } else {
ad75a51e 2414 gen_helper_itlbp(tcg_env, addr, reg);
8d6ae7fb
RH
2415 }
2416
32dc7569
SS
2417 /* Exit TB for TLB change if mmu is enabled. */
2418 if (ctx->tb_flags & PSW_C) {
31234768
RH
2419 ctx->base.is_jmp = DISAS_IAQ_N_STALE;
2420 }
2421 return nullify_end(ctx);
deee69a1 2422#endif
8d6ae7fb 2423}
63300a00 2424
deee69a1 2425static bool trans_pxtlbx(DisasContext *ctx, arg_pxtlbx *a)
63300a00 2426{
deee69a1
RH
2427 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2428#ifndef CONFIG_USER_ONLY
63300a00
RH
2429 TCGv_tl addr;
2430 TCGv_reg ofs;
2431
63300a00
RH
2432 nullify_over(ctx);
2433
deee69a1
RH
2434 form_gva(ctx, &addr, &ofs, a->b, a->x, 0, 0, a->sp, a->m, false);
2435 if (a->m) {
2436 save_gpr(ctx, a->b, ofs);
63300a00 2437 }
deee69a1 2438 if (a->local) {
ad75a51e 2439 gen_helper_ptlbe(tcg_env);
63300a00 2440 } else {
ad75a51e 2441 gen_helper_ptlb(tcg_env, addr);
63300a00
RH
2442 }
2443
2444 /* Exit TB for TLB change if mmu is enabled. */
6797c315
NH
2445 if (ctx->tb_flags & PSW_C) {
2446 ctx->base.is_jmp = DISAS_IAQ_N_STALE;
2447 }
2448 return nullify_end(ctx);
2449#endif
2450}
2451
2452/*
2453 * Implement the pcxl and pcxl2 Fast TLB Insert instructions.
2454 * See
2455 * https://parisc.wiki.kernel.org/images-parisc/a/a9/Pcxl2_ers.pdf
2456 * page 13-9 (195/206)
2457 */
2458static bool trans_ixtlbxf(DisasContext *ctx, arg_ixtlbxf *a)
2459{
2460 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2461#ifndef CONFIG_USER_ONLY
2462 TCGv_tl addr, atl, stl;
2463 TCGv_reg reg;
2464
2465 nullify_over(ctx);
2466
2467 /*
2468 * FIXME:
2469 * if (not (pcxl or pcxl2))
2470 * return gen_illegal(ctx);
2471 *
2472 * Note for future: these are 32-bit systems; no hppa64.
2473 */
2474
2475 atl = tcg_temp_new_tl();
2476 stl = tcg_temp_new_tl();
2477 addr = tcg_temp_new_tl();
2478
ad75a51e 2479 tcg_gen_ld32u_i64(stl, tcg_env,
6797c315
NH
2480 a->data ? offsetof(CPUHPPAState, cr[CR_ISR])
2481 : offsetof(CPUHPPAState, cr[CR_IIASQ]));
ad75a51e 2482 tcg_gen_ld32u_i64(atl, tcg_env,
6797c315
NH
2483 a->data ? offsetof(CPUHPPAState, cr[CR_IOR])
2484 : offsetof(CPUHPPAState, cr[CR_IIAOQ]));
2485 tcg_gen_shli_i64(stl, stl, 32);
2486 tcg_gen_or_tl(addr, atl, stl);
6797c315
NH
2487
2488 reg = load_gpr(ctx, a->r);
2489 if (a->addr) {
ad75a51e 2490 gen_helper_itlba(tcg_env, addr, reg);
6797c315 2491 } else {
ad75a51e 2492 gen_helper_itlbp(tcg_env, addr, reg);
6797c315 2493 }
6797c315
NH
2494
2495 /* Exit TB for TLB change if mmu is enabled. */
32dc7569 2496 if (ctx->tb_flags & PSW_C) {
31234768
RH
2497 ctx->base.is_jmp = DISAS_IAQ_N_STALE;
2498 }
2499 return nullify_end(ctx);
deee69a1 2500#endif
63300a00 2501}
2dfcca9f 2502
deee69a1 2503static bool trans_lpa(DisasContext *ctx, arg_ldst *a)
2dfcca9f 2504{
deee69a1
RH
2505 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2506#ifndef CONFIG_USER_ONLY
2dfcca9f
RH
2507 TCGv_tl vaddr;
2508 TCGv_reg ofs, paddr;
2509
2dfcca9f
RH
2510 nullify_over(ctx);
2511
deee69a1 2512 form_gva(ctx, &vaddr, &ofs, a->b, a->x, 0, 0, a->sp, a->m, false);
2dfcca9f
RH
2513
2514 paddr = tcg_temp_new();
ad75a51e 2515 gen_helper_lpa(paddr, tcg_env, vaddr);
2dfcca9f
RH
2516
2517 /* Note that physical address result overrides base modification. */
deee69a1
RH
2518 if (a->m) {
2519 save_gpr(ctx, a->b, ofs);
2dfcca9f 2520 }
deee69a1 2521 save_gpr(ctx, a->t, paddr);
2dfcca9f 2522
31234768 2523 return nullify_end(ctx);
deee69a1 2524#endif
2dfcca9f 2525}
43a97b81 2526
deee69a1 2527static bool trans_lci(DisasContext *ctx, arg_lci *a)
43a97b81 2528{
43a97b81
RH
2529 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
2530
2531 /* The Coherence Index is an implementation-defined function of the
2532 physical address. Two addresses with the same CI have a coherent
2533 view of the cache. Our implementation is to return 0 for all,
2534 since the entire address space is coherent. */
29dd6f64 2535 save_gpr(ctx, a->t, tcg_constant_reg(0));
43a97b81 2536
31234768
RH
2537 cond_free(&ctx->null_cond);
2538 return true;
43a97b81 2539}
98a9cb79 2540
0c982a28 2541static bool trans_add(DisasContext *ctx, arg_rrr_cf_sh *a)
b2167459 2542{
0c982a28
RH
2543 return do_add_reg(ctx, a, false, false, false, false);
2544}
b2167459 2545
0c982a28
RH
2546static bool trans_add_l(DisasContext *ctx, arg_rrr_cf_sh *a)
2547{
2548 return do_add_reg(ctx, a, true, false, false, false);
2549}
b2167459 2550
0c982a28
RH
2551static bool trans_add_tsv(DisasContext *ctx, arg_rrr_cf_sh *a)
2552{
2553 return do_add_reg(ctx, a, false, true, false, false);
b2167459
RH
2554}
2555
0c982a28 2556static bool trans_add_c(DisasContext *ctx, arg_rrr_cf_sh *a)
b2167459 2557{
0c982a28
RH
2558 return do_add_reg(ctx, a, false, false, false, true);
2559}
b2167459 2560
0c982a28
RH
2561static bool trans_add_c_tsv(DisasContext *ctx, arg_rrr_cf_sh *a)
2562{
2563 return do_add_reg(ctx, a, false, true, false, true);
2564}
b2167459 2565
0c982a28
RH
2566static bool trans_sub(DisasContext *ctx, arg_rrr_cf *a)
2567{
2568 return do_sub_reg(ctx, a, false, false, false);
b2167459
RH
2569}
2570
0c982a28 2571static bool trans_sub_tsv(DisasContext *ctx, arg_rrr_cf *a)
b2167459 2572{
0c982a28
RH
2573 return do_sub_reg(ctx, a, true, false, false);
2574}
b2167459 2575
0c982a28
RH
2576static bool trans_sub_tc(DisasContext *ctx, arg_rrr_cf *a)
2577{
2578 return do_sub_reg(ctx, a, false, false, true);
b2167459
RH
2579}
2580
0c982a28 2581static bool trans_sub_tsv_tc(DisasContext *ctx, arg_rrr_cf *a)
b2167459 2582{
0c982a28
RH
2583 return do_sub_reg(ctx, a, true, false, true);
2584}
2585
2586static bool trans_sub_b(DisasContext *ctx, arg_rrr_cf *a)
2587{
2588 return do_sub_reg(ctx, a, false, true, false);
2589}
2590
2591static bool trans_sub_b_tsv(DisasContext *ctx, arg_rrr_cf *a)
2592{
2593 return do_sub_reg(ctx, a, true, true, false);
2594}
2595
2596static bool trans_andcm(DisasContext *ctx, arg_rrr_cf *a)
2597{
2598 return do_log_reg(ctx, a, tcg_gen_andc_reg);
2599}
2600
2601static bool trans_and(DisasContext *ctx, arg_rrr_cf *a)
2602{
2603 return do_log_reg(ctx, a, tcg_gen_and_reg);
2604}
2605
2606static bool trans_or(DisasContext *ctx, arg_rrr_cf *a)
2607{
2608 if (a->cf == 0) {
2609 unsigned r2 = a->r2;
2610 unsigned r1 = a->r1;
2611 unsigned rt = a->t;
b2167459 2612
7aee8189
RH
2613 if (rt == 0) { /* NOP */
2614 cond_free(&ctx->null_cond);
2615 return true;
2616 }
2617 if (r2 == 0) { /* COPY */
2618 if (r1 == 0) {
2619 TCGv_reg dest = dest_gpr(ctx, rt);
2620 tcg_gen_movi_reg(dest, 0);
2621 save_gpr(ctx, rt, dest);
2622 } else {
2623 save_gpr(ctx, rt, cpu_gr[r1]);
2624 }
2625 cond_free(&ctx->null_cond);
2626 return true;
2627 }
2628#ifndef CONFIG_USER_ONLY
2629 /* These are QEMU extensions and are nops in the real architecture:
2630 *
2631 * or %r10,%r10,%r10 -- idle loop; wait for interrupt
2632 * or %r31,%r31,%r31 -- death loop; offline cpu
2633 * currently implemented as idle.
2634 */
2635 if ((rt == 10 || rt == 31) && r1 == rt && r2 == rt) { /* PAUSE */
7aee8189
RH
2636 /* No need to check for supervisor, as userland can only pause
2637 until the next timer interrupt. */
2638 nullify_over(ctx);
2639
2640 /* Advance the instruction queue. */
2641 copy_iaoq_entry(cpu_iaoq_f, ctx->iaoq_b, cpu_iaoq_b);
2642 copy_iaoq_entry(cpu_iaoq_b, ctx->iaoq_n, ctx->iaoq_n_var);
2643 nullify_set(ctx, 0);
2644
2645 /* Tell the qemu main loop to halt until this cpu has work. */
ad75a51e 2646 tcg_gen_st_i32(tcg_constant_i32(1), tcg_env,
29dd6f64 2647 offsetof(CPUState, halted) - offsetof(HPPACPU, env));
7aee8189
RH
2648 gen_excp_1(EXCP_HALTED);
2649 ctx->base.is_jmp = DISAS_NORETURN;
2650
2651 return nullify_end(ctx);
2652 }
2653#endif
b2167459 2654 }
0c982a28
RH
2655 return do_log_reg(ctx, a, tcg_gen_or_reg);
2656}
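/*
 * Illustrative use of the PAUSE extension handled in trans_or() above
 * (a nop on real hardware, an idle hint under QEMU):
 *
 *     idle:  or   %r10,%r10,%r10    ; sleep this vcpu until an interrupt
 *            b,n  idle
 */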
7aee8189 2657
0c982a28
RH
2658static bool trans_xor(DisasContext *ctx, arg_rrr_cf *a)
2659{
2660 return do_log_reg(ctx, a, tcg_gen_xor_reg);
b2167459
RH
2661}
2662
0c982a28 2663static bool trans_cmpclr(DisasContext *ctx, arg_rrr_cf *a)
b2167459 2664{
eaa3783b 2665 TCGv_reg tcg_r1, tcg_r2;
b2167459 2666
0c982a28 2667 if (a->cf) {
b2167459
RH
2668 nullify_over(ctx);
2669 }
0c982a28
RH
2670 tcg_r1 = load_gpr(ctx, a->r1);
2671 tcg_r2 = load_gpr(ctx, a->r2);
2672 do_cmpclr(ctx, a->t, tcg_r1, tcg_r2, a->cf);
31234768 2673 return nullify_end(ctx);
b2167459
RH
2674}
2675
0c982a28 2676static bool trans_uxor(DisasContext *ctx, arg_rrr_cf *a)
b2167459 2677{
eaa3783b 2678 TCGv_reg tcg_r1, tcg_r2;
b2167459 2679
0c982a28 2680 if (a->cf) {
b2167459
RH
2681 nullify_over(ctx);
2682 }
0c982a28
RH
2683 tcg_r1 = load_gpr(ctx, a->r1);
2684 tcg_r2 = load_gpr(ctx, a->r2);
2685 do_unit(ctx, a->t, tcg_r1, tcg_r2, a->cf, false, tcg_gen_xor_reg);
31234768 2686 return nullify_end(ctx);
b2167459
RH
2687}
2688
0c982a28 2689static bool do_uaddcm(DisasContext *ctx, arg_rrr_cf *a, bool is_tc)
b2167459 2690{
eaa3783b 2691 TCGv_reg tcg_r1, tcg_r2, tmp;
b2167459 2692
0c982a28 2693 if (a->cf) {
b2167459
RH
2694 nullify_over(ctx);
2695 }
0c982a28
RH
2696 tcg_r1 = load_gpr(ctx, a->r1);
2697 tcg_r2 = load_gpr(ctx, a->r2);
e12c6309 2698 tmp = tcg_temp_new();
eaa3783b 2699 tcg_gen_not_reg(tmp, tcg_r2);
0c982a28 2700 do_unit(ctx, a->t, tcg_r1, tmp, a->cf, is_tc, tcg_gen_add_reg);
31234768 2701 return nullify_end(ctx);
b2167459
RH
2702}
2703
0c982a28
RH
2704static bool trans_uaddcm(DisasContext *ctx, arg_rrr_cf *a)
2705{
2706 return do_uaddcm(ctx, a, false);
2707}
2708
2709static bool trans_uaddcm_tc(DisasContext *ctx, arg_rrr_cf *a)
2710{
2711 return do_uaddcm(ctx, a, true);
2712}
2713
2714static bool do_dcor(DisasContext *ctx, arg_rr_cf *a, bool is_i)
b2167459 2715{
eaa3783b 2716 TCGv_reg tmp;
b2167459
RH
2717
2718 nullify_over(ctx);
2719
e12c6309 2720 tmp = tcg_temp_new();
eaa3783b 2721 tcg_gen_shri_reg(tmp, cpu_psw_cb, 3);
b2167459 2722 if (!is_i) {
eaa3783b 2723 tcg_gen_not_reg(tmp, tmp);
b2167459 2724 }
eaa3783b
RH
2725 tcg_gen_andi_reg(tmp, tmp, 0x11111111);
2726 tcg_gen_muli_reg(tmp, tmp, 6);
60e29463 2727 do_unit(ctx, a->t, load_gpr(ctx, a->r), tmp, a->cf, false,
31234768 2728 is_i ? tcg_gen_add_reg : tcg_gen_sub_reg);
31234768 2729 return nullify_end(ctx);
b2167459
RH
2730}
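/*
 * What do_dcor() computes: the shift-and-mask above extracts one carry
 * bit per nibble from cpu_psw_cb, i.e. a per-BCD-digit carry mask.
 * DCOR then subtracts 6 from every digit whose carry bit is clear, while
 * IDCOR adds 6 to every digit whose carry bit is set -- the usual
 * correction step when BCD arithmetic is done with binary adds of
 * pre-biased operands.
 */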
2731
0c982a28
RH
2732static bool trans_dcor(DisasContext *ctx, arg_rr_cf *a)
2733{
2734 return do_dcor(ctx, a, false);
2735}
2736
2737static bool trans_dcor_i(DisasContext *ctx, arg_rr_cf *a)
2738{
2739 return do_dcor(ctx, a, true);
2740}
2741
2742static bool trans_ds(DisasContext *ctx, arg_rrr_cf *a)
b2167459 2743{
eaa3783b 2744 TCGv_reg dest, add1, add2, addc, zero, in1, in2;
72ca8753 2745 TCGv_reg cout;
b2167459
RH
2746
2747 nullify_over(ctx);
2748
0c982a28
RH
2749 in1 = load_gpr(ctx, a->r1);
2750 in2 = load_gpr(ctx, a->r2);
b2167459
RH
2751
2752 add1 = tcg_temp_new();
2753 add2 = tcg_temp_new();
2754 addc = tcg_temp_new();
2755 dest = tcg_temp_new();
29dd6f64 2756 zero = tcg_constant_reg(0);
b2167459
RH
2757
2758 /* Form R1 << 1 | PSW[CB]{8}. */
eaa3783b 2759 tcg_gen_add_reg(add1, in1, in1);
72ca8753 2760 tcg_gen_add_reg(add1, add1, get_psw_carry(ctx, false));
b2167459 2761
72ca8753
RH
2762 /*
2763 * Add or subtract R2, depending on PSW[V]. Proper computation of
2764 * carry requires that we subtract via + ~R2 + 1, as described in
2765 * the manual. By extracting and masking V, we can produce the
2766 * proper inputs to the addition without movcond.
2767 */
2768 tcg_gen_sextract_reg(addc, cpu_psw_v, 31, 1);
eaa3783b
RH
2769 tcg_gen_xor_reg(add2, in2, addc);
2770 tcg_gen_andi_reg(addc, addc, 1);
72ca8753
RH
2771
2772 tcg_gen_add2_reg(dest, cpu_psw_cb_msb, add1, zero, add2, zero);
2773 tcg_gen_add2_reg(dest, cpu_psw_cb_msb, dest, cpu_psw_cb_msb, addc, zero);
b2167459 2774
b2167459 2775 /* Write back the result register. */
0c982a28 2776 save_gpr(ctx, a->t, dest);
b2167459
RH
2777
2778 /* Write back PSW[CB]. */
eaa3783b
RH
2779 tcg_gen_xor_reg(cpu_psw_cb, add1, add2);
2780 tcg_gen_xor_reg(cpu_psw_cb, cpu_psw_cb, dest);
b2167459
RH
2781
2782 /* Write back PSW[V] for the division step. */
72ca8753
RH
2783 cout = get_psw_carry(ctx, false);
2784 tcg_gen_neg_reg(cpu_psw_v, cout);
eaa3783b 2785 tcg_gen_xor_reg(cpu_psw_v, cpu_psw_v, in2);
b2167459
RH
2786
2787 /* Install the new nullification. */
0c982a28 2788 if (a->cf) {
eaa3783b 2789 TCGv_reg sv = NULL;
b47a4a02 2790 if (cond_need_sv(a->cf >> 1)) {
b2167459
RH
2791 /* ??? The lshift is supposed to contribute to overflow. */
2792 sv = do_add_sv(ctx, dest, add1, add2);
2793 }
72ca8753 2794 ctx->null_cond = do_cond(a->cf, dest, cout, sv);
b2167459
RH
2795 }
2796
31234768 2797 return nullify_end(ctx);
b2167459
RH
2798}
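/*
 * The add-or-subtract selection in trans_ds() above avoids a movcond:
 * sextract(cpu_psw_v, 31, 1) yields all-ones when PSW[V] is set and zero
 * otherwise, so add2 becomes either ~R2 or R2 and addc either 1 or 0.
 * With PSW[V] set the step therefore adds ~R2 + 1 (i.e. subtracts R2),
 * otherwise it adds R2, exactly as the divide-step algorithm requires:
 * one quotient bit is developed per DS executed.
 */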
2799
0588e061 2800static bool trans_addi(DisasContext *ctx, arg_rri_cf *a)
b2167459 2801{
0588e061
RH
2802 return do_add_imm(ctx, a, false, false);
2803}
b2167459 2804
0588e061
RH
2805static bool trans_addi_tsv(DisasContext *ctx, arg_rri_cf *a)
2806{
2807 return do_add_imm(ctx, a, true, false);
b2167459
RH
2808}
2809
0588e061 2810static bool trans_addi_tc(DisasContext *ctx, arg_rri_cf *a)
b2167459 2811{
0588e061
RH
2812 return do_add_imm(ctx, a, false, true);
2813}
b2167459 2814
0588e061
RH
2815static bool trans_addi_tc_tsv(DisasContext *ctx, arg_rri_cf *a)
2816{
2817 return do_add_imm(ctx, a, true, true);
2818}
b2167459 2819
0588e061
RH
2820static bool trans_subi(DisasContext *ctx, arg_rri_cf *a)
2821{
2822 return do_sub_imm(ctx, a, false);
2823}
b2167459 2824
0588e061
RH
2825static bool trans_subi_tsv(DisasContext *ctx, arg_rri_cf *a)
2826{
2827 return do_sub_imm(ctx, a, true);
b2167459
RH
2828}
2829
0588e061 2830static bool trans_cmpiclr(DisasContext *ctx, arg_rri_cf *a)
b2167459 2831{
eaa3783b 2832 TCGv_reg tcg_im, tcg_r2;
b2167459 2833
0588e061 2834 if (a->cf) {
b2167459
RH
2835 nullify_over(ctx);
2836 }
2837
d4e58033 2838 tcg_im = tcg_constant_reg(a->i);
0588e061
RH
2839 tcg_r2 = load_gpr(ctx, a->r);
2840 do_cmpclr(ctx, a->t, tcg_im, tcg_r2, a->cf);
b2167459 2841
31234768 2842 return nullify_end(ctx);
b2167459
RH
2843}
2844
1cd012a5 2845static bool trans_ld(DisasContext *ctx, arg_ldst *a)
96d6407f 2846{
0786a3b6
HD
2847 if (unlikely(TARGET_REGISTER_BITS == 32 && a->size > MO_32)) {
2848 return gen_illegal(ctx);
2849 } else {
2850 return do_load(ctx, a->t, a->b, a->x, a->scale ? a->size : 0,
1cd012a5 2851 a->disp, a->sp, a->m, a->size | MO_TE);
0786a3b6 2852 }
96d6407f
RH
2853}
2854
1cd012a5 2855static bool trans_st(DisasContext *ctx, arg_ldst *a)
96d6407f 2856{
1cd012a5 2857 assert(a->x == 0 && a->scale == 0);
0786a3b6
HD
2858 if (unlikely(TARGET_REGISTER_BITS == 32 && a->size > MO_32)) {
2859 return gen_illegal(ctx);
2860 } else {
2861 return do_store(ctx, a->t, a->b, a->disp, a->sp, a->m, a->size | MO_TE);
2862 }
96d6407f
RH
2863}
2864
1cd012a5 2865static bool trans_ldc(DisasContext *ctx, arg_ldst *a)
96d6407f 2866{
b1af755c 2867 MemOp mop = MO_TE | MO_ALIGN | a->size;
86f8d05f
RH
2868 TCGv_reg zero, dest, ofs;
2869 TCGv_tl addr;
96d6407f
RH
2870
2871 nullify_over(ctx);
2872
1cd012a5 2873 if (a->m) {
86f8d05f
RH
2874 /* Base register modification. Make sure if RT == RB,
2875 we see the result of the load. */
e12c6309 2876 dest = tcg_temp_new();
96d6407f 2877 } else {
1cd012a5 2878 dest = dest_gpr(ctx, a->t);
96d6407f
RH
2879 }
2880
1cd012a5
RH
2881 form_gva(ctx, &addr, &ofs, a->b, a->x, a->scale ? a->size : 0,
2882 a->disp, a->sp, a->m, ctx->mmu_idx == MMU_PHYS_IDX);
b1af755c
RH
2883
2884 /*
2885 * For hppa1.1, LDCW is undefined unless aligned mod 16.
2886 * However, actual hardware succeeds with aligned mod 4.
2887 * Detect this case and log a GUEST_ERROR.
2888 *
2889 * TODO: HPPA64 relaxes the over-alignment requirement
2890 * with the ,co completer.
2891 */
2892 gen_helper_ldc_check(addr);
2893
29dd6f64 2894 zero = tcg_constant_reg(0);
86f8d05f 2895 tcg_gen_atomic_xchg_reg(dest, addr, zero, ctx->mmu_idx, mop);
b1af755c 2896
1cd012a5
RH
2897 if (a->m) {
2898 save_gpr(ctx, a->b, ofs);
96d6407f 2899 }
1cd012a5 2900 save_gpr(ctx, a->t, dest);
96d6407f 2901
31234768 2902 return nullify_end(ctx);
96d6407f
RH
2903}
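/*
 * LDCW as translated above is essentially the only atomic
 * read-modify-write the architecture provides, which is why guests build
 * locks out of it; roughly (illustrative):
 *
 *     ldcw     0(%r26),%r1     ; atomically read the lock word and zero it
 *     comib,=  0,%r1,spin      ; 0 => somebody else already held the lock
 *
 * hence the over-alignment requirement checked by gen_helper_ldc_check().
 */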
2904
1cd012a5 2905static bool trans_stby(DisasContext *ctx, arg_stby *a)
96d6407f 2906{
86f8d05f
RH
2907 TCGv_reg ofs, val;
2908 TCGv_tl addr;
96d6407f
RH
2909
2910 nullify_over(ctx);
2911
1cd012a5 2912 form_gva(ctx, &addr, &ofs, a->b, 0, 0, a->disp, a->sp, a->m,
86f8d05f 2913 ctx->mmu_idx == MMU_PHYS_IDX);
1cd012a5
RH
2914 val = load_gpr(ctx, a->r);
2915 if (a->a) {
f9f46db4 2916 if (tb_cflags(ctx->base.tb) & CF_PARALLEL) {
ad75a51e 2917 gen_helper_stby_e_parallel(tcg_env, addr, val);
f9f46db4 2918 } else {
ad75a51e 2919 gen_helper_stby_e(tcg_env, addr, val);
f9f46db4 2920 }
96d6407f 2921 } else {
f9f46db4 2922 if (tb_cflags(ctx->base.tb) & CF_PARALLEL) {
ad75a51e 2923 gen_helper_stby_b_parallel(tcg_env, addr, val);
f9f46db4 2924 } else {
ad75a51e 2925 gen_helper_stby_b(tcg_env, addr, val);
f9f46db4 2926 }
96d6407f 2927 }
1cd012a5 2928 if (a->m) {
86f8d05f 2929 tcg_gen_andi_reg(ofs, ofs, ~3);
1cd012a5 2930 save_gpr(ctx, a->b, ofs);
96d6407f 2931 }
96d6407f 2932
31234768 2933 return nullify_end(ctx);
96d6407f
RH
2934}
2935
1cd012a5 2936static bool trans_lda(DisasContext *ctx, arg_ldst *a)
d0a851cc
RH
2937{
2938 int hold_mmu_idx = ctx->mmu_idx;
d0a851cc
RH
2939
2940 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
d0a851cc 2941 ctx->mmu_idx = MMU_PHYS_IDX;
1cd012a5 2942 trans_ld(ctx, a);
d0a851cc 2943 ctx->mmu_idx = hold_mmu_idx;
31234768 2944 return true;
d0a851cc
RH
2945}
2946
1cd012a5 2947static bool trans_sta(DisasContext *ctx, arg_ldst *a)
d0a851cc
RH
2948{
2949 int hold_mmu_idx = ctx->mmu_idx;
d0a851cc
RH
2950
2951 CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
d0a851cc 2952 ctx->mmu_idx = MMU_PHYS_IDX;
1cd012a5 2953 trans_st(ctx, a);
d0a851cc 2954 ctx->mmu_idx = hold_mmu_idx;
31234768 2955 return true;
d0a851cc 2956}
95412a61 2957
0588e061 2958static bool trans_ldil(DisasContext *ctx, arg_ldil *a)
b2167459 2959{
0588e061 2960 TCGv_reg tcg_rt = dest_gpr(ctx, a->t);
b2167459 2961
0588e061
RH
2962 tcg_gen_movi_reg(tcg_rt, a->i);
2963 save_gpr(ctx, a->t, tcg_rt);
b2167459 2964 cond_free(&ctx->null_cond);
31234768 2965 return true;
b2167459
RH
2966}
2967
0588e061 2968static bool trans_addil(DisasContext *ctx, arg_addil *a)
b2167459 2969{
0588e061 2970 TCGv_reg tcg_rt = load_gpr(ctx, a->r);
eaa3783b 2971 TCGv_reg tcg_r1 = dest_gpr(ctx, 1);
b2167459 2972
0588e061 2973 tcg_gen_addi_reg(tcg_r1, tcg_rt, a->i);
b2167459
RH
2974 save_gpr(ctx, 1, tcg_r1);
2975 cond_free(&ctx->null_cond);
31234768 2976 return true;
b2167459
RH
2977}
2978
0588e061 2979static bool trans_ldo(DisasContext *ctx, arg_ldo *a)
b2167459 2980{
0588e061 2981 TCGv_reg tcg_rt = dest_gpr(ctx, a->t);
b2167459
RH
2982
2983 /* Special case rb == 0, for the LDI pseudo-op.
2984 The COPY pseudo-op is handled for free within tcg_gen_addi_tl. */
0588e061
RH
2985 if (a->b == 0) {
2986 tcg_gen_movi_reg(tcg_rt, a->i);
b2167459 2987 } else {
0588e061 2988 tcg_gen_addi_reg(tcg_rt, cpu_gr[a->b], a->i);
b2167459 2989 }
0588e061 2990 save_gpr(ctx, a->t, tcg_rt);
b2167459 2991 cond_free(&ctx->null_cond);
31234768 2992 return true;
b2167459
RH
2993}
2994
01afb7be
RH
2995static bool do_cmpb(DisasContext *ctx, unsigned r, TCGv_reg in1,
2996 unsigned c, unsigned f, unsigned n, int disp)
98cd9ca7 2997{
01afb7be 2998 TCGv_reg dest, in2, sv;
98cd9ca7
RH
2999 DisasCond cond;
3000
98cd9ca7 3001 in2 = load_gpr(ctx, r);
e12c6309 3002 dest = tcg_temp_new();
98cd9ca7 3003
eaa3783b 3004 tcg_gen_sub_reg(dest, in1, in2);
98cd9ca7 3005
f764718d 3006 sv = NULL;
b47a4a02 3007 if (cond_need_sv(c)) {
98cd9ca7
RH
3008 sv = do_sub_sv(ctx, dest, in1, in2);
3009 }
3010
01afb7be
RH
3011 cond = do_sub_cond(c * 2 + f, dest, in1, in2, sv);
3012 return do_cbranch(ctx, disp, n, &cond);
98cd9ca7
RH
3013}
3014
01afb7be 3015static bool trans_cmpb(DisasContext *ctx, arg_cmpb *a)
98cd9ca7 3016{
01afb7be
RH
3017 nullify_over(ctx);
3018 return do_cmpb(ctx, a->r2, load_gpr(ctx, a->r1), a->c, a->f, a->n, a->disp);
3019}
98cd9ca7 3020
01afb7be
RH
3021static bool trans_cmpbi(DisasContext *ctx, arg_cmpbi *a)
3022{
98cd9ca7 3023 nullify_over(ctx);
d4e58033 3024 return do_cmpb(ctx, a->r, tcg_constant_reg(a->i), a->c, a->f, a->n, a->disp);
01afb7be
RH
3025}
3026
3027static bool do_addb(DisasContext *ctx, unsigned r, TCGv_reg in1,
3028 unsigned c, unsigned f, unsigned n, int disp)
3029{
bdcccc17 3030 TCGv_reg dest, in2, sv, cb_cond;
01afb7be 3031 DisasCond cond;
bdcccc17 3032 bool d = false;
98cd9ca7 3033
98cd9ca7 3034 in2 = load_gpr(ctx, r);
43675d20 3035 dest = tcg_temp_new();
f764718d 3036 sv = NULL;
bdcccc17 3037 cb_cond = NULL;
98cd9ca7 3038
b47a4a02 3039 if (cond_need_cb(c)) {
bdcccc17
RH
3040 TCGv_reg cb = tcg_temp_new();
3041 TCGv_reg cb_msb = tcg_temp_new();
3042
eaa3783b
RH
3043 tcg_gen_movi_reg(cb_msb, 0);
3044 tcg_gen_add2_reg(dest, cb_msb, in1, cb_msb, in2, cb_msb);
bdcccc17
RH
3045 tcg_gen_xor_reg(cb, in1, in2);
3046 tcg_gen_xor_reg(cb, cb, dest);
3047 cb_cond = get_carry(ctx, d, cb, cb_msb);
b47a4a02 3048 } else {
eaa3783b 3049 tcg_gen_add_reg(dest, in1, in2);
b47a4a02
SS
3050 }
3051 if (cond_need_sv(c)) {
98cd9ca7 3052 sv = do_add_sv(ctx, dest, in1, in2);
98cd9ca7
RH
3053 }
3054
bdcccc17 3055 cond = do_cond(c * 2 + f, dest, cb_cond, sv);
43675d20 3056 save_gpr(ctx, r, dest);
01afb7be 3057 return do_cbranch(ctx, disp, n, &cond);
98cd9ca7
RH
3058}
3059
01afb7be
RH
3060static bool trans_addb(DisasContext *ctx, arg_addb *a)
3061{
3062 nullify_over(ctx);
3063 return do_addb(ctx, a->r2, load_gpr(ctx, a->r1), a->c, a->f, a->n, a->disp);
3064}
3065
3066static bool trans_addbi(DisasContext *ctx, arg_addbi *a)
3067{
3068 nullify_over(ctx);
d4e58033 3069 return do_addb(ctx, a->r, tcg_constant_reg(a->i), a->c, a->f, a->n, a->disp);
01afb7be
RH
3070}
3071
3072static bool trans_bb_sar(DisasContext *ctx, arg_bb_sar *a)
98cd9ca7 3073{
eaa3783b 3074 TCGv_reg tmp, tcg_r;
98cd9ca7 3075 DisasCond cond;
1e9ab9fb 3076 bool d = false;
98cd9ca7
RH
3077
3078 nullify_over(ctx);
3079
3080 tmp = tcg_temp_new();
01afb7be 3081 tcg_r = load_gpr(ctx, a->r);
1e9ab9fb
RH
3082 if (cond_need_ext(ctx, d)) {
3083 /* Force shift into [32,63] */
3084 tcg_gen_ori_reg(tmp, cpu_sar, 32);
3085 tcg_gen_shl_reg(tmp, tcg_r, tmp);
3086 } else {
3087 tcg_gen_shl_reg(tmp, tcg_r, cpu_sar);
3088 }
98cd9ca7 3089
1e9ab9fb 3090 cond = cond_make_0_tmp(a->c ? TCG_COND_GE : TCG_COND_LT, tmp);
01afb7be 3091 return do_cbranch(ctx, a->disp, a->n, &cond);
98cd9ca7
RH
3092}
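/*
 * How the BB test above works: SAR numbers bits big-endian (0 = MSB), so
 * shifting the register left by SAR moves the selected bit into the sign
 * position, and the branch condition is then just a signed test against
 * zero (GE => bit clear, LT => bit set).  When the operand is a 32-bit
 * word living in a wider register, forcing the shift count into [32,63]
 * brings that word's bit to the top of the 64-bit value instead.
 */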
3093
01afb7be
RH
3094static bool trans_bb_imm(DisasContext *ctx, arg_bb_imm *a)
3095{
3096 TCGv_reg tmp, tcg_r;
3097 DisasCond cond;
1e9ab9fb
RH
3098 bool d = false;
3099 int p;
01afb7be
RH
3100
3101 nullify_over(ctx);
3102
3103 tmp = tcg_temp_new();
3104 tcg_r = load_gpr(ctx, a->r);
1e9ab9fb
RH
3105 p = a->p | (cond_need_ext(ctx, d) ? 32 : 0);
3106 tcg_gen_shli_reg(tmp, tcg_r, p);
01afb7be
RH
3107
3108 cond = cond_make_0(a->c ? TCG_COND_GE : TCG_COND_LT, tmp);
01afb7be
RH
3109 return do_cbranch(ctx, a->disp, a->n, &cond);
3110}
3111
3112static bool trans_movb(DisasContext *ctx, arg_movb *a)
98cd9ca7 3113{
eaa3783b 3114 TCGv_reg dest;
98cd9ca7
RH
3115 DisasCond cond;
3116
3117 nullify_over(ctx);
3118
01afb7be
RH
3119 dest = dest_gpr(ctx, a->r2);
3120 if (a->r1 == 0) {
eaa3783b 3121 tcg_gen_movi_reg(dest, 0);
98cd9ca7 3122 } else {
01afb7be 3123 tcg_gen_mov_reg(dest, cpu_gr[a->r1]);
98cd9ca7
RH
3124 }
3125
01afb7be
RH
3126 cond = do_sed_cond(a->c, dest);
3127 return do_cbranch(ctx, a->disp, a->n, &cond);
3128}
3129
3130static bool trans_movbi(DisasContext *ctx, arg_movbi *a)
3131{
3132 TCGv_reg dest;
3133 DisasCond cond;
3134
3135 nullify_over(ctx);
3136
3137 dest = dest_gpr(ctx, a->r);
3138 tcg_gen_movi_reg(dest, a->i);
3139
3140 cond = do_sed_cond(a->c, dest);
3141 return do_cbranch(ctx, a->disp, a->n, &cond);
98cd9ca7
RH
3142}
3143
30878590 3144static bool trans_shrpw_sar(DisasContext *ctx, arg_shrpw_sar *a)
0b1347d2 3145{
eaa3783b 3146 TCGv_reg dest;
0b1347d2 3147
30878590 3148 if (a->c) {
0b1347d2
RH
3149 nullify_over(ctx);
3150 }
3151
30878590
RH
3152 dest = dest_gpr(ctx, a->t);
3153 if (a->r1 == 0) {
3154 tcg_gen_ext32u_reg(dest, load_gpr(ctx, a->r2));
eaa3783b 3155 tcg_gen_shr_reg(dest, dest, cpu_sar);
30878590 3156 } else if (a->r1 == a->r2) {
0b1347d2 3157 TCGv_i32 t32 = tcg_temp_new_i32();
e1d635e8
RH
3158 TCGv_i32 s32 = tcg_temp_new_i32();
3159
30878590 3160 tcg_gen_trunc_reg_i32(t32, load_gpr(ctx, a->r2));
e1d635e8
RH
3161 tcg_gen_trunc_reg_i32(s32, cpu_sar);
3162 tcg_gen_rotr_i32(t32, t32, s32);
eaa3783b 3163 tcg_gen_extu_i32_reg(dest, t32);
0b1347d2
RH
3164 } else {
3165 TCGv_i64 t = tcg_temp_new_i64();
3166 TCGv_i64 s = tcg_temp_new_i64();
3167
30878590 3168 tcg_gen_concat_reg_i64(t, load_gpr(ctx, a->r2), load_gpr(ctx, a->r1));
eaa3783b 3169 tcg_gen_extu_reg_i64(s, cpu_sar);
0b1347d2 3170 tcg_gen_shr_i64(t, t, s);
eaa3783b 3171 tcg_gen_trunc_i64_reg(dest, t);
0b1347d2 3172 }
30878590 3173 save_gpr(ctx, a->t, dest);
0b1347d2
RH
3174
3175 /* Install the new nullification. */
3176 cond_free(&ctx->null_cond);
30878590
RH
3177 if (a->c) {
3178 ctx->null_cond = do_sed_cond(a->c, dest);
0b1347d2 3179 }
31234768 3180 return nullify_end(ctx);
0b1347d2
RH
3181}
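/*
 * The three paths above cover SHRPW's cases: with r1 == 0 the high word
 * is zero, so a plain shift of the zero-extended r2 suffices; with
 * r1 == r2 the double-word shift degenerates into a 32-bit rotate; the
 * general case really concatenates r1:r2 into 64 bits and shifts right
 * by SAR before truncating back to 32 bits.
 */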
3182
30878590 3183static bool trans_shrpw_imm(DisasContext *ctx, arg_shrpw_imm *a)
0b1347d2 3184{
30878590 3185 unsigned sa = 31 - a->cpos;
eaa3783b 3186 TCGv_reg dest, t2;
0b1347d2 3187
30878590 3188 if (a->c) {
0b1347d2
RH
3189 nullify_over(ctx);
3190 }
3191
30878590
RH
3192 dest = dest_gpr(ctx, a->t);
3193 t2 = load_gpr(ctx, a->r2);
05bfd4db
RH
3194 if (a->r1 == 0) {
3195 tcg_gen_extract_reg(dest, t2, sa, 32 - sa);
3196 } else if (TARGET_REGISTER_BITS == 32) {
3197 tcg_gen_extract2_reg(dest, t2, cpu_gr[a->r1], sa);
3198 } else if (a->r1 == a->r2) {
0b1347d2 3199 TCGv_i32 t32 = tcg_temp_new_i32();
eaa3783b 3200 tcg_gen_trunc_reg_i32(t32, t2);
0b1347d2 3201 tcg_gen_rotri_i32(t32, t32, sa);
eaa3783b 3202 tcg_gen_extu_i32_reg(dest, t32);
0b1347d2 3203 } else {
05bfd4db
RH
3204 TCGv_i64 t64 = tcg_temp_new_i64();
3205 tcg_gen_concat_reg_i64(t64, t2, cpu_gr[a->r1]);
3206 tcg_gen_shri_i64(t64, t64, sa);
3207 tcg_gen_trunc_i64_reg(dest, t64);
0b1347d2 3208 }
30878590 3209 save_gpr(ctx, a->t, dest);
0b1347d2
RH
3210
3211 /* Install the new nullification. */
3212 cond_free(&ctx->null_cond);
30878590
RH
3213 if (a->c) {
3214 ctx->null_cond = do_sed_cond(a->c, dest);
0b1347d2 3215 }
31234768 3216 return nullify_end(ctx);
0b1347d2
RH
3217}
3218
30878590 3219static bool trans_extrw_sar(DisasContext *ctx, arg_extrw_sar *a)
0b1347d2 3220{
30878590 3221 unsigned len = 32 - a->clen;
eaa3783b 3222 TCGv_reg dest, src, tmp;
0b1347d2 3223
30878590 3224 if (a->c) {
0b1347d2
RH
3225 nullify_over(ctx);
3226 }
3227
30878590
RH
3228 dest = dest_gpr(ctx, a->t);
3229 src = load_gpr(ctx, a->r);
0b1347d2
RH
3230 tmp = tcg_temp_new();
3231
3232 /* Recall that SAR is using big-endian bit numbering. */
eaa3783b 3233 tcg_gen_xori_reg(tmp, cpu_sar, TARGET_REGISTER_BITS - 1);
30878590 3234 if (a->se) {
eaa3783b
RH
3235 tcg_gen_sar_reg(dest, src, tmp);
3236 tcg_gen_sextract_reg(dest, dest, 0, len);
0b1347d2 3237 } else {
eaa3783b
RH
3238 tcg_gen_shr_reg(dest, src, tmp);
3239 tcg_gen_extract_reg(dest, dest, 0, len);
0b1347d2 3240 }
30878590 3241 save_gpr(ctx, a->t, dest);
0b1347d2
RH
3242
3243 /* Install the new nullification. */
3244 cond_free(&ctx->null_cond);
30878590
RH
3245 if (a->c) {
3246 ctx->null_cond = do_sed_cond(a->c, dest);
0b1347d2 3247 }
31234768 3248 return nullify_end(ctx);
0b1347d2
RH
3249}
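/*
 * As in the shift/deposit helpers, SAR is big-endian here: it names the
 * bit at which the extracted field ends, counting from the MSB.  XORing
 * with TARGET_REGISTER_BITS - 1 turns that into the little-endian right
 * shift count, e.g. SAR = 23 on a 32-bit register becomes a shift of
 * 23 ^ 31 = 8, after which the low "len" bits are the wanted field.
 */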
3250
30878590 3251static bool trans_extrw_imm(DisasContext *ctx, arg_extrw_imm *a)
0b1347d2 3252{
30878590
RH
3253 unsigned len = 32 - a->clen;
3254 unsigned cpos = 31 - a->pos;
eaa3783b 3255 TCGv_reg dest, src;
0b1347d2 3256
30878590 3257 if (a->c) {
0b1347d2
RH
3258 nullify_over(ctx);
3259 }
3260
30878590
RH
3261 dest = dest_gpr(ctx, a->t);
3262 src = load_gpr(ctx, a->r);
3263 if (a->se) {
eaa3783b 3264 tcg_gen_sextract_reg(dest, src, cpos, len);
0b1347d2 3265 } else {
eaa3783b 3266 tcg_gen_extract_reg(dest, src, cpos, len);
0b1347d2 3267 }
30878590 3268 save_gpr(ctx, a->t, dest);
0b1347d2
RH
3269
3270 /* Install the new nullification. */
3271 cond_free(&ctx->null_cond);
30878590
RH
3272 if (a->c) {
3273 ctx->null_cond = do_sed_cond(a->c, dest);
0b1347d2 3274 }
31234768 3275 return nullify_end(ctx);
0b1347d2
RH
3276}
3277
30878590 3278static bool trans_depwi_imm(DisasContext *ctx, arg_depwi_imm *a)
0b1347d2 3279{
30878590 3280 unsigned len = 32 - a->clen;
eaa3783b
RH
3281 target_sreg mask0, mask1;
3282 TCGv_reg dest;
0b1347d2 3283
30878590 3284 if (a->c) {
0b1347d2
RH
3285 nullify_over(ctx);
3286 }
30878590
RH
3287 if (a->cpos + len > 32) {
3288 len = 32 - a->cpos;
0b1347d2
RH
3289 }
3290
30878590
RH
3291 dest = dest_gpr(ctx, a->t);
3292 mask0 = deposit64(0, a->cpos, len, a->i);
3293 mask1 = deposit64(-1, a->cpos, len, a->i);
0b1347d2 3294
30878590
RH
3295 if (a->nz) {
3296 TCGv_reg src = load_gpr(ctx, a->t);
0b1347d2 3297 if (mask1 != -1) {
eaa3783b 3298 tcg_gen_andi_reg(dest, src, mask1);
0b1347d2
RH
3299 src = dest;
3300 }
eaa3783b 3301 tcg_gen_ori_reg(dest, src, mask0);
0b1347d2 3302 } else {
eaa3783b 3303 tcg_gen_movi_reg(dest, mask0);
0b1347d2 3304 }
30878590 3305 save_gpr(ctx, a->t, dest);
0b1347d2
RH
3306
3307 /* Install the new nullification. */
3308 cond_free(&ctx->null_cond);
30878590
RH
3309 if (a->c) {
3310 ctx->null_cond = do_sed_cond(a->c, dest);
0b1347d2 3311 }
31234768 3312 return nullify_end(ctx);
0b1347d2
RH
3313}
3314
30878590 3315static bool trans_depw_imm(DisasContext *ctx, arg_depw_imm *a)
0b1347d2 3316{
30878590
RH
3317 unsigned rs = a->nz ? a->t : 0;
3318 unsigned len = 32 - a->clen;
eaa3783b 3319 TCGv_reg dest, val;
0b1347d2 3320
30878590 3321 if (a->c) {
0b1347d2
RH
3322 nullify_over(ctx);
3323 }
30878590
RH
3324 if (a->cpos + len > 32) {
3325 len = 32 - a->cpos;
0b1347d2
RH
3326 }
3327
30878590
RH
3328 dest = dest_gpr(ctx, a->t);
3329 val = load_gpr(ctx, a->r);
0b1347d2 3330 if (rs == 0) {
30878590 3331 tcg_gen_deposit_z_reg(dest, val, a->cpos, len);
0b1347d2 3332 } else {
30878590 3333 tcg_gen_deposit_reg(dest, cpu_gr[rs], val, a->cpos, len);
0b1347d2 3334 }
30878590 3335 save_gpr(ctx, a->t, dest);
0b1347d2
RH
3336
3337 /* Install the new nullification. */
3338 cond_free(&ctx->null_cond);
30878590
RH
3339 if (a->c) {
3340 ctx->null_cond = do_sed_cond(a->c, dest);
0b1347d2 3341 }
31234768 3342 return nullify_end(ctx);
0b1347d2
RH
3343}
3344
30878590
RH
3345static bool do_depw_sar(DisasContext *ctx, unsigned rt, unsigned c,
3346 unsigned nz, unsigned clen, TCGv_reg val)
0b1347d2 3347{
0b1347d2
RH
3348 unsigned rs = nz ? rt : 0;
3349 unsigned len = 32 - clen;
30878590 3350 TCGv_reg mask, tmp, shift, dest;
0b1347d2
RH
3351 unsigned msb = 1U << (len - 1);
3352
0b1347d2
RH
3353 dest = dest_gpr(ctx, rt);
3354 shift = tcg_temp_new();
3355 tmp = tcg_temp_new();
3356
3357 /* Convert big-endian bit numbering in SAR to left-shift. */
eaa3783b 3358 tcg_gen_xori_reg(shift, cpu_sar, TARGET_REGISTER_BITS - 1);
0b1347d2 3359
0992a930
RH
3360 mask = tcg_temp_new();
3361 tcg_gen_movi_reg(mask, msb + (msb - 1));
eaa3783b 3362 tcg_gen_and_reg(tmp, val, mask);
0b1347d2 3363 if (rs) {
eaa3783b
RH
3364 tcg_gen_shl_reg(mask, mask, shift);
3365 tcg_gen_shl_reg(tmp, tmp, shift);
3366 tcg_gen_andc_reg(dest, cpu_gr[rs], mask);
3367 tcg_gen_or_reg(dest, dest, tmp);
0b1347d2 3368 } else {
eaa3783b 3369 tcg_gen_shl_reg(dest, tmp, shift);
0b1347d2 3370 }
0b1347d2
RH
3371 save_gpr(ctx, rt, dest);
3372
3373 /* Install the new nullification. */
3374 cond_free(&ctx->null_cond);
3375 if (c) {
3376 ctx->null_cond = do_sed_cond(c, dest);
3377 }
31234768 3378 return nullify_end(ctx);
0b1347d2
RH
3379}
3380
30878590
RH
3381static bool trans_depw_sar(DisasContext *ctx, arg_depw_sar *a)
3382{
a6deecce
SS
3383 if (a->c) {
3384 nullify_over(ctx);
3385 }
30878590
RH
3386 return do_depw_sar(ctx, a->t, a->c, a->nz, a->clen, load_gpr(ctx, a->r));
3387}
3388
3389static bool trans_depwi_sar(DisasContext *ctx, arg_depwi_sar *a)
3390{
a6deecce
SS
3391 if (a->c) {
3392 nullify_over(ctx);
3393 }
d4e58033 3394 return do_depw_sar(ctx, a->t, a->c, a->nz, a->clen, tcg_constant_reg(a->i));
30878590 3395}
0b1347d2 3396
8340f534 3397static bool trans_be(DisasContext *ctx, arg_be *a)
98cd9ca7 3398{
660eefe1 3399 TCGv_reg tmp;
98cd9ca7 3400
c301f34e 3401#ifdef CONFIG_USER_ONLY
98cd9ca7
RH
3402 /* ??? It seems like there should be a good way of using
3403 "be disp(sr2, r0)", the canonical gateway entry mechanism
3404 to our advantage. But that appears to be inconvenient to
3405 manage along side branch delay slots. Therefore we handle
3406 entry into the gateway page via absolute address. */
98cd9ca7
RH
3407 /* Since we don't implement spaces, just branch. Do notice the special
3408 case of "be disp(*,r0)" using a direct branch to disp, so that we can
3409 goto_tb to the TB containing the syscall. */
8340f534
RH
3410 if (a->b == 0) {
3411 return do_dbranch(ctx, a->disp, a->l, a->n);
98cd9ca7 3412 }
c301f34e 3413#else
c301f34e 3414 nullify_over(ctx);
660eefe1
RH
3415#endif
3416
e12c6309 3417 tmp = tcg_temp_new();
8340f534 3418 tcg_gen_addi_reg(tmp, load_gpr(ctx, a->b), a->disp);
660eefe1 3419 tmp = do_ibranch_priv(ctx, tmp);
c301f34e
RH
3420
3421#ifdef CONFIG_USER_ONLY
8340f534 3422 return do_ibranch(ctx, tmp, a->l, a->n);
c301f34e
RH
3423#else
3424 TCGv_i64 new_spc = tcg_temp_new_i64();
3425
8340f534
RH
3426 load_spr(ctx, new_spc, a->sp);
3427 if (a->l) {
c301f34e
RH
3428 copy_iaoq_entry(cpu_gr[31], ctx->iaoq_n, ctx->iaoq_n_var);
3429 tcg_gen_mov_i64(cpu_sr[0], cpu_iasq_f);
3430 }
8340f534 3431 if (a->n && use_nullify_skip(ctx)) {
c301f34e
RH
3432 tcg_gen_mov_reg(cpu_iaoq_f, tmp);
3433 tcg_gen_addi_reg(cpu_iaoq_b, cpu_iaoq_f, 4);
3434 tcg_gen_mov_i64(cpu_iasq_f, new_spc);
3435 tcg_gen_mov_i64(cpu_iasq_b, cpu_iasq_f);
3436 } else {
3437 copy_iaoq_entry(cpu_iaoq_f, ctx->iaoq_b, cpu_iaoq_b);
3438 if (ctx->iaoq_b == -1) {
3439 tcg_gen_mov_i64(cpu_iasq_f, cpu_iasq_b);
3440 }
3441 tcg_gen_mov_reg(cpu_iaoq_b, tmp);
3442 tcg_gen_mov_i64(cpu_iasq_b, new_spc);
8340f534 3443 nullify_set(ctx, a->n);
c301f34e 3444 }
c301f34e 3445 tcg_gen_lookup_and_goto_ptr();
31234768
RH
3446 ctx->base.is_jmp = DISAS_NORETURN;
3447 return nullify_end(ctx);
c301f34e 3448#endif
98cd9ca7
RH
3449}

static bool trans_bl(DisasContext *ctx, arg_bl *a)
{
    return do_dbranch(ctx, iaoq_dest(ctx, a->disp), a->l, a->n);
}

static bool trans_b_gate(DisasContext *ctx, arg_b_gate *a)
{
    target_ureg dest = iaoq_dest(ctx, a->disp);

    nullify_over(ctx);

    /* Make sure the caller hasn't done something weird with the queue.
     * ??? This is not quite the same as the PSW[B] bit, which would be
     * expensive to track.  Real hardware will trap for
     *    b  gateway
     *    b  gateway+4  (in delay slot of first branch)
     * However, checking for a non-sequential instruction queue *will*
     * diagnose the security hole
     *    b  gateway
     *    b  evil
     * in which instructions at evil would run with increased privs.
     */
    if (ctx->iaoq_b == -1 || ctx->iaoq_b != ctx->iaoq_f + 4) {
        return gen_illegal(ctx);
    }

#ifndef CONFIG_USER_ONLY
    if (ctx->tb_flags & PSW_C) {
        CPUHPPAState *env = cpu_env(ctx->cs);
        int type = hppa_artype_for_page(env, ctx->base.pc_next);
        /* If we could not find a TLB entry, then we need to generate an
           ITLB miss exception so the kernel will provide it.
           The resulting TLB fill operation will invalidate this TB and
           we will re-translate, at which point we *will* be able to find
           the TLB entry and determine if this is in fact a gateway page.  */
        if (type < 0) {
            gen_excp(ctx, EXCP_ITLB_MISS);
            return true;
        }
        /* No change for non-gateway pages or for priv decrease.  */
        if (type >= 4 && type - 4 < ctx->privilege) {
            dest = deposit32(dest, 0, 2, type - 4);
        }
    } else {
        dest &= -4;  /* priv = 0 */
    }
#endif

    if (a->l) {
        TCGv_reg tmp = dest_gpr(ctx, a->l);
        if (ctx->privilege < 3) {
            tcg_gen_andi_reg(tmp, tmp, -4);
        }
        tcg_gen_ori_reg(tmp, tmp, ctx->privilege);
        save_gpr(ctx, a->l, tmp);
    }

    return do_dbranch(ctx, dest, 0, a->n);
}
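
/*
 * Illustration of the promotion rule above, assuming access-rights types
 * 4..7 denote gateway pages as reported by hppa_artype_for_page: branching
 * from privilege 3 to a type-4 page rewrites the low two bits of dest to 0
 * (full promotion), while a type-7 page reached from privilege 2 leaves
 * dest unchanged, since 7 - 4 == 3 would be a privilege decrease.
 */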

static bool trans_blr(DisasContext *ctx, arg_blr *a)
{
    if (a->x) {
        TCGv_reg tmp = tcg_temp_new();
        tcg_gen_shli_reg(tmp, load_gpr(ctx, a->x), 3);
        tcg_gen_addi_reg(tmp, tmp, ctx->iaoq_f + 8);
        /* The computation here never changes privilege level.  */
        return do_ibranch(ctx, tmp, a->l, a->n);
    } else {
        /* BLR R0,RX is a good way to load PC+8 into RX.  */
        return do_dbranch(ctx, ctx->iaoq_f + 8, a->l, a->n);
    }
}
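
/*
 * Example of the computation above: with iaoq_f at 0x2000, "blr r7,r2"
 * branches to GR[7] * 8 + 0x2008 and links 0x2008 into r2, while
 * "blr r0,r2" branches to 0x2008 itself -- the fall-through address --
 * which is why it doubles as a "load PC+8" idiom.
 */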

static bool trans_bv(DisasContext *ctx, arg_bv *a)
{
    TCGv_reg dest;

    if (a->x == 0) {
        dest = load_gpr(ctx, a->b);
    } else {
        dest = tcg_temp_new();
        tcg_gen_shli_reg(dest, load_gpr(ctx, a->x), 3);
        tcg_gen_add_reg(dest, dest, load_gpr(ctx, a->b));
    }
    dest = do_ibranch_priv(ctx, dest);
    return do_ibranch(ctx, dest, 0, a->n);
}
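
/*
 * Similarly for BV: "bv r0(r5)" branches to GR[5] and "bv r3(r5)" to
 * GR[5] + GR[3] * 8, both after the privilege check in do_ibranch_priv.
 */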

static bool trans_bve(DisasContext *ctx, arg_bve *a)
{
    TCGv_reg dest;

#ifdef CONFIG_USER_ONLY
    dest = do_ibranch_priv(ctx, load_gpr(ctx, a->b));
    return do_ibranch(ctx, dest, a->l, a->n);
#else
    nullify_over(ctx);
    dest = do_ibranch_priv(ctx, load_gpr(ctx, a->b));

    copy_iaoq_entry(cpu_iaoq_f, ctx->iaoq_b, cpu_iaoq_b);
    if (ctx->iaoq_b == -1) {
        tcg_gen_mov_i64(cpu_iasq_f, cpu_iasq_b);
    }
    copy_iaoq_entry(cpu_iaoq_b, -1, dest);
    tcg_gen_mov_i64(cpu_iasq_b, space_select(ctx, 0, dest));
    if (a->l) {
        copy_iaoq_entry(cpu_gr[a->l], ctx->iaoq_n, ctx->iaoq_n_var);
    }
    nullify_set(ctx, a->n);
    tcg_gen_lookup_and_goto_ptr();
    ctx->base.is_jmp = DISAS_NORETURN;
    return nullify_end(ctx);
#endif
}

/*
 * Float class 0
 */

static void gen_fcpy_f(TCGv_i32 dst, TCGv_env unused, TCGv_i32 src)
{
    tcg_gen_mov_i32(dst, src);
}

static bool trans_fid_f(DisasContext *ctx, arg_fid_f *a)
{
    uint64_t ret;

    if (TARGET_REGISTER_BITS == 64) {
        ret = 0x13080000000000ULL; /* PA8700 (PCX-W2) */
    } else {
        ret = 0x0f080000000000ULL; /* PA7300LC (PCX-L2) */
    }

    nullify_over(ctx);
    save_frd(0, tcg_constant_i64(ret));
    return nullify_end(ctx);
}

static bool trans_fcpy_f(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_wew(ctx, a->t, a->r, gen_fcpy_f);
}

static void gen_fcpy_d(TCGv_i64 dst, TCGv_env unused, TCGv_i64 src)
{
    tcg_gen_mov_i64(dst, src);
}

static bool trans_fcpy_d(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_ded(ctx, a->t, a->r, gen_fcpy_d);
}

static void gen_fabs_f(TCGv_i32 dst, TCGv_env unused, TCGv_i32 src)
{
    tcg_gen_andi_i32(dst, src, INT32_MAX);
}

static bool trans_fabs_f(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_wew(ctx, a->t, a->r, gen_fabs_f);
}

static void gen_fabs_d(TCGv_i64 dst, TCGv_env unused, TCGv_i64 src)
{
    tcg_gen_andi_i64(dst, src, INT64_MAX);
}

static bool trans_fabs_d(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_ded(ctx, a->t, a->r, gen_fabs_d);
}

static bool trans_fsqrt_f(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_wew(ctx, a->t, a->r, gen_helper_fsqrt_s);
}

static bool trans_fsqrt_d(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_ded(ctx, a->t, a->r, gen_helper_fsqrt_d);
}

static bool trans_frnd_f(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_wew(ctx, a->t, a->r, gen_helper_frnd_s);
}

static bool trans_frnd_d(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_ded(ctx, a->t, a->r, gen_helper_frnd_d);
}

static void gen_fneg_f(TCGv_i32 dst, TCGv_env unused, TCGv_i32 src)
{
    tcg_gen_xori_i32(dst, src, INT32_MIN);
}

static bool trans_fneg_f(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_wew(ctx, a->t, a->r, gen_fneg_f);
}

static void gen_fneg_d(TCGv_i64 dst, TCGv_env unused, TCGv_i64 src)
{
    tcg_gen_xori_i64(dst, src, INT64_MIN);
}

static bool trans_fneg_d(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_ded(ctx, a->t, a->r, gen_fneg_d);
}

static void gen_fnegabs_f(TCGv_i32 dst, TCGv_env unused, TCGv_i32 src)
{
    tcg_gen_ori_i32(dst, src, INT32_MIN);
}

static bool trans_fnegabs_f(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_wew(ctx, a->t, a->r, gen_fnegabs_f);
}

static void gen_fnegabs_d(TCGv_i64 dst, TCGv_env unused, TCGv_i64 src)
{
    tcg_gen_ori_i64(dst, src, INT64_MIN);
}

static bool trans_fnegabs_d(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_ded(ctx, a->t, a->r, gen_fnegabs_d);
}
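
/*
 * The gen_* helpers above ignore their env argument: fcpy is a raw move,
 * and the rest are pure sign-bit manipulations -- AND with INT32_MAX /
 * INT64_MAX clears the sign bit (fabs), XOR with INT32_MIN / INT64_MIN
 * flips it (fneg), and OR with INT32_MIN / INT64_MIN sets it
 * (fnegabs, i.e. -|x|).
 */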

/*
 * Float class 1
 */

static bool trans_fcnv_d_f(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_d_s);
}

static bool trans_fcnv_f_d(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_s_d);
}

static bool trans_fcnv_w_f(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_wew(ctx, a->t, a->r, gen_helper_fcnv_w_s);
}

static bool trans_fcnv_q_f(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_dw_s);
}

static bool trans_fcnv_w_d(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_w_d);
}

static bool trans_fcnv_q_d(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_ded(ctx, a->t, a->r, gen_helper_fcnv_dw_d);
}

static bool trans_fcnv_f_w(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_wew(ctx, a->t, a->r, gen_helper_fcnv_s_w);
}

static bool trans_fcnv_d_w(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_d_w);
}

static bool trans_fcnv_f_q(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_s_dw);
}

static bool trans_fcnv_d_q(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_ded(ctx, a->t, a->r, gen_helper_fcnv_d_dw);
}

static bool trans_fcnv_t_f_w(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_wew(ctx, a->t, a->r, gen_helper_fcnv_t_s_w);
}

static bool trans_fcnv_t_d_w(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_t_d_w);
}

static bool trans_fcnv_t_f_q(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_t_s_dw);
}

static bool trans_fcnv_t_d_q(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_ded(ctx, a->t, a->r, gen_helper_fcnv_t_d_dw);
}

static bool trans_fcnv_uw_f(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_wew(ctx, a->t, a->r, gen_helper_fcnv_uw_s);
}

static bool trans_fcnv_uq_f(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_udw_s);
}

static bool trans_fcnv_uw_d(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_uw_d);
}

static bool trans_fcnv_uq_d(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_ded(ctx, a->t, a->r, gen_helper_fcnv_udw_d);
}

static bool trans_fcnv_f_uw(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_wew(ctx, a->t, a->r, gen_helper_fcnv_s_uw);
}

static bool trans_fcnv_d_uw(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_d_uw);
}

static bool trans_fcnv_f_uq(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_s_udw);
}

static bool trans_fcnv_d_uq(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_ded(ctx, a->t, a->r, gen_helper_fcnv_d_udw);
}

static bool trans_fcnv_t_f_uw(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_wew(ctx, a->t, a->r, gen_helper_fcnv_t_s_uw);
}

static bool trans_fcnv_t_d_uw(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_wed(ctx, a->t, a->r, gen_helper_fcnv_t_d_uw);
}

static bool trans_fcnv_t_f_uq(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_dew(ctx, a->t, a->r, gen_helper_fcnv_t_s_udw);
}

static bool trans_fcnv_t_d_uq(DisasContext *ctx, arg_fclass01 *a)
{
    return do_fop_ded(ctx, a->t, a->r, gen_helper_fcnv_t_d_udw);
}

/*
 * Float class 2
 */

static bool trans_fcmp_f(DisasContext *ctx, arg_fclass2 *a)
{
    TCGv_i32 ta, tb, tc, ty;

    nullify_over(ctx);

    ta = load_frw0_i32(a->r1);
    tb = load_frw0_i32(a->r2);
    ty = tcg_constant_i32(a->y);
    tc = tcg_constant_i32(a->c);

    gen_helper_fcmp_s(tcg_env, ta, tb, ty, tc);

    return nullify_end(ctx);
}

static bool trans_fcmp_d(DisasContext *ctx, arg_fclass2 *a)
{
    TCGv_i64 ta, tb;
    TCGv_i32 tc, ty;

    nullify_over(ctx);

    ta = load_frd0(a->r1);
    tb = load_frd0(a->r2);
    ty = tcg_constant_i32(a->y);
    tc = tcg_constant_i32(a->c);

    gen_helper_fcmp_d(tcg_env, ta, tb, ty, tc);

    return nullify_end(ctx);
}
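
/*
 * The fcmp helpers above produce no TCG result value; they only update
 * the FP status word in the env (assuming gen_helper_fcmp_s/_d maintain
 * the C bit and compare-queue bits mirrored in fr0_shadow, which is what
 * trans_ftest below reads).
 */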

static bool trans_ftest(DisasContext *ctx, arg_ftest *a)
{
    TCGv_reg t;

    nullify_over(ctx);

    t = tcg_temp_new();
    tcg_gen_ld32u_reg(t, tcg_env, offsetof(CPUHPPAState, fr0_shadow));

    if (a->y == 1) {
        int mask;
        bool inv = false;

        switch (a->c) {
        case 0: /* simple */
            tcg_gen_andi_reg(t, t, 0x4000000);
            ctx->null_cond = cond_make_0(TCG_COND_NE, t);
            goto done;
        case 2: /* rej */
            inv = true;
            /* fallthru */
        case 1: /* acc */
            mask = 0x43ff800;
            break;
        case 6: /* rej8 */
            inv = true;
            /* fallthru */
        case 5: /* acc8 */
            mask = 0x43f8000;
            break;
        case 9: /* acc6 */
            mask = 0x43e0000;
            break;
        case 13: /* acc4 */
            mask = 0x4380000;
            break;
        case 17: /* acc2 */
            mask = 0x4200000;
            break;
        default:
            gen_illegal(ctx);
            return true;
        }
        if (inv) {
            TCGv_reg c = tcg_constant_reg(mask);
            tcg_gen_or_reg(t, t, c);
            ctx->null_cond = cond_make(TCG_COND_EQ, t, c);
        } else {
            tcg_gen_andi_reg(t, t, mask);
            ctx->null_cond = cond_make_0(TCG_COND_EQ, t);
        }
    } else {
        unsigned cbit = (a->y ^ 1) - 1;

        tcg_gen_extract_reg(t, t, 21 - cbit, 1);
        ctx->null_cond = cond_make_0(TCG_COND_NE, t);
    }

 done:
    return nullify_end(ctx);
}
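
/*
 * Example for the y != 1 branch of ftest above: the bit number is folded
 * into y as cbit = (y ^ 1) - 1, and the test extracts bit (21 - cbit) of
 * fr0_shadow; e.g. y == 2 gives cbit == 2 and thus bit 19.
 */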

/*
 * Float class 3
 */

static bool trans_fadd_f(DisasContext *ctx, arg_fclass3 *a)
{
    return do_fop_weww(ctx, a->t, a->r1, a->r2, gen_helper_fadd_s);
}

static bool trans_fadd_d(DisasContext *ctx, arg_fclass3 *a)
{
    return do_fop_dedd(ctx, a->t, a->r1, a->r2, gen_helper_fadd_d);
}

static bool trans_fsub_f(DisasContext *ctx, arg_fclass3 *a)
{
    return do_fop_weww(ctx, a->t, a->r1, a->r2, gen_helper_fsub_s);
}

static bool trans_fsub_d(DisasContext *ctx, arg_fclass3 *a)
{
    return do_fop_dedd(ctx, a->t, a->r1, a->r2, gen_helper_fsub_d);
}

static bool trans_fmpy_f(DisasContext *ctx, arg_fclass3 *a)
{
    return do_fop_weww(ctx, a->t, a->r1, a->r2, gen_helper_fmpy_s);
}

static bool trans_fmpy_d(DisasContext *ctx, arg_fclass3 *a)
{
    return do_fop_dedd(ctx, a->t, a->r1, a->r2, gen_helper_fmpy_d);
}

static bool trans_fdiv_f(DisasContext *ctx, arg_fclass3 *a)
{
    return do_fop_weww(ctx, a->t, a->r1, a->r2, gen_helper_fdiv_s);
}

static bool trans_fdiv_d(DisasContext *ctx, arg_fclass3 *a)
{
    return do_fop_dedd(ctx, a->t, a->r1, a->r2, gen_helper_fdiv_d);
}

static bool trans_xmpyu(DisasContext *ctx, arg_xmpyu *a)
{
    TCGv_i64 x, y;

    nullify_over(ctx);

    x = load_frw0_i64(a->r1);
    y = load_frw0_i64(a->r2);
    tcg_gen_mul_i64(x, x, y);
    save_frd(a->t, x);

    return nullify_end(ctx);
}
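
/*
 * xmpyu is an unsigned fixed-point multiply done in the FP registers:
 * two 32-bit operands from single registers produce a 64-bit product in
 * a double register (assuming load_frw0_i64 zero-extends its operand,
 * as the unsigned multiply requires).
 */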

/* Convert the fmpyadd single-precision register encodings to standard.  */
static inline int fmpyadd_s_reg(unsigned r)
{
    return (r & 16) * 2 + 16 + (r & 15);
}
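
/*
 * For example, applying the formula: r = 0..15 map to registers 16..31
 * and r = 16..31 map to 48..63, i.e. bit 4 of the encoding moves up to
 * bit 5 of the internal single-precision register number, with 16 added
 * as the base.
 */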

static bool do_fmpyadd_s(DisasContext *ctx, arg_mpyadd *a, bool is_sub)
{
    int tm = fmpyadd_s_reg(a->tm);
    int ra = fmpyadd_s_reg(a->ra);
    int ta = fmpyadd_s_reg(a->ta);
    int rm2 = fmpyadd_s_reg(a->rm2);
    int rm1 = fmpyadd_s_reg(a->rm1);

    nullify_over(ctx);

    do_fop_weww(ctx, tm, rm1, rm2, gen_helper_fmpy_s);
    do_fop_weww(ctx, ta, ta, ra,
                is_sub ? gen_helper_fsub_s : gen_helper_fadd_s);

    return nullify_end(ctx);
}

static bool trans_fmpyadd_f(DisasContext *ctx, arg_mpyadd *a)
{
    return do_fmpyadd_s(ctx, a, false);
}

static bool trans_fmpysub_f(DisasContext *ctx, arg_mpyadd *a)
{
    return do_fmpyadd_s(ctx, a, true);
}

static bool do_fmpyadd_d(DisasContext *ctx, arg_mpyadd *a, bool is_sub)
{
    nullify_over(ctx);

    do_fop_dedd(ctx, a->tm, a->rm1, a->rm2, gen_helper_fmpy_d);
    do_fop_dedd(ctx, a->ta, a->ta, a->ra,
                is_sub ? gen_helper_fsub_d : gen_helper_fadd_d);

    return nullify_end(ctx);
}

static bool trans_fmpyadd_d(DisasContext *ctx, arg_mpyadd *a)
{
    return do_fmpyadd_d(ctx, a, false);
}

static bool trans_fmpysub_d(DisasContext *ctx, arg_mpyadd *a)
{
    return do_fmpyadd_d(ctx, a, true);
}

static bool trans_fmpyfadd_f(DisasContext *ctx, arg_fmpyfadd_f *a)
{
    TCGv_i32 x, y, z;

    nullify_over(ctx);
    x = load_frw0_i32(a->rm1);
    y = load_frw0_i32(a->rm2);
    z = load_frw0_i32(a->ra3);

    if (a->neg) {
        gen_helper_fmpynfadd_s(x, tcg_env, x, y, z);
    } else {
        gen_helper_fmpyfadd_s(x, tcg_env, x, y, z);
    }

    save_frw_i32(a->t, x);
    return nullify_end(ctx);
}

static bool trans_fmpyfadd_d(DisasContext *ctx, arg_fmpyfadd_d *a)
{
    TCGv_i64 x, y, z;

    nullify_over(ctx);
    x = load_frd0(a->rm1);
    y = load_frd0(a->rm2);
    z = load_frd0(a->ra3);

    if (a->neg) {
        gen_helper_fmpynfadd_d(x, tcg_env, x, y, z);
    } else {
        gen_helper_fmpyfadd_d(x, tcg_env, x, y, z);
    }

    save_frd(a->t, x);
    return nullify_end(ctx);
}

static bool trans_diag(DisasContext *ctx, arg_diag *a)
{
    CHECK_MOST_PRIVILEGED(EXCP_PRIV_OPR);
#ifndef CONFIG_USER_ONLY
    if (a->i == 0x100) {
        /* emulate PDC BTLB, called by SeaBIOS-hppa */
        nullify_over(ctx);
        gen_helper_diag_btlb(tcg_env);
        return nullify_end(ctx);
    }
#endif
    qemu_log_mask(LOG_UNIMP, "DIAG opcode 0x%04x ignored\n", a->i);
    return true;
}

static void hppa_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *ctx = container_of(dcbase, DisasContext, base);
    int bound;

    ctx->cs = cs;
    ctx->tb_flags = ctx->base.tb->flags;

#ifdef CONFIG_USER_ONLY
    ctx->privilege = MMU_IDX_TO_PRIV(MMU_USER_IDX);
    ctx->mmu_idx = MMU_USER_IDX;
    ctx->iaoq_f = ctx->base.pc_first | ctx->privilege;
    ctx->iaoq_b = ctx->base.tb->cs_base | ctx->privilege;
    ctx->unalign = (ctx->tb_flags & TB_FLAG_UNALIGN ? MO_UNALN : MO_ALIGN);
#else
    ctx->privilege = (ctx->tb_flags >> TB_FLAG_PRIV_SHIFT) & 3;
    ctx->mmu_idx = (ctx->tb_flags & PSW_D
                    ? PRIV_P_TO_MMU_IDX(ctx->privilege, ctx->tb_flags & PSW_P)
                    : MMU_PHYS_IDX);

    /* Recover the IAOQ values from the GVA + PRIV.  */
    uint64_t cs_base = ctx->base.tb->cs_base;
    uint64_t iasq_f = cs_base & ~0xffffffffull;
    int32_t diff = cs_base;

    ctx->iaoq_f = (ctx->base.pc_first & ~iasq_f) + ctx->privilege;
    ctx->iaoq_b = (diff ? ctx->iaoq_f + diff : -1);
#endif
    ctx->iaoq_n = -1;
    ctx->iaoq_n_var = NULL;

    /* Bound the number of instructions by those left on the page.  */
    bound = -(ctx->base.pc_first | TARGET_PAGE_MASK) / 4;
    ctx->base.max_insns = MIN(ctx->base.max_insns, bound);
}
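
/*
 * A sketch of the cs_base packing consumed above (system emulation),
 * as implied by this code: the high 32 bits carry the front instruction
 * address space (IASQ_Front) and the low 32 bits a signed offset from
 * IAOQ_Front to IAOQ_Back.  A zero offset means the back of the queue
 * could not be encoded relative to the front, which is tracked here as
 * -1 (unknown).
 */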

static void hppa_tr_tb_start(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *ctx = container_of(dcbase, DisasContext, base);

    /* Seed the nullification status from PSW[N], as saved in TB->FLAGS.  */
    ctx->null_cond = cond_make_f();
    ctx->psw_n_nonzero = false;
    if (ctx->tb_flags & PSW_N) {
        ctx->null_cond.c = TCG_COND_ALWAYS;
        ctx->psw_n_nonzero = true;
    }
    ctx->null_lab = NULL;
}

static void hppa_tr_insn_start(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *ctx = container_of(dcbase, DisasContext, base);

    tcg_gen_insn_start(ctx->iaoq_f, ctx->iaoq_b);
}

static void hppa_tr_translate_insn(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *ctx = container_of(dcbase, DisasContext, base);
    CPUHPPAState *env = cpu_env(cs);
    DisasJumpType ret;

    /* Execute one insn.  */
#ifdef CONFIG_USER_ONLY
    if (ctx->base.pc_next < TARGET_PAGE_SIZE) {
        do_page_zero(ctx);
        ret = ctx->base.is_jmp;
        assert(ret != DISAS_NEXT);
    } else
#endif
    {
        /* Always fetch the insn, even if nullified, so that we check
           the page permissions for execute.  */
        uint32_t insn = translator_ldl(env, &ctx->base, ctx->base.pc_next);

        /* Set up the IA queue for the next insn.
           This will be overwritten by a branch.  */
        if (ctx->iaoq_b == -1) {
            ctx->iaoq_n = -1;
            ctx->iaoq_n_var = tcg_temp_new();
            tcg_gen_addi_reg(ctx->iaoq_n_var, cpu_iaoq_b, 4);
        } else {
            ctx->iaoq_n = ctx->iaoq_b + 4;
            ctx->iaoq_n_var = NULL;
        }

        if (unlikely(ctx->null_cond.c == TCG_COND_ALWAYS)) {
            ctx->null_cond.c = TCG_COND_NEVER;
            ret = DISAS_NEXT;
        } else {
            ctx->insn = insn;
            if (!decode(ctx, insn)) {
                gen_illegal(ctx);
            }
            ret = ctx->base.is_jmp;
            assert(ctx->null_lab == NULL);
        }
    }

    /* Advance the insn queue.  Note that this check also detects
       a priority change within the instruction queue.  */
    if (ret == DISAS_NEXT && ctx->iaoq_b != ctx->iaoq_f + 4) {
        if (ctx->iaoq_b != -1 && ctx->iaoq_n != -1
            && use_goto_tb(ctx, ctx->iaoq_b)
            && (ctx->null_cond.c == TCG_COND_NEVER
                || ctx->null_cond.c == TCG_COND_ALWAYS)) {
            nullify_set(ctx, ctx->null_cond.c == TCG_COND_ALWAYS);
            gen_goto_tb(ctx, 0, ctx->iaoq_b, ctx->iaoq_n);
            ctx->base.is_jmp = ret = DISAS_NORETURN;
        } else {
            ctx->base.is_jmp = ret = DISAS_IAQ_N_STALE;
        }
    }
    ctx->iaoq_f = ctx->iaoq_b;
    ctx->iaoq_b = ctx->iaoq_n;
    ctx->base.pc_next += 4;

    switch (ret) {
    case DISAS_NORETURN:
    case DISAS_IAQ_N_UPDATED:
        break;

    case DISAS_NEXT:
    case DISAS_IAQ_N_STALE:
    case DISAS_IAQ_N_STALE_EXIT:
        if (ctx->iaoq_f == -1) {
            tcg_gen_mov_reg(cpu_iaoq_f, cpu_iaoq_b);
            copy_iaoq_entry(cpu_iaoq_b, ctx->iaoq_n, ctx->iaoq_n_var);
#ifndef CONFIG_USER_ONLY
            tcg_gen_mov_i64(cpu_iasq_f, cpu_iasq_b);
#endif
            nullify_save(ctx);
            ctx->base.is_jmp = (ret == DISAS_IAQ_N_STALE_EXIT
                                ? DISAS_EXIT
                                : DISAS_IAQ_N_UPDATED);
        } else if (ctx->iaoq_b == -1) {
            tcg_gen_mov_reg(cpu_iaoq_b, ctx->iaoq_n_var);
        }
        break;

    default:
        g_assert_not_reached();
    }
}
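
/*
 * Rough picture of the queue bookkeeping above: on a straight-line path
 * iaoq_b == iaoq_f + 4, so each insn simply slides the window forward
 * (f <- b, b <- n).  When the insn just translated sat in a branch delay
 * slot, iaoq_b already holds the branch target (possibly with a different
 * privilege level in its low bits), so the fall-through case must either
 * chain directly to that target with goto_tb or flag the queue as stale
 * so it is written back before leaving the TB.
 */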

static void hppa_tr_tb_stop(DisasContextBase *dcbase, CPUState *cs)
{
    DisasContext *ctx = container_of(dcbase, DisasContext, base);
    DisasJumpType is_jmp = ctx->base.is_jmp;

    switch (is_jmp) {
    case DISAS_NORETURN:
        break;
    case DISAS_TOO_MANY:
    case DISAS_IAQ_N_STALE:
    case DISAS_IAQ_N_STALE_EXIT:
        copy_iaoq_entry(cpu_iaoq_f, ctx->iaoq_f, cpu_iaoq_f);
        copy_iaoq_entry(cpu_iaoq_b, ctx->iaoq_b, cpu_iaoq_b);
        nullify_save(ctx);
        /* FALLTHRU */
    case DISAS_IAQ_N_UPDATED:
        if (is_jmp != DISAS_IAQ_N_STALE_EXIT) {
            tcg_gen_lookup_and_goto_ptr();
            break;
        }
        /* FALLTHRU */
    case DISAS_EXIT:
        tcg_gen_exit_tb(NULL, 0);
        break;
    default:
        g_assert_not_reached();
    }
}

static void hppa_tr_disas_log(const DisasContextBase *dcbase,
                              CPUState *cs, FILE *logfile)
{
    target_ulong pc = dcbase->pc_first;

#ifdef CONFIG_USER_ONLY
    switch (pc) {
    case 0x00:
        fprintf(logfile, "IN:\n0x00000000: (null)\n");
        return;
    case 0xb0:
        fprintf(logfile, "IN:\n0x000000b0: light-weight-syscall\n");
        return;
    case 0xe0:
        fprintf(logfile, "IN:\n0x000000e0: set-thread-pointer-syscall\n");
        return;
    case 0x100:
        fprintf(logfile, "IN:\n0x00000100: syscall\n");
        return;
    }
#endif

    fprintf(logfile, "IN: %s\n", lookup_symbol(pc));
    target_disas(logfile, cs, pc, dcbase->tb->size);
}

static const TranslatorOps hppa_tr_ops = {
    .init_disas_context = hppa_tr_init_disas_context,
    .tb_start           = hppa_tr_tb_start,
    .insn_start         = hppa_tr_insn_start,
    .translate_insn     = hppa_tr_translate_insn,
    .tb_stop            = hppa_tr_tb_stop,
    .disas_log          = hppa_tr_disas_log,
};

void gen_intermediate_code(CPUState *cs, TranslationBlock *tb, int *max_insns,
                           target_ulong pc, void *host_pc)
{
    DisasContext ctx;
    translator_loop(cs, tb, max_insns, pc, host_pc, &hppa_tr_ops, &ctx.base);
}